repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
unitedstates/congress-legislators | scripts/validator.py | 13 | 5024 | # Runs various validation tests on current legislators.
import rtyaml
# Congressional district apportionment for the 113th-... Congresses.
# Keys are two-letter USPS state/territory postal codes.
# The territories with delegates have 'T'. All others have the number
# of districts (e.g. 1 for one at-large district).
apportionment = {'AL': 7, 'AK': 1, 'AS': 'T', 'AZ': 9, 'AR': 4, 'CA': 53, 'CO': 7, 'CT': 5, 'DE': 1, 'DC': 'T', 'FL': 27, 'GA': 14, 'GU': 'T', 'HI': 2, 'ID': 2, 'IL': 18, 'IN': 9, 'IA': 4, 'KS': 4, 'KY': 6, 'LA': 6, 'ME': 2, 'MD': 8, 'MA': 9, 'MI': 14, 'MN': 8, 'MS': 4, 'MO': 8, 'MT': 1, 'NE': 3, 'NV': 4, 'NH': 2, 'NJ': 12, 'NM': 3, 'NY': 27, 'NC': 13, 'ND': 1, 'MP': 'T', 'OH': 16, 'OK': 5, 'OR': 5, 'PA': 18, 'PR': 'T', 'RI': 2, 'SC': 7, 'SD': 1, 'TN': 9, 'TX': 36, 'UT': 4, 'VT': 1, 'VI': 'T', 'VA': 11, 'WA': 10, 'WV': 3, 'WI': 8, 'WY': 1}
def run():
    """Validate legislators-current.yaml and report any problems to stdout.

    Loads the current and historical legislator YAML files (relative to
    the scripts/ directory) and checks IDs, biographical fields, term
    dates, state/district assignments, senate classes and ranks, party
    fields, duplicate offices, and duplicate IDs.
    """
    # Load the data files; use context managers so the file handles are
    # closed promptly (the original leaked them via rtyaml.load(open(...))).
    with open("../legislators-current.yaml") as f:
        P = rtyaml.load(f)
    with open("../legislators-historical.yaml") as f:
        P_historical = rtyaml.load(f)

    offices = { }       # (type, state, district-or-class) => list of holders
    senate_ranks = { }  # state => list of (bioguide id, state_rank)

    for p in P:
        # IDs.
        if not p['id'].get('thomas'):
            print("No THOMAS ID for %s." % p['id']['bioguide'])
        elif not isinstance(p['id']['thomas'], str) or p['id']['thomas'][0] != '0':
            # THOMAS IDs are zero-padded strings, so they must start with '0'.
            print("Invalid THOMAS ID for %s: %s." % (p['id']['bioguide'], str(p['id']['thomas'])))

        # Biographical data.
        if p.get("bio", {}).get("gender") not in ("M", "F"):
            print("Gender of %s is not valid: %s." % (p['id']['bioguide'], str(p.get("bio", {}).get("gender")) ))
        if len(p.get("bio", {}).get("birthday", "")) != 10:
            # Birthdays must be ISO dates (YYYY-MM-DD), which are 10 chars.
            print("Birthday of %s is not valid: %s." % (p['id']['bioguide'], p.get("bio", {}).get("birthday", "")))

        # Get the current term.
        term = p['terms'][-1]

        # Start/end dates.
        if term['start'] not in ('2011-01-05', '2013-01-03', '2015-01-06'):
            print("Term start date of %s is not right: %s." % (p['id']['bioguide'], term['start']))
        if term['end'] not in ('2017-01-03', '2019-01-03', '2021-01-03'):
            print("Term end date of %s is not right: %s." % (p['id']['bioguide'], term['end']))

        # State and district.
        if term['state'] not in apportionment:
            print("Term state in %s is invalid: %s." % (p['id']['bioguide'], term['state']))
        else:
            if term['type'] == 'rep':
                ap = apportionment[term['state']]
                if not isinstance(term['district'], int) or term['district'] < 0:
                    print("Term district in %s is invalid: %s." % (p['id']['bioguide'], str(term['district'])))
                elif ap in ("T", 1) and term['district'] != 0:
                    # At-large states and delegate territories use district 0.
                    print("Term district in %s is invalid for an at-large state: %s." % (p['id']['bioguide'], str(term['district'])))
                elif ap not in ("T", 1) and term['district'] == 0:
                    print("Term district in %s is invalid for a not-at-large state: %s." % (p['id']['bioguide'], str(term['district'])))
                elif ap not in ("T", 1) and term['district'] > ap:
                    print("Term district in %s is invalid: %s." % (p['id']['bioguide'], str(term['district'])))
            elif term['type'] == 'sen':
                if term.get("class") not in (1, 2, 3):
                    # Use .get() in the message too so a missing 'class' key
                    # produces a diagnostic instead of a KeyError crash.
                    print("Term class in %s is invalid: %s." % (p['id']['bioguide'], str(term.get('class'))))

        # Make sure there are no duplicate offices -- checked at the end.
        # .get('class') keeps validation going (key becomes None) instead of
        # crashing when a senator record lacks a class.
        office = (term['type'], term['state'], term['district'] if term['type'] == 'rep' else term.get('class'))
        offices.setdefault(office, []).append(p)

        # Senate state rank.
        # Collect all of the senate state ranks so we can check that the
        # distribution within each state is correct, at the end.
        if term['type'] == 'sen':
            senate_ranks.setdefault(term['state'], []).append((p['id']['bioguide'], term['state_rank']))

        # Party.
        if term['party'] not in ("Republican", "Democrat", "Independent"):
            print("Suspicious party for %s: %s." % (p['id']['bioguide'], term['party']))
        elif term['party'] != "Independent" and term.get("caucus") is not None:
            print("caucus field should not be used if the party is not Independent, in %s: %s." % (p['id']['bioguide'], term['caucus']))
        elif term['party'] == "Independent" and term.get("caucus") is None:
            print("caucus field should be used if the party is Independent, in %s: %s." % (p['id']['bioguide'], term['caucus']))

    # Check for duplicate offices.
    for k, v in offices.items():
        if len(v) > 1:
            print("Multiple holders of the office", k)
            print(rtyaml.dump(v))

    # Check for duplicate use of any of the IDs across both files.
    ids = set()
    for p in P + P_historical:
        for k, v1 in p['id'].items():
            # The 'fec' ID is a list; normalize the others to a list.
            if not isinstance(v1, list):
                v1 = [v1]
            for v in v1:
                key = (k, v)
                if key in ids:
                    print("Duplicate ID: %s %s" % (k, v))
                    continue
                ids.add(key)

    # Senate state rank distribution.
    for state, ranks in senate_ranks.items():
        # There can be a junior and senior senator, a senior senator, or no
        # senators. There can't be two juniors, two seniors, or just a junior.
        r = sorted(rr[1] for rr in ranks)
        if r not in [['junior', 'senior'], ['senior'], []]:
            print("State ranks for %s cannot be right: %s." % (state, ranks))
# Script entry point: run all validations when invoked directly.
if __name__ == '__main__':
    run()
| cc0-1.0 | 858d2d673223d67bc5dc461e913b525e | 42.310345 | 545 | 0.575239 | 2.652587 | false | false | false | false |
eliben/code-for-blog | 2009/eblib/libcollect.py | 14 | 14080 | """
libcollect.py
Provides the LibCollect class, used for collecting the various libraries
your script uses for delivery as a self-contained distribution package.
Author: Eli Bendersky (http://eli.thegreenplace.net)
License: Same as Python
Motivation:
Imagine that you've written a script that uses several libraries, some of
which you've written and some you've downloaded and installed (for example
PyYAML). You want to distribute the script to your friends and co-workers,
who already have Python installed with all the standard library. But your
script won't run on their machines, because they have neither your personal
libraries, nor PyYAML installed. So what can you do ?
* You can ask them to install PyYAML and other libraries your script uses,
and send them your own libraries. This is a lengthy and inconvenient
process.
* You can use a tool like py2exe to package your delivery. This has a
downside, however. py2exe produces large files (several MBs) and you may
not want that.
* You can painstakingly collect the libraries into a directory where your
script can find them, and package the directory together with the script.
LibCollect makes the third option trivial, by doing all the dirty work
for you.
Example:
Suppose your script is named script.py, and is located in directory $DIR
(although I'm using Unix-y notation here, it is for convenience only.
LibCollect works similarly well on Windows platforms). Follow these steps
to prepare a self-contained distribution with LibCollect:
Create a distribution setup script in the same directory. Lets assume
you call it distrib_script.py. You can easily place it in any directory
you like, I'm using the same one to make the example simpler.
Add the following to distrib_script.py (assuming that libcollect.py is
in your sys.path):
**************************************************************
import libcollect
# Create a LibCollect object
lc = libcollect.LibCollect()
# Prepare arguments for do_collect
#
# Path to the script (can be absolute or relative)
scriptname = 'script.py'
# Ask the resulting distribution to be placed in
# directory distrib
targetdir = 'distrib'
# Specify which libraries to exclude from the
# distribution (because you know they're installed
# on the target machine)
excludes = ["wx",
"pywin",
"win32api",
"win32com"]
# This does the actual work
# See the documentation of LibCollect for more options
#
lc.do_collect( scriptname,
targetdir,
excludes,
verbose=True)
**************************************************************
Now run distrib_script.py.
When it finishes running, you will see that the distrib directory
has been created in $DIR.
In $DIR/distrib you will see a file: script.py and
a directory: lib
* script.py is a loader that replaces your original script.py - this
is the program your users should run. All it does (look at the
code, it's short!) is prepare the sys.path to include the
packaged libraries, and runs your own script.py that was also
packaged into the .zip file
* lib is the distribution library, containing all the code
your script needs to run on any machine with Python installed,
and nothing else (except the modules you specified in the exclusion
list).
How to use LibCollect:
* It is most convenient to use LibCollect in the way demonstrated
in the example above. You may want to update your application from
time to time, and having a distribution script handy will turn
the preparation of a new distribution into a 5-second process.
* If you don't want to create a distribution script, you can use
a more direct method of invoking libcollect.py as a program on
your script. Call it without arguments and it will print
a usage string that will explain what you need to do.
How it works:
* LibCollect uses the standard modulefinder module to find out which
libraries are used by your script. It categorizes them into two
types: standard libraries that came with Python, and non-standard
libraries you've installed or written.
* Only libraries of the second type are included in the distribution
(bar the libraries you've explicitly asked to exclude).
* It then builds a directory with all the included libraries, in a
way that your script will be able to find them. The script itself
is also packaged into the same place.
* On request, this directory can be zipped into a single file, to
employ Python's built-in zipimport facility (but read the docstring
of the LibCollect class for more information on this)
* In the distribution directory, a new file with the name of your
script is created. It is a simple loader that uses the runpy module
to transparently load your script from the distribution library.
This way your script is not being modified (sys.path is rigged
from the loader).
Compatibility:
Written in pure Python 2.5
Tested on Windows and Linux, but should work on other platforms
where the standard Python distribution works.
Version history:
1.0 (2008.06.07): initial release
1.1 (2008.07.03): create an unzipped distribution library
by default, because of the limitations
of zipimport.
"""
from distutils.archive_util import make_zipfile
from distutils.dir_util import mkpath, create_tree
import distutils.sysconfig
import os, sys
import shutil
from modulefinder import ModuleFinder
version = "1.1"
class LibCollect(object):
    """ See module documentation for an introduction and example.

        Usage:
            lc = LibCollect()
            lc.do_collect(...)

        The documentation of do_collect provides the gory details.
    """
    def __init__(self):
        pass

    def do_collect(self, scriptname, targetdir, excludes=[], distlib='lib', zip_lib=False, verbose=False):
        """ See module documentation for an introduction and example.

            do_collect performs the actual work of this module.

            Arguments:
              scriptname    Path to your top-level application file. Can be
                            either relative or absolute.
              targetdir     Path to the target directory where the packaged
                            distribution will be placed. The distribution
                            consists of a loader script and a distribution
                            library (either a directory or a zip file).
                            This directory may not exist prior to invocation.
                            If it exists, it will be overridden.
              excludes      A list of module names for exclusion from the
                            distribution. (Read-only; never mutated, so the
                            mutable default is safe here.)
              distlib       Name of the distribution library that will be
                            created in targetdir.
              zip_lib       True to zip the distribution library into a
                            single file; False for an uncompressed directory.
                            Note: zipping may hinder start-up performance
                            (no .pyc caching) and zipimport cannot load
                            extensions (.pyd/.so) or .pyw scripts.
              verbose       True to print progress to stdout.

            Returns:
              Nothing. An exception may be thrown with an error message from
              one of the underlying method calls.
        """
        self.site_packages = os.path.normcase(distutils.sysconfig.get_python_lib(standard_lib=False))
        self.standard_lib = os.path.normcase(distutils.sysconfig.get_python_lib(standard_lib=True))
        self.sys_prefix = os.path.normcase(sys.prefix)
        self.verbose = verbose

        self.log("\nLibCollect v%s running in verbose mode\n" % version)

        # Initial preparation to create the lib directory
        if os.path.exists(targetdir):
            self.log("Directory '%s' exists. Removing it." % targetdir)
            shutil.rmtree(targetdir)

        libdir = os.path.join(targetdir, distlib)
        self.log("Creating path '%s'" % libdir)
        mkpath(libdir)

        # Find the modules we need to collect
        modules = self.find_modules(scriptname, excludes, verbose)
        self.log("Collecting modules into '%s'" % libdir)

        # Collect the modules in the lib directory. Plain modules ('m') go
        # into the sub-directory of their package path; packages ('P') go
        # into a directory named after the full dotted path.
        for modname, modtype, modfile in modules:
            modname_components = modname.split('.')
            if modtype == 'm':
                if len(modname_components) > 1:
                    new_path = os.path.join(libdir, *modname_components[0:-1])
                else:
                    new_path = libdir
            elif modtype == 'P':
                new_path = os.path.join(libdir, *modname_components)
            else:
                assert False
            mkpath(new_path)
            shutil.copy(modfile, new_path)

        os.chdir(targetdir)

        if zip_lib:
            self.log("Zipping directory '%s' into '%s'" % (libdir, libdir + '.zip'))
            make_zipfile(distlib, distlib)
            self.log("Removing directory '%s'" % libdir)
            shutil.rmtree(distlib)
            path_add = "os.path.join('" + distlib + ".zip', '" + distlib + "')"
        else:
            path_add = "'" + distlib + "'"

        # Create the loader script; use a context manager so the file is
        # closed even if a write fails.
        self.log("Writing loader script: %s" % scriptname)
        with open(os.path.basename(scriptname), 'w') as loader:
            # NOTE(review): splitext on the full path keeps any directory
            # prefix in the module name passed to run_module -- confirm that
            # scriptname is expected to be a bare file name here.
            loader_name = os.path.splitext(scriptname)[0]
            loader.write("import os, sys, runpy\n")
            loader.write("sys.path.insert(0, %s)\n" % path_add)
            loader.write("runpy.run_module('%s', run_name=\"__main__\", alter_sys=True)\n" % loader_name)

    def find_modules(self, scriptname, excludes=[], verbose=False):
        """ Find the modules we'd want to include in the distribution.

            Returns a list of (name, type, file) tuples where type is
            'P' for packages and 'm' for plain modules.
        """
        path = sys.path[:]
        path.insert(0, os.path.dirname(scriptname))
        mf = ModuleFinder(path=path, excludes=excludes)
        mf.run_script(scriptname)
        # sorted() works on both Python 2 and 3; the original called
        # .sort() on dict.keys(), which fails on Python 3's view object.
        modulenames = sorted(mf.modules)

        self.log("Looking for modules used by '%s'...\n" % scriptname)
        log_format = "%-2s %-30s %s"
        self.log(log_format % ('', 'Module name', 'Module location'))
        self.log(log_format % ('--', '-' * 30, '-' * 30))

        modules = []
        for name in modulenames:
            m = mf.modules[name]
            # builtin modules have no file and are always available
            if not m.__file__: continue
            mpath = os.path.normcase(m.__file__)

            # Modules in Python distribution.
            # Pass on only those that live in site-packages
            if mpath.startswith(self.site_packages):
                pass
            elif mpath.startswith(self.sys_prefix):
                continue

            # 'modtype' avoids shadowing the builtin 'type'
            modtype = "P" if m.__path__ else "m"
            modules.append((name, modtype, m.__file__))
            self.log(log_format % (modtype, name, m.__file__))

        self.log("")
        return modules

    def log(self, msg):
        """Print msg to stdout when verbose mode is on."""
        # print() call form works on both Python 2 and 3
        # (the original used the Python-2-only 'print msg' statement).
        if self.verbose: print(msg)
if __name__ == "__main__":
    # Command-line interface: collect the given script and the libraries it
    # uses into a self-contained distribution directory.
    from optparse import OptionParser

    usage = "usage: %prog [options] script"
    description = "Collect the script with the libraries it uses into a distribution. See module documentation for more details"
    opts = OptionParser(usage=usage, description=description)
    opts.add_option('-t', '--targetdir', dest='targetdir',
                    help='place distribution into TARGETDIR')
    opts.add_option('-z', '--zip_lib', dest='zip_lib', action='store_true',
                    help='zip the distribution library')
    opts.add_option('-v', '--verbose', dest='verbose', action='store_true',
                    help='print progress')
    # Typo fix in the user-facing help text: "thsese" -> "these".
    opts.add_option('-e', '--exclude', dest='excludes', action='append',
                    help='exclude library from distribution. You can provide several of these')
    opts.set_defaults(targetdir='distrib',
                      zip_lib=True,
                      excludes=[],
                      verbose=False)

    (options, args) = opts.parse_args()
    if len(args) != 1:
        # Exactly one positional argument (the script) is required.
        opts.print_help()
        sys.exit(0)

    lc = LibCollect()
    lc.do_collect(args[0],
                  options.targetdir,
                  options.excludes,
                  distlib='lib',
                  verbose=options.verbose,
                  zip_lib=options.zip_lib)
| unlicense | 330e418eb98ae74dc6bf728f6e73ef81 | 38.220056 | 128 | 0.598935 | 4.583333 | false | false | false | false |
eliben/code-for-blog | 2010/aes-encrypt-pycrypto/pycrypto_file.py | 1 | 3281 | import argparse
import os
import struct
import random
from eblib.utils import Timer
from Crypto.Cipher import AES
def encrypt_file(key, in_filename, out_filename=None, chunksize=64*1024):
    """ Encrypt a file using AES in CBC mode.

        key:
            The encryption key - a bytes object that must be
            either 16, 24 or 32 bytes long. Longer keys
            are more secure.

        in_filename:
            Path of the plaintext input file.

        out_filename:
            Path of the ciphertext output file; defaults to
            '<in_filename>.enc' when omitted.

        chunksize:
            Number of bytes read per iteration. Must be divisible
            by 16 (the AES block size).

        Returns the output file name.
    """
    out_filename = out_filename or (in_filename + '.enc')

    # A fresh random IV for this file; it is written in the clear right
    # after the 8-byte original-size header so decrypt_file can recover it.
    iv = os.urandom(16)
    encryptor = AES.new(key, AES.MODE_CBC, iv)
    filesize = os.path.getsize(in_filename)

    with open(in_filename, 'rb') as infile, open(out_filename, 'wb') as outfile:
        outfile.write(struct.pack('<Q', filesize))
        outfile.write(iv)

        # iter() with a sentinel stops cleanly at EOF (empty read).
        for chunk in iter(lambda: infile.read(chunksize), b''):
            # Pad the final short chunk with spaces to a 16-byte boundary;
            # decrypt_file truncates back to the stored original size.
            chunk += b' ' * (-len(chunk) % 16)
            outfile.write(encryptor.encrypt(chunk))

    return out_filename
def decrypt_file(key, in_filename, out_filename=None, chunksize=24*1024):
    """ Decrypt a file produced by encrypt_file (AES, CBC mode).

        Parameters are similar to encrypt_file, with one difference:
        out_filename, if not supplied, will be in_filename without its
        last extension (i.e. if in_filename is 'aaa.zip.enc' then
        out_filename will be 'aaa.zip').

        Returns the output file name.
    """
    out_filename = out_filename or os.path.splitext(in_filename)[0]

    with open(in_filename, 'rb') as infile:
        # Header layout: original file size (little-endian uint64),
        # followed by the 16-byte IV.
        origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
        decryptor = AES.new(key, AES.MODE_CBC, infile.read(16))

        with open(out_filename, 'wb') as outfile:
            for chunk in iter(lambda: infile.read(chunksize), b''):
                outfile.write(decryptor.decrypt(chunk))
            # Drop the space padding added during encryption.
            outfile.truncate(origsize)

    return out_filename
if __name__ == "__main__":
    # Simple CLI: -e encrypts <filename> to <filename>.enc,
    # -d decrypts it to <filename>.dec.
    argparser = argparse.ArgumentParser(description='Encrypt/Decrypt AES')
    argparser.add_argument('filename', nargs=1)
    argparser.add_argument('-e', dest='encrypt', action='store_true')
    argparser.add_argument('-d', dest='decrypt', action='store_true')
    args = argparser.parse_args()

    infile = args.filename[0]
    # NOTE: hard-coded demo key; not suitable for protecting real data.
    key = b'1' * 32

    if args.encrypt:
        ofname = encrypt_file(key, infile, out_filename=infile+'.enc')
        print('Encrypted to', ofname)
    elif args.decrypt:
        ofname = decrypt_file(key, infile, out_filename=infile+'.dec')
        print('Decrypted to', ofname)
    else:
        argparser.print_help()
        # Bug fix: os.Exit does not exist in Python (it raised
        # AttributeError here); exit with a non-zero status instead.
        raise SystemExit(1)
| unlicense | b5786838e04b7c8469f2ddd8a6a78dd7 | 31.479592 | 76 | 0.572082 | 3.996346 | false | false | false | false |
awslabs/aws-config-rules | python/ec2_no_internet_access.py | 1 | 6314 | #
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Ensures that there is no internet connectivity
# Description: checks the given resource on potential internet access
#
# Trigger Type: Change Triggered
# Scope of Changes: EC2:Instance, EC2:VPC, EC2:RouteTable, EC2:Subnet, EC2:NetworkInterface
# Optional Parameter: None
# Example Value: N/A
#
# Requires additional AWS Config permissions for GetResourceConfigHistory
from __future__ import print_function
import json
import boto3
# Module-level boto3 clients shared by the helpers below.
aws_config = boto3.client('config')
# NOTE(review): aws_ec2 does not appear to be referenced anywhere in this
# file's visible code -- confirm before removing.
aws_ec2 = boto3.client('ec2')
# this is a utility class for parsing config rules events. RaiseInternetConnectivity inherits from it
class ConfigRule:
    """Base class for implementing a custom config rule in AWS Lambda"""

    def __init__(self, configurationItem):
        # The AWS Config configuration item this rule evaluates, plus a
        # shortcut to its resource relationships.
        self.configurationItem = configurationItem
        self.relationships = configurationItem['relationships']

    def evaluate_compliance(self, configurationItem=None):
        """Actual evaluation logic will be implemented here (overridden by subclasses)."""
        return 'NOT_APPLICABLE'

    def get_relationship(self, relationships, id):
        """Return the relationship dict with the given resourceId, or None."""
        for i in relationships:
            if i['resourceId'] == id:
                return i
        return None

    def find_relationships_by_type(self, type, relationships=None):
        """Return all relationships of the given resourceType.

        Searches self.relationships unless an explicit list is supplied.
        """
        if not relationships:
            relationships = self.relationships
        result = []
        for i in relationships:
            if i['resourceType'] == type:
                result.append(i)
        return result

    def get_related_configuration_item(self, relationship):
        """Fetch the latest configuration item for a related resource via AWS Config."""
        result = aws_config.get_resource_config_history(
            resourceType=relationship['resourceType'],
            resourceId=relationship['resourceId'],
            limit=1,
        )
        item = result['configurationItems'][0]
        # Bug fix: dict.has_key() was removed in Python 3 (this file already
        # targets py3 via 'from __future__ import print_function'); use 'in'.
        if 'configuration' in item:
            item['configuration'] = json.loads(item['configuration'])
        return item

    def put_evaluations(self, compliance, resultToken):
        """Report the evaluation result for this configuration item back to AWS Config."""
        aws_config.put_evaluations(
            Evaluations=[
                {
                    'ComplianceResourceType': self.configurationItem['resourceType'],
                    'ComplianceResourceId': self.configurationItem['resourceId'],
                    'ComplianceType': compliance,
                    'OrderingTimestamp': self.configurationItem['configurationItemCaptureTime']
                },
            ],
            ResultToken=resultToken
        )
class RaiseInternetConnectivity(ConfigRule):
    """
    Class for checking given resources for potential internet access.
    Supported types are: VPC, RouteTable, Subnet, Instance, NetworkInterface
    Implemented checks are:
    VPC: Check for attached IGW
    RouteTable: Check for route to an IGW
    Subnet: check if public ip address mapping is enabled, check if assigned route table has a route to an IGW
    Instance: check if instance has a public ip assigned
    NetworkInterface: check if interface has a public ip assigned
    """
    def evaluate_compliance(self, configurationItem=None):
        # Returns 'COMPLIANT', 'NON_COMPLIANT' or 'NOT_APPLICABLE' for the
        # given configuration item (defaults to the one passed at construction).
        if not configurationItem:
            configurationItem = self.configurationItem
        # NOTE(review): this local is never used below -- confirm it can go.
        relationships = self.relationships
        # Deleted resources cannot be evaluated.
        if configurationItem['configurationItemStatus'] == 'ResourceDeleted':
            return 'NOT_APPLICABLE'
        # check if VPC has an internet gateway attached
        if configurationItem['resourceType'] == 'AWS::EC2::VPC':
            if self.find_relationships_by_type('AWS::EC2::InternetGateway'):
                return 'NON_COMPLIANT'
            else:
                return 'COMPLIANT'
        # check if the route table has a rule with an internet gateway
        if configurationItem['resourceType'] == 'AWS::EC2::RouteTable':
            return self.evaluate_route_table(configurationItem)
        # check the subnet for potential internet accessibility
        if configurationItem['resourceType'] == "AWS::EC2::Subnet":
            # check if subnet has configured public ip assignment as default
            if configurationItem['configuration']['mapPublicIpOnLaunch']:
                return 'NON_COMPLIANT'
            # check if subnet has a route to an internet gateway
            try:
                # .pop() raises IndexError when the subnet has no explicitly
                # associated route table, which is handled below.
                route_table = self.get_related_configuration_item(self.find_relationships_by_type('AWS::EC2::RouteTable').pop())
            except:
                # no routing table associated, get main routing table of VPC
                vpc = self.get_related_configuration_item(self.find_relationships_by_type('AWS::EC2::VPC').pop())
                route_tables = self.find_relationships_by_type('AWS::EC2::RouteTable', vpc['relationships'])
                for i in route_tables:
                    r = self.get_related_configuration_item(i)
                    # NOTE(review): assumes the 'main' flag lives on the first
                    # association entry -- confirm against the AWS Config
                    # route table schema.
                    if r['configuration']['associations'][0]['main']:
                        route_table = r
                        break
                else:
                    # for/else: no main route table found among the VPC's tables.
                    raise Exception('Main route table not found', vpc)
            # check if assigned route table has a rule with an internet gateway
            return self.evaluate_route_table(route_table)
        # check if the instance has a public ip assigned
        if configurationItem['resourceType'] == 'AWS::EC2::Instance':
            if configurationItem['configuration']['publicIpAddress']:
                return 'NON_COMPLIANT'
            return 'COMPLIANT'
        # check if network interface has a public ip associated
        if configurationItem['resourceType'] == 'AWS::EC2::NetworkInterface':
            for i in configurationItem['configuration']['privateIpAddresses']:
                # NOTE(review): assumes every privateIpAddresses entry carries
                # an 'association' key (possibly null) -- confirm schema.
                if i['association']:
                    return 'NON_COMPLIANT'
            return 'COMPLIANT'
        # Any other resource type is out of scope for this rule.
        return 'NOT_APPLICABLE'

    def evaluate_route_table(self, route_table):
        # NON_COMPLIANT when any route targets an internet gateway ('igw-...').
        # NOTE(review): assumes each route dict has a 'gatewayId' key
        # (possibly null) -- confirm against the AWS Config schema.
        for route in route_table['configuration']['routes']:
            if route['gatewayId'] and route['gatewayId'].startswith('igw-'):
                return 'NON_COMPLIANT'
        return 'COMPLIANT'
def lambda_handler(event, context):
    """Lambda entry point: evaluate the changed resource and report the result."""
    try:
        invoking_event = json.loads(event['invokingEvent'])
        config_item = invoking_event['configurationItem']
    except:
        raise Exception('Could not load configuration item', event)

    try:
        rule = RaiseInternetConnectivity(config_item)
    except:
        raise Exception('Could not process configuration item', config_item)

    compliance = rule.evaluate_compliance()
    print('Compliance evaluation for %s: %s' % (config_item['resourceId'], compliance))

    # inform config rules about our evaluation result
    rule.put_evaluations(compliance, event['resultToken'])
| cc0-1.0 | 7ef65bdfd21921ed4eb445d05956fb49 | 35.709302 | 120 | 0.693538 | 4.321697 | false | true | false | false |
awslabs/aws-config-rules | python/SQS_TRANSIT_ENCRYPTION_CHECK/SQS_TRANSIT_ENCRYPTION_CHECK.py | 1 | 21117 | # Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
# Rule Name:
# SQS_TRANSIT_ENCRYPTION_CHECK
# Description:
# Check whether SQS queue has encryption in transit queue access policy enforced.
#
# Rationale:
# Regular checks to ensure that SQS queue has encryption in transit queue access policy enforced.
#
# Indicative Severity:
# Medium
#
# Trigger:
# Periodic checks on AWS::SQS::Queue
#
# Reports on:
# AWS::SQS::Queue
#
# Rule Parameters:
# QueueNameStartsWith
# (Optional) Specify your SQS queue names to check for. Starting SQS queue names will suffice. For example, your SQS queue names are "processimages" and "extractdocs".
# You can specify process, extract as the value for QueueNameStartsWith
#
# Scenarios:
# Scenario: 1
# Given: Rules parameter is provided
# And: It contains a parameter key other than QueueNameStartsWith
# Then: Return ERROR
# Scenario: 2
# Given: Rules parameter is provided
# And: There is no value specified for QueueNameStartsWith
# Then: Checks all SQS queues. Return COMPLIANT with annotation if SQS queues have encryption in transit queue access policy enforced. Otherwise, return NON_COMPLIANT.
# Scenario: 3
# Given: Rules parameter is provided and starting SQS queue names values are provided
# And: Checks SQS queue names that match the specified starting SQS queue names in QueueNameStartsWith
# Then: Checks specific SQS queues. Return COMPLIANT with annotation if SQS queues have encryption in transit queue access policy enforced. Otherwise, return NON_COMPLIANT.
# Scenario: 4
# Given: Rules parameter is provided and (optional) starting SQS queue names values are provided
# And: No SQS queue names exist or (optional) no matching SQS queue names found.
# Then: Return NO RESULTS and print no SQS queues to check for to CloudWatch.
import json
import sys
import datetime
import re
import boto3
import botocore
try:
import liblogging
except ImportError:
pass
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::SQS::Queue'
# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account).
ASSUME_ROLE_MODE = False
# Other parameters (no change needed)
# NOTE(review): presumably the assume-role session duration in seconds --
# confirm where this constant is consumed (not visible in this chunk).
CONFIG_ROLE_TIMEOUT_SECONDS = 900
#############
# Main Code #
#############
# Compiled once at module load (the original re-compiled it on every loop
# iteration). Case-sensitive match for an enforced aws:SecureTransport
# condition in the queue access policy.
_TLS_CONDITION_PATTERN = re.compile('"Condition":{"Bool":{"aws:SecureTransport":"true"')

def _evaluate_queue(sqs, qurl, event):
    """Build the COMPLIANT/NON_COMPLIANT evaluation for a single queue URL."""
    check = sqs.get_queue_attributes(QueueUrl=qurl, AttributeNames=['Policy'],)
    if "Attributes" not in check:
        return build_evaluation(qurl, 'NON_COMPLIANT', event, annotation='SQS Queue does not have a queue access policy.')
    if _TLS_CONDITION_PATTERN.search(check["Attributes"]["Policy"]):
        return build_evaluation(qurl, 'COMPLIANT', event, annotation='SQS Queue is TLS encrypted.')
    return build_evaluation(qurl, 'NON_COMPLIANT', event, annotation='SQS Queue is not TLS encrypted.')

def evaluate_compliance(event, configuration_item, valid_rule_parameters):
    """Evaluate whether SQS queues enforce encryption in transit.

    event -- the lambda event from AWS Config
    configuration_item -- unused for this periodic rule (kept for interface
        compatibility with the rule framework)
    valid_rule_parameters -- output of evaluate_parameters(); when truthy,
        only queues matching the comma-separated QueueNameStartsWith
        prefixes are checked, otherwise all queues are checked.

    Returns a list of evaluations, or None when no queue matched.
    """
    sqs = get_client('sqs', event)
    evaluations = []

    if valid_rule_parameters:
        # Scenario 3: check only queues whose names start with one of the
        # given prefixes. Each prefix is stripped and lower-cased before the
        # lookup, as in the original implementation.
        for queue in valid_rule_parameters["QueueNameStartsWith"].split(","):
            response = sqs.list_queues(QueueNamePrefix=queue.strip().lower())
            if "QueueUrls" not in response:
                # Bug fix: the original returned None here immediately,
                # discarding evaluations already collected for earlier
                # prefixes; now we just skip the empty prefix.
                print("There are no SQS queues to check for.")
                continue
            for qurl in response["QueueUrls"]:
                evaluations.append(_evaluate_queue(sqs, qurl, event))
    else:
        # Scenario 2: check all queues (list_queues returns up to 1000).
        response = sqs.list_queues()
        if "QueueUrls" not in response:
            print("There are no SQS queues to check for.")
            return None
        for qurl in response["QueueUrls"]:
            evaluations.append(_evaluate_queue(sqs, qurl, event))

    # Scenario 4: no queues matched at all -> no results.
    return evaluations or None
def evaluate_parameters(rule_parameters):
    """Validate the rule parameters.

    Returns the parameters dict when QueueNameStartsWith is present and a
    non-empty string; otherwise prints a diagnostic and returns None.
    """
    try:
        prefixes = rule_parameters["QueueNameStartsWith"]
    except LookupError:
        print("Please input QueueNameStartsWith as the key.")
        return None
    if isinstance(prefixes, str) and prefixes != "":
        return rule_parameters
    # Bug fix: the original fell through to 'return valid_rule_parameters'
    # without ever assigning it, raising UnboundLocalError (which is not a
    # LookupError and therefore escaped the except clause).
    print("Please specify a valid starting SQS queue name or multiple queue names separated by comma(,)")
    return None
####################
# Helper Functions #
####################
# Build an error to be displayed in the logs when the parameter is invalid.
def build_parameters_value_error_response(ex):
    """Return an error dictionary when the evaluate_parameters() raises a ValueError.

    Keyword arguments:
    ex -- Exception text
    """
    error_kwargs = {
        'internal_error_message': "Parameter value is invalid",
        'internal_error_details': "An ValueError was raised during the validation of the Parameter value",
        'customer_error_code': "InvalidParameterValueException",
        'customer_error_message': str(ex),
    }
    return build_error_response(**error_kwargs)
# This gets the client after assuming the Config service role
# either in the same AWS account or cross-account.
def get_client(service, event, region=None):
    """Return the service boto client. It should be used instead of directly calling the client.

    Keyword arguments:
    service -- the service name used for calling the boto.client()
    event -- the event variable given in the lambda handler
    region -- the region where the client is called (default: None)
    """
    # Cross-account mode: assume the Config service role first.
    if ASSUME_ROLE_MODE:
        credentials = get_assume_role_credentials(get_execution_role_arn(event), region)
        return boto3.client(
            service,
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken'],
            region_name=region,
        )
    # Same-account mode: plain client with the lambda's own credentials.
    return boto3.client(service, region)
# This generate an evaluation for config
def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on scheduled rules.

    Keyword arguments:
    resource_id -- the unique id of the resource to report
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    event -- the event variable given in the lambda handler
    resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule (default DEFAULT_RESOURCE_TYPE)
    annotation -- an annotation to be added to the evaluation (default None). It will be truncated to fit the service limit.
    """
    notification_time = json.loads(event['invokingEvent'])['notificationCreationTime']
    evaluation = {
        'ComplianceResourceType': resource_type,
        'ComplianceResourceId': resource_id,
        'ComplianceType': compliance_type,
        'OrderingTimestamp': str(notification_time),
    }
    if annotation:
        evaluation['Annotation'] = build_annotation(annotation)
    return evaluation
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on configuration change rules.

    Keyword arguments:
    configuration_item -- the configurationItem dictionary in the invokingEvent
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    annotation -- an annotation to be added to the evaluation (default None). It will be truncated to fit the service limit.
    """
    evaluation = {
        'ComplianceResourceType': configuration_item['resourceType'],
        'ComplianceResourceId': configuration_item['resourceId'],
        'ComplianceType': compliance_type,
        'OrderingTimestamp': configuration_item['configurationItemCaptureTime'],
    }
    if annotation:
        evaluation['Annotation'] = build_annotation(annotation)
    return evaluation
####################
# Boilerplate Code #
####################
# Get execution role for Lambda function
def get_execution_role_arn(event):
    """Return the IAM role ARN to assume for Config API calls.

    When the rule parameters carry an 'ExecutionRoleName', the role name part
    of the event's executionRoleArn is replaced by it; otherwise the event's
    executionRoleArn is returned unchanged.

    Keyword arguments:
    event -- the event variable given in the lambda handler
    """
    if 'ruleParameters' in event:
        rule_params = json.loads(event['ruleParameters'])
        custom_role_name = rule_params.get("ExecutionRoleName")
        if custom_role_name:
            # Keep the 'arn:aws:iam::<account>:role' prefix, swap the role name.
            arn_prefix = event["executionRoleArn"].split("/")[0]
            return "{}/{}".format(arn_prefix, custom_role_name)
    return event['executionRoleArn']
# Build annotation within Service constraints
def build_annotation(annotation_string):
    """Truncate *annotation_string* so the result fits the 256-character
    annotation limit of the Config service; short strings pass through."""
    if len(annotation_string) <= 256:
        return annotation_string
    # 244 chars + " [truncated]" (12 chars) == exactly 256.
    return annotation_string[:244] + " [truncated]"
# Helper function used to validate input
def check_defined(reference, reference_name):
    """Return *reference* when it is truthy, otherwise raise an Exception
    naming the missing value.

    Keyword arguments:
    reference -- the value to validate
    reference_name -- the name used in the error message
    """
    if not reference:
        # Bug fix: the original passed three positional arguments to
        # Exception(), producing a tuple-style message like
        # "('Error: ', 'name', 'is not defined')"; build one readable string.
        raise Exception('Error: ' + reference_name + ' is not defined')
    return reference
# Check whether the message is OversizedConfigurationItemChangeNotification or not
def is_oversized_changed_notification(message_type):
    """Return True when the message is an OversizedConfigurationItemChangeNotification."""
    # check_defined() returns its argument after validating it is set.
    return check_defined(message_type, 'messageType') == 'OversizedConfigurationItemChangeNotification'
# Check whether the message is a ScheduledNotification or not.
def is_scheduled_notification(message_type):
    """Return True when the message is a ScheduledNotification."""
    # check_defined() returns its argument after validating it is set.
    return check_defined(message_type, 'messageType') == 'ScheduledNotification'
# Get configurationItem using getResourceConfigHistory API
# in case of OversizedConfigurationItemChangeNotification
def get_configuration(resource_type, resource_id, configuration_capture_time):
    """Fetch the configurationItem via the getResourceConfigHistory API
    (used for OversizedConfigurationItemChangeNotification messages)."""
    history = AWS_CONFIG_CLIENT.get_resource_config_history(
        resourceType=resource_type,
        resourceId=resource_id,
        laterTime=configuration_capture_time,
        limit=1)
    latest_item = history['configurationItems'][0]
    return convert_api_configuration(latest_item)
# Convert from the API model to the original invocation model
def convert_api_configuration(configuration_item):
    """Convert a getResourceConfigHistory item (in place) to the shape of the
    original invocation model, and return the same dictionary."""
    # Stringify datetimes so the item matches the JSON invocation model.
    for key, value in configuration_item.items():
        if isinstance(value, datetime.datetime):
            configuration_item[key] = str(value)
    configuration_item['awsAccountId'] = configuration_item['accountId']
    configuration_item['ARN'] = configuration_item['arn']
    configuration_item['configurationStateMd5Hash'] = configuration_item['configurationItemMD5Hash']
    configuration_item['configurationItemVersion'] = configuration_item['version']
    configuration_item['configuration'] = json.loads(configuration_item['configuration'])
    # Mirror each relationship's API field name under the invocation-model key.
    for relationship in configuration_item.get('relationships', []):
        relationship['name'] = relationship['relationshipName']
    return configuration_item
# Based on the type of message get the configuration item
# either from configurationItem in the invoking event
# or using the getResourceConfigHistiry API in getConfiguration function.
def get_configuration_item(invoking_event):
    """Return the configuration item for this notification, or None for
    scheduled notifications (which carry no configuration item)."""
    check_defined(invoking_event, 'invokingEvent')
    message_type = invoking_event['messageType']
    if is_oversized_changed_notification(message_type):
        # Oversized notifications only carry a summary; fetch the full item.
        summary = check_defined(invoking_event['configurationItemSummary'], 'configurationItemSummary')
        return get_configuration(summary['resourceType'], summary['resourceId'], summary['configurationItemCaptureTime'])
    if is_scheduled_notification(message_type):
        return None
    return check_defined(invoking_event['configurationItem'], 'configurationItem')
# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary.
def is_applicable(configuration_item, event):
    """Return False when the resource was deleted or left the rule's scope,
    making an evaluation unnecessary; default to True on missing inputs."""
    try:
        check_defined(configuration_item, 'configurationItem')
        check_defined(event, 'event')
    except:
        # Deliberate best-effort: evaluate anyway when inputs look malformed.
        return True
    item_status = configuration_item['configurationItemStatus']
    left_scope = event['eventLeftScope']
    if item_status == 'ResourceDeleted':
        print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
    return item_status in ('OK', 'ResourceDiscovered') and not left_scope
def get_assume_role_credentials(role_arn, region=None):
    """Assume *role_arn* via STS and return the temporary credentials.

    On ClientError the message is scrubbed (to avoid leaking internal account
    details) before the exception is re-raised.
    """
    sts_client = boto3.client('sts', region)
    try:
        response = sts_client.assume_role(
            RoleArn=role_arn,
            RoleSessionName="configLambdaExecution",
            DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS)
        if 'liblogging' in sys.modules:
            liblogging.logSession(role_arn, response)
        return response['Credentials']
    except botocore.exceptions.ClientError as ex:
        # Scrub error message for any internal account info leaks
        print(str(ex))
        error = ex.response['Error']
        if 'AccessDenied' in error['Code']:
            error['Message'] = "AWS Config does not have permission to assume the IAM role."
        else:
            error['Message'] = "InternalError"
            error['Code'] = "InternalError"
        raise ex
# This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account).
def clean_up_old_evaluations(latest_evaluations, event):
    """Mark previously reported resources absent from *latest_evaluations* as
    NOT_APPLICABLE, and return those markers plus the latest evaluations.

    Usually useful for periodic rules not reporting on AWS::::Account.
    """
    # Collect every previously reported evaluation, following pagination.
    previous_results = []
    next_token = None
    while True:
        request = {
            'ConfigRuleName': event['configRuleName'],
            'ComplianceTypes': ['COMPLIANT', 'NON_COMPLIANT'],
            'Limit': 100}
        if next_token:
            request['NextToken'] = next_token
        page = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(**request)
        previous_results.extend(page['EvaluationResults'])
        next_token = page.get('NextToken')
        if not next_token:
            break
    # Any resource no longer present in the latest run becomes NOT_APPLICABLE.
    latest_resource_ids = set(
        evaluation['ComplianceResourceId'] for evaluation in latest_evaluations)
    cleaned_evaluations = []
    for old_result in previous_results:
        old_resource_id = old_result['EvaluationResultIdentifier']['EvaluationResultQualifier']['ResourceId']
        if old_resource_id not in latest_resource_ids:
            cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event))
    return cleaned_evaluations + latest_evaluations
def lambda_handler(event, context):
    """Entry point invoked by AWS Lambda for this Config rule.

    Parses the invoking event, validates rule parameters, delegates to
    evaluate_compliance(), normalizes its result (None / str / dict / list)
    into evaluation dictionaries, and reports them via put_evaluations in
    batches of 100.

    Keyword arguments:
    event -- the Lambda event from AWS Config
    context -- the Lambda context object (unused here)
    """
    if 'liblogging' in sys.modules:
        liblogging.logEvent(event)
    global AWS_CONFIG_CLIENT
    #print(event)
    check_defined(event, 'event')
    invoking_event = json.loads(event['invokingEvent'])
    rule_parameters = {}
    if 'ruleParameters' in event:
        rule_parameters = json.loads(event['ruleParameters'])
    # Parameter validation errors are reported as customer errors, not crashes.
    try:
        valid_rule_parameters = evaluate_parameters(rule_parameters)
    except ValueError as ex:
        return build_parameters_value_error_response(ex)
    try:
        AWS_CONFIG_CLIENT = get_client('config', event)
        if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'ScheduledNotification', 'OversizedConfigurationItemChangeNotification']:
            # configuration_item is None for scheduled notifications.
            configuration_item = get_configuration_item(invoking_event)
            if is_applicable(configuration_item, event):
                compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters)
            else:
                compliance_result = "NOT_APPLICABLE"
        else:
            return build_internal_error_response('Unexpected message type', str(invoking_event))
    except botocore.exceptions.ClientError as ex:
        # Distinguish AWS-side (5xx/internal) failures from customer errors.
        if is_internal_error(ex):
            return build_internal_error_response("Unexpected error while completing API request", str(ex))
        return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message'])
    except ValueError as ex:
        return build_internal_error_response(str(ex), str(ex))
    # Normalize compliance_result into a list of evaluation dictionaries.
    evaluations = []
    latest_evaluations = []
    if not compliance_result:
        # Empty result: report a NOT_APPLICABLE account-level "shadow"
        # evaluation so Config records that the evaluation took place.
        latest_evaluations.append(build_evaluation(event['accountId'], "NOT_APPLICABLE", event, resource_type='AWS::::Account'))
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, str):
        if configuration_item:
            evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
        else:
            evaluations.append(build_evaluation(event['accountId'], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE))
    elif isinstance(compliance_result, list):
        # List of custom evaluations: keep only well-formed ones, then retire
        # stale evaluations from previous runs.
        for evaluation in compliance_result:
            missing_fields = False
            for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
                if field not in evaluation:
                    print("Missing " + field + " from custom evaluation.")
                    missing_fields = True
            if not missing_fields:
                latest_evaluations.append(evaluation)
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, dict):
        # Single custom evaluation dictionary: validate its required fields.
        missing_fields = False
        for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
            if field not in compliance_result:
                print("Missing " + field + " from custom evaluation.")
                missing_fields = True
        if not missing_fields:
            evaluations.append(compliance_result)
    else:
        evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
    # Put together the request that reports the evaluation status
    result_token = event['resultToken']
    test_mode = False
    if result_token == 'TESTMODE':
        # Used solely for RDK test to skip actual put_evaluation API call
        test_mode = True
    # Invoke the Config API to report the result of the evaluation
    # put_evaluations accepts at most 100 evaluations per call, so batch.
    evaluation_copy = []
    evaluation_copy = evaluations[:]
    while evaluation_copy:
        AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode)
        del evaluation_copy[:100]
    # Used solely for RDK test to be able to test Lambda function
    return evaluations
def is_internal_error(exception):
    """Classify *exception* as an AWS-internal failure (5xx / InternalError /
    ServiceError) rather than a customer-side error."""
    if not isinstance(exception, botocore.exceptions.ClientError):
        return True
    error_code = exception.response['Error']['Code']
    return (error_code.startswith('5')
            or 'InternalError' in error_code
            or 'ServiceError' in error_code)
def build_internal_error_response(internal_error_message, internal_error_details=None):
    """Shortcut for build_error_response() with generic 'InternalError'
    customer-facing code and message."""
    return build_error_response(
        internal_error_message,
        internal_error_details,
        'InternalError',
        'InternalError')
def build_error_response(internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None):
    """Assemble, log (via print) and return the error payload handed back to
    AWS Config.

    Keyword arguments:
    internal_error_message -- message for internal diagnostics
    internal_error_details -- optional extra internal details
    customer_error_code -- optional customer-facing error code
    customer_error_message -- optional customer-facing error message
    """
    error_response = dict(
        internalErrorMessage=internal_error_message,
        internalErrorDetails=internal_error_details,
        customerErrorMessage=customer_error_message,
        customerErrorCode=customer_error_code)
    print(error_response)
    return error_response
| cc0-1.0 | ffc0872df8fd4cd63ebc77d7690fdd85 | 45.207877 | 178 | 0.683572 | 4.306077 | false | true | false | false |
awslabs/aws-config-rules | python/REDSHIFT_CLUSTER_PUBLIC_ACCESS_CHECK/REDSHIFT_CLUSTER_PUBLIC_ACCESS_CHECK_test.py | 1 | 7640 | import sys
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
import mock
from mock import MagicMock
import botocore
from botocore.exceptions import ClientError
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::Redshift::Cluster'
# Shared mock boto clients handed out by Boto3Mock.client(); individual
# tests stub methods on these (e.g. STS assume_role side effects) as needed.
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
class Boto3Mock():
    """Stand-in for the boto3 module that hands back the shared mock clients."""

    def client(self, client_name, *args, **kwargs):
        # Only 'config' and 'sts' are used by the rule under test.
        if client_name == 'config':
            return CONFIG_CLIENT_MOCK
        if client_name == 'sts':
            return STS_CLIENT_MOCK
        raise Exception("Attempting to create an unknown client")
# Install the mock before importing the rule so its `import boto3` resolves
# to Boto3Mock instead of the real SDK.
sys.modules['boto3'] = Boto3Mock()
# Import the rule module under test by name (its file sits next to this test).
RULE = __import__('REDSHIFT_CLUSTER_PUBLIC_ACCESS_CHECK')
class ComplianceTest(unittest.TestCase):
    """Scenario tests for the REDSHIFT_CLUSTER_PUBLIC_ACCESS_CHECK rule."""

    invoking_event_redshift_public_sample = '{"configurationItem":{"configuration":{"publiclyAccessible": true},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::Redshift::Cluster","resourceId":"some-resource-id"},"messageType":"ConfigurationItemChangeNotification"}'
    invoking_event_redshift_not_public_sample = '{"configurationItem":{"configuration":{"publiclyAccessible": false},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::Redshift::Cluster","resourceId":"some-resource-id"},"messageType":"ConfigurationItemChangeNotification"}'

    def test_scenario_1_is_public(self):
        """A publicly accessible cluster must be reported NON_COMPLIANT."""
        lambda_event = build_lambda_configurationchange_event(self.invoking_event_redshift_public_sample)
        response = RULE.lambda_handler(lambda_event, {})
        resp_expected = [build_expected_response(
            'NON_COMPLIANT', 'some-resource-id', 'AWS::Redshift::Cluster',
            'This Amazon Redshift Cluster has the publiclyAccessible field set to True.')]
        assert_successful_evaluation(self, response, resp_expected)

    def test_scenario_2_isnot_public(self):
        """A non-public cluster must be reported COMPLIANT."""
        lambda_event = build_lambda_configurationchange_event(self.invoking_event_redshift_not_public_sample)
        response = RULE.lambda_handler(lambda_event, {})
        resp_expected = [build_expected_response(
            'COMPLIANT', 'some-resource-id', 'AWS::Redshift::Cluster')]
        assert_successful_evaluation(self, response, resp_expected)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
    """Return a minimal configuration-change Lambda event wrapping *invoking_event*."""
    lambda_event = {
        'configRuleName': 'myrule',
        'executionRoleArn': 'roleArn',
        'eventLeftScope': False,
        'invokingEvent': invoking_event,
        'accountId': '123456789012',
        'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
        'resultToken': 'token',
    }
    if rule_parameters:
        lambda_event['ruleParameters'] = rule_parameters
    return lambda_event
def build_lambda_scheduled_event(rule_parameters=None):
    """Return a minimal scheduled-notification Lambda event."""
    invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
    lambda_event = {
        'configRuleName': 'myrule',
        'executionRoleArn': 'roleArn',
        'eventLeftScope': False,
        'invokingEvent': invoking_event,
        'accountId': '123456789012',
        'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
        'resultToken': 'token',
    }
    if rule_parameters:
        lambda_event['ruleParameters'] = rule_parameters
    return lambda_event
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
    """Return the evaluation dictionary the rule is expected to produce."""
    expected = {
        'ComplianceType': compliance_type,
        'ComplianceResourceId': compliance_resource_id,
        'ComplianceResourceType': compliance_resource_type,
    }
    if annotation:
        expected['Annotation'] = annotation
    return expected
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
    """Assert that *response* (an evaluation dict or list of them) matches
    *resp_expected* field by field.

    Keyword arguments:
    test_class -- the unittest.TestCase providing assertEqual/assertTrue
    response -- the rule's actual output (dict or list of dicts)
    resp_expected -- the expected evaluation(s), same shape as response
    evaluations_count -- expected length when response is a list (default 1)

    Bug fix: uses assertEqual throughout; the original called the deprecated
    assertEquals alias, which was removed in Python 3.12.
    """
    if isinstance(response, dict):
        test_class.assertEqual(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
        test_class.assertEqual(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
        test_class.assertEqual(resp_expected['ComplianceType'], response['ComplianceType'])
        test_class.assertTrue(response['OrderingTimestamp'])
        if 'Annotation' in resp_expected or 'Annotation' in response:
            test_class.assertEqual(resp_expected['Annotation'], response['Annotation'])
    elif isinstance(response, list):
        test_class.assertEqual(evaluations_count, len(response))
        for i, response_expected in enumerate(resp_expected):
            test_class.assertEqual(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
            test_class.assertEqual(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
            test_class.assertEqual(response_expected['ComplianceType'], response[i]['ComplianceType'])
            test_class.assertTrue(response[i]['OrderingTimestamp'])
            if 'Annotation' in response_expected or 'Annotation' in response[i]:
                test_class.assertEqual(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
    """Assert that *response* carries populated customer error fields and,
    when internal fields are present, that they are populated too."""
    if customer_error_code:
        test_class.assertEqual(customer_error_code, response['customerErrorCode'])
    if customer_error_message:
        test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
    test_class.assertTrue(response['customerErrorCode'])
    test_class.assertTrue(response['customerErrorMessage'])
    for internal_field in ("internalErrorMessage", "internalErrorDetails"):
        if internal_field in response:
            test_class.assertTrue(response[internal_field])
def sts_mock():
    """Reset STS_CLIENT_MOCK and stub assume_role with static credentials."""
    credentials = {
        "AccessKeyId": "string",
        "SecretAccessKey": "string",
        "SessionToken": "string"}
    STS_CLIENT_MOCK.reset_mock(return_value=True)
    STS_CLIENT_MOCK.assume_role = MagicMock(return_value={"Credentials": credentials})
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
    """STS assume-role failure scenarios shared by all rules."""

    def test_sts_unknown_error(self):
        """Unknown STS errors must be scrubbed to a generic InternalError."""
        RULE.ASSUME_ROLE_MODE = True
        sts_error = botocore.exceptions.ClientError(
            {'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation')
        STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=sts_error)
        response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
        assert_customer_error_response(
            self, response, 'InternalError', 'InternalError')

    def test_sts_access_denied(self):
        """AccessDenied must surface a permission-specific customer message."""
        RULE.ASSUME_ROLE_MODE = True
        sts_error = botocore.exceptions.ClientError(
            {'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation')
        STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=sts_error)
        response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
        assert_customer_error_response(
            self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
| cc0-1.0 | 8fc88e6416b52b0bbc87f1eeea0bdcbf | 47.35443 | 348 | 0.692539 | 4.055202 | false | true | false | false |
awslabs/aws-config-rules | python-rdklib/SECURITYHUB_ENABLED/SECURITYHUB_ENABLED.py | 1 | 2079 | """
#####################################
## Gherkin ##
#####################################
Rule Name:
SECURITYHUB_ENABLED
Description:
Checks that AWS Security Hub is enabled for an AWS Account. The rule is NON_COMPLIANT if AWS Security Hub is not enabled.
Rationale:
AWS Security Hub gives you a comprehensive view of your high-priority security alerts, and compliance status across AWS accounts.
Indicative Severity:
Medium
Trigger:
Periodic
Reports on:
AWS::::Account
Rule Parameters:
None
Scenarios:
Scenario: 1
Given: SecurityHub is enabled for an AWS Account.
Then: Return COMPLIANT
Scenario: 2
Given: SecurityHub is not enabled for an AWS Account.
Then: Return NON_COMPLIANT
"""
import botocore
from rdklib import Evaluator, Evaluation, ConfigRule, ComplianceType
APPLICABLE_RESOURCES = ['AWS::::Account']
class SECURITYHUB_ENABLED(ConfigRule):
    """Periodic rule: COMPLIANT when describe_hub() succeeds, NON_COMPLIANT
    when Security Hub answers InvalidAccessException (hub not enabled)."""

    # Set this to false to prevent unnecessary API calls
    delete_old_evaluations_on_scheduled_notification = False

    def evaluate_periodic(self, event, client_factory, valid_rule_parameters):
        securityhub_client = client_factory.build_client('securityhub')
        account_id = event['accountId']
        try:
            hub_description = securityhub_client.describe_hub()
            # Scenario:1 SecurityHub is enabled for an AWS Account.
            if hub_description:
                return [Evaluation(ComplianceType.COMPLIANT, account_id, APPLICABLE_RESOURCES[0])]
            return []
        except botocore.exceptions.ClientError as error:
            # Scenario:2 SecurityHub is not enabled for an AWS Account.
            if error.response['Error']['Code'] == 'InvalidAccessException':
                return [Evaluation(ComplianceType.NON_COMPLIANT, account_id, APPLICABLE_RESOURCES[0])]
            raise error
def lambda_handler(event, context):
    """Lambda entry point: delegate evaluation to the rdklib Evaluator."""
    rule = SECURITYHUB_ENABLED()
    return Evaluator(rule, APPLICABLE_RESOURCES).handle(event, context)
awslabs/aws-config-rules | python/API_GW_RESTRICTED_IP/API_GW_RESTRICTED_IP.py | 1 | 22693 | #
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Created with the Rule Development Kit: https://github.com/awslabs/aws-config-rdk
# Can be used stand-alone or with the Rule Compliance Engine: https://github.com/awslabs/aws-config-engine-for-compliance-as-code
#
'''
#####################################
## Gherkin ##
#####################################
Rule Name:
api-gw-restricted-to-ip
Description:
Verify that non-private API GW have a resource based policy which limit their usage based on IP source (configurable)
Trigger:
Periodic
Reports on:
AWS::ApiGateway::RestApi
Parameters:
| --------------------|-----------|-----------------------------------------------|-------------------------|
| Parameter Name | Type | Description | Notes |
| ------------------- | --------- | --------------------------------------------- |-------------------------|
| WhitelistedIPs | Mandatory | IP addresses whitelisted to invoke the rest | Seperated by comma (,) |
| | | API. | |
|---------------------|-----------|-----------------------------------------------|-------------------------|
Feature:
In order to: to limit the access to API
As: a Security Officer
I want: To ensure that non-private REST APIs in API GW have a resource based policy which limit their usage based on whitelisted IPs.
Scenarios:
Scenario 1:
Given: WhitelistedIPs parameter is not defined
Then: return Error
Scenario 2:
Given: WhitelistedIPs parameter has an incorrect value (empty, non-CIDR, list of non-CIDR)
Then: return Error
Scenario 3:
Given: API is in private mode
Then: return NOT_APPLICABLE
Scenario 3:
Given: WhitelistedIPs parameter is defined and valid
And: APIs do not have resource policy attached
Then: return NON_COMPLIANT
Scenario 4:
Given: WhitelistedIPs parameter is defined and valid
And: APIs have resource policy attached
And: The Resource policy does not contain any 'Allow' statement
Then: return COMPLIANT
Scenario 5:
Given: WhitelistedIPs parameter is defined and valid
And: APIs have resource policy attached
And: The Resource policy does not contain <Options of Policy>
Then: return NON_COMPLIANT
With:
| Options of Policy |
| any 'Condition' |
| any 'Condition' about 'IpAddress' |
| any 'Condition' about 'IpAddress' about 'aws:SourceIp' |
Scenario 6:
Given: WhitelistedIPs parameter is defined and valid
And: APIs have resource policy attached
And: The Resource policy contains a 'Condition' about 'IpAddress' about 'aws:SourceIp'
And: Those IPs are not a subset of the WhitelistedIPs
Then: return NON_COMPLIANT
Scenario 7:
Given: WhitelistedIPs parameter is defined and valid
And: APIs have resource policy attached
And: The Resource policy contains a 'Condition' about 'IpAddress' about 'aws:SourceIp'
And: Those IPs are a subset of the WhitelistedIPs
Then: return COMPLIANT
'''
import json
import datetime
import ipaddress
import boto3
import botocore
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::ApiGateway::RestApi'
# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account).
# Leave False for same-account evaluation; see get_client()/get_assume_role_credentials().
ASSUME_ROLE_MODE = False
#############
# Main Code #
#############
def evaluate_compliance(event, configuration_item, rule_parameters):
    """Form the evaluation(s) to be returned to Config Rules.

    Lists every REST API in API Gateway and evaluates each:
    - PRIVATE endpoint  -> NOT_APPLICABLE
    - no resource policy -> NON_COMPLIANT
    - policy allowing more than the whitelisted IPs -> NON_COMPLIANT
    - otherwise -> COMPLIANT

    Keyword arguments:
    event -- the event variable given in the lambda handler
    configuration_item -- the configurationItem dictionary in the invokingEvent
    rule_parameters -- the validated list of whitelisted CIDR strings

    Returns None when no REST API exists (the boilerplate reports a shadow
    evaluation in that case).
    """
    apigw_client = get_client('apigateway', event)
    all_gateways = get_all_api_gateway(apigw_client)
    if not all_gateways:
        return None
    evaluations = []
    for gateway in all_gateways:
        gateway_id = gateway['id']
        if gateway['endpointConfiguration']['types'] == ['PRIVATE']:
            evaluations.append(build_evaluation(gateway_id, 'NOT_APPLICABLE', event))
        elif 'policy' not in gateway:
            evaluations.append(build_evaluation(gateway_id, 'NON_COMPLIANT', event, annotation='No resource policy is attached.'))
        else:
            # The API returns the policy with escaped quoting; strip
            # backslashes before parsing the JSON document.
            policy_document = json.loads(gateway['policy'].replace('\\', ''))
            if is_policy_allows_more_than_whitelist(policy_document, rule_parameters):
                evaluations.append(build_evaluation(gateway_id, 'NON_COMPLIANT', event, annotation='The attached policy allows more than the whitelist.'))
            else:
                evaluations.append(build_evaluation(gateway_id, 'COMPLIANT', event))
    return evaluations
def is_policy_allows_more_than_whitelist(policy, whitelist):
    """Return True when any Allow statement is not strictly limited to source
    IPs contained in *whitelist* (missing or malformed conditions count as
    allowing too much)."""
    for statement in policy['Statement']:
        if statement['Effect'] != 'Allow':
            continue
        condition = statement.get('Condition', {})
        if 'IpAddress' not in condition:
            return True
        if 'aws:SourceIp' not in condition['IpAddress']:
            return True
        if not is_ip_in_whitelist(condition['IpAddress']['aws:SourceIp'], whitelist):
            return True
    return False
def is_ip_in_whitelist(ip_list_or_str, whitelist):
    """Return True when every network in *ip_list_or_str* is contained in at
    least one network of *whitelist*.

    Keyword arguments:
    ip_list_or_str -- a CIDR string or list of CIDR strings from the policy
    whitelist -- the validated list of whitelisted CIDR strings

    Fix: the original decided containment by calling address_exclude() inside
    a bare `except:` and treating "no exception" as containment — obscure
    exception-driven control flow. Use the explicit subnet_of() test instead
    (with a version guard, since subnet_of raises TypeError on mixed
    IPv4/IPv6 pairs, which the original silently skipped).
    """
    candidate_networks = get_all_ip_networks(ip_list_or_str)
    whitelisted_networks = get_all_ip_networks(whitelist)
    for candidate in candidate_networks:
        if not any(candidate.version == allowed.version and candidate.subnet_of(allowed)
                   for allowed in whitelisted_networks):
            return False
    return True
def get_all_ip_networks(ip_list_or_str):
    """Normalize a CIDR string, or a list of CIDR strings, into a list of
    ipaddress network objects (host bits allowed via strict=False).

    Raises ValueError for any other input type, mirroring an invalid
    aws:SourceIp field in the policy.
    """
    if isinstance(ip_list_or_str, str):
        return [ipaddress.ip_network(ip_list_or_str, strict=False)]
    if isinstance(ip_list_or_str, list):
        return [ipaddress.ip_network(addr, strict=False) for addr in ip_list_or_str]
    raise ValueError("Unexpected value in the aws:SourceIp field of the policy.")
def get_all_api_gateway(client):
    """Return every REST API item in the account, following pagination
    via the 'position' token."""
    apis_list = []
    response = client.get_rest_apis(limit=500)
    while True:
        apis_list.extend(response['items'])
        if 'position' not in response:
            break
        response = client.get_rest_apis(position=response['position'], limit=500)
    return apis_list
def evaluate_parameters(rule_parameters):
    """Validate the 'WhitelistedIPs' rule parameter and return it as a list
    of CIDR strings.

    Raises ValueError (caught by the boilerplate) when the parameter is
    missing, empty, not a comma-separated string, or contains an entry that
    is neither a valid IP address nor a valid IP network.
    """
    if 'WhitelistedIPs' not in rule_parameters:
        raise ValueError('The parameter with "WhitelistedIPs" as key must be defined.')
    whitelist_raw = rule_parameters['WhitelistedIPs']
    if not whitelist_raw:
        raise ValueError('The parameter "WhitelistedIPs" must have a defined value.')
    try:
        whitelist = whitelist_raw.replace(', ', ',').split(',')
    except:
        raise ValueError('The parameter "WhitelistedIPs" must be a string or a list of strings separated by comma.')
    for entry in whitelist:
        if not is_ip_address(entry) and not is_ip_network(entry):
            raise ValueError('The value in parameter "WhitelistedIPs" [' + str(entry) + '] is not a valid IP or a valid IP network.')
    return whitelist
def is_ip_address(addr):
    """Return True when *addr* parses as a single IPv4/IPv6 address."""
    try:
        ipaddress.ip_address(addr)
    except:
        return False
    return True
def is_ip_network(net):
    """Return True when *net* parses as an IPv4/IPv6 network (host bits allowed)."""
    try:
        ipaddress.ip_network(net, strict=False)
    except:
        return False
    return True
def build_parameters_value_error_response(ex):
    """Map a parameter-validation exception to a customer error response."""
    error_details = {
        'internalErrorMessage': "Customer error while parsing input parameters",
        'internalErrorDetails': "Parameter value is invalid",
        'customerErrorCode': "InvalidParameterValueException",
        'customerErrorMessage': str(ex),
    }
    return build_error_response(**error_details)
####################
# Helper Functions #
####################
# This gets the client after assuming the Config service role
# either in the same AWS account or cross-account.
def get_client(service, event):
    """Return the service boto client, assuming the Config role when enabled.

    Keyword arguments:
    service -- the service name used for calling the boto.client()
    event -- the event variable given in the lambda handler
    """
    if ASSUME_ROLE_MODE:
        # Cross-account mode: build the client from STS temporary credentials.
        credentials = get_assume_role_credentials(event["executionRoleArn"])
        return boto3.client(
            service,
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken'])
    return boto3.client(service)
# This generate an evaluation for config
def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on scheduled rules.

    Keyword arguments:
    resource_id -- the unique id of the resource to report
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    event -- the event variable given in the lambda handler
    resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule (default DEFAULT_RESOURCE_TYPE)
    annotation -- an annotation to be added to the evaluation (default None)
    """
    notification_time = json.loads(event['invokingEvent'])['notificationCreationTime']
    evaluation = {
        'ComplianceResourceType': resource_type,
        'ComplianceResourceId': resource_id,
        'ComplianceType': compliance_type,
        'OrderingTimestamp': str(notification_time),
    }
    if annotation:
        evaluation['Annotation'] = annotation
    return evaluation
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on configuration change rules.

    Keyword arguments:
    configuration_item -- the configurationItem dictionary in the invokingEvent
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    annotation -- an annotation to be added to the evaluation (default None)
    """
    evaluation = {
        'ComplianceResourceType': configuration_item['resourceType'],
        'ComplianceResourceId': configuration_item['resourceId'],
        'ComplianceType': compliance_type,
        'OrderingTimestamp': configuration_item['configurationItemCaptureTime'],
    }
    if annotation:
        evaluation['Annotation'] = annotation
    return evaluation
####################
# Boilerplate Code #
####################
# Helper function to check if rule parameters exist
def parameters_exist(parameters):
    """Return True when at least one rule parameter was supplied."""
    return len(parameters) > 0
# Helper function used to validate input
def check_defined(reference, reference_name):
    """Return *reference* when it is truthy, otherwise raise an Exception
    naming the missing value.

    Keyword arguments:
    reference -- the value to validate
    reference_name -- the name used in the error message
    """
    if not reference:
        # Bug fix: the original passed three positional arguments to
        # Exception(), producing a tuple-style message; build one string.
        raise Exception('Error: ' + reference_name + ' is not defined')
    return reference
# Check whether the message is an OversizedConfigurationItemChangeNotification.
def is_oversized_changed_notification(message_type):
    """Return True when *message_type* is the oversized-change notification."""
    check_defined(message_type, 'messageType')
    expected_type = 'OversizedConfigurationItemChangeNotification'
    return message_type == expected_type
# Check whether the message is a ScheduledNotification or not.
def is_scheduled_notification(message_type):
    """Return True when *message_type* is a periodic ScheduledNotification."""
    check_defined(message_type, 'messageType')
    expected_type = 'ScheduledNotification'
    return message_type == expected_type
# Get configurationItem using getResourceConfigHistory API
# in case of OversizedConfigurationItemChangeNotification
def get_configuration(resource_type, resource_id, configuration_capture_time):
    """Fetch the most recent configuration item for a resource.

    Asks get_resource_config_history for at most one record at or before
    *configuration_capture_time*, then normalizes it to the invoking-event
    shape via convert_api_configuration.
    """
    result = AWS_CONFIG_CLIENT.get_resource_config_history(
        resourceType=resource_type,
        resourceId=resource_id,
        laterTime=configuration_capture_time,
        limit=1)
    configurationItem = result['configurationItems'][0]
    return convert_api_configuration(configurationItem)
# Convert from the API model to the original invocation model
def convert_api_configuration(configurationItem):
    """Rewrite a get_resource_config_history item to match the shape of the
    configurationItem delivered inside a Config invoking event."""
    # The event model serializes datetimes as strings.
    for key in configurationItem:
        if isinstance(configurationItem[key], datetime.datetime):
            configurationItem[key] = str(configurationItem[key])
    # Duplicate API-model keys under their invoking-event names.
    key_aliases = [
        ('awsAccountId', 'accountId'),
        ('ARN', 'arn'),
        ('configurationStateMd5Hash', 'configurationItemMD5Hash'),
        ('configurationItemVersion', 'version'),
    ]
    for event_key, api_key in key_aliases:
        configurationItem[event_key] = configurationItem[api_key]
    # The event model carries the configuration as a parsed object, not JSON text.
    configurationItem['configuration'] = json.loads(configurationItem['configuration'])
    if 'relationships' in configurationItem:
        for relationship in configurationItem['relationships']:
            relationship['name'] = relationship['relationshipName']
    return configurationItem
# Based on the type of message get the configuration item
# either from configurationItem in the invoking event
# or using the getResourceConfigHistory API in the get_configuration function.
def get_configuration_item(invokingEvent):
    """Return the configuration item for this invocation, or None.

    Oversized notifications only carry a summary, so the full item is fetched
    from the config-history API; scheduled notifications have no item at all.
    """
    check_defined(invokingEvent, 'invokingEvent')
    if is_oversized_changed_notification(invokingEvent['messageType']):
        configurationItemSummary = check_defined(invokingEvent['configurationItemSummary'], 'configurationItemSummary')
        return get_configuration(configurationItemSummary['resourceType'], configurationItemSummary['resourceId'], configurationItemSummary['configurationItemCaptureTime'])
    elif is_scheduled_notification(invokingEvent['messageType']):
        return None
    return check_defined(invokingEvent['configurationItem'], 'configurationItem')
# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary.
def is_applicable(configurationItem, event):
    """Return True when the configuration item still warrants an evaluation.

    A missing/falsy item or event is treated as applicable so the rule code
    gets a chance to run.
    """
    try:
        check_defined(configurationItem, 'configurationItem')
        check_defined(event, 'event')
    except:  # NOTE(review): bare except deliberately treats any validation failure as "applicable"; consider narrowing to Exception
        return True
    status = configurationItem['configurationItemStatus']
    eventLeftScope = event['eventLeftScope']
    if status == 'ResourceDeleted':
        # Log only; the final return below already yields False for this status.
        print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
    return (status == 'OK' or status == 'ResourceDiscovered') and not eventLeftScope
def get_assume_role_credentials(role_arn):
    """Assume *role_arn* via STS and return the temporary credentials dict.

    Raises the botocore ClientError after replacing its message so no
    account-internal details leak to the caller.
    """
    sts_client = boto3.client('sts')
    try:
        assume_role_response = sts_client.assume_role(RoleArn=role_arn, RoleSessionName="configLambdaExecution")
        return assume_role_response['Credentials']
    except botocore.exceptions.ClientError as ex:
        # Scrub error message for any internal account info leaks
        if 'AccessDenied' in ex.response['Error']['Code']:
            ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role."
        else:
            ex.response['Error']['Message'] = "InternalError"
            ex.response['Error']['Code'] = "InternalError"
        raise ex
# This removes older evaluations (usually useful for periodic rules not reporting on AWS::::Account).
def clean_up_old_evaluations(latest_evaluations, event):
    """Mark previously-reported resources absent from the latest run as NOT_APPLICABLE.

    Pages through the rule's existing COMPLIANT/NON_COMPLIANT results and, for
    every resource id that does not appear in *latest_evaluations*, prepends a
    NOT_APPLICABLE evaluation so stale results do not linger in AWS Config.

    Keyword arguments:
    latest_evaluations -- evaluations produced by the current invocation
    event -- the event variable given in the lambda handler

    Returns the cleanup evaluations followed by *latest_evaluations*.
    """
    old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
        ConfigRuleName=event['configRuleName'],
        ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
        Limit=100)
    old_eval_list = []
    while True:
        old_eval_list.extend(old_eval['EvaluationResults'])
        if 'NextToken' not in old_eval:
            break
        old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
            ConfigRuleName=event['configRuleName'],
            ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
            Limit=100,
            NextToken=old_eval['NextToken'])
    # Set lookup replaces the previous O(n*m) scan over latest_evaluations.
    latest_resource_ids = {evaluation['ComplianceResourceId'] for evaluation in latest_evaluations}
    cleaned_evaluations = []
    for old_result in old_eval_list:
        old_resource_id = old_result['EvaluationResultIdentifier']['EvaluationResultQualifier']['ResourceId']
        if old_resource_id not in latest_resource_ids:
            cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event))
    return cleaned_evaluations + latest_evaluations
# This decorates the lambda_handler in rule_code with the actual PutEvaluation call
def lambda_handler(event, context):
    """Entry point: validate parameters, run the rule code, report evaluations.

    The rule-specific evaluate_parameters/evaluate_compliance functions are
    expected to be defined elsewhere in this module. compliance_result may be
    falsy (periodic rule with nothing to report), a string verdict, a list of
    evaluation dicts, or a single evaluation dict; each shape is handled below.
    """
    global AWS_CONFIG_CLIENT
    AWS_CONFIG_CLIENT = get_client('config', event)
    #print(event)
    check_defined(event, 'event')
    invoking_event = json.loads(event['invokingEvent'])
    rule_parameters = {}
    if 'ruleParameters' in event:
        rule_parameters = json.loads(event['ruleParameters'])
    try:
        rule_parameters_clean = evaluate_parameters(rule_parameters)
    except ValueError as ex:
        return build_parameters_value_error_response(ex)
    try:
        configuration_item = get_configuration_item(invoking_event)
        if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'ScheduledNotification', 'OversizedConfigurationItemChangeNotification']:
            if is_applicable(configuration_item, event):
                compliance_result = evaluate_compliance(event, configuration_item, rule_parameters_clean)
            else:
                compliance_result = "NOT_APPLICABLE"
        else:
            return {'internalErrorMessage': 'Unexpected message type ' + str(invoking_event)}
    except botocore.exceptions.ClientError as ex:
        # Distinguish service-side failures from customer misconfiguration.
        if is_internal_error(ex):
            return build_internal_error_response("Unexpected error while completing API request", str(ex))
        return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message'])
    except ValueError as ex:
        return build_internal_error_response(str(ex), str(ex))
    evaluations = []
    latest_evaluations = []
    if not compliance_result:
        # Nothing reported: record NOT_APPLICABLE on the account and clean up stale results.
        latest_evaluations.append(build_evaluation(event['accountId'], "NOT_APPLICABLE", event, resource_type='AWS::::Account'))
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, str):
        evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
    elif isinstance(compliance_result, list):
        # Custom evaluations: keep only those that carry all mandatory fields.
        for evaluation in compliance_result:
            missing_fields = False
            for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
                if field not in evaluation:
                    print("Missing " + field + " from custom evaluation.")
                    missing_fields = True
            if not missing_fields:
                latest_evaluations.append(evaluation)
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, dict):
        missing_fields = False
        for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
            if field not in compliance_result:
                print("Missing " + field + " from custom evaluation.")
                missing_fields = True
        if not missing_fields:
            evaluations.append(compliance_result)
    else:
        evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
    # Put together the request that reports the evaluation status
    resultToken = event['resultToken']
    testMode = False
    if resultToken == 'TESTMODE':
        # Used solely for RDK test to skip actual put_evaluation API call
        testMode = True
    # Invoke the Config API to report the result of the evaluation
    # put_evaluations accepts at most 100 evaluations per call, hence the batching.
    evaluation_copy = []
    evaluation_copy = evaluations[:]
    while(evaluation_copy):
        AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=resultToken, TestMode=testMode)
        del evaluation_copy[:100]
    # Used solely for RDK test to be able to test Lambda function
    return evaluations
def is_internal_error(exception):
    # An error is "internal" when it is not a botocore ClientError at all, or when
    # the client error carries a 5xx / InternalError / ServiceError service code.
    return ((not isinstance(exception, botocore.exceptions.ClientError)) or exception.response['Error']['Code'].startswith('5')
            or 'InternalError' in exception.response['Error']['Code'] or 'ServiceError' in exception.response['Error']['Code'])
# Shorthand wrapper: report an internal error with generic customer-facing fields.
def build_internal_error_response(internalErrorMessage, internalErrorDetails=None):
    return build_error_response(internalErrorMessage, internalErrorDetails, 'InternalError', 'InternalError')
def build_error_response(internalErrorMessage, internalErrorDetails=None, customerErrorCode=None, customerErrorMessage=None):
    """Assemble, log and return the error payload expected by the Config rule framework."""
    error_response = {}
    error_response['internalErrorMessage'] = internalErrorMessage
    error_response['internalErrorDetails'] = internalErrorDetails
    error_response['customerErrorMessage'] = customerErrorMessage
    error_response['customerErrorCode'] = customerErrorCode
    print(error_response)
    return error_response
| cc0-1.0 | 55b9358d1b1a1d5e1da221fed8eb1a8f | 41.979167 | 172 | 0.667386 | 4.351486 | false | true | false | false |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
# Rule Name:
# ECS_ECRIMAGE_CHECK
# Description:
# Check ECS Task Definitions for presence of non-ECR container repository. ECR repository is dkr.ecr.region.amazonaws.com
#
# Rationale:
# ECS task definitions should be configured to retrieve authorized container images from ECR repository in a business controlled environment.
#
# Indicative Severity:
# Medium
#
# Trigger:
# Periodic checks against AWS::ECS::TaskDefinition
#
# Reports on:
# AWS::ECS::TaskDefinition
#
#
# Scenarios:
# Scenario: 1
# Given: Rules parameters are provided
# And: It contains a parameter key other than TaskDefinition and RegionName
# Then: Return ERROR to specify valid keys
# Scenario: 2
# Given: Rules parameters are provided
# And: There is no value specified for TaskDefinition and RegionName does not match any AWS regions
# Then: Return ERROR and print to Cloudwatch to specify valid region name
# Scenario: 3
# Given: Rules parameters are provided. TaskDefinition and RegionName are provided
# And: Task Definitions match TaskDefinitions (case sensitive) specified in values. RegionName value matches valid region name of where ECR resides
# Then: Return COMPLIANT with annotation if Task Definitions are configured to retrieve from ECR Repository.
# Scenario: 4
# Given: Rules parameters are provided. TaskDefinition and RegionName are provided
# Given: ECS task definitions are found configured to retrieve container images from a non-ECR repository
# Then: Return NON_COMPLIANT
#
import json
import sys
import datetime
import re
import boto3
import botocore
try:
import liblogging
except ImportError:
pass
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::ECS::TaskDefinition'
# DEFAULT_RESOURCE_TYPE = 'AWS::::Account'
# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account).
ASSUME_ROLE_MODE = False
# Other parameters (no change needed)
CONFIG_ROLE_TIMEOUT_SECONDS = 900
#############
# Main Code #
#############
# NOTE(review): static whitelist used to validate the RegionName rule parameter;
# it must be maintained by hand as new AWS regions launch.
REGIONS = ["us-east-1", "us-east-2", "us-west-1", "us-west-2", "ap-east-1", "ap-south-1", "ap-northeast-3", "ap-northeast-2",
           "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ca-central-1", "eu-central-1", "eu-west-1", "eu-west-2", "eu-west-3",
           "eu-north-1", "me-south-1", "sa-east-1"]
def evaluate_compliance(event, configuration_item, valid_rule_parameters):
    """Evaluate the configured ECS task definitions for ECR-only container images.

    For every comma-separated task definition in the TaskDefinition parameter,
    each container image must come from the ECR registry of the RegionName
    parameter's region; otherwise the task definition is NON_COMPLIANT.

    Keyword arguments:
    event -- the event variable given in the lambda handler
    configuration_item -- unused (periodic rule); kept for the boilerplate signature
    valid_rule_parameters -- dict validated by evaluate_parameters, with
                             TaskDefinition and RegionName keys

    Returns a list of evaluations, or None when a task definition is unknown.
    """
    evaluations = []
    region = valid_rule_parameters["RegionName"].strip()
    ecs = get_client('ecs', event, region)
    # re.escape makes the dots in the hostname literal; the previous unescaped
    # pattern let "." match any character, so lookalike registries could pass.
    ecr_repo = re.compile(re.escape(".dkr.ecr." + region + ".amazonaws.com"))
    for taskdef in valid_rule_parameters["TaskDefinition"].split(","):
        # Strip once so the same identifier is used for the API call and the evaluation.
        taskdef = taskdef.strip()
        response = ecs.describe_task_definition(taskDefinition=taskdef)
        if "taskDefinition" not in response:
            print("There is no task definition named: " + taskdef)
            return None
        for containerdef in response["taskDefinition"].get("containerDefinitions", []):
            if ecr_repo.search(containerdef["image"]):
                evaluations.append(build_evaluation(taskdef, 'COMPLIANT', event, annotation='Task Definition has compliant ECR repo images: ' + containerdef["image"]))
            else:
                evaluations.append(build_evaluation(taskdef, 'NON_COMPLIANT', event, annotation='Task Definition has non-ECR repo images: ' + containerdef["image"]))
    return evaluations
def evaluate_parameters(rule_parameters):
    """Validate the rule parameters.

    Keyword arguments:
    rule_parameters -- the Key/Value dictionary of the Config rule parameters

    Returns the validated parameter dictionary unchanged.
    Raises ValueError when TaskDefinition or RegionName is missing or invalid,
    so the caller reports an InvalidParameterValueException as described by
    scenarios 1 and 2 in the file header (previously this function only
    printed a message and returned an empty value, crashing the rule later).
    """
    try:
        task_definition = rule_parameters["TaskDefinition"]
        region_name = rule_parameters["RegionName"]
    except (KeyError, TypeError):
        raise ValueError("The parameters must include both TaskDefinition and RegionName keys.")
    if not isinstance(task_definition, str) or not task_definition:
        raise ValueError("Please specify a valid task definition.")
    if not isinstance(region_name, str) or region_name.strip() not in REGIONS:
        raise ValueError("Please specify a valid region name e.g. us-east-1")
    return rule_parameters
####################
# Helper Functions #
####################
# Build an error to be displayed in the logs when the parameter is invalid.
def build_parameters_value_error_response(ex):
    """Return an error dictionary when the evaluate_parameters() raises a ValueError.

    Keyword arguments:
    ex -- Exception text
    """
    return build_error_response(internal_error_message="Parameter value is invalid",
                                internal_error_details="An ValueError was raised during the validation of the Parameter value",
                                customer_error_code="InvalidParameterValueException",
                                customer_error_message=str(ex))
# This gets the client after assuming the Config service role
# either in the same AWS account or cross-account.
def get_client(service, event, region=None):
    """Return the service boto client. It should be used instead of directly calling the client.

    Keyword arguments:
    service -- the service name used for calling the boto.client()
    event -- the event variable given in the lambda handler
    region -- the region where the client is called (default: None)
    """
    # In same-account mode the Lambda execution role is used directly.
    if not ASSUME_ROLE_MODE:
        return boto3.client(service, region)
    credentials = get_assume_role_credentials(get_execution_role_arn(event), region)
    return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'],
                        aws_secret_access_key=credentials['SecretAccessKey'],
                        aws_session_token=credentials['SessionToken'],
                        region_name=region
                       )
# This generates an evaluation for config
def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on scheduled rules.

    Keyword arguments:
    resource_id -- the unique id of the resource to report
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    event -- the event variable given in the lambda handler
    resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule (default DEFAULT_RESOURCE_TYPE)
    annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer.
    """
    eval_cc = {}
    if annotation:
        # build_annotation enforces the put_evaluations annotation size limit.
        eval_cc['Annotation'] = build_annotation(annotation)
    eval_cc['ComplianceResourceType'] = resource_type
    eval_cc['ComplianceResourceId'] = resource_id
    eval_cc['ComplianceType'] = compliance_type
    eval_cc['OrderingTimestamp'] = str(json.loads(event['invokingEvent'])['notificationCreationTime'])
    return eval_cc
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on configuration change rules.

    Keyword arguments:
    configuration_item -- the configurationItem dictionary in the invokingEvent
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer.
    """
    eval_ci = {}
    if annotation:
        # build_annotation enforces the put_evaluations annotation size limit.
        eval_ci['Annotation'] = build_annotation(annotation)
    eval_ci['ComplianceResourceType'] = configuration_item['resourceType']
    eval_ci['ComplianceResourceId'] = configuration_item['resourceId']
    eval_ci['ComplianceType'] = compliance_type
    eval_ci['OrderingTimestamp'] = configuration_item['configurationItemCaptureTime']
    return eval_ci
####################
# Boilerplate Code #
####################
# Get execution role for Lambda function
def get_execution_role_arn(event):
    """Return the IAM role ARN to assume, honoring an ExecutionRoleName override
    supplied through the rule parameters."""
    default_arn = event['executionRoleArn']
    if 'ruleParameters' not in event:
        return default_arn
    override_name = json.loads(event['ruleParameters']).get("ExecutionRoleName")
    if not override_name:
        return default_arn
    # Keep the "arn:...:role" prefix of the default ARN, swap in the override name.
    return "{}/{}".format(default_arn.split("/")[0], override_name)
# Build annotation within Service constraints
def build_annotation(annotation_string):
    """Truncate *annotation_string* so it fits the put_evaluations annotation limit."""
    if len(annotation_string) <= 256:
        return annotation_string
    truncation_marker = " [truncated]"
    return annotation_string[:244] + truncation_marker
# Helper function used to validate input
def check_defined(reference, reference_name):
    """Return *reference* unchanged, raising when it is falsy/undefined."""
    if not reference:
        raise Exception('Error: ', reference_name, 'is not defined')
    return reference
# Check whether the message is OversizedConfigurationItemChangeNotification or not
def is_oversized_changed_notification(message_type):
    """Return True when the event is an oversized configuration-change notification."""
    check_defined(message_type, 'messageType')
    return message_type == 'OversizedConfigurationItemChangeNotification'
# Check whether the message is a ScheduledNotification or not.
def is_scheduled_notification(message_type):
    """Return True when the event is a periodic ScheduledNotification."""
    check_defined(message_type, 'messageType')
    return message_type == 'ScheduledNotification'
# Get configurationItem using getResourceConfigHistory API
# in case of OversizedConfigurationItemChangeNotification
def get_configuration(resource_type, resource_id, configuration_capture_time):
    """Fetch the latest configuration item for a resource (limit=1 at or before
    the capture time) and normalize it to the invoking-event shape."""
    result = AWS_CONFIG_CLIENT.get_resource_config_history(
        resourceType=resource_type,
        resourceId=resource_id,
        laterTime=configuration_capture_time,
        limit=1)
    configuration_item = result['configurationItems'][0]
    return convert_api_configuration(configuration_item)
# Convert from the API model to the original invocation model
def convert_api_configuration(configuration_item):
    """Rewrite a get_resource_config_history item to match the shape of the
    configurationItem delivered inside a Config invoking event."""
    # The event model serializes datetimes as strings.
    for k, v in configuration_item.items():
        if isinstance(v, datetime.datetime):
            configuration_item[k] = str(v)
    # Duplicate API-model keys under their invoking-event names.
    configuration_item['awsAccountId'] = configuration_item['accountId']
    configuration_item['ARN'] = configuration_item['arn']
    configuration_item['configurationStateMd5Hash'] = configuration_item['configurationItemMD5Hash']
    configuration_item['configurationItemVersion'] = configuration_item['version']
    # The event model carries the configuration as a parsed object, not JSON text.
    configuration_item['configuration'] = json.loads(configuration_item['configuration'])
    if 'relationships' in configuration_item:
        for i in range(len(configuration_item['relationships'])):
            configuration_item['relationships'][i]['name'] = configuration_item['relationships'][i]['relationshipName']
    return configuration_item
# Based on the type of message get the configuration item
# either from configurationItem in the invoking event
# or using the getResourceConfigHistory API in the get_configuration function.
def get_configuration_item(invoking_event):
    """Return the configuration item for this invocation, or None.

    Oversized notifications only carry a summary, so the full item is fetched
    from the config-history API; scheduled notifications have no item at all.
    """
    check_defined(invoking_event, 'invokingEvent')
    if is_oversized_changed_notification(invoking_event['messageType']):
        configuration_item_summary = check_defined(invoking_event['configurationItemSummary'], 'configurationItemSummary')
        return get_configuration(configuration_item_summary['resourceType'], configuration_item_summary['resourceId'], configuration_item_summary['configurationItemCaptureTime'])
    if is_scheduled_notification(invoking_event['messageType']):
        return None
    return check_defined(invoking_event['configurationItem'], 'configurationItem')
# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary.
def is_applicable(configuration_item, event):
    """Return True when the configuration item still warrants an evaluation.

    A missing/falsy item or event is treated as applicable so the rule code
    gets a chance to run.
    """
    try:
        check_defined(configuration_item, 'configurationItem')
        check_defined(event, 'event')
    except:  # NOTE(review): bare except deliberately treats any validation failure as "applicable"; consider narrowing to Exception
        return True
    status = configuration_item['configurationItemStatus']
    event_left_scope = event['eventLeftScope']
    if status == 'ResourceDeleted':
        # Log only; the final return below already yields False for this status.
        print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
    return status in ('OK', 'ResourceDiscovered') and not event_left_scope
def get_assume_role_credentials(role_arn, region=None):
    """Assume *role_arn* via STS and return the temporary credentials dict.

    Raises the botocore ClientError after replacing its message so no
    account-internal details leak to the caller.
    """
    sts_client = boto3.client('sts', region)
    try:
        assume_role_response = sts_client.assume_role(RoleArn=role_arn,
                                                      RoleSessionName="configLambdaExecution",
                                                      DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS)
        if 'liblogging' in sys.modules:
            liblogging.logSession(role_arn, assume_role_response)
        return assume_role_response['Credentials']
    except botocore.exceptions.ClientError as ex:
        # Scrub error message for any internal account info leaks
        print(str(ex))
        if 'AccessDenied' in ex.response['Error']['Code']:
            ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role."
        else:
            ex.response['Error']['Message'] = "InternalError"
            ex.response['Error']['Code'] = "InternalError"
        raise ex
# This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account).
def clean_up_old_evaluations(latest_evaluations, event):
    """Mark previously-reported resources absent from the latest run as NOT_APPLICABLE.

    Pages through the rule's existing COMPLIANT/NON_COMPLIANT results and, for
    every resource id not present in *latest_evaluations*, prepends a
    NOT_APPLICABLE evaluation so stale results do not linger in AWS Config.
    """
    cleaned_evaluations = []
    old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
        ConfigRuleName=event['configRuleName'],
        ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
        Limit=100)
    old_eval_list = []
    # Follow NextToken pagination until all existing results are collected.
    while True:
        for old_result in old_eval['EvaluationResults']:
            old_eval_list.append(old_result)
        if 'NextToken' in old_eval:
            next_token = old_eval['NextToken']
            old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
                ConfigRuleName=event['configRuleName'],
                ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
                Limit=100,
                NextToken=next_token)
        else:
            break
    for old_eval in old_eval_list:
        old_resource_id = old_eval['EvaluationResultIdentifier']['EvaluationResultQualifier']['ResourceId']
        newer_founded = False
        for latest_eval in latest_evaluations:
            if old_resource_id == latest_eval['ComplianceResourceId']:
                newer_founded = True
        if not newer_founded:
            cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event))
    return cleaned_evaluations + latest_evaluations
def lambda_handler(event, context):
    """Entry point: validate parameters, run the rule code, report evaluations.

    compliance_result may be falsy (nothing to report), a string verdict, a
    list of evaluation dicts, or a single evaluation dict; each shape is
    handled below before the batched put_evaluations call.
    """
    if 'liblogging' in sys.modules:
        liblogging.logEvent(event)
    global AWS_CONFIG_CLIENT
    #print(event)
    check_defined(event, 'event')
    invoking_event = json.loads(event['invokingEvent'])
    rule_parameters = {}
    if 'ruleParameters' in event:
        rule_parameters = json.loads(event['ruleParameters'])
    try:
        valid_rule_parameters = evaluate_parameters(rule_parameters)
    except ValueError as ex:
        return build_parameters_value_error_response(ex)
    try:
        AWS_CONFIG_CLIENT = get_client('config', event)
        if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'ScheduledNotification', 'OversizedConfigurationItemChangeNotification']:
            configuration_item = get_configuration_item(invoking_event)
            if is_applicable(configuration_item, event):
                compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters)
            else:
                compliance_result = "NOT_APPLICABLE"
        else:
            return build_internal_error_response('Unexpected message type', str(invoking_event))
    except botocore.exceptions.ClientError as ex:
        # Distinguish service-side failures from customer misconfiguration.
        if is_internal_error(ex):
            return build_internal_error_response("Unexpected error while completing API request", str(ex))
        return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message'])
    except ValueError as ex:
        return build_internal_error_response(str(ex), str(ex))
    evaluations = []
    latest_evaluations = []
    if not compliance_result:
        # Nothing reported: record NOT_APPLICABLE on the account and clean up stale results.
        latest_evaluations.append(build_evaluation(event['accountId'], "NOT_APPLICABLE", event, resource_type='AWS::::Account'))
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, str):
        if configuration_item:
            evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
        else:
            evaluations.append(build_evaluation(event['accountId'], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE))
    elif isinstance(compliance_result, list):
        # Custom evaluations: keep only those that carry all mandatory fields.
        for evaluation in compliance_result:
            missing_fields = False
            for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
                if field not in evaluation:
                    print("Missing " + field + " from custom evaluation.")
                    missing_fields = True
            if not missing_fields:
                latest_evaluations.append(evaluation)
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, dict):
        missing_fields = False
        for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
            if field not in compliance_result:
                print("Missing " + field + " from custom evaluation.")
                missing_fields = True
        if not missing_fields:
            evaluations.append(compliance_result)
    else:
        evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
    # Put together the request that reports the evaluation status
    result_token = event['resultToken']
    test_mode = False
    if result_token == 'TESTMODE':
        # Used solely for RDK test to skip actual put_evaluation API call
        test_mode = True
    # Invoke the Config API to report the result of the evaluation
    # put_evaluations accepts at most 100 evaluations per call, hence the batching.
    evaluation_copy = []
    evaluation_copy = evaluations[:]
    while evaluation_copy:
        AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode)
        del evaluation_copy[:100]
    # Used solely for RDK test to be able to test Lambda function
    return evaluations
def is_internal_error(exception):
    # An error is "internal" when it is not a botocore ClientError at all, or when
    # the client error carries a 5xx / InternalError / ServiceError service code.
    return ((not isinstance(exception, botocore.exceptions.ClientError)) or exception.response['Error']['Code'].startswith('5')
            or 'InternalError' in exception.response['Error']['Code'] or 'ServiceError' in exception.response['Error']['Code'])
# Shorthand wrapper: report an internal error with generic customer-facing fields.
def build_internal_error_response(internal_error_message, internal_error_details=None):
    return build_error_response(internal_error_message, internal_error_details, 'InternalError', 'InternalError')
def build_error_response(internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None):
    """Assemble, log and return the error payload expected by the Config rule framework."""
    error_response = {
        'internalErrorMessage': internal_error_message,
        'internalErrorDetails': internal_error_details,
        'customerErrorMessage': customer_error_message,
        'customerErrorCode': customer_error_code
    }
    print(error_response)
    return error_response
| cc0-1.0 | 786f2142f320c434444c01f0f20048a3 | 45.319635 | 178 | 0.686465 | 4.255926 | false | true | false | false |
awslabs/aws-config-rules | python/cloudtrail_encrypted.py | 1 | 3455 | #
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Ensure CloudTrail is encrypted
# Description: Checks that tracked trails are encrypted (optionally with a specific KMS Key).
#
# Trigger Type: Change Triggered
# Scope of Changes: AWS::CloudTrail::Trail
# Required Parameters: None
# Optional Parameter: KMSKeyARN
# Optional Parameter value example : arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
import json
import boto3
# Resource types this rule evaluates; anything else is reported NOT_APPLICABLE.
APPLICABLE_RESOURCES = ["AWS::CloudTrail::Trail"]
# Name of the optional rule parameter carrying the expected KMS key ARN.
OPTIONAL_PARAMETER = "KMSKeyARN"
# Verify the optional parameter, set the parameter to "None" if not existent
def normalize_optional_parameter(rule_parameters, optional_parameter):
    """Return a parameter dict guaranteed to carry *optional_parameter*.

    When the parameter is absent (or no parameters were given at all), the
    whole dict is replaced by {optional_parameter: "None"} — mirroring the
    historical behavior of this helper.
    """
    if rule_parameters and optional_parameter in rule_parameters:
        print(optional_parameter + " set to rule parameter value: " + rule_parameters[optional_parameter])
        return rule_parameters
    print(optional_parameter + " set to 'None'")
    return {optional_parameter: "None"}
# Verify compliance
def evaluate_compliance(configuration_item, rule_parameters, optional_parameter=None):
    """Return the compliance verdict for a CloudTrail trail's encryption.

    Keyword arguments:
    configuration_item -- the configurationItem dictionary in the invokingEvent
    rule_parameters -- parameters normalized by normalize_optional_parameter
    optional_parameter -- key name of the optional KMS key parameter; defaults
                          to the module-level OPTIONAL_PARAMETER (the previous
                          three-argument signature remains valid)

    Returns a dict with "compliance_type" and "annotation" keys.
    """
    if optional_parameter is None:
        optional_parameter = OPTIONAL_PARAMETER
    if (configuration_item["resourceType"] not in APPLICABLE_RESOURCES) or (configuration_item["configurationItemStatus"] == "ResourceDeleted"):
        return {
            "compliance_type": "NOT_APPLICABLE",
            "annotation": "NOT_APPLICABLE"
        }
    # Python 3 fix: these were Python 2 print statements (SyntaxError under py3).
    print(configuration_item)
    # NOTE(review): kms_key_id is compared to the string "None", matching the
    # sentinel used by normalize_optional_parameter — confirm the recorded
    # configuration really uses that sentinel for "no key".
    kms_key_id = configuration_item["configuration"]["kmsKeyId"]
    print(kms_key_id)
    if kms_key_id == rule_parameters[optional_parameter] and kms_key_id != "None":
        return {
            "compliance_type": "COMPLIANT",
            "annotation": 'Encryption is enabled with the specified KMS key [' + kms_key_id + '].'
        }
    elif rule_parameters[optional_parameter] == "None" and kms_key_id != "None":
        return {
            "compliance_type": "COMPLIANT",
            "annotation": 'Encryption is enabled (no key specified in the Rule).'
        }
    elif kms_key_id != rule_parameters[optional_parameter] and kms_key_id != "None":
        return {
            "compliance_type": "NON_COMPLIANT",
            "annotation": 'Encryption is enabled with [' + kms_key_id + ']. It is not with the specified KMS key in the rule [' + rule_parameters[optional_parameter] + '].'
        }
    else:
        return {
            "compliance_type": "NON_COMPLIANT",
            "annotation": 'Encryption is disabled.'
        }
# Start of the lambda function
def lambda_handler(event, context):
    """Evaluate the changed CloudTrail trail and report the result to AWS Config.

    Fixes: Python 2 print statements converted to print() calls, and
    evaluate_compliance is now called with the optional-parameter key name —
    it was previously invoked with only two arguments, raising a TypeError
    on every invocation.
    """
    invoking_event = json.loads(event['invokingEvent'])
    configuration_item = invoking_event["configurationItem"]
    rule_parameters = json.loads(event["ruleParameters"])
    print(rule_parameters)
    rule_parameters = normalize_optional_parameter(rule_parameters, OPTIONAL_PARAMETER)
    print(rule_parameters)
    evaluation = evaluate_compliance(configuration_item, rule_parameters, OPTIONAL_PARAMETER)
    config = boto3.client('config')
    result_token = "No token found."
    if "resultToken" in event:
        result_token = event["resultToken"]
    config.put_evaluations(
        Evaluations=[
            {
                "ComplianceResourceType": configuration_item["resourceType"],
                "ComplianceResourceId": configuration_item["resourceId"],
                "ComplianceType": evaluation["compliance_type"],
                "Annotation": evaluation["annotation"],
                "OrderingTimestamp": configuration_item["configurationItemCaptureTime"]
            },
        ],
        ResultToken=result_token
    )
import os
import sys
import dj_database_url
# Project root; the parent directory is added to sys.path so sibling
# packages resolve while the test settings are in use.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '..')))
INSTALLED_APPS = (
    'oahapi',
    'ratechecker',
    'countylimits',
    'rest_framework',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Test-only secret; never reuse this value outside the test suite.
SECRET_KEY = "django_tests_secret_key"
DEBUG = True
TEMPLATE_DEBUG = False
ROOT_URLCONF = 'oahapi.urls'
# Default to a local SQLite file; DATABASE_URL (below) overrides it when set.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'oah.sqlite3',
    }
}
if 'DATABASE_URL' in os.environ:
    DATABASES['default'] = dj_database_url.config()
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    }
]
| cc0-1.0 | 1199e27bdd40d7d910113c116c650d99 | 22.218182 | 69 | 0.675803 | 3.265985 | false | false | true | false |
#! /usr/bin/env python
# script to populate the database with records
import time
import random
from wimms.sql import SQLMetadata, _CREATE_USER_RECORD
def populate_db(sqluri, service, nodes, user_range, host="loadtest.local"):
"""Create a bunch of users for the given service.
The resulting users will have an adress in the form of <uid>@<host> where
uid is an int from 0 to :param user_range:.
This function is useful to populate the database during the loadtest. It
allows to test a specific behaviour: making sure that we are not reading
the values from memory when retrieving the node information.
:param sqluri: the sqluri string used to connect to the database
:param service: the service to assign the users to.
:param nodes: the list of availables nodes for this service
:param user_range: the number of users to create
:param host: the hostname to use when generating users
"""
params = {
'service': service,
'generation': 0,
'client_state': '',
'timestamp': int(time.time() * 1000),
}
# for each user in the range, assign him to a node
md = SQLMetadata(sqluri, create_tables=True)
for idx in range(0, user_range):
email = "%s@%s" % (idx, host)
node = random.choice(nodes)
md._safe_execute(_CREATE_USER_RECORD, email=email, node=node, **params)
def main():
"""Read the arguments from the command line and pass them to the
populate_db function.
Example use:
python populate-db.py sqlite:////tmp/tokenserver aitc\
node1,node2,node3,node4,node5,node6 100
"""
import sys
if len(sys.argv) < 5:
raise ValueError('You need to specify (in this order) sqluri, '
'service, nodes (comma separated) and user_range')
# transform the values from the cli to python objects
sys.argv[3] = sys.argv[3].split(',') # comma separated => list
sys.argv[4] = int(sys.argv[4])
populate_db(*sys.argv[1:])
print("created {nb_users} users".format(nb_users=sys.argv[4]))
if __name__ == '__main__':
main()
| mpl-2.0 | fd8972d04c02054308b2df831643e06d | 34.316667 | 79 | 0.655026 | 3.790698 | false | false | false | false |
mcedit/mcedit | editortools/player.py | 1 | 18883 | """Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
from OpenGL import GL
import numpy
from albow import TableView, TableColumn, Label, Button, Column, CheckBox, AttrRef, Row, ask, alert
import config
from editortools.editortool import EditorTool
from editortools.tooloptions import ToolOptions
from glbackground import Panel
from glutils import DisplayList
from mceutils import loadPNGTexture, alertException, drawTerrainCuttingWire, drawCube
from operation import Operation
import pymclevel
from pymclevel.box import BoundingBox, FloatBox
import logging
log = logging.getLogger(__name__)
class PlayerMoveOperation(Operation):
undoPos = None
def __init__(self, tool, pos, player="Player", yp=(None, None)):
super(PlayerMoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.pos = pos
self.player = player
self.yp = yp
def perform(self, recordUndo=True):
try:
level = self.tool.editor.level
try:
self.undoPos = level.getPlayerPosition(self.player)
self.undoDim = level.getPlayerDimension(self.player)
self.undoYP = level.getPlayerOrientation(self.player)
except Exception, e:
log.info("Couldn't get player position! ({0!r})".format(e))
yaw, pitch = self.yp
if yaw is not None and pitch is not None:
level.setPlayerOrientation((yaw, pitch), self.player)
level.setPlayerPosition(self.pos, self.player)
level.setPlayerDimension(level.dimNo, self.player)
self.tool.markerList.invalidate()
except pymclevel.PlayerNotFound, e:
print "Player move failed: ", e
def undo(self):
if not (self.undoPos is None):
level = self.tool.editor.level
level.setPlayerPosition(self.undoPos, self.player)
level.setPlayerDimension(self.undoDim, self.player)
level.setPlayerOrientation(self.undoYP, self.player)
self.tool.markerList.invalidate()
def bufferSize(self):
return 20
class SpawnPositionInvalid(Exception):
pass
def okayAt63(level, pos):
"""blocks 63 or 64 must be occupied"""
return level.blockAt(pos[0], 63, pos[2]) != 0 or level.blockAt(pos[0], 64, pos[2]) != 0
def okayAboveSpawn(level, pos):
"""3 blocks above spawn must be open"""
return not any([level.blockAt(pos[0], pos[1] + i, pos[2]) for i in range(1, 4)])
def positionValid(level, pos):
try:
return okayAt63(level, pos) and okayAboveSpawn(level, pos)
except EnvironmentError:
return False
class PlayerSpawnMoveOperation(Operation):
undoPos = None
def __init__(self, tool, pos):
self.tool, self.pos = tool, pos
def perform(self, recordUndo=True):
level = self.tool.editor.level
if isinstance(level, pymclevel.MCInfdevOldLevel):
if not positionValid(level, self.pos):
if SpawnSettings.spawnProtection.get():
raise SpawnPositionInvalid("You cannot have two air blocks at Y=63 and Y=64 in your spawn point's column. Additionally, you cannot have a solid block in the three blocks above your spawn point. It's weird, I know.")
self.undoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.pos)
self.tool.markerList.invalidate()
def undo(self):
if self.undoPos is not None:
level = self.tool.editor.level
level.setPlayerSpawnPosition(self.undoPos)
self.tool.markerList.invalidate()
class PlayerPositionPanel(Panel):
def __init__(self, tool):
Panel.__init__(self)
self.tool = tool
level = tool.editor.level
if hasattr(level, 'players'):
players = level.players or ["[No players]"]
else:
players = ["Player"]
self.players = players
tableview = TableView(columns=[
TableColumn("Player Name", 200),
])
tableview.index = 0
tableview.num_rows = lambda: len(players)
tableview.row_data = lambda i: (players[i],)
tableview.row_is_selected = lambda x: x == tableview.index
tableview.zebra_color = (0, 0, 0, 48)
def selectTableRow(i, evt):
tableview.index = i
tableview.click_row = selectTableRow
self.table = tableview
l = Label("Player: ")
col = [l, tableview]
gotoButton = Button("Goto Player", action=self.tool.gotoPlayer)
gotoCameraButton = Button("Goto Player's View", action=self.tool.gotoPlayerCamera)
moveButton = Button("Move Player", action=self.tool.movePlayer)
moveToCameraButton = Button("Align Player to Camera", action=self.tool.movePlayerToCamera)
col.extend([gotoButton, gotoCameraButton, moveButton, moveToCameraButton])
col = Column(col)
self.add(col)
self.shrink_wrap()
@property
def selectedPlayer(self):
return self.players[self.table.index]
class PlayerPositionTool(EditorTool):
surfaceBuild = True
toolIconName = "player"
tooltipText = "Move Player"
movingPlayer = None
def reloadTextures(self):
self.charTex = loadPNGTexture('char.png')
@alertException
def movePlayer(self):
self.movingPlayer = self.panel.selectedPlayer
@alertException
def movePlayerToCamera(self):
player = self.panel.selectedPlayer
pos = self.editor.mainViewport.cameraPosition
y = self.editor.mainViewport.yaw
p = self.editor.mainViewport.pitch
d = self.editor.level.dimNo
op = PlayerMoveOperation(self, pos, player, (y, p))
self.movingPlayer = None
op.perform()
self.editor.addOperation(op)
self.editor.addUnsavedEdit()
def gotoPlayerCamera(self):
player = self.panel.selectedPlayer
try:
pos = self.editor.level.getPlayerPosition(player)
y, p = self.editor.level.getPlayerOrientation(player)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.yaw = y
self.editor.mainViewport.pitch = p
self.editor.mainViewport.stopMoving()
self.editor.mainViewport.invalidate()
except pymclevel.PlayerNotFound:
pass
def gotoPlayer(self):
player = self.panel.selectedPlayer
try:
if self.editor.mainViewport.pitch < 0:
self.editor.mainViewport.pitch = -self.editor.mainViewport.pitch
self.editor.mainViewport.cameraVector = self.editor.mainViewport._cameraVector()
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.getPlayerPosition(player)
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
except pymclevel.PlayerNotFound:
pass
def __init__(self, *args):
EditorTool.__init__(self, *args)
self.reloadTextures()
textureVertices = numpy.array(
(
24, 16,
24, 8,
32, 8,
32, 16,
8, 16,
8, 8,
16, 8,
16, 16,
24, 0,
16, 0,
16, 8,
24, 8,
16, 0,
16, 8,
8, 8,
8, 0,
8, 8,
0, 8,
0, 16,
8, 16,
16, 16,
24, 16,
24, 8,
16, 8,
), dtype='f4')
textureVertices.shape = (24, 2)
textureVertices *= 4
textureVertices[:, 1] *= 2
self.texVerts = textureVertices
self.markerList = DisplayList()
panel = None
def showPanel(self):
if not self.panel:
self.panel = PlayerPositionPanel(self)
self.panel.left = self.editor.left
self.panel.centery = self.editor.centery
self.editor.add(self.panel)
def hidePanel(self):
if self.panel and self.panel.parent:
self.panel.parent.remove(self.panel)
self.panel = None
def drawToolReticle(self):
if self.movingPlayer is None:
return
pos, direction = self.editor.blockFaceUnderCursor
pos = (pos[0], pos[1] + 2, pos[2])
x, y, z = pos
#x,y,z=map(lambda p,d: p+d, pos, direction)
GL.glEnable(GL.GL_BLEND)
GL.glColor(1.0, 1.0, 1.0, 0.5)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)))
drawTerrainCuttingWire(BoundingBox((x, y - 1, z), (1, 1, 1)))
#drawTerrainCuttingWire( BoundingBox((x,y-2,z), (1,1,1)) )
GL.glDisable(GL.GL_DEPTH_TEST)
markerLevel = None
def drawToolMarkers(self):
if self.markerLevel != self.editor.level:
self.markerList.invalidate()
self.markerLevel = self.editor.level
self.markerList.call(self._drawToolMarkers)
def _drawToolMarkers(self):
GL.glColor(1.0, 1.0, 1.0, 0.5)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glMatrixMode(GL.GL_MODELVIEW)
for player in self.editor.level.players:
try:
pos = self.editor.level.getPlayerPosition(player)
yaw, pitch = self.editor.level.getPlayerOrientation(player)
dim = self.editor.level.getPlayerDimension(player)
if dim != self.editor.level.dimNo:
continue
x, y, z = pos
GL.glPushMatrix()
GL.glTranslate(x, y, z)
GL.glRotate(-yaw, 0, 1, 0)
GL.glRotate(pitch, 1, 0, 0)
GL.glColor(1, 1, 1, 1)
self.drawCharacterHead(0, 0, 0)
GL.glPopMatrix()
#GL.glEnable(GL.GL_BLEND)
drawTerrainCuttingWire(FloatBox((x - .5, y - .5, z - .5), (1, 1, 1)),
c0=(0.3, 0.9, 0.7, 1.0),
c1=(0, 0, 0, 0),
)
#GL.glDisable(GL.GL_BLEND)
except Exception, e:
print repr(e)
continue
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCharacterHead(self, x, y, z):
GL.glEnable(GL.GL_CULL_FACE)
origin = (x - 0.25, y - 0.25, z - 0.25)
size = (0.5, 0.5, 0.5)
box = FloatBox(origin, size)
drawCube(box,
texture=self.charTex, textureVertices=self.texVerts)
GL.glDisable(GL.GL_CULL_FACE)
@property
def statusText(self):
if not self.panel:
return ""
player = self.panel.selectedPlayer
if player == "Player":
return "Click to move the player"
return "Click to move the player \"{0}\"".format(player)
@alertException
def mouseDown(self, evt, pos, direction):
if self.movingPlayer is None:
return
pos = (pos[0] + 0.5, pos[1] + 2.75, pos[2] + 0.5)
op = PlayerMoveOperation(self, pos, self.movingPlayer)
self.movingPlayer = None
op.perform()
self.editor.addOperation(op)
self.editor.addUnsavedEdit()
def levelChanged(self):
self.markerList.invalidate()
@alertException
def toolSelected(self):
self.showPanel()
self.movingPlayer = None
@alertException
def toolReselected(self):
if self.panel:
self.gotoPlayer()
class PlayerSpawnPositionOptions(ToolOptions):
def __init__(self, tool):
Panel.__init__(self)
self.tool = tool
self.spawnProtectionCheckBox = CheckBox(ref=AttrRef(tool, "spawnProtection"))
self.spawnProtectionLabel = Label("Spawn Position Safety")
self.spawnProtectionLabel.mouse_down = self.spawnProtectionCheckBox.mouse_down
tooltipText = "Minecraft will randomly move your spawn point if you try to respawn in a column where there are no blocks at Y=63 and Y=64. Only uncheck this box if Minecraft is changed."
self.spawnProtectionLabel.tooltipText = self.spawnProtectionCheckBox.tooltipText = tooltipText
row = Row((self.spawnProtectionCheckBox, self.spawnProtectionLabel))
col = Column((Label("Spawn Point Options"), row, Button("OK", action=self.dismiss)))
self.add(col)
self.shrink_wrap()
SpawnSettings = config.Settings("Spawn")
SpawnSettings.spawnProtection = SpawnSettings("Spawn Protection", True)
class PlayerSpawnPositionTool(PlayerPositionTool):
surfaceBuild = True
toolIconName = "playerspawn"
tooltipText = "Move Spawn Point"
def __init__(self, *args):
PlayerPositionTool.__init__(self, *args)
self.optionsPanel = PlayerSpawnPositionOptions(self)
def toolEnabled(self):
return self.editor.level.dimNo == 0
def showPanel(self):
self.panel = Panel()
button = Button("Goto Spawn", action=self.gotoSpawn)
self.panel.add(button)
self.panel.shrink_wrap()
self.panel.left = self.editor.left
self.panel.centery = self.editor.centery
self.editor.add(self.panel)
def gotoSpawn(self):
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.playerSpawnPosition()
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
@property
def statusText(self):
return "Click to set the spawn position."
spawnProtection = SpawnSettings.spawnProtection.configProperty()
def drawToolReticle(self):
pos, direction = self.editor.blockFaceUnderCursor
x, y, z = map(lambda p, d: p + d, pos, direction)
color = (1.0, 1.0, 1.0, 0.5)
if isinstance(self.editor.level, pymclevel.MCInfdevOldLevel) and self.spawnProtection:
if not positionValid(self.editor.level, (x, y, z)):
color = (1.0, 0.0, 0.0, 0.5)
GL.glColor(*color)
GL.glEnable(GL.GL_BLEND)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
color2 = map(lambda a: a * 0.4, color)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)), color2, color)
GL.glDisable(GL.GL_DEPTH_TEST)
def _drawToolMarkers(self):
x, y, z = self.editor.level.playerSpawnPosition()
GL.glColor(1.0, 1.0, 1.0, 1.0)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5 + 0.125 * numpy.sin(self.editor.frames * 0.05), z + 0.5)
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCage(self, x, y, z):
cageTexVerts = numpy.array(pymclevel.MCInfdevOldLevel.materials.blockTextures[52, 0])
pixelScale = 0.5 if self.editor.level.materials.name in ("Pocket", "Alpha") else 1.0
texSize = 16 * pixelScale
cageTexVerts *= pixelScale
cageTexVerts = numpy.array([((tx, ty), (tx + texSize, ty), (tx + texSize, ty + texSize), (tx, ty + texSize)) for (tx, ty) in cageTexVerts], dtype='float32')
GL.glEnable(GL.GL_ALPHA_TEST)
drawCube(BoundingBox((x, y, z), (1, 1, 1)), texture=pymclevel.alphaMaterials.terrainTexture, textureVertices=cageTexVerts)
GL.glDisable(GL.GL_ALPHA_TEST)
@alertException
def mouseDown(self, evt, pos, direction):
pos = map(lambda p, d: p + d, pos, direction)
op = PlayerSpawnMoveOperation(self, pos)
try:
op.perform()
self.editor.addOperation(op)
self.editor.addUnsavedEdit()
self.markerList.invalidate()
except SpawnPositionInvalid, e:
if "Okay" != ask(str(e), responses=["Okay", "Fix it for me!"]):
level = self.editor.level
status = ""
if not okayAt63(level, pos):
level.setBlockAt(pos[0], 63, pos[2], 1)
status += "Block added at y=63.\n"
if 59 < pos[1] < 63:
pos[1] = 63
status += "Spawn point moved upward to y=63.\n"
if not okayAboveSpawn(level, pos):
if pos[1] > 63 or pos[1] < 59:
lpos = (pos[0], pos[1] - 1, pos[2])
if level.blockAt(*pos) == 0 and level.blockAt(*lpos) != 0 and okayAboveSpawn(level, lpos):
pos = lpos
status += "Spawn point shifted down by one block.\n"
if not okayAboveSpawn(level, pos):
for i in range(1, 4):
level.setBlockAt(pos[0], pos[1] + i, pos[2], 0)
status += "Blocks above spawn point cleared.\n"
self.editor.invalidateChunks([(pos[0] // 16, pos[2] // 16)])
op = PlayerSpawnMoveOperation(self, pos)
try:
op.perform()
except SpawnPositionInvalid, e:
alert(str(e))
return
self.editor.addOperation(op)
self.editor.addUnsavedEdit()
self.markerList.invalidate()
if len(status):
alert("Spawn point fixed. Changes: \n\n" + status)
@alertException
def toolReselected(self):
self.gotoSpawn()
| isc | cc1d514479310ed3666952aeed030f2f | 33.521024 | 235 | 0.589737 | 3.731081 | false | false | false | false |
mcedit/mcedit | renderer.py | 1 | 98319 | """Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
"""
renderer.py
What is going on in this file?
Here is an attempt to show the relationships between classes and
their responsibilities
MCRenderer:
has "position", "origin", optionally "viewFrustum"
Loads chunks near position+origin, draws chunks offset by origin
Calls visible on viewFrustum to exclude chunks
(+) ChunkRenderer
Has "chunkPosition", "invalidLayers", "lists"
One per chunk and detail level.
Creates display lists from BlockRenderers
(*) BlockRenderer
Has "vertexArrays"
One per block type, plus one for low detail and one for Entity
"""
from collections import defaultdict, deque
from datetime import datetime, timedelta
from depths import DepthOffset
from glutils import gl, Texture
import logging
import numpy
from OpenGL import GL
import pymclevel
import sys
#import time
def chunkMarkers(chunkSet):
    """ Returns a mapping { size: [position, ...] } for different powers of 2
    as size.

    Greedily coalesces aligned size x size squares of chunk positions into
    squares of the next power of two, recording at each level the positions
    that could not be merged further.
    """
    sizedChunks = defaultdict(list)
    size = 1

    while True:
        doubled = size << 1
        remaining = set(chunkSet)
        while remaining:
            cx, cz = next(iter(remaining))
            # origin of the aligned 2x2 group (in units of `size`) holding this chunk
            ox = cx & ~size
            oz = cz & ~size
            quad = [(ox, oz), (ox + size, oz), (ox + size, oz + size), (ox, oz + size)]
            present = set(quad).intersection(remaining)
            if len(present) == 4:
                # whole group present: promote its origin to the next size
                sizedChunks[doubled].append(quad[0])
            else:
                # incomplete group: these positions stay at the current size
                sizedChunks[size].extend(present)
            remaining.difference_update(present)

        if sizedChunks[doubled]:
            # try to merge the promoted origins into still larger squares
            chunkSet = sizedChunks[doubled]
            sizedChunks[doubled] = []
            size = doubled
        else:
            break

    return sizedChunks
class ChunkRenderer(object):
    """Renders one chunk at a single detail level.

    Holds the BlockRenderers produced for this chunk and compiles their
    vertex arrays into OpenGL display lists, grouped by renderstate.
    """
    maxlod = 2
    minlod = 0

    def __init__(self, renderer, chunkPosition):
        self.renderer = renderer
        self.blockRenderers = []
        self.detailLevel = 0
        # layers whose geometry must be recalculated; everything starts invalid
        self.invalidLayers = set(Layer.AllLayers)

        self.chunkPosition = chunkPosition
        self.bufferSize = 0
        # maps renderstate class -> [display list id, ...]; None until built
        self.renderstateLists = None

    @property
    def visibleLayers(self):
        return self.renderer.visibleLayers

    def forgetDisplayLists(self, states=None):
        """Free this chunk's GL display lists.

        :param states: iterable (list/tuple/set) of renderstate keys to
            forget, or None to forget every state and drop the mapping.
        """
        if self.renderstateLists is not None:
            # print "Discarded {0}, gained {1} bytes".format(self.chunkPosition,self.bufferSize)
            for k in states or self.renderstateLists.iterkeys():
                a = self.renderstateLists.get(k, [])
                # print a
                for i in a:
                    gl.glDeleteLists(i, 1)

            if states:
                # fix: remove each forgotten state's entry individually; the
                # previous code did `del self.renderstateLists[states]`, which
                # used the whole collection as a dict key and could never work
                for k in states:
                    self.renderstateLists.pop(k, None)
            else:
                self.renderstateLists = None

            self.needsRedisplay = True
            self.renderer.discardMasterList()

    def debugDraw(self):
        # draw immediately from the vertex arrays, bypassing display lists
        for blockRenderer in self.blockRenderers:
            blockRenderer.drawArrays(self.chunkPosition, False)

    def makeDisplayLists(self):
        """Compile the current block renderers into display lists, grouped
        by renderstate. No-op unless needsRedisplay is set."""
        if not self.needsRedisplay:
            return
        self.forgetDisplayLists()
        if not self.blockRenderers:
            return

        lists = defaultdict(list)

        showRedraw = self.renderer.showRedraw

        if not (showRedraw and self.needsBlockRedraw):
            GL.glEnableClientState(GL.GL_COLOR_ARRAY)

        renderers = self.blockRenderers

        for blockRenderer in renderers:
            # skip renderers not applicable at this detail level or whose
            # layer is currently hidden
            if self.detailLevel not in blockRenderer.detailLevels:
                continue
            if blockRenderer.layer not in self.visibleLayers:
                continue

            l = blockRenderer.makeArrayList(self.chunkPosition, self.needsBlockRedraw and showRedraw)
            lists[blockRenderer.renderstate].append(l)

        if not (showRedraw and self.needsBlockRedraw):
            GL.glDisableClientState(GL.GL_COLOR_ARRAY)

        self.needsRedisplay = False
        self.renderstateLists = lists

    @property
    def needsBlockRedraw(self):
        return Layer.Blocks in self.invalidLayers

    def invalidate(self, layers=None):
        """Mark the given layers (default: all) as needing recalculation and
        drop their renderers. Block renderers are kept until recalculated."""
        if layers is None:
            layers = Layer.AllLayers

        if layers:
            layers = set(layers)
            self.invalidLayers.update(layers)
            blockRenderers = [br for br in self.blockRenderers
                              if br.layer is Layer.Blocks
                              or br.layer not in layers]
            if len(blockRenderers) < len(self.blockRenderers):
                self.forgetDisplayLists()
            self.blockRenderers = blockRenderers

            if self.renderer.showRedraw and Layer.Blocks in layers:
                self.needsRedisplay = True

    def calcFaces(self):
        """Generator: recalculate this chunk's geometry via the renderer's
        ChunkCalculator, adjusting the detail level first."""
        minlod = self.renderer.detailLevelForChunk(self.chunkPosition)

        minlod = min(minlod, self.maxlod)
        if self.detailLevel != minlod:
            self.forgetDisplayLists()
            self.detailLevel = minlod
            self.invalidLayers.add(Layer.Blocks)

            # discard the standard detail renderers
            if minlod > 0:
                blockRenderers = []
                for br in self.blockRenderers:
                    if br.detailLevels != (0,):
                        blockRenderers.append(br)
                self.blockRenderers = blockRenderers

        if self.renderer.chunkCalculator:
            for i in self.renderer.chunkCalculator.calcFacesForChunkRenderer(self):
                yield

        else:
            raise StopIteration
            # unreachable yield keeps this function a generator even when
            # there is no chunkCalculator (Python 2 generator semantics)
            yield

    def vertexArraysDone(self):
        """Called when face calculation finishes: total up buffer sizes,
        apply the global alpha, and schedule a redisplay."""
        bufferSize = 0
        for br in self.blockRenderers:
            bufferSize += br.bufferSize()
            if self.renderer.alpha != 0xff:
                br.setAlpha(self.renderer.alpha)
        self.bufferSize = bufferSize
        self.invalidLayers = set()
        self.needsRedisplay = True
        self.renderer.invalidateMasterList()

    needsRedisplay = False

    @property
    def done(self):
        return len(self.invalidLayers) == 0
# Index expressions for carving up the interleaved vertex format used below:
# each vertex is six float32 values (24 bytes) -- x, y, z, s, t, then one
# float32-sized slot whose bytes are written through a uint8 view as rgba.
_XYZ = numpy.s_[..., 0:3]
_ST = numpy.s_[..., 3:5]
_XYZST = numpy.s_[..., :5]
# these three index a uint8 view of the vertex array (bytes 20-23 = r,g,b,a)
_RGBA = numpy.s_[..., 20:24]
_RGB = numpy.s_[..., 20:23]
_A = numpy.s_[..., 23]
def makeVertexTemplates(xmin=0, ymin=0, zmin=0, xmax=1, ymax=1, zmax=1):
    """Build the (6, 4, 6) table of cube-face vertex templates.

    Face order matches pymclevel.faces: +X, -X, +Y, -Y, +Z, -Z. Each face
    holds four vertices of (x, y, z, s, t, shade): s/t are texture
    coordinates on a 16-texel tile, shade is a per-face brightness byte.
    """
    # texture coordinates scaled to the 16-texel tile; t axes are flipped
    tx0, tx1 = xmin * 16, xmax * 16
    ty0, ty1 = 16 - (ymin * 16), 16 - (ymax * 16)
    tz0, tz1 = zmin * 16, zmax * 16
    uz0, uz1 = 16 - (zmin * 16), 16 - (zmax * 16)

    faceXInc = [
        [xmax, ymin, zmax, tz0, ty0, 0x0b],
        [xmax, ymin, zmin, tz1, ty0, 0x0b],
        [xmax, ymax, zmin, tz1, ty1, 0x0b],
        [xmax, ymax, zmax, tz0, ty1, 0x0b],
    ]
    faceXDec = [
        [xmin, ymin, zmin, tz0, ty0, 0x0b],
        [xmin, ymin, zmax, tz1, ty0, 0x0b],
        [xmin, ymax, zmax, tz1, ty1, 0x0b],
        [xmin, ymax, zmin, tz0, ty1, 0x0b],
    ]
    # top/bottom faces take their texture coordinates from x/z
    faceYInc = [
        [xmin, ymax, zmin, tx0, uz1, 0x11],  # ne
        [xmin, ymax, zmax, tx0, uz0, 0x11],  # nw
        [xmax, ymax, zmax, tx1, uz0, 0x11],  # sw
        [xmax, ymax, zmin, tx1, uz1, 0x11],  # se
    ]
    faceYDec = [
        [xmin, ymin, zmin, tx0, uz1, 0x08],
        [xmax, ymin, zmin, tx1, uz1, 0x08],
        [xmax, ymin, zmax, tx1, uz0, 0x08],
        [xmin, ymin, zmax, tx0, uz0, 0x08],
    ]
    faceZInc = [
        [xmin, ymin, zmax, tx0, ty0, 0x0d],
        [xmax, ymin, zmax, tx1, ty0, 0x0d],
        [xmax, ymax, zmax, tx1, ty1, 0x0d],
        [xmin, ymax, zmax, tx0, ty1, 0x0d],
    ]
    faceZDec = [
        [xmax, ymin, zmin, tx0, ty0, 0x0d],
        [xmin, ymin, zmin, tx1, ty0, 0x0d],
        [xmin, ymax, zmin, tx1, ty1, 0x0d],
        [xmax, ymax, zmin, tx0, ty1, 0x0d],
    ]
    return numpy.array([faceXInc, faceXDec, faceYInc, faceYDec, faceZInc, faceZDec])
elementByteLength = 24
def createPrecomputedVertices():
    """Precompute one (16, 16, height, 4, 6) float32 vertex array per face
    direction, covering every block position of a 16x16xheight section.

    Each entry is the face template from faceVertexTemplates translated to
    its block coordinates, with texture coordinates copied over and the
    color bytes seeded from the template's shade value (alpha = 0xff).
    """
    height = 16

    # coordinate vectors broadcast across the (x, z, y, vertex) grid
    xs = numpy.arange(16)[:, numpy.newaxis, numpy.newaxis, numpy.newaxis]
    zs = numpy.arange(16)[numpy.newaxis, :, numpy.newaxis, numpy.newaxis]
    ys = numpy.arange(height)[numpy.newaxis, numpy.newaxis, :, numpy.newaxis]

    vertexArrays = []
    for template in faceVertexTemplates:
        verts = numpy.zeros(shape=(16, 16, height, 4, 6),  # x,y,z,s,t,rg, ba
                            dtype='float32')
        verts[_XYZ][..., 0] = xs
        verts[_XYZ][..., 1] = ys
        verts[_XYZ][..., 2] = zs
        verts[_XYZ] += template[..., 0:3]  # translate the template to each block
        verts[_ST] = template[..., 3:5]  # texture coordinates
        # write the shade byte into r, g, b and force alpha opaque, through
        # a uint8 reinterpretation of the float32 storage
        verts.view('uint8')[_RGB] = template[..., 5, numpy.newaxis]
        verts.view('uint8')[_A] = 0xff
        vertexArrays.append(verts)
    return vertexArrays
faceVertexTemplates = makeVertexTemplates()
class ChunkCalculator (object):
    """Builds vertex arrays for chunks: selects BlockRenderer classes for a
    level's material set and drives per-chunk face calculation."""
    cachedTemplate = None
    cachedTemplateHeight = 0
    # fallback 16x16x16 light array (full brightness) used when a chunk has
    # no light data
    whiteLight = numpy.array([[[15] * 16] * 16] * 16, numpy.uint8)
    # shared per-face vertex templates, built once at module import time
    precomputedVertices = createPrecomputedVertices()
    def __init__(self, level):
        # choose renderer classes and material maps appropriate to this level
        self.makeRenderstates(level.materials)
        # del xArray, zArray, yArray
        # empty array with the same rank/dtype as the precomputed vertex
        # arrays, used where a renderer has nothing to emit
        self.nullVertices = numpy.zeros((0,) * len(self.precomputedVertices[0].shape), dtype=self.precomputedVertices[0].dtype)
        # NOTE(review): import deferred to call time, presumably to avoid a
        # circular import with leveleditor -- confirm before hoisting
        from leveleditor import Settings
        Settings.fastLeaves.addObserver(self)
        Settings.roughGraphics.addObserver(self)
    # Renderstate classes: each bundles the GL state toggled around drawing
    # one group of display lists. bind() sets the state, release() restores
    # the defaults it changed.
    # NOTE(review): the classmethods name their first parameter `self`
    # instead of the conventional `cls`; callers invoke them with no
    # arguments, so behavior is unaffected.
    class renderstatePlain(object):
        # default state: no changes
        @classmethod
        def bind(self):
            pass

        @classmethod
        def release(self):
            pass

    class renderstateVines(object):
        # two-sided alpha-tested quads
        @classmethod
        def bind(self):
            GL.glDisable(GL.GL_CULL_FACE)
            GL.glEnable(GL.GL_ALPHA_TEST)

        @classmethod
        def release(self):
            GL.glEnable(GL.GL_CULL_FACE)
            GL.glDisable(GL.GL_ALPHA_TEST)

    class renderstateLowDetail(object):
        # untextured, two-sided geometry
        @classmethod
        def bind(self):
            GL.glDisable(GL.GL_CULL_FACE)
            GL.glDisable(GL.GL_TEXTURE_2D)

        @classmethod
        def release(self):
            GL.glEnable(GL.GL_CULL_FACE)
            GL.glEnable(GL.GL_TEXTURE_2D)

    class renderstateAlphaTest(object):
        @classmethod
        def bind(self):
            GL.glEnable(GL.GL_ALPHA_TEST)

        @classmethod
        def release(self):
            GL.glDisable(GL.GL_ALPHA_TEST)

    class _renderstateAlphaBlend(object):
        # shared base for translucent materials
        @classmethod
        def bind(self):
            GL.glEnable(GL.GL_BLEND)

        @classmethod
        def release(self):
            GL.glDisable(GL.GL_BLEND)

    class renderstateWater(_renderstateAlphaBlend):
        pass

    class renderstateIce(_renderstateAlphaBlend):
        pass

    class renderstateEntity(object):
        # entity markers: blended, untextured, drawn through terrain
        @classmethod
        def bind(self):
            GL.glDisable(GL.GL_DEPTH_TEST)
            # GL.glDisable(GL.GL_CULL_FACE)
            GL.glDisable(GL.GL_TEXTURE_2D)
            GL.glEnable(GL.GL_BLEND)

        @classmethod
        def release(self):
            GL.glEnable(GL.GL_DEPTH_TEST)
            # GL.glEnable(GL.GL_CULL_FACE)
            GL.glEnable(GL.GL_TEXTURE_2D)
            GL.glDisable(GL.GL_BLEND)

    # registry of all renderstates; NOTE(review): ordering assumed to matter
    # for blending at the draw site -- confirm before reordering
    renderstates = (
        renderstatePlain,
        renderstateVines,
        renderstateLowDetail,
        renderstateAlphaTest,
        renderstateIce,
        renderstateWater,
        renderstateEntity,
    )
    def makeRenderstates(self, materials):
        """Pick the BlockRenderer classes for this material set and build
        materialMap / exposedMaterialMap: arrays indexed by block id giving
        each id a small material code. Adjacent blocks with different codes
        produce exposed faces (see getFacingBlockIndices)."""
        self.blockRendererClasses = [
            GenericBlockRenderer,
            LeafBlockRenderer,
            PlantBlockRenderer,
            TorchBlockRenderer,
            WaterBlockRenderer,
            SlabBlockRenderer,
        ]
        # the extra renderers only apply to Alpha/Pocket material sets
        if materials.name in ("Alpha", "Pocket"):
            self.blockRendererClasses += [
                RailBlockRenderer,
                LadderBlockRenderer,
                SnowBlockRenderer,
                RedstoneBlockRenderer,
                IceBlockRenderer,
                FeatureBlockRenderer,
                StairBlockRenderer,
                VineBlockRenderer,
                # button, floor plate, door -> 1-cube features
                # lever, sign, wall sign, stairs -> 2-cube features
                # repeater
                # fence
                # bed
                # cake
                # portal
            ]
        self.materialMap = materialMap = numpy.zeros((pymclevel.materials.id_limit,), 'uint8')
        materialMap[1:] = 1  # generic blocks
        materialCount = 2
        # assign each specialized renderer's block ids their own code
        for br in self.blockRendererClasses[1:]:  # skip generic blocks
            materialMap[br.getBlocktypes(materials)] = materialCount
            br.materialIndex = materialCount
            materialCount += 1
        # exposedMaterialMap additionally distinguishes see-through blocks
        self.exposedMaterialMap = numpy.array(materialMap)
        self.addTransparentMaterials(self.exposedMaterialMap, materialCount)
    def addTransparentMaterials(self, mats, materialCount):
        """Give each see-through block type a distinct material code in
        `mats`, numbered upward from `materialCount`, so faces adjacent to
        them register as exposed (material codes differ)."""
        transparentMaterials = [
            pymclevel.materials.alphaMaterials.Glass,
            pymclevel.materials.alphaMaterials.GlassPane,
            pymclevel.materials.alphaMaterials.IronBars,
            pymclevel.materials.alphaMaterials.MonsterSpawner,
            pymclevel.materials.alphaMaterials.Vines,
            pymclevel.materials.alphaMaterials.Fire,
            pymclevel.materials.alphaMaterials.Trapdoor,
            pymclevel.materials.alphaMaterials.Lever,
            pymclevel.materials.alphaMaterials.BrewingStand,
        ]
        for b in transparentMaterials:
            mats[b.ID] = materialCount
            materialCount += 1
hiddenOreMaterials = numpy.arange(pymclevel.materials.id_limit, dtype='uint8')
hiddenOreMaterials[2] = 1 # don't show boundaries between dirt,grass,sand,gravel,stone
hiddenOreMaterials[3] = 1
hiddenOreMaterials[12] = 1
hiddenOreMaterials[13] = 1
roughMaterials = numpy.ones((pymclevel.materials.id_limit,), dtype='uint8')
roughMaterials[0] = 0
addTransparentMaterials(None, roughMaterials, 2)
    def calcFacesForChunkRenderer(self, cr):
        """Generator: rebuild the invalid layers of a ChunkRenderer.

        Yields between units of work so the caller can spread the cost over
        several frames. Layer renderers that are still valid are reused;
        block geometry is recalculated only when the Blocks layer is invalid
        at full detail.
        """
        if 0 == len(cr.invalidLayers):
            # layers = set(br.layer for br in cr.blockRenderers)
            # assert set() == cr.visibleLayers.difference(layers)
            return
        lod = cr.detailLevel
        cx, cz = cr.chunkPosition
        level = cr.renderer.level
        try:
            chunk = level.getChunk(cx, cz)
        except Exception, e:
            logging.warn(u"Error reading chunk: %s", e)
            yield
            return
        yield
        brs = []
        # per-layer renderer classes (entities, tile ticks, low detail, ...)
        classes = [
            TileEntityRenderer,
            MonsterRenderer,
            ItemRenderer,
            TileTicksRenderer,
            TerrainPopulatedRenderer,
            LowDetailBlockRenderer,
            OverheadBlockRenderer,
        ]
        existingBlockRenderers = dict(((type(b), b) for b in cr.blockRenderers))
        for blockRendererClass in classes:
            if cr.detailLevel not in blockRendererClass.detailLevels:
                continue
            if blockRendererClass.layer not in cr.visibleLayers:
                continue
            if blockRendererClass.layer not in cr.invalidLayers:
                # layer still valid: keep its existing renderer if present
                if blockRendererClass in existingBlockRenderers:
                    brs.append(existingBlockRenderers[blockRendererClass])
                    continue
            # layer invalid (or renderer missing): build a fresh one
            br = blockRendererClass(self)
            br.detailLevel = cr.detailLevel
            for _ in br.makeChunkVertices(chunk):
                yield
            brs.append(br)
        blockRenderers = []
        # Recalculate high detail blocks if needed, otherwise retain the high detail renderers
        if lod == 0 and Layer.Blocks in cr.invalidLayers:
            for _ in self.calcHighDetailFaces(cr, blockRenderers):
                yield
        else:
            blockRenderers.extend(br for br in cr.blockRenderers if type(br) not in classes)
        # Add the layer renderers
        blockRenderers.extend(brs)
        cr.blockRenderers = blockRenderers
        cr.vertexArraysDone()
        # Python 2 generator termination; under PEP 479 (Python 3.7+) this
        # would need to become a plain `return`
        raise StopIteration
    def getNeighboringChunks(self, chunk):
        """Return a {face: chunk} dict for the four horizontal neighbors of
        `chunk`, substituting an all-air ZeroChunk for any neighbor that is
        missing or unreadable."""
        cx, cz = chunk.chunkPosition
        level = chunk.world
        neighboringChunks = {}
        for dir, dx, dz in ((pymclevel.faces.FaceXDecreasing, -1, 0),
                            (pymclevel.faces.FaceXIncreasing, 1, 0),
                            (pymclevel.faces.FaceZDecreasing, 0, -1),
                            (pymclevel.faces.FaceZIncreasing, 0, 1)):
            if not level.containsChunk(cx + dx, cz + dz):
                neighboringChunks[dir] = pymclevel.infiniteworld.ZeroChunk(level.Height)
            else:
                # if not level.chunkIsLoaded(cx+dx,cz+dz):
                #     raise StopIteration
                try:
                    neighboringChunks[dir] = level.getChunk(cx + dx, cz + dz)
                except (EnvironmentError, pymclevel.mclevelbase.ChunkNotPresent, pymclevel.mclevelbase.ChunkMalformed):
                    # treat unreadable neighbors the same as absent ones
                    neighboringChunks[dir] = pymclevel.infiniteworld.ZeroChunk(level.Height)
        return neighboringChunks
    def getAreaBlocks(self, chunk, neighboringChunks):
        """Return the chunk's Blocks array padded by one block on every axis,
        with the X/Z border planes copied from the neighboring chunks.
        The Y border planes (and the padded corners) are left as 0 (air)."""
        chunkWidth, chunkLength, chunkHeight = chunk.Blocks.shape
        areaBlocks = numpy.zeros((chunkWidth + 2, chunkLength + 2, chunkHeight + 2), numpy.uint16)
        areaBlocks[1:-1, 1:-1, 1:-1] = chunk.Blocks
        # x=0 plane <- last x-slab of the -X neighbor, and so on around
        areaBlocks[:1, 1:-1, 1:-1] = neighboringChunks[pymclevel.faces.FaceXDecreasing].Blocks[-1:, :chunkLength, :chunkHeight]
        areaBlocks[-1:, 1:-1, 1:-1] = neighboringChunks[pymclevel.faces.FaceXIncreasing].Blocks[:1, :chunkLength, :chunkHeight]
        areaBlocks[1:-1, :1, 1:-1] = neighboringChunks[pymclevel.faces.FaceZDecreasing].Blocks[:chunkWidth, -1:, :chunkHeight]
        areaBlocks[1:-1, -1:, 1:-1] = neighboringChunks[pymclevel.faces.FaceZIncreasing].Blocks[:chunkWidth, :1, :chunkHeight]
        return areaBlocks
def getFacingBlockIndices(self, areaBlocks, areaBlockMats):
    """Return a 6-element list (indexed by face direction) of boolean masks
    marking blocks whose face in that direction is exposed.

    A face is exposed when the material value differs between a cell and its
    neighbor along that axis; each axis comparison is computed once and then
    sliced from both ends to serve the two opposite faces.
    """
    facingBlockIndices = [None] * 6
    # X axis: compare each cell with its +X neighbor over the padded array.
    exposedFacesX = (areaBlockMats[:-1, 1:-1, 1:-1] != areaBlockMats[1:, 1:-1, 1:-1])
    facingBlockIndices[pymclevel.faces.FaceXDecreasing] = exposedFacesX[:-1]
    facingBlockIndices[pymclevel.faces.FaceXIncreasing] = exposedFacesX[1:]
    # Z axis (second array dimension).
    exposedFacesZ = (areaBlockMats[1:-1, :-1, 1:-1] != areaBlockMats[1:-1, 1:, 1:-1])
    facingBlockIndices[pymclevel.faces.FaceZDecreasing] = exposedFacesZ[:, :-1]
    facingBlockIndices[pymclevel.faces.FaceZIncreasing] = exposedFacesZ[:, 1:]
    # Y axis (third array dimension).
    exposedFacesY = (areaBlockMats[1:-1, 1:-1, :-1] != areaBlockMats[1:-1, 1:-1, 1:])
    facingBlockIndices[pymclevel.faces.FaceYDecreasing] = exposedFacesY[:, :, :-1]
    facingBlockIndices[pymclevel.faces.FaceYIncreasing] = exposedFacesY[:, :, 1:]
    return facingBlockIndices
def getAreaBlockLights(self, chunk, neighboringChunks):
    """Return a (W+2, L+2, H+2) uint8 brightness array for the chunk.

    Combines block light and sky light (elementwise maximum) for the chunk's
    own cells, copies max(sky, block) light of the facing edge slice from each
    of the four horizontal neighbors into the border planes, and clamps the
    whole array to a minimum brightness so fully dark areas stay visible.
    """
    chunkWidth, chunkLength, chunkHeight = chunk.Blocks.shape
    lights = chunk.BlockLight
    skyLight = chunk.SkyLight
    finalLight = self.whiteLight
    # Bug fix: '!= None' on a numpy array is an elementwise comparison whose
    # truth value is ambiguous/unreliable; identity tests are the correct way
    # to check for a missing light array.
    if lights is not None:
        finalLight = lights
    if skyLight is not None:
        finalLight = numpy.maximum(skyLight, lights)
    areaBlockLights = numpy.ones((chunkWidth + 2, chunkLength + 2, chunkHeight + 2), numpy.uint8)
    areaBlockLights[:] = 15  # border defaults to full brightness
    areaBlockLights[1:-1, 1:-1, 1:-1] = finalLight
    # Border planes: max(skyLight, blockLight) of each neighbor's facing edge,
    # written in place via numpy.maximum's third (out) argument.
    nc = neighboringChunks[pymclevel.faces.FaceXDecreasing]
    numpy.maximum(nc.SkyLight[-1:, :chunkLength, :chunkHeight],
                  nc.BlockLight[-1:, :chunkLength, :chunkHeight],
                  areaBlockLights[0:1, 1:-1, 1:-1])
    nc = neighboringChunks[pymclevel.faces.FaceXIncreasing]
    numpy.maximum(nc.SkyLight[:1, :chunkLength, :chunkHeight],
                  nc.BlockLight[:1, :chunkLength, :chunkHeight],
                  areaBlockLights[-1:, 1:-1, 1:-1])
    nc = neighboringChunks[pymclevel.faces.FaceZDecreasing]
    numpy.maximum(nc.SkyLight[:chunkWidth, -1:, :chunkHeight],
                  nc.BlockLight[:chunkWidth, -1:, :chunkHeight],
                  areaBlockLights[1:-1, 0:1, 1:-1])
    nc = neighboringChunks[pymclevel.faces.FaceZIncreasing]
    numpy.maximum(nc.SkyLight[:chunkWidth, :1, :chunkHeight],
                  nc.BlockLight[:chunkWidth, :1, :chunkHeight],
                  areaBlockLights[1:-1, -1:, 1:-1])
    minimumLight = 4
    # Clamp in place so nothing renders fully black.
    numpy.clip(areaBlockLights, minimumLight, 16, areaBlockLights)
    return areaBlockLights
def calcHighDetailFaces(self, cr, blockRenderers):  # ForChunk(self, chunkPosition = (0,0), level = None, alpha = 1.0):
    """Calculate the geometry for a chunk renderer from its blockMats, data,
    and lighting array. Fills in the given blockRenderers list with verts
    for each block facing and material.

    This is a cooperative generator: it yields between expensive numpy steps
    so the caller can spread the work across frames.
    """
    # chunkBlocks and chunkLights shall be indexed [x,z,y] to follow infdev's convention
    cx, cz = cr.chunkPosition
    level = cr.renderer.level
    chunk = level.getChunk(cx, cz)
    neighboringChunks = self.getNeighboringChunks(chunk)
    areaBlocks = self.getAreaBlocks(chunk, neighboringChunks)
    yield
    areaBlockLights = self.getAreaBlockLights(chunk, neighboringChunks)
    yield
    # Slabs borrow the light value of the cell above them so their tops
    # aren't shaded as if buried.
    slabs = areaBlocks == pymclevel.materials.alphaMaterials.StoneSlab.ID
    if slabs.any():
        areaBlockLights[slabs] = areaBlockLights[:, :, 1:][slabs[:, :, :-1]]
    yield
    showHiddenOres = cr.renderer.showHiddenOres
    if showHiddenOres:
        facingMats = self.hiddenOreMaterials[areaBlocks]
    else:
        facingMats = self.exposedMaterialMap[areaBlocks]
    yield
    if self.roughGraphics:
        areaBlockMats = self.roughMaterials[areaBlocks]
    else:
        areaBlockMats = self.materialMap[areaBlocks]
    facingBlockIndices = self.getFacingBlockIndices(areaBlocks, facingMats)
    yield
    # Delegate the per-16-block-cube vertex generation, forwarding its yields.
    for i in self.computeGeometry(chunk, areaBlockMats, facingBlockIndices, areaBlockLights, cr, blockRenderers):
        yield
def computeGeometry(self, chunk, areaBlockMats, facingBlockIndices, areaBlockLights, chunkRenderer, blockRenderers):
    """Generator: slice the chunk into 16-block-tall cubes and compute vertex
    geometry for each via computeCubeGeometry, yielding between steps."""
    blocks, blockData = chunk.Blocks, chunk.Data
    blockData = blockData & 0xf  # keep only the low data nibble
    # Strip the one-block padding added for neighbor lookups.
    blockMaterials = areaBlockMats[1:-1, 1:-1, 1:-1]
    if self.roughGraphics:
        blockMaterials.clip(0, 1, blockMaterials)
    sx = sz = slice(0, 16)
    asx = asz = slice(0, 18)  # padded (area) slices include the border
    for y in range(0, chunk.world.Height, 16):
        sy = slice(y, y + 16)
        asy = slice(y, y + 18)
        for _i in self.computeCubeGeometry(
                y,
                blockRenderers,
                blocks[sx, sz, sy],
                blockData[sx, sz, sy],
                chunk.materials,
                blockMaterials[sx, sz, sy],
                [f[sx, sz, sy] for f in facingBlockIndices],
                areaBlockLights[asx, asz, asy],
                chunkRenderer):
            yield
def computeCubeGeometry(self, y, blockRenderers, blocks, blockData, materials, blockMaterials, facingBlockIndices, areaBlockLights, chunkRenderer):
    """Generator: for one 16-block-tall cube, instantiate each block renderer
    class whose material actually occurs and let it build its vertices."""
    # Count occurrences of each material index so renderers whose material
    # is absent from this cube can be skipped entirely.
    materialCounts = numpy.bincount(blockMaterials.ravel())

    def texMap(blocks, blockData=0, direction=slice(None)):
        # Texture atlas lookup shared by all renderers for this cube.
        return materials.blockTextures[blocks, blockData, direction]  # xxx slow

    for blockRendererClass in self.blockRendererClasses:
        mi = blockRendererClass.materialIndex
        if mi >= len(materialCounts) or materialCounts[mi] == 0:
            continue  # no blocks of this material in the cube
        blockRenderer = blockRendererClass(self)
        blockRenderer.y = y
        blockRenderer.materials = materials
        for _ in blockRenderer.makeVertices(facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
            yield
        blockRenderers.append(blockRenderer)
        yield
def makeTemplate(self, direction, blockIndices):
    """Return the precomputed face-vertex template for *direction*, selected
    down to the blocks flagged by *blockIndices*."""
    faceTemplates = self.precomputedVertices[direction]
    return faceTemplates[blockIndices]
class Layer:
    """String constants naming the independently-toggleable render layers.

    Each constant equals its own name so values can double as display keys.
    AllLayers lists every layer in drawing order.
    """
    Blocks = "Blocks"
    Entities = "Entities"
    Monsters = "Monsters"
    Items = "Items"
    TileEntities = "TileEntities"
    TileTicks = "TileTicks"
    TerrainPopulated = "TerrainPopulated"
    AllLayers = (Blocks, Entities, Monsters, Items, TileEntities, TileTicks, TerrainPopulated)
class BlockRenderer(object):
    """Base class for per-material chunk geometry builders.

    Subclasses set `blocktypes`/`materialIndex` and provide a makeVertices
    generator (or makeFaceVertices); this base handles vertex storage,
    alpha updates, and OpenGL submission of the interleaved vertex format
    (3 floats XYZ, 2 floats ST, 4 bytes RGBA per vertex).
    """
    # vertexArrays = None
    detailLevels = (0,)       # detail levels this renderer participates in
    layer = Layer.Blocks      # which toggleable layer it belongs to
    # Slices selecting, for each face direction, the padded-array cells
    # adjacent to that face (used to fetch facing light values).
    directionOffsets = {
        pymclevel.faces.FaceXDecreasing: numpy.s_[:-2, 1:-1, 1:-1],
        pymclevel.faces.FaceXIncreasing: numpy.s_[2:, 1:-1, 1:-1],
        pymclevel.faces.FaceYDecreasing: numpy.s_[1:-1, 1:-1, :-2],
        pymclevel.faces.FaceYIncreasing: numpy.s_[1:-1, 1:-1, 2:],
        pymclevel.faces.FaceZDecreasing: numpy.s_[1:-1, :-2, 1:-1],
        pymclevel.faces.FaceZIncreasing: numpy.s_[1:-1, 2:, 1:-1],
    }
    renderstate = ChunkCalculator.renderstateAlphaTest

    def __init__(self, cc):
        # Borrow the calculator's template factory so subclasses can call
        # self.makeTemplate directly.
        self.makeTemplate = cc.makeTemplate
        self.chunkCalculator = cc
        self.vertexArrays = []
        pass

    @classmethod
    def getBlocktypes(cls, mats):
        """Block IDs this renderer handles; `mats` is unused by default but
        lets subclasses vary the list per material set."""
        return cls.blocktypes

    def setAlpha(self, alpha):
        "alpha is an unsigned byte value"
        for a in self.vertexArrays:
            # Overwrite the alpha byte of every vertex color in place.
            a.view('uint8')[_RGBA][..., 3] = alpha

    def bufferSize(self):
        """Approximate GPU buffer footprint in bytes (float32 elements)."""
        return sum(a.size for a in self.vertexArrays) * 4

    def getMaterialIndices(self, blockMaterials):
        """Boolean mask of cells belonging to this renderer's material."""
        return blockMaterials == self.materialIndex

    def makeVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Default generator: build one vertex array per face direction via
        the subclass's makeFaceVertices, yielding between directions."""
        arrays = []
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            vertexArray = self.makeFaceVertices(direction, materialIndices, exposedFaceIndices, blocks, blockData, blockLight, facingBlockLight, texMap)
            yield
            if len(vertexArray):
                arrays.append(vertexArray)
        self.vertexArrays = arrays

    def makeArrayList(self, chunkPosition, showRedraw):
        """Compile this renderer's draw calls into a GL display list."""
        l = gl.glGenLists(1)
        GL.glNewList(l, GL.GL_COMPILE)
        self.drawArrays(chunkPosition, showRedraw)
        GL.glEndList()
        return l

    def drawArrays(self, chunkPosition, showRedraw):
        """Translate to the chunk's world position and draw all arrays;
        tints red when `showRedraw` marks the chunk as recently rebuilt."""
        cx, cz = chunkPosition
        y = 0
        if hasattr(self, 'y'):
            y = self.y
        with gl.glPushMatrix(GL.GL_MODELVIEW):
            GL.glTranslate(cx << 4, y, cz << 4)
            if showRedraw:
                GL.glColor(1.0, 0.25, 0.25, 1.0)
            self.drawVertices()

    def drawVertices(self):
        if self.vertexArrays:
            for buf in self.vertexArrays:
                self.drawFaceVertices(buf)

    def drawFaceVertices(self, buf):
        """Submit one interleaved quad buffer: XYZ at byte 0, ST at float
        offset 3, RGBA at byte offset 20 within each `stride`-byte vertex."""
        if 0 == len(buf):
            return
        stride = elementByteLength
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
        GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
class EntityRendererGeneric(BlockRenderer):
    """Base for renderers that draw translucent marker boxes (entities,
    tile entities, scheduled ticks) instead of block geometry."""
    renderstate = ChunkCalculator.renderstateEntity
    detailLevels = (0, 1, 2)  # markers stay visible at every zoom level

    def drawFaceVertices(self, buf):
        """Draw each box twice: a depth-tested wireframe pass on top of a
        depth-ignoring filled pass, so markers show through terrain."""
        if 0 == len(buf):
            return
        stride = elementByteLength
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
        GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
        GL.glDepthMask(False)
        # Outline pass, not depth tested (always visible).
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        GL.glLineWidth(2.0)
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        # Filled pass with polygon offset to avoid z-fighting with terrain.
        GL.glPolygonOffset(DepthOffset.TerrainWire, DepthOffset.TerrainWire)
        with gl.glEnable(GL.GL_POLYGON_OFFSET_FILL, GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glDepthMask(True)

    def _computeVertices(self, positions, colors, offset=False, chunkPosition=(0, 0)):
        """Build a (n*6, 4, 6) float32 quad array: one unit cube (6 faces of
        4 verts) per world-space position, translated to chunk-local
        coordinates; `offset` shifts cubes by -0.5 to center them."""
        cx, cz = chunkPosition
        x = cx << 4
        z = cz << 4
        vertexArray = numpy.zeros(shape=(len(positions), 6, 4, 6), dtype='float32')
        if len(positions):
            positions = numpy.array(positions)
            positions[:, (0, 2)] -= (x, z)  # world -> chunk-local X/Z
            if offset:
                positions -= 0.5
            vertexArray.view('uint8')[_RGBA] = colors
            vertexArray[_XYZ] = positions[:, numpy.newaxis, numpy.newaxis, :]
            vertexArray[_XYZ] += faceVertexTemplates[_XYZ]
            vertexArray.shape = (len(positions) * 6, 4, 6)
        return vertexArray
class TileEntityRenderer(EntityRendererGeneric):
    """Draws a translucent yellow marker box at every tile entity."""
    layer = Layer.TileEntities

    def makeChunkVertices(self, chunk):
        """Generator: gather tile-entity positions, then build their marker
        boxes; yields periodically so callers can interleave work."""
        positions = []
        for index, entity in enumerate(chunk.TileEntities):
            if index % 10 == 0:
                yield  # stay responsive on chunks with many tile entities
            if 'x' in entity:
                positions.append(pymclevel.TileEntity.pos(entity))
        boxes = self._computeVertices(positions,
                                      (0xff, 0xff, 0x33, 0x44),
                                      chunkPosition=chunk.chunkPosition)
        yield
        self.vertexArrays = [boxes]
class BaseEntityRenderer(EntityRendererGeneric):
    """Common base for entity-marker renderers; adds no behavior of its own,
    it only groups the entity renderers under one type."""
    pass
class MonsterRenderer(BaseEntityRenderer):
    """Draws red marker boxes over mobile entities, excluding the ids in
    `notMonsters` (items, XP orbs, paintings)."""
    layer = Layer.Entities  # xxx Monsters
    notMonsters = set(["Item", "XPOrb", "Painting"])

    def makeChunkVertices(self, chunk):
        """Generator: collect monster positions and build their marker boxes,
        yielding periodically to keep the iteration responsive."""
        positions = []
        for index, entity in enumerate(chunk.Entities):
            if index % 10 == 0:
                yield
            entityId = entity["id"].value
            if entityId not in self.notMonsters:
                positions.append(pymclevel.Entity.pos(entity))
        boxes = self._computeVertices(positions,
                                      (0xff, 0x22, 0x22, 0x44),
                                      offset=True,
                                      chunkPosition=chunk.chunkPosition)
        yield
        self.vertexArrays = [boxes]
class EntityRenderer(BaseEntityRenderer):
    """Placeholder: generic entities are not drawn, but the chunk-calculation
    protocol still expects makeChunkVertices to be a generator that yields
    at least once."""

    def makeChunkVertices(self, chunk):
        yield
class ItemRenderer(BaseEntityRenderer):
    """Draws colored marker boxes over dropped items, XP orbs and paintings,
    one color per entity kind."""
    layer = Layer.Items

    def makeChunkVertices(self, chunk):
        """Generator: collect positions/colors for recognized entity ids and
        build the marker boxes, yielding periodically."""
        positions = []
        colors = []
        colorMap = {
            "Item": (0x22, 0xff, 0x22, 0x5f),
            "XPOrb": (0x88, 0xff, 0x88, 0x5f),
            "Painting": (134, 96, 67, 0x5f),
        }
        for index, entity in enumerate(chunk.Entities):
            if index % 10 == 0:
                yield
            color = colorMap.get(entity["id"].value)
            if color is not None:
                positions.append(pymclevel.Entity.pos(entity))
                colors.append(color)
        boxes = self._computeVertices(
            positions,
            numpy.array(colors, dtype='uint8')[:, numpy.newaxis, numpy.newaxis],
            offset=True,
            chunkPosition=chunk.chunkPosition)
        yield
        self.vertexArrays = [boxes]
class TileTicksRenderer(EntityRendererGeneric):
    """Draws white marker boxes at pending scheduled-tick (TileTicks)
    positions stored in the chunk's NBT."""
    layer = Layer.TileTicks

    def makeChunkVertices(self, chunk):
        """Generator: read Level/TileTicks from the chunk NBT (if present)
        and append marker geometry; always yields exactly once."""
        root = chunk.root_tag
        if root and "Level" in root and "TileTicks" in root["Level"]:
            ticks = root["Level"]["TileTicks"]
            if len(ticks):
                coords = [[t[i].value for i in "xyz"] for t in ticks]
                self.vertexArrays.append(self._computeVertices(coords,
                                                               (0xff, 0xff, 0xff, 0x44),
                                                               chunkPosition=chunk.chunkPosition))
        yield
class TerrainPopulatedRenderer(EntityRendererGeneric):
    """Highlights chunks whose TerrainPopulated flag is unset by drawing a
    chunk-sized translucent box on the faces bordering populated chunks."""
    layer = Layer.TerrainPopulated
    # One chunk-sized cube template (16 x 128 x 16) with a fixed tint.
    vertexTemplate = numpy.zeros((6, 4, 6), 'float32')
    vertexTemplate[_XYZ] = faceVertexTemplates[_XYZ]
    vertexTemplate[_XYZ] *= (16, 128, 16)
    color = (255, 200, 155)
    vertexTemplate.view('uint8')[_RGBA] = color + (72,)

    def drawFaceVertices(self, buf):
        """Multi-pass draw: depth-tested fill, then always-visible thin
        wireframe, then depth-tested thick wireframe on top."""
        if 0 == len(buf):
            return
        stride = elementByteLength
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
        GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
        GL.glDepthMask(False)
        # GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glDisable(GL.GL_CULL_FACE)
        with gl.glEnable(GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glEnable(GL.GL_CULL_FACE)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        GL.glLineWidth(1.0)
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glLineWidth(2.0)
        with gl.glEnable(GL.GL_DEPTH_TEST):
            GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glLineWidth(1.0)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        GL.glDepthMask(True)
        # GL.glPolygonOffset(DepthOffset.TerrainWire, DepthOffset.TerrainWire)
        # with gl.glEnable(GL.GL_POLYGON_OFFSET_FILL, GL.GL_DEPTH_TEST):
        # GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        #

    def makeChunkVertices(self, chunk):
        """Generator: emit the highlight cube only for unpopulated chunks,
        showing just the side faces adjacent to populated neighbors (plus
        top and bottom, which are always shown)."""
        neighbors = self.chunkCalculator.getNeighboringChunks(chunk)

        def getpop(ch):
            # Missing attribute (e.g. ZeroChunk) counts as populated.
            return getattr(ch, "TerrainPopulated", True)

        pop = getpop(chunk)
        yield
        if pop:
            return

        visibleFaces = [
            getpop(neighbors[pymclevel.faces.FaceXIncreasing]),
            getpop(neighbors[pymclevel.faces.FaceXDecreasing]),
            True,
            True,
            getpop(neighbors[pymclevel.faces.FaceZIncreasing]),
            getpop(neighbors[pymclevel.faces.FaceZDecreasing]),
        ]
        visibleFaces = numpy.array(visibleFaces, dtype='bool')
        verts = self.vertexTemplate[visibleFaces]
        self.vertexArrays.append(verts)
        yield
class LowDetailBlockRenderer(BlockRenderer):
    """Detail level 1 renderer: draws only the heightmap surface of each
    chunk (one top quad per column plus skirt quads down to neighboring
    column depths) using per-block flat colors instead of textures."""
    renderstate = ChunkCalculator.renderstateLowDetail
    detailLevels = (1,)

    def drawFaceVertices(self, buf):
        """Untextured draw: 16-byte vertices (3 floats XYZ + 4 color bytes);
        the texcoord client array is disabled around the call."""
        if not len(buf):
            return
        stride = 16
        GL.glVertexPointer(3, GL.GL_FLOAT, stride, numpy.ravel(buf.ravel()))
        GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype='uint8').ravel()[12:]))
        GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
        GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
        GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)

    def setAlpha(self, alpha):
        # Last byte of each 16-byte vertex is the alpha channel.
        for va in self.vertexArrays:
            va.view('uint8')[..., -1] = alpha

    def makeChunkVertices(self, ch):
        """Generator: derive top-surface quads from the chunk heightmap.

        Builds fancy-index axes from HeightMap, looks up the top block of
        every column, colors quads with the material flat colors, and (at
        detail level 1) adds two diagonal skirt quads per column stretched
        down to the shallowest neighboring column.
        """
        step = 1  # column subsampling factor (1 = every column)
        level = ch.world
        vertexArrays = []
        blocks = ch.Blocks
        heightMap = ch.HeightMap
        heightMap = heightMap[::step, ::step]
        blocks = blocks[::step, ::step]
        if 0 in blocks.shape:
            return
        chunkWidth, chunkLength, chunkHeight = blocks.shape
        blockIndices = numpy.zeros((chunkWidth, chunkLength, chunkHeight), bool)
        gridaxes = list(numpy.indices((chunkWidth, chunkLength)))
        # HeightMap is indexed [z, x]; swap to match Blocks' [x, z] order.
        # h = height of the top block in each column, clipped into range.
        h = numpy.swapaxes(heightMap - 1, 0, 1)[:chunkWidth, :chunkLength]
        numpy.clip(h, 0, chunkHeight - 1, out=h)
        gridaxes = [gridaxes[0], gridaxes[1], h]
        # depths = lowest neighboring column height; skirt quads reach here.
        depths = numpy.zeros((chunkWidth, chunkLength), dtype='uint16')
        depths[1:-1, 1:-1] = reduce(numpy.minimum, (h[1:-1, :-2], h[1:-1, 2:], h[:-2, 1:-1]), h[2:, 1:-1])
        yield
        try:
            topBlocks = blocks[gridaxes]
            nonAirBlocks = (topBlocks != 0)
            blockIndices[gridaxes] = nonAirBlocks
            h += 1
            numpy.clip(h, 0, chunkHeight - 1, out=h)
            # Block directly above each top block (for snow/water overlays).
            overblocks = blocks[gridaxes][nonAirBlocks].ravel()
        except ValueError, e:
            raise ValueError(str(e.args) + "Chunk shape: {0}".format(blockIndices.shape), sys.exc_info()[-1])
        if nonAirBlocks.any():
            blockTypes = blocks[blockIndices]
            flatcolors = level.materials.flatColors[blockTypes, ch.Data[blockIndices] & 0xf][:, numpy.newaxis, :]
            # flatcolors[:,:,:3] *= (0.6 + (h * (0.4 / float(chunkHeight-1)))) [topBlocks != 0][:, numpy.newaxis, numpy.newaxis]
            x, z, y = blockIndices.nonzero()
            yield
            vertexArray = numpy.zeros((len(x), 4, 4), dtype='float32')
            vertexArray[_XYZ][..., 0] = x[:, numpy.newaxis]
            vertexArray[_XYZ][..., 1] = y[:, numpy.newaxis]
            vertexArray[_XYZ][..., 2] = z[:, numpy.newaxis]
            # va0: the top (Y-increasing) face of each column.
            va0 = numpy.array(vertexArray)
            va0[..., :3] += faceVertexTemplates[pymclevel.faces.FaceYIncreasing, ..., :3]
            overmask = overblocks > 0
            # Columns with a block above the heightmap take that block's color.
            flatcolors[overmask] = level.materials.flatColors[:, 0][overblocks[overmask]][:, numpy.newaxis]
            if self.detailLevel == 2:
                # Overhead view: shade by height for a relief effect.
                heightfactor = (y / float(2.0 * ch.world.Height)) + 0.5
                flatcolors[..., :3] *= heightfactor[:, numpy.newaxis, numpy.newaxis]
            _RGBA = numpy.s_[..., 12:16]  # color bytes within the 16-byte vertex
            va0.view('uint8')[_RGBA] = flatcolors
            va0[_XYZ][:, :, 0] *= step
            va0[_XYZ][:, :, 2] *= step
            yield
            if self.detailLevel == 2:
                self.vertexArrays = [va0]
                return
            # va1/va2: two diagonal skirt quads per column.
            va1 = numpy.array(vertexArray)
            va1[..., :3] += faceVertexTemplates[pymclevel.faces.FaceXIncreasing, ..., :3]
            va1[_XYZ][:, (0, 1), 1] = depths[nonAirBlocks].ravel()[:, numpy.newaxis]  # stretch to floor
            va1[_XYZ][:, (1, 2), 0] -= 1.0  # turn diagonally
            va1[_XYZ][:, (2, 3), 1] -= 0.5  # drop down to prevent intersection pixels
            va1[_XYZ][:, :, 0] *= step
            va1[_XYZ][:, :, 2] *= step
            flatcolors *= 0.8  # darken sides relative to the top
            va1.view('uint8')[_RGBA] = flatcolors
            grassmask = topBlocks[nonAirBlocks] == 2
            # color grass sides with dirt's color
            va1.view('uint8')[_RGBA][grassmask] = level.materials.flatColors[:, 0][[3]][:, numpy.newaxis]
            va2 = numpy.array(va1)
            va2[_XYZ][:, (1, 2), 0] += step
            va2[_XYZ][:, (0, 3), 0] -= step
            vertexArrays = [va1, va2, va0]
        self.vertexArrays = vertexArrays
class OverheadBlockRenderer(LowDetailBlockRenderer):
    """Map-style overhead view: identical to the low-detail renderer except
    it participates only at detail level 2 (top quads, height-shaded)."""
    detailLevels = (2,)
class GenericBlockRenderer(BlockRenderer):
    """Default cube renderer for all ordinary opaque/alpha-tested blocks
    (materialIndex 1): one textured, light-tinted quad per exposed face."""
    renderstate = ChunkCalculator.renderstateAlphaTest

    materialIndex = 1

    def makeGenericVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: for each face direction, select blocks of this material
        whose face is exposed, apply texture coords, grass top tint, and
        facing-light color, yielding after each direction."""
        vertexArrays = []
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            blockIndices = materialIndices & exposedFaceIndices
            theseBlocks = blocks[blockIndices]
            bdata = blockData[blockIndices]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue

            def setTexture():
                # Shift template texcoords to each block's atlas tile.
                vertexArray[_ST] += texMap(theseBlocks, bdata, direction)[:, numpy.newaxis, 0:2]

            setTexture()

            def setGrassColors():
                grass = theseBlocks == pymclevel.materials.alphaMaterials.Grass.ID
                vertexArray.view('uint8')[_RGB][grass] *= self.grassColor

            def getBlockLight():
                return facingBlockLight[blockIndices]

            def setColors():
                # Multiply vertex colors by the brightness of the facing cell.
                vertexArray.view('uint8')[_RGB] *= getBlockLight()[..., numpy.newaxis, numpy.newaxis]

            if self.materials.name in ("Alpha", "Pocket"):
                if direction == pymclevel.faces.FaceYIncreasing:
                    setGrassColors()
            # leaves = theseBlocks == pymclevel.materials.alphaMaterials.Leaves.ID
            # vertexArray.view('uint8')[_RGBA][leaves] *= [0.15, 0.88, 0.15, 1.0]
            # snow = theseBlocks == pymclevel.materials.alphaMaterials.SnowLayer.ID
            # if direction == pymclevel.faces.FaceYIncreasing:
            # vertexArray[_XYZ][snow, ...,1] -= 0.875
            #
            # if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
            # vertexArray[_XYZ][snow, ...,2:4,1] -= 0.875
            # vertexArray[_ST][snow, ...,2:4,1] += 14
            #
            setColors()
            yield
            vertexArrays.append(vertexArray)
        self.vertexArrays = vertexArrays

    grassColor = grassColorDefault = [0.39, 0.77, 0.23]  # 62C743

    makeVertices = makeGenericVertices
class LeafBlockRenderer(BlockRenderer):
    """Renders leaf blocks (ID 18) with species-specific tints.

    In "fast leaves" mode leaves are drawn opaque (only exposed faces); in
    fancy mode every leaf face is drawn with alpha-tested transparent
    textures (the atlas tile 0x10 to the left).
    """
    blocktypes = [18]

    @property
    def renderstate(self):
        # Opaque renderstate suffices for fast leaves; fancy needs alpha test.
        if self.chunkCalculator.fastLeaves:
            return ChunkCalculator.renderstatePlain
        else:
            return ChunkCalculator.renderstateAlphaTest

    def makeLeafVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build per-direction leaf quads, tinting oak/pine/birch
        by their data value; yields after each direction."""
        arrays = []
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        if self.materials.name in ("Alpha", "Pocket"):
            if not self.chunkCalculator.fastLeaves:
                # Fancy mode: draw ALL leaf faces; species masks computed once.
                blockIndices = materialIndices
                data = blockData[blockIndices]
                data &= 0x3  # ignore decay states
                leaves = (data == 0) | (data == 3)
                pines = (data == pymclevel.materials.alphaMaterials.PineLeaves.blockData)
                birches = (data == pymclevel.materials.alphaMaterials.BirchLeaves.blockData)
                texes = texMap(18, data, 0)
        else:
            blockIndices = materialIndices
            texes = texMap(18, [0], 0)
        for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
            if self.materials.name in ("Alpha", "Pocket"):
                if self.chunkCalculator.fastLeaves:
                    # Fast mode: only exposed faces; recompute masks per face.
                    blockIndices = materialIndices & exposedFaceIndices
                    data = blockData[blockIndices]
                    data &= 0x3  # ignore decay states
                    leaves = (data == 0)
                    pines = (data == pymclevel.materials.alphaMaterials.PineLeaves.blockData)
                    birches = (data == pymclevel.materials.alphaMaterials.BirchLeaves.blockData)
                    type3 = (data == 3)
                    leaves |= type3
                    texes = texMap(18, data, 0)
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texes[:, numpy.newaxis]
            if not self.chunkCalculator.fastLeaves:
                vertexArray[_ST] -= (0x10, 0x0)  # transparent leaf tile sits one column left
            vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            if self.materials.name in ("Alpha", "Pocket"):
                vertexArray.view('uint8')[_RGB][leaves] *= self.leafColor
                vertexArray.view('uint8')[_RGB][pines] *= self.pineLeafColor
                vertexArray.view('uint8')[_RGB][birches] *= self.birchLeafColor
            yield
            arrays.append(vertexArray)
        self.vertexArrays = arrays

    leafColor = leafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.]  # 48b518
    pineLeafColor = pineLeafColorDefault = [0x61 / 255., 0x99 / 255., 0x61 / 255.]  # 0x619961
    birchLeafColor = birchLeafColorDefault = [0x80 / 255., 0xa7 / 255., 0x55 / 255.]  # 0x80a755

    makeVertices = makeLeafVertices
class PlantBlockRenderer(BlockRenderer):
    """Renders cross-shaped decoration blocks (flowers, saplings, crops,
    stems, nether wart) as two intersecting diagonal quads."""

    @classmethod
    def getBlocktypes(cls, mats):
        """Select block IDs by material *type* instead of a hard-coded list,
        so new materials pick up the cross shape automatically."""
        # blocktypes = [6, 37, 38, 39, 40, 59, 83]
        # if mats.name != "Classic": blocktypes += [31, 32]  # shrubs, tall grass
        # if mats.name == "Alpha": blocktypes += [115]  # nether wart
        blocktypes = [b.ID for b in mats if b.type in ("DECORATION_CROSS", "NETHER_WART", "CROPS", "STEM")]
        return blocktypes

    renderstate = ChunkCalculator.renderstateAlphaTest

    def makePlantVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build the four side-face quads, each sheared across the
        block diagonally, lit by the block's own cell brightness."""
        arrays = []
        blockIndices = self.getMaterialIndices(blockMaterials)
        yield
        theseBlocks = blocks[blockIndices]
        bdata = blockData[blockIndices]
        bdata[theseBlocks == 6] &= 0x3  # xxx saplings only
        texes = texMap(blocks[blockIndices], bdata, 0)
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        lights = blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        colorize = None
        if self.materials.name == "Alpha":
            # Tall grass (nonzero data) gets the biome-style leaf tint.
            colorize = (theseBlocks == pymclevel.materials.alphaMaterials.TallGrass.ID) & (bdata != 0)
        for direction in (pymclevel.faces.FaceXIncreasing, pymclevel.faces.FaceXDecreasing, pymclevel.faces.FaceZIncreasing, pymclevel.faces.FaceZDecreasing):
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                return
            # Shear two of the four corners across the block to form an X.
            if direction == pymclevel.faces.FaceXIncreasing:
                vertexArray[_XYZ][..., 1:3, 0] -= 1
            if direction == pymclevel.faces.FaceXDecreasing:
                vertexArray[_XYZ][..., 1:3, 0] += 1
            if direction == pymclevel.faces.FaceZIncreasing:
                vertexArray[_XYZ][..., 1:3, 2] -= 1
            if direction == pymclevel.faces.FaceZDecreasing:
                vertexArray[_XYZ][..., 1:3, 2] += 1
            vertexArray[_ST] += texes[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] = 0xf  # ignore precomputed directional light
            vertexArray.view('uint8')[_RGB] *= lights
            if colorize is not None:
                vertexArray.view('uint8')[_RGB][colorize] *= LeafBlockRenderer.leafColor
            arrays.append(vertexArray)
            yield
        self.vertexArrays = arrays

    makeVertices = makePlantVertices
class TorchBlockRenderer(BlockRenderer):
    """Renders torches, redstone torches on/off (IDs 50, 75, 76) as thin
    posts, tilted toward their supporting wall per the block data value."""
    blocktypes = [50, 75, 76]
    renderstate = ChunkCalculator.renderstateAlphaTest
    # Per-face, per-corner XYZ offsets (in 1/16 block units) shrinking the
    # full cube template down to the 2x10x2 torch post standing upright.
    torchOffsetsStraight = [
        [  # FaceXIncreasing
            (-7 / 16., 0, 0),
            (-7 / 16., 0, 0),
            (-7 / 16., 0, 0),
            (-7 / 16., 0, 0),
        ],
        [  # FaceXDecreasing
            (7 / 16., 0, 0),
            (7 / 16., 0, 0),
            (7 / 16., 0, 0),
            (7 / 16., 0, 0),
        ],
        [  # FaceYIncreasing
            (7 / 16., -6 / 16., 7 / 16.),
            (7 / 16., -6 / 16., -7 / 16.),
            (-7 / 16., -6 / 16., -7 / 16.),
            (-7 / 16., -6 / 16., 7 / 16.),
        ],
        [  # FaceYDecreasing
            (7 / 16., 0., 7 / 16.),
            (-7 / 16., 0., 7 / 16.),
            (-7 / 16., 0., -7 / 16.),
            (7 / 16., 0., -7 / 16.),
        ],
        [  # FaceZIncreasing
            (0, 0, -7 / 16.),
            (0, 0, -7 / 16.),
            (0, 0, -7 / 16.),
            (0, 0, -7 / 16.)
        ],
        [  # FaceZDecreasing
            (0, 0, 7 / 16.),
            (0, 0, 7 / 16.),
            (0, 0, 7 / 16.),
            (0, 0, 7 / 16.)
        ],
    ]
    # Wall-mounted torches start from a raised variant of the same shape...
    torchOffsetsSouth = [
        [  # FaceXIncreasing
            (-7 / 16., 3 / 16., 0),
            (-7 / 16., 3 / 16., 0),
            (-7 / 16., 3 / 16., 0),
            (-7 / 16., 3 / 16., 0),
        ],
        [  # FaceXDecreasing
            (7 / 16., 3 / 16., 0),
            (7 / 16., 3 / 16., 0),
            (7 / 16., 3 / 16., 0),
            (7 / 16., 3 / 16., 0),
        ],
        [  # FaceYIncreasing
            (7 / 16., -3 / 16., 7 / 16.),
            (7 / 16., -3 / 16., -7 / 16.),
            (-7 / 16., -3 / 16., -7 / 16.),
            (-7 / 16., -3 / 16., 7 / 16.),
        ],
        [  # FaceYDecreasing
            (7 / 16., 3 / 16., 7 / 16.),
            (-7 / 16., 3 / 16., 7 / 16.),
            (-7 / 16., 3 / 16., -7 / 16.),
            (7 / 16., 3 / 16., -7 / 16.),
        ],
        [  # FaceZIncreasing
            (0, 3 / 16., -7 / 16.),
            (0, 3 / 16., -7 / 16.),
            (0, 3 / 16., -7 / 16.),
            (0, 3 / 16., -7 / 16.)
        ],
        [  # FaceZDecreasing
            (0, 3 / 16., 7 / 16.),
            (0, 3 / 16., 7 / 16.),
            (0, 3 / 16., 7 / 16.),
            (0, 3 / 16., 7 / 16.),
        ],
    ]
    torchOffsetsNorth = torchOffsetsWest = torchOffsetsEast = torchOffsetsSouth
    # Index 0 is unused; indices 1-4 are the four wall orientations, 5 is
    # standing; the padding repeats the straight shape for invalid data.
    torchOffsets = [
        torchOffsetsStraight,
        torchOffsetsSouth,
        torchOffsetsNorth,
        torchOffsetsWest,
        torchOffsetsEast,
        torchOffsetsStraight,
    ] + [torchOffsetsStraight] * 10
    torchOffsets = numpy.array(torchOffsets, dtype='float32')
    # ...then each wall orientation is leaned toward its wall by sliding
    # the bottom fully (0.5), the top slightly (0.1), and the cap midway
    # (0.25) along the mounting axis.
    torchOffsets[1][..., 3, :, 0] -= 0.5
    torchOffsets[1][..., 0:2, 0:2, 0] -= 0.5
    torchOffsets[1][..., 4:6, 0:2, 0] -= 0.5
    torchOffsets[1][..., 0:2, 2:4, 0] -= 0.1
    torchOffsets[1][..., 4:6, 2:4, 0] -= 0.1
    torchOffsets[1][..., 2, :, 0] -= 0.25
    torchOffsets[2][..., 3, :, 0] += 0.5
    torchOffsets[2][..., 0:2, 0:2, 0] += 0.5
    torchOffsets[2][..., 4:6, 0:2, 0] += 0.5
    torchOffsets[2][..., 0:2, 2:4, 0] += 0.1
    torchOffsets[2][..., 4:6, 2:4, 0] += 0.1
    torchOffsets[2][..., 2, :, 0] += 0.25
    torchOffsets[3][..., 3, :, 2] -= 0.5
    torchOffsets[3][..., 0:2, 0:2, 2] -= 0.5
    torchOffsets[3][..., 4:6, 0:2, 2] -= 0.5
    torchOffsets[3][..., 0:2, 2:4, 2] -= 0.1
    torchOffsets[3][..., 4:6, 2:4, 2] -= 0.1
    torchOffsets[3][..., 2, :, 2] -= 0.25
    torchOffsets[4][..., 3, :, 2] += 0.5
    torchOffsets[4][..., 0:2, 0:2, 2] += 0.5
    torchOffsets[4][..., 4:6, 0:2, 2] += 0.5
    torchOffsets[4][..., 0:2, 2:4, 2] += 0.1
    torchOffsets[4][..., 4:6, 2:4, 2] += 0.1
    torchOffsets[4][..., 2, :, 2] += 0.25
    # Atlas texcoords for the small cap squares on top/bottom of the torch.
    upCoords = ((7, 6), (7, 8), (9, 8), (9, 6))
    downCoords = ((7, 14), (7, 16), (9, 16), (9, 14))

    def makeTorchVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: draw all six faces of every torch (no face culling),
        displaced by the orientation-specific offset table, at full
        brightness; yields after each direction."""
        blockIndices = self.getMaterialIndices(blockMaterials)
        torchOffsets = self.torchOffsets[blockData[blockIndices]]
        texes = texMap(blocks[blockIndices], blockData[blockIndices])
        yield
        arrays = []
        for direction in range(6):
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                return
            vertexArray.view('uint8')[_RGBA] = 0xff  # torches are self-lit
            vertexArray[_XYZ] += torchOffsets[:, direction]
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_ST] = self.upCoords
            if direction == pymclevel.faces.FaceYDecreasing:
                vertexArray[_ST] = self.downCoords
            vertexArray[_ST] += texes[:, numpy.newaxis, direction]
            arrays.append(vertexArray)
            yield
        self.vertexArrays = arrays

    makeVertices = makeTorchVertices
class RailBlockRenderer(BlockRenderer):
    """Renders rails, powered rails, and detector rails as a single flat
    (or sloped) quad just above the floor, oriented per the data value."""
    blocktypes = [pymclevel.materials.alphaMaterials.Rail.ID, pymclevel.materials.alphaMaterials.PoweredRail.ID, pymclevel.materials.alphaMaterials.DetectorRail.ID]
    renderstate = ChunkCalculator.renderstateAlphaTest
    # Per-data-value texcoord quads (rotated/flipped rail tiles), expressed
    # relative to the plain rail's atlas tile, which is subtracted below.
    railTextures = numpy.array([
        [(0, 128), (0, 144), (16, 144), (16, 128)],  # east-west
        [(0, 128), (16, 128), (16, 144), (0, 144)],  # north-south
        [(0, 128), (16, 128), (16, 144), (0, 144)],  # south-ascending
        [(0, 128), (16, 128), (16, 144), (0, 144)],  # north-ascending
        [(0, 128), (0, 144), (16, 144), (16, 128)],  # east-ascending
        [(0, 128), (0, 144), (16, 144), (16, 128)],  # west-ascending
        [(0, 112), (0, 128), (16, 128), (16, 112)],  # northeast corner
        [(0, 128), (16, 128), (16, 112), (0, 112)],  # southeast corner
        [(16, 128), (16, 112), (0, 112), (0, 128)],  # southwest corner
        [(16, 112), (0, 112), (0, 128), (16, 128)],  # northwest corner
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
    ], dtype='float32')
    railTextures -= pymclevel.materials.alphaMaterials.blockTextures[pymclevel.materials.alphaMaterials.Rail.ID, 0, 0]
    # Per-data-value Y offset of each corner, raising two corners by one
    # block for the ascending variants.
    railOffsets = numpy.array([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 1, 1],  # south-ascending
        [1, 1, 0, 0],  # north-ascending
        [1, 0, 0, 1],  # east-ascending
        [0, 1, 1, 0],  # west-ascending
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
    ], dtype='float32')

    def makeRailVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: build one upward-facing quad per rail block, textured
        and offset according to its orientation data."""
        direction = pymclevel.faces.FaceYIncreasing
        blockIndices = self.getMaterialIndices(blockMaterials)
        yield
        bdata = blockData[blockIndices]
        railBlocks = blocks[blockIndices]
        tex = texMap(railBlocks, bdata, pymclevel.faces.FaceYIncreasing)[:, numpy.newaxis, :]
        # disable 'powered' or 'pressed' bit for powered and detector rails
        bdata[railBlocks != pymclevel.materials.alphaMaterials.Rail.ID] &= ~0x8
        vertexArray = self.makeTemplate(direction, blockIndices)
        if not len(vertexArray):
            return
        vertexArray[_ST] = self.railTextures[bdata]
        vertexArray[_ST] += tex
        vertexArray[_XYZ][..., 1] -= 0.9  # sit just above the block below
        vertexArray[_XYZ][..., 1] += self.railOffsets[bdata]
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        vertexArray.view('uint8')[_RGB] *= blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        yield
        self.vertexArrays = [vertexArray]

    makeVertices = makeRailVertices
class LadderBlockRenderer(BlockRenderer):
    """Renders ladders as a single quad pressed against the wall the data
    value says they hang on."""
    blocktypes = [pymclevel.materials.alphaMaterials.Ladder.ID]
    # Per-data-value corner offsets that tilt the top-face template into a
    # vertical quad against the correct wall (indices 0-1 are invalid data).
    ladderOffsets = numpy.array([
        [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)],
        [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)],
        [(0, -1, 0.9), (0, 0, -0.1), (0, 0, -0.1), (0, -1, 0.9)],  # facing east
        [(0, 0, 0.1), (0, -1, -.9), (0, -1, -.9), (0, 0, 0.1)],  # facing west
        [(.9, -1, 0), (.9, -1, 0), (-.1, 0, 0), (-.1, 0, 0)],  # north
        [(0.1, 0, 0), (0.1, 0, 0), (-.9, -1, 0), (-.9, -1, 0)],  # south
    ] + [[(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)]] * 10, dtype='float32')
    # Matching texcoord quads, rotated so the ladder texture stays upright.
    ladderTextures = numpy.array([
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(0, 192), (0, 208), (16, 208), (16, 192)],  # unknown
        [(64, 96), (64, 80), (48, 80), (48, 96), ],  # e
        [(48, 80), (48, 96), (64, 96), (64, 80), ],  # w
        [(48, 96), (64, 96), (64, 80), (48, 80), ],  # n
        [(64, 80), (48, 80), (48, 96), (64, 96), ],  # s
    ] + [[(0, 192), (0, 208), (16, 208), (16, 192)]] * 10, dtype='float32')

    def ladderVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: one wall quad per ladder block, lit by the block's own
        cell brightness."""
        blockIndices = self.getMaterialIndices(blockMaterials)
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
        yield
        bdata = blockData[blockIndices]
        vertexArray = self.makeTemplate(pymclevel.faces.FaceYIncreasing, blockIndices)
        if not len(vertexArray):
            return
        vertexArray[_ST] = self.ladderTextures[bdata]
        vertexArray[_XYZ] += self.ladderOffsets[bdata]
        vertexArray.view('uint8')[_RGB] *= blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        yield
        self.vertexArrays = [vertexArray]

    makeVertices = ladderVertices
class SnowBlockRenderer(BlockRenderer):
    """Renders snow layers (ID 78) as a slab 2/16 of a block tall: the top
    face is lowered by 0.875 and side faces are shortened to match."""
    snowID = 78
    blocktypes = [snowID]

    def makeSnowVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        """Generator: per-direction snow quads; the top face is always drawn
        (it sits below the cube top, so exposure culling doesn't apply)."""
        snowIndices = self.getMaterialIndices(blockMaterials)
        arrays = []
        yield
        for direction, exposedFaceIndices in enumerate(facingBlockIndices):
            if direction != pymclevel.faces.FaceYIncreasing:
                blockIndices = snowIndices & exposedFaceIndices
            else:
                blockIndices = snowIndices
            facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
            lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
            vertexArray = self.makeTemplate(direction, blockIndices)
            if not len(vertexArray):
                continue
            vertexArray[_ST] += texMap([self.snowID], 0, 0)[:, numpy.newaxis, 0:2]
            vertexArray.view('uint8')[_RGB] *= lights
            if direction == pymclevel.faces.FaceYIncreasing:
                vertexArray[_XYZ][..., 1] -= 0.875  # drop top face to layer height
            if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
                # Shorten the side faces' top edge and shift their texcoords.
                vertexArray[_XYZ][..., 2:4, 1] -= 0.875
                vertexArray[_ST][..., 2:4, 1] += 14
            arrays.append(vertexArray)
            yield
        self.vertexArrays = arrays

    makeVertices = makeSnowVertices
class RedstoneBlockRenderer(BlockRenderer):
    """Renders redstone wire (id 55) as a flat quad just above the floor,
    tinted red with brightness scaled by the wire's power level."""
    blocktypes = [55]

    def redstoneVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        # Generator; bare `yield`s are scheduling points for the chunk worker.
        blockIndices = self.getMaterialIndices(blockMaterials)
        yield
        vertexArray = self.makeTemplate(pymclevel.faces.FaceYIncreasing, blockIndices)
        if not len(vertexArray):
            return

        # NOTE(review): hardcodes block id 55 and reads the texture table
        # directly instead of going through texMap like the other renderers.
        vertexArray[_ST] += pymclevel.materials.alphaMaterials.blockTextures[55, 0, 0]
        # Sit the wire 0.1 above the block below it.
        vertexArray[_XYZ][..., 1] -= 0.9

        # Fancy indexing returns a copy, so the in-place ops below do not
        # modify the caller's blockData.  Power level (0-15) is scaled into
        # the high bits of the red channel; any powered wire gets at least 0x80.
        bdata = blockData[blockIndices]
        bdata <<= 3
        # bdata &= 0xe0
        bdata[bdata > 0] |= 0x80

        vertexArray.view('uint8')[_RGBA][..., 0] = bdata[..., numpy.newaxis]
        # Zero the green and blue channels, leaving a pure red tint.
        vertexArray.view('uint8')[_RGBA][..., 0:3] *= [1, 0, 0]

        yield
        self.vertexArrays = [vertexArray]

    makeVertices = redstoneVertices
# button, floor plate, door -> 1-cube features
class FeatureBlockRenderer(BlockRenderer):
    """Renderer for sub-cube "feature" blocks.

    Currently only fences are enabled (see `blocktypes`); the button
    machinery below is retained but unused because `makeVertices` is bound
    to `fenceVertices`.
    """
    # blocktypes = [pymclevel.materials.alphaMaterials.Button.ID,
    # pymclevel.materials.alphaMaterials.StoneFloorPlate.ID,
    # pymclevel.materials.alphaMaterials.WoodFloorPlate.ID,
    # pymclevel.materials.alphaMaterials.WoodenDoor.ID,
    # pymclevel.materials.alphaMaterials.IronDoor.ID,
    # ]
    #
    blocktypes = [pymclevel.materials.alphaMaterials.Fence.ID]

    # Per-face corner offsets (in 1/16ths of a block) for the six faces of a
    # button-sized box.  Negative entries are wrapped to the far side below.
    buttonOffsets = [
        [[-14 / 16., 6 / 16., -5 / 16.],
         [-14 / 16., 6 / 16., 5 / 16.],
         [-14 / 16., -7 / 16., 5 / 16.],
         [-14 / 16., -7 / 16., -5 / 16.],
        ],
        [[0 / 16., 6 / 16., 5 / 16.],
         [0 / 16., 6 / 16., -5 / 16.],
         [0 / 16., -7 / 16., -5 / 16.],
         [0 / 16., -7 / 16., 5 / 16.],
        ],

        [[0 / 16., -7 / 16., 5 / 16.],
         [0 / 16., -7 / 16., -5 / 16.],
         [-14 / 16., -7 / 16., -5 / 16.],
         [-14 / 16., -7 / 16., 5 / 16.],
        ],
        [[0 / 16., 6 / 16., 5 / 16.],
         [-14 / 16., 6 / 16., 5 / 16.],
         [-14 / 16., 6 / 16., -5 / 16.],
         [0 / 16., 6 / 16., -5 / 16.],
        ],

        [[0 / 16., 6 / 16., -5 / 16.],
         [-14 / 16., 6 / 16., -5 / 16.],
         [-14 / 16., -7 / 16., -5 / 16.],
         [0 / 16., -7 / 16., -5 / 16.],
        ],
        [[-14 / 16., 6 / 16., 5 / 16.],
         [0 / 16., 6 / 16., 5 / 16.],
         [0 / 16., -7 / 16., 5 / 16.],
         [-14 / 16., -7 / 16., 5 / 16.],
        ],
    ]
    buttonOffsets = numpy.array(buttonOffsets)
    buttonOffsets[buttonOffsets < 0] += 1.0

    dirIndexes = ((3, 2), (-3, 2), (1, 3), (1, 3), (-1, 2), (1, 2))

    def buttonVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        # NOTE(review): dead code — makeVertices is bound to fenceVertices
        # below, so this method is never called by the renderer machinery.
        blockIndices = blocks == pymclevel.materials.alphaMaterials.Button.ID
        axes = blockIndices.nonzero()

        vertexArray = numpy.zeros((len(axes[0]), 6, 4, 6), dtype=numpy.float32)
        vertexArray[_XYZ][..., 0] = axes[0][..., numpy.newaxis, numpy.newaxis]
        vertexArray[_XYZ][..., 1] = axes[2][..., numpy.newaxis, numpy.newaxis]
        vertexArray[_XYZ][..., 2] = axes[1][..., numpy.newaxis, numpy.newaxis]

        vertexArray[_XYZ] += self.buttonOffsets
        vertexArray[_ST] = [[0, 0], [0, 16], [16, 16], [16, 0]]
        vertexArray[_ST] += texMap(pymclevel.materials.alphaMaterials.Stone.ID, 0)[numpy.newaxis, :, numpy.newaxis]

        # if direction == 0:
        # for i, j in enumerate(self.dirIndexes[direction]):
        # if j < 0:
        # j = -j
        # j -= 1
        # offs = self.buttonOffsets[direction, ..., j] * 16
        # offs = 16 - offs
        #
        # else:
        # j -= 1
        # offs =self.buttonOffsets[direction, ..., j] * 16
        #
        # # if i == 1:
        # #
        # # vertexArray[_ST][...,i] -= offs
        # # else:
        # vertexArray[_ST][...,i] -= offs
        #
        vertexArray.view('uint8')[_RGB] = 255
        vertexArray.shape = (len(axes[0]) * 6, 4, 6)
        self.vertexArrays = [vertexArray]

    # Fence posts are a 6/16-wide, full-height box centered in the block.
    fenceTemplates = makeVertexTemplates(3 / 8., 0, 3 / 8., 5 / 8., 1, 5 / 8.)

    def fenceVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        # Generator; bare `yield`s are scheduling points for the chunk worker.
        fenceMask = blocks == pymclevel.materials.alphaMaterials.Fence.ID
        fenceIndices = fenceMask.nonzero()
        yield

        vertexArray = numpy.zeros((len(fenceIndices[0]), 6, 4, 6), dtype='float32')
        for i in range(3):
            j = (0, 2, 1)[i]
            vertexArray[..., i] = fenceIndices[j][:, numpy.newaxis, numpy.newaxis]  # xxx swap z with y using ^

        vertexArray[..., 0:5] += self.fenceTemplates[..., 0:5]
        # Fences always use the wood-planks texture.
        vertexArray[_ST] += pymclevel.materials.alphaMaterials.blockTextures[pymclevel.materials.alphaMaterials.WoodPlanks.ID, 0, 0]

        vertexArray.view('uint8')[_RGB] = self.fenceTemplates[..., 5][..., numpy.newaxis]
        vertexArray.view('uint8')[_A] = 0xFF
        vertexArray.view('uint8')[_RGB] *= areaBlockLights[1:-1, 1:-1, 1:-1][fenceIndices][..., numpy.newaxis, numpy.newaxis, numpy.newaxis]
        vertexArray.shape = (vertexArray.shape[0] * 6, 4, 6)
        yield
        self.vertexArrays = [vertexArray]

    makeVertices = fenceVertices
class StairBlockRenderer(BlockRenderer):
    """Renders stair blocks as two boxes: a half-height slab plus a
    half-width step whose orientation comes from the block data."""

    @classmethod
    def getBlocktypes(cls, mats):
        # Stairs are whatever the active material set declares as stairs.
        return [a.ID for a in mats.AllStairs]

    # South - FaceXIncreasing
    # North - FaceXDecreasing
    # West - FaceZIncreasing
    # East - FaceZDecreasing
    stairTemplates = numpy.array([makeVertexTemplates(**kw) for kw in [
        # South - FaceXIncreasing
        {"xmin":0.5},
        # North - FaceXDecreasing
        {"xmax":0.5},
        # West - FaceZIncreasing
        {"zmin":0.5},
        # East - FaceZDecreasing
        {"zmax":0.5},
        # Slabtype
        {"ymax":0.5},
    ]
    ])

    def stairVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
        # Generator; bare `yield`s are scheduling points for the chunk worker.
        arrays = []
        materialIndices = self.getMaterialIndices(blockMaterials)
        yield
        stairBlocks = blocks[materialIndices]
        stairData = blockData[materialIndices]
        # Bit 2 marks an upside-down stair; low two bits are the orientation.
        stairTop = (stairData >> 2).astype(bool)
        stairData &= 3

        # NOTE(review): blockLight is computed but never used below.
        blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]

        x, z, y = materialIndices.nonzero()

        # Emit two vertex arrays per stair: the half-slab base, then the step.
        for _ in ("slab", "step"):
            vertexArray = numpy.zeros((len(x), 6, 4, 6), dtype='float32')
            for i in range(3):
                vertexArray[_XYZ][..., i] = (x, y, z)[i][:, numpy.newaxis, numpy.newaxis]

            if _ == "step":
                vertexArray[_XYZST] += self.stairTemplates[4][..., :5]
                # Upside-down stairs get their step raised to the top half.
                vertexArray[_XYZ][..., 1][stairTop] += 0.5
            else:
                vertexArray[_XYZST] += self.stairTemplates[stairData][..., :5]

            vertexArray[_ST] += texMap(stairBlocks, 0)[..., numpy.newaxis, :]

            # Per-face shade from the template, scaled into the byte color.
            vertexArray.view('uint8')[_RGB] = self.stairTemplates[4][numpy.newaxis, ..., 5, numpy.newaxis]
            vertexArray.view('uint8')[_RGB] *= 0xf
            vertexArray.view('uint8')[_A] = 0xff
            vertexArray.shape = (len(x) * 6, 4, 6)
            yield
            arrays.append(vertexArray)

        self.vertexArrays = arrays

    makeVertices = stairVertices
class VineBlockRenderer(BlockRenderer):
    """Renders vine blocks (id 106).

    A vine's block data is a bitmask of the walls it hangs on; for each of
    the four horizontal directions a face is emitted only for vines whose
    corresponding bit is set, then nudged 1/16 block off the wall and tinted
    with the foliage color.
    """
    blocktypes = [106]

    # Block-data bit flags: one bit per wall the vine is attached to.
    SouthBit = 1  # FaceZIncreasing
    WestBit = 2  # FaceXDecreasing
    NorthBit = 4  # FaceZDecreasing
    EastBit = 8  # FaceXIncreasing

    renderstate = ChunkCalculator.renderstateVines

    def vineFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight, facingBlockLight, texMap):
        bdata = blockData[blockIndices]
        blockIndices = numpy.array(blockIndices)

        # Map the face direction to its attachment bit.  Previously these
        # were magic literals (1/2/4/8) duplicating the class constants above.
        if direction == pymclevel.faces.FaceZIncreasing:
            bit = self.SouthBit
        elif direction == pymclevel.faces.FaceXDecreasing:
            bit = self.WestBit
        elif direction == pymclevel.faces.FaceZDecreasing:
            bit = self.NorthBit
        elif direction == pymclevel.faces.FaceXIncreasing:
            bit = self.EastBit
        else:
            # Vines never render on top/bottom faces.
            return []

        # Keep only vine blocks attached to this wall.
        blockIndices[blockIndices] = (bdata & bit).astype(bool)

        vertexArray = self.makeTemplate(direction, blockIndices)
        if not len(vertexArray):
            return vertexArray

        vertexArray[_ST] += texMap(self.blocktypes[0], [0], direction)[:, numpy.newaxis, 0:2]

        lights = blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        vertexArray.view('uint8')[_RGB] *= lights
        # Vines share the biome foliage tint with leaves.
        vertexArray.view('uint8')[_RGB] *= LeafBlockRenderer.leafColor

        # Pull the quad 1/16 block away from the wall it hangs on.
        if direction == pymclevel.faces.FaceZIncreasing:
            vertexArray[_XYZ][..., 2] -= 0.0625
        if direction == pymclevel.faces.FaceXDecreasing:
            vertexArray[_XYZ][..., 0] += 0.0625
        if direction == pymclevel.faces.FaceZDecreasing:
            vertexArray[_XYZ][..., 2] += 0.0625
        if direction == pymclevel.faces.FaceXIncreasing:
            vertexArray[_XYZ][..., 0] -= 0.0625

        return vertexArray

    makeFaceVertices = vineFaceVertices
class SlabBlockRenderer(BlockRenderer):
    """Renders half-slab blocks (ids 44 and 126) as half-height boxes,
    raised to the top half of the cell when data bit 3 is set."""
    blocktypes = [44, 126]

    def slabFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight, facingBlockLight, texMap):
        if direction != pymclevel.faces.FaceYIncreasing:
            blockIndices = blockIndices & exposedFaceIndices

        lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
        bdata = blockData[blockIndices]
        # Bit 3 marks a top-half slab; low bits select the slab material.
        top = (bdata >> 3).astype(bool)
        bdata &= 7

        vertexArray = self.makeTemplate(direction, blockIndices)
        if not len(vertexArray):
            return vertexArray

        vertexArray[_ST] += texMap(blocks[blockIndices], bdata, direction)[:, numpy.newaxis, 0:2]
        vertexArray.view('uint8')[_RGB] *= lights

        if direction == pymclevel.faces.FaceYIncreasing:
            # Lower the top face to half height.
            vertexArray[_XYZ][..., 1] -= 0.5

        if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
            # Shorten side faces and shift their texture rows (8 texels of 16).
            vertexArray[_XYZ][..., 2:4, 1] -= 0.5
            vertexArray[_ST][..., 2:4, 1] += 8

        # Raise top-half slabs back up by half a block.
        vertexArray[_XYZ][..., 1][top] += 0.5

        return vertexArray

    makeFaceVertices = slabFaceVertices
class WaterBlockRenderer(BlockRenderer):
    """Renders still and flowing water (ids 8 and 9) with the blended
    water render state; every face uses the still-water texture."""
    waterID = 9
    blocktypes = [8, waterID]
    renderstate = ChunkCalculator.renderstateWater

    def waterFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight, facingBlockLight, texMap):
        # Only water faces that are actually exposed get geometry.
        visible = blockIndices & exposedFaceIndices
        quads = self.makeTemplate(direction, visible)

        # Same texture for every face and data value.
        quads[_ST] += texMap(self.waterID, 0, 0)[numpy.newaxis, numpy.newaxis]

        # Scale the byte colors by the light of the neighboring cell.
        shading = facingBlockLight[visible][..., numpy.newaxis, numpy.newaxis]
        quads.view('uint8')[_RGB] *= shading
        return quads

    makeFaceVertices = waterFaceVertices
class IceBlockRenderer(BlockRenderer):
    """Renders ice blocks (id 79) using the translucent ice render state."""
    iceID = 79
    blocktypes = [iceID]
    renderstate = ChunkCalculator.renderstateIce

    def iceFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight, facingBlockLight, texMap):
        # Restrict to exposed faces, then stamp out the face template.
        visible = blockIndices & exposedFaceIndices
        quads = self.makeTemplate(direction, visible)

        # One fixed texture for all ice faces.
        quads[_ST] += texMap(self.iceID, 0, 0)[numpy.newaxis, numpy.newaxis]

        # Modulate byte colors by the adjacent cell's block light.
        shading = facingBlockLight[visible][..., numpy.newaxis, numpy.newaxis]
        quads.view('uint8')[_RGB] *= shading
        return quads

    makeFaceVertices = iceFaceVertices
from glutils import DisplayList
class MCRenderer(object):
    """Renders a Minecraft level as OpenGL display lists.

    One ChunkRenderer per loaded chunk; vertex computation is split into
    small generator steps (the "chunk worker") so the UI thread can
    interleave drawing with chunk loading.  Chunks outside the view
    distance are discarded; a vertex-buffer budget evicts the farthest
    chunks when exceeded.
    """
    isPreviewer = False

    def __init__(self, level=None, alpha=1.0):
        self.render = True
        self.origin = (0, 0, 0)
        self.rotation = 0

        self.bufferUsage = 0

        self.invalidChunkQueue = deque()
        self._chunkWorker = None
        self.chunkRenderers = {}
        self.loadableChunkMarkers = DisplayList()
        self.visibleLayers = set(Layer.AllLayers)

        self.masterLists = None

        # Alpha is given in 0..1 and stored as a byte.
        alpha = alpha * 255
        self.alpha = (int(alpha) & 0xff)

        self.chunkStartTime = datetime.now()
        self.oldChunkStartTime = self.chunkStartTime

        self.oldPosition = None

        # Rolling window of per-chunk build durations (see chunkDone).
        self.chunkSamples = [timedelta(0, 0, 0)] * 15

        self.chunkIterator = None

        # Imported here, presumably to avoid a circular import — TODO confirm.
        import leveleditor
        Settings = leveleditor.Settings

        Settings.fastLeaves.addObserver(self)
        Settings.roughGraphics.addObserver(self)
        Settings.showHiddenOres.addObserver(self)
        Settings.vertexBufferLimit.addObserver(self)

        Settings.drawEntities.addObserver(self)
        Settings.drawTileEntities.addObserver(self)
        Settings.drawTileTicks.addObserver(self)
        Settings.drawUnpopulatedChunks.addObserver(self, "drawTerrainPopulated")
        Settings.drawMonsters.addObserver(self)
        Settings.drawItems.addObserver(self)

        Settings.showChunkRedraw.addObserver(self, "showRedraw")
        Settings.spaceHeight.addObserver(self)
        Settings.targetFPS.addObserver(self, "targetFPS")

        # Assigning the level kicks off chunk loading (see the level setter).
        self.level = level

    chunkClass = ChunkRenderer
    calculatorClass = ChunkCalculator

    minViewDistance = 2
    maxViewDistance = 24
    _viewDistance = 8

    needsRedraw = True

    def toggleLayer(self, val, layer):
        """Show or hide one render layer and invalidate it on every chunk."""
        if val:
            self.visibleLayers.add(layer)
        else:
            self.visibleLayers.discard(layer)
        for cr in self.chunkRenderers.itervalues():
            cr.invalidLayers.add(layer)

        self.loadNearbyChunks()

    def layerProperty(layer, default=True):  # @NoSelf
        """Build a property that stores a per-layer draw flag on the
        instance and calls toggleLayer when the flag changes."""
        attr = intern("_draw" + layer)

        def _get(self):
            return getattr(self, attr, default)

        def _set(self, val):
            if val != _get(self):
                setattr(self, attr, val)
                self.toggleLayer(val, layer)

        return property(_get, _set)

    drawEntities = layerProperty(Layer.Entities)
    drawTileEntities = layerProperty(Layer.TileEntities)
    drawTileTicks = layerProperty(Layer.TileTicks)
    drawMonsters = layerProperty(Layer.Monsters)
    drawItems = layerProperty(Layer.Items)
    drawTerrainPopulated = layerProperty(Layer.TerrainPopulated)

    def inSpace(self):
        """True when the camera is far above or below the level bounds."""
        if self.level is None:
            return True
        h = self.position[1]
        return ((h > self.level.Height + self.spaceHeight) or
                (h <= -self.spaceHeight))

    def chunkDistance(self, cpos):
        """Chebyshev distance, in chunks, from the camera to chunk cpos."""
        camx, camy, camz = self.position

        # if the renderer is offset into the world somewhere, adjust for that
        ox, oy, oz = self.origin
        camx -= ox
        camz -= oz

        camcx = int(numpy.floor(camx)) >> 4
        camcz = int(numpy.floor(camz)) >> 4

        cx, cz = cpos

        return max(abs(cx - camcx), abs(cz - camcz))

    overheadMode = False

    def detailLevelForChunk(self, cpos):
        """0 = full detail, 1 = reduced, 2 = overhead-only."""
        if self.overheadMode:
            return 2
        if self.isPreviewer:
            w, l, h = self.level.bounds.size
            if w + l < 256:
                return 0

        distance = self.chunkDistance(cpos) - self.viewDistance
        if distance > 0 or self.inSpace():
            return 1
        return 0

    def getViewDistance(self):
        return self._viewDistance

    def setViewDistance(self, vd):
        # Force an even value and clamp to the allowed range.
        vd = int(vd) & 0xfffe
        vd = min(max(vd, self.minViewDistance), self.maxViewDistance)
        if vd != self._viewDistance:
            self._viewDistance = vd
            self.viewDistanceChanged()
            # self.invalidateChunkMarkers()

    viewDistance = property(getViewDistance, setViewDistance, None, "View Distance")

    @property
    def effectiveViewDistance(self):
        # More chunks are kept loaded when looking down from "space".
        if self.inSpace():
            return self.viewDistance * 4
        else:
            return self.viewDistance * 2

    def viewDistanceChanged(self):
        self.oldPosition = None  # xxx
        self.discardMasterList()
        self.loadNearbyChunks()
        self.discardChunksOutsideViewDistance()

    maxWorkFactor = 64
    minWorkFactor = 1
    workFactor = 2

    chunkCalculator = None

    _level = None

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, level):
        """ this probably warrants creating a new renderer """
        self.stopWork()

        self._level = level
        self.oldPosition = None
        self.position = (0, 0, 0)
        self.chunkCalculator = None

        self.invalidChunkQueue = deque()

        self.discardAllChunks()

        self.loadableChunkMarkers.invalidate()

        if level:
            self.chunkCalculator = self.calculatorClass(self.level)

            self.oldPosition = None
            # Touch allChunks so the level enumerates its chunk list now.
            level.allChunks

            self.loadNearbyChunks()

    position = (0, 0, 0)

    def loadChunksStartingFrom(self, wx, wz, distance=None):  # world position
        """Start a spiral chunk iterator centered at world position (wx, wz)."""
        if None is self.level:
            return

        # NOTE(review): cx/cz are computed but unused below.
        cx = wx >> 4
        cz = wz >> 4

        if distance is None:
            d = self.effectiveViewDistance
        else:
            d = distance

        self.chunkIterator = self.iterateChunks(wx, wz, d * 2)

    def iterateChunks(self, x, z, d):
        """Yield chunk positions in an outward square spiral from (x, z),
        stopping after d rings (never stopping in overhead mode)."""
        cx = x >> 4
        cz = z >> 4

        yield (cx, cz)

        step = dir = 1

        while True:
            for i in range(step):
                cx += dir
                yield (cx, cz)

            for i in range(step):
                cz += dir
                yield (cx, cz)

            step += 1
            if step > d and not self.overheadMode:
                raise StopIteration

            dir = -dir

    chunkIterator = None

    @property
    def chunkWorker(self):
        # Lazily (re)create the work generator after stopWork().
        if self._chunkWorker is None:
            self._chunkWorker = self.makeWorkIterator()
        return self._chunkWorker

    def stopWork(self):
        self._chunkWorker = None

    def discardAllChunks(self):
        self.bufferUsage = 0
        self.forgetAllDisplayLists()
        self.chunkRenderers = {}
        self.oldPosition = None  # xxx force reload

    def discardChunksInBox(self, box):
        self.discardChunks(box.chunkPositions)

    def discardChunksOutsideViewDistance(self):
        if self.overheadMode:
            return

        # print "discardChunksOutsideViewDistance"
        d = self.effectiveViewDistance
        cx = (self.position[0] - self.origin[0]) / 16
        cz = (self.position[2] - self.origin[2]) / 16

        origin = (cx - d, cz - d)
        size = d * 2

        if not len(self.chunkRenderers):
            return
        (ox, oz) = origin
        bytes = 0
        # Pack the renderer keys into an (N, 2) int32 array for vector tests.
        # chunks = numpy.fromiter(self.chunkRenderers.iterkeys(), dtype='int32', count=len(self.chunkRenderers))
        chunks = numpy.fromiter(self.chunkRenderers.iterkeys(), dtype='i,i', count=len(self.chunkRenderers))
        chunks.dtype = 'int32'
        chunks.shape = len(self.chunkRenderers), 2

        if size:
            outsideChunks = chunks[:, 0] < ox - 1
            outsideChunks |= chunks[:, 0] > ox + size
            outsideChunks |= chunks[:, 1] < oz - 1
            outsideChunks |= chunks[:, 1] > oz + size
            chunks = chunks[outsideChunks]

            self.discardChunks(chunks)

    def discardChunks(self, chunks):
        for cx, cz in chunks:
            self.discardChunk(cx, cz)
        self.oldPosition = None  # xxx force reload

    def discardChunk(self, cx, cz):
        " discards the chunk renderer for this chunk and compresses the chunk "
        if (cx, cz) in self.chunkRenderers:
            self.bufferUsage -= self.chunkRenderers[cx, cz].bufferSize
            self.chunkRenderers[cx, cz].forgetDisplayLists()
            del self.chunkRenderers[cx, cz]

    _fastLeaves = False

    @property
    def fastLeaves(self):
        return self._fastLeaves

    @fastLeaves.setter
    def fastLeaves(self, val):
        # Changing the setting requires rebuilding all chunk geometry.
        if self._fastLeaves != bool(val):
            self.discardAllChunks()

        self._fastLeaves = bool(val)

    _roughGraphics = False

    @property
    def roughGraphics(self):
        return self._roughGraphics

    @roughGraphics.setter
    def roughGraphics(self, val):
        if self._roughGraphics != bool(val):
            self.discardAllChunks()

        self._roughGraphics = bool(val)

    _showHiddenOres = False

    @property
    def showHiddenOres(self):
        return self._showHiddenOres

    @showHiddenOres.setter
    def showHiddenOres(self, val):
        if self._showHiddenOres != bool(val):
            self.discardAllChunks()

        self._showHiddenOres = bool(val)

    def invalidateChunk(self, cx, cz, layers=None):
        " marks the chunk for regenerating vertex data and display lists "
        if (cx, cz) in self.chunkRenderers:
            # self.chunkRenderers[(cx,cz)].invalidate()
            # self.bufferUsage -= self.chunkRenderers[(cx, cz)].bufferSize

            self.chunkRenderers[(cx, cz)].invalidate(layers)
            # self.bufferUsage += self.chunkRenderers[(cx, cz)].bufferSize

            self.invalidChunkQueue.append((cx, cz))  # xxx encapsulate

    def invalidateChunksInBox(self, box, layers=None):
        # If the box is at the edge of any chunks, expanding by 1 makes sure the neighboring chunk gets redrawn.
        box = box.expand(1)

        self.invalidateChunks(box.chunkPositions, layers)

    def invalidateEntitiesInBox(self, box):
        self.invalidateChunks(box.chunkPositions, [Layer.Entities])

    def invalidateChunks(self, chunks, layers=None):
        for c in chunks:
            cx, cz = c
            self.invalidateChunk(cx, cz, layers)

        self.stopWork()
        self.discardMasterList()
        self.loadNearbyChunks()

    def invalidateAllChunks(self, layers=None):
        self.invalidateChunks(self.chunkRenderers.iterkeys(), layers)

    def forgetAllDisplayLists(self):
        for cr in self.chunkRenderers.itervalues():
            cr.forgetDisplayLists()

    def invalidateMasterList(self):
        self.discardMasterList()

    shouldRecreateMasterList = True

    def discardMasterList(self):
        self.shouldRecreateMasterList = True

    @property
    def shouldDrawAll(self):
        # Previewers draw small levels in their entirety.
        box = self.level.bounds
        return self.isPreviewer and box.width + box.length < 256

    distanceToChunkReload = 32.0

    def cameraMovedFarEnough(self):
        if self.shouldDrawAll:
            return False
        if self.oldPosition is None:
            return True

        cPos = self.position
        oldPos = self.oldPosition

        cameraDelta = self.distanceToChunkReload

        return any([abs(x - y) > cameraDelta for x, y in zip(cPos, oldPos)])

    def loadVisibleChunks(self):
        """ loads nearby chunks if the camera has moved beyond a certain distance """

        # print "loadVisibleChunks"
        if self.cameraMovedFarEnough():
            # Throttle reloads to at most once per half second.
            if datetime.now() - self.lastVisibleLoad > timedelta(0, 0.5):
                self.discardChunksOutsideViewDistance()
                self.loadNearbyChunks()

                self.oldPosition = self.position
                self.lastVisibleLoad = datetime.now()

    lastVisibleLoad = datetime.now()

    def loadNearbyChunks(self):
        if None is self.level:
            return
        # print "loadNearbyChunks"
        cameraPos = self.position

        if self.shouldDrawAll:
            self.loadAllChunks()
        else:
            # subtract self.origin to load nearby chunks correctly for preview renderers
            self.loadChunksStartingFrom(int(cameraPos[0]) - self.origin[0], int(cameraPos[2]) - self.origin[2])

    def loadAllChunks(self):
        box = self.level.bounds

        self.loadChunksStartingFrom(box.origin[0] + box.width / 2, box.origin[2] + box.length / 2, max(box.width, box.length))

    _floorTexture = None

    @property
    def floorTexture(self):
        if self._floorTexture is None:
            self._floorTexture = Texture(self.makeFloorTex)
        return self._floorTexture

    def makeFloorTex(self):
        """Texture callback: upload a 2x2 translucent checkerboard used for
        the loadable-chunk markers."""
        color0 = (0xff, 0xff, 0xff, 0x22)
        color1 = (0xff, 0xff, 0xff, 0x44)

        img = numpy.array([color0, color1, color1, color0], dtype='uint8')

        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)

        GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, 2, 2, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, img)

    def invalidateChunkMarkers(self):
        self.loadableChunkMarkers.invalidate()

    def _drawLoadableChunkMarkers(self):
        """Draw translucent quads over every chunk the level can load."""
        if self.level.chunkCount:
            chunkSet = set(self.level.allChunks)

            # Merge adjacent chunks into larger rectangles before drawing.
            sizedChunks = chunkMarkers(chunkSet)

            GL.glPushAttrib(GL.GL_FOG_BIT)
            GL.glDisable(GL.GL_FOG)

            GL.glEnable(GL.GL_BLEND)
            GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
            GL.glPolygonOffset(DepthOffset.ChunkMarkers, DepthOffset.ChunkMarkers)
            GL.glEnable(GL.GL_DEPTH_TEST)

            GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
            GL.glEnable(GL.GL_TEXTURE_2D)
            GL.glColor(1.0, 1.0, 1.0, 1.0)

            self.floorTexture.bind()
            # chunkColor = numpy.zeros(shape=(chunks.shape[0], 4, 4), dtype='float32')
            # chunkColor[:]= (1, 1, 1, 0.15)
            #
            # cc = numpy.array(chunks[:,0] + chunks[:,1], dtype='int32')
            # cc &= 1
            # coloredChunks = cc > 0
            # chunkColor[coloredChunks] = (1, 1, 1, 0.28)
            # chunkColor *= 255
            # chunkColor = numpy.array(chunkColor, dtype='uint8')
            #
            # GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, 0, chunkColor)
            for size, chunks in sizedChunks.iteritems():
                if not len(chunks):
                    continue
                chunks = numpy.array(chunks, dtype='float32')

                chunkPosition = numpy.zeros(shape=(chunks.shape[0], 4, 3), dtype='float32')
                chunkPosition[:, :, (0, 2)] = numpy.array(((0, 0), (0, 1), (1, 1), (1, 0)), dtype='float32')
                chunkPosition[:, :, (0, 2)] *= size
                chunkPosition[:, :, (0, 2)] += chunks[:, numpy.newaxis, :]
                # Chunk coordinates to world coordinates.
                chunkPosition *= 16

                GL.glVertexPointer(3, GL.GL_FLOAT, 0, chunkPosition.ravel())
                # chunkPosition *= 8
                GL.glTexCoordPointer(2, GL.GL_FLOAT, 0, (chunkPosition[..., (0, 2)] * 8).ravel())
                GL.glDrawArrays(GL.GL_QUADS, 0, len(chunkPosition) * 4)

            GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
            GL.glDisable(GL.GL_TEXTURE_2D)
            GL.glDisable(GL.GL_BLEND)
            GL.glDisable(GL.GL_DEPTH_TEST)
            GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
            GL.glPopAttrib()

    def drawLoadableChunkMarkers(self):
        if not self.isPreviewer or isinstance(self.level, pymclevel.MCInfdevOldLevel):
            self.loadableChunkMarkers.call(self._drawLoadableChunkMarkers)

            # self.drawCompressedChunkMarkers()

    needsImmediateRedraw = False
    viewingFrustum = None

    if "-debuglists" in sys.argv:
        # Debug mode: skip display-list batching and draw chunks directly.
        def createMasterLists(self):
            pass

        def callMasterLists(self):
            for cr in self.chunkRenderers.itervalues():
                cr.debugDraw()
    else:
        def createMasterLists(self):
            """Collect display-list IDs from every chunk into one array per
            render state.  At most 80 chunks are (re)compiled per frame;
            leftovers schedule another rebuild pass."""
            if self.shouldRecreateMasterList:
                lists = {}
                chunkLists = defaultdict(list)
                chunksPerFrame = 80
                shouldRecreateAgain = False

                for ch in self.chunkRenderers.itervalues():
                    if chunksPerFrame:
                        if ch.needsRedisplay:
                            chunksPerFrame -= 1
                        ch.makeDisplayLists()
                    else:
                        shouldRecreateAgain = True

                    if ch.renderstateLists:
                        for rs in ch.renderstateLists:
                            chunkLists[rs] += ch.renderstateLists[rs]

                for rs in chunkLists:
                    if len(chunkLists[rs]):
                        lists[rs] = numpy.array(chunkLists[rs], dtype='uint32').ravel()

                # lists = lists[lists.nonzero()]
                self.masterLists = lists
                self.shouldRecreateMasterList = shouldRecreateAgain
                self.needsImmediateRedraw = shouldRecreateAgain

        def callMasterLists(self):
            """Bind each render state and execute its batched display lists."""
            for renderstate in self.chunkCalculator.renderstates:
                if renderstate not in self.masterLists:
                    continue

                if self.alpha != 0xff and renderstate is not ChunkCalculator.renderstateLowDetail:
                    GL.glEnable(GL.GL_BLEND)
                renderstate.bind()

                GL.glCallLists(self.masterLists[renderstate])

                renderstate.release()
                if self.alpha != 0xff and renderstate is not ChunkCalculator.renderstateLowDetail:
                    GL.glDisable(GL.GL_BLEND)

    errorLimit = 10

    def draw(self):
        """Draw the whole level: set up GL state, execute the master display
        lists, then draw the loadable-chunk markers."""
        self.needsRedraw = False
        if not self.level:
            return
        if not self.chunkCalculator:
            return
        if not self.render:
            return

        chunksDrawn = 0

        # Pocket/Alpha terrain textures are addressed at half scale.
        if self.level.materials.name in ("Pocket", "Alpha"):
            GL.glMatrixMode(GL.GL_TEXTURE)
            GL.glScalef(1/2., 1/2., 1/2.)

        with gl.glPushMatrix(GL.GL_MODELVIEW):
            dx, dy, dz = self.origin
            GL.glTranslate(dx, dy, dz)

            GL.glEnable(GL.GL_CULL_FACE)
            GL.glEnable(GL.GL_DEPTH_TEST)

            self.level.materials.terrainTexture.bind()
            GL.glEnable(GL.GL_TEXTURE_2D)
            GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)

            offset = DepthOffset.PreviewRenderer if self.isPreviewer else DepthOffset.Renderer
            GL.glPolygonOffset(offset, offset)
            GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)

            self.createMasterLists()

            try:
                self.callMasterLists()

            except GL.GLError, e:
                # Report only the first errorLimit GL errors, then go quiet.
                if self.errorLimit:
                    self.errorLimit -= 1
                    traceback.print_exc()
                    print e

            GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)

            GL.glDisable(GL.GL_CULL_FACE)
            GL.glDisable(GL.GL_DEPTH_TEST)

            GL.glDisable(GL.GL_TEXTURE_2D)
            GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
            # if self.drawLighting:
            self.drawLoadableChunkMarkers()

        # Undo the half-scale texture matrix applied above.
        if self.level.materials.name in ("Pocket", "Alpha"):
            GL.glMatrixMode(GL.GL_TEXTURE)
            GL.glScalef(2., 2., 2.)

    renderErrorHandled = False

    def addDebugInfo(self, addDebugString):
        """Append buffer usage, work-queue length, and renderer count to the
        debug overlay string."""
        addDebugString("BU: {0} MB, ".format(
            self.bufferUsage / 1000000,
        ))

        addDebugString("WQ: {0}, ".format(len(self.invalidChunkQueue)))
        if self.chunkIterator:
            addDebugString("[LR], ")

        addDebugString("CR: {0}, ".format(len(self.chunkRenderers),))

    def next(self):
        # Python 2 iterator protocol: advance the chunk worker by one step.
        self.chunkWorker.next()

    def makeWorkIterator(self):
        ''' does chunk face and vertex calculation work. returns a generator that can be
        iterated over for smaller work units.'''

        try:
            while True:
                if self.level is None:
                    raise StopIteration

                # Safety valve: a huge invalidation backlog is dropped wholesale.
                if len(self.invalidChunkQueue) > 1024:
                    self.invalidChunkQueue.clear()

                if len(self.invalidChunkQueue):
                    # Invalidated chunks take priority over newly loaded ones.
                    c = self.invalidChunkQueue[0]
                    for i in self.workOnChunk(c):
                        yield

                    self.invalidChunkQueue.popleft()

                elif self.chunkIterator is None:
                    raise StopIteration

                else:
                    c = self.chunkIterator.next()

                    if self.vertexBufferLimit:
                        # Over budget: evict renderers farther away than the
                        # incoming chunk until we fit under 90% of the limit.
                        while self.bufferUsage > (0.9 * (self.vertexBufferLimit << 20)):
                            deadChunk = None
                            deadDistance = self.chunkDistance(c)
                            for cr in self.chunkRenderers.itervalues():
                                dist = self.chunkDistance(cr.chunkPosition)
                                if dist > deadDistance:
                                    deadChunk = cr
                                    deadDistance = dist

                            if deadChunk is not None:
                                self.discardChunk(*deadChunk.chunkPosition)

                            else:
                                # Nothing farther to evict; skip this chunk.
                                break

                        else:
                            # while-else: only build when we fit in the budget.
                            for i in self.workOnChunk(c):
                                yield

                    else:
                        for i in self.workOnChunk(c):
                            yield

                yield

        finally:
            self._chunkWorker = None
            if self.chunkIterator:
                self.chunkIterator = None

    vertexBufferLimit = 384

    def getChunkRenderer(self, c):
        if not (c in self.chunkRenderers):
            cr = self.chunkClass(self, c)
        else:
            cr = self.chunkRenderers[c]

        return cr

    def calcFacesForChunkRenderer(self, cr):
        """Generator: drive one chunk renderer's face calculation, counting
        work units, then record it via chunkDone."""
        self.bufferUsage -= cr.bufferSize

        calc = cr.calcFaces()
        work = 0
        for i in calc:
            yield
            work += 1

        self.chunkDone(cr, work)

    def workOnChunk(self, c):
        """Generator: build (or rebuild) the renderer for chunk c, yielding
        every workFactor units so the caller can interleave frames."""
        work = 0

        if self.level.containsChunk(*c):
            cr = self.getChunkRenderer(c)
            if self.viewingFrustum:
                # Skip chunks outside the view frustum entirely.
                # if not self.viewingFrustum.visible(numpy.array([[c[0] * 16 + 8, 64, c[1] * 16 + 8, 1.0]]), 64).any():
                if not self.viewingFrustum.visible1([c[0] * 16 + 8, self.level.Height / 2, c[1] * 16 + 8, 1.0], self.level.Height / 2):
                    raise StopIteration
                yield

            faceInfoCalculator = self.calcFacesForChunkRenderer(cr)
            try:
                for result in faceInfoCalculator:
                    work += 1
                    if (work % MCRenderer.workFactor) == 0:
                        yield

                self.invalidateMasterList()

            except Exception, e:
                # A bad chunk is logged and skipped rather than killing the worker.
                traceback.print_exc()
                fn = c

                logging.info(u"Skipped chunk {f}: {e}".format(e=e, f=fn))

    redrawChunks = 0

    def chunkDone(self, chunkRenderer, work):
        """Register a finished chunk renderer and update timing statistics."""
        self.chunkRenderers[chunkRenderer.chunkPosition] = chunkRenderer
        self.bufferUsage += chunkRenderer.bufferSize
        # print "Chunk {0} used {1} work units".format(chunkRenderer.chunkPosition, work)
        if not self.needsRedraw:
            if self.redrawChunks:
                # Batch redraws: only flag a redraw every couple of chunks.
                self.redrawChunks -= 1
                if not self.redrawChunks:
                    self.needsRedraw = True
            else:
                self.redrawChunks = 2

        if work > 0:
            # Keep a rolling sample of per-chunk build times.
            self.oldChunkStartTime = self.chunkStartTime
            self.chunkStartTime = datetime.now()
            self.chunkSamples.pop(0)
            self.chunkSamples.append(self.chunkStartTime - self.oldChunkStartTime)

        cx, cz = chunkRenderer.chunkPosition
class PreviewRenderer(MCRenderer):
    """MCRenderer with isPreviewer set: uses the preview depth offset, and
    draws small levels in full (see MCRenderer.shouldDrawAll)."""
    isPreviewer = True
def rendermain():
    """Standalone benchmark: load "World1", time chunk generation, then
    render 200 frames and report throughput.  Requires a working GL
    context and a pygame display; exits on a mouse click."""
    renderer = MCRenderer()
    renderer.level = pymclevel.mclevel.loadWorld("World1")
    renderer.viewDistance = 6
    renderer.detailLevelForChunk = lambda * x: 0
    start = datetime.now()

    renderer.loadVisibleChunks()

    try:
        # Drain the chunk worker until all visible chunks are built.
        while True:
            # for i in range(100):
            renderer.next()

    except StopIteration:
        pass
    except Exception, e:
        traceback.print_exc()
        print repr(e)

    duration = datetime.now() - start
    perchunk = duration / len(renderer.chunkRenderers)
    print "Duration: {0} ({1} chunks per second, {2} per chunk, {3} chunks)".format(
        duration, 1000000.0 / perchunk.microseconds, perchunk, len(renderer.chunkRenderers))

    # display.init( (640, 480), OPENGL | DOUBLEBUF )
    from mcedit import GLDisplayContext
    from OpenGL import GLU
    cxt = GLDisplayContext()
    import pygame

    # Fixed overhead camera looking nearly straight down from y=366.
    # distance = 4000
    GL.glMatrixMode(GL.GL_PROJECTION)
    GL.glLoadIdentity()
    GLU.gluPerspective(35, 640.0 / 480.0, 0.5, 4000.0)
    h = 366

    pos = (0, h, 0)

    look = (0.0001, h - 1, 0.0001)
    up = (0, 1, 0)

    GL.glMatrixMode(GL.GL_MODELVIEW)
    GL.glLoadIdentity()
    GLU.gluLookAt(pos[0], pos[1], pos[2],
                  look[0], look[1], look[2],
                  up[0], up[1], up[2])

    GL.glClearColor(0.0, 0.0, 0.0, 1.0)

    framestart = datetime.now()
    frames = 200
    for i in range(frames):
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

        renderer.draw()
        pygame.display.flip()

    delta = datetime.now() - framestart
    seconds = delta.seconds + delta.microseconds / 1000000.0
    print "{0} frames in {1} ({2} per frame, {3} FPS)".format(frames, delta, delta / frames, frames / seconds)

    # Wait for a mouse click before exiting.
    while True:
        evt = pygame.event.poll()
        if evt.type == pygame.MOUSEBUTTONDOWN:
            break
            # time.sleep(3.0)
import traceback
import cProfile
if __name__ == "__main__":
    # Profile a full benchmark run; stats are dumped to "mcedit.profile"
    # for later inspection with pstats.
    cProfile.run("rendermain()", "mcedit.profile")
| isc | ca4fca7342e57b8260abfcdc03e22e5d | 33.939232 | 180 | 0.572972 | 3.695092 | false | false | false | false |
mcedit/mcedit | png.py | 1 | 143576 | #!/usr/bin/env python
# $URL: http://pypng.googlecode.com/svn/trunk/code/png.py $
# $Rev: 201 $
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENSE (The MIT License)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Changelog (recent first):
# 2009-03-11 David: interlaced bit depth < 8 (writing).
# 2009-03-10 David: interlaced bit depth < 8 (reading).
# 2009-03-04 David: Flat and Boxed pixel formats.
# 2009-02-26 David: Palette support (writing).
# 2009-02-23 David: Bit-depths < 8; better PNM support.
# 2006-06-17 Nicko: Reworked into a class, faster interlacing.
# 2006-06-17 Johann: Very simple prototype PNG decoder.
# 2006-06-17 Nicko: Test suite with various image generators.
# 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support.
# 2006-06-15 Johann: Scanline iterator interface for large input files.
# 2006-06-09 Johann: Very simple prototype PNG encoder.
# Incorporated into Bangai-O Development Tools by drj on 2009-02-11 from
# http://trac.browsershots.org/browser/trunk/pypng/lib/png.py?rev=2885
# Incorporated into pypng by drj on 2009-03-12 from
# //depot/prj/bangaio/master/code/png.py#67
"""
Pure Python PNG Reader/Writer
This Python module implements support for PNG images (see PNG
specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
and writes PNG files with all allowable bit depths (1/2/4/8/16/24/32/48/64
bits per pixel) and colour combinations: greyscale (1/2/4/8/16 bit); RGB,
RGBA, LA (greyscale with alpha) with 8/16 bits per channel; colour mapped
images (1/2/4/8 bit). Adam7 interlacing is supported for reading and
writing. A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer` classes.
Requires Python 2.3. Limited support is available for Python 2.2, but
not everything works. Best with Python 2.4 and higher. Installation is
trivial, but see the ``README.txt`` file (with the source distribution)
for details.
This file can also be used as a command-line utility to convert
`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the reverse conversion from PNG to
PNM. The interface is similar to that of the ``pnmtopng`` program from
Netpbm. Type ``python png.py --help`` at the shell prompt
for usage and a list of options.
A note on spelling and terminology
----------------------------------
Generally British English spelling is used in the documentation. So
that's "greyscale" and "colour". This not only matches the author's
native language, it's also used by the PNG specification.
The major colour models supported by PNG (and hence by PyPNG) are:
greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
referred to using the abbreviations: L, RGB, LA, RGBA. In this case
each letter abbreviates a single channel: *L* is for Luminance or Luma or
Lightness which is the channel used in greyscale images; *R*, *G*, *B* stand
for Red, Green, Blue, the components of a colour image; *A* stands for
Alpha, the opacity channel (used for transparency effects, but higher
values are more opaque, so it makes sense to call it opacity).
A note on formats
-----------------
When getting pixel data out of this module (reading) and presenting
data to this module (writing) there are a number of ways the data could
be represented as a Python value. Generally this module uses one of
three formats called "flat row flat pixel", "boxed row flat pixel", and
"boxed row boxed pixel". Basically the concern is whether each pixel
and each row comes in its own little tuple (box), or not.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Boxed row flat pixel::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list, but the pixels are flattened so that
three values for one pixel simply follow the three values for the previous
pixel. This is the most common format used, because it provides a good
compromise between space and convenience. PyPNG regards itself as
at liberty to replace any sequence type with any sufficiently compatible
other sequence type; in practice each row is an array (from the array
module), and the outer list is sometimes an iterator rather than an
explicit list (so that streaming is possible).
Flat row flat pixel::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
Boxed row boxed pixel::
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Each row appears in its own list, but each pixel also appears in its own
tuple. A serious memory burn in Python.
In all cases the top row comes first, and for each row the pixels are
ordered from left-to-right. Within a pixel the values appear in the
order, R-G-B-A (or L-A for greyscale--alpha).
There is a fourth format, mentioned because it is used internally,
is close to what lies inside a PNG file itself, and has some support
from the public API. This format is called packed. When packed,
each row is a sequence of bytes (integers from 0 to 255), just as
it is before PNG scanline filtering is applied. When the bit depth
is 8 this is essentially the same as boxed row flat pixel; when the
bit depth is less than 8, several pixels are packed into each byte;
when the bit depth is 16 (the only value more than 8 that is supported
by the PNG image format) each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer). This format is used by the
:meth:`Writer.write_packed` method. It isn't usually a convenient
format, but may be just right if the source data for the PNG image
comes from something that uses a similar format (for example, 1-bit
BMPs, or another PNG file).
And now, my famous members
--------------------------
"""
# http://www.python.org/doc/2.2.3/whatsnew/node5.html
from __future__ import generators
__version__ = "$URL: http://pypng.googlecode.com/svn/trunk/code/png.py $ $Rev: 201 $"
from array import array
try: # See :pyver:old
import itertools
except:
pass
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import struct
import sys
import zlib
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
# Public API of this module.
__all__ = ['Reader', 'Writer', 'write_chunks']

# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)

# Adam7 interlacing: one (xstart, ystart, xstep, ystep) tuple per
# reduced pass, in pass order.
_adam7 = ((0, 0, 8, 8),
          (4, 0, 8, 8),
          (0, 4, 4, 8),
          (2, 0, 4, 4),
          (0, 2, 2, 4),
          (1, 0, 2, 2),
          (0, 1, 1, 2))
def group(s, n):
    """Chop the sequence `s` into tuples of `n` elements; any leftover
    elements that do not fill a whole tuple are dropped.

    See http://www.python.org/doc/2.6/library/functions.html#zip
    """
    # n references to the *same* iterator: each zip step pulls n items.
    it = iter(s)
    return zip(*([it] * n))
def isarray(x):
    """Same as ``isinstance(x, array)`` except on Python 2.2, where it
    always returns ``False``.  This helps PyPNG work on Python 2.2.
    """
    try:
        result = isinstance(x, array)
    except:
        # Ancient Pythons cannot isinstance against the array type.
        result = False
    return result
try: # see :pyver:old
    # Feature-probe: old Pythons lack array.tostring.
    array.tostring
except:
    def tostring(row):
        # Fallback for Pythons without array.tostring: pack each byte
        # manually via struct.
        l = len(row)
        return struct.pack('%dB' % l, *row)
else:
    def tostring(row):
        """Convert row of bytes to string. Expects `row` to be an
        ``array``.
        """
        return row.tostring()
def interleave_planes(ipixels, apixels, ipsize, apsize):
    """
    Interleave (colour) planes, e.g. RGB + A = RGBA.

    Return an array whose pixels each consist of the `ipsize` values of
    the corresponding pixel in `ipixels` followed by the `apsize` values
    of the corresponding pixel in `apixels`.  Conventionally `ipixels`
    and `apixels` are byte arrays so the sizes are bytes, but any arrays
    of the same type work.  The returned array has the same type as the
    inputs (which should match each other).
    """
    ilen = len(ipixels)
    alen = len(apixels)
    out_len = ilen + alen
    out_psize = ipsize + apsize
    # Allocate an output array of the right type and total length by
    # concatenating both inputs; every element is then overwritten by
    # the strided slice assignments below.
    # See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
    out = array(ipixels.typecode)
    out.extend(ipixels)
    out.extend(apixels)
    # Scatter the colour-plane values into their interleaved positions.
    for k in range(ipsize):
        out[k:out_len:out_psize] = ipixels[k:ilen:ipsize]
    # Then the alpha-plane values, offset past the colour values.
    for k in range(apsize):
        out[ipsize + k:out_len:out_psize] = apixels[k:alen:apsize]
    return out
def check_palette(palette):
    """Validate a palette argument (to the :class:`Writer` class).

    Returns the palette as a list when valid; raises ``ValueError``
    otherwise.  ``None`` (the default, meaning no palette) passes
    through unchanged.
    """
    if palette is None:
        return None
    entries = list(palette)
    if not (0 < len(entries) <= 256):
        raise ValueError("a palette must have between 1 and 256 entries")
    seen_triple = False
    for i, entry in enumerate(entries):
        if len(entry) not in (3, 4):
            raise ValueError(
                "palette entry %d: entries must be 3- or 4-tuples." % i)
        if len(entry) == 3:
            seen_triple = True
        # PNG stores alphas as a prefix of the palette, so every RGBA
        # entry must come before the first RGB entry.
        if seen_triple and len(entry) == 4:
            raise ValueError(
                "palette entry %d: all 4-tuples must precede all 3-tuples" % i)
        for channel in entry:
            if int(channel) != channel or not(0 <= channel <= 255):
                raise ValueError(
                    "palette entry %d: values must be integer: 0 <= x <= 255" % i)
    return entries
class Error(Exception):
    """Base class for all PyPNG errors; subclasses override `prefix`."""
    prefix = 'Error'

    def __str__(self):
        # Render as "<prefix>: <space-joined args>".
        return '%s: %s' % (self.prefix, ' '.join(self.args))
class FormatError(Error):
    """Problem with input file format. In other words, PNG file does
    not conform to the specification in some way and is invalid.
    """
    # Overrides the prefix used by Error.__str__.
    prefix = 'FormatError'
class ChunkError(FormatError):
    """A FormatError specific to an individual malformed PNG chunk."""
    prefix = 'ChunkError'
class Writer:
    """
    PNG encoder in pure Python.

    Construct with the image geometry and colour model, then call one
    of the ``write*`` / ``convert*`` methods to emit a PNG stream.
    """
    def __init__(self, width=None, height=None,
                 size=None,
                 greyscale=False,
                 alpha=False,
                 bitdepth=8,
                 palette=None,
                 transparent=None,
                 background=None,
                 gamma=None,
                 compression=None,
                 interlace=False,
                 bytes_per_sample=None, # deprecated
                 planes=None,
                 colormap=None,
                 maxval=None,
                 chunk_limit=2 ** 20):
        """
        Create a PNG encoder object.

        Arguments:

        width, height
          Image size in pixels, as two separate arguments.
        size
          Image size (w,h) in pixels, as single argument.
        greyscale
          Input data is greyscale, not RGB.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16.
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level (1-9).
        interlace
          Create an interlaced image.
        chunk_limit
          Write multiple ``IDAT`` chunks to save memory.

        The image size (in pixels) can be specified either by using the
        `width` and `height` arguments, or with the single `size`
        argument.  If `size` is used it should be a pair (*width*,
        *height*).

        `greyscale` and `alpha` are booleans that specify whether
        an image is greyscale (or colour), and whether it has an
        alpha channel (or not).

        `bitdepth` specifies the bit depth of the source pixel values.
        Each source pixel values must be an integer between 0 and
        ``2**bitdepth-1``.  For example, 8-bit images have values
        between 0 and 255.  PNG only stores images with bit depths of
        1,2,4,8, or 16.  When `bitdepth` is not one of these values,
        the next highest valid bit depth is selected, and an ``sBIT``
        (significant bits) chunk is generated that specifies the original
        precision of the source image.  In this case the supplied pixel
        values will be rescaled to fit the range of the selected bit depth.

        The details of which bit depth / colour model combinations the
        PNG file format supports directly, are allowed are somewhat arcane
        (refer to the PNG specification for full details).  Briefly:
        "small" bit depths (1,2,4) are only allowed with greyscale and
        colour mapped images; colour mapped images cannot have bit depth
        16.

        For colour mapped images (in other words, when the `palette`
        argument is specified) the `bitdepth` argument must match one of
        the valid PNG bit depths: 1, 2, 4, or 8.  (It is valid to have a
        PNG image with a palette and an ``sBIT`` chunk, but the meaning
        is slightly different; it would be awkward to press the
        `bitdepth` argument into service for this.)

        The `palette` option, when specified, causes a colour mapped image
        to be created: the PNG colour type is set to 3; greyscale
        must not be set; alpha must not be set; transparent must
        not be set; the bit depth must be 1,2,4, or 8.  When a colour
        mapped image is created, the pixel values are palette indexes
        and the `bitdepth` argument specifies the size of these indexes
        (not the size of the colour values in the palette).

        The palette argument value should be a sequence of 3- or
        4-tuples.  3-tuples specify RGB palette entries; 4-tuples
        specify RGBA palette entries.  If both 4-tuples and 3-tuples
        appear in the sequence then all the 4-tuples must come
        before all the 3-tuples.  A ``PLTE`` chunk is created; if there
        are 4-tuples then a ``tRNS`` chunk is created as well.  The
        ``PLTE`` chunk will contain all the RGB triples in the same
        sequence; the ``tRNS`` chunk will contain the alpha channel for
        all the 4-tuples, in the same sequence.  Palette entries
        are always 8-bit.

        If specified, the `transparent` and `background` parameters must
        be a tuple with three integer values for red, green, blue, or
        a simple integer (or singleton tuple) for a greyscale image.

        If specified, the `gamma` parameter must be a positive number
        (generally, a float).  A ``gAMA`` chunk will be created.  Note that
        this will not change the values of the pixels as they appear in
        the PNG file, they are assumed to have already been converted
        appropriately for the gamma specified.

        The `compression` argument specifies the compression level
        to be used by the ``zlib`` module.  Higher values are likely
        to compress better, but will be slower to compress.  The
        default for this argument is ``None``; this does not mean
        no compression, rather it means that the default from the
        ``zlib`` module is used (which is generally acceptable).

        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*).  This does not
        affect how the pixels should be presented to the encoder, rather
        it changes how they are arranged into the PNG file.  On slow
        connexions interlaced images can be partially decoded by the
        browser to give a rough view of the image that is successively
        refined as more image data appears.

        .. note ::

          Enabling the `interlace` option requires the entire image
          to be processed in working memory.

        `chunk_limit` is used to limit the amount of memory used whilst
        compressing the image.  In order to avoid using large amounts of
        memory, multiple ``IDAT`` chunks may be created.
        """

        # At the moment the `planes` argument is ignored;
        # its purpose is to act as a dummy so that
        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
        # returned by Reader.read and friends.
        # Ditto for `colormap`.

        # A couple of helper functions come first.  Best skipped if you
        # are reading through.

        def isinteger(x):
            # True when x compares equal to its integer conversion;
            # False for anything int() cannot digest.
            try:
                return int(x) == x
            except:
                return False

        def check_color(c, which):
            """Checks that a colour argument for transparent or
            background options is the right form.  Also "corrects" bare
            integers to 1-tuples.
            """
            if c is None:
                return c
            if greyscale:
                try:
                    l = len(c)
                except TypeError:
                    # Bare integer: promote to 1-tuple.
                    c = (c,)
                if len(c) != 1:
                    raise ValueError("%s for greyscale must be 1-tuple" %
                        which)
                if not isinteger(c[0]):
                    raise ValueError(
                        "%s colour for greyscale must be integer" %
                        which)
            else:
                if not (len(c) == 3 and
                        isinteger(c[0]) and
                        isinteger(c[1]) and
                        isinteger(c[2])):
                    raise ValueError(
                        "%s colour must be a triple of integers" %
                        which)
            return c

        if size:
            if len(size) != 2:
                raise ValueError(
                    "size argument should be a pair (width, height)")
            if width is not None and width != size[0]:
                raise ValueError(
                    "size[0] (%r) and width (%r) should match when both are used."
                    % (size[0], width))
            if height is not None and height != size[1]:
                raise ValueError(
                    "size[1] (%r) and height (%r) should match when both are used."
                    % (size[1], height))
            width, height = size
            del size

        if width <= 0 or height <= 0:
            raise ValueError("width and height must be greater than zero")
        if not isinteger(width) or not isinteger(height):
            raise ValueError("width and height must be integers")
        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
        if width > 2 ** 32 - 1 or height > 2 ** 32 - 1:
            raise ValueError("width and height cannot exceed 2**32-1")

        if alpha and transparent is not None:
            raise ValueError(
                "transparent colour not allowed with alpha channel")

        if bytes_per_sample is not None:
            warnings.warn('please use bitdepth instead of bytes_per_sample',
                          DeprecationWarning)
            if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
                raise ValueError(
                    "bytes per sample must be .125, .25, .5, 1, or 2")
            bitdepth = int(8 * bytes_per_sample)
            del bytes_per_sample
        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
            raise ValueError("bitdepth (%r) must be a postive integer <= 16" %
                bitdepth)

        # rescale is None, or a (source, target) bit-depth pair recorded
        # in an sBIT chunk when the source depth is not a PNG depth.
        self.rescale = None
        if palette:
            if bitdepth not in (1, 2, 4, 8):
                raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
            if transparent is not None:
                raise ValueError("transparent and palette not compatible")
            if alpha:
                raise ValueError("alpha and palette not compatible")
            if greyscale:
                raise ValueError("greyscale and palette not compatible")
        else:
            # No palette, check for sBIT chunk generation.
            if alpha or not greyscale:
                if bitdepth not in (8, 16):
                    targetbitdepth = (8, 16)[bitdepth > 8]
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth
            else:
                assert greyscale
                assert not alpha
                if bitdepth not in (1, 2, 4, 8, 16):
                    if bitdepth > 8:
                        targetbitdepth = 16
                    elif bitdepth == 3:
                        targetbitdepth = 4
                    else:
                        assert bitdepth in (5, 6, 7)
                        targetbitdepth = 8
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth

        if bitdepth < 8 and (alpha or not greyscale and not palette):
            raise ValueError(
                "bitdepth < 8 only permitted with greyscale or palette")
        if bitdepth > 8 and palette:
            raise ValueError(
                "bit depth must be 8 or less for images with palette")

        transparent = check_color(transparent, 'transparent')
        background = check_color(background, 'background')

        # It's important that the true boolean values (greyscale, alpha,
        # colormap, interlace) are converted to bool because Iverson's
        # convention is relied upon later on.
        self.width = width
        self.height = height
        self.transparent = transparent
        self.background = background
        self.gamma = gamma
        self.greyscale = bool(greyscale)
        self.alpha = bool(alpha)
        self.colormap = bool(palette)
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        self.palette = check_palette(palette)

        # PNG colour type bit-field: 4 = alpha, 2 = colour, 1 = palette.
        self.color_type = 4 * self.alpha + 2 * (not greyscale) + 1 * self.colormap
        assert self.color_type in (0, 2, 3, 4, 6)

        self.color_planes = (3, 1)[self.greyscale or self.colormap]
        self.planes = self.color_planes + self.alpha
        # :todo: fix for bitdepth < 8
        self.psize = (self.bitdepth / 8) * self.planes
    def make_palette(self):
        """Create the byte sequences for a ``PLTE`` and if necessary a
        ``tRNS`` chunk.  Returned as a pair (*p*, *t*).  *t* will be
        ``None`` if no ``tRNS`` chunk is necessary.
        """
        p = array('B')
        t = array('B')

        for x in self.palette:
            # RGB triple always goes into PLTE ...
            p.extend(x[0:3])
            if len(x) > 3:
                # ... and a 4th element is this entry's alpha, for tRNS.
                t.append(x[3])
        p = tostring(p)
        t = tostring(t)
        if t:
            return p, t
        return p, None
    def write(self, outfile, rows):
        """Write a PNG image to the output file.  `rows` should be
        an iterable that yields each row in boxed row flat pixel format.

        The rows should be the rows of the original image, so there
        should be ``self.height`` rows of ``self.width * self.planes`` values.
        If `interlace` is specified (when creating the instance), then
        an interlaced PNG file will be written.  Supply the rows in the
        normal image order; the interlacing is carried out internally.

        .. note ::

          Interlacing will require the entire image to be in working memory.
        """
        if self.interlace:
            # Flatten all rows into a single array so the interlacing
            # pass can index pixels at random.
            fmt = 'BH'[self.bitdepth > 8]
            a = array(fmt, itertools.chain(*rows))
            return self.write_array(outfile, a)
        else:
            nrows = self.write_passes(outfile, rows)
            if nrows != self.height:
                raise ValueError(
                    "rows supplied (%d) does not match height (%d)" %
                    (nrows, self.height))
    def write_passes(self, outfile, rows, packed=False):
        """
        Write a PNG image to the output file.

        Most users are expected to find the :meth:`write` or
        :meth:`write_array` method more convenient.

        The rows should be given to this method in the order that
        they appear in the output file.  For straightlaced images,
        this is the usual top to bottom ordering, but for interlaced
        images the rows should have already been interlaced before
        passing them to this function.

        `rows` should be an iterable that yields each row.  When
        `packed` is ``False`` the rows should be in boxed row flat pixel
        format; when `packed` is ``True`` each row should be a packed
        sequence of bytes.

        Returns the number of rows consumed.
        """
        # http://www.w3.org/TR/PNG/#5PNG-file-signature
        outfile.write(_signature)

        # http://www.w3.org/TR/PNG/#11IHDR
        write_chunk(outfile, 'IHDR',
                    struct.pack("!2I5B", self.width, self.height,
                                self.bitdepth, self.color_type,
                                0, 0, self.interlace))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11gAMA
        if self.gamma is not None:
            write_chunk(outfile, 'gAMA',
                        struct.pack("!L", int(round(self.gamma * 1e5))))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11sBIT
        if self.rescale:
            write_chunk(outfile, 'sBIT',
                        struct.pack('%dB' % self.planes,
                                    *[self.rescale[0]] * self.planes))

        # :chunk:order: Without a palette (PLTE chunk), ordering is
        # relatively relaxed.  With one, gAMA chunk must precede PLTE
        # chunk which must precede tRNS and bKGD.
        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
        if self.palette:
            p, t = self.make_palette()
            write_chunk(outfile, 'PLTE', p)
            if t:
                # tRNS chunk is optional.  Only needed if palette entries
                # have alpha.
                write_chunk(outfile, 'tRNS', t)

        # http://www.w3.org/TR/PNG/#11tRNS
        if self.transparent is not None:
            if self.greyscale:
                write_chunk(outfile, 'tRNS',
                            struct.pack("!1H", *self.transparent))
            else:
                write_chunk(outfile, 'tRNS',
                            struct.pack("!3H", *self.transparent))

        # http://www.w3.org/TR/PNG/#11bKGD
        if self.background is not None:
            if self.greyscale:
                write_chunk(outfile, 'bKGD',
                            struct.pack("!1H", *self.background))
            else:
                write_chunk(outfile, 'bKGD',
                            struct.pack("!3H", *self.background))

        # http://www.w3.org/TR/PNG/#11IDAT
        if self.compression is not None:
            compressor = zlib.compressobj(self.compression)
        else:
            compressor = zlib.compressobj()

        # Choose an extend function based on the bitdepth.  The extend
        # function packs/decomposes the pixel values into bytes and
        # stuffs them onto the data array.
        data = array('B')
        if self.bitdepth == 8 or packed:
            extend = data.extend
        elif self.bitdepth == 16:
            # Decompose into bytes
            def extend(sl):
                fmt = '!%dH' % len(sl)
                data.extend(array('B', struct.pack(fmt, *sl)))
        else:
            # Pack into bytes
            assert self.bitdepth < 8
            # samples per byte
            spb = int(8 / self.bitdepth)

            def extend(sl):
                a = array('B', sl)
                # Adding padding bytes so we can group into a whole
                # number of spb-tuples.
                l = float(len(a))
                extra = math.ceil(l / float(spb)) * spb - l
                a.extend([0] * int(extra))
                # Pack into bytes
                l = group(a, spb)
                l = map(lambda e: reduce(lambda x, y:
                    (x << self.bitdepth) + y, e), l)
                data.extend(l)
        if self.rescale:
            # Wrap extend so every sample is scaled from the source
            # bit depth into the target PNG bit depth first.
            oldextend = extend
            factor = \
                float(2 ** self.rescale[1] - 1) / float(2 ** self.rescale[0] - 1)

            def extend(sl):
                oldextend(map(lambda x: int(round(factor * x)), sl))

        # Build the first row, testing mostly to see if we need to
        # changed the extend function to cope with NumPy integer types
        # (they cause our ordinary definition of extend to fail, so we
        # wrap it).  See
        # http://code.google.com/p/pypng/issues/detail?id=44
        enumrows = enumerate(rows)
        del rows

        # First row's filter type.
        data.append(0)
        # :todo: Certain exceptions in the call to ``.next()`` or the
        # following try would indicate no row data supplied.
        # Should catch.
        i, row = enumrows.next()
        try:
            # If this fails...
            extend(row)
        except:
            # ... try a version that converts the values to int first.
            # Not only does this work for the (slightly broken) NumPy
            # types, there are probably lots of other, unknown, "nearly"
            # int types it works for.
            def wrapmapint(f):
                return lambda sl: f(map(int, sl))
            extend = wrapmapint(extend)
            del wrapmapint
            extend(row)

        for i, row in enumrows:
            # Add "None" filter type.  Currently, it's essential that
            # this filter type be used for every scanline as we do not
            # mark the first row of a reduced pass image; that means we
            # could accidentally compute the wrong filtered scanline if
            # we used "up", "average", or "paeth" on such a line.
            data.append(0)
            extend(row)
            if len(data) > self.chunk_limit:
                compressed = compressor.compress(tostring(data))
                if len(compressed):
                    # print >> sys.stderr, len(data), len(compressed)
                    write_chunk(outfile, 'IDAT', compressed)
                # Because of our very witty definition of ``extend``,
                # above, we must re-use the same ``data`` object.  Hence
                # we use ``del`` to empty this one, rather than create a
                # fresh one (which would be my natural FP instinct).
                del data[:]
        if len(data):
            compressed = compressor.compress(tostring(data))
        else:
            compressed = ''
        flushed = compressor.flush()
        if len(compressed) or len(flushed):
            # print >> sys.stderr, len(data), len(compressed), len(flushed)
            write_chunk(outfile, 'IDAT', compressed + flushed)
        # http://www.w3.org/TR/PNG/#11IEND
        write_chunk(outfile, 'IEND')
        return i + 1
    def write_array(self, outfile, pixels):
        """
        Write an array in flat row flat pixel format as a PNG file on
        the output file.  See also :meth:`write` method.
        """
        # Interlaced output needs random access to pixels, hence the
        # dedicated scanline generator.
        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))
    def write_packed(self, outfile, rows):
        """
        Write PNG file to `outfile`.  The pixel data comes from `rows`
        which should be in boxed row packed format.  Each row should be
        a sequence of packed bytes.

        Technically, this method does work for interlaced images but it
        is best avoided.  For interlaced images, the rows should be
        presented in the order that they appear in the file.

        This method should not be used when the source image bit depth
        is not one naturally supported by PNG; the bit depth should be
        1, 2, 4, 8, or 16.
        """
        # Packed rows cannot be rescaled, so a non-PNG bit depth is an error.
        if self.rescale:
            raise Error("write_packed method not suitable for bit depth %d" %
                        self.rescale[0])
        return self.write_passes(outfile, rows, packed=True)
    def convert_pnm(self, infile, outfile):
        """
        Convert a PNM file containing raw pixel data into a PNG file
        with the parameters set in the writer object.  Works for
        (binary) PGM, PPM, and PAM formats.
        """
        if self.interlace:
            # Interlacing needs the whole image in memory at once.
            pixels = array('B')
            pixels.fromfile(infile,
                            (self.bitdepth / 8) * self.color_planes *
                            self.width * self.height)
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            # Straightlaced: stream one scanline at a time from the file.
            self.write_passes(outfile, self.file_scanlines(infile))
    def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
        """
        Convert a PPM and PGM file containing raw pixel data into a
        PNG outfile with the parameters set in the writer object.

        The colour planes come from `ppmfile`; `pgmfile` supplies the
        alpha plane, which is interleaved in to make RGBA pixels.
        """
        pixels = array('B')
        pixels.fromfile(ppmfile,
                        (self.bitdepth / 8) * self.color_planes *
                        self.width * self.height)
        apixels = array('B')
        apixels.fromfile(pgmfile,
                         (self.bitdepth / 8) *
                         self.width * self.height)
        pixels = interleave_planes(pixels, apixels,
                                   (self.bitdepth / 8) * self.color_planes,
                                   (self.bitdepth / 8))
        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))
    def file_scanlines(self, infile):
        """
        Generates boxed rows in flat pixel format, from the input file
        `infile`.  It assumes that the input file is in a "Netpbm-like"
        binary format, and is positioned at the beginning of the first
        pixel.  The number of pixels to read is taken from the image
        dimensions (`width`, `height`, `planes`) and the number of bytes
        per value is implied by the image `bitdepth`.
        """
        # Values per row
        vpr = self.width * self.planes
        row_bytes = vpr
        if self.bitdepth > 8:
            assert self.bitdepth == 16
            row_bytes *= 2
            # Netpbm 16-bit samples are big-endian, hence '>'.
            fmt = '>%dH' % vpr

            def line():
                return array('H', struct.unpack(fmt, infile.read(row_bytes)))
        else:
            def line():
                scanline = array('B', infile.read(row_bytes))
                return scanline
        for y in range(self.height):
            yield line()
def array_scanlines(self, pixels):
"""
Generates boxed rows (flat pixels) from flat rows (flat pixels)
in an array.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
    def array_scanlines_interlace(self, pixels):
        """
        Generator for interlaced scanlines from an array.  `pixels` is
        the full source image in flat row flat pixel format.  The
        generator yields each scanline of the reduced passes in turn, in
        boxed row flat pixel format.
        """
        # http://www.w3.org/TR/PNG/#8InterlaceMethods
        # Array type.
        fmt = 'BH'[self.bitdepth > 8]
        # Value per row
        vpr = self.width * self.planes
        for xstart, ystart, xstep, ystep in _adam7:
            # Pass contributes no pixels for very narrow images.
            if xstart >= self.width:
                continue
            # Pixels per row (of reduced image)
            ppr = int(math.ceil((self.width - xstart) / float(xstep)))
            # number of values in reduced image row.
            row_len = ppr * self.planes
            for y in range(ystart, self.height, ystep):
                if xstep == 1:
                    # Full-width pass: the row can be sliced out whole.
                    offset = y * vpr
                    yield pixels[offset:offset + vpr]
                else:
                    row = array(fmt)
                    # There's no easier way to set the length of an array
                    row.extend(pixels[0:row_len])
                    # Gather every xstep-th pixel via strided slices,
                    # one colour plane at a time.
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y + 1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        row[i::self.planes] = \
                            pixels[offset + i:end_offset:skip]
                    yield row
def write_chunk(outfile, tag, data=''):
    """
    Write a PNG chunk to the output file, including length and
    checksum.

    `tag` is the 4-character chunk type; `data` is the chunk payload
    (may be empty, e.g. for ``IEND``).
    """
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    # The CRC covers the tag and the data, but not the length word.
    checksum = zlib.crc32(tag)
    checksum = zlib.crc32(data, checksum)
    # "!i" (signed): zlib.crc32 can return a negative int on Python 2.
    outfile.write(struct.pack("!i", checksum))
def write_chunks(out, chunks):
    """Create a PNG file by writing out the chunks.

    `chunks` is an iterable of (tag, data) pairs, each passed to
    :func:`write_chunk` after the PNG signature is emitted.
    """
    out.write(_signature)
    for chunk in chunks:
        write_chunk(out, *chunk)
def filter_scanline(type, line, fo, prev=None):
    """Apply a scanline filter to a scanline.  `type` specifies the
    filter type (0 to 4); `line` specifies the current (unfiltered)
    scanline as a sequence of bytes; `prev` specifies the previous
    (unfiltered) scanline as a sequence of bytes.  `fo` specifies the
    filter offset; normally this is size of a pixel in bytes (the number
    of bytes per sample times the number of channels), but when this is
    < 1 (for bit depths < 8) then the filter offset is 1.
    """
    assert 0 <= type < 5

    # The output array.  Which, pathetically, we extend one-byte at a
    # time (fortunately this is linear).
    out = array('B', [type])

    def sub():
        # Filter type 1: subtract the byte `fo` positions to the left.
        ai = -fo
        for x in line:
            if ai >= 0:
                x = (x - line[ai]) & 0xff
            out.append(x)
            ai += 1

    def up():
        # Filter type 2: subtract the byte directly above.
        for i, x in enumerate(line):
            x = (x - prev[i]) & 0xff
            out.append(x)

    def average():
        # Filter type 3: subtract the floor-mean of left and above.
        ai = -fo
        for i, x in enumerate(line):
            if ai >= 0:
                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
            else:
                x = (x - (prev[i] >> 1)) & 0xff
            out.append(x)
            ai += 1

    def paeth():
        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
        ai = -fo # also used for ci
        for i, x in enumerate(line):
            a = 0
            b = prev[i]
            c = 0
            if ai >= 0:
                a = line[ai]
                c = prev[ai]
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                Pr = a
            elif pb <= pc:
                Pr = b
            else:
                Pr = c
            x = (x - Pr) & 0xff
            out.append(x)
            ai += 1

    if not prev:
        # We're on the first line.  Some of the filters can be reduced
        # to simpler cases which makes handling the line "off the top"
        # of the image simpler.  "up" becomes "none"; "paeth" becomes
        # "left" (non-trivial, but true).  "average" needs to be handled
        # specially.
        if type == 2: # "up"
            # NOTE(review): this path returns the raw line without the
            # leading filter-type byte, unlike every other path — confirm
            # callers expect this.
            return line # type = 0
        elif type == 3:
            prev = [0] * len(line)
        elif type == 4: # "paeth"
            type = 1
    if type == 0:
        out.extend(line)
    elif type == 1:
        sub()
    elif type == 2:
        up()
    elif type == 3:
        average()
    else: # type == 4
        paeth()
    return out
class _readable:
    """
    A simple file-like interface for strings and arrays.

    Supports just enough of the file protocol (a ``read`` method) for
    the PNG decoder to consume in-memory data.
    """
    def __init__(self, buf):
        # The backing sequence (string or array) and the current
        # read position within it.
        self.buf = buf
        self.offset = 0
    def read(self, n):
        """Return the next `n` items of the buffer as a byte string."""
        start = self.offset
        self.offset = start + n
        chunk = self.buf[start:start + n]
        if isarray(chunk):
            chunk = chunk.tostring()
        return chunk
class Reader:
"""
PNG decoder in pure Python.
"""
def __init__(self, _guess=None, **kw):
    """
    Create a PNG decoder object.
    The constructor expects exactly one keyword argument. If you
    supply a positional argument instead, it will guess the input
    type. You can choose among the following keyword arguments:
    filename
      Name of input file (a PNG file).
    file
      A file-like object (object with a read() method).
    bytes
      ``array`` or ``string`` with PNG data.

    Raises :exc:`TypeError` if not exactly one input source is given.
    """
    if ((_guess is not None and len(kw) != 0) or
            (_guess is None and len(kw) != 1)):
        raise TypeError("Reader() takes exactly 1 argument")
    # Will be the first 8 bytes, later on. See validate_signature.
    self.signature = None
    self.transparent = None
    # A pair of (len,type) if a chunk has been read but its data and
    # checksum have not (in other words the file position is just
    # past the 4 bytes that specify the chunk type). See preamble
    # method for how this is used.
    self.atchunk = None
    if _guess is not None:
        # Guess the input type from the positional argument:
        # array -> bytes, string -> filename, file-like -> file.
        # (`unicode` is the Python 2 text type.)
        if isarray(_guess):
            kw["bytes"] = _guess
        elif isinstance(_guess, (str, unicode)):
            kw["filename"] = _guess
        elif hasattr(_guess, "read"):
            kw["file"] = _guess
    if "filename" in kw:
        # `file` here is the Python 2 builtin open function.
        self.file = file(kw["filename"], "rb")
    elif "file" in kw:
        self.file = kw["file"]
    elif "bytes" in kw:
        # Wrap raw data in a minimal file-like adapter.
        self.file = _readable(kw["bytes"])
    else:
        raise TypeError("expecting filename, file or bytes array")
def chunk(self, seek=None):
    """
    Read the next PNG chunk from the input file; returns type (as a 4
    character string) and data. If the optional `seek` argument is
    specified then it will keep reading chunks until it either runs
    out of file or finds the type specified by the argument. Note
    that in general the order of chunks in PNGs is unspecified, so
    using `seek` can cause you to miss chunks.

    Raises :exc:`ChunkError` on truncated data or checksum mismatch.
    """
    self.validate_signature()
    while True:
        # http://www.w3.org/TR/PNG/#5Chunk-layout
        # `atchunk` caches a (length, type) header already read by
        # preamble()/chunklentype(); consume it if present.
        if not self.atchunk:
            self.atchunk = self.chunklentype()
        length, type = self.atchunk
        self.atchunk = None
        data = self.file.read(length)
        if len(data) != length:
            raise ChunkError('Chunk %s too short for required %i octets.'
                % (type, length))
        checksum = self.file.read(4)
        if len(checksum) != 4:
            # NOTE: deliberately a ValueError (not ChunkError) -- the
            # read() method catches ValueError and converts it to
            # ChunkError itself.
            raise ValueError('Chunk %s too short for checksum.', type)
        if seek and type != seek:
            continue
        # The CRC covers the chunk type and data, not the length.
        verify = zlib.crc32(type)
        verify = zlib.crc32(data, verify)
        # Whether the output from zlib.crc32 is signed or not varies
        # according to hideous implementation details, see
        # http://bugs.python.org/issue1202 .
        # We coerce it to be positive here (in a way which works on
        # Python 2.3 and older).
        verify &= 2 ** 32 - 1
        verify = struct.pack('!I', verify)
        if checksum != verify:
            # print repr(checksum)
            (a,) = struct.unpack('!I', checksum)
            (b,) = struct.unpack('!I', verify)
            raise ChunkError(
                "Checksum error in %s chunk: 0x%08X != 0x%08X." %
                (type, a, b))
        return type, data
def chunks(self):
    """Return an iterator that will yield each chunk as a
    (*chunktype*, *content*) pair, up to and including the
    terminating ``IEND`` chunk.
    """
    while True:
        chunk_type, content = self.chunk()
        yield chunk_type, content
        if chunk_type == 'IEND':
            break
def undo_filter(self, filter_type, scanline, previous):
    """Undo the filter for a scanline. `scanline` is a sequence of
    bytes that does not include the initial filter type byte.
    `previous` is decoded previous scanline (for straightlaced
    images this is the previous pixel row, but for interlaced
    images, it is the previous scanline in the reduced image, which
    in general is not the previous pixel row in the final image).
    When there is no previous scanline (the first row of a
    straightlaced image, or the first row in one of the passes in an
    interlaced image), then this argument should be ``None``.
    The scanline will have the effects of filtering removed, and the
    result will be returned as a fresh sequence of bytes.

    Raises :exc:`FormatError` for filter types outside 0..4.
    """
    # :todo: Would it be better to update scanline in place?
    # Create the result byte array. It seems that the best way to
    # create the array to be the right size is to copy from an
    # existing sequence. *sigh*
    # If we fill the result with scanline, then this allows a
    # micro-optimisation in the "null" and "sub" cases.
    result = array('B', scanline)
    if filter_type == 0:
        # And here, we _rely_ on filling the result with scanline,
        # above.
        return result
    if filter_type not in (1, 2, 3, 4):
        raise FormatError('Invalid PNG Filter Type.'
            ' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
    # Filter unit. The stride from one pixel to the corresponding
    # byte from the previous previous. Normally this is the pixel
    # size in bytes, but when this is smaller than 1, the previous
    # byte is used instead.
    fu = max(1, self.psize)
    # For the first line of a pass, synthesize a dummy previous
    # line. An alternative approach would be to observe that on the
    # first line 'up' is the same as 'null', 'paeth' is the same
    # as 'sub', with only 'average' requiring any special case.
    if not previous:
        previous = array('B', [0] * len(scanline))
    def sub():
        """Undo sub filter."""
        ai = 0
        # Loops starts at index fu. Observe that the initial part
        # of the result is already filled in correctly with
        # scanline.
        for i in range(fu, len(result)):
            x = scanline[i]
            a = result[ai]
            result[i] = (x + a) & 0xff
            ai += 1
    def up():
        """Undo up filter."""
        for i in range(len(result)):
            x = scanline[i]
            b = previous[i]
            result[i] = (x + b) & 0xff
    def average():
        """Undo average filter."""
        ai = -fu
        for i in range(len(result)):
            x = scanline[i]
            if ai < 0:
                # No byte to the left; treat it as zero.
                a = 0
            else:
                a = result[ai]
            b = previous[i]
            result[i] = (x + ((a + b) >> 1)) & 0xff
            ai += 1
    def paeth():
        """Undo Paeth filter."""
        # Also used for ci.
        ai = -fu
        for i in range(len(result)):
            x = scanline[i]
            if ai < 0:
                # Left and upper-left bytes are off the edge.
                a = c = 0
            else:
                a = result[ai]
                c = previous[ai]
            b = previous[i]
            # Paeth predictor: pick whichever of a, b, c is closest
            # to p = a + b - c (ties favour a, then b).
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                pr = a
            elif pb <= pc:
                pr = b
            else:
                pr = c
            result[i] = (x + pr) & 0xff
            ai += 1
    # Call appropriate filter algorithm. Note that 0 has already
    # been dealt with.
    (None, sub, up, average, paeth)[filter_type]()
    return result
def deinterlace(self, raw):
    """
    Read raw pixel data, undo filters, deinterlace, and flatten.
    Return in flat row flat pixel format.

    `raw` is the concatenated, decompressed IDAT data for an Adam7
    interlaced image.
    """
    # print >> sys.stderr, ("Reading interlaced, w=%s, r=%s, planes=%s," +
    # " bpp=%s") % (self.width, self.height, self.planes, self.bps)
    # Values per row (of the target image)
    vpr = self.width * self.planes
    # Make a result array, and make it big enough. Interleaving
    # writes to the output array randomly (well, not quite), so the
    # entire output array must be in memory.
    fmt = 'BH'[self.bitdepth > 8]
    a = array(fmt, [0] * vpr * self.height)
    source_offset = 0
    # Process the seven Adam7 passes in order.
    for xstart, ystart, xstep, ystep in _adam7:
        # print >> sys.stderr, "Adam7: start=%s,%s step=%s,%s" % (
        # xstart, ystart, xstep, ystep)
        if xstart >= self.width:
            # Pass contributes no pixels for this image width.
            continue
        # The previous (reconstructed) scanline. None at the
        # beginning of a pass to indicate that there is no previous
        # line.
        recon = None
        # Pixels per row (reduced pass image)
        ppr = int(math.ceil((self.width - xstart) / float(xstep)))
        # Row size in bytes for this pass.
        row_size = int(math.ceil(self.psize * ppr))
        for y in range(ystart, self.height, ystep):
            # Each scanline is preceded by a one-byte filter type.
            filter_type = raw[source_offset]
            source_offset += 1
            scanline = raw[source_offset:source_offset + row_size]
            source_offset += row_size
            recon = self.undo_filter(filter_type, scanline, recon)
            # Convert so that there is one element per pixel value
            flat = self.serialtoflat(recon, ppr)
            if xstep == 1:
                # Full-width pass: copy the whole row at once.
                assert xstart == 0
                offset = y * vpr
                a[offset:offset + vpr] = flat
            else:
                # Scatter the pass's pixels into the target row
                # using extended slices, one plane at a time.
                offset = y * vpr + xstart * self.planes
                end_offset = (y + 1) * vpr
                skip = self.planes * xstep
                for i in range(self.planes):
                    a[offset + i:end_offset:skip] = \
                        flat[i::self.planes]
    return a
def iterboxed(self, rows):
    """Iterator that yields each scanline in boxed row flat pixel
    format. `rows` should be an iterator that yields the bytes of
    each row in turn.
    """
    def asvalues(raw):
        """Convert a row of raw bytes into a flat row. Result may
        or may not share with argument"""
        if self.bitdepth == 8:
            # One byte per sample already.
            return raw
        if self.bitdepth == 16:
            # Pairs of bytes form big-endian 16-bit samples.
            raw = tostring(raw)
            return array('H', struct.unpack('!%dH' % (len(raw) // 2), raw))
        assert self.bitdepth < 8
        width = self.width
        # Samples per byte
        spb = 8 // self.bitdepth
        out = array('B')
        mask = 2 ** self.bitdepth - 1
        # Shift amounts to extract each packed sample, most
        # significant first.  (Python 2: map returns a list, which
        # is re-iterated for every byte below.)
        shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
        for o in raw:
            out.extend(map(lambda i: mask & (o >> i), shifts))
        # Trim padding samples from the final partial byte.
        return out[:width]
    return itertools.imap(asvalues, rows)
def serialtoflat(self, bytes, width=None):
    """Convert serial format (byte stream) pixel data to flat row
    flat pixel.

    `width` is the row width in pixels; defaults to the image width.
    """
    if self.bitdepth == 8:
        # Already one byte per sample.
        return bytes
    if self.bitdepth == 16:
        # Pairs of bytes form big-endian 16-bit samples.
        bytes = tostring(bytes)
        return array('H',
            struct.unpack('!%dH' % (len(bytes) // 2), bytes))
    assert self.bitdepth < 8
    if width is None:
        width = self.width
    # Samples per byte
    spb = 8 // self.bitdepth
    out = array('B')
    mask = 2 ** self.bitdepth - 1
    # Shift amounts to unpack each sample, most significant first.
    # (Python 2: map returns a list, sliced below.)
    shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
    # `l` counts down the samples remaining in the current row so
    # that padding bits at the end of each row are discarded.
    l = width
    for o in bytes:
        out.extend(map(lambda i: mask & (o >> i), shifts)[:l])
        l -= spb
        if l <= 0:
            l = width
    return out
def iterstraight(self, raw):
    """Iterator that undoes the effect of filtering, and yields each
    row in serialised format (as a sequence of bytes). Assumes input
    is straightlaced. `raw` should be an iterable that yields the
    raw bytes in chunks of arbitrary size."""
    # Each serialised row is one filter-type byte plus row_bytes of
    # pixel data.
    rb = self.row_bytes
    pending = array('B')
    # Previously reconstructed scanline; None for the first row.
    recon = None
    for piece in raw:
        pending.extend(piece)
        # Emit every complete row currently buffered.
        while len(pending) >= rb + 1:
            ftype = pending[0]
            scanline = pending[1:rb + 1]
            del pending[:rb + 1]
            recon = self.undo_filter(ftype, scanline, recon)
            yield recon
    if len(pending) != 0:
        # :file:format We get here with a file format error: when the
        # available bytes (after decompressing) do not pack into exact
        # rows.
        raise FormatError(
            'Wrong size for decompressed IDAT chunk.')
    assert len(pending) == 0
def validate_signature(self):
    """If signature (header) has not been read then read and
    validate it; otherwise do nothing.  Idempotent.
    """
    if self.signature:
        # Already read and checked.
        return
    sig = self.file.read(8)
    self.signature = sig
    if sig != _signature:
        raise FormatError("PNG file has invalid signature.")
def preamble(self):
    """
    Extract the image metadata by reading the initial part of the PNG
    file up to the start of the ``IDAT`` chunk. All the chunks that
    precede the ``IDAT`` chunk are read and either processed for
    metadata or discarded.
    """
    self.validate_signature()
    while True:
        # Peek at the next chunk header unless one is already cached.
        if self.atchunk is None:
            self.atchunk = self.chunklentype()
        if self.atchunk is None:
            raise FormatError(
                'This PNG file has no IDAT chunks.')
        if self.atchunk[1] == 'IDAT':
            # Stop with the IDAT header cached in self.atchunk.
            return
        self.process_chunk()
def chunklentype(self):
    """Reads just enough of the input to determine the next
    chunk's length and type, returned as a (*length*, *type*) pair
    where *type* is a string. If there are no more chunks, ``None``
    is returned.
    """
    header = self.file.read(8)
    if not header:
        # Clean end of file: no more chunks.
        return None
    if len(header) != 8:
        raise FormatError(
            'End of file whilst reading chunk length and type.')
    length, chunk_type = struct.unpack('!I4s', header)
    # PNG forbids chunk lengths that use the top bit.
    if length > 2 ** 31 - 1:
        raise FormatError('Chunk %s is too large: %d.' % (chunk_type, length))
    return length, chunk_type
def process_chunk(self):
    """Process the next chunk and its data. This only processes the
    following chunk types, all others are ignored: ``IHDR``,
    ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``.

    Updates decoder state attributes (dimensions, bit depth, palette,
    transparency, etc.) as a side effect.
    """
    type, data = self.chunk()
    if type == 'IHDR':
        # http://www.w3.org/TR/PNG/#11IHDR
        if len(data) != 13:
            raise FormatError('IHDR chunk has incorrect length.')
        (self.width, self.height, self.bitdepth, self.color_type,
         self.compression, self.filter,
         self.interlace) = struct.unpack("!2I5B", data)
        # Check that the header specifies only valid combinations.
        if self.bitdepth not in (1, 2, 4, 8, 16):
            raise Error("invalid bit depth %d" % self.bitdepth)
        if self.color_type not in (0, 2, 3, 4, 6):
            raise Error("invalid colour type %d" % self.color_type)
        # Check indexed (palettized) images have 8 or fewer bits
        # per pixel; check only indexed or greyscale images have
        # fewer than 8 bits per pixel.
        if ((self.color_type & 1 and self.bitdepth > 8) or
                (self.bitdepth < 8 and self.color_type not in (0, 3))):
            raise FormatError("Illegal combination of bit depth (%d)"
                " and colour type (%d)."
                " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
                % (self.bitdepth, self.color_type))
        if self.compression != 0:
            raise Error("unknown compression method %d" % self.compression)
        if self.filter != 0:
            raise FormatError("Unknown filter method %d,"
                " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
                % self.filter)
        if self.interlace not in (0, 1):
            raise FormatError("Unknown interlace method %d,"
                " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
                % self.interlace)
        # Derived values
        # http://www.w3.org/TR/PNG/#6Colour-values
        colormap = bool(self.color_type & 1)
        greyscale = not (self.color_type & 2)
        alpha = bool(self.color_type & 4)
        color_planes = (3, 1)[greyscale or colormap]
        planes = color_planes + alpha
        self.colormap = colormap
        self.greyscale = greyscale
        self.alpha = alpha
        self.color_planes = color_planes
        self.planes = planes
        # Pixel size in bytes; may be fractional for bit depths < 8.
        self.psize = float(self.bitdepth) / float(8) * planes
        if int(self.psize) == self.psize:
            self.psize = int(self.psize)
        self.row_bytes = int(math.ceil(self.width * self.psize))
        # Stores PLTE chunk if present, and is used to check
        # chunk ordering constraints.
        self.plte = None
        # Stores tRNS chunk if present, and is used to check chunk
        # ordering constraints.
        self.trns = None
        # Stores sbit chunk if present.
        self.sbit = None
    elif type == 'PLTE':
        # http://www.w3.org/TR/PNG/#11PLTE
        if self.plte:
            warnings.warn("Multiple PLTE chunks present.")
        self.plte = data
        if len(data) % 3 != 0:
            raise FormatError(
                "PLTE chunk's length should be a multiple of 3.")
        if len(data) > (2 ** self.bitdepth) * 3:
            raise FormatError("PLTE chunk is too long.")
        if len(data) == 0:
            raise FormatError("Empty PLTE is not allowed.")
    elif type == 'bKGD':
        try:
            if self.colormap:
                if not self.plte:
                    warnings.warn(
                        "PLTE chunk is required before bKGD chunk.")
                # Single palette index.
                self.background = struct.unpack('B', data)
            else:
                # One 16-bit value per colour plane.
                self.background = struct.unpack("!%dH" % self.color_planes,
                    data)
        except struct.error:
            raise FormatError("bKGD chunk has incorrect length.")
    elif type == 'tRNS':
        # http://www.w3.org/TR/PNG/#11tRNS
        self.trns = data
        if self.colormap:
            if not self.plte:
                warnings.warn("PLTE chunk is required before tRNS chunk.")
            else:
                # At most one alpha entry per palette entry.
                if len(data) > len(self.plte) / 3:
                    # Was warning, but promoted to Error as it
                    # would otherwise cause pain later on.
                    raise FormatError("tRNS chunk is too long.")
        else:
            if self.alpha:
                raise FormatError(
                    "tRNS chunk is not valid with colour type %d." %
                    self.color_type)
            try:
                self.transparent = \
                    struct.unpack("!%dH" % self.color_planes, data)
            except struct.error:
                raise FormatError("tRNS chunk has incorrect length.")
    elif type == 'gAMA':
        try:
            # Gamma is stored as an integer scaled by 100000.
            self.gamma = struct.unpack("!L", data)[0] / 100000.0
        except struct.error:
            raise FormatError("gAMA chunk has incorrect length.")
    elif type == 'sBIT':
        self.sbit = data
        # Colormapped images always have 3 sBIT entries; otherwise
        # one per plane.
        if (self.colormap and len(data) != 3 or
                not self.colormap and len(data) != self.planes):
            raise FormatError("sBIT chunk has incorrect length.")
def read(self):
    """
    Read the PNG file and decode it. Returns (`width`, `height`,
    `pixels`, `metadata`).
    May use excessive memory.
    `pixels` are returned in boxed row flat pixel format.
    """
    def iteridat():
        """Iterator that yields all the ``IDAT`` chunks as strings."""
        while True:
            try:
                type, data = self.chunk()
            except ValueError, e:
                # chunk() raises ValueError for a truncated checksum;
                # convert to the decoder's ChunkError here.
                raise ChunkError(e.args[0])
            if type == 'IEND':
                # http://www.w3.org/TR/PNG/#11IEND
                break
            if type != 'IDAT':
                continue
            # type == 'IDAT'
            # http://www.w3.org/TR/PNG/#11IDAT
            if self.colormap and not self.plte:
                warnings.warn("PLTE chunk is required before IDAT chunk")
            yield data
    def iterdecomp(idat):
        """Iterator that yields decompressed strings. `idat` should
        be an iterator that yields the ``IDAT`` chunk data.
        """
        # Currently, with no max_length parameter to decompress, this
        # routine will do one yield per IDAT chunk. So not very
        # incremental.
        d = zlib.decompressobj()
        # The decompression loop:
        # Decompress an IDAT chunk, then decompress any remaining
        # unused data until the unused data does not get any
        # smaller. Add the unused data to the front of the input
        # and loop to process the next IDAT chunk.
        cdata = ''
        for data in idat:
            # :todo: add a max_length argument here to limit output
            # size.
            yield array('B', d.decompress(cdata + data))
        yield array('B', d.flush())
    self.preamble()
    raw = iterdecomp(iteridat())
    if self.interlace:
        # Interlaced images must be fully buffered and deinterlaced.
        raw = array('B', itertools.chain(*raw))
        arraycode = 'BH'[self.bitdepth > 8]
        # Like :meth:`group` but producing an array.array object for
        # each row.
        pixels = itertools.imap(lambda * row: array(arraycode, row),
            *[iter(self.deinterlace(raw))] * self.width * self.planes)
    else:
        pixels = self.iterboxed(self.iterstraight(raw))
    meta = dict()
    for attr in 'greyscale alpha planes bitdepth interlace'.split():
        meta[attr] = getattr(self, attr)
    meta['size'] = (self.width, self.height)
    # Optional metadata: only present when the corresponding chunk
    # was seen.
    for attr in 'gamma transparent background'.split():
        a = getattr(self, attr, None)
        if a is not None:
            meta[attr] = a
    return self.width, self.height, pixels, meta
def read_flat(self):
    """
    Read a PNG file and decode it into flat row flat pixel format.
    Returns (*width*, *height*, *pixels*, *metadata*).
    May use excessive memory.
    `pixels` are returned in flat row flat pixel format.
    See also the :meth:`read` method which returns pixels in the
    more stream-friendly boxed row flat pixel format.
    """
    width, height, boxed, meta = self.read()
    # Flatten the boxed rows into a single one-dimensional array.
    code = 'BH'[meta['bitdepth'] > 8]
    flat = array(code, itertools.chain(*boxed))
    return width, height, flat, meta
def palette(self, alpha='natural'):
    """Returns a palette that is a sequence of 3-tuples or 4-tuples,
    synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
    chunks should have already been processed (for example, by
    calling the :meth:`preamble` method). All the tuples are the
    same size, 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
    there is a ``tRNS`` chunk. Assumes that the image is colour type
    3 and therefore a ``PLTE`` chunk is required.
    If the `alpha` argument is ``'force'`` then an alpha channel is
    always added, forcing the result to be a sequence of 4-tuples.
    """
    if not self.plte:
        raise FormatError(
            "Required PLTE chunk is missing in colour type 3 image.")
    # Split the raw PLTE data into (R, G, B) triples.
    plte = group(array('B', self.plte), 3)
    if self.trns or alpha == 'force':
        # Alpha values from tRNS; palette entries beyond the tRNS
        # data are fully opaque (255).
        trns = array('B', self.trns or '')
        trns.extend([255] * (len(plte) - len(trns)))
        # Append each alpha as a fourth tuple element.
        # (Python 2: map over two sequences returns a list.)
        plte = map(operator.add, plte, group(trns, 1))
    return plte
def asDirect(self):
    """Returns the image data as a direct representation of an
    ``x * y * planes`` array. This method is intended to remove the
    need for callers to deal with palettes and transparency
    themselves. Images with a palette (colour type 3)
    are converted to RGB or RGBA; images with transparency (a
    ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
    When returned in this format the pixel values represent the
    colour value directly without needing to refer to palettes or
    transparency information.
    Like the :meth:`read` method this method returns a 4-tuple:
    (*width*, *height*, *pixels*, *meta*)
    This method normally returns pixel values with the bit depth
    they have in the source image, but when the source PNG has an
    ``sBIT`` chunk it is inspected and can reduce the bit depth of
    the result pixels; pixel values will be reduced according to
    the bit depth specified in the ``sBIT`` chunk (PNG nerds should
    note a single result bit depth is used for all channels; the
    maximum of the ones specified in the ``sBIT`` chunk. An RGB565
    image will be rescaled to 6-bit RGB666).
    The *meta* dictionary that is returned reflects the `direct`
    format and not the original source image. For example, an RGB
    source image with a ``tRNS`` chunk to represent a transparent
    colour, will have ``planes=3`` and ``alpha=False`` for the
    source image, but the *meta* dictionary returned by this method
    will have ``planes=4`` and ``alpha=True`` because an alpha
    channel is synthesized and added.
    *pixels* is the pixel data in boxed row flat pixel format (just
    like the :meth:`read` method).
    All the other aspects of the image data are not changed.
    """
    self.preamble()
    # Simple case, no conversion necessary.
    if not self.colormap and not self.trns and not self.sbit:
        return self.read()
    x, y, pixels, meta = self.read()
    if self.colormap:
        # Palette image: expand indices through the palette to
        # RGB (or RGBA when a tRNS chunk supplies alpha).
        meta['colormap'] = False
        meta['alpha'] = bool(self.trns)
        meta['bitdepth'] = 8
        meta['planes'] = 3 + bool(self.trns)
        plte = self.palette()
        def iterpal(pixels):
            for row in pixels:
                # Map each palette index to its colour tuple, then
                # flatten the tuples into one row.
                row = map(plte.__getitem__, row)
                yield array('B', itertools.chain(*row))
        pixels = iterpal(pixels)
    elif self.trns:
        # It would be nice if there was some reasonable way of doing
        # this without generating a whole load of intermediate tuples.
        # But tuples does seem like the easiest way, with no other way
        # clearly much simpler or much faster. (Actually, the L to LA
        # conversion could perhaps go faster (all those 1-tuples!), but
        # I still wonder whether the code proliferation is worth it)
        it = self.transparent
        maxval = 2 ** meta['bitdepth'] - 1
        planes = meta['planes']
        meta['alpha'] = True
        meta['planes'] += 1
        typecode = 'BH'[meta['bitdepth'] > 8]
        def itertrns(pixels):
            for row in pixels:
                # For each row we group it into pixels, then form a
                # characterisation vector that says whether each pixel
                # is opaque or not. Then we convert True/False to
                # 0/maxval (by multiplication), and add it as the extra
                # channel.
                row = group(row, planes)
                opa = map(it.__ne__, row)
                opa = map(maxval.__mul__, opa)
                opa = zip(opa) # convert to 1-tuples
                yield array(typecode,
                    itertools.chain(*map(operator.add, row, opa)))
        pixels = itertrns(pixels)
    targetbitdepth = None
    if self.sbit:
        # Honour the sBIT chunk: possibly reduce the result bit
        # depth to the maximum significant-bit count.
        sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
        targetbitdepth = max(sbit)
        if targetbitdepth > meta['bitdepth']:
            raise Error('sBIT chunk %r exceeds bitdepth %d' %
                (sbit, self.bitdepth))
        if min(sbit) <= 0:
            raise Error('sBIT chunk %r has a 0-entry' % sbit)
        if targetbitdepth == meta['bitdepth']:
            # No reduction needed.
            targetbitdepth = None
    if targetbitdepth:
        shift = meta['bitdepth'] - targetbitdepth
        meta['bitdepth'] = targetbitdepth
        def itershift(pixels):
            for row in pixels:
                yield map(shift.__rrshift__, row)
        pixels = itershift(pixels)
    return x, y, pixels, meta
def asFloat(self, maxval=1.0):
    """Return image pixels as per :meth:`asDirect` method, but scale
    all pixel values to be floating point values between 0.0 and
    *maxval*.
    """
    width, height, pixels, info = self.asDirect()
    # Scale factor from the source integer range to [0, maxval].
    sourcemaxval = 2 ** info['bitdepth'] - 1
    factor = float(maxval) / float(sourcemaxval)
    # The result has no integer bit depth; record the float maximum.
    del info['bitdepth']
    info['maxval'] = float(maxval)
    def iterfloat():
        for row in pixels:
            yield map(factor.__mul__, row)
    return width, height, iterfloat(), info
def _as_rescale(self, get, targetbitdepth):
    """Helper used by :meth:`asRGB8` and :meth:`asRGBA8`.

    Calls `get` (one of the conversion methods) and rescales every
    pixel value from the source bit depth to `targetbitdepth`.
    """
    width, height, pixels, meta = get()
    sourcemax = 2 ** meta['bitdepth'] - 1
    targetmax = 2 ** targetbitdepth - 1
    factor = float(targetmax) / float(sourcemax)
    meta['bitdepth'] = targetbitdepth
    def iterscale():
        for row in pixels:
            # Round-to-nearest rescaling of each sample.
            yield map(lambda value: int(round(value * factor)), row)
    return width, height, iterscale(), meta
def asRGB8(self):
    """Return the image data as RGB pixels with 8-bits per sample.

    Like the :meth:`asRGB` method, but additionally rescales all
    values into the range 0 to 255 (8-bit). When the source bit
    depth is < 8 no information is lost; when it is > 8, rescaling
    loses precision (no dithering is performed). As with
    :meth:`asRGB`, an alpha channel in the source image raises an
    exception.

    Returns a 4-tuple: (*width*, *height*, *pixels*, *metadata*),
    where *width*, *height*, *metadata* are as per :meth:`read` and
    *pixels* is in boxed row flat pixel format.
    """
    return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
    """Return the image data as RGBA pixels with 8-bits per sample.

    Combines :meth:`asRGBA` and the rescaling of :meth:`asRGB8`: the
    result always has an alpha channel (synthesized if necessary)
    and every value is rescaled into the range 0 to 255.
    """
    return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
    """Return image as RGB pixels. Greyscales are expanded into RGB
    triplets. An alpha channel in the source image will raise an
    exception. The return values are as for the :meth:`read` method
    except that the *metadata* reflect the returned pixels, not the
    source image. In particular, for this method
    ``metadata['greyscale']`` will be ``False``.
    """
    width, height, pixels, meta = self.asDirect()
    if meta['alpha']:
        raise Error("will not convert image with alpha channel to RGB")
    if not meta['greyscale']:
        # Already RGB; nothing to do.
        return width, height, pixels, meta
    meta['greyscale'] = False
    typecode = 'BH'[meta['bitdepth'] > 8]
    def iterrgb():
        for row in pixels:
            # Replicate the single grey channel into R, G, and B.
            triple = array(typecode, [0]) * 3 * width
            for channel in range(3):
                triple[channel::3] = row
            yield triple
    return width, height, iterrgb(), meta
def asRGBA(self):
    """Return image as RGBA pixels. Greyscales are expanded into
    RGB triplets; an alpha channel is synthesized if necessary.
    The return values are as for the :meth:`read` method
    except that the *metadata* reflect the returned pixels, not the
    source image. In particular, for this method
    ``metadata['greyscale']`` will be ``False``, and
    ``metadata['alpha']`` will be ``True``.
    """
    width, height, pixels, meta = self.asDirect()
    if meta['alpha'] and not meta['greyscale']:
        # Already RGBA; nothing to do.
        return width, height, pixels, meta
    typecode = 'BH'[meta['bitdepth'] > 8]
    maxval = 2 ** meta['bitdepth'] - 1
    def newarray():
        # A fresh all-zero row with 4 channels per pixel.
        return array(typecode, [0]) * 4 * width
    if meta['alpha'] and meta['greyscale']:
        # LA to RGBA
        def convert():
            for row in pixels:
                # Create a fresh target row, then copy L channel
                # into first three target channels, and A channel
                # into fourth channel.
                a = newarray()
                for i in range(3):
                    a[i::4] = row[0::2]
                a[3::4] = row[1::2]
                yield a
    elif meta['greyscale']:
        # L to RGBA
        def convert():
            for row in pixels:
                a = newarray()
                for i in range(3):
                    a[i::4] = row
                # BUG FIX: the array initializer must be a sequence;
                # the previous code passed the bare int `maxval`,
                # which raises TypeError.  Now matches the RGB-to-RGBA
                # branch below.
                a[3::4] = array(typecode, [maxval]) * width
                yield a
    else:
        assert not meta['alpha'] and not meta['greyscale']
        # RGB to RGBA
        def convert():
            for row in pixels:
                a = newarray()
                for i in range(3):
                    a[i::4] = row[i::3]
                # Fully opaque alpha channel.
                a[3::4] = array(typecode, [maxval]) * width
                yield a
    meta['alpha'] = True
    meta['greyscale'] = False
    return width, height, convert(), meta
# === Legacy Version Support ===
# :pyver:old: PyPNG works on Python versions 2.3 and 2.2, but not
# without some awkward problems. Really PyPNG works on Python 2.4 (and
# above); it works on Pythons 2.3 and 2.2 by virtue of fixing up
# problems here. It's a bit ugly (which is why it's hidden down here).
#
# Generally the strategy is one of pretending that we're running on
# Python 2.4 (or above), and patching up the library support on earlier
# versions so that it looks enough like Python 2.4. When it comes to
# Python 2.2 there is one thing we cannot patch: extended slices
# http://www.python.org/doc/2.3/whatsnew/section-slices.html.
# Instead we simply declare that features that are implemented using
# extended slices will not work on Python 2.2.
#
# In order to work on Python 2.3 we fix up a recurring annoyance involving
# the array type. In Python 2.3 an array cannot be initialised with an
# array, and it cannot be extended with a list (or other sequence).
# Both of those are repeated issues in the code. Whilst I would not
# normally tolerate this sort of behaviour, here we "shim" a replacement
# for array into place (and hope no-one notices). You never read this.
#
# In an amusing case of warty hacks on top of warty hacks... the array
# shimming we try and do only works on Python 2.3 and above (you can't
# subclass array.array in Python 2.2). So to get it working on Python
# 2.2 we go for something much simpler and (probably) way slower.
# Feature-test the array type: Python 2.4+ arrays can be extended with
# any sequence and initialised from another array; older versions
# cannot, in which case a replacement is shimmed in below.
try:
    array('B').extend([])
    array('B', array('B'))
except:
    # Expect to get here on Python 2.3
    try:
        class _array_shim(array):
            # Keep a reference to the real array type so extend() can
            # detect and fast-path genuine arrays.
            true_array = array
            def __new__(cls, typecode, init=None):
                super_new = super(_array_shim, cls).__new__
                it = super_new(cls, typecode)
                if init is None:
                    return it
                # Route initialisation through our tolerant extend().
                it.extend(init)
                return it
            def extend(self, extension):
                super_extend = super(_array_shim, self).extend
                if isinstance(extension, self.true_array):
                    return super_extend(extension)
                if not isinstance(extension, (list, str)):
                    # Convert to list. Allows iterators to work.
                    extension = list(extension)
                return super_extend(self.true_array(self.typecode, extension))
        array = _array_shim
    except:
        # Expect to get here on Python 2.2 (which cannot subclass
        # array.array); fall back to plain lists, which is much
        # slower but functional.
        def array(typecode, init=()):
            if type(init) == str:
                return map(ord, init)
            return list(init)
# Further hacks to get it limping along on Python 2.2
# Provide enumerate() on Python 2.2, where it is missing.
try:
    enumerate
except:
    def enumerate(seq):
        # Minimal generator equivalent of the builtin.
        i = 0
        for x in seq:
            yield i, x
            i += 1
# Provide reversed() on Python 2.2/2.3, where it is missing.
try:
    reversed
except:
    def reversed(l):
        # Copy, reverse in place, then yield; works for any iterable.
        l = list(l)
        l.reverse()
        for x in l:
            yield x
# Provide the subset of itertools used by this module (imap, chain)
# on Python versions where itertools is unavailable.
try:
    itertools
except:
    class _dummy_itertools:
        pass
    itertools = _dummy_itertools()
    def _itertools_imap(f, seq):
        # Lazy map, like Python 2's itertools.imap.
        for x in seq:
            yield f(x)
    itertools.imap = _itertools_imap
    def _itertools_chain(*iterables):
        # Concatenate iterables lazily.
        for it in iterables:
            for element in it:
                yield element
    itertools.chain = _itertools_chain
# === Internal Test Support ===
# This section comprises the tests that are internally validated (as
# opposed to tests which produce output files that are externally
# validated). Primarily they are unittests.
# Note that it is difficult to internally validate the results of
# writing a PNG file. The only thing we can do is read it back in
# again, which merely checks consistency, not that the PNG file we
# produce is valid.
# Run the tests from the command line:
# python -c 'import png;png.test()'
from cStringIO import StringIO
import tempfile
# http://www.python.org/doc/2.4.4/lib/module-unittest.html
import unittest
def test():
    # Run this module's internal unittest suite; invoked as
    # python -c 'import png;png.test()'
    unittest.main(__name__)
def topngbytes(name, rows, x, y, **k):
    """Convenience function for creating a PNG file "in memory" as a
    string. Creates a :class:`Writer` instance using the keyword arguments,
    then passes `rows` to its :meth:`Writer.write` method. The resulting
    PNG file is returned as a string. `name` is used to identify the file for
    debugging.
    """
    import os
    print name
    f = StringIO()
    w = Writer(x, y, **k)
    w.write(f, rows)
    # Optionally also dump the PNG to disk for external inspection,
    # controlled by the PYPNG_TEST_TMP environment variable.
    if os.environ.get('PYPNG_TEST_TMP'):
        w = open(name, 'wb')
        w.write(f.getvalue())
        w.close()
    return f.getvalue()
def testWithIO(inp, out, f):
"""Calls the function `f` with ``sys.stdin`` changed to `inp`
and ``sys.stdout`` changed to `out`. They are restored when `f`
returns. This function returns whatever `f` returns.
"""
try:
oldin, sys.stdin = sys.stdin, inp
oldout, sys.stdout = sys.stdout, out
x = f()
finally:
sys.stdin = oldin
sys.stdout = oldout
return x
class Test(unittest.TestCase):
# This member is used by the superclass. If we don't define a new
# class here then when we use self.assertRaises() and the PyPNG code
# raises an assertion then we get no proper traceback. I can't work
# out why, but defining a new class here means we get a proper
# traceback.
class failureException(Exception):
pass
def helperLN(self, n):
mask = (1 << n) - 1
# Use small chunk_limit so that multiple chunk writing is
# tested. Making it a test for Issue 20.
w = Writer(15, 17, greyscale=True, bitdepth=n, chunk_limit=99)
f = StringIO()
w.write_array(f, array('B', map(mask.__and__, range(1, 256))))
r = Reader(bytes=f.getvalue())
x, y, pixels, meta = r.read()
self.assertEqual(x, 15)
self.assertEqual(y, 17)
self.assertEqual(list(itertools.chain(*pixels)),
map(mask.__and__, range(1, 256)))
def testL8(self):
return self.helperLN(8)
def testL4(self):
return self.helperLN(4)
def testL2(self):
"Also tests asRGB8."
w = Writer(1, 4, greyscale=True, bitdepth=2)
f = StringIO()
w.write_array(f, array('B', range(4)))
r = Reader(bytes=f.getvalue())
x, y, pixels, meta = r.asRGB8()
self.assertEqual(x, 1)
self.assertEqual(y, 4)
for i, row in enumerate(pixels):
self.assertEqual(len(row), 3)
self.assertEqual(list(row), [0x55 * i] * 3)
def testP2(self):
"2-bit palette."
a = (255, 255, 255)
b = (200, 120, 120)
c = (50, 99, 50)
w = Writer(1, 4, bitdepth=2, palette=[a, b, c])
f = StringIO()
w.write_array(f, array('B', (0, 1, 1, 2)))
r = Reader(bytes=f.getvalue())
x, y, pixels, meta = r.asRGB8()
self.assertEqual(x, 1)
self.assertEqual(y, 4)
self.assertEqual(list(pixels), map(list, [a, b, b, c]))
def testPtrns(self):
"Test colour type 3 and tRNS chunk (and 4-bit palette)."
a = (50, 99, 50, 50)
b = (200, 120, 120, 80)
c = (255, 255, 255)
d = (200, 120, 120)
e = (50, 99, 50)
w = Writer(3, 3, bitdepth=4, palette=[a, b, c, d, e])
f = StringIO()
w.write_array(f, array('B', (4, 3, 2, 3, 2, 0, 2, 0, 1)))
r = Reader(bytes=f.getvalue())
x, y, pixels, meta = r.asRGBA8()
self.assertEquals(x, 3)
self.assertEquals(y, 3)
c = c + (255,)
d = d + (255,)
e = e + (255,)
boxed = [(e, d, c), (d, c, a), (c, a, b)]
flat = map(lambda row: itertools.chain(*row), boxed)
self.assertEqual(map(list, pixels), map(list, flat))
def testRGBtoRGBA(self):
"asRGBA8() on colour type 2 source."""
# Test for Issue 26
r = Reader(bytes=_pngsuite['basn2c08'])
x, y, pixels, meta = r.asRGBA8()
# Test the pixels at row 9 columns 0 and 1.
row9 = list(pixels)[9]
self.assertEqual(row9[0:8],
[0xff, 0xdf, 0xff, 0xff, 0xff, 0xde, 0xff, 0xff])
def testCtrns(self):
"Test colour type 2 and tRNS chunk."
# Test for Issue 25
r = Reader(bytes=_pngsuite['tbrn2c08'])
x, y, pixels, meta = r.asRGBA8()
# I just happen to know that the first pixel is transparent.
# In particular it should be #7f7f7f00
row0 = list(pixels)[0]
self.assertEqual(tuple(row0[0:4]), (0x7f, 0x7f, 0x7f, 0x00))
def testAdam7read(self):
"""Adam7 interlace reading.
Specifically, test that for images in the PngSuite that
have both an interlaced and straightlaced pair that both
images from the pair produce the same array of pixels."""
for candidate in _pngsuite:
if not candidate.startswith('basn'):
continue
candi = candidate.replace('n', 'i')
if candi not in _pngsuite:
continue
print 'adam7 read', candidate
straight = Reader(bytes=_pngsuite[candidate])
adam7 = Reader(bytes=_pngsuite[candi])
# Just compare the pixels. Ignore x,y (because they're
# likely to be correct?); metadata is ignored because the
# "interlace" member differs. Lame.
straight = straight.read()[2]
adam7 = adam7.read()[2]
self.assertEqual(map(list, straight), map(list, adam7))
def testAdam7write(self):
"""Adam7 interlace writing.
For each test image in the PngSuite, write an interlaced
and a straightlaced version. Decode both, and compare results.
"""
# Not such a great test, because the only way we can check what
# we have written is to read it back again.
for name, bytes in _pngsuite.items():
# Only certain colour types supported for this test.
if name[3:5] not in ['n0', 'n2', 'n4', 'n6']:
continue
it = Reader(bytes=bytes)
x, y, pixels, meta = it.read()
pngi = topngbytes('adam7wn' + name + '.png', pixels,
x=x, y=y, bitdepth=it.bitdepth,
greyscale=it.greyscale, alpha=it.alpha,
transparent=it.transparent,
interlace=False)
x, y, ps, meta = Reader(bytes=pngi).read()
it = Reader(bytes=bytes)
x, y, pixels, meta = it.read()
pngs = topngbytes('adam7wi' + name + '.png', pixels,
x=x, y=y, bitdepth=it.bitdepth,
greyscale=it.greyscale, alpha=it.alpha,
transparent=it.transparent,
interlace=True)
x, y, pi, meta = Reader(bytes=pngs).read()
self.assertEqual(map(list, ps), map(list, pi))
def testPGMin(self):
"""Test that the command line tool can read PGM files."""
def do():
return _main(['testPGMin'])
s = StringIO()
s.write('P5 2 2 3\n')
s.write('\x00\x01\x02\x03')
s.flush()
s.seek(0)
o = StringIO()
testWithIO(s, o, do)
r = Reader(bytes=o.getvalue())
x, y, pixels, meta = r.read()
self.assert_(r.greyscale)
self.assertEqual(r.bitdepth, 2)
def testPAMin(self):
"""Test that the command line tool can read PAM file."""
def do():
return _main(['testPAMin'])
s = StringIO()
s.write('P7\nWIDTH 3\nHEIGHT 1\nDEPTH 4\nMAXVAL 255\n'
'TUPLTYPE RGB_ALPHA\nENDHDR\n')
# The pixels in flat row flat pixel format
flat = [255, 0, 0, 255, 0, 255, 0, 120, 0, 0, 255, 30]
s.write(''.join(map(chr, flat)))
s.flush()
s.seek(0)
o = StringIO()
testWithIO(s, o, do)
r = Reader(bytes=o.getvalue())
x, y, pixels, meta = r.read()
self.assert_(r.alpha)
self.assert_(not r.greyscale)
self.assertEqual(list(itertools.chain(*pixels)), flat)
def testLA4(self):
"""Create an LA image with bitdepth 4."""
bytes = topngbytes('la4.png', [[5, 12]], 1, 1,
greyscale=True, alpha=True, bitdepth=4)
sbit = Reader(bytes=bytes).chunk('sBIT')[1]
self.assertEqual(sbit, '\x04\x04')
def testPNMsbit(self):
"""Test that PNM files can generates sBIT chunk."""
def do():
return _main(['testPNMsbit'])
s = StringIO()
s.write('P6 8 1 1\n')
for pixel in range(8):
s.write(struct.pack('<I', (0x4081 * pixel) & 0x10101)[:3])
s.flush()
s.seek(0)
o = StringIO()
testWithIO(s, o, do)
r = Reader(bytes=o.getvalue())
sbit = r.chunk('sBIT')[1]
self.assertEqual(sbit, '\x01\x01\x01')
def testLtrns0(self):
"""Create greyscale image with tRNS chunk."""
return self.helperLtrns(0)
def testLtrns1(self):
"""Using 1-tuple for transparent arg."""
return self.helperLtrns((0,))
def helperLtrns(self, transparent):
"""Helper used by :meth:`testLtrns*`."""
pixels = zip(map(ord, '00384c545c403800'.decode('hex')))
o = StringIO()
w = Writer(8, 8, greyscale=True, bitdepth=1, transparent=transparent)
w.write_packed(o, pixels)
r = Reader(bytes=o.getvalue())
x, y, pixels, meta = r.asDirect()
self.assert_(meta['alpha'])
self.assert_(meta['greyscale'])
self.assertEqual(meta['bitdepth'], 1)
def testWinfo(self):
"""Test the dictionary returned by a `read` method can be used
as args for :meth:`Writer`.
"""
r = Reader(bytes=_pngsuite['basn2c16'])
info = r.read()[3]
w = Writer(**info)
def testPackedIter(self):
"""Test iterator for row when using write_packed.
Indicative for Issue 47.
"""
w = Writer(16, 2, greyscale=True, alpha=False, bitdepth=1)
o = StringIO()
w.write_packed(o, [itertools.chain([0x0a], [0xaa]),
itertools.chain([0x0f], [0xff])])
r = Reader(bytes=o.getvalue())
x, y, pixels, info = r.asDirect()
pixels = list(pixels)
self.assertEqual(len(pixels), 2)
self.assertEqual(len(pixels[0]), 16)
def testInterlacedArray(self):
"""Test that reading an interlaced PNG yields each row as an
array."""
r = Reader(bytes=_pngsuite['basi0g08'])
list(r.read()[2])[0].tostring
def testTrnsArray(self):
"""Test that reading a type 2 PNG with tRNS chunk yields each
row as an array (using asDirect)."""
r = Reader(bytes=_pngsuite['tbrn2c08'])
list(r.asDirect()[2])[0].tostring
# Invalid file format tests. These construct various badly
# formatted PNG files, then feed them into a Reader. When
# everything is working properly, we should get FormatError
# exceptions raised.
def testEmpty(self):
"""Test empty file."""
r = Reader(bytes='')
self.assertRaises(FormatError, r.asDirect)
def testSigOnly(self):
"""Test file containing just signature bytes."""
r = Reader(bytes=_signature)
self.assertRaises(FormatError, r.asDirect)
def testExtraPixels(self):
"""Test file that contains too many pixels."""
def eachchunk(chunk):
if chunk[0] != 'IDAT':
return chunk
data = chunk[1].decode('zip')
data += '\x00garbage'
data = data.encode('zip')
chunk = (chunk[0], data)
return chunk
self.assertRaises(FormatError, self.helperFormat, eachchunk)
def testNotEnoughPixels(self):
def eachchunk(chunk):
if chunk[0] != 'IDAT':
return chunk
# Remove last byte.
data = chunk[1].decode('zip')
data = data[:-1]
data = data.encode('zip')
return chunk[0], data
self.assertRaises(FormatError, self.helperFormat, eachchunk)
def helperFormat(self, f):
r = Reader(bytes=_pngsuite['basn0g01'])
o = StringIO()
def newchunks():
for chunk in r.chunks():
yield f(chunk)
write_chunks(o, newchunks())
r = Reader(bytes=o.getvalue())
return list(r.asDirect()[2])
def testBadFilter(self):
def eachchunk(chunk):
if chunk[0] != 'IDAT':
return chunk
data = chunk[1].decode('zip')
# Corrupt the first filter byte
data = '\x99' + data[1:]
data = data.encode('zip')
return chunk[0], data
self.assertRaises(FormatError, self.helperFormat, eachchunk)
def testFlat(self):
"""Test read_flat."""
import hashlib
r = Reader(bytes=_pngsuite['basn0g02'])
x, y, pixel, meta = r.read_flat()
d = hashlib.md5(''.join(map(chr, pixel))).digest()
self.assertEqual(d.encode('hex'), '255cd971ab8cd9e7275ff906e5041aa0')
# numpy dependent tests. These are skipped (with a message to
# sys.stderr) if numpy cannot be imported.
def testNumpyuint16(self):
"""numpy uint16."""
try:
import numpy
except ImportError:
print >> sys.stderr, "skipping numpy test"
return
rows = [map(numpy.uint16, range(0, 0x10000, 0x5555))]
b = topngbytes('numpyuint16.png', rows, 4, 1,
greyscale=True, alpha=False, bitdepth=16)
def testNumpyuint8(self):
"""numpy uint8."""
try:
import numpy
except ImportError:
print >> sys.stderr, "skipping numpy test"
return
rows = [map(numpy.uint8, range(0, 0x100, 0x55))]
b = topngbytes('numpyuint8.png', rows, 4, 1,
greyscale=True, alpha=False, bitdepth=8)
def testNumpybool(self):
"""numpy bool."""
try:
import numpy
except ImportError:
print >> sys.stderr, "skipping numpy test"
return
rows = [map(numpy.bool, [0, 1])]
b = topngbytes('numpybool.png', rows, 2, 1,
greyscale=True, alpha=False, bitdepth=1)
# === Command Line Support ===
def _dehex(s):
    """Liberally convert from hex string to binary string.

    Any character that is not a hexadecimal digit (including
    whitespace and newlines) is discarded before decoding, so the
    triple-quoted hex blobs below can be passed in directly.
    """
    import re
    cleaned = re.sub(r'[^0-9a-fA-F]', '', s)
    return cleaned.decode('hex')
# Copies of PngSuite test files taken
# from http://www.schaik.com/pngsuite/pngsuite_bas_png.html
# on 2009-02-19 by drj and converted to hex.
# Some of these are not actually in PngSuite (but maybe they should
# be?), they use the same naming scheme, but start with a capital
# letter.
_pngsuite = {
'basi0g01': _dehex("""
89504e470d0a1a0a0000000d49484452000000200000002001000000012c0677
cf0000000467414d41000186a031e8965f0000009049444154789c2d8d310ec2
300c45dfc682c415187a00a42e197ab81e83b127e00c5639001363a580d8582c
65c910357c4b78b0bfbfdf4f70168c19e7acb970a3f2d1ded9695ce5bf5963df
d92aaf4c9fd927ea449e6487df5b9c36e799b91bdf082b4d4bd4014fe4014b01
ab7a17aee694d28d328a2d63837a70451e1648702d9a9ff4a11d2f7a51aa21e5
a18c7ffd0094e3511d661822f20000000049454e44ae426082
"""),
'basi0g02': _dehex("""
89504e470d0a1a0a0000000d49484452000000200000002002000000016ba60d
1f0000000467414d41000186a031e8965f0000005149444154789c635062e860
00e17286bb609c93c370ec189494960631366e4467b3ae675dcf10f521ea0303
90c1ca006444e11643482064114a4852c710baea3f18c31918020c30410403a6
0ac1a09239009c52804d85b6d97d0000000049454e44ae426082
"""),
'basi0g04': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000200400000001e4e6f8
bf0000000467414d41000186a031e8965f000000ae49444154789c658e5111c2
301044171c141c141c041c843a287510ea20d441c041c141c141c04191102454
03994998cecd7edcecedbb9bdbc3b2c2b6457545fbc4bac1be437347f7c66a77
3c23d60db15e88f5c5627338a5416c2e691a9b475a89cd27eda12895ae8dfdab
43d61e590764f5c83a226b40d669bec307f93247701687723abf31ff83a2284b
a5b4ae6b63ac6520ad730ca4ed7b06d20e030369bd6720ed383290360406d24e
13811f2781eba9d34d07160000000049454e44ae426082
"""),
'basi0g08': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000200800000001211615
be0000000467414d41000186a031e8965f000000b549444154789cb5905d0ac2
3010849dbac81c42c47bf843cf253e8878b0aa17110f214bdca6be240f5d21a5
94ced3e49bcd322c1624115515154998aa424822a82a5624a1aa8a8b24c58f99
999908130989a04a00d76c2c09e76cf21adcb209393a6553577da17140a2c59e
70ecbfa388dff1f03b82fb82bd07f05f7cb13f80bb07ad2fd60c011c3c588eef
f1f4e03bbec7ce832dca927aea005e431b625796345307b019c845e6bfc3bb98
769d84f9efb02ea6c00f9bb9ff45e81f9f280000000049454e44ae426082
"""),
'basi0g16': _dehex("""
89504e470d0a1a0a0000000d49484452000000200000002010000000017186c9
fd0000000467414d41000186a031e8965f000000e249444154789cb5913b0ec2
301044c7490aa8f85d81c3e4301c8f53a4ca0da8902c8144b3920b4043111282
23bc4956681a6bf5fc3c5a3ba0448912d91a4de2c38dd8e380231eede4c4f7a1
4677700bec7bd9b1d344689315a3418d1a6efbe5b8305ba01f8ff4808c063e26
c60d5c81edcf6c58c535e252839e93801b15c0a70d810ae0d306b205dc32b187
272b64057e4720ff0502154034831520154034c3df81400510cdf0015c86e5cc
5c79c639fddba9dcb5456b51d7980eb52d8e7d7fa620a75120d6064641a05120
b606771a05626b401a05f1f589827cf0fe44c1f0bae0055698ee8914fffffe00
00000049454e44ae426082
"""),
'basi2c08': _dehex("""
89504e470d0a1a0a0000000d49484452000000200000002008020000018b1fdd
350000000467414d41000186a031e8965f000000f249444154789cd59341aa04
210c44abc07b78133d59d37333bd89d76868b566d10cf4675af8596431a11662
7c5688919280e312257dd6a0a4cf1a01008ee312a5f3c69c37e6fcc3f47e6776
a07f8bdaf5b40feed2d33e025e2ff4fe2d4a63e1a16d91180b736d8bc45854c5
6d951863f4a7e0b66dcf09a900f3ffa2948d4091e53ca86c048a64390f662b50
4a999660ced906182b9a01a8be00a56404a6ede182b1223b4025e32c4de34304
63457680c93aada6c99b73865aab2fc094920d901a203f5ddfe1970d28456783
26cffbafeffcd30654f46d119be4793f827387fc0d189d5bc4d69a3c23d45a7f
db803146578337df4d0a3121fc3d330000000049454e44ae426082
"""),
'basi2c16': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000201002000001db8f01
760000000467414d41000186a031e8965f0000020a49444154789cd5962173e3
3010853fcf1838cc61a1818185a53e56787fa13fa130852e3b5878b4b0b03081
b97f7030070b53e6b057a0a8912bbb9163b9f109ececbc59bd7dcf2b45492409
d66f00eb1dd83cb5497d65456aeb8e1040913b3b2c04504c936dd5a9c7e2c6eb
b1b8f17a58e8d043da56f06f0f9f62e5217b6ba3a1b76f6c9e99e8696a2a72e2
c4fb1e4d452e92ec9652b807486d12b6669be00db38d9114b0c1961e375461a5
5f76682a85c367ad6f682ff53a9c2a353191764b78bb07d8ddc3c97c1950f391
6745c7b9852c73c2f212605a466a502705c8338069c8b9e84efab941eb393a97
d4c9fd63148314209f1c1d3434e847ead6380de291d6f26a25c1ebb5047f5f24
d85c49f0f22cc1d34282c72709cab90477bf25b89d49f0f351822297e0ea9704
f34c82bc94002448ede51866e5656aef5d7c6a385cb4d80e6a538ceba04e6df2
480e9aa84ddedb413bb5c97b3838456df2d4fec2c7a706983e7474d085fae820
a841776a83073838973ac0413fea2f1dc4a06e71108fda73109bdae48954ad60
bf867aac3ce44c7c1589a711cf8a81df9b219679d96d1cec3d8bbbeaa2012626
df8c7802eda201b2d2e0239b409868171fc104ba8b76f10b4da09f6817ffc609
c413ede267fd1fbab46880c90f80eccf0013185eb48b47ba03df2bdaadef3181
cb8976f18e13188768170f98c0f844bb78cb04c62ddac59d09fc3fa25dfc1da4
14deb3df1344f70000000049454e44ae426082
"""),
'basi3p08': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020080300000133a3ba
500000000467414d41000186a031e8965f00000300504c5445224400f5ffed77
ff77cbffff110a003a77002222ffff11ff110000222200ffac5566ff66ff6666
ff01ff221200dcffffccff994444ff005555220000cbcbff44440055ff55cbcb
00331a00ffecdcedffffe4ffcbffdcdc44ff446666ff330000442200ededff66
6600ffa444ffffaaeded0000cbcbfefffffdfffeffff0133ff33552a000101ff
8888ff00aaaa010100440000888800ffe4cbba5b0022ff22663200ffff99aaaa
ff550000aaaa00cb630011ff11d4ffaa773a00ff4444dc6b0066000001ff0188
4200ecffdc6bdc00ffdcba00333300ed00ed7300ffff88994a0011ffff770000
ff8301ffbabafe7b00fffeff00cb00ff999922ffff880000ffff77008888ffdc
ff1a33000000aa33ffff009900990000000001326600ffbaff44ffffffaaff00
770000fefeaa00004a9900ffff66ff22220000998bff1155ffffff0101ff88ff
005500001111fffffefffdfea4ff4466ffffff66ff003300ffff55ff77770000
88ff44ff00110077ffff006666ffffed000100fff5ed1111ffffff44ff22ffff
eded11110088ffff00007793ff2200dcdc3333fffe00febabaff99ffff333300
63cb00baba00acff55ffffdcffff337bfe00ed00ed5555ffaaffffdcdcff5555
00000066dcdc00dc00dc83ff017777fffefeffffffcbff5555777700fefe00cb
00cb0000fe010200010000122200ffff220044449bff33ffd4aa0000559999ff
999900ba00ba2a5500ffcbcbb4ff66ff9b33ffffbaaa00aa42880053aa00ffaa
aa0000ed00babaffff1100fe00000044009999990099ffcc99ba000088008800
dc00ff93220000dcfefffeaa5300770077020100cb0000000033ffedff00ba00
ff3333edffedffc488bcff7700aa00660066002222dc0000ffcbffdcffdcff8b
110000cb00010155005500880000002201ffffcbffcbed0000ff88884400445b
ba00ffbc77ff99ff006600baffba00777773ed00fe00003300330000baff77ff
004400aaffaafffefe000011220022c4ff8800eded99ff99ff55ff002200ffb4
661100110a1100ff1111dcffbabaffff88ff88010001ff33ffb98ed362000002
a249444154789c65d0695c0b001806f03711a9904a94d24dac63292949e5a810
d244588a14ca5161d1a1323973252242d62157d12ae498c8124d25ca3a11398a
16e55a3cdffab0ffe7f77d7fcff3528645349b584c3187824d9d19d4ec2e3523
9eb0ae975cf8de02f2486d502191841b42967a1ad49e5ddc4265f69a899e26b5
e9e468181baae3a71a41b95669da8df2ea3594c1b31046d7b17bfb86592e4cbe
d89b23e8db0af6304d756e60a8f4ad378bdc2552ae5948df1d35b52143141533
33bbbbababebeb3b3bc9c9c9c6c6c0c0d7b7b535323225a5aa8a02024a4bedec
0a0a2a2bcdcd7d7cf2f3a9a9c9cdcdd8b8adcdd5b5ababa828298982824a4ab2
b21212acadbdbc1414e2e24859b9a72730302f4f49292c4c57373c9c0a0b7372
8c8c1c1c3a3a92936d6dfdfd293e3e26262a4a4eaea2424b4b5fbfbc9c323278
3c0b0ba1303abaae8ecdeeed950d6669a9a7a7a141d4de9e9d5d5cdcd2229b94
c572716132f97cb1d8db9bc3110864a39795d9db6b6a26267a7a9a98d4d6a6a7
cb76090ef6f030354d4d75766e686030545464cb393a1a1ac6c68686eae8f8f9
a9aa4644c8b66d6e1689dcdd2512a994cb35330b0991ad9f9b6b659596a6addd
d8282fafae5e5323fb8f41d01f76c22fd8061be01bfc041a0323e1002c81cd30
0b9ec027a0c930014ec035580fc3e112bc069a0b53e11c0c8095f00176c163a0
e5301baec06a580677600ddc05ba0f13e120bc81a770133ec355a017300d4ec2
0c7800bbe1219c02fa08f3e13c1c85dbb00a2ec05ea0dff00a6ec15a98027360
070c047a06d7e1085c84f1b014f6c03fa0b33018b6c0211801ebe018fc00da0a
6f61113c877eb01d4ec317a085700f26c130f80efbe132bc039a0733e106fc81
f7f017f6c10aa0d1300a0ec374780943e1382c06fa0a9b60238c83473016cec0
02f80f73fefe1072afc1e50000000049454e44ae426082
"""),
'basi6a08': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000200806000001047d4a
620000000467414d41000186a031e8965f0000012049444154789cc595414ec3
3010459fa541b8bbb26641b8069b861e8b4d12c1c112c1452a710a2a65d840d5
949041fc481ec98ae27c7f3f8d27e3e4648047600fec0d1f390fbbe2633a31e2
9389e4e4ea7bfdbf3d9a6b800ab89f1bd6b553cfcbb0679e960563d72e0a9293
b7337b9f988cc67f5f0e186d20e808042f1c97054e1309da40d02d7e27f92e03
6cbfc64df0fc3117a6210a1b6ad1a00df21c1abcf2a01944c7101b0cb568a001
909c9cf9e399cf3d8d9d4660a875405d9a60d000b05e2de55e25780b7a5268e0
622118e2399aab063a815808462f1ab86890fc2e03e48bb109ded7d26ce4bf59
0db91bac0050747fec5015ce80da0e5700281be533f0ce6d5900b59bcb00ea6d
200314cf801faab200ea752803a8d7a90c503a039f824a53f4694e7342000000
0049454e44ae426082
"""),
'basn0g01': _dehex("""
89504e470d0a1a0a0000000d49484452000000200000002001000000005b0147
590000000467414d41000186a031e8965f0000005b49444154789c2dccb10903
300c05d1ebd204b24a200b7a346f90153c82c18d0a61450751f1e08a2faaead2
a4846ccea9255306e753345712e211b221bf4b263d1b427325255e8bdab29e6f
6aca30692e9d29616ee96f3065f0bf1f1087492fd02f14c90000000049454e44
ae426082
"""),
'basn0g02': _dehex("""
89504e470d0a1a0a0000000d49484452000000200000002002000000001ca13d
890000000467414d41000186a031e8965f0000001f49444154789c6360085df5
1f8cf1308850c20053868f0133091f6390b90700bd497f818b0989a900000000
49454e44ae426082
"""),
# A version of basn0g04 dithered down to 3 bits.
'Basn0g03': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
2900000001734249540371d88211000000fd49444154789c6d90d18906210c84
c356f22356b2889588604301b112112b11d94a96bb495cf7fe87f32d996f2689
44741cc658e39c0b118f883e1f63cc89dafbc04c0f619d7d898396c54b875517
83f3a2e7ac09a2074430e7f497f00f1138a5444f82839c5206b1f51053cca968
63258821e7f2b5438aac16fbecc052b646e709de45cf18996b29648508728612
952ca606a73566d44612b876845e9a347084ea4868d2907ff06be4436c4b41a3
a3e1774285614c5affb40dbd931a526619d9fa18e4c2be420858de1df0e69893
a0e3e5523461be448561001042b7d4a15309ce2c57aef2ba89d1c13794a109d7
b5880aa27744fc5c4aecb5e7bcef5fe528ec6293a930690000000049454e44ae
426082
"""),
'basn0g04': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000004849444154789c6360601014
545232367671090d4d4b2b2f6720430095dbd1418e002a77e64c720450b9ab56
912380caddbd9b1c0154ee9933e408a072efde25470095fbee1d1902001f14ee
01eaff41fa0000000049454e44ae426082
"""),
'basn0g08': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
280000000467414d41000186a031e8965f0000004149444154789c6364602400
1408c8b30c05058c0f0829f8f71f3f6079301c1430ca11906764a2795c0c0605
8c8ff0cafeffcff887e67131181430cae0956564040050e5fe7135e2d8590000
000049454e44ae426082
"""),
'basn0g16': _dehex("""
89504e470d0a1a0a0000000d49484452000000200000002010000000000681f9
6b0000000467414d41000186a031e8965f0000005e49444154789cd5d2310ac0
300c4351395bef7fc6dca093c0287b32d52a04a3d98f3f3880a7b857131363a0
3a82601d089900dd82f640ca04e816dc06422640b7a03d903201ba05b7819009
d02d680fa44c603f6f07ec4ff41938cf7f0016d84bd85fae2b9fd70000000049
454e44ae426082
"""),
'basn2c08': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000004849444154789cedd5c10900
300c024085ec91fdb772133b442bf4a1f8cee12bb40d043b800a14f81ca0ede4
7d4c784081020f4a871fc284071428f0a0743823a94081bb7077a3c00182b1f9
5e0f40cf4b0000000049454e44ae426082
"""),
'basn2c16': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000201002000000ac8831
e00000000467414d41000186a031e8965f000000e549444154789cd596c10a83
301044a7e0417fcb7eb7fdadf6961e06039286266693cc7a188645e43dd6a08f
1042003e2fe09aef6472737e183d27335fcee2f35a77b702ebce742870a23397
f3edf2705dd10160f3b2815fe8ecf2027974a6b0c03f74a6e4192843e75c6c03
35e8ec3202f5e84c0181bbe8cca967a00d9df3491bb040671f2e6087ce1c2860
8d1e05f8c7ee0f1d00b667e70df44467ef26d01fbd9bc028f42860f71d188bce
fb8d3630039dbd59601e7ab3c06cf428507f0634d039afdc80123a7bb1801e7a
b1802a7a14c89f016d74ce331bf080ce9e08f8414f04bca133bfe642fe5e07bb
c4ec0000000049454e44ae426082
"""),
'basn6a08': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000200806000000737a7a
f40000000467414d41000186a031e8965f0000006f49444154789cedd6310a80
300c46e12764684fa1f73f55048f21c4ddc545781d52e85028fc1f4d28d98a01
305e7b7e9cffba33831d75054703ca06a8f90d58a0074e351e227d805c8254e3
1bb0420f5cdc2e0079208892ffe2a00136a07b4007943c1004d900195036407f
011bf00052201a9c160fb84c0000000049454e44ae426082
"""),
'cs3n3p08': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f0000000373424954030303a392a042
00000054504c544592ff0000ff9200ffff00ff0000dbff00ff6dffb600006dff
b6ff00ff9200dbff000049ffff2400ff000024ff0049ff0000ffdb00ff4900ff
b6ffff0000ff2400b6ffffdb000092ffff6d000024ffff49006dff00df702b17
0000004b49444154789c85cac70182000000b1b3625754b0edbfa72324ef7486
184ed0177a437b680bcdd0031c0ed00ea21f74852ed00a1c9ed0086da0057487
6ed0121cd6d004bda0013a421ff803224033e177f4ae260000000049454e44ae
426082
"""),
's09n3p02': _dehex("""
89504e470d0a1a0a0000000d49484452000000090000000902030000009dffee
830000000467414d41000186a031e8965f000000037342495404040477f8b5a3
0000000c504c544500ff000077ffff00ffff7700ff5600640000001f49444154
789c63600002fbff0c0c56ab19182ca381581a4283f82071200000696505c36a
437f230000000049454e44ae426082
"""),
'tbgn3p08': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000207504c54457f7f7fafafafab
abab110000222200737300999999510d00444400959500959595e6e600919191
8d8d8d620d00898989666600b7b700911600000000730d007373736f6f6faaaa
006b6b6b676767c41a00cccc0000f30000ef00d51e0055555567670000dd0051
515100d1004d4d4de61e0038380000b700160d0d00ab00560d00090900009500
009100008d003333332f2f2f2f2b2f2b2b000077007c7c001a05002b27000073
002b2b2b006f00bb1600272727780d002323230055004d4d00cc1e00004d00cc
1a000d00003c09006f6f00002f003811271111110d0d0d55554d090909001100
4d0900050505000d00e2e200000900000500626200a6a6a6a2a2a29e9e9e8484
00fb00fbd5d500801100800d00ea00ea555500a6a600e600e6f7f700e200e233
0500888888d900d9848484c01a007777003c3c05c8c8008080804409007c7c7c
bb00bbaa00aaa600a61e09056262629e009e9a009af322005e5e5e05050000ee
005a5a5adddd00a616008d008d00e20016050027270088110078780000c40078
00787300736f006f44444400aa00c81e004040406600663c3c3c090000550055
1a1a00343434d91e000084004d004d007c004500453c3c00ea1e00222222113c
113300331e1e1efb22001a1a1a004400afaf00270027003c001616161e001e0d
160d2f2f00808000001e00d1d1001100110d000db7b7b7090009050005b3b3b3
6d34c4230000000174524e530040e6d86600000001624b474402660b7c640000
01f249444154789c6360c0048c8c58049100575f215ee92e6161ef109cd2a15e
4b9645ce5d2c8f433aa4c24f3cbd4c98833b2314ab74a186f094b9c2c27571d2
6a2a58e4253c5cda8559057a392363854db4d9d0641973660b0b0bb76bb16656
06970997256877a07a95c75a1804b2fbcd128c80b482a0b0300f8a824276a9a8
ec6e61612b3e57ee06fbf0009619d5fac846ac5c60ed20e754921625a2daadc6
1967e29e97d2239c8aec7e61fdeca9cecebef54eb36c848517164514af16169e
866444b2b0b7b55534c815cc2ec22d89cd1353800a8473100a4485852d924a6a
412adc74e7ad1016ceed043267238c901716f633a812022998a4072267c4af02
92127005c0f811b62830054935ce017b38bf0948cc5c09955f030a24617d9d46
63371fd940b0827931cbfdf4956076ac018b592f72d45594a9b1f307f3261b1a
084bc2ad50018b1900719ba6ba4ca325d0427d3f6161449486f981144cf3100e
2a5f2a1ce8683e4ddf1b64275240c8438d98af0c729bbe07982b8a1c94201dc2
b3174c9820bcc06201585ad81b25b64a2146384e3798290c05ad280a18c0a62e
e898260c07fca80a24c076cc864b777131a00190cdfa3069035eccbc038c30e1
3e88b46d16b6acc5380d6ac202511c392f4b789aa7b0b08718765990111606c2
9e854c38e5191878fbe471e749b0112bb18902008dc473b2b2e8e72700000000
49454e44ae426082
"""),
'Tp2n3p08': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000300504c544502ffff80ff05ff
7f0703ff7f0180ff04ff00ffff06ff000880ff05ff7f07ffff06ff000804ff00
0180ff02ffff03ff7f02ffff80ff0503ff7f0180ffff0008ff7f0704ff00ffff
06ff000802ffffff7f0704ff0003ff7fffff0680ff050180ff04ff000180ffff
0008ffff0603ff7f80ff05ff7f0702ffffff000880ff05ffff0603ff7f02ffff
ff7f070180ff04ff00ffff06ff000880ff050180ffff7f0702ffff04ff0003ff
7fff7f0704ff0003ff7f0180ffffff06ff000880ff0502ffffffff0603ff7fff
7f0702ffff04ff000180ff80ff05ff0008ff7f07ffff0680ff0504ff00ff0008
0180ff03ff7f02ffff02ffffffff0604ff0003ff7f0180ffff000880ff05ff7f
0780ff05ff00080180ff02ffffff7f0703ff7fffff0604ff00ff7f07ff0008ff
ff0680ff0504ff0002ffff0180ff03ff7fff0008ffff0680ff0504ff000180ff
02ffff03ff7fff7f070180ff02ffff04ff00ffff06ff0008ff7f0780ff0503ff
7fffff06ff0008ff7f0780ff0502ffff03ff7f0180ff04ff0002ffffff7f07ff
ff0604ff0003ff7fff00080180ff80ff05ffff0603ff7f0180ffff000804ff00
80ff0502ffffff7f0780ff05ffff0604ff000180ffff000802ffffff7f0703ff
7fff0008ff7f070180ff03ff7f02ffff80ff05ffff0604ff00ff0008ffff0602
ffff0180ff04ff0003ff7f80ff05ff7f070180ff04ff00ff7f0780ff0502ffff
ff000803ff7fffff0602ffffff7f07ffff0680ff05ff000804ff0003ff7f0180
ff02ffff0180ffff7f0703ff7fff000804ff0080ff05ffff0602ffff04ff00ff
ff0603ff7fff7f070180ff80ff05ff000803ff7f0180ffff7f0702ffffff0008
04ff00ffff0680ff0503ff7f0180ff04ff0080ff05ffff06ff000802ffffff7f
0780ff05ff0008ff7f070180ff03ff7f04ff0002ffffffff0604ff00ff7f07ff
000880ff05ffff060180ff02ffff03ff7f80ff05ffff0602ffff0180ff03ff7f
04ff00ff7f07ff00080180ffff000880ff0502ffff04ff00ff7f0703ff7fffff
06ff0008ffff0604ff00ff7f0780ff0502ffff03ff7f0180ffdeb83387000000
f874524e53000000000000000008080808080808081010101010101010181818
1818181818202020202020202029292929292929293131313131313131393939
393939393941414141414141414a4a4a4a4a4a4a4a52525252525252525a5a5a
5a5a5a5a5a62626262626262626a6a6a6a6a6a6a6a73737373737373737b7b7b
7b7b7b7b7b83838383838383838b8b8b8b8b8b8b8b94949494949494949c9c9c
9c9c9c9c9ca4a4a4a4a4a4a4a4acacacacacacacacb4b4b4b4b4b4b4b4bdbdbd
bdbdbdbdbdc5c5c5c5c5c5c5c5cdcdcdcdcdcdcdcdd5d5d5d5d5d5d5d5dedede
dededededee6e6e6e6e6e6e6e6eeeeeeeeeeeeeeeef6f6f6f6f6f6f6f6b98ac5
ca0000012c49444154789c6360e7169150d230b475f7098d4ccc28a96ced9e32
63c1da2d7b8e9fb97af3d1fb8f3f18e8a0808953544a4dd7c4c2c9233c2621bf
b4aab17fdacce5ab36ee3a72eafaad87efbefea68702362e7159652d031b07cf
c0b8a4cce28aa68e89f316aedfb4ffd0b92bf79fbcfcfe931e0a183904e55435
8decdcbcc22292b3caaadb7b27cc5db67af3be63e72fdf78fce2d31f7a2860e5
119356d037b374f10e8a4fc92eaa6fee99347fc9caad7b0f9ebd74f7c1db2fbf
e8a180995f484645dbdccad12f38363dafbcb6a573faeca5ebb6ed3e7ce2c29d
e76fbefda38702063e0149751d537b67ff80e8d4dcc29a86bea97316add9b0e3
c0e96bf79ebdfafc971e0a587885e515f58cad5d7d43a2d2720aeadaba26cf5a
bc62fbcea3272fde7efafac37f3a28000087c0fe101bc2f85f0000000049454e
44ae426082
"""),
'tbbn1g04': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000000274524e530007e8f7589b00
000002624b47440000aa8d23320000013e49444154789c55d1cd4b024118c7f1
efbe6419045b6a48a72d352808b435284f9187ae9b098627a1573a19945beba5
e8129e8222af11d81e3a4545742de8ef6af6d5762e0fbf0fc33c33f36085cb76
bc4204778771b867260683ee57e13f0c922df5c719c2b3b6c6c25b2382cea4b9
9f7d4f244370746ac71f4ca88e0f173a6496749af47de8e44ba8f3bf9bdfa98a
0faf857a7dd95c7dc8d7c67c782c99727997f41eb2e3c1e554152465bb00fe8e
b692d190b718d159f4c0a45c4435915a243c58a7a4312a7a57913f05747594c6
46169866c57101e4d4ce4d511423119c419183a3530cc63db88559ae28e7342a
1e9c8122b71139b8872d6e913153224bc1f35b60e4445bd4004e20ed6682c759
1d9873b3da0fbf50137dc5c9bde84fdb2ec8bde1189e0448b63584735993c209
7a601bd2710caceba6158797285b7f2084a2f82c57c01a0000000049454e44ae
426082
"""),
'tbrn2c08': _dehex("""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000000674524e53007f007f007f8a
33334f00000006624b474400ff0000000033277cf3000004d649444154789cad
965f68537714c73fd912d640235e692f34d0406fa0c1663481045ab060065514
56660a295831607df0a1488715167060840a1614e6431e9cb34fd2c00a762c85
f6a10f816650c13b0cf40612e1822ddc4863bd628a8924d23d6464f9d3665dd9
f7e977ce3dbff3cd3939bfdfef6bb87dfb364782dbed065ebe7cd93acc78b4ec
a228debd7bb7bfbfbfbbbbfb7f261045311a8d261209405194274f9ea4d3e916
f15f1c3eb5dd6e4fa5fecce526239184a2b0b8486f6f617171b1f5ae4311381c
8e57af5e5dbd7a351088150a78bd389d44222c2f93cdfe66b7db8f4ee07038b6
b6b6bebf766d7e7e7e60a06432313b4ba984c3c1c4049a46b95c5a58583822c1
dbb76f27272733d1b9df853c3030c0f232562b9108cf9eb1b888d7cbf030abab
31abd5fa1f08dc6ef7e7cf9f1f3f7e1c8944745d4f1400c62c001313acad21cb
b8dd2c2c603271eb1640341aad4c6d331aa7e8c48913a150a861307ecc11e964
74899919bc5e14e56fffc404f1388502f178dceff7ef4bf0a5cfe7abb533998c
e5f9ea2f1dd88c180d64cb94412df3dd57e83a6b3b3c7a84c98420100c72fd3a
636348bae726379fe69e8e8d8dbd79f3a6558b0607079796965256479b918085
7b02db12712b6181950233023f3f647494ee6e2e5ea45864cce5b8a7fe3acffc
3aebb22c2bd5d20e22d0757d7b7bbbbdbd3d94a313bed1b0aa3cd069838b163a
8d4c59585f677292d0b84d9a995bd337def3fe6bbe5e6001989b9b6bfe27ea08
36373781542ab56573248b4c5bc843ac4048c7ab21aa24ca00534c25482828a3
8c9ee67475bbaaaab22cb722c8e57240a150301a8d219de94e44534d7d90e885
87acb0e2c4f9800731629b6c5ee14a35a6b9887d2a0032994cb9cf15dbe59650
ff7b46a04c9a749e7cc5112214266cc65c31354d5b5d5d3d90209bcd5616a552
a95c2e87f2a659bd9ee01c2cd73964e438f129a6aa9e582c363838b80f81d7eb
5555b56a2a8ad2d9d7affd0409f8015c208013fea00177b873831b0282c964f2
783c1e8fa7582cee5f81a669b5e6eeeeaee58e8559b0c233d8843c7c0b963a82
34e94b5cb2396d7d7d7db22c8ba258fb0afd43f0e2c58b919191ba9de9b4d425
118329b0c3323c8709d02041b52b4ea7f39de75d2a934a2693c0a953a76a93d4
5d157ebf7f6565a5542a553df97c5e10045dd731c130b86113cc300cbd489224
08422a952a140a95788fc763b1d41558d7a2d7af5f5fb870a1d6a3aaaacd6603
18802da84c59015bd2e6897b745d9765b99a1df0f97c0daf74e36deaf7fbcd66
73ad2797cb89a2c839880188a2e8743a8bc5a22ccbba5e376466b3b9bdbdbd21
6123413a9d0e0402b51e4dd3bababa788eb022b85caeb6b6364551b6b7b76942
43f7f727007a7a7a04a1ee8065b3595fde2768423299ac1ec6669c3973e65004
c0f8f878ad69341a33994ced2969c0d0d0502412f9f8f163f3a7fd654b474787
288ad53e74757535df6215b85cae60302849d2410aecc037f9f2e5cbd5b5c160
680eb0dbede170381c0e7ff8f0a185be3b906068684892a4ca7a6f6faff69328
8ad3d3d3f7efdfdfdbdbfb57e96868a14d0d0643381c96242997cbe5f3794010
84603078fcf8f1d6496bd14a3aba5c2ea7d369341a5555b5582c8140e0fcf9f3
1b1b1b87cf4eeb0a8063c78e45a3d19e9e1ebfdfdf5a831e844655d18093274f
9e3d7bf6d3a74f3b3b3b47c80efc05ff7af28fefb70d9b0000000049454e44ae
426082
"""),
'basn6a16': _dehex("""
89504e470d0a1a0a0000000d494844520000002000000020100600000023eaa6
b70000000467414d41000186a031e8965f00000d2249444154789cdd995f6c1c
d775c67ff38fb34b724d2ee55a8e4b04a0ac87049100cab4dbd8c6528902cb4d
10881620592e52d4325ac0905bc98a94025e71fd622cb5065ac98a0c283050c0
728a00b6e542a1d126885cd3298928891d9a0444037e904434951d4b90b84b2f
c9dde1fcebc33977a95555348f411e16dfce9d3b77ee77eebde77ce78c95a669
0ad07c17009a13edd898b87dfb1fcb7d2b4d1bff217f33df80deb1e6267df0ff
c1e6e6dfafdf1f5a7fd30f9aef66b6d546dd355bf02c40662e3307f9725a96c6
744c3031f83782f171c148dbc3bf1774f5dad1e79d6f095a3f54d4fbec5234ef
d9a2f8d73afe4f14f57ef4f42def7b44f19060f06b45bddf1c5534d77fd922be
2973a15a82e648661c6e3240aa3612ead952b604bde57458894f29deaf133bac
13d2766f5227a4a3b8cf08da7adfd6fbd6bd8a4fe9dbb43d35e3dfa3f844fbf8
9119bf4f7144094fb56333abf8a86063ca106f94b3a3b512343765e60082097f
1bb86ba72439a653519b09f5cee1ce61c897d37eedf5553580ae60f4af8af33a
b14fd400b6a0f34535c0434afc0b3a9f07147527a5fa7ca218ff56c74d74dc3f
155cfd3325fc278acf2ae1cb4a539f5f9937c457263b0bd51234c732a300cdd1
cc1840f0aaff54db0e4874ed5a9b5d6d27d4bb36746d80de72baa877ff4b275a
d7895ed1897ea4139b5143fcbb1a62560da1ed9662aaed895ec78a91c18795b8
5e07ab4af8ba128e95e682e0728bf8f2e5ae815a091a53d902ac1920d8e05f06
589de8d8d66680789f4e454fb9d9ec66cd857af796ee2d902fa73fd5bba775a2
153580ae44705ed0d37647d15697cb8f14bfa3e3e8fdf8031d47af571503357c
f30d25acedcbbf135c9a35c49766ba07ab255859e8ec03684e66860182dff8f7
0304bff6ff1c20fc81b7afdd00a71475539a536e36bb5973a19e3b923b02bde5
e4efd4003ac170eb2d13fe274157afedbd82d6fb3a9a1e85e4551d47cf7078f8
9671fe4289ebf5f2bf08d63f37c4eb4773c55a0996efeefa0ca011671d8060ca
2f0004c7fcc300e166ef0240f825efe3361f106d57d423d0723f7acacd66376b
2ed47b7a7a7a205f4ef4ac4691e0aad9aa0d41cf13741c3580a506487574ddca
61a8c403c1863ebfbcac3475168b2de28b8b3d77544bb05ce92a02aceced3c0d
d0cc65ea371b201cf1c601c24dde1c4078cedbdeb60322f50126a019bf6edc9b
39e566b39b3517eaf97c3e0fbde5e4491d45bd74537145d155b476aa0176e868
c6abebf30dbd5e525c54ac8e18e2d56abeb756827a3d970358a97416019a6f64
f60004fdfe1580d5c98e618070cc1b05887eee7e0d209a70db7d8063029889b4
c620ead78d7b33a7dc6c76b3e6427ddddbebde867c393aa7845e5403e8ca794a
d0d6fb897af5f03525fe5782f5e7046bdaef468bf88d1debc6ab25583cd17310
6079b9ab0ba059c914018245bf076075b5a303200c3c1f209a733701444fbbaf
00c4134ebb016c5d0b23614c243701cdf875e3decce9349bddacb9505fbf7dfd
76e82d87736a00f5d2b5ffd4b7dce2719a4d25ae717ee153c1abef18e257cfad
7fa45682da48ef38c052b53b0fd06864b300c151ff08c0ea431de701a287dd5f
004497dc7b01a253ee3e80b8c7f91c20f967fb6fdb7c80ada7d8683723614c24
3701cdf875e3decc29379bddacb950ef3fd47f08f2e5a61ea4aa2a3eb757cd55
13345efcfa59c12b2f19e2578ef77fb75a82854ffbee01a83f977b11a031931d
040802df07082b5e11207cc17b1e209a770700e2df0a83e409fb7580f827c230
99b06fd901fb058d6835dacd481813c94d40337eddb83773cacd66376b2ed437
bebcf165e82d2f4e4beb7f3fa6e652c2d7ee10bc78c010bfb87fe3c95a09ae9f
bd732740bd2fb700d0f865f64180e059ff044018ca0ca28a5b04883f701e0088
bfec7c0c909cb71f0448c6ec518074b375012079d9dedf66004bcfbc51eb2dd1
aadacd481813c94d40337eddb83773cacd66376b2ed487868686205fbe7c49ef
5605a73f34c4a7a787eeab96e0da81bb4e022c15ba27019a5b339300e16bf286
a8eae601e25866907cdf3e0890acb36f00245fb57f05904e59c300e92561946e
b2e600d209ab7d07f04d458dfb46ad1bd16ab49b913026929b8066fcba716fe6
949bcd6ed65ca8ef7e7cf7e3d05b7e7c8f217ee6cdddbb6a25a856f37980e0c7
fe4e80a82623c48193014846ec7180f4acf518409aca0cd28a5504e03b32c374
de1a00608a0240faaa327a4b19fe946fb6f90054dbb5f2333d022db56eb4966a
3723614c243701cdf8f556bea8a7dc6c76b3e66bd46584ddbbcebc0990cf4b0f
ff4070520c282338a7e26700ec725202b01e4bcf0258963c6f1d4d8f0030cb20
805549c520930c03584fa522b676f11600ffc03fde3e1b3489a9c9054c9aa23b
c08856a3dd8c843191dc0434e3d78d7b33a75c36fb993761f7ae5a69f72ef97f
e6ad336fed7e1c60e8bee96980bbdebbb60da07b7069062033d9dc0ae03d296f
70ab511ec071640676252902d833c916007b3e1900b0a6d2028035968e025861
ea01581369fb11488c34d18cbc95989afccca42baad65ba2d5683723614c24d7
8066fcbab8b7e96918baaf5aaa56219f975fb50a43f7c9bde90fa73f1c1a02d8
78f2e27e803b77ca08b90519315b6fe400fc1392097a9eccc0ad444500e70199
a1331f0f00d8934901c07e5d526ceb87c2d07e2579badd005a2b31a5089391b7
1253358049535a6add8856dd0146c298482e01ede27ed878b256ba7600ee3a09
c18fc1df09fe01084ec25defc1b56db0f1a4f4bd78e0e2818d2f0334e7330300
7df7c888b917e50dd9c1c60c80efcb0cbc63e1f700bce7c31700dccbd1060027
8add9b0de06c8e2f00d84962b7d7030e2a61538331b98051f92631bd253f336a
dd8856a3dd44c25c390efddfad96ae9f853b77c25201ba27c533b8bdf28b6ad0
3d084b33d2e7fa59099e9901b8f2d29597fa0f01848f78e70082117f1ca07b76
6910209b9519f895a008d031bbba05c09d8f06005c5b18b8fba25300cea6780e
c03e911c6ccf06d507b48a4fa606634a114609de929f9934c5a87511ad57cfc1
fa476aa5854fa1ef1e3910b905686e85cc24c40138198915f133d2d6dc2a7dea
7df2ccc2a752faf2cec1d577aebeb37e3b4034eeee0008dff3be0e6b923773b4
7904c0ef9119767cb4fa1500ef1361e08e452500f71561e84cc4ed3e20fab6a2
c905f40cb76a3026bf3319b91ac2e46792a6dcd801ebc6aba5da08f48ecb81c8
bd088d5f42f6417191de93908c803d0e76199292b485af41b60e8d9c3c537f0e
8211f0c7211a077707dc18b931b2ee6d80a4d7ae024491ebc24d4a708ff70680
7f25e807e8785f1878e322d6ddaf453f0770ff2dfa769b01423dbbad72a391b6
5a7c3235985629423372494cab55c8f7d64a8b27a0e7202c55a13b0f8d19c80e
4ae9ca3f015115dc3ca467c17a4c7ee95970ab10e5a54ff0ac3cd39881ee5958
1a84f03df0be0e492fd855a8d6aa35d10b4962dbb0a604a3d3ee5e80a8eee600
a24977f8660378bf0bbf00e01d0a8fb7f980f04b8aa6ce6aca8d5a7533c52753
839152c4e222f4dc512dd5eb90cbc981e8ea12cf90cd8a8bf47d89159e2741d3
7124f65b96fcd254dae258fa84a13c13043246a32129574787e49eae2b49b86d
c3e2e78b9ff7f4002415bb08907c66df0d103b4e0c104db90500ff70700c203a
ee1e82dba4c3e16e256c0acca6ceaae9afd1f612d7eb472157ac95962bd05594
7dd1598466053245088e827f44628657942a825b84e4fb601f84b4025611aca3
901e01bb024911dc0a4445f08e41f83df02b10142173149ab71baf027611ea95
7a257704201d14cd9af4d90b00f194530088cb4e09c0df1c5c0088f7393f6833
c0aa3ac156655de3bca9b34ab9716906ba07aba5e5bba1eb3358d90b9da7c533
64f6888bf47b60f521e8380fe10be03d2feac17900927560df40f4e48f805960
50328d648bf4893f9067c217a0631656b7c898c122847bc07b03a2d3e0ee85e4
33b0ef867450c4fad2ecd26cf7168074c0ba0c904cdac300c9cfec4701924df6
1cdca61e10685c6f7d52d0caba1498972f43d740adb4b2009d7d7220b20e3473
90a943d00ffe959bb6eac3e0fe42ea49ee00c45f06e76329b1dabf127d690d80
5581b408f63c2403e0cc433c00ee658836803b0fd100747c04ab5f917704fd10
d5c1cd41ec801343d207f602a403605d86e5f9e5f9ae0d00e994556833806685
c931fb709b0f08b4e869bea5c827859549e82c544b8d29c816a0390999613920
7e610d5727a16318c2003c1fa24be0de2b32caf92224e7c17e5004b6350c4c01
05601218066b0ad28224e149019c086257ca315102de2712903bde97b8144d82
3b2c6ac52d403c054e019249b087f53d0558995a99ea946c70cc927458b3c1ff
550f30050df988d4284376b4566a8e416654cc921985e037e0df0fc131f00f4b
acf0c6211c036f14a239703741740adc7da227edd7e56b833d0ae92549b4d357
25dfb49ed2ff63908e6adf27d6d0dda7638d4154d2778daca17f58e61297c129
41f233b01f5dc3740cac51688c35c6b22580f48224fee9b83502569a66b629f1
09f3713473413e2666e7fe6f6c6efefdfafda1f56f6e06f93496d9d67cb7366a
9964b6f92e64b689196ec6c604646fd3fe4771ff1bf03f65d8ecc3addbb5f300
00000049454e44ae426082
"""),
}
# NOTE(review): leading indentation appears to have been stripped from this
# copy of the module; the nesting below is inferred from statement order.
# All of the test_* helpers and the pattern table are local to test_suite().
def test_suite(options, args):
"""
Create a PNG test image and write the file to stdout.
"""
# Below is a big stack of test image generators.
# They're all really tiny, so PEP 8 rules are suspended.
# Each generator maps normalised coordinates (x, y) in [0, 1) to an
# intensity in [0, 1]; test_pattern() scales that up to the integer maxval.
def test_gradient_horizontal_lr(x, y):
return x
def test_gradient_horizontal_rl(x, y):
return 1 - x
def test_gradient_vertical_tb(x, y):
return y
def test_gradient_vertical_bt(x, y):
return 1 - y
def test_radial_tl(x, y):
return max(1 - math.sqrt(x * x + y * y), 0.0)
def test_radial_center(x, y):
return test_radial_tl(x - 0.5, y - 0.5)
def test_radial_tr(x, y):
return test_radial_tl(1 - x, y)
def test_radial_bl(x, y):
return test_radial_tl(x, 1 - y)
def test_radial_br(x, y):
return test_radial_tl(1 - x, 1 - y)
def test_stripe(x, n):
return float(int(x * n) & 1)
def test_stripe_h_2(x, y):
return test_stripe(x, 2)
def test_stripe_h_4(x, y):
return test_stripe(x, 4)
def test_stripe_h_10(x, y):
return test_stripe(x, 10)
def test_stripe_v_2(x, y):
return test_stripe(y, 2)
def test_stripe_v_4(x, y):
return test_stripe(y, 4)
def test_stripe_v_10(x, y):
return test_stripe(y, 10)
def test_stripe_lr_10(x, y):
return test_stripe(x + y, 10)
def test_stripe_rl_10(x, y):
return test_stripe(1 + x - y, 10)
def test_checker(x, y, n):
return float((int(x * n) & 1) ^ (int(y * n) & 1))
def test_checker_8(x, y):
return test_checker(x, y, 8)
def test_checker_15(x, y):
return test_checker(x, y, 15)
def test_zero(x, y):
return 0
def test_one(x, y):
return 1
# Short name -> generator mapping; these names are what the -R/-G/-B/-A/-K
# command line options accept.
test_patterns = {
'GLR': test_gradient_horizontal_lr,
'GRL': test_gradient_horizontal_rl,
'GTB': test_gradient_vertical_tb,
'GBT': test_gradient_vertical_bt,
'RTL': test_radial_tl,
'RTR': test_radial_tr,
'RBL': test_radial_bl,
'RBR': test_radial_br,
'RCTR': test_radial_center,
'HS2': test_stripe_h_2,
'HS4': test_stripe_h_4,
'HS10': test_stripe_h_10,
'VS2': test_stripe_v_2,
'VS4': test_stripe_v_4,
'VS10': test_stripe_v_10,
'LRS': test_stripe_lr_10,
'RLS': test_stripe_rl_10,
'CK8': test_checker_8,
'CK15': test_checker_15,
'ZERO': test_zero,
'ONE': test_one,
}
def test_pattern(width, height, bitdepth, pattern):
"""Create a single plane (monochrome) test pattern. Returns a
flat row flat pixel array.
"""
maxval = 2 ** bitdepth - 1
# 16-bit samples need an unsigned short array; 8-bit fits unsigned char.
if maxval > 255:
a = array('H')
else:
a = array('B')
fw = float(width)
fh = float(height)
pfun = test_patterns[pattern]
for y in range(height):
fy = float(y) / fh
for x in range(width):
a.append(int(round(pfun(float(x) / fw, fy) * maxval)))
return a
def test_rgba(size=256, bitdepth=8,
red="GTB", green="GLR", blue="RTL", alpha=None):
"""
Create a test image. Each channel is generated from the
specified pattern; any channel apart from red can be set to
None, which will cause it not to be in the image. It
is possible to create all PNG channel types (L, RGB, LA, RGBA),
as well as non PNG channel types (RGA, and so on).
"""
i = test_pattern(size, size, bitdepth, red)
psize = 1
# Interleave each additional requested channel into the flat array.
for channel in (green, blue, alpha):
if channel:
c = test_pattern(size, size, bitdepth, channel)
i = interleave_planes(i, c, psize, 1)
psize += 1
return i
def pngsuite_image(name):
"""
Create a test image by reading an internal copy of the files
from the PngSuite. Returned in flat row flat pixel format.
"""
if name not in _pngsuite:
raise NotImplementedError("cannot find PngSuite file %s (use -L for a list)" % name)
r = Reader(bytes=_pngsuite[name])
w, h, pixels, meta = r.asDirect()
# The embedded PngSuite images are all square.
assert w == h
# LAn for n < 8 is a special case for which we need to rescale
# the data.
if meta['greyscale'] and meta['alpha'] and meta['bitdepth'] < 8:
factor = 255 // (2 ** meta['bitdepth'] - 1)
def rescale(data):
for row in data:
yield map(factor.__mul__, row)
pixels = rescale(pixels)
meta['bitdepth'] = 8
arraycode = 'BH'[meta['bitdepth'] > 8]
return w, array(arraycode, itertools.chain(*pixels)), meta
# The body of test_suite()
size = 256
if options.test_size:
size = options.test_size
options.bitdepth = options.test_depth
options.greyscale = bool(options.test_black)
kwargs = {}
if options.test_red:
kwargs["red"] = options.test_red
if options.test_green:
kwargs["green"] = options.test_green
if options.test_blue:
kwargs["blue"] = options.test_blue
if options.test_alpha:
kwargs["alpha"] = options.test_alpha
# Greyscale (-K) is mutually exclusive with the colour channels.
if options.greyscale:
if options.test_red or options.test_green or options.test_blue:
raise ValueError("cannot specify colours (R, G, B) when greyscale image (black channel, K) is specified")
kwargs["red"] = options.test_black
kwargs["green"] = None
kwargs["blue"] = None
options.alpha = bool(options.test_alpha)
# With a positional argument, use a named PngSuite image instead of
# generating one from patterns.
if not args:
pixels = test_rgba(size, options.bitdepth, **kwargs)
else:
size, pixels, meta = pngsuite_image(args[0])
for k in ['bitdepth', 'alpha', 'greyscale']:
setattr(options, k, meta[k])
# Write the resulting PNG to stdout.
writer = Writer(size, size,
bitdepth=options.bitdepth,
transparent=options.transparent,
background=options.background,
gamma=options.gamma,
greyscale=options.greyscale,
alpha=options.alpha,
compression=options.compression,
interlace=options.interlace)
writer.write_array(sys.stdout, pixels)
def read_pam_header(infile):
    """Parse the remainder of a PAM header.

    *infile* should be positioned immediately after the initial 'P7' line
    (at the beginning of the second line).  Unlike PBM/PGM/PPM, PAM headers
    are line oriented, so we can read a line at a time.

    Returns a tuple ``('P7', width, height, depth, maxval)``, matching the
    return convention of `read_pnm_header`.
    """
    fields = {}
    while True:
        line = infile.readline().strip()
        if line == 'ENDHDR':
            break
        if line == '':
            raise EOFError('PAM ended prematurely')
        if line[0] == '#':
            # Comment line; ignore.
            continue
        parts = line.split(None, 1)
        key = parts[0]
        if key in fields:
            # Repeated keys accumulate, separated by a space.
            fields[key] = fields[key] + ' ' + parts[1]
        else:
            fields[key] = parts[1]
    required = ('WIDTH', 'HEIGHT', 'DEPTH', 'MAXVAL')
    if any(name not in fields for name in required):
        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
    width, height, depth, maxval = (int(fields[name]) for name in required)
    if min(width, height, depth, maxval) <= 0:
        raise Error(
            'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
    return 'P7', width, height, depth, maxval
def read_pnm_header(infile, supported=('P5', 'P6')):
    """Parse a PNM header, returning (format, width, height, depth, maxval).

    `width` and `height` are in pixels.  `depth` is the number of channels:
    synthesized as 1 for PBM/PGM and 3 for PPM; read from the header for PAM.
    `maxval` is synthesized (as 1) for PBM images.

    Generally, see http://netpbm.sourceforge.net/doc/ppm.html
    and http://netpbm.sourceforge.net/doc/pam.html
    """
    # Technically 'P7' must be followed by a newline; by using rstrip() we
    # are being liberal in what we accept.
    magic = infile.read(3).rstrip()
    if magic not in supported:
        raise NotImplementedError('file format %s not supported' % magic)
    if magic == 'P7':
        # PAM header parsing is completely different.
        return read_pam_header(infile)
    bitmap_formats = ('P1', 'P4')
    # PBM has no MAXVAL token, so the header holds one fewer field.
    expected_fields = 3 if magic in bitmap_formats else 4
    header = [magic]

    def next_char():
        ch = infile.read(1)
        if ch == '':
            raise Error('premature EOF reading PNM header')
        return ch

    # We have to read the rest of the header byte by byte: the whitespace
    # character terminating the final token (MAXVAL for P6) may not be a
    # newline, so readline() would be wrong even though real-world files
    # always use a newline there.
    ch = next_char()
    while True:
        # Skip whitespace that precedes a token.
        while ch.isspace():
            ch = next_char()
        # Skip comment lines.
        while ch == '#':
            while ch not in '\n\r':
                ch = next_char()
        if not ch.isdigit():
            raise Error('unexpected character %s found in header' % ch)
        # All header "tokens" are decimal integers; accumulate the digits.
        # (The spec even allows comments mid-token; we don't support that.)
        digits = ''
        while ch.isdigit():
            digits += ch
            ch = next_char()
        header.append(int(digits))
        if len(header) == expected_fields:
            break
    # Skip comments (again)
    while ch == '#':
        while ch not in '\n\r':
            ch = next_char()
    if not ch.isspace():
        raise Error('expected header to end with whitespace, not %s' % ch)
    if magic in bitmap_formats:
        # Synthesize a MAXVAL for PBM.
        header.append(1)
    depth = 3 if magic == 'P6' else 1
    return header[0], header[1], header[2], depth, header[3]
# NOTE(review): Python 2 code -- the header is written as a str while rows
# are written via struct.pack(); under Python 3 those are str vs bytes and
# cannot both go to the same file object without changes.
def write_pnm(file, width, height, pixels, meta):
"""Write a Netpbm PNM/PAM file."""
bitdepth = meta['bitdepth']
maxval = 2 ** bitdepth - 1
# Rudely, the number of image planes can be used to determine
# whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
planes = meta['planes']
# Can be an assert as long as we assume that pixels and meta came
# from a PNG file.
assert planes in (1, 2, 3, 4)
if planes in (1, 3):
if 1 == planes:
# PGM
# Could generate PBM if maxval is 1, but we don't (for one
# thing, we'd have to convert the data, not just blat it
# out).
fmt = 'P5'
else:
# PPM
fmt = 'P6'
file.write('%s %d %d %d\n' % (fmt, width, height, maxval))
if planes in (2, 4):
# PAM
# See http://netpbm.sourceforge.net/doc/pam.html
if 2 == planes:
tupltype = 'GRAYSCALE_ALPHA'
else:
tupltype = 'RGB_ALPHA'
file.write('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
'TUPLTYPE %s\nENDHDR\n' %
(width, height, planes, maxval, tupltype))
# Values per row
vpr = planes * width
# struct format: big-endian, one unsigned short/char per sample.
fmt = '>%d' % vpr
if maxval > 0xff:
fmt = fmt + 'H'
else:
fmt = fmt + 'B'
for row in pixels:
file.write(struct.pack(fmt, *row))
file.flush()
def color_triple(color):
    """Convert a command line colour value to a RGB triple of integers.

    Accepts '#RGB' (4-bit components), '#RRGGBB' (8-bit) and
    '#RRRRGGGGBBBB' (16-bit); returns None for any other input.
    FIXME: Somewhere we need support for greyscale backgrounds etc.
    """
    if not color.startswith('#'):
        return None
    digits = color[1:]
    # One, two or four hex digits per component.
    if len(digits) == 3:
        width = 1
    elif len(digits) == 6:
        width = 2
    elif len(digits) == 12:
        width = 4
    else:
        return None
    return tuple(
        int(digits[i:i + width], 16) for i in range(0, 3 * width, width)
    )
# NOTE(review): leading indentation appears stripped from this copy; nesting
# below is inferred from statement order.  This module is Python 2 ("print
# name" below; map() used as a list).
def _main(argv):
"""
Run the PNG encoder with options from the command line.
"""
# Parse command line arguments
from optparse import OptionParser
import re
version = '%prog ' + re.sub(r'( ?\$|URL: |Rev:)', '', __version__)
parser = OptionParser(version=version)
parser.set_usage("%prog [options] [imagefile]")
parser.add_option('-r', '--read-png', default=False,
action='store_true',
help='Read PNG, write PNM')
parser.add_option("-i", "--interlace",
default=False, action="store_true",
help="create an interlaced PNG file (Adam7)")
parser.add_option("-t", "--transparent",
action="store", type="string", metavar="color",
help="mark the specified colour (#RRGGBB) as transparent")
parser.add_option("-b", "--background",
action="store", type="string", metavar="color",
help="save the specified background colour")
parser.add_option("-a", "--alpha",
action="store", type="string", metavar="pgmfile",
help="alpha channel transparency (RGBA)")
parser.add_option("-g", "--gamma",
action="store", type="float", metavar="value",
help="save the specified gamma value")
parser.add_option("-c", "--compression",
action="store", type="int", metavar="level",
help="zlib compression level (0-9)")
parser.add_option("-T", "--test",
default=False, action="store_true",
help="create a test image (a named PngSuite image if an argument is supplied)")
parser.add_option('-L', '--list',
default=False, action='store_true',
help="print list of named test images")
parser.add_option("-R", "--test-red",
action="store", type="string", metavar="pattern",
help="test pattern for the red image layer")
parser.add_option("-G", "--test-green",
action="store", type="string", metavar="pattern",
help="test pattern for the green image layer")
parser.add_option("-B", "--test-blue",
action="store", type="string", metavar="pattern",
help="test pattern for the blue image layer")
parser.add_option("-A", "--test-alpha",
action="store", type="string", metavar="pattern",
help="test pattern for the alpha image layer")
parser.add_option("-K", "--test-black",
action="store", type="string", metavar="pattern",
help="test pattern for greyscale image")
parser.add_option("-d", "--test-depth",
default=8, action="store", type="int",
metavar='NBITS',
help="create test PNGs that are NBITS bits per channel")
parser.add_option("-S", "--test-size",
action="store", type="int", metavar="size",
help="width and height of the test image")
(options, args) = parser.parse_args(args=argv[1:])
# Convert options
if options.transparent is not None:
options.transparent = color_triple(options.transparent)
if options.background is not None:
options.background = color_triple(options.background)
# -L: just list the embedded PngSuite image names and return.
if options.list:
names = list(_pngsuite)
names.sort()
for name in names:
print name
return
# Run regression tests
if options.test:
return test_suite(options, args)
# Prepare input and output files
if len(args) == 0:
infilename = '-'
infile = sys.stdin
elif len(args) == 1:
infilename = args[0]
infile = open(infilename, 'rb')
else:
parser.error("more than one input file")
outfile = sys.stdout
if options.read_png:
# Encode PNG to PPM
png = Reader(file=infile)
width, height, pixels, meta = png.asDirect()
write_pnm(outfile, width, height, pixels, meta)
else:
# Encode PNM to PNG
format, width, height, depth, maxval = \
read_pnm_header(infile, ('P5', 'P6', 'P7'))
# When it comes to the variety of input formats, we do something
# rather rude. Observe that L, LA, RGB, RGBA are the 4 colour
# types supported by PNG and that they correspond to 1, 2, 3, 4
# channels respectively. So we use the number of channels in
# the source image to determine which one we have. We do not
# care about TUPLTYPE.
greyscale = depth <= 2
pamalpha = depth in (2, 4)
# Python 2: map() returns a list, so .index() below works.
supported = map(lambda x: 2 ** x - 1, range(1, 17))
try:
mi = supported.index(maxval)
except ValueError:
raise NotImplementedError(
'your maxval (%s) not in supported list %s' %
(maxval, str(supported)))
bitdepth = mi + 1
writer = Writer(width, height,
greyscale=greyscale,
bitdepth=bitdepth,
interlace=options.interlace,
transparent=options.transparent,
background=options.background,
alpha=bool(pamalpha or options.alpha),
gamma=options.gamma,
compression=options.compression)
if options.alpha:
pgmfile = open(options.alpha, 'rb')
format, awidth, aheight, adepth, amaxval = \
read_pnm_header(pgmfile, 'P5')
# NOTE(review): read_pnm_header() returns maxval as an int (it does
# header.append(int(token))), so comparing against the string '255'
# is always true, rejecting every alpha file.  Looks like a bug;
# should probably be "amaxval != 255" -- confirm before changing.
if amaxval != '255':
raise NotImplementedError(
'maxval %s not supported for alpha channel' % amaxval)
if (awidth, aheight) != (width, height):
raise ValueError("alpha channel image size mismatch"
" (%s has %sx%s but %s has %sx%s)"
% (infilename, width, height,
options.alpha, awidth, aheight))
writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
else:
writer.convert_pnm(infile, outfile)
# Script entry point.  Python 2 syntax: "except Error, e" and the chevron
# print; this module predates Python 3.
if __name__ == '__main__':
try:
_main(sys.argv)
except Error, e:
print >> sys.stderr, e
| isc | 0408251e12a21018fa70c52861e30d1d | 39.398424 | 117 | 0.640678 | 3.218038 | false | true | false | false |
mozilla-services/tecken | tecken/useradmin/management/commands/superuser.py | 1 | 1836 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from mozilla_django_oidc.utils import import_from_settings
from mozilla_django_oidc.auth import default_username_algo
# NOTE(review): leading indentation appears stripped from this copy, so the
# nesting of the final "if user.is_superuser:" (sibling of the promote
# branch vs. inside it) is ambiguous -- confirm against upstream.
class Command(BaseCommand):
# Usage: ./manage.py superuser [email]; prompts for the email if omitted.
help = "Create or toggle an existing user being a superuser."
def add_arguments(self, parser):
# Optional positional email; nargs="?" makes it non-mandatory.
parser.add_argument("email", default=None, nargs="?")
def handle(self, *args, **options):
email = options["email"]
if not email:
email = input("Email: ").strip()
# Cheap sanity check only; not full address validation.
if " " in email or email.count("@") != 1:
raise CommandError(f"Invalid email {email!r}")
try:
user = User.objects.get(email__iexact=email)
except User.DoesNotExist:
# No such user: create one, deriving the username the same way the
# OIDC login flow would (mozilla_django_oidc username algorithm).
username_algo = import_from_settings("OIDC_USERNAME_ALGO", None)
if username_algo:
username = username_algo(email)
else:
username = default_username_algo(email)
user = User.objects.create(username=username, email=email)
# Login happens via OIDC, so no local password.
user.set_unusable_password()
self.stdout.write(self.style.WARNING("New user created"))
if user.is_superuser and user.is_staff:
self.stdout.write(self.style.WARNING(f"{email} already a superuser/staff"))
else:
user.is_superuser = True
user.is_staff = True
user.is_active = True
user.save()
if user.is_superuser:
self.stdout.write(
self.style.SUCCESS(f"{email} PROMOTED to superuser/staff")
)
mozilla-services/tecken | systemtests/bin/symbolicate.py | 1 | 5529 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Sends a stack for symbolication with a Symbols server using the symbolicate service
# API.
# Usage: ./bin/symbolicate.py FILE
import json
import os
import sys
import click
import jsonschema
import requests
def load_schema(path):
    """Load the JSON schema at *path* and sanity-check it.

    Raises jsonschema.SchemaError if the file is not itself a valid
    Draft 7 JSON schema; returns the parsed schema dict otherwise.
    """
    with open(path) as schema_file:
        schema = json.loads(schema_file.read())
    jsonschema.Draft7Validator.check_schema(schema)
    return schema
class RequestError(Exception):
    """Raised when the symbolication service responds with a non-200 status."""
def request_stack(url, payload, api_version, is_debug):
"""POST a symbolication payload to *url* and return the decoded JSON.

Prints request/response diagnostics when *is_debug* is true and raises
RequestError on any non-200 response.
"""
# NOTE(review): "teckent" looks like a typo for "tecken" -- confirm before
# changing, since the server may log/filter on this exact user agent.
headers = {"User-Agent": "teckent-systemtests"}
if api_version == 4:
# We have to add the version to the payload, so parse it, add it, and then
# unparse it.
payload["version"] = 4
# Extra kwargs for requests.post(); currently always empty.
options = {}
if is_debug:
headers["Debug"] = "true"
# NOTE(willkg): this triggers the Allow-Control-* CORS headers, but maybe we want to
# make the origin specifiable via the command line arguments
headers["Origin"] = "http://example.com"
resp = requests.post(url, headers=headers, json=payload, **options)
if is_debug:
click.echo(click.style(f"Response: {resp.status_code} {resp.reason}"))
for key, val in resp.headers.items():
click.echo(click.style(f"{key}: {val}"))
if resp.status_code != 200:
# The server returned something "bad", so print out the things that
# would be helpful in debugging the issue.
click.echo(
click.style(f"Error: Got status code {resp.status_code}", fg="yellow")
)
click.echo(click.style("Request payload:", fg="yellow"))
click.echo(payload)
click.echo(click.style("Response:", fg="yellow"))
click.echo(resp.content)
raise RequestError()
return resp.json()
# Root click command group; the "print" and "verify" subcommands below
# attach to it.
@click.group()
def symbolicate_group():
"""Symbolicate stack data."""
# "print" subcommand: read a stack payload (file argument or piped stdin),
# POST it to the symbolication API, and print the JSON response to stdout.
@symbolicate_group.command("print")
@click.option(
"--api-url",
default="https://symbolication.services.mozilla.com/symbolicate/v5",
help="The API url to use.",
)
@click.option(
"--api-version",
default=5,
type=int,
help="The API version to use; 4 or 5; defaults to 5.",
)
@click.option(
"--debug/--no-debug", default=False, help="Whether to include debug info."
)
@click.argument("stackfile", required=False)
@click.pass_context
def print_stack(ctx, api_url, api_version, debug, stackfile):
# Prefer piped stdin when no file argument was given.
if not stackfile and not sys.stdin.isatty():
data = click.get_text_stream("stdin").read()
else:
if not os.path.exists(stackfile):
raise click.BadParameter(
"Stack file does not exist.",
ctx=ctx,
param="stackfile",
param_hint="stackfile",
)
with open(stackfile) as fp:
data = fp.read()
if api_version not in [4, 5]:
raise click.BadParameter(
"Not a valid API version number. Must be 4 or 5.",
ctx=ctx,
param="api_version",
param_hint="api_version",
)
try:
payload = json.loads(data)
except json.decoder.JSONDecodeError as jde:
click.echo(f"Error: request is not valid JSON: {jde!r}\n{data!r}")
return
response_data = request_stack(api_url, payload, api_version, debug)
# Pretty-print only in debug mode.
if debug:
click.echo(json.dumps(response_data, indent=2))
else:
click.echo(json.dumps(response_data))
# "verify" subcommand: symbolicate a stack and validate the response against
# the JSON schema for the chosen API version; exits 1 on schema violations.
@symbolicate_group.command("verify")
@click.option(
"--api-url",
default="https://symbolication.services.mozilla.com/symbolicate/v5",
help="The API url to use.",
)
@click.option(
"--api-version",
default=5,
type=int,
help="The API version to use; 4 or 5; defaults to 5.",
)
@click.argument("stackfile", required=False)
@click.pass_context
def verify_symbolication(ctx, api_url, api_version, stackfile):
if not stackfile and not sys.stdin.isatty():
data = click.get_text_stream("stdin").read()
else:
if not os.path.exists(stackfile):
raise click.BadParameter(
"Stack file does not exist.",
ctx=ctx,
param="stackfile",
param_hint="stackfile",
)
with open(stackfile) as fp:
data = fp.read()
if api_version not in [4, 5]:
raise click.BadParameter(
"Not a valid API version number. Must be 4 or 5.",
ctx=ctx,
param="api_version",
param_hint="api_version",
)
if stackfile:
click.echo(click.style(f"Working on stackfile {stackfile} ...", fg="yellow"))
else:
click.echo(click.style("Working on stdin ...", fg="yellow"))
payload = json.loads(data)
# Always request debug output so the response headers get echoed too.
response_data = request_stack(api_url, payload, api_version, is_debug=True)
# Schema files live in the sibling "schemas/" directory of the repo,
# relative to the systemtests working directory.
path = os.path.abspath("../schemas/symbolicate_api_response_v%d.json" % api_version)
schema = load_schema(path)
try:
jsonschema.validate(response_data, schema)
click.echo(click.style(f"Response is valid v{api_version}!", fg="green"))
except jsonschema.exceptions.ValidationError as exc:
click.echo(json.dumps(response_data, indent=2))
click.echo(
click.style(f"Response is invalid v{api_version}! {exc!r}", fg="red")
)
ctx.exit(1)
if __name__ == "__main__":
symbolicate_group()
| mpl-2.0 | 7a0a1194c37e4767b9de1182fc128736 | 28.409574 | 88 | 0.613854 | 3.639895 | false | false | false | false |
mozilla-services/tecken | tecken/base/form_utils.py | 1 | 2013 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""Form-related utilities"""
import datetime
from django import forms
# Maps the comparison operators accepted in search queries to Django ORM
# lookup suffixes.
ORM_OPERATORS = {"<=": "lte", ">=": "gte", "=": "exact", "<": "lt", ">": "gt"}


def filter_form_dates(qs, form, keys):
    """Apply date filters from a cleaned form onto a queryset.

    For each key in *keys*, ``form.cleaned_data[key]`` is expected to hold a
    list of ``(operator, value)`` pairs.  A ``None`` value becomes an
    ``<key>__isnull`` filter; an exact match on a whole day (a date, or a
    datetime at midnight) is widened to the [day, day+1) range; everything
    else maps through ORM_OPERATORS.  Returns the filtered queryset.
    """
    for key in keys:
        for op, value in form.cleaned_data.get(key, []):
            if value is None:
                qs = qs.filter(**{f"{key}__isnull": True})
                continue
            whole_day = not isinstance(value, datetime.datetime) or (
                value.hour == 0 and value.minute == 0
            )
            if op == "=" and whole_day:
                # When querying on a specific day, match the whole day
                # instead of a single instant.
                next_day = value + datetime.timedelta(days=1)
                qs = qs.filter(**{f"{key}__gte": value, f"{key}__lt": next_day})
            else:
                if op == ">":
                    # Datetimes round-tripped through XHR lose microseconds
                    # (isoformat() drops them), so add one second to avoid
                    # matching the latest date itself.
                    value += datetime.timedelta(seconds=1)
                qs = qs.filter(**{f"{key}__{ORM_OPERATORS[op]}": value})
    return qs
class PaginationForm(forms.Form):
    """Validates an optional ``page`` query parameter, defaulting to 1."""

    page = forms.CharField(required=False)

    def clean_page(self):
        raw = self.cleaned_data["page"]
        try:
            page = int(raw or 1)
        except ValueError:
            raise forms.ValidationError(f"Not a number {raw!r}")
        # Clamp zero/negative page numbers to the first page.
        return max(page, 1)
| mpl-2.0 | aaef9f70b660deed2d1ca13e2d469369 | 34.946429 | 79 | 0.517139 | 4.385621 | false | false | false | false |
mozilla-services/tecken | tecken/tokens/models.py | 1 | 2095 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import datetime
import uuid
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import Permission, Group
from django.dispatch import receiver
def make_key():
    """Return a random 32-character lowercase hex token (UUID4-based)."""
    # Equivalent to uuid.uuid4().hex: the 128-bit value, zero-padded.
    return "%032x" % uuid.uuid4().int
def get_future():
    """Default token expiry: now plus TOKENS_DEFAULT_EXPIRATION_DAYS days."""
    lifetime = datetime.timedelta(days=settings.TOKENS_DEFAULT_EXPIRATION_DAYS)
    return timezone.now() + lifetime
class Token(models.Model):
    """API token tied to a user, with an expiry and a set of granted permissions."""

    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # 32-character hex key generated by make_key().
    key = models.CharField(max_length=32, default=make_key)
    expires_at = models.DateTimeField(default=get_future)
    permissions = models.ManyToManyField(Permission)
    notes = models.TextField(blank=True)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        permissions = (("manage_tokens", "Manage Your API Tokens"),)

    def __repr__(self):
        # Show only the first and last two characters of the key so reprs
        # can be logged without leaking the credential.
        # Fix: was `self.key[-2]` (a single character), asymmetric with
        # `self.key[:2]`; use a slice for the last two characters.
        return f"<{self.__class__.__name__} {self.key[:2]}...{self.key[-2:]}>"

    @property
    def is_expired(self):
        """True once the expiry timestamp has passed."""
        return self.expires_at < timezone.now()
@receiver(models.signals.m2m_changed, sender=Group.permissions.through)
def drop_permissions_on_group_change(sender, instance, action, **kwargs):
    """Strip permissions from API tokens when a group loses a permission.

    Connected to the ``m2m_changed`` signal of ``Group.permissions``, so it
    fires whenever a group's permission set is edited (admin or ORM).
    """
    if action == "post_remove":
        # A permission was removed from a group.
        # Every Token that had this permission needs to be re-evaluated
        # because, had the user created this token now, they might
        # no longer have access to that permission due to their
        # group memberships.
        permissions = Permission.objects.filter(id__in=kwargs["pk_set"])
        for permission in permissions:
            for token in Token.objects.filter(permissions=permission):
                # Permissions the token's owner still holds via group membership.
                user_permissions = Permission.objects.filter(group__user=token.user)
                # NOTE(review): `in` on a QuerySet issues one DB query per
                # token here — correct, but potentially chatty at scale.
                if permission not in user_permissions:
                    token.permissions.remove(permission)
| mpl-2.0 | 0c106185f407eeab9641a4f27a29204a | 36.410714 | 84 | 0.694511 | 3.998092 | false | false | false | false |
mozilla-iam/sso-dashboard | dashboard/oidc_auth.py | 1 | 6237 | import json
import logging
from josepy.jwk import JWK
from josepy.jws import JWS
"""Class that governs all authentication with open id connect."""
from flask_pyoidc.flask_pyoidc import OIDCAuthentication
logger = logging.getLogger(__name__)
class OpenIDConnect(object):
    """Thin wrapper that builds the flask_pyoidc authentication object."""

    def __init__(self, configuration):
        """Keep a reference to the OIDC configuration used to build the client."""
        self.oidc_config = configuration

    def client_info(self):
        """Return the client id/secret pair as the registration dict."""
        return {
            "client_id": self.oidc_config.client_id,
            "client_secret": self.oidc_config.client_secret,
        }

    def get_oidc(self, app):
        """Construct an OIDCAuthentication instance bound to *app*."""
        issuer = "https://{DOMAIN}".format(DOMAIN=self.oidc_config.OIDC_DOMAIN)
        return OIDCAuthentication(
            app,
            issuer=issuer,
            client_registration_info=self.client_info(),
            extra_request_args={"scope": ["openid", "profile"]},
        )
class tokenVerification(object):
    """Verifies a compact JWS against a public key and exposes its payload.

    Used for the login-error flow: the IdP hands back a signed token whose
    payload carries an error ``code`` plus context (connection, client, ...),
    and :meth:`error_message` renders a human-readable explanation.
    """

    def __init__(self, jws, public_key):
        self.jws = jws
        self.jws_data = {}  # populated by _verified() with the decoded payload
        self.public_key = public_key

    @property
    def verify(self):
        """True when the JWS signature checks out against the public key."""
        return self._verified()

    @property
    def data(self):
        """Decoded JWS payload (empty dict until verify is accessed)."""
        return self.jws_data

    @property
    def error_code(self):
        return self.jws_data.get("code", None)

    @property
    def preferred_connection_name(self):
        return self.jws_data.get("preferred_connection_name", "Unknown")

    @property
    def redirect_uri(self):
        return self.jws_data.get("redirect_uri", "https://sso.mozilla.com")

    def _get_connection_name(self, connection):
        """Map an auth0 connection identifier to a display name."""
        CONNECTION_NAMES = {
            "google-oauth2": "Google",
            "github": "GitHub",
            "firefoxaccounts": "Firefox Accounts",
            "Mozilla-LDAP-Dev": "LDAP",
            "Mozilla-LDAP": "LDAP",
            "email": "passwordless email",
        }
        # Unknown connections fall back to the raw identifier.
        return CONNECTION_NAMES.get(connection, connection)

    def _signed(self, jwk):
        """True when the JWS verifies against *jwk*."""
        if self.jws_obj.verify(jwk):
            return True
        else:
            return False

    def _verified(self):
        try:
            jwk = JWK.load(self.public_key)
            self.jws_obj = JWS.from_compact(self.jws)
            if self._signed(jwk) is False:
                logger.warning("The public key signature was not valid for jws {jws}".format(jws=self.jws))
                self.jws_data = json.loads(self.jws.payload)
                self.jws_data["code"] = "invalid"
                return False
            else:
                self.jws_data = json.loads(self.jws_obj.payload.decode())
                logger.info("Loaded JWS data.")
                self.jws_data["connection_name"] = self._get_connection_name(self.jws_data["connection"])
                return True
        except UnicodeDecodeError:
            return False

    def error_message(self):
        """Return an HTML-ish error string matching the payload's error code."""
        error_code = self.error_code
        if error_code == "githubrequiremfa":
            error_text = 'You must setup a security device ("MFA", "2FA") for your GitHub account in order to access \
                this service. Please follow the \
                <a href="https://help.github.com/articles/securing-your-account-with-two-factor-authentication-2fa/">\
                GitHub documentation\
                </a> to setup your device, then try logging in again.'
        elif error_code == "fxarequiremfa":
            error_text = 'Please <a href="https://support.mozilla.org/kb/secure-firefox-account-two-step-authentication">\
                secure your Firefox Account with two-step authentication</a>, \
                then try logging in again.\n<br/><br/>\n\
                If you have just setup your security device and you see this message, please log out of \
                <a href="https://accounts.firefox.com">Firefox Accounts</a> (click the "Sign out" button), then \
                log back in.'
        elif error_code == "notingroup":
            error_text = "Sorry, you do not have permission to access {client}. \
                Please contact eus@mozilla.com if you should have access.".format(
                client=self.data.get("client")
            )
        elif error_code == "accesshasexpired":
            error_text = "Sorry, your access to {client} has expired because you have not been actively using it. \
                Please request access again.".format(
                client=self.data.get("client")
            )
        elif error_code == "primarynotverified":
            # Bug fix: the original built this string as a bare expression and
            # never assigned error_text, causing UnboundLocalError on return.
            # Also fixed the "You primary" typo.
            error_text = "Your primary email address is not yet verified. Please verify your \
                email address with {connection_name} in order to use this service.".format(
                connection_name=self._get_connection_name(self.jws_data.get("connection", ""))
            )
        elif error_code == "incorrectaccount":
            error_text = "Sorry, you may not login using {connection_name}. \
                Instead, please use \
                {preferred_connection_name}.".format(
                connection_name=self._get_connection_name(self.jws_data.get("connection", "")),
                preferred_connection_name=self._get_connection_name(self.preferred_connection_name),
            )
        elif error_code == "aai_failed":
            error_text = "{client} requires you to setup additional security measures for your account, \
                such as enabling multi-factor authentication (MFA) or using a safer authentication method (such as a \
                Firefox Account login). You will not be able to login until this is \
                done.".format(
                client=self.data.get("client")
            )
        elif error_code == "staffmustuseldap":
            error_text = "Staff LDAP account holders are required to use their LDAP account to login. Please go back \
                and type your LDAP email address to login with your Staff account, instead of using \
                {connection_name}.".format(
                connection_name=self._get_connection_name(self.jws_data.get("connection", ""))
            )
        else:
            error_text = "Oye, something went wrong."
        return error_text
| mpl-2.0 | 0b3750d6c7810c8fc7938aa06f714003 | 41.719178 | 122 | 0.598204 | 4.141434 | false | false | false | false |
mozilla-iam/sso-dashboard | dashboard/vanity.py | 1 | 1312 | from flask import make_response
from flask import redirect
from flask import request
from dashboard.op import yaml_loader
class Router(object):
    """Registers vanity short-URLs on the Flask app and 301-redirects them."""

    def __init__(self, app, app_list):
        self.app = app
        # List of single-key dicts: {vanity_path: destination_url}.
        self.url_list = yaml_loader.Application(app_list.apps_yml).vanity_urls()

    def setup(self):
        """Register each vanity path, with and without a trailing slash."""
        for entry in self.url_list:
            for path in entry.keys():
                try:
                    self.app.add_url_rule(path, path, self.redirect_url)
                    self.app.add_url_rule(path + "/", path + "/", self.redirect_url)
                except Exception as e:
                    print(e)

    def redirect_url(self):
        """Redirect the current request to its configured destination."""
        # First path segment of the requested URL, e.g. "/shortname".
        vanity_url = "/" + request.url.split("/")[3]
        for entry in self.url_list:
            for key in entry.keys():
                if key != vanity_url:
                    continue
                resp = make_response(redirect(entry[vanity_url], code=301))
                # Forbid caching so vanity targets can be changed at any time.
                resp.headers["Cache-Control"] = (
                    "no-store, no-cache, must-revalidate, "
                    "post-check=0, pre-check=0, max-age=0"
                )
                resp.headers["Expires"] = "-1"
                return resp
| mpl-2.0 | e1b780eb598df1e62a70d858e6c2fdb0 | 33.526316 | 84 | 0.487805 | 4.087227 | false | false | false | false |
cdr-stats/cdr-stats | cdr_stats/import_cdr/models.py | 1 | 2834 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from __future__ import unicode_literals
from django.db import models
from postgres.fields import json_field
class CDRImport(models.Model):
"""
CDRImport table live on Database 'import_cdr'
Manually selecting a database for a QuerySet:
CDRImport.objects.using('import_cdr').all()
TODO: add documentation to explain all the fields
"""
id = models.AutoField(primary_key=True)
switch = models.CharField(max_length=80)
cdr_source_type = models.IntegerField(blank=True, null=True)
callid = models.CharField(max_length=80)
caller_id_number = models.CharField(max_length=80)
caller_id_name = models.CharField(max_length=80)
destination_number = models.CharField(max_length=80)
dialcode = models.CharField(max_length=10, blank=True)
state = models.CharField(max_length=5, blank=True)
channel = models.CharField(max_length=80, blank=True)
starting_date = models.DateTimeField()
duration = models.IntegerField()
billsec = models.IntegerField()
progresssec = models.IntegerField(blank=True, null=True)
answersec = models.IntegerField(blank=True, null=True)
waitsec = models.IntegerField(blank=True, null=True)
hangup_cause_id = models.IntegerField(blank=True, null=True)
hangup_cause = models.CharField(max_length=80, blank=True)
direction = models.IntegerField(blank=True, null=True)
country_code = models.CharField(max_length=3, blank=True)
accountcode = models.CharField(max_length=40, blank=True)
buy_rate = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
buy_cost = models.DecimalField(max_digits=12, decimal_places=5, blank=True, null=True)
sell_rate = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
sell_cost = models.DecimalField(max_digits=12, decimal_places=5, blank=True, null=True)
imported = models.BooleanField(default=False)
# Postgresql >= 9.4 Json field
extradata = json_field.JSONField(blank=True)
def __unicode__(self):
return '[%s] %s - dur:%d - hangup:%s' % \
(self.id, self.destination_number, self.duration, str(self.hangup_cause_id))
class Meta:
# Remove `managed = False` lines if you wish to allow Django to create, modify,
# and delete the table
# managed = False
verbose_name = "CDR Import"
verbose_name_plural = "CDRs Import"
db_table = 'cdr_import'
| mpl-2.0 | 4a55cc683311bd29b5f21cd4b1a958d7 | 39.485714 | 91 | 0.702188 | 3.426844 | false | false | false | false |
cdr-stats/cdr-stats | cdr_stats/realtime/views.py | 3 | 5910 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.decorators import login_required, permission_required
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.conf import settings
from django_lets_go.common_functions import getvar, ceil_strdate
from switch.models import Switch
from cdr.functions_def import get_switch_ip_addr
from cdr.forms import ConcurrentCallForm, SwitchForm
from cdr.decorators import check_user_detail
from datetime import datetime
from collections import defaultdict
import time
import logging
@permission_required('user_profile.concurrent_calls', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_concurrent_calls(request):
    """CDR view of concurrent calls

    **Attributes**:

        * ``template`` - cdr/graph_concurrent_calls.html
        * ``form`` - ConcurrentCallForm
        * ``mongodb_data_set`` - MONGO_CDRSTATS['CONC_CALL_AGG'] (map-reduce collection)

    **Logic Description**:

        get all concurrent call records from mongodb map-reduce collection for
        current date
    """
    logging.debug('CDR concurrent view start')
    now = datetime.today()
    from_date = now.strftime('%Y-%m-%d')
    # Default window: the whole of today.
    start_date = datetime(now.year, now.month, now.day, 0, 0, 0, 0)
    end_date = datetime(now.year, now.month, now.day, 23, 59, 59, 0)
    query_var = {}
    switch_id = 0
    form = ConcurrentCallForm(request.POST or None, initial={'from_date': from_date})
    logging.debug('CDR concurrent view with search option')
    if form.is_valid():
        # Narrow the window / switch according to the submitted search form.
        from_date = getvar(request, 'from_date')
        switch_id = getvar(request, 'switch_id')
        start_date = ceil_strdate(from_date, 'start')
        end_date = ceil_strdate(from_date, 'end')
        if switch_id and int(switch_id) != 0:
            query_var['switch_id'] = int(switch_id)
    # Mongo-style date-range filter over the aggregated documents.
    query_var['date'] = {'$gte': start_date, '$lt': end_date}
    if not request.user.is_superuser:  # not superuser
        query_var['accountcode'] = request.user.userprofile.accountcode
    xdata = []
    charttype = "stackedAreaChart"
    call_count_res = defaultdict(list)
    if query_var:
        # calls_in_day = mongodb.conc_call_agg.find(query_var).sort([('date', 1)])
        # NOTE(review): the MongoDB lookup is stubbed out with an empty dict,
        # so the chart renders empty — confirm whether this is intentional.
        calls_in_day = {}
        for d in calls_in_day:
            # convert date into timestamp value
            ts = time.mktime(d['date'].timetuple())
            tsint = int(ts * 1000)
            xdata.append(str(tsint))
            call_count_res[d['switch_id']].append(d['numbercall'])
    int_count = 1
    chartdata = {'x': xdata}
    extra_serie = {"tooltip": {"y_start": "", "y_end": " concurrent calls"},
                   "date_format": "%d %b %Y %I:%M:%S %p"}
    # One chart series (nameN/yN/extraN) per switch.
    for i in call_count_res:
        chartdata['name' + str(int_count)] = str(get_switch_ip_addr(i))
        chartdata['y' + str(int_count)] = call_count_res[i]
        chartdata['extra' + str(int_count)] = extra_serie
        int_count += 1
    logging.debug('CDR concurrent view end')
    data = {
        'form': form,
        'start_date': start_date,
        'chartdata': chartdata,
        'charttype': charttype,
        'chartcontainer': 'stacked_area_container',
        'chart_extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y %H:%S',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
    }
    return render_to_response('cdr/graph_concurrent_calls.html', data, context_instance=RequestContext(request))
@permission_required('user_profile.real_time_calls', login_url='/')
@check_user_detail('accountcode')
@login_required
def cdr_realtime(request):
    """Call realtime view

    **Attributes**:

        * ``template`` - cdr/realtime.html
        * ``form`` - SwitchForm
        * ``mongodb_collection`` - MONGO_CDRSTATS['CONC_CALL_AGG'] (map-reduce collection)

    **Logic Description**:

        get all call records from mongodb collection for
        concurrent analytics
    """
    logging.debug('CDR realtime view start')
    query_var = {}
    switch_id = 0
    list_switch = Switch.objects.all()
    form = SwitchForm(request.POST or None)
    if form.is_valid():
        # Optional filter on a single switch.
        switch_id = int(getvar(request, 'switch_id'))
        if switch_id and switch_id != 0:
            query_var['value.switch_id'] = switch_id
    # Window: the whole of today.
    now = datetime.now()
    start_date = datetime(now.year, now.month, now.day, 0, 0, 0, 0)
    end_date = datetime(now.year, now.month, now.day, 23, 59, 59, 999999)
    query_var['value.call_date'] = {'$gte': start_date, '$lt': end_date}
    if not request.user.is_superuser:  # not superuser
        query_var['value.accountcode'] = request.user.userprofile.accountcode
    # calls_in_day = mongodb.conc_call_agg.find(query_var).sort([('_id.g_Millisec', -1)])
    # NOTE(review): the MongoDB lookup is stubbed out with an empty dict,
    # so final_data is always empty — confirm whether this is intentional.
    calls_in_day = {}
    final_data = []
    for d in calls_in_day:
        # (timestamp-in-ms, max concurrent calls) tuples for the graph.
        dt = int(d['_id']['g_Millisec'])
        final_data.append((dt, int(d['value']['numbercall__max'])))
    logging.debug('Realtime view end')
    variables = {
        'form': form,
        'final_data': final_data,
        'list_switch': list_switch,
        # RGB triplets consumed by the template's graph styling.
        'colorgraph1': '180, 0, 0',
        'colorgraph2': '0, 180, 0',
        'colorgraph3': '0, 0, 180',
        'realtime_graph_maxcall': settings.REALTIME_Y_AXIS_LIMIT,
    }
    return render_to_response('cdr/graph_realtime.html', variables, context_instance=RequestContext(request))
| mpl-2.0 | 681f8596a8f0cb4d2c305c619786072a | 33.970414 | 112 | 0.619797 | 3.46831 | false | false | false | false |
cdr-stats/cdr-stats | cdr_stats/voip_billing/models.py | 1 | 14486 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.db import models
from django.utils.translation import ugettext_lazy as _
from country_dialcode.models import Prefix
from voip_gateway.models import Provider
from voip_billing.function_def import prefix_allowed_to_call
from django_lets_go.intermediate_model_base_class import Model
from django.db import connection
# Routing strategy choices for VoIPPlan.lcrtype:
#   0 = LCR (Least Cost Routing); 1 = LCD — presumably "Least Cost Dialing",
#   TODO confirm against upstream docs.
LCR_TYPE = (
    (0, 'LCR'),
    (1, 'LCD'),
)
class VoIPPlan(Model):
    """
    VoIPPlan

    VoIPPlans are associated to your clients, this defines the rate at which
    the VoIP calls are sold to your clients.
    A VoIPPlan is a collection of VoIPRetailPlans, you can have 1 or more
    VoIPRetailPlans associated to the VoIPPlan

    A client has a single VoIPPlan,
    VoIPPlan has many VoIPRetailPlans.
    VoIPRetailPlan has VoIPRetailRates

    The LCR system will route the VoIP via the lowest cost carrier.
    """
    # Internal unique plan name.
    name = models.CharField(unique=True, max_length=255, verbose_name=_('name'),
                            help_text=_("enter plan name"))
    # Customer-facing name.
    pubname = models.CharField(max_length=255, verbose_name=_('publish name'),
                               help_text=_("enter publish name"))
    # Routing strategy; see LCR_TYPE choices.
    lcrtype = models.IntegerField(choices=list(LCR_TYPE), verbose_name=_('LCR type'),
                                  help_text=_("select LCR type"))
    created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('date'))
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = u'voip_plan'
        verbose_name = _("VoIP plan")
        verbose_name_plural = _("VoIP plans")

    def __unicode__(self):
        # e.g. "[3] Default plan" (Python 2 display name).
        return '[%s] %s' % (self.id, self.name)
class BanPlan(models.Model):
    """
    BanPlan

    List of Ban Plan which are linked to VoIP Plan; prefixes attached to a
    ban plan (via BanPrefix) are blocked for the associated VoIP plans.
    """
    name = models.CharField(unique=True, max_length=255, verbose_name=_('name'),
                            help_text=_("enter ban plan name"))
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True)
    # M2M through the explicit VoIPPlan_BanPlan join table.
    voip_plan = models.ManyToManyField(VoIPPlan, through='VoIPPlan_BanPlan')

    class Meta:
        db_table = u'voipbilling_banplan'
        verbose_name = _("ban plan")
        verbose_name_plural = _("ban plans")

    def __unicode__(self):
        return "%s" % (self.name)
class VoIPPlan_BanPlan(models.Model):
    """
    VoIPPlan_BanPlan

    OnetoMany relationship between VoIPPlan & BanPlan (explicit M2M
    through-table).
    """
    # NOTE(review): related_name values containing spaces are unusual and
    # unusable as reverse accessors — confirm whether intentional.
    voipplan = models.ForeignKey(VoIPPlan, related_name='voip plan')
    banplan = models.ForeignKey(BanPlan, related_name='ban plan')
    created_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = u'voipplan_banplan'

    def __unicode__(self):
        return "%s" % (self.banplan)
class BanPrefix(models.Model):
    """
    BanPrefix

    Ban prefixes are linked to Ban plan & VoIP with these prefix
    will not be authorized to send.
    """
    ban_plan = models.ForeignKey(BanPlan, verbose_name=_('ban plan'), help_text=_("select ban plan"))
    prefix = models.ForeignKey(Prefix, verbose_name=_('prefix'), help_text=_("select prefix"))
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = u'voipbilling_ban_prefix'
        verbose_name = _("ban prefix")
        verbose_name_plural = _("ban prefixes")

    def __unicode__(self):
        return "%s" % (self.ban_plan)

    def prefix_with_name(self):
        """
        Return prefix with name
        on Ban Prefix Listing (changelist_view)
        """
        if self.prefix is None:
            return ""
        else:
            # e.g. "[34] - Spain"
            return "[%d] - %s" % (self.prefix.prefix, self.prefix.destination)
    prefix_with_name.short_description = _('prefix')
class VoIPRetailPlan(Model):
    """
    VoIPRetailPlan

    This contains the VoIPRetailRates to retail to the customer. these plans are
    associated to the VoIPPlan with a ManyToMany relation.
    It defines the costs at which we sell the VoIP calls to clients.

    VoIPRetailPlan will then contain a set of VoIPRetailRates which will define
    the cost of sending a VoIP call to each destination.

    The system can have several VoIPRetailPlans, but only the ones associated to
    the VoIPplan will be used by the client.
    """
    name = models.CharField(max_length=255, verbose_name=_('name'), help_text=_("enter plan name"))
    description = models.TextField(verbose_name=_('description'), null=True, blank=True,
                                   help_text=_("short description about Plan"))
    # Priority weight used when several plans cover the same prefix.
    metric = models.IntegerField(default=10, verbose_name=_('metric'), help_text=_("enter metric in digit"))
    created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('date'))
    updated_date = models.DateTimeField(auto_now=True)
    voip_plan = models.ManyToManyField(VoIPPlan, through='VoIPPlan_VoIPRetailPlan')

    class Meta:
        db_table = u'voip_retail_plan'
        verbose_name = _("retail plan")
        verbose_name_plural = _("retail plans")

    def __unicode__(self):
        return "%s" % (self.name)
class VoIPPlan_VoIPRetailPlan(models.Model):
    """
    VoIPPlan_VoIPRetailPlan

    ManytoMany relationship between VoIPPlan & VoIPRetailPlan
    (explicit through-table).
    """
    voipretailplan = models.ForeignKey(VoIPRetailPlan, related_name='VoIP Retail Plan')
    voipplan = models.ForeignKey(VoIPPlan, related_name='VoIP Plan')
    created_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = u'voipplan_voipretailplan'

    def __unicode__(self):
        return "%s" % (self.voipplan)
class VoIPRetailRate(models.Model):
    """
    VoIPRetailRate

    A single VoIPRetailRate consists of a retail rate and prefix at which you
    want to use to sell a VoIP Call to a particular destination.
    VoIPRetailRates are grouped by VoIPRetailPlan, which will be then in turn be
    associated to a VoIPPlan
    """
    voip_retail_plan_id = models.ForeignKey(VoIPRetailPlan, db_column="voip_retail_plan_id",
                                            verbose_name=_("retail plan"),
                                            help_text=_("select retail plan"))
    prefix = models.ForeignKey(Prefix, db_column="prefix", verbose_name=_("prefix"),
                               help_text=_("select prefix"))
    # Selling price per unit for this prefix.
    retail_rate = models.DecimalField(max_digits=10, decimal_places=4, default=0, verbose_name=_("rate"),
                                      help_text=_("enter Rate"))
    created_date = models.DateTimeField(auto_now_add=True, verbose_name=_("date"))
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = u'voip_retail_rate'
        verbose_name = _("retail rate")
        verbose_name_plural = _("retail rates")

    def voip_retail_plan_name(self):
        """
        Return Retail Plan name
        on Retail Rate listing (changelist_view)
        """
        if self.voip_retail_plan_id is None:
            return ""
        else:
            return self.voip_retail_plan_id.name
    voip_retail_plan_name.short_description = _("retail plan")

    def prefix_with_name(self):
        """
        Return prefix with name
        on Retail Rate listing (changelist_view)
        """
        if self.prefix is None:
            return ""
        else:
            return "[%d] - %s" % (self.prefix.prefix, self.prefix.destination)
    prefix_with_name.short_description = _('prefix')
class VoIPCarrierPlan(Model):
    """
    VoIPCarrierPlan

    Once the retail price is defined by the VoIPPlan, VoIPRetailPlans and
    VoIPRetailRates, we also need to know which is the best route to send
    the VoIP how much it will cost, and which VoIP Gateway to use.

    VoIPCarrierPlan is linked to the VoIP Plan, so once we found how to sell
    the service to the client, we need to look at which carrier (Provider)
    we want to use, The VoIPCarrierPlan defines this.

    The system can have several VoIPCarrierPlans, but only the one associated to
    the VoIPRetailPlan-VoIPPlan will be used to connect the VoIP of
    the client.
    """
    name = models.CharField(max_length=255, verbose_name=_("name"),
                            help_text=_("enter plan name"))
    description = models.TextField(verbose_name=_("description"),
                                   null=True, blank=True,
                                   help_text=_("short description about Plan"))
    # Priority weight among carrier plans.
    metric = models.IntegerField(default=10, verbose_name=_("metric"),
                                 help_text=_("enter metric in digit"))
    # NOTE(review): verbose_name says "message sent" for a call counter —
    # presumably a copy/paste from an SMS project; confirm.
    callsent = models.IntegerField(null=True, blank=True,
                                   verbose_name=_("message sent"))
    voip_provider_id = models.ForeignKey(Provider, db_column="voip_provider_id",
                                         verbose_name=_("provider"),
                                         help_text=_("select provider"))
    created_date = models.DateTimeField(auto_now_add=True, verbose_name=_("date"))
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = u'voip_carrier_plan'
        verbose_name = _("carrier plan")
        verbose_name_plural = _("carrier plans")

    def __unicode__(self):
        return "%s" % (self.name)
class VoIPCarrierRate(models.Model):
    """
    VoIPCarrierRate

    The VoIPCarrierRates are a set of all the carrier rate and prefix that
    will be used to purchase the VoIP from your carrier,
    VoIPCarrierRates are grouped by VoIPCarrierPlan, which will be then
    associated to a VoIPRetailPlan
    """
    voip_carrier_plan_id = models.ForeignKey(VoIPCarrierPlan, db_column="voip_carrier_plan_id",
                                             verbose_name=_("carrier plan"),
                                             help_text=_("select carrier plan"))
    prefix = models.ForeignKey(Prefix, db_column="prefix", verbose_name=_("prefix"),
                               help_text=_("select prefix"))
    # Buying price per unit for this prefix.
    carrier_rate = models.DecimalField(max_digits=10, decimal_places=4, default=0, verbose_name=_("rate"),
                                       help_text=_("enter rate"))
    created_date = models.DateTimeField(auto_now_add=True, verbose_name=_("date"))
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = u'voip_carrier_rate'
        verbose_name = _("carrier rate")
        verbose_name_plural = _("carrier rates")

    def voip_carrier_plan_name(self):
        """
        Return Carrier Plan name
        on Carrier Rate listing (changelist_view)
        """
        if self.voip_carrier_plan_id is None:
            return ""
        else:
            return self.voip_carrier_plan_id.name
    voip_carrier_plan_name.short_description = _("carrier plan")

    def prefix_with_name(self):
        """
        Return prefix with name
        on Carrier Rate listing (changelist_view)
        """
        if self.prefix is None:
            return ""
        else:
            return "[%d] - %s" % (self.prefix.prefix, self.prefix.destination)
    prefix_with_name.short_description = _("prefix")
class VoIPPlan_VoIPCarrierPlan(models.Model):
    """
    VoIPPlan_VoIPCarrierPlan

    ManytoMany relationship between VoIPPlan & VoIPCarrierPlan
    (explicit through-table).
    """
    voipcarrierplan = models.ForeignKey(VoIPCarrierPlan, related_name='carrier plan')
    voipplan = models.ForeignKey(VoIPPlan, related_name='voip_plan')
    created_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = u'voipplan_voipcarrierplan'

    def __unicode__(self):
        return "%s" % (self.voipplan)
def find_rates(voipplan_id, dialcode, sort_field, order):
    """Return the cheapest retail rate per prefix for a VoIP plan.

    Parameters:
        voipplan_id -- pk of the VoIPPlan whose retail plans are searched.
        dialcode -- optional prefix filter, matched as "<dialcode>%".
        sort_field -- UI column name: 'prefix', 'retail_rate' or 'destination'.
        order -- sort direction appended to the ORDER BY clause.

    Returns a list of dicts with keys 'prefix', 'retail_rate' and
    'prefix__destination', excluding prefixes banned for the plan.
    """
    # Translate UI sort names into SQL expressions.
    extension_query = ''
    if sort_field == 'prefix':
        sort_field = 'voip_retail_rate.prefix'
    if sort_field == 'retail_rate':
        sort_field = 'minrate'
    if sort_field == 'destination':
        sort_field = 'dialcode_prefix.destination'
    if sort_field:
        # NOTE(review): sort_field/order are concatenated straight into the
        # SQL string — callers must only pass trusted, whitelisted values.
        extension_query = "ORDER BY " + sort_field + ' ' + order
    # Bug fix: the original created a cursor here and then a second,
    # redundant one a few lines later; a single cursor suffices.
    cursor = connection.cursor()
    if dialcode:
        sqldialcode = str(dialcode) + '%'
        sql_statement = (
            "SELECT voip_retail_rate.prefix, "
            "Min(retail_rate) as minrate, dialcode_prefix.destination "
            "FROM voip_retail_rate "
            "INNER JOIN voipplan_voipretailplan "
            "ON voipplan_voipretailplan.voipretailplan_id = "
            "voip_retail_rate.voip_retail_plan_id "
            "LEFT JOIN dialcode_prefix ON dialcode_prefix.prefix = "
            "voip_retail_rate.prefix "
            "WHERE voipplan_id=%s "
            "AND CAST(voip_retail_rate.prefix AS TEXT) LIKE %s "
            "GROUP BY voip_retail_rate.prefix, dialcode_prefix.destination "
            + extension_query)
        cursor.execute(sql_statement, [voipplan_id, sqldialcode])
    else:
        sql_statement = (
            "SELECT voip_retail_rate.prefix, "
            "Min(retail_rate) as minrate, dialcode_prefix.destination "
            "FROM voip_retail_rate "
            "INNER JOIN voipplan_voipretailplan "
            "ON voipplan_voipretailplan.voipretailplan_id = "
            "voip_retail_rate.voip_retail_plan_id "
            "LEFT JOIN dialcode_prefix ON dialcode_prefix.prefix = "
            "voip_retail_rate.prefix "
            "WHERE voipplan_id=%s "
            "GROUP BY voip_retail_rate.prefix, dialcode_prefix.destination "
            + extension_query)
        cursor.execute(sql_statement, [voipplan_id])
    row = cursor.fetchall()
    result = []
    for record in row:
        # Drop prefixes that are banned for this plan.
        if prefix_allowed_to_call(record[0], voipplan_id):
            result.append({
                'prefix': record[0],
                'retail_rate': record[1],
                'prefix__destination': record[2],
            })
    return result
| mpl-2.0 | 060033b94c32d4099ec97b61075a155d | 34.945409 | 108 | 0.621635 | 3.794133 | false | false | false | false |
cdr-stats/cdr-stats | cdr_stats/voip_billing/forms.py | 1 | 9868 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django import forms
from django.utils.translation import gettext_lazy as _
from django_lets_go.common_functions import isint
from voip_billing.function_def import get_list_rate_filter
from voip_billing.models import VoIPPlan, VoIPRetailPlan, VoIPCarrierPlan
from cdr.forms import sw_list_with_all, CdrSearchForm
from mod_utils.forms import Exportfile, common_submit_buttons
from bootstrap3_datetime.widgets import DateTimePicker
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, HTML
from crispy_forms.bootstrap import FormActions
# Yes/No choice pairs for confirmation <select> widgets in the billing forms.
CONFIRMATION_TYPE = (
    ('YES', _('Yes')),
    ('NO', _('No')),
)
def voip_plan_list():
    """Return (id, name) pairs for all VoIP plans, or [] if the lookup fails."""
    try:
        return VoIPPlan.objects.values_list('id', 'name').all()
    except Exception:
        # Narrowed from a bare `except:` which would also have swallowed
        # SystemExit/KeyboardInterrupt; best-effort behaviour is kept.
        return []
def carrier_plan_list():
    """Return (id, name) pairs for all carrier plans, or [] if the lookup fails."""
    try:
        return VoIPCarrierPlan.objects.values_list('id', 'name').all()
    except Exception:
        # Narrowed from a bare `except:` which would also have swallowed
        # SystemExit/KeyboardInterrupt; best-effort behaviour is kept.
        return []
def retail_plan_list():
    """Return (id, name) pairs for all retail plans, or [] if the lookup fails."""
    try:
        return VoIPRetailPlan.objects.values_list('id', 'name').all()
    except Exception:
        # Narrowed from a bare `except:` which would also have swallowed
        # SystemExit/KeyboardInterrupt; best-effort behaviour is kept.
        return []
class FileImport(forms.Form):
    """
    General Form : CSV file upload
    """
    csv_file = forms.FileField(label=_("upload CSV File "), required=True,
                               error_messages={'required': 'please upload File'},
                               help_text=_("browse CSV file"))

    def clean_file(self):
        """Validate the uploaded file's extension (csv/txt only).

        Bug fix: the original used ``split(".")[1]`` which raised IndexError
        for names without a dot and picked the wrong segment for multi-dot
        names like ``rates.2015.csv``; ``rsplit(".", 1)[-1]`` takes the real
        extension and degrades gracefully when there is none.

        NOTE(review): Django only calls ``clean_<fieldname>`` hooks, so for
        the ``csv_file`` field this would need to be ``clean_csv_file`` to
        run automatically — confirm how callers invoke it.
        """
        filename = self.cleaned_data["csv_file"]
        file_exts = ("csv", "txt")
        if str(filename).rsplit(".", 1)[-1].lower() not in file_exts:
            raise forms.ValidationError(_(u'document types accepted: %s' % ' '.join(file_exts)))
        return filename
class RetailRate_fileImport(FileImport):
    """
    Admin Form : Import CSV file with Retail Plan
    """
    # Retail plan the imported rates will be attached to.
    plan_id = forms.ChoiceField(label=_("retail plan"), required=False,
                                help_text=_("select retail plan"))

    def __init__(self, *args, **kwargs):
        super(RetailRate_fileImport, self).__init__(*args, **kwargs)
        # Choices are loaded per-instance so newly created plans appear.
        self.fields['plan_id'].choices = retail_plan_list()
class CarrierRate_fileImport(FileImport):
    """
    Admin Form : Import CSV file with Carrier Plan
    """
    plan_id = forms.ChoiceField(label=_("carrier plan"), required=False,
                                help_text=_("select carrier plan"))
    # When checked, a retail plan is also generated from the carrier rates,
    # marked up by profit_percentage.
    chk = forms.BooleanField(label=_("make retail plan"), required=False,
                             help_text=_("select if you want to make retail plan"))
    retail_plan_id = forms.ChoiceField(label=_("retail plan"), required=False,
                                       help_text=_("select retail plan"))
    profit_percentage = forms.CharField(label=_("profit in % :"), required=False,
                                        widget=forms.TextInput(attrs={'size': 3}),
                                        help_text=_("enter digit without %"))

    def __init__(self, *args, **kwargs):
        super(CarrierRate_fileImport, self).__init__(*args, **kwargs)
        # Choices are loaded per-instance so newly created plans appear.
        self.fields['plan_id'].choices = carrier_plan_list()
        self.fields['retail_plan_id'].choices = retail_plan_list()

    def clean_profit_percentage(self):
        """
        Validation Check:
        Percentage Value must be in digit (int/float)
        """
        chk = self.cleaned_data["chk"]
        p_p = self.cleaned_data["profit_percentage"]
        if chk:
            # NOTE(review): isint() accepts integers only, although the error
            # message promises int/float — confirm the intended behaviour.
            if not isint(p_p):
                raise forms.ValidationError(_("please enter int/float value"))
            else:
                return p_p
        # NOTE(review): implicitly returns None when chk is unchecked.
class Carrier_Rate_fileExport(Exportfile):
    """
    Admin Form : Carrier Rate Export
    """
    plan_id = forms.ChoiceField(label=_("carrier plan").capitalize(), required=False)

    def __init__(self, *args, **kwargs):
        super(Carrier_Rate_fileExport, self).__init__(*args, **kwargs)
        # NOTE(review): fields.keyOrder is the pre-Django-1.7 field-ordering
        # API — confirm the Django version in use.
        self.fields.keyOrder = ['plan_id', 'export_to']
        self.fields['plan_id'].choices = carrier_plan_list()
class Retail_Rate_fileExport(Exportfile):
    """
    Admin Form : Retail Rate Export
    """
    plan_id = forms.ChoiceField(label=_("retail plan").capitalize(), required=False)

    def __init__(self, *args, **kwargs):
        super(Retail_Rate_fileExport, self).__init__(*args, **kwargs)
        # NOTE(review): fields.keyOrder is the pre-Django-1.7 field-ordering
        # API — confirm the Django version in use.
        self.fields.keyOrder = ['plan_id', 'export_to']
        self.fields['plan_id'].choices = retail_plan_list()
class VoIPPlan_fileExport(Exportfile):
    """
    Admin Form : VoIP Plan Export
    """
    plan_id = forms.ChoiceField(label=_("VoIP plan"), required=False,
                                help_text=_('this will export the VoIPPlan using LCR on each prefix-rate tuple'))

    def __init__(self, *args, **kwargs):
        super(VoIPPlan_fileExport, self).__init__(*args, **kwargs)
        # NOTE(review): fields.keyOrder is the pre-Django-1.7 field-ordering
        # API — confirm the Django version in use.
        self.fields.keyOrder = ['plan_id', 'export_to']
        self.fields['plan_id'].choices = voip_plan_list()
class PrefixRetailRateForm(forms.Form):
    """Client-facing form to look up the retail rate for a dialcode prefix."""
    prefix = forms.CharField(label=_("enter prefix").capitalize(),
                             widget=forms.TextInput(attrs={'size': 15}), required=False)

    def __init__(self, *args, **kwargs):
        super(PrefixRetailRateForm, self).__init__(*args, **kwargs)
        # crispy-forms layout: single column with inline search/clear buttons.
        helper = FormHelper()
        helper.form_class = 'well'
        column = 'col-md-4'
        helper.layout = Layout(
            Div(
                Div('prefix', css_class=column),
                css_class='row',
            ),
            FormActions(
                HTML('<button type="submit" id="id_submit" name="submit" class="btn btn-primary" value="submit">'
                     '<i class="fa fa-search fa-lg"></i> %s</button>'
                     '<a href="/rates/" class="btn btn-danger">%s</a>' % (_('search').title(), _('clear').title()))
            )
        )
        self.helper = helper
class SimulatorForm(forms.Form):
    """Admin/client form that simulates rating a call to a destination number."""
    destination_no = forms.CharField(label=_("destination").capitalize(), required=True,
                                     help_text=_('enter digit only'))
    plan_id = forms.ChoiceField(label=_("VoIP plan"), required=False)

    def __init__(self, user, *args, **kwargs):
        super(SimulatorForm, self).__init__(*args, **kwargs)
        self.fields['plan_id'].choices = voip_plan_list()
        self.fields.keyOrder = ['plan_id', 'destination_no', ]
        if not user.is_superuser:
            # Non-admins may not pick a plan; replace the field with a hidden one.
            self.fields['plan_id'] = forms.ChoiceField(widget=forms.HiddenInput())
        helper = FormHelper()
        helper.form_class = 'well'
        column = 'col-md-4'
        helper.layout = Layout(
            Div(
                Div('destination_no', css_class=column),
                Div('plan_id', css_class=column),
                css_class='row',
            ),
        )
        common_submit_buttons(helper.layout, 'search')
        self.helper = helper

    def clean_plan_id(self):
        """Ensure a real VoIP plan (non-zero id) was selected."""
        plan_id = int(self.cleaned_data['plan_id'])
        if not plan_id:
            raise forms.ValidationError("select VoIP Plan!!")
        return plan_id

    def clean_destination_no(self):
        """Reject destinations that are not purely numeric."""
        number = self.cleaned_data['destination_no']
        if not isint(number):
            raise forms.ValidationError("enter digit only!")
        return number
class CustomRateFilterForm(forms.Form):
    """Admin filter form: a comparison operator plus a rate value."""
    # Operator choices are built once at import time by get_list_rate_filter().
    rate_range = forms.ChoiceField(label=_(" "), choices=get_list_rate_filter(),
                                   required=False)
    rate = forms.CharField(label=_(" "), required=False,
                           widget=forms.TextInput(attrs={'size': 10}))
class BillingReportForm(forms.Form):
    """Admin form selecting the period and switch for the daily billing report."""
    from_date = forms.DateTimeField(label=_('from').capitalize(), required=True,
                                    widget=DateTimePicker(options={"format": "YYYY-MM-DD HH:mm", "pickSeconds": False}))
    to_date = forms.DateTimeField(label=_('to').capitalize(), required=True,
                                  widget=DateTimePicker(options={"format": "YYYY-MM-DD HH:mm", "pickSeconds": False}))
    switch_id = forms.ChoiceField(label=_('switch'), required=False)

    def __init__(self, *args, **kwargs):
        super(BillingReportForm, self).__init__(*args, **kwargs)
        self.fields['switch_id'].choices = sw_list_with_all()
        # Three-column crispy-forms layout with a search button.
        helper = FormHelper()
        helper.form_class = 'well'
        column = 'col-md-4'
        helper.layout = Layout(
            Div(
                Div('from_date', css_class=column),
                Div('to_date', css_class=column),
                Div('switch_id', css_class=column),
                css_class='row',
            ),
        )
        self.helper = helper
        common_submit_buttons(self.helper.layout, 'search')
class RebillForm(CdrSearchForm):
    """Admin form used to re-bill VoIP calls over a date range."""
    confirmation = forms.ChoiceField(choices=list(CONFIRMATION_TYPE), required=False)

    def __init__(self, *args, **kwargs):
        super(RebillForm, self).__init__(*args, **kwargs)
        # The confirmation value is driven by the view, not typed by the user.
        self.fields['confirmation'].widget = forms.HiddenInput()
        self.fields.keyOrder = ['from_date', 'to_date', 'confirmation']
| mpl-2.0 | f7ba448809ed55812971a64b8c177253 | 34.117438 | 120 | 0.583604 | 3.903481 | false | false | false | false |
cdr-stats/cdr-stats | cdr_stats/voip_billing/rate_engine.py | 3 | 4457 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from voip_billing.models import VoIPRetailPlan
from voip_billing.function_def import prefix_list_string
from collections import namedtuple
from cache_utils.decorators import cached
def calculate_call_cost(voipplan_id, dest_number, billsec):
    """
    Compute buy/sell per-minute rates and the resulting cost of a call,
    based on the VoIP plan and the destination. Costs are rounded to
    4 decimals; everything stays 0.0 when no rate matches.
    """
    matched = rate_engine(voipplan_id=voipplan_id, dest_number=dest_number)
    buy_rate = sell_rate = 0.0
    buy_cost = sell_cost = 0.0
    if matched:
        best = matched[0]
        buy_rate = float(best.carrier_rate)
        sell_rate = float(best.retail_rate)
        buy_cost = buy_rate * float(billsec) / 60
        sell_cost = sell_rate * float(billsec) / 60
    return {
        'buy_rate': buy_rate,
        'buy_cost': round(buy_cost, 4),
        'sell_rate': sell_rate,
        'sell_cost': round(sell_cost, 4),
    }
# Shape of a single rating result row; defined once at module level so the
# namedtuple class is not rebuilt on every call.
Rate = namedtuple('Rate', ['id', 'cpid', 'cr_prefix', 'rt_prefix', 'rrid', 'carrier_rate',
                           'retail_rate', 'crid', 'provider_id', 'gateway_id', 'sum_metric'])


def rate_engine(voipplan_id, dest_number):
    """
    Determine the cost of the voip call and get provider/gateway
    to use to deliver the call.

    :param voipplan_id: primary key of the VoIPPlan to rate against
    :param dest_number: dialed destination number
    :return: list containing at most one ``Rate`` namedtuple; empty when an
        argument is missing/falsy or no rate matches
    """
    # Guard clauses: nothing can be rated without both a plan and a number.
    if not voipplan_id or not dest_number:
        return []
    dest_prefix = prefix_list_string(str(dest_number))
    if not dest_prefix:
        return []
    rate_tuples = rate_call_prefix(voipplan_id, dest_prefix)
    rates = []
    if rate_tuples:
        rates.append(Rate(*rate_tuples))
    return rates
@cached(60)
def rate_call_prefix(voipplan_id, dest_prefix):
    """
    Find the single best (carrier, retail) rate pair for the given plan and
    list of destination prefixes.

    :param voipplan_id: VoIPPlan primary key
    :param dest_prefix: comma-separated candidate dialcode prefixes
    :return: an 11-value tuple matching the row aliases selected below, or
        ``None`` when no rate matches. Results are memoized for 60 seconds.
    """
    # Build SQL query to rate calls
    # NOTE(review): the query is assembled with %-interpolation, not bound
    # parameters. This is only safe as long as dest_prefix is a digits/comma
    # string produced by prefix_list_string() and voipplan_id is an integer —
    # verify before exposing this to any user-controlled code path.
    sql = 'SELECT rpid as id, cpid, cr_prefix, prefix as rt_prefix, rrid, \
        carrier_rate, retail_rate, crid, provider_id, gateway_id, sum_metric \
        FROM ( \
        SELECT DISTINCT allsellrates.*, \
        voip_carrier_rate.id AS crid, voip_carrier_rate.prefix AS cr_prefix, \
        voip_provider.id AS provider_id, voip_provider.gateway_id AS gateway_id, \
        voip_carrier_rate.carrier_rate,\
        (voip_carrier_plan.metric + allsellrates.metric + voip_provider.metric) AS sum_metric, \
        voip_carrier_plan.id AS cpid \
        FROM ( \
            SELECT DISTINCT voip_retail_plan.id AS rpid, \
            voip_retail_rate.prefix, voip_retail_rate.id AS rrid, \
            voip_retail_rate.retail_rate AS retail_rate, voip_retail_plan.metric AS metric \
            FROM voip_retail_rate, voip_retail_plan, voipplan_voipretailplan \
            WHERE voip_retail_rate.prefix IN (%s) \
            AND voip_retail_plan.id = voip_retail_rate.voip_retail_plan_id \
            AND voipplan_voipretailplan.voipplan_id = %s \
            AND voipplan_voipretailplan.voipretailplan_id = voip_retail_plan.id \
            ORDER BY prefix DESC, retail_rate ASC\
        ) AS allsellrates, dialcode_prefix,\
        voip_carrier_rate, \
        voipplan_voipcarrierplan, \
        voip_carrier_plan, voip_provider \
        WHERE voipplan_voipcarrierplan.voipplan_id = %s AND \
        voipplan_voipcarrierplan.voipcarrierplan_id = voip_carrier_plan.id AND \
        voip_carrier_rate.voip_carrier_plan_id = voip_carrier_plan.id AND \
        voip_carrier_rate.prefix IN (%s) AND \
        voip_carrier_plan.voip_provider_id = voip_provider.id\
        ORDER BY voip_carrier_plan.id, cr_prefix DESC, allsellrates.prefix DESC \
        ) AS bothrates \
        ORDER BY cr_prefix DESC, rt_prefix DESC, \
        sum_metric ASC, carrier_rate ASC, retail_rate ASC LIMIT 1' % \
        (str(dest_prefix), str(voipplan_id), str(voipplan_id), str(dest_prefix))
    qset = VoIPRetailPlan.objects.raw(sql)
    # raw() is lazy; iterating executes the query. LIMIT 1 above means the
    # loop body runs at most once, returning the single best-matching row.
    for i in qset:
        return (i.id, i.cpid, i.cr_prefix, i.rt_prefix, i.rrid, i.carrier_rate,
                i.retail_rate, i.crid, i.provider_id, i.gateway_id, i.sum_metric)
    # No matching rate found.
    return None
| mpl-2.0 | e4f244de751c0dc94c42731cf4de6785 | 39.518182 | 104 | 0.614988 | 3.232052 | false | false | false | false |
cdr-stats/cdr-stats | cdr_stats/voip_billing/management/commands/generate_fake_rate.py | 2 | 3492 | # -*- coding: utf-8 -*-
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.core.management.base import BaseCommand
from voip_billing.models import VoIPRetailRate, VoIPPlan, \
VoIPRetailPlan, VoIPCarrierPlan, VoIPCarrierRate, \
VoIPPlan_VoIPCarrierPlan
from country_dialcode.models import Prefix
from optparse import make_option
import random
# Re-seed the PRNG at import time; the argument-less form seeds from system
# sources, so each run generates different fake rates.
random.seed()
class Command(BaseCommand):
args = ' number-rate, call-plan '
help = "Generate fake Rates\n"\
"-------------------\n"\
"python manage.py generate_fake_rate --number=100 --call-plan=1"
option_list = BaseCommand.option_list + (
make_option('--number-rate',
default=None,
dest='number-rate',
help=help),
make_option('--call-plan',
default=1,
dest='call-plan',
help=help),
)
def handle(self, *args, **options):
"""
Note that rates created this way are only for devel purposes
"""
no_of_record = 1 # default
if options.get('number-rate'):
no_of_record = int(options.get('number-rate', 1))
voip_plan_id = 1 # default
if options.get('call-plan'):
voip_plan_id = int(options.get('call-plan', 1))
try:
voip_plan = VoIPPlan.objects.get(pk=int(voip_plan_id))
except:
print "No call-plan"
return False
carrierplanid = VoIPPlan_VoIPCarrierPlan.objects.get(voipplan=voip_plan).voipcarrierplan_id
carrier_plan = VoIPCarrierPlan.objects.get(pk=carrierplanid)
retail_plan = VoIPRetailPlan.objects.get(voip_plan=voip_plan_id)
for i in range(1, int(no_of_record) + 1):
# get random prefixes from Prefix
prefix = Prefix.objects.order_by('?')[0]
# Create carrier_rate & retail_rate with random prefix
carrier_rate = '%.4f' % random.random()
# No duplication
if VoIPCarrierRate.objects.filter(prefix=prefix).count() == 0:
VoIPCarrierRate.objects.create(
voip_carrier_plan_id=carrier_plan,
prefix=prefix,
carrier_rate=float(carrier_rate)
)
print "Insert VoIPCarrierRate - prefix=%d [call-plan=%d;carrier_plan=%d;carrier_rate=%f]" % \
(prefix.prefix, voip_plan_id, carrier_plan.id, float(carrier_rate))
# retail_rate = 10% increase in carrier_rate
retail_rate = float(carrier_rate) + ((float(carrier_rate) * 10) / 100)
# No duplication
if VoIPRetailRate.objects.filter(prefix=prefix).count() == 0:
VoIPRetailRate.objects.create(
voip_retail_plan_id=retail_plan,
prefix=prefix,
retail_rate=float(retail_rate)
)
print "Insert VoIPRetailRate - prefix=%d [call-plan=%d;retail_plan=%d;retail_rate=%f]" % \
(prefix.prefix, voip_plan_id, retail_plan.id, float(retail_rate))
| mpl-2.0 | f2b64aadd9681e377ae45c4e61d86f42 | 35.757895 | 109 | 0.579324 | 3.603715 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/services/encodings/thumbnail_service.py | 1 | 1927 | from bitmovin.errors import MissingArgumentError
from bitmovin.resources.models import Thumbnail as ThumbnailResource
from bitmovin.services.rest_service import RestService
class ThumbnailService(RestService):
    """REST service for thumbnail resources belonging to a stream of an encoding."""

    BASE_ENDPOINT_URL = 'encoding/encodings/{encoding_id}/streams/{stream_id}/thumbnails'

    def __init__(self, http_client):
        super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=ThumbnailResource)

    def _get_endpoint_url(self, encoding_id, stream_id):
        """Return the thumbnail endpoint for the given encoding/stream pair."""
        if not encoding_id:
            raise MissingArgumentError('encoding_id must be given')
        if not stream_id:
            raise MissingArgumentError('stream_id must be given')
        return self.BASE_ENDPOINT_URL \
            .replace('{encoding_id}', encoding_id) \
            .replace('{stream_id}', stream_id)

    def _point_to(self, encoding_id, stream_id):
        # Retarget this service instance before delegating to the generic
        # RestService CRUD implementation.
        self.relative_url = self._get_endpoint_url(encoding_id=encoding_id, stream_id=stream_id)

    def create(self, object_, encoding_id, stream_id):
        self._point_to(encoding_id, stream_id)
        return super().create(object_)

    def delete(self, encoding_id, stream_id, thumbnail_id):
        self._point_to(encoding_id, stream_id)
        return super().delete(id_=thumbnail_id)

    def retrieve(self, encoding_id, stream_id, thumbnail_id):
        self._point_to(encoding_id, stream_id)
        return super().retrieve(id_=thumbnail_id)

    def list(self, encoding_id, stream_id, offset=None, limit=None):
        self._point_to(encoding_id, stream_id)
        return super().list(offset, limit)

    def retrieve_custom_data(self, encoding_id, stream_id, thumbnail_id):
        self._point_to(encoding_id, stream_id)
        return super().retrieve_custom_data(id_=thumbnail_id)
| unlicense | 705ee61e8215a251aa1caaf00f32dc40 | 44.880952 | 112 | 0.690711 | 3.670476 | false | false | false | false |
bitmovin/bitmovin-python | tests/bitmovin/services/inputs/analyze_service_tests.py | 1 | 16652 | import json
import time
import unittest
from bitmovin import Bitmovin, HTTPSInput, CustomData, Analysis, AnalysisStatus, AnalysisDetails, AnalysisVideoStream, \
AnalysisStreamDetails, CloudRegion
from bitmovin.errors import BitmovinError
from bitmovin.utils import BitmovinJSONEncoder
from tests.bitmovin import BitmovinTestCase
# NOTE: ``unittest.skip`` must be called with a reason string. Before
# Python 3.11 the bare ``@unittest.skip`` form replaced the decorated class
# with the inner decorator function, so these tests silently disappeared
# from the run instead of being reported as skipped.
@unittest.skip('analyze service integration tests are currently disabled')
class AnalyzeServiceTests(BitmovinTestCase):
    """Integration tests for the input-analysis service (run against the live API)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        """Create a fresh authenticated API client for each test."""
        super().setUp()
        self.bitmovin = Bitmovin(self.api_key)
        self.assertIsNotNone(self.bitmovin)
        self.assertTrue(isinstance(self.bitmovin, Bitmovin))

    def tearDown(self):
        super().tearDown()

    def test_start_analyse_https_input(self):
        """Starting an analysis on a new HTTPS input yields a resource with an id."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks()
        created_input_response = self.bitmovin.inputs.HTTPS.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_https_inputs(sample_input, created_input_response.resource)
        analyze_request = Analysis(path=sample_file, cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
        analyze_response = self.bitmovin.inputs.HTTPS.analyze(
            created_input_response.resource.id, analysis_object=analyze_request)
        self.assertIsNotNone(analyze_response)
        self.assertIsNotNone(analyze_response.resource)
        self.assertIsNotNone(analyze_response.resource.id)

    def test_list_analyses(self):
        """Exactly one analysis is listed after starting one on a new input."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks()
        created_input_response = self.bitmovin.inputs.HTTPS.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_https_inputs(sample_input, created_input_response.resource)
        analyze_request = Analysis(path=sample_file, cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
        analyze_response = self.bitmovin.inputs.HTTPS.analyze(
            created_input_response.resource.id, analysis_object=analyze_request)
        self.assertIsNotNone(analyze_response)
        self.assertIsNotNone(analyze_response.resource)
        self.assertIsNotNone(analyze_response.resource.id)
        # The analysis was started on an HTTPS input, so it must be listed via
        # the HTTPS input service (was a copy-paste "inputs.S3" before).
        analyses_list = self.bitmovin.inputs.HTTPS.list_analyses(input_id=created_input_response.resource.id)
        self.assertIsNotNone(analyses_list)
        self.assertIsNotNone(analyses_list.resource)
        self.assertIsInstance(analyses_list.resource, list)
        # assertEqual, not assertIs: small-int identity is an implementation detail.
        self.assertEqual(len(analyses_list.resource), 1)

    @unittest.skip('GET customData route not available yet.')  # TODO: implement get customdata route in service
    def test_start_analyse_with_custom_data(self):
        """Custom data attached to the analysis request is returned unchanged."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks()
        created_input_response = self.bitmovin.inputs.HTTPS.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_https_inputs(sample_input, created_input_response.resource)
        analyze_request = Analysis(path=sample_file, cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1, custom_data='mycustomdata')
        analyze_response = self.bitmovin.inputs.HTTPS.analyze(
            created_input_response.resource.id, analysis_object=analyze_request)
        self.assertIsNotNone(analyze_response)
        self.assertIsNotNone(analyze_response.resource)
        self.assertIsNotNone(analyze_response.resource.id)
        # Use the HTTPS input service (was a copy-paste "inputs.S3" before).
        custom_data_response = self.bitmovin.inputs.HTTPS.retrieve_analysis_custom_data(
            input_id=created_input_response.resource.id, analysis_id=analyze_response.resource.id)
        custom_data = custom_data_response.resource  # type: CustomData
        self.assertEqual(analyze_request.customData, custom_data.customData)

    def test_retrieve_analysis_status_wait_until_finished(self):
        """Poll the analysis status every 5s until it reaches FINISHED or ERROR."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks()
        created_input_response = self.bitmovin.inputs.HTTPS.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_https_inputs(sample_input, created_input_response.resource)
        analyze_request = Analysis(path=sample_file, cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
        analyze_response = self.bitmovin.inputs.HTTPS.analyze(
            created_input_response.resource.id, analysis_object=analyze_request)
        self.assertIsNotNone(analyze_response)
        self.assertIsNotNone(analyze_response.resource)
        self.assertIsNotNone(analyze_response.resource.id)
        started_at = time.time()
        analysis_status = None
        while analysis_status != 'FINISHED' and analysis_status != 'ERROR':
            time.sleep(5)
            analysis_status = self.bitmovin.inputs.HTTPS.retrieve_analysis_status(
                input_id=created_input_response.resource.id,
                analysis_id=analyze_response.resource.id
            )
            analysis_status_resource = analysis_status.resource  # type: AnalysisStatus
            self.logger.info('Analysis Status: {}'.format(json.dumps(analysis_status_resource,
                                                                     cls=BitmovinJSONEncoder)))
            analysis_status = analysis_status_resource.status
        finished_at = time.time()
        difference = int(finished_at - started_at)
        self.logger.info('Analysis took {} seconds.'.format(difference))
        if analysis_status == 'ERROR':
            self.fail('Analysis FAILED!')

    def test_retrieve_analysis_details_multiple_audio(self):
        """The first sample file exposes two audio tracks: swe then nor."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks()
        analysis_details = self._retrieve_analysis_details_https_input(sample_input, sample_file)
        self.assertEqual(2, len(analysis_details.audioStreams))
        first = None
        second = None
        for audio_stream in analysis_details.audioStreams:
            if audio_stream.position == 0:
                first = audio_stream
            elif audio_stream.position == 1:
                second = audio_stream
            else:
                raise BitmovinError('Got unexpected audio stream position: {}'.format(audio_stream.position))
        self.assertEqual('swe', first.language)
        self.assertEqual('nor', second.language)

    def test_retrieve_analysis_details_multiple_audio_second(self):
        """The second sample file has two nor audio tracks with different codecs."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks_second()
        analysis_details = self._retrieve_analysis_details_https_input(sample_input, sample_file)
        self.assertEqual(2, len(analysis_details.audioStreams))
        first = None
        second = None
        for audio_stream in analysis_details.audioStreams:
            if audio_stream.position == 0:
                first = audio_stream
            elif audio_stream.position == 1:
                second = audio_stream
            else:
                raise BitmovinError('Got unexpected audio stream position: {}'.format(audio_stream.position))
        self.assertIsNotNone(first)
        self.assertIsNotNone(second)
        self.assertEqual('nor', first.language)
        self.assertEqual('nor', second.language)
        self.assertEqual('ac3', first.codec)
        self.assertEqual('aac_latm', second.codec)

    def test_retrieve_analysis_details_multiple_audio_third(self):
        """The second sample file also carries three nor subtitle streams."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks_second()
        analysis_details = self._retrieve_analysis_details_https_input(sample_input, sample_file)
        self.assertEqual(2, len(analysis_details.audioStreams))
        first = None
        second = None
        third = None
        for subtitle_stream in analysis_details.subtitleStreams:
            if subtitle_stream.position == 3:
                first = subtitle_stream
            elif subtitle_stream.position == 4:
                second = subtitle_stream
            elif subtitle_stream.position == 5:
                third = subtitle_stream
            else:
                # Fixed message: this loop inspects subtitle streams, not audio.
                raise BitmovinError('Got unexpected subtitle stream position: {}'.format(subtitle_stream.position))
        self.assertIsNotNone(first)
        self.assertIsNotNone(second)
        self.assertIsNotNone(third)
        self.assertIsNotNone(first.id)
        self.assertIsNotNone(second.id)
        self.assertIsNotNone(third.id)
        # self.assertIsNotNone(first.codec) see issue #643
        self.assertIsNotNone(second.codec)
        self.assertIsNotNone(third.codec)
        self.assertIsNotNone(first.language)
        self.assertIsNotNone(second.language)
        self.assertIsNotNone(third.language)
        self.assertEqual('nor', first.language)
        self.assertEqual('nor', second.language)
        self.assertEqual('nor', third.language)
        # self.assertEqual('???', first.codec)  # see issue #643
        self.assertEqual('dvbsub', second.codec)
        self.assertEqual('dvbsub', third.codec)
        self.assertFalse(first.hearingImpaired)
        self.assertFalse(second.hearingImpaired)
        self.assertTrue(third.hearingImpaired)

    def _retrieve_analysis_details_https_input(self, sample_input, sample_file):
        """Create the input, run an analysis to completion and return its details."""
        created_input_response = self.bitmovin.inputs.HTTPS.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_https_inputs(sample_input, created_input_response.resource)
        analyze_request = Analysis(path=sample_file, cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
        analyze_response = self.bitmovin.inputs.HTTPS.analyze(
            created_input_response.resource.id, analysis_object=analyze_request)
        self.assertIsNotNone(analyze_response)
        self.assertIsNotNone(analyze_response.resource)
        self.assertIsNotNone(analyze_response.resource.id)
        started_at = time.time()
        analysis_status = None
        while analysis_status != 'FINISHED' and analysis_status != 'ERROR':
            time.sleep(5)
            analysis_status = self.bitmovin.inputs.HTTPS.retrieve_analysis_status(
                input_id=created_input_response.resource.id,
                analysis_id=analyze_response.resource.id
            )
            analysis_status_resource = analysis_status.resource  # type: AnalysisStatus
            self.logger.info('Analysis Status: {}'.format(json.dumps(analysis_status_resource,
                                                                     cls=BitmovinJSONEncoder)))
            analysis_status = analysis_status_resource.status
        finished_at = time.time()
        difference = int(finished_at - started_at)
        self.logger.info('Analysis took {} seconds.'.format(difference))
        if analysis_status == 'ERROR':
            self.fail('Analysis FAILED!')
        analysis_details_response = self.bitmovin.inputs.HTTPS.retrieve_analysis_details(
            input_id=created_input_response.resource.id,
            analysis_id=analyze_response.resource.id
        )
        self.assertIsNotNone(analysis_details_response)
        self.assertIsNotNone(analysis_details_response.resource)
        analysis_details = analysis_details_response.resource  # type: AnalysisDetails
        self.assertIsNotNone(analysis_details.id)
        return analysis_details

    @unittest.skip('feature with more object not deployed yet')
    def test_retrieve_analysis_stream_details_multiple_audio(self):
        """Per-stream details can be retrieved for an analyzed input."""
        (sample_input, sample_file) = self._get_https_input_multiple_audio_tracks()
        self._retrieve_analysis_stream_details_https_input(sample_input, sample_file)

    def _retrieve_analysis_stream_details_https_input(self, sample_input, sample_file):
        """Run a full analysis, then fetch the detail object of the first video stream."""
        created_input_response = self.bitmovin.inputs.HTTPS.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_https_inputs(sample_input, created_input_response.resource)
        analyze_request = Analysis(path=sample_file, cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
        analyze_response = self.bitmovin.inputs.HTTPS.analyze(
            created_input_response.resource.id, analysis_object=analyze_request)
        self.assertIsNotNone(analyze_response)
        self.assertIsNotNone(analyze_response.resource)
        self.assertIsNotNone(analyze_response.resource.id)
        started_at = time.time()
        analysis_status = None
        while analysis_status != 'FINISHED' and analysis_status != 'ERROR':
            time.sleep(5)
            analysis_status = self.bitmovin.inputs.HTTPS.retrieve_analysis_status(
                input_id=created_input_response.resource.id,
                analysis_id=analyze_response.resource.id
            )
            analysis_status_resource = analysis_status.resource  # type: AnalysisStatus
            self.logger.info('Analysis Status: {}'.format(json.dumps(analysis_status_resource,
                                                                     cls=BitmovinJSONEncoder)))
            analysis_status = analysis_status_resource.status
        finished_at = time.time()
        difference = int(finished_at - started_at)
        self.logger.info('Analysis took {} seconds.'.format(difference))
        if analysis_status == 'ERROR':
            self.fail('Analysis FAILED!')
        analysis_details_response = self.bitmovin.inputs.HTTPS.retrieve_analysis_details(
            input_id=created_input_response.resource.id,
            analysis_id=analyze_response.resource.id
        )
        self.assertIsNotNone(analysis_details_response)
        self.assertIsNotNone(analysis_details_response.resource)
        analysis_details = analysis_details_response.resource  # type: AnalysisDetails
        self.assertIsNotNone(analysis_details.id)
        video_stream = analysis_details.videoStreams[0]  # type: AnalysisVideoStream
        analysis_stream_details_response = self.bitmovin.inputs.HTTPS.retrieve_analysis_stream_details(
            input_id=created_input_response.resource.id,
            analysis_id=analyze_response.resource.id,
            stream_id=video_stream.id
        )
        self.assertIsNotNone(analysis_stream_details_response)
        self.assertIsNotNone(analysis_stream_details_response.resource)
        analysis_stream_details = analysis_stream_details_response.resource  # type: AnalysisStreamDetails
        self.assertIsNotNone(analysis_stream_details.more)

    def _compare_https_inputs(self, first: HTTPSInput, second: HTTPSInput):
        """
        :param first: HTTPSInput
        :param second: HTTPSInput
        :return: bool
        """
        self.assertEqual(first.host, second.host)
        self.assertEqual(first.name, second.name)
        self.assertEqual(first.description, second.description)

    def _get_https_input_multiple_audio_tracks(self):
        """Return (HTTPSInput, file path) for the first multi-audio sample."""
        http_input_settings = self.settings.get('sampleObjects').get('inputs').get('http')\
            .get('4fa9fec1-b75e-4e2c-a01b-6e0cb7e3cf3e')
        input_file = http_input_settings.get('files').get('1c08c700-abcb-41f8-9cb7-3387503c1e50')
        self.assertIsNotNone(input_file)
        https_input = HTTPSInput(host=http_input_settings.get('host'), name='Sample HTTPS input multiple audio tracks')
        return https_input, input_file

    def _get_https_input_multiple_audio_tracks_second(self):
        """Return (HTTPSInput, file path) for the second multi-audio sample."""
        http_input_settings = self.settings.get('sampleObjects').get('inputs').get('http')\
            .get('4fa9fec1-b75e-4e2c-a01b-6e0cb7e3cf3e')
        input_file = http_input_settings.get('files').get('7fd49d67-562b-4027-8e94-59b125bb2ef7')
        self.assertIsNotNone(input_file)
        https_input = HTTPSInput(host=http_input_settings.get('host'),
                                 name='Sample HTTPS input multiple audio tracks 2')
        return https_input, input_file
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| unlicense | c93f6bc064bafcb70c539074d5d8d993 | 47.976471 | 127 | 0.677997 | 4.142289 | false | false | false | false |
bitmovin/bitmovin-python | tests/bitmovin/services/filters/interlace_filter_tests.py | 1 | 6095 | import json
import unittest
from bitmovin import Bitmovin, Response, InterlaceFilter, InterlaceMode, VerticalLowPassFilteringMode
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class InterlaceFilterTests(BitmovinTestCase):
    """Integration tests for the interlace-filter REST service."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        """Create a fresh authenticated API client for each test."""
        super().setUp()
        self.bitmovin = Bitmovin(self.api_key)
        self.assertIsNotNone(self.bitmovin)
        self.assertTrue(isinstance(self.bitmovin, Bitmovin))

    def tearDown(self):
        super().tearDown()

    def test_create_interlace_filter(self):
        """Creating a filter returns a persisted resource with an id."""
        sample_filter = self._get_sample_interlace_filter()
        filter_resource_response = self.bitmovin.filters.Interlace.create(sample_filter)
        self.assertIsNotNone(filter_resource_response)
        self.assertIsNotNone(filter_resource_response.resource)
        self.assertIsNotNone(filter_resource_response.resource.id)
        self._compare_interlace_filters(sample_filter, filter_resource_response.resource)

    def test_create_interlace_filter_without_name(self):
        """A filter can be created even when its name is unset."""
        sample_filter = self._get_sample_interlace_filter()
        sample_filter.name = None
        filter_resource_response = self.bitmovin.filters.Interlace.create(sample_filter)
        self.assertIsNotNone(filter_resource_response)
        self.assertIsNotNone(filter_resource_response.resource)
        self.assertIsNotNone(filter_resource_response.resource.id)
        self._compare_interlace_filters(sample_filter, filter_resource_response.resource)

    def test_retrieve_interlace_filter(self):
        """A created filter can be retrieved and matches what was stored."""
        sample_filter = self._get_sample_interlace_filter()
        created_filter_response = self.bitmovin.filters.Interlace.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_interlace_filters(sample_filter, created_filter_response.resource)
        retrieved_filter_response = self.bitmovin.filters.Interlace.retrieve(created_filter_response.resource.id)
        self.assertIsNotNone(retrieved_filter_response)
        self.assertIsNotNone(retrieved_filter_response.resource)
        self._compare_interlace_filters(created_filter_response.resource, retrieved_filter_response.resource)

    def test_delete_interlace_filter(self):
        """After deletion, retrieving the filter must raise BitmovinApiError."""
        sample_filter = self._get_sample_interlace_filter()
        created_filter_response = self.bitmovin.filters.Interlace.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_interlace_filters(sample_filter, created_filter_response.resource)
        deleted_minimal_resource = self.bitmovin.filters.Interlace.delete(created_filter_response.resource.id)
        self.assertIsNotNone(deleted_minimal_resource)
        self.assertIsNotNone(deleted_minimal_resource.resource)
        self.assertIsNotNone(deleted_minimal_resource.resource.id)
        try:
            self.bitmovin.filters.Interlace.retrieve(created_filter_response.resource.id)
            self.fail(
                'Previous statement should have thrown an exception. ' +
                'Retrieving filter after deleting it shouldn\'t be possible.'
            )
        except BitmovinApiError:
            pass

    def test_list_interlace_filters(self):
        """Listing returns at least the filter just created."""
        sample_filter = self._get_sample_interlace_filter()
        created_filter_response = self.bitmovin.filters.Interlace.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_interlace_filters(sample_filter, created_filter_response.resource)
        filters = self.bitmovin.filters.Interlace.list()
        self.assertIsNotNone(filters)
        self.assertIsNotNone(filters.resource)
        self.assertIsNotNone(filters.response)
        self.assertIsInstance(filters.resource, list)
        self.assertIsInstance(filters.response, Response)
        # BUG FIX: the old assertion used list.__sizeof__(), which returns the
        # object's size in bytes and is always > 1, so it could never fail.
        # Assert on the element count instead.
        self.assertGreaterEqual(len(filters.resource), 1)

    def test_retrieve_interlace_filter_custom_data(self):
        """Custom data stored with a filter is returned unchanged."""
        sample_filter = self._get_sample_interlace_filter()
        sample_filter.customData = '<pre>my custom data</pre>'
        created_filter_response = self.bitmovin.filters.Interlace.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_interlace_filters(sample_filter, created_filter_response.resource)
        custom_data_response = self.bitmovin.filters.Interlace.retrieve_custom_data(created_filter_response.resource.id)
        custom_data = custom_data_response.resource
        self.assertEqual(sample_filter.customData, json.loads(custom_data.customData))

    def _compare_interlace_filters(self, first: InterlaceFilter, second: InterlaceFilter):
        """
        :param first: InterlaceFilter
        :param second: InterlaceFilter
        :return: bool
        """
        self.assertEqual(first.mode, second.mode)
        self.assertEqual(first.verticalLowPassFilteringMode, second.verticalLowPassFilteringMode)
        self.assertEqual(first.name, second.name)
        self.assertEqual(first.description, second.description)
        return True

    def _get_sample_interlace_filter(self):
        """Build the InterlaceFilter fixture used by all tests above."""
        interlace_filter = InterlaceFilter(name='Sample Interlace Filter py',
                                           mode=InterlaceMode.INTERLACE_X2,
                                           vertical_low_pass_filtering_mode=VerticalLowPassFilteringMode.COMPLEX)
        return interlace_filter
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| unlicense | 4d6b5d2e9a6a3ac931d3078c8c846268 | 45.884615 | 120 | 0.7137 | 4.194769 | false | true | false | false |
bitmovin/bitmovin-python | tests/bitmovin/services/filters/watermark_filter_tests.py | 1 | 6388 | import json
import unittest
from bitmovin import Bitmovin, Response, WatermarkFilter, WatermarkUnit
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class WatermarkFilterTests(BitmovinTestCase):
    ''' Integration tests for the Bitmovin watermark filter resource:
        create, retrieve, delete, list, and custom-data round-trips.
        These tests hit the live API configured by BitmovinTestCase.
    '''

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        super().setUp()
        # Fresh client per test; api_key is provided by BitmovinTestCase.
        self.bitmovin = Bitmovin(self.api_key)
        self.assertIsNotNone(self.bitmovin)
        self.assertTrue(isinstance(self.bitmovin, Bitmovin))

    def tearDown(self):
        super().tearDown()

    def test_create_watermark_filter(self):
        sample_filter = self._get_sample_watermark_filter()
        filter_resource_response = self.bitmovin.filters.Watermark.create(sample_filter)
        self.assertIsNotNone(filter_resource_response)
        self.assertIsNotNone(filter_resource_response.resource)
        self.assertIsNotNone(filter_resource_response.resource.id)
        self._compare_watermark_filters(sample_filter, filter_resource_response.resource)

    def test_create_watermark_filter_without_name(self):
        # The name is optional: creation must still succeed with name=None.
        sample_filter = self._get_sample_watermark_filter()
        sample_filter.name = None
        filter_resource_response = self.bitmovin.filters.Watermark.create(sample_filter)
        self.assertIsNotNone(filter_resource_response)
        self.assertIsNotNone(filter_resource_response.resource)
        self.assertIsNotNone(filter_resource_response.resource.id)
        self._compare_watermark_filters(sample_filter, filter_resource_response.resource)

    def test_retrieve_watermark_filter(self):
        sample_filter = self._get_sample_watermark_filter()
        created_filter_response = self.bitmovin.filters.Watermark.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_watermark_filters(sample_filter, created_filter_response.resource)
        retrieved_filter_response = self.bitmovin.filters.Watermark.retrieve(created_filter_response.resource.id)
        self.assertIsNotNone(retrieved_filter_response)
        self.assertIsNotNone(retrieved_filter_response.resource)
        self._compare_watermark_filters(created_filter_response.resource, retrieved_filter_response.resource)

    def test_delete_watermark_filter(self):
        sample_filter = self._get_sample_watermark_filter()
        created_filter_response = self.bitmovin.filters.Watermark.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_watermark_filters(sample_filter, created_filter_response.resource)
        deleted_minimal_resource = self.bitmovin.filters.Watermark.delete(created_filter_response.resource.id)
        self.assertIsNotNone(deleted_minimal_resource)
        self.assertIsNotNone(deleted_minimal_resource.resource)
        self.assertIsNotNone(deleted_minimal_resource.resource.id)
        # Retrieval after deletion must fail with an API error.
        try:
            self.bitmovin.filters.Watermark.retrieve(created_filter_response.resource.id)
            self.fail(
                'Previous statement should have thrown an exception. ' +
                'Retrieving filter after deleting it shouldn\'t be possible.'
            )
        except BitmovinApiError:
            pass

    def test_list_watermark_filters(self):
        sample_filter = self._get_sample_watermark_filter()
        created_filter_response = self.bitmovin.filters.Watermark.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_watermark_filters(sample_filter, created_filter_response.resource)
        filters = self.bitmovin.filters.Watermark.list()
        self.assertIsNotNone(filters)
        self.assertIsNotNone(filters.resource)
        self.assertIsNotNone(filters.response)
        self.assertIsInstance(filters.resource, list)
        self.assertIsInstance(filters.response, Response)
        # BUG FIX: the previous check used filters.resource.__sizeof__(),
        # which measures the list's memory footprint in bytes and is always
        # greater than 1 even for an empty list, so the assertion was
        # vacuous. Assert on the element count instead.
        self.assertGreater(len(filters.resource), 0)

    def test_retrieve_watermark_filter_custom_data(self):
        sample_filter = self._get_sample_watermark_filter()
        sample_filter.customData = '<pre>my custom data</pre>'
        created_filter_response = self.bitmovin.filters.Watermark.create(sample_filter)
        self.assertIsNotNone(created_filter_response)
        self.assertIsNotNone(created_filter_response.resource)
        self.assertIsNotNone(created_filter_response.resource.id)
        self._compare_watermark_filters(sample_filter, created_filter_response.resource)
        custom_data_response = self.bitmovin.filters.Watermark.retrieve_custom_data(created_filter_response.resource.id)
        custom_data = custom_data_response.resource
        # The API returns customData JSON-encoded, hence the json.loads().
        self.assertEqual(sample_filter.customData, json.loads(custom_data.customData))

    def _compare_watermark_filters(self, first: WatermarkFilter, second: WatermarkFilter):
        """
        Assert that two WatermarkFilter instances carry identical settings.

        :param first: WatermarkFilter
        :param second: WatermarkFilter
        :return: bool
        """
        self.assertEqual(first.image, second.image)
        self.assertEqual(first.top, second.top)
        self.assertEqual(first.bottom, second.bottom)
        self.assertEqual(first.left, second.left)
        self.assertEqual(first.right, second.right)
        self.assertEqual(first.name, second.name)
        self.assertEqual(first.description, second.description)
        self.assertEqual(first.unit, second.unit)
        return True

    def _get_sample_watermark_filter(self):
        # Fixture: watermark anchored top-right, positioned in percent.
        watermark_filter = WatermarkFilter(image='http://www.bitmovin.com/favicon.ico', right=10, top=10,
                                           name='Sample Watermark Filter bitmovin icon', unit=WatermarkUnit.PERCENTS)
        self.assertIsNotNone(watermark_filter.image)
        self.assertIsNotNone(watermark_filter.right)
        self.assertIsNotNone(watermark_filter.top)
        self.assertIsNotNone(watermark_filter.unit)
        return watermark_filter
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| unlicense | f8e092bb7c1c4b1c41621f08169fa2b0 | 45.627737 | 120 | 0.713525 | 4.126615 | false | true | false | false |
bitmovin/bitmovin-python | examples/encoding/merge_audio_streams.py | 1 | 12219 | import datetime
from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, H264CodecConfiguration, \
AACCodecConfiguration, H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
FMP4Muxing, MuxingStream, CloudRegion, DashManifest, FMP4Representation, FMP4RepresentationType, Period, \
VideoAdaptationSet, AudioAdaptationSet
from bitmovin.errors import BitmovinError
API_KEY = '<INSERT_YOUR_API_KEY>'
# https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>'
S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = '/your/output/base/path/{}/'.format(date_component)
def main():
    ''' End-to-end encoding example: merge two audio tracks of an HTTPS
        source into one AAC stream, encode two H264 renditions, write
        fMP4 muxings to S3, and generate a DASH manifest.

        Every API call below blocks on the network; the statement order
        mirrors the required resource-creation order (inputs/outputs ->
        codec configs -> streams -> muxings -> encoding -> manifest).
    '''
    bitmovin = Bitmovin(api_key=API_KEY)

    # --- Input and output resources -----------------------------------
    https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST)
    https_input = bitmovin.inputs.HTTPS.create(https_input).resource

    s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
                         secret_key=S3_OUTPUT_SECRETKEY,
                         bucket_name=S3_OUTPUT_BUCKETNAME,
                         name='Sample S3 Output')
    s3_output = bitmovin.outputs.S3.create(s3_output).resource

    # --- Encoding shell ------------------------------------------------
    encoding = Encoding(name='example encoding',
                        cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
    encoding = bitmovin.encodings.Encoding.create(encoding).resource

    # --- Codec configurations: two video renditions, one audio ---------
    video_codec_configuration_1080p = H264CodecConfiguration(name='example_video_codec_configuration_1080p',
                                                             bitrate=4800000,
                                                             rate=25.0,
                                                             width=1920,
                                                             height=1080,
                                                             profile=H264Profile.HIGH)
    video_codec_configuration_1080p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_1080p).resource

    video_codec_configuration_720p = H264CodecConfiguration(name='example_video_codec_configuration_720p',
                                                            bitrate=2400000,
                                                            rate=25.0,
                                                            width=1280,
                                                            height=720,
                                                            profile=H264Profile.HIGH)
    video_codec_configuration_720p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_720p).resource

    audio_codec_configuration = AACCodecConfiguration(name='example_audio_codec_configuration_english',
                                                      bitrate=128000,
                                                      rate=48000)
    audio_codec_configuration = bitmovin.codecConfigurations.AAC.create(audio_codec_configuration).resource

    # --- Input streams: one video plus the two audio tracks to merge ---
    video_input_stream = StreamInput(input_id=https_input.id,
                                     input_path=HTTPS_INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)
    # AUDIO_RELATIVE with position 0/1 selects the first and second audio
    # tracks of the source file.
    audio_input_stream_1 = StreamInput(input_id=https_input.id,
                                       input_path=HTTPS_INPUT_PATH,
                                       selection_mode=SelectionMode.AUDIO_RELATIVE,
                                       position=0)
    audio_input_stream_2 = StreamInput(input_id=https_input.id,
                                       input_path=HTTPS_INPUT_PATH,
                                       selection_mode=SelectionMode.AUDIO_RELATIVE,
                                       position=1)

    # --- Encoder streams -----------------------------------------------
    video_stream_1080p = Stream(codec_configuration_id=video_codec_configuration_1080p.id,
                                input_streams=[video_input_stream], name='Sample Stream 1080p')
    video_stream_1080p = bitmovin.encodings.Stream.create(object_=video_stream_1080p,
                                                          encoding_id=encoding.id).resource

    video_stream_720p = Stream(codec_configuration_id=video_codec_configuration_720p.id,
                               input_streams=[video_input_stream], name='Sample Stream 720p')
    video_stream_720p = bitmovin.encodings.Stream.create(object_=video_stream_720p,
                                                         encoding_id=encoding.id).resource

    # Passing both audio input streams merges them into a single stream.
    audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
                          input_streams=[audio_input_stream_1, audio_input_stream_2],
                          name='Sample Stream AUDIO Merged')
    audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
                                                    encoding_id=encoding.id).resource

    # --- fMP4 muxings, one per stream, publicly readable on S3 ---------
    acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)

    video_muxing_stream_1080p = MuxingStream(video_stream_1080p.id)
    video_muxing_stream_720p = MuxingStream(video_stream_720p.id)
    audio_muxing_stream = MuxingStream(audio_stream.id)

    video_muxing_1080p_output = EncodingOutput(output_id=s3_output.id,
                                               output_path=OUTPUT_BASE_PATH + 'video/1080p/',
                                               acl=[acl_entry])
    video_muxing_1080p = FMP4Muxing(segment_length=4,
                                    segment_naming='seg_%number%.m4s',
                                    init_segment_name='init.mp4',
                                    streams=[video_muxing_stream_1080p],
                                    outputs=[video_muxing_1080p_output],
                                    name='Sample Muxing 1080p')
    video_muxing_1080p = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing_1080p,
                                                               encoding_id=encoding.id).resource
    video_muxing_720p_output = EncodingOutput(output_id=s3_output.id,
                                              output_path=OUTPUT_BASE_PATH + 'video/720p/',
                                              acl=[acl_entry])
    video_muxing_720p = FMP4Muxing(segment_length=4,
                                   segment_naming='seg_%number%.m4s',
                                   init_segment_name='init.mp4',
                                   streams=[video_muxing_stream_720p],
                                   outputs=[video_muxing_720p_output],
                                   name='Sample Muxing 720p')
    video_muxing_720p = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing_720p,
                                                              encoding_id=encoding.id).resource
    audio_muxing_output = EncodingOutput(output_id=s3_output.id,
                                         output_path=OUTPUT_BASE_PATH + 'audio/',
                                         acl=[acl_entry])
    audio_muxing = FMP4Muxing(segment_length=4,
                              segment_naming='seg_%number%.m4s',
                              init_segment_name='init.mp4',
                              streams=[audio_muxing_stream],
                              outputs=[audio_muxing_output],
                              name='Sample Muxing AUDIO')
    audio_muxing = bitmovin.encodings.Muxing.FMP4.create(object_=audio_muxing,
                                                         encoding_id=encoding.id).resource

    # --- Run the encoding and wait (blocking) for it to finish ---------
    bitmovin.encodings.Encoding.start(encoding_id=encoding.id)

    try:
        bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
    except BitmovinError as bitmovin_error:
        # Best-effort: report and continue so manifest creation is still
        # attempted (it may fail if the encoding did not complete).
        print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))

    # --- DASH manifest: one period, video + audio adaptation sets ------
    manifest_output = EncodingOutput(output_id=s3_output.id,
                                     output_path=OUTPUT_BASE_PATH,
                                     acl=[acl_entry])
    dash_manifest = DashManifest(manifest_name='example_manifest_sintel_dash.mpd',
                                 outputs=[manifest_output],
                                 name='Sample DASH Manifest')
    dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource
    period = Period()
    period = bitmovin.manifests.DASH.add_period(object_=period, manifest_id=dash_manifest.id).resource

    video_adaptation_set = VideoAdaptationSet()
    video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(object_=video_adaptation_set,
                                                                            manifest_id=dash_manifest.id,
                                                                            period_id=period.id).resource
    audio_adaptation_set = AudioAdaptationSet(lang='en')
    audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(object_=audio_adaptation_set,
                                                                            manifest_id=dash_manifest.id,
                                                                            period_id=period.id).resource

    # segment_path values must match the muxing output paths above,
    # relative to the manifest output path.
    fmp4_representation_1080p = FMP4Representation(type=FMP4RepresentationType.TEMPLATE,
                                                   encoding_id=encoding.id,
                                                   muxing_id=video_muxing_1080p.id,
                                                   segment_path='video/1080p/')
    fmp4_representation_1080p = bitmovin.manifests.DASH.add_fmp4_representation(object_=fmp4_representation_1080p,
                                                                                manifest_id=dash_manifest.id,
                                                                                period_id=period.id,
                                                                                adaptationset_id=video_adaptation_set.id
                                                                                ).resource

    fmp4_representation_720p = FMP4Representation(type=FMP4RepresentationType.TEMPLATE,
                                                  encoding_id=encoding.id,
                                                  muxing_id=video_muxing_720p.id,
                                                  segment_path='video/720p/')
    fmp4_representation_720p = bitmovin.manifests.DASH.add_fmp4_representation(object_=fmp4_representation_720p,
                                                                               manifest_id=dash_manifest.id,
                                                                               period_id=period.id,
                                                                               adaptationset_id=video_adaptation_set.id
                                                                               ).resource

    fmp4_representation_audio = FMP4Representation(type=FMP4RepresentationType.TEMPLATE,
                                                   encoding_id=encoding.id,
                                                   muxing_id=audio_muxing.id,
                                                   segment_path='audio/')
    fmp4_representation_audio = bitmovin.manifests.DASH.add_fmp4_representation(object_=fmp4_representation_audio,
                                                                                manifest_id=dash_manifest.id,
                                                                                period_id=period.id,
                                                                                adaptationset_id=audio_adaptation_set.id
                                                                                ).resource

    bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)

    try:
        bitmovin.manifests.DASH.wait_until_finished(manifest_id=dash_manifest.id)
    except BitmovinError as bitmovin_error:
        print("Exception occurred while waiting for manifest creation to finish: {}".format(bitmovin_error))
# Script entry point.
if __name__ == '__main__':
    main()
| unlicense | 5710bd47d0a29671a68bb24c555c2237 | 62.640625 | 120 | 0.503969 | 4.686997 | false | true | false | false |
bitmovin/bitmovin-python | tests/bitmovin/services/inputs/ftp_input_service_tests.py | 1 | 6688 | import unittest
import json
from bitmovin import Bitmovin, Response, FTPInput
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class FTPInputTests(BitmovinTestCase):
    ''' Integration tests for the Bitmovin FTP input resource: create
        (with and without optional fields), retrieve, delete, list, and
        custom-data round-trips. These tests hit the live API configured
        by BitmovinTestCase, using credentials from the test settings.
    '''

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        super().setUp()
        # Fresh client per test; api_key is provided by BitmovinTestCase.
        self.bitmovin = Bitmovin(self.api_key)
        self.assertIsNotNone(self.bitmovin)
        self.assertTrue(isinstance(self.bitmovin, Bitmovin))

    def tearDown(self):
        super().tearDown()

    def test_create_ftp_input(self):
        (sample_input, sample_files) = self._get_sample_ftp_input()
        input_resource_response = self.bitmovin.inputs.FTP.create(sample_input)
        self.assertIsNotNone(input_resource_response)
        self.assertIsNotNone(input_resource_response.resource)
        self.assertIsNotNone(input_resource_response.resource.id)
        self._compare_ftp_inputs(sample_input, input_resource_response.resource)

    def test_create_ftp_input_without_name(self):
        # The name is optional: creation must still succeed with name=None.
        (sample_input, sample_files) = self._get_sample_ftp_input()
        sample_input.name = None
        input_resource_response = self.bitmovin.inputs.FTP.create(sample_input)
        self.assertIsNotNone(input_resource_response)
        self.assertIsNotNone(input_resource_response.resource)
        self.assertIsNotNone(input_resource_response.resource.id)
        self._compare_ftp_inputs(sample_input, input_resource_response.resource)

    def test_create_ftp_input_custom(self):
        # Custom port and active (non-passive) mode must survive creation.
        (sample_input, sample_files) = self._get_sample_ftp_input()
        sample_input.port = 9921
        sample_input.passive = False
        input_resource_response = self.bitmovin.inputs.FTP.create(sample_input)
        self.assertIsNotNone(input_resource_response)
        self.assertIsNotNone(input_resource_response.resource)
        self.assertIsNotNone(input_resource_response.resource.id)
        self._compare_ftp_inputs(sample_input, input_resource_response.resource)
        self.assertEqual(sample_input.port, input_resource_response.resource.port)
        self.assertNotEqual(True, input_resource_response.resource.passive)

    def test_retrieve_ftp_input(self):
        (sample_input, sample_files) = self._get_sample_ftp_input()
        created_input_response = self.bitmovin.inputs.FTP.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_ftp_inputs(sample_input, created_input_response.resource)
        retrieved_input_response = self.bitmovin.inputs.FTP.retrieve(created_input_response.resource.id)
        self.assertIsNotNone(retrieved_input_response)
        self.assertIsNotNone(retrieved_input_response.resource)
        self._compare_ftp_inputs(created_input_response.resource, retrieved_input_response.resource)

    def test_delete_ftp_input(self):
        (sample_input, sample_files) = self._get_sample_ftp_input()
        created_input_response = self.bitmovin.inputs.FTP.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_ftp_inputs(sample_input, created_input_response.resource)
        deleted_minimal_resource = self.bitmovin.inputs.FTP.delete(created_input_response.resource.id)
        self.assertIsNotNone(deleted_minimal_resource)
        self.assertIsNotNone(deleted_minimal_resource.resource)
        self.assertIsNotNone(deleted_minimal_resource.resource.id)
        # Retrieval after deletion must fail with an API error.
        try:
            self.bitmovin.inputs.FTP.retrieve(created_input_response.resource.id)
            self.fail(
                'Previous statement should have thrown an exception. ' +
                'Retrieving input after deleting it shouldn\'t be possible.'
            )
        except BitmovinApiError:
            pass

    def test_list_ftp_inputs(self):
        (sample_input, sample_files) = self._get_sample_ftp_input()
        created_input_response = self.bitmovin.inputs.FTP.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_ftp_inputs(sample_input, created_input_response.resource)
        inputs = self.bitmovin.inputs.FTP.list()
        self.assertIsNotNone(inputs)
        self.assertIsNotNone(inputs.resource)
        self.assertIsNotNone(inputs.response)
        self.assertIsInstance(inputs.resource, list)
        self.assertIsInstance(inputs.response, Response)
        # BUG FIX: the previous check used inputs.resource.__sizeof__(),
        # which measures the list's memory footprint in bytes and is always
        # greater than 1 even for an empty list, so the assertion was
        # vacuous. Assert on the element count instead.
        self.assertGreater(len(inputs.resource), 0)

    def test_retrieve_ftp_input_custom_data(self):
        (sample_input, sample_files) = self._get_sample_ftp_input()
        sample_input.customData = '<pre>my custom data</pre>'
        created_input_response = self.bitmovin.inputs.FTP.create(sample_input)
        self.assertIsNotNone(created_input_response)
        self.assertIsNotNone(created_input_response.resource)
        self.assertIsNotNone(created_input_response.resource.id)
        self._compare_ftp_inputs(sample_input, created_input_response.resource)
        custom_data_response = self.bitmovin.inputs.FTP.retrieve_custom_data(created_input_response.resource.id)
        custom_data = custom_data_response.resource
        # The API returns customData JSON-encoded, hence the json.loads().
        self.assertEqual(sample_input.customData, json.loads(custom_data.customData))

    def _compare_ftp_inputs(self, first: FTPInput, second: FTPInput):
        """
        Assert that two FTPInput instances carry identical settings.

        :param first: FTPInput
        :param second: FTPInput
        :return: bool
        """
        self.assertEqual(first.host, second.host)
        self.assertEqual(first.name, second.name)
        self.assertEqual(first.description, second.description)

    def _get_sample_ftp_input(self):
        # Fixture: FTP credentials and sample files keyed by a fixed UUID
        # in the test settings file.
        ftp_input_settings = self.settings.get('sampleObjects').get('inputs').get('ftp')\
            .get('13bcc79f-f554-482c-bd12-041391df63f8')
        files = ftp_input_settings.get('files')
        ftp_input = FTPInput(
            host=ftp_input_settings.get('host'),
            username=ftp_input_settings.get('username'),
            password=ftp_input_settings.get('password'),
            name='Sample FTP input'
        )
        self.assertIsNotNone(ftp_input.host)
        self.assertIsNotNone(ftp_input.username)
        self.assertIsNotNone(ftp_input.password)
        return ftp_input, files
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| unlicense | eaef7e0a3db95ee08adf8d01a7e3b392 | 44.189189 | 112 | 0.694976 | 3.966785 | false | true | false | false |
bitmovin/bitmovin-python | bitmovin/services/parsing_utils.py | 1 | 2308 | from bitmovin.bitmovin_object import BitmovinObject
from bitmovin.errors import MissingArgumentError, InvalidTypeError, BitmovinApiError
from bitmovin.resources import ResponseSuccessData
from bitmovin.resources.models import MinimalModel, CustomData
class ParsingUtils(BitmovinObject):
    ''' Helpers for validating arguments and for turning raw API
        responses into resource objects.
    '''

    def __init__(self):
        super().__init__()

    @classmethod
    def check_arg_valid_uuid(cls, argument):
        ''' Raise unless argument is a non-empty string (used for UUIDs). '''
        if not argument:
            raise MissingArgumentError('argument must be an UUID')
        if not isinstance(argument, str):
            raise InvalidTypeError('argument must be an UUID')

    @classmethod
    def check_not_blank(cls, argument):
        ''' Raise unless argument is a non-empty string. '''
        if not argument:
            raise MissingArgumentError('argument must not be blank')
        if not isinstance(argument, str):
            raise InvalidTypeError('argument must be an str')
        # The former `argument == ''` branch was unreachable: an empty
        # string is already rejected by the truthiness check above.

    @classmethod
    def check_not_none(cls, argument):
        ''' Raise when argument is missing.

            NOTE: despite the name, this rejects *every* falsy value
            (0, '', empty containers), not just None; kept as-is since
            callers may depend on it. The former `argument is None`
            branch was unreachable (None is falsy) and has been removed.
        '''
        if not argument:
            raise MissingArgumentError('argument must not be blank')

    def parse_bitmovin_resource_from_response(self, response, class_):
        ''' Parse a single resource of type class_ from an API response. '''
        response_data = response.data  # type: ResponseSuccessData
        result = response_data.result
        resource = class_.parse_from_json_object(json_object=result)
        return resource

    def parse_bitmovin_minimal_model_from_response(self, response):
        ''' Parse a MinimalModel (id-only resource) from an API response. '''
        response_data = response.data  # type: ResponseSuccessData
        result = response_data.result
        resource = MinimalModel.parse_from_json_object(json_object=result)
        return resource

    def parse_bitmovin_resource_list_from_response(self, response, class_):
        ''' Parse a list of class_ resources from an API list response.

            Raises BitmovinApiError when the 'items' field is not a list.
        '''
        response_data = response.data  # type: ResponseSuccessData
        resource_list = response_data.result.get('items')
        if not isinstance(resource_list, list):
            raise BitmovinApiError('Got invalid response from server: \'items\' has to be a list')
        return [class_.parse_from_json_object(json_object=resource) for resource in resource_list]
| unlicense | 3e3045175c89c31b499bfd227716e080 | 38.118644 | 98 | 0.679809 | 4.472868 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/outputs/sftp_output.py | 1 | 2535 | from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums import FTPTransferVersion
from bitmovin.utils import Serializable
from . import AbstractOutput
class SFTPOutput(AbstractOutput, Serializable):
    ''' SFTP output target with host/credentials, optional port, FTP
        transfer version and a connection limit.
    '''

    def __init__(self, host, username, password, port=None, id_=None, custom_data=None, name=None, description=None,
                 transfer_version=None, max_concurrent_connections=None):
        super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
        self.host = host
        self.username = username
        self.password = password
        self.port = port
        # Backing field first, then route the constructor value through the
        # validating property setter below.
        self._transferVersion = None
        self.transferVersion = transfer_version
        self.maxConcurrentConnections = max_concurrent_connections

    @property
    def transferVersion(self):
        # Fall back to the API default when no explicit version was set.
        if self._transferVersion is None:
            return FTPTransferVersion.default().value
        return self._transferVersion

    @transferVersion.setter
    def transferVersion(self, new_transfer_version):
        # None is ignored so the getter's default stays in effect.
        if new_transfer_version is None:
            return
        if isinstance(new_transfer_version, str):
            self._transferVersion = new_transfer_version
            return
        if isinstance(new_transfer_version, FTPTransferVersion):
            self._transferVersion = new_transfer_version.value
            return
        raise InvalidTypeError(
            'Invalid type {} for transferVersion: must be either str or FTPTransferVersion!'.format(
                type(new_transfer_version)
            )
        )

    @classmethod
    def parse_from_json_object(cls, json_object):
        ''' Build an SFTPOutput from an API JSON payload. '''
        get = json_object.get
        return SFTPOutput(
            id_=json_object['id'],
            host=json_object['host'],
            username=get('username'),
            password=get('password'),
            port=get('port'),
            name=get('name'),
            description=get('description'),
            transfer_version=get('transferVersion'),
            max_concurrent_connections=get('maxConcurrentConnections')
        )

    def serialize(self):
        ''' Serialize base fields, then add the (possibly defaulted)
            transfer version. '''
        result = super().serialize()
        result['transferVersion'] = self.transferVersion
        return result
| unlicense | e2568b1d3df5b2c486e82abe31f7d4f7 | 39.887097 | 116 | 0.656805 | 4.494681 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/filters/unsharp_filter.py | 1 | 2355 | from bitmovin.utils import Serializable
from . import AbstractFilter
class UnsharpFilter(AbstractFilter, Serializable):
    ''' Unsharp-mask filter configuration, with separate matrix sizes
        and effect strengths for the luma and chroma planes.
    '''

    def __init__(self,
                 name=None,
                 luma_matrix_horizontal_size=None,
                 luma_matrix_vertical_size=None,
                 luma_effect_strength=None,
                 chroma_matrix_horizontal_size=None,
                 chroma_matrix_vertical_size=None,
                 chroma_effect_strength=None,
                 id_=None,
                 custom_data=None,
                 description=None):
        super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
        # Attribute names are camelCase to match the REST payload.
        self.lumaMatrixHorizontalSize = luma_matrix_horizontal_size
        self.lumaMatrixVerticalSize = luma_matrix_vertical_size
        self.lumaEffectStrength = luma_effect_strength
        self.chromaMatrixHorizontalSize = chroma_matrix_horizontal_size
        self.chromaMatrixVerticalSize = chroma_matrix_vertical_size
        self.chromaEffectStrength = chroma_effect_strength

    def serialize(self):
        ''' Serialize via the Serializable base implementation. '''
        return super().serialize()

    @classmethod
    def parse_from_json_object(cls, json_object):
        ''' Build an UnsharpFilter from an API JSON payload. '''
        get = json_object.get
        return UnsharpFilter(
            id_=json_object['id'],
            name=get('name'),
            description=get('description'),
            luma_matrix_horizontal_size=get('lumaMatrixHorizontalSize'),
            luma_matrix_vertical_size=get('lumaMatrixVerticalSize'),
            luma_effect_strength=get('lumaEffectStrength'),
            chroma_matrix_horizontal_size=get('chromaMatrixHorizontalSize'),
            chroma_matrix_vertical_size=get('chromaMatrixVerticalSize'),
            chroma_effect_strength=get('chromaEffectStrength')
        )
| unlicense | 838b70e2a34e52e6d05b3439e6358ae3 | 41.818182 | 94 | 0.651805 | 3.873355 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/encodings/captions/burn_in_srt_subtitle.py | 1 | 2689 | from bitmovin.errors import InvalidTypeError
from bitmovin.resources import AbstractNameDescriptionResource, CaptionCharacterEncoding
from bitmovin.resources.models import AbstractModel
from bitmovin.utils import Serializable
from bitmovin.resources.models.encodings.encoding_input import EncodingInput
class BurnInSrtSubtitle(AbstractNameDescriptionResource, AbstractModel, Serializable):
    ''' SRT subtitle configuration burned into the video picture during
        encoding, with an input location and a character encoding.
    '''

    def __init__(self, input=None, id_=None, custom_data=None,
                 name=None, description=None, character_encoding=None):
        super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
        # Backing fields first; constructor values go through the
        # validating property setters below.
        self._input = None
        self._character_encoding = None
        self.input = input
        self.characterEncoding = character_encoding

    @property
    def input(self):
        return self._input

    @input.setter
    def input(self, new_input):
        # Accept None, a ready EncodingInput, or a raw JSON object.
        if new_input is None:
            self._input = None
        elif isinstance(new_input, EncodingInput):
            self._input = new_input
        else:
            self._input = EncodingInput.parse_from_json_object(new_input)

    @property
    def characterEncoding(self):
        return self._character_encoding

    @characterEncoding.setter
    def characterEncoding(self, new_character_encoding):
        # Enum members are stored by their string value.
        if new_character_encoding is None:
            self._character_encoding = None
        elif isinstance(new_character_encoding, CaptionCharacterEncoding):
            self._character_encoding = new_character_encoding.value
        elif isinstance(new_character_encoding, str):
            self._character_encoding = new_character_encoding
        else:
            raise InvalidTypeError('characterEncoding has to be of type CaptionCharacterEncoding or str')

    @classmethod
    def parse_from_json_object(cls, json_object):
        ''' Build a BurnInSrtSubtitle from an API JSON payload. '''
        get = json_object.get
        return BurnInSrtSubtitle(id_=get('id'),
                                 custom_data=get('customData'),
                                 name=get('name'),
                                 description=get('description'),
                                 input=get('input'),
                                 character_encoding=get('characterEncoding'))

    def serialize(self):
        ''' Serialize base fields plus input and character encoding. '''
        serialized = super().serialize()
        serialized['input'] = self.input
        serialized['characterEncoding'] = self.characterEncoding
        return serialized
| unlicense | 9d4d93399fee1b9029c5bd76d36c1c24 | 37.971014 | 105 | 0.657493 | 4.519328 | false | false | false | false |
openaddresses/machine | openaddr/cache.py | 1 | 15360 | from __future__ import absolute_import, division, print_function
import logging; _L = logging.getLogger('openaddr.cache')
import os
import errno
import math
import mimetypes
import shutil
import re
import csv
import simplejson as json
from os import mkdir
from hashlib import md5
from os.path import join, basename, exists, abspath, splitext
from urllib.parse import urlparse
from subprocess import check_output
from tempfile import mkstemp
from hashlib import sha1
from shutil import move
from shapely.geometry import shape
from esridump import EsriDumper
from esridump.errors import EsriDownloadError
import requests
# HTTP timeout in seconds, used in various calls to requests.get() and requests.post()
_http_timeout = 180
from .conform import X_FIELDNAME, Y_FIELDNAME, GEOM_FIELDNAME, attrib_types
from . import util
def mkdirsp(path):
    ''' Create a directory and any missing parents, like `mkdir -p`.

        Succeeds silently when the directory already exists; still raises
        when the path exists as a non-directory or cannot be created.
    '''
    # Python 3's exist_ok flag replaces the old errno.EEXIST try/except
    # dance and is also free of the check-then-create race.
    os.makedirs(path, exist_ok=True)
def traverse(item):
    ''' Iterate depth-first over arbitrarily nested lists, yielding leaves.

        Only list instances are recursed into; any other item (including
        tuples and strings) is yielded as-is.
    '''
    if isinstance(item, list):
        for i in item:
            # yield from delegates to the sub-generator (idiomatic since
            # Python 3.3, which this file requires via urllib.parse).
            yield from traverse(i)
    else:
        yield item
def request(method, url, **kwargs):
    ''' Wrapper around requests.request() that also understands ftp:// URLs
        (GET only, delegated to util.request_ftp_file).

        On an SSL certificate error the request is retried once without
        verification.
    '''
    if urlparse(url).scheme == 'ftp':
        if method != 'GET':
            raise NotImplementedError("Don't know how to {} with {}".format(method, url))
        return util.request_ftp_file(url)
    try:
        _L.debug("Requesting %s with args %s", url, kwargs.get('params') or kwargs.get('data'))
        return requests.request(method, url, timeout=_http_timeout, **kwargs)
    except requests.exceptions.SSLError as e:
        # NOTE(review): retrying with verify=False disables TLS certificate
        # checking; presumably a deliberate trade-off for sources with
        # broken certificates — confirm before tightening.
        _L.warning("Retrying %s without SSL verification", url)
        return requests.request(method, url, timeout=_http_timeout, verify=False, **kwargs)
class CacheResult:
    ''' Outcome of a cache run: cached-file URL, content fingerprint,
        source version and elapsed time.
    '''
    cache = None
    fingerprint = None
    version = None
    elapsed = None

    def __init__(self, cache, fingerprint, version, elapsed):
        self.cache, self.fingerprint = cache, fingerprint
        self.version, self.elapsed = version, elapsed

    @staticmethod
    def empty():
        ''' Build a CacheResult with every field unset. '''
        return CacheResult(None, None, None, None)

    def todict(self):
        ''' Dictionary view of the result, omitting the elapsed time. '''
        return {key: getattr(self, key) for key in ('cache', 'fingerprint', 'version')}
def compare_cache_details(filepath, resultdir, data):
    ''' Compare cache file with known source data, return cache and fingerprint.

        Checks if fresh data is already cached, returns a new file path if not.
    '''
    if not exists(filepath):
        raise Exception('cached file {} is missing'.format(filepath))

    # MD5 of the file contents; chunked reads keep memory use flat.
    digest = md5()
    with open(filepath, 'rb') as cached_file:
        for chunk in iter(lambda: cached_file.read(8192), b''):
            digest.update(chunk)

    # If the source already points at an HTTP-cached copy with a matching
    # fingerprint, reuse it and skip moving the file.
    previously_http_cached = urlparse(data.get('cache', '')).scheme == 'http'
    if previously_http_cached and data.get('fingerprint') == digest.hexdigest():
        return data['cache'], data['fingerprint']

    cache_name = basename(filepath)
    if not exists(resultdir):
        mkdir(resultdir)
    move(filepath, join(resultdir, cache_name))

    return 'file://' + join(abspath(resultdir), cache_name), digest.hexdigest()
class DownloadError(Exception):
    ''' Raised when a source download cannot be completed. '''
    pass
class DownloadTask(object):
    ''' Base class for protocol-specific download tasks.

        Subclasses implement download(); instances carry a source prefix,
        default HTTP headers and optional extra query parameters.
    '''

    def __init__(self, source_prefix, params=None, headers=None):
        '''
        params: Additional query parameters, used by EsriRestDownloadTask.
        headers: Additional HTTP headers.
        '''
        # BUG FIX: params/headers previously used mutable dict defaults
        # ({}), a classic Python pitfall; None sentinels are equivalent
        # for all callers and safe.
        self.source_prefix = source_prefix
        self.headers = {
            'User-Agent': 'openaddresses-extract/1.0 (https://github.com/openaddresses/openaddresses)',
        }
        self.headers.update(dict(**(headers or {})))
        self.query_params = dict(**(params or {}))

    @classmethod
    def from_protocol_string(clz, protocol_string, source_prefix=None):
        ''' Build the download task matching a protocol name.

            Raises KeyError for unknown protocols.
        '''
        protocol = protocol_string.lower()
        # HTTP and FTP share the plain URL downloader.
        if protocol in ('http', 'ftp'):
            return URLDownloadTask(source_prefix)
        elif protocol == 'esri':
            return EsriRestDownloadTask(source_prefix)
        else:
            raise KeyError("I don't know how to extract for protocol {}".format(protocol_string))

    def download(self, source_urls, workdir, conform):
        ''' Fetch source_urls into workdir; implemented by subclasses. '''
        raise NotImplementedError()
def guess_url_file_extension(url):
    ''' Get a filename extension for a URL using various hints.

    Prefers the extension found in the URL path itself; otherwise inspects
    the response headers (Content-Type vs. Content-Disposition) and, as a
    last resort, sniffs the first bytes of content via the `file` utility.
    May perform a network GET for http(s) URLs.
    '''
    scheme, _, path, _, query, _ = urlparse(url)
    # Register extensions for mime-types Python does not map by default.
    mimetypes.add_type('application/x-zip-compressed', '.zip', False)
    mimetypes.add_type('application/vnd.geo+json', '.json', False)
    _, likely_ext = os.path.splitext(path)
    # Extensions that say nothing about the actual payload format.
    bad_extensions = '', '.cgi', '.php', '.aspx', '.asp', '.do'
    if not query and likely_ext not in bad_extensions:
        #
        # Trust simple URLs without meaningless filename extensions.
        #
        _L.debug(u'URL says "{}" for {}'.format(likely_ext, url))
        path_ext = likely_ext
    else:
        #
        # Get a dictionary of headers and a few bytes of content from the URL.
        #
        if scheme in ('http', 'https'):
            response = request('GET', url, stream=True)
            content_chunk = next(response.iter_content(99))
            headers = response.headers
            response.close()
        elif scheme in ('file', ''):
            headers = dict()
            # NOTE(review): text-mode open makes content_chunk a str here,
            # while the http branch yields bytes — confirm that
            # get_content_mimetype() tolerates both.
            with open(path) as file:
                content_chunk = file.read(99)
        else:
            raise ValueError('Unknown scheme "{}": {}'.format(scheme, url))
        path_ext = False
        # Guess path extension from Content-Type header
        if 'content-type' in headers:
            content_type = headers['content-type'].split(';')[0]
            _L.debug('Content-Type says "{}" for {}'.format(content_type, url))
            path_ext = mimetypes.guess_extension(content_type, False)
            #
            # Uh-oh, see if Content-Disposition disagrees with Content-Type.
            # Socrata recently started using Content-Disposition instead
            # of normal response headers so it's no longer easy to identify
            # file type.
            #
            if 'content-disposition' in headers:
                pattern = r'attachment; filename=("?)(?P<filename>[^;]+)\1'
                match = re.match(pattern, headers['content-disposition'], re.I)
                if match:
                    _, attachment_ext = splitext(match.group('filename'))
                    if path_ext == attachment_ext:
                        _L.debug('Content-Disposition agrees: "{}"'.format(match.group('filename')))
                    else:
                        # Disagreement: distrust the header-derived extension.
                        _L.debug('Content-Disposition disagrees: "{}"'.format(match.group('filename')))
                        path_ext = False
        if not path_ext:
            #
            # Headers didn't clearly define a known extension.
            # Instead, shell out to `file` to peek at the content.
            #
            mime_type = get_content_mimetype(content_chunk)
            _L.debug('file says "{}" for {}'.format(mime_type, url))
            path_ext = mimetypes.guess_extension(mime_type, False)
    return path_ext
def get_content_mimetype(chunk):
    ''' Get a mime-type for a short length of file content.

    Writes the bytes to a temporary file and asks the `file` utility for
    its best guess, e.g. "text/plain" or "application/zip".
    '''
    handle, file = mkstemp()
    try:
        # Close the descriptor even if the write fails (was leaked before).
        try:
            os.write(handle, chunk)
        finally:
            os.close(handle)
        mime_type = check_output(('file', '--mime-type', '-b', file)).strip()
    finally:
        # Remove the temp file even when `file` itself fails (was leaked
        # before).
        os.remove(file)

    return mime_type.decode('utf-8')
class URLDownloadTask(DownloadTask):
    ''' Download task for plain http/ftp/file URLs. '''
    # Stream downloads in 16 KB chunks.
    CHUNK = 16 * 1024

    def get_file_path(self, url, dir_path):
        ''' Return a local file path in a directory for a URL.

        May need to fill in a filename extension based on HTTP Content-Type.
        '''
        scheme, host, path, _, _, _ = urlparse(url)
        path_base, _ = os.path.splitext(path)
        if self.source_prefix is None:
            # With no source prefix like "us-ca-oakland" use the name as given.
            name_base = os.path.basename(path_base)
        else:
            # With a source prefix, create a safe and unique filename with a hash.
            hash = sha1((host + path_base).encode('utf-8'))
            name_base = u'{}-{}'.format(self.source_prefix, hash.hexdigest()[:8])
        # NOTE: guess_url_file_extension() may issue a network GET here.
        path_ext = guess_url_file_extension(url)
        _L.debug(u'Guessed {}{} for {}'.format(name_base, path_ext, url))
        return os.path.join(dir_path, name_base + path_ext)

    def download(self, source_urls, workdir, conform=None):
        ''' Download each URL into <workdir>/http and return the file paths.

        Previously-downloaded files are reused. `conform` is accepted for
        interface compatibility but unused here. Raises DownloadError on
        connection failure or a 4xx response.
        '''
        output_files = []
        download_path = os.path.join(workdir, 'http')
        mkdirsp(download_path)
        for source_url in source_urls:
            file_path = self.get_file_path(source_url, download_path)
            # FIXME: For URLs with file:// scheme, simply copy the file
            # to the expected location so that os.path.exists() returns True.
            # Instead, implement a FileDownloadTask class?
            scheme, _, path, _, _, _ = urlparse(source_url)
            if scheme == 'file':
                shutil.copy(path, file_path)
            if os.path.exists(file_path):
                # Reuse a previous download instead of fetching again.
                output_files.append(file_path)
                _L.debug("File exists %s", file_path)
                continue
            try:
                resp = request('GET', source_url, headers=self.headers, stream=True)
            except Exception as e:
                raise DownloadError("Could not connect to URL", e)
            # NOTE(review): range(400, 499) excludes status 499 and all 5xx
            # responses; those fall through and get written to disk —
            # confirm that is intended.
            if resp.status_code in range(400, 499):
                raise DownloadError('{} response from {}'.format(resp.status_code, source_url))
            size = 0
            with open(file_path, 'wb') as fp:
                for chunk in resp.iter_content(self.CHUNK):
                    size += len(chunk)
                    fp.write(chunk)
            output_files.append(file_path)
            _L.info("Downloaded %s bytes for file %s", size, file_path)
        return output_files
class EsriRestDownloadTask(DownloadTask):
    ''' Download task that dumps an ESRI REST feature service to CSV. '''

    def get_file_path(self, url, dir_path):
        ''' Return a local file path in a directory for a URL.
        '''
        _, host, path, _, _, _ = urlparse(url)
        hash, path_ext = sha1((host + path).encode('utf-8')), '.csv'
        # With no source prefix like "us-ca-oakland" use the host as a hint.
        name_base = '{}-{}'.format(self.source_prefix or host, hash.hexdigest()[:8])
        _L.debug('Downloading {} to {}{}'.format(url, name_base, path_ext))
        return os.path.join(dir_path, name_base + path_ext)

    @classmethod
    def fields_from_conform_function(cls, v):
        ''' Return the set of source field names referenced by one conform
        function description `v`.
        '''
        fxn = v.get('function')
        if fxn:
            if fxn in ('join', 'format'):
                return set(v['fields'])
            elif fxn == 'chain':
                fields = set()
                # Variables defined by the chain itself are not source fields.
                user_vars = set([v['variable']])
                for func in v['functions']:
                    if isinstance(func, dict) and 'function' in func:
                        fields |= cls.fields_from_conform_function(func) - user_vars
                return fields
            # NOTE(review): any other truthy function name falls through and
            # returns None implicitly; callers doing `fields |= ...` would
            # then raise TypeError — confirm whether that is intended.
        else:
            return set([v.get('field')])

    @classmethod
    def field_names_to_request(cls, conform):
        ''' Return list of fieldnames to request based on conform, or None.
        '''
        if not conform:
            return None
        fields = set()
        for k, v in conform.items():
            if k in attrib_types:
                if isinstance(v, dict):
                    # It's a function of some sort?
                    if 'function' in v:
                        fields |= cls.fields_from_conform_function(v)
                elif isinstance(v, list):
                    # It's a list of field names
                    fields |= set(v)
                else:
                    fields.add(v)
        if fields:
            # Sorted, de-duplicated, with empty/None names dropped.
            return list(filter(None, sorted(fields)))
        else:
            return None

    def download(self, source_urls, workdir, conform=None):
        ''' Dump each ESRI feature service in source_urls to a CSV file.

        Returns the list of output file paths; already-existing files are
        reused. Geometries are written as WKT plus centroid X/Y columns.
        '''
        output_files = []
        download_path = os.path.join(workdir, 'esri')
        mkdirsp(download_path)
        query_fields = EsriRestDownloadTask.field_names_to_request(conform)
        for source_url in source_urls:
            size = 0
            file_path = self.get_file_path(source_url, download_path)
            if os.path.exists(file_path):
                output_files.append(file_path)
                _L.debug("File exists %s", file_path)
                continue
            downloader = EsriDumper(source_url, parent_logger=_L, timeout=300)
            metadata = downloader.get_metadata()
            # With no conform-derived field list, request every layer field.
            if query_fields is None:
                field_names = [f['name'] for f in metadata['fields']]
            else:
                field_names = query_fields[:]
            # Always include the synthetic coordinate and geometry columns.
            if X_FIELDNAME not in field_names:
                field_names.append(X_FIELDNAME)
            if Y_FIELDNAME not in field_names:
                field_names.append(Y_FIELDNAME)
            if GEOM_FIELDNAME not in field_names:
                field_names.append(GEOM_FIELDNAME)
            # Get the count of rows in the layer
            try:
                row_count = downloader.get_feature_count()
                _L.info("Source has {} rows".format(row_count))
            except EsriDownloadError:
                _L.info("Source doesn't support count")
            with open(file_path, 'w', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=field_names)
                writer.writeheader()
                for feature in downloader:
                    try:
                        geom = feature.get('geometry') or {}
                        row = feature.get('properties') or {}
                        if not geom:
                            raise TypeError("No geometry parsed")
                        if any((isinstance(g, float) and math.isnan(g)) for g in traverse(geom)):
                            raise TypeError("Geometry has NaN coordinates")
                        shp = shape(feature['geometry'])
                        row[GEOM_FIELDNAME] = shp.wkt
                        try:
                            centroid = shp.centroid
                        except RuntimeError as e:
                            if 'Invalid number of points in LinearRing found' not in str(e):
                                raise
                            # Fall back to the bounding-box midpoint.
                            # NOTE(review): shapely's bounds are ordered
                            # (minx, miny, maxx, maxy); this unpacking reads
                            # them as (xmin, xmax, ymin, ymax) — confirm.
                            xmin, xmax, ymin, ymax = shp.bounds
                            row[X_FIELDNAME] = round(xmin/2 + xmax/2, 7)
                            row[Y_FIELDNAME] = round(ymin/2 + ymax/2, 7)
                        else:
                            if centroid.is_empty:
                                raise TypeError(json.dumps(feature['geometry']))
                            row[X_FIELDNAME] = round(centroid.x, 7)
                            row[Y_FIELDNAME] = round(centroid.y, 7)
                        writer.writerow({fn: row.get(fn) for fn in field_names})
                        size += 1
                    except TypeError:
                        # Features without usable geometry are skipped, not fatal.
                        _L.debug("Skipping a geometry", exc_info=True)
            _L.info("Downloaded %s ESRI features for file %s", size, file_path)
            output_files.append(file_path)
        return output_files
| isc | e3db2aedea52784263d641de782ffe98 | 35.141176 | 103 | 0.562956 | 4.207067 | false | false | false | false |
openaddresses/machine | setup.py | 1 | 4292 | from setuptools import setup
from os.path import join, dirname
# Read the distribution version from the openaddr/VERSION data file.
with open(join(dirname(__file__), 'openaddr', 'VERSION')) as file:
    version = file.read().strip()

# Packaging metadata for the OpenAddresses-Machine distribution.
setup(
    name = 'OpenAddresses-Machine',
    version = version,
    url = 'https://github.com/openaddresses/machine',
    author = 'Michal Migurski',
    author_email = 'mike-pypi@teczno.com',
    description = 'In-progress scripts for running OpenAddresses on a complete data set and publishing the results.',
    packages = ['openaddr', 'openaddr.util', 'openaddr.ci', 'openaddr.ci.coverage', 'openaddr.tests', 'openaddr.parcels'],
    # Command-line entry points installed with the package.
    entry_points = dict(
        console_scripts = [
            'openaddr-render-us = openaddr.render:main',
            'openaddr-preview-source = openaddr.preview:main',
            'openaddr-process-one = openaddr.process_one:main',
            'openaddr-ci-recreate-db = openaddr.ci.recreate_db:main',
            'openaddr-ci-run-dequeue = openaddr.ci.run_dequeue:main',
            'openaddr-ci-worker = openaddr.ci.worker:main',
            'openaddr-enqueue-sources = openaddr.ci.enqueue:main',
            'openaddr-collect-extracts = openaddr.ci.collect:main',
            'openaddr-index-tiles = openaddr.ci.tileindex:main',
            'openaddr-update-dotmap = openaddr.dotmap:main',
            'openaddr-sum-up-data = openaddr.ci.sum_up:main',
            'openaddr-calculate-coverage = openaddr.ci.coverage.calculate:main',
        ]
    ),
    # Non-Python files shipped inside each package.
    package_data = {
        'openaddr': [
            'geodata/*.shp', 'geodata/*.shx', 'geodata/*.prj', 'geodata/*.dbf',
            'geodata/*.cpg', 'VERSION',
        ],
        'openaddr.ci': [
            'schema.pgsql', 'templates/*.*', 'static/*.*'
        ],
        'openaddr.ci.coverage': [
            'schema.pgsql'
        ],
        'openaddr.tests': [
            'data/*.*', 'outputs/*.*', 'sources/*.*', 'sources/fr/*.*',
            'sources/us/*/*.*', 'sources/de/*.*', 'sources/nl/*.*',
            'sources/be/*/*.json', 'conforms/lake-man-gdb.gdb/*',
            'conforms/*.csv', 'conforms/*.dbf', 'conforms/*.zip', 'conforms/*.gfs',
            'conforms/*.gml', 'conforms/*.json', 'conforms/*.prj', 'conforms/*.shp',
            'conforms/*.shx', 'conforms/*.vrt',
            'parcels/sources/us/ca/*.*', 'parcels/sources/us/id/*.*',
            'parcels/data/*.*', 'parcels/data/us/ca/*.*',
            'parcels/data/us/ca/berkeley/*.*'
        ],
        'openaddr.parcels': [
            'README.md'
        ],
        'openaddr.util': [
            'templates/*.*'
        ]
    },
    test_suite = 'openaddr.tests',
    # Dependencies are pinned to exact versions for reproducible deploys.
    install_requires = [
        'boto == 2.49.0', 'dateutils == 0.6.6', 'ijson == 2.4',
        # http://jinja.pocoo.org/docs/2.10/
        'Jinja2 == 2.10.1',
        # http://flask.pocoo.org
        'Flask == 1.1.1',
        # http://flask-cors.corydolphin.com
        'Flask-Cors == 3.0.8',
        # https://www.palletsprojects.com/p/werkzeug/
        'Werkzeug == 0.16.0',
        # http://gunicorn.org
        'gunicorn == 19.10.0',
        # http://www.voidspace.org.uk/python/mock/
        'mock == 3.0.5',
        # https://github.com/uri-templates/uritemplate-py/
        'uritemplate == 3.0.0',
        # https://github.com/malthe/pq/
        'pq == 1.8.1',
        # http://initd.org/psycopg/
        'psycopg2-binary == 2.8.4',
        # http://docs.python-requests.org/en/master/
        'requests == 2.22.0',
        # https://github.com/patrys/httmock
        'httmock == 1.3.0',
        # https://boto3.readthedocs.org
        'boto3 == 1.11.5',
        # https://github.com/openaddresses/pyesridump
        'esridump == 1.6.0',
        # Used in openaddr.parcels
        'Shapely == 1.7b1',
        'Fiona == 1.8.13',
        # Used in dotmaps preview to support S3-backed SQLite mbtiles
        # https://rogerbinns.github.io/apsw/
        'apsw == 3.9.2.post1',
        # http://pythonhosted.org/itsdangerous/
        'itsdangerous == 1.1.0',
        # https://pypi.python.org/pypi/python-memcached
        'python-memcached == 1.59',
        # https://github.com/tilezen/mapbox-vector-tile
        'mapbox-vector-tile==1.2.0',
        'future==0.16.0',
        'protobuf==3.5.1',
        'pyclipper==1.1.0',
        'six==1.11.0',
    ]
)
| isc | 4771c59723550875635c66516e2ef967 | 33.336 | 122 | 0.537745 | 3.234363 | false | false | false | false |
openaddresses/machine | openaddr/ci/enqueue.py | 1 | 4394 | import logging; _L = logging.getLogger('openaddr.ci.enqueue')
from os import environ
from itertools import count
from time import time, sleep
from argparse import ArgumentParser
from . import (
db_connect, db_queue, TASK_QUEUE, load_config, setup_logger,
enqueue_sources, find_batch_sources, get_batch_run_times
)
from .objects import add_set
from ..util import set_autoscale_capacity
from . import render_set_maps, log_function_errors
from .. import S3
from boto import connect_autoscale, connect_cloudwatch
# Command-line interface; most options can also come from the environment.
parser = ArgumentParser(description='Run some source files.')

parser.add_argument('-o', '--owner', default='openaddresses',
                    help='Github repository owner. Defaults to "openaddresses".')

parser.add_argument('-r', '--repository', default='openaddresses',
                    help='Github repository name. Defaults to "openaddresses".')

parser.add_argument('-t', '--github-token', default=environ.get('GITHUB_TOKEN', None),
                    help='Optional token value for reading from Github. Defaults to value of GITHUB_TOKEN environment variable.')

parser.add_argument('-d', '--database-url', default=environ.get('DATABASE_URL', None),
                    help='Optional connection string for database. Defaults to value of DATABASE_URL environment variable.')

parser.add_argument('-b', '--bucket', default=environ.get('AWS_S3_BUCKET', None),
                    help='S3 bucket name. Defaults to value of AWS_S3_BUCKET environment variable.')

parser.add_argument('--sns-arn', default=environ.get('AWS_SNS_ARN', None),
                    help='Optional AWS Simple Notification Service (SNS) resource. Defaults to value of AWS_SNS_ARN environment variable.')

parser.add_argument('--cloudwatch-ns', default=environ.get('AWS_CLOUDWATCH_NS', None),
                    help='Optional AWS CloudWatch namespace. Defaults to value of AWS_CLOUDWATCH_NS environment variable.')

parser.add_argument('-v', '--verbose', help='Turn on verbose logging',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)

parser.add_argument('-q', '--quiet', help='Turn off most logging',
                    action='store_const', dest='loglevel',
                    const=logging.WARNING, default=logging.INFO)

@log_function_errors
def main():
    ''' Single threaded worker to serve the job queue.

    Finds the batch sources for the configured repository, enqueues them
    as a new set, periodically grows autoscale capacity while the queue
    drains, then renders set maps. Returns a process exit code (0/1).
    '''
    args = parser.parse_args()
    setup_logger(args.sns_arn, None, log_level=args.loglevel)
    s3 = S3(None, None, args.bucket)
    autoscale = connect_autoscale(None, None)
    cloudwatch = connect_cloudwatch(None, None)
    github_auth = args.github_token, 'x-oauth-basic'
    # Report queue size every minute; grow autoscale capacity every 8 hours.
    next_queue_interval, next_autoscale_interval = 60, 8 * 3600

    try:
        with db_connect(args.database_url) as conn:
            task_Q = db_queue(conn, TASK_QUEUE)
            next_queue_report = time() + next_queue_interval
            next_autoscale_grow = time() + next_autoscale_interval
            minimum_capacity = count(1)
            with task_Q as db:
                run_times = get_batch_run_times(db, args.owner, args.repository)
            sources = find_batch_sources(args.owner, args.repository, github_auth, run_times)
            with task_Q as db:
                new_set = add_set(db, args.owner, args.repository)
            for expected_count in enqueue_sources(task_Q, new_set, sources):
                if time() >= next_queue_report:
                    next_queue_report, n = time() + next_queue_interval, len(task_Q)
                    _L.debug('Task queue has {} item{}, {} sources expected'.format(n, 's' if n != 1 else '', expected_count))
                try:
                    if time() >= next_autoscale_grow:
                        next_autoscale_grow = time() + next_autoscale_interval
                        # Bump the minimum worker count one step each period.
                        set_autoscale_capacity(autoscale, cloudwatch, args.cloudwatch_ns, next(minimum_capacity))
                except Exception as e:
                    # Autoscale hiccups are logged but never fatal.
                    _L.error('Problem during autoscale', exc_info=True)
                if expected_count:
                    sleep(2)
            with task_Q as db:
                _L.debug('Rendering that shit')
                render_set_maps(s3, db, new_set)
    # NOTE(review): bare except converts every failure (including
    # KeyboardInterrupt/SystemExit) into exit code 1 — confirm intended.
    except:
        _L.error('Error in worker main()', exc_info=True)
        return 1
    else:
        return 0

if __name__ == '__main__':
    exit(main())
| isc | aa97aa8f6f9f18d4f0f706290e7fb6ee | 41.25 | 139 | 0.624943 | 3.980072 | false | false | false | false |
mitre/multiscanner | multiscanner/modules/Metadata/ssdeeper.py | 3 | 1625 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
try:
import ssdeep
except ImportError:
print("ssdeep module not installed...")
ssdeep = False
import time
__author__ = "Drew Bonasera"
__license__ = "MPL 2.0"
TYPE = "Metadata"
NAME = "ssdeep"
def check():
    ''' Report whether the ssdeep backend imported successfully.

    The module-level `ssdeep` name is the imported module, or False when
    the import failed at load time.
    '''
    return bool(ssdeep)
def scan(filelist):
    ''' Compute an ssdeep fuzzy hash for every file in filelist.

    Returns a ([(filename, result_dict), ...], metadata) tuple; files
    that fail five times in a row are silently omitted from results.
    '''
    results = []
    for fname in filelist:
        goodtogo = False
        i = 0
        # Ran into a weird issue with file locking, this fixes it
        while not goodtogo and i < 5:
            try:
                ssdeep_hash = ssdeep.hash_from_file(fname)
                # An ssdeep hash has the form "chunksize:chunk:double_chunk".
                chunksize, chunk, double_chunk = ssdeep_hash.split(':')
                chunksize = int(chunksize)
                doc = {
                    'ssdeep_hash': ssdeep_hash,
                    'chunksize': chunksize,
                    'chunk': chunk,
                    'double_chunk': double_chunk,
                    'analyzed': 'false',
                    'matches': {},
                }
                results.append((fname, doc))
                goodtogo = True
            except Exception as e:
                # Retry up to 5 times with a short pause between attempts.
                print('ssdeeper:', e)
                time.sleep(3)
                i += 1
    metadata = {}
    metadata["Name"] = NAME
    metadata["Type"] = TYPE
    metadata["Include"] = False
    return (results, metadata)
| mpl-2.0 | 5af32bf6b3a1e5a0485f5d93e3f3616c | 27.017241 | 98 | 0.533538 | 4.166667 | false | false | false | false |
odlgroup/odl | examples/solvers/proximal_lang_poisson.py | 3 | 1295 | """Poisson's problem using the ProxImaL solver.
Solves the optimization problem
min_x 10 ||laplacian(x) - g||_2^2 + || |grad(x)| ||_1
Where ``laplacian`` is the spatial Laplacian, ``grad`` the spatial
gradient and ``g`` is given noisy data.
"""
import numpy as np
import odl
import proximal
# Create space defined on a square from [0, 0] to [100, 100] with (100 x 100)
# points
space = odl.uniform_discr([0, 0], [100, 100], [100, 100])

# Create ODL operator for the Laplacian
laplacian = odl.Laplacian(space)

# Create right hand side
phantom = odl.phantom.shepp_logan(space, modified=True)
phantom.show('original image')
rhs = laplacian(phantom)
# Perturb the data with white noise scaled to 10% of its standard deviation.
rhs += odl.phantom.white_noise(space) * np.std(rhs) * 0.1
rhs.show('rhs')

# Convert laplacian to ProxImaL operator
proximal_lang_laplacian = odl.as_proximal_lang_operator(laplacian)

# Convert to array
rhs_arr = rhs.asarray()

# Set up optimization problem: 10 * ||laplacian(x) - g||_2^2 + TV term.
x = proximal.Variable(space.shape)
funcs = [10 * proximal.sum_squares(proximal_lang_laplacian(x) - rhs_arr),
         proximal.norm1(proximal.grad(x))]

# Solve the problem using ProxImaL
prob = proximal.Problem(funcs)
prob.solve(verbose=True)

# Convert back to odl and display result
result_odl = space.element(x.value)
result_odl.show('result from ProxImaL', force_show=True)
| mpl-2.0 | e95b2459544512788ca895f387969a50 | 27.152174 | 77 | 0.720463 | 2.903587 | false | false | false | false |
odlgroup/odl | odl/contrib/datasets/ct/mayo.py | 1 | 11135 | # Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Tomographic datasets from Mayo Clinic.
In addition to the standard ODL requirements, this library also requires:
- tqdm
- dicom
- A copy of the Mayo dataset, see
https://www.aapm.org/GrandChallenge/LowDoseCT/#registration
"""
from __future__ import division
import numpy as np
import os
import dicom
import odl
import tqdm
from dicom.datadict import DicomDictionary, NameDict, CleanName
from odl.discr.discr_utils import linear_interpolator
from odl.contrib.datasets.ct.mayo_dicom_dict import new_dict_items
# Update the DICOM dictionary with the extra Mayo tags
DicomDictionary.update(new_dict_items)
NameDict.update((CleanName(tag), tag) for tag in new_dict_items)
__all__ = ('load_projections', 'load_reconstruction')
def _read_projections(folder, indices):
"""Read mayo projections from a folder."""
datasets = []
# Get the relevant file names
file_names = sorted([f for f in os.listdir(folder) if f.endswith(".dcm")])
if len(file_names) == 0:
raise ValueError('No DICOM files found in {}'.format(folder))
file_names = file_names[indices]
data_array = None
for i, file_name in enumerate(tqdm.tqdm(file_names,
'Loading projection data')):
# read the file
dataset = dicom.read_file(folder + '/' + file_name)
# Get some required data
rows = dataset.NumberofDetectorRows
cols = dataset.NumberofDetectorColumns
hu_factor = dataset.HUCalibrationFactor
rescale_intercept = dataset.RescaleIntercept
rescale_slope = dataset.RescaleSlope
# Load the array as bytes
proj_array = np.array(np.frombuffer(dataset.PixelData, 'H'),
dtype='float32')
proj_array = proj_array.reshape([rows, cols], order='F').T
# Rescale array
proj_array *= rescale_slope
proj_array += rescale_intercept
proj_array /= hu_factor
# Store results
if data_array is None:
# We need to load the first dataset before we know the shape
data_array = np.empty((len(file_names), cols, rows),
dtype='float32')
data_array[i] = proj_array[:, ::-1]
datasets.append(dataset)
return datasets, data_array
def load_projections(folder, indices=None):
    """Load geometry and data stored in Mayo format from folder.

    Parameters
    ----------
    folder : str
        Path to the folder where the Mayo DICOM files are stored.
    indices : optional
        Indices of the projections to load.
        Accepts advanced indexing such as slice or list of indices.

    Returns
    -------
    geometry : ConeBeamGeometry
        Geometry corresponding to the Mayo projector.
    proj_data : `numpy.ndarray`
        Projection data, given as the line integral of the linear attenuation
        coefficient (g/cm^3). Its unit is thus g/cm^2.
    """
    # NOTE(review): `indices` is applied as a plain list index inside
    # _read_projections — a slice works; verify other index forms.
    datasets, data_array = _read_projections(folder, indices)

    # Get the angles
    angles = [d.DetectorFocalCenterAngularPosition for d in datasets]
    angles = -np.unwrap(angles) - np.pi  # different definition of angles

    # Set minimum and maximum corners
    shape = np.array([datasets[0].NumberofDetectorColumns,
                      datasets[0].NumberofDetectorRows])
    pixel_size = np.array([datasets[0].DetectorElementTransverseSpacing,
                           datasets[0].DetectorElementAxialSpacing])

    # Correct from center of pixel to corner of pixel
    minp = -(np.array(datasets[0].DetectorCentralElement) - 0.5) * pixel_size
    maxp = minp + shape * pixel_size

    # Select geometry parameters
    src_radius = datasets[0].DetectorFocalCenterRadialDistance
    det_radius = (datasets[0].ConstantRadialDistance -
                  datasets[0].DetectorFocalCenterRadialDistance)

    # For unknown reasons, mayo does not include the tag
    # "TableFeedPerRotation", which is what we want.
    # Instead we manually compute the pitch
    pitch = ((datasets[-1].DetectorFocalCenterAxialPosition -
              datasets[0].DetectorFocalCenterAxialPosition) /
             ((np.max(angles) - np.min(angles)) / (2 * np.pi)))

    # Get flying focal spot data
    offset_axial = np.array([d.SourceAxialPositionShift for d in datasets])
    offset_angular = np.array([d.SourceAngularPositionShift for d in datasets])
    offset_radial = np.array([d.SourceRadialDistanceShift for d in datasets])

    # TODO(adler-j): Implement proper handling of flying focal spot.
    # Currently we do not fully account for it, merely making some "first
    # order corrections" to the detector position and radial offset.

    # Update angles with flying focal spot (in plane direction).
    # This increases the resolution of the reconstructions.
    angles = angles - offset_angular

    # We correct for the mean offset due to the rotated angles, we need to
    # shift the detector.
    offset_detector_by_angles = det_radius * np.mean(offset_angular)
    minp[0] -= offset_detector_by_angles
    maxp[0] -= offset_detector_by_angles

    # We currently apply only the mean of the offsets
    src_radius = src_radius + np.mean(offset_radial)

    # Partially compensate for a movement of the source by moving the object
    # instead. We need to rescale by the magnification to get the correct
    # change in the detector. This approximation is only exactly valid on the
    # axis of rotation.
    mean_offset_along_axis_for_ffz = np.mean(offset_axial) * (
        src_radius / (src_radius + det_radius))

    # Create partition for detector
    detector_partition = odl.uniform_partition(minp, maxp, shape)

    # Convert offset to odl definitions
    offset_along_axis = (mean_offset_along_axis_for_ffz +
                         datasets[0].DetectorFocalCenterAxialPosition -
                         angles[0] / (2 * np.pi) * pitch)

    # Assemble geometry
    angle_partition = odl.nonuniform_partition(angles)
    geometry = odl.tomo.ConeBeamGeometry(angle_partition,
                                         detector_partition,
                                         src_radius=src_radius,
                                         det_radius=det_radius,
                                         pitch=pitch,
                                         offset_along_axis=offset_along_axis)

    # Create a *temporary* ray transform (we need its range)
    spc = odl.uniform_discr([-1] * 3, [1] * 3, [32] * 3)
    ray_trafo = odl.tomo.RayTransform(spc, geometry, interp='linear')

    # convert coordinates from the cylindrical detector parameterization
    theta, up, vp = ray_trafo.range.grid.meshgrid
    d = src_radius + det_radius
    u = d * np.arctan(up / d)
    v = d / np.sqrt(d**2 + up**2) * vp

    # Calculate projection data in rectangular coordinates since we have no
    # backend that supports cylindrical
    interpolator = linear_interpolator(
        data_array, ray_trafo.range.coord_vectors
    )
    proj_data = interpolator((theta, u, v))

    return geometry, proj_data.asarray()
def load_reconstruction(folder, slice_start=0, slice_end=-1):
    """Load a volume from folder, also returns the corresponding partition.

    Parameters
    ----------
    folder : str
        Path to the folder where the DICOM files are stored.
    slice_start : int
        Index of the first slice to use. Used for subsampling.
    slice_end : int
        Index of the final slice to use.

    Returns
    -------
    partition : `odl.RectPartition`
        Partition describing the geometric positioning of the voxels.
    data : `numpy.ndarray`
        Volumetric data. Scaled such that data = 1 for water (0 HU).

    Notes
    -----
    DICOM data is highly non trivial. Typically, each slice has been computed
    with a slice tickness (e.g. 3mm) but the slice spacing might be
    different from that.
    Further, the coordinates in DICOM is typically the *middle* of the pixel,
    not the corners as in ODL.
    This function should handle all of these peculiarities and give a volume
    with the correct coordinate system attached.
    """
    file_names = sorted([f for f in os.listdir(folder) if f.endswith(".IMA")])

    if len(file_names) == 0:
        raise ValueError('No DICOM files found in {}'.format(folder))

    volumes = []
    datasets = []

    # NOTE(review): if this slice leaves file_names empty, `rows`/`cols`
    # below are never bound — assumes at least one slice is selected.
    file_names = file_names[slice_start:slice_end]

    for file_name in tqdm.tqdm(file_names, 'loading volume data'):
        # read the file
        dataset = dicom.read_file(folder + '/' + file_name)

        # Get parameters
        pixel_size = np.array(dataset.PixelSpacing)
        pixel_thickness = float(dataset.SliceThickness)
        rows = dataset.Rows
        cols = dataset.Columns

        # Get data array and convert to correct coordinates
        data_array = np.array(np.frombuffer(dataset.PixelData, 'H'),
                              dtype='float32')
        data_array = data_array.reshape([cols, rows], order='C')
        data_array = np.rot90(data_array, -1)

        # Convert from storage type to densities
        # TODO: Optimize these computations
        hu_values = (dataset.RescaleSlope * data_array +
                     dataset.RescaleIntercept)
        densities = (hu_values + 1000) / 1000

        # Store results
        volumes.append(densities)
        datasets.append(dataset)

    voxel_size = np.array(list(pixel_size) + [pixel_thickness])
    shape = np.array([rows, cols, len(volumes)])

    # Compute geometry parameters
    mid_pt = (np.array(dataset.ReconstructionTargetCenterPatient) -
              np.array(dataset.DataCollectionCenterPatient))
    reconstruction_size = (voxel_size * shape)
    min_pt = mid_pt - reconstruction_size / 2
    max_pt = mid_pt + reconstruction_size / 2

    # axis 1 has reversed convention
    min_pt[1], max_pt[1] = -max_pt[1], -min_pt[1]

    if len(datasets) > 1:
        slice_distance = np.abs(
            float(datasets[1].DataCollectionCenterPatient[2]) -
            float(datasets[0].DataCollectionCenterPatient[2]))
    else:
        # If we only have one slice, we must approximate the distance.
        slice_distance = pixel_thickness

    # The middle of the minimum/maximum slice can be computed from the
    # DICOM attribute "DataCollectionCenterPatient". Since ODL uses corner
    # points (e.g. edge of volume) we need to add half a voxel thickness to
    # both sides.
    min_pt[2] = -np.array(datasets[0].DataCollectionCenterPatient)[2]
    min_pt[2] -= 0.5 * slice_distance
    max_pt[2] = -np.array(datasets[-1].DataCollectionCenterPatient)[2]
    max_pt[2] += 0.5 * slice_distance

    partition = odl.uniform_partition(min_pt, max_pt, shape)

    volume = np.transpose(np.array(volumes), (1, 2, 0))

    return partition, volume
if __name__ == '__main__':
    # Run the module's doctest examples when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
| mpl-2.0 | 06bee21182020e81a8dd9874416dc294 | 35.508197 | 79 | 0.647238 | 3.842305 | false | false | false | false |
odlgroup/odl | odl/contrib/solvers/functional/nonlocalmeans_functionals.py | 2 | 3997 | # Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Non Local Means functionals."""
from __future__ import print_function, division, absolute_import
import numpy as np
from odl.operator import Operator
from odl.solvers.functional.functional import Functional
__all__ = ('NLMRegularizer',)
class NLMRegularizer(Functional):
    r"""The nonlocal means "functional".

    This is not a true functional in the strict sense, but regardless it
    implements a `proximal` method and is hence usable with proximal solvers.
    See [Heide+2015] for more information.

    The functional requires an appropriate backend. To install the backends run

    =========== ===============================
    `impl`      call
    =========== ===============================
    `'skimage'` ``$ pip install scikit-image``
    `'opencv'`  ``$ pip install opencv-python``
    =========== ===============================

    Notes
    -----
    The nonlocal means regularization of a image :math:`u` is given by

    .. math::
        NL[u](x) =
        \frac{1}{C(x)}
        \int_\Omega
        e^{-\frac{(G_a * |u(x + \cdot) - u(y + \cdot)|^2)(0)}{h^2}}
        u(y) dy

    where :math:`\Omega` is the domain, :math:`G_a` is a gaussian kernel,
    :math:`h` is a parameter and :math:`*` denotes convolution and :math:`C(x)`
    is a normalization constant

    .. math::
        C(x) =
        \int_\Omega
        e^{-\frac{(G_a * |u(x + \cdot) - u(y + \cdot)|^2)(0)}{h^2}}
        dy

    See [Buades+2005] for more information.

    References
    ----------
    [Buades+2005] *A non-local algorithm for image denoising*, A. Buades,
    B. Coll and J.-M. Morel. CVPR 2005

    [Heide+2015] *FlexISP: a flexible camera image processing framework*,
    F. Heide et. al. SIGGRAPH Asia 2014
    """

    def __init__(self, space, h,
                 patch_size=7, patch_distance=11, impl='skimage'):
        """Initialize a new instance.

        Parameters
        ----------
        space : `LinearSpace`
            Domain (and range of the proximal) of the functional.
        h : float
            Smoothing parameter :math:`h` in the functional.
        patch_size, patch_distance : int, optional
            Patch parameters forwarded to the denoising backend.
        impl : {'skimage', 'opencv'}, optional
            Backend used by the proximal operator.
        """
        self.h = float(h)
        self.impl = impl
        self.patch_size = patch_size
        self.patch_distance = patch_distance
        super(NLMRegularizer, self).__init__(
            space=space, linear=False, grad_lipschitz=np.nan)

    @property
    def proximal(self):
        """Factory for the proximal operator, parameterized by step size."""
        func = self

        class NLMProximal(Operator):
            def __init__(self, stepsize):
                super(NLMProximal, self).__init__(
                    func.domain, func.domain, linear=False)
                self.stepsize = stepsize

            def _call(self, x):
                # Effective filtering strength scales with the step size.
                h = func.h * self.stepsize
                if func.impl == 'skimage':
                    from skimage.restoration import denoise_nl_means
                    x_arr = x.asarray()
                    return denoise_nl_means(
                        x_arr,
                        patch_size=func.patch_size,
                        patch_distance=func.patch_distance,
                        h=h,
                        multichannel=False)
                elif func.impl == 'opencv':
                    import cv2
                    x_arr = x.asarray()
                    # OpenCV works on uint8, so rescale to [0, 255], denoise,
                    # then map back to the original value range.
                    xmin, xmax = np.min(x_arr), np.max(x_arr)
                    x_arr = (x_arr - xmin) * 255.0 / (xmax - xmin)
                    x_arr = x_arr.astype('uint8')
                    h_scaled = h * 255.0 / (xmax - xmin)
                    res = cv2.fastNlMeansDenoising(
                        x_arr,
                        templateWindowSize=func.patch_size,
                        searchWindowSize=2 * func.patch_distance + 1,
                        h=h_scaled)
                    return res * (xmax - xmin) / 255.0 + xmin
                # NOTE(review): other impl values fall through and return
                # None implicitly — confirm whether that should raise.
        return NLMProximal
if __name__ == '__main__':
    # Run the module's doctest examples when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
| mpl-2.0 | 497df34501ff58420b9fa6611cbfad4f | 31.762295 | 79 | 0.518889 | 3.785038 | false | false | false | false |
odlgroup/odl | odl/discr/grid.py | 2 | 38427 | # Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Sparse implementations of n-dimensional sampling grids.
Sampling grids are collections of points in an n-dimensional coordinate
space with a certain structure which is exploited to minimize storage.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from odl.set import Set, IntervalProd
from odl.util import (
normalized_index_expression, normalized_scalar_param_list, safe_int_conv,
array_str, signature_string, indent, npy_printoptions)
__all__ = (
'sparse_meshgrid',
'RectGrid',
'uniform_grid',
'uniform_grid_fromintv',
)
def sparse_meshgrid(*x):
    """Make a sparse `meshgrid` by adding empty dimensions.

    Each input vector is reshaped so that it extends along "its own"
    axis only, with all other axes having length 1. The resulting
    vectors broadcast against each other like a full meshgrid while
    storing only ``sum(len(xi))`` values.

    Parameters
    ----------
    x1,...,xN : `array-like`
        Input arrays to turn into sparse meshgrid vectors.

    Returns
    -------
    meshgrid : tuple of `numpy.ndarray`'s
        Sparse coordinate vectors representing an N-dimensional grid.

    See Also
    --------
    numpy.meshgrid : dense or sparse meshgrids

    Examples
    --------
    >>> x, y = [0, 1], [2, 3, 4]
    >>> mesh = sparse_meshgrid(x, y)
    >>> sum(xi for xi in mesh).ravel()  # first axis slowest
    array([2, 3, 4, 3, 4, 5])
    """
    ndim = len(x)

    def expand(axis, vec):
        # Keep the data along `axis`, insert a length-1 axis elsewhere.
        index = [None] * ndim
        index[axis] = slice(None)
        return np.ascontiguousarray(np.asarray(vec)[tuple(index)])

    return tuple(expand(axis, vec) for axis, vec in enumerate(x))
class RectGrid(Set):
"""An n-dimensional rectilinear grid.
A rectilinear grid is the set of points defined by all possible
combination of coordinates taken from fixed coordinate vectors.
The storage need for a rectilinear grid is only the sum of the lengths
of the coordinate vectors, while the total number of points is
the product of these lengths. This class makes use of that
sparse storage scheme.
See ``Notes`` for details.
"""
    def __init__(self, *coord_vectors):
        r"""Initialize a new instance.

        Parameters
        ----------
        vec1,...,vecN : `array-like`
            The coordinate vectors defining the grid points. They must
            be sorted in ascending order and may not contain
            duplicates. Empty vectors are not allowed.

        Examples
        --------
        >>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
        >>> g
        RectGrid(
            [ 1.,  2.,  5.],
            [-2. ,  1.5,  2. ]
        )
        >>> g.ndim  # number of axes
        2
        >>> g.shape  # points per axis
        (3, 3)
        >>> g.size  # total number of points
        9

        Grid points can be extracted with index notation (NOTE: This is
        slow, do not loop over the grid using indices!):

        >>> g = RectGrid([-1, 0, 3], [2, 4, 5], [5], [2, 4, 7])
        >>> g[0, 0, 0, 0]
        array([-1.,  2.,  5.,  2.])

        Slices and ellipsis are also supported:

        >>> g[:, 0, 0, 0]
        RectGrid(
            [-1.,  0.,  3.],
            [ 2.],
            [ 5.],
            [ 2.]
        )
        >>> g[0, ..., 1:]
        RectGrid(
            [-1.],
            [ 2.,  4.,  5.],
            [ 5.],
            [ 4.,  7.]
        )

        Notes
        -----
        In 2 dimensions, for example, given two coordinate vectors

        .. math::
            v_1 = (-1, 0, 2),\ v_2 = (0, 1)

        the corresponding rectilinear grid :math:`G` is the set of all
        2d points whose first component is from :math:`v_1` and the
        second component from :math:`v_2`:

        .. math::
            G = \{(-1, 0), (-1, 1), (0, 0), (0, 1), (2, 0), (2, 1)\}

        Here is a graphical representation::

            :    :        :
            :    :        :
            1 -x----x--------x-...
               |    |        |
            0 -x----x--------x-...
               |    |        |
              -1    0        2

        Apparently, this structure can represent grids with arbitrary step
        sizes in each axis.

        Note that the above ordering of points is the standard ``'C'``
        ordering where the first axis (:math:`v_1`) varies slowest.
        Ordering is only relevant when the point array is actually created;
        the grid itself is independent of this ordering.
        """
        super(RectGrid, self).__init__()

        # Normalize all inputs to 1d float64 arrays (scalars become
        # length-1 vectors via `atleast_1d`).
        vecs = tuple(np.atleast_1d(vec).astype('float64')
                     for vec in coord_vectors)

        # Validate each coordinate vector: nonempty, finite, 1d, and
        # strictly increasing (sorted with no duplicates).
        for i, vec in enumerate(vecs):

            if len(vec) == 0:
                raise ValueError('vector {} has zero length'
                                 ''.format(i + 1))

            if not np.all(np.isfinite(vec)):
                raise ValueError('vector {} contains invalid entries'
                                 ''.format(i + 1))

            if vec.ndim != 1:
                raise ValueError('vector {} has {} dimensions instead of 1'
                                 ''.format(i + 1, vec.ndim))

            sorted_vec = np.sort(vec)
            if np.any(vec != sorted_vec):
                raise ValueError('vector {} not sorted'
                                 ''.format(i + 1))

            if np.any(np.diff(vec) == 0):
                raise ValueError('vector {} contains duplicates'
                                 ''.format(i + 1))

        # Lazily evaluates strides when needed but stores the result
        self.__stride = None
        self.__coord_vectors = vecs

        # Non-degenerate axes, i.e. axes with more than one point
        self.__nondegen_byaxis = tuple(len(v) > 1 for v in self.coord_vectors)

        # Uniformity, setting True in degenerate axes (empty diff array)
        diffs = [np.diff(v) for v in self.coord_vectors]
        self.__is_uniform_byaxis = tuple(
            (diff.size == 0) or np.allclose(diff, diff[0])
            for diff in diffs)
# Attributes
@property
def coord_vectors(self):
"""Coordinate vectors of the grid.
Returns
-------
coord_vectors : tuple of `numpy.ndarray`'s
Examples
--------
>>> g = RectGrid([0, 1], [-1, 0, 2])
>>> x, y = g.coord_vectors
>>> x
array([ 0., 1.])
>>> y
array([-1., 0., 2.])
See Also
--------
meshgrid : Same result but with nd arrays
"""
return self.__coord_vectors
@property
def ndim(self):
"""Number of dimensions of the grid."""
try:
return self.__ndim
except AttributeError:
ndim = len(self.coord_vectors)
self.__ndim = ndim
return ndim
@property
def shape(self):
"""Number of grid points per axis."""
try:
return self.__shape
except AttributeError:
shape = tuple(len(vec) for vec in self.coord_vectors)
self.__shape = shape
return shape
@property
def size(self):
"""Total number of grid points."""
# Since np.prod(()) == 1.0 we need to handle that by ourselves
return (0 if self.shape == () else
int(np.prod(self.shape, dtype='int64')))
def __len__(self):
"""Return ``len(self)``.
The length along the first dimension.
Examples
--------
>>> g = RectGrid([0, 1], [-1, 0, 2], [4, 5, 6])
>>> len(g)
2
See Also
--------
size : The total number of elements.
"""
return 0 if self.shape == () else self.shape[0]
@property
def min_pt(self):
"""Vector containing the minimal grid coordinates per axis.
Examples
--------
>>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
>>> g.min_pt
array([ 1., -2.])
"""
return np.array([vec[0] for vec in self.coord_vectors])
@property
def max_pt(self):
"""Vector containing the maximal grid coordinates per axis.
Examples
--------
>>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
>>> g.max_pt
array([ 5., 2.])
"""
return np.array([vec[-1] for vec in self.coord_vectors])
@property
def nondegen_byaxis(self):
"""Boolean array with ``True`` entries for non-degenerate axes.
Examples
--------
>>> g = uniform_grid([0, 0], [1, 1], (5, 1))
>>> g.nondegen_byaxis
(True, False)
"""
return self.__nondegen_byaxis
@property
def is_uniform_byaxis(self):
"""Boolean tuple showing uniformity of this grid per axis."""
return self.__is_uniform_byaxis
@property
def is_uniform(self):
"""``True`` if this grid is uniform in all axes, else ``False``."""
return all(self.is_uniform_byaxis)
# min, max and extent are for set duck-typing
def min(self, **kwargs):
"""Return `min_pt`.
Parameters
----------
kwargs
For duck-typing with `numpy.amin`
See Also
--------
max
odl.set.domain.IntervalProd.min
Examples
--------
>>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
>>> g.min()
array([ 1., -2.])
Also works with Numpy:
>>> np.min(g)
array([ 1., -2.])
"""
out = kwargs.get('out', None)
if out is not None:
out[:] = self.min_pt
return out
else:
return self.min_pt
def max(self, **kwargs):
"""Return `max_pt`.
Parameters
----------
kwargs
For duck-typing with `numpy.amax`
See Also
--------
min
odl.set.domain.IntervalProd.max
Examples
--------
>>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
>>> g.max()
array([ 5., 2.])
Also works with Numpy:
>>> np.max(g)
array([ 5., 2.])
"""
out = kwargs.get('out', None)
if out is not None:
out[:] = self.max_pt
return out
else:
return self.max_pt
@property
def mid_pt(self):
"""Midpoint of the grid, not necessarily a grid point.
Examples
--------
>>> rg = uniform_grid([-1.5, -1], [-0.5, 3], (2, 3))
>>> rg.mid_pt
array([-1., 1.])
"""
return (self.max_pt + self.min_pt) / 2
    @property
    def stride(self):
        """Step per axis between neighboring points of a uniform grid.

        If the grid contains axes that are not uniform, ``stride`` has
        a ``NaN`` entry.

        For degenerate (length 1) axes, ``stride`` has value ``0.0``.

        Returns
        -------
        stride : numpy.array
            Array of dtype ``float`` and length `ndim`.

        Examples
        --------
        >>> rg = uniform_grid([-1.5, -1], [-0.5, 3], (2, 3))
        >>> rg.stride
        array([ 1.,  2.])

        NaN returned for non-uniform dimension:

        >>> g = RectGrid([0, 1, 2], [0, 1, 4])
        >>> g.stride
        array([  1.,  nan])

        0.0 returned for degenerate dimension:

        >>> g = RectGrid([0, 1, 2], [0])
        >>> g.stride
        array([ 1.,  0.])
        """
        # Cache for efficiency instead of re-computing
        if self.__stride is None:
            strd = []
            for i in range(self.ndim):
                # Non-uniform spacing has no single step -> NaN marker
                if not self.is_uniform_byaxis[i]:
                    strd.append(float('nan'))
                # Uniform axis with >= 2 points: extent / (npoints - 1)
                elif self.nondegen_byaxis[i]:
                    strd.append(self.extent[i] / (self.shape[i] - 1.0))
                # Degenerate (single-point) axis: stride 0 by convention
                else:
                    strd.append(0.0)
            self.__stride = np.array(strd)

        # Return a copy so callers cannot mutate the cached array.
        return self.__stride.copy()
@property
def extent(self):
"""Return the edge lengths of this grid's minimal bounding box.
Examples
--------
>>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
>>> g.extent
array([ 4., 4.])
"""
return self.max_pt - self.min_pt
def convex_hull(self):
"""Return the smallest `IntervalProd` containing this grid.
The convex hull of a set is the union of all line segments
between points in the set. For a rectilinear grid, it is the
interval product given by the extremal coordinates.
Returns
-------
convex_hull : `IntervalProd`
Interval product defined by the minimum and maximum points
of the grid.
Examples
--------
>>> g = RectGrid([-1, 0, 3], [2, 4], [5], [2, 4, 7])
>>> g.convex_hull()
IntervalProd([-1., 2., 5., 2.], [ 3., 4., 5., 7.])
"""
return IntervalProd(self.min(), self.max())
def element(self):
"""An arbitrary element, the minimum coordinates."""
return self.min_pt
def approx_equals(self, other, atol):
"""Test if this grid is equal to another grid.
Parameters
----------
other :
Object to be tested
atol : float
Allow deviations up to this number in absolute value
per vector entry.
Returns
-------
equals : bool
``True`` if ``other`` is a `RectGrid` instance with all
coordinate vectors equal (up to the given tolerance), to
the ones of this grid, ``False`` otherwise.
Examples
--------
>>> g1 = RectGrid([0, 1], [-1, 0, 2])
>>> g2 = RectGrid([-0.1, 1.1], [-1, 0.1, 2])
>>> g1.approx_equals(g2, atol=0)
False
>>> g1.approx_equals(g2, atol=0.15)
True
"""
if other is self:
return True
return (type(other) is type(self) and
self.ndim == other.ndim and
self.shape == other.shape and
all(np.allclose(vec_s, vec_o, atol=atol, rtol=0.0)
for (vec_s, vec_o) in zip(self.coord_vectors,
other.coord_vectors)))
def __eq__(self, other):
"""Return ``self == other``.
"""
# Implemented separately for performance reasons
if other is self:
return True
return (type(other) is type(self) and
self.shape == other.shape and
all(np.array_equal(vec_s, vec_o)
for (vec_s, vec_o) in zip(self.coord_vectors,
other.coord_vectors)))
def __hash__(self):
"""Return ``hash(self)``."""
# TODO: update with #841
coord_vec_str = tuple(cv.tobytes() for cv in self.coord_vectors)
return hash((type(self), coord_vec_str))
def approx_contains(self, other, atol):
"""Test if ``other`` belongs to this grid up to a tolerance.
Parameters
----------
other : `array-like` or float
The object to test for membership in this grid
atol : float
Allow deviations up to this number in absolute value
per vector entry.
Examples
--------
>>> g = RectGrid([0, 1], [-1, 0, 2])
>>> g.approx_contains([0, 0], atol=0.0)
True
>>> [0, 0] in g # equivalent
True
>>> g.approx_contains([0.1, -0.1], atol=0.0)
False
>>> g.approx_contains([0.1, -0.1], atol=0.15)
True
"""
other = np.atleast_1d(other)
return (other.shape == (self.ndim,) and
all(np.any(np.isclose(vector, coord, atol=atol, rtol=0.0))
for vector, coord in zip(self.coord_vectors, other)))
def __contains__(self, other):
"""Return ``other in self``."""
other = np.atleast_1d(other)
if other.dtype == np.dtype(object):
return False
return (other.shape == (self.ndim,) and
all(coord in vector
for vector, coord in zip(self.coord_vectors, other)))
def is_subgrid(self, other, atol=0.0):
"""Return ``True`` if this grid is a subgrid of ``other``.
Parameters
----------
other : `RectGrid`
The other grid which is supposed to contain this grid
atol : float, optional
Allow deviations up to this number in absolute value
per coordinate vector entry.
Returns
-------
is_subgrid : bool
``True`` if all coordinate vectors of ``self`` are within
absolute distance ``atol`` of the other grid, else ``False``.
Examples
--------
>>> rg = uniform_grid([-2, -2], [0, 4], (3, 4))
>>> rg.coord_vectors
(array([-2., -1., 0.]), array([-2., 0., 2., 4.]))
>>> rg_sub = uniform_grid([-1, 2], [0, 4], (2, 2))
>>> rg_sub.coord_vectors
(array([-1., 0.]), array([ 2., 4.]))
>>> rg_sub.is_subgrid(rg)
True
Fuzzy check is also possible. Note that the tolerance still
applies to the coordinate vectors.
>>> rg_sub = uniform_grid([-1.015, 2], [0, 3.99], (2, 2))
>>> rg_sub.is_subgrid(rg, atol=0.01)
False
>>> rg_sub.is_subgrid(rg, atol=0.02)
True
"""
# Optimization for some common cases
if other is self:
return True
if not isinstance(other, RectGrid):
return False
if not all(self.shape[i] <= other.shape[i] and
self.min_pt[i] >= other.min_pt[i] - atol and
self.max_pt[i] <= other.max_pt[i] + atol
for i in range(self.ndim)):
return False
if self.size == 0:
return True
if self.is_uniform and other.is_uniform:
# For uniform grids, it suffices to show that min_pt, max_pt
# and g[1,...,1] are contained in the other grid. For axes
# with less than 2 points, this reduces to min_pt and max_pt,
# and the corresponding indices in the other check point are
# set to 0.
minmax_contained = (
other.approx_contains(self.min_pt, atol=atol) and
other.approx_contains(self.max_pt, atol=atol))
check_idx = np.zeros(self.ndim, dtype=int)
check_idx[np.array(self.shape) >= 3] = 1
checkpt_contained = other.approx_contains(self[tuple(check_idx)],
atol=atol)
return minmax_contained and checkpt_contained
else:
# Array version of the fuzzy subgrid test, about 3 times faster
# than the loop version.
for vec_o, vec_s in zip(other.coord_vectors, self.coord_vectors):
# Create array of differences of all entries in vec_o and
# vec_s. If there is no almost zero entry in each row,
# return False.
vec_o_mg, vec_s_mg = sparse_meshgrid(vec_o, vec_s)
if not np.all(np.any(np.isclose(vec_s_mg, vec_o_mg, atol=atol),
axis=0)):
return False
return True
def insert(self, index, *grids):
"""Return a copy with ``grids`` inserted before ``index``.
The given grids are inserted (as a block) into ``self``, yielding
a new grid whose number of dimensions is the sum of the numbers of
dimensions of all involved grids.
Note that no changes are made in-place.
Parameters
----------
index : int
The index of the dimension before which ``grids`` are to
be inserted. Negative indices count backwards from
``self.ndim``.
grid1, ..., gridN : `RectGrid`
The grids to be inserted into ``self``.
Returns
-------
newgrid : `RectGrid`
The enlarged grid.
Examples
--------
>>> g1 = RectGrid([0, 1], [-1, 0, 2])
>>> g2 = RectGrid([1], [-6, 15])
>>> g1.insert(1, g2)
RectGrid(
[ 0., 1.],
[ 1.],
[ -6., 15.],
[-1., 0., 2.]
)
>>> g1.insert(1, g2, g2)
RectGrid(
[ 0., 1.],
[ 1.],
[ -6., 15.],
[ 1.],
[ -6., 15.],
[-1., 0., 2.]
)
See Also
--------
append
"""
index, index_in = safe_int_conv(index), index
if not -self.ndim <= index <= self.ndim:
raise IndexError('index {0} outside the valid range -{1} ... {1}'
''.format(index_in, self.ndim))
if index < 0:
index += self.ndim
if len(grids) == 0:
# Copy of `self`
return RectGrid(*self.coord_vectors)
elif len(grids) == 1:
# Insert single grid
grid = grids[0]
if not isinstance(grid, RectGrid):
raise TypeError('{!r} is not a `RectGrid` instance'
''.format(grid))
new_vecs = (self.coord_vectors[:index] + grid.coord_vectors +
self.coord_vectors[index:])
return RectGrid(*new_vecs)
else:
# Recursively insert first grid and the remaining into the result
return self.insert(index, grids[0]).insert(
index + grids[0].ndim, *(grids[1:]))
def append(self, *grids):
"""Insert ``grids`` at the end as a block.
Parameters
----------
grid1, ..., gridN : `RectGrid`
The grids to be appended to ``self``.
Returns
-------
newgrid : `RectGrid`
The enlarged grid.
Examples
--------
>>> g1 = RectGrid([0, 1], [-1, 0, 2])
>>> g2 = RectGrid([1], [-6, 15])
>>> g1.append(g2)
RectGrid(
[ 0., 1.],
[-1., 0., 2.],
[ 1.],
[ -6., 15.]
)
>>> g1.append(g2, g2)
RectGrid(
[ 0., 1.],
[-1., 0., 2.],
[ 1.],
[ -6., 15.],
[ 1.],
[ -6., 15.]
)
See Also
--------
insert
"""
return self.insert(self.ndim, *grids)
def squeeze(self, axis=None):
"""Return the grid with removed degenerate (length 1) dimensions.
Parameters
----------
axis : None or index expression, optional
Subset of the axes to squeeze. Default: All axes.
Returns
-------
squeezed : `RectGrid`
Squeezed grid.
Examples
--------
>>> g = RectGrid([0, 1], [-1], [-1, 0, 2])
>>> g.squeeze()
RectGrid(
[ 0., 1.],
[-1., 0., 2.]
)
"""
if axis is None:
rng = range(self.ndim)
else:
rng = list(np.atleast_1d(np.arange(self.ndim)[axis]))
new_indcs = [i for i in range(self.ndim)
if i not in rng or self.nondegen_byaxis[i]]
coord_vecs = [self.coord_vectors[axis] for axis in new_indcs]
return RectGrid(*coord_vecs)
    def points(self, order='C'):
        """All grid points in a single array.

        Parameters
        ----------
        order : {'C', 'F'}, optional
            Axis ordering in the resulting point array.

        Returns
        -------
        points : `numpy.ndarray`
            The shape of the array is ``size x ndim``, i.e. the points
            are stored as rows.

        Examples
        --------
        >>> g = RectGrid([0, 1], [-1, 0, 2])
        >>> g.points()
        array([[ 0., -1.],
               [ 0.,  0.],
               [ 0.,  2.],
               [ 1., -1.],
               [ 1.,  0.],
               [ 1.,  2.]])
        >>> g.points(order='F')
        array([[ 0., -1.],
               [ 1., -1.],
               [ 0.,  0.],
               [ 1.,  0.],
               [ 0.,  2.],
               [ 1.,  2.]])
        """
        if str(order).upper() not in ('C', 'F'):
            raise ValueError('order {!r} not recognized'.format(order))
        else:
            order = str(order).upper()

        # For 'F' ordering, fill the axes in reversed sequence so that
        # the first axis varies fastest in the flattened result.
        axes = range(self.ndim) if order == 'C' else reversed(range(self.ndim))
        shape = self.shape if order == 'C' else tuple(reversed(self.shape))
        point_arr = np.empty((self.size, self.ndim))

        for i, axis in enumerate(axes):
            # Column `axis` viewed in grid shape. For a constant-strided
            # 1d column, `reshape` can return a writable view, so the
            # assignment below fills `point_arr` in place (presumably
            # relied upon here -- confirm against numpy reshape semantics).
            view = point_arr[:, axis].reshape(shape)
            # Broadcast the coordinate vector along all other axes.
            coord_shape = (1,) * i + (-1,) + (1,) * (self.ndim - i - 1)
            view[:] = self.coord_vectors[axis].reshape(coord_shape)

        return point_arr
def corner_grid(self):
"""Return a grid with only the corner points.
Returns
-------
cgrid : `RectGrid`
Grid with size 2 in non-degenerate dimensions and 1
in degenerate ones
Examples
--------
>>> g = RectGrid([0, 1], [-1, 0, 2])
>>> g.corner_grid()
uniform_grid([ 0., -1.], [ 1., 2.], (2, 2))
"""
minmax_vecs = []
for axis in range(self.ndim):
if self.shape[axis] == 1:
minmax_vecs.append(self.coord_vectors[axis][0])
else:
minmax_vecs.append((self.coord_vectors[axis][0],
self.coord_vectors[axis][-1]))
return RectGrid(*minmax_vecs)
def corners(self, order='C'):
"""Corner points of the grid in a single array.
Parameters
----------
order : {'C', 'F'}, optional
Axis ordering in the resulting point array
Returns
-------
corners : `numpy.ndarray`
The size of the array is 2^m x ndim, where m is the number
of non-degenerate axes, i.e. the corners are stored as rows.
Examples
--------
>>> g = RectGrid([0, 1], [-1, 0, 2])
>>> g.corners()
array([[ 0., -1.],
[ 0., 2.],
[ 1., -1.],
[ 1., 2.]])
>>> g.corners(order='F')
array([[ 0., -1.],
[ 1., -1.],
[ 0., 2.],
[ 1., 2.]])
"""
return self.corner_grid().points(order=order)
@property
def meshgrid(self):
"""A grid suitable for function evaluation.
Returns
-------
meshgrid : tuple of `numpy.ndarray`'s
Function evaluation grid with ``ndim`` axes
See Also
--------
numpy.meshgrid
Coordinate matrices from coordinate vectors.
We use ``indexing='ij'`` and ``copy=True``
Examples
--------
>>> g = RectGrid([0, 1], [-1, 0, 2])
>>> x, y = g.meshgrid
>>> x
array([[ 0.],
[ 1.]])
>>> y
array([[-1., 0., 2.]])
Easy function evaluation via broadcasting:
>>> x ** 2 - y ** 2
array([[-1., 0., -4.],
[ 0., 1., -3.]])
"""
return sparse_meshgrid(*self.coord_vectors)
    def __getitem__(self, indices):
        """Return ``self[indices]``.

        Parameters
        ----------
        indices : index expression
            Object determining which parts of the grid to extract.
            ``None`` (new axis) and empty axes are not supported.

        Examples
        --------
        Indexing with integers along all axes produces an array (a point):

        >>> g = RectGrid([-1, 0, 3], [2, 4, 5], [5], [2, 4, 7])
        >>> g[0, 0, 0, 0]
        array([-1.,  2.,  5.,  2.])

        Otherwise, a new RectGrid is returned:

        >>> g[:, 0, 0, 0]
        RectGrid(
            [-1.,  0.,  3.],
            [ 2.],
            [ 5.],
            [ 2.]
        )
        >>> g[0, ..., 1:]
        RectGrid(
            [-1.],
            [ 2.,  4.,  5.],
            [ 5.],
            [ 4.,  7.]
        )
        >>> g[::2, ..., ::2]
        RectGrid(
            [-1.,  3.],
            [ 2.,  4.,  5.],
            [ 5.],
            [ 2.,  7.]
        )

        Too few indices are filled up with an ellipsis from the right:

        >>> g[0]
        RectGrid(
            [-1.],
            [ 2.,  4.,  5.],
            [ 5.],
            [ 2.,  4.,  7.]
        )
        >>> g[0] == g[0, :, :, :] == g[0, ...]
        True
        """
        # A list is treated as fancy indexing along the *first* axis only;
        # all remaining coordinate vectors are kept unchanged.
        if isinstance(indices, list):
            if indices:
                new_coord_vecs = [self.coord_vectors[0][indices]]
                new_coord_vecs += self.coord_vectors[1:]
            else:
                # Empty list selects nothing -> grid with no axes.
                new_coord_vecs = []
            return RectGrid(*new_coord_vecs)

        # Expand ellipsis / missing entries into one index per axis;
        # integers are kept as integers (`int_to_slice=False`) so the
        # all-integers case below can be distinguished.
        indices = normalized_index_expression(indices, self.shape,
                                              int_to_slice=False)

        # If all indices are integers, return an array (a point). Otherwise,
        # create a new grid.
        if all(np.isscalar(idx) for idx in indices):
            return np.fromiter(
                (v[int(idx)] for idx, v in zip(indices, self.coord_vectors)),
                dtype=float)
        else:
            new_coord_vecs = [vec[idx]
                              for idx, vec in zip(indices, self.coord_vectors)]
            return RectGrid(*new_coord_vecs)
def __array__(self, dtype=None):
"""Used with ``numpy``. Returns `points`.
This allows usage of RectGrid with some numpy functions.
Parameters
----------
dtype : `numpy.dtype`
The Numpy data type of the result array. ``None`` means `float`.
Examples
--------
>>> g = RectGrid([0, 1], [-2, 0, 2])
Convert to an array:
>>> np.asarray(g)
array([[ 0., -2.],
[ 0., 0.],
[ 0., 2.],
[ 1., -2.],
[ 1., 0.],
[ 1., 2.]])
Calculate the midpoint:
>>> np.mean(g, axis=0)
array([ 0.5, 0. ])
"""
return self.points().astype(dtype)
def __repr__(self):
"""Return ``repr(self)``."""
if self.is_uniform:
ctor = 'uniform_grid'
posargs = [self.min_pt, self.max_pt, self.shape]
posmod = [array_str, array_str, '']
with npy_printoptions(precision=4):
inner_str = signature_string(posargs, [], mod=[posmod, ''])
return '{}({})'.format(ctor, inner_str)
else:
ctor = self.__class__.__name__
posargs = self.coord_vectors
posmod = array_str
inner_str = signature_string(posargs, [], sep=[',\n', ', ', ', '],
mod=[posmod, ''])
return '{}(\n{}\n)'.format(ctor, indent(inner_str))
__str__ = __repr__
def uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=True):
    """Return a grid from sampling an interval product uniformly.

    The resulting grid will by default include ``intv_prod.min_pt`` and
    ``intv_prod.max_pt`` as grid points. If you want a subdivision into
    equally sized cells with grid points in the middle, use
    `uniform_partition` instead.

    Parameters
    ----------
    intv_prod : `IntervalProd`
        Set to be sampled.
    shape : int or sequence of ints
        Number of nodes per axis. Entries corresponding to degenerate axes
        must be equal to 1.
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``). In each axis,
        an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

    Returns
    -------
    sampling : `RectGrid`
        Uniform sampling grid for the interval product.

    Examples
    --------
    >>> rbox = odl.IntervalProd([-1.5, 2], [-0.5, 3])
    >>> grid = uniform_grid_fromintv(rbox, (3, 3))
    >>> grid.coord_vectors
    (array([-1.5, -1. , -0.5]), array([ 2. ,  2.5,  3. ]))

    To have the nodes in the "middle", use ``nodes_on_bdry=False``:

    >>> grid = uniform_grid_fromintv(rbox, (2, 2), nodes_on_bdry=False)
    >>> grid.coord_vectors
    (array([-1.25, -0.75]), array([ 2.25,  2.75]))

    See Also
    --------
    uniform_grid : Create a uniform grid directly.
    odl.discr.partition.uniform_partition_fromintv :
        divide interval product into equally sized subsets
    """
    if not isinstance(intv_prod, IntervalProd):
        raise TypeError('{!r} is not an `IntervalProd` instance'
                        ''.format(intv_prod))

    if (np.any(np.isinf(intv_prod.min_pt)) or
            np.any(np.isinf(intv_prod.max_pt))):
        # BUG FIX: previously formatted the literal string 'intv_prod'
        # instead of the offending object, making the message useless.
        raise ValueError('`intv_prod` must be finite, got {!r}'
                         ''.format(intv_prod))

    shape = normalized_scalar_param_list(shape, intv_prod.ndim, safe_int_conv)
    # Normalize to a tuple of plain ints unconditionally. Previously this
    # conversion sat in an `else` branch of the `nodes_on_bdry` dispatch
    # below and hence only ran on one (error-adjacent) path.
    shape = tuple(int(n) for n in shape)

    if np.shape(nodes_on_bdry) == ():
        # Scalar: same (left, right) choice for every axis.
        nodes_on_bdry = ([(bool(nodes_on_bdry), bool(nodes_on_bdry))] *
                         intv_prod.ndim)
    elif intv_prod.ndim == 1 and len(nodes_on_bdry) == 2:
        # 1d special case: interpret a pair as (left, right) of the
        # single axis, not as two axes.
        nodes_on_bdry = [nodes_on_bdry]
    elif len(nodes_on_bdry) != intv_prod.ndim:
        raise ValueError('`nodes_on_bdry` has length {}, expected {}'
                         ''.format(len(nodes_on_bdry), intv_prod.ndim))

    # We need to determine the placement of the grid minimum and maximum
    # points based on the choices in nodes_on_bdry. If in a given axis,
    # and for a given side (left or right), the entry is True, the node lies
    # on the boundary, so this coordinate can simply be taken as-is.
    #
    # Otherwise, the following conditions must be met:
    #
    # 1. The node should be half a stride s away from the boundary
    # 2. Adding or subtracting (n-1)*s should give the other extremal node.
    #
    # If both nodes are to be shifted half a stride inside,
    # the second condition yields
    # a + s/2 + (n-1)*s = b - s/2 => s = (b - a) / n,
    # hence the extremal grid points are
    # gmin = a + s/2 = a + (b - a) / (2 * n),
    # gmax = b - s/2 = b - (b - a) / (2 * n).
    #
    # In the case where one node, say the rightmost, lies on the boundary,
    # the condition 2. reads as
    # a + s/2 + (n-1)*s = b => s = (b - a) / (n - 1/2),
    # thus
    # gmin = a + (b - a) / (2 * n - 1).
    gmin, gmax = [], []
    for n, xmin, xmax, on_bdry in zip(shape, intv_prod.min_pt,
                                      intv_prod.max_pt, nodes_on_bdry):

        # Unpack the tuple if possible, else use bool globally for this axis
        try:
            bdry_l, bdry_r = on_bdry
        except TypeError:
            bdry_l = bdry_r = on_bdry

        if bdry_l and bdry_r:
            gmin.append(xmin)
            gmax.append(xmax)
        elif bdry_l and not bdry_r:
            gmin.append(xmin)
            gmax.append(xmax - (xmax - xmin) / (2 * n - 1))
        elif not bdry_l and bdry_r:
            gmin.append(xmin + (xmax - xmin) / (2 * n - 1))
            gmax.append(xmax)
        else:
            gmin.append(xmin + (xmax - xmin) / (2 * n))
            gmax.append(xmax - (xmax - xmin) / (2 * n))

    # Create the grid
    coord_vecs = [np.linspace(mi, ma, num, endpoint=True, dtype=np.float64)
                  for mi, ma, num in zip(gmin, gmax, shape)]
    return RectGrid(*coord_vecs)
def uniform_grid(min_pt, max_pt, shape, nodes_on_bdry=True):
"""Return a grid from sampling an implicit interval product uniformly.
Parameters
----------
min_pt : float or sequence of float
Vectors of lower ends of the intervals in the product.
max_pt : float or sequence of float
Vectors of upper ends of the intervals in the product.
shape : int or sequence of ints
Number of nodes per axis. Entries corresponding to degenerate axes
must be equal to 1.
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``array.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
Returns
-------
uniform_grid : `RectGrid`
The resulting uniform grid.
See Also
--------
uniform_grid_fromintv :
sample a given interval product
odl.discr.partition.uniform_partition :
divide implicitly defined interval product into equally
sized subsets
Examples
--------
By default, the min/max points are included in the grid:
>>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (3, 3))
>>> grid.coord_vectors
(array([-1.5, -1. , -0.5]), array([ 2. , 2.5, 3. ]))
If ``shape`` is supposed to refer to small subvolumes, and the grid
should be their centers, use the option ``nodes_on_bdry=False``:
>>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (2, 2),
... nodes_on_bdry=False)
>>> grid.coord_vectors
(array([-1.25, -0.75]), array([ 2.25, 2.75]))
In 1D, we don't need sequences:
>>> grid = odl.uniform_grid(0, 1, 3)
>>> grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
"""
return uniform_grid_fromintv(IntervalProd(min_pt, max_pt), shape,
nodes_on_bdry=nodes_on_bdry)
if __name__ == '__main__':
    # Run this module's doctests when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
| mpl-2.0 | afacb0839a8feabcf87160072319b130 | 29.939614 | 79 | 0.48721 | 3.895286 | false | false | false | false |
odlgroup/odl | examples/tomo/anisotropic_voxels.py | 2 | 1845 | """Example for ray transform with 3d parallel beam and anisotropic voxels.
Anisotropic voxels are supported in ASTRA v1.8 and upwards; earlier versions
will trigger an error.
"""
import numpy as np
import odl
# Reconstruction space: discretized functions on the cube
# [-20, 20]^3 with 300 samples in x and y, and 100 samples in z direction.
reco_space = odl.uniform_discr(
min_pt=[-20, -20, -20], max_pt=[20, 20, 20], shape=[300, 300, 100],
dtype='float32')
# Make a 3d single-axis parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 180, min = 0, max = pi
angle_partition = odl.uniform_partition(0, np.pi, 180)
# Detector: uniformly sampled, n = (500, 500), min = (-30, -30), max = (30, 30)
detector_partition = odl.uniform_partition([-30, -30], [30, 30], [500, 500])
geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition)
# Ray transform (= forward projection).
ray_trafo = odl.tomo.RayTransform(reco_space, geometry)
# Create a discrete Shepp-Logan phantom (modified version)
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
# Create projection data by calling the ray transform on the phantom
proj_data = ray_trafo(phantom)
# Back-projection can be done by simply calling the adjoint operator on the
# projection data (or any element in the projection space).
backproj = ray_trafo.adjoint(proj_data)
# Show the slice y=0 of phantom and backprojection, as well as a projection
# image at theta=0 and a sinogram at v=0 (middle detector row)
phantom.show(coords=[None, 0, None], title='Phantom, Middle Y Slice')
backproj.show(coords=[None, 0, None], title='Back-projection, Middle Y Slice')
proj_data.show(coords=[0, None, None], title=r'Projection at theta = 0')
proj_data.show(coords=[None, None, 0], title='Sinogram, Middle Slice',
force_show=True)
| mpl-2.0 | 14527664ff9f72736a086e430d91ee4b | 42.928571 | 79 | 0.726829 | 3.121827 | false | false | false | false |
odlgroup/odl | odl/solvers/functional/example_funcs.py | 2 | 5614 | # Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Example functionals used in optimization."""
from __future__ import print_function, division, absolute_import
import numpy as np
from odl.solvers.functional.functional import Functional
from odl.operator import Operator, MatrixOperator
from odl.space.base_tensors import TensorSpace
__all__ = ('RosenbrockFunctional',)
class RosenbrockFunctional(Functional):
    r"""The well-known Rosenbrock function on ``R^n``.

    The `Rosenbrock function`_ is often used as a test problem in
    smooth optimization.

    Notes
    -----
    The functional is defined for :math:`x \in \mathbb{R}^n`,
    :math:`n \geq 2`, as

    .. math::
        \sum_{i=1}^{n - 1} c (x_{i+1} - x_i^2)^2 + (1 - x_i)^2,

    where :math:`c` is a constant, usually set to 100, which determines how
    "ill-behaved" the function should be.

    The global minimum lies at :math:`x = (1, \dots, 1)`, independent
    of :math:`c`.

    There are two definitions of the n-dimensional Rosenbrock function found
    in the literature. One is the product of 2-dimensional Rosenbrock
    functions, which is not the one used here. This one extends the pattern
    of the 2d Rosenbrock function so all dimensions depend on each other in
    sequence.

    References
    ----------
    .. _Rosenbrock function: https://en.wikipedia.org/wiki/Rosenbrock_function
    """

    def __init__(self, space, scale=100.0):
        """Initialize a new instance.

        Parameters
        ----------
        space : `TensorSpace`
            Domain of the functional. Must be 1-dimensional with at
            least 2 entries.
        scale : positive float, optional
            The scale ``c`` in the functional determining how
            "ill-behaved" the functional should be. Larger value means
            worse behavior.

        Examples
        --------
        Initialize and call the functional:

        >>> r2 = odl.rn(2)
        >>> functional = RosenbrockFunctional(r2)
        >>> functional([1, 1])  # optimum is 0 at [1, 1]
        0.0
        >>> functional([0, 1])
        101.0

        The functional can also be used in higher dimensions:

        >>> r5 = odl.rn(5)
        >>> functional = RosenbrockFunctional(r5)
        >>> functional([1, 1, 1, 1, 1])
        0.0

        We can change how much the function is ill-behaved via ``scale``:

        >>> r2 = odl.rn(2)
        >>> functional = RosenbrockFunctional(r2, scale=2)
        >>> functional([1, 1])  # optimum is still 0 at [1, 1]
        0.0
        >>> functional([0, 1])  # much lower variation
        3.0
        """
        self.scale = float(scale)
        if not isinstance(space, TensorSpace):
            raise ValueError('`space` must be a `TensorSpace` instance, '
                             'got {!r}'.format(space))
        if space.ndim > 1:
            raise ValueError('`space` cannot have more than 1 dimension')
        if space.size < 2:
            raise ValueError('`space.size` must be >= 2, got {}'
                             ''.format(space.size))
        # The gradient has no global Lipschitz constant, hence `inf`.
        super(RosenbrockFunctional, self).__init__(
            space, linear=False, grad_lipschitz=np.inf)

    def _call(self, x):
        """Return ``self(x)``."""
        # Direct evaluation of the sum from the class docstring:
        # sum_i c * (x[i+1] - x[i]^2)^2 + (x[i] - 1)^2
        result = 0
        for i in range(0, self.domain.size - 1):
            result += (self.scale * (x[i + 1] - x[i] ** 2) ** 2 +
                       (x[i] - 1) ** 2)
        return result

    @property
    def gradient(self):
        """Gradient operator of the Rosenbrock functional."""
        functional = self
        c = self.scale

        class RosenbrockGradient(Operator):
            """The gradient operator of the Rosenbrock functional."""

            def __init__(self):
                """Initialize a new instance."""
                super(RosenbrockGradient, self).__init__(
                    functional.domain, functional.domain, linear=False)

            def _call(self, x, out):
                """Apply the gradient operator to the given point."""
                # Interior entries get contributions from both the
                # (i-1)-th summand (2c(x_i - x_{i-1}^2)) and the i-th
                # summand (-4c(x_{i+1} - x_i^2)x_i + 2(x_i - 1)).
                for i in range(1, self.domain.size - 1):
                    out[i] = (2 * c * (x[i] - x[i - 1]**2) -
                              4 * c * (x[i + 1] - x[i]**2) * x[i] -
                              2 * (1 - x[i]))
                # Boundary entries only get one of the two contributions.
                out[0] = (-4 * c * (x[1] - x[0] ** 2) * x[0] +
                          2 * (x[0] - 1))
                out[-1] = 2 * c * (x[-1] - x[-2] ** 2)

            def derivative(self, x):
                """The derivative of the gradient.

                This is also known as the Hessian.
                """
                # TODO: Implement optimized version of this that does not need
                # a matrix.
                shape = (functional.domain.size, functional.domain.size)
                matrix = np.zeros(shape)
                # The Hessian is tridiagonal: second derivatives on the
                # diagonal, mixed derivatives -4*c*x[i] on the first
                # off-diagonals.
                for i in range(0, self.domain.size - 1):
                    matrix[i, i] = (2 * c + 2 + 12 * c * x[i] ** 2 -
                                    4 * c * x[i + 1])
                    matrix[i + 1, i] = -4 * c * x[i]
                    matrix[i, i + 1] = -4 * c * x[i]
                # The boundary entries receive fewer contributions; [0, 0]
                # deliberately overwrites the value set in the loop above.
                matrix[-1, -1] = 2 * c
                matrix[0, 0] = 2 + 12 * c * x[0] ** 2 - 4 * c * x[1]
                return MatrixOperator(matrix, self.domain, self.range)

        return RosenbrockGradient()
if __name__ == '__main__':
    from odl.util.testutils import run_doctests
    # Run the doctest examples in this module when executed as a script.
    run_doctests()
| mpl-2.0 | 32272d9bcb54af2fba177ec7e3b88dab | 33.654321 | 79 | 0.527966 | 3.777927 | false | false | false | false |
odlgroup/odl | odl/contrib/torch/test/test_operator.py | 2 | 6462 | # Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Unit tests for the ODL-PyTorch integration."""
import numpy as np
import torch
from torch import nn
import odl
from odl.contrib import torch as odl_torch
from odl.util.testutils import all_almost_equal, simple_fixture
# Parametrized pytest fixtures used by the tests below.
dtype = simple_fixture('dtype', ['float32', 'float64'])
device_params = ['cpu']
if torch.cuda.is_available():
    # Only run the GPU variants when a CUDA device is actually present.
    device_params.append('cuda')
device = simple_fixture('device', device_params)
shape = simple_fixture('shape', [(3,), (2, 3), (2, 2, 3)])
def test_autograd_function_forward(dtype, device):
    """Check that ``OperatorFunction.apply`` matches plain ODL evaluation."""
    # Wrap a small random matrix as an ODL operator
    mat = np.random.rand(2, 3).astype(dtype)
    operator = odl.MatrixOperator(mat)

    # Evaluate the same input directly through ODL ...
    inp_arr = np.ones(3, dtype=dtype)
    expected = operator(inp_arr)

    # ... and through the autograd function on the requested device
    inp = torch.from_numpy(inp_arr).to(device)
    result = odl_torch.OperatorFunction.apply(operator, inp)
    result_arr = result.detach().cpu().numpy()

    assert result_arr.dtype == dtype
    assert all_almost_equal(result_arr, expected)
    assert inp.device.type == result.device.type == device
def test_autograd_function_backward(dtype, device):
    """Check backprop gradients against the analytic ODL gradient."""
    # Build operator A, the squared-norm cost, and the composition cost(A(.))
    mat = np.random.rand(2, 3).astype(dtype)
    op = odl.MatrixOperator(mat)
    cost = odl.solvers.L2NormSquared(op.range)
    composed = cost * op

    # Evaluation point; `requires_grad` enables backpropagation
    point_arr = np.ones(3, dtype=dtype)
    point = torch.from_numpy(point_arr).to(device)
    point.requires_grad_(True)

    # Forward pass through both wrapped operators, then backprop
    intermediate = odl_torch.OperatorFunction.apply(op, point)
    value = odl_torch.OperatorFunction.apply(cost, intermediate)
    value.backward()

    # Compare populated gradient against the ODL functional gradient
    grad = point.grad
    grad_arr = grad.detach().cpu().numpy()
    reference = composed.gradient(point_arr)

    assert grad_arr.dtype == dtype
    assert all_almost_equal(grad_arr, reference)
    assert point.device.type == grad.device.type == device
def test_module_forward(shape, device):
    """Test forward evaluation with operators as modules.

    The wrapped operator must accept inputs with one or more leading
    (batch/channel) axes and prepend the same axes to the output.
    """
    # Define ODL operator and wrap as module
    ndim = len(shape)
    space = odl.uniform_discr([0] * ndim, shape, shape, dtype='float32')
    odl_op = odl.ScalingOperator(space, 2)
    op_mod = odl_torch.OperatorModule(odl_op)

    # Input data and the ODL reference result
    x_arr = np.ones(shape, dtype='float32')
    expected = np.asarray(odl_op(x_arr))

    # Test with 1 extra dim (minimum) and with 2 extra dims; the two
    # cases previously were duplicated code blocks.
    for num_extra in (1, 2):
        lead = (None,) * num_extra  # indexing with None adds an axis
        x = torch.from_numpy(x_arr).to(device)[lead]
        x.requires_grad_(True)
        res = op_mod(x)
        res_arr = res.detach().cpu().numpy()
        assert res_arr.shape == (1,) * num_extra + odl_op.range.shape
        assert all_almost_equal(res_arr, expected[lead])
        assert x.device.type == res.device.type == device
def test_module_forward_diff_shapes(device):
    """Test operator module with different shapes of input and output.

    Like ``test_module_forward``, but with an operator whose domain and
    range shapes differ, so the leading axes must be preserved while the
    trailing (core) shape changes.
    """
    # Define ODL operator and wrap as module
    matrix = np.random.rand(2, 3).astype('float32')
    odl_op = odl.MatrixOperator(matrix)
    op_mod = odl_torch.OperatorModule(odl_op)

    # Input data and the ODL reference result
    x_arr = np.ones(3, dtype='float32')
    expected = np.asarray(odl_op(x_arr))

    # Test with 1 extra dim (minimum) and with 2 extra dims; the two
    # cases previously were duplicated code blocks.
    for num_extra in (1, 2):
        lead = (None,) * num_extra  # indexing with None adds an axis
        x = torch.from_numpy(x_arr).to(device)[lead]
        x.requires_grad_(True)
        res = op_mod(x)
        res_arr = res.detach().cpu().numpy()
        assert res_arr.shape == (1,) * num_extra + odl_op.range.shape
        assert all_almost_equal(res_arr, expected[lead])
        assert x.device.type == res.device.type == device
def test_module_backward(device):
    """Test backpropagation with operators as modules.

    An ``OperatorModule`` is placed between trainable layers; after a
    backward pass, all layer parameters and the input must have received
    gradients.
    """
    # Define ODL operator and wrap as module
    matrix = np.random.rand(2, 3).astype('float32')
    odl_op = odl.MatrixOperator(matrix)
    op_mod = odl_torch.OperatorModule(odl_op)
    loss_fn = nn.MSELoss()

    # Test with linear layers (1 extra dim)
    layer_before = nn.Linear(3, 3)
    layer_after = nn.Linear(2, 2)
    model = nn.Sequential(layer_before, op_mod, layer_after).to(device)
    x = torch.from_numpy(
        np.ones(3, dtype='float32')
    )[None, ...].to(device)
    x.requires_grad_(True)
    target = torch.from_numpy(
        np.zeros(2, dtype='float32')
    )[None, ...].to(device)
    loss = loss_fn(model(x), target)
    loss.backward()
    # Every layer participates in the forward pass, so each parameter
    # must have a populated gradient. (The previous check
    # `p is not None` was vacuously true for module parameters.)
    assert all(p.grad is not None for p in model.parameters())
    assert x.grad.detach().cpu().abs().sum() != 0
    assert x.device.type == loss.device.type == device

    # Test with conv layers (2 extra dims)
    layer_before = nn.Conv1d(1, 2, 2)  # 1->2 channels
    layer_after = nn.Conv1d(2, 1, 2)  # 2->1 channels
    model = nn.Sequential(layer_before, op_mod, layer_after).to(device)
    # Input size 4 since initial convolution reduces by 1
    x = torch.from_numpy(
        np.ones(4, dtype='float32')
    )[None, None, ...].to(device)
    x.requires_grad_(True)
    # Output size 1 since final convolution reduces by 1
    target = torch.from_numpy(
        np.zeros(1, dtype='float32')
    )[None, None, ...].to(device)
    loss = loss_fn(model(x), target)
    loss.backward()
    assert all(p.grad is not None for p in model.parameters())
    assert x.grad.detach().cpu().abs().sum() != 0
    assert x.device.type == loss.device.type == device
if __name__ == '__main__':
    # Run this test module through ODL's pytest wrapper.
    odl.util.test_file(__file__)
| mpl-2.0 | 397b38113acf8b59a15e7d85c3c277ba | 32.832461 | 78 | 0.644073 | 3.178554 | false | true | false | false |
odlgroup/odl | examples/operator/convolution_operator.py | 2 | 1883 | """Create a convolution operator by wrapping a library."""
import odl
import scipy.signal
class Convolution(odl.Operator):
    """Operator calculating the convolution of a kernel with a function.

    The operator inherits from ``odl.Operator`` to be able to be used with ODL.
    """

    def __init__(self, kernel):
        """Initialize a convolution operator with a known kernel.

        Parameters
        ----------
        kernel : element with a ``space`` attribute
            Convolution kernel; its space becomes both the domain and
            the range of the operator.
        """
        # Store the kernel
        self.kernel = kernel

        # Initialize the Operator class by calling its __init__ method.
        # This sets properties such as domain and range and allows the other
        # operator convenience functions to work.
        super(Convolution, self).__init__(
            domain=kernel.space, range=kernel.space, linear=True)

    def _call(self, x):
        """Implement calling the operator by calling scipy.

        ``mode='same'`` crops the full convolution back to the input
        shape, so the result lives in the same space as ``x``.
        """
        return scipy.signal.fftconvolve(self.kernel, x, mode='same')

    @property
    def adjoint(self):
        """Implement ``self.adjoint``.

        For a convolution operator, the adjoint is given by the convolution
        with a kernel with flipped axes. In particular, if the kernel is
        symmetric the operator is self-adjoint.

        NOTE(review): with ``mode='same'`` the cropping is asymmetric for
        even-sized kernels, so the flipped-kernel adjoint may be off by one
        pixel in that case -- confirm if even kernel sizes are intended.
        """
        return Convolution(self.kernel[::-1, ::-1])
# Define the space on which the problem should be solved
# Here the square [-1, 1] x [-1, 1] discretized on a 100x100 grid
space = odl.uniform_discr([-1, -1], [1, 1], [100, 100])

# Convolution kernel, a small centered rectangle (an indicator function)
kernel = odl.phantom.cuboid(space, [-0.05, -0.05], [0.05, 0.05])

# Create convolution operator from the kernel
A = Convolution(kernel)

# Create phantom (the "unknown" solution)
phantom = odl.phantom.shepp_logan(space, modified=True)

# Apply convolution to phantom to create data
g = A(phantom)

# Display kernel, phantom and blurred data using the show method
kernel.show('kernel')
phantom.show('phantom')
g.show('convolved phantom')
| mpl-2.0 | d59aebc19d45a60cef3cee50518d75ec | 30.915254 | 79 | 0.673394 | 3.858607 | false | false | false | false |
odlgroup/odl | odl/test/discr/discr_ops_test.py | 2 | 10401 | # Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Unit tests for `discr_ops`."""
from __future__ import division
import numpy as np
import pytest
import odl
from odl.discr.discr_ops import _SUPPORTED_RESIZE_PAD_MODES
from odl.space.entry_points import tensor_space_impl
from odl.util import is_numeric_dtype, is_real_floating_dtype
from odl.util.testutils import dtype_tol, noise_element
# --- pytest fixtures --- #
# All supported pad modes; 'constant' is tested with two different
# constants and therefore represented as ('constant', const) tuples.
paddings = list(_SUPPORTED_RESIZE_PAD_MODES)
paddings.remove('constant')
paddings.extend([('constant', 0), ('constant', 1)])
# Human-readable pytest IDs for the `padding` fixture below
padding_ids = [" pad_mode='{}'-{} ".format(*p)
               if isinstance(p, tuple)
               else " pad_mode='{}' ".format(p)
               for p in paddings]
@pytest.fixture(scope="module", ids=padding_ids, params=paddings)
def padding(request):
    """Return a ``(pad_mode, pad_const)`` pair for each padding variant."""
    par = request.param
    if not isinstance(par, tuple):
        # Plain mode string: the constant defaults to 0
        par = (par, 0)
    pad_mode, pad_const = par
    return pad_mode, pad_const
# --- ResizingOperator tests --- #
def test_resizing_op_init(odl_tspace_impl, padding):
    """Check that all supported constructor call patterns run."""
    impl = odl_tspace_impl
    pad_mode, pad_const = padding
    domain = odl.uniform_discr([0, -1], [1, 1], (10, 5), impl=impl)
    target = odl.uniform_discr([0, -3], [2, 3], (20, 15), impl=impl)

    # Explicit range
    odl.ResizingOperator(domain, target)
    # Implicit range via shape, with offset, padding and discr variants
    for kwargs in [
        dict(ran_shp=(20, 15)),
        dict(ran_shp=(20, 15), offset=(0, 5)),
        dict(ran_shp=(20, 15), pad_mode=pad_mode),
        dict(ran_shp=(20, 15), pad_mode=pad_mode, pad_const=pad_const),
        dict(ran_shp=(20, 15), discr_kwargs={'nodes_on_bdry': True}),
    ]:
        odl.ResizingOperator(domain, **kwargs)
def test_resizing_op_raise():
    """Validate error checking in ResizingOperator."""
    # Domain not a uniformly discretized Lp
    with pytest.raises(TypeError):
        odl.ResizingOperator(odl.rn(5), ran_shp=(10,))

    # Domain with a non-uniform partition is rejected
    grid = odl.RectGrid([0, 2, 3])
    part = odl.RectPartition(odl.IntervalProd(0, 3), grid)
    tspace = odl.rn(3)
    space = odl.DiscretizedSpace(part, tspace)
    with pytest.raises(ValueError):
        odl.ResizingOperator(space, ran_shp=(10,))

    # Different cell sides in domain and range
    space = odl.uniform_discr(0, 1, 10)
    res_space = odl.uniform_discr(0, 1, 15)
    with pytest.raises(ValueError):
        odl.ResizingOperator(space, res_space)

    # Non-integer multiple of cell sides used as shift (grid of the
    # resized space shifted)
    space = odl.uniform_discr(0, 1, 5)
    res_space = odl.uniform_discr(-0.5, 1.5, 10)
    with pytest.raises(ValueError):
        odl.ResizingOperator(space, res_space)

    # Need either range or ran_shp
    with pytest.raises(ValueError):
        odl.ResizingOperator(space)

    # Offset cannot be combined with range
    space = odl.uniform_discr([0, -1], [1, 1], (10, 5))
    res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15))
    with pytest.raises(ValueError):
        odl.ResizingOperator(space, res_space, offset=(0, 0))

    # Bad pad_mode
    with pytest.raises(ValueError):
        odl.ResizingOperator(space, res_space, pad_mode='something')
def test_resizing_op_properties(odl_tspace_impl, padding):
    """Check operator attributes for explicit and implicit range."""
    impl = odl_tspace_impl
    dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes()
              if is_numeric_dtype(dt)]
    pad_mode, pad_const = padding
    for dtype in dtypes:
        # Explicit range
        space = odl.uniform_discr([0, -1], [1, 1], (10, 5), dtype=dtype)
        res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15), dtype=dtype)
        res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode,
                                      pad_const=pad_const)
        assert res_op.domain == space
        assert res_op.range == res_space
        assert res_op.offset == (0, 5)
        assert res_op.pad_mode == pad_mode
        assert res_op.pad_const == pad_const
        if pad_mode == 'constant' and pad_const != 0:
            # Padding with a nonzero constant is affine, not linear
            assert not res_op.is_linear
        else:
            assert res_op.is_linear

        # Implicit range via ran_shp and offset
        res_op = odl.ResizingOperator(space, ran_shp=(20, 15), offset=[0, 5],
                                      pad_mode=pad_mode, pad_const=pad_const)
        # The implicitly constructed range must match the explicit one
        assert np.allclose(res_op.range.min_pt, res_space.min_pt)
        assert np.allclose(res_op.range.max_pt, res_space.max_pt)
        assert np.allclose(res_op.range.cell_sides, res_space.cell_sides)
        assert res_op.range.dtype == res_space.dtype
        assert res_op.offset == (0, 5)
        assert res_op.pad_mode == pad_mode
        assert res_op.pad_const == pad_const
        if pad_mode == 'constant' and pad_const != 0:
            assert not res_op.is_linear
        else:
            assert res_op.is_linear
def test_resizing_op_call(odl_tspace_impl):
    """Minimal evaluation test; this operator only wraps ``resize_array``.

    The extension from shape ``(4, 5)`` to ``(8, 2)`` pads rows 4-7 with
    zeros and crops the second axis, so the expected result is ones in
    the first 4 rows and zeros below.
    """
    impl = odl_tspace_impl
    dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes()
              if is_numeric_dtype(dt)]

    def check(space, res_space, dtype):
        # One evaluation without and one with `out`, both compared to
        # the known result. Previously duplicated for the two branches.
        res_op = odl.ResizingOperator(space, res_space)
        true_res = np.zeros((8, 2), dtype=dtype)
        true_res[:4, :] = 1
        assert np.array_equal(res_op(space.one()), true_res)
        out = res_space.element()
        res_op(space.one(), out=out)
        assert np.array_equal(out, true_res)

    for dtype in dtypes:
        space = odl.uniform_discr(
            [0, -1], [1, 1], (4, 5), dtype=dtype, impl=impl
        )
        res_space = odl.uniform_discr(
            [0, -0.6], [2, 0.2], (8, 2), dtype=dtype, impl=impl
        )
        check(space, res_space, dtype)

        # Test also mapping to default impl for other 'impl'
        if impl != 'numpy':
            res_space = odl.uniform_discr(
                [0, -0.6], [2, 0.2], (8, 2), dtype=dtype
            )
            check(space, res_space, dtype)
def test_resizing_op_deriv(padding):
    """The derivative is the operator itself except for affine padding."""
    pad_mode, pad_const = padding
    dom = odl.uniform_discr([0, -1], [1, 1], (4, 5))
    ran = odl.uniform_discr([0, -0.6], [2, 0.2], (8, 2))
    op = odl.ResizingOperator(dom, ran, pad_mode=pad_mode,
                              pad_const=pad_const)
    deriv = op.derivative(dom.one())

    if pad_mode == 'constant' and pad_const != 0:
        # Only non-trivial case is constant padding with const != 0:
        # the operator is affine and its derivative pads with zeros.
        assert deriv.pad_mode == 'constant'
        assert deriv.pad_const == 0.0
    else:
        assert deriv is op
def test_resizing_op_inverse(padding, odl_tspace_impl):
    """``inverse`` is a left inverse for a purely extending operator."""
    impl = odl_tspace_impl
    pad_mode, pad_const = padding
    numeric_dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes()
                      if is_numeric_dtype(dt)]

    for dt in numeric_dtypes:
        small = odl.uniform_discr([0, -1], [1, 1], (4, 5), dtype=dt,
                                  impl=impl)
        large = odl.uniform_discr([0, -1.4], [1.5, 1.4], (6, 7),
                                  dtype=dt, impl=impl)
        op = odl.ResizingOperator(small, large, pad_mode=pad_mode,
                                  pad_const=pad_const)
        # The operator extends in all axes, so cropping back recovers
        # the original element exactly (only a left inverse in general).
        x = noise_element(small)
        assert op.inverse(op(x)) == x
def test_resizing_op_adjoint(padding, odl_tspace_impl):
    """Verify ``<Ax, y> == <x, A^* y>`` for all real floating dtypes."""
    impl = odl_tspace_impl
    pad_mode, pad_const = padding
    dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes()
              if is_real_floating_dtype(dt)]
    for dtype in dtypes:
        space = odl.uniform_discr([0, -1], [1, 1], (4, 5), dtype=dtype,
                                  impl=impl)
        res_space = odl.uniform_discr([0, -1.4], [1.5, 1.4], (6, 7),
                                      dtype=dtype, impl=impl)
        res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode,
                                      pad_const=pad_const)
        if pad_const != 0.0:
            # Nonzero constant padding is affine and has no adjoint.
            # `continue` (not `return`) so the remaining dtypes are
            # still checked; a `return` here aborted the whole test
            # after the first dtype.
            with pytest.raises(NotImplementedError):
                res_op.adjoint
            continue

        elem = noise_element(space)
        res_elem = noise_element(res_space)
        inner1 = res_op(elem).inner(res_elem)
        inner2 = elem.inner(res_op.adjoint(res_elem))
        assert inner1 == pytest.approx(
            inner2, rel=space.size * dtype_tol(dtype)
        )
def test_resizing_op_mixed_uni_nonuni():
    """Check if resizing along uniform axes in mixed discretizations works."""
    nonuni_part = odl.nonuniform_partition([0, 1, 4])
    uni_part = odl.uniform_partition(-1, 1, 4)
    part = uni_part.append(nonuni_part, uni_part, nonuni_part)
    tspace = odl.rn(part.shape)
    space = odl.DiscretizedSpace(part, tspace)

    # Keep non-uniform axes fixed; only axes 0 and 2 are uniform, hence
    # only they can (and should) be resized.
    res_op = odl.ResizingOperator(space, ran_shp=(6, 3, 6, 3))
    assert res_op.axes == (0, 2)
    assert res_op.offset == (1, 0, 1, 0)

    # Evaluation test with a simpler case: resize axis 0 from 4 to 6
    # with offset 1, so one zero row is added above and below.
    part = uni_part.append(nonuni_part)
    tspace = odl.rn(part.shape)
    space = odl.DiscretizedSpace(part, tspace)
    res_op = odl.ResizingOperator(space, ran_shp=(6, 3))
    result = res_op(space.one())
    true_result = [[0, 0, 0],
                   [1, 1, 1],
                   [1, 1, 1],
                   [1, 1, 1],
                   [1, 1, 1],
                   [0, 0, 0]]
    assert np.array_equal(result, true_result)

    # Test adjoint via the inner product identity <Ax, y> == <x, A^* y>
    elem = noise_element(space)
    res_elem = noise_element(res_op.range)
    inner1 = res_op(elem).inner(res_elem)
    inner2 = elem.inner(res_op.adjoint(res_elem))
    assert inner1 == pytest.approx(inner2)
if __name__ == '__main__':
    # Run this test module through ODL's pytest wrapper.
    odl.util.test_file(__file__)
| mpl-2.0 | 8bf4c5f4b7e092fe3df373a437db35ef | 34.498294 | 78 | 0.584367 | 3.226117 | false | true | false | false |
odlgroup/odl | odl/tomo/geometry/detector.py | 2 | 51757 | # Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Detectors for tomographic imaging."""
from __future__ import absolute_import, division, print_function
from builtins import object
import numpy as np
from odl.discr import RectPartition
from odl.tomo.util import is_inside_bounds, perpendicular_vector
from odl.tomo.util.utility import rotation_matrix_from_to
from odl.util import array_str, indent, signature_string
__all__ = ('Detector',
'Flat1dDetector', 'Flat2dDetector', 'CircularDetector',
'CylindricalDetector', 'SphericalDetector')
class Detector(object):
    """Abstract detector class.

    A detector is described by

    * a set of parameters for surface parametrization (including sampling),
    * a function mapping a surface parameter to the location of a detector
      point relative to its reference point,
    * optionally a surface measure function.

    Most implementations implicitly assume that an N-dimensional detector
    is embedded in an (N+1)-dimensional space, but subclasses can override
    this behavior.
    """

    def __init__(self, partition, space_ndim=None, check_bounds=True):
        """Initialize a new instance.

        Parameters
        ----------
        partition : `RectPartition`
            Partition of the detector parameter set (pixelization).
            It determines dimension, parameter range and discretization.
        space_ndim : positive int, optional
            Number of dimensions of the embedding space.
            Default: ``partition.ndim + 1``
        check_bounds : bool, optional
            If ``True``, methods computing vectors check input arguments.
            Checks are vectorized and add only a small overhead.
        """
        if not isinstance(partition, RectPartition):
            raise TypeError('`partition` {!r} is not a RectPartition instance'
                            ''.format(partition))

        if space_ndim is None:
            self.__space_ndim = partition.ndim + 1
        else:
            self.__space_ndim = int(space_ndim)
            if self.space_ndim <= 0:
                # Typo fixed: 'postitive' -> 'positive'
                raise ValueError('`space_ndim` must be positive, got {}'
                                 ''.format(space_ndim))

        self.__partition = partition
        self.__check_bounds = bool(check_bounds)

    @property
    def partition(self):
        """Partition of the detector parameter set into subsets."""
        return self.__partition

    @property
    def check_bounds(self):
        """If ``True``, methods computing vectors check input arguments.

        For very large input arrays, these checks can introduce significant
        overhead, but the overhead is kept low by vectorization.
        """
        return self.__check_bounds

    @property
    def ndim(self):
        """Number of dimensions of the parameters (= surface dimension)."""
        return self.partition.ndim

    @property
    def space_ndim(self):
        """Number of dimensions of the embedding space.

        This default (``space_ndim = ndim + 1``) can be overridden by
        subclasses.
        """
        return self.__space_ndim

    @property
    def params(self):
        """Surface parameter set of this detector."""
        return self.partition.set

    @property
    def grid(self):
        """Sampling grid of the parameters."""
        return self.partition.grid

    @property
    def shape(self):
        """Number of subsets (pixels) of the detector per axis."""
        return self.partition.shape

    @property
    def size(self):
        """Total number of pixels."""
        return self.partition.size

    def surface(self, param):
        """Parametrization of the detector reference surface.

        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate.

        Returns
        -------
        point : `numpy.ndarray`
            Vector(s) pointing from the origin to the detector surface
            point at ``param``.
        """
        raise NotImplementedError('abstract method')

    def surface_deriv(self, param):
        """Partial derivative(s) of the surface parametrization.

        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. If ``ndim >= 2``,
            a sequence of length `ndim` must be provided.

        Returns
        -------
        deriv : `numpy.ndarray`
            Array of vectors representing the surface derivative(s) at
            ``param``.
        """
        raise NotImplementedError('abstract method')

    def surface_normal(self, param):
        """Unit vector perpendicular to the detector surface at ``param``.

        The orientation is chosen as follows:

            - In 2D, the system ``(normal, tangent)`` should be
              right-handed.
            - In 3D, the system ``(tangent[0], tangent[1], normal)``
              should be right-handed.

        Here, ``tangent`` is the return value of `surface_deriv` at
        ``param``.

        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. If ``ndim >= 2``,
            a sequence of length `ndim` must be provided.

        Returns
        -------
        normal : `numpy.ndarray`
            Unit vector(s) perpendicular to the detector surface at
            ``param``.
            If ``param`` is a single parameter, an array of shape
            ``(space_ndim,)`` representing a single vector is returned.
            Otherwise the shape of the returned array is

            - ``param.shape + (space_ndim,)`` if `ndim` is 1,
            - ``param.shape[:-1] + (space_ndim,)`` otherwise.
        """
        # Checking is done by `surface_deriv`
        if self.ndim == 1 and self.space_ndim == 2:
            # Rotate the tangent by -90 degrees for right-handedness
            return -perpendicular_vector(self.surface_deriv(param))
        elif self.ndim == 2 and self.space_ndim == 3:
            deriv = self.surface_deriv(param)
            if deriv.ndim > 2:
                # Vectorized, need to reshape (N, 2, 3) to (2, N, 3)
                deriv = np.moveaxis(deriv, -2, 0)
            # Normal = normalized cross product of the two tangents
            normal = np.cross(*deriv, axis=-1)
            normal /= np.linalg.norm(normal, axis=-1, keepdims=True)
            return normal
        else:
            raise NotImplementedError(
                'no default implementation of `surface_normal` available '
                'for `ndim = {}` and `space_ndim = {}`'
                ''.format(self.ndim, self.space_ndim))

    def surface_measure(self, param):
        """Density function of the surface measure.

        This is the default implementation relying on the `surface_deriv`
        method. For a detector with `ndim` equal to 1, the density is given
        by the `Arc length`_, for a surface with `ndim` 2 in a 3D space, it
        is the length of the cross product of the partial derivatives of the
        parametrization, see Wikipedia's `Surface area`_ article.

        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. If ``ndim >= 2``,
            a sequence of length `ndim` must be provided.

        Returns
        -------
        measure : float or `numpy.ndarray`
            The density value(s) at the given parameter(s). If a single
            parameter is provided, a float is returned. Otherwise, an
            array is returned with shape

            - ``param.shape`` if `ndim` is 1,
            - ``broadcast(*param).shape`` otherwise.

        References
        ----------
        .. _Arc length:
            https://en.wikipedia.org/wiki/Curve#Lengths_of_curves
        .. _Surface area:
            https://en.wikipedia.org/wiki/Surface_area
        """
        # Checking is done by `surface_deriv`
        if self.ndim == 1:
            scalar_out = (np.shape(param) == ())
            measure = np.linalg.norm(self.surface_deriv(param), axis=-1)
            if scalar_out:
                measure = float(measure)
            return measure
        elif self.ndim == 2 and self.space_ndim == 3:
            scalar_out = (np.shape(param) == (2,))
            deriv = self.surface_deriv(param)
            if deriv.ndim > 2:
                # Vectorized, need to reshape (N, 2, 3) to (2, N, 3)
                deriv = np.moveaxis(deriv, -2, 0)
            cross = np.cross(*deriv, axis=-1)
            measure = np.linalg.norm(cross, axis=-1)
            if scalar_out:
                measure = float(measure)
            return measure
        else:
            raise NotImplementedError(
                'no default implementation of `surface_measure` available '
                'for `ndim={}` and `space_ndim={}`'
                ''.format(self.ndim, self.space_ndim))
class Flat1dDetector(Detector):
    """A 1d line detector aligned with a given axis in 2D space."""

    def __init__(self, partition, axis, check_bounds=True):
        """Initialize a new instance.

        Parameters
        ----------
        partition : 1-dim. `RectPartition`
            Partition of the parameter interval, corresponding to the
            line elements.
        axis : `array-like`, shape ``(2,)``
            Fixed axis along which this detector is aligned.
        check_bounds : bool, optional
            If ``True``, methods computing vectors check input arguments.
            Checks are vectorized and add only a small overhead.

        Examples
        --------
        >>> part = odl.uniform_partition(0, 1, 10)
        >>> det = Flat1dDetector(part, axis=[1, 0])
        >>> det.axis
        array([ 1.,  0.])
        >>> np.allclose(det.surface_normal(0), [0, -1])
        True
        """
        super(Flat1dDetector, self).__init__(partition, 2, check_bounds)
        if self.ndim != 1:
            raise ValueError('`partition` must be 1-dimensional, got ndim={}'
                             ''.format(self.ndim))

        if np.linalg.norm(axis) == 0:
            raise ValueError('`axis` cannot be zero')
        # Store a normalized copy of the axis
        self.__axis = np.asarray(axis) / np.linalg.norm(axis)

    @property
    def axis(self):
        """Fixed axis along which this detector is aligned."""
        return self.__axis

    def surface(self, param):
        """Return the detector surface point corresponding to ``param``.

        For parameter value ``p``, the surface point is given by ::

            surf = p * axis

        Parameters
        ----------
        param : float or `array-like`
            Parameter value(s) at which to evaluate.

        Returns
        -------
        point : `numpy.ndarray`
            Vector(s) pointing from the origin to the detector surface
            point at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(2,)``, otherwise ``param.shape + (2,)``.

        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:

        >>> part = odl.uniform_partition(0, 1, 10)
        >>> det = Flat1dDetector(part, axis=[1, 0])
        >>> det.surface(0)
        array([ 0.,  0.])
        >>> det.surface(1)
        array([ 1.,  0.])

        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):

        >>> det.surface([0, 1])
        array([[ 0.,  0.],
               [ 1.,  0.]])
        >>> det.surface(np.zeros((4, 5))).shape
        (4, 5, 2)
        """
        squeeze_out = (np.shape(param) == ())
        # `asarray` + `atleast_1d` instead of `np.array(..., copy=False,
        # ndmin=1)`: under NumPy >= 2.0, `copy=False` raises when a copy
        # is required (e.g. for Python list input).
        param = np.atleast_1d(np.asarray(param, dtype=float))
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param, self.params))

        # Create outer product of `params` and `axis`, resulting in shape
        # params.shape + axis.shape
        surf = np.multiply.outer(param, self.axis)
        if squeeze_out:
            surf = surf.squeeze()

        return surf

    def surface_deriv(self, param):
        """Return the surface derivative at ``param``.

        This is a constant function evaluating to `axis` everywhere.

        Parameters
        ----------
        param : float or `array-like`
            Parameter value(s) at which to evaluate.

        Returns
        -------
        deriv : `numpy.ndarray`
            Array representing the derivative vector(s) at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(2,)``, otherwise ``param.shape + (2,)``.

        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:

        >>> part = odl.uniform_partition(0, 1, 10)
        >>> det = Flat1dDetector(part, axis=[1, 0])
        >>> det.surface_deriv(0)
        array([ 1.,  0.])
        >>> det.surface_deriv(1)
        array([ 1.,  0.])

        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):

        >>> det.surface_deriv([0, 1])
        array([[ 1.,  0.],
               [ 1.,  0.]])
        >>> det.surface_deriv(np.zeros((4, 5))).shape
        (4, 5, 2)
        """
        squeeze_out = (np.shape(param) == ())
        # See `surface` for why `np.array(..., copy=False)` is avoided.
        param = np.atleast_1d(np.asarray(param, dtype=float))
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param, self.params))

        if squeeze_out:
            return self.axis
        else:
            # Produce array of shape `param.shape + (ndim,)` by broadcasting
            bcast_slc = (None,) * param.ndim + (slice(None),)
            return np.broadcast_to(
                self.axis[bcast_slc], param.shape + self.axis.shape)

    def __repr__(self):
        """Return ``repr(self)``."""
        posargs = [self.partition]
        optargs = [('axis', array_str(self.axis), '')]
        inner_str = signature_string(posargs, optargs, sep=',\n')
        return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))

    def __str__(self):
        """Return ``str(self)``."""
        return repr(self)
class Flat2dDetector(Detector):
    """A 2D flat panel detector aligned two given axes in 3D space."""
    def __init__(self, partition, axes, check_bounds=True):
        """Initialize a new instance.
        Parameters
        ----------
        partition : 2-dim. `RectPartition`
            Partition of the parameter rectangle, corresponding to the
            pixels.
        axes : sequence of `array-like`'s
            Fixed pair of of unit vectors with which the detector is aligned.
            The vectors must have shape ``(3,)`` and be linearly
            independent.
        check_bounds : bool, optional
            If ``True``, methods computing vectors check input arguments.
            Checks are vectorized and add only a small overhead.
        Examples
        --------
        >>> part = odl.uniform_partition([0, 0], [1, 1], (10, 10))
        >>> det = Flat2dDetector(part, axes=[(1, 0, 0), (0, 0, 1)])
        >>> det.axes
        array([[ 1., 0., 0.],
               [ 0., 0., 1.]])
        >>> det.surface_normal([0, 0])
        array([ 0., -1., 0.])
        """
        super(Flat2dDetector, self).__init__(partition, 3, check_bounds)
        if self.ndim != 2:
            raise ValueError('`partition` must be 2-dimensional, got ndim={}'
                             ''.format(self.ndim))
        # Keep the original object around for error messages only.
        axes, axes_in = np.asarray(axes, dtype=float), axes
        if axes.shape != (2, 3):
            raise ValueError('`axes` must be a sequence of 2 3-dimensional '
                             'vectors, got {}'.format(axes_in))
        # A zero cross product means the two vectors are (anti)parallel.
        if np.linalg.norm(np.cross(*axes)) == 0:
            raise ValueError('`axes` {} are linearly dependent'
                             ''.format(axes_in))
        # Normalize each axis to unit length (row-wise).
        self.__axes = axes / np.linalg.norm(axes, axis=1, keepdims=True)
    @property
    def axes(self):
        """Fixed array of unit vectors with which the detector is aligned."""
        return self.__axes
    def surface(self, param):
        """Return the detector surface point corresponding to ``param``.
        For parameter value ``p``, the surface point is given by ::
            surf = p[0] * axes[0] + p[1] * axes[1]
        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. A sequence of
            parameters must have length 2.
        Returns
        -------
        point : `numpy.ndarray`
            Vector(s) pointing from the origin to the detector surface
            point at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(3,)``, otherwise ``broadcast(*param).shape + (3,)``.
        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:
        >>> part = odl.uniform_partition([0, 0], [1, 1], (10, 10))
        >>> det = Flat2dDetector(part, axes=[(1, 0, 0), (0, 0, 1)])
        >>> det.surface([0, 0])
        array([ 0., 0., 0.])
        >>> det.surface([0, 1])
        array([ 0., 0., 1.])
        >>> det.surface([1, 1])
        array([ 1., 0., 1.])
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or n-dimensional arrays of parameters):
        >>> # 3 pairs of parameters, resulting in 3 vectors
        >>> det.surface([[0, 0, 1],
        ...              [0, 1, 1]])
        array([[ 0., 0., 0.],
               [ 0., 0., 1.],
               [ 1., 0., 1.]])
        >>> # Pairs of parameters in a (4, 5) array each
        >>> param = (np.zeros((4, 5)), np.zeros((4, 5)))
        >>> det.surface(param).shape
        (4, 5, 3)
        >>> # Using broadcasting for "outer product" type result
        >>> param = (np.zeros((4, 1)), np.zeros((1, 5)))
        >>> det.surface(param).shape
        (4, 5, 3)
        """
        # A scalar pair of parameters yields a single (3,) vector.
        squeeze_out = (np.broadcast(*param).shape == ())
        param_in = param
        param = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                      for p in param)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param_in, self.params))
        # Compute outer product of the i-th spatial component of the
        # parameter and sum up the contributions
        surf = sum(np.multiply.outer(p, ax) for p, ax in zip(param, self.axes))
        if squeeze_out:
            surf = surf.squeeze()
        return surf
    def surface_deriv(self, param):
        """Return the surface derivative at ``param``.
        This is a constant function evaluating to `axes` everywhere.
        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. A sequence of
            parameters must have length 2.
        Returns
        -------
        deriv : `numpy.ndarray`
            Array containing the derivative vectors. The first dimension
            enumerates the axes, i.e., has always length 2.
            If ``param`` is a single parameter, the returned array has
            shape ``(2, 3)``, otherwise
            ``broadcast(*param).shape + (2, 3)``.
        Notes
        -----
        To get an array that enumerates the derivative vectors in the first
        dimension, move the second-to-last axis to the first position::
            deriv = surface_deriv(param)
            axes_enumeration = np.moveaxis(deriv, -2, 0)
        Examples
        --------
        The method works with a single parameter, resulting in a 2-tuple
        of vectors:
        >>> part = odl.uniform_partition([0, 0], [1, 1], (10, 10))
        >>> det = Flat2dDetector(part, axes=[(1, 0, 0), (0, 0, 1)])
        >>> det.surface_deriv([0, 0])
        array([[ 1., 0., 0.],
               [ 0., 0., 1.]])
        >>> det.surface_deriv([1, 1])
        array([[ 1., 0., 0.],
               [ 0., 0., 1.]])
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or n-dimensional arrays of parameters):
        >>> # 2 pairs of parameters, resulting in 3 vectors for each axis
        >>> deriv = det.surface_deriv([[0, 1],
        ...                            [0, 1]])
        >>> deriv[0]  # first pair of vectors
        array([[ 1., 0., 0.],
               [ 0., 0., 1.]])
        >>> deriv[1]  # second pair of vectors
        array([[ 1., 0., 0.],
               [ 0., 0., 1.]])
        >>> # Pairs of parameters in a (4, 5) array each
        >>> param = (np.zeros((4, 5)), np.zeros((4, 5)))  # pairs of params
        >>> det.surface_deriv(param).shape
        (4, 5, 2, 3)
        >>> # Using broadcasting for "outer product" type result
        >>> param = (np.zeros((4, 1)), np.zeros((1, 5)))  # broadcasting
        >>> det.surface_deriv(param).shape
        (4, 5, 2, 3)
        """
        squeeze_out = (np.broadcast(*param).shape == ())
        param_in = param
        param = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                      for p in param)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param_in, self.params))
        if squeeze_out:
            return self.axes
        else:
            # The derivative is constant; broadcast the (2, 3) axes array
            # to one copy per parameter.
            return np.broadcast_to(
                self.axes, np.broadcast(*param).shape + self.axes.shape)
    def __repr__(self):
        """Return ``repr(self)``."""
        posargs = [self.partition]
        optargs = [('axes', tuple(array_str(ax) for ax in self.axes), None)]
        inner_str = signature_string(posargs, optargs, sep=',\n')
        return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))
    def __str__(self):
        """Return ``str(self)``."""
        return repr(self)
class CircularDetector(Detector):
    """A 1D detector on a circle section in 2D space.
    The circular section that corresponds to the angular partition
    is rotated to be aligned with a given axis and
    shifted to cross the origin. Note, the partition angle increases
    in the clockwise direction, by analogy to flat detectors."""

    def __init__(self, partition, axis, radius, check_bounds=True):
        """Initialize a new instance.
        Parameters
        ----------
        partition : 1-dim. `RectPartition`
            Partition of the parameter interval, corresponding to the
            angular sections along the line.
        axis : `array-like`, shape ``(2,)``
            Fixed axis along which this detector is aligned.
        radius : positive float
            Radius of the circle.
        check_bounds : bool, optional
            If ``True``, methods computing vectors check input arguments.
            Checks are vectorized and add only a small overhead.
        Examples
        --------
        Initialize a detector with circle radius 2 and extending to
        90 degrees on both sides of the origin (a half circle).
        >>> part = odl.uniform_partition(-np.pi / 2, np.pi / 2, 10)
        >>> det = CircularDetector(part, axis=[1, 0], radius=2)
        >>> det.axis
        array([ 1., 0.])
        >>> det.radius
        2.0
        >>> np.allclose(det.surface_normal(0), [0, -1])
        True
        """
        super(CircularDetector, self).__init__(partition, 2, check_bounds)
        if self.ndim != 1:
            raise ValueError('`partition` must be 1-dimensional, got ndim={}'
                             ''.format(self.ndim))
        if np.linalg.norm(axis) == 0:
            raise ValueError('`axis` cannot be zero')
        self.__axis = np.asarray(axis) / np.linalg.norm(axis)
        self.__radius = float(radius)
        if self.__radius <= 0:
            raise ValueError('`radius` must be positive')
        # Rotation taking the reference alignment axis (0, -1) to ``axis``.
        sin = self.__axis[0]
        cos = -self.__axis[1]
        self.__rotation_matrix = np.array([[cos, -sin], [sin, cos]])
        # Shift by -radius along the rotated first unit vector so that the
        # detector arc passes through the origin.
        self.__translation = (- self.__radius
                              * np.matmul(self.__rotation_matrix, (1, 0)))

    @property
    def axis(self):
        """Fixed axis along which this detector is aligned."""
        return self.__axis

    @property
    def radius(self):
        """Curvature radius of the detector."""
        return self.__radius

    @property
    def rotation_matrix(self):
        """Rotation matrix that is used to align the detector
        with a given axis."""
        return self.__rotation_matrix

    @property
    def translation(self):
        """A vector used to shift the detector towards the origin."""
        return self.__translation

    def surface(self, param):
        """Return the detector surface point corresponding to ``param``.
        For a parameter ``phi``, the returned point is given by ::
            surf = R * radius * (cos(phi), -sin(phi)) + t
        where ``R`` is a rotation matrix and ``t`` is a translation vector.
        Note that increase of ``phi`` corresponds to rotation
        in the clockwise direction, by analogy to flat detectors.
        Parameters
        ----------
        param : float or `array-like`
            Parameter value(s) at which to evaluate.
        Returns
        -------
        point : `numpy.ndarray`
            Vector(s) pointing from the origin to the detector surface
            point at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(2,)``, otherwise ``param.shape + (2,)``.
        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:
        >>> part = odl.uniform_partition(-np.pi / 2, np.pi / 2, 10)
        >>> det = CircularDetector(part, axis=[1, 0], radius=2)
        >>> np.allclose(det.surface(0), [0, 0])
        True
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):
        >>> np.round(det.surface([-np.pi / 2, 0, np.pi / 2]), 10)
        array([[-2., -2.],
               [ 0., 0.],
               [ 2., -2.]])
        >>> det.surface(np.zeros((4, 5))).shape
        (4, 5, 2)
        """
        squeeze_out = (np.shape(param) == ())
        param = np.array(param, dtype=float, copy=False, ndmin=1)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param, self.params))
        # Point on the reference circle, then rotate and translate.
        surf = np.empty(param.shape + (2,))
        surf[..., 0] = np.cos(param)
        surf[..., 1] = -np.sin(param)
        surf *= self.radius
        surf = np.matmul(surf, np.transpose(self.rotation_matrix))
        surf += self.translation
        if squeeze_out:
            surf = surf.squeeze()
        return surf

    def surface_deriv(self, param):
        """Return the surface derivative at ``param``.
        The derivative at parameter ``phi`` is given by ::
            deriv = R * radius * (-sin(phi), -cos(phi))
        where R is a rotation matrix.
        Parameters
        ----------
        param : float or `array-like`
            Parameter value(s) at which to evaluate.
        Returns
        -------
        deriv : `numpy.ndarray`
            Array representing the derivative vector(s) at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(2,)``, otherwise ``param.shape + (2,)``.
        See Also
        --------
        surface
        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:
        >>> part = odl.uniform_partition(-np.pi / 2, np.pi / 2, 10)
        >>> det = CircularDetector(part, axis=[1, 0], radius=2)
        >>> det.surface_deriv(0)
        array([ 2., 0.])
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):
        >>> np.round(det.surface_deriv([-np.pi / 2, 0, np.pi / 2]), 10)
        array([[ 0., 2.],
               [ 2., 0.],
               [ 0., -2.]])
        >>> det.surface_deriv(np.zeros((4, 5))).shape
        (4, 5, 2)
        """
        squeeze_out = (np.shape(param) == ())
        param = np.array(param, dtype=float, copy=False, ndmin=1)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param, self.params))
        # Tangent vector on the reference circle, rotated into place.
        # No translation here since the derivative is translation-invariant.
        deriv = np.empty(param.shape + (2,))
        deriv[..., 0] = -np.sin(param)
        deriv[..., 1] = -np.cos(param)
        deriv *= self.radius
        deriv = np.matmul(deriv, np.transpose(self.rotation_matrix))
        if squeeze_out:
            deriv = deriv.squeeze()
        return deriv

    def surface_measure(self, param):
        """Return the arc length measure at ``param``.
        This is a constant function evaluating to `radius` everywhere.
        Parameters
        ----------
        param : float or `array-like`
            Parameter value(s) at which to evaluate.
        Returns
        -------
        measure : float or `numpy.ndarray`
            Constant value(s) of the arc length measure at ``param``.
            If ``param`` is a single parameter, a float is returned,
            otherwise an array of shape ``param.shape``.
        See Also
        --------
        surface
        surface_deriv
        Examples
        --------
        The method works with a single parameter, resulting in a float:
        >>> part = odl.uniform_partition(-np.pi / 2, np.pi / 2, 10)
        >>> det = CircularDetector(part, axis=[1, 0], radius=2)
        >>> det.surface_measure(0)
        2.0
        >>> det.surface_measure(np.pi / 2)
        2.0
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):
        >>> det.surface_measure([0, np.pi / 2])
        array([ 2., 2.])
        >>> det.surface_measure(np.zeros((4, 5))).shape
        (4, 5)
        """
        scalar_out = (np.shape(param) == ())
        param = np.array(param, dtype=float, copy=False, ndmin=1)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param, self.params))
        if scalar_out:
            return self.radius
        else:
            return self.radius * np.ones(param.shape)

    def __repr__(self):
        """Return ``repr(self)``."""
        posargs = [self.partition]
        # BUG FIX: this previously evaluated ``array_str(self.center)``,
        # but no ``center`` attribute exists on this class, so calling
        # ``repr()`` raised AttributeError. Report the actual ``radius``
        # constructor argument instead (default ``None`` so it is shown).
        optargs = [('radius', self.radius, None)]
        inner_str = signature_string(posargs, optargs, sep=',\n')
        return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))

    def __str__(self):
        """Return ``str(self)``."""
        return repr(self)
class CylindricalDetector(Detector):
    """A 2D detector on a cylindrical surface in 3D space.
    The cylindrical surface that corresponds to the partition
    is rotated to be aligned with given axes and
    shifted to cross the origin. Note that the partition angle increases
    in the clockwise direction, by analogy to flat detectors."""

    def __init__(self, partition, axes, radius, check_bounds=True):
        """Initialize a new instance.
        Parameters
        ----------
        partition : 2-dim. `RectPartition`
            Partition of the parameter interval, corresponding to the
            angular partition and height partition.
        axes : sequence of `array-like`
            Fixed pair of of unit vectors with which the detector is aligned.
            The vectors must have shape ``(3,)`` and be perpendicular.
        radius : positive float
            Radius of the cylinder.
        check_bounds : bool, optional
            If ``True``, methods computing vectors check input arguments.
            Checks are vectorized and add only a small overhead.
        Examples
        --------
        Initialize a detector with height 8 and circle radius 2 extending to
        90 degrees on both sides of the origin (a half cylinder).
        >>> part = odl.uniform_partition(
        ...     [-np.pi / 2, -4], [np.pi / 2, 4], [10, 8])
        >>> det = CylindricalDetector(
        ...     part, axes=[(1, 0, 0), (0, 0, 1)], radius=2)
        >>> det.axes
        array([[ 1., 0., 0.],
               [ 0., 0., 1.]])
        >>> det.radius
        2.0
        >>> np.allclose(det.surface_normal([0, 0]), [ 0, -1, 0])
        True
        """
        super(CylindricalDetector, self).__init__(partition, 3, check_bounds)
        if self.ndim != 2:
            raise ValueError('`partition` must be 2-dimensional, got ndim={}'
                             ''.format(self.ndim))
        # Keep the original object around for error messages only.
        axes, axes_in = np.asarray(axes, dtype=float), axes
        if axes.shape != (2, 3):
            raise ValueError('`axes` must be a sequence of 2 3-dimensional '
                             'vectors, got {}'.format(axes_in))
        if np.linalg.norm(np.cross(*axes)) == 0:
            raise ValueError('`axes` {} are linearly dependent'
                             ''.format(axes_in))
        # NOTE(review): exact-zero dot product check means numerically
        # near-perpendicular axes are rejected — presumably intentional.
        if np.linalg.norm(np.dot(*axes)) != 0:
            raise ValueError('`axes` {} are not perpendicular'
                             ''.format(axes_in))
        self.__axes = axes / np.linalg.norm(axes, axis=1, keepdims=True)
        self.__radius = float(radius)
        if self.__radius <= 0:
            raise ValueError('`radius` must be positive')
        # Compose two rotations taking the reference axes to the given ones.
        initial_axes = np.array([[0, -1, 0], [0, 0, 1]])
        r1 = rotation_matrix_from_to(initial_axes[0], axes[0])
        r2 = rotation_matrix_from_to(np.matmul(r1, initial_axes[1]), axes[1])
        self.__rotation_matrix = np.matmul(r2, r1)
        # Shift so that the detector surface passes through the origin.
        self.__translation = (-self.__radius
                              * np.matmul(self.__rotation_matrix, (1, 0, 0)))

    @property
    def axes(self):
        """Fixed array of unit vectors with which the detector is aligned."""
        return self.__axes

    @property
    def radius(self):
        """Curvature radius of the detector."""
        return self.__radius

    @property
    def rotation_matrix(self):
        """Rotation matrix that is used to align the detector
        with a given axis."""
        return self.__rotation_matrix

    @property
    def translation(self):
        """A vector used to shift the detector towards the origin."""
        return self.__translation

    def surface(self, param):
        """Return the detector surface point corresponding to ``param``.
        For parameters ``phi`` and ``h``, the returned point is given by ::
            surf = R * (radius * cos(phi), -radius * sin(phi), h) + t
        where ``R`` is a rotation matrix and ``t`` is a translation vector.
        Note that increase of ``phi`` corresponds to rotation
        in the clockwise direction, by analogy to flat detectors.
        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. A sequence of
            parameters must have length 2.
        Returns
        -------
        point : `numpy.ndarray`
            Vector(s) pointing from the origin to the detector surface
            point at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(3,)``, otherwise ``broadcast(*param).shape + (3,)``.
        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:
        >>> part = odl.uniform_partition(
        ...     [-np.pi / 2, -4], [np.pi / 2, 4], (10, 8))
        >>> det = CylindricalDetector(
        ...     part, axes=[(1, 0, 0), (0, 0, 1)], radius = 2)
        >>> det.surface([0, 0])
        array([ 0., 0., 0.])
        >>> np.round(det.surface([np.pi / 2, 1]), 10)
        array([ 2., -2., 1.])
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):
        >>> # 3 pairs of parameters, resulting in 3 vectors
        >>> np.round(det.surface([[-np.pi / 2, 0, np.pi / 2], [-1, 0, 1]]), 10)
        array([[-2., -2., -1.],
               [ 0., 0., 0.],
               [ 2., -2., 1.]])
        >>> # Pairs of parameters in a (4, 5) array each
        >>> param = (np.zeros((4, 5)), np.zeros((4, 5)))
        >>> det.surface(param).shape
        (4, 5, 3)
        """
        squeeze_out = (np.broadcast(*param).shape == ())
        param_in = param
        param = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                      for p in param)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param_in, self.params))
        # Point on the reference cylinder, then rotate and translate.
        surf = np.empty(param[0].shape + (3,))
        surf[..., 0] = self.radius * np.cos(param[0])
        surf[..., 1] = self.radius * (-np.sin(param[0]))
        surf[..., 2] = param[1]
        surf = np.matmul(surf, np.transpose(self.rotation_matrix))
        surf += self.translation
        if squeeze_out:
            surf = surf.squeeze()
        return surf

    def surface_deriv(self, param):
        """Return the surface derivative at ``param``.
        The derivative at parameters ``phi`` and ``h`` is given by ::
            deriv = R * ((-radius * sin(phi), 0),
                         (-radius * cos(phi), 0),
                         (                 0, 1))
        where ``R`` is a rotation matrix.
        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. A sequence of
            parameters must have length 2.
        Returns
        -------
        deriv : `numpy.ndarray`
            Array representing the derivative vector(s) at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(2,)``, otherwise ``param.shape + (2,)``.
        See Also
        --------
        surface
        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:
        >>> part = odl.uniform_partition(
        ...     [-np.pi / 2, -4], [np.pi / 2, 4], (10,8))
        >>> det = CylindricalDetector(
        ...     part, axes=[(1, 0, 0), (0, 0, 1)], radius = 2)
        >>> np.round(det.surface_deriv([0, 0]), 10)
        array([[ 2., -0., 0.],
               [ 0., 0., 1.]])
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):
        >>> # 2 pairs of parameters, resulting in 3 vectors for each axis
        >>> deriv = det.surface_deriv([[0, np.pi / 2], [0, 1]])
        >>> np.round(deriv[0], 10)
        array([[ 2., -0., 0.],
               [ 0., 0., 1.]])
        >>> np.round(deriv[1], 10)
        array([[ 0., -2., 0.],
               [ 0., 0., 1.]])
        >>> # Pairs of parameters in a (4, 5) array each
        >>> param = (np.zeros((4, 5)), np.zeros((4, 5)))  # pairs of params
        >>> det.surface_deriv(param).shape
        (4, 5, 2, 3)
        """
        squeeze_out = (np.broadcast(*param).shape == ())
        param_in = param
        param = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                      for p in param)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param_in, self.params))
        # Partial derivative w.r.t. the angle phi.
        deriv_phi = np.empty(param[0].shape + (3,))
        deriv_phi[..., 0] = -np.sin(param[0])
        deriv_phi[..., 1] = -np.cos(param[0])
        deriv_phi[..., 2] = 0
        deriv_phi *= self.radius
        # Partial derivative w.r.t. the height h is constant.
        deriv_h = np.broadcast_to((0, 0, 1),
                                  np.broadcast(*param).shape + (3,))
        deriv = np.stack((deriv_phi, deriv_h), axis=-2)
        deriv = np.matmul(deriv, np.transpose(self.rotation_matrix))
        if squeeze_out:
            deriv = deriv.squeeze()
        return deriv

    def __repr__(self):
        """Return ``repr(self)``."""
        posargs = [self.partition]
        # BUG FIX: this previously evaluated ``array_str(self.center)``,
        # but no ``center`` attribute exists on this class, so calling
        # ``repr()`` raised AttributeError. Report the actual ``radius``
        # constructor argument instead (default ``None`` so it is shown).
        optargs = [('radius', self.radius, None)]
        inner_str = signature_string(posargs, optargs, sep=',\n')
        return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))

    def __str__(self):
        """Return ``str(self)``."""
        return repr(self)
class SphericalDetector(Detector):
    """A 2D detector on a spherical surface in 3D space.
    The spherical surface that corresponds to the partition
    is rotated to be aligned with given axes and
    shifted to cross the origin. Note, the partition angles
    increase in the direction of -y (clockwise) and z axis,
    by analogy to flat detectors."""

    def __init__(self, partition, axes, radius, check_bounds=True):
        """Initialize a new instance.
        Parameters
        ----------
        partition : 2-dim. `RectPartition`
            Partition of the parameter interval, corresponding to the
            angular partition in two directions.
        axes : sequence of `array-like`'s
            Fixed pair of of unit vectors with which the detector is aligned.
            The vectors must have shape ``(3,)`` and be perpendicular.
        radius : positive float
            Radius of the sphere.
        check_bounds : bool, optional
            If ``True``, methods computing vectors check input arguments.
            Checks are vectorized and add only a small overhead.
        Examples
        --------
        Initialize a detector with radius 2 extending to
        90 degrees in both directions along the equator and
        45 degrees in both directions towards the poles.
        >>> part = odl.uniform_partition([-np.pi / 2, -np.pi / 3],
        ...                              [ np.pi / 2, np.pi / 3], [20, 10])
        >>> det = SphericalDetector(
        ...     part, axes=[(1, 0, 0), (0, 0, 1)], radius = 2)
        >>> det.axes
        array([[ 1., 0., 0.],
               [ 0., 0., 1.]])
        >>> det.radius
        2.0
        >>> np.allclose(det.surface_normal([0, 0]), [0, -1, 0])
        True
        """
        super(SphericalDetector, self).__init__(partition, 3, check_bounds)
        if self.ndim != 2:
            raise ValueError('`partition` must be 2-dimensional, got ndim={}'
                             ''.format(self.ndim))
        # Keep the original object around for error messages only.
        axes, axes_in = np.asarray(axes, dtype=float), axes
        if axes.shape != (2, 3):
            raise ValueError('`axes` must be a sequence of 2 3-dimensional '
                             'vectors, got {}'.format(axes_in))
        if np.linalg.norm(np.cross(*axes)) == 0:
            raise ValueError('`axes` {} are linearly dependent'
                             ''.format(axes_in))
        # NOTE(review): exact-zero dot product check means numerically
        # near-perpendicular axes are rejected — presumably intentional.
        if np.linalg.norm(np.dot(*axes)) != 0:
            raise ValueError('`axes` {} are not perpendicular'
                             ''.format(axes_in))
        self.__axes = axes / np.linalg.norm(axes, axis=1, keepdims=True)
        self.__radius = float(radius)
        if self.__radius <= 0:
            raise ValueError('`radius` must be positive')
        # Compose two rotations taking the reference axes to the given ones.
        initial_axes = np.array([[0, -1, 0], [0, 0, 1]])
        r1 = rotation_matrix_from_to(initial_axes[0], axes[0])
        r2 = rotation_matrix_from_to(np.matmul(r1, initial_axes[1]), axes[1])
        self.__rotation_matrix = np.matmul(r2, r1)
        # Shift so that the detector surface passes through the origin.
        self.__translation = (- self.__radius
                              * np.matmul(self.__rotation_matrix, (1, 0, 0)))

    @property
    def axes(self):
        """Fixed array of unit vectors with which the detector is aligned."""
        return self.__axes

    @property
    def radius(self):
        """Curvature radius of the detector."""
        return self.__radius

    @property
    def rotation_matrix(self):
        """Rotation matrix that is used to align the detector
        with a given axis."""
        return self.__rotation_matrix

    @property
    def translation(self):
        """A vector used to shift the detector towards the origin."""
        return self.__translation

    def surface(self, param):
        """Return the detector surface point corresponding to ``param``.
        For parameters ``phi`` and ``theta``, the surface point is given by ::
            surf = R * radius * ( cos(phi) * cos(theta),
                                 -sin(phi) * cos(theta),
                                  sin(theta)) + t
        where ``R`` is a rotation matrix and ``t`` is a translation vector.
        Note that increase of ``phi`` corresponds to rotation
        in the clockwise direction, by analogy to flat detectors.
        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. A sequence of
            parameters must have length 2.
        Returns
        -------
        point : `numpy.ndarray`
            Vector(s) pointing from the origin to the detector surface
            point at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(3,)``, otherwise ``broadcast(*param).shape + (3,)``.
        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:
        >>> part = odl.uniform_partition([-np.pi / 2, -np.pi / 3],
        ...                              [ np.pi / 2, np.pi / 3], [20, 10])
        >>> det = SphericalDetector(
        ...     part, axes=[(1, 0, 0), (0, 0, 1)], radius = 2)
        >>> det.surface([0, 0])
        array([ 0., 0., 0.])
        >>> np.round(det.surface([ np.pi / 2, np.pi / 3]), 2)
        array([ 1. , -2. , 1.73])
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):
        >>> # 3 pairs of parameters, resulting in 3 vectors
        >>> np.round(det.surface([[-np.pi / 2, 0, np.pi / 2],
        ...                       [-np.pi / 3, 0, np.pi / 3]]), 2)
        array([[-1. , -2. , -1.73],
               [ 0. , 0. , 0. ],
               [ 1. , -2. , 1.73]])
        >>> # Pairs of parameters in a (4, 5) array each
        >>> param = (np.zeros((4, 5)), np.zeros((4, 5)))
        >>> det.surface(param).shape
        (4, 5, 3)
        """
        squeeze_out = (np.broadcast(*param).shape == ())
        param_in = param
        param = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                      for p in param)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param_in, self.params))
        # Point on the reference sphere, then rotate and translate.
        surf = np.empty(param[0].shape + (3,))
        surf[..., 0] = np.cos(param[0]) * np.cos(param[1])
        surf[..., 1] = -np.sin(param[0]) * np.cos(param[1])
        surf[..., 2] = np.sin(param[1])
        surf *= self.radius
        surf = np.matmul(surf, np.transpose(self.rotation_matrix))
        surf += self.translation
        if squeeze_out:
            surf = surf.squeeze()
        return surf

    def surface_deriv(self, param):
        """Return the surface derivative at ``param``.
        The derivative at parameters ``phi`` and ``theta`` is given by ::
            deriv = R * radius
                * ((-sin(phi) * cos(theta), -cos(phi) * sin(theta)),
                   (-cos(phi) * cos(theta),  sin(phi) * sin(theta)),
                   (                      0,           cos(theta)))
        where R is a rotation matrix.
        Parameters
        ----------
        param : `array-like` or sequence
            Parameter value(s) at which to evaluate. A sequence of
            parameters must have length 2.
        Returns
        -------
        deriv : `numpy.ndarray`
            Array representing the derivative vector(s) at ``param``.
            If ``param`` is a single parameter, the returned array has
            shape ``(2,)``, otherwise ``param.shape + (2,)``.
        See Also
        --------
        surface
        Examples
        --------
        The method works with a single parameter, resulting in a single
        vector:
        >>> part = odl.uniform_partition([-np.pi / 2, -np.pi / 3],
        ...                              [ np.pi / 2, np.pi / 3], [20, 10])
        >>> det = SphericalDetector(
        ...     part, axes=[(1, 0, 0), (0, 0, 1)], radius = 2)
        >>> np.round(det.surface_deriv([0, 0]), 10)
        array([[ 2., -0., 0.],
               [ 0., 0., 2.]])
        It is also vectorized, i.e., it can be called with multiple
        parameters at once (or an n-dimensional array of parameters):
        >>> # 2 pairs of parameters, resulting in 3 vectors for each axis
        >>> deriv = det.surface_deriv([[0, np.pi / 2], [0, np.pi / 3]])
        >>> np.round(deriv[0], 10)
        array([[ 2., -0., 0.],
               [ 0., 0., 2.]])
        >>> np.round(deriv[1], 2)
        array([[ 0. , -1. , 0. ],
               [-1.73, 0. , 1. ]])
        >>> # Pairs of parameters in a (4, 5) array each
        >>> param = (np.zeros((4, 5)), np.zeros((4, 5)))  # pairs of params
        >>> det.surface_deriv(param).shape
        (4, 5, 2, 3)
        """
        squeeze_out = (np.broadcast(*param).shape == ())
        param_in = param
        param = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                      for p in param)
        if self.check_bounds and not is_inside_bounds(param, self.params):
            raise ValueError('`param` {} not in the valid range '
                             '{}'.format(param_in, self.params))
        # Partial derivative w.r.t. the azimuthal angle phi.
        deriv_phi = np.empty(param[0].shape + (3,))
        deriv_phi[..., 0] = -np.sin(param[0]) * np.cos(param[1])
        deriv_phi[..., 1] = -np.cos(param[0]) * np.cos(param[1])
        deriv_phi[..., 2] = 0
        deriv_phi *= self.radius
        # Partial derivative w.r.t. the polar angle theta.
        deriv_theta = np.empty(param[0].shape + (3,))
        deriv_theta[..., 0] = -np.cos(param[0]) * np.sin(param[1])
        deriv_theta[..., 1] = np.sin(param[0]) * np.sin(param[1])
        deriv_theta[..., 2] = np.cos(param[1])
        deriv_theta *= self.radius
        deriv = np.stack((deriv_phi, deriv_theta), axis=-2)
        deriv = np.matmul(deriv, np.transpose(self.rotation_matrix))
        if squeeze_out:
            deriv = deriv.squeeze()
        return deriv

    def __repr__(self):
        """Return ``repr(self)``."""
        posargs = [self.partition]
        # BUG FIX: this previously evaluated ``array_str(self.center)``,
        # but no ``center`` attribute exists on this class, so calling
        # ``repr()`` raised AttributeError. Report the actual ``radius``
        # constructor argument instead (default ``None`` so it is shown).
        optargs = [('radius', self.radius, None)]
        inner_str = signature_string(posargs, optargs, sep=',\n')
        return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))

    def __str__(self):
        """Return ``str(self)``."""
        return repr(self)
if __name__ == '__main__':
    # Run this module's doctests when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
from __future__ import unicode_literals
import datetime
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
InAdvancePagedList,
orderedSet,
str_to_int,
unified_strdate,
)
class MotherlessIE(InfoExtractor):
    """Extractor for single motherless.com video pages."""
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
    _TESTS = [{
        'url': 'http://motherless.com/AC3FFE1',
        'md5': '310f62e325a9fafe64f68c0bccb6e75f',
        'info_dict': {
            'id': 'AC3FFE1',
            'ext': 'mp4',
            'title': 'Fucked in the ass while playing PS3',
            'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
            'upload_date': '20100913',
            'uploader_id': 'famouslyfuckedup',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        'url': 'http://motherless.com/532291B',
        'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
        'info_dict': {
            'id': '532291B',
            'ext': 'mp4',
            'title': 'Amazing girl playing the omegle game, PERFECT!',
            'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen',
                           'game', 'hairy'],
            'upload_date': '20140622',
            'uploader_id': 'Sulivana7x',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'skip': '404',
    }, {
        'url': 'http://motherless.com/g/cosplay/633979F',
        'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
        'info_dict': {
            'id': '633979F',
            'ext': 'mp4',
            'title': 'Turtlette',
            'categories': ['superheroine heroine  superher'],
            'upload_date': '20140827',
            'uploader_id': 'shade0230',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        # no keywords
        'url': 'http://motherless.com/8B4BBC1',
        'only_matching': True,
    }, {
        # see https://motherless.com/videos/recent for recent videos with
        # uploaded date in "ago" format
        'url': 'https://motherless.com/3C3E2CF',
        'info_dict': {
            'id': '3C3E2CF',
            'ext': 'mp4',
            'title': 'a/ Hot Teens',
            'categories': list,
            'upload_date': '20210104',
            'uploader_id': 'yonbiw',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }]
    def _real_extract(self, url):
        """Download the video page and scrape metadata + media URL."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Bail out early on the site's "not found" page variants.
        if any(p in webpage for p in (
                '<title>404 - MOTHERLESS.COM<',
                ">The page you're looking for cannot be found.<")):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
        # Private videos are only visible to the uploader's friends.
        if '>The content you are trying to view is for friends only.' in webpage:
            raise ExtractorError('Video %s is for friends only' % video_id, expected=True)
        title = self._html_search_regex(
            (r'(?s)<div[^>]+\bclass=["\']media-meta-title[^>]+>(.+?)</div>',
             r'id="view-upload-title">\s+([^<]+)<'), webpage, 'title')
        # Fall back to a predictable CDN URL when the page embeds no player
        # config — assumes the CDN layout is stable; TODO confirm.
        video_url = (self._html_search_regex(
            (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
             r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
            webpage, 'video URL', default=None, group='url')
            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
        age_limit = self._rta_search(webpage)
        view_count = str_to_int(self._html_search_regex(
            (r'>([\d,.]+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._html_search_regex(
            (r'>([\d,.]+)\s+Favorites<',
             r'<strong>Favorited</strong>\s+([^<]+)<'),
            webpage, 'like count', fatal=False))
        # Absolute dates like "13 Sep 2010"; recent uploads instead show a
        # relative "<N>h ago" / "<N>d ago" which is handled below.
        upload_date = unified_strdate(self._search_regex(
            r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<', webpage,
            'upload date', default=None))
        if not upload_date:
            uploaded_ago = self._search_regex(
                r'>\s*(\d+[hd])\s+[aA]go\b', webpage, 'uploaded ago',
                default=None)
            if uploaded_ago:
                delta = int(uploaded_ago[:-1])
                _AGO_UNITS = {
                    'h': 'hours',
                    'd': 'days',
                }
                # Convert the relative age into an absolute YYYYMMDD date.
                kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
                upload_date = (datetime.datetime.utcnow() - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
        # Approximate: one marker element per rendered comment.
        comment_count = webpage.count('class="media-comment-contents"')
        uploader_id = self._html_search_regex(
            r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
            webpage, 'uploader_id')
        categories = self._html_search_meta('keywords', webpage, default=None)
        if categories:
            categories = [cat.strip() for cat in categories.split(',')]
        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'categories': categories,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'age_limit': age_limit,
            'url': video_url,
        }
class MotherlessGroupIE(InfoExtractor):
    # Matches group pages: /g/<id> (gallery) and /gv/<id> (gallery videos).
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
    _TESTS = [{
        'url': 'http://motherless.com/g/movie_scenes',
        'info_dict': {
            'id': 'movie_scenes',
            'title': 'Movie Scenes',
            'description': 'Hot and sexy scenes from "regular" movies... '
                           'Beautiful actresses fully nude... A looot of '
                           'skin! :)Enjoy!',
        },
        'playlist_mincount': 662,
    }, {
        'url': 'http://motherless.com/gv/sex_must_be_funny',
        'info_dict': {
            'id': 'sex_must_be_funny',
            'title': 'Sex must be funny',
            'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
                           'any kind!'
        },
        'playlist_mincount': 9,
    }]
    @classmethod
    def suitable(cls, url):
        # Single-video URLs are handled by MotherlessIE; refuse them here so
        # the group extractor never shadows the video extractor.
        return (False if MotherlessIE.suitable(url)
                else super(MotherlessGroupIE, cls).suitable(url))
    def _extract_entries(self, webpage, base):
        """Collect url_result entries for all videos linked on a group page.

        Primary strategy: scan anchor tags (optionally with a thumbnail <img>
        whose alt attribute carries the title) and keep only hrefs that
        MotherlessIE recognizes. Fallback: if that yields nothing, build
        entries from data-codename attributes instead.
        """
        entries = []
        for mobj in re.finditer(
                r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?',
                webpage):
            video_url = compat_urlparse.urljoin(base, mobj.group('href'))
            if not MotherlessIE.suitable(video_url):
                continue
            video_id = MotherlessIE._match_id(video_url)
            # title group is optional in the regex, so this may be None
            title = mobj.group('title')
            entries.append(self.url_result(
                video_url, ie=MotherlessIE.ie_key(), video_id=video_id,
                video_title=title))
        # Alternative fallback
        if not entries:
            entries = [
                self.url_result(
                    compat_urlparse.urljoin(base, '/' + entry_id),
                    ie=MotherlessIE.ie_key(), video_id=entry_id)
                for entry_id in orderedSet(re.findall(
                    r'data-codename=["\']([A-Z0-9]+)', webpage))]
        return entries
    def _real_extract(self, url):
        """Return a lazily-paged playlist of all videos in the group."""
        group_id = self._match_id(url)
        # Always go through the /gv/ (videos) listing, regardless of whether
        # the input URL used /g/ or /gv/.
        page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id)
        webpage = self._download_webpage(page_url, group_id)
        title = self._search_regex(
            r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
        description = self._html_search_meta(
            'description', webpage, fatal=False)
        # Total number of pages is taken from the pagination widget, i.e. the
        # number displayed right before the NEXT link.
        page_count = self._int(self._search_regex(
            r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT',
            webpage, 'page_count'), 'page_count')
        PAGE_SIZE = 80
        def _get_page(idx):
            # Pages are 1-based in the site's query string.
            webpage = self._download_webpage(
                page_url, group_id, query={'page': idx + 1},
                note='Downloading page %d/%d' % (idx + 1, page_count)
            )
            for entry in self._extract_entries(webpage, url):
                yield entry
        # InAdvancePagedList downloads pages on demand, so huge groups are
        # not fetched eagerly.
        playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
        return {
            '_type': 'playlist',
            'id': group_id,
            'title': title,
            'description': description,
            'entries': playlist
        }
| unlicense | 516ebe400c1ba4575ce214855147fb4a | 37.327586 | 108 | 0.494827 | 3.534181 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/watchindianporn.py | 49 | 2297 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_duration
class WatchIndianPornIE(InfoExtractor):
    """Extractor for watchindianporn.net video pages."""
    IE_DESC = 'Watch Indian Porn'
    _VALID_URL = r'https?://(?:www\.)?watchindianporn\.net/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
    _TEST = {
        'url': 'http://www.watchindianporn.net/video/hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera-RZa2avywNPa.html',
        'md5': '249589a164dde236ec65832bfce17440',
        'info_dict': {
            'id': 'RZa2avywNPa',
            'display_id': 'hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera',
            'ext': 'mp4',
            'title': 'Hot milf from kerala shows off her gorgeous large breasts on camera',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 226,
            'view_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(url, display_id)
        # Media URL/format info comes straight from the HTML5 <video> element.
        info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]
        title = self._html_search_regex((
            r'<title>(.+?)\s*-\s*Indian\s+Porn</title>',
            r'<h4>(.+?)</h4>'
        ), webpage, 'title')
        duration = parse_duration(self._search_regex(
            r'Time:\s*<strong>\s*(.+?)\s*</strong>',
            webpage, 'duration', fatal=False))
        # Bug fix: with fatal=False the regex may yield None, and the old
        # unconditional int(...) call raised TypeError on such pages.
        view_count = self._search_regex(
            r'(?s)Time:\s*<strong>.*?</strong>.*?<strong>\s*(\d+)\s*</strong>',
            webpage, 'view count', fatal=False)
        view_count = int(view_count) if view_count else None
        categories = re.findall(
            r'<a[^>]+class=[\'"]categories[\'"][^>]*>\s*([^<]+)\s*</a>',
            webpage)
        info_dict.update({
            'id': video_id,
            'display_id': display_id,
            # The site checks the Referer header when serving media files.
            'http_headers': {
                'Referer': url,
            },
            'title': title,
            'duration': duration,
            'view_count': view_count,
            'categories': categories,
            'age_limit': 18,
        })
        return info_dict
| unlicense | 08531848efbf5aec44549cfecc957aed | 32.779412 | 139 | 0.521985 | 3.397929 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/senateisvp.py | 59 | 6275 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unsmuggle_url,
)
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class SenateISVPIE(InfoExtractor):
    """Extractor for the US Senate Integrated Senate Video Player (ISVP).

    Each Senate committee streams from its own Akamai host; _COMM_MAP maps
    a committee short name to its [stream number, CDN domain].
    """
    _COMM_MAP = [
        ['ag', '76440', 'http://ag-f.akamaihd.net'],
        ['aging', '76442', 'http://aging-f.akamaihd.net'],
        ['approps', '76441', 'http://approps-f.akamaihd.net'],
        ['armed', '76445', 'http://armed-f.akamaihd.net'],
        ['banking', '76446', 'http://banking-f.akamaihd.net'],
        ['budget', '76447', 'http://budget-f.akamaihd.net'],
        ['cecc', '76486', 'http://srs-f.akamaihd.net'],
        ['commerce', '80177', 'http://commerce1-f.akamaihd.net'],
        ['csce', '75229', 'http://srs-f.akamaihd.net'],
        ['dpc', '76590', 'http://dpc-f.akamaihd.net'],
        ['energy', '76448', 'http://energy-f.akamaihd.net'],
        ['epw', '76478', 'http://epw-f.akamaihd.net'],
        ['ethics', '76449', 'http://ethics-f.akamaihd.net'],
        ['finance', '76450', 'http://finance-f.akamaihd.net'],
        ['foreign', '76451', 'http://foreign-f.akamaihd.net'],
        ['govtaff', '76453', 'http://govtaff-f.akamaihd.net'],
        ['help', '76452', 'http://help-f.akamaihd.net'],
        ['indian', '76455', 'http://indian-f.akamaihd.net'],
        ['intel', '76456', 'http://intel-f.akamaihd.net'],
        ['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'],
        ['jccic', '85180', 'http://jccic-f.akamaihd.net'],
        ['jec', '76458', 'http://jec-f.akamaihd.net'],
        ['judiciary', '76459', 'http://judiciary-f.akamaihd.net'],
        ['rpc', '76591', 'http://rpc-f.akamaihd.net'],
        ['rules', '76460', 'http://rules-f.akamaihd.net'],
        ['saa', '76489', 'http://srs-f.akamaihd.net'],
        ['smbiz', '76461', 'http://smbiz-f.akamaihd.net'],
        ['srs', '75229', 'http://srs-f.akamaihd.net'],
        ['uscc', '76487', 'http://srs-f.akamaihd.net'],
        ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
        ['arch', '', 'http://ussenate-f.akamaihd.net/']
    ]
    _IE_NAME = 'senate.gov'
    _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
    _TESTS = [{
        'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
        'info_dict': {
            'id': 'judiciary031715',
            'ext': 'mp4',
            'title': 'Integrated Senate Video Player',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false',
        'info_dict': {
            'id': 'commerce011514',
            'ext': 'mp4',
            'title': 'Integrated Senate Video Player'
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi',
        # checksum differs each time
        'info_dict': {
            'id': 'intel090613',
            'ext': 'mp4',
            'title': 'Integrated Senate Video Player'
        }
    }, {
        # From http://www.c-span.org/video/?96791-1
        'url': 'http://www.senate.gov/isvp?type=live&comm=banking&filename=banking012715',
        'only_matching': True,
    }]
    @staticmethod
    def _search_iframe_url(webpage):
        """Return the first embedded ISVP iframe URL found in webpage, or None."""
        mobj = re.search(
            r"<iframe[^>]+src=['\"](?P<url>https?://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]",
            webpage)
        if mobj:
            return mobj.group('url')
    def _get_info_for_comm(self, committee):
        """Return [stream number, CDN domain] for a committee short name.

        Returns None for unknown committees (callers unpack the result, so an
        unknown committee surfaces as a TypeError).
        """
        for entry in self._COMM_MAP:
            if entry[0] == committee:
                return entry[1:]
    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        qs = compat_parse_qs(re.match(self._VALID_URL, url).group('qs'))
        if not qs.get('filename') or not qs.get('type') or not qs.get('comm'):
            raise ExtractorError('Invalid URL', expected=True)
        # Bug fix: the dot must be escaped, otherwise r'.mp4$' also matched
        # e.g. 'xmp4' and silently ate an arbitrary trailing character.
        video_id = re.sub(r'\.mp4$', '', qs['filename'][0])
        webpage = self._download_webpage(url, video_id)
        # C-SPAN (and other embedders) can smuggle a better title through.
        if smuggled_data.get('force_title'):
            title = smuggled_data['force_title']
        else:
            title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, video_id)
        poster = qs.get('poster')
        thumbnail = poster[0] if poster else None
        video_type = qs['type'][0]
        # Archived footage lives on a shared host keyed by type 'arch'.
        committee = video_type if video_type == 'arch' else qs['comm'][0]
        stream_num, domain = self._get_info_for_comm(committee)
        formats = []
        if video_type == 'arch':
            filename = video_id if '.' in video_id else video_id + '.mp4'
            formats = [{
                # All parameters in the query string are necessary to prevent a 403 error
                'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=',
            }]
        else:
            hdcore_sign = 'hdcore=3.1.0'
            url_params = (domain, video_id, stream_num)
            f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign
            m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params
            for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'):
                # URLs without the extra param induce an 404 error
                entry.update({'extra_param_to_segment_url': hdcore_sign})
                formats.append(entry)
            for entry in self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', m3u8_id='m3u8'):
                # Distinguish the -p/-b m3u8 variants in the format id.
                mobj = re.search(r'(?P<tag>(?:-p|-b)).m3u8', entry['url'])
                if mobj:
                    entry['format_id'] += mobj.group('tag')
                formats.append(entry)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
        }
| unlicense | 61d51c7cb7cbccc1fed8fc513aaf831e | 40.013072 | 238 | 0.532112 | 3.024096 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/moevideo.py | 19 | 2835 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
)
class MoeVideoIE(InfoExtractor):
    """Extractor for the LetitBit family of video hosts."""
    IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net'
    _VALID_URL = r'''(?x)
        https?://(?P<host>(?:www\.)?
        (?:(?:moevideo|playreplay|videochart)\.net|thesame\.tv))/
        (?:video|framevideo|embed)/(?P<id>[0-9a-z]+\.[0-9A-Za-z]+)'''
    _API_URL = 'http://api.letitbit.net/'
    _API_KEY = 'tVL0gjqo5'
    _TESTS = [
        {
            'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29',
            'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a',
            'info_dict': {
                'id': '00297.0036103fe3d513ef27915216fd29',
                'ext': 'flv',
                'title': 'Sink cut out machine',
                'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8',
                'thumbnail': r're:^https?://.*\.jpg$',
                'width': 540,
                'height': 360,
                'duration': 179,
                'filesize': 17822500,
            },
            'skip': 'Video has been removed',
        },
        {
            'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a',
            'md5': '74f0a014d5b661f0f0e2361300d1620e',
            'info_dict': {
                'id': '77107.7f325710a627383d40540d8e991a',
                'ext': 'flv',
                'title': 'Operacion Condor.',
                'description': 'md5:7e68cb2fcda66833d5081c542491a9a3',
                'thumbnail': r're:^https?://.*\.jpg$',
                'width': 480,
                'height': 296,
                'duration': 6027,
                'filesize': 588257923,
            },
            'skip': 'Video has been removed',
        },
    ]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        video_id = mobj.group('id')
        # The regular video page supplies title/description/thumbnail.
        webpage = self._download_webpage(
            'http://%s/video/%s' % (host, video_id),
            video_id, 'Downloading webpage')
        title = self._og_search_title(webpage)
        # The embed page carries the player config with the media URL.
        embed_page = self._download_webpage(
            'http://%s/embed/%s' % (host, video_id),
            video_id, 'Downloading embed webpage')
        player_config = self._search_regex(
            r'mvplayer\("#player"\s*,\s*({.+})',
            embed_page, 'mvplayer')
        video = self._parse_json(player_config, video_id)['video']
        thumbnail = video.get('poster') or self._og_search_thumbnail(webpage)
        description = clean_html(self._og_search_description(webpage))
        duration = int_or_none(self._og_search_property('video:duration', webpage))
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
            'duration': duration,
            'url': video['ourUrl'],
        }
| unlicense | 26c8219fb1eb4d8ff97eef0f351434e5 | 34.886076 | 89 | 0.516402 | 3.351064 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/postprocessor/metadatafromtitle.py | 51 | 1652 | from __future__ import unicode_literals
import re
from .common import PostProcessor
class MetadataFromTitlePP(PostProcessor):
    """Parse metadata fields out of a video title.

    The title format may either contain %(field)s placeholders, which are
    translated into a named-group regex, or be a ready-made regular
    expression with named groups.
    """
    def __init__(self, downloader, titleformat):
        super(MetadataFromTitlePP, self).__init__(downloader)
        self._titleformat = titleformat
        if re.search(r'%\(\w+\)s', titleformat):
            self._titleregex = self.format_to_regex(titleformat)
        else:
            # No placeholders: treat the format string as a regex itself.
            self._titleregex = titleformat
    def format_to_regex(self, fmt):
        r"""Translate a %(field)s-style format into a named-group regex.

        For example, '%(title)s - %(artist)s' becomes
        '(?P<title>.+)\ \-\ (?P<artist>.+)'.
        """
        pieces = []
        pos = 0
        # Each placeholder becomes a greedy named group; the literal text
        # between placeholders is regex-escaped.
        for placeholder in re.finditer(r'%\((\w+)\)s', fmt):
            pieces.append(re.escape(fmt[pos:placeholder.start()]))
            pieces.append(r'(?P<' + placeholder.group(1) + '>.+)')
            pos = placeholder.end()
        if pos < len(fmt):
            pieces.append(re.escape(fmt[pos:]))
        return ''.join(pieces)
    def run(self, info):
        title = info['title']
        mobj = re.match(self._titleregex, title)
        if mobj is None:
            self._downloader.to_screen(
                '[fromtitle] Could not interpret title of video as "%s"'
                % self._titleformat)
            return [], info
        # Copy every captured named group into the info dict.
        for attribute, value in mobj.groupdict().items():
            info[attribute] = value
            self._downloader.to_screen(
                '[fromtitle] parsed %s: %s'
                % (attribute, value if value is not None else 'NA'))
        return [], info
| unlicense | 57b66fe982b6b6cc6047b2aef32b9573 | 33.416667 | 72 | 0.526029 | 4.13 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/blinkx.py | 12 | 3217 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
remove_start,
int_or_none,
)
class BlinkxIE(InfoExtractor):
    _VALID_URL = r'(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
    IE_NAME = 'blinkx'
    _TEST = {
        'url': 'http://www.blinkx.com/ce/Da0Gw3xc5ucpNduzLuDDlv4WC9PuI4fDi1-t6Y3LyfdY2SZS5Urbvn-UPJvrvbo8LTKTc67Wu2rPKSQDJyZeeORCR8bYkhs8lI7eqddznH2ofh5WEEdjYXnoRtj7ByQwt7atMErmXIeYKPsSDuMAAqJDlQZ-3Ff4HJVeH_s3Gh8oQ',
        'md5': '337cf7a344663ec79bf93a526a2e06c7',
        'info_dict': {
            'id': 'Da0Gw3xc',
            'ext': 'mp4',
            'title': 'No Daily Show for John Oliver; HBO Show Renewed - IGN News',
            'uploader': 'IGN News',
            'upload_date': '20150217',
            'timestamp': 1424215740,
            'description': 'HBO has renewed Last Week Tonight With John Oliver for two more seasons.',
            'duration': 47.743333,
        },
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Only the first 8 characters are used as the public video id.
        display_id = video_id[:8]
        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&'
                   + 'video=%s' % video_id)
        data_json = self._download_webpage(api_url, display_id)
        data = json.loads(data_json)['api']['results'][0]
        duration = None
        thumbnails = []
        formats = []
        # Each media entry is classified by its 'type'; jpg entries become
        # thumbnails, 'original' only carries the duration, and youtube
        # entries short-circuit the whole extraction with a redirect.
        for m in data['media']:
            if m['type'] == 'jpg':
                thumbnails.append({
                    'url': m['link'],
                    'width': int(m['w']),
                    'height': int(m['h']),
                })
            elif m['type'] == 'original':
                duration = float(m['d'])
            elif m['type'] == 'youtube':
                yt_id = m['link']
                self.to_screen('Youtube video detected: %s' % yt_id)
                # Delegate to the YouTube extractor; nothing collected so
                # far is needed in that case.
                return self.url_result(yt_id, 'Youtube', video_id=yt_id)
            elif m['type'] in ('flv', 'mp4'):
                # Codec names carry an 'ff' (ffmpeg) prefix in the API.
                vcodec = remove_start(m['vcodec'], 'ff')
                acodec = remove_start(m['acodec'], 'ff')
                # Bitrates are reported in bps; convert to kbps.
                vbr = int_or_none(m.get('vbr') or m.get('vbitrate'), 1000)
                abr = int_or_none(m.get('abr') or m.get('abitrate'), 1000)
                tbr = vbr + abr if vbr and abr else None
                format_id = '%s-%sk-%s' % (vcodec, tbr, m['w'])
                formats.append({
                    'format_id': format_id,
                    'url': m['link'],
                    'vcodec': vcodec,
                    'acodec': acodec,
                    'abr': abr,
                    'vbr': vbr,
                    'tbr': tbr,
                    'width': int_or_none(m.get('w')),
                    'height': int_or_none(m.get('h')),
                })
        self._sort_formats(formats)
        return {
            'id': display_id,
            'fullid': video_id,
            'title': data['title'],
            'formats': formats,
            'uploader': data['channel_name'],
            'timestamp': data['pubdate_epoch'],
            'description': data.get('description'),
            'thumbnails': thumbnails,
            'duration': duration,
        }
| unlicense | 087e0927759b3e3c4a7edd7c084eeb1e | 36.406977 | 216 | 0.482437 | 3.306269 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/viqeo.py | 23 | 3298 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
str_or_none,
url_or_none,
)
class ViqeoIE(InfoExtractor):
    # Accepts embed URLs, startup API URLs, and the internal viqeo: scheme.
    _VALID_URL = r'''(?x)
                        (?:
                            viqeo:|
                            https?://cdn\.viqeo\.tv/embed/*\?.*?\bvid=|
                            https?://api\.viqeo\.tv/v\d+/data/startup?.*?\bvideo(?:%5B%5D|\[\])=
                        )
                        (?P<id>[\da-f]+)
                    '''
    _TESTS = [{
        'url': 'https://cdn.viqeo.tv/embed/?vid=cde96f09d25f39bee837',
        'md5': 'a169dd1a6426b350dca4296226f21e76',
        'info_dict': {
            'id': 'cde96f09d25f39bee837',
            'ext': 'mp4',
            'title': 'cde96f09d25f39bee837',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 76,
        },
    }, {
        'url': 'viqeo:cde96f09d25f39bee837',
        'only_matching': True,
    }, {
        'url': 'https://api.viqeo.tv/v1/data/startup?video%5B%5D=71bbec412ade45c3216c&profile=112',
        'only_matching': True,
    }]
    @staticmethod
    def _extract_urls(webpage):
        # Find all Viqeo embed iframes on a third-party page.
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cdn\.viqeo\.tv/embed/*\?.*?\bvid=[\da-f]+.*?)\1',
                webpage)]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'https://cdn.viqeo.tv/embed/?vid=%s' % video_id, video_id)
        # The embed page inlines the player config as a SLOT_DATA JS object.
        data = self._parse_json(
            self._search_regex(
                r'SLOT_DATA\s*=\s*({.+?})\s*;', webpage, 'slot data'),
            video_id)
        formats = []
        thumbnails = []
        # mediaFiles mixes images, video and audio; sort each entry into
        # thumbnails or formats based on the major MIME type.
        for media_file in data['mediaFiles']:
            if not isinstance(media_file, dict):
                continue
            media_url = url_or_none(media_file.get('url'))
            if not media_url or not media_url.startswith(('http', '//')):
                continue
            media_type = str_or_none(media_file.get('type'))
            if not media_type:
                continue
            media_kind = media_type.split('/')[0].lower()
            f = {
                'url': media_url,
                'width': int_or_none(media_file.get('width')),
                'height': int_or_none(media_file.get('height')),
            }
            format_id = str_or_none(media_file.get('quality'))
            if media_kind == 'image':
                f['id'] = format_id
                thumbnails.append(f)
            elif media_kind in ('video', 'audio'):
                is_audio = media_kind == 'audio'
                f.update({
                    'format_id': 'audio' if is_audio else format_id,
                    'fps': int_or_none(media_file.get('fps')),
                    # Audio-only entries must advertise no video codec so
                    # format sorting ranks them correctly.
                    'vcodec': 'none' if is_audio else None,
                })
                formats.append(f)
        self._sort_formats(formats)
        duration = int_or_none(data.get('duration'))
        return {
            'id': video_id,
            # No human-readable title is exposed; fall back to the id.
            'title': video_id,
            'duration': duration,
            'thumbnails': thumbnails,
            'formats': formats,
        }
| unlicense | 2af9344a4ba223122c1ab5adabd3de9b | 32.313131 | 114 | 0.459369 | 3.608315 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/academicearth.py | 142 | 1399 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class AcademicEarthCourseIE(InfoExtractor):
    """Playlist extractor for academicearth.org course pages."""
    _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
    IE_NAME = 'AcademicEarth:Course'
    _TEST = {
        'url': 'http://academicearth.org/playlists/laws-of-nature/',
        'info_dict': {
            'id': 'laws-of-nature',
            'title': 'Laws of Nature',
            'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
        },
        'playlist_count': 3,
    }
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._html_search_regex(
            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
        description = self._html_search_regex(
            r'<p class="excerpt"[^>]*?>(.*?)</p>',
            webpage, 'description', fatal=False)
        # Each lecture link becomes a delegated url_result entry.
        lecture_urls = re.findall(
            r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
            webpage)
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': title,
            'description': description,
            'entries': [self.url_result(lecture_url) for lecture_url in lecture_urls],
        }
| unlicense | 69a3115c4ee7daac78961fc6dc2088f1 | 33.121951 | 139 | 0.54253 | 3.596401 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/tvn24.py | 15 | 3902 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
NO_DEFAULT,
unescapeHTML,
)
class TVN24IE(InfoExtractor):
    """Extractor for tvn24.pl / tvn24bis.pl and related TVN sites."""
    _VALID_URL = r'https?://(?:(?:[^/]+)\.)?tvn24(?:bis)?\.pl/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.tvn24.pl/wiadomosci-z-kraju,3/oredzie-artura-andrusa,702428.html',
        'md5': 'fbdec753d7bc29d96036808275f2130c',
        'info_dict': {
            'id': '1584444',
            'ext': 'mp4',
            'title': '"Święta mają być wesołe, dlatego, ludziska, wszyscy pod jemiołę"',
            'description': 'Wyjątkowe orędzie Artura Andrusa, jednego z gości Szkła kontaktowego.',
            'thumbnail': 're:https?://.*[.]jpeg',
        }
    }, {
        # different layout
        'url': 'https://tvnmeteo.tvn24.pl/magazyny/maja-w-ogrodzie,13/odcinki-online,1,4,1,0/pnacza-ptaki-i-iglaki-odc-691-hgtv-odc-29,1771763.html',
        'info_dict': {
            'id': '1771763',
            'ext': 'mp4',
            'title': 'Pnącza, ptaki i iglaki (odc. 691 /HGTV odc. 29)',
            'thumbnail': 're:https?://.*',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://fakty.tvn24.pl/ogladaj-online,60/53-konferencja-bezpieczenstwa-w-monachium,716431.html',
        'only_matching': True,
    }, {
        'url': 'http://sport.tvn24.pl/pilka-nozna,105/ligue-1-kamil-glik-rozcial-glowe-monaco-tylko-remisuje-z-bastia,716522.html',
        'only_matching': True,
    }, {
        'url': 'http://tvn24bis.pl/poranek,146,m/gen-koziej-w-tvn24-bis-wracamy-do-czasow-zimnej-wojny,715660.html',
        'only_matching': True,
    }, {
        'url': 'https://www.tvn24.pl/magazyn-tvn24/angie-w-jednej-czwartej-polka-od-szarej-myszki-do-cesarzowej-europy,119,2158',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        title = self._og_search_title(
            webpage, default=None) or self._search_regex(
            r'<h\d+[^>]+class=["\']magazineItemHeader[^>]+>(.+?)</h',
            webpage, 'title')
        def extract_json(attr, name, default=NO_DEFAULT, fatal=True):
            # Player settings are stored as HTML-escaped JSON in data-*
            # attributes; unescape before parsing.
            return self._parse_json(
                self._search_regex(
                    r'\b%s=(["\'])(?P<json>(?!\1).+?)\1' % attr, webpage,
                    name, group='json', default=default, fatal=fatal) or '{}',
                display_id, transform_source=unescapeHTML, fatal=fatal)
        quality_data = extract_json('data-quality', 'formats')
        formats = []
        # Bug fix: the loop variable used to be named `url`, shadowing the
        # function's `url` parameter.
        for format_id, format_url in quality_data.items():
            formats.append({
                'url': format_url,
                'format_id': format_id,
                # format ids look like '720p'; strip the suffix for height
                'height': int_or_none(format_id.rstrip('p')),
            })
        self._sort_formats(formats)
        description = self._og_search_description(webpage, default=None)
        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_regex(
            r'\bdata-poster=(["\'])(?P<url>(?!\1).+?)\1', webpage,
            'thumbnail', group='url')
        video_id = None
        share_params = extract_json(
            'data-share-params', 'share params', default=None)
        if isinstance(share_params, dict):
            video_id = share_params.get('id')
        if not video_id:
            # Fall back to the data-vid-id attribute, then to the numeric
            # suffix in the page URL.
            video_id = self._search_regex(
                r'data-vid-id=["\'](\d+)', webpage, 'video id',
                default=None) or self._search_regex(
                r',(\d+)\.html', url, 'video id', default=display_id)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| unlicense | c21c33d2ba48d97346b1b718beee66be | 36.76699 | 149 | 0.537018 | 3.029595 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/ntvru.py | 13 | 5054 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
strip_or_none,
unescapeHTML,
xpath_text,
)
class NTVRuIE(InfoExtractor):
    IE_NAME = 'ntv.ru'
    _VALID_URL = r'https?://(?:www\.)?ntv\.ru/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.ntv.ru/novosti/863142/',
        'md5': 'ba7ea172a91cb83eb734cad18c10e723',
        'info_dict': {
            'id': '746000',
            'ext': 'mp4',
            'title': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины',
            'description': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины',
            'thumbnail': r're:^http://.*\.jpg',
            'duration': 136,
        },
    }, {
        'url': 'http://www.ntv.ru/video/novosti/750370/',
        'md5': 'adecff79691b4d71e25220a191477124',
        'info_dict': {
            'id': '750370',
            'ext': 'mp4',
            'title': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
            'description': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
            'thumbnail': r're:^http://.*\.jpg',
            'duration': 172,
        },
    }, {
        'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416',
        'md5': '82dbd49b38e3af1d00df16acbeab260c',
        'info_dict': {
            'id': '747480',
            'ext': 'mp4',
            'title': '«Сегодня». 21 марта 2014 года. 16:00',
            'description': '«Сегодня». 21 марта 2014 года. 16:00',
            'thumbnail': r're:^http://.*\.jpg',
            'duration': 1496,
        },
    }, {
        'url': 'https://www.ntv.ru/kino/Koma_film/m70281/o336036/video/',
        'md5': 'e9c7cde24d9d3eaed545911a04e6d4f4',
        'info_dict': {
            'id': '1126480',
            'ext': 'mp4',
            'title': 'Остросюжетный фильм «Кома»',
            'description': 'Остросюжетный фильм «Кома»',
            'thumbnail': r're:^http://.*\.jpg',
            'duration': 5592,
        },
    }, {
        'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/',
        'md5': '9320cd0e23f3ea59c330dc744e06ff3b',
        'info_dict': {
            'id': '751482',
            'ext': 'mp4',
            'title': '«Дело врачей»: «Деревце жизни»',
            'description': '«Дело врачей»: «Деревце жизни»',
            'thumbnail': r're:^http://.*\.jpg',
            'duration': 2590,
        },
    }, {
        # Schemeless file URL
        'url': 'https://www.ntv.ru/video/1797442',
        'only_matching': True,
    }]
    # Fallback patterns for locating the numeric video id in the page markup.
    _VIDEO_ID_REGEXES = [
        r'<meta property="og:url" content="http://www\.ntv\.ru/video/(\d+)',
        r'<video embed=[^>]+><id>(\d+)</id>',
        r'<video restriction[^>]+><key>(\d+)</key>',
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Prefer the og:video / og:video:iframe URL, which embeds the real
        # numeric id; otherwise fall back to scanning the markup.
        video_url = self._og_search_property(
            ('video', 'video:iframe'), webpage, default=None)
        if video_url:
            video_id = self._search_regex(
                r'https?://(?:www\.)?ntv\.ru/video/(?:embed/)?(\d+)',
                video_url, 'video id', default=None)
        if not video_id:
            video_id = self._html_search_regex(
                self._VIDEO_ID_REGEXES, webpage, 'video id')
        # Video metadata is served as XML by the player endpoint.
        player = self._download_xml(
            'http://www.ntv.ru/vi%s/' % video_id,
            video_id, 'Downloading video XML')
        title = strip_or_none(unescapeHTML(xpath_text(player, './data/title', 'title', fatal=True)))
        video = player.find('./data/video')
        formats = []
        # '' = SD, 'hi' = HD, 'webm' = WebM variant; not all are present.
        for format_id in ['', 'hi', 'webm']:
            file_ = xpath_text(video, './%sfile' % format_id)
            if not file_:
                continue
            if file_.startswith('//'):
                # Scheme-relative URL: inherit the request scheme.
                file_ = self._proto_relative_url(file_)
            elif not file_.startswith('http'):
                # Bare filename: resolve against the VOD media host.
                file_ = 'http://media.ntv.ru/vod/' + file_
            formats.append({
                'url': file_,
                'filesize': int_or_none(xpath_text(video, './%ssize' % format_id)),
            })
        self._sort_formats(formats)
        return {
            'id': xpath_text(video, './id'),
            'title': title,
            'description': strip_or_none(unescapeHTML(xpath_text(player, './data/description'))),
            'thumbnail': xpath_text(video, './splash'),
            'duration': int_or_none(xpath_text(video, './totaltime')),
            'view_count': int_or_none(xpath_text(video, './views')),
            'formats': formats,
        }
| unlicense | 8990a315d0d793fd10b63b72948fe57d | 34.816794 | 100 | 0.503836 | 2.814637 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/gaskrank.py | 50 | 3847 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
unified_strdate,
)
class GaskrankIE(InfoExtractor):
    """Extractor for gaskrank.tv motorcycle video pages."""
    _VALID_URL = r'https?://(?:www\.)?gaskrank\.tv/tv/(?P<categories>[^/]+)/(?P<id>[^/]+)\.htm'
    _TESTS = [{
        'url': 'http://www.gaskrank.tv/tv/motorrad-fun/strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden.htm',
        'md5': '1ae88dbac97887d85ebd1157a95fc4f9',
        'info_dict': {
            'id': '201601/26955',
            'ext': 'mp4',
            'title': 'Strike! Einparken können nur Männer - Flurschaden hält sich in Grenzen *lol*',
            'thumbnail': r're:^https?://.*\.jpg$',
            'categories': ['motorrad-fun'],
            'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden',
            'uploader_id': 'Bikefun',
            'upload_date': '20170110',
            'uploader_url': None,
        }
    }, {
        'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm',
        'md5': 'c33ee32c711bc6c8224bfcbe62b23095',
        'info_dict': {
            'id': '201106/15920',
            'ext': 'mp4',
            'title': 'Isle of Man - Michael Dunlop vs Guy Martin - schwindelig kucken',
            'thumbnail': r're:^https?://.*\.jpg$',
            'categories': ['racing'],
            'display_id': 'isle-of-man-tt-2011-michael-du-15920',
            'uploader_id': 'IOM',
            'upload_date': '20170523',
            'uploader_url': 'www.iomtt.com',
        }
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        title = self._og_search_title(
            webpage, default=None) or self._html_search_meta(
            'title', webpage, fatal=True)
        categories = [re.match(self._VALID_URL, url).group('categories')]
        # Bug fix: uploader_id/upload_date were previously only assigned
        # inside the `if mobj is not None:` branch, so pages without the
        # "Video von: ... | vom: ..." line raised NameError below.
        uploader_id = None
        upload_date = None
        mobj = re.search(
            r'Video von:\s*(?P<uploader_id>[^|]*?)\s*\|\s*vom:\s*(?P<upload_date>[0-9][0-9]\.[0-9][0-9]\.[0-9][0-9][0-9][0-9])',
            webpage)
        if mobj is not None:
            uploader_id = mobj.groupdict().get('uploader_id')
            upload_date = unified_strdate(mobj.groupdict().get('upload_date'))
        uploader_url = self._search_regex(
            r'Homepage:\s*<[^>]*>(?P<uploader_url>[^<]*)',
            webpage, 'uploader_url', default=None)
        tags = re.findall(
            r'/tv/tags/[^/]+/"\s*>(?P<tag>[^<]*?)<',
            webpage)
        view_count = self._search_regex(
            r'class\s*=\s*"gkRight"(?:[^>]*>\s*<[^>]*)*icon-eye-open(?:[^>]*>\s*<[^>]*)*>\s*(?P<view_count>[0-9\.]*)',
            webpage, 'view_count', default=None)
        if view_count:
            # German number formatting uses '.' as a thousands separator.
            view_count = int_or_none(view_count.replace('.', ''))
        average_rating = self._search_regex(
            r'itemprop\s*=\s*"ratingValue"[^>]*>\s*(?P<average_rating>[0-9,]+)',
            webpage, 'average_rating')
        if average_rating:
            # German decimal comma -> decimal point.
            average_rating = float_or_none(average_rating.replace(',', '.'))
        # The media URL encodes the canonical id (e.g. '201601/26955').
        video_id = self._search_regex(
            r'https?://movies\.gaskrank\.tv/([^-]*?)(-[^\.]*)?\.mp4',
            webpage, 'video id', default=display_id)
        entry = self._parse_html5_media_entries(url, webpage, video_id)[0]
        entry.update({
            'id': video_id,
            'title': title,
            'categories': categories,
            'display_id': display_id,
            'uploader_id': uploader_id,
            'upload_date': upload_date,
            'uploader_url': uploader_url,
            'tags': tags,
            'view_count': view_count,
            'average_rating': average_rating,
        })
        self._sort_formats(entry['formats'])
        return entry
| unlicense | ff521dc80480bc206c9ec84f3fe8c4e7 | 37.059406 | 128 | 0.523413 | 3.23569 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/puls4.py | 68 | 2295 | # coding: utf-8
from __future__ import unicode_literals
from .prosiebensat1 import ProSiebenSat1BaseIE
from ..utils import (
unified_strdate,
parse_duration,
compat_str,
)
class Puls4IE(ProSiebenSat1BaseIE):
    """Extractor for puls4.com, built on the ProSiebenSat.1 video API."""
    _VALID_URL = r'https?://(?:www\.)?puls4\.com/(?P<id>[^?#&]+)'
    _TESTS = [{
        'url': 'http://www.puls4.com/2-minuten-2-millionen/staffel-3/videos/2min2miotalk/Tobias-Homberger-von-myclubs-im-2min2miotalk-118118',
        'md5': 'fd3c6b0903ac72c9d004f04bc6bb3e03',
        'info_dict': {
            'id': '118118',
            'ext': 'flv',
            'title': 'Tobias Homberger von myclubs im #2min2miotalk',
            'description': 'md5:f9def7c5e8745d6026d8885487d91955',
            'upload_date': '20160830',
            'uploader': 'PULS_4',
        },
    }, {
        'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident.-Norbert-Hofer',
        'only_matching': True,
    }, {
        'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident-Analyse-des-Interviews-mit-Norbert-Hofer-416598',
        'only_matching': True,
    }]
    _TOKEN = 'puls4'
    _SALT = '01!kaNgaiNgah1Ie4AeSha'
    _CLIENT_NAME = ''
    def _real_extract(self, url):
        display_id = self._match_id(url)
        # First resolve the page path to the content API endpoint...
        page = self._download_json(
            'http://www.puls4.com/api/json-fe/page/' + display_id, display_id)
        media_path = page['content'][0]['url']
        # ...then fetch the media description with the current player data.
        media = self._download_json(
            'http://www.puls4.com' + media_path,
            media_path)['mediaCurrent']
        player_content = media['playerContent']
        # Formats/protection handling is shared with ProSiebenSat1BaseIE.
        info = self._extract_video_info(url, player_content['id'])
        metadata = {
            'id': compat_str(media['objectId']),
            'title': player_content['title'],
            'description': media.get('description'),
            'thumbnail': media.get('previewLink'),
            'upload_date': unified_strdate(media.get('date')),
            'duration': parse_duration(player_content.get('duration')),
            'episode': player_content.get('episodePartName'),
            'show': media.get('channel'),
            'season_id': player_content.get('seasonId'),
            'uploader': player_content.get('sourceCompany'),
        }
        info.update(metadata)
        return info
| unlicense | 74ed6b07e588d26557ab50d40b359dda | 39.263158 | 153 | 0.5878 | 3.003927 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/wwe.py | 20 | 4532 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
try_get,
unescapeHTML,
url_or_none,
urljoin,
)
class WWEBaseIE(InfoExtractor):
    # Maps the human-readable caption labels used by wwe.com to
    # ISO 639-1 codes for the subtitles dict keys.
    _SUBTITLE_LANGS = {
        'English': 'en',
        'Deutsch': 'de',
    }
    def _extract_entry(self, data, url, video_id=None):
        """Build an info dict from one wwe.com video JSON object.

        data: the JSON payload describing a single video (as found in a
            data-video attribute or in the Drupal settings blob).
        url: page URL, used to resolve the relative thumbnail path.
        Raises KeyError when mandatory fields ('nid', 'title', 'file')
        are missing; callers may catch this to skip broken entries.
        """
        video_id = compat_str(video_id or data['nid'])
        title = data['title']
        formats = self._extract_m3u8_formats(
            data['file'], video_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')
        description = data.get('description')
        thumbnail = urljoin(url, data.get('image'))
        series = data.get('show_name')
        episode = data.get('episode_name')
        subtitles = {}
        tracks = data.get('tracks')
        if isinstance(tracks, list):
            for track in tracks:
                if not isinstance(track, dict):
                    continue
                # Only caption tracks become subtitles.
                if track.get('kind') != 'captions':
                    continue
                track_file = url_or_none(track.get('file'))
                if not track_file:
                    continue
                label = track.get('label')
                # Unknown labels fall through unchanged; empty -> 'en'.
                lang = self._SUBTITLE_LANGS.get(label, label) or 'en'
                subtitles.setdefault(lang, []).append({
                    'url': track_file,
                })
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'series': series,
            'episode': episode,
            'formats': formats,
            'subtitles': subtitles,
        }
class WWEIE(WWEBaseIE):
    # Single-video extractor for wwe.com /videos/ pages.
    _VALID_URL = r'https?://(?:[^/]+\.)?wwe\.com/(?:[^/]+/)*videos/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.wwe.com/videos/daniel-bryan-vs-andrade-cien-almas-smackdown-live-sept-4-2018',
        'md5': '92811c6a14bfc206f7a6a9c5d9140184',
        'info_dict': {
            'id': '40048199',
            'ext': 'mp4',
            'title': 'Daniel Bryan vs. Andrade "Cien" Almas: SmackDown LIVE, Sept. 4, 2018',
            'description': 'md5:2d7424dbc6755c61a0e649d2a8677f67',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'https://de.wwe.com/videos/gran-metalik-vs-tony-nese-wwe-205-live-sept-4-2018',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Video metadata is embedded in the page's Drupal.settings JSON
        # blob under the WWEVideoLanding key.
        landing = self._parse_json(
            self._html_search_regex(
                r'(?s)Drupal\.settings\s*,\s*({.+?})\s*\)\s*;',
                webpage, 'drupal settings'),
            display_id)['WWEVideoLanding']
        # The landing object carries a one-entry playlist for the page's video.
        data = landing['initialVideo']['playlist'][0]
        video_id = landing.get('initialVideoId')
        info = self._extract_entry(data, url, video_id)
        info['display_id'] = display_id
        return info
class WWEPlaylistIE(WWEBaseIE):
    # Catch-all extractor for wwe.com pages embedding multiple videos
    # (show pages, articles).
    _VALID_URL = r'https?://(?:[^/]+\.)?wwe\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.wwe.com/shows/raw/2018-11-12',
        'info_dict': {
            'id': '2018-11-12',
        },
        'playlist_mincount': 11,
    }, {
        'url': 'http://www.wwe.com/article/walk-the-prank-wwe-edition',
        'only_matching': True,
    }, {
        'url': 'https://www.wwe.com/shows/wwenxt/article/matt-riddle-interview',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Defer /videos/ URLs to the single-video extractor; this broad
        # pattern would otherwise match them as well.
        return False if WWEIE.suitable(url) else super(WWEPlaylistIE, cls).suitable(url)
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        entries = []
        # Each embedded player carries its JSON payload in a (HTML-escaped)
        # data-video attribute.
        for mobj in re.finditer(
                r'data-video\s*=\s*(["\'])(?P<data>{.+?})\1', webpage):
            video = self._parse_json(
                mobj.group('data'), display_id, transform_source=unescapeHTML,
                fatal=False)
            if not video:
                continue
            data = try_get(video, lambda x: x['playlist'][0], dict)
            if not data:
                continue
            try:
                entry = self._extract_entry(data, url)
            except Exception:
                # Entries missing mandatory fields are skipped rather than
                # failing the whole playlist.
                continue
            entry['extractor_key'] = WWEIE.ie_key()
            entries.append(entry)
        return self.playlist_result(entries, display_id)
| unlicense | 8d8e65b33af91421829824ad7bcdec1c | 31.371429 | 106 | 0.515666 | 3.540625 | false | false | false | false |
rbrito/pkg-youtube-dl | devscripts/make_supportedsites.py | 36 | 1153 | #!/usr/bin/env python
from __future__ import unicode_literals
import io
import optparse
import os
import sys
# Import youtube_dl
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, ROOT_DIR)
import youtube_dl
def main():
    """Write OUTFILE.md: a markdown list of every supported site."""
    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
    _, args = parser.parse_args()
    if len(args) != 1:
        parser.error('Expected an output filename')
    outfile = args[0]

    def describe(extractor):
        # Render one markdown bullet body, or None when the extractor
        # opts out of the listing via IE_DESC = False.
        desc = getattr(extractor, 'IE_DESC', None)
        if desc is False:
            return None
        line = '**{0}**'.format(extractor.IE_NAME)
        if desc is not None:
            line += ': {0}'.format(extractor.IE_DESC)
        if not extractor.working():
            line += ' (Currently broken)'
        return line

    extractors = sorted(
        youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower())
    pieces = ['# Supported sites\n']
    for extractor in extractors:
        md = describe(extractor)
        if md is not None:
            pieces.append(' - ' + md + '\n')
    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(''.join(pieces))
if __name__ == '__main__':
main()
| unlicense | 5a711ef936786a4eb9e5a1e7abb8b1fc | 24.065217 | 78 | 0.546401 | 3.351744 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/ntvcojp.py | 17 | 1939 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
js_to_json,
smuggle_url,
)
class NTVCoJpCUIE(InfoExtractor):
    IE_NAME = 'cu.ntv.co.jp'
    IE_DESC = 'Nippon Television Network'
    _VALID_URL = r'https?://cu\.ntv\.co\.jp/(?!program)(?P<id>[^/?&#]+)'
    _TEST = {
        'url': 'https://cu.ntv.co.jp/televiva-chill-gohan_181031/',
        'info_dict': {
            'id': '5978891207001',
            'ext': 'mp4',
            'title': '桜エビと炒り卵がポイント! 「中華風 エビチリおにぎり」──『美虎』五十嵐美幸',
            'upload_date': '20181213',
            'description': 'md5:211b52f4fd60f3e0e72b68b0c6ba52a9',
            'uploader_id': '3855502814001',
            'timestamp': 1544669941,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }
    # Videos are hosted on Brightcove; extraction is delegated to the
    # BrightcoveNew extractor via this player URL template.
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # PLAYER_CONFIG is a JavaScript object literal, hence js_to_json.
        player_config = self._parse_json(self._search_regex(
            r'(?s)PLAYER_CONFIG\s*=\s*({.+?})',
            webpage, 'player config'), display_id, js_to_json)
        video_id = player_config['videoId']
        # Fall back to the known NTV Brightcove account id when absent.
        account_id = player_config.get('account') or '3855502814001'
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'display_id': display_id,
            'title': self._search_regex(r'<h1[^>]+class="title"[^>]*>([^<]+)', webpage, 'title').strip(),
            'description': self._html_search_meta(['description', 'og:description'], webpage),
            'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % (account_id, video_id), {'geo_countries': ['JP']}),
            'ie_key': 'BrightcoveNew',
        }
| unlicense | 24f33835a196c7c7f12c58d11409b3ea | 37.061224 | 113 | 0.551206 | 2.927786 | false | true | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/once.py | 19 | 2167 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class OnceIE(InfoExtractor):
    """Base extractor for the Once (Unicorn Media) delivery platform.

    Provides _extract_once_formats(), which derives both the HLS
    (adaptive) formats and, where possible, matching progressive MP4
    formats from a Once content URL.
    """
    _VALID_URL = r'https?://.+?\.unicornmedia\.com/now/(?:ads/vmap/)?[^/]+/[^/]+/(?P<domain_id>[^/]+)/(?P<application_id>[^/]+)/(?:[^/]+/)?(?P<media_item_id>[^/]+)/content\.(?:once|m3u8|mp4)'
    ADAPTIVE_URL_TEMPLATE = 'http://once.unicornmedia.com/now/master/playlist/%s/%s/%s/content.m3u8'
    PROGRESSIVE_URL_TEMPLATE = 'http://once.unicornmedia.com/now/media/progressive/%s/%s/%s/%s/content.mp4'
    def _extract_once_formats(self, url, http_formats_preference=None):
        """Extract formats for a URL matching _VALID_URL.

        http_formats_preference is passed through as the 'preference'
        value of the derived progressive HTTP formats.
        """
        domain_id, application_id, media_item_id = re.match(
            OnceIE._VALID_URL, url).groups()
        formats = self._extract_m3u8_formats(
            self.ADAPTIVE_URL_TEMPLATE % (
                domain_id, application_id, media_item_id),
            media_item_id, 'mp4', m3u8_id='hls', fatal=False)
        progressive_formats = []
        for adaptive_format in formats:
            # Prevent advertisement from embedding into m3u8 playlist (see
            # https://github.com/ytdl-org/youtube-dl/issues/8893#issuecomment-199912684)
            adaptive_format['url'] = re.sub(
                r'\badsegmentlength=\d+', r'adsegmentlength=0', adaptive_format['url'])
            # Fix: field name was misspelled 'redition id', which leaked
            # into "Unable to extract ..." log/error messages.
            rendition_id = self._search_regex(
                r'/now/media/playlist/[^/]+/[^/]+/([^/]+)',
                adaptive_format['url'], 'rendition id', default=None)
            if rendition_id:
                # Derive a progressive MP4 counterpart per HLS rendition.
                progressive_format = adaptive_format.copy()
                progressive_format.update({
                    'url': self.PROGRESSIVE_URL_TEMPLATE % (
                        domain_id, application_id, rendition_id, media_item_id),
                    'format_id': adaptive_format['format_id'].replace(
                        'hls', 'http'),
                    'protocol': 'http',
                    'preference': http_formats_preference,
                })
                progressive_formats.append(progressive_format)
        # Probe the guessed progressive URLs and drop unreachable ones.
        self._check_formats(progressive_formats, media_item_id)
        formats.extend(progressive_formats)
        return formats
| unlicense | 30aac8bd9c20fc77b6e74b79b16631f6 | 49.395349 | 191 | 0.576373 | 3.629816 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/nhl.py | 19 | 5004 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
parse_duration,
)
class NHLBaseIE(InfoExtractor):
    # Shared extraction for the BAMTech-backed NHL/MLB sites; subclasses
    # supply _VALID_URL (with 'site' and 'id' groups) and _CONTENT_DOMAIN.
    def _real_extract(self, url):
        site, tmp_id = re.match(self._VALID_URL, url).groups()
        # The details endpoint differs slightly per site: mlb uses an
        # extra 'item/' path segment and only the first 3 chars of site.
        video_data = self._download_json(
            'https://%s/%s/%sid/v1/%s/details/web-v1.json'
            % (self._CONTENT_DOMAIN, site[:3], 'item/' if site == 'mlb' else '', tmp_id), tmp_id)
        if video_data.get('type') != 'video':
            # Article pages nest the actual video under 'media'.
            video_data = video_data['media']
            video = video_data.get('video')
            if video:
                video_data = video
            else:
                videos = video_data.get('videos')
                if videos:
                    video_data = videos[0]
        video_id = compat_str(video_data['id'])
        title = video_data['title']
        formats = []
        for playback in video_data.get('playbacks', []):
            playback_url = playback.get('url')
            if not playback_url:
                continue
            ext = determine_ext(playback_url)
            if ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    playback_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=playback.get('name', 'hls'), fatal=False)
                # Some m3u8 URLs are invalid; probe and drop broken ones
                # (see test case referencing issue 10713 in subclasses).
                self._check_formats(m3u8_formats, video_id)
                formats.extend(m3u8_formats)
            else:
                height = int_or_none(playback.get('height'))
                formats.append({
                    'format_id': playback.get('name', 'http' + ('-%dp' % height if height else '')),
                    'url': playback_url,
                    'width': int_or_none(playback.get('width')),
                    'height': height,
                    'tbr': int_or_none(self._search_regex(r'_(\d+)[kK]', playback_url, 'bitrate', default=None)),
                })
        self._sort_formats(formats)
        thumbnails = []
        # 'cuts' may be a dict keyed by size or a plain list of entries.
        cuts = video_data.get('image', {}).get('cuts') or []
        if isinstance(cuts, dict):
            cuts = cuts.values()
        for thumbnail_data in cuts:
            thumbnail_url = thumbnail_data.get('src')
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int_or_none(thumbnail_data.get('width')),
                'height': int_or_none(thumbnail_data.get('height')),
            })
        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('description'),
            'timestamp': parse_iso8601(video_data.get('date')),
            'duration': parse_duration(video_data.get('duration')),
            'thumbnails': thumbnails,
            'formats': formats,
        }
class NHLIE(NHLBaseIE):
    """Extractor for nhl.com and wch2016.com videos.

    All extraction logic lives in NHLBaseIE._real_extract; this class
    only provides the URL pattern and the content API domain.
    """
    IE_NAME = 'nhl.com'
    _VALID_URL = r'https?://(?:www\.)?(?P<site>nhl|wch2016)\.com/(?:[^/]+/)*c-(?P<id>\d+)'
    _CONTENT_DOMAIN = 'nhl.bamcontent.com'
    _TESTS = [{
        # type=video
        'url': 'https://www.nhl.com/video/anisimov-cleans-up-mess/t-277752844/c-43663503',
        'md5': '0f7b9a8f986fb4b4eeeece9a56416eaf',
        'info_dict': {
            'id': '43663503',
            'ext': 'mp4',
            'title': 'Anisimov cleans up mess',
            'description': 'md5:a02354acdfe900e940ce40706939ca63',
            'timestamp': 1461288600,
            'upload_date': '20160422',
        },
    }, {
        # type=article
        'url': 'https://www.nhl.com/news/dennis-wideman-suspended/c-278258934',
        'md5': '1f39f4ea74c1394dea110699a25b366c',
        'info_dict': {
            'id': '40784403',
            'ext': 'mp4',
            'title': 'Wideman suspended by NHL',
            'description': 'Flames defenseman Dennis Wideman was banned 20 games for violation of Rule 40 (Physical Abuse of Officials)',
            'upload_date': '20160204',
            'timestamp': 1454544904,
        },
    }, {
        # Some m3u8 URLs are invalid (https://github.com/ytdl-org/youtube-dl/issues/10713)
        'url': 'https://www.nhl.com/predators/video/poile-laviolette-on-subban-trade/t-277437416/c-44315003',
        'md5': '50b2bb47f405121484dda3ccbea25459',
        'info_dict': {
            'id': '44315003',
            'ext': 'mp4',
            'title': 'Poile, Laviolette on Subban trade',
            'description': 'General manager David Poile and head coach Peter Laviolette share their thoughts on acquiring P.K. Subban from Montreal (06/29/16)',
            'timestamp': 1467242866,
            'upload_date': '20160629',
        },
    }, {
        'url': 'https://www.wch2016.com/video/caneur-best-of-game-2-micd-up/t-281230378/c-44983703',
        'only_matching': True,
    }, {
        'url': 'https://www.wch2016.com/news/3-stars-team-europe-vs-team-canada/c-282195068',
        'only_matching': True,
    }]
| unlicense | 98fc26fcf555e25daf383aa8dfd3a899 | 38.09375 | 160 | 0.529177 | 3.365165 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/vodlocker.py | 64 | 2796 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
NO_DEFAULT,
sanitized_Request,
urlencode_postdata,
)
class VodlockerIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vodlocker\.(?:com|city)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
    _TESTS = [{
        'url': 'http://vodlocker.com/e8wvyzz4sl42',
        'md5': 'ce0c2d18fa0735f1bd91b69b0e54aacf',
        'info_dict': {
            'id': 'e8wvyzz4sl42',
            'ext': 'mp4',
            'title': 'Germany vs Brazil',
            'thumbnail': r're:http://.*\.jpg',
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Recognize the site's various "video deleted" placeholder pages.
        if any(p in webpage for p in (
                '>THIS FILE WAS DELETED<',
                '>File Not Found<',
                'The file you were looking for could not be found, sorry for any inconvenience.<',
                '>The file was removed')):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
        fields = self._hidden_inputs(webpage)
        # A download1 form means an interstitial page that must be POSTed
        # back before the real player page is served.
        if fields['op'] == 'download1':
            self._sleep(3, video_id)  # they do detect when requests happen too fast!
            post = urlencode_postdata(fields)
            req = sanitized_Request(url, post)
            req.add_header('Content-type', 'application/x-www-form-urlencoded')
            webpage = self._download_webpage(
                req, video_id, 'Downloading video page')
        def extract_file_url(html, default=NO_DEFAULT):
            # The media URL appears as file: "http..." in the player setup.
            return self._search_regex(
                r'file:\s*"(http[^\"]+)",', html, 'file url', default=default)
        video_url = extract_file_url(webpage, default=None)
        if not video_url:
            # Fall back to the embed page when the main page lacks the URL.
            embed_url = self._search_regex(
                r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?vodlocker\.(?:com|city)/embed-.+?)\1',
                webpage, 'embed url', group='url')
            embed_webpage = self._download_webpage(
                embed_url, video_id, 'Downloading embed webpage')
            video_url = extract_file_url(embed_webpage)
            thumbnail_webpage = embed_webpage
        else:
            thumbnail_webpage = webpage
        title = self._search_regex(
            r'id="file_title".*?>\s*(.*?)\s*<(?:br|span)', webpage, 'title')
        thumbnail = self._search_regex(
            r'image:\s*"(http[^\"]+)",', thumbnail_webpage, 'thumbnail', fatal=False)
        formats = [{
            'format_id': 'sd',
            'url': video_url,
        }]
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| unlicense | 2b3dec56bf1ec2de1f2505e60c07a381 | 33.95 | 104 | 0.529685 | 3.713147 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/bravotv.py | 5 | 3775 | # coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..utils import (
smuggle_url,
update_url_query,
int_or_none,
)
class BravoTVIE(AdobePassIE):
    # Extractor for bravotv.com and oxygen.com; playback is delegated to
    # ThePlatform, optionally gated behind Adobe Pass TV-Everywhere auth.
    _VALID_URL = r'https?://(?:www\.)?(?P<req_id>bravotv|oxygen)\.com/(?:[^/]+/)+(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.bravotv.com/top-chef/season-16/episode-15/videos/the-top-chef-season-16-winner-is',
        'md5': 'e34684cfea2a96cd2ee1ef3a60909de9',
        'info_dict': {
            'id': 'epL0pmK1kQlT',
            'ext': 'mp4',
            'title': 'The Top Chef Season 16 Winner Is...',
            'description': 'Find out who takes the title of Top Chef!',
            'uploader': 'NBCU-BRAV',
            'upload_date': '20190314',
            'timestamp': 1552591860,
        }
    }, {
        'url': 'http://www.bravotv.com/below-deck/season-3/ep-14-reunion-part-1',
        'only_matching': True,
    }, {
        'url': 'https://www.oxygen.com/in-ice-cold-blood/season-2/episode-16/videos/handling-the-horwitz-house-after-the-murder-season-2',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        site, display_id = re.match(self._VALID_URL, url).groups()
        webpage = self._download_webpage(url, display_id)
        # Page metadata lives in the embedded Drupal settings JSON.
        settings = self._parse_json(self._search_regex(
            r'<script[^>]+data-drupal-selector="drupal-settings-json"[^>]*>({.+?})</script>', webpage, 'drupal settings'),
            display_id)
        info = {}
        query = {
            'mbr': 'true',
        }
        account_pid, release_pid = [None] * 2
        # ls_tve present => TV-Everywhere (possibly auth-gated) video;
        # otherwise the page carries a shared playlist (ls_playlist).
        tve = settings.get('ls_tve')
        if tve:
            query['manifest'] = 'm3u'
            # Prefer the account/path from the embedded pdk-player iframe.
            mobj = re.search(r'<[^>]+id="pdk-player"[^>]+data-url=["\']?(?:https?:)?//player\.theplatform\.com/p/([^/]+)/(?:[^/]+/)*select/([^?#&"\']+)', webpage)
            if mobj:
                account_pid, tp_path = mobj.groups()
                release_pid = tp_path.strip('/').split('/')[-1]
            else:
                account_pid = 'HNK2IC'
                tp_path = release_pid = tve['release_pid']
            if tve.get('entitlement') == 'auth':
                # Entitled content: obtain an Adobe Pass token for the query.
                adobe_pass = settings.get('tve_adobe_auth', {})
                if site == 'bravotv':
                    site = 'bravo'
                resource = self._get_mvpd_resource(
                    adobe_pass.get('adobePassResourceId') or site,
                    tve['title'], release_pid, tve.get('rating'))
                query['auth'] = self._extract_mvpd_auth(
                    url, release_pid,
                    adobe_pass.get('adobePassRequestorId') or site, resource)
        else:
            shared_playlist = settings['ls_playlist']
            account_pid = shared_playlist['account_pid']
            metadata = shared_playlist['video_metadata'][shared_playlist['default_clip']]
            tp_path = release_pid = metadata.get('release_pid')
            if not release_pid:
                release_pid = metadata['guid']
                tp_path = 'media/guid/2140479951/' + release_pid
            info.update({
                'title': metadata['title'],
                'description': metadata.get('description'),
                'season_number': int_or_none(metadata.get('season_num')),
                'episode_number': int_or_none(metadata.get('episode_num')),
            })
            query['switch'] = 'progressive'
        # Hand off to the ThePlatform extractor with the assembled SMIL URL.
        info.update({
            '_type': 'url_transparent',
            'id': release_pid,
            'url': smuggle_url(update_url_query(
                'http://link.theplatform.com/s/%s/%s' % (account_pid, tp_path),
                query), {'force_smil_url': True}),
            'ie_key': 'ThePlatform',
        })
        return info
| unlicense | 0bb476a23fc570077a81a383d0c71179 | 40.944444 | 162 | 0.514702 | 3.42559 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/tele13.py | 90 | 3345 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
js_to_json,
qualities,
determine_ext,
)
class Tele13IE(InfoExtractor):
    # Extractor for the Chilean broadcaster T13 (t13.cl).
    _VALID_URL = r'^https?://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)'
    _TESTS = [
        {
            'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
            'md5': '4cb1fa38adcad8fea88487a078831755',
            'info_dict': {
                'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
                'ext': 'mp4',
                'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda',
            },
            'params': {
                # HTTP Error 404: Not Found
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok',
            'md5': '867adf6a3b3fef932c68a71d70b70946',
            'info_dict': {
                'id': 'rOoKv2OMpOw',
                'ext': 'mp4',
                'title': 'Shooting star seen on 7-Sep-2015',
                'description': 'md5:7292ff2a34b2f673da77da222ae77e1e',
                'uploader': 'Porjai Jaturongkhakun',
                'upload_date': '20150906',
                'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw',
            },
            'add_ie': ['Youtube'],
        }
    ]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Media data lives in the page's jwplayer setup call.
        setup_js = self._search_regex(
            r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)",
            webpage, 'setup code')
        sources = self._parse_json(self._search_regex(
            r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'),
            display_id, js_to_json)
        # Quality ranking for the site's labels (lowest to highest).
        preference = qualities(['Móvil', 'SD', 'HD'])
        formats = []
        urls = []
        for f in sources:
            format_url = f['file']
            # Skip empty and duplicate source URLs.
            if format_url and format_url not in urls:
                ext = determine_ext(format_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, display_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
                elif YoutubeIE.suitable(format_url):
                    # Some videos are plain YouTube embeds; delegate fully.
                    return self.url_result(format_url, 'Youtube')
                else:
                    formats.append({
                        'url': format_url,
                        'format_id': f.get('label'),
                        'preference': preference(f.get('label')),
                        'ext': ext,
                    })
                urls.append(format_url)
        self._sort_formats(formats)
        return {
            'id': display_id,
            'title': self._search_regex(
                r'title\s*:\s*"([^"]+)"', setup_js, 'title'),
            'description': self._html_search_meta(
                'description', webpage, 'description'),
            'thumbnail': self._search_regex(
                r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None),
            'formats': formats,
        }
| unlicense | 1cb54e87c5916d038568301f5b05b8a8 | 36.988636 | 125 | 0.483697 | 3.442842 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/movingimage.py | 64 | 1774 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unescapeHTML,
parse_duration,
)
class MovingImageIE(InfoExtractor):
    """Extractor for the National Library of Scotland's Moving Image
    Archive (movingimage.nls.uk)."""
    _VALID_URL = r'https?://movingimage\.nls\.uk/film/(?P<id>\d+)'
    _TEST = {
        'url': 'http://movingimage.nls.uk/film/3561',
        'md5': '4caa05c2b38453e6f862197571a7be2f',
        'info_dict': {
            'id': '3561',
            'ext': 'mp4',
            'title': 'SHETLAND WOOL',
            'description': 'md5:c5afca6871ad59b4271e7704fe50ab04',
            'duration': 900,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        m3u8_url = self._html_search_regex(
            r'file\s*:\s*"([^"]+)"', webpage, 'm3u8 manifest URL')
        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native')

        def field_value(label, fatal=False):
            # Metadata is laid out as a field_title span holding the
            # label followed by a field_content span holding the value.
            pattern = (
                r'<span\s+class="field_title">%s:</span>\s*'
                r'<span\s+class="field_content">([^<]+)</span>' % label)
            return self._search_regex(pattern, webpage, 'title', fatal=fatal)

        return {
            'id': video_id,
            'formats': formats,
            'title': unescapeHTML(field_value('Title', fatal=True)).strip('()[]'),
            'description': unescapeHTML(field_value('Description')),
            'duration': parse_duration(field_value('Running time')),
            'thumbnail': self._search_regex(
                r"image\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False),
        }
| unlicense | f81960c7e06bcc4f3592484ce7abb51a | 33.115385 | 118 | 0.546787 | 3.512871 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/bpb.py | 36 | 2204 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
js_to_json,
determine_ext,
)
class BpbIE(InfoExtractor):
    IE_DESC = 'Bundeszentrale für politische Bildung'
    _VALID_URL = r'https?://(?:www\.)?bpb\.de/mediathek/(?P<id>[0-9]+)/'
    _TEST = {
        'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
        # md5 fails in Python 2.6 due to buggy server response and wrong handling of urllib2
        'md5': 'c4f84c8a8044ca9ff68bb8441d300b3f',
        'info_dict': {
            'id': '297',
            'ext': 'mp4',
            'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
            'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.'
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(
            r'<h2 class="white">(.*?)</h2>', webpage, 'title')
        # Each quality variant is a separate JS object literal in the page.
        video_info_dicts = re.findall(
            r"({\s*src\s*:\s*'https?://film\.bpb\.de/[^}]+})", webpage)
        formats = []
        for video_info in video_info_dicts:
            video_info = self._parse_json(
                video_info, video_id, transform_source=js_to_json, fatal=False)
            if not video_info:
                continue
            video_url = video_info.get('src')
            if not video_url:
                continue
            # The quality is only encoded in the URL (_high suffix).
            quality = 'high' if '_high' in video_url else 'low'
            formats.append({
                'url': video_url,
                'preference': 10 if quality == 'high' else 0,
                'format_note': quality,
                'format_id': '%s-%s' % (quality, determine_ext(video_url)),
            })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': self._og_search_description(webpage),
        }
| unlicense | c428290ee6e7657a4b5ebd0dcd804baf | 34.5 | 225 | 0.556565 | 3.270431 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/karaoketv.py | 73 | 2340 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KaraoketvIE(InfoExtractor):
    # Extractor for karaoketv.co.il; media is served over RTMP from a
    # third-party CDN (video-cdn.com) reached through two nested iframes.
    _VALID_URL = r'https?://(?:www\.)?karaoketv\.co\.il/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.karaoketv.co.il/%D7%A9%D7%99%D7%A8%D7%99_%D7%A7%D7%A8%D7%99%D7%95%D7%A7%D7%99/58356/%D7%90%D7%99%D7%96%D7%95%D7%9F',
        'info_dict': {
            'id': '58356',
            'ext': 'flv',
            'title': 'קריוקי של איזון',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # First hop: the site's own API player iframe.
        api_page_url = self._search_regex(
            r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.karaoke\.co\.il/api_play\.php\?.+?)\1',
            webpage, 'API play URL', group='url')
        api_page = self._download_webpage(api_page_url, video_id)
        # Second hop: the embedded video-cdn.com player.
        video_cdn_url = self._search_regex(
            r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.video-cdn\.com/embed/iframe/.+?)\1',
            api_page, 'video cdn URL', group='url')
        video_cdn = self._download_webpage(video_cdn_url, video_id)
        play_path = self._parse_json(
            self._search_regex(
                r'var\s+options\s*=\s*({.+?});', video_cdn, 'options'),
            video_id)['clip']['url']
        settings = self._parse_json(
            self._search_regex(
                r'var\s+settings\s*=\s*({.+?});', video_cdn, 'servers', default='{}'),
            video_id, fatal=False) or {}
        # Fall back to the known default RTMP server when none are listed.
        servers = settings.get('servers')
        if not servers or not isinstance(servers, list):
            servers = ('wowzail.video-cdn.com:80/vodcdn', )
        # One RTMP format per announced server.
        formats = [{
            'url': 'rtmp://%s' % server if not server.startswith('rtmp') else server,
            'play_path': play_path,
            'app': 'vodcdn',
            'page_url': video_cdn_url,
            'player_url': 'http://www.video-cdn.com/assets/flowplayer/flowplayer.commercial-3.2.18.swf',
            'rtmp_real_time': True,
            'ext': 'flv',
        } for server in servers]
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
        }
| unlicense | bf72d3fe5d16728b8dc8c5fd821cbae3 | 35.359375 | 143 | 0.510958 | 3.090305 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/internazionale.py | 21 | 3328 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unified_timestamp
class InternazionaleIE(InfoExtractor):
    # Extractor for the Italian magazine Internazionale (internazionale.it).
    _VALID_URL = r'https?://(?:www\.)?internazionale\.it/video/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.internazionale.it/video/2015/02/19/richard-linklater-racconta-una-scena-di-boyhood',
        'md5': '3e39d32b66882c1218e305acbf8348ca',
        'info_dict': {
            'id': '265968',
            'display_id': 'richard-linklater-racconta-una-scena-di-boyhood',
            'ext': 'mp4',
            'title': 'Richard Linklater racconta una scena di Boyhood',
            'description': 'md5:efb7e5bbfb1a54ae2ed5a4a015f0e665',
            'timestamp': 1424354635,
            'upload_date': '20150219',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'params': {
            'format': 'bestvideo',
        },
    }, {
        'url': 'https://www.internazionale.it/video/2018/08/29/telefono-stare-con-noi-stessi',
        'md5': '9db8663704cab73eb972d1cee0082c79',
        'info_dict': {
            'id': '761344',
            'display_id': 'telefono-stare-con-noi-stessi',
            'ext': 'mp4',
            'title': 'Usiamo il telefono per evitare di stare con noi stessi',
            'description': 'md5:75ccfb0d6bcefc6e7428c68b4aa1fe44',
            'timestamp': 1535528954,
            'upload_date': '20180829',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'params': {
            'format': 'bestvideo',
        },
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # All media metadata is exposed through data-* attributes.
        DATA_RE = r'data-%s=(["\'])(?P<value>(?:(?!\1).)+)\1'
        title = self._search_regex(
            DATA_RE % 'video-title', webpage, 'title', default=None,
            group='value') or self._og_search_title(webpage)
        video_id = self._search_regex(
            DATA_RE % 'job-id', webpage, 'video id', group='value')
        video_path = self._search_regex(
            DATA_RE % 'video-path', webpage, 'video path', group='value')
        # Geo-restricted videos ('0') are served from a different subdomain.
        video_available_abroad = self._search_regex(
            DATA_RE % 'video-available_abroad', webpage,
            'video available aboard', default='1', group='value')
        video_available_abroad = video_available_abroad == '1'
        video_base = 'https://video%s.internazionale.it/%s/%s.' % \
            ('' if video_available_abroad else '-ita', video_path, video_id)
        # Both HLS and DASH manifests live next to each other.
        formats = self._extract_m3u8_formats(
            video_base + 'm3u8', display_id, 'mp4',
            entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
        formats.extend(self._extract_mpd_formats(
            video_base + 'mpd', display_id, mpd_id='dash', fatal=False))
        self._sort_formats(formats)
        timestamp = unified_timestamp(self._html_search_meta(
            'article:published_time', webpage, 'timestamp'))
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'timestamp': timestamp,
            'formats': formats,
        }
| unlicense | 42788ebcd3cc57c9eaff6e23ab143426 | 38.152941 | 112 | 0.553486 | 3.29179 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/gamespot.py | 6 | 3156 | from __future__ import unicode_literals
from .once import OnceIE
from ..compat import compat_urllib_parse_unquote
class GameSpotIE(OnceIE):
    # Extractor for gamespot.com videos, articles and reviews; media is
    # delivered through the Once platform (hence the OnceIE base).
    _VALID_URL = r'https?://(?:www\.)?gamespot\.com/(?:video|article|review)s/(?:[^/]+/\d+-|embed/)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
        'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
        'info_dict': {
            'id': 'gs-2300-6410818',
            'ext': 'mp4',
            'title': 'Arma 3 - Community Guide: SITREP I',
            'description': 'Check out this video where some of the basics of Arma 3 is explained.',
        },
        'skip': 'manifest URL give HTTP Error 404: Not Found',
    }, {
        'url': 'http://www.gamespot.com/videos/the-witcher-3-wild-hunt-xbox-one-now-playing/2300-6424837/',
        'md5': '173ea87ad762cf5d3bf6163dceb255a6',
        'info_dict': {
            'id': 'gs-2300-6424837',
            'ext': 'mp4',
            'title': 'Now Playing - The Witcher 3: Wild Hunt',
            'description': 'Join us as we take a look at the early hours of The Witcher 3: Wild Hunt and more.',
        },
    }, {
        'url': 'https://www.gamespot.com/videos/embed/6439218/',
        'only_matching': True,
    }, {
        'url': 'https://www.gamespot.com/articles/the-last-of-us-2-receives-new-ps4-trailer/1100-6454469/',
        'only_matching': True,
    }, {
        'url': 'https://www.gamespot.com/reviews/gears-of-war-review/1900-6161188/',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)
        # The player configuration is embedded as a data-video attribute.
        data_video = self._parse_json(self._html_search_regex(
            r'data-video=(["\'])({.*?})\1', webpage,
            'video data', group=2), page_id)
        title = compat_urllib_parse_unquote(data_video['title'])
        streams = data_video['videoStreams']
        formats = []
        m3u8_url = streams.get('adaptive_stream')
        if m3u8_url:
            m3u8_formats = self._extract_m3u8_formats(
                m3u8_url, page_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False)
            for f in m3u8_formats:
                formats.append(f)
                # Derive a progressive HTTP counterpart from each HLS
                # rendition by swapping the file extension.
                http_f = f.copy()
                del http_f['manifest_url']
                http_f.update({
                    'format_id': f['format_id'].replace('hls-', 'http-'),
                    'protocol': 'http',
                    'url': f['url'].replace('.m3u8', '.mp4'),
                })
                formats.append(http_f)
        mpd_url = streams.get('adaptive_dash')
        if mpd_url:
            formats.extend(self._extract_mpd_formats(
                mpd_url, page_id, mpd_id='dash', fatal=False))
        self._sort_formats(formats)
        return {
            'id': data_video.get('guid') or page_id,
            'display_id': page_id,
            'title': title,
            'formats': formats,
            'description': self._html_search_meta('description', webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
| unlicense | 243abfb7cd52eb9642f2febd36c8636b | 38.949367 | 112 | 0.533587 | 3.2875 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/spankbang.py | 5 | 7229 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
merge_dicts,
parse_duration,
parse_resolution,
str_to_int,
url_or_none,
urlencode_postdata,
urljoin,
)
class SpankBangIE(InfoExtractor):
    """Extractor for single spankbang.com videos (video/play/embed pages)."""
    _VALID_URL = r'''(?x)
                    https?://
                        (?:[^/]+\.)?spankbang\.com/
                        (?:
                            (?P<id>[\da-z]+)/(?:video|play|embed)\b|
                            [\da-z]+-(?P<id_2>[\da-z]+)/playlist/[^/?#&]+
                        )
                    '''
    _TESTS = [{
        'url': 'http://spankbang.com/3vvn/video/fantasy+solo',
        'md5': '1cc433e1d6aa14bc376535b8679302f7',
        'info_dict': {
            'id': '3vvn',
            'ext': 'mp4',
            'title': 'fantasy solo',
            'description': 'dillion harper masturbates on a bed',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'silly2587',
            'timestamp': 1422571989,
            'upload_date': '20150129',
            'age_limit': 18,
        }
    }, {
        # 480p only
        'url': 'http://spankbang.com/1vt0/video/solvane+gangbang',
        'only_matching': True,
    }, {
        # no uploader
        'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2',
        'only_matching': True,
    }, {
        # mobile page
        'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name',
        'only_matching': True,
    }, {
        # 4k
        'url': 'https://spankbang.com/1vwqx/video/jade+kush+solo+4k',
        'only_matching': True,
    }, {
        'url': 'https://m.spankbang.com/3vvn/play/fantasy+solo/480p/',
        'only_matching': True,
    }, {
        'url': 'https://m.spankbang.com/3vvn/play',
        'only_matching': True,
    }, {
        'url': 'https://spankbang.com/2y3td/embed/',
        'only_matching': True,
    }, {
        'url': 'https://spankbang.com/2v7ik-7ecbgu/playlist/latina+booty',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Playlist-entry style URLs capture the video id in 'id_2'.
        video_id = mobj.group('id') or mobj.group('id_2')

        # Fetch the full video page (not the embed page) with a US cookie.
        webpage = self._download_webpage(
            url.replace('/%s/embed' % video_id, '/%s/video' % video_id),
            video_id, headers={'Cookie': 'country=US'})

        if re.search(r'<[^>]+\b(?:id|class)=["\']video_removed', webpage):
            raise ExtractorError(
                'Video %s is not available' % video_id, expected=True)

        formats = []

        def extract_format(format_id, format_url):
            # Append format entries for one stream URL, dispatching on its
            # type (HLS manifest, DASH manifest or progressive MP4).
            f_url = url_or_none(format_url)
            if not f_url:
                return
            f = parse_resolution(format_id)
            ext = determine_ext(f_url)
            if format_id.startswith('m3u8') or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    f_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            elif format_id.startswith('mpd') or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    f_url, video_id, mpd_id='dash', fatal=False))
            elif ext == 'mp4' or f.get('width') or f.get('height'):
                f.update({
                    'url': f_url,
                    'format_id': format_id,
                })
                formats.append(f)

        STREAM_URL_PREFIX = 'stream_url_'

        for mobj in re.finditer(
                r'%s(?P<id>[^\s=]+)\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2'
                % STREAM_URL_PREFIX, webpage):
            # Fix: mobj.group('id', 'url') returns a *tuple*; it must be
            # unpacked into the two positional parameters of extract_format,
            # otherwise every match raised a TypeError.
            extract_format(*mobj.group('id', 'url'))

        if not formats:
            # Fallback: ask the site's stream API directly.
            stream_key = self._search_regex(
                r'data-streamkey\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
                webpage, 'stream key', group='value')

            stream = self._download_json(
                'https://spankbang.com/api/videos/stream', video_id,
                'Downloading stream JSON', data=urlencode_postdata({
                    'id': stream_key,
                    'data': 0,
                }), headers={
                    'Referer': url,
                    'X-Requested-With': 'XMLHttpRequest',
                })

            for format_id, format_url in stream.items():
                if format_url and isinstance(format_url, list):
                    format_url = format_url[0]
                extract_format(format_id, format_url)

        self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))

        info = self._search_json_ld(webpage, video_id, default={})

        title = self._html_search_regex(
            r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title', default=None)
        description = self._search_regex(
            r'<div[^>]+\bclass=["\']bottom[^>]+>\s*<p>[^<]*</p>\s*<p>([^<]+)',
            webpage, 'description', default=None)
        thumbnail = self._og_search_thumbnail(webpage, default=None)
        uploader = self._html_search_regex(
            (r'(?s)<li[^>]+class=["\']profile[^>]+>(.+?)</a>',
             r'class="user"[^>]*><img[^>]+>([^<]+)'),
            webpage, 'uploader', default=None)
        duration = parse_duration(self._search_regex(
            r'<div[^>]+\bclass=["\']right_side[^>]+>\s*<span>([^<]+)',
            webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            r'([\d,.]+)\s+plays', webpage, 'view count', default=None))

        age_limit = self._rta_search(webpage)

        # Scraped fields take precedence; JSON-LD fills in the gaps.
        return merge_dicts({
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
            'age_limit': age_limit,
        }, info)
class SpankBangPlaylistIE(InfoExtractor):
    """Extractor for spankbang.com playlist pages; yields per-video entries."""
    _VALID_URL = r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/playlist/(?P<display_id>[^/]+)'
    _TEST = {
        'url': 'https://spankbang.com/ug0k/playlist/big+ass+titties',
        'info_dict': {
            'id': 'ug0k',
            'title': 'Big Ass Titties',
        },
        'playlist_mincount': 40,
    }

    def _real_extract(self, url):
        # Split the URL into the numeric-ish playlist id and its slug.
        match = re.match(self._VALID_URL, url)
        playlist_id = match.group('id')
        display_id = match.group('display_id')

        webpage = self._download_webpage(
            url, playlist_id, headers={'Cookie': 'country=US; mobile=on'})

        # Collect every episode link of this playlist and delegate each one
        # to the single-video extractor.
        link_pattern = (
            r'<a[^>]+\bhref=(["\'])(?P<path>/?[\da-z]+-(?P<id>[\da-z]+)/playlist/%s(?:(?!\1).)*)\1'
            % re.escape(display_id))
        entries = []
        for item in re.finditer(link_pattern, webpage):
            entries.append(self.url_result(
                urljoin(url, item.group('path')),
                ie=SpankBangIE.ie_key(), video_id=item.group('id')))

        title = self._html_search_regex(
            r'<h1>([^<]+)\s+playlist\s*<', webpage, 'playlist title',
            fatal=False)

        return self.playlist_result(entries, playlist_id, title)
| unlicense | 343578ea257bb9d159ba90165544c84c | 35.510101 | 114 | 0.487481 | 3.495648 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/youku.py | 11 | 11404 | # coding: utf-8
from __future__ import unicode_literals
import random
import re
import string
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
get_element_by_class,
js_to_json,
str_or_none,
strip_jsonp,
)
class YoukuIE(InfoExtractor):
    """Extractor for single videos on youku.com and video.tudou.com."""
    IE_NAME = 'youku'
    IE_DESC = '优酷'
    _VALID_URL = r'''(?x)
        (?:
            https?://(
                (?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
                video\.tudou\.com/v/)|
            youku:)
        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
    '''
    _TESTS = [{
        # MD5 is unstable
        'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
        'info_dict': {
            'id': 'XMTc1ODE5Njcy',
            'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
            'ext': 'mp4',
            'duration': 74.73,
            'thumbnail': r're:^https?://.*',
            'uploader': '。躲猫猫、',
            'uploader_id': '36017967',
            'uploader_url': 'http://i.youku.com/u/UMTQ0MDcxODY4',
            'tags': list,
        }
    }, {
        'url': 'http://player.youku.com/player.php/sid/XNDgyMDQ2NTQw/v.swf',
        'only_matching': True,
    }, {
        'url': 'http://v.youku.com/v_show/id_XODgxNjg1Mzk2_ev_1.html',
        'info_dict': {
            'id': 'XODgxNjg1Mzk2',
            'ext': 'mp4',
            'title': '武媚娘传奇 85',
            'duration': 1999.61,
            'thumbnail': r're:^https?://.*',
            'uploader': '疯狂豆花',
            'uploader_id': '62583473',
            'uploader_url': 'http://i.youku.com/u/UMjUwMzMzODky',
            'tags': list,
        },
    }, {
        'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html',
        'info_dict': {
            'id': 'XMTI1OTczNDM5Mg',
            'ext': 'mp4',
            'title': '花千骨 04',
            'duration': 2363,
            'thumbnail': r're:^https?://.*',
            'uploader': '放剧场-花千骨',
            'uploader_id': '772849359',
            'uploader_url': 'http://i.youku.com/u/UMzA5MTM5NzQzNg==',
            'tags': list,
        },
    }, {
        'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
        'note': 'Video protected with password',
        'info_dict': {
            'id': 'XNjA1NzA2Njgw',
            'ext': 'mp4',
            'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起',
            'duration': 7264.5,
            'thumbnail': r're:^https?://.*',
            'uploader': 'FoxJin1006',
            'uploader_id': '322014285',
            'uploader_url': 'http://i.youku.com/u/UMTI4ODA1NzE0MA==',
            'tags': list,
        },
        'params': {
            'videopassword': '100600',
        },
    }, {
        # /play/get.json contains streams with "channel_type":"tail"
        'url': 'http://v.youku.com/v_show/id_XOTUxMzg4NDMy.html',
        'info_dict': {
            'id': 'XOTUxMzg4NDMy',
            'ext': 'mp4',
            'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft',
            'duration': 702.08,
            'thumbnail': r're:^https?://.*',
            'uploader': '明月庄主moon',
            'uploader_id': '38465621',
            'uploader_url': 'http://i.youku.com/u/UMTUzODYyNDg0',
            'tags': list,
        },
    }, {
        'url': 'http://video.tudou.com/v/XMjIyNzAzMTQ4NA==.html?f=46177805',
        'info_dict': {
            'id': 'XMjIyNzAzMTQ4NA',
            'ext': 'mp4',
            'title': '卡马乔国足开大脚长传冲吊集锦',
            'duration': 289,
            'thumbnail': r're:^https?://.*',
            'uploader': '阿卜杜拉之星',
            'uploader_id': '2382249',
            'uploader_url': 'http://i.youku.com/u/UOTUyODk5Ng==',
            'tags': list,
        },
    }, {
        'url': 'http://video.tudou.com/v/XMjE4ODI3OTg2MA==.html',
        'only_matching': True,
    }]

    @staticmethod
    def get_ysuid():
        # Cookie value expected by youku.com: current unix timestamp
        # followed by three random ASCII letters.
        return '%d%s' % (int(time.time()), ''.join([
            random.choice(string.ascii_letters) for i in range(3)]))

    def get_format_name(self, fm):
        """Map Youku's stream_type value onto this extractor's h1..h6 ids."""
        _dict = {
            '3gp': 'h6',
            '3gphd': 'h5',
            'flv': 'h4',
            'flvhd': 'h4',
            'mp4': 'h3',
            'mp4hd': 'h3',
            'mp4hd2': 'h4',
            'mp4hd3': 'h4',
            'hd2': 'h2',
            'hd3': 'h1',
        }
        return _dict.get(fm)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Cookies the site expects before the metadata API is queried.
        self._set_cookie('youku.com', '__ysuid', self.get_ysuid())
        self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')

        # The 'cna' tracking token is delivered via the etag header of eg.js.
        _, urlh = self._download_webpage_handle(
            'https://log.mmstat.com/eg.js', video_id, 'Retrieving cna info')
        # The etag header is '"foobar"'; let's remove the double quotes
        cna = urlh.headers['etag'][1:-1]

        # request basic data
        basic_data_params = {
            'vid': video_id,
            'ccode': '0590',
            'client_ip': '192.168.1.1',
            'utid': cna,
            # NOTE(review): time.time() is already in seconds, so dividing by
            # 1000 looks odd — confirm against the upstream API before changing.
            'client_ts': time.time() / 1000,
        }

        video_password = self._downloader.params.get('videopassword')
        if video_password:
            basic_data_params['password'] = video_password

        headers = {
            'Referer': url,
        }
        headers.update(self.geo_verification_headers())
        data = self._download_json(
            'https://ups.youku.com/ups/get.json', video_id,
            'Downloading JSON metadata',
            query=basic_data_params, headers=headers)['data']

        error = data.get('error')
        if error:
            error_note = error.get('note')
            # Translate the two known Chinese error notes (geo-restricted /
            # private) into explicit, expected extractor errors.
            if error_note is not None and '因版权原因无法观看此视频' in error_note:
                raise ExtractorError(
                    'Youku said: Sorry, this video is available in China only', expected=True)
            elif error_note and '该视频被设为私密' in error_note:
                raise ExtractorError(
                    'Youku said: Sorry, this video is private', expected=True)
            else:
                msg = 'Youku server reported error %i' % error.get('code')
                if error_note is not None:
                    msg += ': ' + error_note
                raise ExtractorError(msg)

        # get video title
        video_data = data['video']
        title = video_data['title']

        # Streams tagged "tail" are trailing clips, not the main video.
        formats = [{
            'url': stream['m3u8_url'],
            'format_id': self.get_format_name(stream.get('stream_type')),
            'ext': 'mp4',
            'protocol': 'm3u8_native',
            'filesize': int(stream.get('size')),
            'width': stream.get('width'),
            'height': stream.get('height'),
        } for stream in data['stream'] if stream.get('channel_type') != 'tail']
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'duration': video_data.get('seconds'),
            'thumbnail': video_data.get('logo'),
            'uploader': video_data.get('username'),
            'uploader_id': str_or_none(video_data.get('userid')),
            'uploader_url': data.get('uploader', {}).get('homepage'),
            'tags': video_data.get('tags'),
        }
class YoukuShowIE(InfoExtractor):
    """Extractor for show pages on list.youku.com; yields all episodes."""
    _VALID_URL = r'https?://list\.youku\.com/show/id_(?P<id>[0-9a-z]+)\.html'
    IE_NAME = 'youku:show'

    _TESTS = [{
        'url': 'http://list.youku.com/show/id_zc7c670be07ff11e48b3f.html',
        'info_dict': {
            'id': 'zc7c670be07ff11e48b3f',
            'title': '花千骨 DVD版',
            'description': 'md5:a1ae6f5618571bbeb5c9821f9c81b558',
        },
        'playlist_count': 50,
    }, {
        # Episode number not starting from 1
        'url': 'http://list.youku.com/show/id_zefbfbd70efbfbd780bef.html',
        'info_dict': {
            'id': 'zefbfbd70efbfbd780bef',
            'title': '超级飞侠3',
            'description': 'md5:275715156abebe5ccc2a1992e9d56b98',
        },
        'playlist_count': 24,
    }, {
        # Ongoing playlist. The initial page is the last one
        'url': 'http://list.youku.com/show/id_za7c275ecd7b411e1a19e.html',
        'only_matching': True,
    }, {
        # No data-id value.
        'url': 'http://list.youku.com/show/id_zefbfbd61237fefbfbdef.html',
        'only_matching': True,
    }, {
        # Wrong number of reload_id.
        'url': 'http://list.youku.com/show/id_z20eb4acaf5c211e3b2ad.html',
        'only_matching': True,
    }]

    def _extract_entries(self, playlist_data_url, show_id, note, query):
        # The endpoint answers with JSONP; strip the padding and parse the
        # embedded 'html' fragment out of it.  Returns (raw_html, entries)
        # so the caller can also mine the HTML for reload ids.
        query['callback'] = 'cb'
        playlist_data = self._download_json(
            playlist_data_url, show_id, query=query, note=note,
            transform_source=lambda s: js_to_json(strip_jsonp(s))).get('html')
        if playlist_data is None:
            return [None, None]
        drama_list = (get_element_by_class('p-drama-grid', playlist_data)
                      or get_element_by_class('p-drama-half-row', playlist_data))
        if drama_list is None:
            raise ExtractorError('No episodes found')
        video_urls = re.findall(r'<a[^>]+href="([^"]+)"', drama_list)
        return playlist_data, [
            self.url_result(self._proto_relative_url(video_url, 'http:'), YoukuIE.ie_key())
            for video_url in video_urls]

    def _real_extract(self, url):
        show_id = self._match_id(url)
        webpage = self._download_webpage(url, show_id)

        entries = []
        # Episode pages are loaded via AJAX; the parameters live in the
        # page's PageConfig JavaScript object.
        page_config = self._parse_json(self._search_regex(
            r'var\s+PageConfig\s*=\s*({.+});', webpage, 'page config'),
            show_id, transform_source=js_to_json)
        first_page, initial_entries = self._extract_entries(
            'http://list.youku.com/show/module', show_id,
            note='Downloading initial playlist data page',
            query={
                'id': page_config['showid'],
                'tab': 'showInfo',
            })
        first_page_reload_id = self._html_search_regex(
            r'<div[^>]+id="(reload_\d+)', first_page, 'first page reload id')
        # The first reload_id has the same items as first_page
        reload_ids = re.findall('<li[^>]+data-id="([^"]+)">', first_page)
        entries.extend(initial_entries)
        for idx, reload_id in enumerate(reload_ids):
            if reload_id == first_page_reload_id:
                continue
            _, new_entries = self._extract_entries(
                'http://list.youku.com/show/episode', show_id,
                note='Downloading playlist data page %d' % (idx + 1),
                query={
                    'id': page_config['showid'],
                    'stage': reload_id,
                })
            if new_entries is not None:
                entries.extend(new_entries)
        # The meta description starts with the show title, comma-separated.
        desc = self._html_search_meta('description', webpage, fatal=False)
        playlist_title = desc.split(',')[0] if desc else None
        detail_li = get_element_by_class('p-intro', webpage)
        playlist_description = get_element_by_class(
            'intro-more', detail_li) if detail_li else None

        return self.playlist_result(
            entries, show_id, playlist_title, playlist_description)
| unlicense | 959f8a170b13208803fd02b69add9651 | 35.097087 | 94 | 0.505469 | 3.127874 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/tv5mondeplus.py | 12 | 4498 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
extract_attributes,
int_or_none,
parse_duration,
)
class TV5MondePlusIE(InfoExtractor):
    """Extractor for videos on tv5mondeplus.com / revoir.tv5monde.com."""
    IE_DESC = 'TV5MONDE+'
    _VALID_URL = r'https?://(?:www\.)?(?:tv5mondeplus|revoir\.tv5monde)\.com/toutes-les-videos/[^/]+/(?P<id>[^/?#]+)'
    _TESTS = [{
        # movie
        'url': 'https://revoir.tv5monde.com/toutes-les-videos/cinema/rendez-vous-a-atlit',
        'md5': '8cbde5ea7b296cf635073e27895e227f',
        'info_dict': {
            'id': '822a4756-0712-7329-1859-a13ac7fd1407',
            'display_id': 'rendez-vous-a-atlit',
            'ext': 'mp4',
            'title': 'Rendez-vous à Atlit',
            'description': 'md5:2893a4c5e1dbac3eedff2d87956e4efb',
            'upload_date': '20200130',
        },
    }, {
        # series episode
        'url': 'https://revoir.tv5monde.com/toutes-les-videos/series-fictions/c-est-la-vie-ennemie-juree',
        'info_dict': {
            'id': '0df7007c-4900-3936-c601-87a13a93a068',
            'display_id': 'c-est-la-vie-ennemie-juree',
            'ext': 'mp4',
            'title': "C'est la vie - Ennemie jurée",
            'description': 'md5:dfb5c63087b6f35fe0cc0af4fe44287e',
            'upload_date': '20200130',
            'series': "C'est la vie",
            'episode': 'Ennemie jurée',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://revoir.tv5monde.com/toutes-les-videos/series-fictions/neuf-jours-en-hiver-neuf-jours-en-hiver',
        'only_matching': True,
    }, {
        'url': 'https://revoir.tv5monde.com/toutes-les-videos/info-societe/le-journal-de-la-rts-edition-du-30-01-20-19h30',
        'only_matching': True,
    }]
    # The site geo-blocks server-side; bypass via X-Forwarded-For won't help.
    _GEO_BYPASS = False

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Geo-restriction is reported as an inline French message.
        if ">Ce programme n'est malheureusement pas disponible pour votre zone géographique.<" in webpage:
            self.raise_geo_restricted(countries=['FR'])

        title = episode = self._html_search_regex(r'<h1>([^<]+)', webpage, 'title')
        # All stream information hangs off the player loader tag's data-* attrs.
        vpl_data = extract_attributes(self._search_regex(
            r'(<[^>]+class="video_player_loader"[^>]+>)',
            webpage, 'video player loader'))

        video_files = self._parse_json(
            vpl_data['data-broadcast'], display_id).get('files', [])
        formats = []
        for video_file in video_files:
            v_url = video_file.get('url')
            if not v_url:
                continue
            video_format = video_file.get('format') or determine_ext(v_url)
            if video_format == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    v_url, display_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': v_url,
                    'format_id': video_format,
                })
        self._sort_formats(formats)

        description = self._html_search_regex(
            r'(?s)<div[^>]+class=["\']episode-texte[^>]+>(.+?)</div>', webpage,
            'description', fatal=False)
        series = self._html_search_regex(
            r'<p[^>]+class=["\']episode-emission[^>]+>([^<]+)', webpage,
            'series', default=None)

        if series and series != title:
            title = '%s - %s' % (series, title)

        upload_date = self._search_regex(
            r'(?:date_publication|publish_date)["\']\s*:\s*["\'](\d{4}_\d{2}_\d{2})',
            webpage, 'upload date', default=None)
        if upload_date:
            upload_date = upload_date.replace('_', '')

        # Prefer the GUID embedded in the page; fall back to the URL slug.
        video_id = self._search_regex(
            (r'data-guid=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})',
             r'id_contenu["\']\s:\s*(\d+)'), webpage, 'video id',
            default=display_id)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': vpl_data.get('data-image'),
            'duration': int_or_none(vpl_data.get('data-duration')) or parse_duration(self._html_search_meta('duration', webpage)),
            'upload_date': upload_date,
            'formats': formats,
            'series': series,
            'episode': episode,
        }
| unlicense | 4503653b4642690be3789cfaf960ef1b | 37.410256 | 130 | 0.525145 | 3.21 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/defense.py | 90 | 1242 | from __future__ import unicode_literals
from .common import InfoExtractor
class DefenseGouvFrIE(InfoExtractor):
    """Extractor for webtv videos hosted on defense.gouv.fr."""
    IE_NAME = 'defense.gouv.fr'
    _VALID_URL = r'https?://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)'

    _TEST = {
        'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
        'md5': '75bba6124da7e63d2d60b5244ec9430c',
        'info_dict': {
            'id': '11213',
            'ext': 'mp4',
            'title': 'attaque-chimique-syrienne-du-21-aout-2013-1'
        }
    }

    def _real_extract(self, url):
        # The URL slug serves as both the display id and the title.
        title = self._match_id(url)
        webpage = self._download_webpage(url, title)

        # The numeric video id is buried in the page's flashvars.
        video_id = self._search_regex(
            r"flashvars.pvg_id=\"(\d+)\";",
            webpage, 'ID')

        config = self._download_json(
            'http://static.videos.gouv.fr/brightcovehub/export/json/%s'
            % video_id,
            title, 'Downloading JSON config')

        return {
            'id': video_id,
            'ext': 'mp4',
            'url': config['renditions'][0]['url'],
            'title': title,
        }
| unlicense | ef24bdf0972a67b9c9adc5aa753be571 | 30.846154 | 134 | 0.549114 | 3.007264 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/megaphone.py | 30 | 1770 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import js_to_json
class MegaphoneIE(InfoExtractor):
    """Extractor for megaphone.fm embedded podcast players."""
    IE_NAME = 'megaphone.fm'
    IE_DESC = 'megaphone.fm embedded players'
    _VALID_URL = r'https://player\.megaphone\.fm/(?P<id>[A-Z0-9]+)'
    _TEST = {
        'url': 'https://player.megaphone.fm/GLT9749789991?"',
        'md5': '4816a0de523eb3e972dc0dda2c191f96',
        'info_dict': {
            'id': 'GLT9749789991',
            'ext': 'mp3',
            'title': '#97 What Kind Of Idiot Gets Phished?',
            'thumbnail': r're:^https://.*\.png.*$',
            'duration': 1776.26375,
            'author': 'Reply All',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page exposes metadata via OpenGraph audio:* properties and a
        # JavaScript 'episode' object holding the media URL and duration.
        episode_data = self._parse_json(
            self._search_regex(
                r'(?s)var\s+episode\s*=\s*(\{.+?\});', webpage, 'episode JSON'),
            video_id, js_to_json)

        return {
            'id': video_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'title': self._og_search_property('audio:title', webpage),
            'author': self._og_search_property('audio:artist', webpage),
            'duration': episode_data['duration'],
            'formats': [{
                'url': self._proto_relative_url(
                    episode_data['mediaUrl'], 'https:'),
            }],
        }

    @classmethod
    def _extract_urls(cls, webpage):
        # findall yields tuples because _VALID_URL itself contains a group;
        # the full iframe src is the first element of each tuple.
        found = []
        for groups in re.findall(
                r'<iframe[^>]*?\ssrc=["\'](%s)' % cls._VALID_URL, webpage):
            found.append(groups[0])
        return found
| unlicense | b6f2b40cfcf67ae08192fd7d184db05b | 31.181818 | 105 | 0.548588 | 3.384321 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/tvp.py | 20 | 9417 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
determine_ext,
ExtractorError,
get_element_by_attribute,
orderedSet,
)
class TVPIE(InfoExtractor):
    """Extractor for tvp.pl/tvp.info pages; delegates to TVPEmbedIE."""
    IE_NAME = 'tvp'
    IE_DESC = 'Telewizja Polska'
    _VALID_URL = r'https?://[^/]+\.tvp\.(?:pl|info)/(?:video/(?:[^,\s]*,)*|(?:(?!\d+/)[^/]+/)*)(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://vod.tvp.pl/video/czas-honoru,i-seria-odc-13,194536',
        'md5': 'a21eb0aa862f25414430f15fdfb9e76c',
        'info_dict': {
            'id': '194536',
            'ext': 'mp4',
            'title': 'Czas honoru, odc. 13 – Władek',
            'description': 'md5:437f48b93558370b031740546b696e24',
        },
    }, {
        'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176',
        'md5': 'b0005b542e5b4de643a9690326ab1257',
        'info_dict': {
            'id': '17916176',
            'ext': 'mp4',
            'title': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
            'description': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
        },
    }, {
        # page id is not the same as video id(#7799)
        'url': 'https://wiadomosci.tvp.pl/33908820/28092017-1930',
        'md5': '84cd3c8aec4840046e5ab712416b73d0',
        'info_dict': {
            'id': '33908820',
            'ext': 'mp4',
            'title': 'Wiadomości, 28.09.2017, 19:30',
            'description': 'Wydanie główne codziennego serwisu informacyjnego.'
        },
        'skip': 'HTTP Error 404: Not Found',
    }, {
        'url': 'http://vod.tvp.pl/seriale/obyczajowe/na-sygnale/sezon-2-27-/odc-39/17834272',
        'only_matching': True,
    }, {
        'url': 'http://wiadomosci.tvp.pl/25169746/24052016-1200',
        'only_matching': True,
    }, {
        'url': 'http://krakow.tvp.pl/25511623/25lecie-mck-wyjatkowe-miejsce-na-mapie-krakowa',
        'only_matching': True,
    }, {
        'url': 'http://teleexpress.tvp.pl/25522307/wierni-wzieli-udzial-w-procesjach',
        'only_matching': True,
    }, {
        'url': 'http://sport.tvp.pl/25522165/krychowiak-uspokaja-w-sprawie-kontuzji-dwa-tygodnie-to-maksimum',
        'only_matching': True,
    }, {
        'url': 'http://www.tvp.info/25511919/trwa-rewolucja-wladza-zdecydowala-sie-na-pogwalcenie-konstytucji',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)

        # The id embedded in the page markup may differ from the one in the
        # URL (see #7799 test case), so prefer what the page advertises.
        video_id = self._search_regex([
            r'<iframe[^>]+src="[^"]*?object_id=(\d+)',
            r"object_id\s*:\s*'(\d+)'",
            r'data-video-id="(\d+)"'], webpage, 'video id', default=page_id)

        description = (
            self._og_search_description(webpage, default=None)
            or self._html_search_meta('description', webpage, default=None))

        # Hand the actual extraction over to the embed extractor.
        return {
            '_type': 'url_transparent',
            'url': 'tvp:' + video_id,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'ie_key': 'TVPEmbed',
        }
class TVPEmbedIE(InfoExtractor):
    """Extractor for the tvplayer.php embed player ('tvp:<id>' URLs)."""
    IE_NAME = 'tvp:embed'
    IE_DESC = 'Telewizja Polska'
    _VALID_URL = r'(?:tvp:|https?://[^/]+\.tvp\.(?:pl|info)/sess/tvplayer\.php\?.*?object_id=)(?P<id>\d+)'

    _TESTS = [{
        'url': 'tvp:194536',
        'md5': 'a21eb0aa862f25414430f15fdfb9e76c',
        'info_dict': {
            'id': '194536',
            'ext': 'mp4',
            'title': 'Czas honoru, odc. 13 – Władek',
        },
    }, {
        # not available
        'url': 'http://www.tvp.pl/sess/tvplayer.php?object_id=22670268',
        'md5': '8c9cd59d16edabf39331f93bf8a766c7',
        'info_dict': {
            'id': '22670268',
            'ext': 'mp4',
            'title': 'Panorama, 07.12.2015, 15:40',
        },
        'skip': 'Transmisja została zakończona lub materiał niedostępny',
    }, {
        'url': 'tvp:22670268',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://www.tvp.pl/sess/tvplayer.php?object_id=%s' % video_id, video_id)

        # Unavailable videos surface an inline error message rather than 404.
        error = self._html_search_regex(
            r'(?s)<p[^>]+\bclass=["\']notAvailable__text["\'][^>]*>(.+?)</p>',
            webpage, 'error', default=None) or clean_html(
            get_element_by_attribute('class', 'msg error', webpage))
        if error:
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, clean_html(error)), expected=True)

        # Title (and optional series title) live in the player's JS config.
        title = self._search_regex(
            r'name\s*:\s*([\'"])Title\1\s*,\s*value\s*:\s*\1(?P<title>.+?)\1',
            webpage, 'title', group='title')
        series_title = self._search_regex(
            r'name\s*:\s*([\'"])SeriesTitle\1\s*,\s*value\s*:\s*\1(?P<series>.+?)\1',
            webpage, 'series', group='series', default=None)
        if series_title:
            title = '%s, %s' % (series_title, title)

        thumbnail = self._search_regex(
            r"poster\s*:\s*'([^']+)'", webpage, 'thumbnail', default=None)

        video_url = self._search_regex(
            r'0:{src:([\'"])(?P<url>.*?)\1', webpage,
            'formats', group='url', default=None)
        # The inline URL may be a "material unavailable" placeholder clip;
        # fall back to the videofileinfo API in that case.
        if not video_url or 'material_niedostepny.mp4' in video_url:
            video_url = self._download_json(
                'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id,
                video_id)['video_url']

        formats = []
        video_url_base = self._search_regex(
            r'(https?://.+?/video)(?:\.(?:ism|f4m|m3u8)|-\d+\.mp4)',
            video_url, 'video base url', default=None)
        if video_url_base:
            # TODO: <Group> found instead of <AdaptationSet> in MPD manifest.
            # It's not mentioned in MPEG-DASH standard. Figure that out.
            # formats.extend(self._extract_mpd_formats(
            #     video_url_base + '.ism/video.mpd',
            #     video_id, mpd_id='dash', fatal=False))
            formats.extend(self._extract_ism_formats(
                video_url_base + '.ism/Manifest',
                video_id, 'mss', fatal=False))
            formats.extend(self._extract_f4m_formats(
                video_url_base + '.ism/video.f4m',
                video_id, f4m_id='hds', fatal=False))
            m3u8_formats = self._extract_m3u8_formats(
                video_url_base + '.ism/video.m3u8', video_id,
                'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
            self._sort_formats(m3u8_formats)
            m3u8_formats = list(filter(
                lambda f: f.get('vcodec') != 'none', m3u8_formats))
            formats.extend(m3u8_formats)
            # Probe for progressive MP4 mirrors of each HLS variant
            # (numbered -2.mp4, -3.mp4, ... alongside the manifests).
            for i, m3u8_format in enumerate(m3u8_formats, 2):
                http_url = '%s-%d.mp4' % (video_url_base, i)
                if self._is_valid_url(http_url, video_id):
                    f = m3u8_format.copy()
                    f.update({
                        'url': http_url,
                        'format_id': f['format_id'].replace('hls', 'http'),
                        'protocol': 'http',
                    })
                    formats.append(f)
        else:
            # Direct file URL with no manifest variants.
            formats = [{
                'format_id': 'direct',
                'url': video_url,
                'ext': determine_ext(video_url, 'mp4'),
            }]

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
class TVPWebsiteIE(InfoExtractor):
    """Extractor for vod.tvp.pl series/website pages; yields all episodes."""
    IE_NAME = 'tvp:series'
    _VALID_URL = r'https?://vod\.tvp\.pl/website/(?P<display_id>[^,]+),(?P<id>\d+)'

    _TESTS = [{
        # series
        'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312/video',
        'info_dict': {
            'id': '38678312',
        },
        'playlist_count': 115,
    }, {
        # film
        'url': 'https://vod.tvp.pl/website/gloria,35139666',
        'info_dict': {
            'id': '36637049',
            'ext': 'mp4',
            'title': 'Gloria, Gloria',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['TVPEmbed'],
    }, {
        'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312',
        'only_matching': True,
    }]

    def _entries(self, display_id, playlist_id):
        # Walk the paginated episode listing until a page yields no links.
        listing_url = 'https://vod.tvp.pl/website/%s,%s/video' % (
            display_id, playlist_id)
        page_num = 0
        while True:
            page_num += 1
            page = self._download_webpage(
                listing_url, display_id, 'Downloading page %d' % page_num,
                query={'page': page_num})

            video_ids = orderedSet(re.findall(
                r'<a[^>]+\bhref=["\']/video/%s,[^,]+,(\d+)' % display_id,
                page))
            if not video_ids:
                return

            for video_id in video_ids:
                yield self.url_result(
                    'tvp:%s' % video_id, ie=TVPEmbedIE.ie_key(),
                    video_id=video_id)

    def _real_extract(self, url):
        matched = re.match(self._VALID_URL, url)
        playlist_id = matched.group('id')
        display_id = matched.group('display_id')
        return self.playlist_result(
            self._entries(display_id, playlist_id), playlist_id)
| unlicense | ecbd09e2bbf10ff860850e5f06b25222 | 36.269841 | 111 | 0.50937 | 3.057292 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/vidio.py | 5 | 3285 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
str_or_none,
strip_or_none,
try_get,
)
class VidioIE(InfoExtractor):
    """Extractor for vidio.com watch pages (JSON:API-backed)."""
    _VALID_URL = r'https?://(?:www\.)?vidio\.com/watch/(?P<id>\d+)-(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015',
        'md5': 'cd2801394afc164e9775db6a140b91fe',
        'info_dict': {
            'id': '165683',
            'display_id': 'dj_ambred-booyah-live-2015',
            'ext': 'mp4',
            'title': 'DJ_AMBRED - Booyah (Live 2015)',
            'description': 'md5:27dc15f819b6a78a626490881adbadf8',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 149,
            'like_count': int,
            'uploader': 'TWELVE Pic',
            'timestamp': 1444902800,
            'upload_date': '20151015',
            'uploader_id': 'twelvepictures',
            'channel': 'Cover Music Video',
            'channel_id': '280236',
            'view_count': int,
            'dislike_count': int,
            'comment_count': int,
            'tags': 'count:4',
        },
    }, {
        'url': 'https://www.vidio.com/watch/77949-south-korea-test-fires-missile-that-can-strike-all-of-the-north',
        'only_matching': True,
    }]

    def _real_initialize(self):
        # Obtain an API key up front; every metadata request requires it.
        self._api_key = self._download_json(
            'https://www.vidio.com/auth', None, data=b'')['api_key']

    def _real_extract(self, url):
        video_id, display_id = re.match(self._VALID_URL, url).groups()

        data = self._download_json(
            'https://api.vidio.com/videos/' + video_id, display_id, headers={
                'Content-Type': 'application/vnd.api+json',
                'X-API-KEY': self._api_key,
            })
        video = data['videos'][0]
        title = video['title'].strip()

        formats = self._extract_m3u8_formats(
            data['clips'][0]['hls_url'], display_id, 'mp4', 'm3u8_native')
        self._sort_formats(formats)

        def first_of(kind):
            # The API nests related objects inside pluralized top-level lists.
            return try_get(data, lambda d: d[kind + 's'][0], dict) or {}

        channel = first_of('channel')
        user = first_of('user')
        username = user.get('username')

        def total(stat):
            return int_or_none(video.get('total_' + stat))

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': strip_or_none(video.get('description')),
            'thumbnail': video.get('image_url_medium'),
            'duration': int_or_none(video.get('duration')),
            'like_count': total('likes'),
            'formats': formats,
            'uploader': user.get('name'),
            'timestamp': parse_iso8601(video.get('created_at')),
            'uploader_id': username,
            'uploader_url': 'https://www.vidio.com/@' + username if username else None,
            'channel': channel.get('name'),
            'channel_id': str_or_none(channel.get('id')),
            'view_count': total('view_count'),
            'dislike_count': total('dislikes'),
            'comment_count': total('comments'),
            'tags': video.get('tag_list'),
        }
| unlicense | 2874bacd7f99b59cfade0d48c23c2d36 | 35.910112 | 115 | 0.52481 | 3.362334 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/karrierevideos.py | 15 | 3379 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
fix_xml_ampersands,
float_or_none,
xpath_with_ns,
xpath_text,
)
class KarriereVideosIE(InfoExtractor):
    """Extractor for karrierevideos.at career videos (RTMP-delivered)."""
    _VALID_URL = r'https?://(?:www\.)?karrierevideos\.at(?:/[^/]+)+/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.karrierevideos.at/berufsvideos/mittlere-hoehere-schulen/altenpflegerin',
        'info_dict': {
            'id': '32c91',
            'ext': 'flv',
            'title': 'AltenpflegerIn',
            'description': 'md5:dbadd1259fde2159a9b28667cb664ae2',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # broken ampersands
        'url': 'http://www.karrierevideos.at/orientierung/vaeterkarenz-und-neue-chancen-fuer-muetter-baby-was-nun',
        'info_dict': {
            'id': '5sniu',
            'ext': 'flv',
            'title': 'Väterkarenz und neue Chancen für Mütter - "Baby - was nun?"',
            'description': 'md5:97092c6ad1fd7d38e9d6a5fdeb2bcc33',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = (self._html_search_meta('title', webpage, default=None)
                 or self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))

        # The slug from the URL is only a display id; the real id is embedded
        # in the player config path referenced by the page.
        video_id = self._search_regex(
            r'/config/video/(.+?)\.xml', webpage, 'video id')
        # Server returns malformed headers
        # Force Accept-Encoding: * to prevent gzipped results
        playlist = self._download_xml(
            'http://www.karrierevideos.at/player-playlist.xml.php?p=%s' % video_id,
            video_id, transform_source=fix_xml_ampersands,
            headers={'Accept-Encoding': '*'})

        NS_MAP = {
            'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'
        }

        def ns(path):
            # Expand the jwplayer namespace prefix in an XPath expression.
            return xpath_with_ns(path, NS_MAP)

        item = playlist.find('./tracklist/item')
        video_file = xpath_text(
            item, ns('./jwplayer:file'), 'video url', fatal=True)
        streamer = xpath_text(
            item, ns('./jwplayer:streamer'), 'streamer', fatal=True)

        uploader = xpath_text(
            item, ns('./jwplayer:author'), 'uploader')
        duration = float_or_none(
            xpath_text(item, ns('./jwplayer:duration'), 'duration'))

        description = self._html_search_regex(
            r'(?s)<div class="leadtext">(.+?)</div>',
            webpage, 'description')

        thumbnail = self._html_search_meta(
            'thumbnail', webpage, 'thumbnail')
        if thumbnail:
            thumbnail = compat_urlparse.urljoin(url, thumbnail)

        return {
            'id': video_id,
            # Downgrade rtmpt (tunneled) to plain rtmp for the downloader.
            'url': streamer.replace('rtmpt', 'rtmp'),
            'play_path': 'mp4:%s' % video_file,
            'ext': 'flv',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
        }
| unlicense | 8781444a17db1a7b7685e0d2bf6f3877 | 33.10101 | 115 | 0.53673 | 3.549947 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/vvvvid.py | 1 | 9782 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
)
class VVVVIDIE(InfoExtractor):
    """Extractor for single episodes on vvvvid.it.

    Media URLs ('embed_info') are obfuscated by the site; ds() below
    reverses the site's JavaScript scrambling. Depending on
    'video_type' an episode resolves to Akamai, YouTube or Wowza
    sources.
    """
    _VALID_URL_BASE = r'https?://(?:www\.)?vvvvid\.it/(?:#!)?(?:show|anime|film|series)/'
    _VALID_URL = r'%s(?P<show_id>\d+)/[^/]+/(?P<season_id>\d+)/(?P<id>[0-9]+)' % _VALID_URL_BASE
    _TESTS = [{
        # video_type == 'video/vvvvid'
        'url': 'https://www.vvvvid.it/#!show/434/perche-dovrei-guardarlo-di-dario-moccia/437/489048/ping-pong',
        'md5': 'b8d3cecc2e981adc3835adf07f6df91b',
        'info_dict': {
            'id': '489048',
            'ext': 'mp4',
            'title': 'Ping Pong',
            'duration': 239,
            'series': '"Perché dovrei guardarlo?" di Dario Moccia',
            'season_id': '437',
            'episode': 'Ping Pong',
            'episode_number': 1,
            'episode_id': '3334',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # video_type == 'video/rcs'
        'url': 'https://www.vvvvid.it/#!show/376/death-note-live-action/377/482493/episodio-01',
        'md5': '33e0edfba720ad73a8782157fdebc648',
        'info_dict': {
            'id': '482493',
            'ext': 'mp4',
            'title': 'Episodio 01',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # video_type == 'video/youtube'
        'url': 'https://www.vvvvid.it/show/404/one-punch-man/406/486683/trailer',
        'md5': '33e0edfba720ad73a8782157fdebc648',
        'info_dict': {
            'id': 'RzmFKUDOUgw',
            'ext': 'mp4',
            'title': 'Trailer',
            'upload_date': '20150906',
            'description': 'md5:a5e802558d35247fee285875328c0b80',
            'uploader_id': 'BandaiVisual',
            'uploader': 'BANDAI NAMCO Arts Channel',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.vvvvid.it/show/434/perche-dovrei-guardarlo-di-dario-moccia/437/489048',
        'only_matching': True
    }]
    # Session token required by all API calls; set in _real_initialize.
    _conn_id = None

    def _real_initialize(self):
        # Anonymous login just to obtain the per-session conn_id token.
        self._conn_id = self._download_json(
            'https://www.vvvvid.it/user/login',
            None, headers=self.geo_verification_headers())['data']['conn_id']

    def _download_info(self, show_id, path, video_id, fatal=True):
        """Fetch an ondemand API endpoint; returns its 'data' payload.

        Returns None when fatal=False and the request failed; raises
        ExtractorError when the API reports result == 'error'.
        """
        response = self._download_json(
            'https://www.vvvvid.it/vvvvid/ondemand/%s/%s' % (show_id, path),
            video_id, headers=self.geo_verification_headers(), query={
                'conn_id': self._conn_id,
            }, fatal=fatal)
        if not (response or fatal):
            return
        if response.get('result') == 'error':
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, response['message']), expected=True)
        return response['data']

    def _extract_common_video_info(self, video_data):
        # Fields shared between single-episode and show extraction.
        return {
            'thumbnail': video_data.get('thumbnail'),
            'episode_id': str_or_none(video_data.get('id')),
        }

    def _real_extract(self, url):
        show_id, season_id, video_id = re.match(self._VALID_URL, url).groups()

        response = self._download_info(
            show_id, 'season/%s' % season_id, video_id)

        # Pick this episode out of the full season listing.
        vid = int(video_id)
        video_data = list(filter(
            lambda episode: episode.get('video_id') == vid, response))[0]
        title = video_data['title']
        formats = []

        # vvvvid embed_info decryption algorithm is reverse engineered from function $ds(h) at vvvvid.js
        def ds(h):
            g = "MNOPIJKL89+/4567UVWXQRSTEFGHABCDcdefYZabstuvopqr0123wxyzklmnghij"

            def f(m):
                # Custom base64-like 6-bit -> 8-bit regrouping step.
                l = []
                o = 0
                b = False
                m_len = len(m)
                while ((not b) and o < m_len):
                    n = m[o] << 2
                    o += 1
                    k = -1
                    j = -1
                    if o < m_len:
                        n += m[o] >> 4
                        o += 1
                        if o < m_len:
                            k = (m[o - 1] << 4) & 255
                            k += m[o] >> 2
                            o += 1
                            if o < m_len:
                                j = (m[o - 1] << 6) & 255
                                j += m[o]
                                o += 1
                            else:
                                b = True
                        else:
                            b = True
                    else:
                        b = True
                    l.append(n)
                    if k != -1:
                        l.append(k)
                    if j != -1:
                        l.append(j)
                return l

            # Map each character through the scrambled alphabet...
            c = []
            for e in h:
                c.append(g.index(e))

            # ...undo the circular XOR pass (two full sweeps, backwards)...
            c_len = len(c)
            for e in range(c_len * 2 - 1, -1, -1):
                a = c[e % c_len] ^ c[(e + 1) % c_len]
                c[e % c_len] = a

            # ...then regroup the 6-bit values back into characters.
            c = f(c)
            d = ''
            for e in c:
                d += chr(e)
            return d

        info = {}

        def metadata_from_url(r_url):
            # Best effort: derive season/episode numbers from the
            # '_S<season>Ep<episode>' pattern in media/thumbnail URLs.
            # Only fills info once (no-op if already populated).
            if not info and r_url:
                mobj = re.search(r'_(?:S(\d+))?Ep(\d+)', r_url)
                if mobj:
                    info['episode_number'] = int(mobj.group(2))
                    season_number = mobj.group(1)
                    if season_number:
                        info['season_number'] = int(season_number)

        video_type = video_data.get('video_type')
        is_youtube = False
        # Try the HD embed code first, then the SD variant.
        for quality in ('', '_sd'):
            embed_code = video_data.get('embed_info' + quality)
            if not embed_code:
                continue
            embed_code = ds(embed_code)
            if video_type in ('video/rcs', 'video/kenc'):
                if video_type == 'video/kenc':
                    # kenc media needs an extra token appended to the URL.
                    kenc = self._download_json(
                        'https://www.vvvvid.it/kenc', video_id, query={
                            'action': 'kt',
                            'conn_id': self._conn_id,
                            'url': embed_code,
                        }, fatal=False) or {}
                    kenc_message = kenc.get('message')
                    if kenc_message:
                        embed_code += '?' + ds(kenc_message)
                formats.extend(self._extract_akamai_formats(embed_code, video_id))
            elif video_type == 'video/youtube':
                # Delegate entirely to the YouTube extractor.
                info.update({
                    '_type': 'url_transparent',
                    'ie_key': YoutubeIE.ie_key(),
                    'url': embed_code,
                })
                is_youtube = True
                break
            else:
                formats.extend(self._extract_wowza_formats(
                    'http://sb.top-ix.org/videomg/_definst_/mp4:%s/playlist.m3u8' % embed_code, video_id))
            metadata_from_url(embed_code)

        if not is_youtube:
            self._sort_formats(formats)
            info['formats'] = formats

        metadata_from_url(video_data.get('thumbnail'))
        info.update(self._extract_common_video_info(video_data))
        info.update({
            'id': video_id,
            'title': title,
            'duration': int_or_none(video_data.get('length')),
            'series': video_data.get('show_title'),
            'season_id': season_id,
            'episode': title,
            'view_count': int_or_none(video_data.get('views')),
            'like_count': int_or_none(video_data.get('video_likes')),
            'repost_count': int_or_none(video_data.get('video_shares')),
        })
        return info
class VVVVIDShowIE(VVVVIDIE):
    """Playlist extractor for whole shows on vvvvid.it.

    Enumerates every playable episode of every season and yields one
    VVVVIDIE URL entry per episode.
    """
    _VALID_URL = r'(?P<base_url>%s(?P<id>\d+)(?:/(?P<show_title>[^/?&#]+))?)/?(?:[?#&]|$)' % VVVVIDIE._VALID_URL_BASE
    _TESTS = [{
        'url': 'https://www.vvvvid.it/show/156/psyco-pass',
        'info_dict': {
            'id': '156',
            'title': 'Psycho-Pass',
            'description': 'md5:94d572c0bd85894b193b8aebc9a3a806',
        },
        'playlist_count': 46,
    }, {
        'url': 'https://www.vvvvid.it/show/156',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        base_url, show_id, show_title = re.match(self._VALID_URL, url).groups()

        seasons = self._download_info(
            show_id, 'seasons/', show_title)

        # Best-effort request (fatal=False): _download_info returns None
        # on failure, so fall back to an empty dict to keep the .get()
        # calls in playlist_result below from raising AttributeError.
        show_info = self._download_info(
            show_id, 'info/', show_title, fatal=False) or {}

        entries = []
        for season in (seasons or []):
            episodes = season.get('episodes') or []
            for episode in episodes:
                # Skip entries the API explicitly marks as not playable.
                if episode.get('playable') is False:
                    continue
                season_id = str_or_none(episode.get('season_id'))
                video_id = str_or_none(episode.get('video_id'))
                if not (season_id and video_id):
                    continue
                info = self._extract_common_video_info(episode)
                info.update({
                    '_type': 'url',
                    'ie_key': VVVVIDIE.ie_key(),
                    # Episode page URL understood by VVVVIDIE._VALID_URL.
                    'url': '/'.join([base_url, season_id, video_id]),
                    'title': episode.get('title'),
                    'description': episode.get('description'),
                    'season_id': season_id,
                })
                entries.append(info)
        return self.playlist_result(
            entries, show_id, show_info.get('title'), show_info.get('description'))
| unlicense | 9ba4d04a733530dc732c9ae72d0aea4e | 34.959559 | 117 | 0.448932 | 3.659184 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/fivemin.py | 79 | 1917 | from __future__ import unicode_literals
from .common import InfoExtractor
class FiveMinIE(InfoExtractor):
    """Extractor for 5min.com / Vidible-delivered videos.

    All actual metadata and format extraction happens in the AOL
    extractor; this class only normalizes the many 5min URL shapes
    into an ``aol-video:<id>`` reference.
    """
    IE_NAME = '5min'
    _VALID_URL = r'(?:5min:|https?://(?:[^/]*?5min\.com/|delivery\.vidible\.tv/aol)(?:(?:Scripts/PlayerSeed\.js|playerseed/?)?\?.*?playList=)?)(?P<id>\d+)'

    _TESTS = [
        {
            # From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/
            'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791',
            'md5': '4f7b0b79bf1a470e5004f7112385941d',
            'info_dict': {
                'id': '518013791',
                'ext': 'mp4',
                'title': 'iPad Mini with Retina Display Review',
                'description': 'iPad mini with Retina Display review',
                'duration': 177,
                'uploader': 'engadget',
                'upload_date': '20131115',
                'timestamp': 1384515288,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            }
        },
        {
            # From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247
            'url': '5min:518086247',
            'md5': 'e539a9dd682c288ef5a498898009f69e',
            'info_dict': {
                'id': '518086247',
                'ext': 'mp4',
                'title': 'How to Make a Next-Level Fruit Salad',
                'duration': 184,
            },
            'skip': 'no longer available',
        },
        {
            'url': 'http://embed.5min.com/518726732/',
            'only_matching': True,
        },
        {
            'url': 'http://delivery.vidible.tv/aol?playList=518013791',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        # Hand the numeric id over to AolIE via the aol-video: scheme.
        return self.url_result('aol-video:%s' % self._match_id(url))
| unlicense | 2a49b2cfc3c249c07b37d0bf65b55cd2 | 34.5 | 155 | 0.491914 | 3.351399 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/eagleplatform.py | 23 | 7736 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
unsmuggle_url,
url_or_none,
)
class EaglePlatformIE(InfoExtractor):
    """Extractor for videos hosted on the Eagle Platform CDN.

    Accepts either a player URL on a *.media.eagleplatform.com host or
    the internal 'eagleplatform:<host>:<id>' scheme produced by
    _extract_url() for embedded players found on third-party pages.
    """
    _VALID_URL = r'''(?x)
            (?:
                eagleplatform:(?P<custom_host>[^/]+):|
                https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id=
            )
            (?P<id>\d+)
    '''
    _TESTS = [{
        # http://lenta.ru/news/2015/03/06/navalny/
        'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
        # Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
        'info_dict': {
            'id': '227304',
            'ext': 'mp4',
            'title': 'Навальный вышел на свободу',
            'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 87,
            'view_count': int,
            'age_limit': 0,
        },
    }, {
        # http://muz-tv.ru/play/7129/
        # http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true
        'url': 'eagleplatform:media.clipyou.ru:12820',
        'md5': '358597369cf8ba56675c1df15e7af624',
        'info_dict': {
            'id': '12820',
            'ext': 'mp4',
            'title': "'O Sole Mio",
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 216,
            'view_count': int,
        },
        'skip': 'Georestricted',
    }, {
        # referrer protected video (https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/)
        'url': 'eagleplatform:tvrainru.media.eagleplatform.com:582306',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_url(webpage):
        """Return an extractable URL for an embedded player, or None.

        Tries, in order: plain iframe embeds, the documented "basic
        usage" div embed, and the scripted EaglePlayer() embeds.
        """
        # Regular iframe embedding
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//.+?\.media\.eagleplatform\.com/index/player\?.+?)\1',
            webpage)
        if mobj is not None:
            return mobj.group('url')
        # Shared prefix for both script-based embed styles below.
        PLAYER_JS_RE = r'''
                        <script[^>]+
                            src=(?P<qjs>["\'])(?:https?:)?//(?P<host>(?:(?!(?P=qjs)).)+\.media\.eagleplatform\.com)/player/player\.js(?P=qjs)
                        .+?
                    '''
        # "Basic usage" embedding (see http://dultonmedia.github.io/eplayer/)
        mobj = re.search(
            r'''(?xs)
                    %s
                    <div[^>]+
                        class=(?P<qclass>["\'])eagleplayer(?P=qclass)[^>]+
                        data-id=["\'](?P<id>\d+)
            ''' % PLAYER_JS_RE, webpage)
        if mobj is not None:
            return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict()
        # Generalization of "Javascript code usage", "Combined usage" and
        # "Usage without attaching to DOM" embeddings (see
        # http://dultonmedia.github.io/eplayer/)
        mobj = re.search(
            r'''(?xs)
                    %s
                    <script>
                    .+?
                    new\s+EaglePlayer\(
                        (?:[^,]+\s*,\s*)?
                        {
                            .+?
                            \bid\s*:\s*["\']?(?P<id>\d+)
                            .+?
                        }
                    \s*\)
                    .+?
                    </script>
            ''' % PLAYER_JS_RE, webpage)
        if mobj is not None:
            return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict()

    @staticmethod
    def _handle_error(response):
        # API errors come back with a non-200 'status' field in the JSON
        # body and a list of human-readable messages under 'errors'.
        status = int_or_none(response.get('status', 200))
        if status != 200:
            raise ExtractorError(' '.join(response['errors']), expected=True)

    def _download_json(self, url_or_request, video_id, *args, **kwargs):
        # Wrap the base implementation so that HTTP-error bodies (which
        # the API uses to carry error JSON) are parsed and surfaced as
        # meaningful ExtractorErrors before re-raising.
        try:
            response = super(EaglePlatformIE, self)._download_json(
                url_or_request, video_id, *args, **kwargs)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError):
                response = self._parse_json(ee.cause.read().decode('utf-8'), video_id)
                self._handle_error(response)
            raise
        return response

    def _get_video_url(self, url_or_request, video_id, note='Downloading JSON metadata'):
        # Media endpoints answer with {'data': [<actual URL>, ...]}.
        return self._download_json(url_or_request, video_id, note)['data'][0]

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id')

        headers = {}
        query = {
            'id': video_id,
        }

        # Some videos are referrer-protected; the generic extractor
        # smuggles the embedding page URL through to us.
        referrer = smuggled_data.get('referrer')
        if referrer:
            headers['Referer'] = referrer
            query['referrer'] = referrer

        player_data = self._download_json(
            'http://%s/api/player_data' % host, video_id,
            headers=headers, query=query)

        media = player_data['data']['playlist']['viewports'][0]['medialist'][0]

        title = media['title']
        description = media.get('description')
        thumbnail = self._proto_relative_url(media.get('snapshot'), 'http:')
        duration = int_or_none(media.get('duration'))
        view_count = int_or_none(media.get('views'))

        age_restriction = media.get('age_restriction')
        age_limit = None
        if age_restriction:
            age_limit = 0 if age_restriction == 'allow_all' else 18

        secure_m3u8 = self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:')

        formats = []

        m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON')
        m3u8_formats = self._extract_m3u8_formats(
            m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls', fatal=False)
        formats.extend(m3u8_formats)

        # Index HLS formats by height so matching MP4 variants below can
        # inherit their codec/bitrate metadata.
        m3u8_formats_dict = {}
        for f in m3u8_formats:
            if f.get('height') is not None:
                m3u8_formats_dict[f['height']] = f

        mp4_data = self._download_json(
            # Secure mp4 URL is constructed according to Player.prototype.mp4 from
            # http://lentaru.media.eagleplatform.com/player/player.js
            re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4s', secure_m3u8),
            video_id, 'Downloading mp4 JSON', fatal=False)
        if mp4_data:
            for format_id, format_url in mp4_data.get('data', {}).items():
                if not url_or_none(format_url):
                    continue
                height = int_or_none(format_id)
                if height is not None and m3u8_formats_dict.get(height):
                    # Clone the HLS format of the same height and flip it
                    # over to plain HTTP.
                    f = m3u8_formats_dict[height].copy()
                    f.update({
                        'format_id': f['format_id'].replace('hls', 'http'),
                        'protocol': 'http',
                    })
                else:
                    f = {
                        'format_id': 'http-%s' % format_id,
                        'height': int_or_none(format_id),
                    }
                f['url'] = format_url
                formats.append(f)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense | 06bf8b4717d45c4bde8b848c6db6082d | 36.441748 | 141 | 0.491897 | 3.641643 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/carambatv.py | 20 | 3524 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
int_or_none,
try_get,
)
from .videomore import VideomoreIE
class CarambaTVIE(InfoExtractor):
    """Extractor for videos served by video1.carambatv.ru.

    Also reachable via the internal 'carambatv:<id>' scheme used by
    CarambaTVPageIE.
    """
    _VALID_URL = r'(?:carambatv:|https?://video1\.carambatv\.ru/v/)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://video1.carambatv.ru/v/191910501',
        'md5': '2f4a81b7cfd5ab866ee2d7270cb34a2a',
        'info_dict': {
            'id': '191910501',
            'ext': 'mp4',
            'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 2678.31,
        },
    }, {
        'url': 'carambatv:191910501',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            'http://video1.carambatv.ru/v/%s/videoinfo.js' % video_id,
            video_id)

        title = video['title']

        # Format URLs are relative to either an explicit base or the
        # default per-video path.
        base_url = video.get('video') or 'http://video1.carambatv.ru/v/%s/' % video_id

        formats = []
        for quality in video['qualities']:
            if not quality.get('fn'):
                continue
            formats.append({
                'url': base_url + quality['fn'],
                'height': int_or_none(quality.get('height')),
                'format_id': '%sp' % quality['height'] if quality.get('height') else None,
            })
        self._sort_formats(formats)

        # Duration is only exposed as the end time of the first
        # annotation (stringified float).
        duration = float_or_none(try_get(
            video, lambda x: x['annotations'][0]['end_time'], compat_str))

        return {
            'id': video_id,
            'title': title,
            'thumbnail': video.get('splash'),
            'duration': duration,
            'formats': formats,
        }
class CarambaTVPageIE(InfoExtractor):
    """Extractor for carambatv.ru article pages.

    Pages may embed either a Videomore player (preferred, tried first)
    or a native CarambaTV player; extraction is delegated accordingly.
    """
    _VALID_URL = r'https?://carambatv\.ru/(?:[^/]+/)+(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://carambatv.ru/movie/bad-comedian/razborka-v-manile/',
        'md5': 'a49fb0ec2ad66503eeb46aac237d3c86',
        'info_dict': {
            'id': '475222',
            'ext': 'flv',
            'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
            'thumbnail': r're:^https?://.*\.jpg',
            # duration reported by videomore is incorrect
            'duration': int,
        },
        'add_ie': [VideomoreIE.ie_key()],
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # Preferred path: a Videomore embed, either as a direct player
        # URL or referenced by id in a getVMCode(...) call.
        videomore_url = VideomoreIE._extract_url(webpage)
        if not videomore_url:
            videomore_id = self._search_regex(
                r'getVMCode\s*\(\s*["\']?(\d+)', webpage, 'videomore id',
                default=None)
            if videomore_id:
                videomore_url = 'videomore:%s' % videomore_id
        if videomore_url:
            title = self._og_search_title(webpage)
            return {
                '_type': 'url_transparent',
                'url': videomore_url,
                'ie_key': VideomoreIE.ie_key(),
                # Keep the page title; videomore's own title may differ.
                'title': title,
            }

        # Fallback path: native CarambaTV player, via og:video:iframe or
        # an inline video id assignment.
        video_url = self._og_search_property('video:iframe', webpage, default=None)

        if not video_url:
            video_id = self._search_regex(
                r'(?:video_id|crmb_vuid)\s*[:=]\s*["\']?(\d+)',
                webpage, 'video id')
            video_url = 'carambatv:%s' % video_id

        return self.url_result(video_url, CarambaTVIE.ie_key())
| unlicense | 70b6d82a96e3b340f77dd8591f2fddad | 31.074074 | 86 | 0.516744 | 3.228332 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/slideshare.py | 39 | 2132 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
ExtractorError,
get_element_by_id,
)
class SlideshareIE(InfoExtractor):
    """Extractor for video slideshows hosted on slideshare.net."""
    _VALID_URL = r'https?://(?:www\.)?slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'

    _TEST = {
        'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
        'info_dict': {
            'id': '25665706',
            'ext': 'mp4',
            'title': 'Managing Scale and Complexity',
            'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.',
        },
    }

    def _real_extract(self, url):
        page_title = re.match(self._VALID_URL, url).group('title')
        webpage = self._download_webpage(url, page_title)

        # Player configuration embedded as
        # $.extend(..., slideshare_object, {...});
        meta = json.loads(self._search_regex(
            r'\$\.extend\(.*?slideshare_object,\s*(\{.*?\})\);',
            webpage, 'slideshare object'))
        slideshow = meta['slideshow']
        if slideshow['type'] != 'video':
            raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % slideshow['type'], expected=True)

        # The media URL is assembled from the storage bucket, the
        # document name and the '-SD.<ext>' rendition suffix.
        jsplayer = meta['jsplayer']
        ext = jsplayer['video_extension']
        video_url = compat_urlparse.urljoin(
            jsplayer['video_bucket'], meta['doc'] + '-SD.' + ext)

        description = get_element_by_id('slideshow-description-paragraph', webpage) or self._html_search_regex(
            r'(?s)<p[^>]+itemprop="description"[^>]*>(.+?)</p>', webpage,
            'description', fatal=False)

        return {
            '_type': 'video',
            'id': slideshow['id'],
            'title': slideshow['title'],
            'ext': ext,
            'url': video_url,
            'thumbnail': slideshow['pin_image_url'],
            'description': description.strip() if description else None,
        }
| unlicense | cbda9b05c05d1a6e5dda5954ab1c3e40 | 37.071429 | 191 | 0.576454 | 3.786856 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/safari.py | 12 | 9746 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import (
ExtractorError,
update_url_query,
)
class SafariBaseIE(InfoExtractor):
    """Shared login/session handling for Safari / O'Reilly extractors."""
    _LOGIN_URL = 'https://learning.oreilly.com/accounts/login/'
    _NETRC_MACHINE = 'safari'

    _API_BASE = 'https://learning.oreilly.com/api/v1'
    _API_FORMAT = 'json'

    # Class-level flag checked by subclasses (e.g. to request Kaltura
    # sessions only for authenticated users).
    LOGGED_IN = False

    def _real_initialize(self):
        self._login()

    def _login(self):
        """Authenticate with O'Reilly if credentials were supplied.

        Login sequence: probe login-check (may already be logged in via
        cookies), POST credentials to the member auth endpoint, fix up
        duplicated Set-Cookie headers, then follow the redirect and
        verify we landed on the logged-in home page.
        Raises ExtractorError when authentication fails.
        """
        username, password = self._get_login_info()
        if username is None:
            return

        _, urlh = self._download_webpage_handle(
            'https://learning.oreilly.com/accounts/login-check/', None,
            'Downloading login page')

        def is_logged(urlh):
            # Successful sessions are redirected to the /home/ page.
            return 'learning.oreilly.com/home/' in urlh.geturl()

        if is_logged(urlh):
            self.LOGGED_IN = True
            return

        # Not logged in: the redirect carries the post-login target in
        # its 'next' query parameter.
        redirect_url = urlh.geturl()
        parsed_url = compat_urlparse.urlparse(redirect_url)
        qs = compat_parse_qs(parsed_url.query)
        next_uri = compat_urlparse.urljoin(
            'https://api.oreilly.com', qs['next'][0])

        # The auth endpoint answers HTTP 400 with a JSON body on bad
        # credentials, hence expected_status=400.
        auth, urlh = self._download_json_handle(
            'https://www.oreilly.com/member/auth/login/', None, 'Logging in',
            data=json.dumps({
                'email': username,
                'password': password,
                'redirect_uri': next_uri,
            }).encode(), headers={
                'Content-Type': 'application/json',
                'Referer': redirect_url,
            }, expected_status=400)

        credentials = auth.get('credentials')
        if (not auth.get('logged_in') and not auth.get('redirect_uri')
                and credentials):
            raise ExtractorError(
                'Unable to login: %s' % credentials, expected=True)

        # oreilly serves two same instances of the following cookies
        # in Set-Cookie header and expects first one to be actually set
        for cookie in ('groot_sessionid', 'orm-jwt', 'orm-rt'):
            self._apply_first_set_cookie_header(urlh, cookie)

        _, urlh = self._download_webpage_handle(
            auth.get('redirect_uri') or next_uri, None, 'Completing login',)

        if is_logged(urlh):
            self.LOGGED_IN = True
            return

        raise ExtractorError('Unable to log in')
class SafariIE(SafariBaseIE):
    """Extractor for individual Safari / O'Reilly videos.

    Videos are served through Kaltura; this extractor resolves the
    page/URL to a Kaltura reference id (plus partner and uiconf ids)
    and delegates playback to the Kaltura extractor, attaching a
    Kaltura session token for logged-in users.
    """
    IE_NAME = 'safari'
    IE_DESC = 'safaribooksonline.com online video'
    _VALID_URL = r'''(?x)
                        https?://
                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                            (?:
                                library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html|
                                videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+)
                            )
                    '''

    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/part00.html',
        'md5': 'dcc5a425e79f2564148652616af1f2a3',
        'info_dict': {
            'id': '0_qbqx90ic',
            'ext': 'mp4',
            'title': 'Introduction to Hadoop Fundamentals LiveLessons',
            'timestamp': 1437758058,
            'upload_date': '20150724',
            'uploader_id': 'stork',
        },
    }, {
        # non-digits in course id
        'url': 'https://www.safaribooksonline.com/library/view/create-a-nodejs/100000006A0210/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/library/view/learning-path-red/9780134664057/RHCE_Introduction.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314/9780134217314-PYMC_13_00',
        'only_matching': True,
    }, {
        'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro',
        'only_matching': True,
    }, {
        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html',
        'only_matching': True,
    }]

    # Default Kaltura partner/uiconf ids, used when the page does not
    # override them.
    _PARTNER_ID = '1926081'
    _UICONF_ID = '29375172'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        reference_id = mobj.group('reference_id')
        if reference_id:
            # /videos/ URLs already carry the Kaltura reference id.
            video_id = reference_id
            partner_id = self._PARTNER_ID
            ui_id = self._UICONF_ID
        else:
            # /library/view/ URLs require scraping the chapter page;
            # the page (or its redirect target) holds the Kaltura ids.
            video_id = '%s-%s' % (mobj.group('course_id'), mobj.group('part'))

            webpage, urlh = self._download_webpage_handle(url, video_id)

            # The request may have been redirected to a /videos/ URL
            # that embeds the reference id directly.
            mobj = re.match(self._VALID_URL, urlh.geturl())
            reference_id = mobj.group('reference_id')
            if not reference_id:
                reference_id = self._search_regex(
                    r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                    webpage, 'kaltura reference id', group='id')
            partner_id = self._search_regex(
                r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                webpage, 'kaltura widget id', default=self._PARTNER_ID,
                group='id')
            ui_id = self._search_regex(
                r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                webpage, 'kaltura uiconf id', default=self._UICONF_ID,
                group='id')

        query = {
            'wid': '_%s' % partner_id,
            'uiconf_id': ui_id,
            'flashvars[referenceId]': reference_id,
        }

        if self.LOGGED_IN:
            # Authenticated users get a Kaltura session (ks) token so
            # the embed player serves the full, non-preview stream.
            kaltura_session = self._download_json(
                '%s/player/kaltura_session/?reference_id=%s' % (self._API_BASE, reference_id),
                video_id, 'Downloading kaltura session JSON',
                'Unable to download kaltura session JSON', fatal=False,
                headers={'Accept': 'application/json'})
            if kaltura_session:
                session = kaltura_session.get('session')
                if session:
                    query['flashvars[ks]'] = session

        return self.url_result(update_url_query(
            'https://cdnapisec.kaltura.com/html5/html5lib/v2.37.1/mwEmbedFrame.php', query),
            'Kaltura')
class SafariApiIE(SafariBaseIE):
    """Resolver for Safari API chapter URLs.

    Fetches the chapter's JSON descriptor and forwards its 'web_url'
    to SafariIE for the actual video extraction.
    """
    IE_NAME = 'safari:api'
    _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'

    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780134664057/chapter/RHCE_Introduction.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        course_id, part = re.match(self._VALID_URL, url).group('course_id', 'part')
        chapter = self._download_json(
            url, '%s/%s' % (course_id, part), 'Downloading part JSON')
        return self.url_result(chapter['web_url'], SafariIE.ie_key())
class SafariCourseIE(SafariBaseIE):
    """Playlist extractor for complete Safari / O'Reilly courses.

    Downloads the course descriptor from the API and yields one
    SafariApiIE entry per chapter.
    """
    IE_NAME = 'safari:course'
    IE_DESC = 'safaribooksonline.com online courses'

    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                            (?:
                                library/view/[^/]+|
                                api/v1/book|
                                videos/[^/]+
                            )|
                            techbus\.safaribooksonline\.com
                        )
                        /(?P<id>[^/]+)
                    '''

    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
        'info_dict': {
            'id': '9780133392838',
            'title': 'Hadoop Fundamentals LiveLessons',
        },
        'playlist_count': 22,
        'skip': 'Requires safaribooksonline account credentials',
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9781449396459/?override_format=json',
        'only_matching': True,
    }, {
        'url': 'http://techbus.safaribooksonline.com/9780134426365',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314',
        'only_matching': True,
    }, {
        'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838',
        'only_matching': True,
    }, {
        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific part/chapter extractors first.
        if SafariIE.suitable(url) or SafariApiIE.suitable(url):
            return False
        return super(SafariCourseIE, cls).suitable(url)

    def _real_extract(self, url):
        course_id = self._match_id(url)

        course = self._download_json(
            '%s/book/%s/?override_format=%s' % (self._API_BASE, course_id, self._API_FORMAT),
            course_id, 'Downloading course JSON')

        if 'chapters' not in course:
            raise ExtractorError(
                'No chapters found for course %s' % course_id, expected=True)

        entries = []
        for chapter_url in course['chapters']:
            entries.append(self.url_result(chapter_url, SafariApiIE.ie_key()))

        return self.playlist_result(entries, course_id, course['title'])
| unlicense | 5eb63a2fea3388073fb40df68f122e64 | 35.916667 | 167 | 0.542069 | 3.536284 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/cjsw.py | 45 | 2412 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
unescapeHTML,
)
class CJSWIE(InfoExtractor):
    """Extractor for radio episodes on cjsw.com (audio-only MP3)."""
    _VALID_URL = r'https?://(?:www\.)?cjsw\.com/program/(?P<program>[^/]+)/episode/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://cjsw.com/program/freshly-squeezed/episode/20170620',
        'md5': 'cee14d40f1e9433632c56e3d14977120',
        'info_dict': {
            'id': '91d9f016-a2e7-46c5-8dcb-7cbcd7437c41',
            'ext': 'mp3',
            'title': 'Freshly Squeezed – Episode June 20, 2017',
            'description': 'md5:c967d63366c3898a80d0c7b0ff337202',
            'series': 'Freshly Squeezed',
            'episode_id': '20170620',
        },
    }, {
        # no description
        'url': 'http://cjsw.com/program/road-pops/episode/20170707/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        program, episode_id = re.match(self._VALID_URL, url).group('program', 'id')
        # Fallback id when the UUID cannot be pulled from the audio URL.
        audio_id = '%s/%s' % (program, episode_id)

        webpage = self._download_webpage(url, episode_id)

        title = unescapeHTML(self._search_regex(
            (r'<h1[^>]+class=["\']episode-header__title["\'][^>]*>(?P<title>[^<]+)',
             r'data-audio-title=(["\'])(?P<title>(?:(?!\1).)+)\1'),
            webpage, 'title', group='title'))

        audio_url = self._search_regex(
            r'<button[^>]+data-audio-src=(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'audio url', group='url')

        # Prefer the UUID embedded in the MP3 file name as the id.
        audio_id = self._search_regex(
            r'/([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})\.mp3',
            audio_url, 'audio id', default=audio_id)

        description = self._html_search_regex(
            r'<p>(?P<description>.+?)</p>', webpage, 'description',
            default=None)
        series = self._search_regex(
            r'data-showname=(["\'])(?P<name>(?:(?!\1).)+)\1', webpage,
            'series', default=program, group='name')

        return {
            'id': audio_id,
            'title': title,
            'description': description,
            'formats': [{
                'url': audio_url,
                'ext': determine_ext(audio_url, 'mp3'),
                'vcodec': 'none',
            }],
            'series': series,
            'episode_id': episode_id,
        }
| unlicense | df2a9ab6e765ae6ab61db701fb026cf8 | 32.472222 | 95 | 0.506224 | 3.247978 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/ellentube.py | 29 | 4909 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
extract_attributes,
float_or_none,
int_or_none,
try_get,
)
class EllenTubeBaseIE(InfoExtractor):
    """Shared helpers for the ellentube.com extractors."""

    def _extract_data_config(self, webpage, video_id):
        """Parse and return the JSON config embedded in the page's
        data-component="Details"/"details" element."""
        details = self._search_regex(
            r'(<[^>]+\bdata-component=(["\'])[Dd]etails.+?></div>)', webpage,
            'details')
        return self._parse_json(
            extract_attributes(details)['data-config'], video_id)

    def _extract_video(self, data, video_id):
        """Build an info dict from an API item.

        Only the HLS ('m3u8') entry of the item's media list is used
        for formats; duration comes from the same entry.
        """
        title = data['title']

        formats = []
        duration = None
        # 'media' may be missing or null on malformed items; treat that
        # as an empty list instead of crashing with a TypeError.
        for entry in data.get('media') or []:
            if entry.get('id') == 'm3u8':
                formats = self._extract_m3u8_formats(
                    entry['url'], video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls')
                duration = int_or_none(entry.get('duration'))
                break
        self._sort_formats(formats)

        def get_insight(kind):
            # Counters live under data['insight'] keyed by the plural
            # form ('views', 'likes', ...).
            return int_or_none(try_get(
                data, lambda x: x['insight']['%ss' % kind]))

        return {
            'extractor_key': EllenTubeIE.ie_key(),
            'id': video_id,
            'title': title,
            'description': data.get('description'),
            'duration': duration,
            'thumbnail': data.get('thumbnail'),
            # publishTime is in milliseconds since the epoch.
            'timestamp': float_or_none(data.get('publishTime'), scale=1000),
            'view_count': get_insight('view'),
            'like_count': get_insight('like'),
            'formats': formats,
        }
class EllenTubeIE(EllenTubeBaseIE):
    """Extractor for single ellentube.com items, addressed either by
    API URL or by the internal 'ellentube:<uuid>' scheme."""
    _VALID_URL = r'''(?x)
                        (?:
                            ellentube:|
                            https://api-prod\.ellentube\.com/ellenapi/api/item/
                        )
                        (?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})
                    '''
    _TESTS = [{
        'url': 'https://api-prod.ellentube.com/ellenapi/api/item/0822171c-3829-43bf-b99f-d77358ae75e3',
        'md5': '2fabc277131bddafdd120e0fc0f974c9',
        'info_dict': {
            'id': '0822171c-3829-43bf-b99f-d77358ae75e3',
            'ext': 'mp4',
            'title': 'Ellen Meets Las Vegas Survivors Jesus Campos and Stephen Schuck',
            'description': 'md5:76e3355e2242a78ad9e3858e5616923f',
            'thumbnail': r're:^https?://.+?',
            'duration': 514,
            'timestamp': 1508505120,
            'upload_date': '20171020',
            'view_count': int,
            'like_count': int,
        }
    }, {
        'url': 'ellentube:734a3353-f697-4e79-9ca9-bfc3002dc1e0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Fetch the item straight from the API and build the info dict
        # via the shared helper.
        item = self._download_json(
            'https://api-prod.ellentube.com/ellenapi/api/item/%s' % video_id,
            video_id)
        return self._extract_video(item, video_id)
class EllenTubeVideoIE(EllenTubeBaseIE):
    """Resolve a public ellentube.com video page to its API item UUID
    and delegate extraction to EllenTubeIE."""
    _VALID_URL = r'https?://(?:www\.)?ellentube\.com/video/(?P<id>.+?)\.html'
    _TEST = {
        'url': 'https://www.ellentube.com/video/ellen-meets-las-vegas-survivors-jesus-campos-and-stephen-schuck.html',
        'only_matching': True,
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The embedded Details component carries the canonical item UUID.
        video_id = self._extract_data_config(webpage, display_id)['id']
        return self.url_result(
            'ellentube:%s' % video_id, ie=EllenTubeIE.ie_key(),
            video_id=video_id)
class EllenTubePlaylistIE(EllenTubeBaseIE):
    """Extract every video of an ellentube.com episode or studios page
    as a playlist."""
    _VALID_URL = r'https?://(?:www\.)?ellentube\.com/(?:episode|studios)/(?P<id>.+?)\.html'
    _TESTS = [{
        'url': 'https://www.ellentube.com/episode/dax-shepard-jordan-fisher-haim.html',
        'info_dict': {
            'id': 'dax-shepard-jordan-fisher-haim',
            'title': "Dax Shepard, 'DWTS' Team Jordan Fisher & Lindsay Arnold, HAIM",
            'description': 'md5:bfc982194dabb3f4e325e43aa6b2e21c',
        },
        'playlist_count': 6,
    }, {
        'url': 'https://www.ellentube.com/studios/macey-goes-rving0.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        data = self._extract_data_config(webpage, display_id)['data']
        # The page's data config contains a feed filter query string; the
        # feed endpoint returns a mixed list of items, of which only
        # 'VIDEO'-typed entries that carry an id are playable.
        feed = self._download_json(
            'https://api-prod.ellentube.com/ellenapi/api/feed/?%s'
            % data['filter'], display_id)
        entries = [
            self._extract_video(elem, elem['id'])
            for elem in feed if elem.get('type') == 'VIDEO' and elem.get('id')]
        return self.playlist_result(
            entries, display_id, data.get('title'),
            clean_html(data.get('description')))
| unlicense | b7a7862a315c1907a6666b94cc203dc2 | 35.909774 | 118 | 0.539417 | 3.261794 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/abc.py | 12 | 7495 | from __future__ import unicode_literals
import hashlib
import hmac
import re
import time
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
js_to_json,
int_or_none,
parse_iso8601,
try_get,
unescapeHTML,
update_url_query,
)
class ABCIE(InfoExtractor):
    """Extractor for news articles on abc.net.au (Australian
    Broadcasting Corporation).

    Articles embed media via an ``inline{Video,Audio,YouTube}Data.push``
    JavaScript call; the matched type decides whether we emit native
    formats or delegate to the YouTube extractor.
    """
    IE_NAME = 'abc.net.au'
    _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
        'md5': 'cb3dd03b18455a661071ee1e28344d9f',
        'info_dict': {
            'id': '5868334',
            'ext': 'mp4',
            'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
            'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
        },
        'skip': 'this video has expired',
    }, {
        'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
        'md5': 'db2a5369238b51f9811ad815b69dc086',
        'info_dict': {
            'id': 'NvqvPeNZsHU',
            'ext': 'mp4',
            'upload_date': '20150816',
            'uploader': 'ABC News (Australia)',
            'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
            'uploader_id': 'NewsOnABC',
            'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
        },
        'add_ie': ['Youtube'],
        'skip': 'Not accessible from Travis CI server',
    }, {
        'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
        'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
        'info_dict': {
            'id': '6880080',
            'ext': 'mp3',
            'title': 'NAB lifts interest rates, following Westpac and CBA',
            'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
        },
    }, {
        'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        mobj = re.search(
            r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
            webpage)
        if mobj is None:
            # No inline media data: either the article's media has
            # expired (surface the site's message) or the page layout is
            # unsupported.
            expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
            if expired:
                raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
            raise ExtractorError('Unable to extract video urls')

        # The pushed argument is JavaScript object syntax, hence
        # transform_source=js_to_json.
        urls_info = self._parse_json(
            mobj.group('json_data'), video_id, transform_source=js_to_json)

        if not isinstance(urls_info, list):
            urls_info = [urls_info]

        if mobj.group('type') == 'YouTube':
            return self.playlist_result([
                self.url_result(url_info['url']) for url_info in urls_info])

        formats = [{
            'url': url_info['url'],
            # Audio embeds have no video codec.
            'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
            'width': int_or_none(url_info.get('width')),
            'height': int_or_none(url_info.get('height')),
            'tbr': int_or_none(url_info.get('bitrate')),
            'filesize': int_or_none(url_info.get('filesize')),
        } for url_info in urls_info]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
class ABCIViewIE(InfoExtractor):
    """Extractor for ABC iview, the ABC's Australian catch-up TV service.

    Stream URLs must be signed: a short-lived token is obtained by
    HMAC-signing a request path and is then appended to each HLS URL as
    the ``hdnea`` query parameter.
    """
    IE_NAME = 'abc.net.au:iview'
    _VALID_URL = r'https?://iview\.abc\.net\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)'
    _GEO_COUNTRIES = ['AU']

    # ABC iview programs are normally available for 14 days only.
    _TESTS = [{
        'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00',
        'md5': '67715ce3c78426b11ba167d875ac6abf',
        'info_dict': {
            'id': 'LE1927H001S00',
            'ext': 'mp4',
            'title': "Series 11 Ep 1",
            'series': "Gruen",
            'description': 'md5:52cc744ad35045baf6aded2ce7287f67',
            'upload_date': '20190925',
            'uploader_id': 'abc1',
            'timestamp': 1569445289,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_params = self._download_json(
            'https://iview.abc.net.au/api/programs/' + video_id, video_id)
        title = unescapeHTML(video_params.get('title') or video_params['seriesTitle'])
        stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))

        # Sign the token-request path with the Android client's HMAC key.
        house_number = video_params.get('episodeHouseNumber') or video_id
        path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format(
            int(time.time()), house_number)
        sig = hmac.new(
            b'android.content.res.Resources',
            path.encode('utf-8'), hashlib.sha256).hexdigest()
        token = self._download_webpage(
            'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)

        def tokenize_url(url, token):
            # Append the auth token to a stream URL.
            return update_url_query(url, {
                'hdnea': token,
            })

        # Try qualities from best to worst until one yields formats.
        # Initialize `formats` up front: previously it was only assigned
        # inside the loop, so a playlist entry with no matching HLS
        # variant URLs crashed with a NameError at _sort_formats instead
        # of raising the proper "no formats" extractor error.
        formats = []
        for sd in ('720', 'sd', 'sd-low'):
            sd_url = try_get(
                stream, lambda x: x['streams']['hls'][sd], compat_str)
            if not sd_url:
                continue
            formats = self._extract_m3u8_formats(
                tokenize_url(sd_url, token), video_id, 'mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
            if formats:
                break
        self._sort_formats(formats)

        subtitles = {}
        src_vtt = stream.get('captions', {}).get('src-vtt')
        if src_vtt:
            subtitles['en'] = [{
                'url': src_vtt,
                'ext': 'vtt',
            }]

        is_live = video_params.get('livestream') == '1'
        if is_live:
            title = self._live_title(title)

        return {
            'id': video_id,
            'title': title,
            'description': video_params.get('description'),
            'thumbnail': video_params.get('thumbnail'),
            'duration': int_or_none(video_params.get('eventDuration')),
            'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
            'series': unescapeHTML(video_params.get('seriesTitle')),
            'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
            'season_number': int_or_none(self._search_regex(
                r'\bSeries\s+(\d+)\b', title, 'season number', default=None)),
            'episode_number': int_or_none(self._search_regex(
                r'\bEp\s+(\d+)\b', title, 'episode number', default=None)),
            'episode_id': house_number,
            'uploader_id': video_params.get('channel'),
            'formats': formats,
            'subtitles': subtitles,
            'is_live': is_live,
        }
| unlicense | dd6a42f4aed53bad8e95e90168f49c94 | 37.834197 | 259 | 0.546364 | 3.383747 | false | false | false | false |
unitedstates/congress-legislators | scripts/election_results.py | 1 | 10137 | # Update the data files according to the results of
# a general election using a spreadsheet of election
# results and prepares for a new Congress. This script
# does the following:
#
# * Adds end dates to all current leadership roles since
# leadership resets in both chambers each Congress.
# * Brings senators not up for relection and the Puerto
# Rico resident commissioner in off-years forward
# unchanged.
# * Creates new legislator entries for new people in
# the election results spreadsheet. The next available
# GovTrack ID is assigned to each new legislator.
# * Creates new terms for each election winner in the
# election results spreadsheet (incumbents and new
# legislators).
# * Clears the committee-membership-current.yaml file
# since all House and Senate committees reset at the
# start of a new Congress.
# * Clears out the social media entries for legislators
# no longer serving.
#
# Usage:
# * Save the spreadsheet to archive/election_results_{year}.csv.
# * Edit the ELECTION_YEAR constant below.
# * Make sure the legislators-{current,historical}.yaml files are
# clean -- i.e. if you've run this script, reset these files
# before running it again.
# * Run this script.
import collections, csv, re
from utils import load_data, save_data
ELECTION_YEAR = 2020
def run():
    """Apply general-election results to the legislator data files.

    Reads archive/election_results_{ELECTION_YEAR}.csv, carries forward
    members not up for election, creates entries and new terms for the
    winners, moves departing members to the historical file, recomputes
    senators' junior/senior state_rank, and clears per-Congress data
    (committee membership, stale social media via sweep.py).
    """
    # Compute helper constants.
    # The senate class up for election (class 2 in 2020, class 3 in 2022, ...).
    SENATE_CLASS = ((ELECTION_YEAR-2) % 6) // 2 + 1

    # Open existing data.
    print("Opening legislator data...")
    legislators_historical = load_data("legislators-historical.yaml")
    legislators_current = load_data("legislators-current.yaml")

    # New member data.
    party_map = { "R": "Republican", "D": "Democrat", "I": "Independent" }
    new_legislators = []

    # Only one class of senators was up for election. Mark all other
    # senators as still serving. Additionally, in off years for the
    # four-year-termed resident commissioner of Puerto Rico, mark
    # that person as still serving also.
    current = []
    for p in legislators_current:
        if p["terms"][-1]["type"] == "sen" and p["terms"][-1]["class"] != SENATE_CLASS:
            current.append(p["id"]["govtrack"])
        # NOTE(review): the comment above says "off years", but this
        # condition is true in presidential years (2020, 2024, ...) --
        # exactly the years the PR resident commissioner IS on the
        # ballot. Confirm whether `% 4 == 0` should be `% 4 == 2`.
        if p["terms"][-1]["state"] == "PR" and (ELECTION_YEAR % 4 == 0):
            current.append(p["id"]["govtrack"])

    # Map govtrack IDs to exiting legislators.
    govtrack_id_map = { }
    for entry in legislators_historical + legislators_current:
        govtrack_id_map[entry['id']['govtrack']] = entry

    # Get highest existing GovTrack ID to know where to start for assigning new IDs.
    next_govtrack_id = max(p['id']['govtrack'] for p in (legislators_historical+legislators_current))

    # Load spreadsheet of Senate election results.
    print("Applying election results...")
    election_results = csv.DictReader(open("archive/election_results_{year}.csv".format(year=ELECTION_YEAR)))
    for row in election_results:
        if row['Race'] == "": break # end of spreadsheet

        # Get state and district from race code. An empty
        # district means a senate race.
        state, district = re.match(r"^([A-Z]{2})(\d*)$", row["Race"]).groups()

        if row['GovTrack ID'] != "":
            # Use the GovTrack ID to get the legislator who won, which might be
            # the incumbent or a representative elected to the senate, or a
            # someone who used to serve in Congress, etc.
            p = govtrack_id_map[int(row['GovTrack ID'])]
        elif row['Incumbent Win? Y/N'] == 'Y':
            # Use the race code to get the legislator who won.
            incumbent = [p for p in legislators_current
                if p["terms"][-1]["type"] == ("sen" if district == "" else "rep")
                and p["terms"][-1]["state"] == state
                and ((p["terms"][-1]["district"] == int(district)) if district != ""
                    else (p["terms"][-1]["class"] == SENATE_CLASS))
            ]
            if len(incumbent) != 1:
                raise ValueError("Could not find incumbent.")
            p = incumbent[0]
        elif row['Incumbent Win? Y/N'] == 'N':
            # Make a new legislator entry.
            next_govtrack_id += 1
            p = collections.OrderedDict([
                ("id", collections.OrderedDict([
                    #("bioguide", row['Bioguide ID']),
                    ("fec", [row['FEC.gov ID']]),
                    ("govtrack", next_govtrack_id),
                    #("opensecrets", None), # don't know yet
                    #("votesmart", int(row['votesmart'])), # not doing this anymore
                    #("wikipedia", row['Wikipedia Page Name']), # will convert from Wikidata
                    ("wikidata", row['Wikidata ID']),
                    #("ballotpedia", row['Ballotpedia Page Name']),
                ])),
                ("name", collections.OrderedDict([
                    ("first", row['First Name']),
                    ("middle", row['Middle Name']),
                    ("last", row['Last Name']),
                    ("suffix", row['Suffix']),
                    #("official_full", mi.find('official-name').text), #not available yet
                ])),
                ("bio", collections.OrderedDict([
                    ("gender", row['Gender (M/F)']),
                    ("birthday", row['Birthday (YYYY-MM-DD)']),
                ])),
                ("terms", []),
            ])

            # Delete name keys that were filled with Nones.
            for k in list(p["name"]): # clone key list before modifying dict
                if not p["name"][k]:
                    del p["name"][k]

            new_legislators.append(p)
        else:
            # There is no winner in this election. The incumbent
            # will not be marked as still serving, so they'll
            # be moved to the historical file, and no person will
            # be added for this race.
            print("No election result for", row["Race"], row["Incumbent Win? Y/N"])
            continue

        # Add to array marking this legislator as currently serving.
        current.append(p['id']['govtrack'])

        # Add a new term.
        if district == "": # Senate race
            term = collections.OrderedDict([
                ("type", "sen"),
                ("start", "{next_year}-01-03".format(next_year=ELECTION_YEAR+1)),
                ("end", "{in_six_years}-01-03".format(in_six_years=ELECTION_YEAR+1+6)),
                ("state", state),
                ("class", SENATE_CLASS),
                ("state_rank", None), # computed later
            ])
        else:
            term = collections.OrderedDict([
                ("type", "rep"),
                ("start", "{next_year}-01-03".format(next_year=ELECTION_YEAR+1)),
                ("end", "{in_two_years}-01-03".format(in_two_years=ELECTION_YEAR+1+2)),
                ("state", state),
                ("district", int(district)),
            ])

        # If party is given in the table (for some incumbents and
        # all new winners), use it. Otherwise just make a field so
        # it's in the right order.
        term.update(collections.OrderedDict([
            ("party", party_map[row['Party']] if row['Party'] else None),
        ]))

        p['terms'].append(term)

        if term['party'] == "Independent":
            term["caucus"] = row['Caucus']

        if len(p['terms']) > 1 and p["terms"][-2]["type"] == term["type"]:
            # This is an incumbent (or at least served in the same chamber previously).
            # Copy some fields forward that are likely to remain the same, if we
            # haven't already set them.
            for k in ('party', 'url', 'rss_url'):
                if k in p['terms'][-2] and not term.get(k):
                    term[k] = p['terms'][-2][k]

    # End any current leadership roles since leadership resets each Congress.
    for p in legislators_current:
        for r in p.get('leadership_roles', []):
            if not r.get('end'):
                r['end'] = "{next_year}-01-03".format(next_year=ELECTION_YEAR+1)

    # Split the legislators back into the historical and current lists:

    # Move previously-current legislators into the historical list
    # if they are no longer serving, in the order that they appear
    # in the current list.
    for p in legislators_current:
        if p["id"]["govtrack"] not in current:
            legislators_historical.append(p)
    legislators_current = [p for p in legislators_current if p['id']['govtrack'] in current]

    # Move former legislators forward into the current list if they
    # are returning to Congress, in the order they appear in the
    # historical list.
    for p in legislators_historical:
        if p["id"]["govtrack"] in current:
            legislators_current.append(p)
    legislators_historical = [p for p in legislators_historical if p['id']['govtrack'] not in current]

    # Add new legislators in the order they occur in the election
    # results spreadsheet.
    for p in new_legislators:
        legislators_current.append(p)

    # Re-compute the state_rank junior/senior status of all senators.
    # We'll get this authoritatively from the Senate by senate_contacts.py
    # once that data is up, but we'll make an educated guess now.
    state_rank_assignment = set()
    # Senior senators not up for re-election keep their status:
    for p in legislators_current:
        term = p['terms'][-1]
        if term['type'] == 'sen' and term['class'] != SENATE_CLASS and term['state_rank'] == 'senior':
            state_rank_assignment.add(p['terms'][-1]['state'])
    # Senior senators who won re-election pull their status forward:
    for p in legislators_current:
        term = p['terms'][-1]
        if term['state'] in state_rank_assignment: continue # we already assigned the senior senator
        if term['type'] == 'sen' and term['class'] == SENATE_CLASS and len(p['terms']) > 1 \
            and p['terms'][-2]['type'] == 'sen' and p['terms'][-2]['state'] == term['state'] and p['terms'][-2]['state_rank'] == 'senior':
            term['state_rank'] = 'senior'
            state_rank_assignment.add(p['terms'][-1]['state'])
    # Junior senators not up for re-election become senior if we didn't see a senior senator yet:
    for p in legislators_current:
        term = p['terms'][-1]
        if term['state'] in state_rank_assignment: continue # we already assigned the senior senator
        if term['type'] == 'sen' and term['class'] != SENATE_CLASS and term['state_rank'] == 'junior':
            term['state_rank'] = 'senior'
            state_rank_assignment.add(p['terms'][-1]['state'])
    # Remaining senators are senior if we haven't seen a senior senator yet, else junior:
    for p in legislators_current:
        term = p['terms'][-1]
        if term['type'] == 'sen' and term['state_rank'] is None:
            if term['state'] not in state_rank_assignment:
                term['state_rank'] = 'senior'
                state_rank_assignment.add(term['state'])
            else:
                term['state_rank'] = 'junior'

    # Save.
    print("Saving legislator data...")
    save_data(legislators_current, "legislators-current.yaml")
    save_data(legislators_historical, "legislators-historical.yaml")

    # Run the sweep script to clear out data that needs to be cleared out
    # for legislators that are gone.
    import sweep
    sweep.run()

    # Clears committee membership.
    save_data([], "committee-membership-current.yaml")

if __name__ == "__main__":
    run()
| cc0-1.0 | 045958499a61f9230d910eca89c8c0da | 39.22619 | 129 | 0.662425 | 3.112373 | false | false | false | false |
unitedstates/congress-legislators | scripts/social_media.py | 1 | 18271 | #!/usr/bin/env python
# run with --sweep (or by default):
# given a service, looks through current members for those missing an account on that service,
# and checks that member's official website's source code for mentions of that service.
# A CSV of "leads" is produced for manual review.
#
# run with --update:
# reads the CSV produced by --sweep back in and updates the YAML accordingly.
#
# run with --clean:
# removes legislators from the social media file who are no longer current
#
# run with --verify:
# verifies that current usernames are still valid. (tries to catch renames)
#
# run with --resolveyt:
# finds both YouTube usernames and channel IDs and updates the YAML accordingly.
# run with --resolvetw:
# for entries with `twitter` but not `twitter_id`
# resolves Twitter screen_names to Twitter IDs and updates the YAML accordingly
# other options:
# --service (required): "twitter", "youtube", "facebook", or "instagram"
# --bioguide: limit to only one particular member
# --email:
# in conjunction with --sweep, send an email if there are any new leads, using
# settings in scripts/email/config.yml (if it was created and filled out).
# uses a CSV at data/social_media_blacklist.csv to exclude known non-individual account names
import csv, json, re
import utils
from utils import load_data, save_data
import requests
import time
def main():
regexes = {
"youtube": [
"(?:https?:)?//(?:www\\.)?youtube.com/embed/?\?(list=[^\\s\"/\\?#&']+)",
"(?:https?:)?//(?:www\\.)?youtube.com/channel/([^\\s\"/\\?#']+)",
"(?:https?:)?//(?:www\\.)?youtube.com/(?:subscribe_widget\\?p=)?(?:subscription_center\\?add_user=)?(?:user/)?([^\\s\"/\\?#']+)"
],
"facebook": [
"\\('facebook.com/([^']+)'\\)",
"(?:https?:)?//(?:www\\.)?facebook.com/(?:home\\.php)?(?:business/dashboard/#/)?(?:government)?(?:#!/)?(?:#%21/)?(?:#/)?pages/[^/]+/(\\d+)",
"(?:https?:)?//(?:www\\.)?facebook.com/(?:profile.php\\?id=)?(?:home\\.php)?(?:#!)?/?(?:people)?/?([^/\\s\"#\\?&']+)"
],
"twitter": [
"(?:https?:)?//(?:www\\.)?twitter.com/(?:intent/user\?screen_name=)?(?:#!/)?(?:#%21/)?@?([^\\s\"'/?]+)",
"\\.render\\(\\)\\.setUser\\('@?(.*?)'\\)\\.start\\(\\)"
],
"instagram": [
"instagram.com/(\w{3,})"
]
}
email_enabled = utils.flags().get('email', False)
debug = utils.flags().get('debug', False)
do_update = utils.flags().get('update', False)
do_clean = utils.flags().get('clean', False)
do_verify = utils.flags().get('verify', False)
do_resolveyt = utils.flags().get('resolveyt', False)
do_resolveig = utils.flags().get('resolveig', False)
do_resolvetw = utils.flags().get('resolvetw', False)
# default to not caching
cache = utils.flags().get('cache', False)
force = not cache
if do_resolveyt:
service = "youtube"
elif do_resolveig:
service = "instagram"
elif do_resolvetw:
service = "twitter"
else:
service = utils.flags().get('service', None)
if service not in ["twitter", "youtube", "facebook", "instagram"]:
print("--service must be one of twitter, youtube, facebook, or instagram")
exit(0)
# load in members, orient by bioguide ID
print("Loading current legislators...")
current = load_data("legislators-current.yaml")
current_bioguide = { }
for m in current:
if "bioguide" in m["id"]:
current_bioguide[m["id"]["bioguide"]] = m
print("Loading blacklist...")
blacklist = {
'twitter': [], 'facebook': [], 'youtube': [], 'instagram': []
}
for rec in csv.DictReader(open("data/social_media_blacklist.csv")):
blacklist[rec["service"]].append(rec["pattern"])
print("Loading whitelist...")
whitelist = {
'twitter': [], 'facebook': [], 'youtube': []
}
for rec in csv.DictReader(open("data/social_media_whitelist.csv")):
whitelist[rec["service"]].append(rec["account"].lower())
# reorient currently known social media by ID
print("Loading social media...")
media = load_data("legislators-social-media.yaml")
media_bioguide = { }
for m in media:
media_bioguide[m["id"]["bioguide"]] = m
def resolveyt():
# To avoid hitting quota limits, register for a YouTube 2.0 API key at
# https://code.google.com/apis/youtube/dashboard
# and put it below
api_file = open('cache/youtube_api_key','r')
api_key = api_file.read()
bioguide = utils.flags().get('bioguide', None)
updated_media = []
for m in media:
if bioguide and (m['id']['bioguide'] != bioguide):
updated_media.append(m)
continue
social = m['social']
if ('youtube' in social) or ('youtube_id' in social):
if 'youtube' not in social:
social['youtube'] = social['youtube_id']
ytid = social['youtube']
profile_url = ("https://gdata.youtube.com/feeds/api/users/%s"
"?v=2&prettyprint=true&alt=json&key=%s" % (ytid, api_key))
try:
print("Resolving YT info for %s" % social['youtube'])
ytreq = requests.get(profile_url)
# print "\tFetched with status code %i..." % ytreq.status_code
if ytreq.status_code == 404:
# If the account name isn't valid, it's probably a redirect.
try:
# Try to scrape the real YouTube username
print("\Scraping YouTube username")
search_url = ("https://www.youtube.com/%s" % social['youtube'])
csearch = requests.get(search_url).text.encode('ascii','ignore')
u = re.search(r'<a[^>]*href="[^"]*/user/([^/"]*)"[.]*>',csearch)
if u:
print("\t%s maps to %s" % (social['youtube'],u.group(1)))
social['youtube'] = u.group(1)
profile_url = ("https://gdata.youtube.com/feeds/api/users/%s"
"?v=2&prettyprint=true&alt=json" % social['youtube'])
print("\tFetching GData profile...")
ytreq = requests.get(profile_url)
print("\tFetched GData profile")
else:
raise Exception("Couldn't figure out the username format for %s" % social['youtube'])
except:
print("\tCouldn't locate YouTube account")
raise
ytobj = ytreq.json()
social['youtube_id'] = ytobj['entry']['yt$channelId']['$t']
print("\tResolved youtube_id to %s" % social['youtube_id'])
# even though we have their channel ID, do they also have a username?
if ytobj['entry']['yt$username']['$t'] != ytobj['entry']['yt$userId']['$t']:
if social['youtube'].lower() != ytobj['entry']['yt$username']['$t'].lower():
# YT accounts are case-insensitive. Preserve capitalization if possible.
social['youtube'] = ytobj['entry']['yt$username']['$t']
print("\tAdded YouTube username of %s" % social['youtube'])
else:
print("\tYouTube says they do not have a separate username")
del social['youtube']
except:
print("Unable to get YouTube Channel ID for: %s" % social['youtube'])
updated_media.append(m)
print("Saving social media...")
save_data(updated_media, "legislators-social-media.yaml")
def resolveig():
# in order to preserve the comment block at the top of the file,
# copy it over into a new RtYamlList instance. We do this because
# Python list instances can't hold other random attributes.
import rtyaml
updated_media = rtyaml.RtYamlList()
if hasattr(media, '__initial_comment_block'):
updated_media.__initial_comment_block = getattr(media, '__initial_comment_block')
client_id_file = open('cache/instagram_client_id','r')
client_id = client_id_file.read()
bioguide = utils.flags().get('bioguide', None)
for m in media:
if bioguide and (m['id']['bioguide'] != bioguide):
updated_media.append(m)
continue
social = m['social']
if 'instagram' not in social and 'instagram_id' not in social:
updated_media.append(m)
continue
instagram_handle = social['instagram']
query_url = "https://api.instagram.com/v1/users/search?q={query}&client_id={client_id}".format(query=instagram_handle,client_id=client_id)
instagram_user_search = requests.get(query_url).json()
for user in instagram_user_search['data']:
time.sleep(0.5)
if user['username'] == instagram_handle:
m['social']['instagram_id'] = int(user['id'])
print("matched instagram_id {instagram_id} to {instagram_handle}".format(instagram_id=social['instagram_id'],instagram_handle=instagram_handle))
updated_media.append(m)
save_data(updated_media, "legislators-social-media.yaml")
def resolvetw():
"""
Does two batch lookups:
1. All entries with `twitter_id`: Checks to see if the corresponding Twitter profile has the same screen_name
as found in the entry's `twitter`. If not, the `twitter` value is updated.
2. All entries with `twitter` (but not `twitter_id`): fetches the corresponding Twitter profile by screen_name and
inserts ID. If no profile is found, the `twitter` value is deleted.
Note: cache/twitter_client_id must be a formatted JSON dict:
{
"consumer_secret": "xyz",
"access_token": "abc",
"access_token_secret": "def",
"consumer_key": "jk"
}
"""
import rtyaml
from social.twitter import get_api, fetch_profiles
updated_media = rtyaml.RtYamlList()
if hasattr(media, '__initial_comment_block'):
updated_media.__initial_comment_block = getattr(media, '__initial_comment_block')
client_id_file = open('cache/twitter_client_id', 'r')
_c = json.load(client_id_file)
api = get_api(_c['access_token'], _c['access_token_secret'], _c['consumer_key'], _c['consumer_secret'])
bioguide = utils.flags().get('bioguide', None)
lookups = {'screen_names': [], 'ids': []} # store members that have `twitter` or `twitter_id` info
for m in media:
# we start with appending to updated_media so that we keep the same order of entries
# as found in the loaded file
updated_media.append(m)
if bioguide and (m['id']['bioguide'] != bioguide):
continue
social = m['social']
# now we add entries to either the `ids` or the `screen_names` list to batch lookup
if 'twitter_id' in social:
# add to the queue to be batched-looked-up
lookups['ids'].append(m)
# append
elif 'twitter' in social:
lookups['screen_names'].append(m)
#######################################
# perform Twitter batch lookup for ids:
if lookups['screen_names']:
arr = lookups['screen_names']
print("Looking up Twitter ids for", len(arr), "names.")
tw_names = [m['social']['twitter'] for m in arr]
tw_profiles = fetch_profiles(api, screen_names = tw_names)
for m in arr:
social = m['social']
# find profile that corresponds to a given screen_name
twitter_handle = social['twitter']
twp = next((p for p in tw_profiles if p['screen_name'].lower() == twitter_handle.lower()), None)
if twp:
m['social']['twitter_id'] = int(twp['id'])
print("Matched twitter_id `%s` to `%s`" % (social['twitter_id'], twitter_handle))
else:
# Remove errant Twitter entry for now
print("No Twitter user profile for:", twitter_handle)
m['social'].pop('twitter')
print("\t ! removing Twitter handle:", twitter_handle)
##########################################
# perform Twitter batch lookup for names by id, to update any renamings:
if lookups['ids']:
arr = lookups['ids']
print("Looking up Twitter screen_names for", len(arr), "ids.")
tw_ids = [m['social']['twitter_id'] for m in arr]
tw_profiles = fetch_profiles(api, ids = tw_ids)
any_renames_needed = False
for m in arr:
social = m['social']
# find profile that corresponds to a given screen_name
t_id = social['twitter_id']
t_name = social.get('twitter')
twp = next((p for p in tw_profiles if int(p['id']) == t_id), None)
if twp:
# Be silent if there is no change to screen name
if t_name and (twp['screen_name'].lower() == t_name.lower()):
pass
else:
any_renames_needed = True
m['social']['twitter'] = twp['screen_name']
print("For twitter_id `%s`, renamed `%s` to `%s`" % (t_id, t_name, m['social']['twitter']))
else:
# No entry found for this twitter id
print("No Twitter user profile for %s, %s" % (t_id, t_name))
m['social'].pop('twitter_id')
print("\t ! removing Twitter id:", t_id)
if not any_renames_needed:
print("No renames needed")
# all done with Twitter
save_data(updated_media, "legislators-social-media.yaml")
def sweep():
to_check = []
bioguide = utils.flags().get('bioguide', None)
if bioguide:
possibles = [bioguide]
else:
possibles = list(current_bioguide.keys())
for bioguide in possibles:
if media_bioguide.get(bioguide, None) is None:
to_check.append(bioguide)
elif (media_bioguide[bioguide]["social"].get(service, None) is None) and \
(media_bioguide[bioguide]["social"].get(service + "_id", None) is None):
to_check.append(bioguide)
else:
pass
utils.mkdir_p("cache/social_media")
writer = csv.writer(open("cache/social_media/%s_candidates.csv" % service, 'w'))
writer.writerow(["bioguide", "official_full", "website", "service", "candidate", "candidate_url"])
if len(to_check) > 0:
rows_found = []
for bioguide in to_check:
candidate = candidate_for(bioguide)
if candidate:
url = current_bioguide[bioguide]["terms"][-1].get("url", None)
candidate_url = "https://%s.com/%s" % (service, candidate)
row = [bioguide, current_bioguide[bioguide]['name']['official_full'].encode('utf-8'), url, service, candidate, candidate_url]
writer.writerow(row)
print("\tWrote: %s" % candidate)
rows_found.append(row)
if email_enabled and len(rows_found) > 0:
email_body = "Social media leads found:\n\n"
for row in rows_found:
email_body += ("%s\n" % row)
utils.send_email(email_body)
def verify():
bioguide = utils.flags().get('bioguide', None)
if bioguide:
to_check = [bioguide]
else:
to_check = list(media_bioguide.keys())
for bioguide in to_check:
entry = media_bioguide[bioguide]
current = entry['social'].get(service, None)
if not current:
continue
bioguide = entry['id']['bioguide']
candidate = candidate_for(bioguide, current)
if not candidate:
# if current is in whitelist, and none is on the page, that's okay
if current.lower() in whitelist[service]:
continue
else:
candidate = ""
url = current_bioguide[bioguide]['terms'][-1].get('url')
if current.lower() != candidate.lower():
print("[%s] mismatch on %s - %s -> %s" % (bioguide, url, current, candidate))
def update():
for rec in csv.DictReader(open("cache/social_media/%s_candidates.csv" % service)):
bioguide = rec["bioguide"]
candidate = rec["candidate"]
if bioguide in media_bioguide:
media_bioguide[bioguide]['social'][service] = candidate
else:
new_media = {'id': {}, 'social': {}}
new_media['id']['bioguide'] = bioguide
thomas_id = current_bioguide[bioguide]['id'].get("thomas", None)
govtrack_id = current_bioguide[bioguide]['id'].get("govtrack", None)
if thomas_id:
new_media['id']['thomas'] = thomas_id
if govtrack_id:
new_media['id']['govtrack'] = govtrack_id
new_media['social'][service] = candidate
media.append(new_media)
print("Saving social media...")
save_data(media, "legislators-social-media.yaml")
# if it's a youtube update, always do the resolve
# if service == "youtube":
# resolveyt()
def clean():
print("Loading historical legislators...")
historical = load_data("legislators-historical.yaml")
count = 0
for m in historical:
if m["id"]["bioguide"] in media_bioguide:
media.remove(media_bioguide[m["id"]["bioguide"]])
count += 1
print("Removed %i out of office legislators from social media file..." % count)
print("Saving historical legislators...")
save_data(media, "legislators-social-media.yaml")
def candidate_for(bioguide, current=None):
    """Find the most likely account for `service` on a member's official site.

    Downloads (with caching) the member's website and scans it with the
    service's regexes. If `current` is passed, the candidate will match it
    if found; otherwise the first non-blacklisted match is returned.

    Returns None when the member has no website, the download fails, or no
    acceptable match is found.
    """
    url = current_bioguide[bioguide]["terms"][-1].get("url", None)
    if not url:
        if debug:
            print("[%s] No official website, skipping" % bioguide)
        return None

    if debug:
        print("[%s] Downloading..." % bioguide)
    cache = "congress/%s.html" % bioguide
    body = utils.download(url, cache, force, {'check_redirects': True})
    if not body:
        return None

    all_matches = []
    for regex in regexes[service]:
        matches = re.findall(regex, body, re.I)
        if matches:
            all_matches.extend(matches)

    # Prefer the account already on file, if the page confirms it.
    # (Fixed: was `if not current == None` -- use an identity check for None.)
    if current is not None and current in all_matches:
        return current

    # Otherwise take the first match that doesn't hit the blacklist.
    for candidate in all_matches:
        if any(re.search(blacked, candidate, re.I)
               for blacked in blacklist[service]):
            if debug:
                print("\tBlacklisted: %s" % candidate)
            continue
        return candidate
    return None
# Dispatch on the command-line flags parsed earlier in the file; the default
# action (no flag) is a full sweep.
# NOTE(review): indentation was lost in this dump; this dispatch most likely
# sits inside a function body (e.g. main()) in the original file -- confirm
# against the upstream script before relying on the top-level layout here.
if do_update:
    update()
elif do_clean:
    clean()
elif do_verify:
    verify()
elif do_resolveyt:
    resolveyt()
elif do_resolveig:
    resolveig()
elif do_resolvetw:
    resolvetw()
else:
    sweep()

if __name__ == '__main__':
    main()
| cc0-1.0 | d3214da9d157e8b34477fb83d54cd776 | 35.251984 | 154 | 0.596245 | 3.534043 | false | false | false | false |
eliben/code-for-blog | 2011/socket_client_thread_sample/sampleguiclient.py | 1 | 3818 | """
Sample GUI using SocketClientThread for socket communication, while doing other
stuff in parallel.
Eli Bendersky (eliben@gmail.com)
This code is in the public domain
"""
import os, sys, time
import Queue
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from socketclientthread import SocketClientThread, ClientCommand, ClientReply
SERVER_ADDR = 'localhost', 50007
class CircleWidget(QWidget):
    """Widget that paints an animated set of concentric, fading circles.

    Call next() periodically (e.g. from a QTimer) to advance the animation.
    """
    def __init__(self, parent=None):
        super(CircleWidget, self).__init__(parent)
        self.nframe = 0  # animation frame counter
        self.setBackgroundRole(QPalette.Base)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

    def minimumSizeHint(self):
        return QSize(50, 50)

    def sizeHint(self):
        return QSize(180, 180)

    def next(self):
        """Advance one animation frame and schedule a repaint."""
        self.nframe += 1
        self.update()

    def paintEvent(self, event):
        p = QPainter(self)
        p.setRenderHint(QPainter.Antialiasing, True)
        # Paint relative to the widget's center.
        p.translate(self.width() / 2, self.height() / 2)

        for diameter in range(0, 64, 9):
            delta = abs((self.nframe % 64) - diameter / 2)
            alpha = 255 - (delta * delta) / 4 - diameter
            if alpha <= 0:
                continue  # fully faded out at this radius
            p.setPen(QPen(QColor(0, diameter / 2, 127, alpha), 3))
            p.drawEllipse(QRectF(
                -diameter / 2.0, -diameter / 2.0, diameter, diameter))
class LogWidget(QTextBrowser):
    """Text browser with a light-blue background, used as a log pane."""
    def __init__(self, parent=None):
        super(LogWidget, self).__init__(parent)
        pal = QPalette()
        pal.setColor(QPalette.Base, QColor("#ddddfd"))
        self.setPalette(pal)
class SampleGUIClientWindow(QMainWindow):
    """Main window: an animated circle, a 'Do it!' button and a log pane.

    Owns a SocketClientThread and polls its reply queue on a timer, so the
    GUI stays responsive while socket work happens in the background.
    """
    def __init__(self, parent=None):
        super(SampleGUIClientWindow, self).__init__(parent)
        self.create_main_frame()
        self.create_client()
        self.create_timers()

    def create_main_frame(self):
        self.circle_widget = CircleWidget()
        self.doit_button = QPushButton('Do it!')
        self.doit_button.clicked.connect(self.on_doit)
        self.log_widget = LogWidget()

        layout = QHBoxLayout()
        for widget in (self.circle_widget, self.doit_button, self.log_widget):
            layout.addWidget(widget)

        central = QWidget()
        central.setLayout(layout)
        self.setCentralWidget(central)

    def create_client(self):
        self.client = SocketClientThread()
        self.client.start()

    def create_timers(self):
        # Drives the circle animation.
        self.circle_timer = QTimer(self)
        self.circle_timer.timeout.connect(self.circle_widget.next)
        self.circle_timer.start(25)
        # Polls the client thread's reply queue.
        self.client_reply_timer = QTimer(self)
        self.client_reply_timer.timeout.connect(self.on_client_reply_timer)
        self.client_reply_timer.start(100)

    def on_doit(self):
        # Queue a full connect / send / receive / close round trip.
        for cmd in (ClientCommand(ClientCommand.CONNECT, SERVER_ADDR),
                    ClientCommand(ClientCommand.SEND, 'hello'),
                    ClientCommand(ClientCommand.RECEIVE),
                    ClientCommand(ClientCommand.CLOSE)):
            self.client.cmd_q.put(cmd)

    def on_client_reply_timer(self):
        try:
            reply = self.client.reply_q.get(block=False)
            status = "SUCCESS" if reply.type == ClientReply.SUCCESS else "ERROR"
            self.log('Client reply %s: %s' % (status, reply.data))
        except Queue.Empty:
            pass  # no reply pending; try again on the next tick

    def log(self, msg):
        timestamp = '[%010.3f]' % time.clock()
        self.log_widget.append(timestamp + ' ' + str(msg))
#-------------------------------------------------------------------------------
if __name__ == "__main__":
    # Create the Qt application, show the main window, and enter the event loop.
    app = QApplication(sys.argv)
    win = SampleGUIClientWindow()
    win.show()
    app.exec_()
| unlicense | 4427ebf3d4dc16dac732832327f96a88 | 30.04065 | 80 | 0.608696 | 3.754179 | false | false | false | false |
eliben/code-for-blog | 2013/twisted_irc_testbot.py | 13 | 3414 | #-------------------------------------------------------------------------------
# twisted_irc_testbot.py
#
# A sample IRC bot based on the example in Twisted's docs.
#
# Eli Bendersky (eliben@gmail.com)
# Last updated: 2013.01.27
# This code is in the public domain
#-------------------------------------------------------------------------------
import argparse
import sys
from twisted.internet import reactor, protocol
from twisted.python import log
from twisted.words.protocols import irc
class TestBot(irc.IRCClient):
    """A minimal IRC bot: joins a channel, greets it, and replies to
    private messages and messages addressed to its nickname.
    """
    def __init__(self, channel, nickname, password):
        self.channel = channel
        self.nickname = nickname
        self.password = password

    def connectionMade(self):
        irc.IRCClient.connectionMade(self)
        log.msg("[connected]")

    def connectionLost(self, reason):
        irc.IRCClient.connectionLost(self, reason)
        log.msg("[disconnected]")

    def signedOn(self):
        """Called after successfully signing on to the server."""
        self.join(self.channel)

    def joined(self, channel):
        """Called when I finish joining a channel.

        channel has the starting character intact.
        """
        log.msg("[I have joined %s]" % channel)
        self.msg(channel, "user1: bonbon")

    def privmsg(self, user, channel, msg):
        """Called when I have a message from a user to me or a channel."""
        user = user.split('!', 1)[0]
        log.msg("<%s> %s" % (user, msg))
        if channel == self.nickname:
            # A private message sent straight to the bot.
            self.msg(user, 'Thanks for the private message')
        elif msg.startswith(self.nickname + ":"):
            # A channel message addressed to the bot by nickname.
            reply = "%s: I am a bot" % user
            self.msg(channel, reply)
            log.msg("<%s> %s" % (self.nickname, reply))

    def lineReceived(self, line):
        """Low level LineReceiver callback, used for debugging..."""
        log.msg('>> %s' % line)
        # Twisted's classes are old-style, so no super(), oh my...
        irc.IRCClient.lineReceived(self, line)
class TestBotFactory(protocol.ClientFactory):
    """Factory that builds TestBot protocol instances for outgoing connections."""

    def __init__(self, channel, nickname, password):
        self.channel = channel
        self.nickname = nickname
        self.password = password

    def buildProtocol(self, addr):
        # One fresh TestBot per connection, sharing the factory's settings.
        return TestBot(self.channel, self.nickname, self.password)

    def clientConnectionLost(self, connector, reason):
        # NOTE(review): stopping the reactor and then calling
        # connector.connect() looks contradictory -- the reconnect cannot run
        # once the reactor is stopped. Confirm whether a reconnect or a clean
        # shutdown was intended here.
        reactor.stop()
        connector.connect()

    def clientConnectionFailed(self, connector, reason):
        # Connection could not be established at all: shut down.
        reactor.stop()
if __name__ == '__main__':
    # Parse connection options, start logging to stdout, and run the bot.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--server', help='the server to connect to',
                           default='localhost')
    # Fixed: without type=int a user-supplied --port arrives as a string,
    # which reactor.connectTCP rejects (the default 6667 was already an int).
    argparser.add_argument('--port', help='TCP port', type=int,
                           default=6667)
    argparser.add_argument('--channel', help='channel/room name to join',
                           default='room')
    argparser.add_argument('--nickname', default='user1')
    argparser.add_argument('--password', default='pass1')
    args = argparser.parse_args()

    log.startLogging(sys.stdout)

    # Create client, connect to server and run
    f = TestBotFactory(args.channel, args.nickname, args.password)
    reactor.connectTCP(args.server, args.port, f)
    reactor.run()
| unlicense | d2d8709bd39b1bc3897f3d3aa3e2c9d2 | 32.80198 | 80 | 0.591095 | 4.204433 | false | false | false | false |
eliben/code-for-blog | 2018/type-inference/parser.py | 1 | 7046 | # EBNF specification for micro-ML. { x } means zero or more repetitions of x.
#
# The top-level is decl.
#
# decl: ID { ID } '=' expr
#
# expr: INT
# | bool
# | ID
# | ID '(' { expr ',' } ')'
# | '(' expr ')'
# | expr op expr
# | 'if' expr 'then' expr 'else' expr
# | 'lambda' { ID } '->' expr
#
# op: + | * | - | == | > | >= | <= | < | !=
# bool: 'true' | 'false'
#
# ID: identifier
# INT: an integer
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import ast
import lexer
class ParseError(Exception):
    """Raised when the parser encounters invalid micro-ML input."""
class Parser:
    """Parser for micro-ML.

    The only public method here is parse_decl that parses a 'decl' from a
    string. Usage:

        p = Parser()
        decl = p.parse_decl(<some micro-ML code>)
        # decl is now an ast.Decl node

    parse_decl() can be called multiple times with the same parser to parse
    multiple decls (state is wiped out between calls).
    """
    def __init__(self):
        # Regex patterns are raw strings: sequences like \d or \+ inside a
        # plain string literal are invalid escapes in Python 3.
        lex_rules = (
            ('if',              'IF'),
            ('then',            'THEN'),
            ('else',            'ELSE'),
            ('true',            'TRUE'),
            ('false',           'FALSE'),
            ('lambda',          'LAMBDA'),
            (r'\d+',            'INT'),
            ('->',              'ARROW'),
            ('!=',              '!='),
            ('==',              '=='),
            ('>=',              '>='),
            ('<=',              '<='),
            ('<',               '<'),
            ('>',               '>'),
            (r'\+',             '+'),
            (r'\-',             '-'),
            (r'\*',             '*'),
            (r'\(',             '('),
            (r'\)',             ')'),
            ('=',               '='),
            (',',               ','),
            (r'[a-zA-Z_]\w*',   'ID'),
        )
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self.cur_token = None
        self.operators = {'!=', '==', '>=', '<=', '<', '>', '+', '-', '*'}

    def parse_decl(self, text):
        """Parse declaration given in text and return an AST node for it."""
        self.lexer.input(text)
        self._get_next_token()
        decl = self._decl()
        # A sentinel token with type None marks end of input; anything else
        # left over is trailing junk. (Fixed: was `!= None`.)
        if self.cur_token.type is not None:
            self._error('Unexpected token "{}" (at #{})'.format(
                self.cur_token.val, self.cur_token.pos))
        return decl

    def _error(self, msg):
        raise ParseError(msg)

    def _get_next_token(self):
        """Advances the parser's internal lexer to the next token.

        This method doesn't return anything; it assigns self.cur_token to the
        next token in the input stream.
        """
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                # End of input: use a sentinel token with all-None fields.
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError as e:
            self._error('Lexer error at position {}: {}'.format(e.pos, e))

    def _match(self, type):
        """ The 'match' primitive of RD parsers.

        * Verifies that the current token is of the given type
        * Returns the value of the current token
        * Reads in the next token
        """
        if self.cur_token.type == type:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched {} (found {})'.format(type,
                                                         self.cur_token.type))

    def _decl(self):
        """decl: ID { ID } '=' expr"""
        name = self._match('ID')
        argnames = []

        # If we have arguments, collect them. Only IDs allowed here.
        while self.cur_token.type == 'ID':
            argnames.append(self.cur_token.val)
            self._get_next_token()

        self._match('=')
        expr = self._expr()
        if len(argnames) > 0:
            # A decl with arguments is sugar for binding a lambda.
            return ast.Decl(name, ast.LambdaExpr(argnames, expr))
        else:
            return ast.Decl(name, expr)

    def _expr(self):
        """Parse an expr of the form:

            expr op expr

        We only allow a single operator between expressions. Additional
        operators should be nested using parens, e.g. x + (y * z)
        """
        node = self._expr_component()
        if self.cur_token.type in self.operators:
            op = self.cur_token.type
            self._get_next_token()
            rhs = self._expr_component()
            return ast.OpExpr(op, node, rhs)
        else:
            return node

    def _expr_component(self):
        """Parse an expr component (components can be separated by an operator).
        """
        curtok = self.cur_token
        if self.cur_token.type == 'INT':
            self._get_next_token()
            return ast.IntConstant(curtok.val)
        elif self.cur_token.type in ('FALSE', 'TRUE'):
            self._get_next_token()
            return ast.BoolConstant(curtok.val)
        elif self.cur_token.type == 'ID':
            self._get_next_token()
            if self.cur_token.type == '(':
                # ID followed by '(' is function application
                return self._app(curtok.val)
            else:
                return ast.Identifier(curtok.val)
        elif self.cur_token.type == '(':
            self._get_next_token()
            expr = self._expr()
            self._match(')')
            return expr
        elif self.cur_token.type == 'IF':
            return self._ifexpr()
        elif self.cur_token.type == 'LAMBDA':
            return self._lambda()
        else:
            self._error("Don't support {} yet".format(curtok.type))

    def _ifexpr(self):
        """'if' expr 'then' expr 'else' expr"""
        self._match('IF')
        ifexpr = self._expr()
        self._match('THEN')
        thenexpr = self._expr()
        self._match('ELSE')
        elseexpr = self._expr()
        return ast.IfExpr(ifexpr, thenexpr, elseexpr)

    def _lambda(self):
        """'lambda' { ID } '->' expr"""
        self._match('LAMBDA')
        argnames = []

        while self.cur_token.type == 'ID':
            argnames.append(self.cur_token.val)
            self._get_next_token()

        if len(argnames) < 1:
            self._error('Expected non-empty argument list for lambda')
        self._match('ARROW')
        expr = self._expr()
        return ast.LambdaExpr(argnames, expr)

    def _app(self, name):
        """ID '(' { expr ',' } ')' -- function application."""
        self._match('(')
        args = []
        while self.cur_token.type != ')':
            args.append(self._expr())
            if self.cur_token.type == ',':
                self._get_next_token()
            elif self.cur_token.type == ')':
                pass  # the loop will break
            else:
                self._error("Unexpected {} in application".format(
                    self.cur_token.val))
        self._match(')')
        return ast.AppExpr(ast.Identifier(name), args)
| unlicense | ecf15bb9229432b23a293ebdde0eb0f3 | 31.925234 | 80 | 0.453591 | 4.106061 | false | false | false | false |
eliben/code-for-blog | 2011/asio_protobuf_sample/tester_client.py | 13 | 2232 | #!/usr/bin/python
#
# tester_client.py: simple testing client for the server. Suitable for
# usage from the python interactive prompt.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#
from __future__ import print_function
import sys
from socket import *
import struct
from stringdb_pb2 import Request, Response
def make_socket(port=4050):
    """Create a TCP socket connected to localhost on `port` and return it."""
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect(('localhost', port))
    return sock
def send_message(sock, message):
    """Send a serialized message (protobuf Message interface) to a socket,
    prepended by its length packed into 4 big-endian bytes.
    """
    payload = message.SerializeToString()
    header = struct.pack('>L', len(payload))
    sock.send(header + payload)
def socket_read_n(sock, n):
    """Read exactly n bytes from the socket.

    Raise RuntimeError if the connection closed before n bytes were read.
    """
    chunks = []
    remaining = n
    while remaining > 0:
        data = sock.recv(remaining)
        if data == '':
            # An empty read means the peer closed the connection.
            raise RuntimeError('unexpected connection close')
        chunks.append(data)
        remaining -= len(data)
    return ''.join(chunks)
def get_response(sock):
    """Read one length-prefixed, serialized Response message from `sock`.

    The wire format mirrors send_message: a 4-byte big-endian length
    followed by the serialized protobuf payload.
    """
    len_buf = socket_read_n(sock, 4)
    msg_len = struct.unpack('>L', len_buf)[0]
    msg = Response()
    msg.ParseFromString(socket_read_n(sock, msg_len))
    return msg
def send_set_value(sock, key, value):
    """Issue a SET_VALUE request for (key, value) and return the response."""
    rq = Request()
    rq.type = Request.SET_VALUE
    rq.request_set_value.key = key
    rq.request_set_value.value = value
    send_message(sock, rq)
    return get_response(sock)
def send_get_value(sock, key):
    """Issue a GET_VALUE request for key and return the response."""
    rq = Request()
    rq.type = Request.GET_VALUE
    rq.request_get_value.key = key
    send_message(sock, rq)
    return get_response(sock)
def send_count_values(sock):
    """Issue a COUNT_VALUES request and return the response."""
    rq = Request()
    rq.type = Request.COUNT_VALUES
    send_message(sock, rq)
    return get_response(sock)
if __name__ == '__main__':
    # Optional first command-line argument overrides the default server port.
    port = 4050
    if len(sys.argv) >= 2:
        port = int(sys.argv[1])
    sockobj = make_socket(port)
    # Smoke test: set one key/value pair and print the server's response.
    print(send_set_value(sockobj, "key2", "djuli"))
| unlicense | 1ec3ed94aed24a66ed88aef267c67f0a | 23 | 77 | 0.636649 | 3.4875 | false | false | false | false |
eliben/code-for-blog | 2009/pygame_creeps_game/pathfinder.py | 1 | 4974 | from priorityqueueset import PriorityQueueSet
class PathFinder(object):
    """ Computes a path in a graph using the A* algorithm.

        Initialize the object and then repeatedly compute_path to
        get the path between a start point and an end point.

        The points on a graph are required to be hashable and
        comparable with __eq__. Other than that, they may be
        represented as you wish, as long as the functions
        supplied to the constructor know how to handle them.
    """
    def __init__(self, successors, move_cost, heuristic_to_goal):
        """ Create a new PathFinder. Provided with several
            functions that represent your graph and the costs of
            moving through it.

            successors:
                A function that receives a point as a single
                argument and returns a list of "successor" points,
                the points on the graph that can be reached from
                the given point.

            move_cost:
                A function that receives two points as arguments
                and returns the numeric cost of moving from the
                first to the second.

            heuristic_to_goal:
                A function that receives a point and a goal point,
                and returns the numeric heuristic estimation of
                the cost of reaching the goal from the point.
        """
        self.successors = successors
        self.move_cost = move_cost
        self.heuristic_to_goal = heuristic_to_goal

    def compute_path(self, start, goal):
        """ Compute the path between the 'start' point and the
            'goal' point.

            The path is returned as an iterator to the points,
            including the start and goal points themselves.

            If no path was found, an empty list is returned.
        """
        #
        # Implementation of the A* algorithm.
        #
        # closed_set is a dict used as a set: it maps each fully-explored
        # node to itself (nodes hash by coordinate, see _Node).
        closed_set = {}

        start_node = self._Node(start)
        start_node.g_cost = 0
        start_node.f_cost = self._compute_f_cost(start_node, goal)

        open_set = PriorityQueueSet()
        open_set.add(start_node)

        while len(open_set) > 0:
            # Remove and get the node with the lowest f_score from
            # the open set
            #
            curr_node = open_set.pop_smallest()

            if curr_node.coord == goal:
                return self._reconstruct_path(curr_node)

            closed_set[curr_node] = curr_node

            for succ_coord in self.successors(curr_node.coord):
                succ_node = self._Node(succ_coord)
                succ_node.g_cost = self._compute_g_cost(curr_node, succ_node)
                succ_node.f_cost = self._compute_f_cost(succ_node, goal)

                if succ_node in closed_set:
                    continue

                # NOTE(review): open_set.add presumably returns a truthy
                # value only when the node was newly inserted or updated
                # with a lower cost -- confirm against PriorityQueueSet's
                # contract before changing this.
                if open_set.add(succ_node):
                    succ_node.pred = curr_node

        # Open set exhausted without reaching the goal: no path exists.
        return []

    ########################## PRIVATE ##########################

    def _compute_g_cost(self, from_node, to_node):
        # g: cumulative cost of reaching to_node via from_node.
        return (from_node.g_cost +
                self.move_cost(from_node.coord, to_node.coord))

    def _compute_f_cost(self, node, goal):
        # f = g + h: the A* priority of the node.
        return node.g_cost + self._cost_to_goal(node, goal)

    def _cost_to_goal(self, node, goal):
        # h: heuristic estimate of the remaining cost to the goal.
        return self.heuristic_to_goal(node.coord, goal)

    def _reconstruct_path(self, node):
        """ Reconstructs the path to the node from the start node
            (for which .pred is None)
        """
        # Follow the predecessor chain back to the start, then reverse.
        pth = [node.coord]
        n = node
        while n.pred:
            n = n.pred
            pth.append(n.coord)
        return reversed(pth)

    class _Node(object):
        """ Used to represent a node on the searched graph during
            the A* search.

            Each Node has its coordinate (the point it represents),
            a g_cost (the cumulative cost of reaching the point
            from the start point), a f_cost (the estimated cost
            from the start to the goal through this point) and
            a predecessor Node (for path construction).

            The Node is meant to be used inside PriorityQueueSet,
            so it implements equality and hashinig (based on the
            coordinate, which is assumed to be unique) and
            comparison (based on f_cost) for sorting by cost.
        """
        def __init__(self, coord, g_cost=None, f_cost=None, pred=None):
            self.coord = coord
            self.g_cost = g_cost
            self.f_cost = f_cost
            self.pred = pred

        def __eq__(self, other):
            # Identity of a node is its coordinate, not its costs.
            return self.coord == other.coord

        def __cmp__(self, other):
            # Python 2 only: __cmp__ and the cmp() builtin were removed in
            # Python 3 (rich comparisons would be needed there).
            return cmp(self.f_cost, other.f_cost)

        def __hash__(self):
            return hash(self.coord)

        def __str__(self):
            return 'N(%s) -> g: %s, f: %s' % (self.coord, self.g_cost, self.f_cost)

        def __repr__(self):
            return self.__str__()
| unlicense | 7a1ececccd76627d2a3fd9cd1fccd1f4 | 33.783217 | 83 | 0.560314 | 4.262211 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.