repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eregs/regulations-parser | regparser/tree/xml_parser/us_code.py | 3 | 2825 | import re
import six
from regparser.tree.depth import markers as mtypes
from regparser.tree.depth import optional_rules
from regparser.tree.struct import Node
from regparser.tree.xml_parser import paragraph_processor, tree_utils
class USCodeParagraphMatcher(paragraph_processor.BaseMatcher):
    """Convert a paragraph found in the US Code into appropriate Nodes"""
    # A single paren-wrapped paragraph marker: (a), (A) or (1)
    _MARKER_RE = re.compile(r'\((?P<marker>[a-z]+|[A-Z]+|[0-9]+)\)')

    def matches(self, xml):
        # Only plain paragraph elements are handled by this matcher
        return xml.tag == 'P'

    def paragraph_markers(self, text):
        """We can't use tree_utils.get_paragraph_markers as that makes
        assumptions about the order of paragraph markers (specifically
        that the markers will match the order found in regulations). This is
        simpler, looking only at multiple markers at the beginning of the
        paragraph"""
        markers = []
        match = self._MARKER_RE.match(text)
        # Strip leading "(a)(1)..." markers one at a time until the
        # remaining text no longer begins with a marker
        while match:
            markers.append(match.group('marker'))
            text = text[match.end():].strip()
            match = self._MARKER_RE.match(text)
        return markers

    def derive_nodes(self, xml, processor=None):
        """Build one Node per leading paragraph marker. The plain text and
        the tag-preserving text are split in parallel so that each Node
        carries both representations"""
        nodes = []
        text = tree_utils.get_node_text(xml).strip()
        tagged_text = tree_utils.get_node_text_tags_preserved(xml).strip()
        markers_list = self.paragraph_markers(text)
        # Re-wrap the markers (e.g. "a" -> "(a)") to use as split points
        with_parens = ['({0})'.format(m) for m in markers_list]
        triplets = zip(markers_list,
                       tree_utils.split_text(text, with_parens),
                       tree_utils.split_text(tagged_text, with_parens))
        for m, text, tagged_text in triplets:
            nodes.append(Node(
                text=text.strip(), label=[m], source_xml=xml,
                tagged_text=six.text_type(tagged_text.strip())
            ))
        return nodes
class USCodeProcessor(paragraph_processor.ParagraphProcessor):
    """ParagraphProcessor which converts a chunk of XML into Nodes. Only
    processes P nodes and limits the type of paragraph markers to those found
    in US Code"""
    MATCHERS = [USCodeParagraphMatcher()]

    def additional_constraints(self):
        # Constrain depth derivation: no gaps in marker sequences, and only
        # the marker types which actually appear in US Code text
        return [optional_rules.limit_sequence_gap(),
                optional_rules.limit_paragraph_types(
                    mtypes.lower, mtypes.ints, mtypes.upper, mtypes.roman,
                    mtypes.upper_roman)]
class USCodeMatcher(paragraph_processor.BaseMatcher):
    """Matches the custom `USCODE` tag and parses its contents with a
    USCodeProcessor. Does not use a custom node type at the moment"""

    def matches(self, xml):
        return xml.tag == 'USCODE'

    def derive_nodes(self, xml, processor=None):
        # Note: the `processor` argument is ignored; US Code content is
        # always handled by its own dedicated processor
        root = Node(label=[mtypes.MARKERLESS], source_xml=xml)
        us_code_processor = USCodeProcessor()
        return [us_code_processor.process(xml, root)]
| cc0-1.0 | eb3ea576b663e34d02baa3e97509e1a0 | 38.788732 | 77 | 0.644956 | 4.058908 | false | false | false | false |
eregs/regulations-parser | regparser/tree/xml_parser/xml_wrapper.py | 3 | 1852 | from copy import deepcopy
import six
from lxml import etree
from regparser import plugins
from regparser.tree.xml_parser.preprocessors import replace_html_entities
class XMLWrapper(object):
    """Wrapper around XML which provides a consistent interface shared by both
    Notices and Annual editions of XML"""

    def __init__(self, xml, source=None):
        """Includes automatic conversion from string and a deep copy for
        safety. `source` represents the provenance of this xml. It is _not_
        serialized and hence does not follow the xml through the index"""
        if isinstance(xml, six.binary_type):
            # Raw bytes: sanitize stray HTML entities before parsing
            xml = replace_html_entities(xml)
            self.xml = etree.fromstring(xml)
        elif isinstance(xml, etree._Element):
            # Deep copy so that later mutations never leak back to the caller
            self.xml = deepcopy(xml)
        else:
            raise ValueError("xml should be either binary or an lxml node")
        self.source = source

    def preprocess(self):
        """Unfortunately, the notice xml is often inaccurate. This function
        attempts to fix some of those (general) flaws. For specific issues, we
        tend to instead use the files in settings.LOCAL_XML_PATHS"""
        # Each preprocessor plugin mutates self.xml in place
        for plugin in plugins.instantiate_if_possible(
                'eregs_ns.parser.preprocessors', method_name='transform'):
            plugin(self.xml)
        return self

    def xpath(self, *args, **kwargs):
        # Thin delegation to the wrapped element's xpath
        return self.xml.xpath(*args, **kwargs)

    def xml_str(self):
        """Render the wrapped XML as a pretty-printed unicode string"""
        return etree.tounicode(self.xml, pretty_print=True)

    def _find_or_create(self, tag):
        """Look for the first matching tag present in the document. If it's
        not present, create it by inserting it into the root"""
        matches = self.xpath('//' + tag)
        if matches:
            return matches[0]
        else:
            return etree.SubElement(self.xml, tag)
| cc0-1.0 | 586efc2d566b19950872523dd7561cd9 | 36.04 | 78 | 0.649028 | 4.287037 | false | false | false | false |
eregs/regulations-parser | interpparser/preprocessors.py | 2 | 1307 | from regparser.tree.gpo_cfr import appendices
# XPath fragments used to locate "Supplement I" (interpretations) content
_CONTAINS_SUPPLEMENT = "contains(., 'Supplement I')"
# A top-level (HD1) header announcing Supplement I inside a REGTEXT section
_SUPPLEMENT_HD = "//REGTEXT//HD[@SOURCE='HD1' and {0}]".format(
    _CONTAINS_SUPPLEMENT)
# Direct AMDPAR or P children which mention Supplement I
_SUPPLEMENT_AMD_OR_P = "./AMDPAR[{0}]|./P[{0}]".format(_CONTAINS_SUPPLEMENT)
def _set_prev_to_amdpar(xml_node):
"""Set the tag to AMDPAR on all previous siblings until we hit the
Supplement I header"""
if xml_node is not None and xml_node.tag in ('P', 'AMDPAR'):
xml_node.tag = 'AMDPAR'
if 'supplement i' not in xml_node.text.lower(): # not done
_set_prev_to_amdpar(xml_node.getprevious())
elif xml_node is not None:
_set_prev_to_amdpar(xml_node.getprevious())
def supplement_amdpar(xml):
    """Supplement I AMDPARs are often incorrect (labelled as Ps)"""
    for supp_header in xml.xpath(_SUPPLEMENT_HD):
        parent = supp_header.getparent()
        # Only re-tag when the header's siblings include an AMDPAR/P which
        # mentions Supplement I -- i.e. this section amends interpretations
        if parent.xpath(_SUPPLEMENT_AMD_OR_P):
            _set_prev_to_amdpar(supp_header.getprevious())
def appendix_to_interp(xml):
    """Convert Supplement I APPENDIX tags to INTERP."""
    for appendix in xml.xpath('.//APPENDIX'):
        section_title = appendices.get_appendix_title(appendix)
        # Guard against a missing title: `'Supplement' in None` would raise
        # TypeError. (Presumably get_appendix_title can return None when no
        # title element is found -- TODO confirm against its implementation)
        if (section_title and 'Supplement' in section_title and
                'Part' in section_title):
            appendix.tag = 'INTERP'
eregs/regulations-parser | regparser/history/annual.py | 2 | 5454 | # -*- coding: utf-8 -*-
import logging
import os
import re
from collections import namedtuple
from datetime import date
import requests
from cached_property import cached_property
from regparser.index.http_cache import http_client
from regparser.tree.xml_parser.xml_wrapper import XMLWrapper
from regparser.web.settings import parser as settings
# Bulk-data URL for an entire annual-edition volume of a CFR title
CFR_BULK_URL = ("https://www.gpo.gov/fdsys/bulkdata/CFR/{year}/title-{title}/"
                "CFR-{year}-title{title}-vol{volume}.xml")
# URL for a single part within an annual-edition volume
CFR_PART_URL = ("https://www.gpo.gov/fdsys/pkg/"
                "CFR-{year}-title{title}-vol{volume}/xml/"
                "CFR-{year}-title{title}-vol{volume}-part{part}.xml")


# Matches any of the following:
#   Parts 200 to 219
#   Parts 200 to end
#   Part 52 (§§ 52.1019 to 52.2019)
# Note: The outer parentheses seem to be required by Python, although they
# shouldn't be
PART_SPAN_REGEX = re.compile(
    r'.*parts? ('
    r'(?P<span>(?P<start>\d+) to ((?P<end>\d+)|(?P<end_literal>end)))'
    r'|((?P<single_part>\d+) \(.*\))'
    r'.*)',
    flags=re.IGNORECASE)

logger = logging.getLogger(__name__)
class Volume(namedtuple('Volume', ['year', 'title', 'vol_num'])):
    """A single bound volume of a CFR title's annual edition on GPO"""

    @property
    def url(self):
        # Bulk-data URL for this volume's complete XML
        return CFR_BULK_URL.format(year=self.year, title=self.title,
                                   volume=self.vol_num)

    @cached_property
    def response(self):
        """Streamed GET of the volume XML; memoized so GPO is hit once"""
        logger.debug("GET %s", self.url)
        return http_client().get(self.url, stream=True)

    @property
    def exists(self):
        # A 200 indicates GPO publishes a volume with this number
        return self.response.status_code == 200

    @cached_property
    def part_span(self):
        """Calculate and memoize the range of parts this volume covers"""
        # False signals "could not determine"; see should_contain
        _part_span = False
        part_string = ''
        # Stream line by line and stop at <PARTS> rather than downloading
        # the (potentially very large) volume in full
        for line in self.response.iter_lines(decode_unicode=True):
            if '<PARTS>' in line:
                part_string = line
                break
        if part_string:
            match = PART_SPAN_REGEX.match(part_string)
            if match and match.group('span'):
                start = int(match.group('start'))
                if match.group('end_literal'):
                    # "Parts N to end" -- open-ended upper bound
                    end = None
                else:
                    end = int(match.group('end'))
                _part_span = (start, end)
            elif match:
                # Single-part volume, e.g. "Part 52 (...)"
                start = int(match.group('single_part'))
                _part_span = (start, start)
            else:
                logger.warning("Can't parse: %s", part_string)
        else:
            logger.warning('No <PARTS> in %s. Assuming this volume '
                           'contains all of the regs', self.url)
            _part_span = (1, None)
        return _part_span

    @property
    def publication_date(self):
        # First day of the month in which this title's edition is published
        return date(self.year, publication_month(self.title), 1)

    def should_contain(self, part):
        """Does this volume contain the part number requested?"""
        if self.part_span:
            (start, end) = self.part_span
            if start > part:
                return False
            elif end is None:
                # Open-ended span ("to end") covers everything >= start
                return True
            else:
                return end >= part
        else:
            return False

    def find_part_xml(self, part):
        """Pull the XML for an annual edition, first checking locally.
        Returns an XMLWrapper, or (implicitly) None when the part can't be
        found locally or remotely"""
        logger.info("Find Part xml for %s CFR %s", self.title, part)
        url = CFR_PART_URL.format(year=self.year, title=self.title,
                                  volume=self.vol_num, part=part)
        filename = url.split('/')[-1]
        # First preference: a local copy in one of the configured paths
        for xml_path in settings.LOCAL_XML_PATHS:
            xml_path = os.path.join(xml_path, 'annual', filename)
            logger.debug("Checking locally for file %s", xml_path)
            if os.path.isfile(xml_path):
                with open(xml_path, 'rb') as f:
                    return XMLWrapper(f.read(), xml_path)
        client = http_client()
        # Second preference: the configured mirror; fall back to GPO itself
        first_try_url = settings.XML_REPO_PREFIX + 'annual/' + filename
        logging.info('trying to fetch annual edition from %s', first_try_url)
        response = client.get(first_try_url)
        if response.status_code != requests.codes.ok:
            logger.info('failed. fetching from %s', url)
            response = client.get(url)
        if response.status_code == requests.codes.ok:
            return XMLWrapper(response.content, url)
def publication_month(cfr_title):
    """Annual editions are published for different titles at different points
    throughout the year. Return the month associated with this CFR title"""
    # Quarterly schedule: titles 1-16 in January, 17-27 in April,
    # 28-41 in July, and everything above that in October
    for last_title_in_group, month in ((16, 1), (27, 4), (41, 7)):
        if cfr_title <= last_title_in_group:
            return month
    return 10
def date_of_annual_after(title, eff_date):
    """Return the date of the _first_ annual edition which should contain any
    changes on `eff_date`. This date may well be in the future"""
    annual_date = date(eff_date.year, publication_month(title), 1)
    if eff_date > annual_date:
        # Too late for this year's edition; the change will first appear in
        # next year's annual volume
        annual_date = annual_date.replace(year=eff_date.year + 1)
    return annual_date
def find_volume(year, title, part):
    """Annual editions have multiple volume numbers. Try to find the volume
    that we care about"""
    vol_num = 1
    volume = Volume(year, title, vol_num)
    # Probe volume numbers in order until GPO stops responding (i.e. the
    # volume doesn't exist); each `exists` check performs an HTTP request
    while volume.exists:
        if volume.should_contain(part):
            return volume
        vol_num += 1
        volume = Volume(year, title, vol_num)
    return None
| cc0-1.0 | 4f3ae5638d97e8f6c30c581268aa903c | 34.16129 | 78 | 0.582569 | 3.789986 | false | false | false | false |
eregs/regulations-parser | regparser/layer/layer.py | 3 | 2855 | import abc
from collections import defaultdict, namedtuple
import six
# A single "search-replace" record: `text` is the substring to search for,
# `locations` the 0-based ordinals (among all occurrences of that substring)
# which correspond to actual matches, and `representative` one of the
# original match objects (used for any extra metadata)
SearchReplace = namedtuple('SearchReplace',
                           ['text', 'locations', 'representative'])
class Layer(six.with_metaclass(abc.ABCMeta)):
    """Base class for all of the Layer generators. Defines the interface they
    must implement"""

    def __init__(self, tree, **context):
        """Different layers may need different contextual information, such as
        which version of a regulation is being processed, which CFR title is
        under inspection, etc. We'd like to call the constructor of each
        different layer in the same way (so we can just iterate over all
        layers), so we silently eat all kwargs"""
        self.tree = tree
        # Maps node label ids to this layer's derived content
        self.layer = {}

    def pre_process(self):
        """ Take the whole tree and do any pre-processing """
        pass

    @abc.abstractproperty
    def shorthand(self):
        """Unique identifier for this layer"""
        raise NotImplementedError()

    @abc.abstractmethod
    def process(self, node):
        """ Construct the element of the layer relevant to processing the given
        node, so it returns (paragraph_id, layer_content) or None if there is no
        relevant information. """
        raise NotImplementedError()

    def builder(self, node, cache=None):
        # Depth-first walk of the tree; the optional cache lets repeated
        # builds skip re-processing unchanged nodes
        if cache:
            layer_element = cache.fetch_or_process(self, node)
        else:
            layer_element = self.process(node)

        if layer_element:
            self.layer[node.label_id()] = layer_element

        for c in node.children:
            self.builder(c, cache)

    def build(self, cache=None):
        self.pre_process()
        self.builder(self.tree, cache)
        return self.layer

    @staticmethod
    def convert_to_search_replace(matches, text, start_fn, end_fn):
        """We'll often have a bunch of text matches based on offsets. To use
        the "search-replace" encoding (which is a bit more resilient to minor
        variations in text), we need to convert these offsets into "locations"
        -- i.e. of all of the instances of a string in this text, which should
        be matched. Yields `SearchReplace` tuples

        :param matches: arbitrary match objects referring to spans of `text`
        :param text: the full text the offsets index into
        :param start_fn: fn(match) -> start offset of the match in `text`
        :param end_fn: fn(match) -> end offset of the match in `text`
        """
        # Group match objects by the exact substring they cover
        text_to_matches = defaultdict(list)
        for match in matches:
            text_to_matches[text[start_fn(match):end_fn(match)]].append(match)
        for match_text, matches in sorted(text_to_matches.items()):
            locations, location = [], 0
            idx = text.find(match_text)
            # Scan every occurrence of the substring; record the ordinal of
            # those occurrences which line up with a match's start offset
            while idx != -1:
                if any(start_fn(match) == idx for match in matches):
                    locations.append(location)
                location += 1
                idx = text.find(match_text, idx + 1)
            yield SearchReplace(match_text, locations,
                                representative=matches[0])
| cc0-1.0 | af4147f808b958e7645a5ff380be0b08 | 36.565789 | 79 | 0.615412 | 4.440124 | false | false | false | false |
eregs/regulations-parser | regparser/notice/preamble.py | 3 | 5888 | import re
from collections import OrderedDict
from copy import deepcopy
from itertools import takewhile
from lxml import etree
from regparser.tree.depth import heuristics
from regparser.tree.depth.derive import markers as mtypes
from regparser.tree.struct import Node
from regparser.tree.xml_parser.flatsubtree_processor import FlatsubtreeMatcher
from regparser.tree.xml_parser.paragraph_processor import (BaseMatcher,
GraphicsMatcher,
IgnoreTagMatcher,
ParagraphProcessor,
SimpleTagMatcher,
TableMatcher)
from regparser.tree.xml_parser.tree_utils import get_node_text
_MARKER_REGEX = re.compile(r'(?P<marker>([0-9]+)|([a-z]+)|([A-Z]+))\.')
def convert_id(doc_number):
"""Dashes have special significance in other parts of eRegs"""
return doc_number.replace('-', '_')
class PreambleLevelMatcher(BaseMatcher):
    """Handles the synthetic <PREAMBLE_LEVEL> elements which transform_xml
    produces"""

    def matches(self, xml):
        return xml.tag == 'PREAMBLE_LEVEL'

    def derive_nodes(self, xml, processor=None):
        """For a given <PREAMBLE_LEVEL>, create a root Node, pull out a
        marker, if present, and recurse via PreambleProcessor on child
        etree.Elements"""
        title = xml.get('TITLE', "")
        marker_match = _MARKER_REGEX.match(title)
        if marker_match:
            # e.g. "A. Background" -> label ["A"]
            label = [marker_match.group('marker')]
        else:
            label = [mtypes.MARKERLESS]
        root = Node(label=label, node_type='preamble', title=title)
        PreambleProcessor().process(xml, root)
        return [root]
class PreambleProcessor(ParagraphProcessor):
    """ParagraphProcessor configured for preamble content"""
    MATCHERS = [PreambleLevelMatcher(), SimpleTagMatcher('P', 'FP'),
                # FTNT's are already converted; we can ignore the original
                IgnoreTagMatcher('FTNT', 'PRTPAGE'), GraphicsMatcher(),
                FlatsubtreeMatcher(tags=['EXTRACT'], node_type=Node.EXTRACT),
                TableMatcher()]
    # Override two of the default depth-heuristic weights for preamble text
    DEPTH_HEURISTICS = OrderedDict(ParagraphProcessor.DEPTH_HEURISTICS)
    DEPTH_HEURISTICS[heuristics.prefer_diff_types_diff_levels] = 0.2
    DEPTH_HEURISTICS[heuristics.prefer_shallow_depths] = 0.8
def transform_xml(elements, title, depth):
    """The original XML is very _flat_, despite being broken up by headers at
    various depths. This function returns a retooled XML tree with nested
    <PREAMBLE_LEVEL>s; these are much easier for our paragraph processor to
    handle.

    :param list[etree.Element] elements: Initial XML elements to process
    :param str title: Title of the root XML node we'll generate
    :param int depth: indicates which depth headers to look for"""
    root = etree.Element("PREAMBLE_LEVEL", TITLE=title)
    deeper_source = 'HD{0}'.format(depth)
    # Everything before the first header at this depth belongs directly to
    # this level
    non_nested_children = takewhile(
        lambda e: e.tag != 'HD' or e.get('SOURCE') != deeper_source,
        elements)
    root.extend(non_nested_children)

    indexes_of_next_level_headers = [
        idx for idx, elt in enumerate(elements)
        if elt.tag == 'HD' and elt.get('SOURCE') == deeper_source]
    # Pairs of [start, end) indexes, defining runs of XML elements which
    # should be grouped together. The final pair will include len(elements),
    # the end of the list
    start_and_ends = zip(indexes_of_next_level_headers,
                         indexes_of_next_level_headers[1:] + [len(elements)])
    header_and_childrens = [(elements[start], elements[start + 1:end])
                            for start, end in start_and_ends]

    for header, children in header_and_childrens:
        title = header.text
        # Recurse, grouping content under one-deeper headers
        root.append(transform_xml(children, title, depth + 1))
    return root
def parse_intro(notice_xml, doc_id):
    """The introduction to the preamble includes some key paragraphs which
    we bundle together in an "intro" node. Returns None (implicitly) when no
    intro content is present"""
    root = Node(node_type='preamble_intro', label=[doc_id, 'intro'],
                title='Preamble introduction')
    # Standard Federal Register preamble sections: agency, action, summary,
    # dates, addresses, and "for further information"
    parent_tags = ('AGY', 'ACT', 'SUM', 'DATES', 'ADD', 'FURINF')
    xpath = '|'.join('.//' + parent_tag for parent_tag in parent_tags)
    for xml in notice_xml.xpath(xpath):
        title = xml.xpath('./HD')[0].text.strip()
        paras = [get_node_text(p) for p in xml.xpath("./P")]
        # Label this section by its ordinal position, e.g. ['...', 'intro', 'p3']
        parent_label = [doc_id, 'intro', 'p{0}'.format(len(root.children) + 1)]
        children = []
        for i, para in enumerate(paras, start=1):
            label = [doc_id, 'intro', 'p{0}'.format(len(root.children) + 1),
                     'p{0}'.format(i)]
            children.append(Node(text=para, node_type='preamble', label=label))
        root.children.append(Node(node_type='preamble', label=parent_label,
                                  title=title, children=children))
    if root.children:
        return root
def parse_preamble(notice_xml):
    """Convert preamble into a Node tree. The preamble is contained within the
    SUPLINF tag, but before a list of altered subjects. Processing proceeds in
    two phases: first we make the XML more hierarchical, then we use that
    hierarchy to create nested nodes

    :param NoticeXML notice_xml: wrapped XML element"""
    # Deep copy as we delete portions of the tree below
    suplinf = deepcopy(notice_xml.xpath('.//SUPLINF')[0])
    subject_list = suplinf.xpath('./LSTSUB')
    if subject_list:
        # Remove the "List of Subjects" and everything following it
        subject_list_idx = suplinf.index(subject_list[0])
        del suplinf[subject_list_idx:]
    # The first child carries the preamble's title
    title = suplinf[0].text
    label = [convert_id(notice_xml.version_id)]
    root = transform_xml(suplinf[1:], title, depth=1)
    root_node = Node(node_type='preamble', label=label, title=title)
    PreambleProcessor().process(root, root_node)
    intro = parse_intro(notice_xml, label[0])
    if intro:
        # Intro paragraphs always come first in the final tree
        root_node.children.insert(0, intro)
    return root_node
| cc0-1.0 | a88dd666076aa8ce250fd21d448ca0c2 | 43.270677 | 79 | 0.628057 | 3.866054 | false | false | false | false |
noisyboiler/wampy | wampy/transports/websocket/frames.py | 1 | 15187 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import array
import logging
import os
import struct
from struct import pack, unpack_from
from wampy.errors import WebsocktProtocolError, IncompleteFrameError
logger = logging.getLogger(__name__)
class Frame(object):
    """ The framing is what distinguishes the connection from a raw TCP
    one - it's part of the websocket protocol.

    Frames can have a payload length of up to 9,223,372,036,854,775,807
    bytes (due to the fact that the protocol allows for a 63bit length
    indicator).

    The primary purpose of fragmentation is to allow sending a message
    that is of unknown size when the message is started without having
    to buffer that message - it is *not* supported by wampy.

    This is how a websocket frame looks according to RFC 6455:

      0                   1                   2                   3
      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
     +-+-+-+-+-------+-+-------------+-------------------------------+
     |F|R|R|R| opcode|M| Payload len |    Extended payload length    |
     |I|S|S|S|  (4)  |A|     (7)     |             (16/64)           |
     |N|V|V|V|       |S|             |   (if payload len==126/127)   |
     | |1|2|3|       |K|             |                               |
     +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
     |     Extended payload length continued, if payload len == 127  |
     + - - - - - - - - - - - - - - - +-------------------------------+
     |                               |Masking-key, if MASK set to 1  |
     +-------------------------------+-------------------------------+
     | Masking-key (continued)       |          Payload Data         |
     +-------------------------------- - - - - - - - - - - - - - - - +
     :                     Payload Data continued ...                :
     + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
     |                     Payload Data continued ...                |
     +---------------------------------------------------------------+

        ws-frame      = frame-fin           ; 1 bit in length
                        frame-rsv1          ; 1 bit in length
                        frame-rsv2          ; 1 bit in length
                        frame-rsv3          ; 1 bit in length
                        frame-opcode        ; 4 bits in length
                        frame-masked        ; 1 bit in length
                        frame-payload-length; either 7, 7+16, or 7+64 bits
                        [ frame-masking-key ]  ; 32 bits in length
                        frame-payload-data  ; n*8 bits (n >= 0)

    protocol constants are represented in base16/hexidecimal.
    """

    # always use "text" as the type of data to send
    TEXT = 0x01  # 1, 00000001

    # always send an entire message as one frame
    FIN = 0x80  # 128

    # opcodes indicate what the frame represents e.g. a ping, a pong,
    # a continuation of data from the last frame, or a termination, et al.
    OPCODE_BINARY = 0x2
    OPCODE_CONT = 0x0
    OPCODE_CLOSE = 0x8
    OPCODE_PING = 0x9
    OPCODE_PONG = 0xA
    OPCODE_TEXT = 0x1
    OPCODES = (
        OPCODE_BINARY, OPCODE_CONT, OPCODE_CLOSE,
        OPCODE_PING, OPCODE_PONG, OPCODE_TEXT,
    )

    # not intended to carry data for the application but instead for
    # protocol-level signaling
    CONTROL_FRAMES = [OPCODE_PING, OPCODE_PONG, OPCODE_CLOSE]

    # Frame Length
    # The WebSocket protocol has a frame-size limit of 2^63 octets, but
    # a WebSocket message can be composed of an unlimited number of frames.
    # websocket frames come in 3 length brackets: 7bit, 16bit and 64bit.
    # The 7 bit indicator (bits 9-15) either carries the length itself
    # (0-125), or selects a larger bracket:
    #   126 means the length occupies the next 16 bits
    #   127 means the length occupies the next 64 bits
    LENGTH_7 = 0x7e  # 0x7e, 126, 01111110
    LENGTH_16 = 1 << 16  # 0x10000, 65536, 10000000000000000
    MAX_LENGTH = 1 << 63  # 1 x 2**63

    def __init__(self, raw_bytes, payload=None):
        """ Represent a complete websocket frame.

        :param raw_bytes: the full frame, headers included
        :param payload: optionally, the already-extracted application data
            (as bytes); when given, `payload` decodes this rather than
            re-slicing `raw_bytes`
        """
        self._raw_bytes = raw_bytes
        self._payload = payload

        # FIN is the most significant bit of the first byte
        self.fin_bit = self._raw_bytes[0] >> 7
        # the opcode occupies the low nibble of the first byte
        self.opcode = self._raw_bytes[0] & 0xf
        # the low 7 bits of the second byte: either the payload length
        # itself (< 126), or a flag selecting an extended length encoding
        self.payload_length_indicator = self._raw_bytes[1] & 0b1111111

    def __str__(self):
        return self.payload

    @property
    def frame(self):
        """The complete frame as raw bytes"""
        return self._raw_bytes

    @property
    def payload(self):
        """The application data, decoded as a UTF-8 string.

        Raises UnicodeDecodeError when the bytes are not valid UTF-8.
        """
        try:
            if self._payload:
                payload_str = self._payload.decode('utf-8')
            elif self.payload_length_indicator < 126:
                # 7 bit length: data starts right after the 2 header bytes
                payload_str = self._raw_bytes[2:].decode('utf-8')
            elif self.payload_length_indicator == 126:
                # 16 bit extended length: 2 header + 2 length bytes
                payload_str = self._raw_bytes[4:].decode('utf-8')
            else:
                # 64 bit extended length: 2 header + 8 length bytes.
                # (bug fix: this previously sliced at [6:], but the 64 bit
                # length occupies 8 bytes -- see FrameFactory.from_bytes,
                # which strips 8 bytes in the 127 case)
                payload_str = self._raw_bytes[10:].decode('utf-8')
        except UnicodeDecodeError:
            # bug fix: previously logged `self.raw_bytes`, an attribute
            # which does not exist, raising AttributeError and masking
            # the original decode failure
            logger.error('cannot decode %s', self._raw_bytes)
            raise

        return payload_str
class FrameFactory(object):
    """Builds Frame instances: parsing inbound (server) bytes via
    `from_bytes` and constructing outbound (client) bytes via
    `generate_bytes`."""

    @classmethod
    def from_bytes(cls, buffered_bytes):
        """Parse bytes read from the server into the appropriate Frame
        subclass.

        Raises IncompleteFrameError when more bytes must be read, and
        WebsocktProtocolError on an unrecognised opcode.
        """
        # the first 2 bytes are *always* used as headers - but sometimes
        # more than 2 bytes are needed.
        # our work must first be to determine the header length.
        # note that here we are reading data from the *server*, so there
        # is *never* a Mask (part of the protocol).
        if not buffered_bytes or len(buffered_bytes) < 2:
            raise IncompleteFrameError(required_bytes=1)

        opcode = buffered_bytes[0] & 0xf
        if opcode not in Frame.OPCODES:
            raise WebsocktProtocolError('unknown opcode: %s', opcode)

        # binary data interpretation is left up to the application...
        if opcode == Frame.OPCODE_BINARY:
            return Frame(raw_bytes=buffered_bytes)

        # Parse the first two buffered_bytes of header
        fin = buffered_bytes[0] >> 7
        if fin == 0:
            raise RuntimeError("Fragmented Frames Not Supported")

        payload_length_indicator = buffered_bytes[1] & 0b1111111
        if payload_length_indicator == 0:
            # a zero-length payload: control frames need no body at all
            if opcode == Frame.OPCODE_PING:
                return Ping(raw_bytes=buffered_bytes)
            elif opcode == Frame.OPCODE_CLOSE:
                return Close(raw_bytes=buffered_bytes)
            else:
                return Frame(raw_bytes=buffered_bytes)

        available_bytes_for_body = buffered_bytes[2:]
        # NOTE(review): at least 2 body bytes are demanded here (so that the
        # unpack below can read a short) even when the 7 bit indicator only
        # promises 1 -- a valid single-byte-payload frame would therefore
        # raise IncompleteFrameError; confirm callers retry with more bytes
        if len(available_bytes_for_body) < 2:
            raise IncompleteFrameError(
                required_bytes=payload_length_indicator
            )

        # unpack the buffered buffered_bytes into an integer.
        # NOTE(review): ">h" is a *signed* 16 bit read; in the == 126 branch
        # below this value is kept as the body length, so extended lengths
        # above 32767 would come out negative -- confirm intended
        body_length = unpack_from(">h", available_bytes_for_body)[0]

        if payload_length_indicator < 126:
            # then we have enough knowlege about the payload length as it's
            # contained within the 2nd byte of the header - because the
            # trailing 7 bits of the 2 buffered_bytes tells us exactly how long
            # the payload is
            body_candidate = available_bytes_for_body
            # in this case body length is represented by the indicator
            body_length = payload_length_indicator
        elif payload_length_indicator == 126:
            # This is a case where more than 2 bytes are needed for headers.
            # "Extended payload" length is now used
            # meaning that we can chop another 2 bytes off from our
            # `available_bytes_for_body` and be confident that we are left with
            # the payload
            body_candidate = available_bytes_for_body[2:]  # require >= 2 bytes
        elif payload_length_indicator == 127:
            # This is a case where more than 2 bytes are needed for headers.
            # "Extended payload length continued" length is now used
            # chop off Extended Payload bytes
            body_candidate = available_bytes_for_body[8:]  # require >= 8 bytes
            # in this case, we know that there are more bytes to receive
            body_length = struct.unpack("!Q", buffered_bytes[2:10])[0]

        if len(body_candidate) < body_length:
            # more bytes must be read off the socket before the frame is whole
            required_bytes = body_length - len(body_candidate)
            logger.debug("missing %s buffered_bytes", required_bytes)
            raise IncompleteFrameError(
                required_bytes=required_bytes
            )

        if opcode == Frame.OPCODE_PING:
            return Ping(raw_bytes=buffered_bytes)

        if opcode == Frame.OPCODE_PONG:
            return Pong(raw_bytes=buffered_bytes)

        if opcode == Frame.OPCODE_CLOSE:
            return Close(raw_bytes=buffered_bytes)

        return Frame(raw_bytes=buffered_bytes, payload=body_candidate)

    @classmethod
    def generate_mask(cls, mask_key, data):
        """ Mask data.

        :Parameters:
            mask_key: byte string
                4 byte string(byte), e.g. '\x10\xc6\xc4\x16'
            data: str
                data to mask

        :Returns:
            the masked data as bytes
        """
        # Masking of WebSocket traffic from client to server is required
        # because of the unlikely chance that malicious code could cause
        # some broken proxies to do the wrong thing and use this as an
        # attack of some kind. Nobody has proved that this could actually
        # happen, but since the fact that it could happen was reason enough
        # for browser vendors to get twitchy, masking was added to remove
        # the possibility of it being used as an attack.
        if data is None:
            data = ""

        data = bytearray(data, 'utf-8')

        _m = array.array("B", mask_key)
        _d = array.array("B", data)

        # XOR each payload byte with the repeating 4 byte mask key
        for i in range(len(_d)):
            _d[i] ^= _m[i % 4]

        return _d.tobytes()

    @classmethod
    def generate_bytes(cls, payload, fin_bit, opcode, mask_payload):
        """ Format data to string (buffered_bytes) to send to server.
        """
        # the first byte contains the FIN bit, the 3 RSV bits and the
        # 4 opcode bits and for a client will *always* be 1000 0001 (or 129).
        # so we want the first byte to look like...
        #
        #  1 0 0 0 0 0 0 1   (1 is a text frame)
        # +-+-+-+-+-------+
        # |F|R|R|R| opcode|
        # |I|S|S|S|       |
        # |N|V|V|V|       |
        # | |1|2|3|       |
        # +-+-+-+-+-------+
        # note that because all RSV bits are zero, we can ignore them

        # this shifts each bit into position and bitwise ORs them together,
        # using the struct module to pack them as incoming network bytes
        frame = pack(
            '!B', (
                (fin_bit << 7) | opcode
            )
        )

        # the second byte - and maybe the 7 after this, we'll use to tell
        # the server how long our payload is.
        # the mask is always included with client -> server, so the first bit
        # of the second byte is always 1 which flags that the data is masked,
        # i.e. encoded
        if mask_payload:
            mask_bit = 1 << 7
        else:
            mask_bit = 0 << 7

        # next we have to | this bit with the payload length.
        # note that we ensure that the payload is utf-8 encoded before we take
        # the length because unicode characters can be >1 bytes in length and
        # lead to bugs if we just do ``len(payload)``.
        length = len(payload.encode('utf-8'))
        if length >= Frame.MAX_LENGTH:
            raise WebsocktProtocolError("data is too long")

        # the second byte contains the payload length and mask; longer
        # payloads spill into 2 or 8 extra "extended length" bytes
        if length < Frame.LENGTH_7:
            # we can simply represent payload length with first 7 bits
            frame += pack('!B', (mask_bit | length))
        elif length < Frame.LENGTH_16:
            frame += pack('!B', (mask_bit | 126)) + pack('!H', length)
        else:
            frame += pack('!B', (mask_bit | 127)) + pack('!Q', length)

        if mask_payload:
            # we always mask frames from the client to server
            # use a string of n random buffered_bytes for the mask
            mask_key = os.urandom(4)
            mask_data = cls.generate_mask(mask_key=mask_key, data=payload)
            mask = mask_key + mask_data
            frame += mask
        else:
            frame += bytearray(payload, 'utf-8')

        return bytearray(frame)
class Text(Frame):
    """A complete, single-frame Text message (opcode 0x1); the payload is
    always masked, as required for client-to-server frames."""

    def __init__(self, raw_bytes=None, payload=''):
        # Generate the frame bytes unless the caller already has them
        raw_bytes = raw_bytes or FrameFactory.generate_bytes(
            payload=payload,
            fin_bit=1,
            opcode=Frame.OPCODE_TEXT,
            mask_payload=True,
        )
        super(Text, self).__init__(raw_bytes=raw_bytes)
class Ping(Frame):
    """A Ping control frame (opcode 0x9). Masking is off by default; pass
    ``mask_payload=True`` when constructing a Ping to send from a client."""

    def __init__(self, raw_bytes=None, payload='', mask_payload=False):
        # Generate the frame bytes unless the caller already has them
        raw_bytes = raw_bytes or FrameFactory.generate_bytes(
            payload=payload,
            fin_bit=1,
            opcode=Frame.OPCODE_PING,
            mask_payload=mask_payload,
        )
        super(Ping, self).__init__(raw_bytes=raw_bytes)
class Pong(Frame):
    """A Pong control frame (opcode 0xA); always masked here, as client to
    server frames must be."""

    def __init__(self, raw_bytes=None, payload=''):
        # Generate the frame bytes unless the caller already has them
        raw_bytes = raw_bytes or FrameFactory.generate_bytes(
            payload=payload,
            fin_bit=1,
            opcode=Frame.OPCODE_PONG,
            mask_payload=True,
        )
        super(Pong, self).__init__(raw_bytes=raw_bytes)
class Close(Frame):
    """A Close control frame (opcode 0x8), initiating the closing handshake.

    NOTE(review): this frame is generated with ``mask_payload=False``,
    unlike Text/Pong above; RFC 6455 requires all client-to-server frames
    to be masked -- confirm this is intentional."""

    def __init__(self, raw_bytes=None, payload=''):
        # Generate the frame bytes unless the caller already has them
        raw_bytes = raw_bytes or FrameFactory.generate_bytes(
            payload=payload,
            fin_bit=1,
            opcode=Frame.OPCODE_CLOSE,
            mask_payload=False,
        )
        super(Close, self).__init__(raw_bytes=raw_bytes)
| mpl-2.0 | 9734a38295e989391c16584d084d7f03 | 38.141753 | 79 | 0.531441 | 4.130269 | false | false | false | false |
noisyboiler/wampy | wampy/messages/event.py | 1 | 1383 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
class Event(object):
    """ When a Subscriber is deemed to be a receiver, the Broker sends
    the Subscriber an "EVENT" message:

        [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id,
            Details|dict]

    or

        [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id,
            Details|dict, PUBLISH.Arguments|list]

    or

        [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id,
            Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentKw|dict]

    """
    WAMP_CODE = 36
    name = "event"

    def __init__(
            self, subscription_id, publication_id, details_dict,
            publish_args=None, publish_kwargs=None,
    ):
        super(Event, self).__init__()
        self.subscription_id = subscription_id
        self.publication_id = publication_id
        self.details = details_dict
        # default to empty containers so `message` is always 6 items long
        self.publish_args = publish_args or []
        self.publish_kwargs = publish_kwargs or {}

    @property
    def message(self):
        """The wire-format list representation of this EVENT"""
        header = [self.WAMP_CODE, self.subscription_id, self.publication_id]
        body = [self.details, self.publish_args, self.publish_kwargs]
        return header + body
| mpl-2.0 | 47835d497352a2fb5f7248d12eaaed61 | 29.733333 | 71 | 0.628344 | 3.831025 | false | false | false | false |
noisyboiler/wampy | wampy/messages/publish.py | 1 | 1025 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import random
class Publish(object):
    """A PUBLISH message sent to the Router.

    Wire format: ``[PUBLISH, Request|id, Options|dict, Topic|uri,
    Arguments|list, ArgumentsKw|dict]``, e.g. ::

        [
            16, 239714735, {}, "com.myapp.mytopic1", [],
            {"color": "orange", "sizes": [23, 42, 7]}
        ]
    """

    WAMP_CODE = 16
    name = "publish"

    def __init__(self, topic, options, *args, **kwargs):
        super(Publish, self).__init__()
        # Random 32-bit request identifier, as required by the WAMP spec.
        self.request_id = random.getrandbits(32)
        self.topic = topic
        self.options = options
        self.args = args
        self.kwargs = kwargs

    @property
    def message(self):
        """Serialised WAMP representation of this PUBLISH."""
        return [
            self.WAMP_CODE,
            self.request_id,
            self.options,
            self.topic,
            self.args,
            self.kwargs,
        ]
| mpl-2.0 | 33978fad2586d7282e30198b75d7ff1e | 26.702703 | 70 | 0.580488 | 3.621908 | false | false | false | false |
noisyboiler/wampy | wampy/roles/callee.py | 1 | 1138 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import types
from functools import partial
logger = logging.getLogger(__name__)
class RegisterProcedureDecorator(object):
    """Decorator marking a function as a WAMP callee.

    It tags the wrapped function with ``callee = True`` and an
    ``invocation_policy`` attribute (default ``"single"``), which the
    session later uses to register the procedure with the router.
    """

    def __init__(self, *args, **kwargs):
        self.invocation_policy = kwargs.get("invocation_policy", "single")

    @classmethod
    def decorator(cls, *args, **kwargs):

        def mark(fn, args, kwargs):
            # Tag the function in place; it is returned unwrapped.
            fn.callee = True
            fn.invocation_policy = kwargs.get("invocation_policy", "single")
            return fn

        bare_usage = (
            len(args) == 1 and isinstance(args[0], types.FunctionType)
        )
        if bare_usage:
            # @callee without parentheses: args[0] is the function itself.
            return mark(args[0], args=(), kwargs={})
        # @callee(...): defer until the function is supplied.
        return partial(mark, args=args, kwargs=kwargs)


callee = RegisterProcedureDecorator.decorator
modoboa/modoboa | modoboa/core/management/commands/cleanlogs.py | 2 | 1344 | import datetime
import logging
from django.core.management.base import BaseCommand
from django.utils import timezone
from modoboa.core.models import Log
from modoboa.parameters import tools as param_tools
class Command(BaseCommand):
    """Management command deleting Log entries older than the configured age."""

    help = "Log table cleanup"  # NOQA:A003

    def add_arguments(self, parser):
        """Register the --debug and --verbose command line flags."""
        parser.add_argument(
            "--debug", action="store_true", default=False,
            help="Activate debug output")
        parser.add_argument(
            "--verbose", action="store_true", default=False,
            help="Display informational messages")

    def __vprint(self, msg):
        """Print *msg* only when verbose mode is enabled."""
        if self.verbose:
            print(msg)

    def handle(self, *args, **options):
        """Entry point: purge logs older than ``log_maximum_age`` days."""
        if options["debug"]:
            # Echo SQL queries to stderr for troubleshooting.
            log = logging.getLogger("django.db.backends")
            log.setLevel(logging.DEBUG)
            log.addHandler(logging.StreamHandler())
        self.verbose = options["verbose"]
        max_age = param_tools.get_global_parameter("log_maximum_age")
        self.__vprint("Deleting logs older than %d days..." % max_age)
        cutoff = timezone.now() - datetime.timedelta(max_age)
        Log.objects.filter(date_created__lt=cutoff).delete()
        self.__vprint("Done.")
| isc | 81c8b7cf9451f901b45e8759eb830846 | 31.780488 | 78 | 0.63244 | 4.160991 | false | false | false | false |
modoboa/modoboa | modoboa/relaydomains/api/v1/serializers.py | 2 | 2397 | """RelayDomain serializers."""
import json
from django.utils.translation import ugettext as _
from rest_framework import serializers
from modoboa.admin import models as admin_models
from modoboa.transport import backends as tr_backends, models as tr_models
class TransportSerializer(serializers.ModelSerializer):
    """Serializer for Transport instances."""

    class Meta:
        model = tr_models.Transport
        fields = ("pk", "service", "_settings")

    def validate(self, data):
        """Resolve the backend for ``service`` and validate its settings."""
        self.backend = tr_backends.manager.get_backend(data["service"])
        if not self.backend:
            raise serializers.ValidationError({
                "service": _("Unsupported service")
            })
        # _settings arrives as a JSON string; decode it before the
        # backend inspects individual fields.
        data["_settings"] = json.loads(data["_settings"])
        errors = self.backend.clean_fields(data["_settings"])
        if errors:
            detail = ",".join(
                "{}: {}".format(field, message) for field, message in errors)
            raise serializers.ValidationError({"_settings": detail})
        return data
class RelayDomainSerializer(serializers.ModelSerializer):
    """Serializer for relay domains (a Domain plus its Transport)."""

    transport = TransportSerializer()

    class Meta:
        model = admin_models.Domain
        fields = (
            "pk", "name", "enabled", "transport", "enable_dkim",
            "dkim_key_selector", "dkim_public_key"
        )
        read_only_fields = ("pk", "dkim_public_key", )

    def create(self, validated_data):
        """Create the Transport first, then the relay Domain pointing to it."""
        transport = tr_models.Transport(**validated_data.pop("transport"))
        # The transport pattern mirrors the domain name.
        transport.pattern = validated_data["name"]
        transport.save()
        domain = admin_models.Domain(**validated_data)
        domain.type = "relaydomain"
        domain.transport = transport
        # Passing creator grants ownership of the new domain to the
        # requesting user (see AdminObject.save).
        domain.save(creator=self.context["request"].user)
        return domain

    def update(self, instance, validated_data):
        """Update the Domain fields, then its related Transport."""
        transport_data = validated_data.pop("transport")
        for key, value in validated_data.items():
            setattr(instance, key, value)
        instance.save()
        for key, value in transport_data.items():
            setattr(instance.transport, key, value)
        # Keep the transport pattern in sync with a possibly renamed domain.
        instance.transport.pattern = instance.name
        instance.transport.save()
        return instance
| isc | ed80a680c7d41f9486c97e37b54b9906 | 32.760563 | 78 | 0.61577 | 4.488764 | false | false | false | false |
modoboa/modoboa | modoboa/admin/migrations/0006_auto_20161104_1900.py | 2 | 2194 | # Generated by Django 1.9.5 on 2016-11-04 18:00
from django.db import migrations, models
import django.utils.timezone
def move_dates(apps, schema_editor):
    """Copy creation/last_modification values from the legacy relation.

    Each model instance previously carried its dates on a related
    ``dates`` object; this migration step flattens them onto the new
    direct fields added by this migration.
    """
    model_names = ("Domain", "DomainAlias", "Mailbox", "Alias")
    for model_name in model_names:
        model = apps.get_model("admin", model_name)
        for instance in model.objects.all():
            instance.creation = instance.dates.creation
            instance.last_modification = instance.dates.last_modification
            instance.save()
class Migration(migrations.Migration):
    """Add direct date fields to admin models and backfill their values."""

    dependencies = [
        ('admin', '0005_auto_20161026_1003'),
    ]

    operations = [
        # Add creation/last_modification columns to each model; creation
        # defaults to "now" so pre-existing rows get a sane value before
        # the data migration overwrites it.
        migrations.AddField(
            model_name='alias',
            name='creation',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='alias',
            name='last_modification',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='domain',
            name='creation',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='domain',
            name='last_modification',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='domainalias',
            name='creation',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='domainalias',
            name='last_modification',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='mailbox',
            name='creation',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='mailbox',
            name='last_modification',
            field=models.DateTimeField(null=True),
        ),
        # Finally copy existing values from the legacy "dates" relation.
        migrations.RunPython(move_dates)
    ]
| isc | 604805c2b29e5f9c1cb3e50550511912 | 31.746269 | 74 | 0.584777 | 4.486708 | false | false | false | false |
modoboa/modoboa | modoboa/admin/models/base.py | 1 | 2189 | """Base admin models."""
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.utils import timezone
from modoboa.core import models as core_models
from modoboa.lib.permissions import (
grant_access_to_object, ungrant_access_to_object
)
class AdminObjectManager(models.Manager):
    """Manager adding permission-aware lookups for admin objects."""

    def get_for_admin(self, admin):
        """Return the objects this administrator can manage.

        Superusers see everything; other users only see objects they
        own. The result is a ``QuerySet``, so this function can be
        used to fill ``ModelChoiceField`` objects.
        """
        qset = self.get_queryset()
        if admin.is_superuser:
            return qset
        return qset.prefetch_related("owners").filter(owners__user=admin)
class AdminObject(models.Model):
    """Abstract model to support dates.

    Inherit from this model to automatically add the "dates" feature
    to another model. It defines the appropriate fields and handles
    saves.
    """

    creation = models.DateTimeField(default=timezone.now)
    last_modification = models.DateTimeField(auto_now=True)
    owners = GenericRelation(core_models.ObjectAccess)

    # Subclasses may override this to customize the displayed type name.
    _objectname = None

    objects = AdminObjectManager()

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        """Custom constructor."""
        super().__init__(*args, **kwargs)
        # Field values as loaded from the database (see from_db); lets
        # subclasses detect changes before saving.
        self._loaded_values = {}

    @classmethod
    def from_db(cls, db, field_names, values):
        """Store loaded values."""
        instance = super().from_db(db, field_names, values)
        instance._loaded_values = dict(zip(field_names, values))
        return instance

    @property
    def objectname(self):
        """Return the display name of this object's type."""
        if self._objectname is None:
            return self.__class__.__name__
        return self._objectname

    def post_create(self, creator):
        """Grant full access on this object to its creator."""
        grant_access_to_object(creator, self, is_owner=True)

    def save(self, *args, **kwargs):
        """Save the object; grant ownership to ``creator`` if provided."""
        creator = kwargs.pop("creator", None)
        super().save(*args, **kwargs)
        if creator is not None:
            self.post_create(creator)

    def delete(self, *args, **kwargs):
        """Remove access rules, then delete the object.

        Accepts and forwards the standard ``Model.delete`` arguments
        (``using``, ``keep_parents``), which the previous ``delete(self)``
        signature rejected with a TypeError, and returns the deletion
        summary tuple from Django.
        """
        ungrant_access_to_object(self)
        return super().delete(*args, **kwargs)
| isc | e8c05afd708d06c8c1cde005ac1bd9f6 | 28.986301 | 88 | 0.652353 | 4.145833 | false | false | false | false |
modoboa/modoboa | modoboa/admin/api/v2/viewsets.py | 1 | 13125 | """Admin API v2 viewsets."""
from django.utils.translation import ugettext as _
from django.contrib.contenttypes.models import ContentType
from django_filters import rest_framework as dj_filters
from drf_spectacular.utils import extend_schema, extend_schema_view
from rest_framework import (
filters, mixins, parsers, pagination, permissions, response, status, viewsets
)
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from modoboa.admin.api.v1 import viewsets as v1_viewsets
from modoboa.core import models as core_models
from modoboa.lib import renderers as lib_renderers
from modoboa.lib import viewsets as lib_viewsets
from ... import lib
from ... import models
from . import serializers
@extend_schema_view(
    retrieve=extend_schema(
        description="Retrieve a particular domain",
        summary="Retrieve a particular domain"
    ),
    list=extend_schema(
        description="Retrieve a list of domains",
        summary="Retrieve a list of domains"
    ),
    create=extend_schema(
        description="Create a new domain",
        summary="Create a new domain"
    ),
    delete=extend_schema(
        description="Delete a particular domain",
        summary="Delete a particular domain"
    ),
)
class DomainViewSet(lib_viewsets.RevisionModelMixin,
                    mixins.ListModelMixin,
                    mixins.RetrieveModelMixin,
                    mixins.CreateModelMixin,
                    mixins.UpdateModelMixin,
                    viewsets.GenericViewSet):
    """V2 viewset for Domain management (list/retrieve/create/update
    plus custom delete, administrator and CSV import/export actions)."""

    permission_classes = (
        permissions.IsAuthenticated, permissions.DjangoModelPermissions,
    )

    def get_queryset(self):
        """Filter queryset based on current user."""
        return models.Domain.objects.get_for_admin(self.request.user)

    def get_serializer_class(self, *args, **kwargs):
        """Pick the serializer matching the current action."""
        if self.action == "delete":
            return serializers.DeleteDomainSerializer
        if self.action == "administrators":
            return serializers.DomainAdminSerializer
        if self.action in ["add_administrator", "remove_administrator"]:
            return serializers.SimpleDomainAdminSerializer
        return serializers.DomainSerializer

    @action(methods=["post"], detail=True)
    def delete(self, request, **kwargs):
        """Custom delete method that accepts body arguments."""
        domain = self.get_object()
        if not request.user.can_access(domain):
            raise PermissionDenied(_("You don't have access to this domain"))
        # Prevent an admin from deleting the domain hosting its own mailbox.
        mb = getattr(request.user, "mailbox", None)
        if mb and mb.domain == domain:
            raise PermissionDenied(_("You can't delete your own domain"))
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        domain.delete(request.user, serializer.validated_data["keep_folder"])
        return response.Response(status=status.HTTP_204_NO_CONTENT)

    @action(methods=["get"], detail=True)
    def administrators(self, request, **kwargs):
        """Retrieve all administrators of a domain."""
        domain = self.get_object()
        serializer = self.get_serializer(domain.admins, many=True)
        return response.Response(serializer.data)

    @action(methods=["post"], detail=True, url_path="administrators/add")
    def add_administrator(self, request, **kwargs):
        """Add an administrator to a domain."""
        domain = self.get_object()
        # The serializer needs the domain to validate the account.
        context = self.get_serializer_context()
        context["domain"] = domain
        serializer = self.get_serializer(data=request.data, context=context)
        serializer.is_valid(raise_exception=True)
        domain.add_admin(serializer.validated_data["account"])
        return response.Response()

    @action(methods=["post"], detail=True, url_path="administrators/remove")
    def remove_administrator(self, request, **kwargs):
        """Remove an administrator from a domain."""
        domain = self.get_object()
        context = self.get_serializer_context()
        context["domain"] = domain
        serializer = self.get_serializer(data=request.data, context=context)
        serializer.is_valid(raise_exception=True)
        domain.remove_admin(serializer.validated_data["account"])
        return response.Response()

    @action(methods=["get"], detail=False,
            renderer_classes=(lib_renderers.CSVRenderer,))
    def export(self, request, **kwargs):
        """Export domains and aliases to CSV."""
        result = []
        for domain in self.get_queryset():
            result += domain.to_csv_rows()
        return response.Response(result)

    @extend_schema(
        request=serializers.CSVImportSerializer
    )
    @action(methods=["post"],
            detail=False,
            parser_classes=(parsers.MultiPartParser, parsers.FormParser),
            url_path="import")
    def import_from_csv(self, request, **kwargs):
        """Import domains and aliases from CSV file."""
        serializer = serializers.CSVImportSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        status, msg = lib.import_data(
            request.user,
            request.FILES["sourcefile"],
            serializer.validated_data
        )
        return response.Response({"status": status, "message": msg})
class AccountFilterSet(dj_filters.FilterSet):
    """Custom FilterSet for Account."""

    # Restrict the choices to domains the requesting admin can manage.
    domain = dj_filters.ModelChoiceFilter(
        queryset=lambda request: models.Domain.objects.get_for_admin(
            request.user),
        field_name="mailbox__domain"
    )
    # Role maps onto Django group membership (see filter_role).
    role = dj_filters.CharFilter(method="filter_role")

    class Meta:
        model = core_models.User
        fields = ["domain", "role"]

    def filter_role(self, queryset, name, value):
        """Filter accounts belonging to the group named *value*."""
        return queryset.filter(groups__name=value)
class AccountViewSet(v1_viewsets.AccountViewSet):
    """ViewSet for User/Mailbox."""

    filter_backends = (filters.SearchFilter, dj_filters.DjangoFilterBackend)
    filterset_class = AccountFilterSet

    def get_serializer_class(self):
        """Pick the serializer matching the current action."""
        if self.action in ["create", "validate", "update", "partial_update"]:
            return serializers.WritableAccountSerializer
        if self.action == "delete":
            return serializers.DeleteAccountSerializer
        if self.action in ["list", "retrieve"]:
            return serializers.AccountSerializer
        return super().get_serializer_class()

    def get_queryset(self):
        """Filter queryset based on current user."""
        user = self.request.user
        # Primary keys of User objects the current user was granted
        # access to (through the generic ObjectAccess table).
        ids = (
            user.objectaccess_set
            .filter(content_type=ContentType.objects.get_for_model(user))
            .values_list("object_id", flat=True)
        )
        return (
            core_models.User.objects.filter(pk__in=ids)
            .prefetch_related("userobjectlimit_set")
        )

    @action(methods=["post"], detail=False)
    def validate(self, request, **kwargs):
        """Validate given account without creating it."""
        serializer = self.get_serializer(
            data=request.data,
            context=self.get_serializer_context(),
            partial=True)
        serializer.is_valid(raise_exception=True)
        return response.Response(status=204)

    @action(methods=["get"], detail=False)
    def random_password(self, request, **kwargs):
        """Generate a random password."""
        password = lib.make_password()
        return response.Response({"password": password})

    @action(methods=["post"], detail=True)
    def delete(self, request, **kwargs):
        """Custom delete method that accepts body arguments."""
        account = self.get_object()
        # NOTE(review): the validated data is never read afterwards;
        # presumably DeleteAccountSerializer only carries confirmation
        # options — verify whether options (e.g. keepdir) should be
        # forwarded to account.delete().
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        account.delete()
        return response.Response(status=status.HTTP_204_NO_CONTENT)
class IdentityViewSet(viewsets.ViewSet):
    """Viewset exposing identities (accounts and aliases)."""

    permission_classes = (permissions.IsAuthenticated, )
    serializer_class = None

    def list(self, request, **kwargs):
        """Return all identities visible to the current user."""
        identities = lib.get_identities(request.user)
        serializer = serializers.IdentitySerializer(identities, many=True)
        return response.Response(serializer.data)

    @action(methods=["get"], detail=False,
            renderer_classes=(lib_renderers.CSVRenderer,))
    def export(self, request, **kwargs):
        """Export accounts and aliases to CSV."""
        rows = [
            identity.to_csv_row()
            for identity in lib.get_identities(request.user)
        ]
        return response.Response(rows)

    @extend_schema(
        request=serializers.CSVIdentityImportSerializer
    )
    @action(methods=["post"],
            detail=False,
            parser_classes=(parsers.MultiPartParser, parsers.FormParser),
            url_path="import")
    def import_from_csv(self, request, **kwargs):
        """Import accounts and aliases from an uploaded CSV file."""
        serializer = serializers.CSVIdentityImportSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        result, message = lib.import_data(
            request.user,
            request.FILES["sourcefile"],
            serializer.validated_data
        )
        return response.Response({"status": result, "message": message})
class AliasViewSet(v1_viewsets.AliasViewSet):
    """V2 viewset for Alias."""

    serializer_class = serializers.AliasSerializer

    @action(methods=["post"], detail=False)
    def validate(self, request, **kwargs):
        """Run alias validation without creating anything."""
        serializer = self.get_serializer(
            data=request.data,
            context=self.get_serializer_context(),
            partial=True)
        serializer.is_valid(raise_exception=True)
        return response.Response(status=204)

    @action(methods=["get"], detail=False)
    def random_address(self, request, **kwargs):
        """Return a randomly generated alias address."""
        address = models.Alias.generate_random_address()
        return response.Response({"address": address})
class UserAccountViewSet(viewsets.ViewSet):
    """Viewset for current user operations."""

    @action(methods=["get", "post"], detail=False)
    def forward(self, request, **kwargs):
        """Get or define user forward.

        The forward is materialised by a non-internal Alias whose
        address equals the user's mailbox address.
        """
        mb = request.user.mailbox
        alias = models.Alias.objects.filter(
            address=mb.full_address, internal=False).first()
        data = {}
        if request.method == "GET":
            if alias is not None and alias.recipients:
                recipients = list(alias.recipients)
                # A recipient pointing back to the mailbox itself means
                # "keep local copies"; expose it as a flag instead.
                if alias.aliasrecipient_set.filter(r_mailbox=mb).exists():
                    data["keepcopies"] = True
                    recipients.remove(mb.full_address)
                data["recipients"] = "\n".join(recipients)
            serializer = serializers.UserForwardSerializer(data)
            return response.Response(serializer.data)
        serializer = serializers.UserForwardSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        recipients = serializer.validated_data.get("recipients")
        if not recipients:
            # No recipients given: drop the forward alias entirely.
            models.Alias.objects.filter(
                address=mb.full_address, internal=False).delete()
            # Make sure internal self-alias is enabled
            models.Alias.objects.filter(
                address=mb.full_address, internal=True
            ).update(enabled=True)
        else:
            if alias is None:
                alias = models.Alias.objects.create(
                    address=mb.full_address,
                    domain=mb.domain,
                    enabled=mb.user.is_active
                )
                alias.post_create(request.user)
            if serializer.validated_data["keepcopies"]:
                # Make sure internal self-alias is enabled
                models.Alias.objects.filter(
                    address=mb.full_address, internal=True
                ).update(enabled=True)
                recipients.append(mb.full_address)
            else:
                # Deactivate internal self-alias to avoid storing
                # local copies...
                models.Alias.objects.filter(
                    address=mb.full_address, internal=True
                ).update(enabled=False)
            alias.set_recipients(recipients)
        return response.Response(serializer.validated_data)
class AlarmViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only viewset for Alarm (list/retrieve with search and ordering)."""

    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    ordering_fields = ["created", "status", "title"]
    pagination_class = pagination.PageNumberPagination
    permission_classes = (
        permissions.IsAuthenticated,
    )
    search_fields = ["domain__name", "title"]
    serializer_class = serializers.AlarmSerializer

    def get_queryset(self):
        """Return alarms of domains the current user administers,
        newest first."""
        return models.Alarm.objects.select_related("domain").filter(
            domain__in=models.Domain.objects.get_for_admin(
                self.request.user)
        ).order_by("-created")
| isc | 8bc2ed07f5392f9666a7683870758673 | 37.377193 | 81 | 0.636038 | 4.408801 | false | false | false | false |
modoboa/modoboa | modoboa/core/api/v1/viewsets.py | 2 | 2930 | """Core API viewsets."""
from django.utils.translation import ugettext as _
import django_otp
from django_otp.plugins.otp_static.models import StaticDevice, StaticToken
from django_otp.plugins.otp_totp.models import TOTPDevice
from rest_framework import permissions, response, viewsets
from rest_framework.decorators import action
from drf_spectacular.utils import extend_schema
from . import serializers
class AccountViewSet(viewsets.ViewSet):
    """Account viewset.

    Contains endpoints used to manipulate current user's account.
    """

    permission_classes = (permissions.IsAuthenticated, )
    serializer_class = None

    @action(methods=["post"], detail=False, url_path="tfa/setup")
    def tfa_setup(self, request):
        """Initiate TFA setup."""
        # Idempotent: reuse the user's TOTP device if one already exists.
        instance, created = TOTPDevice.objects.get_or_create(
            user=request.user,
            defaults={"name": "{} TOTP device".format(request.user)}
        )
        return response.Response()

    @extend_schema(
        request=serializers.CheckTFASetupSerializer
    )
    @action(methods=["post"], detail=False, url_path="tfa/setup/check")
    def tfa_setup_check(self, request):
        """Check TFA setup.

        The serializer validates the submitted TOTP code; on success a
        set of static recovery tokens is generated and the session is
        marked as OTP-verified.
        """
        serializer = serializers.CheckTFASetupSerializer(
            data=request.data, context={"user": request.user})
        serializer.is_valid(raise_exception=True)
        # create static device for recovery purposes
        device = StaticDevice.objects.create(
            user=request.user,
            name="{} static device".format(request.user)
        )
        # Generate 10 one-time recovery tokens.
        for cpt in range(10):
            token = StaticToken.random_token()
            device.token_set.create(token=token)
        django_otp.login(self.request, request.user.totpdevice_set.first())
        return response.Response()

    @action(methods=["post"], detail=False, url_path="tfa/disable")
    def tfa_disable(self, request):
        """Disable TFA."""
        if not request.user.tfa_enabled:
            return response.Response({"error": _("2FA is not enabled")},
                                     status=400)
        # Drop every OTP device (TOTP + static recovery tokens).
        request.user.totpdevice_set.all().delete()
        request.user.staticdevice_set.all().delete()
        request.user.tfa_enabled = False
        request.user.save()
        return response.Response()

    @extend_schema(tags=['account'])
    @action(methods=["post"], detail=False, url_path="tfa/reset_codes")
    def tfa_reset_codes(self, request, *args, **kwargs):
        """Reset recovery codes."""
        device = request.user.staticdevice_set.first()
        if device is None:
            # No static device means 2FA setup was never completed.
            return response.Response(status=403)
        # Replace existing tokens with 10 fresh ones.
        device.token_set.all().delete()
        for cpt in range(10):
            token = StaticToken.random_token()
            device.token_set.create(token=token)
        return response.Response({
            "tokens": device.token_set.all().values_list("token", flat=True)
        })
| isc | 2fcc25422f5e51cb628e0855777f882a | 36.564103 | 76 | 0.640614 | 3.997271 | false | false | false | false |
modoboa/modoboa | modoboa/core/forms.py | 2 | 5616 | """Core forms."""
import oath
from django import forms
from django.contrib.auth import (
forms as auth_forms, get_user_model, password_validation
)
from django.db.models import Q
from django.utils.translation import ugettext as _, ugettext_lazy
import django_otp
from modoboa.core.models import User
from modoboa.lib.form_utils import UserKwargModelFormMixin
from modoboa.parameters import tools as param_tools
class LoginForm(forms.Form):
    """User login form."""

    username = forms.CharField(
        label=ugettext_lazy("Username"),
        widget=forms.TextInput(attrs={"class": "form-control"})
    )
    password = forms.CharField(
        label=ugettext_lazy("Password"),
        widget=forms.PasswordInput(attrs={"class": "form-control"})
    )
    # When checked, the session is kept alive across browser restarts.
    rememberme = forms.BooleanField(
        initial=False,
        required=False
    )
class ProfileForm(forms.ModelForm):
    """Form to update User profile.

    Password fields are only present when *update_password* is True
    (see __init__).
    """

    oldpassword = forms.CharField(
        label=ugettext_lazy("Old password"), required=False,
        widget=forms.PasswordInput(attrs={"class": "form-control"})
    )
    newpassword = forms.CharField(
        label=ugettext_lazy("New password"), required=False,
        widget=forms.PasswordInput(attrs={"class": "form-control"})
    )
    confirmation = forms.CharField(
        label=ugettext_lazy("Confirmation"), required=False,
        widget=forms.PasswordInput(attrs={"class": "form-control"})
    )

    class Meta(object):
        model = User
        fields = ("first_name", "last_name", "language",
                  "phone_number", "secondary_email")
        widgets = {
            "first_name": forms.TextInput(attrs={"class": "form-control"}),
            "last_name": forms.TextInput(attrs={"class": "form-control"})
        }

    def __init__(self, update_password, *args, **kwargs):
        """Drop the password fields when password update is disallowed."""
        super(ProfileForm, self).__init__(*args, **kwargs)
        if not update_password:
            del self.fields["oldpassword"]
            del self.fields["newpassword"]
            del self.fields["confirmation"]

    def clean_oldpassword(self):
        """Verify the old password against the stored one (local auth only)."""
        if self.cleaned_data["oldpassword"] == "":
            return self.cleaned_data["oldpassword"]
        # External authentication backends (e.g. LDAP) manage passwords
        # themselves, so skip the local check.
        if param_tools.get_global_parameter("authentication_type") != "local":
            return self.cleaned_data["oldpassword"]
        if not self.instance.check_password(self.cleaned_data["oldpassword"]):
            # NOTE(review): "mismatchs" is a typo, but the string is a
            # translation msgid — fix it together with the locale catalogs.
            raise forms.ValidationError(_("Old password mismatchs"))
        return self.cleaned_data["oldpassword"]

    def clean_confirmation(self):
        """Ensure the new password and its confirmation match and are valid."""
        newpassword = self.cleaned_data["newpassword"]
        confirmation = self.cleaned_data["confirmation"]
        if not newpassword and not confirmation:
            return confirmation
        if newpassword != confirmation:
            raise forms.ValidationError(_("Passwords mismatch"))
        password_validation.validate_password(confirmation, self.instance)
        return confirmation

    def save(self, commit=True):
        """Save profile fields and, if provided, the new password."""
        user = super(ProfileForm, self).save(commit=False)
        if commit:
            if self.cleaned_data.get("confirmation", "") != "":
                user.set_password(
                    self.cleaned_data["confirmation"],
                    self.cleaned_data["oldpassword"]
                )
            user.save()
        return user
class APIAccessForm(forms.Form):
    """Form to control API access."""

    enable_api_access = forms.BooleanField(
        label=ugettext_lazy("Enable API access"), required=False)

    def __init__(self, *args, **kwargs):
        """Initialize form; pre-check the box if the user has a token."""
        account = kwargs.pop("user")
        super(APIAccessForm, self).__init__(*args, **kwargs)
        has_token = hasattr(account, "auth_token")
        self.fields["enable_api_access"].initial = has_token
class PasswordResetForm(auth_forms.PasswordResetForm):
    """Custom password reset form.

    The reset link is delivered to the account's secondary email, so
    only accounts that have one are eligible.
    """

    def get_users(self, email):
        """Return matching user(s) who should receive a reset."""
        return (
            get_user_model()._default_manager.filter(
                email__iexact=email, is_active=True)
            .exclude(Q(secondary_email__isnull=True) | Q(secondary_email=""))
        )

    def send_mail(self, subject_template_name, email_template_name,
                  context, from_email, to_email,
                  html_email_template_name=None):
        """Send message to secondary email instead."""
        # Override the recipient chosen by Django with the secondary one.
        to_email = context["user"].secondary_email
        super(PasswordResetForm, self).send_mail(
            subject_template_name, email_template_name,
            context, from_email, to_email, html_email_template_name)
class VerifySMSCodeForm(forms.Form):
    """A form to verify a code received by SMS."""

    code = forms.CharField(
        label=ugettext_lazy("Verification code"),
        widget=forms.widgets.TextInput(attrs={"class": "form-control"})
    )

    def __init__(self, *args, **kwargs):
        """Capture the TOTP secret used to check the submitted code."""
        self.totp_secret = kwargs.pop("totp_secret")
        super().__init__(*args, **kwargs)

    def clean_code(self):
        """Check the submitted code against the TOTP secret."""
        submitted = self.cleaned_data["code"]
        result = oath.accept_totp(self.totp_secret, submitted)
        if not result[0]:
            raise forms.ValidationError(_("Invalid code"))
        return submitted
class Verify2FACodeForm(UserKwargModelFormMixin, forms.Form):
    """A form to verify 2FA codes validity.

    ``clean_tfa_code`` returns the matching OTP device (not the code)
    so callers can complete the OTP login step with it.
    """

    tfa_code = forms.CharField()

    def clean_tfa_code(self):
        """Match the code against the user's registered OTP devices."""
        submitted = self.cleaned_data["tfa_code"]
        matching_device = django_otp.match_token(self.user, submitted)
        if matching_device is None:
            raise forms.ValidationError(_("This code is invalid"))
        return matching_device
| isc | fe6d978c37cb940f833747ecfabd461a | 33.243902 | 78 | 0.624644 | 4.172363 | false | false | false | false |
modoboa/modoboa | modoboa/dnstools/tests.py | 2 | 8125 | """App related tests."""
from unittest import mock
from django.test import SimpleTestCase
from django.urls import reverse
from django.utils import timezone
from dns.rdtypes.ANY.TXT import TXT
from modoboa.admin import factories as admin_factories
from modoboa.admin import models as admin_models
from modoboa.lib.tests import ModoTestCase
from . import factories
from . import lib
# Each entry is a (record, expected DNSSyntaxError message) pair used by
# LibTestCase.test_check_spf_syntax.
BAD_SPF_RECORDS = [
    ("a mx ~all", "Not an SPF record"),
    ("v=spf1 toto ~all", "Unknown mechanism toto"),
    ("v=spf1 ip4 -all", "Wrong ip4 mechanism syntax"),
    ("v=spf1 +ip4:1.1.1. +all", "Wrong IPv4 address format"),
    ("v=spf1 ip6 ~all", "Wrong ip6 mechanism syntax"),
    ("v=spf1 +ip6:x:: +all", "Wrong IPv6 address format"),
    ("v=spf1 a|domain.com ~all", "Invalid syntax for a mechanism"),
    ("v=spf1 a:domain.com/TOTO", "Invalid mask found TOTO"),
    ("v=spf1 a/1000", "Invalid mask found 1000"),
    ("v=spf1 mx:domain.com/TOTO", "Invalid mask found TOTO"),
    ("v=spf1 mx/1000", "Invalid mask found 1000"),
    ("v=spf1 ptr|test.com", "Invalid syntax for ptr mechanism"),
    ("v=spf1 forward=test.com ~all", "Unknown modifier forward"),
    ("v=spf1 redirect=test.com redirect=test.com ~all",
     "Duplicate modifier redirect found"),
    ("v=spf1", "No mechanism found"),
]

# Records that must pass check_spf_syntax without raising.
GOOD_SPF_RECORDS = [
    "v=spf1 mx",
    "v=spf1 a ~all",
    "v=spf1 ip4:1.2.3.4 -all",
    "v=spf1 ip4:1.2.3.4/16 -all",
    "v=spf1 ip6:fe80::9eb6:d0ff:fe8e:2807 -all",
    "v=spf1 ip6:fe80::9eb6:d0ff:fe8e:2807/64 -all",
    "v=spf1 a -all",
    "v=spf1 a:example.com -all",
    "v=spf1 a:mailers.example.com -all",
    "v=spf1 a/24 a:offsite.example.com/24 -all",
    "v=spf1 mx mx:deferrals.domain.com -all",
    "v=spf1 mx/24 mx:offsite.domain.com/24 -all",
    "v=spf1 ptr -all",
    "v=spf1 ptr:otherdomain.com -all",
    "v=spf1 exists:example.com -all",
    "v=spf1 include:example.com -all",
    "v=spf1 ?include:example.com -all",
    "v=spf1 redirect=example.com",
    "v=spf1 exp=example.com",
]

# (record, expected DNSSyntaxError message) pairs for DMARC checks.
BAD_DMARC_RECORDS = [
    ("", "Not a valid DMARC record"),
    ("v=DMARC1; test", "Invalid tag test"),
    ("v=DMARC1; test=toto", "Unknown tag test"),
    ("v=DMARC1; adkim=g", "Wrong value g for tag adkim"),
    ("v=DMARC1; rua=mail:toto@test.com",
     "Wrong value mail:toto@test.com for tag rua"),
    ("v=DMARC1; rf=afrf,toto", "Wrong value toto for tag rf"),
    ("v=DMARC1; ri=XXX", "Wrong value XXX for tag ri: not an integer"),
    ("v=DMARC1; pct=-1", "Wrong value -1 for tag pct: less than 0"),
    ("v=DMARC1; pct=1000", "Wrong value 1000 for tag pct: greater than 100"),
    ("v=DMARC1; pct=100", "Missing required p tag"),
]

# Records that must pass check_dmarc_syntax without raising.
GOOD_DMARC_RECORDS = [
    "v=DMARC1; p=reject;; pct=100",
    "v=DMARC1; p=quarantine; sp=none; adkim=s; aspf=s; "
    "rua=mailto:dmarc-aggrep@ngyn.org; ruf=mailto:toto@test.com!24m; "
    "rf=afrf; pct=100; ri=86400"
]

# (record, expected DNSSyntaxError message) pairs for DKIM checks.
BAD_DKIM_RECORDS = [
    ("", "Not a valid DKIM record"),
    ("v=DKIM1; toto;p=XXX", "Invalid tag toto"),
    ("v=DKIM1;;k=rsa", "No key found in record"),
]
class LibTestCase(SimpleTestCase):
    """TestCase for library methods."""

    @mock.patch('modoboa.admin.lib.get_dns_records')
    def test_get_record_type_value(self, mock_get_dns_records):
        """Check record extraction for SPF, DKIM and DMARC TXT answers."""
        # Multi-string TXT records (as for DKIM) must be concatenated.
        mock_get_dns_records.return_value = [
            TXT("IN", "TXT", ["v=spf1 mx -all"]),
            TXT("IN", "TXT", ["v=DKIM1 p=XXXXX", "YYYYY"]),
            TXT("IN", "TXT", ["v=DMARC1 p=reject"]),
        ]
        self.assertEqual(
            lib.get_spf_record("example.com"), "v=spf1 mx -all"
        )
        self.assertEqual(
            lib.get_dkim_record("example.com", "mail"), "v=DKIM1 p=XXXXXYYYYY"
        )
        self.assertEqual(
            lib.get_dmarc_record("example.com"), "v=DMARC1 p=reject"
        )

    def test_check_spf_syntax(self):
        """Bad records raise with the expected message; good ones pass."""
        for record in BAD_SPF_RECORDS:
            with self.assertRaises(lib.DNSSyntaxError) as ctx:
                lib.check_spf_syntax(record[0])
            self.assertEqual(str(ctx.exception), record[1])
        for record in GOOD_SPF_RECORDS:
            lib.check_spf_syntax(record)

    def test_check_dmarc_syntax(self):
        """Bad records raise with the expected message; good ones pass."""
        for record in BAD_DMARC_RECORDS:
            with self.assertRaises(lib.DNSSyntaxError) as ctx:
                lib.check_dmarc_syntax(record[0])
            self.assertEqual(str(ctx.exception), record[1])
        for record in GOOD_DMARC_RECORDS:
            lib.check_dmarc_syntax(record)

    def test_check_dkim_syntax(self):
        """Bad records raise; a valid record yields the public key."""
        for record in BAD_DKIM_RECORDS:
            with self.assertRaises(lib.DNSSyntaxError) as ctx:
                lib.check_dkim_syntax(record[0])
            self.assertEqual(str(ctx.exception), record[1])
        key = lib.check_dkim_syntax("v=DKIM1;p=XXX123")
        self.assertEqual(key, "XXX123")
class ViewsTestCase(ModoTestCase):
    """A test case for views."""

    @classmethod
    def setUpTestData(cls):
        """Create some records."""
        super(ViewsTestCase, cls).setUpTestData()
        # One valid and two invalid records, plus an autoconfig record,
        # all attached to the same domain.
        cls.spf_rec = factories.DNSRecordFactory(
            type="spf", value="v=SPF1 mx -all", is_valid=True,
            domain__name="test.com"
        )
        cls.dmarc_rec = factories.DNSRecordFactory(
            type="dmarc", value="XXX", is_valid=False,
            error="Not a DMARC record",
            domain__name="test.com"
        )
        cls.dkim_rec = factories.DNSRecordFactory(
            type="dkim", value="12345", is_valid=False,
            error="Public key mismatchs",
            domain__name="test.com"
        )
        cls.ac_rec = factories.DNSRecordFactory(
            type="autoconfig", value="1.2.3.4", is_valid=True,
            domain__name="test.com"
        )

    def test_record_detail_view(self):
        """Detail view shows validity status or the stored error."""
        url = reverse("dnstools:dns_record_detail", args=[self.spf_rec.pk])
        response = self.client.get(url)
        self.assertContains(
            response, "A DNS record has been found and is valid")
        url = reverse("dnstools:dns_record_detail", args=[self.dmarc_rec.pk])
        response = self.client.get(url)
        self.assertContains(response, "Not a DMARC record")

    def test_autoconfig_record_status_view(self):
        """Status view reports Mozilla/Microsoft autoconfig records."""
        url = reverse(
            "dnstools:autoconfig_records_status", args=[self.ac_rec.domain.pk])
        response = self.client.get(url)
        self.assertContains(response, "autoconfig record (Mozilla) found")
        self.assertContains(
            response, "autodiscover record (Microsoft) not found")

    def test_domain_dns_configuration(self):
        """Configuration help view renders the server IP placeholder."""
        url = reverse(
            "dnstools:domain_dns_configuration", args=[self.ac_rec.domain.pk])
        response = self.client.get(url)
        self.assertContains(response, "[IP address of your Modoboa server]")
class DomainTestCase(ModoTestCase):
def test_dns_global_status(self):
admin_factories.MXRecordFactory.create(
domain__name="test.com", name="mail.test.com",
updated=timezone.now())
domain = admin_models.Domain.objects.get(name="test.com")
self.assertEqual(domain.dns_global_status, "critical")
factories.DNSRecordFactory.create(
type="spf", value="v=SPF1 mx -all", is_valid=True,
domain__name="test.com"
)
self.assertEqual(domain.dns_global_status, "critical")
factories.DNSRecordFactory(
type="dkim", value="12345", is_valid=True,
domain__name="test.com"
)
self.assertEqual(domain.dns_global_status, "critical")
factories.DNSRecordFactory(
type="dmarc", value="XXX", is_valid=True,
domain__name="test.com"
)
self.assertEqual(domain.dns_global_status, "critical")
factories.DNSRecordFactory(
type="autoconfig", value="1.2.3.4", is_valid=True,
domain__name="test.com"
)
self.assertEqual(domain.dns_global_status, "critical")
factories.DNSRecordFactory(
type="autodiscover", value="1.2.3.4", is_valid=True,
domain__name="test.com"
)
self.assertEqual(domain.dns_global_status, "ok")
| isc | 2d97343a2057ceaf7842d37e685f007d | 35.931818 | 79 | 0.608985 | 3.170113 | false | true | false | false |
modoboa/modoboa | modoboa/core/tests/test_ldap.py | 2 | 5494 | """Tests for core application."""
from unittest import skipIf
from django.conf import settings
from django.test import override_settings
from django.urls import reverse
from django.utils.functional import cached_property
from modoboa.lib import exceptions
from modoboa.lib.tests import NO_LDAP, ModoTestCase
from .. import factories, models
@skipIf(NO_LDAP, "No ldap module installed")
class LDAPTestCaseMixin(object):
"""Set of methods used to test LDAP features."""
@cached_property
def ldapauthbackend(self):
"""Return LDAPAuthBackend instance."""
from modoboa.lib.ldap_utils import LDAPAuthBackend
return LDAPAuthBackend()
def activate_ldap_authentication(self):
"""Modify settings."""
self.set_global_parameters({
"authentication_type": "ldap",
"ldap_server_port": settings.LDAP_SERVER_PORT
})
def restore_user_password(self, username, new_password):
"""Restore user password to its initial state."""
for password in ["Toto1234", "test"]:
try:
self.ldapauthbackend.update_user_password(
username, password, new_password)
except exceptions.InternalError:
pass
else:
return
raise RuntimeError("Can't restore user password.")
def authenticate(self, user, password, restore_before=True):
"""Restore password and authenticate user."""
self.client.logout()
if restore_before:
self.restore_user_password(user, password)
self.assertTrue(
self.client.login(username=user, password=password))
def searchbind_mode(self):
"""Apply settings required by the searchbind mode."""
self.set_global_parameters({
"ldap_auth_method": "searchbind",
"ldap_bind_dn": "cn=admin,dc=example,dc=com",
"ldap_bind_password": "test",
"ldap_search_base": "ou=users,dc=example,dc=com"
})
def directbind_mode(self):
"""Apply settings required by the directbind mode."""
self.set_global_parameters({
"ldap_auth_method": "directbind",
"ldap_user_dn_template": "cn=%(user)s,ou=users,dc=example,dc=com"
})
@override_settings(AUTHENTICATION_BACKENDS=(
"modoboa.lib.authbackends.LDAPBackend",
"django.contrib.auth.backends.ModelBackend"
))
class LDAPAuthenticationTestCase(LDAPTestCaseMixin, ModoTestCase):
"""Validate LDAP authentication scenarios."""
def setUp(self):
"""Create test data."""
super(LDAPAuthenticationTestCase, self).setUp()
self.activate_ldap_authentication()
def check_created_user(self, username, group="SimpleUsers", with_mb=True):
"""Check that created user is valid."""
user = models.User.objects.get(username=username)
self.assertEqual(user.role, group)
if with_mb:
self.assertEqual(user.email, username)
self.assertEqual(user.mailbox.domain.name, "example.com")
self.assertEqual(user.mailbox.full_address, username)
@override_settings(AUTH_LDAP_USER_DN_TEMPLATE=None)
def test_searchbind_authentication(self):
"""Test the bind&search method."""
self.searchbind_mode()
username = "testuser@example.com"
self.authenticate(username, "test")
self.check_created_user(username)
self.set_global_parameters({
"ldap_admin_groups": "admins",
"ldap_groups_search_base": "ou=groups,dc=example,dc=com"
})
username = "mailadmin@example.com"
self.authenticate(username, "test", False)
self.check_created_user(username, "DomainAdmins")
def test_directbind_authentication(self):
"""Test the directbind method."""
self.client.logout()
self.directbind_mode()
username = "testuser"
self.client.login(username=username, password="test")
self.check_created_user(username + "@example.com")
# 1: must work because usernames of domain admins are not
# always email addresses
self.set_global_parameters({
"ldap_admin_groups": "admins",
"ldap_groups_search_base": "ou=groups,dc=example,dc=com"
})
username = "mailadmin"
self.authenticate(username, "test", False)
self.check_created_user("mailadmin@example.com", "DomainAdmins", False)
class ProfileTestCase(LDAPTestCaseMixin, ModoTestCase):
"""Profile related tests."""
@classmethod
def setUpTestData(cls): # NOQA:N802
"""Create test data."""
super(ProfileTestCase, cls).setUpTestData()
cls.account = factories.UserFactory(
username="user@test.com", groups=("SimpleUsers",)
)
@override_settings(AUTHENTICATION_BACKENDS=(
"modoboa.lib.authbackends.LDAPBackend",
"django.contrib.auth.backends.ModelBackend"
))
def test_update_password_ldap(self):
"""Update password for an LDAP user."""
self.activate_ldap_authentication()
self.searchbind_mode()
username = "testuser@example.com"
self.authenticate(username, "test")
self.ajax_post(
reverse("core:user_profile"),
{"language": "en", "oldpassword": "test",
"newpassword": "Toto1234", "confirmation": "Toto1234"}
)
self.authenticate(username, "Toto1234", False)
| isc | 5e00caa7561069009cf08ac26bde40f6 | 34.908497 | 79 | 0.633418 | 4.155825 | false | true | false | false |
csirtgadgets/bearded-avenger | cif/httpd/views/feed/ipv4.py | 1 | 1552 | import pytricia
import ipaddress
import sys
PERM_WHITELIST = [
"0.0.0.0/8",
"10.0.0.0/8",
"127.0.0.0/8",
"192.168.0.0/16",
"169.254.0.0/16",
"192.0.2.0/24",
"224.0.0.0/4",
"240.0.0.0/5",
"248.0.0.0/5",
]
LARGEST_PREFIX = '8'
def _normalize(i):
bits = i.split('.')
rv = []
for b in bits:
if len(b) > 1 and b.startswith('0') and not b.startswith('0/'):
b = b[1:]
rv.append(b)
return '.'.join(rv)
class Ipv4(object):
def __init__(self):
pass
# https://github.com/jsommers/pytricia
def process(self, data, whitelist=[]):
wl = pytricia.PyTricia()
for x in PERM_WHITELIST:
wl[x] = True
for y in whitelist:
y = str(_normalize(y['indicator']))
if '/' not in y: # weird bug work-around it'll insert 172.16.1.60 with a /0 at the end??
y = '{}/32'.format(y)
wl[y] = True
# this could be done with generators...
rv = []
for y in data:
y['indicator'] = _normalize(y['indicator'])
try:
if sys.version_info.major < 3:
ipaddress.ip_network(unicode(y['indicator']))
else:
ipaddress.ip_network(y['indicator'])
if str(y['indicator']) not in wl:
rv.append(y)
except ValueError as e:
print(e)
print('skipping invalid address: %s' % y['indicator'])
return rv
| mpl-2.0 | 210a44158987ad884c0847013c5cc2d7 | 21.492754 | 101 | 0.47616 | 3.281184 | false | false | false | false |
csirtgadgets/bearded-avenger | cif/httpd/views/indicators.py | 1 | 4680 | from ..common import pull_token, jsonify_success, jsonify_unauth, jsonify_unknown, compress, response_compress, \
VALID_FILTERS, jsonify_busy
from flask.views import MethodView
from flask import request, current_app
from cifsdk.client.zeromq import ZMQ as Client
from cifsdk.client.dummy import Dummy as DummyClient
from cif.constants import ROUTER_ADDR, PYVERSION
from cifsdk.exceptions import AuthError, TimeoutError, InvalidSearch, SubmissionFailed, CIFBusy
import logging
import zlib
remote = ROUTER_ADDR
logger = logging.getLogger('cif-httpd')
if PYVERSION > 2:
basestring = (str, bytes)
else:
basestring = (str, unicode)
class IndicatorsAPI(MethodView):
def get(self):
filters = {}
for f in VALID_FILTERS:
if request.args.get(f):
filters[f] = request.args.get(f)
if request.args.get('q'):
filters['indicator'] = request.args.get('q')
if request.args.get('confidence'):
filters['confidence'] = request.args.get('confidence')
if request.args.get('provider'):
filters['provider'] = request.args.get('provider')
if request.args.get('group'):
filters['group'] = request.args.get('group')
if request.args.get('tags'):
filters['tags'] = request.args.get('tags')
if request.args.get('lasttime'):
filters['lasttime'] = request.args.get('lasttime')
if current_app.config.get('dummy'):
r = DummyClient(remote, pull_token()).indicators_search(filters)
return jsonify_success(r)
try:
with Client(remote, pull_token()) as cli:
r = cli.indicators_search(filters, decode=False)
except RuntimeError as e:
logger.error(e)
return jsonify_unknown(msg='search failed')
except InvalidSearch as e:
return jsonify_unknown(msg='invalid search', code=400)
except AuthError:
return jsonify_unauth()
except Exception as e:
logger.error(e)
return jsonify_unknown(msg='search failed, system may be too busy, check back later')
response = current_app.response_class(r, mimetype='application/json')
if isinstance(r, basestring):
if '"message":"unauthorized"' in r and '"message":"unauthorized"' in r:
response.status_code = 401
return response
return response
def post(self):
fireball = False
nowait = request.args.get('nowait', False)
if request.headers.get('Content-Length'):
logger.debug('content-length: %s' % request.headers['Content-Length'])
if int(request.headers['Content-Length']) > 5000:
logger.info('fireball mode')
fireball = True
try:
with Client(remote, pull_token()) as cli:
r = cli.indicators_create(request.data, nowait=nowait,
fireball=fireball)
if nowait:
r = 'pending'
except SubmissionFailed as e:
logger.error(e)
return jsonify_unknown(msg='submission failed: %s' % e, code=422)
except RuntimeError as e:
logger.error(e)
return jsonify_unknown(msg='submission had a runtime error, check logs for more information', code=422)
except TimeoutError as e:
logger.error(e)
return jsonify_unknown('submission timed out, check logs for more information', 408)
except CIFBusy:
return jsonify_busy()
except AuthError:
return jsonify_unauth()
except Exception as e:
logger.error(e)
return jsonify_unknown('submission failed with generic exception, check logs for more information', 422)
return jsonify_success(r, code=201)
def delete(self):
try:
data = request.data.decode('utf-8')
with Client(remote, pull_token()) as cli:
r = cli.indicators_delete(data)
except RuntimeError as e:
logger.error(e)
return jsonify_unknown(msg='submission failed, check logs for more information', code=422)
except TimeoutError as e:
logger.error(e)
return jsonify_unknown('submission failed, check logs for more information', 408)
except AuthError:
return jsonify_unauth()
except Exception as e:
logger.error(e)
return jsonify_unknown('submission failed, check logs for more information', 422)
return jsonify_success(r)
| mpl-2.0 | 99c94aae22a37ae1206e07dba2301e3f | 33.925373 | 116 | 0.605342 | 4.325323 | false | false | false | false |
csirtgadgets/bearded-avenger | cif/store/zelasticsearch/indicator.py | 1 | 15288 | from elasticsearch_dsl import Index
from elasticsearch import helpers
import elasticsearch.exceptions
from elasticsearch_dsl.connections import connections
from cif.store.indicator_plugin import IndicatorManagerPlugin
from cifsdk.exceptions import AuthError, CIFException
from datetime import datetime, timedelta
from cifsdk.constants import PYVERSION
import logging
from .helpers import expand_ip_idx, i_to_id
from .filters import filter_build
from .constants import LIMIT, WINDOW_LIMIT, TIMEOUT, UPSERT_MODE, PARTITION, DELETE_FILTERS, UPSERT_MATCH, REQUEST_TIMEOUT
from .locks import LockManager
from .schema import Indicator
import arrow
import time
import os
logger = logging.getLogger('cif.store.zelasticsearch')
if PYVERSION > 2:
basestring = (str, bytes)
UPSERT_TRACE = os.environ.get('CIF_STORE_ES_UPSERT_TRACE')
class IndicatorManager(IndicatorManagerPlugin):
class Deserializer(object):
def __init__(self):
pass
def loads(self, s, mimetype=None):
return s
def __init__(self, *args, **kwargs):
super(IndicatorManager, self).__init__(*args, **kwargs)
self.indicators_prefix = kwargs.get('indicators_prefix', 'indicators')
self.partition = PARTITION
self.idx = self._current_index()
self.last_index_check = datetime.now() - timedelta(minutes=5)
self.last_index_value = None
self.handle = connections.get_connection()
self.lockm = LockManager(self.handle, logger)
self._create_index()
def flush(self):
self.handle.indices.flush(index=self._current_index())
def _current_index(self):
dt = datetime.utcnow()
if self.partition == 'month': # default partition setting
dt = dt.strftime('%Y.%m')
if self.partition == 'day':
dt = dt.strftime('%Y.%m.%d')
if self.partition == 'year':
dt = dt.strftime('%Y')
idx = '{}-{}'.format(self.indicators_prefix, dt)
return idx
def _create_index(self):
# https://github.com/csirtgadgets/massive-octo-spice/blob/develop/elasticsearch/observables.json
# http://elasticsearch-py.readthedocs.org/en/master/api.html#elasticsearch.Elasticsearch.bulk
# every time we check it does a HEAD req
if self.last_index_value and (datetime.utcnow() - self.last_index_check) < timedelta(minutes=2):
return self.last_index_value
idx = self._current_index()
if not self.handle.indices.exists(idx):
logger.info('Creating new index')
index = Index(idx)
index.aliases(live={})
index.doc_type(Indicator)
index.settings(max_result_window=WINDOW_LIMIT)
try:
index.create()
# after implementing auth to use cif_store, there appears to sometimes be a race condition
# where both the normal store and the auth store don't see the index and then try to create simultaneously.
# gracefully handle if that happens
except elasticsearch.exceptions.TransportError as e:
if (e.error.startswith('IndexAlreadyExistsException') or
e.error.startswith('index_already_exists_exception')):
pass
else:
raise
self.handle.indices.flush(idx)
self.last_index_check = datetime.utcnow()
self.last_index_value = idx
return idx
def search(self, token, filters, sort='reporttime', raw=False, sindex=False, timeout=TIMEOUT):
limit = filters.get('limit', LIMIT)
# search a given index - used in upserts
if sindex:
s = Indicator.search(index=sindex)
else:
s = Indicator.search(index='{}-*'.format(self.indicators_prefix))
s = s.params(size=limit, timeout=timeout, request_timeout=REQUEST_TIMEOUT)
s = s.sort('-reporttime', '-lasttime')
s = filter_build(s, filters, token=token)
#logger.debug(s.to_dict())
start = time.time()
try:
es = connections.get_connection(s._using)
old_serializer = es.transport.deserializer
if raw:
rv = es.search(
index=s._index,
doc_type=s._doc_type,
body=s.to_dict(),
**s._params)
else:
es.transport.deserializer = self.Deserializer()
rv = es.search(
index=s._index,
doc_type=s._doc_type,
body=s.to_dict(),
filter_path=['hits.hits._source'],
**s._params)
# transport caches this, so the tokens mis-fire
es.transport.deserializer = old_serializer
except elasticsearch.exceptions.RequestError as e:
logger.error(e)
es.transport.deserializer = old_serializer
return
# catch all other es errors
except elasticsearch.ElasticsearchException as e:
logger.error(e)
es.transport.deserializer = old_serializer
return
logger.debug('query took: %0.2f' % (time.time() - start))
return rv
def create(self, token, data, raw=False, bulk=False):
index = self._create_index()
expand_ip_idx(data)
id = i_to_id(data)
if data.get('group') and type(data['group']) != list:
data['group'] = [data['group']]
if bulk:
d = {
'_index': index,
'_type': 'indicator',
'_source': data
}
return d
data['meta'] = {}
data['meta']['index'] = index
data['meta']['id'] = id
i = Indicator(**data)
if not i.save():
raise AuthError('indicator exists')
if raw:
return i
return i.to_dict()
def create_bulk(self, token, indicators, flush=False):
actions = []
for i in indicators:
ii = self.create(token, i, bulk=True)
actions.append(ii)
try:
helpers.bulk(self.handle, actions, index=self._current_index())
except Exception as e:
logger.error(e)
actions = []
if flush:
self.flush()
return len(actions)
def upsert(self, token, indicators, flush=False):
if not UPSERT_MODE:
return self.create_bulk(token, indicators, flush=flush)
# Create current index if needed
index = self._create_index()
count = 0
# http://stackoverflow.com/questions/30111258/elasticsearch-in-equivalent-operator-in-elasticsearch
# aggregate indicators based on dedup criteria
agg = {}
for d in sorted(indicators, key=lambda k: k['lasttime'], reverse=True):
key = []
for v in UPSERT_MATCH:
if d.get(v):
if isinstance(d[v], basestring):
key.append(d[v])
elif isinstance(d[v], float) or isinstance(d[v], int):
key.append(str(d[v]))
elif isinstance(d[v], list):
for k in d[v]:
key.append(k)
key = "_".join(key)
# already seen in batch
if key in agg:
# look for older first times
if d.get('firsttime') < agg[key].get('firsttime'):
agg[key]['firsttime'] = d['firsttime']
if d.get('count'):
agg[key]['count'] = agg[key].get('count') + d.get('count')
# haven't yet seen in batch
else:
agg[key] = d
actions = []
#self.lockm.lock_aquire()
for d in agg:
d = agg[d]
# start assembling search filters
filters = {'limit': 1}
for x in UPSERT_MATCH:
if d.get(x):
if x == 'confidence':
filters[x] = '{},{}'.format(d[x], d[x])
elif x == 'group':
# indicator submit api expects 'group' (singular)
# but search api expects 'groups' (plural)
filters['groups'] = d[x]
elif x == 'rdata':
# if wildcard in rdata, don't add it to upsert search;
# urls can contain asterisks, and complex wildcard queries can
# create ES timeouts
if '*' not in d['rdata']:
filters[x] = d[x]
else:
filters[x] = d[x]
# search for existing, return latest record
try:
# search the current index only
rv = self.search(token, filters, sort='reporttime', raw=True, sindex=index)
except Exception as e:
logger.error(e)
raise e
try:
rv = rv['hits']['hits']
except Exception as e:
raise CIFException(e)
# Indicator does not exist in results
if len(rv) == 0:
if not d.get('count'):
d['count'] = 1
if d.get('group') and type(d['group']) != list:
d['group'] = [d['group']]
expand_ip_idx(d)
# append create to create set
if UPSERT_TRACE:
logger.debug('upsert: creating new {}'.format(d['indicator']))
actions.append({
'_index': index,
'_type': 'indicator',
'_source': d,
})
count += 1
continue
# Indicator exists in results
else:
if UPSERT_TRACE:
logger.debug('upsert: match indicator {}'.format(rv[0]['_id']))
# map result
i = rv[0]
# skip new indicators that don't have a more recent lasttime
if not self._is_newer(d, i['_source']):
logger.debug('skipping...')
continue
# map existing indicator
i = i['_source']
# we're working within the same index
if rv[0]['_index'] == self._current_index():
# update fields
i['count'] += 1
i['lasttime'] = d['lasttime']
i['reporttime'] = d['reporttime']
# if existing indicator doesn't have message field but new indicator does, add new message to upsert
if d.get('message'):
if not i.get('message'):
i['message'] = []
i['message'].append(d['message'])
# always update description if it exists
if d.get('description'):
i['description'] = d['description']
# append update to create set
if UPSERT_TRACE:
logger.debug('upsert: updating same index {}, {}'.format(d.get('indicator'), rv[0]['_id']))
actions.append({
'_op_type': 'update',
'_index': rv[0]['_index'],
'_type': 'indicator',
'_id': rv[0]['_id'],
'_body': {'doc': i}
})
count += 1
continue
# if we aren't in the same index
else:
# update fields
i['count'] = i['count'] + 1
i['lasttime'] = d['lasttime']
i['reporttime'] = d['reporttime']
# if existing indicator doesn't have message field but new indicator does, add new message to upsert
if d.get('message'):
if not i.get('message'):
i['message'] = []
i['message'].append(d['message'])
# always update description if exists
if d.get('description'):
i['description'] = d['description']
# append create to create set
if UPSERT_TRACE:
logger.debug('upsert: updating across index {}'.format(d['indicator']))
actions.append({
'_index': index,
'_type': 'indicator',
'_source': i,
})
# delete the old document
if UPSERT_TRACE:
logger.debug('upsert: deleting old index {}, {}'.format(d['indicator'], rv[0]['_id']))
actions.append({
'_op_type': 'delete',
'_index': rv[0]['_index'],
'_type': 'indicator',
'_id': rv[0]['_id']
})
count += 1
continue
if len(actions) > 0:
try:
helpers.bulk(self.handle, actions)
except helpers.BulkIndexError as e:
if e.errors[0]['index']['status'] == 409:
# version conflict error, prob just modified by another mp process
logger.error(e)
else:
raise e
except Exception as e:
#self.lockm.lock_release()
raise e
if flush:
self.flush()
#self.lockm.lock_release()
return count
def delete(self, token, data, id=None, flush=True):
q_filters = {}
for x in DELETE_FILTERS:
if data.get(x):
q_filters[x] = data[x]
logger.debug(q_filters)
if len(q_filters) == 0:
return '0, must specify valid filter. valid filters: {}'.format(DELETE_FILTERS)
try:
rv = self.search(token, q_filters, sort='reporttime', raw=True)
rv = rv['hits']['hits']
except Exception as e:
raise CIFException(e)
logger.debug('delete match: {}'.format(rv))
# docs matched
if len(rv) > 0:
actions = []
for i in rv:
actions.append({
'_op_type': 'delete',
'_index': i['_index'],
'_type': 'indicator',
'_id': i['_id']
})
try:
helpers.bulk(self.handle, actions)
except Exception as e:
raise CIFException(e)
if flush:
self.flush()
logger.info('{} deleted {} indicators'.format(token['username'], len(rv)))
return len(rv)
# no matches, return 0
return 0
| mpl-2.0 | f51ec1badd41f84a96e295dc704d72d2 | 32.452954 | 122 | 0.483582 | 4.584108 | false | false | false | false |
cdr-stats/cdr-stats | cdr_stats/apirest/view_voip_rate.py | 3 | 3905 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.views import APIView
from rest_framework.response import Response
from voip_billing.models import VoIPRetailRate, find_rates
from cdr.functions_def import prefix_list_string
from voip_billing.function_def import prefix_allowed_to_call
import logging
logger = logging.getLogger('cdr-stats.filelog')
class VoIPRateList(APIView):
"""
List all voip rate
**Read**:
CURL Usage::
curl -u username:password -H 'Accept: application/json'
http://localhost:8000/rest-api/voip-rate/?recipient_phone_no=4323432&sort_field=prefix&order=desc
curl -u username:password -H 'Accept: application/json'
http://localhost:8000/rest-api/voip-rate/?dialcode=4323432&sort_field=prefix&order=desc
"""
authentication = (BasicAuthentication, SessionAuthentication)
def get(self, request, format=None):
"""
Voip Rate GET
"""
logger.debug('Voip Rate GET API get called')
error = {}
# check voipplan id for user
try:
voipplan_id = request.user.userprofile.voipplan_id
except:
error_msg = "User is not attached with voip plan"
error['error'] = error_msg
return Response(error)
dialcode = ''
recipient_phone_no = ''
if 'dialcode' in request.GET and request.GET.get('dialcode') != '':
dialcode = request.GET.get('dialcode')
try:
dialcode = int(dialcode)
except ValueError:
error['error'] = "Wrong value for dialcode !"
logger.error(error['error'])
return Response(error)
if 'recipient_phone_no' in request.GET:
if request.GET.get('recipient_phone_no') != '':
recipient_phone_no = request.GET.get('recipient_phone_no')
else:
error['error'] = "Please enter recipient_phone_no"
logger.error(error['error'])
return Response(error)
if recipient_phone_no:
# Check if recipient_phone_no is not banned
allowed = prefix_allowed_to_call(recipient_phone_no, voipplan_id)
if allowed:
# Get Destination prefix list e.g (34,346,3465,34657)
destination_prefix_list = prefix_list_string(str(recipient_phone_no))
prefixlist = destination_prefix_list.split(",")
# Get Rate List
rate_list = VoIPRetailRate.objects\
.values('prefix', 'retail_rate', 'prefix__destination')\
.filter(prefix__in=[int(s) for s in prefixlist])
logger.debug('Voip Rate API : result OK 200')
return Response(rate_list)
else:
error_msg = "Not allowed : %s" % recipient_phone_no
error['error'] = error_msg
logger.error(error_msg)
return Response(error)
sort_field = ''
order = ''
if request.GET.get('sort_field'):
sort_field = request.GET.get('sort_field')
if request.GET.get('order'):
order = request.GET.get('order')
# call the find rates function
result = find_rates(voipplan_id, dialcode, sort_field, order)
logger.debug('Voip Rate API : result OK 200')
return Response(result)
| mpl-2.0 | 79f931e81fb19b70c66da47ab82d7983 | 35.495327 | 113 | 0.596159 | 3.964467 | false | false | false | false |
bitmovin/bitmovin-python | examples/encoding/create_per_title_encoding_dash_hls_cenc_fairplay.py | 1 | 21397 | from bitmovin import S3Input, S3Output, Encoding, StreamInput, AACCodecConfiguration, Stream, Bitmovin, CloudRegion, \
EncoderVersion, SelectionMode, H264Profile, FMP4Muxing, TSMuxing, EncodingOutput, \
MuxingStream, H264Level, EncodingMode, StartEncodingRequest, Condition, H264CodecConfiguration, StreamMode, \
H264PerTitleConfiguration, AutoRepresentation, PerTitle, ACLEntry, ACLPermission, \
DashManifest, Period, FMP4RepresentationType, VideoAdaptationSet, AudioAdaptationSet, \
HlsManifest, AudioMedia, VariantStream, \
DRMFMP4Representation, FairPlayDRM, CENCDRM, CENCWidevineEntry, CENCPlayReadyEntry, ContentProtection
from bitmovin.errors import BitmovinError
import datetime
# Bitmovin API key used to authenticate every API request below.
API_KEY = '<INSERT YOUR API KEY>'

# S3 input: credentials, bucket and path of the source media file.
S3_INPUT_ACCESS_KEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_INPUT_SECRET_KEY = '<INSERT_YOUR_SECRET_KEY>'
S3_INPUT_BUCKET_NAME = '<INSERT_YOUR_BUCKET_NAME>'
INPUT_PATH = '/path/to/your/input/file.mp4'

# S3 output: credentials and bucket that will receive the muxings and manifests.
S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'

# CENC DRM parameters: the PSSH box is used for the Widevine entry and the
# license acquisition URL for the PlayReady entry (see main()).
# Values come from your DRM provider.
CENC_KEY = '<YOUR_CENC_KEY>'
CENC_KID = '<YOUR_CENC_KID>'
CENC_WIDEVINE_PSSH = '<YOUR_CENC_WIDEVINE_PSSH>'
CENC_PLAYREADY_LA_URL = '<YOUR_PLAYREADY_LA_URL>'

# FairPlay DRM parameters (key, IV and licensing server URI).
FAIRPLAY_KEY = '<YOUR_FAIRPLAY_KEY>'
FAIRPLAY_IV = '<YOUR_FAIRPLAY_IV>'
FAIRPLAY_URI = '<YOUR_FAIRPLAY_LICENSING_URL>'
# Timestamp component that keeps output paths unique per run,
# e.g. '2024-01-15__12-34-56'. The original built this by string-mangling
# str(datetime.now()); strftime expresses the same format directly.
date_component = datetime.datetime.now().strftime('%Y-%m-%d__%H-%M-%S')
# All encoding artifacts for this run are written below this S3 prefix.
OUTPUT_BASE_PATH = 'output/{}/'.format(date_component)
bitmovin = Bitmovin(api_key=API_KEY)
# Per-title encoding ladder: one entry per H.264 rendition.
# Adjust width/height, max_bitrate/bufsize and the H.264 profile/level as
# needed. Note: in per-title mode the codec configurations created in main()
# only consume the target height — per-title selects the bitrates itself.
encoding_profiles_h264 = [
    dict(width=320, height=180, max_bitrate=150, bufsize=150, profile=H264Profile.HIGH, level=None),
    dict(width=512, height=288, max_bitrate=300, bufsize=300, profile=H264Profile.HIGH, level=None),
    dict(width=640, height=360, max_bitrate=600, bufsize=600, profile=H264Profile.HIGH, level=None),
    dict(width=960, height=540, max_bitrate=1200, bufsize=1200, profile=H264Profile.HIGH, level=None),
    dict(width=1280, height=720, max_bitrate=2400, bufsize=2400, profile=H264Profile.HIGH, level=H264Level.L3_1),
    dict(width=1920, height=1080, max_bitrate=4800, bufsize=4800, profile=H264Profile.HIGH, level=H264Level.L4),
]
def main():
    """Run a complete per-title encoding workflow against the Bitmovin API.

    Steps performed (all via the module-level ``bitmovin`` client):
      1. Register S3 input/output resources.
      2. Create per-title H264 template streams (one per profile height) plus one AAC stream.
      3. Create CENC-protected fMP4 muxings (for DASH) and FairPlay-protected TS muxings (for HLS).
      4. Start the per-title encoding and wait for it to finish.
      5. Generate a DASH manifest (with ContentProtection) and an HLS manifest
         from the per-title result muxings.

    Relies on the module-level credential/path constants and ``encoding_profiles_h264``.
    """
    # Create an S3 input. This resource is then used as base to acquire input files.
    s3_input = S3Input(access_key=S3_INPUT_ACCESS_KEY,
                       secret_key=S3_INPUT_SECRET_KEY,
                       bucket_name=S3_INPUT_BUCKET_NAME,
                       name='Test S3 Input')
    s3_input = bitmovin.inputs.S3.create(s3_input).resource
    # Create an S3 Output. This will be used as target bucket for the muxings, sprites and manifests
    s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
                         secret_key=S3_OUTPUT_SECRETKEY,
                         bucket_name=S3_OUTPUT_BUCKETNAME,
                         name='Test S3 Output')
    s3_output = bitmovin.outputs.S3.create(s3_output).resource
    acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
    # Create DRM resources
    widevine_drm = CENCWidevineEntry(pssh=CENC_WIDEVINE_PSSH)
    playready_drm = CENCPlayReadyEntry(la_url=CENC_PLAYREADY_LA_URL)
    # Create an Encoding. This is the base entity used to configure the encoding.
    encoding = Encoding(name='Constrained Per-title encoding test',
                        cloud_region=CloudRegion.AUTO,
                        encoder_version=EncoderVersion.BETA)
    encoding = bitmovin.encodings.Encoding.create(encoding).resource
    encoding_configs = []
    # Iterate over all encoding profiles and create the H264 configuration.
    # As we are using per-title, we do not define bitrates, instead just providing the target height as indicator
    for idx, _ in enumerate(encoding_profiles_h264):
        profile_h264 = encoding_profiles_h264[idx]
        encoding_config = dict(profile_h264=profile_h264)
        h264_codec = H264CodecConfiguration(
            name='Sample video codec configuration',
            profile=H264Profile.HIGH,
            height=profile_h264.get("height")
        )
        encoding_config['h264_codec'] = bitmovin.codecConfigurations.H264.create(h264_codec).resource
        encoding_configs.append(encoding_config)
    # Also the AAC configuration has to be created, which will be later on used to create the streams.
    audio_codec_configuration = AACCodecConfiguration(name='AAC Codec Configuration',
                                                      bitrate=128000,
                                                      rate=48000)
    audio_codec_configuration = bitmovin.codecConfigurations.AAC.create(audio_codec_configuration).resource
    # create the input stream resources
    video_input_stream = StreamInput(input_id=s3_input.id,
                                     input_path=INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)
    audio_input_stream = StreamInput(input_id=s3_input.id,
                                     input_path=INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)
    # With the configurations and the input file, streams are now created that will be muxed later on.
    # As we use per-title, the streams are used as templates
    for encoding_config in encoding_configs:
        encoding_profile = encoding_config.get("profile_h264")
        # Only apply this template when the source is at least as tall as the profile.
        video_stream_condition = Condition(attribute="HEIGHT", operator=">=", value=str(encoding_profile.get('height')))
        video_stream = Stream(codec_configuration_id=encoding_config.get("h264_codec").id,
                              input_streams=[video_input_stream],
                              conditions=video_stream_condition,
                              name='Stream H264 {}p'.format(encoding_profile.get('height')),
                              mode=StreamMode.PER_TITLE_TEMPLATE)
        encoding_config['h264_stream'] = bitmovin.encodings.Stream.create(object_=video_stream,
                                                                          encoding_id=encoding.id).resource
    # create the stream for the audio
    audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
                          input_streams=[audio_input_stream],
                          name='Audio Stream')
    audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
                                                    encoding_id=encoding.id).resource
    # === FMP4 Muxings ===
    # Create FMP4 muxings which are later used for the DASH manifest. The current settings will set a segment length
    # of 4 seconds.
    for encoding_config in encoding_configs:
        encoding_profile = encoding_config.get("profile_h264")
        video_muxing_stream = MuxingStream(encoding_config['h264_stream'].id)
        # {height}/{bitrate}/{uuid} are per-title placeholders resolved by the encoder.
        video_muxing_output = EncodingOutput(output_id=s3_output.id,
                                             output_path=OUTPUT_BASE_PATH + "video/dash/{height}p_{bitrate}_{uuid}/",
                                             acl=[acl_entry])
        video_muxing = FMP4Muxing(segment_length=4,
                                  segment_naming='seg_%number%.m4s',
                                  init_segment_name='init.mp4',
                                  streams=[video_muxing_stream],
                                  name="Video FMP4 Muxing {}p".format(encoding_profile.get('height')))
        encoding_config['fmp4_muxing'] = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing,
                                                                               encoding_id=encoding.id).resource
        # The output is attached to the DRM resource, not the muxing itself,
        # so only the encrypted result is written.
        video_cenc = CENCDRM(key=CENC_KEY,
                             kid=CENC_KID,
                             widevine=widevine_drm,
                             playReady=playready_drm,
                             outputs=[video_muxing_output],
                             name="Video FMP4 CENC")
        encoding_config['fmp4_cenc'] = bitmovin.encodings.Muxing.FMP4.DRM.CENC.create(
            object_=video_cenc,
            encoding_id=encoding.id,
            muxing_id=encoding_config['fmp4_muxing'].id).resource
    audio_muxing_stream = MuxingStream(audio_stream.id)
    audio_muxing_output = EncodingOutput(output_id=s3_output.id,
                                         output_path=OUTPUT_BASE_PATH + 'audio/dash/',
                                         acl=[acl_entry])
    audio_fmp4_muxing = FMP4Muxing(segment_length=4,
                                   segment_naming='seg_%number%.m4s',
                                   init_segment_name='init.mp4',
                                   streams=[audio_muxing_stream],
                                   name='Audio FMP4 Muxing')
    audio_fmp4_muxing = bitmovin.encodings.Muxing.FMP4.create(object_=audio_fmp4_muxing,
                                                              encoding_id=encoding.id).resource
    audio_cenc = CENCDRM(key=CENC_KEY,
                         kid=CENC_KID,
                         widevine=widevine_drm,
                         playReady=playready_drm,
                         outputs=[audio_muxing_output],
                         name='Audio FMP4 CENC')
    audio_cenc = bitmovin.encodings.Muxing.FMP4.DRM.CENC.create(object_=audio_cenc,
                                                                encoding_id=encoding.id,
                                                                muxing_id=audio_fmp4_muxing.id).resource
    # === TS Muxings ===
    # Create TS muxings which are later used for the HLS manifest. The current settings will set a segment length
    # of 4 seconds.
    for encoding_config in encoding_configs:
        encoding_profile = encoding_config.get('profile_h264')
        video_muxing_stream = MuxingStream(encoding_config['h264_stream'].id)
        video_muxing_output = EncodingOutput(output_id=s3_output.id,
                                             output_path=OUTPUT_BASE_PATH + 'video/hls/{height}p_{bitrate}_{uuid}/',
                                             acl=[acl_entry])
        video_muxing = TSMuxing(segment_length=4,
                                segment_naming='seg_%number%.ts',
                                streams=[video_muxing_stream],
                                name='Video TS Muxing {}p'.format(encoding_profile.get('height')))
        encoding_config['ts_muxing'] = bitmovin.encodings.Muxing.TS.create(object_=video_muxing,
                                                                           encoding_id=encoding.id).resource
        video_fairplay = FairPlayDRM(key=FAIRPLAY_KEY,
                                     iv=FAIRPLAY_IV,
                                     uri=FAIRPLAY_URI,
                                     outputs=[video_muxing_output],
                                     name='Video TS FairPlay')
        encoding_config['ts_fairplay'] = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(
            object_=video_fairplay,
            encoding_id=encoding.id,
            muxing_id=encoding_config['ts_muxing'].id).resource
    audio_muxing_stream = MuxingStream(audio_stream.id)
    audio_muxing_output = EncodingOutput(output_id=s3_output.id,
                                         output_path=OUTPUT_BASE_PATH + 'audio/hls/',
                                         acl=[acl_entry])
    audio_ts_muxing = TSMuxing(segment_length=4,
                               segment_naming='seg_%number%.ts',
                               streams=[audio_muxing_stream],
                               name='Audio TS Muxing')
    audio_ts_muxing = bitmovin.encodings.Muxing.TS.create(object_=audio_ts_muxing,
                                                          encoding_id=encoding.id).resource
    audio_fairplay = FairPlayDRM(key=FAIRPLAY_KEY,
                                 iv=FAIRPLAY_IV,
                                 uri=FAIRPLAY_URI,
                                 outputs=[audio_muxing_output],
                                 name='Audio FairPlay')
    audio_fairplay = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=audio_fairplay,
                                                                      encoding_id=encoding.id,
                                                                      muxing_id=audio_ts_muxing.id).resource
    # Keep the audio info together
    audio_representation_info = dict(
        fmp4_muxing=audio_fmp4_muxing,
        ts_muxing=audio_ts_muxing,
        stream=audio_stream,
        ts_fairplay=audio_fairplay,
        fmp4_cenc=audio_cenc
    )
    # Finally create the per-title configuration to pass to the encoding
    auto_representations = AutoRepresentation(adopt_configuration_threshold=0.5)
    h264_per_title_configuration = H264PerTitleConfiguration(auto_representations=auto_representations)
    per_title = PerTitle(h264_configuration=h264_per_title_configuration)
    # And start the encoding
    start_encoding_request = StartEncodingRequest(per_title=per_title, encoding_mode=EncodingMode.THREE_PASS)
    bitmovin.encodings.Encoding.start(encoding_id=encoding.id, start_encoding_request=start_encoding_request)
    try:
        bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
    except BitmovinError as bitmovin_error:
        # NOTE(review): failure is only logged here; manifest creation below is
        # still attempted — confirm this best-effort behavior is intended.
        print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
    # Specify the output for manifest which will be in the OUTPUT_BASE_PATH.
    manifest_output = EncodingOutput(output_id=s3_output.id,
                                     output_path=OUTPUT_BASE_PATH,
                                     acl=[acl_entry])
    # === DASH MANIFEST ===
    muxing_for_contentprotection = None
    drm_for_contentprotection = None
    # Create a DASH manifest and add one period with an adapation set for audio and video
    dash_manifest = DashManifest(manifest_name='stream.mpd',
                                 outputs=[manifest_output],
                                 name='DASH Manifest')
    dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource
    period = Period()
    period = bitmovin.manifests.DASH.add_period(object_=period, manifest_id=dash_manifest.id).resource
    video_adaptation_set = VideoAdaptationSet()
    video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(object_=video_adaptation_set,
                                                                            manifest_id=dash_manifest.id,
                                                                            period_id=period.id).resource
    audio_adaptation_set = AudioAdaptationSet(lang='en')
    audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(object_=audio_adaptation_set,
                                                                            manifest_id=dash_manifest.id,
                                                                            period_id=period.id).resource
    # Add the audio representation
    segment_path = audio_representation_info.get('fmp4_cenc').outputs[0].outputPath
    segment_path = remove_output_base_path(segment_path)
    fmp4_representation_audio = DRMFMP4Representation(FMP4RepresentationType.TEMPLATE,
                                                      encoding_id=encoding.id,
                                                      muxing_id=audio_representation_info.get('fmp4_muxing').id,
                                                      drm_id=audio_representation_info.get('fmp4_cenc').id,
                                                      segment_path=segment_path)
    bitmovin.manifests.DASH.add_drm_fmp4_representation(object_=fmp4_representation_audio,
                                                        manifest_id=dash_manifest.id,
                                                        period_id=period.id,
                                                        adaptationset_id=audio_adaptation_set.id)
    # Add all video representations to the video adaption set
    muxings = bitmovin.encodings.Muxing.FMP4.list(encoding_id=encoding.id).resource
    for muxing in muxings:
        drm = bitmovin.encodings.Muxing.FMP4.DRM.CENC.list(encoding.id, muxing.id).resource
        segment_path = drm[0].outputs[0].outputPath
        if 'audio' in segment_path:
            # we ignore the audio muxing
            continue
        if '{uuid}' in segment_path:
            # we ignore any muxing with placeholders in the path - they are the template muxings, not the result muxings
            continue
        segment_path = remove_output_base_path(segment_path)
        # Remember the last per-title result muxing/DRM pair for the ContentProtection element below.
        muxing_for_contentprotection = muxing
        drm_for_contentprotection = drm[0]
        fmp4_representation = DRMFMP4Representation(
            type=FMP4RepresentationType.TEMPLATE,
            encoding_id=encoding.id,
            muxing_id=muxing.id,
            segment_path=segment_path,
            drm_id=drm[0].id
        )
        bitmovin.manifests.DASH.add_drm_fmp4_representation(
            object_=fmp4_representation,
            manifest_id=dash_manifest.id,
            period_id=period.id,
            adaptationset_id=video_adaptation_set.id)
    # add content protection to the adaptation set
    video_content_protection = ContentProtection(encoding_id=encoding.id,
                                                 muxing_id=muxing_for_contentprotection.id,
                                                 drm_id=drm_for_contentprotection.id)
    bitmovin.manifests.DASH.add_content_protection_to_adaptionset(object_=video_content_protection,
                                                                  manifest_id=dash_manifest.id,
                                                                  period_id=period.id,
                                                                  adaptationset_id=video_adaptation_set.id)
    bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)
    # === HLS MANIFEST ===
    # Create a HLS manifest and add one period with an adapation set for audio and video
    hls_manifest = HlsManifest(manifest_name='stream.m3u8',
                               outputs=[manifest_output],
                               name='HLS Manifest')
    hls_manifest = bitmovin.manifests.HLS.create(hls_manifest).resource
    segment_path = audio_representation_info.get('ts_fairplay').outputs[0].outputPath
    segment_path = remove_output_base_path(segment_path)
    audio_media = AudioMedia(name='HLS Audio Media',
                             group_id='audio',
                             segment_path=segment_path,
                             encoding_id=encoding.id,
                             stream_id=audio_representation_info.get('stream').id,
                             muxing_id=audio_representation_info.get('ts_muxing').id,
                             drm_id=audio_representation_info.get('ts_fairplay').id,
                             language='en',
                             uri='audio.m3u8')
    audio_media = bitmovin.manifests.HLS.AudioMedia.create(manifest_id=hls_manifest.id, object_=audio_media).resource
    # Add all video representations to the video adaption set
    muxings = bitmovin.encodings.Muxing.TS.list(encoding_id=encoding.id).resource
    for muxing in muxings:
        drm = bitmovin.encodings.Muxing.TS.DRM.FairPlay.list(encoding.id, muxing.id).resource
        segment_path = drm[0].outputs[0].outputPath
        if 'audio' in segment_path:
            # we ignore the audio muxing
            continue
        if '{uuid}' in segment_path:
            # we ignore any muxing with placeholders in the path - they are the template muxings, not the result muxings
            continue
        segment_path = remove_output_base_path(segment_path)
        variant_stream = VariantStream(audio=audio_media.groupId,
                                       closed_captions='NONE',
                                       segment_path=segment_path,
                                       uri='video_{}.m3u8'.format(muxing.avgBitrate),
                                       encoding_id=encoding.id,
                                       stream_id=muxing.streams[0].streamId,
                                       muxing_id=muxing.id,
                                       drm_id=drm[0].id)
        bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
                                                    object_=variant_stream)
    bitmovin.manifests.HLS.start(manifest_id=hls_manifest.id)
    try:
        bitmovin.manifests.DASH.wait_until_finished(manifest_id=dash_manifest.id, check_interval=1)
    except BitmovinError as bitmovin_error:
        print('Exception occurred while waiting for manifest creation to finish: {}'.format(bitmovin_error))
    try:
        bitmovin.manifests.HLS.wait_until_finished(manifest_id=hls_manifest.id, check_interval=1)
    except BitmovinError as bitmovin_error:
        print('Exception occurred while waiting for manifest creation to finish: {}'.format(bitmovin_error))
        # NOTE(review): exit(-1) placed inside the except per the apparent intent
        # (fail the script when HLS manifest creation fails) — original indentation
        # was lost in extraction; confirm against upstream.
        exit(-1)
def remove_output_base_path(text):
    """Strip the configured OUTPUT_BASE_PATH prefix from *text*, if present.

    Paths that do not start with the prefix are returned unchanged.
    """
    prefix = OUTPUT_BASE_PATH
    return text[len(prefix):] if text.startswith(prefix) else text
# Script entry point.
if __name__ == '__main__':
    main()
| unlicense | e795751c24c80a57cd44391e56fb80a2 | 53.032828 | 120 | 0.578539 | 4.052462 | false | true | false | false |
bitmovin/bitmovin-python | bitmovin/services/encodings/stream_service.py | 1 | 1979 | from bitmovin.errors import MissingArgumentError
from bitmovin.resources.models import Stream as StreamResource
from .burn_in_srt_subtitle_service import BurnInSrtSubtitleService
from .thumbnail_service import ThumbnailService
from .sprite_service import SpriteService
from .stream_filter_service import StreamFilterService
from ..rest_service import RestService
class Stream(RestService):
    """REST service for the stream resources of an encoding.

    The endpoint is parameterized by encoding id, so every public method first
    repoints ``relative_url`` at the requested encoding before delegating to
    the generic :class:`RestService` implementation.
    """

    BASE_ENDPOINT_URL = 'encoding/encodings/{encoding_id}/streams'

    def __init__(self, http_client):
        super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=StreamResource)
        # Sub-services operating on a single stream.
        self.Sprite = SpriteService(http_client=http_client)
        self.Thumbnail = ThumbnailService(http_client=http_client)
        self.Filter = StreamFilterService(http_client=http_client)
        self.BurnInSrtSubtitle = BurnInSrtSubtitleService(http_client=http_client)

    def _get_endpoint_url(self, encoding_id):
        """Return the stream collection URL for one encoding."""
        if not encoding_id:
            raise MissingArgumentError('encoding_id must be given')
        return self.BASE_ENDPOINT_URL.replace('{encoding_id}', encoding_id)

    def _use_encoding(self, encoding_id):
        # Repoint this service at the stream collection of the given encoding.
        self.relative_url = self._get_endpoint_url(encoding_id=encoding_id)

    def create(self, object_, encoding_id):
        self._use_encoding(encoding_id)
        return super().create(object_)

    def delete(self, encoding_id, stream_id):
        self._use_encoding(encoding_id)
        return super().delete(id_=stream_id)

    def retrieve(self, encoding_id, stream_id):
        self._use_encoding(encoding_id)
        return super().retrieve(id_=stream_id)

    def list(self, encoding_id, offset=None, limit=None):
        self._use_encoding(encoding_id)
        return super().list(offset, limit)

    def retrieve_custom_data(self, encoding_id, stream_id):
        self._use_encoding(encoding_id)
        return super().retrieve_custom_data(id_=stream_id)
| unlicense | 6adcb6c5ffb2643174be5028effd4c7d | 43.977273 | 109 | 0.717534 | 3.712946 | false | false | false | false |
bitmovin/bitmovin-python | examples/encoding/create_simple_encoding.py | 1 | 11824 | import datetime
from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, H264CodecConfiguration, \
AACCodecConfiguration, H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
FMP4Muxing, MuxingStream, CloudRegion, DashManifest, FMP4Representation, FMP4RepresentationType, Period, \
VideoAdaptationSet, AudioAdaptationSet
from bitmovin.errors import BitmovinError
# --- Account / storage configuration (placeholders to be replaced by the user) ---
API_KEY = '<INSERT_YOUR_API_KEY>'
# https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>'
S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'
# Timestamp component that gives each run a unique output directory.
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = '/your/output/base/path/{}/'.format(date_component)
def main():
    """Run a simple Bitmovin encoding from an HTTPS input to an S3 output.

    Creates 1080p and 720p H264 streams plus one AAC audio stream, muxes each
    into segmented fMP4, waits for the encoding, then generates a DASH manifest
    referencing all three representations.
    """
    bitmovin = Bitmovin(api_key=API_KEY)
    # Input/output resources.
    https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST)
    https_input = bitmovin.inputs.HTTPS.create(https_input).resource
    s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
                         secret_key=S3_OUTPUT_SECRETKEY,
                         bucket_name=S3_OUTPUT_BUCKETNAME,
                         name='Sample S3 Output')
    s3_output = bitmovin.outputs.S3.create(s3_output).resource
    encoding = Encoding(name='example encoding',
                        cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
    encoding = bitmovin.encodings.Encoding.create(encoding).resource
    # Codec configurations: two H264 renditions and one AAC audio configuration.
    video_codec_configuration_1080p = H264CodecConfiguration(name='example_video_codec_configuration_1080p',
                                                             bitrate=4800000,
                                                             rate=25.0,
                                                             width=1920,
                                                             height=1080,
                                                             profile=H264Profile.HIGH)
    video_codec_configuration_1080p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_1080p).resource
    video_codec_configuration_720p = H264CodecConfiguration(name='example_video_codec_configuration_720p',
                                                            bitrate=2400000,
                                                            rate=25.0,
                                                            width=1280,
                                                            height=720,
                                                            profile=H264Profile.HIGH)
    video_codec_configuration_720p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_720p).resource
    audio_codec_configuration = AACCodecConfiguration(name='example_audio_codec_configuration_english',
                                                      bitrate=128000,
                                                      rate=48000)
    audio_codec_configuration = bitmovin.codecConfigurations.AAC.create(audio_codec_configuration).resource
    # Input streams (auto-select video/audio tracks from the same source file).
    video_input_stream = StreamInput(input_id=https_input.id,
                                     input_path=HTTPS_INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)
    audio_input_stream = StreamInput(input_id=https_input.id,
                                     input_path=HTTPS_INPUT_PATH,
                                     selection_mode=SelectionMode.AUTO)
    video_stream_1080p = Stream(codec_configuration_id=video_codec_configuration_1080p.id,
                                input_streams=[video_input_stream], name='Sample Stream 1080p')
    video_stream_1080p = bitmovin.encodings.Stream.create(object_=video_stream_1080p,
                                                          encoding_id=encoding.id).resource
    video_stream_720p = Stream(codec_configuration_id=video_codec_configuration_720p.id,
                               input_streams=[video_input_stream], name='Sample Stream 720p')
    video_stream_720p = bitmovin.encodings.Stream.create(object_=video_stream_720p,
                                                         encoding_id=encoding.id).resource
    audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
                          input_streams=[audio_input_stream], name='Sample Stream AUDIO')
    audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
                                                    encoding_id=encoding.id).resource
    acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
    # fMP4 muxings (4 second segments), one per stream, each with its own output path.
    video_muxing_stream_1080p = MuxingStream(video_stream_1080p.id)
    video_muxing_stream_720p = MuxingStream(video_stream_720p.id)
    audio_muxing_stream = MuxingStream(audio_stream.id)
    video_muxing_1080p_output = EncodingOutput(output_id=s3_output.id,
                                               output_path=OUTPUT_BASE_PATH + 'video/1080p/',
                                               acl=[acl_entry])
    video_muxing_1080p = FMP4Muxing(segment_length=4,
                                    segment_naming='seg_%number%.m4s',
                                    init_segment_name='init.mp4',
                                    streams=[video_muxing_stream_1080p],
                                    outputs=[video_muxing_1080p_output],
                                    name='Sample Muxing 1080p')
    video_muxing_1080p = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing_1080p,
                                                               encoding_id=encoding.id).resource
    video_muxing_720p_output = EncodingOutput(output_id=s3_output.id,
                                              output_path=OUTPUT_BASE_PATH + 'video/720p/',
                                              acl=[acl_entry])
    video_muxing_720p = FMP4Muxing(segment_length=4,
                                   segment_naming='seg_%number%.m4s',
                                   init_segment_name='init.mp4',
                                   streams=[video_muxing_stream_720p],
                                   outputs=[video_muxing_720p_output],
                                   name='Sample Muxing 720p')
    video_muxing_720p = bitmovin.encodings.Muxing.FMP4.create(object_=video_muxing_720p,
                                                              encoding_id=encoding.id).resource
    audio_muxing_output = EncodingOutput(output_id=s3_output.id,
                                         output_path=OUTPUT_BASE_PATH + 'audio/',
                                         acl=[acl_entry])
    audio_muxing = FMP4Muxing(segment_length=4,
                              segment_naming='seg_%number%.m4s',
                              init_segment_name='init.mp4',
                              streams=[audio_muxing_stream],
                              outputs=[audio_muxing_output],
                              name='Sample Muxing AUDIO')
    audio_muxing = bitmovin.encodings.Muxing.FMP4.create(object_=audio_muxing,
                                                         encoding_id=encoding.id).resource
    # Start the encoding and block until it is done (or fails).
    bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
    try:
        bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
    except BitmovinError as bitmovin_error:
        # NOTE(review): failure is only logged; manifest creation below is still
        # attempted — confirm this best-effort behavior is intended.
        print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
    # DASH manifest: one period, one video adaptation set (both renditions)
    # and one audio adaptation set.
    manifest_output = EncodingOutput(output_id=s3_output.id,
                                     output_path=OUTPUT_BASE_PATH,
                                     acl=[acl_entry])
    dash_manifest = DashManifest(manifest_name='example_manifest_sintel_dash.mpd',
                                 outputs=[manifest_output],
                                 name='Sample DASH Manifest')
    dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource
    period = Period()
    period = bitmovin.manifests.DASH.add_period(object_=period, manifest_id=dash_manifest.id).resource
    video_adaptation_set = VideoAdaptationSet()
    video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(object_=video_adaptation_set,
                                                                            manifest_id=dash_manifest.id,
                                                                            period_id=period.id).resource
    audio_adaptation_set = AudioAdaptationSet(lang='en')
    audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(object_=audio_adaptation_set,
                                                                            manifest_id=dash_manifest.id,
                                                                            period_id=period.id).resource
    fmp4_representation_1080p = FMP4Representation(FMP4RepresentationType.TEMPLATE,
                                                   encoding_id=encoding.id,
                                                   muxing_id=video_muxing_1080p.id,
                                                   segment_path='video/1080p/')
    fmp4_representation_1080p = bitmovin.manifests.DASH.add_fmp4_representation(object_=fmp4_representation_1080p,
                                                                                manifest_id=dash_manifest.id,
                                                                                period_id=period.id,
                                                                                adaptationset_id=video_adaptation_set.id
                                                                                ).resource
    fmp4_representation_720p = FMP4Representation(FMP4RepresentationType.TEMPLATE,
                                                  encoding_id=encoding.id,
                                                  muxing_id=video_muxing_720p.id,
                                                  segment_path='video/720p/')
    fmp4_representation_720p = bitmovin.manifests.DASH.add_fmp4_representation(object_=fmp4_representation_720p,
                                                                               manifest_id=dash_manifest.id,
                                                                               period_id=period.id,
                                                                               adaptationset_id=video_adaptation_set.id
                                                                               ).resource
    fmp4_representation_audio = FMP4Representation(FMP4RepresentationType.TEMPLATE,
                                                   encoding_id=encoding.id,
                                                   muxing_id=audio_muxing.id,
                                                   segment_path='audio/')
    fmp4_representation_audio = bitmovin.manifests.DASH.add_fmp4_representation(object_=fmp4_representation_audio,
                                                                                manifest_id=dash_manifest.id,
                                                                                period_id=period.id,
                                                                                adaptationset_id=audio_adaptation_set.id
                                                                                ).resource
    bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)
    try:
        bitmovin.manifests.DASH.wait_until_finished(manifest_id=dash_manifest.id)
    except BitmovinError as bitmovin_error:
        print("Exception occurred while waiting for manifest creation to finish: {}".format(bitmovin_error))
# Script entry point.
if __name__ == '__main__':
    main()
| unlicense | 6d71e0c9f507e86094c370bb59a170d0 | 62.229947 | 120 | 0.506174 | 4.679066 | false | true | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/encodings/muxings/ts_muxing.py | 1 | 1782 | from .muxing import Muxing
class TSMuxing(Muxing):
    """MPEG-TS muxing configuration.

    Extends the generic :class:`Muxing` with TS-specific segmenting options:
    ``segment_length`` (seconds per segment) and ``segment_naming`` (segment
    file name pattern, e.g. ``'seg_%number%.ts'``).
    """

    def __init__(self, streams, segment_length, segment_naming=None, outputs=None, id_=None, custom_data=None,
                 name=None, description=None, avg_bitrate=None, max_bitrate=None, min_bitrate=None, ignored_by=None,
                 stream_conditions_mode=None):
        super().__init__(id_=id_, custom_data=custom_data, streams=streams, outputs=outputs,
                         name=name, description=description, avg_bitrate=avg_bitrate, max_bitrate=max_bitrate,
                         min_bitrate=min_bitrate, ignored_by=ignored_by, stream_conditions_mode=stream_conditions_mode)
        # camelCase attribute names mirror the JSON wire format.
        self.segmentLength = segment_length
        self.segmentNaming = segment_naming

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build a TS muxing from an API JSON payload.

        ``segmentLength`` is required in the payload (KeyError otherwise);
        ``segmentNaming`` is optional.
        """
        muxing = super().parse_from_json_object(json_object=json_object)
        segment_length = json_object['segmentLength']
        segment_naming = json_object.get('segmentNaming')
        # FIX: instantiate via `cls` (not the hard-coded class) so subclasses
        # round-trip through JSON as their own type.
        ts_muxing = cls(segment_length=segment_length,
                        segment_naming=segment_naming,
                        id_=muxing.id,
                        streams=muxing.streams,
                        outputs=muxing.outputs,
                        custom_data=muxing.customData,
                        name=muxing.name,
                        description=muxing.description,
                        ignored_by=muxing.ignored_by,
                        stream_conditions_mode=muxing.stream_conditions_mode,
                        max_bitrate=muxing.maxBitrate,
                        avg_bitrate=muxing.avgBitrate,
                        min_bitrate=muxing.minBitrate)
        return ts_muxing
| unlicense | 85ab286092150fc45985894905ec70e3 | 48.5 | 119 | 0.566779 | 4.335766 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/services/encodings/fmp4_drm_service.py | 1 | 1189 | from .drm_service import DRMService
from .fairplay_drm_service import FairPlayDRM
from .widevine_drm_service import WidevineDRM
from .playready_drm_service import PlayReadyDRM
from .primetime_drm_service import PrimeTimeDRM
from .marlin_drm_service import MarlinDRM
from .clearkey_drm_service import ClearKeyDRM
from .cenc_drm_service import CENCDRM
class FMP4DRMService(DRMService):
    """Bundles every DRM sub-service available for fMP4 muxings.

    Each attribute is a DRM service instance scoped to the ``fmp4`` muxing
    endpoint.
    """

    MUXING_TYPE_URL = 'fmp4'

    def __init__(self, http_client):
        super().__init__(http_client=http_client)
        # One sub-service per supported DRM scheme.
        drm_services = (
            ('FairPlay', FairPlayDRM),
            ('Widevine', WidevineDRM),
            ('PlayReady', PlayReadyDRM),
            ('PrimeTime', PrimeTimeDRM),
            ('Marlin', MarlinDRM),
            ('ClearKey', ClearKeyDRM),
            ('CENC', CENCDRM),
        )
        for attribute_name, service_class in drm_services:
            setattr(self, attribute_name,
                    service_class(http_client=http_client, muxing_type_url=self.MUXING_TYPE_URL))
| unlicense | 0500eb9aa65add848e31a1554efe8b1f | 50.695652 | 100 | 0.751892 | 2.844498 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/encodings/drms/playready_drm.py | 1 | 3411 | from .drm import DRM
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums import PlayReadyMethod
from bitmovin.utils import Serializable
from .playready_drm_additional_information import PlayReadyDRMAdditionalInformation
class PlayReadyDRM(DRM, Serializable):
    """PlayReady DRM configuration for a muxing.

    :param key_seed: key seed used to derive the content key (alternative to ``key``)
    :param kid: key identifier
    :param method: encryption method; ``str`` or :class:`PlayReadyMethod`
    :param la_url: license acquisition URL
    :param key: content key (alternative to ``key_seed``)
    :param additional_information: optional :class:`PlayReadyDRMAdditionalInformation`
    """

    def __init__(self, key_seed=None, kid=None, method=None, la_url=None, outputs=None, id_=None, custom_data=None,
                 name=None, description=None, key=None, additional_information=None):
        super().__init__(id_=id_, custom_data=custom_data, outputs=outputs, name=name, description=description)
        self._method = None
        self.method = method  # validated/normalized by the property setter
        self.keySeed = key_seed
        self.kid = kid
        self.laUrl = la_url
        self.key = key
        self._additional_information = None
        if additional_information is not None:
            self.additionalInformation = additional_information

    @property
    def method(self):
        """Encryption method, stored as its string value."""
        return self._method

    @method.setter
    def method(self, value):
        if value is None:
            self._method = None
            return
        if isinstance(value, str):
            self._method = value
        elif isinstance(value, PlayReadyMethod):
            # Store the enum's underlying string value for serialization.
            self._method = value.value
        else:
            raise InvalidTypeError(
                'Invalid type {} for method: must be either str or PlayReadyMethod!'.format(type(value)))

    @property
    def additionalInformation(self):
        """Optional PlayReady-specific additional information."""
        return self._additional_information

    @additionalInformation.setter
    def additionalInformation(self, new_additional_information):
        if new_additional_information is None:
            self._additional_information = None
        elif isinstance(new_additional_information, PlayReadyDRMAdditionalInformation):
            self._additional_information = new_additional_information
        else:
            # FIX: the error message previously said "for playReady" (copy-paste);
            # it now names the attribute actually being assigned.
            raise InvalidTypeError(
                'Invalid type {} for additionalInformation: must be a PlayReadyDRMAdditionalInformation!'.format(
                    type(new_additional_information))
            )

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build a PlayReadyDRM from an API JSON payload."""
        drm = super().parse_from_json_object(json_object=json_object)
        id_ = drm.id
        custom_data = drm.customData
        outputs = drm.outputs
        name = drm.name
        description = drm.description
        method = json_object.get('method')
        key_seed = json_object.get('keySeed')
        kid = json_object.get('kid')
        la_url = json_object.get('laUrl')
        key = json_object.get('key')
        additional_information = None
        if json_object.get('additionalInformation') is not None:
            additional_information = PlayReadyDRMAdditionalInformation.parse_from_json_object(
                json_object.get('additionalInformation'))
        # FIX: instantiate via `cls` so subclasses round-trip as their own type.
        playready_drm = cls(key_seed=key_seed, kid=kid, method=method, la_url=la_url,
                            outputs=outputs, id_=id_, custom_data=custom_data,
                            name=name, description=description, key=key,
                            additional_information=additional_information)
        return playready_drm

    def serialize(self):
        """Serialize to a JSON-compatible dict, including the private-backed fields."""
        serialized = super().serialize()
        serialized['method'] = self._method
        serialized['additionalInformation'] = self.additionalInformation
        return serialized
| unlicense | 5d397b2f493f507968ade6ca9316c92e | 37.761364 | 120 | 0.639109 | 4.395619 | false | false | false | false |
bitmovin/bitmovin-python | tests/bitmovin/services/manifests/smooth/mp4_representation_tests.py | 1 | 13675 | import unittest
import uuid
from bitmovin import Bitmovin, ACLEntry, ACLPermission, EncodingOutput, Encoding, \
Stream, StreamInput, MuxingStream, MP4Muxing, AbstractMP4Representation, SmoothManifest, MP4Representation
from tests.bitmovin import BitmovinTestCase
class MP4RepresentationTests(BitmovinTestCase):
    @classmethod
    def setUpClass(cls):
        # No suite-level fixtures beyond what the base test case provides.
        super().setUpClass()
    @classmethod
    def tearDownClass(cls):
        # No suite-level cleanup beyond what the base test case provides.
        super().tearDownClass()
    def setUp(self):
        """Create a fresh API client plus a sample encoding and MP4 muxing per test.

        ``self.api_key``, ``_create_sample_encoding`` and ``_create_sample_muxing``
        are provided by the BitmovinTestCase base class (not visible here).
        """
        super().setUp()
        self.bitmovin = Bitmovin(self.api_key)
        self.assertIsNotNone(self.bitmovin)
        self.assertTrue(isinstance(self.bitmovin, Bitmovin))
        self.sampleEncoding = self._create_sample_encoding()  # type: Encoding
        (self.sampleMuxing, self.sampleStream) = self._create_sample_muxing()  # type: MP4Muxing
    def tearDown(self):
        # Delegates entirely to the base class teardown.
        super().tearDown()
def test_add_mp4_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.Smooth.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_mp4_representation = self._get_sample_mp4_representation()
mp4_representation_response = self.bitmovin.manifests.Smooth.MP4Representation.create(
object_=sample_mp4_representation, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(mp4_representation_response)
self.assertIsNotNone(mp4_representation_response.resource)
self.assertIsNotNone(mp4_representation_response.resource.id)
self._compare_mp4_representations(sample_mp4_representation, mp4_representation_response.resource)
def test_list_mp4_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.Smooth.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_mp4_representation = self._get_sample_mp4_representation()
mp4_representation_response = self.bitmovin.manifests.Smooth.MP4Representation.create(
object_=sample_mp4_representation, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(mp4_representation_response)
self.assertIsNotNone(mp4_representation_response.resource)
self.assertIsNotNone(mp4_representation_response.resource.id)
self._compare_mp4_representations(sample_mp4_representation, mp4_representation_response.resource)
list_representation_resource_response = self.bitmovin.manifests.Smooth.MP4Representation.list(
manifest_id=manifest_resource_response.resource.id,
limit=1
)
self.assertIsNotNone(list_representation_resource_response)
self.assertTrue(isinstance(list_representation_resource_response.resource, list))
self.assertEqual(1, len(list_representation_resource_response.resource))
def test_retrieve_mp4_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.Smooth.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_mp4_representation = self._get_sample_mp4_representation()
mp4_representation_response = self.bitmovin.manifests.Smooth.MP4Representation.create(
object_=sample_mp4_representation, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(mp4_representation_response)
self.assertIsNotNone(mp4_representation_response.resource)
self.assertIsNotNone(mp4_representation_response.resource.id)
self._compare_mp4_representations(sample_mp4_representation, mp4_representation_response.resource)
retrieve_representation_resource_response = self.bitmovin.manifests.Smooth.MP4Representation.retrieve(
manifest_id=manifest_resource_response.resource.id,
representation_id=mp4_representation_response.resource.id
)
self.assertIsNotNone(retrieve_representation_resource_response)
self.assertTrue(isinstance(retrieve_representation_resource_response.resource, MP4Representation))
self._compare_mp4_representations(sample_mp4_representation, retrieve_representation_resource_response.resource)
def test_delete_mp4_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.Smooth.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_mp4_representation = self._get_sample_mp4_representation()
mp4_representation_response = self.bitmovin.manifests.Smooth.MP4Representation.create(
object_=sample_mp4_representation, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(mp4_representation_response)
self.assertIsNotNone(mp4_representation_response.resource)
self.assertIsNotNone(mp4_representation_response.resource.id)
self._compare_mp4_representations(sample_mp4_representation, mp4_representation_response.resource)
delete_sample_mp4_representation_resource_response = self.bitmovin.manifests.Smooth.MP4Representation.delete(
manifest_id=manifest_resource_response.resource.id,
representation_id=mp4_representation_response.resource.id
)
self.assertIsNotNone(delete_sample_mp4_representation_resource_response)
self.assertIsNotNone(delete_sample_mp4_representation_resource_response.resource)
self.assertIsNotNone(delete_sample_mp4_representation_resource_response.resource.id)
self.assertEqual(mp4_representation_response.resource.id,
delete_sample_mp4_representation_resource_response.resource.id)
def _compare_manifests(self, first: SmoothManifest, second: SmoothManifest):
self.assertEqual(first.serverManifestName, second.serverManifestName)
self.assertEqual(first.clientManifestName, second.clientManifestName)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
return True
def _compare_encodings(self, first: Encoding, second: Encoding):
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
self.assertEqual(first.encoderVersion, second.encoderVersion)
self.assertEqual(first.cloudRegion, second.cloudRegion)
return True
def _compare_muxings(self, first: MP4Muxing, second: MP4Muxing):
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.fragmentDuration, second.fragmentDuration)
self.assertEqual(first.filename, second.filename)
self.assertEqual(first.name, second.name)
self.assertEqual(second.description, second.description)
return True
def _compare_mp4_representations(self, first: AbstractMP4Representation, second: AbstractMP4Representation):
self.assertEqual(first.encodingId, second.encodingId)
self.assertEqual(first.muxingId, second.muxingId)
self.assertEqual(first.mediaFile, second.mediaFile)
self.assertEqual(first.language, second.language)
self.assertEqual(first.trackName, second.trackName)
return True
def _get_sample_manifest(self):
encoding_output = self._get_sample_encoding_output()
manifest = SmoothManifest(server_manifest_name='bitmovin-python_Sample_Smooth_Manifest.ism',
client_manifest_name='bitmovin-python_Sample_Smooth_Manifest.ismc',
outputs=[encoding_output],
name='Sample Smooth Manifest')
self.assertIsNotNone(manifest)
self.assertIsNotNone(manifest.serverManifestName)
self.assertIsNotNone(manifest.clientManifestName)
self.assertIsNotNone(manifest.outputs)
return manifest
def _get_sample_mp4_representation(self):
encoding_id = self.sampleEncoding.id
muxing_id = self.sampleMuxing.id
media_file = 'myrendition.ismv'
language = 'some_language'
track_name = 'some_track'
mp4_representation = MP4Representation(encoding_id=encoding_id,
muxing_id=muxing_id,
media_file=media_file,
language=language,
track_name=track_name)
return mp4_representation
def _get_sample_encoding_output(self):
acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
sample_output = self.utils.get_sample_s3_output()
s3_output = self.bitmovin.outputs.S3.create(sample_output)
encoding_output = EncodingOutput(output_id=s3_output.resource.id,
output_path='/bitmovin-python/StreamTests/' + str(uuid.uuid4()),
acl=[acl_entry])
return encoding_output
def _get_sample_muxing(self):
stream = self._get_sample_stream()
create_stream_response = self.bitmovin.encodings.Stream.create(object_=stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(create_stream_response)
self.assertIsNotNone(create_stream_response.resource)
self.assertIsNotNone(create_stream_response.resource.id)
muxing_stream = MuxingStream(stream_id=create_stream_response.resource.id)
muxing = MP4Muxing(streams=[muxing_stream],
filename="myrendition.ismv",
fragment_duration=4000,
outputs=stream.outputs,
name='Sample MP4 Muxing')
return muxing, create_stream_response.resource
    def _get_sample_stream(self):
        """Build a Stream backed by freshly created H264 config, S3 input and S3 output.

        The referenced codec configuration, input and output are created
        through the API; the returned Stream itself is not yet persisted.
        """
        sample_codec_configuration = self.utils.get_sample_h264_codec_configuration()
        h264_codec_configuration = self.bitmovin.codecConfigurations.H264.create(sample_codec_configuration)
        (sample_input, sample_files) = self.utils.get_sample_s3_input()
        s3_input = self.bitmovin.inputs.S3.create(sample_input)
        stream_input = StreamInput(input_id=s3_input.resource.id,
                                   input_path=sample_files.get('854b9c98-17b9-49ed-b75c-3b912730bfd1'),
                                   selection_mode='AUTO')
        acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
        sample_output = self.utils.get_sample_s3_output()
        s3_output = self.bitmovin.outputs.S3.create(sample_output)
        # Random path suffix so concurrent test runs do not overwrite each other.
        encoding_output = EncodingOutput(output_id=s3_output.resource.id,
                                         output_path='/bitmovin-python/StreamTests/'+str(uuid.uuid4()),
                                         acl=[acl_entry])
        stream = Stream(codec_configuration_id=h264_codec_configuration.resource.id,
                        input_streams=[stream_input],
                        outputs=[encoding_output],
                        name='Sample Stream')
        self.assertIsNotNone(stream.codecConfigId)
        self.assertIsNotNone(stream.inputStreams)
        self.assertIsNotNone(stream.outputs)
        return stream
def _create_sample_encoding(self):
sample_encoding = self.utils.get_sample_encoding()
encoding_resource_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(encoding_resource_response)
self.assertIsNotNone(encoding_resource_response.resource)
self.assertIsNotNone(encoding_resource_response.resource.id)
self._compare_encodings(sample_encoding, encoding_resource_response.resource)
return encoding_resource_response.resource
def _create_sample_muxing(self):
(sample_muxing, created_stream) = self._get_sample_muxing()
muxing_resource_response = self.bitmovin.encodings.Muxing.MP4.create(object_=sample_muxing,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(muxing_resource_response)
self.assertIsNotNone(muxing_resource_response.resource)
self.assertIsNotNone(muxing_resource_response.resource.id)
self._compare_muxings(sample_muxing, muxing_resource_response.resource)
return muxing_resource_response.resource, created_stream
if __name__ == '__main__':
unittest.main()
| unlicense | b0f40b755a3295950ad646e7f1f59845 | 51.194656 | 120 | 0.690311 | 4.258798 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/encodings/muxings/webm_muxing.py | 1 | 1711 | from .muxing import Muxing
class WebMMuxing(Muxing):
    """WebM muxing with segmented output (segment length, naming and init segment)."""

    def __init__(self, streams, segment_length, segment_naming=None, init_segment_name=None, outputs=None,
                 id_=None, custom_data=None, name=None, description=None, ignored_by=None, stream_conditions_mode=None):
        super().__init__(id_=id_, custom_data=custom_data, streams=streams, outputs=outputs,
                         name=name, description=description, ignored_by=ignored_by,
                         stream_conditions_mode=stream_conditions_mode)
        # Segmentation settings specific to the WebM container.
        self.segmentLength = segment_length
        self.segmentNaming = segment_naming
        self.initSegmentName = init_segment_name

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Create a WebMMuxing from its API JSON representation."""
        base = super().parse_from_json_object(json_object=json_object)
        return WebMMuxing(segment_length=json_object.get('segmentLength'),
                          segment_naming=json_object.get('segmentNaming'),
                          init_segment_name=json_object.get('initSegmentName'),
                          id_=base.id,
                          streams=base.streams,
                          outputs=base.outputs,
                          custom_data=base.customData,
                          name=base.name,
                          description=base.description,
                          ignored_by=base.ignored_by,
                          stream_conditions_mode=base.stream_conditions_mode)
| unlicense | fd86792814b75fd8055db72e0e889e73 | 46.527778 | 120 | 0.565167 | 4.432642 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/filters/audio_mix_source_channel.py | 1 | 1514 | from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums import AudioMixFilterChannelType
from bitmovin.utils import Serializable
class AudioMixSourceChannel(Serializable):
    """One source channel of an audio-mix filter, with an optional gain factor."""

    def __init__(self, channel_type, channel_number, gain=1.0):
        super().__init__()
        self._channel_type = None  # backing store for the channel_type property
        self.channelNumber = channel_number
        self.gain = gain
        self.channel_type = channel_type

    @property
    def channel_type(self):
        return self._channel_type

    @channel_type.setter
    def channel_type(self, new_value):
        # Accept either the raw API string or the enum; a None value is
        # ignored so an already-set type is never cleared accidentally.
        if new_value is None:
            return
        if isinstance(new_value, str):
            self._channel_type = new_value
        elif isinstance(new_value, AudioMixFilterChannelType):
            self._channel_type = new_value.value
        else:
            raise InvalidTypeError(
                'Invalid type {} for channelType: must be either str or AudioMixFilterChannelType!'.format(
                    type(new_value)))

    def serialize(self):
        """Serialize to a dict, exposing the channel type under the API key 'type'."""
        serialized = super().serialize()
        serialized['type'] = self.channel_type
        return serialized

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Create an AudioMixSourceChannel from its API JSON representation."""
        return AudioMixSourceChannel(channel_type=json_object.get('type'),
                                     channel_number=json_object.get('channelNumber'),
                                     gain=json_object.get('gain'))
| unlicense | 49fabcf70c2a61f11f89c67d67e5faa1 | 34.209302 | 118 | 0.648613 | 4.21727 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/services/manifests/smooth_content_protection_service.py | 1 | 1726 | from bitmovin.resources import SmoothContentProtection
from bitmovin.errors import MissingArgumentError, FunctionalityNotAvailableError
from bitmovin.services.rest_service import RestService
class SmoothContentProtectionService(RestService):
    """CRUD operations for content protections of a Smooth Streaming manifest."""

    BASE_ENDPOINT_URL = 'encoding/manifests/smooth/{manifest_id}/contentprotection'

    def __init__(self, http_client):
        self.resource_class = SmoothContentProtection
        super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=self.resource_class)

    def _get_endpoint_url(self, manifest_id):
        # The endpoint is manifest-scoped, so the id is mandatory.
        if not manifest_id:
            raise MissingArgumentError('manifest_id must be given')
        return self.BASE_ENDPOINT_URL.replace('{manifest_id}', manifest_id)

    def _scope_to_manifest(self, manifest_id):
        """Point this service's relative URL at the given manifest."""
        self.relative_url = self._get_endpoint_url(manifest_id=manifest_id)

    def create(self, object_, manifest_id):
        self._scope_to_manifest(manifest_id)
        return super().create(object_)

    def delete(self, manifest_id, protection_id):
        self._scope_to_manifest(manifest_id)
        return super().delete(id_=protection_id)

    def retrieve(self, manifest_id, protection_id):
        self._scope_to_manifest(manifest_id)
        return super().retrieve(id_=protection_id)

    def list(self, manifest_id, offset=None, limit=None):
        self._scope_to_manifest(manifest_id)
        return super().list(offset, limit)

    def retrieve_custom_data(self, manifest_id, protection_id):
        # The API does not expose custom data on this resource type.
        raise FunctionalityNotAvailableError('Retrieve Custom Data is not available for Smooth Manifest ' +
                                             'Content Protection')
| unlicense | 60a66bed0737dca66eb302981a943037 | 43.25641 | 114 | 0.698146 | 4.013953 | false | false | false | false |
bitmovin/bitmovin-python | tests/bitmovin/services/outputs/sftp_output_service_tests.py | 1 | 6876 | import unittest
import json
from bitmovin import Bitmovin, Response, SFTPOutput, FTPTransferVersion
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class SFTPOutputTests(BitmovinTestCase):
    """CRUD, listing and custom-data tests for SFTP outputs."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        super().setUp()
        self.bitmovin = Bitmovin(self.api_key)
        self.assertIsNotNone(self.bitmovin)
        self.assertTrue(isinstance(self.bitmovin, Bitmovin))

    def tearDown(self):
        super().tearDown()

    def test_create_sftp_output(self):
        """An SFTP output can be created and is echoed back unchanged."""
        sample_output = self._get_sample_sftp_output()
        output_resource_response = self.bitmovin.outputs.SFTP.create(sample_output)
        self.assertIsNotNone(output_resource_response)
        self.assertIsNotNone(output_resource_response.resource)
        self.assertIsNotNone(output_resource_response.resource.id)
        self._compare_sftp_outputs(sample_output, output_resource_response.resource)

    def test_create_sftp_output_without_name(self):
        """Creation succeeds when the optional name is omitted."""
        sample_output = self._get_sample_sftp_output()
        sample_output.name = None
        output_resource_response = self.bitmovin.outputs.SFTP.create(sample_output)
        self.assertIsNotNone(output_resource_response)
        self.assertIsNotNone(output_resource_response.resource)
        self.assertIsNotNone(output_resource_response.resource.id)
        self._compare_sftp_outputs(sample_output, output_resource_response.resource)

    def test_create_sftp_output_custom(self):
        """A non-default port is persisted and returned by the API."""
        sample_output = self._get_sample_sftp_output()
        sample_output.port = 9921
        output_resource_response = self.bitmovin.outputs.SFTP.create(sample_output)
        self.assertIsNotNone(output_resource_response)
        self.assertIsNotNone(output_resource_response.resource)
        self.assertIsNotNone(output_resource_response.resource.id)
        self._compare_sftp_outputs(sample_output, output_resource_response.resource)
        self.assertEqual(sample_output.port, output_resource_response.resource.port)

    def test_retrieve_sftp_output(self):
        """A created SFTP output can be retrieved by id."""
        sample_output = self._get_sample_sftp_output()
        created_output_response = self.bitmovin.outputs.SFTP.create(sample_output)
        self.assertIsNotNone(created_output_response)
        self.assertIsNotNone(created_output_response.resource)
        self.assertIsNotNone(created_output_response.resource.id)
        self._compare_sftp_outputs(sample_output, created_output_response.resource)
        retrieved_output_response = self.bitmovin.outputs.SFTP.retrieve(created_output_response.resource.id)
        self.assertIsNotNone(retrieved_output_response)
        self.assertIsNotNone(retrieved_output_response.resource)
        self._compare_sftp_outputs(created_output_response.resource, retrieved_output_response.resource)

    def test_delete_sftp_output(self):
        """Deleting an SFTP output makes subsequent retrieval fail."""
        sample_output = self._get_sample_sftp_output()
        created_output_response = self.bitmovin.outputs.SFTP.create(sample_output)
        self.assertIsNotNone(created_output_response)
        self.assertIsNotNone(created_output_response.resource)
        self.assertIsNotNone(created_output_response.resource.id)
        self._compare_sftp_outputs(sample_output, created_output_response.resource)
        deleted_minimal_resource = self.bitmovin.outputs.SFTP.delete(created_output_response.resource.id)
        self.assertIsNotNone(deleted_minimal_resource)
        self.assertIsNotNone(deleted_minimal_resource.resource)
        self.assertIsNotNone(deleted_minimal_resource.resource.id)
        try:
            self.bitmovin.outputs.SFTP.retrieve(created_output_response.resource.id)
            self.fail(
                'Previous statement should have thrown an exception. ' +
                'Retrieving output after deleting it shouldn\'t be possible.'
            )
        except BitmovinApiError:
            pass

    def test_list_sftp_outputs(self):
        """Listing returns at least the output created by this test."""
        sample_output = self._get_sample_sftp_output()
        created_output_response = self.bitmovin.outputs.SFTP.create(sample_output)
        self.assertIsNotNone(created_output_response)
        self.assertIsNotNone(created_output_response.resource)
        self.assertIsNotNone(created_output_response.resource.id)
        self._compare_sftp_outputs(sample_output, created_output_response.resource)
        outputs = self.bitmovin.outputs.SFTP.list()
        self.assertIsNotNone(outputs)
        self.assertIsNotNone(outputs.resource)
        self.assertIsNotNone(outputs.response)
        self.assertIsInstance(outputs.resource, list)
        self.assertIsInstance(outputs.response, Response)
        # Bug fix: the original asserted on resource.__sizeof__() (the list
        # object's byte size, which is always > 1) which made the check
        # vacuous.  Assert on the number of listed outputs instead.
        self.assertGreaterEqual(len(outputs.resource), 1)

    def test_retrieve_sftp_output_custom_data(self):
        """Custom data attached at creation time can be fetched back."""
        sample_output = self._get_sample_sftp_output()
        sample_output.customData = '<pre>my custom data</pre>'
        created_output_response = self.bitmovin.outputs.SFTP.create(sample_output)
        self.assertIsNotNone(created_output_response)
        self.assertIsNotNone(created_output_response.resource)
        self.assertIsNotNone(created_output_response.resource.id)
        self._compare_sftp_outputs(sample_output, created_output_response.resource)
        custom_data_response = self.bitmovin.outputs.SFTP.retrieve_custom_data(created_output_response.resource.id)
        custom_data = custom_data_response.resource
        self.assertEqual(sample_output.customData, json.loads(custom_data.customData))

    def _compare_sftp_outputs(self, first: SFTPOutput, second: SFTPOutput):
        """Assert that two SFTP outputs carry the same user-visible settings.

        :param first: SFTPOutput
        :param second: SFTPOutput
        :return: bool
        """
        self.assertEqual(first.host, second.host)
        self.assertEqual(first.name, second.name)
        self.assertEqual(first.description, second.description)
        self.assertEqual(first.maxConcurrentConnections, second.maxConcurrentConnections)
        self.assertEqual(first.transferVersion, second.transferVersion)
        # Consistency fix: the docstring promised a bool but nothing was
        # returned; return True like the sibling compare helpers.
        return True

    def _get_sample_sftp_output(self):
        """Build an SFTPOutput from credentials stored in the test settings."""
        sftp_output_settings = self.settings.get('sampleObjects').get('outputs').get('sftp')\
            .get('1b5110d3-8ed3-438d-a8cb-b12cb8b142ca')
        sftp_output = SFTPOutput(
            host=sftp_output_settings.get('host'),
            username=sftp_output_settings.get('username'),
            password=sftp_output_settings.get('password'),
            name='Sample SFTP Output',
            max_concurrent_connections=35,
            transfer_version=FTPTransferVersion.V1_1_0
        )
        self.assertIsNotNone(sftp_output.host)
        self.assertIsNotNone(sftp_output.username)
        self.assertIsNotNone(sftp_output.password)
        return sftp_output
if __name__ == '__main__':
unittest.main()
| unlicense | 0b5986a0fafc44393ad0122261b60893 | 45.147651 | 115 | 0.705934 | 4.02812 | false | true | false | false |
bitmovin/bitmovin-python | bitmovin/resources/response_error_data_detail.py | 1 | 1462 | from bitmovin.errors import InvalidTypeError
from .resource import Resource
from .link import Link
class ResponseErrorDataDetail(Resource):
    """A single detail entry of an API error response (message plus related links)."""

    def __init__(self, timestamp, type_, text, field, links):
        super().__init__()
        self._links = None  # backing store for the links property
        self.timestamp = timestamp
        self.type = type_
        self.text = text
        self.field = field
        self.links = links

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Create a ResponseErrorDataDetail from its API JSON representation."""
        return ResponseErrorDataDetail(timestamp=json_object.get('timestamp'),
                                       type_=json_object.get('type'),
                                       text=json_object.get('text'),
                                       field=json_object.get('field'),
                                       links=json_object.get('links'))

    @property
    def links(self):
        return self._links

    @links.setter
    def links(self, new_links):
        # Accept either ready-made Link objects or raw JSON dicts; None is
        # ignored so an existing value is never cleared accidentally.
        if new_links is None:
            return
        if not isinstance(new_links, list):
            raise InvalidTypeError('links has to be a list of Link objects')
        if all(isinstance(link, Link) for link in new_links):
            self._links = new_links
        else:
            self._links = [Link.parse_from_json_object(item) for item in new_links]
| unlicense | 16bfbe2be6bd9e23c55b3a70b6761f57 | 30.782609 | 111 | 0.599863 | 4.225434 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/manifests/smooth/mp4_representation.py | 1 | 1420 | from .abstract_mp4_representation import AbstractMP4Representation
class MP4Representation(AbstractMP4Representation):
    """Concrete MP4 representation entry of a Smooth Streaming manifest."""

    def __init__(self, encoding_id, muxing_id, media_file, language=None, track_name=None, id_=None, custom_data=None):
        super().__init__(id_=id_, custom_data=custom_data, encoding_id=encoding_id, muxing_id=muxing_id,
                         media_file=media_file, language=language, track_name=track_name)

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Create an MP4Representation from its API JSON representation."""
        base = AbstractMP4Representation.parse_from_json_object(json_object=json_object)
        return MP4Representation(id_=base.id,
                                 custom_data=base.customData,
                                 encoding_id=base.encodingId,
                                 muxing_id=base.muxingId,
                                 media_file=base.mediaFile,
                                 language=base.language,
                                 track_name=base.trackName)
| unlicense | ba16789692ce01f6c8dc8bf1653a84ea | 49.714286 | 119 | 0.573239 | 4.781145 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/manifests/hls/audio_media.py | 1 | 2318 | from .abstract_standard_media import AbstractStandardMedia
class AudioMedia(AbstractStandardMedia):
    """HLS audio media playlist entry, extending the standard media with a URI."""

    def __init__(self, name, group_id, segment_path, encoding_id, stream_id, muxing_id, drm_id=None,
                 start_segment_number=None, end_segment_number=None, language=None, assoc_language=None,
                 is_default=None, autoselect=None, characteristics=None, uri=None, id_=None):
        super().__init__(id_=id_, name=name, group_id=group_id, language=language, assoc_language=assoc_language,
                         is_default=is_default, autoselect=autoselect, characteristics=characteristics,
                         segment_path=segment_path, encoding_id=encoding_id, stream_id=stream_id, muxing_id=muxing_id,
                         drm_id=drm_id, start_segment_number=start_segment_number,
                         end_segment_number=end_segment_number)
        self.uri = uri

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Create an AudioMedia from its API JSON representation."""
        base = super().parse_from_json_object(json_object=json_object)
        # Only 'uri' is specific to audio media; everything else comes from
        # the already-parsed base object.
        return AudioMedia(id_=base.id,
                          name=base.name,
                          group_id=base.groupId,
                          language=base.language,
                          assoc_language=base.assocLanguage,
                          is_default=base.isDefault,
                          autoselect=base.autoselect,
                          characteristics=base.characteristics,
                          segment_path=base.segmentPath,
                          encoding_id=base.encodingId,
                          stream_id=base.streamId,
                          muxing_id=base.muxingId,
                          drm_id=base.drmId,
                          start_segment_number=base.startSegmentNumber,
                          end_segment_number=base.endSegmentNumber,
                          uri=json_object.get('uri'))
| unlicense | 423c33f9e7f19a4193ef162a905ead74 | 51.681818 | 118 | 0.603969 | 4.052448 | false | false | false | false |
bitmovin/bitmovin-python | bitmovin/services/outputs/output_service.py | 1 | 1123 | from bitmovin.bitmovin_object import BitmovinObject
from .s3_output_service import S3
from .gcs_output_service import GCS
from .akamai_netstorage_output_service import AkamaiNetStorage
from .azure_output_service import Azure
from .ftp_output_service import FTP
from .sftp_output_service import SFTP
from .generic_s3_output_service import GenericS3
from .local_output_service import Local
from .s3_role_based_output_service import S3RoleBased
class OutputService(BitmovinObject):
    """Facade bundling one sub-service per supported output backend."""

    def __init__(self, http_client):
        super().__init__()
        self.http_client = http_client
        # All sub-services share the same HTTP client instance.
        service_classes = {
            'S3': S3,
            'GCS': GCS,
            'AkamaiNetStorage': AkamaiNetStorage,
            'Azure': Azure,
            'FTP': FTP,
            'SFTP': SFTP,
            'GenericS3': GenericS3,
            'Local': Local,
            'S3RoleBased': S3RoleBased,
        }
        for attribute_name, service_class in service_classes.items():
            setattr(self, attribute_name, service_class(http_client=self.http_client))
| unlicense | 3ea787cb179a04a4b05fb6e60f4659c9 | 43.92 | 78 | 0.732858 | 3.217765 | false | false | false | false |
mozilla/telemetry-analysis-service | atmo/forms/widgets.py | 1 | 1474 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from .cache import CachedFileCache
class CachedFileHiddenInput(forms.HiddenInput):
    """Hidden input that additionally shows a notice when a freshly cached upload exists."""

    template_with_cachekey = """
    <div class="alert alert-info">
        <span class="glyphicon glyphicon-exclamation-sign" aria-hidden="true"></span>
        <strong>Just uploaded file:</strong> %(file_name)s
        <p class="help-block">This file will be used when the form is successfully submitted</p>
    </div>
    %(cachekey_field)s
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cache = CachedFileCache()

    def render(self, name, value, attrs=None):
        """Render the hidden field, prefixed with a notice when a cached file exists."""
        cachekey_field = super().render(name, value, attrs)
        metadata = self.cache.metadata(value)
        if metadata is None:
            # No cached upload for this key: plain hidden input only.
            return cachekey_field
        context = {
            "file_name": conditional_escape(metadata["name"]),
            "cachekey_field": cachekey_field,
        }
        return mark_safe(self.template_with_cachekey % context)
| mpl-2.0 | 4785f772564f632d46fdc2f690a0da83 | 34.095238 | 92 | 0.645862 | 3.973046 | false | false | false | false |
mozilla/telemetry-analysis-service | atmo/jobs/migrations/0002_auto_20161017_0913.py | 1 | 1301 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-17 09:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters SparkJob interval choices and result-visibility help text."""

    dependencies = [("jobs", "0001_initial")]

    operations = [
        migrations.AlterField(
            model_name="sparkjob",
            name="interval_in_hours",
            # Choices are stored as hours: 24 = daily, 168 = weekly, 720 = monthly.
            field=models.IntegerField(
                choices=[(24, "Daily"), (168, "Weekly"), (720, "Monthly")],
                default=24,
                help_text="Interval at which the job should run, in hours.",
            ),
        ),
        migrations.AlterField(
            model_name="sparkjob",
            name="result_visibility",
            # Controls which S3 bucket (public vs. credential-gated) receives
            # the notebook results.
            field=models.CharField(
                choices=[
                    (
                        "private",
                        "Private: results output to an S3 bucket, viewable with AWS credentials",
                    ),
                    (
                        "public",
                        "Public: results output to a public S3 bucket, viewable by anyone",
                    ),
                ],
                default="private",
                help_text="Whether notebook results are uploaded to a public or private bucket",
                max_length=50,
            ),
        ),
    ]
| mpl-2.0 | e97ef9dc3a9578108c9d180e4191759f | 32.358974 | 97 | 0.470407 | 5.062257 | false | false | false | false |
mozilla/telemetry-analysis-service | tests/clusters/test_tasks.py | 1 | 8245 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import timedelta
from django.conf import settings
from atmo.clusters import models, tasks
def test_deactivate_clusters(mocker, one_hour_ago, cluster_factory):
    """An expired, still-active cluster is picked up and deactivated."""
    expired_cluster = cluster_factory(
        expires_at=one_hour_ago, most_recent_status=models.Cluster.STATUS_WAITING
    )
    deactivate = mocker.patch("atmo.clusters.models.Cluster.deactivate")
    result = tasks.deactivate_clusters()
    assert deactivate.call_count == 1
    assert result == [[expired_cluster.identifier, expired_cluster.pk]]
def test_dont_deactivate_clusters(mocker, one_hour_ahead, cluster_factory):
    """A cluster that has not yet expired is left untouched."""
    cluster_factory(
        expires_at=one_hour_ahead, most_recent_status=models.Cluster.STATUS_WAITING
    )
    deactivate = mocker.patch("atmo.clusters.models.Cluster.deactivate")
    result = tasks.deactivate_clusters()
    assert result == []
    assert deactivate.call_count == 0
def test_extended_cluster_does_not_deactiveate(mocker, one_hour_ago, cluster_factory):
    """Extending an expired cluster's lifetime prevents its deactivation."""
    # NOTE(review): "deactiveate" in the function name is a typo for
    # "deactivate"; kept as-is so the test id stays stable.
    expired_cluster = cluster_factory(
        expires_at=one_hour_ago, most_recent_status=models.Cluster.STATUS_WAITING
    )
    expired_cluster.extend(2)
    deactivate = mocker.patch("atmo.clusters.models.Cluster.deactivate")
    result = tasks.deactivate_clusters()
    assert deactivate.call_count == 0
    assert result == []
def test_send_expiration_mails(mailoutbox, mocker, now, cluster_factory):
    """A cluster expiring within the next hour triggers exactly one warning mail."""
    cluster = cluster_factory(
        expires_at=now + timedelta(minutes=59),  # 1 hours is the cut-off
        most_recent_status=models.Cluster.STATUS_WAITING,
    )
    assert len(mailoutbox) == 0
    tasks.send_expiration_mails()
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    expected_subject = "%sCluster %s is expiring soon!" % (
        settings.EMAIL_SUBJECT_PREFIX,
        cluster.identifier,
    )
    assert message.subject == expected_subject
    assert message.from_email == settings.DEFAULT_FROM_EMAIL
    assert list(message.to) == [cluster.created_by.email]
    # The task must also record that a warning was already sent.
    cluster.refresh_from_db()
    assert cluster.expiration_mail_sent
def test_update_master_address_success(cluster, mocker):
    """When EMR reports a public DNS name it is stored and returned."""
    dns_name = "example.com"
    mocker.patch(
        "atmo.clusters.models.Cluster.info",
        new_callable=mocker.PropertyMock,
        return_value={"public_dns": dns_name},
    )
    assert tasks.update_master_address(cluster.pk) == dns_name
def test_update_master_address_noop(cluster_factory, mocker):
    """An unchanged master address is skipped unless force=True is passed."""
    dns_name = "example.com"
    cluster = cluster_factory(master_address=dns_name)
    mocker.patch(
        "atmo.clusters.models.Cluster.info",
        new_callable=mocker.PropertyMock,
        return_value={"public_dns": dns_name},
    )
    # Address already stored -> nothing to do.
    assert tasks.update_master_address(cluster.pk) is None
    # force=True bypasses the short-circuit.
    assert tasks.update_master_address(cluster.pk, force=True) == dns_name
def test_update_master_address_empty(cluster, mocker):
    """An empty public DNS from EMR means the address is not available yet."""
    mocker.patch(
        "atmo.clusters.models.Cluster.info",
        new_callable=mocker.PropertyMock,
        return_value={"public_dns": ""},
    )
    assert tasks.update_master_address(cluster.pk) is None
def test_update_clusters_empty():
    """With no clusters in the database the update task returns an empty list."""
    result = tasks.update_clusters()
    assert result == []
def test_update_clusters(mocker, now, user, cluster_factory):
    """update_clusters() syncs cluster state from the AWS jobflow listing,
    ignores unknown jobflows, and records readiness/usage metrics."""
    # Three active clusters of increasing age; the oldest one bounds the
    # created_after argument of the AWS listing call asserted below.
    cluster1 = cluster_factory(
        created_by=user,
        created_at=now - timedelta(days=1),
        most_recent_status=models.Cluster.STATUS_RUNNING,
    )
    cluster2 = cluster_factory(
        created_by=user,
        created_at=now - timedelta(days=2),
        most_recent_status=models.Cluster.STATUS_RUNNING,
    )
    cluster3 = cluster_factory(
        created_by=user,
        created_at=now - timedelta(days=3),
        most_recent_status=models.Cluster.STATUS_RUNNING,
    )
    cluster_provisioner_list = mocker.patch(
        "atmo.clusters.provisioners.ClusterProvisioner.list",
        return_value=[
            {
                "jobflow_id": cluster1.jobflow_id,
                "state": cluster1.most_recent_status,
                "creation_datetime": cluster1.created_at,
                "ready_datetime": None,
                "end_datetime": None,
                "state_change_reason_code": "",
                "state_change_reason_message": "",
            },
            # cluster2 has a ready_datetime -> expect readiness metrics below.
            {
                "jobflow_id": cluster2.jobflow_id,
                "state": cluster2.most_recent_status,
                "creation_datetime": cluster2.created_at,
                "ready_datetime": now - timedelta(hours=6),
                "end_datetime": now - timedelta(hours=2),
                "state_change_reason_code": "",
                "state_change_reason_message": "",
            } if False else {
                "jobflow_id": cluster2.jobflow_id,
                "state": cluster2.most_recent_status,
                "creation_datetime": cluster2.created_at,
                "ready_datetime": now - timedelta(hours=6),
                "end_datetime": None,
                "state_change_reason_code": "",
                "state_change_reason_message": "",
            },
            # cluster3 has ended -> expect the instance-hours metric below.
            {
                "jobflow_id": cluster3.jobflow_id,
                "state": models.Cluster.STATUS_WAITING,
                "creation_datetime": cluster3.created_at,
                "ready_datetime": now - timedelta(hours=6),
                "end_datetime": now - timedelta(hours=2),
                "state_change_reason_code": "",
                "state_change_reason_message": "",
            },
            # the cluster that should be ignored
            {
                "jobflow_id": "j-some-other-id",
                "state": models.Cluster.STATUS_RUNNING,
                "creation_datetime": now - timedelta(days=10),
                "ready_datetime": None,
                "end_datetime": None,
                "state_change_reason_code": "",
                "state_change_reason_message": "",
            },
        ],
    )
    cluster_save = mocker.patch("atmo.clusters.models.Cluster.save")
    metric_record = mocker.patch("atmo.stats.models.Metric.record")
    result = tasks.update_clusters()
    # The AWS listing is bounded by the oldest cluster's creation day.
    cluster_provisioner_list.assert_called_once_with(
        created_after=(now - timedelta(days=3)).replace(hour=0, minute=0, second=0)
    )
    # Only the three known clusters are saved and reported; the unknown
    # jobflow id is skipped entirely.
    assert cluster_save.call_count == 3
    assert result == [cluster1.identifier, cluster2.identifier, cluster3.identifier]
    assert metric_record.call_args_list == [
        mocker.call(
            "cluster-ready",
            data={
                "identifier": cluster2.identifier,
                "jobflow_id": cluster2.jobflow_id,
                "size": cluster2.size,
            },
        ),
        mocker.call(
            "cluster-time-to-ready",
            64800,
            data={
                "identifier": cluster2.identifier,
                "jobflow_id": cluster2.jobflow_id,
                "size": cluster2.size,
            },
        ),
        mocker.call(
            "cluster-normalized-instance-hours",
            110,
            data={
                "identifier": cluster3.identifier,
                "jobflow_id": cluster3.jobflow_id,
                "size": cluster3.size,
            },
        ),
    ]
def test_extended_cluster_resends_expiration_mail(mailoutbox, mocker, one_hour_ago, cluster_factory):
    """Extending a cluster clears the mail-sent flag so a second expiration
    warning goes out when the cluster approaches expiry again."""
    cluster = cluster_factory(
        expires_at=one_hour_ago,
        most_recent_status=models.Cluster.STATUS_WAITING,
    )
    # Send first expiration email
    assert len(mailoutbox) == 0
    tasks.send_expiration_mails()
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    assert message.subject == (
        '%sCluster %s is expiring soon!' %
        (settings.EMAIL_SUBJECT_PREFIX, cluster.identifier)
    )
    assert message.from_email == settings.DEFAULT_FROM_EMAIL
    assert list(message.to) == [cluster.created_by.email]
    cluster.refresh_from_db()
    assert cluster.expiration_mail_sent
    # Extend cluster lifetime
    cluster.extend(1)
    cluster.refresh_from_db()
    # Extending must reset the flag so the warning can fire again.
    assert cluster.expiration_mail_sent is False
    # Send second expiration email
    tasks.send_expiration_mails()
    assert len(mailoutbox) == 2
    message = mailoutbox[1]
    assert message.subject == (
        '%sCluster %s is expiring soon!' %
        (settings.EMAIL_SUBJECT_PREFIX, cluster.identifier)
    )
    assert message.from_email == settings.DEFAULT_FROM_EMAIL
    assert list(message.to) == [cluster.created_by.email]
    cluster.refresh_from_db()
    assert cluster.expiration_mail_sent
| mpl-2.0 | f224945f0f69f033b0ed03799866c71b | 34.085106 | 101 | 0.616616 | 3.858212 | false | false | false | false |
mozilla/telemetry-analysis-service | atmo/clusters/tasks.py | 1 | 5239 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import timedelta
import mail_builder
from botocore.exceptions import ClientError
from celery.utils.log import get_task_logger
from django.conf import settings
from django.db import transaction
from django.utils import timezone
from ..celery import celery
from .models import Cluster
from .provisioners import ClusterProvisioner
logger = get_task_logger(__name__)
@celery.task
def deactivate_clusters():
    """Deactivate every active cluster whose expiry time has passed.

    Returns a list of [identifier, pk] pairs for the clusters that were
    deactivated during this run.
    """
    cutoff = timezone.now()
    expired = Cluster.objects.active().filter(expires_at__lte=cutoff)
    deactivated = []
    for cluster in expired:
        with transaction.atomic():
            deactivated.append([cluster.identifier, cluster.pk])
            # The cluster is expired, so tear it down.
            logger.info(
                "Cluster %s (%s) is expired, deactivating.",
                cluster.pk,
                cluster.identifier,
            )
            cluster.deactivate()
    return deactivated
@celery.task
def send_expiration_mails():
    """Send expiration emails an hour before the cluster expires.

    Each cluster is mailed at most once per expiry window because
    expiration_mail_sent is set right after a successful send.
    """
    deadline = timezone.now() + timedelta(hours=1)
    with transaction.atomic():
        # select_for_update locks the rows so concurrent runs cannot send
        # duplicate warnings for the same cluster.
        soon_expired = (
            Cluster.objects.select_for_update()
            .active()
            .filter(expires_at__lte=deadline, expiration_mail_sent=False)
        )
        for cluster in soon_expired:
            with transaction.atomic():
                message = mail_builder.build_message(
                    "atmo/clusters/mails/expiration.mail",
                    {"cluster": cluster, "deadline": deadline, "settings": settings},
                )
                message.send()
                # Remember the send so the next run skips this cluster.
                cluster.expiration_mail_sent = True
                cluster.save()
@celery.task(max_retries=3, bind=True)
def update_master_address(self, cluster_id, force=False):
    """Update the public IP address for the cluster with the given cluster ID

    Fetches the cluster info from AWS and, if a ``public_dns`` value is
    present, stores it as the cluster's master address.

    :param cluster_id: primary key of the Cluster row to update
    :param force: when True, refresh the address even if one is stored
    :returns: the address that was stored, or None when the address was
              already known (and force is False) or AWS returned none
    """
    try:
        cluster = Cluster.objects.get(id=cluster_id)
        # quick way out in case this job was called accidently
        if cluster.master_address and not force:
            return
        # first get the cluster info from AWS
        info = cluster.info
        master_address = info.get("public_dns") or ""
        # then store the public IP of the cluster if found in response
        # (note: the return is intentionally inside the if — an empty
        # address falls through and the task returns None)
        if master_address:
            cluster.master_address = master_address
            cluster.save()
            return master_address
    except ClientError as exc:
        # Transient AWS errors: retry with exponential backoff, max 3 tries.
        self.retry(exc=exc, countdown=celery.backoff(self.request.retries))
# This task runs every 5 minutes (300 seconds),
# which fits nicely in the backoff decay of 8 tries total
@celery.task(max_retries=7, bind=True)
def update_clusters(self):
    """
    Update the cluster metadata from AWS for the pending clusters.

    - To be used periodically.
    - Won't update state if not needed.
    - Will queue updating the Cluster's public IP address if needed.

    :returns: list of identifiers of the clusters that were updated; retries
              with backoff on AWS ClientError.
    """
    # only update the cluster info for clusters that are pending
    active_clusters = Cluster.objects.active()

    # Short-circuit for no active clusters (e.g. on weekends)
    if not active_clusters.exists():
        return []

    # get the start dates of the active clusters, set to the start of the day
    # to counteract time differences between atmo and AWS and use the oldest
    # start date to limit the ListCluster API call to AWS
    oldest_created_at = active_clusters.datetimes("created_at", "day")

    try:
        # build a mapping between jobflow ID and cluster info
        cluster_mapping = {}
        provisioner = ClusterProvisioner()
        cluster_list = provisioner.list(created_after=oldest_created_at[0])
        for cluster_info in cluster_list:
            cluster_mapping[cluster_info["jobflow_id"]] = cluster_info

        # go through pending clusters and update the state if needed
        updated_clusters = []
        for cluster in active_clusters:
            with transaction.atomic():
                info = cluster_mapping.get(cluster.jobflow_id)
                # ignore if no info was found for some reason,
                # the cluster was deleted in AWS but it wasn't deleted here yet
                if info is None:
                    continue

                # update cluster status
                cluster.sync(info)
                updated_clusters.append(cluster.identifier)

                # if not given enqueue a job to update the public IP address
                # but only if the cluster is running or waiting, so the
                # API call isn't wasted
                if (
                    not cluster.master_address
                    and cluster.most_recent_status in cluster.READY_STATUS_LIST
                ):
                    # Bind the id now: a bare `lambda: ... cluster.id` would
                    # capture the loop variable by reference, so callbacks
                    # deferred to an outer transaction's commit would all see
                    # the *last* cluster of the loop.
                    transaction.on_commit(
                        lambda cluster_id=cluster.id: update_master_address.delay(
                            cluster_id
                        )
                    )
        return updated_clusters
    except ClientError as exc:
        self.retry(exc=exc, countdown=celery.backoff(self.request.retries))
| mpl-2.0 | cd6ec65489208990541a991e56a8c969 | 37.807407 | 85 | 0.628937 | 4.516379 | false | false | false | false |
mitre/multiscanner | multiscanner/modules/Metadata/flarefloss.py | 2 | 2257 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import subprocess
__author__ = 'Emmanuelle Vargas-Gonzalez'
__license__ = 'MPL 2.0'
TYPE = 'Metadata'
NAME = 'floss'
DEFAULTCONF = {
'ENABLED': False,
'path': '/opt/floss',
'cmdline': [u'--show-metainfo']
}
def check(conf=DEFAULTCONF):
    """Return True when the module is enabled and the floss binary exists.

    :param conf: module configuration dict with 'ENABLED' and 'path' keys
    """
    # A disabled module or a missing binary path both mean "cannot run";
    # the if/else ladder collapses to a single boolean expression.
    return bool(conf['ENABLED']) and os.path.isfile(conf['path'])
def scan(filelist, conf=DEFAULTCONF):
    """Run floss over each file and collect the string sections it reports.

    :param filelist: iterable of file paths to scan
    :param conf: module configuration (binary path and command-line flags)
    :returns: ([(filename, result_dict), ...], metadata)
    """
    results = []
    for fname in filelist:
        ret = {}
        cmd = _build_command(conf, fname)
        try:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            # Scan the output line by line; when a section header is seen,
            # _extract_data consumes the same p.stdout iterator up to the
            # next blank line, filling the corresponding key in `ret`.
            for f in p.stdout:
                f = f.decode('utf-8')
                if u'FLOSS static ASCII strings' in f:
                    _extract_data(p.stdout, ret, 'static_ascii_strings')
                elif u'FLOSS static UTF-16 strings' in f:
                    _extract_data(p.stdout, ret, 'static_utf16_strings')
                elif u'stackstrings' in f:
                    _extract_data(p.stdout, ret, 'stack_strings')
                elif u'Vivisect workspace analysis information' in f:
                    _extract_data(p.stdout, ret, 'vivisect_meta_info')
        except subprocess.CalledProcessError as e:
            # NOTE(review): Popen itself does not raise CalledProcessError
            # (only check_call/run(check=True) do), so this handler likely
            # never fires — confirm the intended subprocess API.
            print(e.stderr)
            print(e)
        if ret:
            results.append((fname, ret))
    metadata = {}
    metadata['Name'] = NAME
    metadata['Type'] = TYPE
    metadata['Include'] = False
    return (results, metadata)
def _build_command(conf, fname):
cmd = [conf['path'], fname]
cmd.extend(conf['cmdline'])
return cmd
def _extract_data(out, ret, key):
"""Sub-routine to extract fragment of console output"""
ret[key] = []
feed = next(out, u'').decode('utf-8')
feed = feed.strip()
while feed != u'':
ret[key].append(feed)
feed = next(out, u'').decode('utf-8')
feed = feed.strip()
if not ret[key]:
del ret[key]
| mpl-2.0 | e4ea1656407a84679cee7ce042995555 | 26.864198 | 98 | 0.576872 | 3.640323 | false | false | false | false |
mitre/multiscanner | multiscanner/modules/Signature/YaraScan.py | 2 | 3515 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import time
from multiscanner.config import CONFIG
from multiscanner.common.utils import parseDir
__authors__ = "Nick Beede, Drew Bonasera"
__license__ = "MPL 2.0"
TYPE = "Signature"
NAME = "Yara"
DEFAULTCONF = {
"ruledir": os.path.join(os.path.split(CONFIG)[0], "etc", "yarasigs"),
"fileextensions": [".yar", ".yara", ".sig"],
"ignore-tags": ["TLPRED"],
"includes": False,
"ENABLED": True
}
try:
import yara
except ImportError:
print("yara-python module not installed...")
yara = False
def check(conf=DEFAULTCONF):
    """Return True only when the module is enabled and yara-python imported.

    :param conf: module configuration dict with an 'ENABLED' key
    """
    # `yara` is False when the import at module load time failed; the
    # if/else ladder collapses to a single boolean expression.
    return bool(conf['ENABLED'] and yara)
def scan(filelist, conf=DEFAULTCONF):
    """Compile all yara rules under ruledir and match every file against them.

    :param filelist: iterable of file paths to scan
    :param conf: module configuration (rule directory, extensions, ignore tags)
    :returns: ([(filename, {rule_key: hit_dict}), ...], metadata), or None
              when an invalid rule could not be removed from the rule set
    """
    ruleDir = conf["ruledir"]
    extlist = conf["fileextensions"]
    includes = 'includes' in conf and conf['includes']
    ruleset = {}
    rules = parseDir(ruleDir, recursive=True)
    # Keep only files whose extension marks them as yara rule files.
    for r in rules:
        for ext in extlist:
            if r.endswith(ext):
                full_path = os.path.abspath(os.path.join(ruleDir, r))
                ruleset[full_path] = full_path
                break
    # Ran into a weird issue with file locking, this fixes it
    goodtogo = False
    i = 0
    # NOTE(review): `i` is unused in this first loop; it terminates because
    # each SyntaxError removes one rule file (or the function returns None).
    yararules = None
    while not goodtogo:
        try:
            yararules = yara.compile(filepaths=ruleset, includes=includes)
            goodtogo = True
        except yara.SyntaxError as e:
            # yara names the offending file before the '(' in its message.
            bad_file = os.path.abspath(str(e).split('(')[0])
            if bad_file in ruleset:
                del ruleset[bad_file]
                print('WARNING: Yara', e)
            else:
                print('ERROR Yara: Invalid rule in', bad_file, 'but we are unable to remove it from our list. Aborting')
                print(e)
                return None
    matches = []
    for m in filelist:
        # Ran into a weird issue with file locking, this fixes it
        goodtogo = False
        i = 0
        # Retry opening the file up to 5 times, 3 seconds apart.
        # NOTE(review): if all 5 attempts fail, `f` is unbound and the
        # finally clause below raises NameError — confirm intended handling.
        while not goodtogo and i < 5:
            try:
                f = open(m, 'rb')
                goodtogo = True
            except Exception as e:
                print('yara:', e)
                time.sleep(3)
                i += 1
        try:
            hit = yararules.match(data=f.read())
        except Exception as e:
            # TODO: log exception
            continue
        finally:
            f.close()
        if hit:
            hdict = {}
            for h in hit:
                # Skip hits whose tags intersect the configured ignore list.
                if not set(h.tags).intersection(set(conf["ignore-tags"])):
                    hit_dict = {
                        'meta': h.meta,
                        'namespace': h.namespace,
                        'rule': h.rule,
                        'tags': h.tags,
                    }
                    # Key hits as "<namespace basename>:<rule name>".
                    try:
                        h_key = '{}:{}'.format(hit_dict['namespace'].split('/')[-1], hit_dict['rule'])
                    except IndexError:
                        h_key = '{}'.format(hit_dict['rule'])
                    hdict[h_key] = hit_dict
            matches.append((m, hdict))
    metadata = {}
    rulelist = list(ruleset)
    rulelist.sort()
    metadata["Name"] = NAME
    metadata["Type"] = TYPE
    metadata["Rules"] = rulelist
    return (matches, metadata)
| mpl-2.0 | 1c728654712c63264e6a5827c40e6eb1 | 29.301724 | 120 | 0.528876 | 3.9099 | false | false | false | false |
mozilla/build-relengapi | relengapi/blueprints/auth/test_auth.py | 3 | 1039 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import json
from nose.tools import eq_
from relengapi.lib.permissions import p
from relengapi.lib.testing.context import TestContext
test_context = TestContext(reuse_app=True)
@test_context
def test_root(client):
    """The auth root loads a page"""
    resp = client.get('/auth/')
    eq_(resp.status_code, 200, resp.data)
    # The rendered page lists the current user's permissions.
    assert 'You have the following permissions' in resp.data, resp.data
# Register a throwaway permission so the specialized test context below
# can grant it to the test client.
p.test_auth.aaa.doc('test_auth test perm')
@test_context.specialize(perms=[p.test_auth.aaa])
def test_permissions(client):
    """The /permissions API endpoint returns the user's permissions"""
    resp = client.get('/auth/permissions')
    eq_(resp.status_code, 200, resp.data)
    # Response shape: {"result": [{"doc": ..., "name": ...}, ...]}
    eq_(json.loads(resp.data)['result'], [
        {'doc': 'test_auth test perm', 'name': 'test_auth.aaa'},
    ])
| mpl-2.0 | 9f1961f0829f43d1d6b738c91aa3749a | 28.685714 | 71 | 0.698749 | 3.36246 | false | true | false | false |
mozilla/build-relengapi | relengapi/blueprints/archiver/__init__.py | 1 | 9767 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import datetime
from random import randint
import sqlalchemy as sa
import structlog
from flask import Blueprint
from flask import current_app
from flask import redirect
from flask import url_for
from relengapi.blueprints.archiver import tables
from relengapi.blueprints.archiver.tasks import TASK_TIME_OUT
from relengapi.blueprints.archiver.tasks import create_and_upload_archive
from relengapi.blueprints.archiver.types import MozharnessArchiveTask
from relengapi.lib import api
from relengapi.lib import badpenny
from relengapi.lib.time import now
bp = Blueprint('archiver', __name__)
logger = structlog.get_logger()
GET_EXPIRES_IN = 300
PENDING_EXPIRES_IN = 60
FINISHED_STATES = ['SUCCESS', 'FAILURE', 'REVOKED']
def delete_tracker(tracker):
    """Remove an ArchiverTask tracker row and commit immediately."""
    db_session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
    logger.info(
        "deleting tracker with id: {}".format(tracker.task_id),
        archiver_task=tracker.task_id,
    )
    db_session.delete(tracker)
    db_session.commit()
def update_tracker_state(tracker, state):
    """Persist a new celery state on the tracker, rolling back on conflicts."""
    db_session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
    logger.info(
        "updating tracker with id: {} to state: {}".format(tracker.id, state),
        archiver_task=tracker.task_id,
        archiver_task_state=state,
    )
    try:
        tracker.state = state
        db_session.commit()
    except sa.exc.IntegrityError:
        db_session.rollback()
@badpenny.periodic_task(seconds=TASK_TIME_OUT)
def cleanup_old_tasks(job_status):
    """delete any tracker task if it is older than the time a task can live for."""
    db_session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
    cutoff = now() - datetime.timedelta(seconds=TASK_TIME_OUT)
    table = tables.ArchiverTask
    # Rows come back oldest-first, so the first non-expired row ends the sweep.
    for tracker in db_session.query(table).order_by(table.created_at):
        if tracker.created_at >= cutoff:
            break
        delete_tracker(tracker)
def renew_tracker_pending_expiry(tracker):
    """Push the tracker's pending deadline out by another PENDING_EXPIRES_IN."""
    new_deadline = now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN)
    db_session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
    logger.info("renewing tracker {} with pending expiry: {}".format(
        tracker.id, new_deadline), archiver_task=tracker.task_id)
    tracker.pending_expires_at = new_deadline
    db_session.commit()
@bp.route('/status/<task_id>')
@api.apimethod(MozharnessArchiveTask, unicode)
def task_status(task_id):
    """
    Check and return the current state of the create_and_upload_archive celery task with task id
    of <task_id>.

    If the task is unknown, state will be PENDING. Once the task starts it will be updated to
    STARTED and finally, if it completes, it will be either SUCCESS (no exceptions), or FAILURE.

    See update_state() within create_and_upload_archive and
    http://celery.readthedocs.org/en/latest/reference/celery.states.html for more details.

    If state is SUCCESS, it is safe to check response['s3_urls'] for the archives submitted to s3
    """
    task = create_and_upload_archive.AsyncResult(task_id)
    task_tracker = tables.ArchiverTask.query.filter(tables.ArchiverTask.task_id == task_id).first()
    log = logger.bind(archiver_task=task_id, archiver_task_state=task.state)
    log.info("checking status of task id {}: current state {}".format(task_id, task.state))
    task_info = task.info or {}
    response = {
        'state': task.state,
    }
    if task.state != 'FAILURE':
        response['status'] = task_info.get('status', 'no status available at this point.')
        response['src_url'] = task_info.get('src_url', '')
        response['s3_urls'] = task_info.get('s3_urls', {})
    else:
        # something went wrong
        response['status'] = str(task.info)  # this is the exception raised
        response['src_url'] = ''
        response['s3_urls'] = {}
    # archiver does not create any custom states, so we can assume to have only the defaults:
    # http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-states
    # therefore, delete our state_id tracker from the db if the celery state is in a final state:
    # e.g. not RETRY, STARTED, or PENDING
    if task_tracker:
        if task.state in FINISHED_STATES:
            delete_tracker(task_tracker)
        elif task.state == "PENDING" and task_tracker.pending_expires_at < now():
            # The task sat unclaimed too long: re-submit it under the same
            # task id and extend the tracker's pending deadline.
            log.info("Task {} has expired from pending too long. Re-creating task".format(task.id))
            renew_tracker_pending_expiry(task_tracker)  # let exceptions bubble up before moving on
            create_and_upload_archive.apply_async(args=[task_tracker.src_url, task_tracker.s3_key],
                                                  task_id=task.id)
            response['state'] = 'RETRY'
            response['status'] = 'Task has expired from pending for too long. Re-creating task.'
        elif task_tracker.state != task.state:
            update_tracker_state(task_tracker, task.state)
    return MozharnessArchiveTask(**response)
@bp.route('/hgmo/<path:repo>/<rev>')
@api.apimethod(None, unicode, unicode, unicode, unicode, unicode, status_code=302)
def get_hgmo_archive(repo, rev, subdir=None, suffix='tar.gz', preferred_region=None):
    """
    An archiver for hg.mozilla.org related requests. Uses relengapi.blueprints.archiver.get_archive

    :param repo: the repo location off of hg.mozilla.org/
    :param rev: the rev associated with the repo; long hashes are truncated
        to the 12-character short form
    :param subdir: optional subdir path to only archive a portion of the repo
    :param suffix: the archive extension type. defaulted to tar.gz
    :param preferred_region: the preferred s3 region to use
    """
    # allow for the short hash and full hash to be passed
    rev = rev[0:12]
    src_url = current_app.config['ARCHIVER_HGMO_URL_TEMPLATE'].format(
        repo=repo, rev=rev, suffix=suffix, subdir=subdir or ''
    )
    # though slightly odd to append the archive suffix extension with a subdir, this:
    # 1) allows us to have archives based on different subdir locations from the same repo and rev
    # 2) is aligned with the hg.mozilla.org format
    key = '{repo}-{rev}.{suffix}'.format(repo=repo, rev=rev, suffix=suffix)
    if subdir:
        key += '/{}'.format(subdir)
    return get_archive(src_url, key, preferred_region)
def get_archive(src_url, key, preferred_region):
    """
    A generic getter for retrieving an s3 location of an archive where the archive is based off a
    src_url.

    sub-dir: hg.mozilla.org supports archives of sub directories within a repository. This
    flexibility allows for creating archives of only a portion of what would normally be an entire
    repo archive.

    logic flow:
     If there is already a key within s3, a re-direct link is given for the
    s3 location. If the key does not exist, download the archive from src url, upload it to s3
    for each region supported and return all uploaded s3 url locations.

     When the key does not exist, the remaining work will be assigned to a celery background task
    with a url location returned immediately for obtaining task state updates.
    """
    buckets = current_app.config['ARCHIVER_S3_BUCKETS']
    # NOTE(review): indexing .keys() only works on Python 2; under Python 3
    # this would need list(buckets) — confirm before porting.
    random_region = buckets.keys()[randint(0, len(buckets.keys()) - 1)]
    # use preferred region if available otherwise choose a valid one at random
    region = preferred_region if preferred_region and preferred_region in buckets else random_region
    bucket = buckets[region]
    s3 = current_app.aws.connect_to('s3', region)
    session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
    # first, see if the key exists
    if not s3.get_bucket(bucket).get_key(key):
        task_id = key.replace('/', '_')  # keep things simple and avoid slashes in task url
        # can't use unique support:
        # api.pub.build.mozilla.org/docs/development/databases/#unique-row-support-get-or-create
        # because we want to know when the row doesn't exist before creating it
        tracker = tables.ArchiverTask.query.filter(tables.ArchiverTask.task_id == task_id).first()
        if tracker and tracker.state in FINISHED_STATES:
            log = logger.bind(archiver_task=task_id, archiver_task_state=tracker.state)
            log.info('Task tracker: {} exists but finished with state: '
                     '{}'.format(task_id, tracker.state))
            # remove tracker and try celery task again
            delete_tracker(tracker)
            tracker = None
        if not tracker:
            log = logger.bind(archiver_task=task_id)
            log.info("Creating new celery task and task tracker for: {}".format(task_id))
            task = create_and_upload_archive.apply_async(args=[src_url, key], task_id=task_id)
            if task and task.id:
                pending_expires_at = now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN)
                session.add(tables.ArchiverTask(task_id=task.id, s3_key=key, created_at=now(),
                                                pending_expires_at=pending_expires_at,
                                                src_url=src_url, state="PENDING"))
                session.commit()
            else:
                # apply_async gave us nothing usable back — internal error.
                return {}, 500
        # 202 Accepted: poll the status endpoint at Location for progress.
        return {}, 202, {'Location': url_for('archiver.task_status', task_id=task_id)}
    logger.info("generating GET URL to {}, expires in {}s".format(key, GET_EXPIRES_IN))
    # return 302 pointing to s3 url with archive
    signed_url = s3.generate_url(
        method='GET', expires_in=GET_EXPIRES_IN,
        bucket=bucket, key=key
    )
    return redirect(signed_url)
| mpl-2.0 | b4de6e75bf5cc14447d7e908afc63b2f | 45.070755 | 100 | 0.675847 | 3.773957 | false | false | false | false |
mozilla/build-relengapi | relengapi/blueprints/mapper/__init__.py | 1 | 13898 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import calendar
import re
import time
import dateutil.parser
import sqlalchemy as sa
import structlog
from flask import Blueprint
from flask import Response
from flask import abort
from flask import g
from flask import jsonify
from flask import request
from sqlalchemy import orm
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from relengapi.lib import db
from relengapi.lib.permissions import p
DB_DECLARATIVE_BASE = 'relengapi'
logger = structlog.get_logger()
# logging.basicConfig(level=logging.DEBUG)
bp = Blueprint('mapper', __name__)
p.mapper.mapping.insert.doc("Allows new hg-git mappings to be inserted "
"into mapper db (hashes table)")
p.mapper.project.insert.doc("Allows new projects to be inserted into "
"mapper db (projects table)")
# TODO: replace abort with a custom exception
# - http://flask.pocoo.org/docs/patterns/apierrors/
class Project(db.declarative_base(DB_DECLARATIVE_BASE)):
    """Object-relational mapping between python class Project
    and database table "projects"
    """
    __tablename__ = 'releng_mapper_projects'
    # Surrogate primary key, referenced by Hash.project_id.
    id = sa.Column(sa.Integer, primary_key=True)
    # Human-readable project name (e.g. 'build-tools'); must be unique.
    name = sa.Column(sa.String(255), nullable=False, unique=True)
class Hash(db.declarative_base(DB_DECLARATIVE_BASE)):
    """Object-relational mapping between python class Hash
    and database table "hashes"

    Each row records one git commit <-> hg changeset correspondence for a
    project, plus the epoch time the row was inserted (see _add_hash).
    """
    __tablename__ = 'releng_mapper_hashes'
    # 40-character hex SHAs for each side of the mapping.
    hg_changeset = sa.Column(sa.String(40), nullable=False)
    git_commit = sa.Column(sa.String(40), nullable=False)
    project_id = sa.Column(
        sa.Integer, sa.ForeignKey('releng_mapper_projects.id'), nullable=False)
    project = orm.relationship(Project, primaryjoin=(project_id == Project.id))
    # project = orm.relationship(Project, backref=orm.backref('hashes', order_by=id))
    # Insertion time as a Unix epoch integer.
    date_added = sa.Column(sa.Integer, nullable=False)
    # Convenience accessor for the owning project's name.
    project_name = property(lambda self: self.project.name)
    def as_json(self):
        # Serialize the mapping fields as a Flask JSON response.
        return jsonify(**{n: getattr(self, n)
                          for n in ('git_commit', 'hg_changeset',
                                    'date_added', 'project_name')})
    __table_args__ = (
        # TODO: (needs verification) all queries specifying a hash are for
        # (project, hash), so these aren't used
        sa.Index('hg_changeset', 'hg_changeset'),
        sa.Index('git_commit', 'git_commit'),
        # TODO: this index is a prefix of others and will never be used
        sa.Index('project_id', 'project_id'),
        sa.Index('project_id__date_added', 'project_id', 'date_added'),
        sa.Index('project_id__hg_changeset', 'project_id',
                 'hg_changeset', unique=True),
        sa.Index(
            'project_id__git_commit', 'project_id', 'git_commit', unique=True),
    )
    __mapper_args__ = {
        # tell the SQLAlchemy ORM about one of the unique indexes; it doesn't
        # matter which
        'primary_key': [project_id, hg_changeset],
    }
def _project_filter(projects_arg):
    """Build the SQLAlchemy filter expression for the project name(s).

    A comma-separated argument matches any of the named projects; a single
    name matches by equality.

    Args:
        projects_arg: Comma-separated list of project names

    Returns:
        A SQLAlchemy filter expression
    """
    names = projects_arg.split(',')
    if len(names) > 1:
        return Project.name.in_(names)
    return Project.name == projects_arg
def _stream_mapfile(query):
    """Helper method to build a map file from a SQLAlchemy query.

    Args:
        query: SQLAlchemy query

    Returns:
        * Text output: 40 characters git commit SHA, a space,
          40 characters hg changeset SHA, a newline (streamed); or
        * HTTP 404: if the query returns no results
    """
    # this helps keep memory use down a little, but the DBAPI still loads
    # the entire result set into memory..
    # http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.yield_per
    query = query.yield_per(100)
    if query.count() == 0:
        abort(404, 'No mappings found')

    def contents():
        for r in query:
            yield '%s %s\n' % (r.git_commit, r.hg_changeset)

    # The previous `if contents:` guard tested the local function object,
    # which is always truthy, so it was dead code and has been removed.
    return Response(contents(), mimetype='text/plain')
def _check_well_formed_sha(vcs, sha, exact_length=40):
    """Helper method to check for a well-formed SHA.

    Args:
        vcs: Name of the vcs system ('hg' or 'git')
        sha: String to check against the well-formed SHA regex
        exact_length: Number of characters SHA should be, or
            None if exact length is not required

    Returns:
        None

    Exceptions:
        HTTP 400: Malformed SHA or unknown vcs
    """
    if vcs not in ("git", "hg"):
        abort(400, "Unknown vcs type %s" % vcs)
    if sha is None:
        abort(400, "%s SHA is <None>" % vcs)
    elif sha == "":
        abort(400, "%s SHA is an empty string" % vcs)
    # Check the character set without a length cap so that length problems
    # are reported separately: with the old `{1,40}` regex a 41-character
    # all-hex SHA produced a misleading "bad characters" error.
    elif not re.match(r'^[a-f0-9]+$', sha):
        abort(400, "%s SHA contains bad characters: '%s'" % (vcs, str(sha)))
    if exact_length is not None and len(sha) != exact_length:
        abort(400, "%s SHA should be %s characters long, but is %s characters long: '%s'"
              % (vcs, exact_length, len(sha), str(sha)))
    elif exact_length is None and len(sha) > 40:
        # Partial SHAs may be shorter than 40 characters but never longer.
        abort(400, "%s SHA is too long (%s characters): '%s'"
              % (vcs, len(sha), str(sha)))
def _get_project(session, project):
    """Helper method to return Project class for a project with the given name.

    Args:
        session: SQLAlchemy ORM Session object
        project: Name of the project (e.g. 'build-tools')

    Returns:
        the corresponding python Project object

    Exceptions:
        HTTP 404: Project could not be found
        HTTP 500: Multiple projects with same name found
    """
    # NOTE(review): `session` is unused — the lookup goes through the
    # flask-sqlalchemy Project.query instead.
    try:
        return Project.query.filter_by(name=project).one()
    except MultipleResultsFound:
        # name carries a unique constraint, so this indicates corruption.
        abort(500, "Multiple projects with name %s found in database" %
              project)
    except NoResultFound:
        abort(404, "Could not find project %s in database" % project)
def _add_hash(session, git_commit, hg_changeset, project):
    """Validate a git-hg mapping and stage a Hash row in the ORM session.

    Args:
        session: SQLAlchemy ORM Session object
        git_commit: String of the 40 character SHA of the git commit
        hg_changeset: String of the 40 character SHA of the hg changeset
        project: the Project ORM object the mapping belongs to

    Exceptions:
        HTTP 400: Malformed SHA
    """
    # Both SHAs must be well formed; either check can raise HTTP 400.
    for vcs, sha in (('git', git_commit), ('hg', hg_changeset)):
        _check_well_formed_sha(vcs, sha)
    session.add(Hash(
        git_commit=git_commit,
        hg_changeset=hg_changeset,
        project=project,
        date_added=time.time(),
    ))
@bp.route('/<projects>/rev/<vcs_type>/<commit>')
def get_rev(projects, vcs_type, commit):
    """Look up the single mapping matching a (possibly partial) SHA and
    return it as "git_sha hg_sha" text."""
    # (documentation in relengapi/docs/usage/mapper.rst)
    # Partial SHAs are allowed here, so no exact length is enforced.
    _check_well_formed_sha(vcs_type, commit, exact_length=None)  # can raise http 400
    q = Hash.query.join(Project).filter(_project_filter(projects))
    # Prefix-match on whichever column corresponds to the given vcs.
    # NOTE(review): "cspatttern" is a typo, but it is only an internal
    # SQL bind-parameter name, so it is harmless.
    if vcs_type == "git":
        q = q.filter(sa.text("git_commit like :cspatttern")).params(
            cspatttern=commit + "%")
    elif vcs_type == "hg":
        q = q.filter(sa.text("hg_changeset like :cspatttern")).params(
            cspatttern=commit + "%")
    try:
        row = q.one()
        return "%s %s" % (row.git_commit, row.hg_changeset)
    except NoResultFound:
        if vcs_type == "git":
            abort(404, "No hg changeset found for git commit id %s in project(s) %s"
                  % (commit, projects))
        elif vcs_type == "hg":
            abort(404, "No git commit found for hg changeset %s in project(s) %s"
                  % (commit, projects))
    except MultipleResultsFound:
        # An ambiguous partial SHA matched more than one row.
        abort(500, "Internal error - multiple results returned for %s commit %s"
              "in project %s - this should not be possible in database"
              % (vcs_type, commit, projects))
@bp.route('/<projects>/mapfile/full')
def get_full_mapfile(projects):
    """Stream the complete git-to-hg map file for the given project(s)."""
    # (documentation in relengapi/docs/usage/mapper.rst)
    rows = Hash.query.join(Project).filter(_project_filter(projects))
    rows = rows.order_by(Hash.hg_changeset)
    return _stream_mapfile(rows)
@bp.route('/<projects>/mapfile/since/<since>')
def get_mapfile_since(projects, since):
    """Stream the git-to-hg map entries added strictly after *since*.

    *since* is any date string dateutil can parse; it is converted to a
    Unix epoch and compared against Hash.date_added.
    """
    # (documentation in relengapi/docs/usage/mapper.rst)
    try:
        since_dt = dateutil.parser.parse(since)
    except ValueError as e:
        # str(e) instead of e.message: BaseException.message was removed
        # in Python 3, where e.message raises AttributeError.
        abort(400, 'Invalid date %s specified; see https://labix.org/python-dateutil: %s'
              % (since, str(e)))
    since_epoch = calendar.timegm(since_dt.utctimetuple())
    q = Hash.query.join(Project).filter(_project_filter(projects))
    q = q.order_by(Hash.hg_changeset)
    q = q.filter(Hash.date_added > since_epoch)
    return _stream_mapfile(q)
@bp.route('/projects', methods=('GET',))
def get_projects():
    """Return the list of known project names as JSON."""
    # (documentation in relengapi/docs/usage/mapper.rst)
    session = g.db.session(DB_DECLARATIVE_BASE)
    names = [project.name for project in session.query(Project).all()]
    return jsonify(projects=names)
def _insert_many(project, ignore_dups=False):
"""Update the database with many git-hg mappings.
Args:
project: Single project name string
ignore_dups: Boolean; if False, abort on duplicate entries without inserting
anything
Returns:
An empty json response body
Exceptions:
HTTP 400: Request content-type is not 'text/plain'
HTTP 400: Malformed SHA
HTTP 404: Project not found
HTTP 409: ignore_dups=False and there are duplicate entries
HTTP 500: Multiple projects found with matching project name
"""
if request.content_type != 'text/plain':
abort(
400, "HTTP request header 'Content-Type' must be set to 'text/plain'")
session = g.db.session(DB_DECLARATIVE_BASE)
proj = _get_project(session, project) # can raise HTTP 404 or HTTP 500
for line in request.stream.readlines():
line = line.rstrip()
try:
(git_commit, hg_changeset) = line.split(' ')
except ValueError:
logger.error(
"Received input line: '%s' for project %s", line, project)
logger.error("Was expecting an input line such as "
"'686a558fad7954d8481cfd6714cdd56b491d2988 "
"fef90029cb654ad9848337e262078e403baf0c7a'")
logger.error("i.e. where the first hash is a git commit SHA "
"and the second hash is a mercurial changeset SHA")
abort(400, "Input line '%s' received for project %s did not contain a space"
% (line, project))
# header/footer won't match this format
continue
_add_hash(session, git_commit, hg_changeset, proj) # can raise HTTP 400
if ignore_dups:
try:
session.commit()
except sa.exc.IntegrityError:
session.rollback()
if not ignore_dups:
try:
session.commit()
except sa.exc.IntegrityError:
session.rollback()
abort(409, "Some of the given mappings for project %s already exist"
% project)
return jsonify()
@bp.route('/<project>/insert', methods=('POST',))
@p.mapper.mapping.insert.require()
def insert_many_no_dups(project):
# (documentation in relengapi/docs/usage/mapper.rst)
return _insert_many(project, ignore_dups=False) # can raise HTTP 400, 404, 409, 500
@bp.route('/<project>/insert/ignoredups', methods=('POST',))
@p.mapper.mapping.insert.require()
def insert_many_ignore_dups(project):
# (documentation in relengapi/docs/usage/mapper.rst)
return _insert_many(project, ignore_dups=True) # can raise HTTP 400, 404, 500
@bp.route('/<project>/insert/<git_commit>/<hg_changeset>', methods=('POST',))
@p.mapper.mapping.insert.require()
def insert_one(project, git_commit, hg_changeset):
# (documentation in relengapi/docs/usage/mapper.rst)
session = g.db.session(DB_DECLARATIVE_BASE)
proj = _get_project(session, project) # can raise HTTP 404 or HTTP 500
_add_hash(session, git_commit, hg_changeset, proj) # can raise HTTP 400
try:
session.commit()
q = Hash.query.join(Project).filter(_project_filter(project))
q = q.filter(sa.text("git_commit == :commit")).params(commit=git_commit)
return q.one().as_json()
except sa.exc.IntegrityError:
abort(409, "Provided mapping %s %s for project %s already exists and "
"cannot be reinserted" % (git_commit, hg_changeset, project))
except NoResultFound:
abort(500, "Provided mapping %s %s for project %s could not be inserted "
"into the database" % (git_commit, hg_changeset, project))
except MultipleResultsFound:
abort(500, "Provided mapping %s %s for project %s has been inserted into "
"the database multiple times" % (git_commit, hg_changeset, project))
@bp.route('/<project>', methods=('POST',))
@p.mapper.project.insert.require()
def add_project(project):
# (documentation in relengapi/docs/usage/mapper.rst)
session = g.db.session(DB_DECLARATIVE_BASE)
p = Project(name=project)
session.add(p)
try:
session.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
abort(409, "Project %s could not be inserted into the database" %
project)
return jsonify()
| mpl-2.0 | a458146aa54ab09b65a4afe459fe9100 | 36.663957 | 94 | 0.638365 | 3.758248 | false | false | false | false |
librosa/librosa | docs/examples/plot_chroma.py | 2 | 7136 | # coding: utf-8
"""
===================================
Enhanced chroma and chroma variants
===================================
This notebook demonstrates a variety of techniques for enhancing chroma features and
also, introduces chroma variants implemented in librosa.
"""
###############################################################################################
#
# Enhanced chroma
# ^^^^^^^^^^^^^^^
# Beyond the default parameter settings of librosa's chroma functions, we apply the following
# enhancements:
#
# 1. Harmonic-percussive-residual source separation to eliminate transients.
# 2. Nearest-neighbor smoothing to eliminate passing tones and sparse noise. This is inspired by the
# recurrence-based smoothing technique of
# `Cho and Bello, 2011 <http://ismir2011.ismir.net/papers/OS8-4.pdf>`_.
# 3. Local median filtering to suppress remaining discontinuities.
# Code source: Brian McFee
# License: ISC
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import scipy
import matplotlib.pyplot as plt
import librosa
import librosa.display
#######################################################################
# We'll use a track that has harmonic, melodic, and percussive elements
# Karissa Hobbs - Let's Go Fishin'
y, sr = librosa.load(librosa.ex('fishin'))
#######################################
# First, let's plot the original chroma
chroma_orig = librosa.feature.chroma_cqt(y=y, sr=sr)
# For display purposes, let's zoom in on a 15-second chunk from the middle of the song
idx = tuple([slice(None), slice(*list(librosa.time_to_frames([45, 60])))])
# And for comparison, we'll show the CQT matrix as well.
C = np.abs(librosa.cqt(y=y, sr=sr, bins_per_octave=12*3, n_bins=7*12*3))
fig, ax = plt.subplots(nrows=2, sharex=True)
img1 = librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max)[idx],
y_axis='cqt_note', x_axis='time', bins_per_octave=12*3,
ax=ax[0])
fig.colorbar(img1, ax=[ax[0]], format="%+2.f dB")
ax[0].label_outer()
img2 = librosa.display.specshow(chroma_orig[idx], y_axis='chroma', x_axis='time', ax=ax[1])
fig.colorbar(img2, ax=[ax[1]])
ax[1].set(ylabel='Default chroma')
########################################################
# We can do better by isolating the harmonic component of the audio signal.
# We'll use a large margin for separating harmonics from percussives:
y_harm = librosa.effects.harmonic(y=y, margin=8)
chroma_harm = librosa.feature.chroma_cqt(y=y_harm, sr=sr)
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chroma_orig[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Default chroma')
ax[0].label_outer()
librosa.display.specshow(chroma_harm[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Harmonic')
###########################################
# There's still some noise in there though.
# We can clean it up using non-local filtering.
# This effectively removes any sparse additive noise from the features.
chroma_filter = np.minimum(chroma_harm,
librosa.decompose.nn_filter(chroma_harm,
aggregate=np.median,
metric='cosine'))
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chroma_harm[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Harmonic')
ax[0].label_outer()
librosa.display.specshow(chroma_filter[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Non-local')
###########################################################
# Local discontinuities and transients can be suppressed by
# using a horizontal median filter.
chroma_smooth = scipy.ndimage.median_filter(chroma_filter, size=(1, 9))
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chroma_filter[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Non-local')
ax[0].label_outer()
librosa.display.specshow(chroma_smooth[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Median-filtered')
#########################################################
# A final comparison between the CQT, original chromagram
# and the result of our filtering.
fig, ax = plt.subplots(nrows=3, sharex=True)
librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max)[idx],
y_axis='cqt_note', x_axis='time',
bins_per_octave=12*3, ax=ax[0])
ax[0].set(ylabel='CQT')
ax[0].label_outer()
librosa.display.specshow(chroma_orig[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Default chroma')
ax[1].label_outer()
librosa.display.specshow(chroma_smooth[idx], y_axis='chroma', x_axis='time', ax=ax[2])
ax[2].set(ylabel='Processed')
#################################################################################################
# Chroma variants
# ^^^^^^^^^^^^^^^
# There are three chroma variants implemented in librosa: `chroma_stft`, `chroma_cqt`, and `chroma_cens`.
# `chroma_stft` and `chroma_cqt` are two alternative ways of plotting chroma.
# `chroma_stft` performs short-time fourier transform of an audio input and maps each STFT bin to chroma, while `chroma_cqt` uses constant-Q transform and maps each cq-bin to chroma.
#
# A comparison between the STFT and the CQT methods for chromagram.
chromagram_stft = librosa.feature.chroma_stft(y=y, sr=sr)
chromagram_cqt = librosa.feature.chroma_cqt(y=y, sr=sr)
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chromagram_stft[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='STFT')
ax[0].label_outer()
librosa.display.specshow(chromagram_cqt[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='CQT')
###################################################################################################
# CENS features (`chroma_cens`) are variants of chroma features introduced in
# `Müller and Ewart, 2011 <http://ismir2011.ismir.net/papers/PS2-8.pdf>`_, in which
# additional post processing steps are performed on the constant-Q chromagram to obtain features
# that are invariant to dynamics and timbre.
#
# Thus, the CENS features are useful for applications, such as audio matching and retrieval.
#
# Following steps are additional processing done on the chromagram, and are implemented in `chroma_cens`:
# 1. L1-Normalization across each chroma vector
# 2. Quantization of the amplitudes based on "log-like" amplitude thresholds
# 3. Smoothing with sliding window (optional parameter)
# 4. Downsampling (not implemented)
#
# A comparison between the original constant-Q chromagram and the CENS features.
chromagram_cens = librosa.feature.chroma_cens(y=y, sr=sr)
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chromagram_cqt[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Orig')
librosa.display.specshow(chromagram_cens[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='CENS')
| isc | 30502d8965b8a21a201c411231bd7030 | 39.539773 | 188 | 0.631395 | 3.042644 | false | false | false | false |
librosa/librosa | scripts/create_intervals.py | 1 | 2095 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2022-08-11 07:26:21 by Brian McFee <brian.mcfee@nyu.edu>
"""Construct the interval cache for just intonation systems.
This creates the data file intervals.json, which maps truncated floating point
representations of intervals to their prime factorizations.
This script is primarily intended for developer use.
Usage:
python create_intervals.py
The output will be stored in intervals.pickle
"""
import msgpack
import numpy as np
import librosa
def main():
# Get the intervals
intervals_pythagorean = librosa.pythagorean_intervals(
bins_per_octave=72, sort=False, return_factors=True
)
intervals_3lim = librosa.plimit_intervals(
primes=[3],
bins_per_octave=72,
sort=False,
return_factors=True,
)
intervals_5lim = librosa.plimit_intervals(
primes=[3, 5],
bins_per_octave=72,
sort=False,
return_factors=True,
)
intervals_7lim = librosa.plimit_intervals(
primes=[3, 5, 7],
bins_per_octave=72,
sort=False,
return_factors=True,
)
intervals_23lim = librosa.plimit_intervals(
primes=[3, 5, 7, 11, 13, 17, 19, 23],
bins_per_octave=190,
sort=False,
return_factors=True,
)
all_intervals = np.concatenate(
(
intervals_pythagorean,
intervals_3lim,
intervals_5lim,
intervals_7lim,
intervals_23lim,
)
)
# Factorize the rationals and cache them, keyed by truncated float
factorized = dict()
for interval in all_intervals:
# Compute the interval
log_value = 0
for p in interval:
log_value += np.log2(p) * interval[p]
value = np.around(np.power(2.0, log_value), 6)
factorized[float(value)] = {
int(p): int(interval[p]) for p in interval if interval[p] != 0
}
with open("intervals.msgpack", "wb") as fdesc:
msgpack.dump(factorized, fdesc)
if __name__ == "__main__":
main()
| isc | 9c3908deeaeff7a3e072572a456bbc21 | 24.240964 | 78 | 0.608592 | 3.521008 | false | false | false | false |
cmusatyalab/opendiamond | opendiamond/dataretriever/diamond_store.py | 1 | 4494 | #
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2009-2018 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
import os
import datetime
from xml.sax.saxutils import quoteattr
from flask import Blueprint, url_for, Response, stream_with_context, send_file, \
jsonify
from werkzeug.datastructures import Headers
from werkzeug.security import safe_join
from opendiamond.dataretriever.util import ATTR_SUFFIX
BASEURL = 'collection'
STYLE = False
LOCAL_OBJ_URI = True # if true, return local file path, otherwise http.
INDEXDIR = DATAROOT = None
def init(config):
global INDEXDIR, DATAROOT # pylint: disable=global-statement
INDEXDIR = config.indexdir
DATAROOT = config.dataroot
scope_blueprint = Blueprint('diamond_store', __name__)
@scope_blueprint.route('/<gididx>')
@scope_blueprint.route('/<gididx>/limit/<int:limit>')
def get_scope(gididx, limit=None):
index = 'GIDIDX' + gididx.upper()
index = _get_index_absolute_path(index)
# Streaming response:
# http://flask.pocoo.org/docs/0.12/patterns/streaming/
def generate():
num_entries = 0
with open(index, 'r') as f:
for _ in f.readlines():
num_entries += 1
if limit is not None and num_entries >= limit:
break
with open(index, 'r') as f:
yield '<?xml version="1.0" encoding="UTF-8" ?>\n'
if STYLE:
yield '<?xml-stylesheet type="text/xsl" href="/scopelist.xsl" ?>\n'
yield '<objectlist count="{:d}">\n'.format(num_entries)
count = 0
for path in f.readlines():
path = path.strip()
yield _get_object_element(object_path=path) + '\n'
count += 1
if limit is not None and count >= limit:
break
yield '</objectlist>\n'
headers = Headers([('Content-Type', 'text/xml')])
return Response(stream_with_context(generate()),
status="200 OK",
headers=headers)
@scope_blueprint.route('/id/<path:object_path>')
def get_object_id(object_path):
headers = Headers([('Content-Type', 'text/xml')])
return Response(_get_object_element(object_path=object_path),
"200 OK",
headers=headers)
@scope_blueprint.route('/meta/<path:object_path>')
def get_object_meta(object_path):
path = _get_obj_absolute_path(object_path)
attrs = dict()
try:
with DiamondTextAttr(path, 'r') as attributes:
for key, value in attributes:
attrs[key] = value
except IOError:
pass
return jsonify(attrs)
def _get_object_element(object_path):
path = _get_obj_absolute_path(object_path)
if os.path.isfile(path + ATTR_SUFFIX):
return '<object id={} src={} meta={} />' \
.format(quoteattr(url_for('.get_object_id', object_path=object_path)),
quoteattr(_get_object_src_uri(object_path)),
quoteattr(url_for('.get_object_meta', object_path=object_path)))
else:
return '<object id={} src={} />' \
.format(quoteattr(url_for('.get_object_id', object_path=object_path)),
quoteattr(_get_object_src_uri(object_path)))
def _get_object_src_uri(object_path):
if LOCAL_OBJ_URI:
return 'file://' + _get_obj_absolute_path(object_path)
else:
return url_for('.get_object_src_http', obj_path=object_path)
def _get_obj_absolute_path(obj_path):
return safe_join(DATAROOT, obj_path)
def _get_index_absolute_path(index):
return safe_join(INDEXDIR, index)
@scope_blueprint.route('/obj/<path:obj_path>')
def get_object_src_http(obj_path):
path = _get_obj_absolute_path(obj_path)
headers = Headers()
# With add_etags=True, conditional=True
# Flask should be smart enough to do 304 Not Modified
response = send_file(path,
cache_timeout=datetime.timedelta(
days=365).total_seconds(),
add_etags=True,
conditional=True)
response.headers.extend(headers)
return response
| epl-1.0 | 61603e0df04e468098be76caa0e9a646 | 30.208333 | 84 | 0.609034 | 3.674571 | false | false | false | false |
cmusatyalab/opendiamond | opendiamond/scopeserver/flickr/views.py | 1 | 1909 | #
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2009-2019 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
from django.contrib.auth.decorators import permission_required
from django.conf import settings
from django.http import QueryDict, HttpResponse
from django.shortcuts import render
from opendiamond.scope import generate_cookie_django
from .forms import FlickrForm
@permission_required("flickr.search")
def index(request):
if request.method == 'POST':
form = FlickrForm(request.POST)
if form.is_valid():
tags = form.cleaned_data.get('tags', '').split('\n')
tags = [tag.strip() for tag in tags] # trim whitespace
tags = [tag for tag in tags if tag] # skip empty
tag_mode = 'all' if form.cleaned_data['tag_mode'] else 'any'
text = form.cleaned_data.get('text', '')
q = QueryDict('').copy()
if tags:
q['tags'] = ','.join(tags)
q['tag_mode'] = tag_mode
if text:
q['text'] = text
query = q.urlencode()
scope = ["/flickr/?%s" % query]
proxies = settings.FLICKR_PROXIES \
if form.cleaned_data['proxied'] else None
cookie = generate_cookie_django(
scope, settings.FLICKR_SERVERS, proxies,
blaster=getattr(settings, 'FLICKR_BLASTER', None))
return HttpResponse(cookie, content_type='application/x-diamond-scope')
else:
form = FlickrForm()
return render(request, 'scopeserver/simple_form.html', {
'form': form,
})
| epl-1.0 | 2ba9f0cbd0d98c26f27173808db7e218 | 31.355932 | 83 | 0.618649 | 3.787698 | false | false | false | false |
cmusatyalab/opendiamond | opendiamond/blaster/__init__.py | 1 | 4788 | #
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2012 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
'''JSON Blaster web application.'''
from future import standard_library
standard_library.install_aliases()
import logging
import os
from urllib.parse import urljoin
import threading
import time
from sockjs.tornado import SockJSRouter
import tornado.ioloop
from tornado.options import define, options
import tornado.web
from tornado.web import url
from opendiamond.blobcache import BlobCache
from opendiamond.blaster.cache import SearchCache
from opendiamond.blaster.handlers import (
SearchHandler, PostBlobHandler, EvaluateHandler, ResultHandler,
AttributeHandler, UIHandler, SearchConnection)
define('baseurl', type=str, default=None,
metavar='URL', help='Base URL for this JSON Blaster')
define('blob_cache_dir',
default=os.path.expanduser('~/.diamond/blob-cache-json'),
metavar='DIR', help='Cache directory for binary objects')
define('search_cache_dir',
default=os.path.expanduser('~/.diamond/search-cache-json'),
metavar='DIR', help='Cache directory for search definitions')
_log = logging.getLogger(__name__)
class JSONBlaster(tornado.web.Application):
handlers = (
(r'/$', SearchHandler),
(r'/blob$', PostBlobHandler),
url(r'/result/([0-9a-f]{64})$',
EvaluateHandler, name='evaluate'),
url(r'/result/([0-9a-f]{64})/([0-9a-f]{64})$',
ResultHandler, name='result'),
url(r'/result/([0-9a-f]{64})/([0-9a-f]{64})/raw/(.*)$',
AttributeHandler, name='attribute-raw'),
url(r'/result/([0-9a-f]{64})/([0-9a-f]{64})/image/(.*)$',
AttributeHandler, kwargs={'transcode': True},
name='attribute-image'),
url(r'/ui$', UIHandler, name='ui-search',
kwargs={'template': 'search.html'}),
url(r'/ui/results$', UIHandler, name='ui-results',
kwargs={'template': 'results.html'}),
url(r'/ui/result$', UIHandler, name='ui-result',
kwargs={'template': 'result.html'}),
)
app_settings = {
'static_path': os.path.join(os.path.dirname(__file__), 'static'),
'template_path': os.path.join(os.path.dirname(__file__), 'templates'),
}
sockjs_settings = {
'sockjs_url': '/static/sockjs.js',
}
cache_prune_interval = 3600 # seconds
# The blob cache is only used as a holding area for blobs that will soon
# be added to a search, so cache objects don't need a long lifetime.
blob_cache_days = 1
def __init__(self, **kwargs):
if options.baseurl is None:
raise ValueError('Base URL must be configured')
router = SockJSRouter(SearchConnection, '/search',
self.sockjs_settings)
# Allow connections to find the application
router.application = self
handlers = list(self.handlers)
router.apply_routes(handlers)
settings = dict(self.app_settings)
settings.update(kwargs)
tornado.web.Application.__init__(self, handlers, **settings)
if not os.path.isdir(options.blob_cache_dir):
os.makedirs(options.blob_cache_dir, 0o700)
self.blob_cache = BlobCache(options.blob_cache_dir)
self.search_cache = SearchCache(options.search_cache_dir)
self._pruner = threading.Thread(target=self._prune_cache_thread,
name='prune-cache')
self._pruner.daemon = True
self._pruner.start()
def reverse_url(self, name, *args):
'''Ensure all emitted URLs are absolute, since the browser's base
URL will point to the frontend application and not to us.'''
relative = tornado.web.Application.reverse_url(self, name, *args)
return urljoin(options.baseurl, relative)
# We don't want to abort the pruning thread on an exception
# pylint: disable=broad-except
def _prune_cache_thread(self):
'''Runs as a separate Python thread; cannot interact with Tornado
state.'''
while True:
try:
BlobCache.prune(self.blob_cache.basedir, self.blob_cache_days)
except Exception:
_log.exception('Pruning blob cache')
try:
self.search_cache.prune()
except Exception:
_log.exception('Pruning search cache')
time.sleep(self.cache_prune_interval)
# pylint: enable=broad-except
| epl-1.0 | 6b8db764ba21aae233b982b74ef65cf6 | 36.116279 | 78 | 0.637009 | 3.870655 | false | false | false | false |
cmusatyalab/opendiamond | opendiamond/filter/__init__.py | 1 | 21609 | #
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2011-2018 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
from __future__ import with_statement
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import zip
from builtins import filter
from builtins import object
import argparse
from io import BytesIO
import os
import PIL.Image
import select
import socket
import sys
from tempfile import mkstemp
import time
import threading
import traceback
from zipfile import ZipFile
from opendiamond.attributes import (
StringAttributeCodec, IntegerAttributeCodec, DoubleAttributeCodec,
RGBImageAttributeCodec, PatchesAttributeCodec, HeatMapAttributeCodec)
EXAMPLE_DIR = 'examples'
FILTER_PORT = 5555
class Session(object):
    '''Represents the Diamond search session.

    Wraps the control connection back to the Diamond runtime.  When
    constructed without a connection (conn=None), log() falls back to
    stderr so filters can be exercised outside of Diamond, but every
    method that needs the runtime raises RuntimeError instead.
    '''

    # Mapping from symbolic log level to the wire-protocol level bitmask.
    _LOG_LEVELS = {
        'critical': 0x01,
        'error': 0x02,
        'info': 0x04,
        'trace': 0x08,
        'debug': 0x10,
    }

    def __init__(self, filter_name, conn=None):
        self.name = filter_name
        self._conn = conn

    def log(self, level, message):
        '''Log message at the given symbolic level: one of "critical",
        "error", "info", "trace" or "debug".

        Raises ValueError on an unknown level.  (The previous
        implementation left the level bitmask unbound in that case and
        crashed with NameError when a connection was present.)'''
        try:
            lval = self._LOG_LEVELS[level]
        except KeyError:
            raise ValueError('Unknown log level: %s' % level)
        msg = '%s : %s' % (self.name, message)
        if self._conn is not None:
            self._conn.send_message('log', lval, msg)
        else:
            # Fallback logging to stderr so that filters can be tested
            # outside of Diamond
            print('[%s] %s' % (level, msg), file=sys.stderr)

    def get_vars(self, vars):
        '''vars is a tuple of session variable names to be atomically
        read.  Returns a dict mapping each name to its float value.

        Raises RuntimeError if there is no connection to Diamond.'''
        if self._conn is None:
            raise RuntimeError('No connection to Diamond')
        self._conn.send_message('get-session-variables', vars)
        values = [float(v) for v in self._conn.get_array()]
        return dict(zip(vars, values))

    def update_vars(self, vars):
        '''vars is a dict of session variables to be atomically updated.

        Raises RuntimeError if there is no connection to Diamond.'''
        if self._conn is None:
            raise RuntimeError('No connection to Diamond')
        # Build parallel name/value sequences; unlike zip(*items), this
        # also handles an empty dict without raising.
        names = list(vars.keys())
        values = [vars[n] for n in names]
        self._conn.send_message('update-session-variables', names, values)

    def ensure_resource(self, scope, rtype, params):
        """Ensure a resource in a certain scope and return the handler.

        Raises RuntimeError if there is no connection to Diamond."""
        if self._conn is None:
            raise RuntimeError('No connection to Diamond')
        self._conn.send_message('ensure-resource', scope, rtype, params)
        handle = self._conn.get_dict()
        # The wire protocol yields bytes; convert keys and values to str.
        return dict((k.decode(), v.decode()) for k, v in handle.items())

    def hint_large_attribute(self, size):
        '''Hint to the runtime that a large attribute is coming
        (size is presumably in bytes -- confirm against the runtime).

        Raises RuntimeError if there is no connection to Diamond.'''
        if self._conn is None:
            raise RuntimeError('No connection to Diamond')
        self._conn.send_message('hint-large-attribute', size)
class Filter(object):
    '''A Diamond filter.  Subclass this and override __call__().'''

    # List of Parameters representing argument types and the corresponding
    # attributes to store the arguments in. For example, argument 0 will be
    # stored in a Filter attribute named by params[0].
    params = ()
    # If False, self.blob will be set to the contents of the blob argument.
    # If True, self.blob will be a ZipFile object wrapping the blob argument.
    blob_is_zip = False
    # Set to True to decode example images from the blob argument and set
    # self.examples to a list of PIL.Image.
    load_examples = False

    def __init__(self, args, blob, session=None):
        '''Called to initialize the filter. After a subclass calls the
        constructor, it will find the parsed arguments stored as object
        attributes as specified by the parameters, and the blob, if any,
        in self.blob (wrapped in a ZipFile if self.blob_is_zip is True).

        Raises ValueError if the argument list does not match
        self.params.'''
        if session is None:
            # Default created lazily to avoid a single Session instance
            # shared across every Filter constructed with the default.
            session = Session('filter')
        if len(args) != len(self.params):
            raise ValueError(
                'Incorrect argument list length. Expect %d. Received %s' % (
                    len(self.params), ','.join(args)))
        for param, arg in zip(self.params, args):
            # Each Parameter knows its attribute name (str(param)) and
            # how to parse its raw string argument.
            setattr(self, str(param), param.parse(arg))
        if self.blob_is_zip:
            self.blob = ZipFile(BytesIO(blob), 'r')
        else:
            self.blob = blob
        if self.load_examples:
            self.examples = []
            zf = ZipFile(BytesIO(blob), 'r')
            for path in zf.namelist():
                if (path.startswith(EXAMPLE_DIR + '/') and
                        not path.endswith('/')):
                    # We don't use zf.open() because it would cause all
                    # Images to share the same file offset pointer
                    data = zf.read(path)
                    self.examples.append(PIL.Image.open(BytesIO(data)))
        self.session = session

    def __call__(self, object):
        '''Called once for each object to be evaluated. Returns the Diamond
        search score.'''
        raise NotImplementedError()

    def load_egg(self, module=None, globals=None, data=None):
        '''Treat data as the contents of an egg and add it to the Python
        path. If data is not specified, self.blob will be used. As a
        convenience, if module is specified, it will be added to the
        specified globals, which should be set to globals().'''
        if data is None:
            data = self.blob
        # NamedTemporaryFile always deletes the file on close on
        # Python 2.5, so we can't use it
        fd, name = mkstemp(prefix='filter-', suffix='.egg')
        # The blob is binary data, so the descriptor must be opened in
        # binary mode (the previous 'r+' text mode broke on Python 3).
        egg = os.fdopen(fd, 'wb')
        egg.write(data)
        egg.close()
        sys.path.append(name)
        if module is not None:
            globals[module] = __import__(module, level=0)

    @classmethod
    def run(cls, classes=None, argv=None):
        '''Try to run the filter. Returns True if we did something,
        False if not.

        If classes is specified, it is a list of filter classes that
        should be supported. In this case, the first filter argument must
        specify the name of the class that should be executed during this
        run of the program. That argument will be stripped from the
        argument list before it is given to the filter.'''
        parser = argparse.ArgumentParser(description='A Diamond filter')
        parser.add_argument('--filter', action='store_true')
        parser.add_argument(
            '--tcp', action='store_true',
            help='Communicate through TCP instead of stdio')
        parser.add_argument(
            '--port', type=int, default=FILTER_PORT, help='TCP port')
        parser.add_argument(
            '--client', dest='host', default=None,
            help='The TCP address to connect to. If none, the filter '
                 'listens.')
        parser.add_argument(
            '--fifo_in', default=None,
            help='Communicate through a named pipe instead of stdio.')
        parser.add_argument('--fifo_out', default=None)
        # Bug fix: the original inverted the argv check and ignored an
        # explicitly-passed argv.  argparse falls back to sys.argv when
        # given None, so a single call handles both cases.
        flags = parser.parse_args(argv)
        if flags.filter:
            cls._run_loop(flags, classes)
            return True
        return False

    @classmethod
    def _run_loop(cls, flags, classes=None):
        '''Establish the control connection selected by flags (named
        pipes, TCP client/server with one forked child per connection,
        or the traditional stdin/stdout pair), then run the filter
        evaluation loop forever.'''
        if flags.fifo_in is not None and flags.fifo_out is not None:
            # not fork
            print("Using FIFO in/out:", flags.fifo_in, flags.fifo_out)
            fout = os.fdopen(os.open(flags.fifo_out, os.O_WRONLY), 'wb')
            fin = os.fdopen(os.open(flags.fifo_in, os.O_RDONLY), 'rb')
            conn = _DiamondConnection(fin, fout)
            print("Connected!")
        elif flags.tcp and flags.host is not None:
            # connect to a TCP port as client
            while True:
                # spawn as many children as the server accepts
                try:
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sock.connect((flags.host, flags.port))
                    print("Connected to %s:%d. Waiting for readable." %
                          (flags.host, flags.port))
                    # Prevent too many just-connected sockets.
                    # fork AFTER readable (diamondd picks up the
                    # connection and starts sending init)
                    readable, _, exceptional = select.select(
                        [sock, ], [], [sock, ])
                    if readable:
                        sock.setsockopt(
                            socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                        fin = sock.makefile('rb')
                        fout = sock.makefile('wb')
                        conn = _DiamondConnection(fin, fout)
                        pid = os.fork()
                        if pid == 0:  # child
                            break
                        else:
                            print("Forked", pid)
                    if exceptional:
                        print("Broken connection.")
                        sock = None
                except socket.error:
                    # Server not ready yet; back off briefly and retry.
                    time.sleep(0.5)
        elif flags.tcp and flags.host is None:
            # listen on TCP port
            print("Listening on TCP port ", flags.port)
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(('', flags.port))
            sock.listen(8)
            while True:
                c, addr = sock.accept()
                c.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                print("Accepted connection from ", addr)
                pid = os.fork()
                if pid == 0:
                    # child, set up the real stuff and start the filter
                    # loop
                    sock = None
                    fin = c.makefile('rb')
                    fout = c.makefile('wb')
                    conn = _DiamondConnection(fin, fout)
                    # TODO deliver stdout to Diamond under 'stdout' tag
                    # as in the old way
                    break
                else:  # server, continue listening forever
                    print("Forked", pid)
                    continue
        else:
            # old way: run an instance of the filter and speak through
            # stdin/stdout.
            # Set aside stdin and stdout to prevent them from being
            # accessed by mistake, even in forked children
            fin = os.fdopen(os.dup(sys.stdin.fileno()), 'rb')
            fout = os.fdopen(os.dup(sys.stdout.fileno()), 'wb')
            fh = open('/dev/null', 'rb')
            os.dup2(fh.fileno(), 0)
            sys.stdin = os.fdopen(0, 'rb')
            fh.close()
            read_fd, write_fd = os.pipe()
            os.dup2(write_fd, 1)
            sys.stdout = os.fdopen(1, 'wb', 0)
            os.close(write_fd)
            conn = _DiamondConnection(fin, fout)
            # Send the fake stdout to Diamond in the background
            _StdoutThread(os.fdopen(read_fd, 'rb', 0), conn).start()

        # Read arguments and initialize filter
        ver = int(conn.get_item())
        if ver != 1:
            raise ValueError('Unknown protocol version %d' % ver)
        name = conn.get_item().decode()
        args = conn.get_array()
        args = list(map(bytes.decode, args))  # to str
        blob = conn.get_item()
        session = Session(name, conn)
        try:
            if classes is not None:
                # Use the class named by the first filter argument
                target = args.pop(0)
                for class_ in classes:
                    if class_.__name__ == target:
                        filter_class = class_
                        break
                else:
                    raise ValueError('Filter class %s is not available' %
                                     target)
            else:
                filter_class = cls
            filter_obj = filter_class(args, blob, session)
        except:
            session.log('critical', traceback.format_exc())
            raise
        conn.send_message('init-success')

        # Main loop: evaluate one object per iteration, forever.
        while True:
            obj = _DiamondObject(conn)
            try:
                result = filter_obj(obj)
            except:
                session.log('error', traceback.format_exc())
                raise
            # Normalize boolean/None scores to the integer wire format.
            if result is True:
                result = 1
            elif result is False or result is None:
                result = 0
            conn.send_message('result', result)
            obj.invalidate()
class LingeringObjectError(Exception):
    """Signals that an Object was accessed after Diamond moved past it."""
class Object(object):
    '''A Diamond object to be evaluated. Instantiating this class directly
    will provide a dummy object that does not try to talk to Diamond. This
    can be useful for filter testing.'''

    def __init__(self, attrs=()):
        # Attribute cache; a cached None marks a key known to be absent.
        self._attrs = dict(attrs)
        self._valid = True
        self._image = None

    def get_binary(self, key):
        '''Get the specified object attribute as raw binary data.
        Raises KeyError if the attribute does not exist.'''
        self.check_valid()
        if key not in self._attrs:
            self._attrs[key] = self._get_attribute(key)
        if self._attrs[key] is None:
            # Include the key so callers can tell which attribute failed
            raise KeyError(key)
        return self._attrs[key]

    def set_binary(self, key, value):
        '''Set the specified object attribute as raw binary data.
        Raises ValueError if value is None.'''
        self.check_valid()
        if value is None:
            raise ValueError('Attribute value cannot be None')
        self._set_attribute(key, value)
        self._attrs[key] = value

    def get_string(self, key):
        '''Get the specified object attribute, interpreting the raw data
        as a null-terminated string.'''
        return StringAttributeCodec().decode(self.get_binary(key))

    def set_string(self, key, value):
        '''Set the specified object attribute as a null-terminated string.'''
        self.set_binary(key, StringAttributeCodec().encode(value))

    def get_int(self, key):
        '''Get the specified object attribute, interpreting the raw data
        as a native-endian integer. The key name should end with ".int".'''
        return IntegerAttributeCodec().decode(self.get_binary(key))

    def set_int(self, key, value):
        '''Set the specified object attribute as an integer. The key name
        should end with ".int".'''
        self.set_binary(key, IntegerAttributeCodec().encode(value))

    def get_double(self, key):
        '''Get the specified object attribute, interpreting the raw data
        as a native-endian double. The key name should end with ".double".'''
        return DoubleAttributeCodec().decode(self.get_binary(key))

    def set_double(self, key, value):
        '''Set the specified object attribute as a double. The key name
        should end with ".double".'''
        self.set_binary(key, DoubleAttributeCodec().encode(value))

    def get_rgbimage(self, key):
        '''Get the specified object attribute, interpreting the raw data
        as an RGBImage structure. The key name should end with ".rgbimage".'''
        return RGBImageAttributeCodec().decode(self.get_binary(key))

    def set_rgbimage(self, key, value):
        '''Set the specified object attribute as an RGBImage structure.
        The key name should end with ".rgbimage".'''
        self.set_binary(key, RGBImageAttributeCodec().encode(value))

    def get_patches(self, key):
        '''Get the specified object attribute as a list of patches. Returns
        (distance, patches), where patches is a tuple of (upper_left_coord,
        lower_right_coord) tuples and a coordinate is an (x, y) tuple.'''
        return PatchesAttributeCodec().decode(self.get_binary(key))

    def set_patches(self, key, distance, patches):
        '''Set the specified object attribute as a list of patches. distance
        is a double. patches is a list of (upper_left_coord,
        lower_right_coord) tuples, where a coordinate is an (x, y) tuple.
        The key name should probably be _filter.%s.patches, where %s is the
        filter name from Session.'''
        self.set_binary(
            key, PatchesAttributeCodec().encode((distance, patches)))

    def get_heatmap(self, key):
        '''Get the specified object attribute, interpreting the raw data
        as a heat map image.'''
        return HeatMapAttributeCodec().decode(self.get_binary(key))

    def set_heatmap(self, key, value):
        '''Set the specified object attribute as a heat map image. The key
        name should probably be _filter.%s.heatmap.png, where %s is the
        filter name from Session.'''
        self.set_binary(key, HeatMapAttributeCodec().encode(value))

    def __getitem__(self, key):
        '''Syntactic sugar for self.get_string().'''
        return self.get_string(key)

    def __setitem__(self, key, value):
        '''Syntactic sugar for self.set_string().'''
        return self.set_string(key, value)

    def __contains__(self, key):
        self.check_valid()
        try:
            self.get_binary(key)
        except KeyError:
            return False
        return True

    @property
    def data(self):
        '''Convenience property to get the object data.'''
        return self.get_binary('')

    @property
    def image(self):
        '''Convenience property to get the decoded RGB image as a PIL Image.
        The decoded image is cached after the first access.'''
        if self._image is None:
            self._image = self.get_rgbimage('_rgb_image.rgbimage')
        return self._image

    def omit(self, key):
        '''Tell Diamond not to send the specified attribute back to the
        client by default. Raises KeyError if the attribute does not exist.'''
        self.check_valid()
        self._omit_attribute(key)

    def check_valid(self):
        '''Raise LingeringObjectError if this object is no longer live.'''
        if not self._valid:
            raise LingeringObjectError()

    def invalidate(self):
        '''Ensure the Object can't be used to send commands to Diamond once
        Diamond has moved on to another object'''
        self._valid = False

    # The hooks below are overridden by _DiamondObject to talk to the
    # server; these dummy implementations make a bare Object usable for
    # offline filter testing.
    def _get_attribute(self, _key):
        return None

    def _set_attribute(self, _key, _value):
        pass

    def _omit_attribute(self, key):
        if key not in self._attrs:
            raise KeyError(key)
class _DiamondObject(Object):
    '''An Object whose attribute accesses are relayed over the Diamond
    protocol connection instead of being served from local dummies.'''

    def __init__(self, conn):
        super(_DiamondObject, self).__init__()
        self._conn = conn

    def _get_attribute(self, key):
        # Ask the server for the raw value; None means "not present".
        self._conn.send_message('get-attribute', key)
        return self._conn.get_item()

    def _set_attribute(self, key, value):
        self._conn.send_message('set-attribute', key, value)

    def _omit_attribute(self, key):
        self._conn.send_message('omit-attribute', key)
        if not self._conn.get_boolean():
            raise KeyError()
class _DiamondConnection(object):
'''Proxy object for the stdin/stdout protocol connection with the
Diamond server.'''
# XXX Work here to change the filter protocol (client side)
def __init__(self, fin, fout):
# fin and fout are in binary mode
self._fin = fin
self._fout = fout
self._output_lock = threading.Lock()
def get_item(self):
'''Read and return a bytes.'''
sizebuf = self._fin.readline().decode()
if not sizebuf:
# End of file
raise IOError('End of input stream')
elif not sizebuf.strip():
# No length value == no data
return None
size = int(sizebuf)
item = self._fin.read(size)
if len(item) != size:
raise IOError('Short read from stream')
# Swallow trailing newline
self._fin.read(1)
return item
def get_array(self):
'''Read and return an array of strings or blobs.'''
arr = []
while True:
el = self.get_item()
if el is None:
return arr
arr.append(el)
def get_boolean(self):
return self.get_item() == b'true'
def get_dict(self):
keys = self.get_array()
values = self.get_array()
dct = dict(list(zip(keys, values)))
return dct
def _send_value(self, value):
# send a single value
if not isinstance(value, bytes):
# convert value to bytes
value = str(value).encode()
self._fout.write(b'%d\n' % len(value))
self._fout.write(value)
self._fout.write(b'\n')
def send_message(self, tag, *values):
'''Atomically sends a message, consisting of a str tag followed by one
or more values. An argument can be a list or tuple, in which case
it is serialized as an array of values terminated by a blank line.'''
with self._output_lock:
self._fout.write(b'%s\n' % tag.encode())
for value in values:
if isinstance(value, (list, tuple)):
for el in value:
self._send_value(el)
self._fout.write(b'\n')
else:
self._send_value(value)
self._fout.flush()
class _StdoutThread(threading.Thread):
name = 'stdout thread'
def __init__(self, stdout_pipe, conn):
threading.Thread.__init__(self)
self.setDaemon(True)
self._pipe = stdout_pipe
self._conn = conn
def run(self):
try:
while True:
buf = self._pipe.read(32768)
if not buf:
break
self._conn.send_message('stdout', buf)
except IOError:
pass
| epl-1.0 | 52abbf5bb87866a0238892f9eaa56244 | 36.515625 | 134 | 0.578509 | 4.268023 | false | false | false | false |
cmusatyalab/opendiamond | opendiamond/server/listen.py | 1 | 9733 | #
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2011-2018 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
'''Listening for new connections; pairing control and data connections.'''
from builtins import object
import binascii
import errno
import logging
import os
import select
import socket
from weakref import WeakValueDictionary
from opendiamond.helpers import connection_ok
from opendiamond.protocol import NONCE_LEN, NULL_NONCE
# Listen parameters
BACKLOG = 16
# Connection identifiers
CONTROL = 0
DATA = 1
_log = logging.getLogger(__name__)
class ListenError(Exception):
    """Raised when the listening socket could not be set up."""
class _ConnectionClosed(Exception):
'''The socket has been closed.'''
class _PendingConn(object):
    '''A connection still in the matchmaking process, which is:

    1. Accept connection
    2. Read NONCE_LEN bytes, non-blockingly.
    3. If all zero, this is a control channel connection. Generate NONCE_LEN
       bytes of nonce and send them back down the connection, then monitor
       the connection for closures so we can clean up our own state.
    4. If nonzero, this is a blast channel connection. Look up the nonce to
       see which control channel it corresponds to. If not found, close the
       connection. Otherwise, we have a pairing; send the nonce back down
       the connection and start handling RPCs.'''

    def __init__(self, sock, peer):
        self.sock = sock
        self.sock.setblocking(0)
        self.peer = peer
        self.nonce = b''

    def read_nonce(self):
        '''Attempt to read the client's nonce.  Returns CONTROL for a
        control connection, DATA for a data connection, or None when more
        bytes are still needed.  The nonce ends up in self.nonce.'''
        if len(self.nonce) >= NONCE_LEN:
            # Readable after the nonce completed: unexpected data arrived
            # or the connection has died.
            raise _ConnectionClosed()
        chunk = self.sock.recv(NONCE_LEN - len(self.nonce))
        if not chunk:
            raise _ConnectionClosed()
        self.nonce += chunk
        if len(self.nonce) < NONCE_LEN:
            return None
        # Nonce complete; non-blocking mode is no longer needed.
        self.sock.setblocking(1)
        if self.nonce != NULL_NONCE:
            return DATA
        # All-zero nonce: mint a fresh one for this control channel.
        self.nonce = os.urandom(NONCE_LEN)
        return CONTROL

    def send_nonce(self):
        '''Echo the nonce back to the client.'''
        try:
            self.sock.sendall(self.nonce)
        except socket.error:
            raise _ConnectionClosed()

    @property
    def nonce_str(self):
        '''Hex representation of the nonce.'''
        return binascii.hexlify(self.nonce)
class _ListeningSocket(object):
'''A wrapper class for a listening socket which is to be added to a
_PendingConnPollSet.'''
def __init__(self, sock):
self.sock = sock
def accept(self):
return self.sock.accept()
class _PendingConnPollSet(object):
'''Convenience wrapper around a select.poll object which works not with
file descriptors, but with any object with a "sock" attribute containing
a network socket.'''
def __init__(self):
self._fd_to_pconn = dict()
self._pollset = select.poll()
def register(self, pconn, mask):
'''Add the pconn to the set with the specified mask. Can also be
used to update the mask for an existing pconn.'''
fd = pconn.sock.fileno()
self._fd_to_pconn[fd] = pconn
self._pollset.register(fd, mask)
def unregister(self, pconn):
'''Remove the pconn from the set.'''
fd = pconn.sock.fileno()
self._pollset.unregister(fd)
del self._fd_to_pconn[fd]
def poll(self):
'''Poll for events and return a list of (pconn, eventmask) pairs.
pconn will be None for events on the listening socket.'''
while True:
try:
items = self._pollset.poll()
except select.error as e:
# If poll() was interrupted by a signal, retry. If the
# signal was supposed to be fatal, the signal handler would
# have raised an exception.
if e.args[0] == errno.EINTR:
pass
else:
raise
else:
break
ret = []
for fd, event in items:
ret.append((self._fd_to_pconn[fd], event))
return ret
def close(self):
'''Unregister all connections from the pollset.'''
for pconn in list(self._fd_to_pconn.values()):
self.unregister(pconn)
class ConnListener(object):
    '''Manager for listening socket and connections still in the matchmaking
    process.

    Control and data connections arrive on the same port and are paired
    up via the nonce exchange implemented in _PendingConn.
    '''

    def __init__(self, port):
        '''Bind non-blocking listeners on every usable address for the
        given port.  Raises ListenError if no address could be bound.'''
        # Get a list of potential bind addresses
        addrs = socket.getaddrinfo(None, port, 0, socket.SOCK_STREAM, 0,
                                   socket.AI_PASSIVE)
        # Try to bind to each address
        socks = []
        for family, type, proto, _canonname, addr in addrs:
            try:
                sock = socket.socket(family, type, proto)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                if family == socket.AF_INET6:
                    # Ensure an IPv6 listener doesn't also bind to IPv4,
                    # since depending on the order of getaddrinfo return
                    # values this could cause the IPv6 bind to fail
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
                sock.bind(addr)
                sock.listen(BACKLOG)
                sock.setblocking(0)
                socks.append(sock)
            except socket.error:
                pass
        if not socks:
            # None of the addresses worked
            raise ListenError("Couldn't bind listening socket")
        self._poll = _PendingConnPollSet()
        for sock in socks:
            self._poll.register(_ListeningSocket(sock), select.POLLIN)
        # Weak values: when a pending control connection is unregistered
        # (or dies), its entry here disappears automatically with it.
        self._nonce_to_pending = WeakValueDictionary()

    def _accept(self, lsock):
        '''Accept waiting connections and add them to the pollset.'''
        try:
            while True:
                sock, addr = lsock.accept()
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                host = addr[0]
                if connection_ok('diamondd', host):
                    pconn = _PendingConn(sock, host)
                    _log.debug('New connection from %s', pconn.peer)
                    self._poll.register(pconn, select.POLLIN)
                else:
                    sock.close()
                    _log.info('Rejected connection from %s', host)
        except socket.error:
            # Non-blocking accept() raises once no connections remain
            pass

    def _traffic(self, pconn):
        '''Handle poll readiness events on the specified pconn.
        Returns a (control socket, data socket) pair once a data
        connection has been matched with its control connection;
        otherwise returns None.'''
        try:
            # Continue trying to read the nonce
            ret = pconn.read_nonce()
            if ret is not None:
                # Have the nonce.
                if ret == CONTROL:
                    _log.debug('Control connection from %s, nonce %s',
                               pconn.peer, pconn.nonce_str)
                    pconn.send_nonce()
                    self._nonce_to_pending[pconn.nonce] = pconn
                else:
                    control = self._nonce_to_pending.get(pconn.nonce, None)
                    if control is not None:
                        # We have a match! Clean up pending state and
                        # return the connection handles.
                        _log.debug('Data connection from %s, accepted '
                                   'nonce %s', pconn.peer, pconn.nonce_str)
                        pconn.send_nonce()
                        self._poll.unregister(control)
                        self._poll.unregister(pconn)
                        return (control.sock, pconn.sock)
                    else:
                        # No control connection for this data connection.
                        # Close it.
                        _log.warning('Data connection from %s, unknown '
                                     'nonce %s', pconn.peer, pconn.nonce_str)
                        self._poll.unregister(pconn)
        except _ConnectionClosed:
            # Connection died, clean it up. _nonce_to_pending holds a weak
            # reference to the pconn, so this should be sufficient to GC
            # the pconn and close the fd.
            _log.warning('Connection to %s died during setup', pconn.peer)
            self._poll.unregister(pconn)
        return None

    def accept(self):
        '''Returns a new (control, data) connection pair.'''
        while True:
            for pconn, _flags in self._poll.poll():
                # Listening sockets are distinguished by their accept()
                # attribute (duck typing against _ListeningSocket).
                if hasattr(pconn, 'accept'):
                    # Listening socket
                    self._accept(pconn)
                else:
                    # Traffic on a pending connection; attempt to pair it
                    ret = self._traffic(pconn)
                    if ret is not None:
                        return ret
                    # pconn may now be a dead connection; allow it to be GC'd
                    pconn = None

    def shutdown(self):
        '''Close listening socket and all pending connections.'''
        self._poll.close()
| epl-1.0 | 9ce6aaff174c62b067251b1f145aa982 | 36.007605 | 79 | 0.562725 | 4.422081 | false | false | false | false |
eregs/regulations-site | regulations/views/sidebar.py | 4 | 1190 | from importlib import import_module
import six
from django.conf import settings
from django.views.generic.base import TemplateView
from regulations.generator import api_reader
class SideBarView(TemplateView):
    """View rendering the right-side sidebar."""
    template_name = 'regulations/sidebar.html'
    # contains either class paths or class objects (not instances)
    components = settings.SIDEBARS

    @staticmethod
    def _resolve_component(component):
        """Turn a dotted-path string into the class it names; class
        objects pass through unchanged."""
        if isinstance(component, six.string_types):
            module_path, class_name = component.rsplit('.', 1)
            return getattr(import_module(module_path), class_name)
        return component

    def get_context_data(self, **kwargs):
        context = super(SideBarView, self).get_context_data(**kwargs)
        client = api_reader.ApiReader()
        sidebar_classes = [self._resolve_component(component)
                           for component in self.components]
        sidebars = [cls(context['label_id'], context['version'])
                    for cls in sidebar_classes]
        context['sidebars'] = [sidebar.full_context(client, self.request)
                               for sidebar in sidebars]
        return context
| cc0-1.0 | 5705f09ee43b4eb1b21c90dd1883fe05 | 34 | 74 | 0.642017 | 4.204947 | false | false | false | false |
eregs/regulations-site | regulations/management/commands/eregs_cache.py | 2 | 4376 | from BeautifulSoup import BeautifulSoup
import requests
import threading
import sys
import time
from six.moves.urllib_parse import urlparse
class EregsCache():
    # Crawls an eRegs deployment and requests every "partial" page so the
    # server's cache is warmed.  Work is fanned out over daemon-less
    # threads with a crude count-based throttle.

    @staticmethod
    def write(msg):
        # Progress messages go to stdout.
        sys.stdout.write(msg + "\n")

    @staticmethod
    def write_error(msg):
        # Failures go to stderr so they can be redirected separately.
        sys.stderr.write(msg + "\n")

    def access_url(self, *args):
        # Fetch args[0] once purely for its caching side effect; any
        # status other than 200 is reported as "Failed" but not raised.
        try:
            req = requests.get(args[0])
            status_txt = "Cached"
            if req.status_code != 200:
                status_txt = "Failed"
            msg = "{0} (status {1}): {2}".format(
                status_txt, str(req.status_code), req.url)
            self.write(msg)
        except Exception as err:
            self.write_error("Failed: " + args[0])
            self.write_error(str(err))

    def get_main_reg_list(self):
        # Scrape the landing page's <ul class="reg-list"> for per-regulation
        # hrefs.  Returns None (implicitly) if the page fails to load.
        try:
            regulations_links = []
            html = requests.get(self.full_url).text
            soup = BeautifulSoup(html)
            reg_list = soup.find("ul", {"class": "reg-list"})
            for link in reg_list.findAll("li"):
                regulations_links.append(link.find("a")["href"])
            return regulations_links
        except Exception as err:
            self.write_error("Main Page Failed To Load")
            self.write_error(str(err))

    # figure out the partial path depending on root location of eregs
    def get_partial_url(self, href):
        # Insert "/partial" after the deployment prefix: after the second
        # path segment when eRegs lives under a sub-path, else after the
        # first.  Fragment identifiers are stripped.
        href = href.split("/")
        if self.full_url != self.base_url:
            href[1] += "/partial"
        else:
            href[0] += "/partial"
        href = '/'.join(href)
        href = href.split("#")[0]
        return self.base_url + href

    def __init__(self, eregs_url, regulations=None):
        """ kick off calling regulations website for caching purposes
        Keyword arguments:
        eregs_url -- full web url to main regulations page
        regulations -- (optional) list of regulation paths to be processed
        """
        url = urlparse(eregs_url)
        self.base_url = url.scheme + "://" + url.netloc
        self.full_url = eregs_url
        self.write("Base Url:" + self.base_url)
        self.write("eRegs location:" + self.full_url)
        try:
            if not regulations:
                regulations_links = self.get_main_reg_list()
            else:
                # Caller supplied a comma-separated list of paths.
                regulations_links = regulations.split(",")
            for link in regulations_links:
                self.write("Getting NAV links from " + self.base_url + link)
                reg_nav = requests.get(self.base_url+link).text
                soup = BeautifulSoup(reg_nav)
                reg_soup = soup.findAll("a")
                for a in reg_soup:
                    # NOTE(review): for a BeautifulSoup3 Tag, `in` tests
                    # membership of tag *contents*, not attributes --
                    # confirm this actually selects anchors carrying a
                    # data-section-id attribute as intended.
                    if 'data-section-id' in a:
                        partial = self.get_partial_url(a["href"])
                        thread_count = len(threading.enumerate())
                        # process 5 web pages at time. Some servers hate a lot
                        # of requests at once. slow it down so the threads
                        # can catch up if there are too many threads being
                        # processed, end process and exit out
                        if thread_count <= 5:
                            threading.Thread(target=self.access_url,
                                             args=(partial, )).start()
                        elif thread_count >= 12:
                            message = "URLs currently at {0}. Server too slow"
                            self.write(message.format(thread_count))
                            self.write("Shutting down")
                            raise Exception("Thread Count Too High")
                        else:
                            self.write("Currently Processing {0} Urls".format(
                                thread_count))
                            self.write("Waiting...")
                            # Back off proportionally to in-flight threads.
                            time.sleep(thread_count * 2)
                # let the threads catch up before doing the next batch
                if len(threading.enumerate()) > 1:
                    time.sleep(len(threading.enumerate()) * 2)
        except Exception as err:
            self.write_error(str(err))
if __name__ == "__main__":
    # Optional third CLI argument: comma-separated regulation paths.
    regulations_arg = sys.argv[3] if len(sys.argv) > 3 else None
    EregsCache(sys.argv[1], regulations_arg)
| cc0-1.0 | 90a3faf81bfa47943f91c237bf1ab68f | 31.902256 | 78 | 0.508227 | 4.38477 | false | false | false | false |
eregs/regulations-site | regulations/tests/html_builder_test.py | 2 | 10765 | # -*- coding: utf-8 -*-
from unittest import TestCase
from regulations.generator.html_builder import (
CFRChangeHTMLBuilder, CFRHTMLBuilder, HTMLBuilder, PreambleHTMLBuilder)
from regulations.generator.layers.diff_applier import DiffApplier
from regulations.generator.layers.internal_citation import (
InternalCitationLayer)
from regulations.generator.node_types import REGTEXT, APPENDIX, INTERP
from regulations.generator.layers import diff_applier
class HTMLBuilderTest(TestCase):
    """Unit tests for the generic HTMLBuilder."""

    def test_process_node_header(self):
        """A 'header' key only appears when the node carries a title."""
        builder = HTMLBuilder()

        untitled = {'text': '', 'children': [], 'label': ['99', '22'],
                    'node_type': REGTEXT}
        builder.process_node(untitled)
        self.assertNotIn('header', untitled)

        titled = {'text': '', 'children': [], 'label': ['99', '22'],
                  'title': 'Some Title', 'node_type': REGTEXT}
        builder.process_node(titled)
        self.assertIn('header', titled)
        self.assertEqual('Some Title', titled['header'])

        marked = {'text': '', 'children': [], 'label': ['99', '22'],
                  'title': u'§ 22.1 Title', 'node_type': REGTEXT}
        builder.process_node(marked)
        self.assertIn('header', marked)

    def test_process_node_title_diff(self):
        """Title diffs render as <del>/<ins> markup in the header."""
        diff = {'204': {'title': [('delete', 0, 2), ('insert', 4, 'AAC')],
                        'text': [('delete', 0, 2), ('insert', 4, 'AAB')],
                        'op': ''}}
        builder = HTMLBuilder()
        builder.diff_applier = diff_applier.DiffApplier(diff, None)
        node = {'label_id': '204', 'title': 'abcd', 'node_type': APPENDIX}
        builder.process_node_title(node)
        self.assertEqual('<del>ab</del>cd<ins>AAC</ins>', node['header'])

    def test_node_title_no_diff(self):
        """Without a diff applier the title passes through unchanged."""
        builder = HTMLBuilder()
        node = {'label_id': '204', 'title': 'abcd', 'node_type': APPENDIX}
        builder.process_node_title(node)
        self.assertIn('header', node)
        self.assertEqual(node['title'], 'abcd')

    def test_is_collapsed(self):
        collapsed = ((['111', '22', 'a'], '(a) '),
                     (['111', '22', 'xx'], ' (xx) '),
                     (['111', '22', 'a', '5'], '(5)'))
        for label, text in collapsed:
            self.assertTrue(
                HTMLBuilder.is_collapsed({'label': label, 'text': text}))

        expanded = ((['111', '22', 'a'], '(b) '),
                    (['111', '22', ''], '(a) Some text'),
                    (['111', '22', 'a'], ' '))
        for label, text in expanded:
            self.assertFalse(
                HTMLBuilder.is_collapsed({'label': label, 'text': text}))

    def test_human_label(self):
        self.assertEqual('111', HTMLBuilder.human_label({'label': ['111']}))
        self.assertEqual(
            '111-22-33-aa',
            HTMLBuilder.human_label({'label': ['111', '22', '33', 'aa']}))
class CFRHTMLBuilderTest(TestCase):
    """Unit tests for the CFR-specific HTML builder.

    Uses assertEqual throughout: assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def test_list_level_interpretations(self):
        """Depth counts the markers appearing after 'Interp'."""
        builder = CFRHTMLBuilder()
        parts = ['101', '12', 'a', 'Interp', '1']
        node_type = INTERP
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 1)

        parts.append('j')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 2)

        parts.append('B')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 3)

    def test_list_level_appendices(self):
        """Depth counts the markers after the appendix letter."""
        builder = CFRHTMLBuilder()
        parts = ['101', 'A', '1', 'a']
        node_type = APPENDIX
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 1)

        parts.append('2')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 2)

        parts.append('k')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 3)

        parts.append('B')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 4)

    def test_list_level_regulations(self):
        """Depth counts the markers after the section number."""
        builder = CFRHTMLBuilder()
        parts = ['101', '1', 'a']
        node_type = REGTEXT
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 1)

        parts.append('2')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 2)

        parts.append('k')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 3)

        parts.append('B')
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 4)

    def test_list_level_regulations_no_level(self):
        """A bare section label has no list depth."""
        builder = CFRHTMLBuilder()
        parts = ['101', '1']
        node_type = REGTEXT
        result = builder.list_level(parts, node_type)
        self.assertEqual(result, 0)

    def test_no_section_sign(self):
        """Text without a section sign passes through unchanged."""
        text = CFRHTMLBuilder.section_space(' abc')
        self.assertEqual(text, ' abc')

    def test_modify_interp_node(self):
        """Section-level interp nodes split their children into header
        (per-paragraph interps) and paragraph children."""
        node = {
            'node_type': INTERP,
            'label': ['872', '22', 'Interp'],
            'children': [{'label': ['872', '22', 'Interp', '1']},
                         {'label': ['872', '22', 'a', 'Interp']},
                         {'label': ['872', '22', 'b', 'Interp']}]
        }
        builder = CFRHTMLBuilder()
        builder.modify_interp_node(node)
        self.assertTrue(node['section_header'])
        self.assertEqual(node['header_children'],
                         [{'label': ['872', '22', 'a', 'Interp']},
                          {'label': ['872', '22', 'b', 'Interp']}])
        self.assertEqual(node['par_children'],
                         [{'label': ['872', '22', 'Interp', '1']}])

        node['label'] = ['872', '222', 'a', 'Interp']
        builder.modify_interp_node(node)
        self.assertFalse(node['section_header'])

    def test_modify_interp_node_header(self):
        """Paragraph citations in the header become internal links."""
        node = {
            'children': [],
            'header': 'This interprets 22(a), a paragraph',
            'label': ['872', '22', 'a', 'Interp'],
            'node_type': INTERP,
        }
        icl = InternalCitationLayer(None)
        icl.sectional = True
        builder = CFRHTMLBuilder([icl])
        builder.modify_interp_node(node)
        header = 'This interprets {0}, a paragraph'.format(
            icl.render_url(['872', '22', 'a'], '22(a)'))
        self.assertEqual(header, node['header_markup'])

        node['label'] = ['872', '22']
        builder.modify_interp_node(node)
        self.assertEqual(node['header'], node['header_markup'])

    def test_process_node_title_section_space_diff(self):
        """" Diffs and sections spaces need to place nicely together. """
        builder = CFRHTMLBuilder()
        diff = {'204': {'title': [('delete', 7, 9), ('insert', 10, 'AAC')],
                        'text': [('delete', 0, 2), ('insert', 4, 'AAB')],
                        'op': ''}}
        da = diff_applier.DiffApplier(diff, None)
        node = {
            "label_id": u"204",
            "title": u"§ 101.6 abcd",
            'node_type': APPENDIX
        }
        builder.diff_applier = da
        builder.process_node_title(node)
        self.assertEqual(
            u'§ 101.6<del> a</del>b<ins>AAC</ins>cd', node['header'])

    def test_human_label(self):
        self.assertEqual(
            'Regulation 111', CFRHTMLBuilder.human_label({'label': ['111']}))
        self.assertEqual(
            u'§ 111.22(f)',
            CFRHTMLBuilder.human_label({'label': ['111', '22', 'f']}))
class PreambleHTMLBuilderTest(TestCase):
    """Unit tests for the preamble-specific HTML builder."""

    def setUp(self):
        self.builder = PreambleHTMLBuilder()

    def test_human_label(self):
        self.assertEqual(
            'FR #111_22',
            PreambleHTMLBuilder.human_label({'label': ['111_22']}))

        section = {'label': ['111_22', 'III', 'A', 'ii', '4'],
                   'indexes': [2, 0, 1, 3]}
        self.assertEqual('Section III.A.ii.4',
                         PreambleHTMLBuilder.human_label(section))

        paragraph = {'label': ['111_22', 'III', 'A', 'ii', '4', 'p3', 'p7'],
                     'indexes': [2, 0, 1, 3, 2, 4]}
        self.assertEqual('Section III.A.ii.4 Paragraph 3.5',
                         PreambleHTMLBuilder.human_label(paragraph))

    def test_accepts_comment(self):
        """All of the preamble can be commented on; only titled nodes are
        specifically called out."""
        plain = {'label': ['ABCD_123', 'II', 'B', 'p4'], 'text': 'Something',
                 'node_type': 'preamble', 'children': []}
        self.builder.process_node(plain)
        self.assertTrue(plain.get('accepts_comments'))
        self.assertFalse(plain.get('comments_calledout'))

        titled = {'title': 'B. Has a title', 'label': ['ABCD_123', 'II', 'B'],
                  'text': 'Something', 'node_type': 'preamble',
                  'children': []}
        self.builder.process_node(titled)
        self.assertTrue(titled.get('accepts_comments'))
        self.assertTrue(titled.get('comments_calledout'))
class CFRChangeHTMLBuilderTests(TestCase):
    """Unit tests for the builder used on CFR-change (diff) views."""

    def setUp(self):
        applier = DiffApplier({'111-22-a': {'op': 'deleted'}}, '111-22')
        self.builder = CFRChangeHTMLBuilder([], applier)

    def test_accepts_comment(self):
        """Only changed paragraphs accept comments; star collapsing
        depends on proximity to the diff."""
        node = {'label': ['111', '21', 'a'], 'text': 'Something',
                'node_type': 'regtext', 'children': []}
        # (label, accepts_comments, stars_collapse) per pass; the same
        # node dict is reprocessed each time, as a caller would.
        cases = [
            (['111', '21', 'a'], False, 'full'),
            (['111', '22'], False, 'inline'),
            (['111', '22', 'a', '5'], False, 'full'),
            (['111', '22', 'a'], True, 'none'),
        ]
        for label, accepts, stars in cases:
            node['label'] = label
            self.builder.process_node(node)
            self.assertEqual(bool(node.get('accepts_comments')), accepts)
            self.assertEqual(node.get('stars_collapse'), stars)

    def test_preprocess(self):
        applier = DiffApplier({'111-22-a': {'op': 'deleted'},
                               '111-33-b-5-v': {'op': 'deleted'}}, '111-22')
        builder = CFRChangeHTMLBuilder([], applier)
        self.assertEqual(
            builder.diff_paths,
            {('111',), ('111', '22'), ('111', '22', 'a'), ('111', '33'),
             ('111', '33', 'b'), ('111', '33', 'b', '5'),
             ('111', '33', 'b', '5', 'v')})
| cc0-1.0 | d676b101eac3c1d8f35e66064453aa2f | 35.726962 | 77 | 0.526531 | 3.65896 | false | true | false | false |
eregs/regulations-site | regulations/views/notice_home.py | 4 | 1559 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from operator import itemgetter
import logging
from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View
from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
notice_data, CommentState)
logger = logging.getLogger(__name__)
class NoticeHomeView(View):
    """
    Basic view that provides a list of regulations and notices to the context.
    """
    template_name = None  # We should probably have a default notice template.

    def get(self, request, *args, **kwargs):
        available = ApiReader().notices().get("results", [])
        notices_meta = []
        for notice in available:
            doc_number = notice.get("document_number")
            if not doc_number:
                continue
            try:
                _, meta, _ = notice_data(doc_number)
            except Http404:
                # Skip notices whose detail data is unavailable
                continue
            notices_meta.append(meta)
        notices_meta.sort(key=itemgetter("publication_date"), reverse=True)
        context = {
            "notices": notices_meta,
            # Django templates won't show contents of CommentState as an
            # Enum, so expose it as a plain name->value dict.
            "comment_state": {state.name: state.value
                              for state in CommentState},
        }
        return TemplateResponse(request=request, template=self.template_name,
                                context=context)
| cc0-1.0 | 7bd87b719bb9e11c2a9d31b0dbbd0c5b | 30.18 | 79 | 0.614496 | 4.531977 | false | false | false | false |
eregs/regulations-site | regulations/settings/base.py | 2 | 9029 | import os
from os.path import join, abspath, dirname
import tempfile
from distutils.spawn import find_executable
from django.utils.crypto import get_random_string
def here(*x):
    """Return a path made from ``x`` relative to this module's directory."""
    # PEP 8 (E731): use `def` rather than assigning a lambda to a name.
    return join(abspath(dirname(__file__)), *x)


PROJECT_ROOT = here("..", "..")


def root(*x):
    """Return a path made from ``x`` relative to the project root."""
    return join(abspath(PROJECT_ROOT), *x)
# NOTE(review): DEBUG defaults to True in this base settings module;
# production settings must override it to False.
DEBUG = True
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'regsite.db'
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE(review): "*" accepts any Host header; production settings should
# restrict this to the real domain(s).
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
if 'TMPDIR' in os.environ:
    STATIC_ROOT = os.environ['TMPDIR'] + '/static/'
else:
    STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# NOTE(review): when DJANGO_SECRET_KEY is unset, a fresh random key is
# generated on every process start, which invalidates existing sessions and
# signed cookies and breaks multi-process deployments. Set the env var in
# any real deployment.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', get_random_string(50))
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "OPTIONS": {
            "context_processors": (
                # "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.request",
                "regulations.context.eregs_globals",
            ),
            # List of callables that know how to import templates from various
            # sources. The cached loader keeps compiled templates in memory,
            # so template edits require a process restart.
            "loaders": [
                ('django.template.loaders.cached.Loader', (
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader'))
            ],
        }
    },
]
# Order from
# https://docs.djangoproject.com/en/1.9/ref/middleware/#middleware-ordering
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name; newer
# Django versions expect MIDDLEWARE instead — confirm the targeted version.
MIDDLEWARE_CLASSES = (
    'django.middleware.security.SecurityMiddleware',
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
)
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
ROOT_URLCONF = 'regulations.urls'
INSTALLED_APPS = (
    # Note: no admin
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'regulations.apps.RegulationsConfig',
)
# eregs specific settings
# The base URL for the API that we use to access layers and the regulation.
API_BASE = os.environ.get('EREGS_API_BASE', '')
DATE_FORMAT = 'n/j/Y'
# Analytics settings
ANALYTICS = {
    'GOOGLE': {
        'GTM_SITE_ID': '',
        'GA_SITE_ID': '',
    },
    'DAP': {
        'AGENCY': '',
        'SUBAGENCY': '',
    },
}
# Use the 'source' directory; useful for development
JS_DEBUG = False
# NOTE(review): tempfile.mkdtemp() runs at import time, so every process gets
# its own private cache directory — caches are not shared between workers or
# across restarts. Fine for development; override for production.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': tempfile.mkdtemp('eregs_cache'),
    },
    'eregs_longterm_cache': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': tempfile.mkdtemp('eregs_longterm_cache'),
        'TIMEOUT': 60*60*24*15,  # 15 days
        'OPTIONS': {
            'MAX_ENTRIES': 10000,
        },
    },
    'api_cache': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'api_cache_memory',
        'TIMEOUT': 3600,
        'OPTIONS': {
            'MAX_ENTRIES': 1000,
        },
    },
    'regs_gov_cache': {
        # Dummy cache: regulations.gov lookups are NOT cached by default.
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_KEY_PREFIX = 'eregs'
CACHE_MIDDLEWARE_SECONDS = 600
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Where should we look for data?
# Each entry is a dotted path to a layer class applied when rendering
# regulation content.
DATA_LAYERS = (
    'regulations.generator.layers.defined.DefinedLayer',
    'regulations.generator.layers.definitions.DefinitionsLayer',
    'regulations.generator.layers.external_citation.ExternalCitationLayer',
    'regulations.generator.layers.footnotes.FootnotesLayer',
    'regulations.generator.layers.formatting.FormattingLayer',
    'regulations.generator.layers.internal_citation.InternalCitationLayer',
    # Should likely be moved to a CFPB-specific module
    'regulations.generator.layers.interpretations.InterpretationsLayer',
    'regulations.generator.layers.key_terms.KeyTermsLayer',
    'regulations.generator.layers.meta.MetaLayer',
    'regulations.generator.layers.paragraph_markers.MarkerHidingLayer',
    'regulations.generator.layers.paragraph_markers.MarkerInfoLayer',
    'regulations.generator.layers.toc_applier.TableOfContentsLayer',
    'regulations.generator.layers.graphics.GraphicsLayer',
)
SIDEBARS = (
    'regulations.generator.sidebar.analyses.Analyses',
    'regulations.generator.sidebar.help.Help',
    'regulations.generator.sidebar.print_part.PrintPart',
)
# S3 credentials for comment-attachment storage; unset outside deployments.
ATTACHMENT_BUCKET = os.getenv('S3_BUCKET')
ATTACHMENT_ACCESS_KEY_ID = os.getenv('S3_ACCESS_KEY_ID')
ATTACHMENT_SECRET_ACCESS_KEY = os.getenv('S3_SECRET_ACCESS_KEY')
# regulations.gov restrictions
ATTACHMENT_MAX_SIZE = 1024 * 1024 * 10  # 10 MiB per attachment
ATTACHMENT_PREVIEW_PREFIX = 'preview'
VALID_ATTACHMENT_EXTENSIONS = set([
    "bmp", "doc", "xls", "pdf", "gif", "htm", "html", "jpg", "jpeg",
    "png", "ppt", "rtf", "sgml", "tiff", "tif", "txt", "wpd", "xml",
    "docx", "xlsx", "pptx"])
MAX_ATTACHMENT_COUNT = 10
REGS_GOV_API_URL = os.environ.get('REGS_GOV_API_URL')
REGS_GOV_API_KEY = os.environ.get('REGS_GOV_API_KEY')
COMMENT_DOCUMENT_ID = os.getenv('DOCUMENT_ID')
# Fall back to whatever wkhtmltopdf binary is on PATH.
WKHTMLTOPDF_PATH = os.getenv('WKHTMLTOPDF_PATH',
                             find_executable('wkhtmltopdf'))
| cc0-1.0 | d821b30062456a22f16bf0b0b28b8113 | 31.952555 | 79 | 0.671392 | 3.597211 | false | false | false | false |
eregs/regulations-site | regulations/urls.py | 2 | 8520 | from django.conf.urls import url
from regulations.url_caches import daily_cache, lt_cache
from regulations.views.about import about
from regulations.views.chrome_breakaway import ChromeSXSView
from regulations.views.chrome import (
ChromeView, ChromeLandingView,
ChromeSearchView,
ChromeSubterpView)
from regulations.views.diff import ChromeSectionDiffView
from regulations.views.diff import PartialSectionDiffView
from regulations.views.partial import PartialDefinitionView
from regulations.views.partial import PartialParagraphView
from regulations.views.partial import PartialRegulationView, PartialSectionView
from regulations.views import partial_interp
from regulations.views.partial_search import PartialSearch
from regulations.views.partial_sxs import ParagraphSXSView
from regulations.views.preamble import (
CFRChangesView, PreambleView, ChromePreambleSearchView
)
from regulations.views.redirect import (
diff_redirect,
redirect_by_current_date,
redirect_by_date,
redirect_by_date_get
)
from regulations.views.sidebar import SideBarView
from regulations.views.universal_landing import universal
# Re-usable URL patterns.
# NOTE(review): inside a character class, `\w` already matches digits and
# underscore, so `[-\d\w_]` is equivalent to `[-\w]`; kept as-is for history.
meta_version = r'(?P<%s>[-\d\w_]+)'
version_pattern = meta_version % 'version'
newer_version_pattern = meta_version % 'newer_version'
notice_pattern = meta_version % 'notice_id'
# Whole regulation, e.g. "201"
reg_pattern = r'(?P<label_id>[\d]+)'
preamble_pattern = r'(?P<label_id>[\w]+)'
# Section, e.g. "201-4"
section_pattern = r'(?P<label_id>[\d]+[-][\w]+)'
# Interpretation label, e.g. "201-4-Interp"
interp_pattern = r'(?P<label_id>[-\w]+[-]Interp)'
# Most general label form, e.g. "201-2-g"
paragraph_pattern = r'(?P<label_id>[-\w]+)'
# Subpart/appendix interpretation, e.g. "201-Subpart-A-Interp"
subterp_pattern = r'(?P<label_id>[\d]+-(Appendices|Subpart(-[A-Z]+)?)-Interp)'
# Pattern order matters: more specific patterns (e.g. subterp, interp) must
# precede the general paragraph pattern, which would otherwise match them.
# NOTE(review): two entries below share name='partial_search'; `reverse()` on
# that name resolves to only one of them — confirm this is intended.
urlpatterns = [
    url(r'^$', universal, name='universal_landing'),
    # about page
    url(r'^about$', about, name='about'),
    # Redirect to version by date (by GET)
    # Example http://.../regulation_redirect/201-3-v
    url(r'^regulation_redirect/%s$' % paragraph_pattern, redirect_by_date_get,
        name='redirect_by_date_get'),
    # Redirect to a diff based on GET params
    # Example http://.../diff_redirect/201-3/old_version?new_version=new
    url(r'^diff_redirect/%s/%s$' % (section_pattern, version_pattern),
        diff_redirect, name='diff_redirect'),
    # A section by section paragraph with chrome
    # Example: http://.../sxs/201-2-g/2011-1738
    url(r'^sxs/%s/%s$' % (paragraph_pattern, notice_pattern),
        lt_cache(ChromeSXSView.as_view()),
        name='chrome_sxs_view'),
    # Search results for non-JS viewers
    # Example: http://.../search?q=term&version=2011-1738
    url(r'^search(?:/cfr)?/%s$' % reg_pattern,
        ChromeSearchView.as_view(), name='chrome_search',
        kwargs={'doc_type': 'cfr'}),
    url(r'^search/preamble/%s$' % preamble_pattern,
        ChromePreambleSearchView.as_view(), name='chrome_search_preamble',
        kwargs={'doc_type': 'preamble'}),
    # Diff view of a section for non-JS viewers (or book markers)
    # Example: http://.../diff/201-4/2011-1738/2013-10704
    url(r'^diff/%s/%s/%s$' %
        (section_pattern, version_pattern, newer_version_pattern),
        lt_cache(ChromeSectionDiffView.as_view()),
        name='chrome_section_diff_view'),
    url(r'^preamble/(?P<doc_number>[\w-]+)/cfr_changes/(?P<section>[\w-]+)$',
        daily_cache(CFRChangesView.as_view()), name='cfr_changes'),
    url(r'^preamble/(?P<paragraphs>[-\w]+(/[-\w]+)*)$',
        daily_cache(PreambleView.as_view()), name='chrome_preamble'),
    # Redirect to version by date
    # Example: http://.../201-3-v/1999/11/8
    url(r'^%s/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})$'
        % paragraph_pattern, redirect_by_date, name='redirect_by_date'),
    # Redirect to version by current date
    # Example: http://.../201-3-v/CURRENT
    url(r'^%s/CURRENT$' % paragraph_pattern,
        redirect_by_current_date, name='redirect_by_current_date'),
    # A regulation section with chrome
    # Example: http://.../201-4/2013-10704
    url(r'^%s/%s$' % (section_pattern, version_pattern),
        lt_cache(ChromeView.as_view(partial_class=PartialSectionView)),
        name='chrome_section_view'),
    # Subterp, interpretations of a whole subpart, emptypart or appendices
    # Example: http://.../201-Subpart-A-Interp/2013-10706
    #          http://.../201-Subpart-Interp/2013-10706
    #          http://.../201-Appendices-Interp/2013-10706
    url(r'^%s/%s$' % (subterp_pattern, version_pattern),
        lt_cache(ChromeSubterpView.as_view()),
        name=ChromeSubterpView.version_switch_view),
    # Interpretation of a section/paragraph or appendix
    # Example: http://.../201-4-Interp/2013-10704
    url(r'^%s/%s$' % (interp_pattern, version_pattern),
        lt_cache(ChromeView.as_view(
            partial_class=partial_interp.PartialInterpView)),
        name='chrome_interp_view'),
    # The whole regulation with chrome
    # Example: http://.../201/2013-10704
    url(r'^%s/%s$' % (reg_pattern, version_pattern),
        lt_cache(ChromeView.as_view(
            partial_class=PartialRegulationView,
            version_switch_view='chrome_regulation_view')),
        name='chrome_regulation_view'),
    # A regulation paragraph with chrome
    # Example: http://.../201-2-g/2013-10704
    url(r'^%s/%s$' % (paragraph_pattern, version_pattern),
        lt_cache(ChromeView.as_view(
            partial_class=PartialParagraphView,
            version_switch_view='chrome_paragraph_view')),
        name='chrome_paragraph_view'),
    # A regulation landing page
    # Example: http://.../201
    url(r'^%s$' % reg_pattern, ChromeLandingView.as_view(),
        name='regulation_landing_view'),
    # Load just the sidebar
    # Example: http://.../partial/sidebar/201-2/2013-10704
    url(r'^partial/sidebar/%s/%s$' % (paragraph_pattern, version_pattern),
        SideBarView.as_view(),
        name='sidebar'),
    # Load just search results
    url(r'^partial/search(?:/cfr)?/%s$' % reg_pattern,
        PartialSearch.as_view(), name='partial_search',
        kwargs={'doc_type': 'cfr'}),
    url(r'^partial/search/preamble/%s$' % preamble_pattern,
        PartialSearch.as_view(), name='partial_search',
        kwargs={'doc_type': 'preamble'}),
    # A diff view of a section (without chrome)
    url(r'^partial/diff/%s/%s/%s$' % (
        section_pattern, version_pattern, newer_version_pattern),
        lt_cache(PartialSectionDiffView.as_view()),
        name='partial_section_diff_view'),
    # A section by section paragraph (without chrome)
    # Example: http://.../partial/sxs/201-2-g/2011-1738
    url(r'^partial/sxs/%s/%s$' % (paragraph_pattern, notice_pattern),
        lt_cache(ParagraphSXSView.as_view()),
        name='paragraph_sxs_view'),
    # A definition templated to be displayed in the sidebar (without chrome)
    # Example: http://.../partial/definition/201-2-g/2011-1738
    url(r'^partial/definition/%s/%s$' % (paragraph_pattern, version_pattern),
        lt_cache(PartialDefinitionView.as_view()),
        name='partial_definition_view'),
    # A regulation section without chrome
    # Example: http://.../partial/201-4/2013-10704
    url(r'^partial/%s/%s$' % (section_pattern, version_pattern),
        lt_cache(PartialSectionView.as_view()),
        name='partial_section_view'),
    # Subterp, interpretations of a whole subpart, emptypart or appendices
    # Example: http://.../partial/201-Subpart-A-Interp/2013-10706
    #          http://.../partial/201-Subpart-Interp/2013-10706
    #          http://.../partial/201-Appendices-Interp/2013-10706
    url(r'^partial/%s/%s$' % (subterp_pattern, version_pattern),
        lt_cache(partial_interp.PartialSubterpView.as_view()),
        name='partial_subterp_view'),
    # An interpretation of a section/paragraph or appendix without chrome.
    # Example: http://.../partial/201-2-Interp/2013-10704
    url(r'^partial/%s/%s$' % (interp_pattern, version_pattern),
        lt_cache(partial_interp.PartialInterpView.as_view()),
        name='partial_interp_view'),
    # The whole regulation without chrome; not too useful; added for symmetry
    # Example: http://.../partial/201/2013-10704
    url(r'^partial/%s/%s$' % (reg_pattern, version_pattern),
        lt_cache(PartialRegulationView.as_view()),
        name='partial_regulation_view'),
    # A regulation paragraph without chrome.
    # Example: http://.../partial/201-2-g/2013-10704
    url(r'^partial/%s/%s$' % (paragraph_pattern, version_pattern),
        lt_cache(PartialParagraphView.as_view()),
        name='partial_paragraph_view'),
]
| cc0-1.0 | ad6c661d95161fa91db2ecd5cad8e46c | 45.813187 | 79 | 0.65892 | 3.368921 | false | false | false | false |
eregs/regulations-site | notice_comment/views.py | 2 | 7914 | import os
import json
import time
import logging
import celery
from django.conf import settings
from django.core.cache import caches
from django.shortcuts import redirect
from django.http import Http404, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.template.response import TemplateResponse
from django.utils.crypto import get_random_string
from django.views.generic.base import View
import requests
from notice_comment import docket, tasks
from regulations.views.preamble import (
common_context, CommentState, generate_html_tree, first_preamble_section,
notice_data)
logger = logging.getLogger(__name__)
# TODO: Expire preview URL at commenting deadline
PREVIEW_EXPIRATION_SECONDS = 60 * 60 * 24 * 90  # 90 days


def upload_proxy(request):
    """Create a random key name and a pair of temporary PUT and GET URLS to
    permit attachment uploads and previews from the browser.

    Expects ``name`` and ``size`` query parameters; responds 400 (rather than
    crashing with a 500) when they are missing or malformed.
    """
    filename = request.GET.get('name', '')
    try:
        size = int(request.GET.get('size', ''))
    except ValueError:
        # Previously a missing/non-integer `size` raised and produced a 500.
        message = "Invalid attachment size"
        logger.error(message)
        return JsonResponse({'message': message}, status=400)
    valid, message = validate_attachment(filename, size)
    if not valid:
        logger.error(message)
        return JsonResponse({'message': message}, status=400)
    # Pre-signed PUT: the browser uploads directly to S3.
    key, put_url = tasks.SignedUrl.generate(
        method='put_object',
        params={
            'ContentLength': size,
            'ContentType': request.GET.get('type', 'application/octet-stream'),
            'Metadata': {'name': filename},
        },
    )
    disposition = 'attachment; filename="{}"'.format(filename)
    # Pre-signed GET for previewing the uploaded attachment.
    _, get_url = tasks.SignedUrl.generate(
        key=key,
        params={
            'ResponseExpires': time.time() + PREVIEW_EXPIRATION_SECONDS,
            'ResponseContentDisposition': disposition,
        },
    )
    return JsonResponse({
        'urls': {'get': get_url, 'put': put_url},
        'key': key,
    })
@csrf_exempt
@require_http_methods(['POST'])
def preview_comment(request):
    """Convert a comment to PDF, upload the result to S3, and return a signed
    URL to GET the PDF.
    """
    # Request body is JSON: {"assembled_comment": [...sections...]}
    body = json.loads(request.body.decode('utf-8'))
    sections = body.get('assembled_comment', [])
    html = tasks.json_to_html(sections, mark_as_draft=True)
    # Random, unguessable S3 key under the preview prefix.
    key = '/'.join([settings.ATTACHMENT_PREVIEW_PREFIX, get_random_string(50)])
    document_number = tasks.get_document_number(sections)
    content_disposition = tasks.generate_content_disposition(
        document_number, draft=True)
    # Render HTML -> PDF, then upload the PDF bytes to S3.
    with tasks.html_to_pdf(html) as pdf:
        tasks.s3_client.put_object(
            Body=pdf,
            ContentType='application/pdf',
            ContentDisposition=content_disposition,
            Bucket=settings.ATTACHMENT_BUCKET,
            Key=key,
        )
    # Signed GET URL so the browser can fetch the preview.
    _, url = tasks.SignedUrl.generate(key=key)
    return JsonResponse({'url': url})
# Template for linking out to the docket on regulations.gov.
regs_gov_fmt = 'https://www.regulations.gov/#!documentDetail;D={document}'


class SubmitCommentView(View):
    """Handle submission of an assembled comment on a notice's preamble."""

    def get(self, request, doc_number):
        """Redirect GETs to the first section of the preamble."""
        preamble, _, _ = notice_data(doc_number)
        section = first_preamble_section(preamble)
        if section is None:
            raise Http404
        return redirect(
            'chrome_preamble', paragraphs='/'.join(section['label']))

    def post(self, request, doc_number):
        """Validate and enqueue the comment, then render a confirmation."""
        # Everything except the serialized comment is regs.gov form metadata.
        form_data = {
            key: value for key, value in request.POST.items()
            if key != 'comments'
        }
        comments = json.loads(request.POST.get('comments', '[]'))
        context = common_context(doc_number)
        if context['meta']['comment_state'] != CommentState.OPEN:
            raise Http404("Cannot comment on {}".format(doc_number))
        context.update(generate_html_tree(context['preamble'], request,
                                          id_prefix=[doc_number, 'preamble']))
        context['comment_mode'] = 'write'
        context.update({'message': '', 'metadata_url': ''})
        # NOTE(review): `valid` is never consulted — the comment is enqueued
        # below even when validation fails; confirm this is intentional.
        valid, context['message'] = self.validate(comments, form_data)
        context['regs_gov_url'] = regs_gov_fmt.format(
            document=settings.COMMENT_DOCUMENT_ID)
        # Catch any errors related to enqueueing comment submission. Because
        # this step can fail for many reasons (e.g. no connection to broker,
        # broker fails to write, etc.), catch `Exception`.
        try:
            _, context['metadata_url'] = self.enqueue(comments, form_data)
        except Exception as exc:
            logger.exception(exc)
        template = 'notice_comment/confirm-chrome.html'
        return TemplateResponse(request=request, template=template,
                                context=context)

    def validate(self, comments, form_data):
        """Return ``(valid, message)`` for the form data and attachments."""
        valid, message = docket.sanitize_fields(form_data)
        if not valid:
            logger.error(message)
            return valid, message
        files = tasks.extract_files(comments)
        # Account for the main comment itself submitted as an attachment
        if len(files) > settings.MAX_ATTACHMENT_COUNT - 1:
            message = "Too many attachments"
            logger.error(message)
            return False, message
        return True, ''

    def enqueue(self, comments, form_data):
        """Queue async submission + tracking-number publication via celery."""
        metadata_url = tasks.SignedUrl.generate()
        chain = celery.chain(
            tasks.submit_comment.s(comments, form_data, metadata_url),
            tasks.publish_tracking_number.s(metadata_url=metadata_url),
        )
        chain.delay()
        return metadata_url
@require_http_methods(['GET', 'HEAD'])
def get_federal_agencies(request):
    """Return regulations.gov lookup values for federal agencies."""
    return lookup_regulations_gov(
        field='gov_agency', dependentOnValue='Federal')
@require_http_methods(['GET', 'HEAD'])
def get_gov_agency_types(request):
    """Return regulations.gov lookup values for agency types."""
    return lookup_regulations_gov(field='gov_agency_type')
def lookup_regulations_gov(*args, **kwargs):
    """GET lookup values from regulations.gov. Use a cache.

    ``kwargs`` become both the request parameters and the cache key. On a
    non-OK upstream response, logs the failure and raises via
    ``Response.raise_for_status``.
    """
    cache = caches['regs_gov_cache']
    cache_key = make_cache_key(**kwargs)
    response = cache.get(cache_key)
    if response is None:
        logger.debug("Looking up in regs.gov")
        response = requests.get(
            settings.REGS_GOV_API_LOOKUP_URL,
            params=kwargs,
            headers={'X-Api-Key': settings.REGS_GOV_API_KEY}
        )
        if response.status_code == requests.codes.ok:
            response = JsonResponse(response.json()['list'], safe=False)
            cache.set(cache_key, response)
        else:
            # Bug fix: logging uses %-style placeholders; the previous "{}"
            # placeholder was never interpolated and the second argument
            # was dropped from the message.
            logger.error("Failed to lookup regulations.gov: %s %s",
                         response.status_code, response.text)
            response.raise_for_status()
    return response
def validate_attachment(filename, size):
    """Return ``(valid, message)`` for a proposed attachment upload.

    The size must be positive and within the configured maximum; the file
    extension must be on the regulations.gov whitelist.
    """
    if not 0 < size <= settings.ATTACHMENT_MAX_SIZE:
        return False, "Invalid attachment size"
    extension = os.path.splitext(filename)[1][1:].lower()
    if extension not in settings.VALID_ATTACHMENT_EXTENSIONS:
        return False, "Invalid attachment type"
    return True, ""
def make_cache_key(*args, **kwargs):
    """Build a deterministic cache key of the form ``k1:v1:k2:v2``.

    Keyword names are sorted so that the same kwargs always yield the
    same key, regardless of call order.
    """
    parts = []
    for name in sorted(kwargs):
        parts.append(name)
        parts.append(str(kwargs[name]))
    return ":".join(parts)
class PrepareCommentView(View):
    """Render the comment-review page for an open notice."""

    def get(self, request, doc_number):
        context = common_context(doc_number)
        # Commenting is only possible while the comment period is open.
        if context['meta']['comment_state'] != CommentState.OPEN:
            raise Http404("Cannot comment on {}".format(doc_number))
        context.update(generate_html_tree(context['preamble'], request,
                                          id_prefix=[doc_number, 'preamble']))
        context['comment_mode'] = 'write'
        # Field definitions come from the regulations.gov document.
        context['comment_fields'] = docket.safe_get_document_fields(
            settings.COMMENT_DOCUMENT_ID)
        template = 'notice_comment/review-chrome.html'
        return TemplateResponse(request=request, template=template,
                                context=context)
odlgroup/odl | odl/tomo/analytic/filtered_back_projection.py | 2 | 21537 | # coding: utf-8
# Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import print_function, division, absolute_import
import numpy as np
from odl.discr import ResizingOperator
from odl.trafos import FourierTransform, PYFFTW_AVAILABLE
__all__ = ('fbp_op', 'fbp_filter_op', 'tam_danielson_window',
'parker_weighting')
def _axis_in_detector(geometry):
"""A vector in the detector plane that points along the rotation axis."""
du, dv = geometry.det_axes_init
axis = geometry.axis
c = np.array([np.vdot(axis, du), np.vdot(axis, dv)])
cnorm = np.linalg.norm(c)
# Check for numerical errors
assert cnorm != 0
return c / cnorm
def _rotation_direction_in_detector(geometry):
"""A vector in the detector plane that points in the rotation direction."""
du, dv = geometry.det_axes_init
axis = geometry.axis
det_normal = np.cross(dv, du)
rot_dir = np.cross(axis, det_normal)
c = np.array([np.vdot(rot_dir, du), np.vdot(rot_dir, dv)])
cnorm = np.linalg.norm(c)
# Check for numerical errors
assert cnorm != 0
return c / cnorm
def _fbp_filter(norm_freq, filter_type, frequency_scaling):
"""Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8)
"""
filter_type, filter_type_in = str(filter_type).lower(), filter_type
if callable(filter_type):
filt = filter_type(norm_freq)
elif filter_type == 'ram-lak':
filt = np.copy(norm_freq)
elif filter_type == 'shepp-logan':
filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling))
elif filter_type == 'cosine':
filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling))
elif filter_type == 'hamming':
filt = norm_freq * (
0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling)))
elif filter_type == 'hann':
filt = norm_freq * (
np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2)
else:
raise ValueError('unknown `filter_type` ({})'
''.format(filter_type_in))
indicator = (norm_freq <= frequency_scaling)
filt *= indicator
return filt
def tam_danielson_window(ray_trafo, smoothing_width=0.05, n_pi=1):
    """Create Tam-Danielson window from a `RayTransform`.

    The Tam-Danielson window is an indicator function on the minimal set of
    data needed to reconstruct a volume from given data. It is useful in
    analytic reconstruction methods such as FBP to give a more accurate
    reconstruction.

    See [TAM1998] for more information on the window.

    See [PKGT2000] for information on the ``n_pi`` parameter.

    Parameters
    ----------
    ray_trafo : `RayTransform`
        The ray transform for which to compute the window.
    smoothing_width : positive float, optional
        Width of the smoothing applied to the window's edges given as a
        fraction of the width of the full window.
    n_pi : odd int, optional
        Total number of half rotations to include in the window. Values larger
        than 1 should be used if the pitch is much smaller than the detector
        height.

    Returns
    -------
    tam_danielson_window : ``ray_trafo.range`` element

    See Also
    --------
    fbp_op : Filtered back-projection operator from `RayTransform`
    tam_danielson_window : Weighting for short scan data
    odl.tomo.geometry.conebeam.ConeBeamGeometry :
        Primary use case for this window function.

    References
    ----------
    [TSS1998] Tam, K C, Samarasekera, S and Sauer, F.
    *Exact cone beam CT with a spiral scan*.
    Physics in Medicine & Biology 4 (1998), p 1015.
    https://dx.doi.org/10.1088/0031-9155/43/4/028

    [PKGT2000] Proksa R, Köhler T, Grass M, Timmer J.
    *The n-PI-method for helical cone-beam CT*
    IEEE Trans Med Imaging. 2000 Sep;19(9):848-63.
    https://www.ncbi.nlm.nih.gov/pubmed/11127600
    """
    # Extract parameters
    src_radius = ray_trafo.geometry.src_radius
    det_radius = ray_trafo.geometry.det_radius
    pitch = ray_trafo.geometry.pitch
    # The window only makes sense for helical (nonzero-pitch) trajectories.
    if pitch == 0:
        raise ValueError('Tam-Danielson window is only defined with '
                         '`pitch!=0`')
    smoothing_width = float(smoothing_width)
    if smoothing_width < 0:
        raise ValueError('`smoothing_width` should be a positive float')
    if n_pi % 2 != 1:
        raise ValueError('`n_pi` must be odd, got {}'.format(n_pi))
    # Find projection of axis on detector
    axis_proj = _axis_in_detector(ray_trafo.geometry)
    rot_dir = _rotation_direction_in_detector(ray_trafo.geometry)
    # Find distance from projection of rotation axis for each pixel
    dx = (rot_dir[0] * ray_trafo.range.meshgrid[1]
          + rot_dir[1] * ray_trafo.range.meshgrid[2])
    # Rescale detector offsets back to the rotation axis (magnification).
    dx_axis = dx * src_radius / (src_radius + det_radius)
    # Vn(u): axial half-height of the n-pi window at transaxial offset `u`,
    # measured at the rotation axis (cf. the n-PI window in [PKGT2000]).
    def Vn(u):
        return (pitch / (2 * np.pi)
                * (1 + (u / src_radius) ** 2)
                * (n_pi * np.pi / 2.0 - np.arctan(u / src_radius)))
    lower_proj_axis = -Vn(dx_axis)
    upper_proj_axis = Vn(-dx_axis)
    # Magnify the window bounds from the axis out to the detector.
    lower_proj = lower_proj_axis * (src_radius + det_radius) / src_radius
    upper_proj = upper_proj_axis * (src_radius + det_radius) / src_radius
    # Compute a smoothed width
    interval = (upper_proj - lower_proj)
    width = interval * smoothing_width / np.sqrt(2)
    # Create window function
    def window_fcn(x):
        # Lazy import to improve `import odl` time
        import scipy.special
        x_along_axis = axis_proj[0] * x[1] + axis_proj[1] * x[2]
        if smoothing_width != 0:
            # Smooth edges via the error function instead of a hard cutoff.
            lower_wndw = 0.5 * (
                1 + scipy.special.erf((x_along_axis - lower_proj) / width))
            upper_wndw = 0.5 * (
                1 + scipy.special.erf((upper_proj - x_along_axis) / width))
        else:
            lower_wndw = (x_along_axis >= lower_proj)
            upper_wndw = (x_along_axis <= upper_proj)
        return lower_wndw * upper_wndw
    # Divide by n_pi to normalize the n_pi-fold data redundancy.
    return ray_trafo.range.element(window_fcn) / n_pi
def parker_weighting(ray_trafo, q=0.25):
    """Create parker weighting for a `RayTransform`.

    Parker weighting is a weighting function that ensures that oversampled
    fan/cone beam data are weighted such that each line has unit weight. It is
    useful in analytic reconstruction methods such as FBP to give a more
    accurate result and can improve convergence rates for iterative methods.

    See the article `Parker weights revisited`_ for more information.

    Parameters
    ----------
    ray_trafo : `RayTransform`
        The ray transform for which to compute the weights.
    q : float, optional
        Parameter controlling the speed of the roll-off at the edges of the
        weighting. 1.0 gives the classical Parker weighting, while smaller
        values in general lead to lower noise but stronger discretization
        artifacts.

    Returns
    -------
    parker_weighting : ``ray_trafo.range`` element

    Raises
    ------
    ValueError
        If the geometry is helical (nonzero pitch) or the angular range is
        too small for the weighting to be defined.

    See Also
    --------
    fbp_op : Filtered back-projection operator from `RayTransform`
    tam_danielson_window : Indicator function for helical data
    odl.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d
    odl.tomo.geometry.conebeam.ConeBeamGeometry : Use case in 3d (for pitch 0)

    References
    ----------
    .. _Parker weights revisited: https://www.ncbi.nlm.nih.gov/pubmed/11929021
    """
    # Note: Parameter names taken from WES2002
    # Extract parameters
    src_radius = ray_trafo.geometry.src_radius
    det_radius = ray_trafo.geometry.det_radius
    ndim = ray_trafo.geometry.ndim
    angles = ray_trafo.range.meshgrid[0]
    min_rot_angle = ray_trafo.geometry.motion_partition.min_pt
    alen = ray_trafo.geometry.motion_params.length
    # Parker weightings are not defined for helical geometries
    if ray_trafo.geometry.ndim != 2:
        pitch = ray_trafo.geometry.pitch
        if pitch != 0:
            raise ValueError('Parker weighting window is only defined with '
                             '`pitch==0`')
    # Find distance from projection of rotation axis for each pixel
    if ndim == 2:
        dx = ray_trafo.range.meshgrid[1]
    elif ndim == 3:
        # Find projection of axis on detector
        rot_dir = _rotation_direction_in_detector(ray_trafo.geometry)
        # If axis is aligned to a coordinate axis, save some memory and time by
        # using broadcasting
        if rot_dir[0] == 0:
            dx = rot_dir[1] * ray_trafo.range.meshgrid[2]
        elif rot_dir[1] == 0:
            dx = rot_dir[0] * ray_trafo.range.meshgrid[1]
        else:
            dx = (rot_dir[0] * ray_trafo.range.meshgrid[1]
                  + rot_dir[1] * ray_trafo.range.meshgrid[2])
    # Compute parameters
    dx_abs_max = np.max(np.abs(dx))
    max_fan_angle = 2 * np.arctan2(dx_abs_max, src_radius + det_radius)
    delta = max_fan_angle / 2
    epsilon = alen - np.pi - max_fan_angle
    if epsilon < 0:
        # Fix: raise a specific exception type instead of bare `Exception`
        # (still caught by callers handling `Exception`).
        raise ValueError('data not sufficiently sampled for parker weighting')
    # Define utility functions
    def S(betap):
        # Smooth ramp from 0 to 1 over betap in [-0.5, 0.5].
        return (0.5 * (1.0 + np.sin(np.pi * betap)) * (np.abs(betap) < 0.5)
                + (betap >= 0.5))
    def b(alpha):
        return q * (2 * delta - 2 * alpha + epsilon)
    # Create weighting function
    beta = np.asarray(angles - min_rot_angle,
                      dtype=ray_trafo.range.dtype)  # rotation angle
    alpha = np.asarray(np.arctan2(dx, src_radius + det_radius),
                       dtype=ray_trafo.range.dtype)
    # Compute sum in place to save memory
    S_sum = S(beta / b(alpha) - 0.5)
    S_sum += S((beta - 2 * delta + 2 * alpha - epsilon) / b(alpha) + 0.5)
    S_sum -= S((beta - np.pi + 2 * alpha) / b(-alpha) - 0.5)
    S_sum -= S((beta - np.pi - 2 * delta - epsilon) / b(-alpha) + 0.5)
    scale = 0.5 * alen / np.pi
    return ray_trafo.range.element(
        np.broadcast_to(S_sum * scale, ray_trafo.range.shape))
def fbp_filter_op(ray_trafo, padding=True, filter_type='Ram-Lak',
                  frequency_scaling=1.0):
    """Create a filter operator for FBP from a `RayTransform`.

    Parameters
    ----------
    ray_trafo : `RayTransform`
        The ray transform (forward operator) whose approximate inverse should
        be computed. Its geometry has to be any of the following

        `Parallel2dGeometry` : Exact reconstruction

        `Parallel3dAxisGeometry` : Exact reconstruction

        `FanBeamGeometry` : Approximate reconstruction, correct in limit of
        fan angle = 0.
        Only flat detectors are supported (det_curvature_radius is None).

        `ConeBeamGeometry`, pitch = 0 (circular) : Approximate reconstruction,
        correct in the limit of fan angle = 0 and cone angle = 0.

        `ConeBeamGeometry`, pitch > 0 (helical) : Very approximate unless a
        `tam_danielson_window` is used. Accurate with the window.

        Other geometries: Not supported
    padding : bool, optional
        If the data space should be zero padded. Without padding, the data may
        be corrupted due to the circular convolution used. Using padding makes
        the algorithm slower.
    filter_type : optional
        The type of filter to be used.
        The predefined options are, in approximate order from most noise
        sensitive to least noise sensitive:
        ``'Ram-Lak'``, ``'Shepp-Logan'``, ``'Cosine'``, ``'Hamming'`` and
        ``'Hann'``.
        A callable can also be provided. It must take an array of values in
        [0, 1] and return the filter for these frequencies.
    frequency_scaling : float, optional
        Relative cutoff frequency for the filter.
        The normalized frequencies are rescaled so that they fit into the range
        [0, frequency_scaling]. Any frequency above ``frequency_scaling`` is
        set to zero.

    Returns
    -------
    filter_op : `Operator`
        Filtering operator for FBP based on ``ray_trafo``.

    See Also
    --------
    tam_danielson_window : Windowing for helical data
    """
    # Prefer the pyfftw backend for the FFTs when it is installed
    impl = 'pyfftw' if PYFFTW_AVAILABLE else 'numpy'
    # Length of the angular (motion) parameter interval
    alen = ray_trafo.geometry.motion_params.length
    if ray_trafo.domain.ndim == 2:
        # Define ramp filter
        def fourier_filter(x):
            # `x[1]` is the frequency variable along the detector axis
            abs_freq = np.abs(x[1])
            norm_freq = abs_freq / np.max(abs_freq)
            filt = _fbp_filter(norm_freq, filter_type, frequency_scaling)
            scaling = 1 / (2 * alen)
            return filt * np.max(abs_freq) * scaling
        # Define (padded) fourier transform
        if padding:
            # Define padding operator; zero-pad the detector axis to
            # (almost) double length to avoid circular-convolution wrap-around
            ran_shp = (ray_trafo.range.shape[0],
                       ray_trafo.range.shape[1] * 2 - 1)
            resizing = ResizingOperator(ray_trafo.range, ran_shp=ran_shp)
            fourier = FourierTransform(resizing.range, axes=1, impl=impl)
            fourier = fourier * resizing
        else:
            fourier = FourierTransform(ray_trafo.range, axes=1, impl=impl)
    elif ray_trafo.domain.ndim == 3:
        # Find the direction that the filter should be taken in
        rot_dir = _rotation_direction_in_detector(ray_trafo.geometry)
        # Find what axes should be used in the fourier transform
        used_axes = (rot_dir != 0)
        if used_axes[0] and not used_axes[1]:
            axes = [1]
        elif not used_axes[0] and used_axes[1]:
            axes = [2]
        else:
            axes = [1, 2]
        # Add scaling for cone-beam case
        if hasattr(ray_trafo.geometry, 'src_radius'):
            # Magnification correction for divergent-beam geometries
            scale = (ray_trafo.geometry.src_radius
                     / (ray_trafo.geometry.src_radius
                        + ray_trafo.geometry.det_radius))
            if ray_trafo.geometry.pitch != 0:
                # In helical geometry the whole volume is not in each
                # projection and we need to use another weighting.
                # Ideally each point in the volume affects only
                # the projections in a half rotation, so we assume that that
                # is the case.
                scale *= alen / (np.pi)
        else:
            scale = 1.0
        # Define ramp filter
        def fourier_filter(x):
            # If axis is aligned to a coordinate axis, save some memory and
            # time by using broadcasting
            if not used_axes[0]:
                abs_freq = np.abs(rot_dir[1] * x[2])
            elif not used_axes[1]:
                abs_freq = np.abs(rot_dir[0] * x[1])
            else:
                abs_freq = np.abs(rot_dir[0] * x[1] + rot_dir[1] * x[2])
            norm_freq = abs_freq / np.max(abs_freq)
            filt = _fbp_filter(norm_freq, filter_type, frequency_scaling)
            scaling = scale * np.max(abs_freq) / (2 * alen)
            return filt * scaling
        # Define (padded) fourier transform
        if padding:
            # Define padding operator; only pad axes that the filter uses
            if used_axes[0]:
                padded_shape_u = ray_trafo.range.shape[1] * 2 - 1
            else:
                padded_shape_u = ray_trafo.range.shape[1]
            if used_axes[1]:
                padded_shape_v = ray_trafo.range.shape[2] * 2 - 1
            else:
                padded_shape_v = ray_trafo.range.shape[2]
            ran_shp = (ray_trafo.range.shape[0],
                       padded_shape_u,
                       padded_shape_v)
            resizing = ResizingOperator(ray_trafo.range, ran_shp=ran_shp)
            fourier = FourierTransform(resizing.range, axes=axes, impl=impl)
            fourier = fourier * resizing
        else:
            fourier = FourierTransform(ray_trafo.range, axes=axes, impl=impl)
    else:
        raise NotImplementedError('FBP only implemented in 2d and 3d')
    # Create ramp in the detector direction
    ramp_function = fourier.range.element(fourier_filter)
    weight = 1
    if not ray_trafo.range.is_weighted:
        # Compensate for potentially unweighted range of the ray transform
        weight *= ray_trafo.range.cell_volume
    if not ray_trafo.domain.is_weighted:
        # Compensate for potentially unweighted domain of the ray transform
        weight /= ray_trafo.domain.cell_volume
    ramp_function *= weight
    # Create ramp filter via the convolution formula with fourier transforms
    return fourier.inverse * ramp_function * fourier
def fbp_op(ray_trafo, padding=True, filter_type='Ram-Lak',
           frequency_scaling=1.0):
    """Create filtered back-projection operator from a `RayTransform`.

    The filtered back-projection (FBP) is an approximate inverse to the
    ray transform. It is realized as the composition of the
    back-projection (the adjoint of ``ray_trafo``) with a ramp-type
    filtering operator created by `fbp_filter_op`.

    Parameters
    ----------
    ray_trafo : `RayTransform`
        The ray transform (forward operator) whose approximate inverse should
        be computed. Its geometry has to be any of the following

        `Parallel2dGeometry` : Exact reconstruction

        `Parallel3dAxisGeometry` : Exact reconstruction

        `FanBeamGeometry` : Approximate reconstruction, correct in limit of
        fan angle = 0.
        Only flat detectors are supported (det_curvature_radius is None).

        `ConeBeamGeometry`, pitch = 0 (circular) : Approximate reconstruction,
        correct in the limit of fan angle = 0 and cone angle = 0.

        `ConeBeamGeometry`, pitch > 0 (helical) : Very approximate unless a
        `tam_danielson_window` is used. Accurate with the window.

        Other geometries: Not supported
    padding : bool, optional
        If the data space should be zero padded. Without padding, the data may
        be corrupted due to the circular convolution used. Using padding makes
        the algorithm slower.
    filter_type : optional
        The type of filter to be used. The predefined options, in
        approximate order from most to least noise sensitive, are
        ``'Ram-Lak'``, ``'Shepp-Logan'``, ``'Cosine'``, ``'Hamming'`` and
        ``'Hann'``. A callable can also be provided; it must take an
        array of values in [0, 1] and return the filter for these
        frequencies.
    frequency_scaling : float, optional
        Relative cutoff frequency for the filter. The normalized
        frequencies are rescaled so that they fit into the range
        [0, frequency_scaling]; any frequency above ``frequency_scaling``
        is set to zero.

    Returns
    -------
    fbp_op : `Operator`
        Approximate inverse operator of ``ray_trafo``.

    See Also
    --------
    tam_danielson_window : Windowing for helical data.
    parker_weighting : Windowing for overcomplete fan-beam data.
    """
    filter_op = fbp_filter_op(ray_trafo, padding=padding,
                              filter_type=filter_type,
                              frequency_scaling=frequency_scaling)
    # FBP = back-projection (adjoint) composed with ramp filtering
    return ray_trafo.adjoint * filter_op
if __name__ == '__main__':
    import odl
    import matplotlib.pyplot as plt
    from odl.util.testutils import run_doctests

    # Display the various filters
    x = np.linspace(0, 1, 100)
    cutoff = 0.7
    plt.figure('fbp filter')
    for filter_name in ['Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
                        np.sqrt]:
        plt.plot(x, _fbp_filter(x, filter_name, cutoff), label=filter_name)
    plt.title('Filters with frequency scaling = {}'.format(cutoff))
    plt.legend(loc=2)

    # Show the Tam-Danielson window
    # Create Ray Transform in helical geometry
    reco_space = odl.uniform_discr(
        min_pt=[-20, -20, 0], max_pt=[20, 20, 40], shape=[300, 300, 300])
    angle_partition = odl.uniform_partition(0, 8 * 2 * np.pi, 2000)
    detector_partition = odl.uniform_partition([-40, -4], [40, 4], [500, 500])
    geometry = odl.tomo.ConeBeamGeometry(
        angle_partition, detector_partition, src_radius=100, det_radius=100,
        pitch=5.0)
    ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')

    # Create and show TD window
    td_window = tam_danielson_window(ray_trafo, smoothing_width=0)
    td_window.show('Tam-Danielson window', coords=[0, None, None])

    # Show the Parker weighting
    # Create Ray Transform in fan beam geometry
    geometry = odl.tomo.cone_beam_geometry(reco_space,
                                           src_radius=40, det_radius=80)
    ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')

    # Create and show the Parker weighting. Use a distinct name for the
    # result so the `parker_weighting` function is not shadowed by it.
    parker_weights = parker_weighting(ray_trafo)
    parker_weights.show('Parker weighting')

    # Also run the doctests
    run_doctests()
| mpl-2.0 | 3151f4359e19cd99fe30493705c087be | 35.939966 | 79 | 0.616688 | 3.629255 | false | false | false | false |
odlgroup/odl | odl/contrib/datasets/ct/fips.py | 2 | 4207 | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# The data is licensed under a
# Creative Commons Attribution 4.0 International License.
#
# You should have received a copy of the license along with this
# work. If not, see <http://creativecommons.org/licenses/by/4.0/>.
"""Tomographic datasets from the Finish Inverse Problems Society (FIPS)."""
from __future__ import division
import numpy as np
from odl.contrib.datasets.util import get_data
from odl.discr import uniform_partition
from odl.tomo import FanBeamGeometry
__all__ = ('walnut_data', 'walnut_geometry',
'lotus_root_data', 'lotus_root_geometry')
DATA_SUBSET = 'ct_fips'
def walnut_data():
    """Tomographic X-ray data of a walnut.

    Notes
    -----
    See the article `Tomographic X-ray data of a walnut`_ for further
    information.

    See Also
    --------
    walnut_geometry

    References
    ----------
    .. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
    """
    # TODO: Store data in some ODL controlled url
    url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat'
    loaded = get_data('walnut.mat', subset=DATA_SUBSET, url=url)

    # Reorder axes and flip to match the ODL conventions
    sinogram = np.swapaxes(loaded['sinogram1200'], 0, 1)[::-1, ::-1]
    sinogram = sinogram.astype('float')

    # Very crude gain normalization: scale by the per-row maximum, then
    # apply the log transform to get line integrals
    row_max = np.max(sinogram, axis=1)[:, None]
    return -np.log(sinogram / row_max)
def walnut_geometry():
    """Tomographic geometry for the walnut dataset.

    Notes
    -----
    See the article `Tomographic X-ray data of a walnut`_ for further
    information.

    See Also
    --------
    walnut_data

    References
    ----------
    .. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
    """
    # Angular offset chosen to reproduce the rotation used in the
    # reference article
    angle_offset = -np.pi / 2
    angle_part = uniform_partition(angle_offset, angle_offset + 2 * np.pi,
                                   1200)

    # Detector offset determined experimentally
    # TODO: Find exact value
    det_offset = -0.279
    det_part = uniform_partition(det_offset - 57.4, det_offset + 57.4, 2296)

    return FanBeamGeometry(angle_part, det_part,
                           src_radius=110, det_radius=190)
def lotus_root_data():
    """Tomographic X-ray data of a lotus root.

    Notes
    -----
    See the article `Tomographic X-ray data of a lotus root filled with
    attenuating objects`_ for further information.

    See Also
    --------
    lotus_root_geometry

    References
    ----------
    .. _Tomographic X-ray data of a lotus root filled with attenuating objects:
       https://arxiv.org/abs/1609.07299
    """
    # TODO: Store data in some ODL controlled url
    url = 'http://www.fips.fi/dataset/CT_Lotus_v1/sinogram.mat'
    loaded = get_data('lotus_root.mat', subset=DATA_SUBSET, url=url)

    # Reorder axes to match the ODL conventions
    sinogram = np.swapaxes(loaded['sinogram'], 0, 1)[:, :]
    return sinogram.astype('float')
def lotus_root_geometry():
    """Tomographic geometry for the lotus root dataset.

    Notes
    -----
    See the article `Tomographic X-ray data of a lotus root filled with
    attenuating objects`_ for further information.

    See Also
    --------
    lotus_root_data

    References
    ----------
    .. _Tomographic X-ray data of a lotus root filled with attenuating objects:
       https://arxiv.org/abs/1609.07299
    """
    # To get the same rotation as in the reference article
    a_offset = np.pi / 2
    apart = uniform_partition(a_offset,
                              a_offset + 2 * np.pi * 366. / 360.,
                              366)
    # TODO: Find exact value, determined experimentally
    d_offset = 0.35
    dpart = uniform_partition(d_offset - 60, d_offset + 60, 2240)
    geometry = FanBeamGeometry(apart, dpart,
                               src_radius=540, det_radius=90)
    return geometry
if __name__ == '__main__':
    # Execute the doctest examples in this module when run as a script
    from odl.util.testutils import run_doctests
    run_doctests()
| mpl-2.0 | d18469e0579512b2d84bf28972d8f0b4 | 26.496732 | 79 | 0.63228 | 3.420325 | false | false | false | false |
odlgroup/odl | odl/test/operator/oputils_test.py | 1 | 6918 | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import division
import numpy as np
import pytest
import odl
from odl.operator.oputils import matrix_representation, power_method_opnorm
from odl.operator.pspace_ops import ProductSpaceOperator
from odl.util.testutils import all_almost_equal
def test_matrix_representation():
    """The matrix representation of a matrix operator is its matrix."""
    size = 3
    mat = np.random.rand(size, size)
    mat_op = odl.MatrixOperator(mat)
    assert all_almost_equal(mat, matrix_representation(mat_op))
def test_matrix_representation_product_to_lin_space():
    """Verify that the matrix repr works for product spaces.

    Here, since the domain has shape ``(2, 3)`` and the range has shape
    ``(1, 3)``, the shape of the matrix representation will be
    ``(1, 3, 2, 3)`` (range axes first, then domain axes).
    """
    n = 3
    A = np.random.rand(n, n)
    Aop = odl.MatrixOperator(A)
    B = np.random.rand(n, n)
    Bop = odl.MatrixOperator(B)
    # Horizontal stacking: operator maps X^2 -> X^1
    ABop = ProductSpaceOperator([[Aop, Bop]])
    matrix_repr = matrix_representation(ABop)
    assert matrix_repr.shape == (1, n, 2, n)
    assert np.linalg.norm(A - matrix_repr[0, :, 0, :]) == pytest.approx(0)
    assert np.linalg.norm(B - matrix_repr[0, :, 1, :]) == pytest.approx(0)
def test_matrix_representation_lin_space_to_product():
    """Verify that the matrix repr works for product spaces.

    The domain has shape ``(1, 3)`` and the range has shape ``(2, 3)``,
    so the matrix representation has shape ``(2, 3, 1, 3)``.
    """
    size = 3
    mat_a = np.random.rand(size, size)
    mat_b = np.random.rand(size, size)
    # Vertical stacking: operator maps X^1 -> X^2
    stacked_op = ProductSpaceOperator([[odl.MatrixOperator(mat_a)],
                                       [odl.MatrixOperator(mat_b)]])
    matrix_repr = matrix_representation(stacked_op)
    assert matrix_repr.shape == (2, size, 1, size)
    assert np.linalg.norm(mat_a - matrix_repr[0, :, 0, :]) == pytest.approx(0)
    assert np.linalg.norm(mat_b - matrix_repr[1, :, 0, :]) == pytest.approx(0)
def test_matrix_representation_product_to_product():
    """Verify that the matrix repr works for product spaces.

    Both domain and range have shape ``(2, 3)``, so the matrix
    representation has shape ``(2, 3, 2, 3)``.
    """
    size = 3
    mat_a = np.random.rand(size, size)
    mat_b = np.random.rand(size, size)
    # Block-diagonal operator X^2 -> X^2
    diag_op = ProductSpaceOperator([[odl.MatrixOperator(mat_a), 0],
                                    [0, odl.MatrixOperator(mat_b)]])
    matrix_repr = matrix_representation(diag_op)
    assert matrix_repr.shape == (2, size, 2, size)
    assert np.linalg.norm(mat_a - matrix_repr[0, :, 0, :]) == pytest.approx(0)
    assert np.linalg.norm(mat_b - matrix_repr[1, :, 1, :]) == pytest.approx(0)
def test_matrix_representation_not_linear_op():
    """A non-linear operator has no matrix representation."""
    class Squaring(odl.Operator):
        """Small nonlinear test operator."""

        def _call(self, x):
            return x ** 2

    squaring = Squaring(domain=odl.rn(3), range=odl.rn(3), linear=False)
    with pytest.raises(ValueError):
        matrix_representation(squaring)
def test_matrix_representation_wrong_domain():
    """An operator with a nested product-space domain raises TypeError."""
    class NestedDomainOp(odl.Operator):
        """Linear test operator whose domain is a nested product space."""

        def __init__(self):
            super(NestedDomainOp, self).__init__(
                domain=odl.rn(3) * odl.rn(3) ** 2,
                range=odl.rn(4),
                linear=True)

        def _call(self, x, out):
            return odl.rn(np.random.rand(4))

    with pytest.raises(TypeError):
        matrix_representation(NestedDomainOp())
def test_matrix_representation_wrong_range():
    """An operator with a nested product-space range raises TypeError."""
    class NestedRangeOp(odl.Operator):
        """Linear test operator whose range is a nested product space."""

        def __init__(self):
            super(NestedRangeOp, self).__init__(
                domain=odl.rn(3),
                range=odl.rn(3) * odl.rn(3) ** 2,
                linear=True)

        def _call(self, x, out):
            return odl.rn(np.random.rand(4))

    with pytest.raises(TypeError):
        matrix_representation(NestedRangeOp())
def test_power_method_opnorm_symm():
    """Test the power method on a symmetric matrix operator"""
    # Test matrix with singular values 1.2 and 1.0
    mat = np.array([[0.9509044, -0.64566614],
                    [-0.44583952, -0.95923051]])
    op = odl.MatrixOperator(mat)
    # Operator norm = largest singular value
    true_opnorm = 1.2
    opnorm_est = power_method_opnorm(op, maxiter=100)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)
    # Start at a different point
    xstart = odl.rn(2).element([0.8, 0.5])
    opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=100)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)
def test_power_method_opnorm_nonsymm():
    """Test the power method on a nonsymmetric matrix operator"""
    # Singular values 5.5 and 6
    mat = np.array([[-1.52441557, 5.04276365],
                    [1.90246927, 2.54424763],
                    [5.32935411, 0.04573162]])
    op = odl.MatrixOperator(mat)
    # Operator norm = largest singular value
    true_opnorm = 6
    # Start vector (1, 1) is close to the wrong eigenvector
    xstart = odl.rn(2).element([1, 1])
    opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=50)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)
    # Start close to the correct eigenvector, converges very fast
    xstart = odl.rn(2).element([-0.8, 0.5])
    opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=6)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)
def test_power_method_opnorm_exceptions():
    """The power method rejects invalid iteration counts and start vectors."""
    space = odl.rn(2)
    ident = odl.IdentityOperator(space)

    with pytest.raises(ValueError):
        # Too small number of iterates
        power_method_opnorm(ident, maxiter=0)

    with pytest.raises(ValueError):
        # Negative number of iterates
        power_method_opnorm(ident, maxiter=-5)

    with pytest.raises(ValueError):
        # Input vector is zero
        power_method_opnorm(ident, maxiter=2, xstart=space.zero())

    with pytest.raises(ValueError):
        # Input vector lies in the null space
        null_space_op = odl.MatrixOperator([[0., 1.],
                                            [0., 0.]])
        power_method_opnorm(null_space_op, maxiter=2, xstart=[1, 0])

    with pytest.raises(ValueError):
        # Uneven number of iterates for non-square operator
        rect_op = odl.MatrixOperator([[1., 2., 3.],
                                      [4., 5., 6.]])
        power_method_opnorm(rect_op, maxiter=1, xstart=rect_op.domain.one())
if __name__ == '__main__':
    # Run all tests in this file when executed as a script
    odl.util.test_file(__file__)
| mpl-2.0 | 0892c907be5f4b0e34b4bb7b50863eef | 31.176744 | 79 | 0.613472 | 3.337192 | false | true | false | false |
odlgroup/odl | examples/tomo/backends/astra_performance_cuda_cone_3d_cg.py | 2 | 3351 | """Performance example of running native ASTRA vs using ODL for reconstruction.
In this example, a 256x256x256 image is reconstructed using the Conjugate
Gradient Least Squares method on the GPU.
In general, pure ASTRA is faster than ODL since it does not need to perform
any copies and all arithmetic is performed on the GPU. Despite this, ODL is
not much slower. In this example, the overhead is about 40 %, depending on the
hardware used.
"""
import astra
import numpy as np
import matplotlib.pyplot as plt
import odl
from odl.util.testutils import timer
# Common geometry parameters
domain_size = np.array([256, 256, 256])
n_angles = 360
det_size = 512
niter = 10
# Create reconstruction space
reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size)
# Create geometry: circular cone beam, full turn
apart = odl.uniform_partition(0, 2 * np.pi, n_angles)
dpart = odl.uniform_partition([-500, -500], [500, 500],
                              [det_size, det_size])
geometry = odl.tomo.ConeBeamGeometry(apart, dpart,
                                     src_radius=500, det_radius=500)
# Raw phantom array, shared by the ASTRA and ODL runs below
phantom = odl.phantom.shepp_logan(reco_space, modified=True).asarray()
# --- ASTRA ---
# Define ASTRA geometry
astra_vol_geom = astra.create_vol_geom(*domain_size)
det_row_count = geometry.det_partition.shape[1]
det_col_count = geometry.det_partition.shape[0]
# Convert the ODL geometry to ASTRA's vector representation
vec = odl.tomo.backends.astra_setup.astra_conebeam_3d_geom_to_vec(geometry)
astra_proj_geom = astra.create_proj_geom('cone_vec', det_row_count,
                                         det_col_count, vec)
# Create ASTRA projector
proj_cfg = {}
proj_cfg['type'] = 'cuda3d'
proj_cfg['VolumeGeometry'] = astra_vol_geom
proj_cfg['ProjectionGeometry'] = astra_proj_geom
proj_cfg['options'] = {}
proj_id = astra.projector3d.create(proj_cfg)
# Create sinogram
sinogram_id, sinogram = astra.create_sino3d_gpu(phantom,
                                                astra_proj_geom,
                                                astra_vol_geom)
# Create a data object for the reconstruction
rec_id = astra.data3d.create('-vol', astra_vol_geom)
# Set up the parameters for a reconstruction algorithm using the CUDA backend
cfg = astra.astra_dict('CGLS3D_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['ProjectorId'] = proj_id
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
with timer('ASTRA Run'):
    # Run the algorithm
    astra.algorithm.run(alg_id, niter)
# Get the result
rec = astra.data3d.get(rec_id)
# Clean up ASTRA-side allocations
astra.algorithm.delete(alg_id)
astra.data3d.delete(rec_id)
astra.data3d.delete(sinogram_id)
astra.projector3d.delete(proj_id)
# --- ODL ---
# Create ray transform
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')
# Create sinogram
data = ray_trafo(phantom)
# Solve with CGLS (aka CGN), same number of iterations as the ASTRA run
x = reco_space.zero()
with timer('ODL Run'):
    odl.solvers.conjugate_gradient_normal(ray_trafo, x, data, niter=niter)
# Slice through the middle of the volume along the z axis
coords = (slice(None), slice(None), 128)
# Display results for comparison
plt.figure('Phantom')
plt.imshow(phantom.T[coords], origin='lower', cmap='bone')
plt.figure('ASTRA Reconstruction')
plt.imshow(rec.T[coords], origin='lower', cmap='bone')
plt.figure('ODL Reconstruction')
plt.imshow(x.asarray().T[coords], origin='lower', cmap='bone')
| mpl-2.0 | e0810c2dc95000208325afb0b71c67b3 | 29.743119 | 79 | 0.695016 | 3.043597 | false | false | false | false |
odlgroup/odl | examples/space/simple_rn.py | 2 | 4104 | """An example of a very simple space, the space rn.
Including some benchmarks with an optimized version.
"""
import numpy as np
import odl
from odl.space.base_tensors import TensorSpace, Tensor
from odl.util.testutils import timer
class SimpleRn(TensorSpace):
    """The real space R^n, non-optimized implementation.

    Minimal example of a `TensorSpace` subclass: elements store their
    values in a plain NumPy array and all arithmetic is delegated to
    NumPy. Intended for demonstration and benchmarking only.
    """

    def __init__(self, size):
        """Initialize a real space with ``size`` entries."""
        super(SimpleRn, self).__init__(size, dtype=float)

    def zero(self):
        """Return the zero element of the space."""
        return self.element(np.zeros(self.size))

    def one(self):
        """Return the constant-one element of the space."""
        return self.element(np.ones(self.size))

    def _lincomb(self, a, x1, b, x2, out):
        # out <- a * x1 + b * x2
        out.data[:] = a * x1.data + b * x2.data

    def _inner(self, x1, x2):
        # Euclidean inner product
        return float(np.vdot(x1.data, x2.data))

    def _multiply(self, x1, x2, out):
        # Entry-wise product
        out.data[:] = x1.data * x2.data

    def _divide(self, x1, x2, out):
        # Entry-wise quotient
        out.data[:] = x1.data / x2.data

    def element(self, *args, **kwargs):
        """Create a new element, optionally from existing data.

        Without arguments, an uninitialized element is returned. A
        one-dimensional `numpy.ndarray` of matching size is wrapped
        without copying; any other input is passed to `numpy.array`.
        """
        if not args and not kwargs:
            # No input data -> uninitialized element
            return self.element(np.empty(self.size))
        if args and isinstance(args[0], np.ndarray):
            if args[0].shape == (self.size,):
                return RnVector(self, args[0])
            else:
                # Bug fix: the error message previously used the
                # nonexistent attribute `self.dim`, which raised
                # `AttributeError` instead of the intended `ValueError`.
                raise ValueError('input array {} is of shape {}, expected '
                                 'shape ({},).'.format(args[0], args[0].shape,
                                                       self.size,))
        else:
            # Fall back to `numpy.array` for lists, iterables etc.
            # (An unreachable trailing `return` after this `if`/`else`
            # was removed; it also referenced `self.dim`.)
            return self.element(np.array(
                *args, **kwargs).astype(np.float64, copy=False))
class RnVector(Tensor):
    """Element of `SimpleRn`, backed by a one-dimensional NumPy array."""

    def __init__(self, space, data):
        """Initialize from a space and a flat data array."""
        super(RnVector, self).__init__(space)
        self.data = data

    def __getitem__(self, index):
        return self.data.__getitem__(index)

    def __setitem__(self, index, value):
        return self.data.__setitem__(index, value)

    def asarray(self, *args):
        """Return the data as a `numpy.ndarray`.

        Bug fix: the previous implementation called ``self.data(*args)``,
        which always raised ``TypeError`` since NumPy arrays are not
        callable.
        """
        return np.asarray(self.data, *args)
r5 = SimpleRn(5)
# odl.diagnostics.SpaceTest(r5).run_tests()
# Do some tests to compare the simple space with the optimized odl spaces
n = 10 ** 7
iterations = 10
# Perform some benchmarks with rn
opt_spc = odl.rn(n)
simple_spc = SimpleRn(n)
# Same random data wrapped in each space implementation
x, y, z = np.random.rand(n), np.random.rand(n), np.random.rand(n)
ox, oy, oz = (opt_spc.element(x.copy()), opt_spc.element(y.copy()),
              opt_spc.element(z.copy()))
sx, sy, sz = (simple_spc.element(x.copy()), simple_spc.element(y.copy()),
              simple_spc.element(z.copy()))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    cu_spc = odl.rn(n, impl='cuda')
    cx, cy, cz = (cu_spc.element(x.copy()), cu_spc.element(y.copy()),
                  cu_spc.element(z.copy()))
# Benchmark 1: linear combination z <- 2.13 * x + 3.14 * y
print(" lincomb:")
with timer("SimpleRn"):
    for _ in range(iterations):
        simple_spc.lincomb(2.13, sx, 3.14, sy, out=sz)
print("result: {}".format(sz[1:5]))
with timer("odl numpy"):
    for _ in range(iterations):
        opt_spc.lincomb(2.13, ox, 3.14, oy, out=oz)
print("result: {}".format(oz[1:5]))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    with timer("odl cuda"):
        for _ in range(iterations):
            cu_spc.lincomb(2.13, cx, 3.14, cy, out=cz)
    print("result: {}".format(cz[1:5]))
# Benchmark 2: norm computation
print("\n Norm:")
with timer("SimpleRn"):
    for _ in range(iterations):
        result = sz.norm()
print("result: {}".format(result))
with timer("odl numpy"):
    for _ in range(iterations):
        result = oz.norm()
print("result: {}".format(result))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    with timer("odl cuda"):
        for _ in range(iterations):
            result = cz.norm()
    print("result: {}".format(result))
# Benchmark 3: inner product computation
print("\n Inner:")
with timer("SimpleRn"):
    for _ in range(iterations):
        result = sz.inner(sx)
print("result: {}".format(result))
with timer("odl numpy"):
    for _ in range(iterations):
        result = oz.inner(ox)
print("result: {}".format(result))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    with timer("odl cuda"):
        for _ in range(iterations):
            result = cz.inner(cx)
    print("result: {}".format(result))
| mpl-2.0 | cd6d1fb2ab59aa33ab26b22c5d6bac99 | 28.52518 | 78 | 0.584064 | 3.152074 | false | false | false | false |
odlgroup/odl | odl/solvers/nonsmooth/difference_convex.py | 2 | 8749 | # Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Solvers for the optimization of the difference of convex functions.
Collection of DCA (d.c. algorithms) and related methods which make use of
structured optimization if the objective function can be written as a
difference of two convex functions.
"""
from __future__ import print_function, division, absolute_import
__all__ = ('dca', 'prox_dca', 'doubleprox_dc')
def dca(x, f, g, niter, callback=None):
    r"""Subgradient DCA of Tao and An.

    This algorithm solves a problem of the form ::

        min_x f(x) - g(x),

    where ``f`` and ``g`` are proper, convex and lower semicontinuous
    functions.

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial point, updated in-place.
    f : `Functional`
        Convex functional. Needs to implement ``f.convex_conj.gradient``.
    g : `Functional`
        Convex functional. Needs to implement ``g.gradient``.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Notes
    -----
    The algorithm is described in Section 3 and in particular in Theorem 3 of
    `[TA1997] <http://journals.math.ac.vn/acta/pdf/9701289.pdf>`_. The problem

    .. math::
        \min f(x) - g(x)

    has the first-order optimality condition :math:`0 \in \partial f(x) -
    \partial g(x)`, i.e., aims at finding an :math:`x` so that there exists a
    common element

    .. math::
        y \in \partial f(x) \cap \partial g(x).

    The element :math:`y` can be seen as a solution of the Toland dual problem

    .. math::
        \min g^*(y) - f^*(y)

    and the iteration is given by

    .. math::
        y_n \in \partial g(x_n), \qquad x_{n+1} \in \partial f^*(y_n),

    for :math:`n\geq 0`. Here, a subgradient is found by evaluating the
    gradient method of the respective functionals.

    References
    ----------
    [TA1997] Tao, P D, and An, L T H. *Convex analysis approach to d.c.
    programming: Theory, algorithms and applications*. Acta Mathematica
    Vietnamica, 22.1 (1997), pp 289--355.

    See also
    --------
    prox_dca :
        Solver with a proximal step for ``f`` and a subgradient step for ``g``.
    doubleprox_dc :
        Solver with proximal steps for all the nonsmooth convex functionals
        and a gradient step for a smooth functional.
    """
    space = f.domain
    if g.domain != space:
        raise ValueError('`f.domain` and `g.domain` need to be equal, but '
                         '{} != {}'.format(space, g.domain))
    # Hoist the convex conjugate out of the loop; its gradient realizes
    # a subgradient selection of f^*.
    f_convex_conj = f.convex_conj
    for _ in range(niter):
        # One DCA step: y_n = grad g(x_n), then x_{n+1} = grad f^*(y_n),
        # written into `x` in-place.
        f_convex_conj.gradient(g.gradient(x), out=x)
        if callback is not None:
            callback(x)
def prox_dca(x, f, g, niter, gamma, callback=None):
    r"""Proximal DCA of Sun, Sampaio and Candido.

    This algorithm solves a problem of the form ::

        min_x f(x) - g(x)

    where ``f`` and ``g`` are two proper, convex and lower semicontinuous
    functions.

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial point, updated in-place.
    f : `Functional`
        Convex functional. Needs to implement ``f.proximal``.
    g : `Functional`
        Convex functional. Needs to implement ``g.gradient``.
    niter : int
        Number of iterations.
    gamma : positive float
        Stepsize in the primal updates.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Notes
    -----
    The algorithm was proposed as Algorithm 2.3 in
    `[SSC2003]
    <http://www.global-sci.org/jcm/readabs.php?vol=21&no=4&page=451&year=2003&ppage=462>`_.
    It solves the problem

    .. math ::
        \min f(x) - g(x)

    by using subgradients of :math:`g` and proximal points of :math:`f`.
    The iteration is given by

    .. math ::
        y_n \in \partial g(x_n), \qquad x_{n+1}
        = \mathrm{Prox}_{\gamma f}(x_n + \gamma y_n).

    In contrast to `dca`, `prox_dca` uses proximal steps with respect to the
    convex part ``f``. Both algorithms use subgradients of the concave part
    ``g``.

    References
    ----------
    [SSC2003] Sun, W, Sampaio R J B, and Candido M A B. *Proximal point
    algorithm for minimization of DC function*. Journal of Computational
    Mathematics, 21.4 (2003), pp 451--462.

    See also
    --------
    dca :
        Solver with subgradinet steps for all the functionals.
    doubleprox_dc :
        Solver with proximal steps for all the nonsmooth convex functionals
        and a gradient step for a smooth functional.
    """
    space = f.domain
    if g.domain != space:
        raise ValueError('`f.domain` and `g.domain` need to be equal, but '
                         '{} != {}'.format(space, g.domain))
    for _ in range(niter):
        # One step: x_{n+1} = prox_{gamma f}(x_n + gamma * grad g(x_n)).
        # NOTE(review): this relies on `lincomb` returning the updated
        # element `x` (it also overwrites `x` in-place) -- confirm for
        # the space implementation in use.
        f.proximal(gamma)(x.lincomb(1, x, gamma, g.gradient(x)), out=x)
        if callback is not None:
            callback(x)
def doubleprox_dc(x, y, f, phi, g, K, niter, gamma, mu, callback=None):
    r"""Double-proxmial gradient d.c. algorithm of Banert and Bot.

    This algorithm solves a problem of the form ::

        min_x f(x) + phi(x) - g(Kx).

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial primal guess, updated in-place.
    y : `LinearSpaceElement`
        Initial dual guess, updated in-place.
    f : `Functional`
        Convex functional. Needs to implement ``g.proximal``.
    phi : `Functional`
        Convex functional. Needs to implement ``phi.gradient``.
        Convergence can be guaranteed if the gradient is Lipschitz continuous.
    g : `Functional`
        Convex functional. Needs to implement ``h.convex_conj.proximal``.
    K : `Operator`
        Linear operator. Needs to implement ``K.adjoint``
    niter : int
        Number of iterations.
    gamma : positive float
        Stepsize in the primal updates.
    mu : positive float
        Stepsize in the dual updates.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Notes
    -----
    This algorithm is proposed in `[BB2016]
    <https://arxiv.org/abs/1610.06538>`_ and solves the d.c. problem

    .. math ::
        \min_x f(x) + \varphi(x) - g(Kx)

    together with its Toland dual

    .. math ::
        \min_y g^*(y) - (f + \varphi)^*(K^* y).

    The iterations are given by

    .. math ::
        x_{n+1} &= \mathrm{Prox}_{\gamma f} (x_n + \gamma (K^* y_n
        - \nabla \varphi(x_n))), \\
        y_{n+1} &= \mathrm{Prox}_{\mu g^*} (y_n + \mu K x_{n+1}).

    To guarantee convergence, the parameter :math:`\gamma` must satisfy
    :math:`0 < \gamma < 2/L` where :math:`L` is the Lipschitz constant of
    :math:`\nabla \varphi`.

    References
    ----------
    [BB2016] Banert, S, and Bot, R I. *A general double-proximal gradient
    algorithm for d.c. programming*. arXiv:1610.06538 [math.OC] (2016).

    See also
    --------
    dca :
        Solver with subgradient steps for all the functionals.
    prox_dca :
        Solver with a proximal step for ``f`` and a subgradient step for ``g``.
    """
    primal_space = f.domain
    dual_space = g.domain
    if phi.domain != primal_space:
        raise ValueError('`f.domain` and `phi.domain` need to be equal, but '
                         '{} != {}'.format(primal_space, phi.domain))
    if K.domain != primal_space:
        raise ValueError('`f.domain` and `K.domain` need to be equal, but '
                         '{} != {}'.format(primal_space, K.domain))
    if K.range != dual_space:
        raise ValueError('`g.domain` and `K.range` need to be equal, but '
                         '{} != {}'.format(dual_space, K.range))
    # Hoist the convex conjugate of g out of the loop
    g_convex_conj = g.convex_conj
    for _ in range(niter):
        # Primal step:
        # x_{n+1} = prox_{gamma f}(x_n + gamma (K^* y_n - grad phi(x_n))).
        # NOTE(review): relies on `lincomb` returning the updated element
        # (which is also overwritten in-place) -- confirm for the space
        # implementation in use.
        f.proximal(gamma)(x.lincomb(1, x,
                                    gamma, K.adjoint(y) - phi.gradient(x)),
                          out=x)
        # Dual step: y_{n+1} = prox_{mu g^*}(y_n + mu K x_{n+1})
        g_convex_conj.proximal(mu)(y.lincomb(1, y, mu, K(x)), out=y)
        if callback is not None:
            callback(x)
def doubleprox_dc_simple(x, y, f, phi, g, K, niter, gamma, mu):
    """Non-optimized version of ``doubleprox_dc``.
    This function is intended for debugging. It makes a lot of copies and
    performs no error checking.

    Parameters
    ----------
    x, y :
        Primal resp. dual starting points; both are updated in place.
    f, phi, g : `Functional`
        Parts of the d.c. objective ``f + phi - g(K x)``; ``f`` needs
        ``f.proximal``, ``phi`` needs ``phi.gradient``, and ``g`` needs
        ``g.convex_conj.proximal``.
    K : `Operator`
        Linear operator; needs to implement ``K.adjoint``.
    niter : int
        Number of iterations.
    gamma, mu : positive float
        Step sizes of the primal resp. dual updates.
    """
    for _ in range(niter):
        # Primal: proximal step of f at x + gamma * (K^* y - grad phi(x))
        f.proximal(gamma)(x + gamma * K.adjoint(y) -
                          gamma * phi.gradient(x), out=x)
        # Dual: proximal step of g^* at y + mu * K x (uses the updated x)
        g.convex_conj.proximal(mu)(y + mu * K(x), out=y)
| mpl-2.0 | 40872556c64f59295a8a06bec06aba08 | 31.645522 | 91 | 0.59824 | 3.536378 | false | false | false | false |
odlgroup/odl | odl/trafos/backends/pyfftw_bindings.py | 2 | 11456 | # Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Bindings to the ``pyFFTW`` back-end for Fourier transforms.
The `pyFFTW <https://pyfftw.readthedocs.io>`_ package is a Python
wrapper around the well-known `FFTW <http://fftw.org/>`_ library for fast
Fourier transforms.
"""
from __future__ import print_function, division, absolute_import
from multiprocessing import cpu_count
import numpy as np
from packaging.version import parse as parse_version
import warnings
try:
import pyfftw
PYFFTW_AVAILABLE = True
except ImportError:
PYFFTW_AVAILABLE = False
else:
if parse_version(pyfftw.__version__) < parse_version('0.10.3'):
warnings.warn('PyFFTW < 0.10.3 is known to cause problems with some '
'ODL functionality, see issue #1002.',
RuntimeWarning)
from odl.util import (
is_real_dtype, dtype_repr, complex_dtype, normalized_axes_tuple)
__all__ = ('pyfftw_call', 'PYFFTW_AVAILABLE')
def pyfftw_call(array_in, array_out, direction='forward', axes=None,
                halfcomplex=False, **kwargs):
    """Calculate the DFT with pyfftw.
    The discrete Fourier (forward) transform calculates the sum::
        f_hat[k] = sum_j( f[j] * exp(-2*pi*1j * j*k/N) )
    where the summation is taken over all indices
    ``j = (j[0], ..., j[d-1])`` in the range ``0 <= j < N``
    (component-wise), with ``N`` being the shape of the input array.
    The output indices ``k`` lie in the same range, except
    for half-complex transforms, where the last axis ``i`` in ``axes``
    is shortened to ``0 <= k[i] < floor(N[i]/2) + 1``.
    In the backward transform, the sign of the exponential argument
    is flipped.
    Parameters
    ----------
    array_in : `numpy.ndarray`
        Array to be transformed
    array_out : `numpy.ndarray`
        Output array storing the transformed values, may be aliased
        with ``array_in``.
    direction : {'forward', 'backward'}, optional
        Direction of the transform
    axes : int or sequence of ints, optional
        Dimensions along which to take the transform. ``None`` means
        using all axes and is equivalent to ``np.arange(ndim)``.
    halfcomplex : bool, optional
        If ``True``, calculate only the negative frequency part along the
        last axis. If ``False``, calculate the full complex FFT.
        This option can only be used with real input data.
    Other Parameters
    ----------------
    fftw_plan : ``pyfftw.FFTW``, optional
        Use this plan instead of calculating a new one. If specified,
        the options ``planning_effort``, ``planning_timelimit`` and
        ``threads`` have no effect.
    planning_effort : str, optional
        Flag for the amount of effort put into finding an optimal
        FFTW plan. See the `FFTW doc on planner flags
        <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_.
        Available options: {'estimate', 'measure', 'patient', 'exhaustive'}
        Default: 'estimate'
    planning_timelimit : float or ``None``, optional
        Limit planning time to roughly this many seconds.
        Default: ``None`` (no limit)
    threads : int, optional
        Number of threads to use.
        Default: Number of CPUs if the number of data points is larger
        than 4096, else 1.
    normalise_idft : bool, optional
        If ``True``, the result of the backward transform is divided by
        ``1 / N``, where ``N`` is the total number of points in
        ``array_in[axes]``. This ensures that the IDFT is the true
        inverse of the forward DFT.
        Default: ``False``
    import_wisdom : filename or file handle, optional
        File to load FFTW wisdom from. If the file does not exist,
        it is ignored.
    export_wisdom : filename or file handle, optional
        File to append the accumulated FFTW wisdom to
    Returns
    -------
    fftw_plan : ``pyfftw.FFTW``
        The plan object created from the input arguments. It can be
        reused for transforms of the same size with the same data types.
        Note that reuse only gives a speedup if the initial plan
        used a planner flag other than ``'estimate'``.
        If ``fftw_plan`` was specified, the returned object is a
        reference to it.
    Notes
    -----
    * The planning and direction flags can also be specified as
      capitalized and prepended by ``'FFTW_'``, i.e. in the original
      FFTW form.
    * For a ``halfcomplex`` forward transform, the arrays must fulfill
      ``array_out.shape[axes[-1]] == array_in.shape[axes[-1]] // 2 + 1``,
      and vice versa for backward transforms.
    * All planning schemes except ``'estimate'`` require an internal copy
      of the input array but are often several times faster after the
      first call (measuring results are cached). Typically,
      'measure' is a good compromise. If you cannot afford the copy,
      use ``'estimate'``.
    * If a plan is provided via the ``fftw_plan`` parameter, no copy
      is needed internally.
    """
    import pickle
    # pyFFTW requires SIMD-aligned arrays; fail early with a clear message.
    if not array_in.flags.aligned:
        raise ValueError('input array not aligned')
    if not array_out.flags.aligned:
        raise ValueError('output array not aligned')
    if axes is None:
        axes = tuple(range(array_in.ndim))
    axes = normalized_axes_tuple(axes, array_in.ndim)
    # Normalize flags to lowercase ODL form ('FFTW_FORWARD' -> 'forward').
    direction = _flag_pyfftw_to_odl(direction)
    fftw_plan_in = kwargs.pop('fftw_plan', None)
    planning_effort = _flag_pyfftw_to_odl(
        kwargs.pop('planning_effort', 'estimate')
    )
    planning_timelimit = kwargs.pop('planning_timelimit', None)
    threads = kwargs.pop('threads', None)
    normalise_idft = kwargs.pop('normalise_idft', False)
    # Empty string disables wisdom import/export below.
    wimport = kwargs.pop('import_wisdom', '')
    wexport = kwargs.pop('export_wisdom', '')
    # NOTE(review): any remaining entries in ``kwargs`` are silently ignored.
    # Cast input to complex if necessary
    array_in_copied = False
    if is_real_dtype(array_in.dtype) and not halfcomplex:
        # Need to cast array_in to complex dtype
        array_in = array_in.astype(complex_dtype(array_in.dtype))
        array_in_copied = True
    # Do consistency checks on the arguments
    _pyfftw_check_args(array_in, array_out, axes, halfcomplex, direction)
    # Import wisdom if possible
    if wimport:
        try:
            with open(wimport, 'rb') as wfile:
                wisdom = pickle.load(wfile)
        except IOError:
            # Missing wisdom file is not an error; start from scratch.
            wisdom = []
        except TypeError:  # Got file handle
            wisdom = pickle.load(wimport)
        if wisdom:
            pyfftw.import_wisdom(wisdom)
    # Copy input array if it hasn't been done yet and the planner is likely
    # to destroy it. If we already have a plan, we don't have to worry.
    planner_destroys = _pyfftw_destroys_input(
        [planning_effort], direction, halfcomplex, array_in.ndim)
    must_copy_array_in = fftw_plan_in is None and planner_destroys
    if must_copy_array_in and not array_in_copied:
        # Plan with a scratch array so ``array_in`` survives planning.
        plan_arr_in = np.empty_like(array_in)
        flags = [_flag_odl_to_pyfftw(planning_effort), 'FFTW_DESTROY_INPUT']
    else:
        plan_arr_in = array_in
        flags = [_flag_odl_to_pyfftw(planning_effort)]
    if fftw_plan_in is None:
        if threads is None:
            if plan_arr_in.size <= 4096:  # Trade-off wrt threading overhead
                threads = 1
            else:
                threads = cpu_count()
        fftw_plan = pyfftw.FFTW(
            plan_arr_in, array_out, direction=_flag_odl_to_pyfftw(direction),
            flags=flags, planning_timelimit=planning_timelimit,
            threads=threads, axes=axes)
    else:
        fftw_plan = fftw_plan_in
    # Execute the transform with the real input/output arrays.
    fftw_plan(array_in, array_out, normalise_idft=normalise_idft)
    if wexport:
        try:
            with open(wexport, 'ab') as wfile:
                pickle.dump(pyfftw.export_wisdom(), wfile)
        except TypeError:  # Got file handle
            pickle.dump(pyfftw.export_wisdom(), wexport)
    return fftw_plan
def _flag_pyfftw_to_odl(flag):
    """Convert a pyFFTW flag to its lowercase ODL form.

    For example, ``'FFTW_MEASURE'`` becomes ``'measure'``; flags already
    in ODL form are returned in lowercase unchanged.

    ``str.lstrip('FFTW_')`` must not be used here: its argument is a
    *set of characters*, not a prefix, so it would also eat leading
    characters of the flag body, e.g.
    ``'FFTW_FORWARD'.lstrip('FFTW_') == 'ORWARD'``.
    """
    if flag.startswith('FFTW_'):
        flag = flag[len('FFTW_'):]
    return flag.lower()
def _flag_odl_to_pyfftw(flag):
    """Convert an ODL flag to its pyFFTW form, e.g. ``'measure'`` -> ``'FFTW_MEASURE'``."""
    return 'FFTW_{}'.format(flag.upper())
def _pyfftw_destroys_input(flags, direction, halfcomplex, ndim):
    """Return ``True`` if FFTW destroys an input array, ``False`` otherwise."""
    # Planner/effort flags that use the input array as scratch space.
    # Both the native pyFFTW spelling and the lowercase ODL spelling of
    # each flag are accepted in ``flags``.
    destructive_flags = ('FFTW_MEASURE', 'FFTW_PATIENT', 'FFTW_EXHAUSTIVE',
                         'FFTW_DESTROY_INPUT')
    for flag in destructive_flags:
        if flag in flags or _flag_pyfftw_to_odl(flag) in flags:
            return True

    # Multi-dimensional halfcomplex backward transforms overwrite their
    # input regardless of planner flags.
    is_backward = direction in ('backward', 'FFTW_BACKWARD')
    return bool(is_backward and halfcomplex and ndim != 1)
def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction):
    """Raise an error if anything is not ok with in and out.

    Verifies that, given ``axes`` and the ``halfcomplex`` flag, the shapes
    and dtypes of ``arr_in`` and ``arr_out`` form a valid forward or
    backward transform pair.

    Parameters
    ----------
    arr_in, arr_out : `numpy.ndarray`
        Input and output arrays of the planned transform.
    axes : sequence of int
        Axes along which the transform is taken; must not contain
        duplicates.
    halfcomplex : bool
        Whether the transform is real-to-halfcomplex (forward) resp.
        halfcomplex-to-real (backward) along the last axis in ``axes``.
    direction : {'forward', 'backward'}
        Direction of the transform (already normalized to ODL form).

    Raises
    ------
    ValueError
        If shapes or dtypes of the two arrays are inconsistent, or if
        ``axes`` contains duplicates.
    IndexError
        If the last entry of ``axes`` is out of range for the arrays.
    RuntimeError
        If ``direction`` is neither ``'forward'`` nor ``'backward'``
        (internal error; callers normalize the flag beforehand).
    """
    if len(set(axes)) != len(axes):
        raise ValueError('duplicate axes are not allowed')

    if direction == 'forward':
        # Expected output shape: input shape, with the halfcomplex axis
        # shortened to floor(N/2) + 1 if applicable.
        out_shape = list(arr_in.shape)
        if halfcomplex:
            try:
                out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1
            except IndexError:
                raise IndexError('axis index {} out of range for array '
                                 'with {} axes'
                                 ''.format(axes[-1], arr_in.ndim))

        if arr_out.shape != tuple(out_shape):
            raise ValueError('expected output shape {}, got {}'
                             ''.format(tuple(out_shape), arr_out.shape))

        # Real input implies complex output; halfcomplex requires real input.
        if is_real_dtype(arr_in.dtype):
            out_dtype = complex_dtype(arr_in.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex forward transform '
                             'with complex input')
        else:
            out_dtype = arr_in.dtype

        if arr_out.dtype != out_dtype:
            raise ValueError('expected output dtype {}, got {}'
                             ''.format(dtype_repr(out_dtype),
                                       dtype_repr(arr_out.dtype)))

    elif direction == 'backward':
        # Mirror image of the forward checks: the *input* has the
        # (possibly shortened) halfcomplex shape and complex dtype.
        in_shape = list(arr_out.shape)
        if halfcomplex:
            try:
                in_shape[axes[-1]] = arr_out.shape[axes[-1]] // 2 + 1
            except IndexError:
                raise IndexError('axis index {} out of range for array '
                                 'with {} axes'
                                 ''.format(axes[-1], arr_out.ndim))

        if arr_in.shape != tuple(in_shape):
            raise ValueError('expected input shape {}, got {}'
                             ''.format(tuple(in_shape), arr_in.shape))

        if is_real_dtype(arr_out.dtype):
            in_dtype = complex_dtype(arr_out.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex backward transform '
                             'with complex output')
        else:
            in_dtype = arr_out.dtype

        if arr_in.dtype != in_dtype:
            raise ValueError('expected input dtype {}, got {}'
                             ''.format(dtype_repr(in_dtype),
                                       dtype_repr(arr_in.dtype)))

    else:
        # Public callers normalize ``direction`` first, so reaching this
        # branch indicates an internal programming error.
        raise RuntimeError('bad direction {!r}'.format(direction))
if __name__ == '__main__':
    # Run this module's doctests; skip them all when pyFFTW is missing.
    from odl.util.testutils import run_doctests
    run_doctests(skip_if=not PYFFTW_AVAILABLE)
| mpl-2.0 | 285da23224144ed015bb2f0feef82b3d | 36.684211 | 79 | 0.604661 | 3.841717 | false | false | false | false |
odlgroup/odl | examples/solvers/pdhg_denoising_ROF_algorithm_comparison.py | 2 | 6272 | """Total variation denoising using PDHG.
Three different algorithms (or variants of PDHG) are compared to solve the
ROF (Rudin-Osher-Fatemi) problem / L2-TV
(ROF) min_{x >= 0} 1/2 ||x - d||_2^2 + lam || |grad(x)| ||_1
Where ``grad`` the spatial gradient and ``d`` is given noisy data.
Algorithms 1 and 2 are two different assignments of the functional parts of ROF
to the functions f and g of PDHG. Algorithm 3 improves upon algorithm 2 by
making use of the strong convexity of the problem.
For further details and a description of the solution method used, see
https://odlgroup.github.io/odl/guide/pdhg_guide.html in the ODL documentation.
"""
import numpy as np
import scipy.misc
import odl
import matplotlib.pyplot as plt
# --- define setting --- #
# Read test image: use only every second pixel, convert integer to float
# NOTE(review): scipy.misc.ascent is deprecated in SciPy >= 1.10; consider
# switching to scipy.datasets.ascent when updating the SciPy dependency.
image = scipy.misc.ascent()[::2, ::2].astype('float')
shape = image.shape
# Rescale max to 1
image /= image.max()
# Discretized spaces
space = odl.uniform_discr([0, 0], shape, shape)
# Create space element of ground truth
orig = space.element(image.copy())
# Add noise and convert to space element
noisy = orig + 0.1 * odl.phantom.white_noise(space)
# Gradient operator
gradient = odl.Gradient(space, method='forward')
# regularization parameter
reg_param = 0.3
# l2-squared data matching, weighted with 1/(2 * reg_param) so that the
# objective below is the ROF objective scaled by 1/reg_param
factr = 0.5 / reg_param
l2_norm = factr * odl.solvers.L2NormSquared(space).translated(noisy)
# Isotropic TV-regularization: l1-norm of grad(x)
l1_norm = odl.solvers.GroupL1Norm(gradient.range, 2)
# characteristic function of the non-negativity constraint x >= 0
char_fun = odl.solvers.IndicatorNonnegativity(space)
# define objective: data fit + TV + non-negativity indicator
obj = l2_norm + l1_norm * gradient + char_fun
# strong convexity of "g"
strong_convexity = 1 / reg_param
# define callback to store function values
class CallbackStore(odl.solvers.util.callback.Callback):
    """Callback recording objective values of plain and ergodic iterates.

    After each iteration, the module-level objective ``obj`` is evaluated
    at the current iterate and at the running (ergodic) average of all
    iterates seen so far.
    """

    def __init__(self):
        # All mutable state lives in ``reset`` so that a freshly created
        # and a reset callback are guaranteed to be in the same state
        # (previously the two methods duplicated the initialization).
        self.reset()

    def __call__(self, x):
        self.iteration_count += 1
        k = self.iteration_count
        self.iteration_counts.append(self.iteration_count)

        # Running average: avg_k = ((k - 1) * avg_{k-1} + x) / k
        self.ergodic_iterate = (k - 1) / k * self.ergodic_iterate + 1 / k * x
        self.obj_function_values.append(obj(x))
        self.obj_function_values_ergodic.append(obj(self.ergodic_iterate))

    def reset(self):
        # number of iterations seen so far
        self.iteration_count = 0
        # iteration indices 1, 2, ... (x axis for the convergence plots)
        self.iteration_counts = []
        # running average of all iterates (0 acts as neutral start value)
        self.ergodic_iterate = 0
        # objective values of the plain resp. ergodic iterates
        self.obj_function_values = []
        self.obj_function_values_ergodic = []
# Composed callback: index 0 prints the iteration, index 1 stores values
callback = (odl.solvers.CallbackPrintIteration() & CallbackStore())
# number of iterations
niter = 500
# %% Run Algorithms
# --- Algorithm 1 --- #
# Operator assignment
op = odl.BroadcastOperator(odl.IdentityOperator(space), gradient)
# Make separable sum of functionals, order must correspond to the operator K
g = odl.solvers.SeparableSum(l2_norm, l1_norm)
# Non-negativity constraint
f = char_fun
# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op, xstart=noisy)
tau = 1.0 / op_norm  # Step size for the primal variable
sigma = 1.0 / op_norm  # Step size for the dual variable
# Starting point
x_start = op.domain.zero()
# Run algorithm 1
x_alg1 = x_start.copy()
callback.reset()
odl.solvers.pdhg(x_alg1, f, g, op, niter=niter, tau=tau, sigma=sigma,
                 callback=callback)
# callbacks[1] is the CallbackStore part of the composed callback
obj_alg1 = callback.callbacks[1].obj_function_values
obj_ergodic_alg1 = callback.callbacks[1].obj_function_values_ergodic
# --- algorithm 2 and 3 --- #
# Operator assignment
op = gradient
# Assign functional f
g = l1_norm
# Create new functional that combines data fit and characteristic function
f = odl.solvers.FunctionalQuadraticPerturb(char_fun, factr, -2 * factr * noisy)
# The operator norm of the gradient with forward differences is well-known
op_norm = np.sqrt(8) + 1e-4
tau = 1.0 / op_norm  # Step size for the primal variable
sigma = 1.0 / op_norm  # Step size for the dual variable
# Run algorithms 2 and 3
x_alg2 = x_start.copy()
callback.reset()
odl.solvers.pdhg(x_alg2, f, g, op, niter=niter, tau=tau, sigma=sigma,
                 gamma_primal=0, callback=callback)
obj_alg2 = callback.callbacks[1].obj_function_values
obj_ergodic_alg2 = callback.callbacks[1].obj_function_values_ergodic
# Algorithm 3 exploits the strong convexity via acceleration
x_alg3 = x_start.copy()
callback.reset()
odl.solvers.pdhg(x_alg3, f, g, op, niter=niter, tau=tau, sigma=sigma,
                 gamma_primal=strong_convexity, callback=callback)
obj_alg3 = callback.callbacks[1].obj_function_values
obj_ergodic_alg3 = callback.callbacks[1].obj_function_values_ergodic
# %% Display results
# show images
plt.figure(0)
ax1 = plt.subplot(231)
ax1.imshow(orig, clim=[0, 1], cmap='gray')
ax1.title.set_text('Original Image')
ax2 = plt.subplot(232)
ax2.imshow(noisy, clim=[0, 1], cmap='gray')
ax2.title.set_text('Noisy Image')
ax3 = plt.subplot(234)
ax3.imshow(x_alg1, clim=[0, 1], cmap='gray')
ax3.title.set_text('Algo 1')
ax4 = plt.subplot(235)
ax4.imshow(x_alg2, clim=[0, 1], cmap='gray')
ax4.title.set_text('Algo 2')
ax5 = plt.subplot(236)
ax5.imshow(x_alg3, clim=[0, 1], cmap='gray')
ax5.title.set_text('Algo 3')
# show function values
i = np.array(callback.callbacks[1].iteration_counts)
plt.figure(1)
plt.clf()
plt.loglog(i, obj_alg1, label='Algo 1')
plt.loglog(i, obj_alg2, label='Algo 2')
plt.loglog(i, obj_alg3, label='Algo 3')
plt.title('Function Values')
plt.legend()
# show convergence rates
plt.figure(2)
plt.clf()
# best objective value over all three runs (list concatenation, not sum)
obj_opt = min(obj_alg1 + obj_alg2 + obj_alg3)
def rel_fun(x):
    """Relative objective values, normalized to start at 1."""
    return (np.array(x) - obj_opt) / (x[0] - obj_opt)
plt.loglog(i, rel_fun(obj_alg1), label='Algo 1')
plt.loglog(i, rel_fun(obj_alg2), label='Algo 2')
plt.loglog(i, rel_fun(obj_alg3), label='Algo 3')
plt.loglog(i[1:], 1. / i[1:], '--', label=r'$1/k$')
plt.loglog(i[1:], 1. / i[1:]**2, ':', label=r'$1/k^2$')
plt.title('Relative Function Values')
plt.legend()
# show ergodic convergence rates
plt.figure(3)
plt.clf()
plt.loglog(i, rel_fun(obj_ergodic_alg1), label='Algo 1')
plt.loglog(i, rel_fun(obj_ergodic_alg2), label='Algo 2')
plt.loglog(i[1:], 4. / i[1:], '--', label=r'$O(1/k)$')
plt.title('Relative Ergodic Function Values')
plt.legend()
| mpl-2.0 | c65c1e53d7f9fb19698795120eb82348 | 27.903226 | 79 | 0.690848 | 2.818876 | false | false | false | false |
odlgroup/odl | examples/solvers/kaczmarz_tomography.py | 2 | 2389 | """Tomography using the `kaczmarz` solver.
Solves the inverse problem
A(x) = g
Where ``A`` is a fan (cone) beam forward projector, ``x`` the result and
``g`` is given data.
In order to solve this using `kaczmarz`'s method, the operator is split into
several sub-operators (each representing a subset of the angles and detector
points). This allows a faster solution.
"""
import odl
# --- Set up the forward operator (ray transform) --- #
# Reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 128 samples per dimension.
space = odl.uniform_discr(
    min_pt=[-20, -20], max_pt=[20, 20], shape=[128, 128], dtype='float32')
# Make a parallel beam geometry with flat detector
geometry = odl.tomo.parallel_beam_geometry(space)
# Here we split the geometry according to both angular subsets and
# detector subsets.
# For practical applications these choices should be fine tuned,
# these values are selected to give an illustrative visualization.
split = 'interlaced'
if split == 'block':
    # Split the data into blocks:
    # 111 222 333
    n = 20
    ns = geometry.angles.size // n
    ray_trafos = [odl.tomo.RayTransform(space, geometry[i * ns:(i + 1) * ns])
                  for i in range(n)]
elif split == 'interlaced':
    # Split the data into slices:
    # 123 123 123
    n = 20
    ray_trafos = [odl.tomo.RayTransform(space, geometry[i::n])
                  for i in range(n)]
# Create one large ray transform from components
ray_trafo = odl.BroadcastOperator(*ray_trafos)
# --- Generate artificial data --- #
# Create phantom
phantom = odl.phantom.shepp_logan(space, modified=True)
# Create (noise-free) sinogram of the forward projected phantom
data = ray_trafo(phantom)
# Compute steplength: scale by the number of sub-operators and the
# inverse squared norm of the full operator
omega = n * odl.power_method_opnorm(ray_trafo) ** (-2)
# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow())
# Choose a starting point
x = space.zero()
# Run the algorithm, call the callback in each iteration for visualization.
# Note that using only 5 iterations still gives a decent reconstruction.
odl.solvers.kaczmarz(
    ray_trafos, x, data, niter=5, omega=omega,
    callback=callback, callback_loop='inner')
# Display images
phantom.show(title='Original Image')
x.show(title='Reconstructed Image', force_show=True)
| mpl-2.0 | b97d4110e37f7dfb0b12d74b1eec4c96 | 28.134146 | 77 | 0.704479 | 3.374294 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/eroprofile.py | 60 | 3218 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
unescapeHTML
)
# Extractor for single videos on eroprofile.com; supports optional login.
class EroProfileIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?eroprofile\.com/m/videos/view/(?P<id>[^/]+)'
    _LOGIN_URL = 'http://www.eroprofile.com/auth/auth.php?'
    _NETRC_MACHINE = 'eroprofile'
    _TESTS = [{
        'url': 'http://www.eroprofile.com/m/videos/view/sexy-babe-softcore',
        'md5': 'c26f351332edf23e1ea28ce9ec9de32f',
        'info_dict': {
            'id': '3733775',
            'display_id': 'sexy-babe-softcore',
            'ext': 'm4v',
            'title': 'sexy babe softcore',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file',
        'md5': '1baa9602ede46ce904c431f5418d8916',
        'info_dict': {
            'id': '1133519',
            'ext': 'm4v',
            'title': 'Try It On Pee_cut_2.wmv - 4shared.com - file sharing - download movie file',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'skip': 'Requires login',
    }]
    def _login(self):
        # Credentials come from the command line or the .netrc machine
        # 'eroprofile'; without them, continue anonymously.
        (username, password) = self._get_login_info()
        if username is None:
            return
        # The auth endpoint takes the credentials as GET query parameters.
        query = compat_urllib_parse_urlencode({
            'username': username,
            'password': password,
            'url': 'http://www.eroprofile.com/',
        })
        login_url = self._LOGIN_URL + query
        login_page = self._download_webpage(login_url, None, False)
        m = re.search(r'Your username or password was incorrect\.', login_page)
        if m:
            raise ExtractorError(
                'Wrong username and/or password.', expected=True)
        self.report_login()
        # Fetch the redirect script referenced by the login page; this
        # appears to be required to complete the login session.
        redirect_url = self._search_regex(
            r'<script[^>]+?src="([^"]+)"', login_page, 'login redirect url')
        self._download_webpage(redirect_url, None, False)
    def _real_initialize(self):
        self._login()
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        m = re.search(r'You must be logged in to view this video\.', webpage)
        if m:
            self.raise_login_required('This video requires login')
        # The numeric video id differs from the slug used in the URL.
        video_id = self._search_regex(
            [r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
            webpage, 'video id', default=None)
        video_url = unescapeHTML(self._search_regex(
            r'<source src="([^"]+)', webpage, 'video url'))
        title = self._html_search_regex(
            r'Title:</th><td>([^<]+)</td>', webpage, 'title')
        thumbnail = self._search_regex(
            r'onclick="showVideoPlayer\(\)"><img src="([^"]+)',
            webpage, 'thumbnail', fatal=False)
        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'age_limit': 18,
        }
| unlicense | 4507f96378d0cb601810b6812b2b56c0 | 32.873684 | 126 | 0.538222 | 3.555801 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/tv5unis.py | 5 | 4090 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
smuggle_url,
try_get,
)
# Shared extraction logic for tv5unis.ca; subclasses supply the GraphQL
# query name (_GQL_QUERY_NAME) and its argument string (_gql_args).
class TV5UnisBaseIE(InfoExtractor):
    # Content is geo-restricted to Canada.
    _GEO_COUNTRIES = ['CA']
    def _real_extract(self, url):
        groups = re.match(self._VALID_URL, url).groups()
        # Product metadata comes from the site's GraphQL API.
        product = self._download_json(
            'https://api.tv5unis.ca/graphql', groups[0], query={
                'query': '''{
  %s(%s) {
    collection {
      title
    }
    episodeNumber
    rating {
      name
    }
    seasonNumber
    tags
    title
    videoElement {
      ... on Video {
        mediaId
      }
    }
  }
}''' % (self._GQL_QUERY_NAME, self._gql_args(groups)),
            })['data'][self._GQL_QUERY_NAME]
        media_id = product['videoElement']['mediaId']
        # Hand the media extraction off to the Limelight extractor and pass
        # the geo countries along via the smuggled URL.
        return {
            '_type': 'url_transparent',
            'id': media_id,
            'title': product.get('title'),
            'url': smuggle_url('limelight:media:' + media_id, {'geo_countries': self._GEO_COUNTRIES}),
            'age_limit': parse_age_limit(try_get(product, lambda x: x['rating']['name'])),
            'tags': product.get('tags'),
            'series': try_get(product, lambda x: x['collection']['title']),
            'season_number': int_or_none(product.get('seasonNumber')),
            'episode_number': int_or_none(product.get('episodeNumber')),
            'ie_key': 'LimelightMedia',
        }
# Single videos addressed by their numeric id.
class TV5UnisVideoIE(TV5UnisBaseIE):
    IE_NAME = 'tv5unis:video'
    _VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.tv5unis.ca/videos/bande-annonces/71843',
        'md5': '3d794164928bda97fb87a17e89923d9b',
        'info_dict': {
            'id': 'a883684aecb2486cad9bdc7bbe17f861',
            'ext': 'mp4',
            'title': 'Watatatow',
            'duration': 10.01,
        }
    }
    _GQL_QUERY_NAME = 'productById'
    @staticmethod
    def _gql_args(groups):
        # ``groups`` is the 1-tuple of regex captures (just the numeric id),
        # which '%s' formatting unpacks directly.
        return 'id: %s' % groups
# Series episodes and movies addressed by slug (optionally season/episode).
class TV5UnisIE(TV5UnisBaseIE):
    IE_NAME = 'tv5unis'
    _VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/(?P<id>[^/]+)(?:/saisons/(?P<season_number>\d+)/episodes/(?P<episode_number>\d+))?/?(?:[?#&]|$)'
    _TESTS = [{
        'url': 'https://www.tv5unis.ca/videos/watatatow/saisons/6/episodes/1',
        'md5': 'a479907d2e531a73e1f8dc48d6388d02',
        'info_dict': {
            'id': 'e5ee23a586c44612a56aad61accf16ef',
            'ext': 'mp4',
            'title': 'Je ne peux pas lui résister',
            'description': "Atys, le nouveau concierge de l'école, a réussi à ébranler la confiance de Mado en affirmant qu\'une médaille, ce n'est que du métal. Comme Mado essaie de lui prouver que ses valeurs sont solides, il veut la mettre à l'épreuve...",
            'subtitles': {
                'fr': 'count:1',
            },
            'duration': 1370,
            'age_limit': 8,
            'tags': 'count:3',
            'series': 'Watatatow',
            'season_number': 6,
            'episode_number': 1,
        },
    }, {
        'url': 'https://www.tv5unis.ca/videos/le-voyage-de-fanny',
        'md5': '9ca80ebb575c681d10cae1adff3d4774',
        'info_dict': {
            'id': '726188eefe094d8faefb13381d42bc06',
            'ext': 'mp4',
            'title': 'Le voyage de Fanny',
            'description': "Fanny, 12 ans, cachée dans un foyer loin de ses parents, s'occupe de ses deux soeurs. Devant fuir, Fanny prend la tête d'un groupe de huit enfants et s'engage dans un dangereux périple à travers la France occupée pour rejoindre la frontière suisse.",
            'subtitles': {
                'fr': 'count:1',
            },
            'duration': 5587.034,
            'tags': 'count:4',
        },
    }]
    _GQL_QUERY_NAME = 'productByRootProductSlug'
    @staticmethod
    def _gql_args(groups):
        # ``groups`` is (slug, season_number, episode_number); the last two
        # are None for root product pages without /saisons/.../episodes/...
        args = 'rootProductSlug: "%s"' % groups[0]
        if groups[1]:
            args += ', seasonNumber: %s, episodeNumber: %s' % groups[1:]
        return args
| unlicense | b7f74821857740dc88367348e351ad45 | 32.677686 | 278 | 0.544049 | 3.031994 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/esri.py | 64 | 2628 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
parse_filesize,
unified_strdate,
)
class EsriVideoIE(InfoExtractor):
    """Extractor for videos hosted on video.esri.com."""

    _VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications',
        'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc',
        'info_dict': {
            'id': '1124',
            'ext': 'mp4',
            'title': 'ArcGIS Online - Developing Applications',
            'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 185,
            'upload_date': '20120419',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Each <li> on the page lists one resolution together with the
        # download links available for it.
        formats = []
        for width, height, links_html in re.findall(
                r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage):
            pixel_width = int(width)
            pixel_height = int(height)
            for href, ext, filesize in re.findall(
                    r'<a[^>]+href="([^"]+)">([^<]+) \(([^<]+)\)</a>', links_html):
                ext = ext.lower()
                formats.append({
                    'url': compat_urlparse.urljoin(url, href),
                    'ext': ext,
                    'format_id': '%s-%s' % (ext, height),
                    'width': pixel_width,
                    'height': pixel_height,
                    'filesize_approx': parse_filesize(filesize),
                })
        self._sort_formats(formats)

        info = {
            'id': video_id,
            'title': self._html_search_meta('title', webpage, 'title'),
            'description': self._html_search_meta(
                'description', webpage, 'description', fatal=False),
            'duration': int_or_none(self._search_regex(
                [r'var\s+videoSeconds\s*=\s*(\d+)', r"'duration'\s*:\s*(\d+)"],
                webpage, 'duration', fatal=False)),
            'upload_date': unified_strdate(self._html_search_meta(
                'last-modified', webpage, 'upload date', fatal=False)),
            'formats': formats,
        }

        thumbnail = self._html_search_meta(
            'thumbnail', webpage, 'thumbnail', fatal=False)
        if thumbnail:
            # Request the large variant instead of the small/thumb one.
            thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail)
        info['thumbnail'] = thumbnail

        return info
| unlicense | 6c148ae200411ed468612e70aa79b981 | 34.513514 | 105 | 0.523973 | 3.797688 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/dtube.py | 20 | 2798 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from socket import timeout
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
# Extractor for d.tube, whose video metadata lives on the Steem blockchain.
class DTubeIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?d\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'
    _TEST = {
        'url': 'https://d.tube/#!/v/broncnutz/x380jtr1',
        'md5': '9f29088fa08d699a7565ee983f56a06e',
        'info_dict': {
            'id': 'x380jtr1',
            'ext': 'mp4',
            'title': 'Lefty 3-Rings is Back Baby!! NCAA Picks',
            'description': 'md5:60be222088183be3a42f196f34235776',
            'uploader_id': 'broncnutz',
            'upload_date': '20190107',
            'timestamp': 1546854054,
        },
        'params': {
            'format': '480p',
        },
    }
    def _real_extract(self, url):
        uploader_id, video_id = re.match(self._VALID_URL, url).groups()
        # Fetch the blockchain post through the Steem 'get_content' RPC.
        result = self._download_json('https://api.steemit.com/', video_id, data=json.dumps({
            'jsonrpc': '2.0',
            'method': 'get_content',
            'params': [uploader_id, video_id],
        }).encode())['result']
        # json_metadata is a JSON string embedded in the post.
        metadata = json.loads(result['json_metadata'])
        video = metadata['video']
        content = video['content']
        info = video.get('info', {})
        title = info.get('title') or result['title']
        def canonical_url(h):
            # Turn an IPFS hash into a gateway URL; tolerate missing hashes.
            if not h:
                return None
            return 'https://video.dtube.top/ipfs/' + h
        formats = []
        for q in ('240', '480', '720', '1080', ''):
            # Hash keys look like 'video480hash'; '' selects 'videohash',
            # i.e. the source file.
            video_url = canonical_url(content.get('video%shash' % q))
            if not video_url:
                continue
            format_id = (q + 'p') if q else 'Source'
            # IPFS-hosted files may be unreachable, so probe each URL with
            # a 5-second timeout and skip the ones that time out.
            try:
                self.to_screen('%s: Checking %s video format URL' % (video_id, format_id))
                self._downloader._opener.open(video_url, timeout=5).close()
            except timeout:
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, format_id))
                continue
            formats.append({
                'format_id': format_id,
                'url': video_url,
                'height': int_or_none(q),
                'ext': 'mp4',
            })
        return {
            'id': video_id,
            'title': title,
            'description': content.get('description'),
            'thumbnail': canonical_url(info.get('snaphash')),
            'tags': content.get('tags') or metadata.get('tags'),
            'duration': info.get('duration'),
            'formats': formats,
            'timestamp': parse_iso8601(result.get('created')),
            'uploader_id': uploader_id,
        }
| unlicense | 2f5017e4846e6976e5aeafb8a0734a57 | 32.710843 | 105 | 0.498213 | 3.582586 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/zapiks.py | 12 | 3832 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
xpath_with_ns,
xpath_text,
int_or_none,
)
# Extractor for zapiks.fr/.com; formats come from a JWPlayer RSS playlist.
class ZapiksIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?zapiks\.(?:fr|com)/(?:(?:[a-z]{2}/)?(?P<display_id>.+?)\.html|index\.php\?.*\bmedia_id=(?P<id>\d+))'
    _TESTS = [
        {
            'url': 'http://www.zapiks.fr/ep2s3-bon-appetit-eh-be-viva.html',
            'md5': 'aeb3c473b2d564b2d46d664d28d5f050',
            'info_dict': {
                'id': '80798',
                'display_id': 'ep2s3-bon-appetit-eh-be-viva',
                'ext': 'mp4',
                'title': 'EP2S3 - Bon Appétit - Eh bé viva les pyrénées con!',
                'description': 'md5:7054d6f6f620c6519be1fe710d4da847',
                'thumbnail': r're:^https?://.*\.jpg$',
                'duration': 528,
                'timestamp': 1359044972,
                'upload_date': '20130124',
                'view_count': int,
            },
        },
        {
            'url': 'http://www.zapiks.com/ep3s5-bon-appetit-baqueira-m-1.html',
            'only_matching': True,
        },
        {
            'url': 'http://www.zapiks.com/nl/ep3s5-bon-appetit-baqueira-m-1.html',
            'only_matching': True,
        },
        {
            'url': 'http://www.zapiks.fr/index.php?action=playerIframe&media_id=118046&width=640&height=360&autoStart=false&language=fr',
            'only_matching': True,
        },
    ]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id') or video_id
        webpage = self._download_webpage(url, display_id)
        # Page URLs carry only the slug; read the numeric id from markup.
        if not video_id:
            video_id = self._search_regex(
                r'data-media-id="(\d+)"', webpage, 'video id')
        # The playlist endpoint returns an RSS document with JWPlayer
        # extensions describing the available sources.
        playlist = self._download_xml(
            'http://www.zapiks.fr/view/index.php?action=playlist&media_id=%s&lang=en' % video_id,
            display_id)
        NS_MAP = {
            'jwplayer': 'http://rss.jwpcdn.com/'
        }
        def ns(path):
            # Expand the 'jwplayer:' prefix into the full XML namespace.
            return xpath_with_ns(path, NS_MAP)
        item = playlist.find('./channel/item')
        title = xpath_text(item, 'title', 'title') or self._og_search_title(webpage)
        description = self._og_search_description(webpage, default=None)
        thumbnail = xpath_text(
            item, ns('./jwplayer:image'), 'thumbnail') or self._og_search_thumbnail(webpage, default=None)
        duration = parse_duration(self._html_search_meta(
            'duration', webpage, 'duration', default=None))
        timestamp = parse_iso8601(self._html_search_meta(
            'uploadDate', webpage, 'upload date', default=None), ' ')
        view_count = int_or_none(self._search_regex(
            r'UserPlays:(\d+)', webpage, 'view count', default=None))
        comment_count = int_or_none(self._search_regex(
            r'UserComments:(\d+)', webpage, 'comment count', default=None))
        formats = []
        for source in item.findall(ns('./jwplayer:source')):
            format_id = source.attrib['label']
            f = {
                'url': source.attrib['file'],
                'format_id': format_id,
            }
            # Labels like '720p' carry the vertical resolution.
            m = re.search(r'^(?P<height>\d+)[pP]', format_id)
            if m:
                f['height'] = int(m.group('height'))
            formats.append(f)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'comment_count': comment_count,
            'formats': formats,
        }
| unlicense | 61da1a2a5078f4415d9ed4aa7f5929f6 | 34.119266 | 157 | 0.522466 | 3.464253 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/udemy.py | 17 | 19413 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_kwargs,
compat_str,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
determine_ext,
extract_attributes,
ExtractorError,
float_or_none,
int_or_none,
js_to_json,
sanitized_Request,
try_get,
unescapeHTML,
url_or_none,
urlencode_postdata,
)
class UdemyIE(InfoExtractor):
    """Extractor for single udemy.com lectures.

    Most content requires an account: credentials are taken from .netrc or
    --username/--password (machine "udemy"), free courses are enrolled
    automatically on first access, and lecture media is then fetched
    through the api-2.0 subscribed-courses endpoint.
    """
    IE_NAME = 'udemy'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:[^/]+\.)?udemy\.com/
                        (?:
                            [^#]+\#/lecture/|
                            lecture/view/?\?lectureId=|
                            [^/]+/learn/v4/t/lecture/
                        )
                        (?P<id>\d+)
                    '''
    _LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
    _ORIGIN_URL = 'https://www.udemy.com'
    _NETRC_MACHINE = 'udemy'

    _TESTS = [{
        'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757',
        'md5': '98eda5b657e752cf945d8445e261b5c5',
        'info_dict': {
            'id': '160614',
            'ext': 'mp4',
            'title': 'Introduction and Installation',
            'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876',
            'duration': 579.29,
        },
        'skip': 'Requires udemy account credentials',
    }, {
        # new URL schema
        'url': 'https://www.udemy.com/electric-bass-right-from-the-start/learn/v4/t/lecture/4580906',
        'only_matching': True,
    }, {
        # no url in outputs format entry
        'url': 'https://www.udemy.com/learn-web-development-complete-step-by-step-guide-to-success/learn/v4/t/lecture/4125812',
        'only_matching': True,
    }, {
        # only outputs rendition
        'url': 'https://www.udemy.com/how-you-can-help-your-local-community-5-amazing-examples/learn/v4/t/lecture/3225750?start=0',
        'only_matching': True,
    }, {
        'url': 'https://wipro.udemy.com/java-tutorial/#/lecture/172757',
        'only_matching': True,
    }]

    def _extract_course_info(self, webpage, video_id):
        # Returns (course_id, course_title).  Course metadata is embedded
        # in an Angular ng-init attribute; plain data attributes and a JSON
        # "courseId" key serve as fallbacks for the id.
        course = self._parse_json(
            unescapeHTML(self._search_regex(
                r'ng-init=["\'].*\bcourse=({.+?})[;"\']',
                webpage, 'course', default='{}')),
            video_id, fatal=False) or {}
        course_id = course.get('id') or self._search_regex(
            [
                r'data-course-id=["\'](\d+)',
                r'"courseId"\s*:\s*(\d+)'
            ], webpage, 'course id')
        return course_id, course.get('title')

    def _enroll_course(self, base_url, webpage, course_id):
        # Enroll in a free course; raise a friendly error for paid ones.
        def combine_url(base_url, url):
            return compat_urlparse.urljoin(base_url, url) if not url.startswith('http') else url

        # A checkout/cart link on the page means the course is paid.
        checkout_url = unescapeHTML(self._search_regex(
            r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/(?:payment|cart)/checkout/.+?)\1',
            webpage, 'checkout url', group='url', default=None))
        if checkout_url:
            raise ExtractorError(
                'Course %s is not free. You have to pay for it before you can download. '
                'Use this URL to confirm purchase: %s'
                % (course_id, combine_url(base_url, checkout_url)),
                expected=True)

        enroll_url = unescapeHTML(self._search_regex(
            r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/course/subscribe/.+?)\1',
            webpage, 'enroll url', group='url', default=None))
        if enroll_url:
            webpage = self._download_webpage(
                combine_url(base_url, enroll_url),
                course_id, 'Enrolling in the course',
                headers={'Referer': base_url})
            if '>You have enrolled in' in webpage:
                self.to_screen('%s: Successfully enrolled in the course' % course_id)

    def _download_lecture(self, course_id, lecture_id):
        # Fetch lecture JSON, restricted to the fields the extractor uses.
        return self._download_json(
            'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?'
            % (course_id, lecture_id),
            lecture_id, 'Downloading lecture JSON', query={
                'fields[lecture]': 'title,description,view_html,asset',
                'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,captions,data',
            })

    def _handle_error(self, response):
        # Convert an API-level error payload into an ExtractorError.
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message'))
            error_data = error.get('data')
            if error_data:
                error_str += ' - %s' % error_data.get('formErrors')
            raise ExtractorError(error_str, expected=True)

    def _download_webpage_handle(self, *args, **kwargs):
        # Force a desktop browser User-Agent: Udemy serves bot checks to
        # the default client, and detect the PerimeterX CAPTCHA page so we
        # can give actionable advice instead of a parse failure.
        headers = kwargs.get('headers', {}).copy()
        headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'
        kwargs['headers'] = headers
        ret = super(UdemyIE, self)._download_webpage_handle(
            *args, **compat_kwargs(kwargs))
        if not ret:
            return ret
        webpage, _ = ret
        if any(p in webpage for p in (
                '>Please verify you are a human',
                'Access to this page has been denied because we believe you are using automation tools to browse the website',
                '"_pxCaptcha"')):
            raise ExtractorError(
                'Udemy asks you to solve a CAPTCHA. Login with browser, '
                'solve CAPTCHA, then export cookies and pass cookie file to '
                'youtube-dl with --cookies.', expected=True)
        return ret

    def _download_json(self, url_or_request, *args, **kwargs):
        # Attach the Udemy auth headers derived from the session cookies
        # before delegating to the base implementation, then surface any
        # API error payload in the response.
        headers = {
            'X-Udemy-Snail-Case': 'true',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for cookie in self._downloader.cookiejar:
            if cookie.name == 'client_id':
                headers['X-Udemy-Client-Id'] = cookie.value
            elif cookie.name == 'access_token':
                headers['X-Udemy-Bearer-Token'] = cookie.value
                headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value

        if isinstance(url_or_request, compat_urllib_request.Request):
            for header, value in headers.items():
                url_or_request.add_header(header, value)
        else:
            url_or_request = sanitized_Request(url_or_request, headers=headers)

        response = super(UdemyIE, self)._download_json(url_or_request, *args, **kwargs)
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

    def _login(self):
        # Log in through the popup form; a no-op when no credentials are
        # configured or the session cookies already authenticate us.
        username, password = self._get_login_info()
        if username is None:
            return

        login_popup = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login popup')

        def is_logged(webpage):
            return any(re.search(p, webpage) for p in (
                r'href=["\'](?:https://www\.udemy\.com)?/user/logout/',
                r'>Logout<'))

        # already logged in
        if is_logged(login_popup):
            return

        login_form = self._form_hidden_inputs('login-form', login_popup)

        login_form.update({
            'email': username,
            'password': password,
        })

        response = self._download_webpage(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata(login_form),
            headers={
                'Referer': self._ORIGIN_URL,
                'Origin': self._ORIGIN_URL,
            })

        if not is_logged(response):
            error = self._html_search_regex(
                r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
                response, 'error message', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')

    def _real_extract(self, url):
        lecture_id = self._match_id(url)

        webpage = self._download_webpage(url, lecture_id)

        course_id, _ = self._extract_course_info(webpage, lecture_id)

        try:
            lecture = self._download_lecture(course_id, lecture_id)
        except ExtractorError as e:
            # Error could possibly mean we are not enrolled in the course
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                self._enroll_course(url, webpage, course_id)
                lecture = self._download_lecture(course_id, lecture_id)
            else:
                raise

        title = lecture['title']
        description = lecture.get('description')

        asset = lecture['asset']

        asset_type = asset.get('asset_type') or asset.get('assetType')
        if asset_type != 'Video':
            raise ExtractorError(
                'Lecture %s is not a video' % lecture_id, expected=True)

        # Some lectures are plain YouTube embeds; hand those off.
        stream_url = asset.get('stream_url') or asset.get('streamUrl')
        if stream_url:
            youtube_url = self._search_regex(
                r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None)
            if youtube_url:
                return self.url_result(youtube_url, 'Youtube')

        video_id = compat_str(asset['id'])
        thumbnail = asset.get('thumbnail_url') or asset.get('thumbnailUrl')
        duration = float_or_none(asset.get('data', {}).get('duration'))

        subtitles = {}
        automatic_captions = {}

        formats = []

        def extract_output_format(src, f_id):
            # Build a format dict from an "outputs" rendition entry.
            return {
                'url': src.get('url'),
                'format_id': '%sp' % (src.get('height') or f_id),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'vbr': int_or_none(src.get('video_bitrate_in_kbps')),
                'vcodec': src.get('video_codec'),
                'fps': int_or_none(src.get('frame_rate')),
                'abr': int_or_none(src.get('audio_bitrate_in_kbps')),
                'acodec': src.get('audio_codec'),
                'asr': int_or_none(src.get('audio_sample_rate')),
                'tbr': int_or_none(src.get('total_bitrate_in_kbps')),
                'filesize': int_or_none(src.get('file_size_in_bytes')),
            }

        outputs = asset.get('data', {}).get('outputs')
        if not isinstance(outputs, dict):
            outputs = {}

        def add_output_format_meta(f, key):
            # Merge richer metadata from the matching outputs entry, with
            # the caller-provided fields taking precedence.
            output = outputs.get(key)
            if isinstance(output, dict):
                output_format = extract_output_format(output, key)
                output_format.update(f)
                return output_format
            return f

        def extract_formats(source_list):
            if not isinstance(source_list, list):
                return
            for source in source_list:
                video_url = url_or_none(source.get('file') or source.get('src'))
                if not video_url:
                    continue
                if source.get('type') == 'application/x-mpegURL' or determine_ext(video_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False))
                    continue
                format_id = source.get('label')
                f = {
                    'url': video_url,
                    'format_id': '%sp' % format_id,
                    'height': int_or_none(format_id),
                }
                if format_id:
                    # Some videos contain additional metadata (e.g.
                    # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208)
                    f = add_output_format_meta(f, format_id)
                formats.append(f)

        def extract_subtitles(track_list):
            # Auto-generated tracks go into automatic_captions, the rest
            # into subtitles.
            if not isinstance(track_list, list):
                return
            for track in track_list:
                if not isinstance(track, dict):
                    continue
                if track.get('kind') != 'captions':
                    continue
                src = url_or_none(track.get('src'))
                if not src:
                    continue
                lang = track.get('language') or track.get(
                    'srclang') or track.get('label')
                sub_dict = automatic_captions if track.get(
                    'autogenerated') is True else subtitles
                sub_dict.setdefault(lang, []).append({
                    'url': src,
                })

        for url_kind in ('download', 'stream'):
            urls = asset.get('%s_urls' % url_kind)
            if isinstance(urls, dict):
                extract_formats(urls.get('Video'))

        captions = asset.get('captions')
        if isinstance(captions, list):
            for cc in captions:
                if not isinstance(cc, dict):
                    continue
                cc_url = url_or_none(cc.get('url'))
                if not cc_url:
                    continue
                lang = try_get(cc, lambda x: x['locale']['locale'], compat_str)
                sub_dict = (automatic_captions if cc.get('source') == 'auto'
                            else subtitles)
                sub_dict.setdefault(lang or 'en', []).append({
                    'url': cc_url,
                })

        view_html = lecture.get('view_html')
        if view_html:
            view_html_urls = set()
            for source in re.findall(r'<source[^>]+>', view_html):
                attributes = extract_attributes(source)
                src = attributes.get('src')
                if not src:
                    continue
                res = attributes.get('data-res')
                height = int_or_none(res)
                # Deduplicate: the same URL may appear for several <source>
                # tags in the embedded player HTML.
                if src in view_html_urls:
                    continue
                view_html_urls.add(src)
                if attributes.get('type') == 'application/x-mpegURL' or determine_ext(src) == 'm3u8':
                    m3u8_formats = self._extract_m3u8_formats(
                        src, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False)
                    # Fill in height/tbr from the HLS variant URL when the
                    # manifest itself did not provide them.
                    for f in m3u8_formats:
                        m = re.search(r'/hls_(?P<height>\d{3,4})_(?P<tbr>\d{2,})/', f['url'])
                        if m:
                            if not f.get('height'):
                                f['height'] = int(m.group('height'))
                            if not f.get('tbr'):
                                f['tbr'] = int(m.group('tbr'))
                    formats.extend(m3u8_formats)
                else:
                    formats.append(add_output_format_meta({
                        'url': src,
                        'format_id': '%dp' % height if height else None,
                        'height': height,
                    }, res))

            # react rendition since 2017.04.15 (see
            # https://github.com/ytdl-org/youtube-dl/issues/12744)
            data = self._parse_json(
                self._search_regex(
                    r'videojs-setup-data=(["\'])(?P<data>{.+?})\1', view_html,
                    'setup data', default='{}', group='data'), video_id,
                transform_source=unescapeHTML, fatal=False)
            if data and isinstance(data, dict):
                extract_formats(data.get('sources'))
                if not duration:
                    duration = int_or_none(data.get('duration'))
                extract_subtitles(data.get('tracks'))

            if not subtitles and not automatic_captions:
                text_tracks = self._parse_json(
                    self._search_regex(
                        r'text-tracks=(["\'])(?P<data>\[.+?\])\1', view_html,
                        'text tracks', default='{}', group='data'), video_id,
                    transform_source=lambda s: js_to_json(unescapeHTML(s)),
                    fatal=False)
                extract_subtitles(text_tracks)

        # Last resort: synthesize formats straight from the outputs map.
        if not formats and outputs:
            for format_id, output in outputs.items():
                f = extract_output_format(output, format_id)
                if f.get('url'):
                    formats.append(f)

        self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
            'automatic_captions': automatic_captions,
        }
class UdemyCourseIE(UdemyIE):
    """Playlist extractor for whole udemy.com courses.

    Enrolls in the course if necessary, then walks the cached curriculum
    and emits one UdemyIE entry per video lecture, annotated with chapter
    information.
    """
    IE_NAME = 'udemy:course'
    _VALID_URL = r'https?://(?:[^/]+\.)?udemy\.com/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.udemy.com/java-tutorial/',
        'only_matching': True,
    }, {
        'url': 'https://wipro.udemy.com/java-tutorial/',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to UdemyIE for URLs that point at a single lecture.
        return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)

    def _real_extract(self, url):
        course_path = self._match_id(url)

        webpage = self._download_webpage(url, course_path)

        course_id, title = self._extract_course_info(webpage, course_path)

        self._enroll_course(url, webpage, course_id)

        response = self._download_json(
            'https://www.udemy.com/api-2.0/courses/%s/cached-subscriber-curriculum-items' % course_id,
            course_id, 'Downloading course curriculum', query={
                'fields[chapter]': 'title,object_index',
                'fields[lecture]': 'title,asset',
                'page_size': '1000',
            })

        entries = []
        chapter, chapter_number = [None] * 2
        # The curriculum is a flat list in which each chapter item precedes
        # the lectures it contains, so track the current chapter as we go.
        for entry in response['results']:
            clazz = entry.get('_class')
            if clazz == 'lecture':
                asset = entry.get('asset')
                if isinstance(asset, dict):
                    asset_type = asset.get('asset_type') or asset.get('assetType')
                    if asset_type != 'Video':
                        continue
                lecture_id = entry.get('id')
                if lecture_id:
                    entry = {
                        '_type': 'url_transparent',
                        'url': 'https://www.udemy.com/%s/learn/v4/t/lecture/%s' % (course_path, entry['id']),
                        'title': entry.get('title'),
                        'ie_key': UdemyIE.ie_key(),
                    }
                    if chapter_number:
                        entry['chapter_number'] = chapter_number
                    if chapter:
                        entry['chapter'] = chapter
                    entries.append(entry)
            elif clazz == 'chapter':
                chapter_number = entry.get('object_index')
                chapter = entry.get('title')

        return self.playlist_result(entries, course_id, title)
| unlicense | af2a196321d2b1e334d708c53563a697 | 39.359667 | 149 | 0.506362 | 3.959413 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/theplatform.py | 5 | 17541 | # coding: utf-8
from __future__ import unicode_literals
import re
import time
import hmac
import binascii
import hashlib
from .once import OnceIE
from .adobepass import AdobePassIE
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
unsmuggle_url,
update_url_query,
xpath_with_ns,
mimetype2ext,
find_xpath_attr,
)
# Default namespace of theplatform's SMIL manifests.
default_ns = 'http://www.w3.org/2005/SMIL21/Language'


def _x(p):
    """Qualify XPath *p* with the default SMIL namespace.

    Written as a def rather than a lambda assignment (PEP 8 E731) so
    tracebacks show a useful name.
    """
    return xpath_with_ns(p, {'smil': default_ns})
class ThePlatformBaseIE(OnceIE):
    """Shared helpers for theplatform.com based extractors.

    Provides SMIL manifest download/parsing (including geo-restriction and
    unavailability detection) plus metadata extraction from the platform's
    "preview" JSON endpoint.
    """
    _TP_TLD = 'com'

    def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
        # Returns (formats, subtitles) parsed from the SMIL manifest.
        meta = self._download_xml(
            smil_url, video_id, note=note, query={'format': 'SMIL'},
            headers=self.geo_verification_headers())
        # Errors are reported as a dummy <ref> clip pointing at an error
        # file; inspect it before parsing any formats.
        error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
        if error_element is not None:
            exception = find_xpath_attr(
                error_element, _x('.//smil:param'), 'name', 'exception')
            if exception is not None:
                if exception.get('value') == 'GeoLocationBlocked':
                    self.raise_geo_restricted(error_element.attrib['abstract'])
                elif error_element.attrib['src'].startswith(
                        'http://link.theplatform.%s/s/errorFiles/Unavailable.'
                        % self._TP_TLD):
                    raise ExtractorError(
                        error_element.attrib['abstract'], expected=True)

        smil_formats = self._parse_smil_formats(
            meta, smil_url, video_id, namespace=default_ns,
            # the parameters are from syfy.com, other sites may use others,
            # they also work for nbc.com
            f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
            transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))

        formats = []
        for _format in smil_formats:
            if OnceIE.suitable(_format['url']):
                formats.extend(self._extract_once_formats(_format['url']))
            else:
                media_url = _format['url']
                if determine_ext(media_url) == 'm3u8':
                    # Propagate the hdnea2 auth cookie as an hdnea3 query
                    # parameter so the HLS segments remain accessible.
                    hdnea2 = self._get_cookies(media_url).get('hdnea2')
                    if hdnea2:
                        _format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value})

                formats.append(_format)

        subtitles = self._parse_smil_subtitles(meta, default_ns)

        return formats, subtitles

    def _download_theplatform_metadata(self, path, video_id):
        # The "preview" format returns the clip metadata as JSON.
        info_url = 'http://link.theplatform.%s/s/%s?format=preview' % (self._TP_TLD, path)
        return self._download_json(info_url, video_id)

    def _parse_theplatform_metadata(self, info):
        # Convert the preview JSON into youtube-dl's info-dict fields.
        subtitles = {}
        captions = info.get('captions')
        if isinstance(captions, list):
            for caption in captions:
                lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
                subtitles.setdefault(lang, []).append({
                    'ext': mimetype2ext(mime),
                    'url': src,
                })

        duration = info.get('duration')
        tp_chapters = info.get('chapters', [])
        chapters = []
        if tp_chapters:
            def _add_chapter(start_time, end_time):
                # Times arrive in milliseconds; skip entries with either
                # endpoint missing.
                start_time = float_or_none(start_time, 1000)
                end_time = float_or_none(end_time, 1000)
                if start_time is None or end_time is None:
                    return
                chapters.append({
                    'start_time': start_time,
                    'end_time': end_time,
                })

            for chapter in tp_chapters[:-1]:
                _add_chapter(chapter.get('startTime'), chapter.get('endTime'))
            # The last chapter may lack an endTime; fall back to the total
            # duration.
            _add_chapter(tp_chapters[-1].get('startTime'), tp_chapters[-1].get('endTime') or duration)

        return {
            'title': info['title'],
            'subtitles': subtitles,
            'description': info['description'],
            'thumbnail': info['defaultThumbnailUrl'],
            'duration': float_or_none(duration, 1000),
            'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
            'uploader': info.get('billingCode'),
            'chapters': chapters,
        }

    def _extract_theplatform_metadata(self, path, video_id):
        info = self._download_theplatform_metadata(path, video_id)
        return self._parse_theplatform_metadata(info)
class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
    """Extractor for link/player.theplatform.com media and config URLs.

    Also accepts internal ``theplatform:<id>`` URLs.  Supports signed
    release URLs (via smuggled ``sig`` data) and geo-bypass data smuggled
    by embedding extractors.
    """
    _VALID_URL = r'''(?x)
        (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
           (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
         |theplatform:)(?P<id>[^/\?&]+)'''

    _TESTS = [{
        # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
        'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
        'info_dict': {
            'id': 'e9I_cZgTgIPd',
            'ext': 'flv',
            'title': 'Blackberry\'s big, bold Z30',
            'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
            'duration': 247,
            'timestamp': 1383239700,
            'upload_date': '20131031',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }, {
        # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
        'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
        'info_dict': {
            'id': '22d_qsQ6MIRT',
            'ext': 'flv',
            'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
            'title': 'Tesla Model S: A second step towards a cleaner motoring future',
            'timestamp': 1426176191,
            'upload_date': '20150312',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
        'info_dict': {
            'id': 'yMBg9E8KFxZD',
            'ext': 'mp4',
            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
            'uploader': 'EGSM',
        }
    }, {
        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
        'only_matching': True,
    }, {
        'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
        'md5': 'fb96bb3d85118930a5b055783a3bd992',
        'info_dict': {
            'id': 'tdy_or_siri_150701',
            'ext': 'mp4',
            'title': 'iPhone Siri’s sassy response to a math question has people talking',
            'description': 'md5:a565d1deadd5086f3331d57298ec6333',
            'duration': 83.0,
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1435752600,
            'upload_date': '20150701',
            'uploader': 'NBCU-NEWS',
        },
    }, {
        # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
        # geo-restricted (US), HLS encrypted with AES-128
        'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
        'only_matching': True,
    }]

    @classmethod
    def _extract_urls(cls, webpage):
        # Find embedded player URLs in third-party pages: first look at
        # og:video/twitter:player meta tags, then at iframe/script embeds.
        m = re.search(
            r'''(?x)
                    <meta\s+
                        property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+
                        content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2
            ''', webpage)
        if m:
            return [m.group('url')]

        # Are whitespaces ignored in URLs?
        # https://github.com/ytdl-org/youtube-dl/issues/12044
        matches = re.findall(
            r'(?s)<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage)
        if matches:
            return [re.sub(r'\s', '', list(zip(*matches))[1][0])]

    @staticmethod
    def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
        """Return *url* with a theplatform ``sig`` query parameter appended.

        The signature is HMAC-SHA1 over the relative path (and optionally
        the query string, per *include_qs*), valid for *life* seconds;
        *sig_secret* is appended hex-encoded.
        """
        flags = '10' if include_qs else '00'
        expiration_date = '%x' % (int(time.time()) + life)

        # Helper parameters renamed so they do not shadow the builtins
        # `str` and `hex`.
        def str_to_hex(text):
            return binascii.b2a_hex(text.encode('ascii')).decode('ascii')

        def hex_to_bytes(hex_str):
            return binascii.a2b_hex(hex_str.encode('ascii'))

        relative_path = re.match(r'https?://link\.theplatform\.com/s/([^?]+)', url).group(1)
        clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path))
        checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
        sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
        return '%s&sig=%s' % (url, sig)

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        self._initialize_geo_bypass({
            'countries': smuggled_data.get('geo_countries'),
        })

        mobj = re.match(self._VALID_URL, url)
        provider_id = mobj.group('provider_id')
        video_id = mobj.group('id')

        if not provider_id:
            provider_id = 'dJ5BDC'

        path = provider_id + '/'
        if mobj.group('media'):
            path += mobj.group('media')
        path += video_id

        qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        if 'guid' in qs_dict:
            # guid-based player URLs are resolved through the provider's
            # feed; the feed id is buried in one of the page's scripts.
            webpage = self._download_webpage(url, video_id)
            scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
            feed_id = None
            # feed id usually locates in the last script.
            # Seems there's no pattern for the interested script filename, so
            # I try one by one
            for script in reversed(scripts):
                feed_script = self._download_webpage(
                    self._proto_relative_url(script, 'http:'),
                    video_id, 'Downloading feed script')
                feed_id = self._search_regex(
                    r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
                    'default feed id', default=None)
                if feed_id is not None:
                    break
            if feed_id is None:
                raise ExtractorError('Unable to find feed id')
            return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
                provider_id, feed_id, qs_dict['guid'][0]))

        if smuggled_data.get('force_smil_url', False):
            smil_url = url
        # Explicitly specified SMIL (see https://github.com/ytdl-org/youtube-dl/issues/7385)
        elif '/guid/' in url:
            headers = {}
            source_url = smuggled_data.get('source_url')
            if source_url:
                headers['Referer'] = source_url
            request = sanitized_Request(url, headers=headers)
            webpage = self._download_webpage(request, video_id)
            smil_url = self._search_regex(
                r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
                webpage, 'smil url', group='url')
            path = self._search_regex(
                r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
            # Parenthesized fix: the previous
            #   smil_url += '?' if '?' not in smil_url else '&' + 'formats=...'
            # bound the concatenation to the else-branch only, so URLs
            # without an existing query string got a bare '?' and the
            # formats parameter was silently dropped.
            smil_url += ('?' if '?' not in smil_url else '&') + 'formats=m3u,mpeg4'
        elif mobj.group('config'):
            config_url = url + '&form=json'
            config_url = config_url.replace('swf/', 'config/')
            config_url = config_url.replace('onsite/', 'onsite/config/')
            config = self._download_json(config_url, video_id, 'Downloading config')
            if 'releaseUrl' in config:
                release_url = config['releaseUrl']
            else:
                release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
            smil_url = release_url + '&formats=MPEG4&manifest=f4m'
        else:
            smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path

        sig = smuggled_data.get('sig')
        if sig:
            smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])

        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
        self._sort_formats(formats)

        ret = self._extract_theplatform_metadata(path, video_id)
        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': combined_subtitles,
        })

        return ret
class ThePlatformFeedIE(ThePlatformBaseIE):
    """Extractor for feed.theplatform.com JSON feed URLs (byGuid / byId).

    ``_extract_feed_info`` is also reused by site-specific extractors
    (CBS, NBC, ...) that resolve their own ids into theplatform feeds.
    """
    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s'
    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[^&]+))'
    _TESTS = [{
        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
        'md5': '6e32495b5073ab414471b615c5ded394',
        'info_dict': {
            'id': 'n_hardball_5biden_140207',
            'ext': 'mp4',
            'title': 'The Biden factor: will Joe run in 2016?',
            'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20140208',
            'timestamp': 1391824260,
            'duration': 467.0,
            'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
            'uploader': 'NBCU-NEWS',
        },
    }, {
        'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byGuid=nn_netcast_180306.Copy.01',
        'only_matching': True,
    }]

    def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query=None, account_id=None):
        """Build an info dict from the first entry of a feed query.

        *custom_fields*, if given, is a callable mapping the raw feed entry
        to extra info-dict fields.  *asset_types_query* optionally maps an
        asset type to extra query parameters for its SMIL request.
        """
        # `None` sentinel instead of a mutable `{}` default argument; an
        # absent mapping means "no extra per-asset-type parameters".
        if asset_types_query is None:
            asset_types_query = {}
        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
        entry = self._download_json(real_url, video_id)['entries'][0]
        main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl')

        formats = []
        subtitles = {}
        first_video_id = None
        duration = None
        asset_types = []
        for item in entry['media$content']:
            smil_url = item['plfile$url']
            cur_video_id = ThePlatformIE._match_id(smil_url)
            if first_video_id is None:
                first_video_id = cur_video_id
                duration = float_or_none(item.get('plfile$duration'))
            file_asset_types = item.get('plfile$assetTypes') or compat_parse_qs(compat_urllib_parse_urlparse(smil_url).query)['assetTypes']
            # Download each distinct asset type only once even if several
            # media files advertise it.
            for asset_type in file_asset_types:
                if asset_type in asset_types:
                    continue
                asset_types.append(asset_type)
                query = {
                    'mbr': 'true',
                    'formats': item['plfile$format'],
                    'assetTypes': asset_type,
                }
                if asset_type in asset_types_query:
                    query.update(asset_types_query[asset_type])
                cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query(
                    main_smil_url or smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type)
                formats.extend(cur_formats)
                subtitles = self._merge_subtitles(subtitles, cur_subtitles)

        self._sort_formats(formats)

        thumbnails = [{
            'url': thumbnail['plfile$url'],
            'width': int_or_none(thumbnail.get('plfile$width')),
            'height': int_or_none(thumbnail.get('plfile$height')),
        } for thumbnail in entry.get('media$thumbnails', [])]

        timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
        categories = [item['media$name'] for item in entry.get('media$categories', [])]

        ret = self._extract_theplatform_metadata('%s/%s' % (provider_id, first_video_id), video_id)
        subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'categories': categories,
        })
        if custom_fields:
            ret.update(custom_fields(entry))

        return ret

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        provider_id = mobj.group('provider_id')
        feed_id = mobj.group('feed_id')
        filter_query = mobj.group('filter')

        return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)
| unlicense | ee058a1dd651ffa7f7ec88d292f28b19 | 41.364734 | 188 | 0.549803 | 3.48896 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/giantbomb.py | 11 | 3056 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
qualities,
unescapeHTML,
)
class GiantBombIE(InfoExtractor):
    """Extractor for giantbomb.com videos and shows.

    Formats come from the page's data-video JSON; YouTube-hosted videos
    are delegated to the YouTube extractor.
    """
    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/(?:videos|shows)/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
    _TESTS = [{
        'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
        'md5': '132f5a803e7e0ab0e274d84bda1e77ae',
        'info_dict': {
            'id': '2300-9782',
            'display_id': 'quick-look-destiny-the-dark-below',
            'ext': 'mp4',
            'title': 'Quick Look: Destiny: The Dark Below',
            'description': 'md5:0aa3aaf2772a41b91d44c63f30dfad24',
            'duration': 2399,
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'https://www.giantbomb.com/shows/ben-stranding/2970-20212',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        # Player configuration is embedded HTML-escaped in a data-video
        # attribute.
        video = json.loads(unescapeHTML(self._search_regex(
            r'data-video="([^"]+)"', webpage, 'data-video')))

        duration = int_or_none(video.get('lengthSeconds'))

        # Ranking used for format ordering, worst to best.
        quality = qualities([
            'f4m_low', 'progressive_low', 'f4m_high',
            'progressive_high', 'f4m_hd', 'progressive_hd'])

        formats = []
        for format_id, video_url in video['videoStreams'].items():
            if format_id == 'f4m_stream':
                continue
            ext = determine_ext(video_url)
            if ext == 'f4m':
                f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.3.1', display_id)
                if f4m_formats:
                    f4m_formats[0]['quality'] = quality(format_id)
                    formats.extend(f4m_formats)
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, display_id, ext='mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'quality': quality(format_id),
                })

        if not formats:
            # Some videos are hosted on YouTube only.
            youtube_id = video.get('youtubeID')
            if youtube_id:
                return self.url_result(youtube_id, 'Youtube')

        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
| unlicense | 24516d336165d27f9695db422db1a67f | 32.955556 | 108 | 0.525851 | 3.553488 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/svt.py | 1 | 14831 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
dict_get,
int_or_none,
unified_timestamp,
str_or_none,
strip_or_none,
try_get,
)
class SVTBaseIE(InfoExtractor):
    """Shared extraction helpers for all SVT (Sveriges Television) services."""
    # SVT content is generally only playable from Sweden.
    _GEO_COUNTRIES = ['SE']
    def _extract_video(self, video_info, video_id):
        """Build a youtube-dl info dict from SVT's video-info JSON.

        Collects formats from every ``videoReferences`` entry (HLS, HDS,
        DASH or plain URLs), subtitle references, and episode metadata.
        Raises a geo-restriction error when no format could be extracted
        and the rights block marks the video as geo-blocked.
        """
        # Live streams need the non-native m3u8 downloader.
        is_live = dict_get(video_info, ('live', 'simulcast'), default=False)
        m3u8_protocol = 'm3u8' if is_live else 'm3u8_native'
        formats = []
        for vr in video_info['videoReferences']:
            player_type = vr.get('playerType') or vr.get('format')
            vurl = vr['url']
            ext = determine_ext(vurl)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    vurl, video_id,
                    ext='mp4', entry_protocol=m3u8_protocol,
                    m3u8_id=player_type, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    vurl + '?hdcore=3.3.0', video_id,
                    f4m_id=player_type, fatal=False))
            elif ext == 'mpd':
                # Only the HbbTV DASH variant is handled here.
                if player_type == 'dashhbbtv':
                    formats.extend(self._extract_mpd_formats(
                        vurl, video_id, mpd_id=player_type, fatal=False))
            else:
                formats.append({
                    'format_id': player_type,
                    'url': vurl,
                })
        rights = try_get(video_info, lambda x: x['rights'], dict) or {}
        if not formats and rights.get('geoBlockedSweden'):
            self.raise_geo_restricted(
                'This video is only available in Sweden',
                countries=self._GEO_COUNTRIES)
        self._sort_formats(formats)
        subtitles = {}
        subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
        if isinstance(subtitle_references, list):
            for sr in subtitle_references:
                subtitle_url = sr.get('url')
                subtitle_lang = sr.get('language', 'sv')
                if subtitle_url:
                    if determine_ext(subtitle_url) == 'm3u8':
                        # TODO(yan12125): handle WebVTT in m3u8 manifests
                        continue
                    subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})
        title = video_info.get('title')
        series = video_info.get('programTitle')
        season_number = int_or_none(video_info.get('season'))
        episode = video_info.get('episodeTitle')
        episode_number = int_or_none(video_info.get('episodeNumber'))
        timestamp = unified_timestamp(rights.get('validFrom'))
        duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
        age_limit = None
        # The flag may legitimately be False, so False must not be
        # treated the same as missing (hence skip_false_values=False).
        adult = dict_get(
            video_info, ('inappropriateForChildren', 'blockedForChildren'),
            skip_false_values=False)
        if adult is not None:
            age_limit = 18 if adult else 0
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'duration': duration,
            'timestamp': timestamp,
            'age_limit': age_limit,
            'series': series,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'is_live': is_live,
        }
class SVTIE(SVTBaseIE):
    """Extractor for videos embedded through svt.se's legacy widget player."""
    _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.svt.se/wd?widgetId=23991§ionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
        'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
        'info_dict': {
            'id': '2900353',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }
    @staticmethod
    def _extract_url(webpage):
        """Return the first embedded SVT widget URL found in webpage, if any."""
        mobj = re.search(
            r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage)
        if mobj:
            return mobj.group('url')
    def _real_extract(self, url):
        """Fetch the widget JSON and delegate format extraction to the base class."""
        mobj = re.match(self._VALID_URL, url)
        widget_id = mobj.group('widget_id')
        article_id = mobj.group('id')
        info = self._download_json(
            'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
            article_id)
        info_dict = self._extract_video(info['video'], article_id)
        # The article title is more descriptive than the raw video title.
        info_dict['title'] = info['context']['title']
        return info_dict
class SVTPlayBaseIE(SVTBaseIE):
    """Base class for SVT Play extractors; holds the embedded-state regex."""
    # Matches the JSON state blob assigned to root['__svtplay'] in page scripts.
    _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'
class SVTPlayIE(SVTPlayBaseIE):
    """Extractor for SVT Play / Öppet arkiv videos, clips and channels."""
    IE_DESC = 'SVT Play and Öppet arkiv'
    _VALID_URL = r'''(?x)
                    (?:
                        (?:
                            svt:|
                            https?://(?:www\.)?svt\.se/barnkanalen/barnplay/[^/]+/
                        )
                        (?P<svt_id>[^/?#&]+)|
                        https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+)
                    )
                    '''
    _TESTS = [{
        'url': 'https://www.svtplay.se/video/26194546/det-har-ar-himlen',
        'md5': '2382036fd6f8c994856c323fe51c426e',
        'info_dict': {
            'id': 'jNwpV9P',
            'ext': 'mp4',
            'title': 'Det här är himlen',
            'timestamp': 1586044800,
            'upload_date': '20200405',
            'duration': 3515,
            'thumbnail': r're:^https?://(?:.*[\.-]jpg|www.svtstatic.se/image/.*)$',
            'age_limit': 0,
            'subtitles': {
                'sv': [{
                    'ext': 'vtt',
                }]
            },
        },
        'params': {
            'format': 'bestvideo',
            # skip for now due to download test asserts that segment is > 10000 bytes and svt uses
            # init segments that are smaller
            # AssertionError: Expected test_SVTPlay_jNwpV9P.mp4 to be at least 9.77KiB, but it's only 864.00B
            'skip_download': True,
        },
    }, {
        # geo restricted to Sweden
        'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
        'only_matching': True,
    }, {
        'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
        'only_matching': True,
    }, {
        'url': 'https://www.svtplay.se/kanaler/svt1',
        'only_matching': True,
    }, {
        'url': 'svt:1376446-003A',
        'only_matching': True,
    }, {
        'url': 'svt:14278044',
        'only_matching': True,
    }, {
        'url': 'https://www.svt.se/barnkanalen/barnplay/kar/eWv5MLX/',
        'only_matching': True,
    }, {
        'url': 'svt:eWv5MLX',
        'only_matching': True,
    }]
    def _adjust_title(self, info):
        """Mark the title of live streams using the standard live-title helper."""
        if info['is_live']:
            info['title'] = self._live_title(info['title'])
    def _extract_by_video_id(self, video_id, webpage=None):
        """Extract a video directly through the videoplayer API by its SVT id.

        Falls back to episode/series metadata, the og:title of the
        referring page (stripped of the ' | ...' site suffix), and
        finally the raw id when no title is available.
        """
        data = self._download_json(
            'https://api.svt.se/videoplayer-api/video/%s' % video_id,
            video_id, headers=self.geo_verification_headers())
        info_dict = self._extract_video(data, video_id)
        if not info_dict.get('title'):
            title = dict_get(info_dict, ('episode', 'series'))
            if not title and webpage:
                title = re.sub(
                    r'\s*\|\s*.+?$', '', self._og_search_title(webpage))
            if not title:
                title = video_id
            info_dict['title'] = title
        self._adjust_title(info_dict)
        return info_dict
    def _real_extract(self, url):
        """Extract from an svt: id or an svtplay/oppetarkiv page URL.

        Page URLs are resolved through the embedded __svtplay state blob
        when present, otherwise the SVT video id is scraped from the
        markup and handed to the videoplayer API.
        """
        mobj = re.match(self._VALID_URL, url)
        video_id, svt_id = mobj.group('id', 'svt_id')
        if svt_id:
            return self._extract_by_video_id(svt_id)
        webpage = self._download_webpage(url, video_id)
        data = self._parse_json(
            self._search_regex(
                self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
                group='json'),
            video_id, fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)
        if data:
            video_info = try_get(
                data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
                dict)
            if video_info:
                info_dict = self._extract_video(video_info, video_id)
                info_dict.update({
                    'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
                    'thumbnail': thumbnail,
                })
                self._adjust_title(info_dict)
                return info_dict
            svt_id = try_get(
                data, lambda x: x['statistics']['dataLake']['content']['id'],
                compat_str)
        if not svt_id:
            # Several markup variants carry the id; try them in order.
            svt_id = self._search_regex(
                (r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
                 r'["\']videoSvtId["\']\s*:\s*["\']([\da-zA-Z-]+)',
                 r'["\']videoSvtId\\?["\']\s*:\s*\\?["\']([\da-zA-Z-]+)',
                 r'"content"\s*:\s*{.*?"id"\s*:\s*"([\da-zA-Z-]+)"',
                 r'["\']svtId["\']\s*:\s*["\']([\da-zA-Z-]+)',
                 r'["\']svtId\\?["\']\s*:\s*\\?["\']([\da-zA-Z-]+)'),
                webpage, 'video id')
        info_dict = self._extract_by_video_id(svt_id, webpage)
        info_dict['thumbnail'] = thumbnail
        return info_dict
class SVTSeriesIE(SVTPlayBaseIE):
    """Playlist extractor for whole SVT Play series or a single season."""
    # The optional ?tab=... query selects one season of the series.
    _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)(?:.+?\btab=(?P<season_slug>[^&#]+))?'
    _TESTS = [{
        'url': 'https://www.svtplay.se/rederiet',
        'info_dict': {
            'id': '14445680',
            'title': 'Rederiet',
            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
        'playlist_mincount': 318,
    }, {
        'url': 'https://www.svtplay.se/rederiet?tab=season-2-14445680',
        'info_dict': {
            'id': 'season-2-14445680',
            'title': 'Rederiet - Säsong 2',
            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
        'playlist_mincount': 12,
    }]
    @classmethod
    def suitable(cls, url):
        """Defer to the single-video extractors for URLs they already handle."""
        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)
    def _real_extract(self, url):
        """Query SVT's GraphQL API for the series and emit one entry per episode."""
        series_slug, season_id = re.match(self._VALID_URL, url).groups()
        series = self._download_json(
            'https://api.svt.se/contento/graphql', series_slug,
            'Downloading series page', query={
                'query': '''{
  listablesBySlug(slugs: ["%s"]) {
    associatedContent(include: [productionPeriod, season]) {
      items {
        item {
          ... on Episode {
            videoSvtId
          }
        }
      }
      id
      name
    }
    id
    longDescription
    name
    shortDescription
  }
}''' % series_slug,
            })['data']['listablesBySlug'][0]
        season_name = None
        entries = []
        # associatedContent holds one object per season/production period.
        for season in series['associatedContent']:
            if not isinstance(season, dict):
                continue
            if season_id:
                # A specific season was requested; skip all others.
                if season.get('id') != season_id:
                    continue
                season_name = season.get('name')
            items = season.get('items')
            if not isinstance(items, list):
                continue
            for item in items:
                video = item.get('item') or {}
                content_id = video.get('videoSvtId')
                if not content_id or not isinstance(content_id, compat_str):
                    continue
                entries.append(self.url_result(
                    'svt:' + content_id, SVTPlayIE.ie_key(), content_id))
        title = series.get('name')
        season_name = season_name or season_id
        if title and season_name:
            title = '%s - %s' % (title, season_name)
        elif season_id:
            title = season_id
        return self.playlist_result(
            entries, season_id or series.get('id'), title,
            dict_get(series, ('longDescription', 'shortDescription')))
class SVTPageIE(InfoExtractor):
    """Playlist extractor for svt.se article pages with embedded videos."""
    _VALID_URL = r'https?://(?:www\.)?svt\.se/(?P<path>(?:[^/]+/)*(?P<id>[^/?&#]+))'
    _TESTS = [{
        'url': 'https://www.svt.se/sport/ishockey/bakom-masken-lehners-kamp-mot-mental-ohalsa',
        'info_dict': {
            'id': '25298267',
            'title': 'Bakom masken – Lehners kamp mot mental ohälsa',
        },
        'playlist_count': 4,
    }, {
        'url': 'https://www.svt.se/nyheter/utrikes/svenska-andrea-ar-en-mil-fran-branderna-i-kalifornien',
        'info_dict': {
            'id': '24243746',
            'title': 'Svenska Andrea redo att fly sitt hem i Kalifornien',
        },
        'playlist_count': 2,
    }, {
        # only programTitle
        'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
        'info_dict': {
            'id': '8439V2K',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.svt.se/nyheter/lokalt/vast/svt-testar-tar-nagon-upp-skrapet-1',
        'only_matching': True,
    }, {
        'url': 'https://www.svt.se/vader/manadskronikor/maj2018',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        """Defer to the dedicated SVT video extractors for their URLs."""
        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTPageIE, cls).suitable(url)
    def _real_extract(self, url):
        """Fetch article JSON and collect every embedded clip/episode as an entry."""
        path, display_id = re.match(self._VALID_URL, url).groups()
        article = self._download_json(
            'https://api.svt.se/nss-api/page/' + path, display_id,
            query={'q': 'articles'})['articles']['content'][0]
        entries = []
        def _process_content(content):
            # Only video-typed content blocks carry an SVT id to extract.
            if content.get('_type') in ('VIDEOCLIP', 'VIDEOEPISODE'):
                video_id = compat_str(content['image']['svtId'])
                entries.append(self.url_result(
                    'svt:' + video_id, SVTPlayIE.ie_key(), video_id))
        for media in article.get('media', []):
            _process_content(media)
        for obj in article.get('structuredBody', []):
            _process_content(obj.get('content') or {})
        return self.playlist_result(
            entries, str_or_none(article.get('id')),
            strip_or_none(article.get('title')))
| unlicense | 4d1dbe351871c83d7707270b9f255cdd | 34.629808 | 133 | 0.50587 | 3.466324 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/bandcamp.py | 3 | 14363 | # coding: utf-8
from __future__ import unicode_literals
import random
import re
import time
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
KNOWN_EXTENSIONS,
parse_filesize,
str_or_none,
try_get,
update_url_query,
unified_strdate,
unified_timestamp,
url_or_none,
urljoin,
)
class BandcampIE(InfoExtractor):
    """Extractor for individual Bandcamp tracks.

    Bandcamp embeds its player state as JSON in ``data-*`` HTML
    attributes (``data-tralbum``, ``data-embed``, ``data-blob``).
    Streaming formats come from the track info blob; when a free
    download is offered, the higher-quality download formats are
    resolved through the stat-download endpoint.
    """
    _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        'md5': 'c557841d5e50261777a6585648adf439',
        'info_dict': {
            'id': '1812978515',
            'ext': 'mp3',
            'title': "youtube-dl \"'/\\ä↭ - youtube-dl \"'/\\ä↭ - youtube-dl test song \"'/\\ä↭",
            'duration': 9.8485,
            'uploader': 'youtube-dl "\'/\\ä↭',
            'upload_date': '20121129',
            'timestamp': 1354224127,
        },
        '_skip': 'There is a limit of 200 free downloads / month for the test song'
    }, {
        # free download
        'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
        'info_dict': {
            'id': '2650410135',
            'ext': 'aiff',
            'title': 'Ben Prunty - Lanius (Battle)',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Ben Prunty',
            'timestamp': 1396508491,
            'upload_date': '20140403',
            'release_date': '20140403',
            'duration': 260.877,
            'track': 'Lanius (Battle)',
            'track_number': 1,
            'track_id': '2650410135',
            'artist': 'Ben Prunty',
            'album': 'FTL: Advanced Edition Soundtrack',
        },
    }, {
        # no free download, mp3 128
        'url': 'https://relapsealumni.bandcamp.com/track/hail-to-fire',
        'md5': 'fec12ff55e804bb7f7ebeb77a800c8b7',
        'info_dict': {
            'id': '2584466013',
            'ext': 'mp3',
            'title': 'Mastodon - Hail to Fire',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Mastodon',
            'timestamp': 1322005399,
            'upload_date': '20111122',
            'release_date': '20040207',
            'duration': 120.79,
            'track': 'Hail to Fire',
            'track_number': 5,
            'track_id': '2584466013',
            'artist': 'Mastodon',
            'album': 'Call of the Mastodon',
        },
    }]
    def _extract_data_attr(self, webpage, video_id, attr='tralbum', fatal=True):
        """Parse the JSON blob stored in a ``data-<attr>`` HTML attribute.

        Fix: with fatal=False a *missing* attribute now yields an empty
        dict instead of raising. Previously only the JSON-parsing step
        honoured the fatal flag, while the regex search still raised
        when the attribute was absent from the page.
        """
        if fatal:
            data = self._html_search_regex(
                r'data-%s=(["\'])({.+?})\1' % attr, webpage,
                attr + ' data', group=2)
        else:
            data = self._html_search_regex(
                r'data-%s=(["\'])({.+?})\1' % attr, webpage,
                attr + ' data', group=2, default='{}', fatal=False)
        return self._parse_json(data, video_id, fatal=fatal)
    def _real_extract(self, url):
        """Extract a single track, preferring free-download formats when offered."""
        title = self._match_id(url)
        webpage = self._download_webpage(url, title)
        tralbum = self._extract_data_attr(webpage, title)
        thumbnail = self._og_search_thumbnail(webpage)
        track_id = None
        track = None
        track_number = None
        duration = None
        formats = []
        # Streaming formats (e.g. 'mp3-128') come from the trackinfo blob.
        track_info = try_get(tralbum, lambda x: x['trackinfo'][0], dict)
        if track_info:
            file_ = track_info.get('file')
            if isinstance(file_, dict):
                for format_id, format_url in file_.items():
                    if not url_or_none(format_url):
                        continue
                    # Format ids look like '<codec>-<bitrate>'.
                    ext, abr_str = format_id.split('-', 1)
                    formats.append({
                        'format_id': format_id,
                        'url': self._proto_relative_url(format_url, 'http:'),
                        'ext': ext,
                        'vcodec': 'none',
                        'acodec': ext,
                        'abr': int_or_none(abr_str),
                    })
            track = track_info.get('title')
            track_id = str_or_none(
                track_info.get('track_id') or track_info.get('id'))
            track_number = int_or_none(track_info.get('track_num'))
            duration = float_or_none(track_info.get('duration'))
        # data-embed may be absent; tolerate that and fall back to an
        # empty dict so the .get() chains below keep working.
        embed = self._extract_data_attr(webpage, title, 'embed', False) or {}
        current = tralbum.get('current') or {}
        artist = embed.get('artist') or current.get('artist') or tralbum.get('artist')
        timestamp = unified_timestamp(
            current.get('publish_date') or tralbum.get('album_publish_date'))
        download_link = tralbum.get('freeDownloadPage')
        if download_link:
            track_id = compat_str(tralbum['id'])
            download_webpage = self._download_webpage(
                download_link, track_id, 'Downloading free downloads page')
            blob = self._extract_data_attr(download_webpage, track_id, 'blob')
            info = try_get(
                blob, (lambda x: x['digital_items'][0],
                       lambda x: x['download_items'][0]), dict)
            if info:
                downloads = info.get('downloads')
                if isinstance(downloads, dict):
                    if not track:
                        track = info.get('title')
                    if not artist:
                        artist = info.get('artist')
                    if not thumbnail:
                        thumbnail = info.get('thumb_url')
                    # Map human-readable format names to file extensions.
                    download_formats = {}
                    download_formats_list = blob.get('download_formats')
                    if isinstance(download_formats_list, list):
                        for f in blob['download_formats']:
                            name, ext = f.get('name'), f.get('file_extension')
                            if all(isinstance(x, compat_str) for x in (name, ext)):
                                download_formats[name] = ext.strip('.')
                    for format_id, f in downloads.items():
                        format_url = f.get('url')
                        if not format_url:
                            continue
                        # Stat URL generation algorithm is reverse engineered from
                        # download_*_bundle_*.js
                        stat_url = update_url_query(
                            format_url.replace('/download/', '/statdownload/'), {
                                '.rand': int(time.time() * 1000 * random.random()),
                            })
                        format_id = f.get('encoding_name') or format_id
                        stat = self._download_json(
                            stat_url, track_id, 'Downloading %s JSON' % format_id,
                            transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
                            fatal=False)
                        if not stat:
                            continue
                        retry_url = url_or_none(stat.get('retry_url'))
                        if not retry_url:
                            continue
                        formats.append({
                            'url': self._proto_relative_url(retry_url, 'http:'),
                            'ext': download_formats.get(format_id),
                            'format_id': format_id,
                            'format_note': f.get('description'),
                            'filesize': parse_filesize(f.get('size_mb')),
                            'vcodec': 'none',
                        })
        self._sort_formats(formats)
        title = '%s - %s' % (artist, track) if artist else track
        if not duration:
            duration = float_or_none(self._html_search_meta(
                'duration', webpage, default=None))
        return {
            'id': track_id,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': artist,
            'timestamp': timestamp,
            'release_date': unified_strdate(tralbum.get('album_release_date')),
            'duration': duration,
            'track': track,
            'track_number': track_number,
            'track_id': track_id,
            'artist': artist,
            'album': embed.get('album_title'),
            'formats': formats,
        }
class BandcampAlbumIE(BandcampIE):
    """Playlist extractor for Bandcamp albums and artist front pages."""
    IE_NAME = 'Bandcamp:album'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<id>[^/?#&]+))?'
    _TESTS = [{
        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
        'playlist': [
            {
                'md5': '39bc1eded3476e927c724321ddf116cf',
                'info_dict': {
                    'id': '1353101989',
                    'ext': 'mp3',
                    'title': 'Blazo - Intro',
                    'timestamp': 1311756226,
                    'upload_date': '20110727',
                    'uploader': 'Blazo',
                }
            },
            {
                'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                'info_dict': {
                    'id': '38097443',
                    'ext': 'mp3',
                    'title': 'Blazo - Kero One - Keep It Alive (Blazo remix)',
                    'timestamp': 1311757238,
                    'upload_date': '20110727',
                    'uploader': 'Blazo',
                }
            },
        ],
        'info_dict': {
            'title': 'Jazz Format Mixtape vol.1',
            'id': 'jazz-format-mixtape-vol-1',
            'uploader_id': 'blazo',
        },
        'params': {
            'playlistend': 2
        },
        'skip': 'Bandcamp imposes download limits.'
    }, {
        'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
        'info_dict': {
            'title': 'Hierophany of the Open Grave',
            'uploader_id': 'nightbringer',
            'id': 'hierophany-of-the-open-grave',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'title': 'Loom',
            'id': 'dotscale',
            'uploader_id': 'dotscale',
        },
        'playlist_mincount': 7,
    }, {
        # with escaped quote in title
        'url': 'https://jstrecords.bandcamp.com/album/entropy-ep',
        'info_dict': {
            'title': '"Entropy" EP',
            'uploader_id': 'jstrecords',
            'id': 'entropy-ep',
            'description': 'md5:0ff22959c943622972596062f2f366a5',
        },
        'playlist_mincount': 3,
    }, {
        # not all tracks have songs
        'url': 'https://insulters.bandcamp.com/album/we-are-the-plague',
        'info_dict': {
            'id': 'we-are-the-plague',
            'title': 'WE ARE THE PLAGUE',
            'uploader_id': 'insulters',
            'description': 'md5:b3cf845ee41b2b1141dc7bde9237255f',
        },
        'playlist_count': 2,
    }]
    @classmethod
    def suitable(cls, url):
        """Defer to the track and weekly extractors for URLs they handle."""
        return (False
                if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url)
                else super(BandcampAlbumIE, cls).suitable(url))
    def _real_extract(self, url):
        """Build a playlist of track URLs from the album's tralbum blob."""
        uploader_id, album_id = re.match(self._VALID_URL, url).groups()
        playlist_id = album_id or uploader_id
        webpage = self._download_webpage(url, playlist_id)
        tralbum = self._extract_data_attr(webpage, playlist_id)
        track_info = tralbum.get('trackinfo')
        if not track_info:
            raise ExtractorError('The page doesn\'t contain any tracks')
        # Only tracks with duration info have songs
        entries = [
            self.url_result(
                urljoin(url, t['title_link']), BandcampIE.ie_key(),
                str_or_none(t.get('track_id') or t.get('id')), t.get('title'))
            for t in track_info
            if t.get('duration')]
        current = tralbum.get('current') or {}
        return {
            '_type': 'playlist',
            'uploader_id': uploader_id,
            'id': playlist_id,
            'title': current.get('title'),
            'description': current.get('about'),
            'entries': entries,
        }
class BandcampWeeklyIE(BandcampIE):
    """Extractor for Bandcamp Weekly radio show episodes."""
    IE_NAME = 'Bandcamp:weekly'
    _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://bandcamp.com/?show=224',
        'md5': 'b00df799c733cf7e0c567ed187dea0fd',
        'info_dict': {
            'id': '224',
            'ext': 'opus',
            'title': 'BC Weekly April 4th 2017 - Magic Moments',
            'description': 'md5:5d48150916e8e02d030623a48512c874',
            'duration': 5829.77,
            'release_date': '20170404',
            'series': 'Bandcamp Weekly',
            'episode': 'Magic Moments',
            'episode_id': '224',
        },
        'params': {
            'format': 'opus-lo',
        },
    }, {
        'url': 'https://bandcamp.com/?blah/blah@&show=228',
        'only_matching': True
    }]
    def _real_extract(self, url):
        """Extract one Bandcamp Weekly episode from the page's data-blob."""
        show_id = self._match_id(url)
        webpage = self._download_webpage(url, show_id)
        blob = self._extract_data_attr(webpage, show_id, 'blob')
        show = blob['bcw_data'][show_id]
        formats = []
        for stream_id, stream_url in show['audio_stream'].items():
            if not url_or_none(stream_url):
                continue
            # Stream ids embed the codec name, e.g. 'opus-lo' or 'mp3-128'.
            ext = next(
                (known_ext for known_ext in KNOWN_EXTENSIONS
                 if known_ext in stream_id), None)
            formats.append({
                'format_id': stream_id,
                'url': stream_url,
                'ext': ext,
                'vcodec': 'none',
            })
        self._sort_formats(formats)
        # Episodes are titled '<show title> - <subtitle>' when a subtitle exists.
        title = show.get('audio_title') or 'Bandcamp Weekly'
        subtitle = show.get('subtitle')
        if subtitle:
            title = '%s - %s' % (title, subtitle)
        return {
            'id': show_id,
            'title': title,
            'description': show.get('desc') or show.get('short_desc'),
            'duration': float_or_none(show.get('audio_duration')),
            'is_live': False,
            'release_date': unified_strdate(show.get('published_date')),
            'series': 'Bandcamp Weekly',
            'episode': show.get('subtitle'),
            'episode_id': show_id,
            'formats': formats
        }
| unlicense | da9df8042f0999e2ba835d980d797351 | 35.892031 | 99 | 0.473416 | 3.857796 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/pladform.py | 28 | 4244 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
xpath_text,
qualities,
)
class PladformIE(InfoExtractor):
    """Extractor for the Pladform video platform (pladform.ru)."""
    _VALID_URL = r'''(?x)
                https?://
                    (?:
                        (?:
                            out\.pladform\.ru/player|
                            static\.pladform\.ru/player\.swf
                        )
                        \?.*\bvideoid=|
                        video\.pladform\.ru/catalog/video/videoid/
                    )
                    (?P<id>\d+)
                '''
    _TESTS = [{
        'url': 'https://out.pladform.ru/player?pl=64471&videoid=3777899&vk_puid15=0&vk_puid34=0',
        'md5': '53362fac3a27352da20fa2803cc5cd6f',
        'info_dict': {
            'id': '3777899',
            'ext': 'mp4',
            'title': 'СТУДИЯ СОЮЗ • Шоу Студия Союз, 24 выпуск (01.02.2018) Нурлан Сабуров и Слава Комиссаренко',
            'description': 'md5:05140e8bf1b7e2d46e7ba140be57fd95',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 3190,
        },
    }, {
        'url': 'http://static.pladform.ru/player.swf?pl=21469&videoid=100183293&vkcid=0',
        'only_matching': True,
    }, {
        'url': 'http://video.pladform.ru/catalog/video/videoid/100183293/vkcid/0',
        'only_matching': True,
    }]
    @staticmethod
    def _extract_url(webpage):
        """Return the first embedded Pladform player URL found in webpage, if any."""
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//out\.pladform\.ru/player\?.+?)\1', webpage)
        if mobj:
            return mobj.group('url')
    def _real_extract(self, url):
        """Fetch the getVideo XML and build formats (HLS or progressive)."""
        video_id = self._match_id(url)
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        pl = qs.get('pl', ['1'])[0]
        video = self._download_xml(
            'http://out.pladform.ru/getVideo', video_id, query={
                'pl': pl,
                'videoid': video_id,
            })
        def fail(text):
            # Surface service-side error messages as expected errors.
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, text),
                expected=True)
        if video.tag == 'error':
            fail(video.text)
        # Quality preference order, lowest first.
        quality = qualities(('ld', 'sd', 'hd'))
        formats = []
        for src in video.findall('./src'):
            if src is None:
                continue
            format_url = src.text
            if not format_url:
                continue
            if src.get('type') == 'hls' or determine_ext(format_url) == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': src.text,
                    'format_id': src.get('quality'),
                    'quality': quality(src.get('quality')),
                })
        if not formats:
            error = xpath_text(video, './cap', 'error', default=None)
            if error:
                fail(error)
        self._sort_formats(formats)
        # Metadata lives on the catalog page, not in the getVideo XML.
        webpage = self._download_webpage(
            'http://video.pladform.ru/catalog/video/videoid/%s' % video_id,
            video_id)
        title = self._og_search_title(webpage, fatal=False) or xpath_text(
            video, './/title', 'title', fatal=True)
        description = self._search_regex(
            r'</h3>\s*<p>([^<]+)</p>', webpage, 'description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage) or xpath_text(
            video, './/cover', 'cover')
        duration = int_or_none(xpath_text(video, './/time', 'duration'))
        age_limit = int_or_none(xpath_text(video, './/age18', 'age limit'))
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense | bfa65d86c89eb5ef21a2b1364f1d1480 | 32.456 | 113 | 0.482544 | 3.583548 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/appleconnect.py | 51 | 1756 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
str_to_int,
ExtractorError
)
class AppleConnectIE(InfoExtractor):
    """Extractor for video posts on Apple Music Connect (itunes.apple.com)."""
    _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
    _TEST = {
        'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
        'md5': 'e7c38568a01ea45402570e6029206723',
        'info_dict': {
            'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
            'ext': 'm4v',
            'title': 'Energy',
            'uploader': 'Drake',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20150710',
            'timestamp': 1436545535,
        },
    }
    def _real_extract(self, url):
        """Extract the video payload embedded in the post's HTML."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        try:
            raw_video = self._html_search_regex(
                r'class="auc-video-data">(\{.*?\})', webpage, 'json')
        except ExtractorError:
            # Missing auc-video-data block means this post has no video.
            raise ExtractorError('This post doesn\'t contain a video', expected=True)
        video_data = self._parse_json(raw_video, video_id)
        info = {
            'id': video_id,
            'url': video_data['sslSrc'],
            'title': video_data['title'],
            'description': video_data['description'],
            'uploader': video_data['artistName'],
            'thumbnail': video_data['artworkUrl'],
        }
        # Timestamp and like count live in the surrounding markup, not the JSON.
        info['timestamp'] = str_to_int(self._html_search_regex(
            r'data-timestamp="(\d+)"', webpage, 'timestamp'))
        info['like_count'] = str_to_int(self._html_search_regex(
            r'(\d+) Loves', webpage, 'like count'))
        return info
| unlicense | 8a9f547470c03e553bdf58e366f8c651 | 34.12 | 104 | 0.551253 | 3.319471 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/rte.py | 20 | 6289 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
float_or_none,
parse_iso8601,
str_or_none,
try_get,
unescapeHTML,
url_or_none,
ExtractorError,
)
class RteBaseIE(InfoExtractor):
    """Shared playlist-fetching logic for RTÉ TV and radio extractors."""
    def _real_extract(self, url):
        """Query both RTÉ playlist endpoints and accumulate all formats.

        Formats from every reachable endpoint are collected (RTMP, HLS
        and HDS); metadata is taken from the first endpoint that returns
        a show. Only the last endpoint's failure is surfaced.
        """
        item_id = self._match_id(url)
        info_dict = {}
        formats = []
        ENDPOINTS = (
            'https://feeds.rasset.ie/rteavgen/player/playlist?type=iptv&format=json&showId=',
            'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=',
        )
        for num, ep_url in enumerate(ENDPOINTS, start=1):
            try:
                data = self._download_json(ep_url + item_id, item_id)
            except ExtractorError as ee:
                # Keep going while endpoints remain or formats were found;
                # only raise if this was the last chance.
                if num < len(ENDPOINTS) or formats:
                    continue
                if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404:
                    error_info = self._parse_json(ee.cause.read().decode(), item_id, fatal=False)
                    if error_info:
                        raise ExtractorError(
                            '%s said: %s' % (self.IE_NAME, error_info['message']),
                            expected=True)
                raise
            # NB the string values in the JSON are stored using XML escaping(!)
            show = try_get(data, lambda x: x['shows'][0], dict)
            if not show:
                continue
            if not info_dict:
                title = unescapeHTML(show['title'])
                description = unescapeHTML(show.get('description'))
                thumbnail = show.get('thumbnail')
                duration = float_or_none(show.get('duration'), 1000)
                timestamp = parse_iso8601(show.get('published'))
                info_dict = {
                    'id': item_id,
                    'title': title,
                    'description': description,
                    'thumbnail': thumbnail,
                    'timestamp': timestamp,
                    'duration': duration,
                }
            mg = try_get(show, lambda x: x['media:group'][0], dict)
            if not mg:
                continue
            if mg.get('url'):
                # Split an rtmp(e) URL into its app and playpath components.
                m = re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url'])
                if m:
                    m = m.groupdict()
                    formats.append({
                        'url': m['url'] + '/' + m['app'],
                        'app': m['app'],
                        'play_path': m['playpath'],
                        'player_url': url,
                        'ext': 'flv',
                        'format_id': 'rtmp',
                    })
            if mg.get('hls_server') and mg.get('hls_url'):
                formats.extend(self._extract_m3u8_formats(
                    mg['hls_server'] + mg['hls_url'], item_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
            if mg.get('hds_server') and mg.get('hds_url'):
                formats.extend(self._extract_f4m_formats(
                    mg['hds_server'] + mg['hds_url'], item_id,
                    f4m_id='hds', fatal=False))
            mg_rte_server = str_or_none(mg.get('rte:server'))
            mg_url = str_or_none(mg.get('url'))
            if mg_rte_server and mg_url:
                hds_url = url_or_none(mg_rte_server + mg_url)
                if hds_url:
                    formats.extend(self._extract_f4m_formats(
                        hds_url, item_id, f4m_id='hds', fatal=False))
        self._sort_formats(formats)
        info_dict['formats'] = formats
        return info_dict
class RteIE(RteBaseIE):
    """RTÉ Player TV extractor; all logic lives in RteBaseIE."""
    IE_NAME = 'rte'
    IE_DESC = 'Raidió Teilifís Éireann TV'
    _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
        'md5': '4a76eb3396d98f697e6e8110563d2604',
        'info_dict': {
            'id': '10478715',
            'ext': 'mp4',
            'title': 'iWitness',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'The spirit of Ireland, one voice and one minute at a time.',
            'duration': 60.046,
            'upload_date': '20151012',
            'timestamp': 1444694160,
        },
    }
class RteRadioIE(RteBaseIE):
    """RTÉ radio player extractor; all logic lives in RteBaseIE."""
    IE_NAME = 'rte:radio'
    IE_DESC = 'Raidió Teilifís Éireann radio'
    # Radioplayer URLs have two distinct specifier formats,
    # the old format #!rii=<channel_id>:<id>:<playable_item_id>:<date>:
    # the new format #!rii=b<channel_id>_<id>_<playable_item_id>_<date>_
    # where the IDs are int/empty, the date is DD-MM-YYYY, and the specifier may be truncated.
    # An <id> uniquely defines an individual recording, and is the only part we require.
    _VALID_URL = r'https?://(?:www\.)?rte\.ie/radio/utils/radioplayer/rteradioweb\.html#!rii=(?:b?[0-9]*)(?:%3A|:|%5F|_)(?P<id>[0-9]+)'
    _TESTS = [{
        # Old-style player URL; HLS and RTMPE formats
        'url': 'http://www.rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=16:10507902:2414:27-12-2015:',
        'md5': 'c79ccb2c195998440065456b69760411',
        'info_dict': {
            'id': '10507902',
            'ext': 'mp4',
            'title': 'Gloria',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'md5:9ce124a7fb41559ec68f06387cabddf0',
            'timestamp': 1451203200,
            'upload_date': '20151227',
            'duration': 7230.0,
        },
    }, {
        # New-style player URL; RTMPE formats only
        'url': 'http://rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=b16_3250678_8861_06-04-2012_',
        'info_dict': {
            'id': '3250678',
            'ext': 'flv',
            'title': 'The Lyric Concert with Paul Herriott',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': '',
            'timestamp': 1333742400,
            'upload_date': '20120406',
            'duration': 7199.016,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }]
| unlicense | 75063e1c147552bff16898a8546ccd6a | 36.622754 | 135 | 0.495464 | 3.533746 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/voot.py | 28 | 3560 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
try_get,
unified_timestamp,
)
class VootIE(InfoExtractor):
    """Extractor for Voot (Viacom18's Indian streaming service)."""
    _VALID_URL = r'https?://(?:www\.)?voot\.com/(?:[^/]+/)+(?P<id>\d+)'
    # Voot content is geo-restricted to India.
    _GEO_COUNTRIES = ['IN']
    _TESTS = [{
        'url': 'https://www.voot.com/shows/ishq-ka-rang-safed/1/360558/is-this-the-end-of-kamini-/441353',
        'info_dict': {
            'id': '0_8ledb18o',
            'ext': 'mp4',
            'title': 'Ishq Ka Rang Safed - Season 01 - Episode 340',
            'description': 'md5:06291fbbbc4dcbe21235c40c262507c1',
            'timestamp': 1472162937,
            'upload_date': '20160825',
            'duration': 1146,
            'series': 'Ishq Ka Rang Safed',
            'season_number': 1,
            'episode': 'Is this the end of Kamini?',
            'episode_number': 340,
            'view_count': int,
            'like_count': int,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        'url': 'https://www.voot.com/kids/characters/mighty-cat-masked-niyander-e-/400478/school-bag-disappears/440925',
        'only_matching': True,
    }, {
        'url': 'https://www.voot.com/movies/pandavas-5/424627',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Resolve Voot media info, then extract the Kaltura-hosted HLS streams."""
        video_id = self._match_id(url)
        media_info = self._download_json(
            'https://wapi.voot.com/ws/ott/getMediaInfo.json', video_id,
            query={
                'platform': 'Web',
                'pId': 2,
                'mediaId': video_id,
            })
        # A non-zero status code carries a user-facing error message.
        status_code = try_get(media_info, lambda x: x['status']['code'], int)
        if status_code != 0:
            raise ExtractorError(media_info['status']['message'], expected=True)
        media = media_info['assets']
        entry_id = media['EntryId']
        title = media['MediaName']
        # Streams are served from Kaltura's CDN, keyed by the entry id.
        formats = self._extract_m3u8_formats(
            'https://cdnapisec.kaltura.com/p/1982551/playManifest/pt/https/f/applehttp/t/web/e/' + entry_id,
            video_id, 'mp4', m3u8_id='hls')
        self._sort_formats(formats)
        description, series, season_number, episode, episode_number = [None] * 5
        # Episode metadata arrives as a flat list of Key/Value pairs.
        for meta in try_get(media, lambda x: x['Metas'], list) or []:
            key, value = meta.get('Key'), meta.get('Value')
            if not key or not value:
                continue
            if key == 'ContentSynopsis':
                description = value
            elif key == 'RefSeriesTitle':
                series = value
            elif key == 'RefSeriesSeason':
                season_number = int_or_none(value)
            elif key == 'EpisodeMainTitle':
                episode = value
            elif key == 'EpisodeNo':
                episode_number = int_or_none(value)
        return {
            'extractor_key': 'Kaltura',
            'id': entry_id,
            'title': title,
            'description': description,
            'series': series,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'timestamp': unified_timestamp(media.get('CreationDate')),
            'duration': int_or_none(media.get('Duration')),
            'view_count': int_or_none(media.get('ViewCounter')),
            'like_count': int_or_none(media.get('like_counter')),
            'formats': formats,
        }
| unlicense | 4c00c8f59c483eef73893c0dd284e8c9 | 34.6 | 120 | 0.524438 | 3.556444 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/steam.py | 26 | 5646 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
ExtractorError,
get_element_by_class,
js_to_json,
)
class SteamIE(InfoExtractor):
    """Extract videos from Steam store pages and Steam Community shared files.

    Two URL families are handled:
      * store.steampowered.com video/app pages -> direct flv/webm/mp4 formats
        scraped from ``rgMovieFlashvars`` and the highlight-player markup;
      * steamcommunity.com sharedfiles pages -> delegated to YouTube via the
        ``YOUTUBE_VIDEO_ID`` found in the same flash vars.
    """
    _VALID_URL = r"""(?x)
        https?://store\.steampowered\.com/
            (agecheck/)?
            (?P<urltype>video|app)/ #If the page is only for videos or for a game
            (?P<gameID>\d+)/?
            (?P<videoID>\d*)(?P<extra>\??) # For urltype == video we sometimes get the videoID
        |
        https?://(?:www\.)?steamcommunity\.com/sharedfiles/filedetails/\?id=(?P<fileID>[0-9]+)
    """
    _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
    # Birth-date query params pre-filled so age-gated pages render directly.
    _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
    _TESTS = [{
        'url': 'http://store.steampowered.com/video/105600/',
        'playlist': [
            {
                'md5': '6a294ee0c4b1f47f5bb76a65e31e3592',
                'info_dict': {
                    'id': '2040428',
                    'ext': 'mp4',
                    'title': 'Terraria 1.3 Trailer',
                    'playlist_index': 1,
                }
            },
            {
                'md5': '911672b20064ca3263fa89650ba5a7aa',
                'info_dict': {
                    'id': '2029566',
                    'ext': 'mp4',
                    'title': 'Terraria 1.2 Trailer',
                    'playlist_index': 2,
                }
            }
        ],
        'info_dict': {
            'id': '105600',
            'title': 'Terraria',
        },
        'params': {
            'playlistend': 2,
        }
    }, {
        'url': 'http://steamcommunity.com/sharedfiles/filedetails/?id=242472205',
        'info_dict': {
            'id': 'X8kpJBlzD2E',
            'ext': 'mp4',
            'upload_date': '20140617',
            'title': 'FRONTIERS - Trapping',
            'description': 'md5:bf6f7f773def614054089e5769c12a6e',
            'uploader': 'AAD Productions',
            'uploader_id': 'AtomicAgeDogGames',
        }
    }]

    def _real_extract(self, url):
        """Return a playlist of all videos found on the matched page."""
        m = re.match(self._VALID_URL, url)
        fileID = m.group('fileID')
        # Community shared-file pages are fetched as-is; store pages are
        # normalized to the canonical video page for the game id.
        if fileID:
            videourl = url
            playlist_id = fileID
        else:
            gameID = m.group('gameID')
            playlist_id = gameID
            videourl = self._VIDEO_PAGE_TEMPLATE % playlist_id

        self._set_cookie('steampowered.com', 'mature_content', '1')

        webpage = self._download_webpage(videourl, playlist_id)

        # Age gate: re-request through the agecheck URL with a fake birth date.
        if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
            videourl = self._AGECHECK_TEMPLATE % playlist_id
            self.report_age_confirmation()
            webpage = self._download_webpage(videourl, playlist_id)

        # The page embeds per-movie metadata as a JS object literal.
        flash_vars = self._parse_json(self._search_regex(
            r'(?s)rgMovieFlashvars\s*=\s*({.+?});', webpage,
            'flash vars'), playlist_id, js_to_json)

        playlist_title = None
        entries = []
        if fileID:
            # Shared files embed YouTube videos; hand them off by id.
            playlist_title = get_element_by_class('workshopItemTitle', webpage)
            for movie in flash_vars.values():
                if not movie:
                    continue
                youtube_id = movie.get('YOUTUBE_VIDEO_ID')
                if not youtube_id:
                    continue
                entries.append({
                    '_type': 'url',
                    'url': youtube_id,
                    'ie_key': 'Youtube',
                })
        else:
            playlist_title = get_element_by_class('apphub_AppName', webpage)
            for movie_id, movie in flash_vars.items():
                if not movie:
                    continue
                # Keys look like 'movie_<digits>'; that digit run is the id.
                video_id = self._search_regex(r'movie_(\d+)', movie_id, 'video id', fatal=False)
                title = movie.get('MOVIE_NAME')
                if not title or not video_id:
                    continue
                entry = {
                    'id': video_id,
                    'title': title.replace('+', ' '),
                }
                formats = []
                flv_url = movie.get('FILENAME')
                if flv_url:
                    formats.append({
                        'format_id': 'flv',
                        'url': flv_url,
                    })
                # The highlight player <div> carries poster and per-quality
                # source URLs as data-* attributes.
                highlight_element = self._search_regex(
                    r'(<div[^>]+id="highlight_movie_%s"[^>]+>)' % video_id,
                    webpage, 'highlight element', fatal=False)
                if highlight_element:
                    highlight_attribs = extract_attributes(highlight_element)
                    if highlight_attribs:
                        entry['thumbnail'] = highlight_attribs.get('data-poster')
                        for quality in ('', '-hd'):
                            for ext in ('webm', 'mp4'):
                                video_url = highlight_attribs.get('data-%s%s-source' % (ext, quality))
                                if video_url:
                                    formats.append({
                                        'format_id': ext + quality,
                                        'url': video_url,
                                    })
                if not formats:
                    continue
                entry['formats'] = formats
                entries.append(entry)
        if not entries:
            raise ExtractorError('Could not find any videos')

        return self.playlist_result(entries, playlist_id, playlist_title)
| unlicense | 16c16377c3b7bd951bb7e88ed8896a6b | 36.892617 | 148 | 0.462806 | 4.076534 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/tvanouvelles.py | 68 | 2401 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveNewIE
class TVANouvellesIE(InfoExtractor):
    """Resolve a TVA Nouvelles video page to its Brightcove player URL."""
    _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/videos/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.tvanouvelles.ca/videos/5117035533001',
        'info_dict': {
            'id': '5117035533001',
            'ext': 'mp4',
            'title': 'L’industrie du taxi dénonce l’entente entre Québec et Uber: explications',
            'description': 'md5:479653b7c8cf115747bf5118066bd8b3',
            'uploader_id': '1741764581',
            'timestamp': 1473352030,
            'upload_date': '20160908',
        },
        'add_ie': ['BrightcoveNew'],
    }
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1741764581/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        # The numeric page id doubles as the Brightcove video id.
        brightcove_id = self._match_id(url)
        brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id
        return self.url_result(
            brightcove_url, BrightcoveNewIE.ie_key(), brightcove_id)
class TVANouvellesArticleIE(InfoExtractor):
    """Extract every embedded video from a TVA Nouvelles article page."""
    _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/(?:[^/]+/)+(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://www.tvanouvelles.ca/2016/11/17/des-policiers-qui-ont-la-meche-un-peu-courte',
        'info_dict': {
            'id': 'des-policiers-qui-ont-la-meche-un-peu-courte',
            'title': 'Des policiers qui ont «la mèche un peu courte»?',
            'description': 'md5:92d363c8eb0f0f030de9a4a84a90a3a0',
        },
        'playlist_mincount': 4,
    }

    @classmethod
    def suitable(cls, url):
        # Direct video pages belong to TVANouvellesIE, never to this class.
        if TVANouvellesIE.suitable(url):
            return False
        return super(TVANouvellesArticleIE, cls).suitable(url)

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        entries = []
        # Each embedded player carries its numeric id in a data-video-id attr.
        for mobj in re.finditer(
                r'data-video-id=(["\'])?(?P<id>\d+)', webpage):
            video_id = mobj.group('id')
            entries.append(self.url_result(
                'http://www.tvanouvelles.ca/videos/%s' % video_id,
                ie=TVANouvellesIE.ie_key(), video_id=video_id))

        title = self._og_search_title(webpage, fatal=False)
        description = self._og_search_description(webpage)

        return self.playlist_result(entries, display_id, title, description)
| unlicense | 621b671affc23f0579da524537dd8998 | 35.8 | 110 | 0.598662 | 3.078507 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/nbc.py | 5 | 20411 | from __future__ import unicode_literals
import base64
import json
import re
from .common import InfoExtractor
from .theplatform import ThePlatformIE
from .adobepass import AdobePassIE
from ..compat import compat_urllib_parse_unquote
from ..utils import (
int_or_none,
parse_duration,
smuggle_url,
try_get,
unified_timestamp,
update_url_query,
)
class NBCIE(AdobePassIE):
    """Extract nbc.com videos.

    Metadata comes from NBC's "bonanzaPage" GraphQL endpoint; playback is
    delegated to ThePlatform via a smuggled link.theplatform.com URL, with an
    Adobe Pass auth token attached when the video is TV-provider locked.
    """
    _VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/]+/video/[^/]+/(?P<id>n?\d+))'

    _TESTS = [
        {
            'url': 'http://www.nbc.com/the-tonight-show/video/jimmy-fallon-surprises-fans-at-ben-jerrys/2848237',
            'info_dict': {
                'id': '2848237',
                'ext': 'mp4',
                'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s',
                'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.',
                'timestamp': 1424246400,
                'upload_date': '20150218',
                'uploader': 'NBCU-COM',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.nbc.com/saturday-night-live/video/star-wars-teaser/2832821',
            'info_dict': {
                'id': '2832821',
                'ext': 'mp4',
                'title': 'Star Wars Teaser',
                'description': 'md5:0b40f9cbde5b671a7ff62fceccc4f442',
                'timestamp': 1417852800,
                'upload_date': '20141206',
                'uploader': 'NBCU-COM',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
            'skip': 'Only works from US',
        },
        {
            # HLS streams requires the 'hdnea3' cookie
            'url': 'http://www.nbc.com/Kings/video/goliath/n1806',
            'info_dict': {
                'id': '101528f5a9e8127b107e98c5e6ce4638',
                'ext': 'mp4',
                'title': 'Goliath',
                'description': 'When an unknown soldier saves the life of the King\'s son in battle, he\'s thrust into the limelight and politics of the kingdom.',
                'timestamp': 1237100400,
                'upload_date': '20090315',
                'uploader': 'NBCU-COM',
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'Only works from US',
        },
        {
            'url': 'https://www.nbc.com/classic-tv/charles-in-charge/video/charles-in-charge-pilot/n3310',
            'only_matching': True,
        },
        {
            # Percent escaped url
            'url': 'https://www.nbc.com/up-all-night/video/day-after-valentine%27s-day/n2189',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        permalink, video_id = re.match(self._VALID_URL, url).groups()
        # The permalink group starts at '://'; unquote to handle
        # percent-escaped URLs (see the last test case above).
        permalink = 'http' + compat_urllib_parse_unquote(permalink)
        # Single GraphQL request returns everything we need about the video.
        video_data = self._download_json(
            'https://friendship.nbc.co/v2/graphql', video_id, query={
                'query': '''query bonanzaPage(
  $app: NBCUBrands! = nbc
  $name: String!
  $oneApp: Boolean
  $platform: SupportedPlatforms! = web
  $type: EntityPageType! = VIDEO
  $userId: String!
) {
  bonanzaPage(
    app: $app
    name: $name
    oneApp: $oneApp
    platform: $platform
    type: $type
    userId: $userId
  ) {
    metadata {
      ... on VideoPageData {
        description
        episodeNumber
        keywords
        locked
        mpxAccountId
        mpxGuid
        rating
        resourceId
        seasonNumber
        secondaryTitle
        seriesShortTitle
      }
    }
  }
}''',
                'variables': json.dumps({
                    'name': permalink,
                    'oneApp': True,
                    'userId': '0',
                }),
            })['data']['bonanzaPage']['metadata']
        query = {
            'mbr': 'true',
            'manifest': 'm3u',
        }
        video_id = video_data['mpxGuid']
        title = video_data['secondaryTitle']
        # Locked videos require an Adobe Pass (TV provider) auth token,
        # passed along to ThePlatform in the 'auth' query parameter.
        if video_data.get('locked'):
            resource = self._get_mvpd_resource(
                video_data.get('resourceId') or 'nbcentertainment',
                title, video_id, video_data.get('rating'))
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, 'nbcentertainment', resource)
        # force_smil_url makes ThePlatformIE treat this as a SMIL endpoint.
        theplatform_url = smuggle_url(update_url_query(
            'http://link.theplatform.com/s/NnzsPC/media/guid/%s/%s' % (video_data.get('mpxAccountId') or '2410887629', video_id),
            query), {'force_smil_url': True})
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'title': title,
            'url': theplatform_url,
            'description': video_data.get('description'),
            'tags': video_data.get('keywords'),
            'season_number': int_or_none(video_data.get('seasonNumber')),
            'episode_number': int_or_none(video_data.get('episodeNumber')),
            'episode': title,
            'series': video_data.get('seriesShortTitle'),
            'ie_key': 'ThePlatform',
        }
class NBCSportsVPlayerIE(InfoExtractor):
    """Extract NBC Sports VPlayer embeds by mapping them to ThePlatform."""
    _VALID_URL_BASE = r'https?://(?:vplayer\.nbcsports\.com|(?:www\.)?nbcsports\.com/vplayer)/'
    _VALID_URL = _VALID_URL_BASE + r'(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)'

    _TESTS = [{
        'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/9CsDKds0kvHI',
        'info_dict': {
            'id': '9CsDKds0kvHI',
            'ext': 'mp4',
            'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
            'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
            'timestamp': 1426270238,
            'upload_date': '20150313',
            'uploader': 'NBCU-SPORTS',
        }
    }, {
        'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/media/_hqLjQ95yx8Z',
        'only_matching': True,
    }, {
        'url': 'https://www.nbcsports.com/vplayer/p/BxmELC/nbcsports/select/PHJSaFWbrTY9?form=html&autoPlay=true',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_url(webpage):
        # Embeds appear either as an <iframe src> or a <div data-(mpx-)src>.
        mobj = re.search(
            r'<(?:iframe[^>]+|div[^>]+data-(?:mpx-)?)src="(?P<url>%s[^"]+)"' % NBCSportsVPlayerIE._VALID_URL_BASE, webpage)
        return mobj.group('url') if mobj else None

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The og:video URL only needs a host swap to become a ThePlatform URL.
        vplayer_url = self._og_search_video_url(webpage)
        return self.url_result(
            vplayer_url.replace('vplayer.nbcsports.com', 'player.theplatform.com'),
            'ThePlatform')
class NBCSportsIE(InfoExtractor):
    """Extract NBC Sports article pages by locating the embedded VPlayer."""
    _VALID_URL = r'https?://(?:www\.)?nbcsports\.com//?(?!vplayer/)(?:[^/]+/)+(?P<id>[0-9a-z-]+)'

    _TESTS = [{
        # iframe src
        'url': 'http://www.nbcsports.com//college-basketball/ncaab/tom-izzo-michigan-st-has-so-much-respect-duke',
        'info_dict': {
            'id': 'PHJSaFWbrTY9',
            'ext': 'mp4',
            'title': 'Tom Izzo, Michigan St. has \'so much respect\' for Duke',
            'description': 'md5:ecb459c9d59e0766ac9c7d5d0eda8113',
            'uploader': 'NBCU-SPORTS',
            'upload_date': '20150330',
            'timestamp': 1427726529,
        }
    }, {
        # data-mpx-src
        'url': 'https://www.nbcsports.com/philadelphia/philadelphia-phillies/bruce-bochy-hector-neris-hes-idiot',
        'only_matching': True,
    }, {
        # data-src
        'url': 'https://www.nbcsports.com/boston/video/report-card-pats-secondary-no-match-josh-allen',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Delegate to NBCSportsVPlayerIE, which knows the player URL layout.
        vplayer_url = NBCSportsVPlayerIE._extract_url(webpage)
        return self.url_result(vplayer_url, 'NBCSportsVPlayer')
class NBCSportsStreamIE(AdobePassIE):
    """Extract streams from stream.nbcsports.com (TV-provider authenticated).

    The flow: fetch the live-source JSON, pick a source URL, obtain an Adobe
    Pass token, then exchange it for a tokenized (signed) CDN URL before
    pulling the HLS formats.
    """
    _VALID_URL = r'https?://stream\.nbcsports\.com/.+?\bpid=(?P<id>\d+)'
    _TEST = {
        'url': 'http://stream.nbcsports.com/nbcsn/generic?pid=206559',
        'info_dict': {
            'id': '206559',
            'ext': 'mp4',
            'title': 'Amgen Tour of California Women\'s Recap',
            'description': 'md5:66520066b3b5281ada7698d0ea2aa894',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': 'Requires Adobe Pass Authentication',
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        live_source = self._download_json(
            'http://stream.nbcsports.com/data/live_sources_%s.json' % video_id,
            video_id)
        video_source = live_source['videoSources'][0]
        title = video_source['title']
        source_url = None
        # Probe the known source keys in preference order; each may also
        # come in an '...UrlAlt' fallback variant.
        for k in ('source', 'msl4source', 'iossource', 'hlsv4'):
            sk = k + 'Url'
            source_url = video_source.get(sk) or video_source.get(sk + 'Alt')
            if source_url:
                break
        else:
            # None of the preferred keys present: fall back to the OTT stream.
            source_url = video_source['ottStreamUrl']
        is_live = video_source.get('type') == 'live' or video_source.get('status') == 'Live'
        resource = self._get_mvpd_resource('nbcsports', title, video_id, '')
        token = self._extract_mvpd_auth(url, video_id, 'nbcsports', resource)
        # Exchange the Adobe Pass token for a signed, playable CDN URL.
        tokenized_url = self._download_json(
            'https://token.playmakerservices.com/cdn',
            video_id, data=json.dumps({
                'requestorId': 'nbcsports',
                'pid': video_id,
                'application': 'NBCSports',
                'version': 'v1',
                'platform': 'desktop',
                'cdn': 'akamai',
                'url': video_source['sourceUrl'],
                'token': base64.b64encode(token.encode()).decode(),
                'resourceId': base64.b64encode(resource.encode()).decode(),
            }).encode())['tokenizedUrl']
        formats = self._extract_m3u8_formats(tokenized_url, video_id, 'mp4')
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'description': live_source.get('description'),
            'formats': formats,
            'is_live': is_live,
        }
class NBCNewsIE(ThePlatformIE):
    """Extract videos from nbcnews.com, today.com and msnbc.com.

    Video metadata is read from the page's Next.js ``__NEXT_DATA__`` JSON
    blob; formats come from the ``videoAssets`` list (HTTP progressive and
    HLS variants), with English subtitles from ``closedCaptioning``.
    """
    _VALID_URL = r'(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/([^/]+/)*(?:.*-)?(?P<id>[^/?]+)'

    _TESTS = [
        {
            'url': 'http://www.nbcnews.com/watch/nbcnews-com/how-twitter-reacted-to-the-snowden-interview-269389891880',
            'md5': 'cf4bc9e6ce0130f00f545d80ecedd4bf',
            'info_dict': {
                'id': '269389891880',
                'ext': 'mp4',
                'title': 'How Twitter Reacted To The Snowden Interview',
                'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
                'timestamp': 1401363060,
                'upload_date': '20140529',
            },
        },
        {
            'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156',
            'md5': 'fdbf39ab73a72df5896b6234ff98518a',
            'info_dict': {
                'id': '529953347624',
                'ext': 'mp4',
                'title': 'FULL EPISODE: Family Business',
                'description': 'md5:757988edbaae9d7be1d585eb5d55cc04',
            },
            'skip': 'This page is unavailable.',
        },
        {
            'url': 'http://www.nbcnews.com/nightly-news/video/nightly-news-with-brian-williams-full-broadcast-february-4-394064451844',
            'md5': '8eb831eca25bfa7d25ddd83e85946548',
            'info_dict': {
                'id': '394064451844',
                'ext': 'mp4',
                'title': 'Nightly News with Brian Williams Full Broadcast (February 4)',
                'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5',
                'timestamp': 1423104900,
                'upload_date': '20150205',
            },
        },
        {
            'url': 'http://www.nbcnews.com/business/autos/volkswagen-11-million-vehicles-could-have-suspect-software-emissions-scandal-n431456',
            'md5': '4a8c4cec9e1ded51060bdda36ff0a5c0',
            'info_dict': {
                'id': 'n431456',
                'ext': 'mp4',
                'title': "Volkswagen U.S. Chief: We 'Totally Screwed Up'",
                'description': 'md5:d22d1281a24f22ea0880741bb4dd6301',
                'upload_date': '20150922',
                'timestamp': 1442917800,
            },
        },
        {
            'url': 'http://www.today.com/video/see-the-aurora-borealis-from-space-in-stunning-new-nasa-video-669831235788',
            'md5': '118d7ca3f0bea6534f119c68ef539f71',
            'info_dict': {
                'id': '669831235788',
                'ext': 'mp4',
                'title': 'See the aurora borealis from space in stunning new NASA video',
                'description': 'md5:74752b7358afb99939c5f8bb2d1d04b1',
                'upload_date': '20160420',
                'timestamp': 1461152093,
            },
        },
        {
            'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
            'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
            'info_dict': {
                'id': '314487875924',
                'ext': 'mp4',
                'title': 'The chaotic GOP immigration vote',
                'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.',
                'thumbnail': r're:^https?://.*\.jpg$',
                'timestamp': 1406937606,
                'upload_date': '20140802',
            },
        },
        {
            'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952',
            'only_matching': True,
        },
        {
            # From http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html
            'url': 'http://www.nbcnews.com/widget/video-embed/701714499682',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        data = self._parse_json(self._search_regex(
            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
            webpage, 'bootstrap json'), video_id)['props']['initialState']
        # Video pages put the data under video.current; article pages embed
        # it as the first piece of article content instead.
        video_data = try_get(data, lambda x: x['video']['current'], dict)
        if not video_data:
            video_data = data['article']['content'][0]['primaryMedia']['video']
        title = video_data['headline']['primary']

        formats = []
        for va in video_data.get('videoAssets', []):
            public_url = va.get('publicUrl')
            if not public_url:
                continue
            if '://link.theplatform.com/' in public_url:
                public_url = update_url_query(public_url, {'format': 'redirect'})
            format_id = va.get('format')
            if format_id == 'M3U':
                formats.extend(self._extract_m3u8_formats(
                    public_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=format_id, fatal=False))
                continue
            tbr = int_or_none(va.get('bitrate'), 1000)
            if not format_id:
                # Defensive fallback: if the asset carries no 'format' key,
                # format_id would be None and the '-<tbr>' concatenation
                # below would raise TypeError.
                format_id = 'http'
            if tbr:
                format_id += '-%d' % tbr
            formats.append({
                'format_id': format_id,
                'url': public_url,
                'width': int_or_none(va.get('width')),
                'height': int_or_none(va.get('height')),
                'tbr': tbr,
                'ext': 'mp4',
            })
        self._sort_formats(formats)

        # Closed captioning: treat every provided URL as an English track.
        subtitles = {}
        closed_captioning = video_data.get('closedCaptioning')
        if closed_captioning:
            for cc_url in closed_captioning.values():
                if not cc_url:
                    continue
                subtitles.setdefault('en', []).append({
                    'url': cc_url,
                })

        return {
            'id': video_id,
            'title': title,
            'description': try_get(video_data, lambda x: x['description']['primary']),
            'thumbnail': try_get(video_data, lambda x: x['primaryImage']['url']['primary']),
            'duration': parse_duration(video_data.get('duration')),
            'timestamp': unified_timestamp(video_data.get('datePublished')),
            'formats': formats,
            'subtitles': subtitles,
        }
class NBCOlympicsIE(InfoExtractor):
    """Resolve nbcolympics.com VOD pages to their ThePlatform player URL."""
    IE_NAME = 'nbcolympics'
    _VALID_URL = r'https?://www\.nbcolympics\.com/video/(?P<id>[a-z-]+)'

    _TEST = {
        # Geo-restricted to US
        'url': 'http://www.nbcolympics.com/video/justin-roses-son-leo-was-tears-after-his-dad-won-gold',
        'md5': '54fecf846d05429fbaa18af557ee523a',
        'info_dict': {
            'id': 'WjTBzDXx5AUq',
            'display_id': 'justin-roses-son-leo-was-tears-after-his-dad-won-gold',
            'ext': 'mp4',
            'title': 'Rose\'s son Leo was in tears after his dad won gold',
            'description': 'Olympic gold medalist Justin Rose gets emotional talking to the impact his win in men\'s golf has already had on his children.',
            'timestamp': 1471274964,
            'upload_date': '20160815',
            'uploader': 'NBCU-SPORTS',
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # The Drupal settings blob carries the VOD player iframe URL.
        settings = self._parse_json(self._search_regex(
            r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);',
            webpage, 'drupal settings'), display_id)

        # A simple host swap turns the iframe URL into a ThePlatform URL.
        theplatform_url = settings['vod']['iframe_url'].replace(
            'vplayer.nbcolympics.com', 'player.theplatform.com')

        return {
            '_type': 'url_transparent',
            'url': theplatform_url,
            'ie_key': ThePlatformIE.ie_key(),
            'display_id': display_id,
        }
class NBCOlympicsStreamIE(AdobePassIE):
    """Extract live streams from stream.nbcolympics.com (Adobe Pass gated).

    The page exposes a numeric pid and an Adobe Pass resource template;
    the source URL is signed through sp.auth.adobe.com before the HLS
    formats are fetched.
    """
    IE_NAME = 'nbcolympics:stream'
    _VALID_URL = r'https?://stream\.nbcolympics\.com/(?P<id>[0-9a-z-]+)'
    _TEST = {
        'url': 'http://stream.nbcolympics.com/2018-winter-olympics-nbcsn-evening-feb-8',
        'info_dict': {
            'id': '203493',
            'ext': 'mp4',
            'title': 're:Curling, Alpine, Luge [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }
    _DATA_URL_TEMPLATE = 'http://stream.nbcolympics.com/data/%s_%s.json'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        pid = self._search_regex(r'pid\s*=\s*(\d+);', webpage, 'pid')
        # The page's resource string embeds "' + pid + '" as a JS template;
        # splice the actual pid in.
        resource = self._search_regex(
            r"resource\s*=\s*'(.+)';", webpage,
            'resource').replace("' + pid + '", pid)
        event_config = self._download_json(
            self._DATA_URL_TEMPLATE % ('event_config', pid),
            pid)['eventConfig']
        title = self._live_title(event_config['eventTitle'])
        source_url = self._download_json(
            self._DATA_URL_TEMPLATE % ('live_sources', pid),
            pid)['videoSources'][0]['sourceUrl']
        media_token = self._extract_mvpd_auth(
            url, pid, event_config.get('requestorId', 'NBCOlympics'), resource)
        # Sign the source URL via Adobe's token service; the response body is
        # an m3u8 playlist URL set.
        formats = self._extract_m3u8_formats(self._download_webpage(
            'http://sp.auth.adobe.com/tvs/v1/sign', pid, query={
                'cdn': 'akamai',
                'mediaToken': base64.b64encode(media_token.encode()),
                'resource': base64.b64encode(resource.encode()),
                'url': source_url,
            }), pid, 'mp4')
        self._sort_formats(formats)

        return {
            'id': pid,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'is_live': True,
        }
| unlicense | c74e924fba4b4bdd5e88fa6cc1ab445f | 37.878095 | 194 | 0.525354 | 3.333497 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/hbo.py | 15 | 6128 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
xpath_text,
xpath_element,
int_or_none,
parse_duration,
urljoin,
)
class HBOBaseIE(InfoExtractor):
    """Shared extraction logic for HBO: parses the XML media document into
    formats (RTMP, HTTP progressive, HLS, DASH), thumbnails and subtitles."""

    # Maps the XML <size> width attribute / source tag name to the actual
    # video dimensions of that rendition.
    _FORMATS_INFO = {
        'pro7': {
            'width': 1280,
            'height': 720,
        },
        '1920': {
            'width': 1280,
            'height': 720,
        },
        'pro6': {
            'width': 768,
            'height': 432,
        },
        '640': {
            'width': 768,
            'height': 432,
        },
        'pro5': {
            'width': 640,
            'height': 360,
        },
        'highwifi': {
            'width': 640,
            'height': 360,
        },
        'high3g': {
            'width': 640,
            'height': 360,
        },
        'medwifi': {
            'width': 400,
            'height': 224,
        },
        'med3g': {
            'width': 400,
            'height': 224,
        },
    }

    def _extract_info(self, url, display_id):
        """Download the media XML at *url* and return a full info dict."""
        video_data = self._download_xml(url, display_id)
        video_id = xpath_text(video_data, 'id', fatal=True)
        episode_title = title = xpath_text(video_data, 'title', fatal=True)
        series = xpath_text(video_data, 'program')
        if series:
            title = '%s - %s' % (series, title)

        formats = []
        for source in xpath_element(video_data, 'videos', 'sources', True):
            if source.tag == 'size':
                # <size> entries carry either HTTP or RTMP paths.
                path = xpath_text(source, './/path')
                if not path:
                    continue
                width = source.attrib.get('width')
                format_info = self._FORMATS_INFO.get(width, {})
                height = format_info.get('height')
                fmt = {
                    'url': path,
                    'format_id': 'http%s' % ('-%dp' % height if height else ''),
                    'width': format_info.get('width'),
                    'height': height,
                }
                # RTMP paths are split into url/app/playpath components.
                rtmp = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', path)
                if rtmp:
                    fmt.update({
                        'url': rtmp.group('url'),
                        'play_path': rtmp.group('playpath'),
                        'app': rtmp.group('app'),
                        'ext': 'flv',
                        'format_id': fmt['format_id'].replace('http', 'rtmp'),
                    })
                formats.append(fmt)
            else:
                video_url = source.text
                if not video_url:
                    continue
                if source.tag == 'tarball':
                    # The .tar URL doubles as the base of an HLS rendition.
                    formats.extend(self._extract_m3u8_formats(
                        video_url.replace('.tar', '/base_index_w8.m3u8'),
                        video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
                elif source.tag == 'hls':
                    m3u8_formats = self._extract_m3u8_formats(
                        video_url.replace('.tar', '/base_index.m3u8'),
                        video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
                    for f in m3u8_formats:
                        # Audio-only variants lack tbr in the manifest but
                        # encode the bitrate in the URL (e.g. '-128k/').
                        if f.get('vcodec') == 'none' and not f.get('tbr'):
                            f['tbr'] = int_or_none(self._search_regex(
                                r'-(\d+)k/', f['url'], 'tbr', default=None))
                    formats.extend(m3u8_formats)
                elif source.tag == 'dash':
                    formats.extend(self._extract_mpd_formats(
                        video_url.replace('.tar', '/manifest.mpd'),
                        video_id, mpd_id='dash', fatal=False))
                else:
                    # Plain progressive HTTP rendition named after its tag.
                    format_info = self._FORMATS_INFO.get(source.tag, {})
                    formats.append({
                        'format_id': 'http-%s' % source.tag,
                        'url': video_url,
                        'width': format_info.get('width'),
                        'height': format_info.get('height'),
                    })
        self._sort_formats(formats)

        thumbnails = []
        card_sizes = xpath_element(video_data, 'titleCardSizes')
        if card_sizes is not None:
            for size in card_sizes:
                path = xpath_text(size, 'path')
                if not path:
                    continue
                width = int_or_none(size.get('width'))
                thumbnails.append({
                    'id': width,
                    'url': path,
                    'width': width,
                })

        subtitles = None
        caption_url = xpath_text(video_data, 'captionUrl')
        if caption_url:
            subtitles = {
                'en': [{
                    'url': caption_url,
                    'ext': 'ttml'
                }],
            }

        return {
            'id': video_id,
            'title': title,
            'duration': parse_duration(xpath_text(video_data, 'duration/tv14')),
            'series': series,
            'episode': episode_title,
            'formats': formats,
            'thumbnails': thumbnails,
            'subtitles': subtitles,
        }
class HBOIE(HBOBaseIE):
    """Extract hbo.com pages by locating the media XML in the page state."""
    IE_NAME = 'hbo'
    _VALID_URL = r'https?://(?:www\.)?hbo\.com/(?:video|embed)(?:/[^/]+)*/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'https://www.hbo.com/video/game-of-thrones/seasons/season-8/videos/trailer',
        'md5': '8126210656f433c452a21367f9ad85b3',
        'info_dict': {
            'id': '22113301',
            'ext': 'mp4',
            'title': 'Game of Thrones - Trailer',
        },
        'expected_warnings': ['Unknown MIME type application/mp4 in DASH manifest'],
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page's data-state JSON blob points at the media XML document.
        state = self._parse_json(
            self._html_search_regex(r'data-state="({.+?})"', webpage, 'state'),
            display_id)
        location_url = urljoin(url, state['video']['locationUrl'])
        return self._extract_info(location_url, display_id)
| unlicense | 16be25efe65c289644bd339726004cfb | 34.017143 | 101 | 0.431625 | 3.966343 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/footyroom.py | 70 | 1875 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .streamable import StreamableIE
class FootyRoomIE(InfoExtractor):
    """Extract match-highlight playlists from footyroom.com (Playwire and
    Streamable embeds)."""
    _VALID_URL = r'https?://footyroom\.com/matches/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://footyroom.com/matches/79922154/hull-city-vs-chelsea/review',
        'info_dict': {
            'id': '79922154',
            'title': 'VIDEO Hull City 0 - 2 Chelsea',
        },
        'playlist_count': 2,
        'add_ie': [StreamableIE.ie_key()],
    }, {
        'url': 'http://footyroom.com/matches/75817984/georgia-vs-germany/review',
        'info_dict': {
            'id': '75817984',
            'title': 'VIDEO Georgia 0 - 2 Germany',
        },
        'playlist_count': 1,
        'add_ie': ['Playwire']
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        media_items = self._parse_json(self._search_regex(
            r'DataStore\.media\s*=\s*([^;]+)', webpage, 'media data'),
            playlist_id)

        entries = []
        for item in media_items:
            payload = item.get('payload')
            if not payload:
                continue
            # A single payload may contain both embed kinds; collect each.
            playwire_url = self._html_search_regex(
                r'data-config="([^"]+)"', payload,
                'playwire url', default=None)
            if playwire_url:
                entries.append(self.url_result(self._proto_relative_url(
                    playwire_url, 'http:'), 'Playwire'))
            streamable_url = StreamableIE._extract_url(payload)
            if streamable_url:
                entries.append(self.url_result(
                    streamable_url, StreamableIE.ie_key()))

        return self.playlist_result(
            entries, playlist_id, self._og_search_title(webpage))
| unlicense | 6c8a97c49619fed0444fd3bf96b38997 | 32.482143 | 83 | 0.546133 | 3.690945 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/nexx.py | 13 | 17106 | # coding: utf-8
from __future__ import unicode_literals
import hashlib
import random
import re
import time
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
parse_duration,
try_get,
urlencode_postdata,
)
class NexxIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://api\.nexx(?:\.cloud|cdn\.com)/v3/(?P<domain_id>\d+)/videos/byid/|
nexx:(?:(?P<domain_id_s>\d+):)?|
https?://arc\.nexx\.cloud/api/video/
)
(?P<id>\d+)
'''
_TESTS = [{
# movie
'url': 'https://api.nexx.cloud/v3/748/videos/byid/128907',
'md5': '31899fd683de49ad46f4ee67e53e83fe',
'info_dict': {
'id': '128907',
'ext': 'mp4',
'title': 'Stiftung Warentest',
'alt_title': 'Wie ein Test abläuft',
'description': 'md5:d1ddb1ef63de721132abd38639cc2fd2',
'creator': 'SPIEGEL TV',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 2509,
'timestamp': 1384264416,
'upload_date': '20131112',
},
}, {
# episode
'url': 'https://api.nexx.cloud/v3/741/videos/byid/247858',
'info_dict': {
'id': '247858',
'ext': 'mp4',
'title': 'Return of the Golden Child (OV)',
'description': 'md5:5d969537509a92b733de21bae249dc63',
'release_year': 2017,
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1397,
'timestamp': 1495033267,
'upload_date': '20170517',
'episode_number': 2,
'season_number': 2,
},
'params': {
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
# does not work via arc
'url': 'nexx:741:1269984',
'md5': 'c714b5b238b2958dc8d5642addba6886',
'info_dict': {
'id': '1269984',
'ext': 'mp4',
'title': '1 TAG ohne KLO... wortwörtlich! 😑',
'alt_title': '1 TAG ohne KLO... wortwörtlich! 😑',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 607,
'timestamp': 1518614955,
'upload_date': '20180214',
},
}, {
# free cdn from http://www.spiegel.de/video/eifel-zoo-aufregung-um-ausgebrochene-raubtiere-video-99018031.html
'url': 'nexx:747:1533779',
'md5': '6bf6883912b82b7069fb86c2297e9893',
'info_dict': {
'id': '1533779',
'ext': 'mp4',
'title': 'Aufregung um ausgebrochene Raubtiere',
'alt_title': 'Eifel-Zoo',
'description': 'md5:f21375c91c74ad741dcb164c427999d2',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 111,
'timestamp': 1527874460,
'upload_date': '20180601',
},
}, {
'url': 'https://api.nexxcdn.com/v3/748/videos/byid/128907',
'only_matching': True,
}, {
'url': 'nexx:748:128907',
'only_matching': True,
}, {
'url': 'nexx:128907',
'only_matching': True,
}, {
'url': 'https://arc.nexx.cloud/api/video/128907.json',
'only_matching': True,
}]
@staticmethod
def _extract_domain_id(webpage):
mobj = re.search(
r'<script\b[^>]+\bsrc=["\'](?:https?:)?//(?:require|arc)\.nexx(?:\.cloud|cdn\.com)/(?:sdk/)?(?P<id>\d+)',
webpage)
return mobj.group('id') if mobj else None
@staticmethod
def _extract_urls(webpage):
# Reference:
# 1. https://nx-s.akamaized.net/files/201510/44.pdf
entries = []
# JavaScript Integration
domain_id = NexxIE._extract_domain_id(webpage)
if domain_id:
for video_id in re.findall(
r'(?is)onPLAYReady.+?_play\.(?:init|(?:control\.)?addPlayer)\s*\(.+?\s*,\s*["\']?(\d+)',
webpage):
entries.append(
'https://api.nexx.cloud/v3/%s/videos/byid/%s'
% (domain_id, video_id))
# TODO: support more embed formats
return entries
@staticmethod
def _extract_url(webpage):
return NexxIE._extract_urls(webpage)[0]
def _handle_error(self, response):
status = int_or_none(try_get(
response, lambda x: x['metadata']['status']) or 200)
if 200 <= status < 300:
return
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, response['metadata']['errorhint']),
expected=True)
def _call_api(self, domain_id, path, video_id, data=None, headers={}):
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
result = self._download_json(
'https://api.nexx.cloud/v3/%s/%s' % (domain_id, path), video_id,
'Downloading %s JSON' % path, data=urlencode_postdata(data),
headers=headers)
self._handle_error(result)
return result['result']
    def _extract_free_formats(self, video, video_id):
        """Build format dicts for videos served from Nexx's 'free' CDN.

        Assembles HTTP/HLS/DASH URLs from the pieces in video['streamdata']
        (origin domain, folder layout, per-bitrate file distribution).
        """
        stream_data = video['streamdata']
        cdn = stream_data['cdnType']
        assert cdn == 'free'
        # NOTE(review): 'hash' shadows the builtin of the same name; kept
        # as-is to leave this block's code untouched.
        hash = video['general']['hash']
        ps = compat_str(stream_data['originalDomain'])
        if stream_data['applyFolderHierarchy'] == 1:
            # Files are sharded into subfolders derived from the reversed,
            # zero-padded video id, e.g. id 1234 -> '4321' -> '43/21'.
            s = ('%04d' % int(video_id))[::-1]
            ps += '/%s/%s' % (s[0:2], s[2:4])
        ps += '/%s/%s_' % (video_id, hash)
        # 't' is a URL template; its leading '%s' is filled in later with the
        # protocol-specific CDN path (HTTP/HLS/DASH).
        t = 'http://%s' + ps
        # azureFileDistribution is a comma-separated list of
        # '<bitrate>:<WxH>' pairs (split on ':' below).
        fd = stream_data['azureFileDistribution'].split(',')
        cdn_provider = stream_data['cdnProvider']
        def p0(p):
            # Optional '_<bitrate>' filename suffix, used only when the
            # Azure folder structure applies.
            return '_%s' % p if stream_data['applyAzureStructure'] == 1 else ''
        formats = []
        if cdn_provider == 'ak':
            # 'ak' provider: one csmil master playlist naming all renditions.
            t += ','
            for i in fd:
                p = i.split(':')
                t += p[1] + p0(int(p[0])) + ','
            t += '.mp4.csmil/master.%s'
        elif cdn_provider == 'ce':
            # 'ce' provider: direct HTTP mp4 format per rendition, plus a
            # manifest URL template for DASH/HLS built alongside.
            k = t.split('/')
            h = k.pop()
            http_base = t = '/'.join(k)
            http_base = http_base % stream_data['cdnPathHTTP']
            t += '/asset.ism/manifest.%s?dcp_ver=aos4&videostream='
            for i in fd:
                p = i.split(':')
                tbr = int(p[0])
                filename = '%s%s%s.mp4' % (h, p[1], p0(tbr))
                f = {
                    'url': http_base + '/' + filename,
                    'format_id': '%s-http-%d' % (cdn, tbr),
                    'tbr': tbr,
                }
                width_height = p[1].split('x')
                if len(width_height) == 2:
                    f.update({
                        'width': int_or_none(width_height[0]),
                        'height': int_or_none(width_height[1]),
                    })
                formats.append(f)
                a = filename + ':%s' % (tbr * 1000)
                t += a + ','
            # Drop the trailing comma and append the audio stream name
            # (taken from the last rendition's filename).
            t = t[:-1] + '&audiostream=' + a.split(':')[0]
        else:
            assert False
        if cdn_provider == 'ce':
            formats.extend(self._extract_mpd_formats(
                t % (stream_data['cdnPathDASH'], 'mpd'), video_id,
                mpd_id='%s-dash' % cdn, fatal=False))
        formats.extend(self._extract_m3u8_formats(
            t % (stream_data['cdnPathHLS'], 'm3u8'), video_id, 'mp4',
            entry_protocol='m3u8_native', m3u8_id='%s-hls' % cdn, fatal=False))
        return formats
    def _extract_azure_formats(self, video, video_id):
        """Build format dicts for videos served from the Azure-backed CDN."""
        stream_data = video['streamdata']
        cdn = stream_data['cdnType']
        assert cdn == 'azure'
        azure_locator = stream_data['azureLocator']
        def get_cdn_shield_base(shield_type='', static=False):
            # Prefer an explicitly configured CDN shield host (plain or
            # https); the for/else runs only when neither is configured and
            # falls back to the predictable akamaized.net host derived from
            # the Azure account number.
            for secure in ('', 's'):
                cdn_shield = stream_data.get('cdnShield%sHTTP%s' % (shield_type, secure.upper()))
                if cdn_shield:
                    return 'http%s://%s' % (secure, cdn_shield)
            else:
                if 'fb' in stream_data['azureAccount']:
                    prefix = 'df' if static else 'f'
                else:
                    prefix = 'd' if static else 'p'
                account = int(stream_data['azureAccount'].replace('nexxplayplus', '').replace('nexxplayfb', ''))
                return 'http://nx-%s%02d.akamaized.net/' % (prefix, account)
        language = video['general'].get('language_raw') or ''
        azure_stream_base = get_cdn_shield_base()
        # Multi-language assets (comma-separated language_raw) use the
        # '_manifest' ISM variant.
        is_ml = ',' in language
        # Trailing '%s' is later filled with a '(format=...)' selector.
        azure_manifest_url = '%s%s/%s_src%s.ism/Manifest' % (
            azure_stream_base, azure_locator, video_id, ('_manifest' if is_ml else '')) + '%s'
        protection_token = try_get(
            video, lambda x: x['protectiondata']['token'], compat_str)
        if protection_token:
            azure_manifest_url += '?hdnts=%s' % protection_token
        # HLS, DASH and Smooth Streaming all hang off the same manifest URL.
        formats = self._extract_m3u8_formats(
            azure_manifest_url % '(format=m3u8-aapl)',
            video_id, 'mp4', 'm3u8_native',
            m3u8_id='%s-hls' % cdn, fatal=False)
        formats.extend(self._extract_mpd_formats(
            azure_manifest_url % '(format=mpd-time-csf)',
            video_id, mpd_id='%s-dash' % cdn, fatal=False))
        formats.extend(self._extract_ism_formats(
            azure_manifest_url % '', video_id, ism_id='%s-mss' % cdn, fatal=False))
        azure_progressive_base = get_cdn_shield_base('Prog', True)
        # Comma-separated '<bitrate>:<WxH>' pairs for progressive mp4s.
        azure_file_distribution = stream_data.get('azureFileDistribution')
        if azure_file_distribution:
            fds = azure_file_distribution.split(',')
            if fds:
                for fd in fds:
                    ss = fd.split(':')
                    if len(ss) == 2:
                        tbr = int_or_none(ss[0])
                        if tbr:
                            f = {
                                'url': '%s%s/%s_src_%s_%d.mp4' % (
                                    azure_progressive_base, azure_locator, video_id, ss[1], tbr),
                                'format_id': '%s-http-%d' % (cdn, tbr),
                                'tbr': tbr,
                            }
                            width_height = ss[1].split('x')
                            if len(width_height) == 2:
                                f.update({
                                    'width': int_or_none(width_height[0]),
                                    'height': int_or_none(width_height[1]),
                                })
                            formats.append(f)
        return formats
    def _real_extract(self, url):
        """Fetch video metadata, via the arc API or a full API session."""
        mobj = re.match(self._VALID_URL, url)
        domain_id = mobj.group('domain_id') or mobj.group('domain_id_s')
        video_id = mobj.group('id')
        video = None
        def find_video(result):
            # The API may return a single video dict or a list; pick the
            # entry whose general.ID matches the requested video id.
            if isinstance(result, dict):
                return result
            elif isinstance(result, list):
                vid = int(video_id)
                for v in result:
                    if try_get(v, lambda x: x['general']['ID'], int) == vid:
                        return v
            return None
        # Try the lightweight arc API first; it needs no session or token.
        response = self._download_json(
            'https://arc.nexx.cloud/api/video/%s.json' % video_id,
            video_id, fatal=False)
        if response and isinstance(response, dict):
            result = response.get('result')
            if result:
                video = find_video(result)
        # not all videos work via arc, e.g. nexx:741:1269984
        if not video:
            # Reverse engineered from JS code (see getDeviceID function)
            device_id = '%d:%d:%d%d' % (
                random.randint(1, 4), int(time.time()),
                random.randint(1e4, 99999), random.randint(1, 9))
            result = self._call_api(domain_id, 'session/init', video_id, data={
                'nxp_devh': device_id,
                'nxp_userh': '',
                'precid': '0',
                'playlicense': '0',
                'screenx': '1920',
                'screeny': '1080',
                'playerversion': '6.0.00',
                'gateway': 'html5',
                'adGateway': '',
                'explicitlanguage': 'en-US',
                'addTextTemplates': '1',
                'addDomainData': '1',
                'addAdModel': '1',
            }, headers={
                'X-Request-Enable-Auth-Fallback': '1',
            })
            cid = result['general']['cid']
            # As described in [1] X-Request-Token generation algorithm is
            # as follows:
            #   md5( operation + domain_id + domain_secret )
            # where domain_secret is a static value that will be given by nexx.tv
            # as per [1]. Here is how this "secret" is generated (reversed
            # from _play.api.init function, search for clienttoken). So it's
            # actually not static and not that much of a secret.
            # 1. https://nexxtvstorage.blob.core.windows.net/files/201610/27.pdf
            secret = result['device']['clienttoken'][int(device_id[0]):]
            secret = secret[0:len(secret) - int(device_id[-1])]
            op = 'byid'
            # Reversed from JS code for _play.api.call function (search for
            # X-Request-Token)
            request_token = hashlib.md5(
                ''.join((op, domain_id, secret)).encode('utf-8')).hexdigest()
            result = self._call_api(
                domain_id, 'videos/%s/%s' % (op, video_id), video_id, data={
                    'additionalfields': 'language,channel,actors,studio,licenseby,slug,subtitle,teaser,description',
                    'addInteractionOptions': '1',
                    'addStatusDetails': '1',
                    'addStreamDetails': '1',
                    'addCaptions': '1',
                    'addScenes': '1',
                    'addHotSpots': '1',
                    'addBumpers': '1',
                    'captionFormat': 'data',
                }, headers={
                    'X-Request-CID': cid,
                    'X-Request-Token': request_token,
                })
            video = find_video(result)
        general = video['general']
        title = general['title']
        # Dispatch on the CDN type; only 'azure' and 'free' are implemented.
        cdn = video['streamdata']['cdnType']
        if cdn == 'azure':
            formats = self._extract_azure_formats(video, video_id)
        elif cdn == 'free':
            formats = self._extract_free_formats(video, video_id)
        else:
            # TODO: reverse more cdns
            assert False
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'alt_title': general.get('subtitle'),
            'description': general.get('description'),
            'release_year': int_or_none(general.get('year')),
            'creator': general.get('studio') or general.get('studio_adref'),
            'thumbnail': try_get(
                video, lambda x: x['imagedata']['thumb'], compat_str),
            'duration': parse_duration(general.get('runtime')),
            'timestamp': int_or_none(general.get('uploaded')),
            'episode_number': int_or_none(try_get(
                video, lambda x: x['episodedata']['episode'])),
            'season_number': int_or_none(try_get(
                video, lambda x: x['episodedata']['season'])),
            'formats': formats,
        }
class NexxEmbedIE(InfoExtractor):
    """Extractor for embed.nexx.cloud iframe pages; delegates to NexxIE."""
    _VALID_URL = r'https?://embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?:video/)?(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://embed.nexx.cloud/748/KC1614647Z27Y7T?autoplay=1',
        'md5': '16746bfc28c42049492385c989b26c4a',
        'info_dict': {
            'id': '161464',
            'ext': 'mp4',
            'title': 'Nervenkitzel Achterbahn',
            'alt_title': 'Karussellbauer in Deutschland',
            'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc',
            'creator': 'SPIEGEL TV',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2761,
            'timestamp': 1394021479,
            'upload_date': '20140305',
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://embed.nexx.cloud/11888/video/DSRTO7UVOX06S7',
        'only_matching': True,
    }]
    @staticmethod
    def _extract_urls(webpage):
        # Reference:
        # 1. https://nx-s.akamaized.net/files/201510/44.pdf
        # iFrame Embed Integration
        return [mobj.group('url') for mobj in re.finditer(
            r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?:(?!\1).)+)\1',
            webpage)]
    def _real_extract(self, url):
        # Download the embed page and hand the first embedded Nexx URL off
        # to NexxIE for the actual extraction.
        embed_id = self._match_id(url)
        webpage = self._download_webpage(url, embed_id)
        return self.url_result(NexxIE._extract_url(webpage), ie=NexxIE.ie_key())
| unlicense | fc969b1a7a27688d84af42c1c3233dc9 | 36.741722 | 118 | 0.480962 | 3.616117 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/sportdeutschland.py | 8 | 2964 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
sanitized_Request,
)
class SportDeutschlandIE(InfoExtractor):
    """Extractor for sportdeutschland.tv.

    Fetches asset metadata from the vidibus backend API; SMIL-based assets
    yield HLS plus RTMP formats, anything else a single direct URL.
    """
    _VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])'
    _TESTS = [{
        'url': 'https://sportdeutschland.tv/badminton/re-live-deutsche-meisterschaften-2020-halbfinals?playlistId=0',
        'info_dict': {
            'id': 're-live-deutsche-meisterschaften-2020-halbfinals',
            'ext': 'mp4',
            'title': 're:Re-live: Deutsche Meisterschaften 2020.*Halbfinals',
            'categories': ['Badminton-Deutschland'],
            'view_count': int,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
            'timestamp': int,
            'upload_date': '20200201',
            'description': 're:.*',  # meaningless description for THIS video
        },
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        sport_id = mobj.group('sport')
        api_url = 'https://proxy.vidibusdynamic.net/ssl/backend.sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % (
            sport_id, video_id)
        req = sanitized_Request(api_url, headers={
            'Accept': 'application/vnd.vidibus.v2.html+json',
            'Referer': url,
        })
        data = self._download_json(req, video_id)
        asset = data['asset']
        categories = [data['section']['title']]
        formats = []
        smil_url = asset['video']
        if '.smil' in smil_url:
            m3u8_url = smil_url.replace('.smil', '.m3u8')
            formats.extend(
                self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4'))
            smil_doc = self._download_xml(
                smil_url, video_id, note='Downloading SMIL metadata')
            base_url_el = smil_doc.find('./head/meta')
            # BUG FIX: truth-testing an Element is deprecated and reflects
            # whether it has children, not whether it exists, so a childless
            # <meta base="..."> was previously ignored.  Compare against
            # None explicitly instead.
            has_base = base_url_el is not None
            if has_base:
                base_url = base_url_el.attrib['base']
            formats.extend([{
                'format_id': 'rmtp',  # [sic] kept for backward compatibility
                'url': base_url if has_base else n.attrib['src'],
                'play_path': n.attrib['src'],
                'ext': 'flv',
                'preference': -100,
                'format_note': 'Seems to fail at example stream',
            } for n in smil_doc.findall('./body/video')])
        else:
            formats.append({'url': smil_url})
        self._sort_formats(formats)
        return {
            'id': video_id,
            'formats': formats,
            'title': asset['title'],
            'thumbnail': asset.get('image'),
            'description': asset.get('teaser'),
            'duration': asset.get('duration'),
            'categories': categories,
            'view_count': asset.get('views'),
            'rtmp_live': asset.get('live'),
            'timestamp': parse_iso8601(asset.get('date')),
        }
| unlicense | 144c821e450604d565f2c733fe0e92a0 | 35.146341 | 127 | 0.524629 | 3.495283 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/usatoday.py | 20 | 2703 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
get_element_by_attribute,
parse_duration,
try_get,
update_url_query,
)
from ..compat import compat_str
class USATodayIE(InfoExtractor):
    """Resolve usatoday.com video pages to their Brightcove players."""
    _VALID_URL = r'https?://(?:www\.)?usatoday\.com/(?:[^/]+/)*(?P<id>[^?/#]+)'
    _TESTS = [{
        # Brightcove Partner ID = 29906170001
        'url': 'http://www.usatoday.com/media/cinematic/video/81729424/us-france-warn-syrian-regime-ahead-of-new-peace-talks/',
        'md5': '033587d2529dc3411a1ab3644c3b8827',
        'info_dict': {
            'id': '4799374959001',
            'ext': 'mp4',
            'title': 'US, France warn Syrian regime ahead of new peace talks',
            'timestamp': 1457891045,
            'description': 'md5:7e50464fdf2126b0f533748d3c78d58f',
            'uploader_id': '29906170001',
            'upload_date': '20160313',
        }
    }, {
        # ui-video-data[asset_metadata][items][brightcoveaccount] = 28911775001
        'url': 'https://www.usatoday.com/story/tech/science/2018/08/21/yellowstone-supervolcano-eruption-stop-worrying-its-blow/973633002/',
        'info_dict': {
            'id': '5824495846001',
            'ext': 'mp4',
            'title': 'Yellowstone more likely to crack rather than explode',
            'timestamp': 1534790612,
            'description': 'md5:3715e7927639a4f16b474e9391687c62',
            'uploader_id': '28911775001',
            'upload_date': '20180820',
        }
    }]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
    def _real_extract(self, url):
        display_id = self._match_id(url)
        ajax_url = update_url_query(url, {'ajax': 'true'})
        webpage = self._download_webpage(ajax_url, display_id)
        ui_video_data = get_element_by_attribute('class', 'ui-video-data', webpage)
        if not ui_video_data:
            raise ExtractorError('no video on the webpage', expected=True)
        video_data = self._parse_json(ui_video_data, display_id)
        item = try_get(video_data, lambda x: x['asset_metadata']['items'], dict) or {}
        # Account and video ids come from the asset metadata when present,
        # otherwise from defaults / the top-level video data.
        account_id = item.get('brightcoveaccount', '29906170001')
        brightcove_id = item.get('brightcoveid') or video_data['brightcove_id']
        return {
            '_type': 'url_transparent',
            'url': self.BRIGHTCOVE_URL_TEMPLATE % (account_id, brightcove_id),
            'id': compat_str(video_data['id']),
            'title': video_data['title'],
            'thumbnail': video_data.get('thumbnail'),
            'description': video_data.get('description'),
            'duration': parse_duration(video_data.get('length')),
            'ie_key': 'BrightcoveNew',
        }
| unlicense | f624792cb18eae9bd7c4b173b08f550b | 41.904762 | 154 | 0.596374 | 3.28034 | false | false | false | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/nosvideo.py | 64 | 2480 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
sanitized_Request,
urlencode_postdata,
xpath_text,
xpath_with_ns,
)
_x = lambda p: xpath_with_ns(p, {'xspf': 'http://xspf.org/ns/0/'})
class NosVideoIE(InfoExtractor):
    """Extractor for nosvideo.com file-hosting pages.

    Submits the 'free download' form, then reads the stream URL from the
    site's XSPF playlist.
    """
    _VALID_URL = r'https?://(?:www\.)?nosvideo\.com/' + \
        r'(?:embed/|\?v=)(?P<id>[A-Za-z0-9]{12})/?'
    _PLAYLIST_URL = 'http://nosvideo.com/xml/{xml_id:s}.xml'
    _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
    _TEST = {
        'url': 'http://nosvideo.com/?v=mu8fle7g7rpq',
        'md5': '6124ed47130d8be3eacae635b071e6b6',
        'info_dict': {
            'id': 'mu8fle7g7rpq',
            'ext': 'mp4',
            'title': 'big_buck_bunny_480p_surround-fix.avi.mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # POST the 'continue to video' form to reach the download page.
        fields = {
            'id': video_id,
            'op': 'download1',
            'method_free': 'Continue to Video',
        }
        req = sanitized_Request(url, urlencode_postdata(fields))
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        webpage = self._download_webpage(req, video_id,
                                         'Downloading download page')
        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)
        # The page embeds the XSPF playlist id in a 'php|<id>|' fragment.
        xml_id = self._search_regex(r'php\|([^\|]+)\|', webpage, 'XML ID')
        playlist_url = self._PLAYLIST_URL.format(xml_id=xml_id)
        playlist = self._download_xml(playlist_url, video_id)
        track = playlist.find(_x('.//xspf:track'))
        if track is None:
            raise ExtractorError(
                'XML playlist is missing the \'track\' element',
                expected=True)
        title = xpath_text(track, _x('./xspf:title'), 'title')
        url = xpath_text(track, _x('./xspf:file'), 'URL', fatal=True)
        thumbnail = xpath_text(track, _x('./xspf:image'), 'thumbnail')
        if title is not None:
            title = title.strip()
        formats = [{
            'format_id': 'sd',
            'url': url,
        }]
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| unlicense | 6d9857e9910e980169fa4390226678fa | 32.066667 | 75 | 0.523387 | 3.497884 | false | false | false | false |
pyeve/cerberus | cerberus/tests/test_rule_…of.py | 3 | 11263 | from pytest import mark
from cerberus import errors
from cerberus.tests import assert_fail, assert_not_has_error, assert_success
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'field': 5}),
        (assert_fail, {'field': -1}),
        (assert_fail, {'field': 11}),
    ],
)
def test_allof(test_function, document):
    """'allof' validates only when every given subschema validates."""
    test_function(
        schema={'field': {'allof': [{'type': 'integer'}, {'min': 0}, {'max': 10}]}},
        document=document,
    )
def test_anyof_fails():
    """'anyof' fails when no branch validates, reporting each branch's error."""
    schema = {'field': {'type': 'integer', 'anyof': [{'min': 0}, {'min': 10}]}}
    assert_fail(
        document={'field': -1},
        schema=schema,
        error=(('field',), ('field', 'anyof'), errors.ANYOF, ({'min': 0}, {'min': 10})),
        child_errors=[
            (('field',), ('field', 'anyof', 0, 'min'), errors.MIN_VALUE, 0),
            (('field',), ('field', 'anyof', 1, 'min'), errors.MIN_VALUE, 10),
        ],
    )
    # wrong type (float / string) fails the sibling 'type' rule
    assert_fail(document={'field': 5.5}, schema=schema)
    assert_fail(document={'field': '5.5'}, schema=schema)
    # value between the two allowed ranges matches neither branch
    assert_fail(
        schema={'field': {'anyof': [{'min': 0, 'max': 10}, {'min': 100, 'max': 110}]}},
        document={'field': 50},
    )
@mark.parametrize(
    ("schema", "document"),
    [
        ({'field': {'min': 0, 'max': 10}}, {'field': 5}),
        (
            {'field': {'anyof': [{'min': 0, 'max': 10}, {'min': 100, 'max': 110}]}},
            {'field': 105},
        ),
        (
            {'field': {'type': 'integer', 'anyof': [{'min': 0}, {'min': 10}]}},
            {'field': 10},
        ),
        (
            {'field': {'type': 'integer', 'anyof': [{'min': 0}, {'min': 10}]}},
            {'field': 5},
        ),
    ],
)
def test_anyof_succeeds(schema, document):
    """'anyof' passes when at least one branch validates."""
    assert_success(schema=schema, document=document)
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'field': -1.5}),
        (assert_success, {'field': -1}),
        (assert_success, {'field': 11}),
        (assert_success, {'field': 11.5}),
        (assert_fail, {'field': 5}),
        (assert_fail, {'field': 5.5}),
        (assert_fail, {'field': '5.5'}),
    ],
)
def test_anyof_in_allof(test_function, document):
    """anyof branches nested in an allof: (float or integer) AND (>=10 or <=0)."""
    test_function(
        schema={
            'field': {
                'allof': [
                    {'anyof': [{'type': 'float'}, {'type': 'integer'}]},
                    {'anyof': [{'min': 10}, {'max': 0}]},
                ]
            }
        },
        document=document,
    )
def test_anyof_in_itemsrules(validator):
    """anyof inside itemsrules, including the rendered error tree."""
    # test that a list of schemas can be specified.
    valid_parts = (
        {
            'schema': {
                'model number': {'type': ('string',)},
                'count': {'type': ('integer',)},
            }
        },
        {'schema': {'serial number': {'type': (str,)}, 'count': {'type': (int,)}}},
    )
    valid_item = {'type': ('dict', 'string'), 'anyof': valid_parts}
    schema = {'parts': {'type': 'list', 'itemsrules': valid_item}}
    document = {
        'parts': [
            {'model number': 'MX-009', 'count': 100},
            {'serial number': '898-001'},
            'misc',
        ]
    }
    # document is valid. each entry in 'parts' matches a type or schema
    assert_success(document=document, schema=schema, validator=validator)
    document['parts'].append({'product name': "Monitors", 'count': 18})
    # document is invalid. 'product name' does not match any valid schemas
    assert_fail(document=document, schema=schema, validator=validator)
    document['parts'].pop()
    # document is valid again
    assert_success(document=document, schema=schema, validator=validator)
    document['parts'].append({'product name': "Monitors", 'count': 18})
    document['parts'].append(10)
    # and invalid. numbers are not allowed.
    _errors = assert_fail(
        document,
        schema,
        validator=validator,
        error=('parts', ('parts', 'itemsrules'), errors.ITEMSRULES, valid_item),
        child_errors=[
            (('parts', 3), ('parts', 'itemsrules', 'anyof'), errors.ANYOF, valid_parts),
            (
                ('parts', 4),
                ('parts', 'itemsrules', 'type'),
                errors.TYPE,
                ('dict', 'string'),
            ),
        ],
    )
    # the int item fails the 'type' rule, so no 'anyof' error is emitted for it
    assert_not_has_error(
        _errors,
        ('parts', 4),
        ('parts', 'itemsrules', 'anyof'),
        errors.ANYOF,
        valid_parts,
    )
    # tests errors.BasicErrorHandler's tree representation
    _errors = validator.errors
    assert 'parts' in _errors
    assert 3 in _errors['parts'][-1]
    assert _errors['parts'][-1][3][0] == "no definitions validate"
    scope = _errors['parts'][-1][3][-1]
    assert 'anyof definition 0' in scope
    assert 'anyof definition 1' in scope
    assert scope['anyof definition 0'] == [{"product name": ["unknown field"]}]
    assert scope['anyof definition 1'] == [{"product name": ["unknown field"]}]
    assert _errors['parts'][-1][4] == ["must be one of these types: ('dict', 'string')"]
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'field': {'val': 0}}),
        (assert_success, {'field': {'val': '0'}}),
        (assert_fail, {'field': {'val': 1.1}}),
    ],
)
def test_anyof_with_semantically_equal_schemas(test_function, document):
    """Placing 'type' inside or outside the anyof branches behaves the same."""
    test_function(
        schema={
            'field': {
                'anyof': [
                    {'type': 'dict', 'schema': {'val': {'type': 'integer'}}},
                    {'type': 'dict', 'schema': {'val': {'type': 'string'}}},
                ]
            }
        },
        document=document,
    )
    test_function(
        schema={
            'field': {
                'type': 'dict',
                'anyof': [
                    {'schema': {'val': {'type': 'integer'}}},
                    {'schema': {'val': {'type': 'string'}}},
                ],
            }
        },
        document=document,
    )
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'field': -1}),
        (assert_fail, {'field': -5}),
        (assert_fail, {'field': 1}),
        (assert_fail, {'field': 5}),
        (assert_fail, {'field': 11}),
        (assert_fail, {'field': 15}),
    ],
)
def test_noneof(test_function, document):
    """'noneof' passes only when no subschema validates."""
    test_function(
        schema={
            'field': {
                'type': 'integer',
                'noneof': [{'min': 0}, {'min': 10}, {'allowed': [-5, 5, 15]}],
            }
        },
        document=document,
    )
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'field': -5}),
        (assert_success, {'field': 1}),
        (assert_fail, {'field': -1}),
        (assert_fail, {'field': 5}),
        (assert_fail, {'field': 11}),
        (assert_fail, {'field': 15}),
    ],
)
def test_oneof(test_function, document):
    """'oneof' passes only when exactly one subschema validates."""
    test_function(
        schema={
            'field': {
                'type': 'integer',
                'oneof': [{'min': 0}, {'min': 10}, {'allowed': [-5, 5, 15]}],
            }
        },
        document=document,
    )
def test_schema_is_not_spoiled(validator):
    """Sibling rules must not be copied into the anyof subschemas."""
    validator.schema = {
        'field': {'type': 'integer', 'anyof': [{'min': 0}, {'min': 10}]}
    }
    assert 'type' not in validator.schema['field']['anyof'][0]
    assert 'type' not in validator.schema['field']['anyof'][1]
    assert 'allow_unknown' not in validator.schema['field']['anyof'][0]
    assert 'allow_unknown' not in validator.schema['field']['anyof'][1]
@mark.parametrize("document", [{'field': 'bar'}, {'field': 23}])
def test_anyof_type(document):
    """The 'anyof_type' shorthand accepts a value matching any listed type."""
    assert_success(
        schema={'field': {'anyof_type': ['string', 'integer']}}, document=document
    )
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'oneof_schema': {'digits': 19}}),
        (assert_success, {'oneof_schema': {'text': '84'}}),
        (assert_fail, {'oneof_schema': {'digits': 19, 'text': '84'}}),
    ],
)
def test_oneof_schema(test_function, document):
    """'oneof_schema' fails when a document matches more than one schema."""
    test_function(
        schema={
            'oneof_schema': {
                'type': 'dict',
                'oneof_schema': [
                    {'digits': {'type': 'integer', 'min': 0, 'max': 99}},
                    {'text': {'type': 'string', 'regex': '^[0-9]{2}$'}},
                ],
            }
        },
        document=document,
    )
@mark.parametrize(
    "document", [{'nested_oneof_type': {'foo': 'a'}}, {'nested_oneof_type': {'bar': 3}}]
)
def test_oneof_type_in_valuesrules(document):
    """'oneof_type' shorthand nested inside a 'valuesrules' rule."""
    assert_success(
        schema={
            'nested_oneof_type': {'valuesrules': {'oneof_type': ['string', 'integer']}}
        },
        document=document,
    )
def test_oneof_type_in_oneof_schema(validator):
    """Nested oneof failures are rendered as a readable error tree."""
    assert_fail(
        schema={
            'abc': {
                'type': 'dict',
                'oneof_schema': [
                    {
                        'foo': {
                            'type': 'dict',
                            'schema': {'bar': {'oneof_type': ['integer', 'float']}},
                        }
                    },
                    {'baz': {'type': 'string'}},
                ],
            }
        },
        document={'abc': {'foo': {'bar': 'bad'}}},
        validator=validator,
    )
    # the error tree mirrors the nesting of the oneof definitions
    assert validator.errors == {
        'abc': [
            'none or more than one rule validate',
            {
                'oneof definition 0': [
                    {
                        'foo': [
                            {
                                'bar': [
                                    'none or more than one rule validate',
                                    {
                                        'oneof definition 0': [
                                            "must be one of these types: ('integer',)"
                                        ],
                                        'oneof definition 1': [
                                            "must be one of these " "types: ('float',)"
                                        ],
                                    },
                                ]
                            }
                        ]
                    }
                ],
                'oneof definition 1': [{'foo': ['unknown field']}],
            },
        ]
    }
def test_allow_unknown_in_oneof():
    """'allow_unknown' is honored inside oneof branches."""
    # https://github.com/pyeve/cerberus/issues/251
    schema = {
        'test': {
            'oneof': (
                {
                    'type': ('dict',),
                    'allow_unknown': True,
                    'schema': {'known': {'type': ('string',)}},
                },
                {'type': ('dict',), 'schema': {'known': {'type': ('string',)}}},
            )
        }
    }
    # check regression and that allow unknown does not cause any different
    # than expected behaviour for one-of.
    assert_fail(
        schema=schema,
        document={'test': {'known': 's'}},
        error=('test', ('test', 'oneof'), errors.ONEOF, schema['test']['oneof']),
    )
    # check that allow_unknown is actually applied
    assert_success(document={'test': {'known': 's', 'unknown': 'asd'}}, schema=schema)
| isc | 76faacddbe51576836ba75de8af335a4 | 29.605978 | 88 | 0.447838 | 3.947774 | false | true | false | false |
unitedstates/congress-legislators | scripts/untire.py | 13 | 1156 | #!/usr/bin/env python
# "Un-retire" a Member of Congress: Move a Member of Congress
# from the legislators-historical file to the legislators-current file
# and give the Member a new term.
#
# python unretire.py bioguideID
import sys
import rtyaml
import utils
from collections import OrderedDict
def run():
    """Move a retired member back to legislators-current.

    Looks the member up by the bioguide id given on the command line,
    appends a new term (copying type/state/party from the last one; start
    and end dates are left as None for manual editing), and moves the
    record from the historical file to the current file.
    """
    if len(sys.argv) != 2:
        print("Usage:")
        print("python untire.py bioguideID")
        sys.exit()
    print("Loading current YAML...")
    y = utils.load_data("legislators-current.yaml")
    print("Loading historical YAML...")
    y1 = utils.load_data("legislators-historical.yaml")
    for moc in y1:
        if moc["id"].get("bioguide", None) != sys.argv[1]: continue
        print("Updating:")
        rtyaml.pprint(moc["id"])
        print()
        rtyaml.pprint(moc["name"])
        # New open-ended term based on the member's most recent one.
        moc["terms"].append(OrderedDict([
            ("type", moc["terms"][-1]["type"]),
            ("start", None),
            ("end", None),
            ("state", moc["terms"][-1]["state"]),
            ("party", moc["terms"][-1]["party"]),
        ]))
        y1.remove(moc)
        y.append(moc)
        break
    print("Saving changes...")
    utils.save_data(y, "legislators-current.yaml")
    utils.save_data(y1, "legislators-historical.yaml")
if __name__ == '__main__':
    run()
unitedstates/congress-legislators | scripts/wikipedia_ids.py | 3 | 5436 | # Scans Wikipedia for pages using the CongBio and CongLinks
# templates, which have Bioguide IDs. Updates the 'wikipedia'
# ID field for matching Members of Congress, and for pages
# using the CongLinks template also updates a variety of
# other ID as found in the template.
import lxml.etree, re, urllib.request, urllib.parse, urllib.error
import utils, os.path
def run():
# Field mapping. And which fields should be turned into integers.
# See https://en.wikipedia.org/wiki/Template:CongLinks for what's possibly available.
fieldmap = {
"congbio": "bioguide",
#"fec": "fec", # handled specially...
"govtrack": "govtrack", # for sanity checking since we definitely have this already (I caught some Wikipedia errors)
"opensecrets": "opensecrets",
"votesmart": "votesmart",
"cspan": "cspan",
}
int_fields = ("govtrack", "votesmart", "cspan")
# default to not caching
cache = utils.flags().get('cache', False)
# Load legislator files and map bioguide IDs.
y1 = utils.load_data("legislators-current.yaml")
y2 = utils.load_data("legislators-historical.yaml")
bioguides = { }
for y in y1+y2:
bioguides[y["id"]["bioguide"]] = y
# Okay now the Wikipedia stuff...
def get_matching_pages():
# Does a Wikipedia API search for pages containing either of the
# two templates. Returns the pages.
page_titles = set()
for template in ("CongLinks", "CongBio"):
eicontinue = ""
while True:
# construct query URL, using the "eicontinue" of the last query to get the next batch
url = 'http://en.wikipedia.org/w/api.php?action=query&list=embeddedin&eititle=Template:%s&eilimit=500&format=xml' % template
if eicontinue: url += "&eicontinue=" + eicontinue
# load the XML
print("Getting %s pages (%d...)" % (template, len(page_titles)))
dom = lxml.etree.fromstring(utils.download(url, None, True)) # can't cache eicontinue probably
for pgname in dom.xpath("query/embeddedin/ei/@title"):
page_titles.add(pgname)
# get the next eicontinue value and loop
eicontinue = dom.xpath("string(query-continue/embeddedin/@eicontinue)")
if not eicontinue: break
return page_titles
# Get the list of Wikipedia pages that use any of the templates we care about.
page_list_cache_file = os.path.join(utils.cache_dir(), "legislators/wikipedia/page_titles")
if cache and os.path.exists(page_list_cache_file):
# Load from cache.
matching_pages = open(page_list_cache_file).read().split("\n")
else:
# Query Wikipedia API and save to cache.
matching_pages = get_matching_pages()
utils.write(("\n".join(matching_pages)), page_list_cache_file)
# Filter out things that aren't actually pages (User:, Talk:, etcetera, anything with a colon).
matching_pages = [p for p in matching_pages if ":" not in p]
# Load each page's content and parse the template.
for p in sorted(matching_pages):
if " campaign" in p: continue
if " (surname)" in p: continue
if "career of " in p: continue
if "for Congress" in p: continue
if p.startswith("List of "): continue
if p in ("New York in the American Civil War", "Upper Marlboro, Maryland"): continue
# Query the Wikipedia API to get the raw page content in XML,
# and then use XPath to get the raw page text.
url = "http://en.wikipedia.org/w/api.php?action=query&titles=" + urllib.parse.quote(p.encode("utf8")) + "&export&exportnowrap"
cache_path = "legislators/wikipedia/pages/" + p
dom = lxml.etree.fromstring(utils.download(url, cache_path, not cache))
page_content = dom.xpath("string(mw:page/mw:revision/mw:text)", namespaces={ "mw": "http://www.mediawiki.org/xml/export-0.8/" })
# Build a dict for the IDs that we want to insert into our files.
new_ids = {
"wikipedia": p # Wikipedia page name, with spaces for spaces (not underscores)
}
if "CongLinks" in page_content:
# Parse the key/val pairs in the template.
m = re.search(r"\{\{\s*CongLinks\s+([^}]*\S)\s*\}\}", page_content)
if not m: continue # no template?
for arg in m.group(1).split("|"):
if "=" not in arg: continue
key, val = arg.split("=", 1)
key = key.strip()
val = val.strip()
if val and key in fieldmap:
try:
if fieldmap[key] in int_fields: val = int(val)
except ValueError:
print("invalid value", key, val)
continue
if key == "opensecrets": val = val.replace("&newMem=Y", "").replace("&newmem=Y", "").replace("&cycle=2004", "").upper()
new_ids[fieldmap[key]] = val
if "bioguide" not in new_ids: continue
new_ids["bioguide"] = new_ids["bioguide"].upper() # hmm
bioguide = new_ids["bioguide"]
else:
m = re.search(r"\{\{\s*CongBio\s*\|\s*(\w+)\s*\}\}", page_content)
if not m: continue # no template?
bioguide = m.group(1).upper()
if not bioguide in bioguides:
print("Member not found: " + bioguide, p, "(Might have been a delegate to the Constitutional Convention.)")
continue
# handle FEC ids specially because they are stored in an array...
fec_id = new_ids.get("fec")
if fec_id: del new_ids["fec"]
member = bioguides[bioguide]
member["id"].update(new_ids)
# ...finish the FEC id.
if fec_id:
if fec_id not in bioguides[bioguide]["id"].get("fec", []):
bioguides[bioguide]["id"].setdefault("fec", []).append(fec_id)
#print p.encode("utf8"), new_ids
utils.save_data(y1, "legislators-current.yaml")
utils.save_data(y2, "legislators-historical.yaml")
# Script entry point: update Wikipedia-derived IDs when run directly.
if __name__ == '__main__':
    run()
| cc0-1.0 | 5c8469b48aac0dc20a99a51b91c97747 | 35.979592 | 130 | 0.675681 | 2.998345 | false | false | false | false |
unitedstates/congress-legislators | scripts/utils.py | 1 | 14567 | # Helpful functions for finding data about members and committees
CURRENT_CONGRESS = 115
states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming',
'OL': 'Orleans',
'DK': 'Dakota',
'PI': 'Philippine Islands'
}
import urllib.request, urllib.error, urllib.parse
import os, errno, sys, traceback
import re, html.entities
import pprint
import rtyaml
from datetime import datetime, date
import time
import json
import lxml.html # for meta redirect parsing
import yaml
import smtplib
import email.utils
from email.mime.text import MIMEText
# read in an opt-in config file for supplying email settings
# returns None if it's not there, and this should always be handled gracefully
path = "email/config.yml"
if os.path.exists(path):
    # safe_load refuses to construct arbitrary Python objects from the config
    # file (plain yaml.load is unsafe and deprecated without a Loader), and
    # the with-statement closes the handle that open() previously leaked.
    with open(path, 'r') as config_file:
        email_settings = yaml.safe_load(config_file).get('email', None)
else:
    email_settings = None
def congress_from_legislative_year(year):
    """Return the Congress number for a given legislative year.

    Uses floor division so the result is an int; the previous ``/`` is true
    division under Python 3 and returned a float (e.g. 115.0 instead of 115),
    unlike the companion get_congress_from_date() which already used ``//``.
    """
    return ((year + 1) // 2) - 894
def legislative_year(date=None):
    """Return the legislative year containing *date* (defaults to now).

    Congresses convene at noon on January 3, so January 1-2 — and the
    morning of January 3, when a time of day is available — still belong
    to the previous legislative year.
    """
    if not date:
        date = datetime.now()
    # Outside January there is no ambiguity.
    if date.month != 1:
        return date.year
    if date.day in (1, 2):
        return date.year - 1
    # On Jan 3 only a datetime (with an hour) can fall before the noon handover.
    if date.day == 3 and isinstance(date, datetime) and date.hour < 12:
        return date.year - 1
    return date.year
def congress_start_end_dates(congress):
    """Return (start_date, end_date) of the given Congress (e.g. 1 for the 1st).

    The end date of one Congress equals the start date of the next because
    the handover happens at noon (at least since 1935). See also
    get_congress_from_date, which is this function's inverse.
    """
    start_year = 1789 + (congress - 1) * 2
    end_year = start_year + 2
    if congress >= 74:
        # Since the 74th Congress, terms run from noon Jan 3 to noon Jan 3.
        return (date(start_year, 1, 3), date(end_year, 1, 3))
    if congress == 73:
        # The 73rd began on March 4 but, per the 20th Amendment, ended on
        # the January 3 (1935) preceding the usual March 3. (Congress had
        # adjourned in 1934 anyway.)
        return (date(start_year, 3, 4), date(end_year, 1, 3))
    if congress == 69:
        # The 69th Congress (and only that one) adjourned on a March 4,
        # apparently treating its Constitutional term as expiring at the
        # time of day the 1st Congress convened; our data follows suit.
        return (date(start_year, 3, 4), date(end_year, 3, 4))
    # The 1st Congress met on March 4, 1789, per an act of the Continental
    # Congress; judging by adjournment dates, early Congresses treated their
    # terms as ending on March 3.
    return (date(start_year, 3, 4), date(end_year, 3, 3))
def get_congress_from_date(d, range_type=None):
    """Inverse of congress_start_end_dates: the Congress number containing *d*.

    Works by computing the 'legislative year' of *d*, then counting back to
    1789 (the first legislative year) and dividing by two (Congresses last
    two years).

    Congresses hand over at noon on a single transition day (March 4 through
    1933, January 3 since 1935), so that calendar date is ambiguous. Pass
    range_type='start' to assign a transition date to the incoming Congress,
    or range_type='end' for the outgoing one; otherwise a ValueError is
    raised for transition dates.
    """
    if d.year % 2 == 0:
        # Even years fall entirely within a single Congress.
        y = d.year
    else:
        # Odd years contain a transition; find this year's transition date.
        transition = date(d.year, 3, 4) if d.year < 1935 else date(d.year, 1, 3)
        if d < transition:
            y = d.year - 1
        elif d > transition:
            y = d.year
        elif range_type == "end":
            # On the transition day itself: caller chose the outgoing Congress.
            y = d.year - 1
        elif range_type == "start":
            # On the transition day itself: caller chose the incoming Congress.
            y = d.year
        else:
            raise ValueError("Date {} is ambiguous; must pass range_type='start' or 'end'.".format(d))
    # Simple integer arithmetic maps the legislative year to a Congress number.
    return ((y + 1) // 2) - 894
def parse_date(date):
    """Parse a YYYY-MM-DD string into a datetime.date."""
    parsed = datetime.strptime(date, "%Y-%m-%d")
    return parsed.date()
def log(object):
    """Print a message; non-strings are pretty-printed.

    Fixes a crash: the module ``pprint`` was being called directly
    (``pprint(object)``), but modules are not callable — the intended
    function is ``pprint.pprint``.
    """
    if isinstance(object, str):
        print(object)
    else:
        pprint.pprint(object)
def uniq(seq):
    """Return seq's items in original order with duplicates removed (first wins)."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def args():
    """Return the positional command-line arguments (everything not starting with --)."""
    return [token for token in sys.argv[1:] if not token.startswith("--")]
def flags():
    """Parse --key=value / --flag options from sys.argv into a dict.

    Keys are lowercased; bare flags become True; the literal strings
    'True'/'False' become booleans. Values may themselves contain '='
    (the previous unbounded split crashed on e.g. --url=http://x?a=b).
    """
    options = {}
    for token in sys.argv[1:]:
        if not token.startswith("--"):
            continue
        if "=" in token:
            # split only on the first '=' so the value keeps any later ones
            key, value = token.split('=', 1)
        else:
            key, value = token, True
        key = key.split("--")[1]
        if value == 'True': value = True
        elif value == 'False': value = False
        options[key.lower()] = value
    return options
##### Data management
def data_dir():
    """Directory holding the YAML data files.

    Relative path: assumes scripts are run from the scripts/ directory so
    that ".." is the repository root — TODO confirm against callers.
    """
    return ".."
def load_data(path):
    """Load a YAML data file located at data_dir()/path (with pickle caching)."""
    return yaml_load(os.path.join(data_dir(), path))
def save_data(data, path):
    """Save *data* as YAML at data_dir()/path, and mirror it as JSON under
    ../alternate_formats/ (datetimes serialized via format_datetime)."""
    yaml_dump(data, os.path.join(data_dir(), path))
    write(
        json.dumps(data, default=format_datetime),
        "../alternate_formats/%s" % path.replace(".yaml", ".json"))
##### Downloading

import scrapelib
# Polite shared scraper: rate-limited, retried, and identified by user agent.
scraper = scrapelib.Scraper(requests_per_minute=60, retry_attempts=3)
scraper.user_agent = "the @unitedstates project (https://github.com/unitedstates/congress-legislators)"

def cache_dir():
    """Directory where downloaded pages are cached (see download())."""
    return "cache"
def download(url, destination=None, force=False, options=None):
    """Fetch *url* and return the body as a str, caching it under
    cache_dir()/destination.

    destination: cache path relative to cache_dir(); may be None only when
        force is True (no caching then).
    force: skip reading from the cache (the fresh body is still written to it).
    options: optional dict of flags — 'debug' (log activity), 'urllib' (use
        urllib instead of the rate-limited scraper), 'check_redirects'
        (follow HTML meta-refresh redirects).
    Returns None on download error or an empty body.
    """
    if not destination and not force:
        raise TypeError("destination must not be None if force is False.")

    if not options:
        options = {}

    # get the path to cache the file, or None if destination is None
    cache = os.path.join(cache_dir(), destination) if destination else None

    if not force and os.path.exists(cache):
        if options.get('debug', False):
            log("Cached: (%s, %s)" % (cache, url))
        with open(cache, 'r') as f:
            body = f.read()
    else:
        try:
            if options.get('debug', False):
                log("Downloading: %s" % url)
            if options.get('urllib', False):
                response = urllib.request.urlopen(url)
                body = response.read().decode("utf-8")  # guessing encoding
            else:
                response = scraper.urlopen(url)
                body = str(response)  # ensure is unicode not bytes
        except scrapelib.HTTPError:
            log("Error downloading %s" % url)
            return None

        # don't allow 0-byte files
        if (not body) or (not body.strip()):
            return None

        # the downloader can optionally parse the body as HTML
        # and look for meta redirects. a bit expensive, so opt-in.
        if options.get('check_redirects', False):
            try:
                html_tree = lxml.html.fromstring(body)
            except ValueError:
                log("Error parsing source from url {0}".format(url))
                return None
            meta = html_tree.xpath("//meta[translate(@http-equiv, 'REFSH', 'refsh') = 'refresh']/@content")
            if meta:
                attr = meta[0]
                wait, text = attr.split(";")
                if text.lower().startswith("url="):
                    new_url = text[4:]
                    if not new_url.startswith(url):  # don't print if a local redirect
                        print("Found redirect for {}, downloading {} instead..".format(url, new_url))
                    # follow the redirect with caching disabled for the hop
                    options.pop('check_redirects')
                    # NOTE(review): the recursive call can return None, which
                    # would then crash in write() below — appears unhandled.
                    body = download(new_url, None, True, options)

        # cache content to disk (only on the fresh-download path)
        if cache: write(body, cache)

    return body
from pytz import timezone
eastern_time_zone = timezone('US/Eastern')
def format_datetime(obj):
    """JSON serialization helper: datetimes become ISO-8601 strings in
    US/Eastern, strings pass through unchanged, anything else becomes None."""
    if isinstance(obj, str):
        return obj
    if isinstance(obj, datetime):
        # drop sub-second precision, then attach the Eastern timezone
        return eastern_time_zone.localize(obj.replace(microsecond=0)).isoformat()
    return None
def write(content, destination):
    """Write *content* (a str) to *destination*, creating parent directories.

    Text is encoded with open()'s default encoding — the original comment
    says utf-8, which holds on most platforms but is locale-dependent.
    Fixes two issues: a destination with no directory component crashed
    (os.path.dirname returned "" and makedirs("") raises), and the file
    handle was never closed.
    """
    parent = os.path.dirname(destination)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(destination, 'w') as f:
        f.write(content)
# mkdir -p in python, from:
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
    """Create *path* and any missing parents; already existing is not an error."""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # EEXIST means something is already at *path*; matching the original
        # behavior, that is silently treated as success.
        if exc.errno != errno.EEXIST:
            raise
def format_exception(exception):
    """Render *exception* (with its traceback) as a printable string.

    Previously this ignored its argument and formatted sys.exc_info(),
    which only worked while inside an ``except`` block; formatting the
    exception's own __traceback__ works anywhere the object is held.
    """
    return "\n".join(traceback.format_exception(type(exception), exception, exception.__traceback__))
# taken from http://effbot.org/zone/re-sub.htm#unescape-html
def unescape(text, encoding=None):
    """Replace HTML character/entity references in *text* with characters.

    Numeric references (&#65; / &#x41;) and named entities (&amp;) are
    decoded; unrecognized references are left as-is. Control characters
    are stripped from the result. If *encoding* is given, numeric
    references are decoded as single bytes in that encoding instead of
    Unicode code points.

    Fixes: the substitution pattern used "\\w" inside a non-raw string
    literal (an invalid escape sequence, a DeprecationWarning slated to
    become an error) — now a raw string.
    """

    def remove_unicode_control(value):
        # strip control characters (keeping \t, \n, \r)
        remove_re = re.compile('[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]')
        return remove_re.sub('', value)

    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # numeric character reference
            if encoding is None:
                try:
                    if text[:3] == "&#x":
                        return chr(int(text[3:-1], 16))
                    else:
                        return chr(int(text[2:-1]))
                except ValueError:
                    pass
            else:
                try:
                    if text[:3] == "&#x":
                        return bytes([int(text[3:-1], 16)]).decode(encoding)
                    else:
                        return bytes([int(text[2:-1])]).decode(encoding)
                except ValueError:
                    pass
        else:
            # named entity
            try:
                text = chr(html.entities.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is

    text = re.sub(r"&#?\w+;", fixup, text)
    text = remove_unicode_control(text)
    return text
##### YAML serialization ######
# Apply some common settings for loading/dumping YAML and cache the
# data in pickled format which is a LOT faster than YAML.
def yaml_load(path, use_cache=True):
    """Load a YAML file, with a .pickle sidecar cache keyed on the file's
    SHA-1 hash — unpickling is much faster than parsing YAML.

    Fixes leaked file handles: every open() is now wrapped in a
    with-statement so handles are closed deterministically.
    """
    import pickle, os.path, hashlib
    # Hash the YAML file so we can tell whether the cached pickle is stale.
    with open(path, 'rb') as f:
        h = hashlib.sha1(f.read()).hexdigest()
    if use_cache and os.path.exists(path + ".pickle"):
        try:
            with open(path + ".pickle", 'rb') as f:
                store = pickle.load(f)
            if store["hash"] == h:
                return store["data"]
        except EOFError:
            pass  # bad .pickle file, pretend it doesn't exist
    # No cached pickled data exists, so load the YAML file.
    with open(path) as f:
        data = rtyaml.load(f)
    # Store in a pickled file for fast access later.
    with open(path + ".pickle", "wb") as f:
        pickle.dump({"hash": h, "data": data}, f)
    return data
def yaml_dump(data, path):
    """Write *data* to *path* as YAML and refresh the .pickle sidecar cache
    used by yaml_load().

    Fixes leaked file handles: every open() is now wrapped in a
    with-statement so handles are closed deterministically.
    """
    import pickle, hashlib
    # write file
    with open(path, "w") as f:
        rtyaml.dump(data, f)
    # Store in a pickled file for fast access later.
    with open(path, 'rb') as f:
        h = hashlib.sha1(f.read()).hexdigest()
    with open(path + ".pickle", "wb") as f:
        pickle.dump({"hash": h, "data": data}, f)
# if email settings are supplied, email the text - otherwise, just print it
def admin(body):
    """Report a message (or exception) to the admin.

    The message is always printed; it is additionally emailed when
    email/config.yml provided settings (see the email_settings global).
    Never raises: failures while reporting are printed instead, to avoid
    an error-reporting loop.
    """
    try:
        if isinstance(body, Exception):
            body = format_exception(body)
        print(body)  # always print it

        if email_settings:
            send_email(body)
    except Exception as exception:
        print("Exception logging message to admin, halting as to avoid loop")
        print(format_exception(exception))
# this should only be called if the settings are definitely there
def send_email(message):
    """Send *message* to the configured admin address via SMTP.

    Requires the module-level email_settings (loaded from email/config.yml)
    to be present — callers should go through admin(), which checks that.
    """
    print("Sending email to %s..." % email_settings['to'])

    # adapted from http://www.doughellmann.com/PyMOTW/smtplib/
    msg = MIMEText(message)
    msg.set_unixfrom('author')
    msg['To'] = email.utils.formataddr(('Recipient', email_settings['to']))
    msg['From'] = email.utils.formataddr((email_settings['from_name'], email_settings['from']))
    # timestamp in the subject keeps repeated reports distinct in a mailbox
    msg['Subject'] = "%s - %i" % (email_settings['subject'], int(time.time()))

    server = smtplib.SMTP(email_settings['hostname'])
    try:
        server.ehlo()
        # upgrade to TLS when requested and supported by the server
        if email_settings['starttls'] and server.has_extn('STARTTLS'):
            server.starttls()
            server.ehlo()

        server.login(email_settings['user_name'], email_settings['password'])
        server.sendmail(email_settings['from'], [email_settings['to']], msg.as_string())
    finally:
        # always close the SMTP session, even if login/send failed
        server.quit()

    print("Sent email to %s." % email_settings['to'])
| cc0-1.0 | 36e5be6c0686b18e595ae90abcf5abc7 | 30.059701 | 103 | 0.617286 | 3.39874 | false | false | false | false |
unitedstates/congress-legislators | scripts/committee_membership.py | 1 | 13819 | #!/usr/bin/env python
# Data Sources:
# House:
# http://clerk.house.gov/xml/lists/MemberData.xml
# Senate:
# https://www.senate.gov/general/committee_membership/committee_memberships_{thomas_id}.xml
# Data Files Updated:
# committee-membership-current.yaml:
# All entries are overwritten except for house members of joint committees
# which have to be manually entered since there is no source of this data
# committees-current.yaml:
# Fro House committees, updates name, address, and phone
# For Senate committees, updates name and url
import re, lxml.html, lxml.etree
from collections import OrderedDict
import utils
from utils import download, load_data, save_data
def run():
committee_membership = load_data("committee-membership-current.yaml")
committees_current = load_data("committees-current.yaml")
# default to not caching
cache = utils.flags().get('cache', False)
force = not cache
# map house/senate committee IDs to their dicts
house_ref = { }
for cx in committees_current:
if "house_committee_id" in cx:
house_ref[cx["house_committee_id"]] = cx
senate_ref = { }
for cx in committees_current:
if "senate_committee_id" in cx:
senate_ref[cx["senate_committee_id"]] = cx
# map state/district to current senators because the Senate committee
# membership data does not contain IDs for senators, and map to bioguide
# IDs so we can copy forward the official_full name for House members
legislators_current = load_data("legislators-current.yaml")
senators = { }
for moc in legislators_current:
term = moc["terms"][-1]
if term["type"] == "sen":
for n in [moc["name"]] + moc.get("other_names", []):
senators[(term["state"], n["last"])] = moc
legislators_current = { moc["id"]["bioguide"]: moc for moc in legislators_current }
# Scrape clerk.house.gov...
def scrape_house():
# clear out all of the existing House members of committees (i.e. all House committee membership
# and the House part of Joint committee membership)
for committee, members in committee_membership.items():
for m in list(members): # must clone before editing list
if committee[0] == "H" or m.get("chamber") == "house":
members.remove(m)
r = download("http://clerk.house.gov/xml/lists/MemberData.xml", "clerk_xml")
dom = lxml.etree.fromstring(r.encode("latin-1")) # must be bytes to parse if there is an encoding declaration inside the string
# Update committee metadata.
def update_house_committee_metadata(xml_cx, cx, parentdict, is_subcommittee):
sub_prefix = "sub" if is_subcommittee else ""
if cx is None:
# New committee.
if not is_subcommittee:
cx = {
"type": "house",
"thomas_id": "H" + xml_cx.attrib["type"][0].upper() + xml_cx.attrib["comcode"][0:2],
"house_committee_id": xml_cx.attrib["comcode"][0:2]
}
house_ref[cx["house_committee_id"]] = cx
else:
cx = {
"name": None, # placeholder so order is right
"thomas_id": xml_cx.attrib["subcomcode"][2:]
}
parentdict.append(cx)
cx["name"] = xml_cx.find(sub_prefix + "committee-fullname").text
if not is_subcommittee and not cx["name"].startswith("Joint "): cx["name"] = "House " + cx["name"]
building = xml_cx.attrib[sub_prefix + "com-building-code"]
if building == "C":
building = "CAPITOL"
#address format: 1301 LHOB; Washington, DC 20515-6001
cx["address"] = xml_cx.attrib[sub_prefix + "com-room"] + " " + building \
+ "; Washington, DC " + xml_cx.attrib[sub_prefix + "com-zip"] \
+ (("-" + xml_cx.attrib[sub_prefix + "com-zip-suffix"]) if xml_cx.attrib[sub_prefix + "com-zip-suffix"] != "0" else "")
cx["phone"] = "(202) " + xml_cx.attrib[sub_prefix + "com-phone"]
if not is_subcommittee:
for xml_sx in xml_cx.findall("subcommittee"):
sxx = [s for s in cx["subcommittees"] if s["thomas_id"] == xml_sx.attrib["subcomcode"][2:]]
update_house_committee_metadata(xml_sx, sxx[0] if len(sxx) > 0 else None, cx["subcommittees"], True)
committees = dom.xpath("/MemberData/committees")[0]
for xml_cx in committees.findall("committee"):
house_committee_id = xml_cx.attrib["comcode"][0:2]
update_house_committee_metadata(xml_cx, house_ref.get(house_committee_id), committees_current, False)
# Determine which party is in the majority. Only the majority
# party holds chair positions. At least one should have the
# position Chair.
house_majority_caucus = dom.xpath("string(/MemberData/members/member[committee-assignments/committee[@leadership='Chair']]/member-info/caucus)")
for xml_member in dom.xpath("/MemberData/members/member"):
bioguide_id = xml_member.xpath("member-info/bioguideID")[0].text
if not bioguide_id: #sometimes the xml has vacancies as blanks
continue
# Although there is a name in the XML data, for consistency use the one we
# have in legislators-current.yaml, if one is set.
try:
official_name = legislators_current[bioguide_id]["name"]["official_full"]
except KeyError:
official_name = xml_member.xpath("member-info/official-name")[0].text
#is using caucus better than using party?
caucus = xml_member.xpath("member-info/caucus")[0].text
party = "majority" if caucus == house_majority_caucus else "minority"
#for each committee or subcommittee membership
for cm in xml_member.xpath("committee-assignments/committee|committee-assignments/subcommittee"):
if "comcode" in cm.attrib:
house_committee_id = cm.attrib["comcode"][:2]
if house_committee_id == "HL": continue # this doesn't appear to be a committee and seems like a data error
thomas_committee_id = house_ref[house_committee_id]["thomas_id"]
elif "subcomcode" in cm.attrib:
house_committee_id = cm.attrib["subcomcode"][:2]
thomas_committee_id = house_ref[house_committee_id]["thomas_id"] + cm.attrib["subcomcode"][2:]
else:
continue # some nodes are invalid
membership = OrderedDict()
membership["name"] = official_name
membership["party"] = party
membership["rank"] = int(cm.attrib["rank"])
if "leadership" in cm.attrib:
membership["title"] = cm.attrib["leadership"] # TODO .replace("woman", "").replace("man", "")
elif membership["rank"] == 1:
#xml doesn't contain ranking member titles
if membership["party"] == "majority":
membership["title"] = "Chair"
else:
membership["title"] = "Ranking Member"
membership["bioguide"] = bioguide_id
if house_ref[house_committee_id]["type"] == "joint":
membership["chamber"] = "house"
committee_membership.setdefault(thomas_committee_id, []).append(membership)
# Scrape senate.gov....
def scrape_senate():
url = "https://www.senate.gov/pagelayout/committees/b_three_sections_with_teasers/membership.htm"
body = download(url, "committees/membership/senate.html", force)
for id, name in re.findall(r'value="/general/committee_membership/committee_memberships_(....).htm">(.*?)</option>', body, re.I | re.S):
if id not in senate_ref:
print("Unrecognized committee:", id, name)
continue
cx = senate_ref[id]
is_joint = (id[0] == "J")
# Scrape some metadata on the HTML page first.
committee_url = "https://www.senate.gov/general/committee_membership/committee_memberships_%s.htm" % id
print("[%s] Fetching members for %s (%s)" % (id, name, committee_url))
body2 = download(committee_url, "committees/membership/senate/%s.html" % id, force)
if not body2:
print("\tcommittee page not good:", committee_url)
continue
m = re.search(r'<span class="contenttext"><a href="(http://(.*?).senate.gov/)">', body2, re.I)
if m:
cx["url"] = m.group(1)
# Use the XML for the rest.
print("\tDownloading XML...")
committee_url = "https://www.senate.gov/general/committee_membership/committee_memberships_%s.xml" % id
body3 = download(committee_url, "committees/membership/senate/%s.xml" % id, force)
dom = lxml.etree.fromstring(body3.encode("utf8")) # must be bytes to parse if there is an encoding declaration inside the string
cx["name"] = dom.xpath("committees/committee_name")[0].text
if id[0] != "J" and id[0:2] != 'SC':
cx["name"] = "Senate " + cx["name"]
majority_party = dom.xpath("committees/majority_party")[0].text
# update full committee members
scrape_senate_members(
dom.xpath("committees/members/member"),
committee_membership.setdefault(id, []),
majority_party, is_joint)
# update subcommittees
for subcom in dom.xpath("committees/subcommittee"):
scid = subcom.xpath("committee_code")[0].text[4:]
for sx in cx.get('subcommittees', []):
if sx["thomas_id"] == scid:
break
else:
print("Subcommittee not found, creating it", scid, name)
sx = OrderedDict()
sx['thomas_id'] = scid
cx.setdefault('subcommittees', []).append(sx)
# update metadata
name = subcom.xpath("subcommittee_name")[0].text
sx["name"] = name.strip()
sx["name"] = re.sub(r"^\s*Subcommittee on\s*", "", sx["name"])
sx["name"] = re.sub(r"\s+", " ", sx["name"])
scrape_senate_members(
subcom.xpath("members/member"),
committee_membership.setdefault(id + scid, []),
majority_party, is_joint)
def scrape_senate_members(members, output_list, majority_party, is_joint):
    """Sync one committee's Senate membership list in place from XML nodes.

    members: iterable of <member> XML nodes from senate.gov.
    output_list: the committee's current membership list; mutated in place.
    majority_party: party name holding the Senate majority.
    is_joint: True for joint committees, whose House members must be kept.
    """
    # Keep a copy of the previous membership, and then clear the Senate members
    # of the committee.
    existing_members_data = list(output_list)  # clone
    if not is_joint:
        output_list.clear()
    else:
        for m in list(output_list):  # must clone before editing list
            if m.get("chamber") == "senate":
                output_list.remove(m)

    # Update members.
    ids = set()
    count_by_party = { "majority": 0, "minority": 0 }
    for node in members:
        # collect the bioguide ID of each member added (None for unmatched)
        ids.add(scrape_senate_member(output_list, node, majority_party, is_joint, count_by_party, existing_members_data))

    # Purge non-members. Ignore House members of joint committees.
    i = 0
    while i < len(output_list):
        if output_list[i]['bioguide'] not in ids and output_list[i].get("chamber") in (None, "senate"):
            output_list[i:i+1] = []
        else:
            i += 1

    # sort by party, then by rank, since we get the nodes in the XML in a rough seniority order that ignores party
    output_list.sort(key = lambda e : (e["party"] != "majority", e["rank"]))
def scrape_senate_member(output_list, membernode, majority_party, is_joint, count_by_party, existing_members_data):
    """Build one senator's membership entry from an XML node and append it
    to output_list.

    Returns the member's bioguide ID, or None if the senator couldn't be
    matched by (state, last name) against the module's senators lookup.
    count_by_party is mutated to assign per-party ranks in document order.
    """
    last_name = membernode.xpath("name/last")[0].text
    state = membernode.xpath("state")[0].text
    party = "majority" if membernode.xpath("party")[0].text == majority_party else "minority"
    title = membernode.xpath("position")[0].text
    if title == "Member": title = None
    if title == "Ranking": title = "Ranking Member"

    # look up senator by state and last name
    # (special case: the senate.gov data spells Luján without the accent)
    if (state, last_name) == ("NM", "Lujan"): last_name = "Luján"
    if (state, last_name) not in senators:
        print("\t[%s] Unknown member: %s" % (state, last_name))
        return None

    moc = senators[(state, last_name)]

    entry = OrderedDict()
    if 'official_full' in moc['name']:
        entry["name"] = moc['name']['official_full']
    else:
        print("missing name->official_full field for", moc['id']['bioguide'])
    entry["party"] = party
    # rank within the member's own party, in XML (rough seniority) order
    count_by_party[party] += 1
    entry["rank"] = count_by_party[party]
    if title: entry["title"] = title
    entry.update(ids_from(moc["id"]))
    if is_joint: entry["chamber"] = "senate"

    # Look for an existing entry for this member and take
    # start_date and source from it, if set.
    for item in existing_members_data:
        if item["bioguide"] == entry["bioguide"]:
            for key in ("start_date", "source"):
                if key in item:
                    entry[key] = item[key]

    output_list.append(entry)

    # Return bioguide ID of member added.
    return entry["bioguide"]
# stick to a specific small set of official IDs to cross-link members
# this limits the IDs from going out of control in this file, while
# preserving us flexibility to be inclusive of IDs in the main leg files
def ids_from(moc):
    """Return the subset of a member's IDs used for cross-linking (bioguide only).

    Raises ValueError when no usable ID is present.
    """
    ids = {key: moc[key] for key in ("bioguide",) if key in moc}
    if not ids:
        raise ValueError("Missing an official ID for this legislator, won't be able to link back")
    return ids
# MAIN
scrape_house()
scrape_senate()
# ensure each committee has members in a stable, sorted order
for comm, mbrs in committee_membership.items():
# joint committees also have to sort by chamber
if comm[0] == "J":
mbrs.sort(key=lambda entry: (entry["party"] == "minority", entry["rank"], entry["chamber"] != "senate"))
# Senate and House committees have different sort orders to match
# earlier data, but there's no particular reason for this
elif comm[0] == "S":
mbrs.sort(key=lambda entry: (entry["party"] == "minority", entry["rank"]))
else:
mbrs.sort(key=lambda entry: (entry["rank"], entry["party"] == "minority"))
save_data(committee_membership, "committee-membership-current.yaml")
save_data(committees_current, "committees-current.yaml")
if __name__ == '__main__':
run()
| cc0-1.0 | efe0c23bd7e137d585174efa34ed23ed | 40.247761 | 148 | 0.633232 | 3.36778 | false | false | false | false |
unitedstates/congress-legislators | scripts/export_csv.py | 13 | 2716 | # Converts the specified YAML file to an equivalent-ish CSV file
# (on standard output).
#
# python export_csv.py ../legislators-current.yaml
import sys, csv
from collections import OrderedDict
from utils import yaml_load
def run():
    """Convert the YAML file named on the command line to CSV on stdout.

    Nested mappings are flattened into double-underscore-joined column
    names; lists are skipped. Column order is heuristically reconstructed
    from the order keys appear in the records.
    """
    if len(sys.argv) < 2:
        print("Usage: python export_csv.py ../legislators-current.yaml > legislators-current.csv")
        sys.exit(0)

    data = yaml_load(sys.argv[1])

    ###############################################

    def flatten_object(obj, path, ret):
        """Takes an object obj and flattens it into a dictionary ret.
        For instance { "x": { "y": 123 } } is turned into { "x__y": 123 }.
        """
        for k, v in list(obj.items()):
            if isinstance(v, dict):
                flatten_object(v, (path + "__" if path else "") + k + "__", ret)
            elif isinstance(v, list):
                # don't peek inside lists
                pass
            else:
                ret[path + k] = v
        return ret

    # Scan through the records recursively to get a list of column names.
    # Attempt to preserve the field order as found in the YAML file. Since
    # any field may be absent, no one record can provide the complete field
    # order. Build the best field order by looking at what each field tends
    # to be preceded by.
    fields = set()
    preceding_keys = dict()  # maps keys to a dict of *previous* keys and how often they occurred
    for record in data:
        prev_key = None
        for key in flatten_object(record, "", OrderedDict()):
            fields.add(key)
            preceding_keys.setdefault(key, {}).setdefault(prev_key, 0)
            preceding_keys[key][prev_key] += 1
            prev_key = key

    # Convert the counts to relative frequencies.
    for k, v in list(preceding_keys.items()):
        s = float(sum(v.values()))
        for k2 in v:
            v[k2] /= s

    # Get a good order for the fields. Greedily add keys from left to right
    # maximizing the conditional probability that the preceding key would
    # precede the key on the right.
    #
    # field_order starts with a None sentinel (the "start of record" marker
    # used when scanning above), so it must grow to len(fields) + 1 entries
    # before every real field has been placed; the previous "< len(fields)"
    # bound stopped one iteration short and silently dropped the last column.
    field_order = [None]
    while len(field_order) < len(fields) + 1:
        # Which key is such that an already-placed key is its most likely
        # predecessor? We use a probability conditional on the key being
        # present so that infrequent keys are not lost.
        next_key = max([f for f in fields if f not in field_order],
                       key=lambda k: max(preceding_keys[k].get(pk, 0) for pk in field_order))
        field_order.append(next_key)
    field_order = field_order[1:]  # remove the None sentinel at the start

    # Write CSV header.
    w = csv.writer(sys.stdout)
    w.writerow(field_order)

    # Write the objects.
    for record in data:
        obj = flatten_object(record, "", {})
        w.writerow([obj.get(f, "") for f in field_order])
if __name__ == '__main__':
run() | cc0-1.0 | e9312f24f3a2799e3cf1145dd4b04633 | 29.52809 | 93 | 0.663108 | 3.172897 | false | false | false | false |
unitedstates/congress-legislators | scripts/contact_forms.py | 1 | 2128 | #!/usr/bin/env python
'''Gets contact webform URLs for the intersection of members with bioguide ids
and with correlating contact form steps in unitedstates/contact-congress:
args:
<bioguide_id bioguide_id ...>
A list of bioguide ids to import.
options:
--debug[=True]
Whether or not verbose output should be printed to the command line
'''
import yaml
from urllib.request import urlopen
import utils
from utils import load_data, save_data
# These members have forms in iframes, and Contact-Congress has different
# needs than human users might.
SKIP_BIOGUIDES = ['M000312']
def run():
    """Update contact_form URLs on current members' most recent terms.

    Pass bioguide IDs as positional command-line arguments to limit the
    update; otherwise all current members are attempted. Members without a
    file in unitedstates/contact-congress (and those in SKIP_BIOGUIDES)
    are skipped. Saves legislators-current.yaml when done.
    """
    options = utils.flags()
    debug = options.get('debug', False)

    filename = "legislators-current.yaml"
    args = utils.args()
    legislators = load_data(filename)

    if len(args) != 0:
        bioguides = args
        print("Fetching contact forms for %s..." % ', '.join(bioguides))
    else:
        bioguides = [member['id']['bioguide'] for member in legislators]
        print("Fetching contact forms for all current members...")

    for legislator in legislators:
        bioguide = legislator['id']['bioguide']
        if bioguide not in bioguides: continue
        if bioguide in SKIP_BIOGUIDES: continue

        if debug: print("Downloading form for %s" % bioguide, flush=True)
        try:
            steps = contact_steps_for(bioguide)
        except LegislatorNotFoundError as e:
            if debug: print("skipping, %s..." % e, flush=True)
            continue

        # only the first step's URL is recorded, on the member's latest term
        legislator['terms'][-1]['contact_form'] = steps['contact_form']['steps'][0]['visit']

    print("Saving data to %s..." % filename)
    save_data(legislators, filename)
def contact_steps_for(bioguide):
    """Fetch and parse the contact-congress steps YAML for a bioguide ID.

    Raises LegislatorNotFoundError when the member has no file in
    unitedstates/contact-congress.

    Fixes the 404 handling: urlopen() raises urllib.error.HTTPError for
    error statuses rather than returning a response, so the previous
    ``response.code == 404`` check was dead code and a missing member
    crashed the caller instead of being skipped.
    """
    from urllib.error import HTTPError
    base_url = "https://raw.githubusercontent.com/unitedstates/contact-congress/main/members/{bioguide}.yaml"
    try:
        response = urlopen(base_url.format(bioguide=bioguide))
    except HTTPError as exc:
        if exc.code == 404:
            raise LegislatorNotFoundError("%s not found in unitedstates/contact-congress!" % bioguide)
        raise
    return yaml.load(response.read())
class LegislatorNotFoundError(Exception):
    """Raised when a member has no file in the unitedstates/contact-congress repo."""
    pass
if __name__ == '__main__':
run()
| cc0-1.0 | cfea34a39238f264346ce1f9dae900d6 | 27.756757 | 109 | 0.672462 | 3.482815 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.