text stringlengths 0 1.05M | meta dict |
|---|---|
""" Auction REST resources """
from flask import g
from flask_restful import Resource, fields, marshal, reqparse
from penelophant import app, auther, crud
from penelophant.helpers.invoice import get_invoice_by_id_or_abort
from penelophant.exceptions import InvoiceAlreadyPaid
import balanced
# flask_restful marshalling schema used to serialize an Invoice model in
# API responses (shared by InvoiceList.get, Invoice.get and Invoice.put).
invoice_fields = {
    'id': fields.Integer,
    # The winning bid this invoice was generated for, including a summary
    # of the auction it belongs to.
    'bid': fields.Nested({
        'id': fields.Integer,
        'bid_time': fields.DateTime,
        'price': fields.Fixed(decimals=2),
        'auction': fields.Nested({
            'id': fields.Integer,
            'title': fields.String,
            'type': fields.String
        })
    }),
    # Monetary amount rendered with exactly two decimal places.
    'amount': fields.Fixed(decimals=2),
    'payer': fields.Nested({
        'id': fields.Integer,
        'display_name': fields.String
    }),
    'payee': fields.Nested({
        'id': fields.Integer,
        'display_name': fields.String
    }),
    'paid': fields.Boolean
}
class InvoiceList(Resource):
    """ Invoice List REST API endpoint """

    @auther.login_required
    def get(self):
        """ Return every invoice belonging to the authenticated user. """
        return marshal(g.user.invoices, invoice_fields), 200
class Invoice(Resource):
    """ Invoice REST API endpoint """

    @auther.login_required
    def get(self, invoice_id):
        """ View a specific invoice.

        The helper aborts the request with an HTTP error when the invoice
        does not exist (behavior defined in penelophant.helpers.invoice).
        """
        invoice = get_invoice_by_id_or_abort(invoice_id)
        return marshal(invoice, invoice_fields), 200

    @auther.login_required
    def put(self, invoice_id):
        """ User pays an invoice.

        Charges the card identified by the ``ccId`` query parameter via the
        Balanced payments API, then marks the invoice as paid.
        """
        # ccId is a Balanced card token supplied as a query-string argument.
        parser = reqparse.RequestParser()
        parser.add_argument('ccId', type=str, required=True, location='args')
        args = parser.parse_args()
        invoice = get_invoice_by_id_or_abort(invoice_id)
        # Refuse double payment before touching the payment provider.
        if invoice.paid:
            raise InvoiceAlreadyPaid
        card = balanced.Card.fetch('/cards/%s' % args.ccId)
        debit = card.debit(
            appears_on_statement_as=app.config['STATEMENT_MSG'],
            # Balanced expects the amount in cents.
            amount=int(invoice.amount*100),
            description="Invoice for invoice #%s" % (invoice.id),
            # Cross-reference ids stored with the provider for auditing.
            meta={
                'invoice_id': invoice.id,
                'bid_id': invoice.bid.id,
                'auction_id': invoice.bid.auction.id,
                'payer': invoice.payer.id,
                'payee': invoice.payee.id
            }
        )
        # Record provider details only after the debit succeeded, then persist.
        invoice.provider = "balanced"
        invoice.provider_details = debit.id
        invoice.paid = True
        crud.save()
        return marshal(invoice, invoice_fields), 200
| {
"repo_name": "kevinoconnor7/penelophant",
"path": "penelophant/api/invoice.py",
"copies": "1",
"size": "2333",
"license": "apache-2.0",
"hash": -8158018231018845000,
"line_mean": 25.5113636364,
"line_max": 73,
"alpha_frac": 0.6403771967,
"autogenerated": false,
"ratio": 3.5509893455098935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9611440525649088,
"avg_score": 0.015985203312161134,
"num_lines": 88
} |
""" Auction REST resources """
from flask import g
from flask_restful import Resource, reqparse, abort, fields, marshal
from decimal import Decimal
from datetime import datetime, timedelta
from penelophant import crud, auther
from penelophant.database import db
from penelophant.helpers.auction import find_auction_type, get_all_auction_types
from penelophant.models.Auction import Auction as Auction_model
# flask_restful marshalling schema used to serialize an Auction model in
# API responses (shared by AuctionList and Auction endpoints).
auction_fields = {
    'id': fields.Integer,
    'title': fields.String,
    'description': fields.String,
    'type': fields.String,
    'reserve_met': fields.Boolean,
    'sealed_bids': fields.Boolean,
    'start_time': fields.DateTime,
    'end_time': fields.DateTime,
    # Summary of the current leading bid.
    'highest_bid': fields.Nested({
        'id': fields.Integer,
        'price': fields.Fixed(decimals=2)
    }),
    'creator': fields.Nested({
        'id': fields.Integer,
        'display_name': fields.String
    }),
    # Full bid history, each with the bidding user.
    'bids': fields.List(fields.Nested({
        'price': fields.Fixed(decimals=2),
        'bid_time': fields.DateTime,
        'user': fields.Nested({
            'id': fields.Integer,
            'display_name': fields.String
        })
    })),
    'has_started': fields.Boolean,
    'has_ended': fields.Boolean,
    'current_price': fields.Fixed(decimals=2)
}
class AuctionList(Resource):
    """ Auction List REST API """

    def get(self):
        """ List all currently-running auctions.

        Accepts an optional ``query`` argument that performs a
        case-insensitive substring match against the auction title.
        """
        session = db.session
        now = datetime.utcnow()
        # Only auctions that have started and have not yet ended.
        auctions = session.query(Auction_model)\
            .filter(Auction_model.start_time <= now)\
            .filter(Auction_model.end_time > now)
        parser = reqparse.RequestParser()
        parser.add_argument('query', type=str)
        args = parser.parse_args()
        if args.query:
            auctions = auctions.filter(
                Auction_model.title.ilike("%" + str(args.query) + "%"))
        return marshal(auctions.all(), auction_fields), 200

    @auther.login_required
    def post(self):
        """ Handle auction creation.

        ``title``, ``type`` and ``end_time`` are required (reqparse rejects
        the request with a 400 when they are missing); ``start_time``
        defaults to "now".
        """
        parser = reqparse.RequestParser()
        parser.add_argument('title', type=str, required=True)
        parser.add_argument('description', type=str)
        parser.add_argument('type', type=str, required=True)
        parser.add_argument('start_time', type=int)  # Must be a UNIX timestamp
        parser.add_argument('end_time', type=int, required=True)  # Must be a UNIX timestamp
        parser.add_argument('reserve', type=Decimal)
        parser.add_argument('start_price', type=Decimal)
        args = parser.parse_args()
        # Default the start time to "now" when the client omits it.
        start_time = datetime.utcnow()
        if args.start_time:
            start_time = datetime.utcfromtimestamp(args.start_time)
        end_time = datetime.utcfromtimestamp(args.end_time)
        if not end_time > start_time:
            abort(400, message="End time cannot be before the start time")
        # Allow up to five minutes of clock skew; clamp slightly-past start
        # times forward to "now" instead of rejecting them.
        if not start_time >= datetime.utcnow() - timedelta(minutes=5):
            abort(400, message="Start time cannot be in the past")
        elif start_time < datetime.utcnow():
            start_time = datetime.utcnow()
        if args.start_price is None:
            args.start_price = 0
        if args.reserve is None:
            args.reserve = 0
        if args.reserve < 0:
            abort(400, message="Reserve price must be positive")
        if args.start_price < 0:
            abort(400, message="Start price must be positive")
        # A reserve below the starting price is meaningless; drop it.
        if args.start_price > args.reserve:
            args.reserve = 0
        auction = find_auction_type(args.type)()
        auction.title = args.title
        auction.description = args.description
        auction.start_time = start_time
        auction.end_time = end_time
        auction.reserve = args.reserve
        auction.start_price = args.start_price
        auction.creator = g.user
        crud.add(auction)
        return marshal(auction, auction_fields), 201
class Auction(Resource):
    """ Auction REST API Endpoint """

    def get(self, auction_id):
        """ Retrieve a specific auction """
        session = db.session
        auction = session.query(Auction_model).get(auction_id)
        # Query.get() returns None when no row matches; 404 instead of
        # crashing with an AttributeError below.
        if auction is None:
            abort(404, message="Auction not found")
        # Auctions are hidden from everyone but their creator until they start.
        if auction.start_time > datetime.utcnow() and auction.creator != g.user:
            abort(403, message="Not authorized to view this auction")
        return marshal(auction, auction_fields), 200

    #pylint: disable=R0915
    @auther.login_required
    def put(self, auction_id):
        """ Update an auction.

        Omitted fields keep their stored values.  Once an auction has
        started, its schedule and pricing are frozen.
        """
        session = db.session
        auction = session.query(Auction_model).get(auction_id)
        # Guard against a bad id before dereferencing the result.
        if auction is None:
            abort(404, message="Auction not found")
        if auction.creator != g.user:
            abort(403, message="Not authorized to update auction")
        parser = reqparse.RequestParser()
        parser.add_argument('title', type=str)
        parser.add_argument('description', type=str)
        parser.add_argument('reserve', type=Decimal)
        parser.add_argument('start_time', type=int)  # Must be a UNIX timestamp
        parser.add_argument('end_time', type=int)  # Must be a UNIX timestamp
        parser.add_argument('start_price', type=Decimal)
        args = parser.parse_args()
        # Fall back to the stored values for any field the client omitted.
        if not args.start_time:
            start_time = auction.start_time
        else:
            start_time = datetime.utcfromtimestamp(args.start_time)
        if not args.end_time:
            end_time = auction.end_time
        else:
            end_time = datetime.utcfromtimestamp(args.end_time)
        if args.title is None:
            args.title = auction.title
        if args.description is None:
            args.description = auction.description
        if args.reserve is None:
            args.reserve = auction.reserve
        if args.start_price is None:
            args.start_price = auction.start_price
        if not end_time > start_time:
            abort(400, message="End time cannot be before the start time")
        if args.reserve < 0:
            abort(400, message="Reserve price must be positive")
        if args.start_price < 0:
            abort(400, message="Start price must be positive")
        # Auction has started: schedule and pricing may not change.
        if datetime.utcnow() >= auction.start_time:
            if args.reserve > auction.reserve:
                abort(400, message="Reserve cannot be increased once the auction has started")
            if args.start_price != auction.start_price:
                abort(400, message="Starting price cannot be changed once the auction has started")
            if start_time != auction.start_time:
                abort(400, message="Start time cannot be changed once the auction has started")
            if end_time != auction.end_time:
                abort(400, message="End time cannot be changed once the auction has started")
        else:
            # 30-second grace window for clock skew.
            if not start_time >= datetime.utcnow() - timedelta(seconds=30):
                abort(400, message="Start time cannot be in the past")
        auction.title = args.title
        auction.description = args.description
        auction.start_time = start_time
        auction.end_time = end_time
        auction.reserve = args.reserve
        auction.start_price = args.start_price
        crud.save()
        return marshal(auction, auction_fields), 200
class AuctionTypeList(Resource):
    """ REST endpoint for auction types """

    def get(self):
        """ Get a list of all auction types """
        # Auction type classes expose their metadata via dunder attributes.
        type_schema = {
            'title': fields.String(attribute='__title__'),
            'description': fields.String(attribute='__description__'),
            'type': fields.String(attribute='__type__')
        }
        return marshal(get_all_auction_types(), type_schema), 200
| {
"repo_name": "kevinoconnor7/penelophant",
"path": "penelophant/api/auction.py",
"copies": "1",
"size": "7284",
"license": "apache-2.0",
"hash": 8037048691055955000,
"line_mean": 29.8644067797,
"line_max": 91,
"alpha_frac": 0.6722954421,
"autogenerated": false,
"ratio": 3.6166832174776564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4788978659577656,
"avg_score": null,
"num_lines": null
} |
"""Audience field models"""
from datetime import datetime
from emma import exceptions as ex
from emma.model import BaseApiModel, str_fields_to_datetime
class Field(BaseApiModel):
    """
    Encapsulates operations for a :class:`Field`

    :param account: The Account which owns this Field
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`Field`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> fld = acct.fields[123]
        >>> fld
        <Field>
    """
    def __init__(self, account, raw=None):
        self.account = account
        super(Field, self).__init__(raw)

    def _parse_raw(self, raw):
        # Normalize timestamp strings from the API into datetime objects.
        raw.update(str_fields_to_datetime(['deleted_at'], raw))
        return raw

    def is_deleted(self):
        """
        Whether a field has been deleted

        :rtype: :class:`bool`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> fld = acct.fields[123]
            >>> fld.is_deleted()
            False
            >>> fld.delete()
            >>> fld.is_deleted()
            True
        """
        return 'deleted_at' in self._dict and bool(self._dict['deleted_at'])

    def delete(self):
        """
        Delete this field

        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> fld = acct.fields[123]
            >>> fld.delete()
            None
        """
        if 'field_id' not in self._dict:
            raise ex.NoFieldIdError()
        # Already deleted: nothing to do.
        if self.is_deleted():
            return None
        path = "/fields/%s" % self._dict['field_id']
        if self.account.adapter.delete(path):
            # Mark locally and evict from the account's cached collection.
            self._dict['deleted_at'] = datetime.now()
            if self._dict['field_id'] in self.account.fields:
                del self.account.fields._dict[self._dict['field_id']]

    def extract(self):
        """
        Extracts data from the model in a format suitable for using with the API

        :rtype: :class:`dict`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> fld = acct.fields[123]
            >>> fld.extract()
            {'field_id':123, 'shortcut_name':u"test_field", ...}
        """
        # Only these keys are accepted by the field endpoints.
        keys = ['display_name', 'field_type', 'widget_type', 'column_order',
                'shortcut_name', 'options']
        return dict(x for x in self._dict.items() if x[0] in keys)

    def _add(self):
        """Add a single field and register it with the account's cache."""
        path = '/fields'
        data = self.extract()
        self._dict['field_id'] = self.account.adapter.post(path, data)
        self.account.fields._dict[self._dict['field_id']] = self

    def _update(self):
        """Update a single field"""
        path = '/fields/%s' % self._dict['field_id']
        data = self.extract()
        self.account.adapter.put(path, data)

    def save(self):
        """
        Add or update this :class:`Field`

        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> fld = acct.members[123]
            >>> fld['shortcut_name'] = u"new_name"
            >>> fld.save()
            None
            >>> fld = acct.members.factory({'shortcut_name': u"test_field"})
            >>> fld.save()
            None
        """
        # A missing field_id means the field does not yet exist server-side.
        if 'field_id' not in self._dict:
            return self._add()
        else:
            return self._update()

    def clear_member_information(self):
        """
        Clear all member information for this field

        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> fld = acct.members[123]
            >>> fld.clear_member_information()
            None
        """
        if 'field_id' not in self._dict:
            raise ex.NoFieldIdError()
        path = '/fields/%s/clear' % self._dict['field_id']
        if not self.account.adapter.post(path):
            raise ex.ClearMemberFieldInformationError()
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/field.py",
"copies": "1",
"size": "4482",
"license": "mit",
"hash": 8242425333227555000,
"line_mean": 28.4868421053,
"line_max": 80,
"alpha_frac": 0.5327978581,
"autogenerated": false,
"ratio": 3.6677577741407528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47005556322407527,
"avg_score": null,
"num_lines": null
} |
"""Audience group models"""
from datetime import datetime
from emma import exceptions as ex
from emma.model import BaseApiModel, str_fields_to_datetime
import emma.model.member
class Group(BaseApiModel):
    """
    Encapsulates operations for a :class:`Group`

    :param account: The Account which owns this Group
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`Group`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> grp = acct.groups[123]
        >>> grp
        <Group>
    """
    def __init__(self, account, raw=None):
        self.account = account
        self.members = GroupMemberCollection(self)
        super(Group, self).__init__(raw)

    def _parse_raw(self, raw):
        # Normalize timestamp strings from the API into datetime objects.
        raw.update(str_fields_to_datetime(['deleted_at'], raw))
        return raw

    def is_deleted(self):
        """
        Whether a group has been deleted

        :rtype: :class:`bool`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[123]
            >>> grp.is_deleted()
            False
            >>> grp.delete()
            >>> grp.is_deleted()
            True
        """
        return 'deleted_at' in self._dict and bool(self._dict['deleted_at'])

    def delete(self):
        """
        Delete this group

        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[123]
            >>> grp.delete()
            None
        """
        if 'member_group_id' not in self._dict:
            raise ex.NoGroupIdError()
        # Already deleted: nothing to do.
        if self.is_deleted():
            return None
        path = "/groups/%s" % self._dict['member_group_id']
        if self.account.adapter.delete(path):
            # Mark locally and evict from the account's cached collection.
            self._dict['deleted_at'] = datetime.now()
            if self._dict['member_group_id'] in self.account.groups:
                del self.account.groups._dict[self._dict['member_group_id']]

    def extract(self):
        """
        Extracts data from the model in a format suitable for using with the API

        :rtype: :class:`dict`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[123]
            >>> grp.extract()
            {'member_group_id':123, 'group_name':u"My Group", ...}
        """
        if 'group_name' not in self._dict:
            raise ex.NoGroupNameError()
        # Only group_name is accepted by the group endpoints.
        keys = ['group_name']
        return dict(x for x in self._dict.items() if x[0] in keys)

    def _add(self):
        """Add a single group (delegates to the account's bulk save)."""
        self.account.groups.save([self])

    def _update(self):
        """Update a single group"""
        path = "/groups/%s" % self._dict['member_group_id']
        data = self.extract()
        if not self.account.adapter.put(path, data):
            raise ex.GroupUpdateError()

    def save(self):
        """
        Add or update this :class:`Group`

        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[123]
            >>> grp['group_name'] = u"Renamed Group"
            >>> grp.save()
            None
            >>> grp = acct.groups.factory({'group_name': u"New Group"})
            >>> grp.save()
            None
        """
        # A missing member_group_id means the group is not yet server-side.
        if 'member_group_id' not in self._dict:
            return self._add()
        else:
            return self._update()
class GroupMemberCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Member` objects of a
    :class:`Group`

    :param group: The group which owns this collection
    :type group: :class:`Group`
    """
    def __init__(self, group):
        self.group = group
        super(GroupMemberCollection, self).__init__()

    def __delitem__(self, key):
        self.remove_by_id([key])

    def fetch_all(self, deleted=False):
        """
        Lazy-loads the set of :class:`Member` objects

        :param deleted: Include deleted members
        :type deleted: :class:`bool`
        :rtype: :class:`dict` of :class:`Member` objects

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[1024]
            >>> grp.members.fetch_all()
            {200: <Member>, 201: <Member>, ...}
        """
        if 'member_group_id' not in self.group:
            raise ex.NoGroupIdError()
        member = emma.model.member
        path = '/groups/%s/members' % self.group['member_group_id']
        params = {'deleted': True} if deleted else {}
        # Fetch only once; subsequent calls return the cached dict.
        if not self._dict:
            self._dict = dict(
                (x['member_id'], member.Member(self.group.account, x))
                for x in self.group.account.adapter.paginated_get(path, params))
        return self._dict

    def add_by_id(self, member_ids=None):
        """
        Makes given members part of this group

        :param member_ids: Set of identifiers to add
        :type member_ids: :class:`list` of :class:`int`
        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[1024]
            >>> grp.members.add_by_id([200, 201])
            None
        """
        if 'member_group_id' not in self.group:
            raise ex.NoGroupIdError()
        if not member_ids:
            return None
        path = '/groups/%s/members' % self.group['member_group_id']
        data = {'member_ids': member_ids}
        self.group.account.adapter.put(path, data)

    def add_by_status(self, statuses=None):
        """
        Makes all members of a particular status part of this group

        :param statuses: Set of statuses to add
        :type statuses: :class:`list` of :class:`str`
        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> from emma.enumerations import MemberStatus
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[1024]
            >>> grp.members.add_by_status([MemberStatus.Active])
            None
        """
        if 'member_group_id' not in self.group:
            raise ex.NoGroupIdError()
        if not statuses:
            return None
        path = '/members/%s/copy' % self.group['member_group_id']
        data = {'member_status_id': statuses}
        if not self.group.account.adapter.put(path, data):
            raise ex.MemberCopyToGroupError()

    def add_by_group(self, group, statuses=None):
        """
        Makes all members of a particular group part of this group

        :param group: The group to copy members from
        :type group: :class:`Group`
        :param statuses: Set of statuses to add (all statuses when omitted)
        :type statuses: :class:`list` of :class:`str`
        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[1024]
            >>> grp.members.add_by_group(acct.groups[199])
            None
            >>> from emma.enumerations import MemberStatus
            >>> grp.members.add_by_group(acct.groups[200], [MemberStatus.Active])
            None
        """
        if 'member_group_id' not in self.group:
            raise ex.NoGroupIdError()
        if 'member_group_id' not in group:
            raise ex.NoGroupIdError()
        path = '/groups/%s/%s/members/copy' % (
            group['member_group_id'],
            self.group['member_group_id'])
        data = {'member_status_id': statuses} if statuses else {}
        if not self.group.account.adapter.put(path, data):
            raise ex.MemberCopyToGroupError()

    def remove_by_id(self, member_ids=None):
        """
        Remove given members from this group

        :param member_ids: Set of identifiers to remove
        :type member_ids: :class:`list` of :class:`int`
        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[1024]
            >>> grp.members.remove_by_id([200, 201])
            None
        """
        if 'member_group_id' not in self.group:
            raise ex.NoGroupIdError()
        if not member_ids:
            return None
        path = '/groups/%s/members/remove' % self.group['member_group_id']
        data = {'member_ids': member_ids}
        removed = self.group.account.adapter.put(path, data)
        # Drop the removed members from the local cache; guard against a
        # falsy adapter response (nothing removed).
        if removed:
            self._dict = dict(
                x for x in self._dict.items() if x[0] not in removed)

    def remove_all(self, status=None):
        """
        Remove all members from this group

        :param status: A status to remove
        :type status: :class:`str`
        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> from emma.enumerations import MemberStatus
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> grp = acct.groups[1024]
            >>> grp.members.remove_all(MemberStatus.Active)
            None
            >>> grp.members.remove_all()
            None
        """
        if 'member_group_id' not in self.group:
            raise ex.NoGroupIdError()
        path = '/groups/%s/members' % self.group['member_group_id']
        params = {'member_status_id': status} if status else {}
        if self.group.account.adapter.delete(path, params):
            self._dict = {}
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/group.py",
"copies": "1",
"size": "10120",
"license": "mit",
"hash": -5085210349442561000,
"line_mean": 31.2292993631,
"line_max": 84,
"alpha_frac": 0.5502964427,
"autogenerated": false,
"ratio": 3.706959706959707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9755827218459151,
"avg_score": 0.00028578624011126665,
"num_lines": 314
} |
"""Audience import models"""
from emma import exceptions as ex
from emma.model import BaseApiModel, str_fields_to_datetime
from emma.model.member import Member
class MemberImport(BaseApiModel):
    """
    Encapsulates operations for a :class:`MemberImport`

    :param account: The Account which owns this import
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`MemberImport`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> mprt = acct.imports[123]
        >>> mprt
        <MemberImport>
    """
    def __init__(self, account, raw=None):
        self.account = account
        super(MemberImport, self).__init__(raw)
        self.members = ImportMemberCollection(self)

    def _parse_raw(self, raw):
        # Convert the API's timestamp strings into datetime objects.
        timestamps = str_fields_to_datetime(
            ['import_started', 'import_finished'], raw)
        raw.update(timestamps)
        return raw
class ImportMemberCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Member` objects of a
    :class:`MemberImport`

    :param member_import: The Import which owns this collection
    :type member_import: :class:`MemberImport`
    """
    def __init__(self, member_import):
        self.member_import = member_import
        super(ImportMemberCollection, self).__init__()

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Member` objects

        :rtype: :class:`dict` of :class:`Member` objects

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> imprt = acct.imports[1024]
            >>> imprt.members.fetch_all()
            {200: <Member>, 201: <Member>, ...}
        """
        if 'import_id' not in self.member_import:
            raise ex.NoImportIdError()
        path = '/members/imports/%s/members' % self.member_import['import_id']
        # Fetch only once; subsequent calls return the cached dict.
        if not self._dict:
            self._dict = dict(
                (x['member_id'], Member(self.member_import.account, x))
                for x in self.member_import.account.adapter.paginated_get(path))
        return self._dict
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/member_import.py",
"copies": "1",
"size": "2260",
"license": "mit",
"hash": 4459941538730640000,
"line_mean": 31.2857142857,
"line_max": 84,
"alpha_frac": 0.6097345133,
"autogenerated": false,
"ratio": 3.7293729372937294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4839107450593729,
"avg_score": null,
"num_lines": null
} |
"""Audience mailing models"""
from datetime import datetime
from emma import exceptions as ex
from emma.enumerations import MailingStatus
from emma.model import BaseApiModel, str_fields_to_datetime
import emma.model.group
import emma.model.member
import emma.model.search
import emma.model.message
class Mailing(BaseApiModel):
    """
    Encapsulates operations for a :class:`Mailing`

    :param account: The Account which owns this Mailing
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`Mailing`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> mlng = acct.mailings[123]
        >>> mlng
        <Mailing>
    """
    def __init__(self, account, raw=None):
        self.account = account
        super(Mailing, self).__init__(raw)
        # Related collections are lazy; nothing is fetched until accessed.
        self.groups = MailingGroupCollection(self)
        self.members = MailingMemberCollection(self)
        self.messages = MailingMessageCollection(self)
        self.searches = MailingSearchCollection(self)

    def _parse_raw(self, raw):
        # Convert the API's timestamp strings into datetime objects.
        raw.update(str_fields_to_datetime(
            ['clicked', 'opened', 'delivery_ts', 'forwarded', 'shared', 'sent',
             'send_finished', 'send_at', 'archived_ts', 'send_started',
             'started_or_finished'],
            raw))
        return raw

    def update_status(self, status):
        """
        Update status of a current mailing.

        :param status: The new mailing status
        :type status: :class:`str`
        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> from emma.enumerations import MailingStatus
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.update_status(MailingStatus.Canceled)
            None
            >>> mlng.update_status(MailingStatus.Ready)
            <MailingStatusUpdateError>
        """
        if 'mailing_id' not in self._dict:
            raise ex.NoMailingIdError()
        path = "/mailings/%s" % self._dict['mailing_id']
        # Map enumeration values to the API's status strings; any other
        # status raises KeyError here.
        data = {'status': {
            MailingStatus.Canceled: "canceled",
            MailingStatus.Paused: "paused",
            MailingStatus.Ready: "ready"
        }[status]}
        self._dict['status'] = self.account.adapter.put(path, data)

    def is_archived(self):
        """
        Whether a mailing has been archived

        :rtype: :class:`bool`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.is_archived()
            False
            >>> mlng.archive()
            >>> mlng.is_archived()
            True
        """
        return 'archived_ts' in self._dict and bool(self._dict['archived_ts'])

    def archive(self):
        """
        Sets archived timestamp for a mailing.

        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.archive()
            None
        """
        if 'mailing_id' not in self._dict:
            raise ex.NoMailingIdError()
        # Already archived: nothing to do.
        if self.is_archived():
            return None
        path = "/mailings/%s" % self._dict['mailing_id']
        if not self.account.adapter.delete(path):
            raise ex.MailingArchiveError()
        self._dict['archived_ts'] = datetime.now()

    def cancel(self):
        """
        Cancels a mailing that has a current status of pending or paused.

        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.cancel()
            None
        """
        if 'mailing_id' not in self._dict:
            raise ex.NoMailingIdError()
        path = "/mailings/cancel/%s" % self._dict['mailing_id']
        if not self.account.adapter.delete(path):
            raise ex.MailingCancelError()

    def send_additional(self, recipient_emails=None, sender=None,
                        heads_up_emails=None, recipient_groups=None,
                        recipient_searches=None):
        """
        Send a prior mailing to additional recipients. A new mailing will be
        created that inherits its content from the original.

        :param recipient_emails: The additional emails to which this mailing shall be sent
        :type recipient_emails: :class:`list` of :class:`str`
        :rtype: :class:`int`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.send_additional(["test2@example.com"])
            124
        """
        if 'mailing_id' not in self._dict:
            raise ex.NoMailingIdError()
        path = "/mailings/%s" % self._dict['mailing_id']
        # Only include arguments the caller actually supplied.
        data = dict(x for x in {
            'recipient_emails': recipient_emails,
            'sender': sender,
            'heads_up_emails': heads_up_emails,
            'recipient_groups': recipient_groups,
            'recipient_searches': recipient_searches
        }.items() if x[1] is not None)
        # Nothing to send: skip the API call entirely.
        if not data:
            return None
        result = self.account.adapter.post(path, data)
        # Returns the new mailing's id, or None when the call fails.
        if result:
            return result['mailing_id']

    def get_heads_up_emails(self):
        """
        Get heads up email address(es) related to a mailing.

        :rtype: :class:`list` of :class:`str`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.get_heads_up_emails()
            ["headsup1@example.com", "headsup2@example.com"]
        """
        if 'mailing_id' not in self._dict:
            raise ex.NoMailingIdError()
        path = "/mailings/%s/headsup" % self._dict['mailing_id']
        return self.account.adapter.get(path)

    def force_split_test_winner(self, winner_id):
        """
        Declare the winner of a split test manually. In the event that the test
        duration has not elapsed, the current stats for each test will be frozen
        and the content defined in the user declared winner will sent to the
        remaining members for the mailing. Please note, any messages that are
        pending for each of the test variations will receive the content
        assigned to them when the test was initially constructed.

        :param winner_id: The identifier for the winner
        :type winner_id: :class:`int`
        :rtype: :class:`None`

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.force_split_test_winner(12)
            None
        """
        if 'mailing_id' not in self._dict:
            raise ex.NoMailingIdError()
        path = "/mailings/%s/winner/%s" % (self._dict['mailing_id'], winner_id)
        self.account.adapter.post(path)
class MailingGroupCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Group` objects of a
    :class:`Mailing`

    :param mailing: The Mailing which owns this collection
    :type mailing: :class:`Mailing`
    """
    def __init__(self, mailing):
        self.mailing = mailing
        super(MailingGroupCollection, self).__init__()

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Group` objects

        :rtype: :class:`dict` of :class:`Group` objects

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.groups.fetch_all()
            {123: <Group>, 321: <Group>, ...}
        """
        if 'mailing_id' not in self.mailing:
            raise ex.NoMailingIdError()
        # Fetch from the API only on first access; cache thereafter.
        if not self._dict:
            group = emma.model.group
            account = self.mailing.account
            endpoint = '/mailings/%s/groups' % self.mailing['mailing_id']
            self._dict = dict(
                (raw['group_id'], group.Group(account, raw))
                for raw in account.adapter.paginated_get(endpoint))
        return self._dict
class MailingMemberCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Member` objects of a
    :class:`Mailing`

    :param mailing: The Mailing which owns this collection
    :type mailing: :class:`Mailing`
    """
    def __init__(self, mailing):
        self.mailing = mailing
        super(MailingMemberCollection, self).__init__()

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Member` objects

        :rtype: :class:`dict` of :class:`Member` objects

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.members.fetch_all()
            {123: <Member>, 321: <Member>, ...}
        """
        if 'mailing_id' not in self.mailing:
            raise ex.NoMailingIdError()
        # Fetch from the API only on first access; cache thereafter.
        if not self._dict:
            member = emma.model.member
            account = self.mailing.account
            endpoint = '/mailings/%s/members' % self.mailing['mailing_id']
            self._dict = dict(
                (raw['member_id'], member.Member(account, raw))
                for raw in account.adapter.paginated_get(endpoint))
        return self._dict
class MailingSearchCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Search` objects of a
    :class:`Mailing`

    :param mailing: The Mailing which owns this collection
    :type mailing: :class:`Mailing`
    """
    def __init__(self, mailing):
        self.mailing = mailing
        super(MailingSearchCollection, self).__init__()

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Search` objects

        :rtype: :class:`dict` of :class:`Search` objects

        Usage::

            >>> from emma.model.account import Account
            >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
            >>> mlng = acct.mailings[123]
            >>> mlng.searches.fetch_all()
            {123: <Search>, 321: <Search>, ...}
        """
        if 'mailing_id' not in self.mailing:
            raise ex.NoMailingIdError()
        # Fetch from the API only on first access; cache thereafter.
        if not self._dict:
            search = emma.model.search
            account = self.mailing.account
            endpoint = '/mailings/%s/searches' % self.mailing['mailing_id']
            self._dict = dict(
                (raw['search_id'], search.Search(account, raw))
                for raw in account.adapter.paginated_get(endpoint))
        return self._dict
class MailingMessageCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Message` objects of a
    :class:`Mailing`

    :param mailing: The Mailing which owns this collection
    :type mailing: :class:`Mailing`
    """
    def __init__(self, mailing):
        self.mailing = mailing
        super(MailingMessageCollection, self).__init__()

    def __getitem__(self, key):
        item = self.find_one_by_member_id(key)
        if not item:
            raise KeyError(key)
        return item

    def find_one_by_member_id(self, member_id, message_type=None):
        """
        Lazy-loads a single :class:`Message` by Member ID

        :param member_id: The member identifier
        :type member_id: :class:`int`
        :param message_type: The portion of the message to retrieve
        :type message_type: :class:`str`
        :rtype: :class:`Message` or :class:`None`
        :raises: :class:`NoMailingIdError` if the owning mailing has no id

        Usage::

            >>> mlng.messages.find_one_by_member_id(12)
            {'plaintext': ..., 'subject': ..., 'html_body': ...}
            >>> from emma.enumerations import PersonalizedMessageType as pmt
            >>> mlng.messages.find_one_by_member_id(12, message_type=pmt.Html)
            {'html_body': ...}
        """
        if 'mailing_id' not in self.mailing:
            raise ex.NoMailingIdError()
        member_id = int(member_id)
        if member_id not in self._dict:
            path = "/mailings/%s/messages/%s" % (
                self.mailing['mailing_id'], member_id)
            params = {'type': message_type} if message_type else {}
            message = emma.model.message
            raw = self.mailing.account.adapter.get(path, params)
            if raw:
                self._dict[member_id] = message.Message(
                    self.mailing, member_id, raw)
        # dict.get replaces the fragile `x and y or z` conditional, which
        # would wrongly return None for a cached-but-falsy value
        return self._dict.get(member_id)
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/mailing.py",
"copies": "1",
"size": "13207",
"license": "mit",
"hash": 7528211253292622000,
"line_mean": 32.2670025189,
"line_max": 90,
"alpha_frac": 0.5724994321,
"autogenerated": false,
"ratio": 3.736067892503536,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808567324603536,
"avg_score": null,
"num_lines": null
} |
"""Audience member models"""
from datetime import datetime
from emma import exceptions as ex
from emma.enumerations import MemberStatus
from emma.model import BaseApiModel, str_fields_to_datetime
import emma.model.group
import emma.model.mailing
class Member(BaseApiModel):
    """
    Encapsulates operations for a :class:`Member`

    :param account: The Account which owns this Member
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`Member`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> mbr = acct.members[123]
        >>> mbr
        <Member>
        >>> mbr.groups
        <MemberGroupCollection>
        >>> mbr.mailings
        <MemberMailingCollection>
    """
    def __init__(self, account, raw=None):
        self.account = account
        self.groups = MemberGroupCollection(self)
        self.mailings = MemberMailingCollection(self)
        super(Member, self).__init__(raw)

    def _parse_raw(self, raw):
        # Flatten the API's nested `fields` dict into the top-level dict
        if 'fields' in raw:
            raw.update(raw['fields'])
            del raw['fields']
        raw.update(str_fields_to_datetime(
            ['last_modified_at', 'member_since', 'deleted_at'],
            raw))
        return raw

    def opt_out(self):
        """
        Opt-out this :class:`Member` from future mailings on this
        :class:`Account`

        :rtype: :class:`None`
        :raises: :class:`NoMemberEmailError` if no email is set

        Usage::

            >>> mbr.opt_out()
            None
        """
        if 'email' not in self._dict:
            raise ex.NoMemberEmailError()
        path = '/members/email/optout/%s' % self._dict['email']
        if self.account.adapter.put(path):
            self._dict['member_status_id'] = MemberStatus.OptOut

    def get_opt_out_detail(self):
        """
        Get details about this :class:`Member`'s opt-out history

        :rtype: :class:`list`
        :raises: :class:`NoMemberIdError` if no member id is set

        Usage::

            >>> mbr.get_opt_out_detail()
            [...]
        """
        if 'member_id' not in self._dict:
            raise ex.NoMemberIdError()
        # History is only meaningful for members who have opted out
        if self._dict['member_status_id'] != MemberStatus.OptOut:
            return []
        path = '/members/%s/optout' % self._dict['member_id']
        return self.account.adapter.get(path)

    def has_opted_out(self):
        """
        Check if this :class:`Member` has opted-out

        :rtype: :class:`bool`
        :raises: :class:`NoMemberStatusError` if no status is set

        Usage::

            >>> mbr.has_opted_out()
            False
        """
        if 'member_status_id' not in self._dict:
            raise ex.NoMemberStatusError()
        return self._dict['member_status_id'] == MemberStatus.OptOut

    def extract(self):
        """
        Extracts data from the model in a format suitable for using with the API

        :rtype: :class:`dict`
        :raises: :class:`NoMemberEmailError` if no email is set

        Usage::

            >>> mbr.extract()
            {'member_id':123, 'email':u"test@example.org", 'fields':{...}}
        """
        if 'email' not in self._dict:
            # Raise an instance, consistent with opt_out() above
            raise ex.NoMemberEmailError()
        extracted = dict(x for x in self._dict.items()
                         if x[0] in ['member_id', 'email'])
        fields = dict(x for x in self._dict.items()
                      if x[0] in self.account.fields.export_shortcuts())
        if fields:
            extracted['fields'] = fields
        return extracted

    def _add(self, signup_form_id, group_ids):
        """Add a single member"""
        path = '/members/add'
        data = self.extract()
        if group_ids:
            data['group_ids'] = group_ids
        if signup_form_id:
            data['signup_form_id'] = signup_form_id
        outcome = self.account.adapter.post(path, data)
        self['member_status_id'] = outcome['status']
        if 'member_id' in outcome:
            self['member_id'] = outcome['member_id']

    def _update(self):
        """Update a single member"""
        path = "/members/%s" % self._dict['member_id']
        data = self.extract()
        # Only statuses accepted by the API as a target status are forwarded
        if self._dict['member_status_id'] in (
                MemberStatus.Active, MemberStatus.Error, MemberStatus.OptOut):
            data['status_to'] = self._dict['member_status_id']
        if not self.account.adapter.put(path, data):
            raise ex.MemberUpdateError()

    def save(self, signup_form_id=None, group_ids=None):
        """
        Add or update this :class:`Member`

        :param signup_form_id: Optional signup form identifier (add only)
        :param group_ids: Optional group identifiers to join (add only)
        :rtype: :class:`None`

        Usage::

            >>> mbr['last_name'] = u"New-Name"
            >>> mbr.save()
            None
            >>> mbr = acct.members.factory({'email': u"new@example.com"})
            >>> mbr.save()
            None
        """
        if 'member_id' not in self._dict:
            return self._add(signup_form_id, group_ids)
        else:
            return self._update()

    def is_deleted(self):
        """
        Whether a member has been deleted

        :rtype: :class:`bool`

        Usage::

            >>> mbr.is_deleted()
            False
        """
        return 'deleted_at' in self._dict and bool(self._dict['deleted_at'])

    def delete(self):
        """
        Delete this member

        :rtype: :class:`None`
        :raises: :class:`NoMemberIdError` if no member id is set

        Usage::

            >>> mbr.delete()
            None
        """
        if 'member_id' not in self._dict:
            raise ex.NoMemberIdError()
        # Deleting twice is a no-op
        if self.is_deleted():
            return None
        path = "/members/%s" % self._dict['member_id']
        if self.account.adapter.delete(path):
            self._dict['deleted_at'] = datetime.now()

    def add_groups(self, group_ids=None):
        """
        Convenience method for adding groups to a Member

        :param group_ids: Set of Group identifiers to add
        :type group_ids: :class:`list` of :class:`int`
        :rtype: :class:`None`

        Usage::

            >>> mbr.add_groups([1024, 1025])
            None
        """
        # Robustness: None/empty input used to raise TypeError in the
        # comprehension below; treat it as "nothing to add" instead
        if not group_ids:
            return None
        return self.groups.save(
            [self.groups.factory({'member_group_id': x}) for x in group_ids])

    def drop_groups(self, group_ids=None):
        """
        Convenience method for dropping groups from a Member

        :param group_ids: Set of Group identifiers to drop; omit to drop all
        :type group_ids: :class:`list` of :class:`int`
        :rtype: :class:`None`

        Usage::

            >>> mbr.drop_groups([1024, 1025]) # Drop a specific list of groups
            None
            >>> mbr.drop_groups() # Drop all groups
            None
        """
        return self.groups.delete(group_ids)
class MemberMailingCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Mailing` objects of a
    :class:`Member`

    :param member: The Member which owns this collection
    :type member: :class:`Member`
    """
    def __init__(self, member):
        self.member = member
        super(MemberMailingCollection, self).__init__()

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Mailing` objects

        :rtype: :class:`dict` of :class:`Mailing` objects
        :raises: :class:`NoMemberIdError` if the owning member has no id

        Usage::

            >>> mbr.mailings.fetch_all()
            {123: <Mailing>, 321: <Mailing>, ...}
        """
        if 'member_id' not in self.member:
            raise ex.NoMemberIdError()
        # Populate the cache only on the first call; later calls reuse it.
        if not self._dict:
            account = self.member.account
            path = '/members/%s/mailings' % self.member['member_id']
            self._dict = {
                raw['mailing_id']: emma.model.mailing.Mailing(account, raw)
                for raw in account.adapter.paginated_get(path)}
        return self._dict
class MemberGroupCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Group` objects of a
    :class:`Member`

    :param member: The Member which owns this collection
    :type member: :class:`Member`
    """
    def __init__(self, member):
        self.member = member
        super(MemberGroupCollection, self).__init__()

    def __delitem__(self, key):
        self._delete_by_list([key])

    def factory(self, raw=None):
        """
        Creates a :class:`Group`

        :rtype: :class:`Group`

        Usage::

            >>> mbr.groups.factory({'member_group_id':1024})
            <Group{'member_group_id':1024}>
        """
        return emma.model.group.Group(self.member.account, raw)

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Group` objects

        :rtype: :class:`dict` of :class:`Group` objects
        :raises: :class:`NoMemberIdError` if the owning member has no id

        Usage::

            >>> mbr.groups.fetch_all()
            {123: <Group>, 321: <Group>, ...}
        """
        if 'member_id' not in self.member:
            raise ex.NoMemberIdError()
        group = emma.model.group
        path = '/members/%s/groups' % self.member['member_id']
        if not self._dict:
            self._dict = dict(
                (x['member_group_id'], group.Group(self.member.account, x))
                for x in self.member.account.adapter.paginated_get(path))
        return self._dict

    def save(self, groups=None):
        """
        Persist group membership for the owning member

        :param groups: List of :class:`Group` objects to save
        :type groups: :class:`list` of :class:`Group` objects
        :rtype: :class:`None`
        :raises: :class:`NoMemberIdError` if the owning member has no id

        Usage::

            >>> grps = acct.members[123].groups
            >>> grps.save([grps.factory({'member_group_id': 300})])
            None
        """
        if 'member_id' not in self.member:
            raise ex.NoMemberIdError()
        if not groups:
            return None
        path = '/members/%s/groups' % self.member['member_id']
        data = {'group_ids': [x['member_group_id'] for x in groups]}
        if self.member.account.adapter.put(path, data):
            # Invalidate the cache so the next fetch_all() re-reads the API
            self.clear()

    def _delete_by_list(self, group_ids):
        """Drop groups by list of identifiers"""
        path = '/members/%s/groups/remove' % self.member['member_id']
        data = {'group_ids': group_ids}
        if self.member.account.adapter.put(path, data):
            # Hoist a set so cache filtering is O(1) per entry, not O(m)
            dropped = set(group_ids)
            self._dict = dict(x for x in self._dict.items()
                              if x[0] not in dropped)

    def _delete_all_groups(self):
        """Drop all groups"""
        path = '/members/%s/groups' % self.member['member_id']
        if self.member.account.adapter.delete(path, {}):
            self._dict = {}

    def delete(self, group_ids=None):
        """
        Drop some or all of the member's groups

        :param group_ids: List of group identifiers to delete; omit for all
        :type group_ids: :class:`list` of :class:`int`
        :rtype: :class:`None`
        :raises: :class:`NoMemberIdError` if the owning member has no id

        Usage::

            >>> grps.delete([300, 301]) # Delete a specific list of groups
            None
            >>> grps.delete() # Delete all groups
            None
        """
        if 'member_id' not in self.member:
            raise ex.NoMemberIdError()
        return (self._delete_by_list(group_ids)
                if group_ids
                else self._delete_all_groups())
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/member.py",
"copies": "1",
"size": "13611",
"license": "mit",
"hash": 1704164264946743000,
"line_mean": 30.9507042254,
"line_max": 80,
"alpha_frac": 0.5400044082,
"autogenerated": false,
"ratio": 3.6393048128342245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4679309221034224,
"avg_score": null,
"num_lines": null
} |
# audience.py
import socket
import json
# Default location of the conductor's Unix-domain control socket.
DEFAULT_SOCKET_PATH="/var/run/orchestra/conductor.sock"

class ServerError(Exception):
    """Raised when the conductor replies with a non-OK response."""
    pass
def submit_job(score, scope, target, args=None, sockname=DEFAULT_SOCKET_PATH):
    """Queue a job with the conductor and return the server's reply payload.

    Sends a JSON request over the conductor's Unix socket and reads a JSON
    response of the form [status, payload].

    :param score: name of the score to run
    :param scope: execution scope
    :param target: iterable of player names (materialized into a list)
    :param args: optional dict of score parameters
    :param sockname: path to the conductor control socket
    :raises ServerError: if the first element of the response is not 'OK'
    """
    req_obj = {
        'op': 'queue',
        'score': score,
        'scope': scope,
        'players': list(target),
        'params': args if args is not None else {},
    }
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        # connect inside the try so the socket is closed even if it fails
        sock.connect(sockname)
        f = sock.makefile()
        try:
            f.write(json.dumps(req_obj))
            f.flush()
            resp = json.load(f)
        finally:
            # the file object wraps the socket and must be closed too
            f.close()
        if resp[0] == 'OK':
            return resp[1]
        raise ServerError(resp[1])
    finally:
        sock.close()
def get_status(jobid, sockname=DEFAULT_SOCKET_PATH):
    """Query the conductor for the status of a previously queued job.

    Sends a JSON request over the conductor's Unix socket and reads a JSON
    response of the form [status, payload].

    :param jobid: identifier returned when the job was submitted
    :param sockname: path to the conductor control socket
    :raises ServerError: if the first element of the response is not 'OK'
    """
    req_obj = {
        'op': 'status',
        'id': jobid,
    }
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        # connect inside the try so the socket is closed even if it fails
        sock.connect(sockname)
        f = sock.makefile()
        try:
            f.write(json.dumps(req_obj))
            f.flush()
            resp = json.load(f)
        finally:
            # the file object wraps the socket and must be closed too
            f.close()
        if resp[0] == 'OK':
            return resp[1]
        raise ServerError(resp[1])
    finally:
        sock.close()
| {
"repo_name": "anchor/Orchestra",
"path": "python/audience.py",
"copies": "1",
"size": "1264",
"license": "bsd-3-clause",
"hash": 81808291112471220,
"line_mean": 20.7931034483,
"line_max": 78,
"alpha_frac": 0.5490506329,
"autogenerated": false,
"ratio": 3.4162162162162164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4465266849116216,
"avg_score": null,
"num_lines": null
} |
"""Audience search models"""
from datetime import datetime
from emma import exceptions as ex
from emma.model import BaseApiModel, str_fields_to_datetime
import emma.model.member
class Search(BaseApiModel):
    """
    Encapsulates operations for a :class:`Search`

    :param account: The Account which owns this Search
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`Search`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> srch = acct.searches[123]
        >>> srch
        <Search>
    """
    def __init__(self, account, raw=None):
        self.account = account
        super(Search, self).__init__(raw)
        self.members = SearchMemberCollection(self)

    def _parse_raw(self, raw):
        # Convert ISO date strings from the API into datetime objects
        raw.update(str_fields_to_datetime(['deleted_at', 'last_run_at'], raw))
        return raw

    def is_deleted(self):
        """
        Whether a search has been deleted

        :rtype: :class:`bool`

        Usage::

            >>> srch.is_deleted()
            False
        """
        return 'deleted_at' in self._dict and bool(self._dict['deleted_at'])

    def delete(self):
        """
        Delete this search

        :rtype: :class:`None`
        :raises: :class:`NoSearchIdError` if no search id is set

        Usage::

            >>> srch.delete()
            None
        """
        if 'search_id' not in self._dict:
            raise ex.NoSearchIdError()
        # Deleting twice is a no-op
        if self.is_deleted():
            return None
        path = "/searches/%s" % self._dict['search_id']
        if self.account.adapter.delete(path):
            self._dict['deleted_at'] = datetime.now()
            # Evict this search from the account's collection cache
            if self._dict['search_id'] in self.account.searches:
                del self.account.searches._dict[self._dict['search_id']]

    def extract(self):
        """
        Extracts data from the model in a format suitable for using with the API

        :rtype: :class:`dict`

        Usage::

            >>> srch.extract()
            {'name':..., 'criteria':...}
        """
        keys = ['name', 'criteria']
        return dict(x for x in self._dict.items() if x[0] in keys)

    def _add(self):
        """Add a single search"""
        path = '/searches'
        data = self.extract()
        self._dict['search_id'] = self.account.adapter.post(path, data)
        self.account.searches._dict[self._dict['search_id']] = self

    def _update(self):
        """Update a single search"""
        path = '/searches/%s' % self._dict['search_id']
        data = self.extract()
        self.account.adapter.put(path, data)

    def save(self):
        """
        Add or update this :class:`Search`

        :rtype: :class:`None`

        Usage::

            >>> srch['name'] = u"Renamed Search"
            >>> srch.save()
            None
            >>> srch = acct.searches.factory(
            ...     {
            ...         'name': u"Test Search",
            ...         'criteria': ["group", "eq", "Test Group"]
            ...     }
            ... )
            >>> srch.save()
            None
        """
        if 'search_id' not in self._dict:
            return self._add()
        else:
            return self._update()
class SearchMemberCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Member` objects of a
    :class:`Search`

    :param search: The search which owns this collection
    :type search: :class:`Search`
    """
    def __init__(self, search):
        self.search = search
        super(SearchMemberCollection, self).__init__()

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Member` objects

        :rtype: :class:`dict` of :class:`Member` objects
        :raises: :class:`NoSearchIdError` if the owning search has no id

        Usage::

            >>> srch.members.fetch_all()
            {200: <Member>, 201: <Member>, ...}
        """
        # idiomatic membership test (was `if not 'search_id' in ...`)
        if 'search_id' not in self.search:
            raise ex.NoSearchIdError()
        path = '/searches/%s/members' % self.search['search_id']
        if not self._dict:
            member = emma.model.member
            self._dict = dict(
                (x['member_id'], member.Member(self.search.account, x))
                for x in self.search.account.adapter.paginated_get(path))
        return self._dict
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/search.py",
"copies": "1",
"size": "5247",
"license": "mit",
"hash": 4183900311628053000,
"line_mean": 29.1551724138,
"line_max": 80,
"alpha_frac": 0.5334476844,
"autogenerated": false,
"ratio": 3.6872803935347855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9718652459268238,
"avg_score": 0.00041512373330960156,
"num_lines": 174
} |
"""Audience trigger models"""
from datetime import datetime
from emma import exceptions as ex
from emma.model import BaseApiModel, str_fields_to_datetime
import emma.model.mailing
class Trigger(BaseApiModel):
    """
    Encapsulates operations for a :class:`Trigger`

    :param account: The Account which owns this Trigger
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`Trigger`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> acct.triggers[123]
        <Trigger>
    """
    def __init__(self, account, raw=None):
        self.account = account
        super(Trigger, self).__init__(raw)
        self.mailings = TriggerMailingCollection(self)

    def _parse_raw(self, raw):
        # Convert ISO date strings into datetime objects and wrap the
        # parent mailing payload in a Mailing model
        raw.update(str_fields_to_datetime(['deleted_at', 'start_ts'], raw))
        if 'parent_mailing' in raw:
            mailing = emma.model.mailing
            raw['parent_mailing'] = mailing.Mailing(
                self.account,
                raw['parent_mailing'])
        return raw

    def is_deleted(self):
        """
        Whether a trigger has been deleted

        :rtype: :class:`bool`

        Usage::

            >>> trggr.is_deleted()
            False
        """
        return 'deleted_at' in self._dict and bool(self._dict['deleted_at'])

    def delete(self):
        """
        Delete this trigger

        :rtype: :class:`None`
        :raises: :class:`NoTriggerIdError` if no trigger id is set

        Usage::

            >>> trggr.delete()
            None
        """
        if 'trigger_id' not in self._dict:
            raise ex.NoTriggerIdError()
        # Deleting twice is a no-op
        if self.is_deleted():
            return None
        path = "/triggers/%s" % self._dict['trigger_id']
        if self.account.adapter.delete(path):
            self._dict['deleted_at'] = datetime.now()
            # Evict this trigger from the account's collection cache
            if self._dict['trigger_id'] in self.account.triggers:
                del self.account.triggers._dict[self._dict['trigger_id']]

    def extract(self):
        """
        Extracts data from the model in a format suitable for using with the API

        :rtype: :class:`dict`

        Usage::

            >>> trggr.extract()
            {'name':..., 'event_type':...}
        """
        keys = ['name', 'event_type', 'parent_mailing_id', 'groups', 'links',
                'signups', 'surveys', 'field_id', 'push_offset', 'is_disabled']
        return dict(x for x in self._dict.items() if x[0] in keys)

    def _add(self):
        """Add a single trigger"""
        path = '/triggers'
        data = self.extract()
        self._dict['trigger_id'] = self.account.adapter.post(path, data)
        self.account.triggers._dict[self._dict['trigger_id']] = self

    def _update(self):
        """Update a single trigger"""
        path = '/triggers/%s' % self._dict['trigger_id']
        data = self.extract()
        self.account.adapter.put(path, data)

    def save(self):
        """
        Add or update this :class:`Trigger`

        :rtype: :class:`None`

        Usage::

            >>> trggr['name'] = u"Renamed Trigger"
            >>> trggr.save()
            None
            >>> from emma.enumerations import EventType
            >>> trggr = acct.triggers.factory(
            ...     {
            ...         'parent_mailing_id': 200,
            ...         'object_ids': [10, 20, 30],
            ...         'name': 'Test Trigger',
            ...         'event_type': EventType.Click
            ...     }
            ... )
            >>> trggr.save()
            None
        """
        if 'trigger_id' not in self._dict:
            return self._add()
        else:
            return self._update()
class TriggerMailingCollection(BaseApiModel):
    """
    Encapsulates operations for the set of :class:`Mailing` objects of a
    :class:`Trigger`

    :param trigger: The trigger which owns this collection
    :type trigger: :class:`Trigger`
    """
    def __init__(self, trigger):
        self.trigger = trigger
        super(TriggerMailingCollection, self).__init__()

    def fetch_all(self):
        """
        Lazy-loads the full set of :class:`Mailing` objects

        :rtype: :class:`dict` of :class:`Mailing` objects
        :raises: :class:`NoTriggerIdError` if the owning trigger has no id

        Usage::

            >>> trggr.mailings.fetch_all()
            {200: <Mailing>, 201: <Mailing>, ...}
        """
        if 'trigger_id' not in self.trigger:
            # Bug fix: this previously raised NoSearchIdError, a copy-paste
            # error from the search module; a missing trigger id is a
            # trigger problem
            raise ex.NoTriggerIdError()
        path = '/triggers/%s/mailings' % self.trigger['trigger_id']
        if not self._dict:
            mailing = emma.model.mailing
            self._dict = dict(
                (x['mailing_id'], mailing.Mailing(self.trigger.account, x))
                for x in self.trigger.account.adapter.paginated_get(path))
        return self._dict
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/trigger.py",
"copies": "1",
"size": "5758",
"license": "mit",
"hash": 7322290880077317000,
"line_mean": 30.6373626374,
"line_max": 80,
"alpha_frac": 0.5375130254,
"autogenerated": false,
"ratio": 3.7292746113989637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9764851319682586,
"avg_score": 0.0003872634232755909,
"num_lines": 182
} |
"""Audience webhook models"""
from datetime import datetime
from emma import exceptions as ex
from emma.model import BaseApiModel, str_fields_to_datetime
class WebHook(BaseApiModel):
    """
    Encapsulates operations for a :class:`WebHook`

    :param account: The Account which owns this WebHook
    :type account: :class:`Account`
    :param raw: The raw values of this :class:`WebHook`
    :type raw: :class:`dict`

    Usage::

        >>> from emma.model.account import Account
        >>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
        >>> acct.webhooks[123]
        <WebHook>
    """
    def __init__(self, account, raw=None):
        self.account = account
        super(WebHook, self).__init__(raw)

    def delete(self):
        """
        Delete this webhook

        :rtype: :class:`None`
        :raises: :class:`NoWebHookIdError` if no webhook id is set

        Usage::

            >>> wbhk.delete()
            None
        """
        if 'webhook_id' not in self._dict:
            raise ex.NoWebHookIdError()
        path = "/webhooks/%s" % self._dict['webhook_id']
        self.account.adapter.delete(path)
        # Evict this webhook from the account's collection cache
        if self._dict['webhook_id'] in self.account.webhooks:
            del self.account.webhooks._dict[self._dict['webhook_id']]

    def extract(self):
        """
        Extracts data from the model in a format suitable for using with the API

        :rtype: :class:`dict`

        Usage::

            >>> wbhk.extract()
            {'url':..., 'event':...}
        """
        keys = ['url', 'event', 'method', 'public_key']
        return dict(x for x in self._dict.items() if x[0] in keys)

    def _add(self):
        """Add a single webhook"""
        path = '/webhooks'
        data = self.extract()
        self._dict['webhook_id'] = self.account.adapter.post(path, data)
        # Bug fix: the new webhook was registered in account.triggers
        # (copy-paste from the trigger module); it belongs in
        # account.webhooks
        self.account.webhooks._dict[self._dict['webhook_id']] = self

    def _update(self):
        """Update a single webhook"""
        path = '/webhooks/%s' % self._dict['webhook_id']
        data = self.extract()
        self.account.adapter.put(path, data)

    def save(self):
        """
        Add or update this :class:`WebHook`

        :rtype: :class:`None`

        Usage::

            >>> wbhk['url'] = u"http://v2.example.com"
            >>> wbhk.save()
            None
            >>> from emma.enumerations import WebHookMethod
            >>> wbhk = acct.webhooks.factory(
            ...     {
            ...         'event': u"mailing_finish",
            ...         'url': u"http://example.com",
            ...         'method': WebHookMethod.Get
            ...     }
            ... )
            >>> wbhk.save()
            None
        """
        if 'webhook_id' not in self._dict:
            return self._add()
        else:
            return self._update()
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/model/webhook.py",
"copies": "1",
"size": "3296",
"license": "mit",
"hash": -9098683469810252000,
"line_mean": 29.2385321101,
"line_max": 80,
"alpha_frac": 0.5285194175,
"autogenerated": false,
"ratio": 3.6061269146608317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9633013236153225,
"avg_score": 0.0003266192015214133,
"num_lines": 109
} |
"""AudioAddict utility class."""
# pylint: disable=line-too-long, old-style-class, broad-except, too-many-instance-attributes
import urllib2
import json
import random
class AudioAddict:
"""AudioAddict utility class."""
def __init__(self):
"""Init. You know."""
self.listenkey = None
# Valid streaming services according to audioaddict.com.
self.validservices = {
'radiotunes': 'RadioTunes.com',
'di': 'DI.fm',
'jazzradio': 'JazzRadio.com',
'rockradio': 'RockRadio.com',
'classicalradio': 'ClassicalRadio.com',
'zenradio': 'ZenRadio.com',
}
# Each service proposes a selection of stream types.
# It's worth noting that public3 is the *only* common type.
self.validstreams = {
'premium_medium': {'codec': 'aac', 'bitrate': 64},
'premium': {'codec': 'aac', 'bitrate': 128},
'premium_high': {'codec': 'mp3', 'bitrate': 320},
}
self.streampref = 'premium_high'
self.sourcepref = None
self.service = None
self.chanlist = []
# All streaming services use a common API service.
self.apihost = 'api.audioaddict.com'
# The batch API endpoint requires a static dummy auth header.
self.authheader = ['Authorization', 'Basic ZXBoZW1lcm9uOmRheWVpcGgwbmVAcHA=']
self.batchinfo = {}
def get_apihost(self, host_only=False, ssl=False):
"""Get the AA API host; normally used as part of a URL."""
if host_only:
return self.apihost
obj = '://' + self.apihost + '/v1'
if ssl:
obj = 'https' + obj
else:
obj = 'http' + obj
return obj
def set_listenkey(self, listenkey=None):
"""Set the listen_key."""
self.listenkey = listenkey
def get_listenkey(self, key_only=False):
"""Get the listen_key; normally used as part of a URL."""
if not self.listenkey:
return ''
elif key_only:
return self.listenkey
else:
return '?listen_key=' + self.listenkey
def get_validservices(self):
"""Get list of valid services."""
return self.validservices
def set_service(self, serv=None):
"""Set which service we're using."""
if serv not in self.validservices.keys():
raise Exception('Invalid service')
self.service = serv
def get_service(self):
"""Get which service we're using."""
return self.service
def get_servicename(self, serv=None):
"""Get the name of a given service."""
if not serv:
serv = self.get_service()
if serv not in self.get_validservices().keys():
raise Exception('Invalid service')
return self.validservices[serv]
def get_validstreams(self):
"""Get the list of valid streams."""
return self.validstreams
def get_serviceurl(self, serv=None, prefix='listen'):
"""Get the service URL for the service we're using."""
if not serv:
serv = self.get_service()
url = 'http://' + prefix + '.' + self.get_servicename(serv)
url = url.lower()
return url
def set_streampref(self, stream=None):
"""Set the preferred stream."""
if stream not in self.get_validstreams():
raise Exception('Invalid stream')
self.streampref = stream
def get_streamdetails(self):
"""Get the details for a stream."""
details = {}
stream = self.get_streampref()
if stream in self.get_validstreams():
details = self.get_validstreams()[stream]
return details
def get_streampref(self):
"""Get the preferred stream."""
return self.streampref
def set_sourcepref(self, source=None):
"""Set the preferred source."""
self.sourcepref = source
def get_sourcepref(self):
"""Get the preferred source."""
return self.sourcepref
def get_chanlist(self, refresh=False):
"""Get the master channel list."""
if not self.chanlist or refresh:
try:
data = urllib2.urlopen(self.get_serviceurl() + '/' + self.get_streampref())
self.chanlist = json.loads(data.read())
except Exception:
raise
return self.chanlist
def get_chaninfo(self, key):
"""Get the info for a particular channel."""
chaninfo = None
for chan in self.get_chanlist():
if chan['key'] == key:
chaninfo = chan.copy()
if not chaninfo:
raise Exception('Invalid channel')
return chaninfo
def get_chanhist(self, key):
"""Get track history for a channel."""
servurl = self.get_apihost() + '/' + self.get_service() + '/track_history/channel/' + \
str(self.get_chaninfo(key)['id'])
data = urllib2.urlopen(servurl)
history = json.loads(data.read())
return history
def get_nowplaying(self, key):
"""Get current track for a channel."""
# Normally the current song is position 0, but if an advertisement
# was played in the public stream, it will pollute the history -
# in that case, we pick the track from position 1.
track = 'Unknown - Unknown'
if 'ad' not in self.get_chanhist(key)[0]:
track = self.get_chanhist(key)[0]['track']
else:
track = self.get_chanhist(key)[1]['track']
return track
def get_batchinfo(self, refresh=False):
    """Get the massive batch info blob, caching the parsed result.

    Bug fix: when the response is NOT gzip-compressed, the original never
    assigned ``data``, so ``json.loads(data)`` raised NameError. The plain
    body is now read in that case.
    """
    if self.batchinfo and not refresh:
        return self.batchinfo
    url = self.get_apihost() + '/' + self.get_service() + \
        '/mobile/batch_update?stream_set_key=' + self.get_streampref()
    req = urllib2.Request(url)
    req.add_header(*self.authheader)
    # AA started gzip compressing (just) this response in June 2017.
    req.add_header('Accept-Encoding', 'gzip')
    response = urllib2.urlopen(req)
    # This may or may not be a permanent change, so keep the conditional.
    # If other endpoints start returning gzip'd data, implement generically.
    if response.info().get('Content-Encoding') == 'gzip':
        from StringIO import StringIO
        import gzip
        buf = StringIO(response.read())
        data = gzip.GzipFile(fileobj=buf).read()
    else:
        data = response.read()
    batch = json.loads(data)
    # Only the "All" channel filter is interesting for now.
    # NOTE(review): if no filter named 'All' exists, batchinfo is never
    # bound and the assignment below raises NameError - presumably the API
    # always provides it; verify.
    for i in batch['channel_filters']:
        if i['name'] == 'All':
            batchinfo = i['channels']
            for channel in batchinfo:
                for ss_channel in batch['stream_sets'][0]['streamlist']['channels']:
                    if channel['id'] == ss_channel['id']:
                        streamurl = None
                        # Look through the list for the preferred source.
                        if self.get_sourcepref():
                            for stream in ss_channel['streams']:
                                if self.get_sourcepref() in stream['url']:
                                    streamurl = stream['url']
                        # If there is no preferred source or one was not
                        # found, pick at random.
                        if not streamurl:
                            streamurl = random.choice(
                                [x['url'] for x in ss_channel['streams']])
                        if streamurl:
                            channel['streamurl'] = streamurl + '?' + \
                                self.get_listenkey(key_only=True)
    self.batchinfo = batchinfo
    return self.batchinfo
| {
"repo_name": "phrawzty/AudioAddict.bundle",
"path": "Contents/Code/audioaddict.py",
"copies": "1",
"size": "8024",
"license": "apache-2.0",
"hash": 4047027420342683000,
"line_mean": 29.7432950192,
"line_max": 106,
"alpha_frac": 0.5493519442,
"autogenerated": false,
"ratio": 4.252252252252252,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00035890182415466564,
"num_lines": 261
} |
__version__ = '1.0'
__author__ = 'Michael A. Casey'
__copyright__ = "Copyright (C) 2010 Michael Casey, Dartmouth College, All Rights Reserved"
__license__ = "gpl 2.0 or higher"
__email__ = 'mcasey@dartmouth.edu'
import os
import os.path
import time
import glob
import tempfile
import shutil
import subprocess
import hashlib
import random
import error
import features
# Locate the user's home directory. POSIX systems set HOME; Windows sets
# HOMEPATH. The original used a bare ``except:``, which would also mask
# unrelated errors; only the missing-key case is handled now.
try:  # OSX / Linux
    BREGMAN_ROOT = os.environ['HOME']
except KeyError:  # Windows
    BREGMAN_ROOT = os.environ['HOMEPATH']
# All collection directories are created under <home>/exp.
DATA_ROOT = BREGMAN_ROOT + os.sep + "exp"
class AudioCollection:
    """
    ::

        A class for extracting, persisting, and searching audio features in a collection

        Initialization:
        AudioCollection("/path/to/collection")

        Instantiate a new collection at the given path. This directory is where features and
        audioDB databases will be stored. The collection path does not have to be the same as the
        audio files path for inserted audio. Audio can come from any location, but features are
        consolidated into the AudioCollection path.

    NOTE(review): this class is Python 2 only (print statements, ``long`` and
    the ``L`` integer suffix, list-returning ``dict.keys()``, ``cmp``-based
    sort). It shells out to the external ``audioDB`` and ``fftExtract``
    binaries, which must be on PATH.
    """
    # Prefix shared by collection directories and .adb database file names.
    collection_stem = "collection_"

    def __init__(self, path=None, root_path=DATA_ROOT):
        self.root_path = root_path            # where collection dirs are created
        self.collection_path = path           # this collection's directory
        self.adb_path = None                  # path to the audioDB instance file
        self.adb = None
        self.rTupleList = None                # list of (feature, power, key) tuples
        self.feature_params = features.Features.default_feature_params()
        self.audio_collection = set()         # queue of audio files awaiting extraction
        self.cache_temporary_files = False
        self.uid="%016X"%long(random.random()*100000000000000000L) # unique instance ID
        self.old_uid=self.uid
        self.adb_data_size=256                # audioDB --datasize argument
        self.adb_ntracks=20000                # audioDB --ntracks argument

    def insert_audio_files(self, audio_list):
        """
        ::

            Maintain a set of audio files as a collection. Each item is unique
            in the set. Collisions are ignored.
        """
        for item in audio_list: self.audio_collection.add(item)

    def _gen_adb_hash(self):
        """
        ::

            Generate a hash key based on self.feature_params to make an
            audioDB instance unique to feature set.
        """
        m = hashlib.md5()
        k = self.feature_params.keys()
        k.sort() # ensure vals ordered by key lexicographical order
        vals = [self.feature_params.get(i) for i in k]
        # Hash the repr of the ordered value list: identical parameter sets
        # therefore always map to the same database file name.
        m.update(vals.__repr__())
        return m.hexdigest()

    def _insert_features_into_audioDB(self):
        """
        ::

            Persist features, powers, and audio-file keys in an audioDB instance.
            Inserts self.rTupleList into audioDB instance associated with current
            feature set. If features already exist, warn once, but continue.
            Given an adb-instance path, execute the audioDB command to:
                Make lists of features, powers, and database keys
                Batch insert features, powers, linked to database keys
            Returns 1 if OK (raises on subprocess failure).
        """
        self._new_adb() # test to see if we require a new audiodb instance for features
        pth, sep, nm = self.adb_path.rpartition(os.sep)
        # List names for audioDB insertion; each is tagged with this
        # instance's uid so concurrent instances do not collide.
        fListName = pth + os.sep + self.uid + "_fList.txt"
        pListName = pth + os.sep + self.uid + "_pList.txt"
        kListName = pth + os.sep + self.uid + "_kList.txt"
        # unpack the names of files we want to insert
        fList, pList, kList = zip(*self.rTupleList)
        # write fList, pList, kList to text files
        self._write_list_to_file(fList, fListName)
        self._write_list_to_file(pList, pListName)
        self._write_list_to_file(kList, kListName)
        # do BATCHINSERT
        command = ["audioDB", "--BATCHINSERT", "-d", self.adb_path, "-F", fListName, "-W", pListName, "-K", kListName]
        self._do_subprocess(command)
        return 1

    def _write_list_to_file(self,lst, pth):
        """
        ::

            Utility routine to write a list of strings to a text file,
            one entry per line.
        """
        try:
            f = open(pth,"w")
        except:
            print "Error opening: ", pth
            raise error.BregmanError()
        for s in lst: f.write(s+"\n")
        f.close()

    def extract_features(self, key=None, keyrepl=None, extract_only=False, wave_suffix=".wav"):
        """
        ::

            Extract features over the collection
            Pre-requisites:
                self.audio_collection - set of audio files to extract
                self.feature_params - features to extract
            Arguments:
                key - optional string to append on filename stem as database key
                keyrepl - if key is specified then keyrepl is a pattern to replace with key
                extract_only - set to True to skip audioDB insertion
                wave_suffix - fileName extension for key replacement
            Returns rTupleList of features,powers,keys or None if fail.
        """
        aList, fList, pList, kList = self._get_extract_lists(key, keyrepl, wave_suffix)
        self._fftextract_list(zip(aList,fList,pList,kList))
        self.rTupleList = zip(fList,pList,kList) # what will be inserted into audioDB
        if not extract_only:
            self._insert_features_into_audioDB() # possibly generate new audiodb instance
        self.audio_collection.clear() # clear the audio_collection queue
        return self.rTupleList

    def _get_extract_lists(self, key=None, keyrepl=None, wave_suffix=".wav"):
        """
        ::

            Map from self.audio_collection to aList, fList, pList, kList
            (audio paths, feature paths, power paths, database keys).
        """
        # The audio queue
        aList = list(self.audio_collection)
        aList.sort()
        # The keys that will identify managed items
        if key == None:
            kList = aList # use the audio file names as keys
        else:
            # replace keyrepl with key as database key
            if not keyrepl:
                print "key requires keyrepl for filename substitution"
                raise error.BregmanError()
            kList = [a.replace(keyrepl,key) for a in aList]
        feature_suffix= self._get_feature_suffix()
        power_suffix=self.feature_params['power_ext']
        # Feature and power file names mirror the keys with suffix swapped.
        fList = [k.replace(wave_suffix, feature_suffix) for k in kList]
        pList = [k.replace(wave_suffix, power_suffix) for k in kList]
        return (aList, fList, pList, kList)

    def _get_feature_suffix(self):
        """
        ::

            Return a standardized feature suffix for extracted features,
            e.g. ".mfcc13" for feature 'mfcc' with ncoef 13.
        """
        return "." + self.feature_params['feature'] + "%02d"%self.feature_params['ncoef']

    def _fftextract_list(self, extract_list):
        # Run the external fftExtract binary once per missing feature file
        # and once per missing power file; existing files are left alone.
        command=[]
        feature_keys = {'stft':'-f', 'cqft':'-q', 'mfcc':'-m', 'chroma':'-c', 'power':'-P', 'hram':'-H'}
        feat = feature_keys[self.feature_params['feature']]
        ncoef = "%d"%self.feature_params['ncoef']
        nfft = "%d"%self.feature_params['nfft']
        wfft = "%d"%self.feature_params['wfft']
        nhop = "%d"%self.feature_params['nhop']
        logl = "%d"%self.feature_params['log10']
        mag = "%d"%self.feature_params['magnitude']
        lo = "%f"%self.feature_params['lo']
        hi = "%f"%self.feature_params['hi']
        # lcoef = "%d"%self.feature_params['lcoef'] # not used yet
        for a,f,p,k in extract_list:
            if not len(glob.glob(f)):
                command=["fftExtract", "-p", "bregman.wis",
                    "-n", nfft, "-w", wfft, "-h", nhop, feat, ncoef,
                    "-l", lo, "-i", hi, "-g" , logl, "-a", mag, a, f]
                ret = self._do_subprocess(command)
                # NOTE(review): _do_subprocess raises on nonzero exit, so
                # this error branch appears unreachable.
                if ret:
                    print "Error extacting features: ", command
                    return None
            else:
                print "Warning: feature file already exists", f
            if not len(glob.glob(p)):
                command=["fftExtract", "-p", "bregman.wis",
                    "-n", nfft, "-w", wfft, "-h", nhop,
                    "-P", "-l", lo, "-i", hi, a, p]
                ret = self._do_subprocess(command)
                if ret:
                    print "Error extacting powers: ", command
                    return None
            else:
                print "Warning: power file already exists", p

    def _remove_temporary_files(self, key=""):
        """
        ::

            Remove cached feature and power files based on current
            feature_params settings.
        """
        fList = glob.glob(self.collection_path+os.sep + "*" + key + "."
            + self.feature_params['feature']+"%02d"%self.feature_params['ncoef'])
        for f in fList: os.remove(f)
        pList = glob.glob(self.collection_path + os.sep + "*" + key + self.feature_params["power_ext"] )
        for p in pList: os.remove(p)

    def initialize(self):
        """
        ::

            Make a new collection path with an empty audioDB instance.
            Each instance is unique to a set of feature_params.
            Return 0 if the directory could not be created.
            Return 1 if a new collection directory was created.
        """
        if not self._gen_collection_path(self.collection_stem):
            return 0
        print "Made new directory: ", self.collection_path
        # self._new_adb() # This is now done on feature_insert
        return 1

    def _gen_adb_path(self):
        """
        ::

            Name a new adb instance. The file name embeds the
            feature-parameter hash so each parameter set gets its own
            audioDB database.
        """
        if not self.collection_path:
            print "Error: self.collection_path not set"
            raise error.BregmanError()
        adb_path = self.collection_path + os.sep + self.collection_stem + self._gen_adb_hash() +".adb"
        return adb_path

    def _gen_collection_path(self, name_prefix):
        """
        ::

            Make a new unique directory in the self.root_path directory
        """
        self.collection_path = tempfile.mkdtemp(prefix=name_prefix,dir=self.root_path)
        if not self.collection_path:
            print "Error making new directory in location: ", self.root_path
            return 0
        shutil.copymode(self.root_path, self.collection_path) # set permissions
        return 1

    def _new_adb(self):
        """
        ::

            Make a new audioDB instance in the adb_path location
            Make database L2norm and Power compliant.
            Returns 0 if the database file already exists, 1 after creating
            a new one.
        """
        self.adb_path = self._gen_adb_path()
        if self.adb_path == None:
            print "self.adb_path must have a string value"
            raise error.BregmanError()
        else:
            # Existence probe: opening for read succeeds only when the
            # database file is already present.
            f = None
            try:
                f = open(self.adb_path,"rb")
            except:
                print "Making new audioDB database: ", self.adb_path
            finally:
                if f:
                    f.close()
                    print "AudioDB database already exists: ", self.adb_path
                    return 0
        # create a NEW audiodb database instance
        command = ["audioDB", "--NEW", "-d", self.adb_path, "--datasize", "%d"%self.adb_data_size, "--ntracks", "%d"%self.adb_ntracks]
        self._do_subprocess(command)
        # make L2NORM compliant
        command = ["audioDB", "--L2NORM", "-d", self.adb_path]
        self._do_subprocess(command)
        # make POWER compliant
        command = ["audioDB", "--POWER", "-d", self.adb_path]
        self._do_subprocess(command)
        return 1

    def _do_subprocess(self,command):
        """
        ::

            Call an external (shell) command; raises BregmanError on any
            nonzero exit status, otherwise returns 0.
        """
        res = subprocess.call(command)
        if res:
            print "Error in ", command
            raise error.BregmanError()
        return res

    def load(self, path=None):
        """
        ::

            Load stored data for this collection. Currently a no-op.
        """
        pass

    def save(self):
        """
        ::

            Save data for this collection. Currently a no-op.
        """
        pass

    def toc(self, collection_path=None):
        """
        ::

            List contents of this collection, or collection at collection_path.
        """
        if collection_path == None:
            collection_path = self.collection_path
        dlist=glob.glob(collection_path + os.sep + "*.data")
        toclist = []
        # NOTE(review): self.load() is currently a no-op, so every toclist
        # entry is the same self.feature_params object.
        for d in dlist:
            self.load(d)
            toclist.append(self.feature_params)
        return zip(dlist,toclist)

    @classmethod
    def lc(cls, expand=False, limit=None):
        """
        ::

            Alias for ls_collections()
        """
        return cls.ls_collections(expand, limit)

    @classmethod
    def ls_collections(cls, expand=False, limit=None):
        """
        ::

            For the given class, return a list of (path, mtime) pairs,
            newest first. If expand is set to True, print each
            collection's TOC.
        """
        dlist, tm = cls._get_collection_list_by_time()
        dlist = zip(dlist[:limit], tm[:limit])
        if expand:
            R = cls()
            k = R.toc(dlist[0][0])
            for d,t in dlist:
                print d, t
                print k[0][1].keys()
                for i, v in enumerate(R.toc(d)):
                    print "[%d]"%i, v[1].values()
                print ""
        return dlist

    @classmethod
    def _get_collection_list_by_time(cls):
        # Find all collection directories under DATA_ROOT.
        dlist = glob.glob(DATA_ROOT + os.sep + cls.collection_stem + "*")
        # sort into descending order of time
        tm = {}
        for d in dlist: tm[ d ] = os.path.getmtime( d )
        dlist.sort( lambda x,y: cmp( tm[x], tm[y] ) )
        dlist.reverse()
        tm = [time.ctime(tm[d]) for d in dlist]
        return (dlist, tm)
| {
"repo_name": "bregmanstudio/BregmanToolkit",
"path": "bregman/deprecated/audiocollection.py",
"copies": "1",
"size": "13816",
"license": "mit",
"hash": 4722630798900262000,
"line_mean": 34.6082474227,
"line_max": 134,
"alpha_frac": 0.5589171975,
"autogenerated": false,
"ratio": 3.965556831228473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024474028728473,
"avg_score": null,
"num_lines": null
} |
AUDIO_ENABLED = True   # toggled off below if QtMultimedia is unavailable
HELP_ENABLED = True    # toggled off below if no web view widget is available
try:
    from PyQt5.QtCore import (QRectF, Qt, QModelIndex, QItemSelection,
                              pyqtSignal as Signal, pyqtSlot as Slot,
                              QThread, QAbstractTableModel, QAbstractListModel,
                              QSize, QSettings, QPoint, QItemSelectionModel,
                              QSortFilterProxyModel, QAbstractProxyModel, QAbstractItemModel,
                              QSharedMemory, QEvent, QIODevice, QProcess, QUrl, QTime,
                              QStringListModel)
    from PyQt5.QtGui import (QFont, QKeySequence, QPainter, QFontMetrics, QPen,
                             QRegion, QStandardItemModel, QStandardItem, QIcon, QPixmap,
                             QDesktopServices, QCursor)
    # Fix: QMessageBox was imported twice in the original list; the
    # duplicate has been removed.
    from PyQt5.QtWidgets import (QMainWindow, QLayout, QHBoxLayout, QLabel, QAction,
                                 QApplication, QWidget, QMessageBox, QSplitter,
                                 QDialog, QListWidget, QGroupBox, QVBoxLayout,
                                 QPushButton, QFrame, QGridLayout, QRadioButton,
                                 QFormLayout, QLineEdit, QFileDialog, QComboBox,
                                 QProgressDialog, QCheckBox, QTableView,
                                 QAbstractItemView, QHeaderView, QDockWidget, QTreeView,
                                 QStyle, QMenu, QSizePolicy, QButtonGroup, QTabWidget,
                                 QTableWidget, QToolBar, QStyledItemDelegate, QDataWidgetMapper,
                                 QSlider, QItemDelegate, QScrollArea, QBoxLayout, QStackedWidget,
                                 QCompleter, QTableWidgetItem)
    from PyQt5.QtNetwork import QLocalSocket, QLocalServer
    try:
        # QWebEngineView replaces QWebView from PyQt 5.9 onward.
        from PyQt5.QtWebEngineWidgets import QWebEngineView as QWebView
    except ImportError:
        try:
            from PyQt5.QtWebKitWidgets import QWebView
        except ImportError:
            HELP_ENABLED = False
    try:
        from PyQt5.QtMultimedia import QSound, QMediaPlayer, QMediaContent, QAudioOutput
    except ImportError:
        AUDIO_ENABLED = False
except ImportError:
    raise Exception("We could not find an installation of PyQt5. Please double check that it is installed.")
import locale
import sys
# Force a consistent US-English locale so formatting and collation behave
# the same across platforms.
if sys.platform.startswith('win'):
    # Windows uses its own locale naming scheme.
    locale_string = 'English_US'
else:
    # POSIX-style locale name for macOS/Linux.
    locale_string = 'en_US.UTF-8'
# NOTE(review): locale.setlocale raises locale.Error when this locale is not
# installed on the host - presumably assumed present; verify on targets.
locale.setlocale(locale.LC_ALL, locale_string)
import time
| {
"repo_name": "PhonologicalCorpusTools/CorpusTools",
"path": "corpustools/gui/imports.py",
"copies": "1",
"size": "2517",
"license": "bsd-3-clause",
"hash": -2895694287166294500,
"line_mean": 48.3529411765,
"line_max": 110,
"alpha_frac": 0.6046881208,
"autogenerated": false,
"ratio": 4.526978417266187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5631666538066187,
"avg_score": null,
"num_lines": null
} |
# Audio Feature Extraction functions
import numpy as np
#import mir3.modules.features as feat
import mir3.modules.features.flatness as flatness
import mir3.modules.features.mfcc as mfcc
import mir3.modules.features.filterbank as fbank
import mir3.modules.tool.wav2spectrogram as wav2spec
import mir3.data.spectrogram as spec
import mir3.data.feature_track as track
import mir3.modules.tool.to_texture_window as texture
def audio_feature_extraction(filename_in, frame_len=1024, frame_step=512, data_as_feature=False):
    """Extracts features from an audio file.

    Inputs:
        filename_in - string containing filename from which the features will
            be extracted
        frame_len and frame_step - frame length and step between frames (in
            samples) for the framewise feature processing
        data_as_feature - whether or not to include the wav data as a feature
            in the output (currently unused; kept for interface compatibility)

    Outputs:
        texture-window feature track summarizing 30 MFCCs per frame over
        windows of 30 frames.
    """
    # Compute the spectrogram. The `with` block guarantees the file handle
    # is closed even if the conversion raises (the original leaked the
    # handle on error).
    w_spec = wav2spec.Wav2Spectrogram()
    with open(filename_in, 'rb') as audio_file:
        s = w_spec.convert(audio_file, window_length=frame_len,
                           window_step=frame_step)
    # Spectral flatness is computed but not currently part of the output;
    # kept from the original implementation.
    flat = flatness.Flatness().calc_track(s)
    # 30 MFCCs per frame.
    mfccs = mfcc.Mfcc().calc_track(s, 30)
    # Summarize the framewise MFCCs into texture windows of 30 frames.
    tex = texture.ToTextureWindow()
    twin = tex.to_texture(mfccs, 30)
    return twin
# Command-line entry point: extract features from a .wav file and report
# timing; pass -v as a second argument to dump the feature matrix as CSV.
# NOTE(review): Python 2 syntax (print statements, xrange).
if __name__ == "__main__":
    import sys
    import time
    if len(sys.argv)==1:
        print "Usage: python " + sys.argv[0] + " <wav_file>"
        exit()
    T0 = time.time()
    feat = audio_feature_extraction(sys.argv[1])
    T1 = time.time()
    print "Feature extraction took ", T1-T0, " seconds"
    print feat.metadata.feature
    if len(sys.argv)==2:
        exit()
    if sys.argv[2] == '-v':
        # Print one comma-separated line per texture window.
        for i in xrange(feat.data.shape[0]):
            line=""
            for j in xrange(feat.data.shape[1]-1):
                line += str(feat.data[i,j]) + ", "
            line += str(feat.data[i,-1]) + "\n"
            print line,
    #print feat.data.shape
| {
"repo_name": "pymir3/pymir3",
"path": "afe.py",
"copies": "1",
"size": "3477",
"license": "mit",
"hash": -2612570113952917500,
"line_mean": 29.5,
"line_max": 97,
"alpha_frac": 0.6404946793,
"autogenerated": false,
"ratio": 3.2586691658856606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9306278685443753,
"avg_score": 0.018577031948381468,
"num_lines": 114
} |
''' Audio file info computed by soxi.
'''
import os
from numbers import Number
from pathlib import Path
from typing import List, Dict
from typing import Optional, Union
from .core import VALID_FORMATS
from .core import sox
from .core import soxi
from .log import logger
def bitdepth(input_filepath: Union[str, Path]) -> Optional[int]:
    '''
    Number of bits per sample, or None if not applicable.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    bitdepth : int or None
        Number of bits per sample.
        Returns None if not applicable.
    '''
    validate_input_file(input_filepath)
    raw = soxi(input_filepath, 'b')
    if raw != '0':
        return int(raw)
    # soxi reports 0 bits for formats without a fixed sample width.
    logger.warning("Bit depth unavailable for %s", input_filepath)
    return None
def bitrate(input_filepath: Union[str, Path]) -> Optional[float]:
    '''
    Bit rate averaged over the whole file.
    Expressed in bytes per second (bps), or None if not applicable.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    bitrate : float or None
        Bit rate, expressed in bytes per second.
        Returns None if not applicable.
    '''
    validate_input_file(input_filepath)
    output = soxi(input_filepath, 'B')
    # The characters below stand for kilo, Mega, Giga, etc.
    greek_prefixes = '\0kMGTPEZY'
    if output == "0":
        logger.warning("Bit rate unavailable for %s", input_filepath)
        return None
    if output[-1] in greek_prefixes:
        multiplier = 1000.0 ** (greek_prefixes.index(output[-1]))
        return float(output[:-1]) * multiplier
    # Bug fix: with no magnitude suffix the whole string is the number.
    # The original returned float(output[:-1]), silently dropping the
    # final digit of a plain numeric bit rate.
    return float(output)
def channels(input_filepath: Union[str, Path]) -> int:
    '''
    Show number of channels.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    channels : int
        number of channels
    '''
    validate_input_file(input_filepath)
    return int(soxi(input_filepath, 'c'))
def comments(input_filepath: Union[str, Path]) -> str:
    '''
    Show file comments (annotations) if available.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    comments : str
        File comments from header.
        If no comments are present, returns an empty string.
    '''
    validate_input_file(input_filepath)
    return str(soxi(input_filepath, 'a'))
def duration(input_filepath: Union[str, Path]) -> Optional[float]:
    '''
    Show duration in seconds, or None if not available.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    duration : float or None
        Duration of audio file in seconds.
        If unavailable or empty, returns None.
    '''
    validate_input_file(input_filepath)
    seconds = float(soxi(input_filepath, 'D'))
    if seconds == 0.0:
        # A zero-length report means soxi could not determine the duration.
        logger.warning("Duration unavailable for %s", input_filepath)
        return None
    return seconds
def encoding(input_filepath: Union[str, Path]) -> str:
    '''
    Show the name of the audio encoding.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    encoding : str
        audio encoding type
    '''
    validate_input_file(input_filepath)
    return str(soxi(input_filepath, 'e'))
def file_type(input_filepath: Union[str, Path]) -> str:
    '''
    Show detected file-type.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    file_type : str
        file format type (ex. 'wav')
    '''
    validate_input_file(input_filepath)
    return str(soxi(input_filepath, 't'))
def num_samples(input_filepath: Union[str, Path]) -> Optional[int]:
    '''
    Show number of samples, or None if unavailable.

    Parameters
    ----------
    input_filepath : path-like (str or pathlib.Path)
        Path to audio file.

    Returns
    -------
    n_samples : int or None
        total number of samples in audio file.
        Returns None if empty or unavailable.
    '''
    input_filepath = str(input_filepath)
    validate_input_file(input_filepath)
    raw = soxi(input_filepath, 's')
    if raw != '0':
        return int(raw)
    logger.warning("Number of samples unavailable for %s", input_filepath)
    return None
def sample_rate(input_filepath: Union[str, Path]) -> float:
    '''
    Show sample-rate.

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    samplerate : float
        number of samples/second
    '''
    validate_input_file(input_filepath)
    return float(soxi(input_filepath, 'r'))
def silent(input_filepath: Union[str, Path], threshold: float = 0.001) -> bool:
    '''
    Determine if an input file is silent.

    Parameters
    ----------
    input_filepath : str
        The input filepath.
    threshold : float
        Threshold for determining silence

    Returns
    -------
    is_silent : bool
        True if file is determined silent.
    '''
    import math  # local import: math is not imported at module level

    validate_input_file(input_filepath)
    stat_dictionary = stat(input_filepath)
    mean_norm = stat_dictionary['Mean norm']
    # Bug fix: the original tested `mean_norm is not float('nan')`, an
    # identity comparison against a freshly created float object, which is
    # always True - NaN was never detected. NaN must be checked with
    # math.isnan. A missing/unparseable stat (None from _parse_stat) is
    # also treated as silent, matching the original's intent.
    if mean_norm is None or math.isnan(mean_norm):
        return True
    return mean_norm < threshold
def validate_input_file(input_filepath: Union[str, Path]) -> None:
    '''Input file validation function. Checks that file exists and can be
    processed by SoX.

    Parameters
    ----------
    input_filepath : path-like (str or pathlib.Path)
        The input filepath.
    '''
    input_filepath = Path(input_filepath)
    if not input_filepath.exists():
        raise IOError(
            "input_filepath {} does not exist.".format(input_filepath)
        )
    extension = file_extension(input_filepath)
    if extension not in VALID_FORMATS:
        # Not fatal: SoX may still handle it, so only warn.
        logger.info("Valid formats: %s", " ".join(VALID_FORMATS))
        logger.warning(
            "This install of SoX cannot process .{} files.".format(extension)
        )
def validate_input_file_list(input_filepath_list: List[Union[str, Path]]) -> None:
    '''Input file list validation function. Checks that object is a list and
    contains valid filepaths that can be processed by SoX.

    Parameters
    ----------
    input_filepath_list : list
        A list of filepaths.
    '''
    if not isinstance(input_filepath_list, list):
        raise TypeError("input_filepath_list must be a list.")
    if len(input_filepath_list) < 2:
        raise ValueError("input_filepath_list must have at least 2 files.")
    for filepath in input_filepath_list:
        validate_input_file(filepath)
def validate_output_file(output_filepath: Union[str, Path]) -> None:
    '''Output file validation function. Checks that file can be written, and
    has a valid file extension. Throws a warning if the path already exists,
    as it will be overwritten on build.

    Parameters
    ----------
    output_filepath : path-like (str or pathlib.Path)
        The output filepath.
    '''
    # This function enforces use of the path as a string, because
    # os.access has no analog in pathlib.
    output_filepath = str(output_filepath)
    # '-n' is SoX's "no output file" pseudo-path; nothing to validate.
    if output_filepath == '-n':
        return
    # Unwritable only when BOTH conditions hold: (a) the path has an
    # explicit directory component OR the cwd is unwritable, and (b) the
    # directory component is unwritable. For a bare filename dirname is ''
    # and os.access('', W_OK) is False, so (b) holds and writability is
    # decided by the cwd check in (a).
    nowrite_conditions = [
        bool(os.path.dirname(output_filepath)) or
        not os.access(os.getcwd(), os.W_OK),
        not os.access(os.path.dirname(output_filepath), os.W_OK)]
    if all(nowrite_conditions):
        raise IOError(
            "SoX cannot write to output_filepath {}".format(output_filepath)
        )
    ext = file_extension(output_filepath)
    if ext not in VALID_FORMATS:
        # Not fatal: only warn about an unrecognized extension.
        logger.info("Valid formats: %s", " ".join(VALID_FORMATS))
        logger.warning(
            "This install of SoX cannot process .{} files.".format(ext)
        )
    if os.path.exists(output_filepath):
        logger.warning(
            'output_file: %s already exists and will be overwritten on build',
            output_filepath
        )
def file_extension(filepath: Union[str, Path]) -> str:
    '''Get the extension of a filepath.

    Parameters
    ----------
    filepath : path-like (str or pathlib.Path)
        File path.

    Returns
    -------
    extension : str
        The file's extension, lowercased, without the leading dot
        (empty string when the path has no extension).
    '''
    suffix = Path(filepath).suffix
    return suffix[1:].lower()
def info(filepath: Union[str, Path]) -> Dict[str, Union[str, Number]]:
    '''Get a dictionary of file information

    Parameters
    ----------
    filepath : str
        File path.

    Returns
    -------
    info_dictionary : dict
        Dictionary of file information. Fields are:
        * channels
        * sample_rate
        * bitdepth
        * bitrate
        * duration
        * num_samples
        * encoding
        * silent
    '''
    # Field order matches the original literal dict.
    getters = (
        ('channels', channels),
        ('sample_rate', sample_rate),
        ('bitdepth', bitdepth),
        ('bitrate', bitrate),
        ('duration', duration),
        ('num_samples', num_samples),
        ('encoding', encoding),
        ('silent', silent),
    )
    return {name: getter(filepath) for name, getter in getters}
def stat(filepath: Union[str, Path]) -> Dict[str, Optional[float]]:
    '''Returns a dictionary of audio statistics.

    Parameters
    ----------
    filepath : str
        File path.

    Returns
    -------
    stat_dictionary : dict
        Dictionary of audio statistics.
    '''
    return _parse_stat(_stat_call(filepath))
def _stat_call(filepath: Union[str, Path]) -> str:
    '''Call sox's stat function.

    Parameters
    ----------
    filepath : str
        File path.

    Returns
    -------
    stat_output : str
        Sox output from stderr.
    '''
    validate_input_file(filepath)
    # sox prints its stat report on stderr; -n suppresses output audio.
    _, _, stat_output = sox(['sox', filepath, '-n', 'stat'])
    return stat_output
def _parse_stat(stat_output: str) -> Dict[str, Optional[float]]:
'''Parse the string output from sox's stat function
Parameters
----------
stat_output : str
Sox output from stderr.
Returns
-------
stat_dictionary : dict
Dictionary of audio statistics.
'''
lines = stat_output.split('\n')
stat_dict = {}
for line in lines:
split_line = line.split(':')
if len(split_line) == 2:
key = split_line[0]
val = split_line[1].strip(' ')
try:
val = float(val)
except ValueError:
val = None
stat_dict[key] = val
return stat_dict
| {
"repo_name": "rabitt/pysox",
"path": "sox/file_info.py",
"copies": "1",
"size": "10952",
"license": "bsd-3-clause",
"hash": 1358571279818416000,
"line_mean": 24.00456621,
"line_max": 82,
"alpha_frac": 0.5945945946,
"autogenerated": false,
"ratio": 4.176964149504196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5271558744104196,
"avg_score": null,
"num_lines": null
} |
"""Audio file I/O and audio data manipulation."""
import numpy as np
import scipy.io.wavfile
# Maps a sample dtype (by name or numpy type) to its bit width, used to
# derive .wav scaling factors below.
SND_DTYPES = {'int16': 16, np.int16: 16, 'int32': 32, np.int32: 32}
"""Data types that SciPy can import from ``.wav``"""
def snd_norm(snd, factor=None):
    """ Scale elements of an array of (.wav) data from -1 to 1.

    Default factor is determined from ``snd.dtype``, corresponding to the
    format imported by ``scipy.io.wavfile``. Can scale other types of data if
    ``factor`` is appropriately specified and ``snd`` can be scaled
    element-wise with the division operator, as for ``np.ndarray``.

    Args:
        snd (np.ndarray): Data (audio from .wav) to be scaled.
        factor (int): Divide elements of ``snd`` by this number.

    Returns:
        scaled (np.ndarray): Same shape as ``snd``, with elements scaled.
    """
    if factor is None:
        # Full-scale value for the integer sample format, e.g. 2**15 for int16.
        bits = SND_DTYPES[snd.dtype.name]
        factor = 2. ** (bits - 1)
    return snd / factor
def wav_read_norm(wavfile_name):
    """ Return contents of .wav as array scaled from -1 to 1.

    Args:
        wavfile_name (str): Name of the .wav file to read.

    Returns:
        sample_rate (int): The file's audio sampling rate.
        snd (np.ndarray): The file's audio samples as ``float``.
    """
    sample_rate, raw_samples = scipy.io.wavfile.read(wavfile_name)
    return sample_rate, snd_norm(raw_samples)
def buffers_to_snd(buffers, stereo=True, channel_ind=None, dtype=np.int32):
    """ Convert a series of JACK buffers to 2-channel SciPy audio.

    Args:
        buffers (np.ndarray): Series of JACK buffers in a 3D array. Second
            dimension length is number of channels, third is ``buffer_size``.
        stereo (bool): If ``True``, the two channels of ``snd`` are taken by
            default from ``buffers[0:2]``, else both from ``buffers[0]`` (mono).
        channel_ind: If stereo, can be a length-2 ``slice`` or a Numpy advanced
            index selecting two channels in ``buffers``. If mono, an integer,
            slice, or Numpy advanced index for a single channel must be passed.
        dtype (str): Datatype of the returned array.
            Must be a key in ``SND_DTYPES`` to ensure SciPy compatibility.

    Returns:
        snd (np.ndarray): SciPy-compatible array of audio frames.
    """
    if stereo:
        selector = slice(0, 2) if channel_ind is None else channel_ind
        selected = buffers[selector]
    else:
        mono_ind = 0 if channel_ind is None else channel_ind
        # Duplicate the single channel so the output is still two-channel.
        selected = np.concatenate(np.atleast_2d(buffers[mono_ind]) * 2)
    frames = selected.reshape((2, selected.size // 2)).T
    scaled = frames * 2. ** (SND_DTYPES[dtype] - 1)
    return scaled.astype(dtype)
| {
"repo_name": "laporte-m/muser",
"path": "muser/audio.py",
"copies": "1",
"size": "2717",
"license": "mit",
"hash": 7838820722829125000,
"line_mean": 35.7162162162,
"line_max": 80,
"alpha_frac": 0.6245859404,
"autogenerated": false,
"ratio": 3.5516339869281044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9675385759827271,
"avg_score": 0.00016683350016683348,
"num_lines": 74
} |
# Audio Function Generator; "afg.py"...
# -------------------------------------
#
# A fun Python program to generate a Sine, Square, Triangle, Sawtooth,
# and Pulse waveforms using STANDARD Python at the earphone sockets.
# This is for (PC)Linux(OS), (ONLY?), and was done purely for fun.
# (It has now been tested on Debian 6.0.0 and Knoppix 5.1.1!)
#
# Although it is fairly easy to alter the frequency within limits I
# have left it at approximately 1KHz to keep the code size down...
#
# It would be tricky to change the output level using STANDARD Python
# for (PC)Linux(OS) 2009 using this idea to generate waveforms, but it
# is possible within limits.
#
# (Original idea copyright, (C)2009, B.Walker, G0LCU.)
# Issued as Public Domain, (to LXF) and you may do with it as you please.
#
# It is assumed that /dev/audio exists; if NOT, then install oss-compat
# from the distro`s repository.
#
# Ensure the sound system is not already in use.
#
# Copy the file to the Lib folder(/drawer/directory) or where the modules
# reside as "afg.py"...
#
# For a quick way to run just use at the ">>>" prompt:-
#
# >>> import afg[RETURN/ENTER]
#
# And away we go...
#
# The waveforms generated are unfiltered and therefore not "pure",
# but hey, an audio function generator signal source, for free, without
# external hardware, AND, using standard Python, what more do you want... :)
#
# Using my notebook about 150mV p-p was generated at the earphone
# socket(s).
#
# Coded on a(n) HP dual core notebook running PCLinuxOS 2009 and
# Python 2.5.2 for Linux...
#
# You will need an oscilloscope connected to the earphone socket(s)
# to see the resultant waveform(s) generated, or listen to the
# harshness of the sound... ;o)
#
# It is EASILY possible to generate pseudo-random noise also but
# I'll leave that for you to work out... :)
# Import any modules...
# Import any modules...
# NOTE: this is a Python 2 script (print statements, raw_input(), file()).
import os
# Clear a terminal window ready to run this program.
print os.system("clear"),chr(13)," ",chr(13),
# The program proper...
def main():
    # Generate ~1KHz test waveforms by writing raw 8-byte cycles to /dev/audio.
    # Make all variables global, a quirk of mine... :)
    global sine
    global square
    global triangle
    global sawtoothplus
    global sawtoothminus
    global pulseplus
    global pulseminus
    global waveform
    global select
    global count
    # Allocate values to variables.
    # Any discrepancy between random soundcards may require small changes
    # in the numeric values inside each waveform mode...
    # These all oscillate at around 1KHz.
    # Each waveform is a single 8-sample cycle (unsigned byte amplitudes 0..63).
    sine=chr(15)+chr(45)+chr(63)+chr(45)+chr(15)+chr(3)+chr(0)+chr(3)
    square=chr(63)+chr(63)+chr(63)+chr(63)+chr(0)+chr(0)+chr(0)+chr(0)
    triangle=chr(0)+chr(7)+chr(15)+chr(29)+chr(63)+chr(29)+chr(15)+chr(7)
    sawtoothplus=chr(63)+chr(39)+chr(26)+chr(18)+chr(12)+chr(8)+chr(4)+chr(0)
    sawtoothminus=chr(0)+chr(4)+chr(8)+chr(12)+chr(18)+chr(26)+chr(39)+chr(63)
    pulseplus=chr(0)+chr(63)+chr(63)+chr(63)+chr(63)+chr(63)+chr(63)+chr(63)
    pulseminus=chr(63)+chr(0)+chr(0)+chr(0)+chr(0)+chr(0)+chr(0)+chr(0)
    # This is the INITIAL default waveform, the Square Wave.
    waveform=square
    select="G0LCU."
    count=1
    # A continuous loop to change modes as required...
    while 1:
        # Set up a basic user window.
        print os.system("clear"),chr(13)," ",chr(13),
        print
        print "Simple Function Generator using STANDARD Python 2.5.2"
        print "for PCLinuxOS 2009, issued as Public Domain to LXF..."
        print
        print "Original idea copyright, (C)2009, B.Walker, G0LCU."
        print
        print "1) Sinewave."
        print "2) Squarewave."
        print "3) Triangle."
        print "4) Positive going sawtooth."
        print "5) Negative going sawtooth."
        print "6) Positive going pulse."
        print "7) Negative going pulse."
        print "Q) or q) to quit..."
        print
        # Enter a number for the mode required.
        select=raw_input("Select a number/letter and press RETURN/ENTER:- ")
        # Empty input defaults to the square wave; any multi-char input quits.
        if select=="": select="2"
        if len(select)!=1: break
        if select=="Q": break
        if select=="q": break
        if select=="1": waveform=sine
        if select=="2": waveform=square
        if select=="3": waveform=triangle
        if select=="4": waveform=sawtoothplus
        if select=="5": waveform=sawtoothminus
        if select=="6": waveform=pulseplus
        if select=="7": waveform=pulseminus
        # Re-use the variable ~select~ again...
        # (out-of-range single chars fall back to a generic label; the
        # previously selected waveform keeps playing)
        if select<=chr(48): select="Default OR last"
        if select>=chr(56): select="Default OR last"
        if select=="1": select="Sine wave"
        if select=="2": select="Square wave"
        if select=="3": select="Triangle wave"
        if select=="4": select="Positive going sawtooth"
        if select=="5": select="Negative going sawtooth"
        if select=="6": select="Positive going pulse"
        if select=="7": select="Negative going pulse"
        print os.system("clear"),chr(13)," ",chr(13),
        print
        print select+" audio waveform generation..."
        print
        # Open up the audio channel(s) to write directly to.
        audio=file('/dev/audio', 'wb')
        # Make the tone generation time finite in milliseconds...
        # A count of 10000 is 10 seconds of tone burst...
        count=0
        while count<10000:
            # Write the waveform to the audio device.
            audio.write(waveform)
            count=count+1
        # Close the audio device when finished.
        audio.close()
main()
# End of demo...
# Enjoy finding simple solutions to often very difficult problems...
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577592_Simple_1KHz_Audio_FunctiGenerator_Using/recipe-577592.py",
"copies": "1",
"size": "5147",
"license": "mit",
"hash": 3093338485334539000,
"line_mean": 34.0136054422,
"line_max": 76,
"alpha_frac": 0.693413639,
"autogenerated": false,
"ratio": 3.071002386634845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4264416025634845,
"avg_score": null,
"num_lines": null
} |
"""Audio handling submodule."""
import collections
import time
import os
import logging
from array import array
import wave
import pyaudio
from snowboydetect import snowboydetect
logging.basicConfig()
_LOGGER = logging.getLogger("snowboy")
_LOGGER.setLevel(logging.INFO)
TOP_DIR = os.path.dirname(os.path.abspath(__file__))
RESOURCE_FILE = os.path.join(TOP_DIR, "resources/common.res")
DETECT_DING = os.path.join(TOP_DIR, "resources/ding.wav")
DETECT_DONG = os.path.join(TOP_DIR, "resources/dong.wav")
BUFFER_LENGTH = 5 # Seconds
RECORDING_SILENCE_START = 3
RECORDING_SILENCE = 4
RECORDING_THRESHOLD = 3000
class RingBuffer:
    """Bounded FIFO byte buffer; when full, the oldest bytes are dropped."""

    def __init__(self, size=4096):
        """Create the buffer with capacity `size` (None means unbounded)."""
        self._buf = collections.deque(maxlen=size)

    def extend(self, data):
        """Append `data` (an iterable of byte values) at the tail."""
        self._buf.extend(data)

    def get(self):
        """Return all buffered data as `bytes`, emptying the buffer."""
        chunk = bytes(bytearray(self._buf))
        self._buf.clear()
        return chunk

    @property
    def length(self):
        """Number of byte values currently buffered."""
        return len(self._buf)
def play_audio_file(fname=DETECT_DING):
    """Play a wave file.

    By default it plays a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    ding_wav = wave.open(fname, 'rb')
    try:
        ding_data = ding_wav.readframes(ding_wav.getnframes())
        sample_width = ding_wav.getsampwidth()
        channels = ding_wav.getnchannels()
        frame_rate = ding_wav.getframerate()
    finally:
        ding_wav.close()  # fix: the wave reader was previously never closed
    audio = pyaudio.PyAudio()
    try:
        stream_out = audio.open(
            format=audio.get_format_from_width(sample_width),
            channels=channels,
            rate=frame_rate, input=False, output=True)
        stream_out.start_stream()
        stream_out.write(ding_data)
        # Give the output buffer a moment to drain before tearing down.
        time.sleep(0.2)
        stream_out.stop_stream()
        stream_out.close()
    finally:
        audio.terminate()  # fix: release PortAudio even if playback fails
class HotwordDetector:
    """
    Detect whether a keyword exists in a microphone input stream.
    :param decoder_model: decoder model file path, a string or list of strings
    :param resource: resource file path.
    :param sensitivity: decoder sensitivity, a float of a list of floats.
                        The bigger the value, the more senstive the
                        decoder. If an empty list is provided, then the
                        default sensitivity in the model will be used.
    :param audio_gain: multiply input volume by this factor.
    """

    # pylint: disable=too-many-instance-attributes
    # Needs refactoring as port of opsdroid/opsdroid-audio#12
    def __init__(self, decoder_model,
                 resource=RESOURCE_FILE,
                 sensitivity=None,
                 audio_gain=1):
        """Initialise the HotwordDetector object."""
        def audio_callback(in_data, frame_count, time_info, status):
            """Extend buffer with data from pyaudio."""
            self.ring_buffer.extend(in_data)
            # NOTE(review): chr(0) * len(in_data) is a str, not bytes;
            # on Python 3 pyaudio output data is normally bytes -- confirm.
            play_data = chr(0) * len(in_data)
            return play_data, pyaudio.paContinue

        # Recording state: flipped on when a hotword fires, cleared again
        # once enough consecutive silent chunks have been seen.
        self.recording = False
        self.recording_silence = 0   # consecutive silent chunks while recording
        self.recording_time = 0      # chunks recorded since the hotword fired
        self.last_chunk_silent = False
        # Normalize decoder_model / sensitivity to lists.
        if not isinstance(decoder_model, list):
            decoder_model = [decoder_model]
        if sensitivity is None:
            sensitivity = []
        elif not isinstance(sensitivity, list):
            sensitivity = [sensitivity]
        model_str = ",".join(decoder_model)
        # pylint: disable=unexpected-keyword-arg
        # NOTE(review): str(x.encode()) yields "b'...'" on Python 3; this
        # mirrors upstream snowboy usage -- confirm against the binding.
        self.detector = snowboydetect.SnowboyDetect(
            resource_filename=str(resource.encode()),
            model_str=str(model_str.encode()))
        self.detector.SetAudioGain(audio_gain)
        self.num_hotwords = self.detector.NumHotwords()
        # A single sensitivity value is broadcast across all hotwords.
        if len(decoder_model) > 1 and len(sensitivity) == 1:
            sensitivity = sensitivity*self.num_hotwords
        if sensitivity:
            assert self.num_hotwords == len(sensitivity), \
                "number of hotwords in decoder_model (%d) and sensitivity " \
                "(%d) does not match" % (self.num_hotwords, len(sensitivity))
        sensitivity_str = ",".join([str(t) for t in sensitivity])
        if sensitivity:
            self.detector.SetSensitivity(sensitivity_str.encode())
        # Ring buffer sized to hold BUFFER_LENGTH seconds of raw audio.
        self.ring_buffer = RingBuffer(
            self.detector.NumChannels() *
            self.detector.SampleRate() * BUFFER_LENGTH)
        # Unbounded buffer that accumulates audio while recording.
        self.record_buffer = RingBuffer(None)
        self.audio = pyaudio.PyAudio()
        # NOTE(review): BitsPerSample() / 8 is a float on Python 3;
        # get_format_from_width usually expects an int -- confirm.
        self.stream_in = self.audio.open(
            input=True, output=False,
            format=self.audio.get_format_from_width(
                self.detector.BitsPerSample() / 8),
            channels=self.detector.NumChannels(),
            rate=self.detector.SampleRate(),
            frames_per_buffer=2048,
            stream_callback=audio_callback)

    def start(self, detected_callback=play_audio_file,
              recording_callback=None,
              interrupt_check=lambda: False,
              sleep_time=0.03):
        """
        Start the voice detector.
        For every `sleep_time` second it checks the
        audio buffer for triggering keywords. If detected, then call
        corresponding function in `detected_callback`, which can be a single
        function (single model) or a list of callback functions (multiple
        models). Every loop it also calls `interrupt_check` -- if it returns
        True, then breaks from the loop and return.
        :param detected_callback: a function or list of functions. The number
                                  of items must match the number of models in
                                  `decoder_model`.
        :param recording_callback: called with (recorded_bytes, self) once a
                                   recording ends after silence.
        :param interrupt_check: a function that returns True if the main loop
                                needs to stop.
        :param float sleep_time: how much time in second every loop waits.
        :return: None
        """
        # pylint: disable=too-many-branches
        # Needs refactoring as port of opsdroid/opsdroid-audio#12
        if interrupt_check():
            _LOGGER.debug("detect voice return")
            return
        # Normalize callbacks to one per hotword.
        if not isinstance(detected_callback, list):
            detected_callback = [detected_callback]
        if len(detected_callback) == 1 and self.num_hotwords > 1:
            detected_callback *= self.num_hotwords
        assert self.num_hotwords == len(detected_callback), \
            "Error: hotwords in your models (%d) do not match the number " \
            "of callbacks (%d)" % (self.num_hotwords, len(detected_callback))
        _LOGGER.debug("detecting...")
        while True:
            if interrupt_check():
                _LOGGER.debug("detect voice break")
                break
            data = self.ring_buffer.get()
            if not data:
                # Nothing buffered yet; yield before polling again.
                time.sleep(sleep_time)
                continue
            # Interpret the raw bytes as signed 16-bit samples for the
            # amplitude-based silence check below.
            data_as_ints = array('h', data)
            if self.recording:
                self.record_buffer.extend(data)
                self.recording_time += 1
                # Count a silent chunk only after the initial grace period
                # and only if the previous chunk was also silent.
                if self.recording_time > RECORDING_SILENCE_START and \
                        max(data_as_ints) < RECORDING_THRESHOLD and \
                        self.last_chunk_silent:
                    self.recording_silence += 1
                else:
                    self.recording_silence = 0
                # Enough consecutive silence: finish and hand off the recording.
                if self.recording_silence >= RECORDING_SILENCE:
                    _LOGGER.info("Stopping recording")
                    if recording_callback is not None:
                        recording_callback(self.record_buffer.get(), self)
                    self.recording = False
                    self.recording_silence = 0
                    self.recording_time = 0
                if max(data_as_ints) > RECORDING_THRESHOLD:
                    self.last_chunk_silent = False
                else:
                    self.last_chunk_silent = True
            else:
                ans = self.detector.RunDetection(data)
                if ans == -1:
                    _LOGGER.warning(
                        "Error initializing streams or reading audio data")
                elif ans > 0:
                    # ans is the 1-based index of the hotword that fired.
                    _LOGGER.info("Keyword detected, starting recording")
                    self.recording = True
                    self.record_buffer.extend(data)
                    callback = detected_callback[ans-1]
                    if callback is not None:
                        callback(data, self)
        _LOGGER.debug("finished.")

    def terminate(self):
        """
        Terminate audio stream. Users cannot call start() again to detect.
        :return: None
        """
        self.stream_in.stop_stream()
        self.stream_in.close()
        self.audio.terminate()
| {
"repo_name": "opsdroid/opsdroid-audio",
"path": "opsdroidaudio/audio.py",
"copies": "1",
"size": "8757",
"license": "apache-2.0",
"hash": -2750489212286487000,
"line_mean": 35.640167364,
"line_max": 78,
"alpha_frac": 0.5819344524,
"autogenerated": false,
"ratio": 4.232479458675689,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5314413911075689,
"avg_score": null,
"num_lines": null
} |
"""Audio Layers"""
__author__ = 'thor'
from numpy import hstack, vstack, zeros, shape, mean, random, linspace
from numpy import argmax, argmin, flipud, percentile, ndarray, ceil
import numpy as np
import os
import re
import librosa
import librosa.display
import soundfile as sf
import wave
import contextlib
from IPython.display import Audio
import matplotlib.pyplot as plt
# import numpy as np
from scipy.signal import resample as scipy_signal_resample
# from functools import partial
from ut.util.log import printProgress
from ut.util.pfunc import filter_kwargs_to_func_arguments
import subprocess
default_sr = 44100
wav_text_info_exp = re.compile("^.*WAVEbextZ\x03\x00\x00([^\x00]+)")
TMP_FILE = 'ut_sound_util_tmp_file.wav'
def convert_to_wav(source_file, target_file=None, sample_rate=default_sr, print_stats=False):
    """Convert a sound file to .wav at `sample_rate` by shelling out to ffmpeg.

    When `target_file` is None, the output is written next to the source with
    a .wav extension. Returns ffmpeg's exit code. With print_stats=False the
    `-nostats` flag suppresses ffmpeg's progress output.
    """
    if target_file is None:
        folder, filename = os.path.split(source_file)
        base_name, _ = os.path.splitext(filename)
        target_file = os.path.join(folder, base_name + '.wav')
    command = ['ffmpeg']
    if not print_stats:
        command.append('-nostats')
    command += ['-i', source_file, '-ar', str(sample_rate), target_file]
    return subprocess.call(command)
def complete_sref(sref):
    """Return a copy of the sound-reference dict with missing fields filled in.

    `offset_s` defaults to 0.0; if `duration` is absent it is read from the
    referenced file (wav header for .wav, otherwise by loading the sound).
    The input dict is never mutated.
    """
    completed = dict({'offset_s': 0.0}, **sref)  # copy, so sref stays untouched
    if 'duration' not in completed:
        filepath = completed['filepath']
        if is_wav_file(filepath):
            completed['duration'] = get_duration_of_wav_file(filepath)
        else:
            sound = Sound.from_file(filepath)
            completed['duration'] = duration_of_wf_and_sr(sound.wf, sound.sr) - completed['offset_s']
    return completed
def sound_file_info_dict(filepath):
    """Return a dict of basic file info; for .wav files, also header details.

    Always includes: filepath, name (basename without extension), size (bytes),
    ext (extension without the dot). For .wav files adds: channels,
    sample_width, frames, frame_rate, duration (seconds) and, when present,
    inner_wav_text (free text found inside the wav container).
    """
    filename = os.path.basename(filepath)
    (shortname, extension) = os.path.splitext(filename)
    d = {'filepath': filepath,
         'name': shortname,
         'size': os.path.getsize(filepath),
         'ext': extension[1:]
         }
    if extension == '.wav':
        with contextlib.closing(wave.open(filepath, 'r')) as f:
            d['channels'] = f.getnchannels()
            d['sample_width'] = f.getsampwidth()
            d['frames'] = f.getnframes()
            d['frame_rate'] = f.getframerate()
            d['duration'] = d['frames'] / float(d['frame_rate'])
        # NOTE(review): opening a binary wav in text mode ('r') can raise
        # UnicodeDecodeError on Python 3; looks like a Python 2 leftover -- confirm.
        with open(filepath, 'r') as f:
            text_info = get_wav_text_info(f)
        if text_info is not None:
            d['inner_wav_text'] = text_info
    return d
def get_frame_rate_of_wav_file(filepath):
    """Read and return the frame (sample) rate declared in a wav file header."""
    reader = wave.open(filepath, 'r')
    try:
        return reader.getframerate()
    finally:
        reader.close()
def get_duration_of_wav_file(filepath):
    """Return the duration of a wav file in seconds (frames / frame_rate)."""
    reader = wave.open(filepath, 'r')
    try:
        return reader.getnframes() / reader.getframerate()
    finally:
        reader.close()
def is_mono(wf):
    """True when the wave form is single-channel, i.e. has a 1-D shape."""
    return len(shape(wf)) == 1
def ensure_mono(wf):
    """Return a mono wave form: 1-D input is passed through unchanged,
    multi-channel input is averaged across channels."""
    if len(shape(wf)) == 1:
        return wf
    return mean(wf, axis=1)
def resample_wf(wf, sr, new_sr):
    """Resample `wf` from rate `sr` to `new_sr` with scipy's FFT-based resample.

    The output length is round(len(wf) * new_sr / sr).
    """
    # TODO: consider using sox for resampling instead.
    target_num_points = int(round(len(wf) * new_sr / sr))
    return scipy_signal_resample(wf, num=target_num_points)
def suffix_with_silence(wf, num_silence_pts):
    """Append `num_silence_pts` zero samples at the end of a wave form.

    Mono (1-D) input gets a 1-D zero tail; otherwise a (n, 2) stereo tail.
    """
    if len(shape(wf)) == 1:
        return hstack([wf, zeros(num_silence_pts)])
    return vstack([wf, zeros((num_silence_pts, 2))])
def prefix_with_silence(wf, num_silence_pts):
    """Prepend `num_silence_pts` zero samples in front of a wave form.

    Mono (1-D) input gets a 1-D zero head; otherwise a (n, 2) stereo head.
    """
    if len(shape(wf)) == 1:
        return hstack([zeros(num_silence_pts), wf])
    return vstack([zeros((num_silence_pts, 2)), wf])
def stereo_to_mono_and_extreme_silence_cropping(source, target, subtype=None, print_progress=False):
    """Convert sound file(s) to mono with leading/trailing silence removed.

    If `source` and `target` are both directories, every *.wav directly under
    `source` is processed into `target` (same filename). Otherwise `source`
    is treated as a single file written to `target`.
    """
    if os.path.isdir(source) and os.path.isdir(target):
        from glob import iglob
        if source[-1] != '/':
            source += '/'
        for i, filepath in enumerate(iglob(source + '*.wav')):
            filename = os.path.basename(filepath)
            if print_progress:
                printProgress("{}: {}".format(i, filename))
            # NOTE(review): the recursive call does not forward `subtype` or
            # `print_progress`, so per-file conversions use the defaults -- confirm intended.
            stereo_to_mono_and_extreme_silence_cropping(
                filepath,
                os.path.join(target, filename)
            )
    else:
        wf, sr = wf_and_sr(source)
        wf = ensure_mono(wf)
        wf = crop_head_and_tail_silence(wf)
        sf.write(target, wf, samplerate=sr, subtype=subtype)
def get_wav_text_info(filespec):
    """Extract the free-text chunk embedded in a wav file, if any.

    `filespec` may be a filepath (str) or an already-open file object.
    Returns the matched text, or None when the pattern is not found.
    """
    if isinstance(filespec, str):
        # NOTE(review): text-mode read of a binary wav can fail to decode on
        # Python 3; the module-level regex is also a str pattern -- looks like
        # Python 2 era code, confirm before relying on it.
        with open(filespec, 'r') as fd:
            m = wav_text_info_exp.match(fd.read())
            if m is not None:
                return m.groups()[0]
            else:
                return None
    else:
        m = wav_text_info_exp.match(filespec.read())
        if m is not None:
            return m.groups()[0]
        else:
            return None
def wf_and_sr(*args, **kwargs):
    """Resolve a (wave_form, sample_rate) pair from flexible input.

    Accepts a filepath string, a (wf, sr) tuple, or wf=/sr= keyword
    arguments; returns (wf, sr). Returns None when nothing matches.
    """
    if args:
        first_arg = args[0]
        if isinstance(first_arg, str):
            kwargs['filepath'] = first_arg
        elif isinstance(first_arg, tuple):
            kwargs['wf'], kwargs['sr'] = first_arg
    if 'filepath' in kwargs:
        return wf_and_sr_from_filepath(filepath=kwargs['filepath'])
    if 'wf' in kwargs:
        return kwargs['wf'], kwargs['sr']
def hear_sound(*args, **kwargs):
    """Return an IPython Audio widget for the sound given in any wf_and_sr form.

    Falls back to a single channel when the Audio widget rejects stereo data.
    """
    wf, sr = wf_and_sr(*args, **kwargs)
    # Mutate one random sample slightly; in-place change on the caller's array.
    wf[random.randint(len(wf))] *= 1.001  # hack to avoid having exactly the same sound twice (creates an Audio bug!!)
    try:
        return Audio(data=wf, rate=sr, autoplay=kwargs.get('autoplay', False))
    except ValueError:
        try:
            # just return left audio (stereo PCM signals are unsupported)
            return Audio(data=wf[0, :], rate=sr, autoplay=kwargs.get('autoplay', False))
        except:
            # last resort: assume channels are along the second axis
            return Audio(data=wf[:, 0], rate=sr, autoplay=kwargs.get('autoplay', False))
def plot_wf(*args, **kwargs):
    """Plot the wave form against time in seconds on the current matplotlib axes."""
    wf, sr = wf_and_sr(*args, **kwargs)
    plt.plot(linspace(start=0, stop=len(wf) / float(sr), num=len(wf)), wf)
def display_sound(*args, **kwargs):
    """Plot the wave form and return a playable Audio widget for the same sound."""
    plot_wf(*args, **kwargs)
    return hear_sound(*args, **kwargs)
def duration_of_wf_and_sr(wf, sr):
    """Duration, in seconds, of a wave form sampled at rate `sr`."""
    return float(len(wf)) / sr
def n_wf_points_from_duration_and_sr(duration, sr):
    """Number of samples covering `duration` seconds at sample rate `sr`."""
    return int(round(sr * duration))
def get_consecutive_zeros_locations(wf, sr, thresh_consecutive_zeros_seconds=0.1):
    """Locate runs of consecutive zero samples longer than a time threshold.

    :param wf: wave form (sequence of samples)
    :param sr: sample rate, used to convert the threshold from seconds to samples
    :param thresh_consecutive_zeros_seconds: minimum run duration to report
    :return: list of dicts {'idx': start index of the run, 'len': run length},
        in order of appearance. Empty input yields an empty list.
    """
    thresh_consecutive_zeros = thresh_consecutive_zeros_seconds * sr
    runs = list()
    cum_of_zeros = 0
    for i in range(len(wf)):
        if wf[i] == 0:
            cum_of_zeros += 1  # accumulate
        else:
            if cum_of_zeros > thresh_consecutive_zeros:
                # run of zeros ended just before index i
                runs.append({'idx': i - cum_of_zeros, 'len': cum_of_zeros})
            cum_of_zeros = 0  # reinit
    if cum_of_zeros > thresh_consecutive_zeros:
        # Bug fix: a trailing run starts at len(wf) - cum_of_zeros; the old
        # code used i - cum_of_zeros (i = last index), off by one.
        runs.append({'idx': len(wf) - cum_of_zeros, 'len': cum_of_zeros})
    return runs
def crop_head_and_tail_silence(wf):
    """Return `wf` with leading and trailing runs of exact zeros removed.

    Note: an all-zero input comes back unchanged (argmax/argmin both hit 0).
    """
    assert len(wf.shape) == 1, "The silence crop is only implemented for mono sounds"
    nonzero_mask = (wf != 0)
    first = argmax(nonzero_mask)
    # offset, counted from the end, of the last non-zero sample
    tail_offset = argmin(flipud(~nonzero_mask))
    return wf[first:len(wf) - tail_offset]
def is_wav_file(filepath):
    """True when the path's extension (as seen by os.path.splitext) is '.wav'."""
    _, extension = os.path.splitext(filepath)
    return extension == '.wav'
def wav_file_framerate(file_pointer_or_path):
    """Return a wav file's frame rate from a path or an open wave reader.

    When given a path the reader is opened and closed here; an already-open
    reader is left open for the caller.
    """
    if not isinstance(file_pointer_or_path, str):
        return file_pointer_or_path.getframerate()
    reader = wave.open(file_pointer_or_path)
    frame_rate = reader.getframerate()
    reader.close()
    return frame_rate
def wf_and_sr_from_filepath(filepath, **kwargs):
    """Load (wave_form, sample_rate) from a sound file.

    .wav files go through soundfile, supporting segment reads via the
    offset_s/duration kwargs (in seconds); other formats go through librosa.
    Pass ensure_mono=False to keep multi-channel data.
    """
    must_ensure_mono = kwargs.pop('ensure_mono', True)
    if is_wav_file(filepath):
        kwargs = dict({'always_2d': False}, **kwargs)
        if 'offset_s' in list(kwargs.keys()) or 'duration' in list(kwargs.keys()):
            # Translate the seconds-based segment spec into the frame-based
            # start/stop arguments that sf.read expects.
            sample_rate = wave.Wave_read(filepath).getframerate()
            start = int(round(kwargs.pop('offset_s', 0) * sample_rate))
            kwargs['start'] = start
            duration = kwargs.pop('duration', None)
            if duration is not None:
                kwargs['stop'] = int(start + round(duration * sample_rate))
        # Drop any kwargs sf.read doesn't accept before the call.
        kwargs = filter_kwargs_to_func_arguments(sf.read, kwargs)
        wf, sr = sf.read(filepath, **kwargs)
    else:
        # librosa uses `offset` (seconds) instead of `offset_s`.
        kwargs['offset'] = kwargs.pop('offset_s', 0.0)
        wf, sr = librosa.load(filepath, **kwargs)
    if must_ensure_mono:
        wf = ensure_mono(wf)
    return wf, sr
    # kwargs = dict({'sr': None}, **kwargs)
    # return librosa.load(filepath, **kwargs)
def wave_form(filepath, **kwargs):
    """Load a sound file and return only its wave form, discarding the sample rate."""
    wf, _ = wf_and_sr_from_filepath(filepath, **kwargs)
    return wf
def weighted_mean(yw1, yw2):
    """Weighted elementwise mean of two (values, weight) pairs.

    Both value arrays are truncated to the shorter length before mixing.
    """
    common_len = min(len(yw1[0]), len(yw2[0]))
    values_1, weight_1 = yw1[0][:common_len], yw1[1]
    values_2, weight_2 = yw2[0][:common_len], yw2[1]
    return (values_1 * weight_1 + values_2 * weight_2) / (weight_1 + weight_2)
def mk_transformed_copies_of_sound_files(source_path_iterator,
                                         file_reader=wf_and_sr_from_filepath,
                                         transform_fun=None,
                                         source_path_to_target_path=None,
                                         save_fun=None,
                                         onerror_fun=None):
    """
    Gets every filepath
    fed by source_path_iterator one by one,
    reads the file in with file_reader(filepath) to get a wave form and sample rate
    feeds these to the transform_fun(wf, sr), which returns another wf and sr,
    which are passed to save_fun(wf, sr, filepath) to be saved as a sound file,
    the target filepath being computed from source_path through the function source_path_to_target_path(path)
    If there's any errors and a onerror_fun(source_path, e) is given, it will be called instead of raising error
    """
    assert source_path_to_target_path is not None, "You must provide a save_fun (function or target folder)"
    if isinstance(source_path_to_target_path, str):
        # A string means "a target folder": derive filenames automatically.
        target_folder = source_path_to_target_path
        assert os.path.exists(target_folder), \
            "The folder {} doesn't exist".format(target_folder)

        # Shadows the string argument with an actual path-mapping function.
        def source_path_to_target_path(source_path):
            source_name = os.path.splitext(os.path.basename(source_path))[0]
            return os.path.join(target_folder, source_name + '.wav')
    if save_fun is None:
        # Default writer: save with soundfile as-is.
        def save_fun(wf, sr, filepath):
            sf.write(file=filepath, data=wf, samplerate=sr)
    for source_path in source_path_iterator:
        try:
            wf, sr = file_reader(source_path)
            if transform_fun is not None:
                wf, sr = transform_fun(wf, sr)
            target_path = source_path_to_target_path(source_path)
            save_fun(wf=wf, sr=sr, filepath=target_path)
        except Exception as e:
            if onerror_fun is not None:
                onerror_fun(source_path, e)  # best-effort mode: report and continue
            else:
                raise e
def plot_melspectrogram(spect_mat, sr=default_sr, hop_length=512, name=None):
    """Render a (log-power) mel spectrogram matrix with librosa's display helper.

    :param spect_mat: spectrogram matrix (e.g. output of Sound.melspectr_matrix)
    :param sr: sample rate, used to render the time axis
    :param hop_length: hop length the spectrogram was computed with
    :param name: optional sound name to include in the plot title
    """
    # Make a new figure
    plt.figure(figsize=(12, 4))
    # Display the spectrogram on a mel scale
    # sample rate and hop length parameters are used to render the time axis
    librosa.display.specshow(spect_mat, sr=sr, hop_length=hop_length, x_axis='time', y_axis='mel')
    # Put a descriptive title on the plot
    if name is not None:
        plt.title('mel power spectrogram of "{}"'.format(name))
    else:
        plt.title('mel power spectrogram')
    # draw a color bar
    plt.colorbar(format='%+02.0f dB')
    # Make the figure layout compact
    plt.tight_layout()
class Sound(object):
def __init__(self, wf=None, sr=default_sr, name=''):
if wf is None:
wf = np.array([])
self.wf = wf.copy()
self.sr = sr
self.name = name
self.info = {}
def copy(self):
return Sound(wf=self.wf.copy(), sr=self.sr, name=self.name)
####################################################################################################################
# CREATION
@classmethod
def from_file(cls, filepath, name=None, get_wav_info=False, **kwargs):
"""
Construct sound object from sound file
:param filepath: filepath of the sound file
:param name: name to give this sound (will default to file name)
:param kwargs: additional options, such as:
* offset_s and duration (to retrieve only a segment of sound). Works with .wav file only
* ensure_mono (if present and True (the default), will convert to mono)
:return:
"""
file_name, extension = os.path.splitext((os.path.basename(filepath)))
name = name or file_name
# kwargs = dict({'always_2d': False, 'ensure_mono': True}, **kwargs)
wf, sr = wf_and_sr_from_filepath(filepath, **kwargs)
if name is None:
name = filepath
sound = Sound(wf=wf, sr=sr, name=name)
if get_wav_info and extension == '.wav':
try:
sound.info = sound_file_info_dict(filepath)
offset_s = kwargs.get('offset_s', None)
if offset_s is not None:
sound.info['offset_s'] = float(offset_s)
duration = kwargs.get('duration', None)
if duration is not None:
sound.info['duration'] = float(duration)
if duration is not None or offset_s is not None:
offset_s = offset_s or 0
sound.info['frames'] = int((duration - offset_s) * default_sr)
sound.info.pop('size')
except Exception:
pass
return sound
@classmethod
def from_(cls, sound):
if isinstance(sound, tuple) and len(sound) == 2: # then it's a (wf, sr) tuple
return Sound(sound[0], sound[1])
elif isinstance(sound, str) and os.path.isfile(sound):
return Sound.from_file(sound)
elif isinstance(sound, dict):
if 'wf' in sound and 'sr' in sound:
return Sound(sound['wf'], sound['sr'])
else:
return Sound.from_sref(sound)
elif hasattr(sound, 'wf') and hasattr(sound, 'sr'):
return Sound(sound.wf, sound.sr)
else:
try:
return Sound.from_sound_iter(sound)
except:
raise TypeError("Couldn't figure out how that format represents sound")
@classmethod
def from_sref(cls, sref):
wf, sr = wf_and_sr_from_filepath(**complete_sref(sref))
# filepath = sref['filepath']
# sample_rate = wave.Wave_read(sref['filepath']).getframerate()
# start = int(round(sref.get('offset_s', 0) * sample_rate))
# duration = sref.get('duration', None)
# kwargs = {}
# if duration is not None:
# kwargs = {'stop': int(start + round(duration * sample_rate))}
# wf, sr = sf.read(filepath, always_2d=False, start=start, **kwargs)
return Sound(wf, sr, name=sref.get('name', sref['filepath']))
@classmethod
def from_sound_iter(cls, sound_iter):
wf = []
for sound in sound_iter:
if len(wf) == 0:
sr = sound.sr
wf.extend(list(sound.wf))
return cls(wf=np.array(wf), sr=sr)
@classmethod
def from_sound_mix_spec(cls,
sound_mix_spec,
name='from_sound_mix_spec',
pre_normalization_function=lambda wf: wf / percentile(abs(wf), 95)):
"""
Mix all sounds specified in the sound_mix_spec.
A sound_mix_spec is an iterator that yields either of these formats:
* a wave form
* a Sound object
* (This is the complete specification) a {sound, offset_s, weight} dict indicating
offset_s (default 0 seconds): where the sound should be inserted
weight (default 1): a weight, relative to the other sounds in the iterator, indicating whether the
"volume" should be increased or decreased before mixing the sound
Note: All wave forms are normalized before being multiplied by the given weight. The normalization function is
given by the pre_normalization_function argument (default is no normalization)
Note: If some of the sounds in the sound_mix_spec have different sample rates, they will be resampled to the
sample rate of the first sound encountered. This process requires (not so fast) fast fourrier transform,
so better have the same sample rate.
"""
def _mk_sound_mix_spec(_sound_mix_spec):
sound_mix_spec_default = dict(sound=None, offset_s=0, weight=1)
_sound_mix_spec = _sound_mix_spec.copy()
if isinstance(_sound_mix_spec, dict): # if sound_mix_spec is a dict...
if 'filepath' in list(_sound_mix_spec.keys()): # ... and it has a 'filepath' key...
sref = _sound_mix_spec # ... assume it's an sref...
sound = Sound.from_sref(sref) # ... and get the sound from it, and make an actual sound_mix_spec
_sound_mix_spec = dict(sound_mix_spec_default, sound=sound)
else: # If it's not an sref...
sound = _sound_mix_spec['sound'] # ... assume it has a sound key
if isinstance(sound, dict): # and if that "sound" is an sref, replace it by a actual sound object
_sound_mix_spec['sound'] = Sound.from_sref(_sound_mix_spec['sound'])
_sound_mix_spec = dict(sound_mix_spec_default, **_sound_mix_spec)
elif isinstance(_sound_mix_spec, ndarray):
_sound_mix_spec = dict(sound_mix_spec_default, sound=Sound(wf=_sound_mix_spec, sr=None))
elif hasattr(_sound_mix_spec, 'wf'):
_sound_mix_spec = dict(sound_mix_spec_default, sound=_sound_mix_spec)
else:
_sound_mix_spec = dict(sound_mix_spec_default, **_sound_mix_spec)
_sound_mix_spec['sound'] = _sound_mix_spec[
'sound'].copy() # to make sure the we don't overwrite it in manip
_sound_mix_spec['sound'].wf = ensure_mono(_sound_mix_spec['sound'].wf)
# print(sound_mix_spec)
return _sound_mix_spec
# if the sound_iterator is a dict, take its values (ignore the keys)
if isinstance(sound_mix_spec, dict):
sound_mix_spec = list(sound_mix_spec.values())
sound_mix_spec = iter(sound_mix_spec)
# compute the weight factor. All input weights will be multiplied by this factor to avoid last sounds having
# more volume than the previous ones
# take the first sound as the sound to begin (and accumulate) with. As a result, the sr will be taken from there
spec = _mk_sound_mix_spec(next(sound_mix_spec))
result_sound = spec['sound']
result_sound_sr = result_sound.sr # will be the final sr, and all other sounds will be resampled to it
result_sound.name = name
result_sound.info = {} # we don't want to keep the first sound's info around
# offset the sound by required amount
offset_length = ceil(spec.get('offset_s', 0) * result_sound_sr)
result_sound.wf = prefix_with_silence(result_sound.wf, offset_length)
# all subsequent weights should be multiplied by a weight_factor,
# since the accumulating sound is considered to be of unit weight in the Sound.mix_in() method:
weight_factor = 1 / spec.get('weight', 1.0)
# initialize sound counter
sounds_mixed_so_far = 1
try:
while True:
spec = _mk_sound_mix_spec(next(sound_mix_spec))
# resample sound to match self, if necessary
# (mix_in() method does it, but better do it before,
# because we're probably going to prefix this sound with silence, so it'll be longer)
if spec['sound'].sr != result_sound.sr:
spec['sound'] = spec['sound'].resample(new_sr=result_sound.sr)
# divide weight by number of sounds mixed so far, to avoid last sounds having more volume
# than the previous ones
weight = weight_factor * spec['weight'] / sounds_mixed_so_far
# print(weight)
# offset the new sound
# print sound_mix_spec['sound_tag'], sound_mix_spec.get('offset_s')
offset_length = ceil(spec.get('offset_s', 0) * result_sound_sr)
spec['sound'].wf = prefix_with_silence(spec['sound'].wf, offset_length)
# finally, mix these sounds
result_sound.mix_in(spec['sound'],
weight=weight,
pre_normalization_function=pre_normalization_function)
# increment the counter
sounds_mixed_so_far += 1
except StopIteration:
pass
return result_sound
def save_to_wav(self, filepath=None, samplerate=None, **kwargs):
samplerate = samplerate or self.sr
filepath = filepath or (self.name + '.wav')
sf.write(filepath, self.wf, samplerate=samplerate, **kwargs)
####################################################################################################################
# TRANSFORMATIONS
def ensure_mono(self):
self.wf = ensure_mono(self.wf)
def resample(self, new_sr, inplace=False):
new_wf = resample_wf(self.wf, self.sr, new_sr)
if not inplace:
return Sound(wf=new_wf, sr=new_sr, name=self.name)
else:
self.wf = new_wf
self.sr = new_sr
def crop_with_idx(self, first_idx, last_idx):
"""
Crop with frame indices.
:param first_idx: First frame index (starting with 0, like with lists)
:param last_idx: Last frame index. Like with list indices again. If frame n is actually the (n+1)th frame...
:return:
"""
cropped_sound = self.copy()
cropped_sound.wf = cropped_sound.wf[first_idx:last_idx]
return cropped_sound
# def __getitem__
def crop_with_seconds(self, first_second, last_second):
return self.crop_with_idx(int(round(first_second * self.sr)), int(round(last_second * self.sr)))
def mix_in(self,
sound,
weight=1,
pre_normalization_function=lambda wf: wf / percentile(abs(wf), 95)):
# resample sound to match self, if necessary
if sound.sr != self.sr:
sound = sound.resample(new_sr=self.sr)
new_wf = sound.wf.copy()
# suffix the shortest sound with silence to match lengths
existing_sound_length = len(self.wf)
new_sound_length = len(new_wf)
length_difference = existing_sound_length - new_sound_length
if length_difference > 0:
new_wf = suffix_with_silence(new_wf, length_difference)
elif length_difference < 0:
self.wf = suffix_with_silence(self.wf, -length_difference)
# mix the new wf into self.wf
# print(pre_normalization_function(arange(100)))
self.wf = weighted_mean([pre_normalization_function(self.wf), 1],
[pre_normalization_function(new_wf), weight])
def append(self, sound, glue=0.0):
assert sound.sr == self.sr, "you can only append sounds if they have the same sample rate at this point"
if isinstance(glue, (float, int)):
n_samples = int(glue * self.sr)
glue = zeros(n_samples)
self.wf = hstack((self.wf, glue, sound.wf))
def melspectr_matrix(self, **mel_kwargs):
mel_kwargs = dict({'n_fft': 2048, 'hop_length': 512, 'n_mels': 128}, **mel_kwargs)
S = librosa.feature.melspectrogram(np.array(self.wf).astype(float), sr=self.sr, **mel_kwargs)
# Convert to log scale (dB). We'll use the peak power as reference.
return librosa.amplitude_to_db(S, ref=np.max)
####################################################################################################################
# DISPLAY FUNCTIONS
def plot_wf(self, **kwargs):
kwargs = dict(alpha=0.8, **kwargs)
plot_wf(wf=self.wf.copy(), sr=self.sr, **kwargs)
def hear(self, autoplay=False, **kwargs):
wf = np.array(ensure_mono(self.wf)).astype(float)
wf[np.random.randint(
len(wf))] *= 1.001 # hack to avoid having exactly the same sound twice (creates an Audio bug)
return Audio(data=wf, rate=self.sr, autoplay=autoplay, **kwargs)
def display_sound(self, **kwargs):
self.plot_wf()
return self.hear(**kwargs)
def display(self, sound_plot='mel', autoplay=False, **kwargs):
"""
:param sound_plot: 'mel' (default) to plot melspectrogram, 'wf' to plot wave form, and None to plot nothing at all
:param kwargs:
:return:
"""
if sound_plot == 'mel':
self.melspectrogram(plot_it=True, **kwargs)
elif sound_plot == 'wf':
self.plot_wf(**kwargs)
return self.hear(autoplay=autoplay)
def melspectrogram(self, plot_it=False, **mel_kwargs):
mel_kwargs = dict({'n_fft': 2048, 'hop_length': 512, 'n_mels': 128}, **mel_kwargs)
log_S = self.melspectr_matrix(**mel_kwargs)
if plot_it:
plot_melspectrogram(log_S, sr=self.sr, hop_length=mel_kwargs['hop_length'])
return log_S
####################################################################################################################
# MISC
def duration(self):
    """Duration of the sound, as computed by duration_of_wf_and_sr
    (presumably seconds -- confirm against that helper)."""
    return duration_of_wf_and_sr(self.wf, self.sr)
def wf_sr_dict(self):
    """Return the waveform and sample rate as a {'wf': ..., 'sr': ...} dict."""
    return dict(wf=self.wf, sr=self.sr)
def wf_sr_tuple(self):
    """Return (waveform, sample_rate) as a pair."""
    return (self.wf, self.sr)
| {
"repo_name": "thorwhalen/ut",
"path": "sound/util.py",
"copies": "1",
"size": "26365",
"license": "mit",
"hash": -8427761824746068000,
"line_mean": 38.766214178,
"line_max": 122,
"alpha_frac": 0.5789114356,
"autogenerated": false,
"ratio": 3.648629947412123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4727541383012123,
"avg_score": null,
"num_lines": null
} |
"""Audio module."""
import logging
import pygame
from .assets import AssetManager
from .modules import Module
from .locals import * # noqa
logger = logging.getLogger(__name__)
class AudioModule(Module):

    """
    Audio module.

    The audio manager is in charge of playing music and sound.
    """

    def __init__(self):
        """Constructor."""
        self.configuration = self.container.get(Configuration)
        self.assets = self.container.get(AssetManager)

        # Volume settings: master scales both background music and effects.
        self.master_volume = self.configuration.get('akurra.audio.master.volume', 1.0)
        self.bgm_volume = self.configuration.get('akurra.audio.background_music.volume', 1.0)
        self.sfx_volume = self.configuration.get('akurra.audio.special_effects.volume', 1.0)

        # Channel slots; False marks a free slot. Slot i maps to mixer channel i + 1.
        self.channels = [False for _ in range(0, 8)]
        self.sounds = {}
        self.music = {}

    def add_channel(self, name):
        """Claim the first free channel slot for `name`."""
        for i in range(0, len(self.channels)):
            if self.channels[i] is False:
                self.channels[i] = name
                logger.debug('Added audio channel "%s" [slot=%s]', name, i + 1)
                # BUG FIX: stop after claiming one slot -- previously the loop
                # kept going and assigned the name to every free slot.
                return

    def get_channel(self, name):
        """Retrieve the mixer channel registered under `name`, or None."""
        for i in range(0, len(self.channels)):
            # BUG FIX: compare by equality, not identity -- `is` on strings
            # only works by accident of interning.
            if name == self.channels[i]:
                return pygame.mixer.Channel(i + 1)
        return None

    def remove_channel(self, name):
        """Free the channel slot registered under `name`."""
        for i in range(0, len(self.channels)):
            if self.channels[i] == name:
                # BUG FIX: mark the slot free instead of list.remove(), which
                # shrank the list and shifted every later slot's index.
                self.channels[i] = False
                logger.debug('Removed audio channel "%s" [slot=%s]', name, i + 1)
                return

    def add_sound(self, relative_path, name):
        """Load a sound asset and register it under `name`."""
        sound = self.assets.get_sound(relative_path)
        self.sounds[name] = sound
        logger.debug('Added sound "%s"', name)

    def remove_sound(self, name):
        """Remove a sound."""
        self.sounds.pop(name, None)
        logger.debug('Removed sound "%s"', name)

    def add_music(self, relative_path, name):
        """Register the absolute path of a music asset under `name`."""
        absolute_path = self.assets.get_path(relative_path)
        self.music[name] = absolute_path
        logger.debug('Added music "%s"', name)

    def remove_music(self, name):
        """Remove a music entry."""
        self.music.pop(name, None)
        logger.debug('Removed music "%s"', name)

    def play_music(self, name, loop_counter=-1, starting_point=0.0):
        """Play background music (only one piece can play at a time).

        Set loop_counter to -1 to loop indefinitely.
        """
        logger.debug('Playing music "%s"', name)
        pygame.mixer.music.load(self.music[name])
        pygame.mixer.music.set_volume(self.master_volume * self.bgm_volume)
        pygame.mixer.music.play(loop_counter, starting_point)

    def stop_music(self):
        """Stop playing background music."""
        # BUG FIX: `self` was missing from the signature, so calling
        # instance.stop_music() raised TypeError.
        logger.debug("Stopping background music")
        pygame.mixer.music.stop()

    def play_sound(self, name, channel=None, queue=False):
        """Play a sound effect, optionally on a named channel.

        When `queue` is true the sound is queued behind the channel's
        current sound; otherwise it only plays if the channel is idle.
        """
        sound = self.sounds[name]
        sound.set_volume(self.master_volume * self.sfx_volume)
        if channel:
            channel = self.get_channel(channel)
            if queue:
                channel.queue(sound)
            elif not channel.get_busy():
                channel.play(sound)
        else:
            sound.play()

    def stop_sound(self, name):
        """Stop sound effect."""
        logger.debug('Stopping sound "%s"', name)
        self.sounds[name].stop()
| {
"repo_name": "multatronic/akurra",
"path": "akurra/audio.py",
"copies": "1",
"size": "3596",
"license": "mit",
"hash": 7836698978463048000,
"line_mean": 30.8230088496,
"line_max": 93,
"alpha_frac": 0.5895439377,
"autogenerated": false,
"ratio": 3.83778014941302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49273240871130203,
"avg_score": null,
"num_lines": null
} |
'''Audio module
This module contains all audio related classes.
'''
import logging
import sdl2
from sdl2 import sdlmixer as mixer
from sdl2.ext.compat import byteify
from vulk.exception import SDL2Error, SoundError
logger = logging.getLogger()
class VulkAudio():
    '''This class is only used in baseapp.

    It initializes and closes the audio system.
    '''

    def open(self, configuration):
        '''Open and configure audio system

        *Parameters:*

        - `configuration`: Configuration parameters from Application

        *Raises:*

        `SDL2Error` if the audio subsystem or the mixer cannot be opened.
        '''
        if sdl2.SDL_InitSubSystem(sdl2.SDL_INIT_AUDIO) != 0:
            msg = "Cannot initialize audio system: %s" % sdl2.SDL_GetError()
            logger.critical(msg)
            raise SDL2Error(msg)

        if mixer.Mix_OpenAudio(mixer.MIX_DEFAULT_FREQUENCY,
                               mixer.MIX_DEFAULT_FORMAT, 2, 1024):
            msg = "Cannot open mixed audio: %s" % mixer.Mix_GetError()
            logger.critical(msg)
            raise SDL2Error(msg)

        mixer.Mix_AllocateChannels(configuration.audio_channel)
        logger.info("Audio initialized")

    def close(self):
        '''Close the audio system'''
        mixer.Mix_CloseAudio()
        # BUG FIX: SDL_Quit() takes no arguments and tears down *every* SDL
        # subsystem; SDL_QuitSubSystem is the counterpart of SDL_InitSubSystem
        # used in open() above.
        sdl2.SDL_QuitSubSystem(sdl2.SDL_INIT_AUDIO)
        logger.info("Audio stopped")
class Sound():
    '''
    Sound effects are small audio samples, usually no longer than a few
    seconds, that are played back on specific game events such as a character
    jumping or shooting a gun.

    Sound effects can be stored in various formats. Vulk is based on SDL2 and
    thus supports WAVE, AIFF, RIFF, OGG, and VOC files.
    '''

    def __init__(self, path):
        '''Load the sound file

        *Parameters:*

        - `path`: Path to the sound file
        '''
        self.sample = mixer.Mix_LoadWAV(byteify(path, "utf-8"))
        if not self.sample:
            msg = "Cannot load file %s" % path
            logger.error(msg)
            raise SoundError(msg)

    def play(self, volume=1.0, repeat=1):
        '''
        Play the sound

        *Parameters:*

        - `volume`: Sound volume 0.0 to 1.0
        - `repeat`: Number of time to repeat the sound, 0=infinity

        *Returns:*

        Channel id of the sound
        '''
        # Mix_PlayChannel counts *extra* loops, hence repeat - 1;
        # repeat=0 becomes -1, which SDL_mixer treats as "loop forever".
        assigned_channel = mixer.Mix_PlayChannel(-1, self.sample, repeat - 1)
        if assigned_channel == -1:
            failure = "Cannot play the sound: %s" % mixer.Mix_GetError()
            logger.error(failure)
            raise SoundError(failure)
        mixer.Mix_Volume(assigned_channel, int(mixer.MIX_MAX_VOLUME * volume))
        return assigned_channel
class Music():
    '''
    For any sound that's longer than a few seconds it is preferable to stream
    it from disk instead of fully loading it into RAM. Vulk provides a Music
    class that lets you do that.

    Music can be stored in various formats. Vulk is based on SDL2 and
    thus supports WAVE, MOD, MIDI, OGG, MP3, FLAC files.

    **Note: You can play only one music at a time**
    '''

    def __init__(self, path):
        '''Load the music file

        *Parameters:*

        - `path`: Path to the music file
        '''
        self.music = mixer.Mix_LoadMUS(byteify(path, "utf-8"))
        if not self.music:
            msg = "Cannot load file %s" % path
            logger.error(msg)
            raise SoundError(msg)

    def play(self, volume=1.0, repeat=1):
        '''
        Play the music

        *Parameters:*

        - `volume`: Sound volume 0.0 to 1.0
        - `repeat`: Number of time to repeat the sound, 0=infinity

        *Returns:*

        Channel id of the sound

        NOTE(review): the docstring promises a channel id, but this method
        (like the original) returns None; preserved as-is.
        '''
        # repeat=0 means "loop forever" for Mix_PlayMusic (-1).
        loops = -1 if not repeat else repeat
        if mixer.Mix_PlayMusic(self.music, loops) == -1:
            failure = "Cannot play the music: %s" % mixer.Mix_GetError()
            logger.error(failure)
            raise SoundError(failure)
        mixer.Mix_VolumeMusic(int(mixer.MIX_MAX_VOLUME * volume))
| {
"repo_name": "realitix/vulk",
"path": "vulk/audio.py",
"copies": "1",
"size": "3938",
"license": "apache-2.0",
"hash": -2530578404915854300,
"line_mean": 26.1586206897,
"line_max": 77,
"alpha_frac": 0.5896394109,
"autogenerated": false,
"ratio": 3.819592628516004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9909232039416004,
"avg_score": 0,
"num_lines": 145
} |
"""Audio module provides all the audio features (playing of sounds) for the media controller."""
import logging
from kivy.clock import Clock
from mpf.core.case_insensitive_dict import CaseInsensitiveDict
from mpf.core.utility_functions import Util
from mpfmc.core.audio.audio_interface import AudioInterface
from mpfmc.core.audio.audio_exception import AudioException
# Public names re-exported by this package.
__all__ = ('SoundSystem',
           'AudioInterface',
           'AudioException')

# ---------------------------------------------------------------------------
# Default sound system and track values
# ---------------------------------------------------------------------------
DEFAULT_AUDIO_ENABLED = True
DEFAULT_AUDIO_BUFFER_SAMPLE_SIZE = 2048  # samples; must be a power of two (see SoundSystem.__init__)
DEFAULT_SAMPLE_RATE = 44100              # Hz
DEFAULT_AUDIO_CHANNELS = 1
DEFAULT_MASTER_VOLUME = 0.5              # 0.0 .. 1.0
DEFAULT_TRACK_MAX_SIMULTANEOUS_SOUNDS = 1
DEFAULT_TRACK_VOLUME = 0.5               # 0.0 .. 1.0
# pylint: disable=too-many-instance-attributes
class SoundSystem:
    """Sound system for MPF.

    The SoundSystem class is used to read the sound system settings from the
    config file and then initialize the audio interface and create the
    specified tracks.
    """

    # pylint: disable=invalid-name, too-many-branches
    def __init__(self, mc):
        """Initialise sound system.

        mc: the media controller; supplies machine_config and the event system.
        """
        self.mc = mc
        self.log = logging.getLogger('SoundSystem')
        self._initialized = False            # flipped to True only on full success
        self.audio_interface = None
        self.config = dict()
        self.sound_events = dict()
        self.tracks = CaseInsensitiveDict()  # track name -> track object
        self.clock_event = None
        self.log.debug("Loading the Sound System")
        # Load configuration for sound system
        if 'sound_system' not in self.mc.machine_config:
            self.log.info("SoundSystem: Using default 'sound_system' settings")
            self.config = dict()
        else:
            self.config = self.mc.machine_config['sound_system']
        # TODO: Use config spec validator
        # Validate configuration and provide default values where needed
        if 'enabled' not in self.config:
            self.config['enabled'] = DEFAULT_AUDIO_ENABLED
        # If the sound system has been disabled, abort initialization
        if not self.config['enabled']:
            self.log.info("SoundSystem: The sound system has been disabled in "
                          "the configuration file (enabled: False). No audio "
                          "features will be available.")
            return
        # Buffer size must be a power of two; fall back to the default otherwise.
        if 'buffer' not in self.config or self.config['buffer'] == 'auto':
            self.config['buffer'] = DEFAULT_AUDIO_BUFFER_SAMPLE_SIZE
        elif not AudioInterface.power_of_two(self.config['buffer']):
            self.log.warning("SoundSystem: The buffer setting is not a power of "
                             "two. Default buffer size will be used.")
            self.config['buffer'] = DEFAULT_AUDIO_BUFFER_SAMPLE_SIZE
        if 'frequency' not in self.config or self.config['frequency'] == 'auto':
            self.config['frequency'] = DEFAULT_SAMPLE_RATE
        if 'channels' not in self.config:
            self.config['channels'] = DEFAULT_AUDIO_CHANNELS
        if 'master_volume' not in self.config:
            self.config['master_volume'] = DEFAULT_MASTER_VOLUME
        # Initialize audio interface library (get audio output)
        try:
            self.audio_interface = AudioInterface(
                rate=self.config['frequency'],
                channels=self.config['channels'],
                buffer_samples=self.config['buffer'])
        except AudioException:
            # Failure here is non-fatal: the system stays disabled.
            self.log.error("Could not initialize the audio interface. "
                           "Audio features will not be available.")
            self.audio_interface = None
            return
        # Setup tracks in audio system (including initial volume levels)
        if 'tracks' in self.config:
            for track_name, track_config in self.config['tracks'].items():
                self._create_track(track_name, track_config)
        else:
            self._create_track('default')
            self.log.info("No audio tracks are specified in your machine config file. "
                          "a track named 'default' has been created.")
        # Set initial master volume level to off
        self.master_volume = 0.0
        if "master_volume" in self.config:
            self.log.warning("master_volume in sound_system is deprecated and will be removed in the future.")
        # Establish machine tick function callback (will process internal audio events)
        self.clock_event = Clock.schedule_interval(self.tick, 0)
        # Start audio engine processing
        self.audio_interface.enable()
        self._initialized = True
        # React to shutdown and to changes of the master_volume machine variable.
        self.mc.events.add_handler("shutdown", self.shutdown)
        self.mc.events.add_handler("machine_var_master_volume", self._set_volume)

    def _set_volume(self, **kwargs):
        # Event handler: mirror the master_volume machine variable onto the
        # audio interface via the property setter below.
        self.master_volume = kwargs['value']

    def shutdown(self, **kwargs):
        """Shuts down the audio interface."""
        del kwargs
        if self.enabled:
            self.audio_interface.shutdown()
            self._initialized = False

    @property
    def enabled(self):
        """Return true if enabled."""
        return self._initialized

    @property
    def master_volume(self) -> float:
        """Return master volume."""
        return self.audio_interface.get_master_volume()

    @master_volume.setter
    def master_volume(self, value: float):
        """Set master volume (clamped to 0.0 .. 1.0)."""
        # Constrain volume to the range 0.0 to 1.0
        value = min(max(value, 0.0), 1.0)
        self.audio_interface.set_master_volume(value)
        self.log.info("Setting master volume to %s", value)

    @property
    def default_track(self):
        """Return default track (track index 0)."""
        return self.audio_interface.get_track(0)

    def _create_track(self, name, config=None):  # noqa
        """Create a track in the audio system with the specified name and configuration.

        Args:
            name: The track name (which will be used to reference the track, such as
                "voice" or "sfx".
            config: A Python dictionary containing the configuration settings for
                this track.

        Raises:
            AudioException: if the audio interface is unavailable, the name is
                already taken, or the track 'type' is invalid.
        """
        if self.audio_interface is None:
            raise AudioException("Could not create '{}' track - the sound_system has "
                                 "not been initialized".format(name))
        # Validate track config parameters
        if name in self.tracks:
            raise AudioException("Could not create '{}' track - a track with that name "
                                 "already exists".format(name))
        if not config:
            config = {}
        if 'volume' not in config:
            config['volume'] = DEFAULT_TRACK_VOLUME
        if 'type' not in config:
            config['type'] = 'standard'
        if config['type'] not in ['standard', 'playlist', 'sound_loop']:
            raise AudioException("Could not create '{}' track - an illegal value for "
                                 "'type' was found".format(name))
        # Validate type-specific parameters and create the track
        track = None
        if config['type'] == 'standard':
            if 'simultaneous_sounds' not in config:
                config['simultaneous_sounds'] = DEFAULT_TRACK_MAX_SIMULTANEOUS_SOUNDS
            track = self.audio_interface.create_standard_track(self.mc,
                                                               name,
                                                               config['simultaneous_sounds'],
                                                               config['volume'])
        elif config['type'] == 'playlist':
            config.setdefault('crossfade_time', 0.0)
            config['crossfade_time'] = Util.string_to_secs(config['crossfade_time'])
            track = self.audio_interface.create_playlist_track(self.mc,
                                                               name,
                                                               config['crossfade_time'],
                                                               config['volume'])
        elif config['type'] == 'sound_loop':
            if 'max_layers' not in config:
                config['max_layers'] = 8
            track = self.audio_interface.create_sound_loop_track(self.mc,
                                                                 name,
                                                                 config['max_layers'],
                                                                 config['volume'])
        if track is None:
            raise AudioException("Could not create '{}' track due to an error".format(name))
        self.tracks[name] = track
        # Optional event lists fired on track state changes.
        if 'events_when_stopped' in config and config['events_when_stopped'] is not None:
            track.events_when_stopped = Util.string_to_event_list(config['events_when_stopped'])
        if 'events_when_played' in config and config['events_when_played'] is not None:
            track.events_when_played = Util.string_to_event_list(config['events_when_played'])
        if 'events_when_paused' in config and config['events_when_paused'] is not None:
            track.events_when_paused = Util.string_to_event_list(config['events_when_paused'])
        if 'events_when_resumed' in config and config['events_when_resumed'] is not None:
            track.events_when_resumed = Util.string_to_event_list(config['events_when_resumed'])

    def tick(self, dt):
        """Clock callback function; lets the audio interface process its queue."""
        del dt
        self.audio_interface.process()
| {
"repo_name": "missionpinball/mpf-mc",
"path": "mpfmc/core/audio/__init__.py",
"copies": "1",
"size": "9669",
"license": "mit",
"hash": 3346656535099191000,
"line_mean": 40.3205128205,
"line_max": 110,
"alpha_frac": 0.5717240666,
"autogenerated": false,
"ratio": 4.621892925430211,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.569361699203021,
"avg_score": null,
"num_lines": null
} |
from subprocess import Popen, PIPE
from scipy.io import wavfile
from scikits.talkbox.features.mfcc import mfcc
import os, tempfile, warnings
import numpy as np
def find_offset(file1, file2, fs=8000, trim=60*15, correl_nframes=1000):
    """Estimate the time offset of `file2`'s audio inside `file1`.

    Both files are decoded via ffmpeg to mono `fs` Hz wavs (first `trim`
    seconds only), MFCC features are extracted, standardized, and
    cross-correlated over `correl_nframes` frames.

    Returns (offset_in_seconds, standard_score_of_the_correlation_peak).
    """
    tmp1 = convert_and_trim(file1, fs, trim)
    tmp2 = convert_and_trim(file2, fs, trim)
    # Removing warnings because of 18 bits block size
    # outputted by ffmpeg
    # https://trac.ffmpeg.org/ticket/1843
    warnings.simplefilter("ignore", wavfile.WavFileWarning)
    # Scale 16-bit PCM samples into [-1, 1).
    a1 = wavfile.read(tmp1, mmap=True)[1] / (2.0 ** 15)
    a2 = wavfile.read(tmp2, mmap=True)[1] / (2.0 ** 15)
    # Add tiny noise so exact zeros (ffmpeg's leading padding) don't break
    # the log step inside the MFCC computation.
    a1 = ensure_non_zero(a1)
    a2 = ensure_non_zero(a2)
    mfcc1 = mfcc(a1, nwin=256, nfft=512, fs=fs, nceps=13)[0]
    mfcc2 = mfcc(a2, nwin=256, nfft=512, fs=fs, nceps=13)[0]
    mfcc1 = std_mfcc(mfcc1)
    mfcc2 = std_mfcc(mfcc2)
    c = cross_correlation(mfcc1, mfcc2, nframes=correl_nframes)
    max_k_index = np.argmax(c)
    # The MFCC window overlap is hardcoded in scikits.talkbox
    # (160-sample hop), so frame index -> seconds is k * 160 / fs.
    offset = max_k_index * 160.0 / float(fs)  # * over / sample rate
    score = (c[max_k_index] - np.mean(c)) / np.std(c)  # standard score of peak
    os.remove(tmp1)
    os.remove(tmp2)
    return offset, score
def ensure_non_zero(signal):
    """Return a copy of `signal` with a tiny amount of noise added.

    We add a little bit of static to avoid
    'divide by zero encountered in log' during MFCC computation.

    BUG FIX: the original used `signal += ...`, which mutated the caller's
    array in place (and would fail outright on read-only input buffers).
    We now leave the input untouched and return a new array.
    """
    return signal + np.random.random(len(signal)) * 10 ** -10
def cross_correlation(mfcc1, mfcc2, nframes):
    """Slide the first `nframes` frames of mfcc2 along mfcc1.

    For each window position, the per-coefficient dot products are summed
    over frames and the norm of that vector is recorded. Returns an array
    with one correlation value per window position.
    """
    template = mfcc2[:nframes]
    n_positions = mfcc1.shape[0] - nframes + 1
    c = np.zeros(n_positions)
    for k in range(n_positions):
        window = mfcc1[k:k + nframes]
        c[k] = np.linalg.norm((window * template).sum(axis=0))
    return c
def std_mfcc(mfcc):
    """Standardize each MFCC coefficient to zero mean and unit variance
    (column-wise z-score)."""
    mu = np.mean(mfcc, axis=0)
    sigma = np.std(mfcc, axis=0)
    return (mfcc - mu) / sigma
def convert_and_trim(afile, fs, trim):
    """Decode `afile` with ffmpeg to a mono, `fs` Hz, 16-bit PCM wav,
    keeping only the first `trim` seconds.

    Returns the path of the temporary wav file; the caller is responsible
    for deleting it.

    BUG FIX: the original created a NamedTemporaryFile, closed it (deleting
    the file) and reused the name -- a classic temp-name race. mkstemp keeps
    the file, and ffmpeg's '-y' flag lets it overwrite that existing file.
    """
    fd, tmp_name = tempfile.mkstemp(prefix='offset_', suffix='.wav')
    os.close(fd)  # ffmpeg writes the file itself
    proc = Popen([
        'ffmpeg', '-loglevel', 'panic', '-y', '-i', afile,
        '-ac', '1', '-ar', str(fs), '-ss', '0', '-t', str(trim),
        '-acodec', 'pcm_s16le', tmp_name
    ], stderr=PIPE)
    proc.communicate()
    if proc.returncode != 0:
        os.remove(tmp_name)  # don't leak the temp file on failure
        raise Exception("FFMpeg failed")
    return tmp_name
| {
"repo_name": "bbcrd/audio-offset-finder",
"path": "audio_offset_finder/audio_offset_finder.py",
"copies": "1",
"size": "3035",
"license": "apache-2.0",
"hash": -2488187212178116000,
"line_mean": 36.4691358025,
"line_max": 82,
"alpha_frac": 0.6622734761,
"autogenerated": false,
"ratio": 2.996051332675222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.914277065971294,
"avg_score": 0.0031108298124565186,
"num_lines": 81
} |
# Audio parser.
# (c) Adrian deWynter, 2016
import speech_recognition as sr
import sys
import os
# Raw command-line arguments; parsed by hand further below (Python 2 script).
args = sys.argv
# For the moment, sphinx is the only interpreter (you can change it here.)
engine="sphinx"
# Credentials: either a single API key, or a username/password pair
# (Houndify and IBM need the pair).
key=""
usr,pwd="",""
# When True, append the transcription to OUTPUT instead of printing it.
writeMode = False
# Resolved credential passed to the engine wrapper: a str or [usr, pwd].
KEY=""
# Required input .wav path and optional output file path.
FILEPATH=""
OUTPUT=""
# For readability, all functions are contained here
# (two need wrappers to make up for the extra args)
r = sr.Recognizer()
def wrapper_houndify(audio, key):
    """Adapter: Houndify wants the client id and client key unpacked
    from the two-element `key` list."""
    client_id, client_key = key
    return r.recognize_houndify(audio, client_id, client_key)
def wrapper_ibm(audio, key):
    """Adapter: IBM wants the username and password unpacked from the
    two-element `key` list."""
    username, password = key
    return r.recognize_ibm(audio, username, password)
# Dispatch table: engine name -> recognizer callable taking (audio, KEY).
# Sphinx appears here too but is special-cased below since it needs no key.
functions={"google":r.recognize_google,
           "wit.ai":r.recognize_wit,
           "ibm":wrapper_ibm,
           "bing":r.recognize_bing,
           "houndify":wrapper_houndify,
           "api.ai":r.recognize_api,
           "sphinx":r.recognize_sphinx}
def displayHelp():
    """Print usage information and exit with status -1.

    Uses single-argument print() calls, which behave identically under
    Python 2 (where the rest of this script's print statements live) and
    are also valid Python 3.
    """
    print("Hi, this is a script to parse .wav files. It supports the following flags:")
    print("(No spaces!)")
    print("")
    print("\t -engine=someengine")
    print("\t -key=apikey")
    print("\t -usr=username -pwd=password")
    print("\t -out=path/to/outputfile")
    print("")
    print("The only flag required is the path to the file:")
    print("")
    print("\t python audioprocessor.py path/to/file.wav")
    print("")
    print("The engines supported at this time are:")
    print("(in format: name [what you put after -engine=])")
    print("")
    print("\t Sphinx [sphinx]")
    print("\t Wit.ai [wit.ai]")
    print("\t Google [google]")
    print("\t IBM StT [ibm]")
    print("\t Bing [bing]")
    print("\t Houndify [houndify]")
    print("\t api.ai [api.ai]")
    sys.exit(-1)
def argError():
    """Report invalid command-line arguments and exit with status -1.

    Uses single-argument print() calls (identical output in Python 2,
    valid in Python 3).
    """
    print("Error: invalid arguments.")
    print("Make sure to run this script as:")
    print("\t python audioprocessor.py PATH/FILE.wav [OUTPUT=PATH/OUTPUT] [ENGINE=ENGINE] [ARGS=ARGS]")
    print("You can also run this script with the -help flag to obtain more detailed info:")
    print("\t python audioprocessor.py -help")
    sys.exit(-1)
# Parse arguments the ugly way because I couldn't think of a better way.
for i in range(1,len(args)):
current=args[i]
if current=="-help":
displayHelp()
if not current.startswith("-") and FILEPATH=="":
FILEPATH=current
else:
flag,arg=current.split("=",1)
flag=flag.strip("-")
if arg and flag:
if flag=="usr":
usr=arg
elif flag=="pwd":
pwd=arg
elif flag=="key":
key=arg
elif flag=="engine":
engine=arg
elif flag=="out" and OUTPUT=="":
OUTPUT=arg
writeMode=True
if OUTPUT==FILEPATH:
print "Error: destination and source files are the same."
sys.exit(-1)
# Can't supply all three at the same time
if usr != "" and pwd !="" and key == "":
KEY=[usr,pwd]
if key != "" and usr == "" and pwd == "":
KEY=key
if KEY=="" and engine != "sphinx":
print "Error: You need to supply an API key for engines other than Sphinx."
print "For the case of Houndify and IBM, use -usr and -pwd instead of -api."
print "Make sure to specify the key as one of the two:"
print "\t python audioprocessor.py PATH/FILE.wav -engine=someengine -key=apikey"
print "\t python audioprocessor.py PATH/FILE.wav -engine=someengine -usr=user -pwd=password"
sys.exit(-1)
if engine not in functions:
print "Error: You specified an invalid engine."
print "Check the help (-help) for a list of available engines."
sys.exit(-1)
# Alright, now we can start
with sr.AudioFile(FILEPATH) as source:
audio = r.record(source)
try:
if engine=="sphinx":
txt=r.recognize_sphinx(audio)
else:
txt=functions[engine](audio,KEY)
if writeMode:
with open(OUTPUT,'a+') as f:
f.write('\n')
f.write(txt)
else:
print txt
except sr.UnknownValueError:
couldNotParseError()
print "Error: Could not parse file."
print "Make sure the file is supported by the engine."
except sr.RequestError as e:
print "Error: Could not request results from engine."
print "Make sure your keys are valid, or that you're connected to the internet."
print "Error:",e | {
"repo_name": "adewynter/Tools",
"path": "Docker/dltoolboxio/scripts/audioprocessor.py",
"copies": "1",
"size": "3828",
"license": "mit",
"hash": -4286851625964337700,
"line_mean": 26.7463768116,
"line_max": 99,
"alpha_frac": 0.6880877743,
"autogenerated": false,
"ratio": 2.985959438377535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4174047212677535,
"avg_score": null,
"num_lines": null
} |
# Audio Play
import pyaudio
import wave
import time
import sys
import pygame as pg
def play_audio(audio_file, volume=0.8):
    '''
    Stream music with the mixer.music module in a blocking manner.
    This will stream the sound from disk while playing.

    :param audio_file: path to a file pygame's music mixer can load
    :param volume: playback volume, 0.0 to 1.0
    '''
    # set up the mixer
    freq = 44100       # audio CD quality
    bitsize = -16      # signed 16 bit (negative size means signed in pygame)
    channels = 2       # 1 is mono, 2 is stereo
    buffer_size = 2048 # number of samples (experiment to get best sound)
    # BUG FIX: these settings were previously computed but never passed to
    # pg.mixer.init(), so pygame silently used its defaults.
    pg.mixer.init(freq, bitsize, channels, buffer_size)
    # volume value 0.0 to 1.0
    pg.mixer.music.set_volume(volume)
    clock = pg.time.Clock()
    try:
        pg.mixer.music.load(audio_file)
        print("Audio file {} loaded!".format(audio_file))
    except pg.error:
        print("File {} not found! ({})".format(audio_file, pg.get_error()))
        return
    pg.mixer.music.play()
    while pg.mixer.music.get_busy():
        # check if playback has finished
        clock.tick(30)
def play_any_audio(filename):
    """Fire-and-forget playback of any file pygame's music mixer supports."""
    music = pg.mixer.music
    pg.mixer.init()
    music.load(filename)
    music.play()
def play_wav_audio(filename):
    """Play a .wav file through PyAudio using a callback stream.

    Blocks (polling every 0.1 s) until playback finishes, then releases
    the stream, the wave reader, and the PyAudio instance.
    """
    # BUG FIX: removed a leftover `len(sys.argv) < 2` usage check from this
    # function's previous life as a script -- the filename comes from the
    # parameter, not the command line.
    wf = wave.open(filename, 'rb')
    p = pyaudio.PyAudio()

    def callback(in_data, frame_count, time_info, status):
        # Feed the next chunk of frames to the output device.
        data = wf.readframes(frame_count)
        return (data, pyaudio.paContinue)

    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True,
                    stream_callback=callback)
    stream.start_stream()
    while stream.is_active():
        time.sleep(0.1)
    stream.stop_stream()
    stream.close()
    wf.close()
    p.terminate()
def record_audio(filename, seconds):
    """Record `seconds` of mono 16 kHz 16-bit audio from the default
    input device and save it to `filename` as a WAV file."""
    chunk_frames = 1024
    sample_format = pyaudio.paInt16
    n_channels = 1
    sample_rate = 16000
    audio = pyaudio.PyAudio()
    stream = audio.open(format=sample_format,
                        channels=n_channels,
                        rate=sample_rate,
                        input=True,
                        frames_per_buffer=chunk_frames)
    print("* recording")
    n_chunks = int(sample_rate / chunk_frames * seconds)
    captured = [stream.read(chunk_frames) for _ in range(n_chunks)]
    print("* done recording")
    stream.stop_stream()
    stream.close()
    audio.terminate()
    out = wave.open(filename, 'wb')
    out.setnchannels(n_channels)
    out.setsampwidth(audio.get_sample_size(sample_format))
    out.setframerate(sample_rate)
    out.writeframes(b''.join(captured))
    out.close()
"repo_name": "JosuVicente/facial_and_characteristics_recognition_with_speech_support",
"path": "src/audio_utils.py",
"copies": "1",
"size": "2676",
"license": "mit",
"hash": 7930898633438605000,
"line_mean": 24.2547169811,
"line_max": 77,
"alpha_frac": 0.5885650224,
"autogenerated": false,
"ratio": 3.5210526315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46096176539789474,
"avg_score": null,
"num_lines": null
} |
"""audio_read reads in a whole audio file with resampling."""
# Equivalent to:
#import librosa
#def audio_read(filename, sr=11025, channels=1):
# """Read a soundfile, return (d, sr)."""
# d, sr = librosa.load(filename, sr=sr, mono=(channels == 1))
# return d, sr
# The code below is adapted from:
# https://github.com/bmcfee/librosa/blob/master/librosa/core/audio.py
# This is its original copyright notice:
# Copyright (c) 2014, Brian McFee, Matt McVicar, Dawen Liang, Colin Raffel, Douglas Repetto, Dan Ellis.
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
import sys
import os
import numpy as np
import re
from sys import platform
def eprint(*args, **kwargs):
    """Like print(), but writes to standard error."""
    err_stream = sys.stderr
    print(*args, file=err_stream, **kwargs)
# Platform-specific ffmpeg binary name and default capture device/input.
# NOTE(review): on platforms matching none of these branches (e.g. macOS,
# whose block is commented out below), these names are never defined.
if platform == "linux" or platform == "linux2":
    FFMPEG_BIN = "ffmpeg"  # on Linux
    FFMPEG_AUDIO_DEVICE = "alsa"
    FFMPEG_INPUT = "pulse"
    # FFMPEG_INPUT = "default"
    # FFMPEG_AUDIO_DEVICE = "pulse"
elif platform == "win32":
    FFMPEG_BIN = "ffmpeg.exe"  # on Windows
    FFMPEG_AUDIO_DEVICE = "dshow"
    FFMPEG_INPUT = u"audio=Mikrofon (Urz\u0105dzenie zgodne ze "
# elif platform == "darwin":
#     FFMPEG_BIN = "ffmpeg"  # on OSX
#     FFMPEG_AUDIO_DEVICE = "dsound"
def audio_read(filename, sr=None, channels=None, thread={'interrupted':False}):
    """Read a soundfile via ffmpeg; returns (y, sr, metadata).

    filename: a path string, or a dict describing an ffmpeg capture device
        (forwarded to FFmpegAudioFile).
    sr / channels: requested sample rate and channel count (None = native).
    thread: a dict whose 'interrupted' flag is polled to abort mid-read.
        The mutable default is safe here because it is only read, never
        mutated.

    NOTE(review): uses `basestring`, so this targets Python 2. Iteration and
    `with` on FFmpegAudioFile rely on methods defined elsewhere in the class.
    """
    # Hacked version of librosa.load and audioread/ff.
    offset = 0.0
    duration = None
    dtype = np.float32
    y = []
    if isinstance(filename, basestring):
        FFmpegArgs = os.path.realpath(filename)
    elif isinstance(filename, dict):
        FFmpegArgs = filename
    with FFmpegAudioFile(FFmpegArgs,
                         sample_rate=sr, channels=channels) as input_file:
        sr = input_file.sample_rate
        metadata = input_file.metadata
        channels = input_file.channels
        # Sample window [s_start, s_end) in interleaved-sample units.
        s_start = int(np.floor(sr * offset) * channels)
        if duration is None:
            s_end = np.inf
        else:
            s_end = s_start + int(np.ceil(sr * duration) * channels)
        num_read = 0
        for frame in input_file:
            frame = buf_to_float(frame, dtype=dtype)
            num_read_prev = num_read
            num_read += len(frame)
            if thread['interrupted']:
                break
            if num_read < s_start:
                # offset is after the current frame, keep reading.
                continue
            if s_end < num_read_prev:
                # we're off the end. stop reading
                break
            if s_end < num_read:
                # the end is in this frame. crop.
                frame = frame[:s_end - num_read_prev]
            if num_read_prev <= s_start < num_read:
                # beginning is in this frame
                frame = frame[(s_start - num_read_prev):]
            # tack on the current frame
            y.append(frame)
    if not len(y):
        # Zero-length read
        y = np.zeros(0, dtype=dtype)
    else:
        y = np.concatenate(y)
    if channels > 1:
        # De-interleave into a (channels, samples) array.
        y = y.reshape((-1, 2)).T
    # Final cleanup for dtype and contiguity
    y = np.ascontiguousarray(y, dtype=dtype)
    return (y, sr, metadata)
def buf_to_float(x, n_bytes=2, dtype=np.float32):
    """Convert an integer buffer to floating point values.

    This is primarily useful when loading integer-valued wav data
    into numpy arrays.

    .. seealso:: :func:`librosa.util.buf_to_float`

    :parameters:
        - x : np.ndarray [dtype=int]
            The integer-valued data buffer
        - n_bytes : int [1, 2, 4]
            The number of bytes per sample in ``x``
        - dtype : numeric type
            The target output type (default: 32-bit float)

    :return:
        - x_float : np.ndarray [dtype=float]
            The input data buffer cast to floating point
    """
    # Full-scale value for signed n_bytes-wide integers.
    bits = 8 * n_bytes - 1
    scale = 1.0 / float(1 << bits)
    # Little-endian signed integer format of the requested width.
    fmt = '<i{:d}'.format(n_bytes)
    samples = np.frombuffer(x, fmt).astype(dtype)
    return samples * scale
# The code below is adapted from:
# https://github.com/sampsyo/audioread/blob/master/audioread/ffdec.py
# Below is its original copyright notice:
# This file is part of audioread.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import subprocess
import re
import threading
import time
try:
import queue
except ImportError:
import Queue as queue
class QueueReaderThread(threading.Thread):
    """A daemon thread that pumps blocks from a filehandle into a Queue.

    An empty read result (EOF) is forwarded to the queue as the final
    sentinel before the thread exits. With discard=True no queue is kept
    and the data is simply drained.
    """

    def __init__(self, fh, blocksize=1024, discard=False):
        super(QueueReaderThread, self).__init__()
        self.fh = fh
        self.blocksize = blocksize
        self.daemon = True
        self.discard = discard
        self.queue = None if discard else queue.Queue()

    def run(self):
        while True:
            chunk = self.fh.read(self.blocksize)
            if not self.discard:
                self.queue.put(chunk)
            if not chunk:
                # Stream closed (EOF).
                break
class FFmpegAudioFile(object):
"""An audio file decoded by the ffmpeg command-line utility."""
def __init__(self, filenameOrDevice, channels=None, sample_rate=None, block_size=4096):
    """Start an ffmpeg subprocess decoding a media file or capture device.

    filenameOrDevice: a media file path (str), or a dict describing an
        ffmpeg capture device (keys: FFMPEG_BIN, FFMPEG_AUDIO_DEVICE,
        FFMPEG_INPUT_CHANNELS, FFMPEG_INPUT).
    channels / sample_rate: if given, ask ffmpeg to remix/resample.
    block_size: bytes per read from ffmpeg's stdout.

    NOTE(review): uses `basestring`, so this code targets Python 2.
    """
    filename = ''
    isFilename = False
    FFmpegDevice = {}
    isDevice = False
    if isinstance(filenameOrDevice, basestring):
        filename = filenameOrDevice
        isFilename = True
    elif isinstance(filenameOrDevice, dict):
        FFmpegDevice = filenameOrDevice
        isDevice = True
    else:
        raise ValueError(filenameOrDevice + " is neither a string nor dictionary")
    if isFilename and not os.path.isfile(filename):
        # if not os.path.isfile(filename):
        raise ValueError(filename + " not found.")
    # get metadata from file
    self.metadata = {}
    metadata_include=['artist','album','title','genre','track']
    if isFilename:
        try:
            # First ffmpeg run: dump tag metadata only ('ffmetadata' muxer).
            metadata_popen_args = [FFMPEG_BIN,
                                   '-i', filename,
                                   '-f', 'ffmetadata',
                                   '-loglevel', 'error',
                                   '-']
            # startupinfo = None
            # if platform == 'win32':
            #     startupinfo = subprocess.STARTUPINFO()
            #     startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            if platform == "win32":
                shell = True
            else:
                shell = False
            metadata_proc = subprocess.Popen(
                metadata_popen_args
                , stdout=subprocess.PIPE
                , stderr=subprocess.PIPE
                , shell=shell
                # startupinfo = startupinfo
            )
            # metadata_proc.wait()
            stdout, stderr = metadata_proc.communicate()
            if(stderr):
                raise RuntimeError(stderr.splitlines())
            stdout = stdout.splitlines()
            for index, line in enumerate(stdout):
                line = line.decode('utf-8')
                line = line.rstrip('\n')
                if(index==0 or line==''):
                    # Skip the ";FFMETADATA1" header line and blanks.
                    continue
                tag = re.split('=',line)
                if len(tag) == 2:
                    [tagname,tagvalue] = tag
                    if(tagname in metadata_include):
                        self.metadata[tagname]=tagvalue
        except Exception as e:
            # Metadata extraction is best-effort; decoding continues without it.
            eprint('Error reading ffmpeg metadata ' + str(e))
    # procede with reading the audio file
    # Second ffmpeg run: decode to raw signed 16-bit little-endian PCM on stdout.
    if isFilename:
        popen_args = [FFMPEG_BIN, '-i', filename, '-f', 's16le']
    elif isDevice:
        popen_args = [FFmpegDevice['FFMPEG_BIN'],
                      '-f', FFmpegDevice['FFMPEG_AUDIO_DEVICE'],
                      '-ac', FFmpegDevice['FFMPEG_INPUT_CHANNELS'],
                      '-i', FFmpegDevice['FFMPEG_INPUT'],
                      '-t', '00:10',
                      '-f', 's16le']
    self.channels = channels
    self.sample_rate = sample_rate
    if channels:
        popen_args.extend(['-ac', str(channels)])
    if sample_rate:
        popen_args.extend(['-ar', str(sample_rate)])
    popen_args.append('-')
    # startupinfo = None
    # if platform == 'win32':
    #     startupinfo = subprocess.STARTUPINFO()
    #     startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    if platform == "win32":
        shell = True
    else:
        shell = False
    self.proc = subprocess.Popen(
        popen_args
        , stdout=subprocess.PIPE
        , stderr=subprocess.PIPE
        , shell=shell
        # startupinfo = startupinfo
    )
    # Start another thread to consume the standard output of the
    # process, which contains raw audio data.
    self.stdout_reader = QueueReaderThread(self.proc.stdout, block_size)
    self.stdout_reader.start()
    # Read relevant information from stderr.
    try:
        self._get_info()
    except ValueError:
        raise ValueError("Error reading header info from " + filename)
    # Start a separate thread to read the rest of the data from
    # stderr. This (a) avoids filling up the OS buffer and (b)
    # collects the error output for diagnosis.
    self.stderr_reader = QueueReaderThread(self.proc.stderr)
    self.stderr_reader.start()
def read_data(self, timeout=10.0):
    """Read blocks of raw PCM data from the file.

    Generator yielding bytes blocks taken from the stdout reader
    thread's queue. Stops cleanly on the empty-bytes end-of-file
    sentinel; raises ValueError (with ffmpeg's stderr output) if no
    data arrives for `timeout` seconds, which usually means ffmpeg
    is hanging.
    """
    # Read from stdout in a separate thread and consume data from
    # the queue.
    start_time = time.time()
    while True:
        # Wait for data to be available or a timeout.
        data = None
        try:
            data = self.stdout_reader.queue.get(timeout=timeout)
            if data:
                yield data
            else:
                # End of file (empty-bytes sentinel).
                break
        except queue.Empty:
            # Queue read timed out.
            end_time = time.time()
            if not data:
                if end_time - start_time >= timeout:
                    # Nothing interesting has happened for a while --
                    # FFmpeg is probably hanging.
                    # BUG FIX: the queue holds bytes objects; joining
                    # them with a str separator raised TypeError before
                    # the intended ValueError could be constructed.
                    stderr_output = b''.join(self.stderr_reader.queue.queue)
                    raise ValueError('ffmpeg output: {}'.format(
                        stderr_output.decode('utf8', 'ignore')
                    ))
                else:
                    # Data arrived since the last check; reset the
                    # clock and keep waiting.
                    start_time = end_time
            continue
def _get_info(self):
    """Scan ffmpeg's stderr for the stream-description lines and hand
    them to ``_parse_info``.

    Raises ValueError when EOF is reached without any stream info,
    IOError when ffmpeg reports a missing file, and UnsupportedError
    for undecodable input.
    """
    collected = []
    while True:
        raw = self.proc.stderr.readline()
        if not raw:
            # EOF and data not found.
            raise ValueError("stream info not found")
        # Python 3 yields bytes from the pipe; normalize to str.
        text = raw.decode('utf8', 'ignore') if isinstance(raw, bytes) else raw
        text = text.strip().lower()
        if 'no such file' in text:
            raise IOError('file not found')
        if 'invalid data found' in text:
            raise UnsupportedError()
        if 'duration:' in text:
            collected.append(text)
        elif 'audio:' in text:
            collected.append(text)
            self._parse_info(''.join(collected))
            break
def _parse_info(self, s):
    """Populate sample-rate, channel-count and duration fields from the
    concatenated (lowercased) ffmpeg header text ``s``.
    """
    # Sample rate, e.g. "... 44100 hz ...".
    rate_m = re.search(r'(\d+) hz', s)
    self.sample_rate_orig = int(rate_m.group(1)) if rate_m else 0
    if self.sample_rate is None:
        self.sample_rate = self.sample_rate_orig
    # Channel count: the token after the rate is either "stereo" or an
    # explicit channel number.
    chan_m = re.search(r'hz, ([^,]+),', s)
    if chan_m is None:
        self.channels_orig = 0
    else:
        layout = chan_m.group(1)
        if layout == 'stereo':
            self.channels_orig = 2
        else:
            digits = re.match(r'(\d+) ', layout)
            self.channels_orig = int(digits.group(1)) if digits else 1
    if self.channels is None:
        self.channels = self.channels_orig
    # Duration "hh:mm:ss.t" (a single tenth-of-a-second digit).
    dur_m = re.search(
        r'duration: (\d+):(\d+):(\d+).(\d)', s
    )
    if dur_m is None:
        # No duration found.
        self.duration = 0
    else:
        hours, minutes, seconds, tenths = map(int, dur_m.groups())
        self.duration = (
            hours * 60 * 60 +
            minutes * 60 +
            seconds +
            float(tenths) / 10
        )
def close(self):
    """Kill the ffmpeg decoding subprocess if it is still running."""
    if not hasattr(self, 'proc'):
        # Construction failed before the process was created.
        return
    if self.proc.returncode is None:
        self.proc.kill()
        self.proc.wait()
def __del__(self):
    # Best-effort cleanup if the caller never called close().
    self.close()

# Iteration.
def __iter__(self):
    # Iterating the object yields raw PCM blocks from read_data().
    return self.read_data()

# Context manager.
def __enter__(self):
    return self

def __exit__(self, exc_type, exc_val, exc_tb):
    self.close()
    # Never suppress exceptions raised inside the with-block.
    return False
| {
"repo_name": "piotrwicijowski/whistler",
"path": "audio_read.py",
"copies": "1",
"size": "15179",
"license": "mit",
"hash": 503601591262938100,
"line_mean": 35.4004796163,
"line_max": 487,
"alpha_frac": 0.5476645365,
"autogenerated": false,
"ratio": 4.257784011220196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024348696022653687,
"num_lines": 417
} |
"""audio recorder"""
import pyaudio
import wave
import time
from baseaudio import BaseAudio
class AudioRecorder(BaseAudio):
    """Tools to record audio data from the default input device."""

    def __init__(self, **kwargs):
        """Initializing for recording

        :keyword chunk: frames per buffer read (default 2048)
        :keyword channels: input channel count (default 1, mono)
        :keyword max_length: hard cap on recording length in seconds
        """
        super(AudioRecorder, self).__init__(**kwargs)
        self.frames_perbuff = kwargs.get('chunk', 2048)
        self.channels = kwargs.get('channels', 1)
        # 16-bit signed samples.
        self.format = pyaudio.paInt16  # paInt8
        # if recording is longer than max_length, it stops
        self.max_length = kwargs.get('max_length', 60)  # in seconds

    def start_record(self, **kwargs):
        """Record until Ctrl-C or until max_length is reached.

        :keyword countdown: seconds counted down before recording starts
        :keyword savename: WAV path to write, or None to discard audio
        """
        countdown = kwargs.get('countdown', 3)
        savename = kwargs.get('savename', None)
        # Countdown before recording
        # NOTE(review): each displayed second sleeps only 0.8 s —
        # confirm the shortened tick is intentional.
        for isec_left in reversed(range(countdown)):
            print(isec_left + 1)
            time.sleep(0.8)
        # Record
        print('start recording')
        audio_api = pyaudio.PyAudio()
        # sampling_rate is provided by BaseAudio.
        stream = audio_api.open(format=self.format,
                                channels=self.channels,
                                rate=self.sampling_rate,
                                input=True,
                                frames_per_buffer=self.frames_perbuff)
        frames = []
        # Number of buffer reads that fit inside max_length seconds.
        nchunks = int(self.max_length *
                      self.sampling_rate / self.frames_perbuff)
        try:
            for i in range(0, nchunks):
                data = stream.read(self.frames_perbuff)
                frames.append(data)
            # Only reached when the loop completes without interrupt.
            print('max length ({}sec) reached...stop!'.format(self.max_length))
        except KeyboardInterrupt:
            # Ctrl-C ends the recording early; audio so far is kept.
            print('\nStopped by user')
        print("* done recording")
        stream.stop_stream()
        stream.close()
        audio_api.terminate()
        if savename is not None:
            print('saving as {}'.format(savename))
            wf = wave.open(savename, 'wb')
            wf.setnchannels(self.channels)
            wf.setsampwidth(audio_api.get_sample_size(self.format))
            wf.setframerate(self.sampling_rate)
            wf.writeframes(b''.join(frames))
            wf.close()
if __name__ == "__main__":
    # Quick manual test: record up to 20 seconds and save as test.wav.
    rec = AudioRecorder(max_length=20)
    rec.start_record(savename='test.wav')
    #print(rec)
| {
"repo_name": "jfraj/soundeval",
"path": "recorder.py",
"copies": "1",
"size": "2248",
"license": "mit",
"hash": 3767249545451493000,
"line_mean": 34.6825396825,
"line_max": 79,
"alpha_frac": 0.5573843416,
"autogenerated": false,
"ratio": 3.9787610619469027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5036145403546902,
"avg_score": null,
"num_lines": null
} |
"""Audio source and audio processing compoenents
The two main base classes are :class:`AudioSource` which provides audio and
:class:`AudioProcessor` which act as a pipeline processor on another
:class:`AudioSource`.
"""
import asyncio
import audioop
import collections
import time
import wave
import janus
try:
import pyaudio
except ImportError:
# This is a workaround for doc generation where pyaudio cannot be installed
# TODO(greghaynes): Only fail open during doc gen
pass
class NoMoreChunksError(Exception):
    """Raised when no further audio chunks are available."""
    pass


class NoDefaultInputDeviceError(Exception):
    """Raised when the system has no default audio input device."""

    def __init__(self):
        super(NoDefaultInputDeviceError, self).__init__(
            'No default input device'
        )
# Using a namedtuple for audio chunks due to their lightweight nature
AudioChunk = collections.namedtuple('AudioChunk',
['start_time', 'audio', 'width', 'freq'])
"""A sequence of audio samples.
This is the low level structure used for passing audio. Typically these
are obtained from iterating over an :class:`AudioBlock`.
In order to make this object use minimal memory it is implemented as a
namedtuple.
:param start_time: Unix timestamp of the first sample.
:type start_time: int
:param audio: Bytes array of audio samples.
:type audio: bytes
:param width: Number of bytes per sample.
:type width: int
:param freq: Sampling frequency.
:type freq: int
"""
class AudioBlock(object):
    """An iterator over :class:`AudioChunk`.

    Blocks are used to delineate continuous chunks of audio. As an example,
    when using the :class:`SquelchedSource` audio source a consumer often
    would like to know at what points the squelch was triggered on and off.
    """

    def __init__(self):
        # Set once the block has ended; checked by __anext__ and raced
        # against the producer so end() can interrupt a pending wait.
        self._stopped = asyncio.Event()

    def __aiter__(self):
        return self

    @property
    def ended(self):
        """Whether this block has ended (no more chunks will arrive)."""
        return self._stopped.is_set()

    def end(self):
        """Mark the block finished, waking any pending __anext__."""
        self._stopped.set()

    async def __anext__(self):
        if self._stopped.is_set():
            raise StopAsyncIteration()
        # Race the subclass's chunk producer against the stop event.
        chunk_task = asyncio.ensure_future(self._next_chunk())
        stop_task = asyncio.ensure_future(self._stopped.wait())
        try:
            done, pending = await asyncio.wait(
                [chunk_task, stop_task],
                return_when=asyncio.FIRST_COMPLETED
            )
            for task in pending:
                task.cancel()
            if chunk_task.done():
                try:
                    return chunk_task.result()
                except StopAsyncIteration:
                    # Producer exhausted: mark the block ended, then
                    # propagate the stop to the consumer.
                    self.end()
                    raise
            else:
                # The stop event fired first.
                raise StopAsyncIteration()
        finally:
            # Ensure neither task is left pending regardless of path.
            chunk_task.cancel()
            stop_task.cancel()
class QueueAudioBlock(AudioBlock):
    """AudioBlock whose chunks arrive through an asyncio queue.

    A ``None`` item on the queue acts as an end-of-block sentinel.
    """

    def __init__(self, queue=None):
        if queue is None:
            queue = asyncio.Queue()
        self._q = queue
        super(QueueAudioBlock, self).__init__()

    async def _next_chunk(self):
        item = await self._q.get()
        if item is None:
            raise StopAsyncIteration('No more audio chunks')
        return item

    async def add_chunk(self, chunk):
        await self._q.put(chunk)
def chunk_sample_cnt(chunk):
    """Return the number of samples contained in an AudioChunk.

    :param chunk: The chunk to examine.
    :type chunk: AudioChunk
    """
    return len(chunk.audio) // chunk.width
def merge_chunks(chunks):
    """Concatenate several chunks into one AudioChunk, keeping the
    first chunk's start time, sample width and frequency."""
    assert(len(chunks) > 0)
    first = chunks[0]
    combined = b''.join(c.audio for c in chunks)
    return AudioChunk(first.start_time, combined, first.width, first.freq)
def split_chunk(chunk, sample_offset):
    """Split *chunk* into two chunks at *sample_offset* samples.

    Returns a ``(head, tail)`` pair backed by zero-copy memoryviews of
    the original audio; both halves keep the original start_time.
    """
    byte_offset = int(sample_offset * chunk.width)
    view = memoryview(chunk.audio)
    head = AudioChunk(
        chunk.start_time, view[:byte_offset], chunk.width, chunk.freq
    )
    tail = AudioChunk(
        chunk.start_time, view[byte_offset:], chunk.width, chunk.freq
    )
    return head, tail
class EvenChunkIterator(object):
    """Iterate over chunks from an audio source in even sized increments.

    :parameter iterator: Iterator over audio chunks.
    :type iterator: Iterator
    :parameter chunk_size: Number of samples in resulting chunks
    :type chunk_size: int
    """

    def __init__(self, iterator, chunk_size):
        self._iterator = iterator
        self._chunk_size = chunk_size
        # Leftover samples split off from a previous oversized read.
        self._cur_chunk = None

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Accumulate source chunks until at least chunk_size samples
        # are available, splitting any excess off for the next call.
        # NOTE(review): if the source ends mid-accumulation the partial
        # tail is discarded with the StopAsyncIteration — confirm that
        # dropping it is intended.
        sample_queue = collections.deque()
        ret_chunk_size = 0
        while ret_chunk_size < self._chunk_size:
            # Prefer leftover samples from the previous call.
            chunk = self._cur_chunk or await self._iterator.__anext__()
            self._cur_chunk = None
            cur_chunk_size = chunk_sample_cnt(chunk)
            ret_chunk_size += cur_chunk_size
            sample_queue.append(chunk)
            if ret_chunk_size > self._chunk_size:
                # We need to break up the chunk
                merged_chunk = merge_chunks(sample_queue)
                ret_chunk, leftover_chunk = split_chunk(merged_chunk,
                                                        self._chunk_size)
                self._cur_chunk = leftover_chunk
                return ret_chunk
        return merge_chunks(sample_queue)
class RememberingIterator(object):
    """Async-iterator wrapper remembering the last *memory_size* items
    it yielded, retrievable through :meth:`memory`."""

    def __init__(self, iterator, memory_size):
        self._iterator = iterator
        self.memory_size = memory_size
        # Bounded history of yielded items (oldest evicted first).
        self._buff = collections.deque(maxlen=memory_size)

    def __aiter__(self):
        return self

    async def __anext__(self):
        item = await self._iterator.__anext__()
        self._buff.append(item)
        return item

    def memory(self):
        """Return the deque of the most recently yielded items."""
        return self._buff
class _ListenCtxtMgr(object):
    """Async context manager starting its source on entry and stopping
    it on exit."""

    def __init__(self, source):
        self._src = source

    async def __aenter__(self):
        await self._src.start()

    async def __aexit__(self, *args):
        await self._src.stop()
class AudioSource(object):
    """Base class for providing audio.

    All classes which provide audio in some form implement this class.
    Audio is obtained by first entering the :func:`listen` context manager
    and then iterating over the :class:`AudioSource` to obtain
    :class:`AudioBlock`.
    """

    def __init__(self):
        # True between start() and stop().
        self.running = False
        # Most recently emitted block; ended in stop() so consumers
        # iterating over it wake up.
        self._last_block = None

    def listen(self):
        """Listen to the AudioSource.

        :ret: Async context manager which starts and stops the AudioSource.
        """
        return _ListenCtxtMgr(self)

    async def start(self):
        """Start the audio source.

        This is where initialization / opening of audio devices should happen.
        """
        self.running = True

    async def stop(self):
        """Stop the audio source.

        This is where deinitialization / closing of audio devices should
        happen.
        """
        if self._last_block is not None:
            self._last_block.end()
        self.running = False

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Subclasses implement _next_block(); remember the block so
        # stop() can end it.
        self._last_block = await self._next_block()
        return self._last_block
class SingleBlockAudioSource(AudioSource):
    """Audio source that yields exactly one block before stopping."""

    def __init__(self):
        super(SingleBlockAudioSource, self).__init__()
        self._block_returned = False

    async def _next_block(self):
        if self._block_returned:
            raise StopAsyncIteration()
        self._block_returned = True
        return await self._get_block()
class AudioSourceProcessor(AudioSource):
    """Base class for being a pipeline processor of an :class:`AudioSource`

    :parameter source: Input source
    :type source: AudioSource
    """

    def __init__(self, source):
        super(AudioSourceProcessor, self).__init__()
        self._source = source

    async def start(self):
        """Start the input audio source.

        This is intended to be called from the base class, not directly.
        """
        # Start self first, then the upstream source.
        await super(AudioSourceProcessor, self).start()
        await self._source.start()

    async def stop(self):
        """Stop the input audio source.

        This is intended to be called from the base class, not directly.
        """
        # Mirror of start(): stop upstream first, then self.
        await self._source.stop()
        await super(AudioSourceProcessor, self).stop()
class Microphone(AudioSource):
    """Use a local microphone as an audio source.

    :parameter audio_format: Sample format, default paInt16
    :type audio: PyAudio format
    :parameter channels: Number of channels in microphone.
    :type channels: int
    :parameter rate: Sample frequency
    :type rate: int
    :parameter device_ndx: PyAudio device index
    :type device_ndx: int
    """

    def __init__(self,
                 audio_format=None,
                 channels=1,
                 rate=16000,
                 device_ndx=0):
        super(Microphone, self).__init__()
        # Resolve the default lazily so importing this module does not
        # require pyaudio to be present.
        audio_format = audio_format or pyaudio.paInt16
        self._format = audio_format
        self._channels = channels
        self._rate = rate
        self._device_ndx = device_ndx
        # Populated in start().
        self._pyaudio = None
        self._stream = None
        self._stream_queue = None

    async def start(self):
        await super(Microphone, self).start()
        loop = asyncio.get_event_loop()
        # janus queue bridges pyaudio's callback thread (sync side)
        # with asyncio consumers (async side).
        self._stream_queue = janus.Queue(loop=loop)
        self._pyaudio = pyaudio.PyAudio()
        self._stream = self._pyaudio.open(
            input=True,
            format=self._format,
            channels=self._channels,
            rate=self._rate,
            input_device_index=self._device_ndx,
            stream_callback=self._stream_callback
        )

    async def stop(self):
        # None sentinel unblocks any consumer before teardown.
        await self._stream_queue.async_q.put(None)
        await super(Microphone, self).stop()
        self._stream.stop_stream()
        self._stream.close()
        self._pyaudio.terminate()

    async def _next_block(self):
        return QueueAudioBlock(self._stream_queue.async_q)

    def _stream_callback(self, in_data, frame_count,
                         time_info, status_flags):
        # Runs on pyaudio's callback thread: use the sync queue side.
        # NOTE(review): width is hard-coded to 2 (16-bit) even though
        # audio_format is configurable — confirm this is intended.
        chunk = AudioChunk(start_time=time_info['input_buffer_adc_time'],
                           audio=in_data, freq=self._rate, width=2)
        self._stream_queue.sync_q.put(chunk)
        retflag = pyaudio.paContinue if self.running else pyaudio.paComplete
        return (None, retflag)
class _WaveAudioBlock(AudioBlock):
    """AudioBlock reading frames from an already-open wave file,
    downmixing stereo input to mono."""

    def __init__(self, wave_fp, nframes, samprate, sampwidth, n_channels):
        super(_WaveAudioBlock, self).__init__()
        self._wave_fp = wave_fp
        self._nframes = nframes
        self._sampwidth = sampwidth
        self._samprate = samprate
        self._n_channels = n_channels

    async def _next_chunk(self):
        data = self._wave_fp.readframes(self._nframes)
        if self._n_channels == 2:
            # Average both stereo channels into a mono stream.
            data = audioop.tomono(data, self._sampwidth, .5, .5)
        if not data:
            raise StopAsyncIteration('No more frames in wav')
        return AudioChunk(0, audio=data, width=self._sampwidth,
                          freq=self._samprate)
class WaveSource(SingleBlockAudioSource):
    """Use a wave file as an audio source.

    :parameter wave_path: Path to wave file.
    :type wave_path: string
    :parameter chunk_frames: Chunk size to return from get_chunk
    :type chunk_frames: int
    """

    def __init__(self, wave_path, chunk_frames=None):
        super(WaveSource, self).__init__()
        self._wave_path = wave_path
        self._chunk_frames = chunk_frames
        # Populated in start() once the file has been opened.
        self._wave_fp = None
        self._width = None
        self._freq = None
        self._channels = None
        self._out_queue = None

    async def start(self):
        await super(WaveSource, self).start()
        self._wave_fp = wave.open(self._wave_path)
        self._width = self._wave_fp.getsampwidth()
        self._freq = self._wave_fp.getframerate()
        self._channels = self._wave_fp.getnchannels()
        self._out_queue = asyncio.Queue()
        # NOTE(review): _returned_block appears unused here — the base
        # class tracks _block_returned — confirm this line is vestigial.
        self._returned_block = False
        # Only mono and stereo are supported (stereo gets downmixed).
        assert(self._channels <= 2)

    async def stop(self):
        # Sentinel for any queue consumer, then release the file.
        await self._out_queue.put(None)
        self._wave_fp.close()
        await super(WaveSource, self).stop()

    async def _get_block(self):
        # Default to delivering the whole file in one chunk.
        frame_cnt = self._chunk_frames or self._wave_fp.getnframes()
        return _WaveAudioBlock(self._wave_fp, frame_cnt, self._freq,
                               self._width, self._channels)
class _RateConvertBlock(AudioBlock):
    """AudioBlock resampling chunks of a wrapped block to a fixed
    output rate via ``audioop.ratecv`` (2-byte samples)."""

    def __init__(self, src_block, n_channels, out_rate):
        super(_RateConvertBlock, self).__init__()
        self._src_block = src_block
        self._n_channels = n_channels
        self._out_rate = out_rate
        # ratecv state carried across chunks for seamless conversion.
        self._state = None

    async def _next_chunk(self):
        chunk = await self._src_block.__anext__()
        converted, self._state = audioop.ratecv(
            chunk.audio, 2, self._n_channels,
            chunk.freq, self._out_rate, self._state)
        return AudioChunk(chunk.start_time, converted, 2, self._out_rate)
class RateConvert(AudioSourceProcessor):
    """Processor resampling every block of its source to *out_rate*."""

    def __init__(self, source, n_channels, out_rate):
        super(RateConvert, self).__init__(source)
        self._n_channels = n_channels
        self._out_rate = out_rate

    async def _next_block(self):
        upstream = await self._source.__anext__()
        return _RateConvertBlock(upstream, self._n_channels, self._out_rate)
class SquelchedBlock(AudioBlock):
    """Block emitted by SquelchedSource while the squelch is open.

    The first chunk returned is the merged pre-trigger history so the
    attack of the sound is preserved; subsequent chunks come straight
    from the source until the squelch closes.
    """

    def __init__(self, source, squelch_level):
        super(SquelchedBlock, self).__init__()
        # *source* is a RememberingIterator of evenly sized chunks.
        self._source = source
        self.squelch_level = squelch_level
        self._sent_mem = False

    async def _next_chunk(self):
        if not self._sent_mem:
            # First call: flush the remembered pre-trigger chunks.
            self._sent_mem = True
            return merge_chunks(self._source.memory())
        async for chunk in self._source:
            # Re-check with is_triggered=True, i.e. apply hysteresis.
            squelch_triggered = SquelchedSource.check_squelch(
                self.squelch_level,
                True,
                self._source.memory()
            )
            if squelch_triggered:
                return chunk
            else:
                # Squelch closed again: end this block.
                raise StopAsyncIteration()
            # NOTE: the trailing raise below is unreachable except when
            # the source is exhausted without closing the squelch.
        raise StopAsyncIteration()
class SquelchedSource(AudioSourceProcessor):
    """Filter out samples below a volume level from an audio source.

    This is useful to prevent constant transcription attempts of background
    noise, and also to correctly create a 'trigger window' where
    transcription attempts are made.

    A sliding window of prefix_samples size is inspected. When the rms of
    prefix_samples * sample_size samples surpasses the squelch_level this
    source begins to emit audio. Once the rms of the sliding window passes
    below 80% of the squelch level this source stop emitting audio.

    :parameter source: Input source
    :type source: AudioSource
    :parameter sample_size: Size of each sample to inspect.
    :type sample_size: int
    :parameter squelch_level: RMS value to trigger squelch
    :type squelch_level: int
    :parameter prefix_samples: Number of samples of sample_size to check
    :type prefix_samples: int
    """

    def __init__(self, source, sample_size=1600, squelch_level=None,
                 prefix_samples=4):
        super(SquelchedSource, self).__init__(source)
        self._sample_size = sample_size
        self.squelch_level = squelch_level
        self._prefix_samples = prefix_samples
        # Bytes per sample; fixed at 16-bit here.
        self._sample_width = 2
        self._src_block = None

    @staticmethod
    def check_squelch(level, is_triggered, chunks):
        """Decide whether the squelch should be open.

        Uses the median RMS of *chunks* with hysteresis: once triggered,
        stay open until the median falls below 80% of *level*; when not
        triggered, open only when the median exceeds *level*.
        """
        rms_vals = [audioop.rms(x.audio, x.width) for x in chunks]
        median_rms = sorted(rms_vals)[int(len(rms_vals) * .5)]
        if is_triggered:
            if median_rms < (level * .8):
                return False
            else:
                return True
        else:
            if median_rms > level:
                return True
            else:
                return False

    async def detect_squelch_level(self, detect_time=10, threshold=.8):
        """Sample ambient audio for *detect_time* seconds, set
        ``squelch_level`` to the RMS at the *threshold* percentile and
        return it."""
        start_time = time.time()
        end_time = start_time + detect_time
        audio_chunks = collections.deque()
        async with self._source.listen():
            async for block in self._source:
                if time.time() > end_time:
                    break
                even_iter = EvenChunkIterator(block, self._sample_size)
                try:
                    while time.time() < end_time:
                        audio_chunks.append(await even_iter.__anext__())
                except StopAsyncIteration:
                    pass
        # Only full-sized chunks contribute to the estimate.
        rms_vals = [audioop.rms(x.audio, self._sample_width) for x in
                    audio_chunks
                    if len(x.audio) == self._sample_size * self._sample_width]
        level = sorted(rms_vals)[int(threshold * len(rms_vals)):][0]
        self.squelch_level = level
        return level

    async def start(self):
        # A squelch level must be set (or detected) before starting.
        assert(self.squelch_level is not None)
        await super(SquelchedSource, self).start()

    async def _next_block(self):
        """Wait until the squelch opens, then emit a SquelchedBlock
        seeded with the remembered pre-trigger chunks."""
        if self._src_block is None or self._src_block.ended:
            self._src_block = await self._source.__anext__()
        even_iter = EvenChunkIterator(self._src_block, self._sample_size)
        self._mem_iter = RememberingIterator(even_iter,
                                             self._prefix_samples)
        async for _ in self._mem_iter:  # NOQA
            if SquelchedSource.check_squelch(self.squelch_level,
                                             False,
                                             self._mem_iter.memory()):
                return SquelchedBlock(self._mem_iter,
                                      self.squelch_level)
        raise StopAsyncIteration()
class AudioPlayer(object):
    """Play audio from an audio source.

    This is not generally useful for transcription, but can be very useful
    in the development of :class:`AudioSource` or :class:`AudioProcessor`
    classes.

    :param source: Source to play.
    :type source: AudioSource
    :param width: Bytes per sample.
    :type width: int
    :param channels: Number of channels in output device.
    :type channels: int
    :param freq: Sampling frequency of output device.
    :type freq: int
    """

    def __init__(self, source, width, channels, freq):
        self._source = source
        self._width = width
        self._channels = channels
        self._freq = freq

    async def play(self):
        """Play audio from source.

        This method will block until the source runs out of audio.
        """
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(self._width),
                        channels=self._channels,
                        rate=self._freq,
                        output=True)
        async with self._source.listen():
            async for block in self._source:
                async for chunk in block:
                    # NOTE: stream.write is blocking and will stall the
                    # event loop; acceptable for a debugging utility.
                    stream.write(chunk.audio)
        stream.stop_stream()
        stream.close()
        p.terminate()
| {
"repo_name": "ibm-dev/streamtotext",
"path": "streamtotext/audio.py",
"copies": "1",
"size": "19261",
"license": "apache-2.0",
"hash": 7737453575911213000,
"line_mean": 30.9950166113,
"line_max": 79,
"alpha_frac": 0.5940501532,
"autogenerated": false,
"ratio": 4.1359244148593515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5229974568059351,
"avg_score": null,
"num_lines": null
} |
"""Audio sources that can provide raw PCM frames that pyatv can stream."""
from abc import ABC, abstractmethod, abstractproperty
import asyncio
from contextlib import suppress
from functools import partial
import io
import logging
from typing import Optional, Union
import miniaudio
from miniaudio import SampleFormat
from pyatv.exceptions import NotSupportedError
_LOGGER = logging.getLogger(__name__)
def _int2sf(sample_size: int) -> SampleFormat:
    """Map bytes-per-sample onto the corresponding miniaudio format."""
    formats = {
        1: SampleFormat.UNSIGNED8,
        2: SampleFormat.SIGNED16,
        3: SampleFormat.SIGNED24,
        4: SampleFormat.SIGNED32,
    }
    if sample_size not in formats:
        raise NotSupportedError(f"unsupported sample size: {sample_size}")
    return formats[sample_size]
class AudioSource(ABC):
    """Audio source that returns raw PCM frames."""

    async def close(self) -> None:
        """Close underlying resources."""

    @abstractmethod
    async def readframes(self, nframes: int) -> bytes:
        """Read number of frames and advance in stream."""

    # NOTE(review): abstractproperty has been deprecated since Python
    # 3.3 in favor of stacking @property over @abstractmethod —
    # consider migrating.
    @abstractproperty
    def sample_rate(self) -> int:
        """Return sample rate."""

    @abstractproperty
    def channels(self) -> int:
        """Return number of audio channels."""

    @abstractproperty
    def sample_size(self) -> int:
        """Return number of bytes per sample."""

    @abstractproperty
    def duration(self) -> int:
        """Return duration in seconds."""

    @abstractproperty
    def supports_seek(self) -> bool:
        """Return if source supports seeking."""
class ReaderWrapper(miniaudio.StreamableSource):
    """Wraps a reader into a StreamableSource that miniaudio can consume."""

    def __init__(self, reader: io.BufferedReader) -> None:
        """Initialize a new ReaderWrapper instance."""
        self.reader: io.BufferedReader = reader

    def read(self, num_bytes: int) -> Union[bytes, memoryview]:
        """Read and return data from buffer."""
        return self.reader.read(num_bytes)

    def seek(self, offset: int, origin: miniaudio.SeekOrigin) -> bool:
        """Seek in stream; returns False if the reader is unseekable."""
        if not self.reader.seekable():
            return False
        # Map miniaudio's seek origin onto io's whence values:
        # CURRENT -> 1, anything else is treated as absolute start (0).
        whence = 1 if origin == miniaudio.SeekOrigin.CURRENT else 0
        self.reader.seek(offset, whence)
        return True
class BufferedReaderSource(AudioSource):
    """Audio source used to play a file from a buffer.

    This audio source adds a small internal buffer (corresponding to 0.5s) to deal with
    tiny hiccups. Proper buffering should be done by the source buffer.
    """

    # Bytes read from the wrapped reader per buffering iteration.
    CHUNK_SIZE = 352 * 3

    def __init__(
        self,
        reader: miniaudio.WavFileReadStream,
        wrapper: ReaderWrapper,
        sample_rate: int,
        channels: int,
        sample_size: int,
    ) -> None:
        """Initialize a new MiniaudioWrapper instance."""
        self.loop = asyncio.get_event_loop()
        self.reader: miniaudio.WavFileReadStream = reader
        self.wrapper: ReaderWrapper = wrapper
        # Background task continuously refilling the internal buffer.
        self._buffer_task: Optional[asyncio.Task] = asyncio.ensure_future(
            self._buffering_task()
        )
        self._audio_buffer: bytes = b""
        self._buffer_needs_refilling: asyncio.Event = asyncio.Event()
        self._data_was_added_to_buffer: asyncio.Event = asyncio.Event()
        # NOTE(review): buffer size in bytes derived only from the
        # sample rate; for multi-byte or multi-channel audio this is
        # less than 0.5 s — confirm intended.
        self._buffer_size: int = int(sample_rate / 2)
        self._sample_rate: int = sample_rate
        self._channels: int = channels
        self._sample_size: int = sample_size

    @classmethod
    async def open(
        cls,
        buffered_reader: io.BufferedReader,
        sample_rate: int,
        channels: int,
        sample_size: int,
    ) -> "BufferedReaderSource":
        """Return a new AudioSource instance playing from the provided buffer."""
        wrapper = ReaderWrapper(buffered_reader)
        loop = asyncio.get_event_loop()
        # Decoding is blocking, so run it on the default executor.
        src = await loop.run_in_executor(
            None,
            partial(
                miniaudio.stream_any,
                wrapper,
                output_format=_int2sf(sample_size),
                nchannels=channels,
                sample_rate=sample_rate,
            ),
        )
        reader = miniaudio.WavFileReadStream(
            src, sample_rate, channels, _int2sf(sample_size)
        )
        # TODO: We get a WAV file back, but we expect to return raw PCM samples so
        # the WAVE header must be removed. It would be better to actually parse the
        # header, ensuring we remove the correct amount of data. But for now we are
        # lazy.
        await loop.run_in_executor(None, reader.read, 44)
        # The source stream is passed here and saved to not be garbage collected
        instance = cls(reader, wrapper, sample_rate, channels, sample_size)
        return instance

    async def close(self) -> None:
        """Close underlying resources."""
        if self._buffer_task:
            self._buffer_task.cancel()
            with suppress(asyncio.CancelledError):
                await self._buffer_task
            self._buffer_task = None

    async def readframes(self, nframes: int) -> bytes:
        """Read number of frames and advance in stream."""
        # If buffer is empty but the buffering task is still running, that means we are
        # buffering and need to wait for more data to be added to buffer.
        # NOTE(review): despite its name, this flag is True when the
        # task has *finished* (done()); the wait below therefore only
        # happens while the task is still producing — which matches the
        # comment's intent, but the variable name is misleading.
        buffer_task_running = self._buffer_task and self._buffer_task.done()
        if not self._audio_buffer and not buffer_task_running:
            _LOGGER.debug("Audio source is buffering")
            self._buffer_needs_refilling.set()
            self._data_was_added_to_buffer.clear()
            await self._data_was_added_to_buffer.wait()
        total_bytes = nframes * self._sample_size * self._channels
        # Return data corresponding to requested frame, or what is left
        available_data = min(total_bytes, len(self._audio_buffer))
        data = self._audio_buffer[0:available_data]
        self._audio_buffer = self._audio_buffer[available_data:]
        # Simple buffering scheme: fill up the buffer again when reaching <= 50%
        if len(self._audio_buffer) < 0.5 * self._buffer_size:
            self._buffer_needs_refilling.set()
        return data

    async def _buffering_task(self) -> None:
        """Keep the internal buffer topped up from the wrapped reader."""
        _LOGGER.debug("Starting audio buffering task")
        while True:
            try:
                # Read a chunk and add it to the internal buffer. If no data as read,
                # just break out.
                chunk = await self.loop.run_in_executor(
                    None, self.reader.read, self.CHUNK_SIZE
                )
                if not chunk:
                    break
                self._audio_buffer += chunk
                self._data_was_added_to_buffer.set()
                # Buffer full: pause until a reader drains it below 50%.
                if len(self._audio_buffer) >= self._buffer_size:
                    await self._buffer_needs_refilling.wait()
                    self._buffer_needs_refilling.clear()
            except Exception:
                _LOGGER.exception("an error occurred during buffering")
                # Wake any waiting reader so it is not stuck forever.
                self._data_was_added_to_buffer.set()

    @property
    def sample_rate(self) -> int:
        """Return sample rate."""
        return self._sample_rate

    @property
    def channels(self) -> int:
        """Return number of audio channels."""
        return self._channels

    @property
    def sample_size(self) -> int:
        """Return number of bytes per sample."""
        return self._sample_size

    @property
    def duration(self) -> int:
        """Return duration in seconds."""
        return 0  # We don't know the duration

    @property
    def supports_seek(self) -> bool:
        """Return if source supports seeking."""
        return self.wrapper.reader.seekable()
class FileSource(AudioSource):
    """Audio source used to play a local audio file.

    The whole file is decoded up front; frames are then served from the
    in-memory sample buffer.
    """

    def __init__(self, src: miniaudio.DecodedSoundFile) -> None:
        """Initialize a new FileSource instance."""
        self.src: miniaudio.DecodedSoundFile = src
        self.samples: bytes = self.src.samples.tobytes()
        # Byte offset of the next frame to serve.
        self.pos: int = 0

    @classmethod
    async def open(
        cls, filename: str, sample_rate: int, channels: int, sample_size: int
    ) -> "FileSource":
        """Return a new AudioSource instance playing from the provided file."""
        loop = asyncio.get_event_loop()
        # Decoding blocks, so it runs on the default executor.
        decoded = await loop.run_in_executor(
            None,
            partial(
                miniaudio.decode_file,
                filename,
                output_format=_int2sf(sample_size),
                nchannels=channels,
                sample_rate=sample_rate,
            ),
        )
        return cls(decoded)

    async def readframes(self, nframes: int) -> bytes:
        """Read number of frames and advance in stream."""
        if self.pos >= len(self.samples):
            return b""
        step = self.sample_size * self.channels * nframes
        window = self.samples[self.pos : min(len(self.samples), self.pos + step)]
        self.pos += step
        return window

    @property
    def sample_rate(self) -> int:
        """Return sample rate."""
        return self.src.sample_rate

    @property
    def channels(self) -> int:
        """Return number of audio channels."""
        return self.src.nchannels

    @property
    def sample_size(self) -> int:
        """Return number of bytes per sample."""
        return self.src.sample_width

    @property
    def duration(self) -> int:
        """Return duration in seconds."""
        return round(self.src.duration)

    @property
    def supports_seek(self) -> bool:
        """Return if source supports seeking."""
        return True
async def open_source(
    source: Union[str, io.BufferedReader],
    sample_rate: int,
    channels: int,
    sample_size: int,
) -> AudioSource:
    """Create an AudioSource from given input source.

    A string is treated as a file path; anything else is handled as a
    buffered reader.
    """
    opener = FileSource.open if isinstance(source, str) else BufferedReaderSource.open
    return await opener(source, sample_rate, channels, sample_size)
| {
"repo_name": "postlund/pyatv",
"path": "pyatv/raop/audio_source.py",
"copies": "1",
"size": "10119",
"license": "mit",
"hash": -7436319976929296000,
"line_mean": 32.2861842105,
"line_max": 88,
"alpha_frac": 0.6100405178,
"autogenerated": false,
"ratio": 4.244546979865772,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5354587497665771,
"avg_score": null,
"num_lines": null
} |
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import os
import requests
from SocketServer import ThreadingMixIn
import subprocess
from threading import Thread
from galicaster.core import context
# Galicaster configuration and signal dispatcher singletons.
conf = context.get_conf()
dispatcher = context.get_dispatcher()
# Remote endpoint notified of stream keys, and this agent's identity.
_http_host = conf.get('ddp', 'http_host')
_id = conf.get('ingest', 'hostname')
# Streaming server port and GStreamer capture source configuration.
_port = conf.get_int('audiostream', 'port') or 31337
src = conf.get('audiostream', 'src') or 'alsasrc'
device = conf.get('audiostream', 'device') or None
if device:
    device_params = 'device=' + device
else:
    device_params = ''
def init():
    """Galicaster plugin entry point: start the streaming server thread."""
    audiostream = AudioStream()
    audiostream.start()
class AudioStream(Thread):
    """Background thread hosting the HTTP audio streaming server."""

    def __init__(self):
        Thread.__init__(self)
        serveraddr = ('', _port)
        server = ThreadedHTTPServer(serveraddr, AudioStreamer)
        # NOTE(review): assigned after HTTPServer.__init__ has already
        # bound the socket, so this line has no effect (HTTPServer
        # enables address reuse by default anyway) — confirm.
        server.allow_reuse_address = True
        server.timeout = 30
        self.server = server
        # Stop serving cleanly when Galicaster emits its quit signal.
        dispatcher.connect('action-quit', self.shutdown)

    def run(self):
        self.server.serve_forever()

    def shutdown(self, whatever):
        # Signal handler (extra positional arg comes from the dispatcher).
        self.server.shutdown()
# Mixin order matters: ThreadingMixIn must precede HTTPServer so its
# process_request override takes effect.
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
class AudioStreamer(BaseHTTPRequestHandler):
    """HTTP handler streaming live MP3-encoded audio, captured through a
    gst-launch pipeline, to each connected client."""

    def _writeheaders(self):
        """Send a 200 response with an audio/mpeg content type."""
        self.send_response(200)  # 200 OK http response
        self.send_header('Content-type', 'audio/mpeg')
        self.end_headers()

    def _not_allowed(self):
        """Send a 403 Forbidden response."""
        self.send_response(403)
        self.end_headers()

    def do_HEAD(self):
        self._writeheaders()

    def do_GET(self):
        """Stream audio to the client until it disconnects.

        Reports the request path's stream key to the configured host,
        then pipes the gst-launch pipeline's stdout to the client.
        """
        data = {'_id': _id, 'streamKey': self.path[1:]}
        # NOTE(review): the response is ignored, so every key is
        # accepted and _not_allowed() is never used — confirm whether
        # the POST result should gate access.
        requests.post(_http_host + '/stream_key', data=data)
        # BUG FIX: p and devnull must exist even when an exception fires
        # before Popen runs; previously p.kill() could raise NameError.
        p = None
        devnull = None
        try:
            self._writeheaders()
            DataChunkSize = 10000
            devnull = open(os.devnull, 'wb')
            command = 'gst-launch-1.0 {} {} ! '.format(src, device_params) + \
                      'lamemp3enc bitrate=128 cbr=true ! ' + \
                      'filesink location=/dev/stdout'
            p = subprocess.Popen(
                command,
                stdout=subprocess.PIPE,
                stderr=devnull,
                bufsize=-1,
                shell=True)
            while p.poll() is None:
                stdoutdata = p.stdout.read(DataChunkSize)
                self.wfile.write(stdoutdata)
            # Drain whatever output remains after the pipeline exits.
            stdoutdata = p.stdout.read(DataChunkSize)
            self.wfile.write(stdoutdata)
        except Exception:
            # Client disconnects surface here as socket errors; stop.
            pass
        if p is not None:
            try:
                # BUG FIX: killing an already-exited process raises
                # OSError; swallow it instead of crashing the handler.
                p.kill()
            except OSError:
                pass
        if devnull is not None:
            devnull.close()
        try:
            self.wfile.flush()
            self.wfile.close()
        except Exception:
            pass

    def handle_one_request(self):
        """Wrap the base implementation so socket errors from vanished
        clients close the connection instead of crashing the thread."""
        try:
            BaseHTTPRequestHandler.handle_one_request(self)
        except Exception:
            self.close_connection = 1
            self.rfile = None
            self.wfile = None

    def finish(self):
        # Best effort: the socket may already be gone.
        try:
            BaseHTTPRequestHandler.finish(self)
        except Exception:
            pass
pass | {
"repo_name": "SussexLearningSystems/peakaboo",
"path": "docs/capture_agent_plugins/examples/galicaster_2_manchester/audiostream.py",
"copies": "1",
"size": "4145",
"license": "mit",
"hash": 7981984166726927000,
"line_mean": 30.6488549618,
"line_max": 78,
"alpha_frac": 0.6364294331,
"autogenerated": false,
"ratio": 4.140859140859141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5277288573959141,
"avg_score": null,
"num_lines": null
} |
'''Audio System (aud)
This module provides access to the audaspace audio library.
'''
AUD_DEVICE_JACK = 3 #constant value
AUD_DEVICE_NULL = 0 #constant value
AUD_DEVICE_OPENAL = 1 #constant value
AUD_DEVICE_SDL = 2 #constant value
AUD_DISTANCE_MODEL_EXPONENT = 5 #constant value
AUD_DISTANCE_MODEL_EXPONENT_CLAMPED = 6 #constant value
AUD_DISTANCE_MODEL_INVALID = 0 #constant value
AUD_DISTANCE_MODEL_INVERSE = 1 #constant value
AUD_DISTANCE_MODEL_INVERSE_CLAMPED = 2 #constant value
AUD_DISTANCE_MODEL_LINEAR = 3 #constant value
AUD_DISTANCE_MODEL_LINEAR_CLAMPED = 4 #constant value
AUD_FORMAT_FLOAT32 = 36 #constant value
AUD_FORMAT_FLOAT64 = 40 #constant value
AUD_FORMAT_INVALID = 0 #constant value
AUD_FORMAT_S16 = 18 #constant value
AUD_FORMAT_S24 = 19 #constant value
AUD_FORMAT_S32 = 20 #constant value
AUD_FORMAT_U8 = 1 #constant value
AUD_STATUS_INVALID = 0 #constant value
AUD_STATUS_PAUSED = 2 #constant value
AUD_STATUS_PLAYING = 1 #constant value
def device(*argv):
'''device()
Returns the application's Device.
@returns (Device): The application's Device.
'''
return Device
class Device:
'''Device objects represent an audio output backend like OpenAL or SDL, but might also represent a file output or RAM buffer output.
'''
def lock(*argv):
'''lock()
Locks the device so that it's guaranteed, that no samples are read from the streams until :meth:unlock is called.
This is useful if you want to do start/stop/pause/resume some sounds at the same time.
Note: The device has to be unlocked as often as locked to be able to continue playback... warning:: Make sure the time between locking and unlocking is as short as possible to avoid clicks.
'''
pass
def play(*argv):
'''play(factory, keep=False)
Plays a factory.
Arguments:
@factory (Factory): The factory to play.
@keep (bool): See :attr:Handle.keep.
@returns (Handle): The playback handle with which playback can be controlled with.
'''
return Handle
def stopAll(*argv):
'''stopAll()
Stops all playing and paused sounds.
'''
pass
def unlock(*argv):
'''unlock()
Unlocks the device after a lock call, see :meth:lock for details.
'''
pass
channels = None
'''The channel count of the device.
'''
distance_model = None
'''The distance model of the device.
(seealso http://connect.creativelabs.com/openal/Documentation/OpenAL%201.1%20Specification.htm#_Toc199835864)
'''
doppler_factor = None
'''The doppler factor of the device.
This factor is a scaling factor for the velocity vectors in doppler calculation. So a value bigger than 1 will exaggerate the effect as it raises the velocity.
'''
format = None
'''The native sample format of the device.
'''
listener_location = None
'''The listeners's location in 3D space, a 3D tuple of floats.
'''
listener_orientation = None
'''The listener's orientation in 3D space as quaternion, a 4 float tuple.
'''
listener_velocity = None
'''The listener's velocity in 3D space, a 3D tuple of floats.
'''
rate = None
'''The sampling rate of the device in Hz.
'''
speed_of_sound = None
'''The speed of sound of the device.
The speed of sound in air is typically 343 m/s.
'''
volume = None
'''The overall volume of the device.
'''
class Factory:
'''Factory objects are immutable and represent a sound that can be played simultaneously multiple times. They are called factories because they create reader objects internally that are used for playback.
'''
@classmethod
def file(*argv):
'''file(filename)
Creates a factory object of a sound file.
Arguments:
@filename (string): Path of the file.
@returns (Factory): The created Factory object... warning:: If the file doesn't exist or can't be read you will not get an exception immediately, but when you try to start playback of that factory.
'''
return Factory
@classmethod
def sine(*argv):
'''sine(frequency, rate=44100)
Creates a sine factory which plays a sine wave.
Arguments:
@frequency (float): The frequency of the sine wave in Hz.
@rate (int): The sampling rate in Hz. It's recommended to set this value to the playback device's samling rate to avoid resamping.
@returns (Factory): The created Factory object.
'''
return Factory
def buffer(*argv):
'''buffer()
Buffers a factory into RAM.
This saves CPU usage needed for decoding and file access if the underlying factory reads from a file on the harddisk, but it consumes a lot of memory.
@returns (Factory): The created Factory object.
Note: Only known-length factories can be buffered... warning:: Raw PCM data needs a lot of space, only buffer short factories.
'''
return Factory
def delay(*argv):
'''delay(time)
Delays by playing adding silence in front of the other factory's data.
Arguments:
@time (float): How many seconds of silence should be added before the factory.
@returns (Factory): The created Factory object.
'''
return Factory
def fadein(*argv):
'''fadein(start, length)
Fades a factory in by raising the volume linearly in the given time interval.
Arguments:
@start (float): Time in seconds when the fading should start.
@length (float): Time in seconds how long the fading should last.
@returns (Factory): The created Factory object.
Note: Before the fade starts it plays silence.
'''
return Factory
def fadeout(*argv):
'''fadeout(start, length)
Fades a factory in by lowering the volume linearly in the given time interval.
Arguments:
@start (float): Time in seconds when the fading should start.
@length (float): Time in seconds how long the fading should last.
@returns (Factory): The created Factory object.
Note: After the fade this factory plays silence, so that the length of the factory is not altered.
'''
return Factory
def filter(*argv):
'''filter(b, a = (1))
Filters a factory with the supplied IIR filter coefficients.
Without the second parameter you'll get a FIR filter.
If the first value of the a sequence is 0 it will be set to 1 automatically.
If the first value of the a sequence is neither 0 nor 1, all filter coefficients will be scaled by this value so that it is 1 in the end, you don't have to scale yourself.
Arguments:
@b (sequence of float): The nominator filter coefficients.
@a (sequence of float): The denominator filter coefficients.
@returns (Factory): The created Factory object.
'''
return Factory
def highpass(*argv):
'''highpass(frequency, Q=0.5)
Creates a second order highpass filter based on the transfer function H(s) = s^2 / (s^2 + s/Q + 1)
Arguments:
@frequency (float): The cut off trequency of the highpass.
@Q (float): Q factor of the lowpass.
@returns (Factory): The created Factory object.
'''
return Factory
def join(*argv):
'''join(factory)
Plays two factories in sequence.
Arguments:
@factory (Factory): The factory to play second.
@returns (Factory): The created Factory object.
Note: The two factories have to have the same specifications (channels and samplerate).
'''
return Factory
def limit(*argv):
'''limit(start, end)
Limits a factory within a specific start and end time.
Arguments:
@start (float): Start time in seconds.
@end (float): End time in seconds.
@returns (Factory): The created Factory object.
'''
return Factory
def loop(*argv):
'''loop(count)
Loops a factory.
Arguments:
@count (integer): How often the factory should be looped. Negative values mean endlessly.
@returns (Factory): The created Factory object.
Note: This is a filter function, you might consider using :attr:Handle.loop_count instead.
'''
return Factory
def lowpass(*argv):
'''lowpass(frequency, Q=0.5)
Creates a second order lowpass filter based on the transfer function H(s) = 1 / (s^2 + s/Q + 1)
Arguments:
@frequency (float): The cut off trequency of the lowpass.
@Q (float): Q factor of the lowpass.
@returns (Factory): The created Factory object.
'''
return Factory
def mix(*argv):
'''mix(factory)
Mixes two factories.
Arguments:
@factory (Factory): The factory to mix over the other.
@returns (Factory): The created Factory object.
Note: The two factories have to have the same specifications (channels and samplerate).
'''
return Factory
def pingpong(*argv):
'''pingpong()
Plays a factory forward and then backward.
This is like joining a factory with its reverse.
@returns (Factory): The created Factory object.
'''
return Factory
def pitch(*argv):
'''pitch(factor)
Changes the pitch of a factory with a specific factor.
Arguments:
@factor (float): The factor to change the pitch with.
@returns (Factory): The created Factory object.
Note: This is done by changing the sample rate of the underlying factory, which has to be an integer, so the factor value rounded and the factor may not be 100 % accurate.
'''
return Factory
def reverse(*argv):
'''reverse()
Plays a factory reversed.
@returns (Factory): The created Factory object.
Note: The factory has to have a finite length and has to be seekable. It's recommended to use this only with factories with fast and accurate seeking, which is not true for encoded audio files, such ones should be buffered using :meth:buffer before being played reversed... warning:: If seeking is not accurate in the underlying factory you'll likely hear skips/jumps/cracks.
'''
return Factory
def square(*argv):
'''square(threshold = 0)
Makes a square wave out of an audio wave by setting all samples with a amplitude >= threshold to 1, all <= -threshold to -1 and all between to 0.
Arguments:
@threshold (float): Threshold value over which an amplitude counts non-zero.
@returns (Factory): The created Factory object.
'''
return Factory
def volume(*argv):
'''volume(volume)
Changes the volume of a factory.
Arguments:
@volume (float): The new volume..
@returns (Factory): The created Factory object.
Note: Should be in the range [0, 1] to avoid clipping.
'''
return Factory
class Handle:
'''Handle objects are playback handles that can be used to control playback of a sound. If a sound is played back multiple times then there are as many handles.
'''
def pause(*argv):
'''pause()
Pauses playback.
@returns (bool): Whether the action succeeded.
'''
return bool
def resume(*argv):
'''resume()
Resumes playback.
@returns (bool): Whether the action succeeded.
'''
return bool
def stop(*argv):
'''stop()
Stops playback.
@returns (bool): Whether the action succeeded.
Note: This makes the handle invalid.
'''
return bool
attenuation = None
'''This factor is used for distance based attenuation of the source.
(seealso :attr:Device.distance_model)
'''
cone_angle_inner = None
'''The opening angle of the inner cone of the source. If the cone values of a source are set there are two (audible) cones with the apex at the :attr:location of the source and with infinite height, heading in the direction of the source's :attr:orientation.
In the inner cone the volume is normal. Outside the outer cone the volume will be :attr:cone_volume_outer and in the area between the volume will be interpolated linearly.
'''
cone_angle_outer = None
'''The opening angle of the outer cone of the source.
(seealso :attr:cone_angle_inner)
'''
cone_volume_outer = None
'''The volume outside the outer cone of the source.
(seealso :attr:cone_angle_inner)
'''
distance_maximum = None
'''The maximum distance of the source.
If the listener is further away the source volume will be 0.
(seealso :attr:Device.distance_model)
'''
distance_reference = None
'''The reference distance of the source.
At this distance the volume will be exactly :attr:volume.
(seealso :attr:Device.distance_model)
'''
keep = None
'''Whether the sound should be kept paused in the device when its end is reached.
This can be used to seek the sound to some position and start playback again.
.. warning:: If this is set to true and you forget stopping this equals a memory leak as the handle exists until the device is destroyed.
'''
location = None
'''The source's location in 3D space, a 3D tuple of floats.
'''
loop_count = None
'''The (remaining) loop count of the sound. A negative value indicates infinity.
'''
orientation = None
'''The source's orientation in 3D space as quaternion, a 4 float tuple.
'''
pitch = None
'''The pitch of the sound.
'''
position = None
'''The playback position of the sound in seconds.
'''
relative = None
'''Whether the source's location, velocity and orientation is relative or absolute to the listener.
'''
status = None
'''Whether the sound is playing, paused or stopped (=invalid).
'''
velocity = None
'''The source's velocity in 3D space, a 3D tuple of floats.
'''
volume = None
'''The volume of the sound.
'''
volume_maximum = None
'''The maximum volume of the source.
(seealso :attr:Device.distance_model)
'''
volume_minimum = None
'''The minimum volume of the source.
(seealso :attr:Device.distance_model)
'''
class error:
| {
"repo_name": "kabuku/blender-python",
"path": "blenderlib/aud.py",
"copies": "1",
"size": "16042",
"license": "mit",
"hash": 3885488376378045400,
"line_mean": 24.7366666667,
"line_max": 385,
"alpha_frac": 0.5864605411,
"autogenerated": false,
"ratio": 4.397478070175438,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5483938611275438,
"avg_score": null,
"num_lines": null
} |
"""Audio time-scale modification and scrambling via synchronous overlap-add.
2015-01-18 Dan Ellis dpwe@ee.columbia.edu
"""
from __future__ import print_function
import argparse
import numpy as np
import scipy.io.wavfile
def Solafs(input_waveform, frame_size, overlap_size, max_shift,
input_point_from_output_point_fn):
"""Perform synchronous overlap-add with fixed synthesis step.
Args:
input_waveform: np.array of <frames x channels> of source waveform.
frame_size: Number of points in each fixed synthesis step.
overlap_size: Number of points in overlap between successive
synthesis windows.
max_shift: Largest allowable shift away from the ideal input point
to inspect when searching for best alignment.
input_from_output_fn: To find the next window's worth of output
waveform we start from
ideal_input_point = input_point_from_output_point_fn(
current_output_point, input_length).
Returns:
output_waveform: Synthesized waveform, same number of channels
as input_waveform.
"""
print("frame_size=", frame_size, " overlap_size=", overlap_size,
"max_shift=", max_shift)
# Create the crossfade window.
crossfade_window_high_to_low = (0.5 * (1.0 +
np.cos(np.pi *
np.arange(overlap_size) /
overlap_size)))[:, np.newaxis]
print("crossfade_window.shape=", crossfade_window_high_to_low.shape)
# Initialize output waveform; we'll extend it dynamically.
input_frames = input_waveform.shape[0]
if len(input_waveform.shape) > 1:
channels = input_waveform.shape[1]
else:
input_waveform = np.reshape(input_waveform, (input_frames, 1))
channels = 1
output_waveform = np.empty(((frame_size*64), channels))
# Pre-fill first window
input_base_point = input_point_from_output_point_fn(0, input_frames)
output_waveform[:frame_size] = input_waveform[input_base_point :
input_base_point + frame_size]
# Loop through adding new windows from input.
current_output_point = frame_size
while True:
ideal_input_point = input_point_from_output_point_fn(current_output_point,
input_frames)
if ideal_input_point is None:
break
ideal_input_point = max(max_shift, ideal_input_point)
#print("current_output_point=", current_output_point,
# " ideal_input_point=", ideal_input_point)
aligned_input_point = _FindBestAlignmentConv(
output_waveform[current_output_point - overlap_size :
current_output_point],
input_waveform[ideal_input_point - max_shift :
ideal_input_point + max_shift + overlap_size])
if aligned_input_point is None:
break
else:
aligned_input_point += ideal_input_point - max_shift
#print("aligned_input_point=", aligned_input_point)
if aligned_input_point + frame_size > input_frames:
break
output_overlap_range = np.arange(
current_output_point - overlap_size, current_output_point)
if current_output_point + frame_size > output_waveform.shape[0]:
# Double the length of output_waveform if it filled up.
output_waveform = np.vstack([
output_waveform, np.empty((output_waveform.shape[0], channels))])
# Crossfade into region of overlap.
output_waveform[output_overlap_range] = (
crossfade_window_high_to_low * output_waveform[output_overlap_range] +
(1 - crossfade_window_high_to_low) *
input_waveform[aligned_input_point :
aligned_input_point + overlap_size])
# Copy across remainder of new frame
output_waveform[current_output_point :
current_output_point + frame_size - overlap_size] = (
input_waveform[aligned_input_point + overlap_size :
aligned_input_point + frame_size])
current_output_point += frame_size - overlap_size
return output_waveform[:current_output_point]
def _InputPointFromOutputPointFnTimeScaling(output_duration_ratio):
"""Return function that can be used as input_point_from_output_point_fn."""
return (lambda output_point, input_length:
None if output_point > output_duration_ratio * input_length
else int(round(output_point / output_duration_ratio)))
def _InputPointFromOutputPointFnTimeBlur(blur_radius):
"""Return function that can be used as input_point_from_output_point_fn."""
return (lambda output_point, input_length:
None if output_point > input_length
else min(input_length, max(0, int(round(output_point +
blur_radius *
np.random.randn(1))))))
def _CosineSimilarity(vec_a, vec_b):
"""Calculate cosine similarity between two equal-sized vectors."""
return np.sum(vec_a * vec_b) / np.sqrt(np.sum(vec_a**2)*np.sum(vec_b**2))
def _FindBestAlignmentSlow(overlap_waveform, source_waveform,
alignment_fn=_CosineSimilarity):
"""Find start point of maximum correlation between overlap and source."""
#print("FBA: ola_wv.shape=", overlap_waveform.shape,
# "src_wv.shape=", source_waveform.shape)
overlap_size, channels = overlap_waveform.shape
num_shifts = source_waveform.shape[0] - overlap_size + 1
alignment_scores = np.empty(num_shifts)
for shift in np.arange(num_shifts):
alignment_scores[shift] = alignment_fn(
overlap_waveform, source_waveform[shift : shift + overlap_size])
return np.argmax(alignment_scores)
def _FindBestAlignmentConv(overlap_waveform, source_waveform):
"""Find best cosine distance via correlation - zillion times faster."""
len_ov, nchans = overlap_waveform.shape
cos_dist = np.zeros(len(source_waveform) - len_ov + 1)
for chan in range(nchans):
sum_sqs = np.cumsum(source_waveform[:, chan]**2)
denom = np.sqrt(sum_sqs[len_ov - 1:] - np.hstack([0., sum_sqs[:-len_ov]]))
cos_dist += np.correlate(source_waveform[:, chan], overlap_waveform[:, chan]) / denom / np.sqrt(sum(overlap_waveform[:, chan]**2))
return np.argmax(cos_dist)
def main(argv):
"""Main routine to modify a wav file using solafs."""
parser = argparse.ArgumentParser(description="Modify WAV files with solafs.")
parser.add_argument('input', type=str, help="input WAV file")
parser.add_argument('output', type=str, help="output WAV file")
parser.add_argument('--scale', type=float,
help="Factor scaling output duration.")
parser.add_argument('--win', type=float, default=0.025,
help="Window time in seconds.")
parser.add_argument('--hop', type=float, default=0.010,
help="Window hop advance in seconds.")
parser.add_argument('--max_shift', type=float, default=0.015,
help="Maximum time shift to synchronize.")
parser.add_argument('--shuffle', type=float, default=0.0,
help="SD of time over which to shuffle frames.")
parser.add_argument('--max_duration', type=float, default=0.0,
help="Truncate input at this duration.")
args = parser.parse_args()
window_sec = args.win
hop_sec = args.hop
max_shift = args.max_shift
shuffle_time = args.shuffle
time_factor = args.scale
max_duration = args.max_duration
sr, data = scipy.io.wavfile.read(args.input)
input_duration = len(data)/float(sr)
if max_duration > 0.0 and input_duration > max_duration:
data = data[:int(round(max_duration * sr))]
data = data.astype(float) / 32768
if shuffle_time > 0.0:
time_mapping_fn = _InputPointFromOutputPointFnTimeBlur(
int(round(shuffle_time * sr)))
else:
time_mapping_fn = _InputPointFromOutputPointFnTimeScaling(
time_factor)
data_out = Solafs(data,
int(round(window_sec * sr)),
int(round(hop_sec * sr)),
int(round(max_shift * sr)),
time_mapping_fn)
scipy.io.wavfile.write(args.output, sr, (data_out * 32768).astype(np.int16))
# Run the main function if called from the command line
if __name__ == "__main__":
import sys
main(sys.argv)
| {
"repo_name": "dpwe/solafs",
"path": "solafs.py",
"copies": "1",
"size": "8344",
"license": "mit",
"hash": -518806669664955840,
"line_mean": 42.4583333333,
"line_max": 134,
"alpha_frac": 0.6427372963,
"autogenerated": false,
"ratio": 3.6871409633230225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4829878259623022,
"avg_score": null,
"num_lines": null
} |
"""Audio tools and helpers
`segment` in function arguments stands for AudioSegment.
"""
import os.path
import random
import threading
from pydub.utils import make_chunks
from mutagen.easyid3 import EasyID3
import pyaudio
import pydub
SEGMENT_LENGTH_SECONDS = 35 # 35
MINIMUM_STARTING_POINT = 30 # skip at least 30 seconds from the beginning
MAXIMUM_STARTING_POINT = 90 # ...and no more than 90 seconds
_CURRENT_SONG_PLAYER = None
class PyaudioPlayer(threading.Thread):
"""Improved audio player, based on pydub.playback
This player is based on threading, with simple method to stop playing
without raising KeyboardInterruption.
"""
def __init__(self, segment, notifier=None):
super(PyaudioPlayer, self).__init__()
self.segment = segment
self._playing = True
self._notifier = notifier
def run(self):
player = pyaudio.PyAudio()
stream = player.open(
format=player.get_format_from_width(self.segment.sample_width),
channels=self.segment.channels,
rate=self.segment.frame_rate,
output=True,
)
# break audio into quarter-second chunks (to allows interrupts)
for i, chunk in enumerate(make_chunks(self.segment, 250)):
if self._notifier:
self._notifier(i*250)
if not self._playing:
break
stream.write(chunk._data)
stream.stop_stream()
stream.close()
player.terminate()
def stop(self):
"""Stops playing current song"""
self._playing = False
def play(segment, notifier=None):
"""Plays segment using global player
If another song is being played, it's stopped (and its player is
destroyed).
"""
global _CURRENT_SONG_PLAYER
stop()
_CURRENT_SONG_PLAYER = PyaudioPlayer(segment, notifier)
_CURRENT_SONG_PLAYER.start()
def stop():
"""Stops playing current song and destroys the player."""
global _CURRENT_SONG_PLAYER
if _CURRENT_SONG_PLAYER:
_CURRENT_SONG_PLAYER.stop()
_CURRENT_SONG_PLAYER = None
def load(filename):
"""Loads a track based on path
Note: only MP3 supported right now.
"""
return pydub.AudioSegment.from_mp3(filename)
def speed_up(segment, speed):
"""Speeds up the track, while keeping the same length
Note: pydub's speedup is SLOW.
"""
if speed <= 1:
raise ValueError('speed must not be lower than 1')
return segment.speedup(playback_speed=speed, chunk_size=80, crossfade=5)
def reverse(segment):
"""Reverses the track"""
return segment.reverse()
def frequency(segment, frequency):
"""Changes frequency
Lower frequency worsenes the quality.
"""
return segment.set_frame_rate(frequency)
def volume_changer(segment, slice_length=250):
"""Changes volume of the track on set interval
The track becomes something like this:
H L H L H L H L...
where H means high volume, and L stands for low (reduced) volume.
"""
# Split segment into equally sized slices
slices = make_chunks(segment, slice_length)
result = slices[0]
for i, s in enumerate(slices[1:]):
if i % 2 == 0:
s -= 15
result += s
return result
def pitch(segment, rate):
"""Changes the pitch, and also track's speed"""
return segment._spawn(
segment._data,
{'frame_rate': int(segment.frame_rate*rate)},
)
def tone_down(segment, rate):
"""Lowers track's tone while keeping the same speed
Basically does the same thing as pitch, but retains the speed.
Note: pydub's speedup is SLOW.
"""
result = segment._spawn(
segment._data,
{'frame_rate': int(segment.frame_rate*rate)},
)
return result.speedup(
playback_speed=round(1/rate, 2),
chunk_size=80,
crossfade=5,
)
def mix_segments(segments, slice_length=500):
"""Mixes two tracks together
Given two tracks 1 and 2, output becomes something like this:
1 2 1 2 1 2 1 2...
"""
segments_count = len(segments)
# Cut to the shortest segment
shortest_length = min(len(segment) for segment in segments)
segments = [segment[:shortest_length] for segment in segments]
slices = [make_chunks(segment, slice_length) for segment in segments]
first = slices[0][0]
for i, s in enumerate(slices[0][1:], start=1):
first += slices[i % segments_count][i]
return first
def cut(segment, length=None, min_start=None, max_start=None):
"""Selects random sample from the segment"""
if not length:
length = SEGMENT_LENGTH_SECONDS * 1000
start = random.randint(
min_start if min_start is not None else MINIMUM_STARTING_POINT * 1000,
max_start if max_start is not None else MAXIMUM_STARTING_POINT * 1000,
)
end = start + length
if len(segment) < end: # segment is too short?
end = len(segment) - 1
start = end - length
return segment[start:end]
def get_info(filename):
"""Returns tuple of string info about the song
Note: only MP3 supported right now.
"""
info = EasyID3(filename)
return (
', '.join(info['title']),
', '.join(info['artist']),
os.path.basename(filename),
)
def overlay(tracks):
"""Mixes multiple tracks together by layering one onto another"""
main_track = tracks[0]
for track in tracks[1:]:
main_track = main_track.overlay(track, loop=True)
return main_track
| {
"repo_name": "modrzew/ekoie",
"path": "audio.py",
"copies": "1",
"size": "5544",
"license": "mit",
"hash": -1015278968371964700,
"line_mean": 26.3103448276,
"line_max": 78,
"alpha_frac": 0.6383477633,
"autogenerated": false,
"ratio": 3.7868852459016393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9925233009201639,
"avg_score": 0,
"num_lines": 203
} |
"""Audio Utils"""
# Collected from various sources:
# License: BSD 3-clause
# Authors: Kyle Kastner
# LTSD routine from jfsantos (Joao Felipe Santos)
# Harvest, Cheaptrick, D4C, WORLD routines based on MATLAB code from M. Morise
# http://ml.cs.yamanashi.ac.jp/world/english/
# MGC code based on r9y9 (Ryuichi Yamamoto) MelGeneralizedCepstrums.jl
# Pieces also adapted from SPTK
import numpy as np
import scipy as sp
from numpy.lib.stride_tricks import as_strided
import scipy.signal as sg
from scipy.interpolate import interp1d
import wave
from scipy.cluster.vq import vq
from scipy import linalg, fftpack
from numpy.testing import assert_almost_equal
from scipy.linalg import svd
from scipy.io import wavfile
from scipy.signal import firwin
import zipfile
import tarfile
import os
import copy
import multiprocessing
from multiprocessing import Pool
import functools
import time
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib2 as urllib
def download(url, server_fname, local_fname=None, progress_update_percentage=5,
bypass_certificate_check=False):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
if bypass_certificate_check:
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
u = urllib.request.urlopen(url, context=ctx)
else:
u = urllib.request.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except TypeError:
print("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
print(("Downloading: %s Bytes: %s" % (server_fname, file_size)))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
print(status)
p += progress_update_percentage
def fetch_sample_speech_tapestry():
url = "https://www.dropbox.com/s/qte66a7haqspq2g/tapestry.wav?dl=1"
wav_path = "tapestry.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo? - just choose one channel
return fs, d
def fetch_sample_file(wav_path):
if not os.path.exists(wav_path):
raise ValueError("Unable to find file at path %s" % wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
if len(d.shape) > 1:
d = d[:, 0]
return fs, d
def fetch_sample_music():
url = "http://www.music.helsinki.fi/tmt/opetus/uusmedia/esim/"
url += "a2002011001-e02-16kHz.wav"
wav_path = "test.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
d = d[:, 0]
return fs, d
def fetch_sample_speech_fruit(n_samples=None):
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
wav_path = "audio.tar.gz"
if not os.path.exists(wav_path):
download(url, wav_path)
tf = tarfile.open(wav_path)
wav_names = [fname for fname in tf.getnames()
if ".wav" in fname.split(os.sep)[-1]]
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
f = tf.extractfile(wav_name)
fs, d = wavfile.read(f)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
return fs, speech
def fetch_sample_speech_eustace(n_samples=None):
"""
http://www.cstr.ed.ac.uk/projects/eustace/download.html
"""
# data
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_wav.zip"
wav_path = "eustace_wav.zip"
if not os.path.exists(wav_path):
download(url, wav_path)
# labels
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_labels.zip"
labels_path = "eustace_labels.zip"
if not os.path.exists(labels_path):
download(url, labels_path)
# Read wavfiles
# 16 kHz wav
zf = zipfile.ZipFile(wav_path, 'r')
wav_names = [fname for fname in zf.namelist()
if ".wav" in fname.split(os.sep)[-1]]
fs = 16000
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
wav_str = zf.read(wav_name)
d = np.frombuffer(wav_str, dtype=np.int16)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
zf = zipfile.ZipFile(labels_path, 'r')
label_names = [fname for fname in zf.namelist()
if ".lab" in fname.split(os.sep)[-1]]
labels = []
print("Loading label files...")
for label_name in label_names[:n_samples]:
label_file_str = zf.read(label_name)
labels.append(label_file_str)
return fs, speech
def stft(X, fftsize=128, step="half", mean_normalize=True, real=False,
compute_onesided=True):
"""
Compute STFT for 1D real valued input X
"""
if real:
local_fft = fftpack.rfft
cut = -1
else:
local_fft = fftpack.fft
cut = None
if compute_onesided:
cut = fftsize // 2 + 1
if mean_normalize:
X -= X.mean()
if step == "half":
X = halfoverlap(X, fftsize)
else:
X = overlap(X, fftsize, step)
size = fftsize
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
X = X * win[None]
X = local_fft(X)[:, :cut]
return X
def istft(X, fftsize=128, step="half", wsola=False, mean_normalize=True,
real=False, compute_onesided=True):
"""
Compute ISTFT for STFT transformed X
"""
if real:
local_ifft = fftpack.irfft
X_pad = np.zeros((X.shape[0], X.shape[1] + 1)) + 0j
X_pad[:, :-1] = X
X = X_pad
else:
local_ifft = fftpack.ifft
if compute_onesided:
X_pad = np.zeros((X.shape[0], 2 * X.shape[1])) + 0j
X_pad[:, :fftsize // 2 + 1] = X
X_pad[:, fftsize // 2 + 1:] = 0
X = X_pad
X = local_ifft(X).astype("float64")
if step == "half":
X = invert_halfoverlap(X)
else:
X = overlap_add(X, step, wsola=wsola)
if mean_normalize:
X -= np.mean(X)
return X
def mdct_slow(X, dctsize=128):
    """
    Naive (matrix-multiply) MDCT with a sine window and 50% overlap.
    """
    M = dctsize
    N = 2 * dctsize
    phase_shift = (M + 1) / 2
    frames = sine_window(halfoverlap(X, N))
    n, k = np.meshgrid(np.arange(N), np.arange(M))
    # Transposed because frames are rows ("samples as rows" convention)
    basis = np.cos(np.pi * (n + phase_shift) * (k + 0.5) / M).T
    return np.dot(frames, basis)
def imdct_slow(X, dctsize=128):
    """
    Naive inverse MDCT matching ``mdct_slow``.
    """
    M = dctsize
    N = 2 * dctsize
    phase_shift = (M + 1) / 2
    scale = N / 4
    n, k = np.meshgrid(np.arange(N), np.arange(M))
    # The inverse basis is *not* transposed
    basis = np.cos(np.pi * (n + phase_shift) * (k + 0.5) / M)
    recon = sine_window(np.dot(X, basis) / scale)
    return invert_halfoverlap(recon)
def nsgcwin(fmin, fmax, n_bins, fs, signal_len, gamma):
    """
    Nonstationary Gabor window calculation

    Parameters
    ----------
    fmin : float
        Lowest analysis frequency, Hz.
    fmax : float
        Highest analysis frequency, Hz.
    n_bins : int
        Bins per octave.
    fs : int
        Sample rate.
    signal_len : int
        Length of the signal to be analyzed, in samples.
    gamma : float
        Additive bandwidth offset per filter.

    Returns
    -------
    multiscale : list of ndarray
        One Hann-style analysis window per band.
    shift : ndarray of int32
        Frequency-bin shifts between successive windows.
    M : ndarray of int32
        Window length per band (>= 4).

    References
    ----------
    Velasco G. A., Holighaus N., Dorfler M., Grill T.
    Constructing an invertible constant-Q transform with nonstationary Gabor
    frames, Proceedings of the 14th International Conference on Digital
    Audio Effects (DAFx 11), Paris, France, 2011

    Holighaus N., Dorfler M., Velasco G. A. and Grill T.
    A framework for invertible, real-time constant-Q transforms, submitted.

    Original matlab code copyright follows:
    AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
    COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
    http://nuhag.eu/
    Permission is granted to modify and re-distribute this
    code in any manner as long as this notice is preserved.
    All standard disclaimers apply.
    """
    # use a hanning window
    # no fractional shifts
    fftres = fs / signal_len
    fmin = float(fmin)
    fmax = float(fmax)
    gamma = float(gamma)
    nyq = fs / 2.
    b = np.floor(n_bins * np.log2(fmax / fmin))
    # geometrically spaced center frequencies, fmin .. fmax
    fbas = fmin * 2 ** (np.arange(b + 1) / float(n_bins))
    Q = 2 ** (1. / n_bins) - 2 ** (-1. / n_bins)
    cqtbw = Q * fbas + gamma
    cqtbw = cqtbw.ravel()
    # drop bands whose support crosses Nyquist
    maxidx = np.where(fbas + cqtbw / 2. > nyq)[0]
    if len(maxidx) > 0:
        # replicate bug in MATLAB version...
        # or is it a feature
        if sum(maxidx) == 0:
            first = len(cqtbw) - 1
        else:
            first = maxidx[0]
        fbas = fbas[:first]
        cqtbw = cqtbw[:first]
    # drop bands whose support crosses 0 Hz
    minidx = np.where(fbas - cqtbw / 2. < 0)[0]
    if len(minidx) > 0:
        fbas = fbas[minidx[-1] + 1:]
        cqtbw = cqtbw[minidx[-1] + 1:]
    # mirror the band layout: [DC, positive bands, Nyquist, negative bands]
    fbas_len = len(fbas)
    fbas_new = np.zeros((2 * (len(fbas) + 1)))
    fbas_new[1:len(fbas) + 1] = fbas
    fbas = fbas_new
    fbas[fbas_len + 1] = nyq
    fbas[fbas_len + 2:] = fs - fbas[1:fbas_len + 1][::-1]
    bw = np.zeros_like(fbas)
    bw[0] = 2 * fmin
    bw[1:len(cqtbw) + 1] = cqtbw
    bw[len(cqtbw) + 1] = fbas[fbas_len + 2] - fbas[fbas_len]
    bw[-len(cqtbw):] = cqtbw[::-1]
    # convert Hz -> FFT bins
    bw = bw / fftres
    fbas = fbas / fftres
    posit = np.zeros_like(fbas)
    posit[:fbas_len + 2] = np.floor(fbas[:fbas_len + 2])
    posit[fbas_len + 2:] = np.ceil(fbas[fbas_len + 2:])
    base_shift = -posit[-1] % signal_len
    shift = np.zeros_like(posit).astype("int32")
    shift[1:] = (posit[1:] - posit[:-1]).astype("int32")
    shift[0] = base_shift
    # integer window lengths from here on: values are whole after np.round,
    # and modern numpy requires integer sizes / slice bounds below
    bw = np.round(bw).astype("int32")
    M = bw
    min_win = 4
    for ii in range(len(bw)):
        if bw[ii] < min_win:
            bw[ii] = min_win
            M[ii] = bw[ii]

    def _win(numel):
        # Hann-style window built from a symmetric ramp of length numel
        if numel % 2 == 0:
            s1 = np.arange(0, .5, 1. / numel)
            if len(s1) != numel // 2:
                # edge case with small floating point numbers...
                s1 = s1[:-1]
            s2 = np.arange(-.5, 0, 1. / numel)
            if len(s2) != numel // 2:
                # edge case with small floating point numbers...
                s2 = s2[:-1]
            x = np.concatenate((s1, s2))
        else:
            s1 = np.arange(0, .5, 1. / numel)
            s2 = np.arange(-.5 + .5 / numel, 0, 1. / numel)
            if len(s2) != numel // 2:  # assume integer truncate 27 // 2 = 13
                s2 = s2[:-1]
            x = np.concatenate((s1, s2))
        assert len(x) == numel
        g = .5 + .5 * np.cos(2 * np.pi * x)
        return g

    multiscale = [_win(bi) for bi in bw]
    # (the original also recomputed bw = bwfac * np.ceil(M / bwfac) here with
    # bwfac == 1; the result was never used, so the dead store is dropped)
    for kk in [0, fbas_len + 1]:
        if M[kk] > M[kk + 1]:
            # DC / Nyquist window is wider than its neighbor: flat top with
            # neighbor-sized tapers centered inside it
            multiscale[kk] = np.ones(int(M[kk])).astype(multiscale[0].dtype)
            # int casts: float slice bounds raise TypeError on modern numpy
            i1 = int(np.floor(M[kk] / 2) - np.floor(M[kk + 1] / 2))
            i2 = int(np.floor(M[kk] / 2) + np.ceil(M[kk + 1] / 2))
            # Very rarely, gets an off by 1 error? Seems to be at the end...
            # for now, slice
            multiscale[kk][i1:i2] = _win(M[kk + 1])
            multiscale[kk] = multiscale[kk] / np.sqrt(M[kk])
    return multiscale, shift, M
def nsgtf_real(X, multiscale, shift, window_lens):
    """
    Nonstationary Gabor Transform for real values

    Parameters
    ----------
    X : ndarray, shape=(signal_len,)
        Real input signal (single channel only).
    multiscale : list of ndarray
        Analysis windows, one per band (e.g. from ``nsgcwin``).
    shift : ndarray of int
        Frequency-bin shifts between successive windows.
    window_lens : ndarray of int
        FFT length per band; must be >= the window length for that band.

    Returns
    -------
    c : list of ndarray
        Complex subband coefficients, one array per retained band.

    References
    ----------
    Velasco G. A., Holighaus N., Dorfler M., Grill T.
    Constructing an invertible constant-Q transform with nonstationary Gabor
    frames, Proceedings of the 14th International Conference on Digital
    Audio Effects (DAFx 11), Paris, France, 2011

    Holighaus N., Dorfler M., Velasco G. A. and Grill T.
    A framework for invertible, real-time constant-Q transforms, submitted.

    Original matlab code copyright follows:
    AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
    COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
    http://nuhag.eu/
    Permission is granted to modify and re-distribute this
    code in any manner as long as this notice is preserved.
    All standard disclaimers apply.
    """
    # This will break with multichannel input
    signal_len = len(X)
    X_fft = np.fft.fft(X)
    fill = np.sum(shift) - signal_len
    if fill > 0:
        # Pad the spectrum so the circular window indexing below stays in
        # range. Bugfix: this previously padded by the `shift` *array*
        # (building a huge N-d array) and used a real-valued buffer that
        # silently dropped the imaginary part.
        X_fft_tmp = np.zeros((signal_len + fill,), dtype=X_fft.dtype)
        X_fft_tmp[:len(X_fft)] = X_fft
        X_fft = X_fft_tmp
    posit = np.cumsum(shift) - shift[0]
    scale_lens = np.array([len(m) for m in multiscale])
    # keep only the bands centered in the lower half of the padded spectrum
    N = np.where(posit - np.floor(scale_lens) <= (signal_len + fill) / 2)[0][-1]
    c = []
    # c[0] is almost exact
    for ii in range(N):
        # fftshift-style reorder of the window samples
        idx_l = np.arange(np.ceil(scale_lens[ii] / 2), scale_lens[ii])
        idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
        idx = np.concatenate((idx_l, idx_r))
        idx = idx.astype("int32")
        subwin_range = posit[ii] + np.arange(-np.floor(scale_lens[ii] / 2),
                                             np.ceil(scale_lens[ii] / 2))
        win_range = subwin_range % (signal_len + fill)
        win_range = win_range.astype("int32")
        if window_lens[ii] < scale_lens[ii]:
            raise ValueError("Not handling 'not enough channels' case")
        else:
            # place the windowed spectrum at the edges of a zeroed buffer
            temp = np.zeros((window_lens[ii],)).astype(X_fft.dtype)
            temp_idx_l = np.arange(len(temp) - np.floor(scale_lens[ii] / 2),
                                   len(temp))
            temp_idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
            temp_idx = np.concatenate((temp_idx_l, temp_idx_r))
            temp_idx = temp_idx.astype("int32")
            temp[temp_idx] = X_fft[win_range] * multiscale[ii][idx]
            fs_new_bins = window_lens[ii]
            fk_bins = posit[ii]
            # realign so the band's center frequency lands at bin 0
            displace = fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins
            displace = displace.astype("int32")
            temp = np.roll(temp, displace)
        c.append(np.fft.ifft(temp))
    return c
def nsdual(multiscale, shift, window_lens):
    """
    Calculation of nonstationary inverse gabor filters

    Parameters
    ----------
    multiscale : list of ndarray
        Analysis windows (e.g. from ``nsgcwin``). Note: the elements of
        this list are overwritten in place with the dual windows.
    shift : ndarray of int
        Frequency-bin shifts between successive windows.
    window_lens : ndarray of int
        FFT length per band.

    Returns
    -------
    dual_multiscale : list of ndarray
        Dual (synthesis) windows, one per band.

    References
    ----------
    Velasco G. A., Holighaus N., Dorfler M., Grill T.
    Constructing an invertible constant-Q transform with nonstationary Gabor
    frames, Proceedings of the 14th International Conference on Digital
    Audio Effects (DAFx 11), Paris, France, 2011

    Holighaus N., Dorfler M., Velasco G. A. and Grill T.
    A framework for invertible, real-time constant-Q transforms, submitted.

    Original matlab code copyright follows:
    AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
    COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
    http://nuhag.eu/
    Permission is granted to modify and re-distribute this
    code in any manner as long as this notice is preserved.
    All standard disclaimers apply.
    """
    N = len(shift)
    posit = np.cumsum(shift)
    seq_len = posit[-1]
    posit = posit - shift[0]
    # frame diagonal: sum of squared, length-weighted windows over the ring
    diagonal = np.zeros((seq_len,))
    win_range = []
    for ii in range(N):
        filt_len = len(multiscale[ii])
        idx = np.arange(-np.floor(filt_len / 2), np.ceil(filt_len / 2))
        win_range.append((posit[ii] + idx) % seq_len)
        subdiag = window_lens[ii] * np.fft.fftshift(multiscale[ii]) ** 2
        # builtin int: the np.int alias was removed from numpy
        ind = win_range[ii].astype(int)
        diagonal[ind] = diagonal[ind] + subdiag
    dual_multiscale = multiscale
    for ii in range(N):
        ind = win_range[ii].astype(int)
        # divide each window by the frame diagonal on its support
        dual_multiscale[ii] = np.fft.ifftshift(
            np.fft.fftshift(dual_multiscale[ii]) / diagonal[ind])
    return dual_multiscale
def nsgitf_real(c, c_dc, c_nyq, multiscale, shift):
    """
    Nonstationary Inverse Gabor Transform on real valued signal

    Parameters
    ----------
    c : list of ndarray
        Subband coefficients (without DC and Nyquist bands).
    c_dc : ndarray
        DC band coefficients.
    c_nyq : ndarray
        Nyquist band coefficients.
    multiscale : list of ndarray
        Synthesis (dual) windows, one per band including DC and Nyquist.
    shift : ndarray of int
        Frequency-bin shifts between successive windows.

    Returns
    -------
    t_out : ndarray of float64
        Reconstructed time-domain signal.

    References
    ----------
    Velasco G. A., Holighaus N., Dorfler M., Grill T.
    Constructing an invertible constant-Q transform with nonstationary Gabor
    frames, Proceedings of the 14th International Conference on Digital
    Audio Effects (DAFx 11), Paris, France, 2011

    Holighaus N., Dorfler M., Velasco G. A. and Grill T.
    A framework for invertible, real-time constant-Q transforms, submitted.

    Original matlab code copyright follows:
    AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
    COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
    http://nuhag.eu/
    Permission is granted to modify and re-distribute this
    code in any manner as long as this notice is preserved.
    All standard disclaimers apply.
    """
    # reassemble band list: [DC, subbands..., Nyquist]
    c_l = []
    c_l.append(c_dc)
    c_l.extend([ci for ci in c])
    c_l.append(c_nyq)
    posit = np.cumsum(shift)
    seq_len = posit[-1]
    posit -= shift[0]
    out = np.zeros((seq_len,)).astype(c_l[1].dtype)
    for ii in range(len(c_l)):
        filt_len = len(multiscale[ii])
        win_range = posit[ii] + np.arange(-np.floor(filt_len / 2),
                                          np.ceil(filt_len / 2))
        # builtin int: the np.int alias was removed from numpy
        win_range = (win_range % seq_len).astype(int)
        temp = np.fft.fft(c_l[ii]) * len(c_l[ii])
        fs_new_bins = len(c_l[ii])
        fk_bins = posit[ii]
        # undo the per-band frequency realignment applied by the forward pass
        displace = int(fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins)
        temp = np.roll(temp, -displace)
        l = np.arange(len(temp) - np.floor(filt_len / 2), len(temp))
        r = np.arange(np.ceil(filt_len / 2))
        temp_idx = (np.concatenate((l, r)) % len(temp)).astype(int)
        temp = temp[temp_idx]
        lf = np.arange(filt_len - np.floor(filt_len / 2), filt_len)
        rf = np.arange(np.ceil(filt_len / 2))
        filt_idx = np.concatenate((lf, rf)).astype(int)
        m = multiscale[ii][filt_idx]
        # overlap-add the windowed band back onto the spectrum
        out[win_range] = out[win_range] + m * temp
    # enforce Hermitian symmetry so the time signal comes out real
    # (int cast: a float slice bound raises TypeError on Python 3)
    nyq_bin = int(np.floor(seq_len / 2) + 1)
    out_idx = np.arange(
        nyq_bin - np.abs(1 - seq_len % 2) - 1, 0, -1).astype(int)
    out[nyq_bin:] = np.conj(out[out_idx])
    t_out = np.real(np.fft.ifft(out)).astype(np.float64)
    return t_out
def cqt(X, fs, n_bins=48, fmin=27.5, fmax="nyq", gamma=20):
    """
    Constant Q Transform

    Parameters
    ----------
    X : ndarray
        Input signal (1D).
    fs : int
        Sample rate of X.
    n_bins : int, optional (default=48)
        Bins per octave, forwarded to ``nsgcwin``.
    fmin : float, optional (default=27.5)
        Lowest analysis frequency, Hz.
    fmax : float or "nyq", optional (default="nyq")
        Highest analysis frequency, Hz; "nyq" means fs / 2.
    gamma : float, optional (default=20)
        Additive bandwidth offset, forwarded to ``nsgcwin``.

    Returns
    -------
    c : ndarray
        Stacked complex subband coefficients, one row per band.
    c_dc : ndarray
        DC band coefficients.
    c_nyq : ndarray
        Nyquist band coefficients.
    multiscale, shift, window_lens
        Window/shift metadata required by ``icqt`` to invert the transform.

    References
    ----------
    Velasco G. A., Holighaus N., Dorfler M., Grill T.
    Constructing an invertible constant-Q transform with nonstationary Gabor
    frames, Proceedings of the 14th International Conference on Digital
    Audio Effects (DAFx 11), Paris, France, 2011

    Holighaus N., Dorfler M., Velasco G. A. and Grill T.
    A framework for invertible, real-time constant-Q transforms, submitted.

    Original matlab code copyright follows:
    AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
    COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
    http://nuhag.eu/
    Permission is granted to modify and re-distribute this
    code in any manner as long as this notice is preserved.
    All standard disclaimers apply.
    """
    if fmax == "nyq":
        fmax = fs / 2.
    multiscale, shift, window_lens = nsgcwin(fmin, fmax, n_bins, fs,
                                             len(X), gamma)
    # center frequencies (Hz) implied by the cumulative shifts;
    # NOTE(review): fbas is computed but not used further in this function
    fbas = fs * np.cumsum(shift[1:]) / len(X)
    fbas = fbas[:len(window_lens) // 2 - 1]
    bins = window_lens.shape[0] // 2 - 1
    # overwrite positive-band lengths 1..bins with the single length at
    # bins + 2, then mirror onto the negative-frequency half
    # (this mutates the caller-visible window_lens array in place)
    window_lens[1:bins + 1] = window_lens[bins + 2]
    window_lens[bins + 2:] = window_lens[1:bins + 1][::-1]
    # per-band normalization, mirrored the same way as window_lens
    norm = 2. * window_lens[:bins + 2] / float(len(X))
    norm = np.concatenate((norm, norm[1:-1][::-1]))
    multiscale = [norm[ii] * multiscale[ii] for ii in range(2 * (bins + 1))]
    c = nsgtf_real(X, multiscale, shift, window_lens)
    # first band is DC, last is Nyquist; the rest stack into a 2D array
    c_dc = c[0]
    c_nyq = c[-1]
    c_sub = c[1:-1]
    c = np.vstack(c_sub)
    return c, c_dc, c_nyq, multiscale, shift, window_lens
def icqt(X_cq, c_dc, c_nyq, multiscale, shift, window_lens):
    """
    Inverse constant Q Transform.

    Builds the dual (synthesis) windows for the analysis windows used by
    ``cqt`` and runs the inverse nonstationary Gabor transform.

    References
    ----------
    Velasco G. A., Holighaus N., Dorfler M., Grill T.
    Constructing an invertible constant-Q transform with nonstationary Gabor
    frames, Proceedings of the 14th International Conference on Digital
    Audio Effects (DAFx 11), Paris, France, 2011

    Holighaus N., Dorfler M., Velasco G. A. and Grill T.
    A framework for invertible, real-time constant-Q transforms, submitted.

    Original matlab code copyright follows:
    AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
    COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
    http://nuhag.eu/
    Permission is granted to modify and re-distribute this
    code in any manner as long as this notice is preserved.
    All standard disclaimers apply.
    """
    dual_windows = nsdual(multiscale, shift, window_lens)
    return nsgitf_real(X_cq, c_dc, c_nyq, dual_windows, shift)
def rolling_mean(X, window_size):
    """Moving average of 1D array X over windows of ``window_size`` samples."""
    kernel = np.ones(window_size) / float(window_size)
    return np.correlate(X, kernel, 'valid')
def rolling_window(X, window_size):
    """
    All length-``window_size`` sliding windows of a 1D array, as rows of a
    zero-copy strided view.
    """
    out_shape = X.shape[:-1] + (X.shape[-1] - window_size + 1, window_size)
    out_strides = X.strides + (X.strides[-1],)
    return np.lib.stride_tricks.as_strided(X, shape=out_shape,
                                           strides=out_strides)
def voiced_unvoiced(X, window_size=256, window_step=128, copy=True):
    """
    Voiced unvoiced detection from a raw signal

    Based on code from:
        https://www.clear.rice.edu/elec532/PROJECTS96/lpc/code.html

    Other references:
        http://www.seas.ucla.edu/spapl/code/harmfreq_MOLRT_VAD.m

    Parameters
    ----------
    X : ndarray
        Raw input signal

    window_size : int, optional (default=256)
        The window size to use, in samples.

    window_step : int, optional (default=128)
        How far the window steps after each calculation, in samples.

    copy : bool, optional (default=True)
        Whether to make a copy of the input array or allow in place changes.

    Returns
    -------
    voiced_unvoiced : ndarray, shape=(n_windows, 1)
        1 where the window was judged voiced, 0 otherwise.

    period : ndarray, shape=(n_windows, 1)
        Estimated pitch period (in samples) for voiced windows, else 0.
    """
    X = np.array(X, copy=copy)
    if len(X.shape) < 2:
        X = X[None]
    n_points = X.shape[1]
    n_windows = n_points // window_step
    # Padding
    pad_sizes = [(window_size - window_step) // 2,
                 window_size - window_step // 2]
    # TODO: Handling for odd window sizes / steps
    X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
                   np.zeros((X.shape[0], pad_sizes[1]))))
    clipping_factor = 0.6
    # low-pass filter applied before the autocorrelation pitch search
    b, a = sg.butter(10, np.pi * 9 / 40)
    voiced_unvoiced = np.zeros((n_windows, 1))
    period = np.zeros((n_windows, 1))
    for window in range(max(n_windows - 1, 1)):
        XX = X.ravel()[window * window_step + np.arange(window_size)]
        # NOTE(review): sg.hamming was removed in SciPy 1.13 in favor of
        # sg.windows.hamming -- confirm the pinned scipy supports this alias
        XX *= sg.hamming(len(XX))
        XX = sg.lfilter(b, a, XX)
        left_max = np.max(np.abs(XX[:len(XX) // 3]))
        right_max = np.max(np.abs(XX[-len(XX) // 3:]))
        clip_value = clipping_factor * np.min([left_max, right_max])
        # NOTE(review): np.clip is called with min > max here; classic
        # center-clipping would instead zero |XX| < clip_value. As written
        # this flattens the window to -clip_value -- verify intent.
        XX_clip = np.clip(XX, clip_value, -clip_value)
        XX_corr = np.correlate(XX_clip, XX_clip, mode='full')
        center = np.argmax(XX_corr)
        right_XX_corr = XX_corr[center:]
        prev_window = max([window - 1, 0])
        if voiced_unvoiced[prev_window] > 0:
            # Want it to be harder to turn off than turn on
            strength_factor = .29
        else:
            strength_factor = .3
        # skip past the main autocorrelation lobe before peak picking
        start = np.where(right_XX_corr < .3 * XX_corr[center])[0]
        # 20 is hardcoded but should depend on samplerate?
        try:
            start = np.max([20, start[0]])
        except IndexError:
            start = 20
        search_corr = right_XX_corr[start:]
        index = np.argmax(search_corr)
        second_max = search_corr[index]
        # a strong secondary autocorrelation peak indicates periodicity
        if (second_max > strength_factor * XX_corr[center]):
            voiced_unvoiced[window] = 1
            period[window] = start + index - 1
        else:
            voiced_unvoiced[window] = 0
            period[window] = 0
    return np.array(voiced_unvoiced), np.array(period)
def lpc_analysis(X, order=8, window_step=128, window_size=2 * 128,
                 emphasis=0.9, voiced_start_threshold=.9,
                 voiced_stop_threshold=.6, truncate=False, copy=True):
    """
    Extract LPC coefficients from a signal

    Based on code from:
        http://labrosa.ee.columbia.edu/matlab/sws/

    Parameters
    ----------
    X : ndarray
        Signals to extract LPC coefficients from

    order : int, optional (default=8)
        Order of the LPC coefficients. For speech, use the general rule that
        the order is two times the expected number of formants plus 2.
        This can be formulated as 2 + 2 * (fs // 2000). For approx. signals
        with fs = 7000, this is 8 coefficients - 2 + 2 * (7000 // 2000).

    window_step : int, optional (default=128)
        The size (in samples) of the space between each window

    window_size : int, optional (default=2 * 128)
        The size of each window (in samples) to extract coefficients over

    emphasis : float, optional (default=0.9)
        The emphasis coefficient to use for filtering

    truncate : bool, optional (default=False)
        Whether to cut the data at the last window or do zero padding.

    copy : bool, optional (default=True)
        Whether to copy the input X or modify in place

    Returns
    -------
    lp_coefficients : ndarray
        lp coefficients to describe the frame

    per_frame_gain : ndarray
        calculated gain for each frame

    residual_excitation : ndarray
        leftover energy which is not described by lp coefficents and gain

    References
    ----------
    D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
    Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
    """
    X = np.array(X, copy=copy)
    if len(X.shape) < 2:
        X = X[None]
    n_points = X.shape[1]
    n_windows = int(n_points // window_step)
    if not truncate:
        pad_sizes = [(window_size - window_step) // 2,
                     window_size - window_step // 2]
        # TODO: Handling for odd window sizes / steps
        X = np.hstack((np.zeros((X.shape[0], int(pad_sizes[0]))), X,
                       np.zeros((X.shape[0], int(pad_sizes[1])))))
    else:
        pad_sizes = [0, 0]
        X = X[0, :n_windows * window_step]
    lp_coefficients = np.zeros((n_windows, order + 1))
    per_frame_gain = np.zeros((n_windows, 1))
    residual_excitation = np.zeros(
        int(((n_windows - 1) * window_step + window_size)))
    # Pre-emphasis high-pass filter
    X = sg.lfilter([1, -emphasis], 1, X)
    # stride_tricks.as_strided?
    autocorr_X = np.zeros((n_windows, int(2 * window_size - 1)))
    for window in range(max(n_windows - 1, 1)):
        wtws = int(window * window_step)
        XX = X.ravel()[wtws + np.arange(window_size, dtype="int32")]
        # sg.windows.hann replaces the sg.hanning alias removed in SciPy 1.13
        WXX = XX * sg.windows.hann(window_size)
        autocorr_X[window] = np.correlate(WXX, WXX, mode='full')
        center = np.argmax(autocorr_X[window])
        RXX = autocorr_X[window,
                         np.arange(center, window_size + order, dtype="int32")]
        # Yule-Walker-style solve via the Toeplitz autocorrelation matrix
        R = linalg.toeplitz(RXX[:-1])
        solved_R = linalg.pinv(R).dot(RXX[1:])
        filter_coefs = np.hstack((1, -solved_R))
        residual_signal = sg.lfilter(filter_coefs, 1, WXX)
        gain = np.sqrt(np.mean(residual_signal ** 2))
        lp_coefficients[window] = filter_coefs
        per_frame_gain[window] = gain
        assign_range = wtws + np.arange(window_size, dtype="int32")
        residual_excitation[assign_range] += residual_signal / gain
    # Throw away first part in overlap mode for proper synthesis
    residual_excitation = residual_excitation[int(pad_sizes[0]):]
    return lp_coefficients, per_frame_gain, residual_excitation
def lpc_to_frequency(lp_coefficients, per_frame_gain):
    """
    Extract resonant frequencies and magnitudes from LPC coefficients and
    gains, frame by frame.

    Parameters
    ----------
    lp_coefficients : ndarray, shape=(n_frames, order + 1)
        LPC coefficients, such as those calculated by ``lpc_analysis``
    per_frame_gain : ndarray, shape=(n_frames, 1)
        Gain calculated for each frame, such as those calculated
        by ``lpc_analysis``

    Returns
    -------
    frequencies : ndarray
        Resonant frequencies calculated from LPC coefficients and gain.
        Returned frequencies are from 0 to 2 * pi
    magnitudes : ndarray
        Magnitudes of resonant frequencies

    References
    ----------
    D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
    Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
    """
    n_frames, n_coefs = lp_coefficients.shape
    n_resonances = (n_coefs - 1) // 2
    frame_frequencies = np.zeros((n_frames, n_resonances))
    frame_magnitudes = np.zeros_like(frame_frequencies)
    for frame in range(n_frames):
        coefs = lp_coefficients[frame]
        gain = per_frame_gain[frame]
        poles = np.roots(np.hstack(([1], coefs[1:])))
        # np.roots ordering differs from MATLAB; dedupe by absolute angle
        freqs, first_seen = np.unique(np.abs(np.angle(poles)),
                                      return_index=True)
        # drop zero-frequency (real-axis) entries
        keep = np.where(freqs > 0)[0]
        freqs = freqs[keep]
        first_seen = first_seen[keep]
        mags = gain / (1. - np.abs(poles))
        order_idx = np.argsort(freqs)
        frame_frequencies[frame, :len(order_idx)] = freqs[order_idx]
        frame_magnitudes[frame, :len(order_idx)] = mags[order_idx]
    return frame_frequencies, frame_magnitudes
def lpc_to_lsf(all_lpc):
if len(all_lpc.shape) < 2:
all_lpc = all_lpc[None]
order = all_lpc.shape[1] - 1
all_lsf = np.zeros((len(all_lpc), order))
for i in range(len(all_lpc)):
lpc = all_lpc[i]
lpc1 = np.append(lpc, 0)
lpc2 = lpc1[::-1]
sum_filt = lpc1 + lpc2
diff_filt = lpc1 - lpc2
if order % 2 != 0:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, -1])
deconv_sum, _ = sg.deconvolve(sum_filt, [1, 1])
roots_diff = np.roots(deconv_diff)
roots_sum = np.roots(deconv_sum)
angle_diff = np.angle(roots_diff[::2])
angle_sum = np.angle(roots_sum[::2])
lsf = np.sort(np.hstack((angle_diff, angle_sum)))
if len(lsf) != 0:
all_lsf[i] = lsf
return np.squeeze(all_lsf)
def lsf_to_lpc(all_lsf):
    """
    Convert line spectral frequency rows back to LPC coefficients.

    Inverse of ``lpc_to_lsf``: places unit-circle roots at the given angles,
    rebuilds the symmetric / antisymmetric polynomials, and averages them.
    """
    if len(all_lsf.shape) < 2:
        all_lsf = all_lsf[None]
    order = all_lsf.shape[1]
    all_lpc = np.zeros((len(all_lsf), order + 1))
    for row, lsf in enumerate(all_lsf):
        circle_roots = np.exp(1j * lsf)
        sum_roots = circle_roots[::2]
        diff_roots = circle_roots[1::2]
        # pair each root with its conjugate so the polynomials come out real
        sum_roots = np.hstack((sum_roots, np.conj(sum_roots)))
        diff_roots = np.hstack((diff_roots, np.conj(diff_roots)))
        sum_filt = np.poly(sum_roots)
        diff_filt = np.poly(diff_roots)
        if order % 2 != 0:
            deconv_diff = sg.convolve(diff_filt, [1, 0, -1])
            deconv_sum = sum_filt
        else:
            deconv_diff = sg.convolve(diff_filt, [1, -1])
            deconv_sum = sg.convolve(sum_filt, [1, 1])
        lpc = .5 * (deconv_sum + deconv_diff)
        # Last coefficient is 0 and not returned
        all_lpc[row] = lpc[:-1]
    return np.squeeze(all_lpc)
def lpc_synthesis(lp_coefficients, per_frame_gain, residual_excitation=None,
                  voiced_frames=None, window_step=128, emphasis=0.9):
    """
    Synthesize a signal from LPC coefficients

    Based on code from:
        http://labrosa.ee.columbia.edu/matlab/sws/
        http://web.uvic.ca/~tyoon/resource/auditorytoolbox/auditorytoolbox/synlpc.html

    Parameters
    ----------
    lp_coefficients : ndarray
        Linear prediction coefficients

    per_frame_gain : ndarray
        Gain coefficients

    residual_excitation : ndarray or None, optional (default=None)
        Residual excitations. If None, this will be synthesized with white noise

    voiced_frames : ndarray or None, optional (default=None)
        Voiced frames. If None, all frames assumed to be voiced.

    window_step : int, optional (default=128)
        The size (in samples) of the space between each window

    emphasis : float, optional (default=0.9)
        The emphasis coefficient to use for filtering

    Returns
    -------
    synthesized : ndarray
        Sound vector synthesized from input arguments

    References
    ----------
    D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
    Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
    """
    # TODO: Incorporate better synthesis from
    # http://eecs.oregonstate.edu/education/docs/ece352/CompleteManual.pdf
    window_size = 2 * window_step
    [n_windows, order] = lp_coefficients.shape
    n_points = (n_windows + 1) * window_step
    n_excitation_points = n_points + window_step + window_step // 2
    # fixed seed so repeated synthesis of the same input is reproducible
    random_state = np.random.RandomState(1999)
    if residual_excitation is None:
        # Need to generate excitation
        if voiced_frames is None:
            # No voiced/unvoiced info
            voiced_frames = np.ones((lp_coefficients.shape[0], 1))
        residual_excitation = np.zeros((n_excitation_points))
        f, m = lpc_to_frequency(lp_coefficients, per_frame_gain)
        t = np.linspace(0, 1, window_size, endpoint=False)
        # NOTE(review): sg.hanning was removed in SciPy 1.13 in favor of
        # sg.windows.hann -- confirm the pinned scipy supports this alias
        hanning = sg.hanning(window_size)
        for window in range(n_windows):
            window_base = window * window_step
            index = window_base + np.arange(window_size)
            if voiced_frames[window]:
                # voiced: windowed sawtooth at the frame's lowest resonance
                sig = np.zeros_like(t)
                cycles = np.cumsum(f[window][0] * t)
                sig += sg.sawtooth(cycles, 0.001)
                residual_excitation[index] += hanning * sig
            # low-level noise floor added to every frame, voiced or not
            residual_excitation[index] += hanning * 0.01 * random_state.randn(
                window_size)
    else:
        n_excitation_points = residual_excitation.shape[0]
        n_points = n_excitation_points + window_step + window_step // 2
    # trailing zeros so the last window can index a full window_size span
    residual_excitation = np.hstack((residual_excitation,
                                     np.zeros(window_size)))
    if voiced_frames is None:
        voiced_frames = np.ones_like(per_frame_gain)
    synthesized = np.zeros((n_points))
    for window in range(n_windows):
        window_base = window * window_step
        oldbit = synthesized[window_base + np.arange(window_step)]
        w_coefs = lp_coefficients[window]
        if not np.all(w_coefs):
            # Hack to make lfilter avoid
            # ValueError: BUG: filter coefficient a[0] == 0 not supported yet
            # when all coeffs are 0
            w_coefs = [1]
        g_coefs = voiced_frames[window] * per_frame_gain[window]
        index = window_base + np.arange(window_size)
        # all-pole filter of the excitation, scaled by the frame gain
        newbit = g_coefs * sg.lfilter([1], w_coefs,
                                      residual_excitation[index])
        # overlap-add: keep the already-written first half, window the rest
        synthesized[index] = np.hstack((oldbit, np.zeros(
            (window_size - window_step))))
        synthesized[index] += sg.hanning(window_size) * newbit
    # de-emphasis: invert the pre-emphasis filter used during analysis
    synthesized = sg.lfilter([1], [1, -emphasis], synthesized)
    return synthesized
def soundsc(X, gain_scale=.9, copy=True):
    """
    Rescale X into the int16 range (MATLAB soundsc, minus the playback).

    Parameters
    ----------
    X : ndarray
        Signal to be rescaled

    gain_scale : float
        Gain multipler, default .9 (90% of maximum representation)

    copy : bool, optional (default=True)
        Whether to make a copy of input signal or operate in place.

    Returns
    -------
    X_sc : ndarray
        (-32767, 32767) scaled version of X as int16, suitable for writing
        with scipy.io.wavfile
    """
    X = np.array(X, copy=copy)
    # min-max normalize into [0, 1], then map onto [-1, 1]
    normalized = (X - X.min()) / (X.max() - X.min())
    centered = 2 * normalized - 1
    scaled = gain_scale * centered * 2 ** 15
    return scaled.astype('int16')
def _wav2array(nchannels, sampwidth, data):
# wavio.py
# Author: Warren Weckesser
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
"""data must be the string containing the bytes from the wav file."""
num_samples, remainder = divmod(len(data), sampwidth * nchannels)
if remainder > 0:
raise ValueError('The length of data is not a multiple of '
'sampwidth * num_channels.')
if sampwidth > 4:
raise ValueError("sampwidth must not be greater than 4.")
if sampwidth == 3:
a = np.empty((num_samples, nchannels, 4), dtype=np.uint8)
raw_bytes = np.fromstring(data, dtype=np.uint8)
a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
result = a.view('<i4').reshape(a.shape[:-1])
else:
# 8 bit samples are stored as unsigned ints; others as signed ints.
dt_char = 'u' if sampwidth == 1 else 'i'
a = np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
result = a.reshape(-1, nchannels)
return result
def readwav(file):
    # wavio.py
    # Author: Warren Weckesser
    # License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
    """
    Read a wav file.

    Returns the frame rate, sample width (in bytes) and a numpy array
    containing the data.
    This function does not read compressed wav files.
    """
    # context manager guarantees the handle is closed even if readframes
    # raises (the previous explicit close() leaked it on error)
    with wave.open(file) as wav:
        rate = wav.getframerate()
        nchannels = wav.getnchannels()
        sampwidth = wav.getsampwidth()
        nframes = wav.getnframes()
        data = wav.readframes(nframes)
    array = _wav2array(nchannels, sampwidth, data)
    return rate, sampwidth, array
def csvd(arr):
    """
    Do the complex SVD of a 2D array, returning real valued U, S, VT.

    Embeds the complex matrix into the real block matrix
    [[Re, Im], [-Im, Re]] and runs an economy-size SVD on that.
    http://stemblab.github.io/complex-svd/
    """
    re = arr.real
    im = arr.imag
    rows, cols = re.shape
    K = np.zeros((2 * rows, 2 * cols))
    K[:rows, :cols] = re       # upper left
    K[:rows, cols:] = im       # upper right
    K[rows:, :cols] = -im      # lower left
    K[rows:, cols:] = re       # lower right
    return svd(K, full_matrices=False)
def icsvd(U, S, VT):
    """
    Invert back to complex values from the output of csvd

    U, S, VT = csvd(X)
    X_rec = icsvd(U, S, VT)
    """
    # (U * S) @ VT is equivalent to U @ diag(S) @ VT
    K = (U * S).dot(VT)
    rows = U.shape[0] // 2
    cols = U.shape[1] // 2
    rec = np.zeros((rows, cols)) + 0j
    rec.real = K[:rows, :cols]
    rec.imag = K[:rows, cols:]
    return rec
def sinusoid_analysis(X, input_sample_rate, resample_block=128, copy=True):
    """
    Construct a sinusoidal model for the input signal.

    Parameters
    ----------
    X : ndarray
        Input signal to model

    input_sample_rate : int
        The sample rate of the input signal

    resample_block : int, optional (default=128)
        Controls the step size of the sinusoidal model

    Returns
    -------
    frequencies_hz : ndarray
        Frequencies for the sinusoids, in Hz.

    magnitudes : ndarray
        Magnitudes of sinusoids returned in ``frequencies``

    References
    ----------
    D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
    Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
    """
    X = np.array(X, copy=copy)
    target_rate = 8000
    if input_sample_rate != target_rate:
        if input_sample_rate % target_rate != 0:
            raise ValueError("Input sample rate must be a multiple of 8k!")
        # Should be able to use resample... ?
        # resampled_count = round(len(X) * resample_to / input_sample_rate)
        # X = sg.resample(X, resampled_count, window=sg.hanning(len(X)))
        X = sg.decimate(X, input_sample_rate // target_rate, zero_phase=True)
    step_size = 2 * round(resample_block / input_sample_rate * target_rate / 2.)
    lpc, gain, _ = lpc_analysis(X, order=8, window_step=step_size,
                                window_size=2 * step_size)
    freqs, mags = lpc_to_frequency(lpc, gain)
    return freqs * target_rate / (2 * np.pi), mags
def slinterp(X, factor, copy=True):
    """
    Slow-ish linear interpolation of a 1D numpy array. There must be some
    better function to do this in numpy.

    Parameters
    ----------
    X : ndarray
        1D input array to interpolate

    factor : int
        Integer factor to interpolate by

    Return
    ------
    X_r : ndarray
        Interpolated array of length (len(X) - 1) * factor + 1
    """
    # np.prod replaces np.product, which was removed in numpy 2.0
    sz = int(np.prod(X.shape))
    X = np.array(X, copy=copy)
    X_next = np.hstack((X[1:], [0]))
    X_r = np.zeros((factor, sz))
    for i in range(factor):
        # blend each sample toward its successor at fraction i / factor
        X_r[i, :] = (factor - i) / float(factor) * X + (i / float(factor)) * X_next
    return X_r.T.ravel()[:(sz - 1) * factor + 1]
def sinusoid_synthesis(frequencies_hz, magnitudes, input_sample_rate=16000,
                       resample_block=128):
    """
    Render a time series from sinusoid frequency and magnitude tracks.

    Parameters
    ----------
    frequencies_hz : ndarray
        Per-frame sinusoid frequencies, in Hz (one column per track).

    magnitudes : ndarray
        Per-frame magnitudes matching ``frequencies_hz``.

    input_sample_rate : int, optional (default=16000)
        The sample rate parameter that the sinusoid analysis was run with

    resample_block : int, optional (default=128)
        Controls the step size of the sinusoidal model

    Returns
    -------
    synthesized : ndarray
        Sound vector synthesized from input arguments

    References
    ----------
    D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
    Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
    """
    n_frames, n_tracks = frequencies_hz.shape
    out = np.zeros((1 + ((n_frames - 1) * resample_block),))
    for track in range(n_tracks):
        # upsample the frame-rate tracks to audio rate, then integrate
        # frequency into phase
        mag_track = slinterp(magnitudes[:, track], resample_block)
        freq_track = slinterp(frequencies_hz[:, track], resample_block)
        phase = np.cumsum(2 * np.pi * freq_track / float(input_sample_rate))
        out += mag_track * np.cos(phase)
    return out
def dct_compress(X, n_components, window_size=128):
    """
    Compress using the DCT

    Parameters
    ----------
    X : ndarray, shape=(n_samples,)
        The input signal to compress. Should be 1-dimensional

    n_components : int
        The number of DCT components to keep. Setting n_components to about
        .5 * window_size can give compression with fairly good reconstruction.

    window_size : int
        The input X is broken into windows of window_size, each of which are
        then compressed with the DCT.

    Returns
    -------
    X_compressed : ndarray, shape=(num_windows, window_size)
        A 2D array of non-overlapping DCT coefficients. For use with uncompress

    Reference
    ---------
    http://nbviewer.ipython.org/github/craffel/crucialpython/blob/master/week3/stride_tricks.ipynb
    """
    remainder = len(X) % window_size
    if remainder != 0:
        # zero-pad up to a whole number of windows
        X = np.hstack((X, np.zeros((window_size - remainder))))
    frames = X.reshape((len(X) // window_size, window_size))
    coefs = fftpack.dct(frames, norm='ortho')
    if n_components is not None:
        coefs = coefs[:, :n_components]
    return coefs
def dct_uncompress(X_compressed, window_size=128):
    """
    Uncompress a DCT compressed signal (such as returned by ``dct_compress``).

    Parameters
    ----------
    X_compressed : ndarray, shape=(n_samples, n_features)
        Windowed and compressed array.
    window_size : int, optional (default=128)
        Size of the window used when ``dct_compress`` was called.

    Returns
    -------
    X_reconstructed : ndarray, shape=(n_samples,)
        Reconstructed version of X.
    """
    n_rows, n_cols = X_compressed.shape
    missing = n_cols % window_size
    if missing != 0:
        # Re-pad truncated coefficient rows with zeros before inverting
        pad = np.zeros((n_rows, window_size - missing))
        X_compressed = np.hstack((X_compressed, pad))
    return fftpack.idct(X_compressed, norm='ortho').ravel()
def sine_window(X):
    """
    Apply a sinusoid window to every row of X.

    Parameters
    ----------
    X : ndarray, shape=(n_samples, n_features)
        Input array of samples

    Returns
    -------
    X_windowed : ndarray, shape=(n_samples, n_features)
        Windowed version of X.
    """
    n_cols = X.shape[1]
    idx = np.arange(n_cols)
    win = np.sin(np.pi * (idx + 0.5) / n_cols)
    # Zero row stride broadcasts the 1D window across all rows without a copy
    tiled_win = as_strided(win, shape=X.shape, strides=(0, win.itemsize))
    return X * tiled_win
def kaiserbessel_window(X, alpha=6.5):
    """
    Apply a Kaiser-Bessel window to every row of X.

    Parameters
    ----------
    X : ndarray, shape=(n_samples, n_features)
        Input array of samples
    alpha : float, optional (default=6.5)
        Tuning parameter for Kaiser-Bessel function. alpha=6.5 should make
        perfect reconstruction possible for DCT.

    Returns
    -------
    X_windowed : ndarray, shape=(n_samples, n_features)
        Windowed version of X.
    """
    beta = np.pi * alpha
    # scipy.signal.kaiser was deprecated and removed in SciPy 1.13;
    # the window functions live in scipy.signal.windows (available
    # since SciPy 0.16), so use that namespace instead.
    win = sg.windows.kaiser(X.shape[1], beta)
    # Zero row stride broadcasts the 1D window across all rows without a copy
    row_stride = 0
    col_stride = win.itemsize
    strided_win = as_strided(win, shape=X.shape,
                             strides=(row_stride, col_stride))
    return X * strided_win
def overlap(X, window_size, window_step):
    """
    Slice X into (possibly overlapping) windows via stride tricks.

    Parameters
    ----------
    X : ndarray, shape=(n_samples,)
        Input signal to window and overlap
    window_size : int
        Size of windows to take
    window_step : int
        Step size between windows

    Returns
    -------
    X_strided : shape=(n_windows, window_size)
        2D array of overlapped X
    """
    if window_size % 2 != 0:
        raise ValueError("Window size must be even!")
    # Zero pad up to a multiple of window_size before the strided view
    # (note this appends a full extra window when len(X) already divides)
    X = np.hstack((X, np.zeros((window_size - len(X) % window_size))))
    overlap_sz = window_size - window_step
    n_windows = (X.shape[-1] - overlap_sz) // window_step
    shape = X.shape[:-1] + (n_windows, window_size)
    strides = X.strides[:-1] + (window_step * X.strides[-1],) + X.strides[-1:]
    return as_strided(X, shape=shape, strides=strides)
def halfoverlap(X, window_size):
    """
    Window X using 50% of window_size as the hop between frames.

    Parameters
    ----------
    X : ndarray, shape=(n_samples,)
        Input signal to window and overlap
    window_size : int
        Size of windows to take

    Returns
    -------
    X_strided : shape=(n_windows, window_size)
        2D array of overlapped X
    """
    if window_size % 2 != 0:
        raise ValueError("Window size must be even!")
    hop = window_size // 2
    # Zero pad up to a multiple of window_size before the strided view
    X = np.hstack((X, np.zeros((window_size - len(X) % window_size))))
    n_frames = len(X) // hop - 1
    return as_strided(X, shape=(n_frames, window_size),
                      strides=(X.itemsize * hop, X.itemsize))
def invert_halfoverlap(X_strided):
    """
    Invert ``halfoverlap`` by overlap-adding the 50%-overlapped frames.

    Parameters
    ----------
    X_strided : ndarray, shape=(n_windows, window_size)
        X as overlapped windows

    Returns
    -------
    X : ndarray, shape=(n_samples,)
        Reconstructed version of X
    """
    # Hardcoded 50% overlap! Can generalize later...
    n_rows, n_cols = X_strided.shape
    hop = n_cols // 2
    # Output length matches the zero-padded input that halfoverlap saw
    X = np.zeros((((int(n_rows // 2) + 1) * n_cols),)).astype(X_strided.dtype)
    for i in range(n_rows):
        lo = i * hop
        X[lo:lo + n_cols] += X_strided[i]
    return X
def overlap_add(X_strided, window_step, wsola=False):
    """
    overlap add to reconstruct X
    Parameters
    ----------
    X_strided : ndarray, shape=(n_windows, window_size)
        X as overlapped windows
    window_step : int
        step size for overlap add
    wsola : bool, optional (default=False)
        When True, cross-correlate each frame against the audio already
        written and shift it to the best-matching offset (WSOLA-style)
        before adding it in
    Returns
    -------
    X : ndarray, shape=(n_samples,)
        Reconstructed version of X
    """
    n_rows, window_size = X_strided.shape
    # Start with largest size (no overlap) then truncate after we finish
    # +2 for one window on each side
    X = np.zeros(((n_rows + 2) * window_size,)).astype(X_strided.dtype)
    start_index = 0
    total_windowing_sum = np.zeros((X.shape[0]))
    # Hamming window of the frame length
    win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(window_size) / (
        window_size - 1))
    for i in range(n_rows):
        end_index = start_index + window_size
        if wsola:
            # Align this frame with the overlap region already in X
            offset_size = window_size - window_step
            offset = xcorr_offset(X[start_index:start_index + offset_size],
                                  X_strided[i, :offset_size])
            ss = start_index - offset
            st = end_index - offset
            if start_index - offset < 0:
                # Clamp: never write before the start of the output buffer
                ss = 0
                st = 0 + (end_index - start_index)
            X[ss:st] += X_strided[i]
            total_windowing_sum[ss:st] += win
            start_index = start_index + window_step
        else:
            X[start_index:end_index] += X_strided[i]
            total_windowing_sum[start_index:end_index] += win
            start_index += window_step
    # Not using this right now
    # X = np.real(X) / (total_windowing_sum + 1)
    # Drop the unused tail of the oversized buffer
    X = X[:end_index]
    return X
def overlap_dct_compress(X, n_components, window_size):
    """
    Overlap (at 50% of window_size) and compress X with the DCT.

    Parameters
    ----------
    X : ndarray, shape=(n_samples,)
        Input signal to compress
    n_components : int or None
        number of DCT components to keep (None keeps all)
    window_size : int
        Size of windows to take

    Returns
    -------
    X_dct : ndarray, shape=(n_windows, n_components)
        Windowed and compressed version of X
    """
    frames = halfoverlap(X, window_size)
    coefs = fftpack.dct(frames, norm='ortho')
    if n_components is not None:
        coefs = coefs[:, :n_components]
    return coefs
# Evil voice is caused by adding double the zeros before inverse DCT...
# Very cool bug but makes sense
def overlap_dct_uncompress(X_compressed, window_size):
    """
    Uncompress X as returned from ``overlap_dct_compress``.

    Parameters
    ----------
    X_compressed : ndarray, shape=(n_windows, n_components)
        Windowed and compressed version of X
    window_size : int
        Size of windows originally used when compressing X

    Returns
    -------
    X_reconstructed : ndarray, shape=(n_samples,)
        Reconstructed version of X
    """
    n_rows, n_cols = X_compressed.shape
    missing = n_cols % window_size
    if missing != 0:
        # Restore truncated coefficients as zeros before inverting
        X_compressed = np.hstack(
            (X_compressed, np.zeros((n_rows, window_size - missing))))
    return invert_halfoverlap(fftpack.idct(X_compressed, norm='ortho'))
def herz_to_mel(freqs):
    """
    Convert frequencies in Hz to mel (linear below 1 kHz, log above).

    Based on code by Dan Ellis
    http://labrosa.ee.columbia.edu/matlab/tf_agc/
    """
    f_0 = 0  # 133.33333
    f_sp = 200 / 3.  # 66.66667
    bark_freq = 1000.
    bark_pt = (bark_freq - f_0) / f_sp
    # The magic 1.0711703 which is the ratio needed to get from 1000 Hz
    # to 6400 Hz in 27 steps: exp(log(6.4)/27) = 1.07117028749447
    if not isinstance(freqs, np.ndarray):
        freqs = np.array(freqs)[None]
    log_step = np.exp(np.log(6.4) / 27)
    linear = freqs < bark_freq
    mel = 0. * freqs
    mel[linear] = (freqs[linear] - f_0) / f_sp
    mel[~linear] = bark_pt + np.log(freqs[~linear] / bark_freq) / np.log(
        log_step)
    return mel
def mel_to_herz(mel):
    """
    Convert mel values back to frequency in Hz (inverse of ``herz_to_mel``).

    Based on code by Dan Ellis
    http://labrosa.ee.columbia.edu/matlab/tf_agc/
    """
    f_0 = 0  # 133.33333
    f_sp = 200 / 3.  # 66.66667
    bark_freq = 1000.
    bark_pt = (bark_freq - f_0) / f_sp
    # The magic 1.0711703 which is the ratio needed to get from 1000 Hz
    # to 6400 Hz in 27 steps: exp(log(6.4)/27) = 1.07117028749447
    if not isinstance(mel, np.ndarray):
        mel = np.array(mel)[None]
    log_step = np.exp(np.log(6.4) / 27)
    linear = mel < bark_pt
    freqs = 0. * mel
    freqs[linear] = f_0 + f_sp * mel[linear]
    freqs[~linear] = bark_freq * np.exp(np.log(log_step) * (
        mel[~linear] - bark_pt))
    return freqs
def mel_freq_weights(n_fft, fs, n_filts=None, width=None):
    """
    Build a (n_filts, n_fft) matrix of triangular mel filterbank weights.

    Based on code by Dan Ellis
    http://labrosa.ee.columbia.edu/matlab/tf_agc/

    Only the first n_fft // 2 FFT bins receive nonzero weight; the upper
    half of each row is zeroed at the end.
    """
    min_freq = 0
    max_freq = fs // 2
    if width is None:
        width = 1.
    if n_filts is None:
        n_filts = int(herz_to_mel(max_freq) / 2) + 1
    else:
        n_filts = int(n_filts)
        assert n_filts > 0
    weights = np.zeros((n_filts, n_fft))
    fft_freqs = np.arange(n_fft // 2) / n_fft * fs
    min_mel = herz_to_mel(min_freq)
    max_mel = herz_to_mel(max_freq)
    # n_filts + 2 band edges, uniformly spaced on the mel scale
    partial = np.arange(n_filts + 2) / (n_filts + 1.) * (max_mel - min_mel)
    bin_freqs = mel_to_herz(min_mel + partial)
    # NOTE(review): bin_bin is computed but never used below — kept from
    # the MATLAB original
    bin_bin = np.round(bin_freqs / fs * (n_fft - 1))
    for i in range(n_filts):
        # Three consecutive band edges; width scales the triangle about
        # its center frequency
        fs_i = bin_freqs[i + np.arange(3)]
        fs_i = fs_i[1] + width * (fs_i - fs_i[1])
        # Rising and falling edges of the triangular response
        lo_slope = (fft_freqs - fs_i[0]) / float(fs_i[1] - fs_i[0])
        hi_slope = (fs_i[2] - fft_freqs) / float(fs_i[2] - fs_i[1])
        weights[i, :n_fft // 2] = np.maximum(
            0, np.minimum(lo_slope, hi_slope))
    # Constant amplitude multiplier
    weights = np.diag(2. / (bin_freqs[2:n_filts + 2]
                            - bin_freqs[:n_filts])).dot(weights)
    weights[:, n_fft // 2:] = 0
    return weights
def time_attack_agc(X, fs, t_scale=0.5, f_scale=1.):
    """
    Time/frequency automatic gain control on signal X sampled at fs.

    AGC based on code by Dan Ellis
    http://labrosa.ee.columbia.edu/matlab/tf_agc/

    t_scale sets the smoothing time constant (seconds); f_scale widens or
    narrows the mel bands used to estimate the gain envelope.
    Returns (gain-normalized signal, STFT of the input, gain envelope E).
    """
    # 32 ms grid for FFT
    n_fft = 2 ** int(np.log(0.032 * fs) / np.log(2))
    f_scale = float(f_scale)
    window_size = n_fft
    window_step = window_size // 2
    X_freq = stft(X, window_size, mean_normalize=False)
    # Frame rate of the STFT in Hz
    fft_fs = fs / window_step
    n_bands = max(10, 20 / f_scale)
    mel_width = f_scale * n_bands / 10.
    # f_to_a maps FFT bins to mel bands; keep only the one-sided bins
    f_to_a = mel_freq_weights(n_fft, fs, n_bands, mel_width)
    f_to_a = f_to_a[:, :n_fft // 2 + 1]
    audiogram = np.abs(X_freq).dot(f_to_a.T)
    fbg = np.zeros_like(audiogram)
    state = np.zeros((audiogram.shape[1],))
    # One-pole decay per frame: fast attack (max), exponential release
    alpha = np.exp(-(1. / fft_fs) / t_scale)
    for i in range(len(audiogram)):
        state = np.maximum(alpha * state, audiogram[i])
        fbg[i] = state
    # Map the smoothed band envelope back to per-bin gains; guard
    # all-zero columns before dividing
    sf_to_a = np.sum(f_to_a, axis=0)
    E = np.diag(1. / (sf_to_a + (sf_to_a == 0)))
    E = E.dot(f_to_a.T)
    E = fbg.dot(E.T)
    # Avoid divide-by-zero when normalizing the spectrogram below
    E[E <= 0] = np.min(E[E > 0])
    ts = istft(X_freq / E, window_size, mean_normalize=False)
    return ts, X_freq, E
def hebbian_kmeans(X, n_clusters=10, n_epochs=10, W=None, learning_rate=0.01,
                   batch_size=100, random_state=None, verbose=True):
    """
    Online (Hebbian) k-means on rows of X; returns the cluster matrix W.

    Modified from existing code from R. Memisevic
    See http://www.cs.toronto.edu/~rfm/code/hebbian_kmeans.py

    W is initialized with small Gaussian noise unless provided; when given,
    its row count must equal n_clusters. Updates are applied in place per
    mini-batch, so results depend on batch order.
    """
    if W is None:
        if random_state is None:
            random_state = np.random.RandomState()
        W = 0.1 * random_state.randn(n_clusters, X.shape[1])
    else:
        assert n_clusters == W.shape[0]
    # Precompute squared norms of the data rows
    X2 = (X ** 2).sum(axis=1, keepdims=True)
    last_print = 0
    for e in range(n_epochs):
        for i in range(0, X.shape[0], batch_size):
            X_i = X[i: i + batch_size]
            X2_i = X2[i: i + batch_size]
            # D[k, j] = squared distance from cluster k to sample j
            # (expanded ||w||^2 - 2 w.x + ||x||^2 form)
            D = -2 * np.dot(W, X_i.T)
            D += (W ** 2).sum(axis=1, keepdims=True)
            D += X2_i.T
            # One-hot assignment of each sample to its nearest cluster
            S = (D == D.min(axis=0)[None, :]).astype("float").T
            # Move each assigned cluster toward the mean of its samples
            W += learning_rate * (
                np.dot(S.T, X_i) - S.sum(axis=0)[:, None] * W)
        if verbose:
            # Print at most every ~5% of epochs
            if e == 0 or e > (.05 * n_epochs + last_print):
                last_print = e
                print(("Epoch %i of %i, cost %.4f" % (
                    e + 1, n_epochs, D.min(axis=0).sum())))
    return W
def complex_to_real_view(arr_c):
    """View a 2D complex array as real with interleaved (re, im) columns."""
    # Inplace view from complex to r, i as separate columns
    assert arr_c.dtype in [np.complex64, np.complex128]
    n_rows, n_cols = arr_c.shape
    real_dtype = np.float64 if arr_c.dtype == np.complex128 else np.float32
    return arr_c.ravel().view(dtype=real_dtype).reshape(n_rows, 2 * n_cols)
def real_to_complex_view(arr_r):
    """View a 2D real array with (re, im) column pairs as complex."""
    # Inplace view from real, imag as columns to complex
    assert arr_r.dtype not in [np.complex64, np.complex128]
    n_rows, n_cols = arr_r.shape
    complex_dtype = np.complex128 if arr_r.dtype == np.float64 else np.complex64
    return arr_r.ravel().view(dtype=complex_dtype).reshape(n_rows, n_cols // 2)
def complex_to_abs(arr_c):
    """Magnitude of a complex array."""
    return np.abs(arr_c)
def complex_to_angle(arr_c):
    """Phase angle (radians) of a complex array."""
    return np.angle(arr_c)
def abs_and_angle_to_complex(arr_abs, arr_angle):
    """Recombine magnitude and phase arrays into a complex array."""
    # abs(f_c2 - f_c) < 1E-15
    return arr_abs * np.exp(1j * arr_angle)
def angle_to_sin_cos(arr_angle):
    """Expand angles into stacked (sin, cos) features along the last axis."""
    return np.hstack((np.sin(arr_angle), np.cos(arr_angle)))
def sin_cos_to_angle(arr_sin, arr_cos):
    """Recover angles from (sin, cos) pairs; inverse of ``angle_to_sin_cos``."""
    return np.arctan2(arr_sin, arr_cos)
def polyphase_core(x, m, f):
    """
    Decompose signal ``x`` and FIR filter ``f`` into ``m`` polyphase branches.

    Parameters
    ----------
    x : ndarray
        Input data.
    m : int
        Decimation rate.
    f : ndarray
        FIR filter coefficients.

    Returns
    -------
    p : ndarray, shape=(m, (len(x_pad) + len(f_pad)) // m)
        Per-branch convolution outputs; summing over axis 0 gives the
        filtered, decimated signal (see ``polyphase_single_filter``).
    """
    # Hack job - append zeros to match decimation rate
    if x.shape[0] % m != 0:
        x = np.append(x, np.zeros((m - x.shape[0] % m,)))
    if f.shape[0] % m != 0:
        f = np.append(f, np.zeros((m - f.shape[0] % m,)))
    # Use floor division: under Python 3 `/` produced a float shape and
    # np.zeros raised TypeError. Both lengths are multiples of m here,
    # so // is exact.
    p = np.zeros((m, (x.shape[0] + f.shape[0]) // m), dtype=x.dtype)
    p[0, :-1] = np.convolve(x[::m], f[::m])
    # Invert the x values when applying filters
    for i in range(1, m):
        p[i, 1:] = np.convolve(x[m - i::m], f[i::m])
    return p
def polyphase_single_filter(x, m, f):
    """Filter and decimate ``x`` by summing its polyphase branches."""
    branches = polyphase_core(x, m, f)
    return np.sum(branches, axis=0)
def polyphase_lowpass(arr, downsample=2, n_taps=50, filter_pad=1.1):
    """Lowpass-filter and decimate ``arr`` using a polyphase FIR structure."""
    cutoff = 1 / (downsample * filter_pad)
    lowpass_filt = firwin(downsample * n_taps, cutoff)
    return polyphase_single_filter(arr, downsample, lowpass_filt)
def window(arr, window_size, window_step=1, axis=0):
    """
    Create a sliding-window (strided) view of ``arr`` along ``axis``.

    Directly taken from Erik Rigtorp's post to numpy-discussion.
    <http://www.mail-archive.com/numpy-discussion@scipy.org/msg29450.html>
    <http://stackoverflow.com/questions/4936620/using-strides-for-an-efficient-moving-average-filter>
    """
    if window_size < 1:
        raise ValueError("`window_size` must be at least 1.")
    if window_size > arr.shape[-1]:
        raise ValueError("`window_size` is too long.")
    # Move the target axis to the end so the strided trick applies to it
    orig = list(range(len(arr.shape)))
    trans = list(range(len(arr.shape)))
    trans[axis] = orig[-1]
    trans[-1] = orig[axis]
    arr = arr.transpose(trans)
    # Append a window dimension that reuses the last-axis stride (no copy)
    shape = arr.shape[:-1] + (arr.shape[-1] - window_size + 1, window_size)
    strides = arr.strides + (arr.strides[-1],)
    strided = as_strided(arr, shape=shape, strides=strides)
    if window_step > 1:
        # Subsample the window positions
        strided = strided[..., ::window_step, :]
    # Swap the last two axes, then reverse the axis order of the result
    orig = list(range(len(strided.shape)))
    trans = list(range(len(strided.shape)))
    trans[-2] = orig[-1]
    trans[-1] = orig[-2]
    trans = trans[::-1]
    strided = strided.transpose(trans)
    return strided
def unwindow(arr, window_size, window_step=1, axis=0):
    """
    Approximately invert ``window`` by averaging over the window axis.

    Note: ``window_size`` is accepted for interface symmetry but is not
    used by the computation; only axis 0 is supported.
    """
    # undo windows by broadcast
    if axis != 0:
        raise ValueError("axis != 0 currently unsupported")
    shp = arr.shape
    repeated = np.tile(arr[:, None, ...], (1, window_step, 1, 1))
    flattened = repeated.reshape(shp[0] * window_step, *shp[1:])
    return flattened.mean(axis=1)
def xcorr_offset(x1, x2):
    """
    Estimate the alignment offset of x2 relative to x1 via cross-correlation.

    Under MSR-LA License
    Based on MATLAB implementation from Spectrogram Inversion Toolbox
    References
    ----------
    D. Griffin and J. Lim. Signal estimation from modified
    short-time Fourier transform. IEEE Trans. Acoust. Speech
    Signal Process., 32(2):236-243, 1984.
    Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
    Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
    Adelaide, 1994, II.77-80.
    Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
    Estimation from Modified Short-Time Fourier Transform
    Magnitude Spectra. IEEE Transactions on Audio Speech and
    Language Processing, 08/2007.
    """
    # Remove DC so the correlation peak reflects shape, not level
    x1 = x1 - x1.mean()
    x2 = x2 - x2.mean()
    half = len(x2) // 2
    corrs = np.convolve(x1.astype('float32'), x2[::-1].astype('float32'))
    # Mask the edges so the peak search stays within the valid overlap
    corrs[:half] = -1E30
    corrs[-half:] = -1E30
    return corrs.argmax() - len(x1)
def invert_spectrogram(X_s, step, calculate_offset=True, set_zero_phase=True):
    """
    Overlap-add one pass of spectrogram inversion (Griffin-Lim style).

    Under MSR-LA License
    Based on MATLAB implementation from Spectrogram Inversion Toolbox
    References
    ----------
    D. Griffin and J. Lim. Signal estimation from modified
    short-time Fourier transform. IEEE Trans. Acoust. Speech
    Signal Process., 32(2):236-243, 1984.
    Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
    Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
    Adelaide, 1994, II.77-80.
    Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
    Estimation from Modified Short-Time Fourier Transform
    Magnitude Spectra. IEEE Transactions on Audio Speech and
    Language Processing, 08/2007.
    """
    # Frame length is half of the (two-sided) spectral slice width
    size = int(X_s.shape[1] // 2)
    wave = np.zeros((X_s.shape[0] * step + size))
    # Getting overflow warnings with 32 bit...
    wave = wave.astype('float64')
    total_windowing_sum = np.zeros((X_s.shape[0] * step + size))
    # Hamming window over the frame
    win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
    est_start = int(size // 2) - 1
    est_end = est_start + size
    for i in range(X_s.shape[0]):
        wave_start = int(step * i)
        wave_end = wave_start + size
        if set_zero_phase:
            # Keep magnitudes only; phase is assumed zero
            spectral_slice = X_s[i].real + 0j
        else:
            # already complex
            spectral_slice = X_s[i]
        # Don't need fftshift due to different impl.
        wave_est = np.real(np.fft.ifft(spectral_slice))[::-1]
        if calculate_offset and i > 0:
            # Cross-correlate against what is already written to find the
            # best alignment for this frame
            offset_size = size - step
            if offset_size <= 0:
                print("WARNING: Large step size >50\% detected! "
                      "This code works best with high overlap - try "
                      "with 75% or greater")
                offset_size = step
            offset = xcorr_offset(wave[wave_start:wave_start + offset_size],
                                  wave_est[est_start:est_start + offset_size])
        else:
            offset = 0
        wave[wave_start:wave_end] += win * wave_est[
            est_start - offset:est_end - offset]
        total_windowing_sum[wave_start:wave_end] += win
    # Normalize by the accumulated window energy (small epsilon guards
    # against divide-by-zero at the edges)
    wave = np.real(wave) / (total_windowing_sum + 1E-6)
    return wave
def iterate_invert_spectrogram(X_s, fftsize, step, n_iter=10, verbose=False,
                               complex_input=False):
    """
    Iterative (Griffin-Lim style) phase recovery from a magnitude spectrogram.

    Under MSR-LA License
    Based on MATLAB implementation from Spectrogram Inversion Toolbox
    References
    ----------
    D. Griffin and J. Lim. Signal estimation from modified
    short-time Fourier transform. IEEE Trans. Acoust. Speech
    Signal Process., 32(2):236-243, 1984.
    Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
    Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
    Adelaide, 1994, II.77-80.
    Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
    Estimation from Modified Short-Time Fourier Transform
    Magnitude Spectra. IEEE Transactions on Audio Speech and
    Language Processing, 08/2007.
    """
    # Regularizer keeps the phase division below well-conditioned
    reg = np.max(X_s) / 1E8
    X_best = copy.deepcopy(X_s)
    try:
        for i in range(n_iter):
            if verbose:
                print(("Runnning iter %i" % i))
            if i == 0 and not complex_input:
                # First pass has no phase estimate yet
                X_t = invert_spectrogram(X_best, step, calculate_offset=True,
                                         set_zero_phase=True)
            else:
                # Calculate offset was False in the MATLAB version
                # but in mine it massively improves the result
                # Possible bug in my impl?
                X_t = invert_spectrogram(X_best, step, calculate_offset=True,
                                         set_zero_phase=False)
            # Re-analyze, keep only the phase, and reimpose the target
            # magnitudes
            est = stft(X_t, fftsize=fftsize, step=step, compute_onesided=False)
            phase = est / np.maximum(reg, np.abs(est))
            phase = phase[:len(X_s)]
            X_s = X_s[:len(phase)]
            X_best = X_s * phase
    except ValueError:
        raise ValueError("The iterate_invert_spectrogram algorithm requires"
                         " stft(..., compute_onesided=False),",
                         " be sure you have calculated stft with this argument")
    X_t = invert_spectrogram(X_best, step, calculate_offset=True,
                             set_zero_phase=False)
    return np.real(X_t)
def pretty_spectrogram(d, log=True, thresh=5, fft_size=512, step_size=64):
    """
    Compute a magnitude spectrogram, optionally log-scaled and floored.

    log: take the log of the spectrogram (after normalizing to max 1)
    thresh: threshold minimum power for log spectrogram
    """
    specgram = np.abs(stft(d, fftsize=fft_size, step=step_size, real=False,
                           compute_onesided=True))
    if log == True:
        # volume normalize to max 1, move to log domain, then floor
        specgram /= specgram.max()
        specgram = np.log10(specgram)
        specgram[specgram < -thresh] = -thresh
    else:
        # linear domain: floor directly at thresh
        specgram[specgram < thresh] = thresh
    return specgram
# Also mostly modified or taken from https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
def invert_pretty_spectrogram(X_s, log=True, fft_size=512, step_size=512 // 4, n_iter=10):
    """
    Invert a spectrogram from ``pretty_spectrogram`` back to a waveform.

    X_s : (log-)magnitude spectrogram
    log : must match the ``log`` flag used when building X_s
    fft_size : analysis FFT size
    step_size : hop size. Default uses integer division — the old
        ``512 / 4`` default was a float under Python 3, which broke
        ``np.zeros`` shape arithmetic inside invert_spectrogram.
    n_iter : number of phase-recovery iterations
    """
    if log == True:
        # Undo the log10 taken by pretty_spectrogram
        X_s = np.power(10, X_s)
    # Mirror to a two-sided spectrum, as iterate_invert_spectrogram expects
    X_s = np.concatenate([X_s, X_s[:, ::-1]], axis=1)
    X_t = iterate_invert_spectrogram(X_s, fft_size, step_size, n_iter=n_iter)
    return X_t
def harvest_get_downsampled_signal(x, fs, target_fs):
    """
    Decimate ``x`` from rate ``fs`` toward ``target_fs`` for Harvest F0 analysis.

    The signal is edge-padded before decimation (so the decimation filter
    settles before the region of interest), then trimmed and mean-removed.

    Returns
    -------
    y : ndarray
        Downsampled, zero-mean signal.
    actual_fs : float
        True post-decimation rate, fs / round(fs / target_fs).
    """
    decimation_ratio = np.round(fs / target_fs)
    # Pad with at least 140 input samples of the edge value on each side
    offset = np.ceil(140. / decimation_ratio) * decimation_ratio
    start_pad = x[0] * np.ones(int(offset), dtype=np.float32)
    end_pad = x[-1] * np.ones(int(offset), dtype=np.float32)
    x = np.concatenate((start_pad, x, end_pad), axis=0)
    if fs < target_fs:
        raise ValueError("CASE NOT HANDLED IN harvest_get_downsampled_signal")
    else:
        try:
            y0 = sg.decimate(x, int(decimation_ratio), 3, zero_phase=True)
        except TypeError:
            # Older scipy.signal.decimate has no zero_phase keyword.
            # The previous bare `except:` also hid unrelated errors.
            y0 = sg.decimate(x, int(decimation_ratio), 3)
        actual_fs = fs / decimation_ratio
        # Trim the padding (now offset / ratio output samples per side)
        y = y0[int(offset / decimation_ratio):-int(offset / decimation_ratio)]
    y = y - np.mean(y)
    return y, actual_fs
def harvest_get_raw_f0_candidates(number_of_frames, boundary_f0_list,
        y_length, temporal_positions, actual_fs, y_spectrum, f0_floor,
        f0_ceil):
    """Compute one raw F0 candidate track per boundary-frequency channel."""
    raw_f0_candidates = np.zeros((len(boundary_f0_list), number_of_frames),
                                 dtype=np.float32)
    for channel in range(len(boundary_f0_list)):
        raw_f0_candidates[channel, :] = harvest_get_f0_candidate_from_raw_event(
            boundary_f0_list[channel], actual_fs, y_spectrum, y_length,
            temporal_positions, f0_floor, f0_ceil)
    return raw_f0_candidates
def harvest_nuttall(N):
    """Length-N Nuttall window, returned as a 1D array."""
    t = np.arange(0, N) * 2 * np.pi / (N - 1)
    coefs = np.array([0.355768, -0.487396, 0.144232, -0.012604])
    # Cosine terms at harmonics 0..3 of the window phase
    harmonics = t[:, None].dot(np.array([0., 1., 2., 3.])[None])
    window = np.cos(harmonics).dot(coefs[:, None])
    # 1D window...
    return window.ravel()
def harvest_get_f0_candidate_from_raw_event(boundary_f0,
        fs, y_spectrum, y_length, temporal_positions, f0_floor,
        f0_ceil):
    """
    F0 candidate track for one boundary-frequency channel.

    Band-pass filters the signal (via its precomputed spectrum
    ``y_spectrum``), extracts four event streams (zero crossings of both
    polarities, peaks, dips), and interpolates them into a per-frame F0
    candidate; values outside [f0_floor, f0_ceil] and outside
    boundary_f0 * [0.9, 1.1] are zeroed.
    """
    filter_length_half = int(np.round(fs / boundary_f0 * 2))
    band_pass_filter_base = harvest_nuttall(filter_length_half * 2 + 1)
    # Modulate the Nuttall window up to boundary_f0 -> band-pass filter
    shifter = np.cos(2 * np.pi * boundary_f0 * np.arange(-filter_length_half, filter_length_half + 1) / float(fs))
    band_pass_filter = band_pass_filter_base * shifter
    index_bias = filter_length_half
    # possible numerical issues if 32 bit
    spectrum_low_pass_filter = np.fft.fft(band_pass_filter.astype("float64"), len(y_spectrum))
    # Filter in the frequency domain, then compensate group delay
    filtered_signal = np.real(np.fft.ifft(spectrum_low_pass_filter * y_spectrum))
    # NOTE(review): this overwrites the index_bias assigned above — the
    # first assignment is dead code
    index_bias = filter_length_half + 1
    filtered_signal = filtered_signal[index_bias + np.arange(y_length).astype("int32")]
    negative_zero_cross = harvest_zero_crossing_engine(filtered_signal, fs)
    positive_zero_cross = harvest_zero_crossing_engine(-filtered_signal, fs)
    d_filtered_signal = filtered_signal[1:] - filtered_signal[:-1]
    # Peaks/dips are zero crossings of the first difference
    peak = harvest_zero_crossing_engine(d_filtered_signal, fs)
    dip = harvest_zero_crossing_engine(-d_filtered_signal, fs)
    f0_candidate = harvest_get_f0_candidate_contour(negative_zero_cross,
            positive_zero_cross, peak, dip, temporal_positions)
    # Reject candidates too far from this channel's boundary frequency
    f0_candidate[f0_candidate > (boundary_f0 * 1.1)] = 0.
    f0_candidate[f0_candidate < (boundary_f0 * .9)] = 0.
    f0_candidate[f0_candidate > f0_ceil] = 0.
    f0_candidate[f0_candidate < f0_floor] = 0.
    return f0_candidate
def harvest_get_f0_candidate_contour(negative_zero_cross_tup,
        positive_zero_cross_tup, peak_tup, dip_tup, temporal_positions):
    """
    Average four interval-based F0 event streams into one candidate contour.

    Each ``*_tup`` argument is a (interval_locations, interval_based_f0)
    pair as returned by ``harvest_zero_crossing_engine``. Returns zeros
    when any stream has too few events to interpolate.
    """
    # 0 is inteval locations
    # 1 is interval based f0
    # All four streams need at least 3 events to be usable
    usable_channel = max(0, len(negative_zero_cross_tup[0]) - 2)
    usable_channel *= max(0, len(positive_zero_cross_tup[0]) - 2)
    usable_channel *= max(0, len(peak_tup[0]) - 2)
    usable_channel *= max(0, len(dip_tup[0]) - 2)
    if usable_channel > 0:
        interpolated_f0_list = np.zeros((4, len(temporal_positions)))
        nz = interp1d(negative_zero_cross_tup[0], negative_zero_cross_tup[1],
                kind="linear", bounds_error=False, fill_value="extrapolate")
        pz = interp1d(positive_zero_cross_tup[0], positive_zero_cross_tup[1],
                kind="linear", bounds_error=False, fill_value="extrapolate")
        pkz = interp1d(peak_tup[0], peak_tup[1],
                kind="linear", bounds_error=False, fill_value="extrapolate")
        dz = interp1d(dip_tup[0], dip_tup[1],
                kind="linear", bounds_error=False, fill_value="extrapolate")
        # Resample each stream onto the frame grid, then average
        interpolated_f0_list[0, :] = nz(temporal_positions)
        interpolated_f0_list[1, :] = pz(temporal_positions)
        interpolated_f0_list[2, :] = pkz(temporal_positions)
        interpolated_f0_list[3, :] = dz(temporal_positions)
        f0_candidate = np.mean(interpolated_f0_list, axis=0)
    else:
        f0_candidate = temporal_positions * 0
    return f0_candidate
def harvest_zero_crossing_engine(x, fs, debug=False):
    """
    Locate negative-going zero crossings of ``x`` and the F0 they imply.

    Returns (interval_locations, interval_based_f0): midpoints (seconds)
    between successive crossings and the reciprocal of each interval.
    """
    # negative zero crossing, going from positive to negative
    x_shift = x.copy()
    x_shift[:-1] = x_shift[1:]
    x_shift[-1] = x[-1]
    # +1 here to avoid edge case at 0
    points = np.arange(len(x)) + 1
    # Sign change AND decreasing => negative-going crossing
    negative_going_points = points * ((x_shift * x < 0) * (x_shift < x))
    edge_list = negative_going_points[negative_going_points > 0]
    # -1 to correct index
    # Linear interpolation for sub-sample crossing positions
    fine_edge_list = edge_list - x[edge_list - 1] / (x[edge_list] - x[edge_list - 1]).astype("float32")
    interval_locations = (fine_edge_list[:-1] + fine_edge_list[1:]) / float(2) / fs
    interval_based_f0 = float(fs) / (fine_edge_list[1:] - fine_edge_list[:-1])
    return interval_locations, interval_based_f0
def harvest_detect_official_f0_candidates(raw_f0_candidates):
    """
    Collapse per-channel raw candidates into per-frame candidate lists.

    For each frame, runs of >= 10 consecutive channels with nonzero raw
    candidates are averaged into one official candidate. Returns the
    candidate matrix and the largest per-frame candidate count seen.
    """
    number_of_channels, number_of_frames = raw_f0_candidates.shape
    f0_candidates = np.zeros((int(np.round(number_of_channels / 10.)), number_of_frames))
    number_of_candidates = 0
    threshold = 10
    for i in range(number_of_frames):
        # Binarize the channel column and find run boundaries via the
        # first difference (st = run starts, ed = run ends)
        tmp = raw_f0_candidates[:, i].copy()
        tmp[tmp > 0] = 1.
        tmp[0] = 0
        tmp[-1] = 0
        tmp = tmp[1:] - tmp[:-1]
        st = np.where(tmp == 1)[0]
        ed = np.where(tmp == -1)[0]
        count = 0
        for j in range(len(st)):
            dif = ed[j] - st[j]
            if dif >= threshold:
                # Average the raw candidates across the run's channels
                tmp_f0 = raw_f0_candidates[st[j] + 1: ed[j] + 1, i]
                f0_candidates[count, i] = np.mean(tmp_f0)
                count = count + 1
        number_of_candidates = max(number_of_candidates, count)
    return f0_candidates, number_of_candidates
def harvest_overlap_f0_candidates(f0_candidates, max_number_of_f0_candidates):
    """
    Augment candidates with time-shifted copies (+/- n frames).

    Ported from the MATLAB WORLD/Harvest implementation; the indexing
    below emulates MATLAB's implicit array growth.
    """
    n = 3  # this is the optimized parameter... apparently
    number_of_candidates = n * 2 + 1
    new_f0_candidates = f0_candidates[number_of_candidates, :].copy()
    new_f0_candidates = new_f0_candidates[None]
    # hack to bypass magic matlab-isms of allocating when indexing OOB
    new_f0_candidates = np.vstack(
        [new_f0_candidates] + (new_f0_candidates.shape[-1] - 1) * [np.zeros_like(new_f0_candidates)])
    # this indexing is megagross, possible source for bugs!
    all_nonzero = []
    for i in range(number_of_candidates):
        # st/ed give the frame shift for this copy: i < n shifts right,
        # i > n shifts left, i == n is unshifted
        st = max(-(i - n), 0)
        ed = min(-(i - n), 0)
        f1_b = np.arange(max_number_of_f0_candidates).astype("int32")
        f1 = f1_b + int(i * max_number_of_f0_candidates)
        all_nonzero = list(set(all_nonzero + list(f1)))
        # Convert the shift into source/destination slice bounds
        f2 = None if ed == 0 else ed
        f3 = -ed
        f4 = None if st == 0 else -st
        new_f0_candidates[f1, st:f2] = f0_candidates[f1_b, f3:f4]
    # Keep only the rows that were actually written
    new_f0_candidates = new_f0_candidates[all_nonzero, :]
    return new_f0_candidates
def harvest_refine_candidates(x, fs, temporal_positions, f0_candidates,
        f0_floor, f0_ceil):
    """Refine every nonzero F0 candidate and attach a reliability score."""
    new_f0_candidates = f0_candidates.copy()
    f0_scores = f0_candidates * 0.
    for frame in range(len(temporal_positions)):
        for cand in range(len(f0_candidates)):
            coarse_f0 = f0_candidates[cand, frame]
            if coarse_f0 == 0:
                # Unvoiced / rejected candidate stays zero-scored
                continue
            refined_f0, score = harvest_get_refined_f0(
                x, fs, temporal_positions[frame], coarse_f0,
                f0_floor, f0_ceil)
            new_f0_candidates[cand, frame] = refined_f0
            f0_scores[cand, frame] = score
    return new_f0_candidates, f0_scores
def harvest_get_refined_f0(x, fs, current_time, current_f0, f0_floor,
        f0_ceil):
    """
    Refine a coarse F0 estimate via instantaneous-frequency reweighting.

    Windows the signal around ``current_time`` with a Blackman-style
    window, estimates the instantaneous frequency at the first harmonics
    of ``current_f0``, and combines them (amplitude-weighted) into a
    refined F0 plus a reliability score. Returns (0., 0.) when the
    refined value falls outside [f0_floor, f0_ceil] or scores below 2.5.
    """
    half_window_length = np.ceil(3. * fs / current_f0 / 2.)
    window_length_in_time = (2. * half_window_length + 1) / float(fs)
    base_time = np.arange(-half_window_length, half_window_length + 1) / float(fs)
    fft_size = int(2 ** np.ceil(np.log2((half_window_length * 2 + 1)) + 1))
    frequency_axis = np.arange(fft_size) / fft_size * float(fs)
    # +0.001 guards against round-to-even landing one sample off
    base_index = np.round((current_time + base_time) * fs + 0.001)
    index_time = (base_index - 1) / float(fs)
    window_time = index_time - current_time
    # Blackman window (0.42 + 0.5 cos + 0.08 cos form)
    part1 = np.cos(2 * np.pi * window_time / window_length_in_time)
    part2 = np.cos(4 * np.pi * window_time / window_length_in_time)
    main_window = 0.42 + 0.5 * part1 + 0.08 * part2
    # Centered first difference of the window for the IF estimate
    ext = np.zeros((len(main_window) + 2))
    ext[1:-1] = main_window
    diff_window = -((ext[1:-1] - ext[:-2]) + (ext[2:] - ext[1:-1])) / float(2)
    # Clamp indices into the valid range of x
    safe_index = np.maximum(1, np.minimum(len(x), base_index)).astype("int32") - 1
    spectrum = np.fft.fft(x[safe_index] * main_window, fft_size)
    diff_spectrum = np.fft.fft(x[safe_index] * diff_window, fft_size)
    numerator_i = np.real(spectrum) * np.imag(diff_spectrum) - np.imag(spectrum) * np.real(diff_spectrum)
    power_spectrum = np.abs(spectrum) ** 2
    instantaneous_frequency = frequency_axis + numerator_i / power_spectrum * float(fs) / 2. / np.pi
    number_of_harmonics = int(min(np.floor(float(fs) / 2. / current_f0), 6.))
    harmonics_index = np.arange(number_of_harmonics) + 1
    index_list = np.round(current_f0 * fft_size / fs * harmonics_index).astype("int32")
    instantaneous_frequency_list = instantaneous_frequency[index_list]
    amplitude_list = np.sqrt(power_spectrum[index_list])
    # Amplitude-weighted average of (IF / harmonic number)
    refined_f0 = np.sum(amplitude_list * instantaneous_frequency_list)
    refined_f0 /= np.sum(amplitude_list * harmonics_index.astype("float32"))
    variation = np.abs(
        ((instantaneous_frequency_list / harmonics_index.astype("float32")) - current_f0) / float(current_f0))
    refined_score = 1. / (0.000000000001 + np.mean(variation))
    if (refined_f0 < f0_floor) or (refined_f0 > f0_ceil) or (refined_score < 2.5):
        refined_f0 = 0.
        # BUGFIX: was `redined_score = 0.` (typo), which left the stale
        # score attached to a rejected candidate
        refined_score = 0.
    return refined_f0, refined_score
def harvest_select_best_f0(reference_f0, f0_candidates, allowed_range):
    """
    Pick the candidate with the smallest relative error to ``reference_f0``.

    Returns (best_f0, best_error). best_f0 stays 0 when no candidate
    comes within ``allowed_range`` relative error.
    """
    best_f0 = 0
    best_error = allowed_range
    for candidate in f0_candidates:
        relative_error = np.abs(reference_f0 - candidate) / reference_f0
        if relative_error <= best_error:
            best_f0 = candidate
            best_error = relative_error
    return best_f0, best_error
def harvest_remove_unreliable_candidates(f0_candidates, f0_scores):
    """
    Zero out candidates that have no close match in either neighbor frame.

    A candidate is kept only if some candidate in the previous or next
    frame lies within 5% relative error of it.
    """
    new_f0_candidates = f0_candidates.copy()
    new_f0_scores = f0_scores.copy()
    threshold = 0.05
    f0_length = f0_candidates.shape[1]
    number_of_candidates = len(f0_candidates)
    for frame in range(1, f0_length - 1):
        for cand in range(number_of_candidates):
            reference_f0 = f0_candidates[cand, frame]
            if reference_f0 == 0:
                continue
            _, error_next = harvest_select_best_f0(
                reference_f0, f0_candidates[:, frame + 1], 1)
            _, error_prev = harvest_select_best_f0(
                reference_f0, f0_candidates[:, frame - 1], 1)
            if min([error_next, error_prev]) > threshold:
                new_f0_candidates[cand, frame] = 0
                new_f0_scores[cand, frame] = 0
    return new_f0_candidates, new_f0_scores
def harvest_search_f0_base(f0_candidates, f0_scores):
    """Per frame, select the candidate with the highest score."""
    f0_base = f0_candidates[0, :] * 0.
    for frame in range(len(f0_base)):
        best = np.argmax(f0_scores[:, frame])
        f0_base[frame] = f0_candidates[best, frame]
    return f0_base
def harvest_fix_step_1(f0_base, allowed_range):
    """Step 1: zero frames whose F0 changes faster than ``allowed_range``."""
    # Step 1: Rapid change of f0 contour is replaced by 0
    f0_step1 = f0_base.copy()
    # The first two frames have no history to extrapolate from
    f0_step1[0] = 0.
    f0_step1[1] = 0.
    for i in range(2, len(f0_base)):
        if f0_base[i] == 0:
            continue
        # Linear extrapolation from the previous two frames
        reference_f0 = f0_base[i - 1] * 2 - f0_base[i - 2]
        jump_vs_ref = np.abs((f0_base[i] - reference_f0) / reference_f0)
        jump_vs_prev = np.abs((f0_base[i] - f0_base[i - 1]) / f0_base[i - 1])
        if jump_vs_ref > allowed_range and jump_vs_prev > allowed_range:
            f0_step1[i] = 0.
    return f0_step1
def harvest_fix_step_2(f0_step1, voice_range_minimum):
    """Step 2: remove voiced segments shorter than ``voice_range_minimum``."""
    f0_step2 = f0_step1.copy()
    boundary_list = harvest_get_boundary_list(f0_step1)
    # boundary_list holds (start, end) pairs of voiced segments
    for i in range(1, int(len(boundary_list) / 2.) + 1):
        seg_start = boundary_list[(2 * i) - 2]
        seg_end = boundary_list[(2 * i) - 1]
        if seg_end - seg_start < voice_range_minimum:
            # +1 because the slice end is exclusive while the boundary
            # index is inclusive
            f0_step2[seg_start:seg_end + 1] = 0.
    return f0_step2
def harvest_fix_step_3(f0_step2, f0_candidates, allowed_range, f0_scores):
    """
    Step 3: extend each voiced segment outward along the candidates and
    merge segments that grew long enough.
    """
    f0_step3 = f0_step2.copy()
    boundary_list = harvest_get_boundary_list(f0_step2)
    multichannel_f0 = harvest_get_multichannel_f0(f0_step2, boundary_list)
    rrange = np.zeros((int(len(boundary_list) / 2), 2))
    # threshold1: max frames to extend each direction;
    # threshold2 / mean_f0: minimum segment length to keep
    threshold1 = 100
    threshold2 = 2200
    count = 0
    for i in range(1, int(len(boundary_list) / 2) + 1):
        # changed to 2 * i - 2
        # Extend forward from the segment end, then backward from its start
        extended_f0, tmp_range_1 = harvest_extend_f0(multichannel_f0[i - 1, :],
                boundary_list[(2 * i) - 1],
                min([len(f0_step2) - 1, boundary_list[(2 * i) - 1] + threshold1]),
                1, f0_candidates, allowed_range)
        tmp_f0_sequence, tmp_range_0 = harvest_extend_f0(extended_f0,
                boundary_list[(2 * i) - 2],
                max([2, boundary_list[(2 * i) - 2] - threshold1]), -1,
                f0_candidates, allowed_range)
        mean_f0 = np.mean(tmp_f0_sequence[tmp_range_0: tmp_range_1 + 1])
        if threshold2 / mean_f0 < (tmp_range_1 - tmp_range_0):
            # Keep this extended segment for merging
            multichannel_f0[count, :] = tmp_f0_sequence
            rrange[count, :] = np.array([tmp_range_0, tmp_range_1])
            count = count + 1
    if count > 0:
        multichannel_f0 = multichannel_f0[:count, :]
        rrange = rrange[:count, :]
        f0_step3 = harvest_merge_f0(multichannel_f0, rrange, f0_candidates,
                f0_scores)
    return f0_step3
def harvest_merge_f0(multichannel_f0, rrange, f0_candidates, f0_scores):
    """Merge per-section F0 contours (rows of multichannel_f0) into one contour.

    Sections are visited in order of start frame. Non-overlapping sections are
    copied in directly; overlapping ones are resolved by candidate score in
    harvest_merge_f0_sub. NOTE(review): rrange is mutated in place — row
    sorted_order[0] is reused as the running "merged so far" range.
    """
    number_of_channels = len(multichannel_f0)
    sorted_order = np.argsort(rrange[:, 0])
    f0 = multichannel_f0[sorted_order[0], :]
    for i in range(1, number_of_channels):
        if rrange[sorted_order[i], 0] - rrange[sorted_order[0], 1] > 0:
            # no overlapping
            f0[int(rrange[sorted_order[i], 0]):int(rrange[sorted_order[i], 1])] = multichannel_f0[sorted_order[i],
                int(rrange[sorted_order[i], 0]):int(
                rrange[sorted_order[i], 1])]
            # advance the running range to this section
            cp = rrange.copy()
            rrange[sorted_order[0], 0] = cp[sorted_order[i], 0]
            rrange[sorted_order[0], 1] = cp[sorted_order[i], 1]
        else:
            # overlap: resolve frame-by-frame using candidate scores
            cp = rrange.copy()
            res = harvest_merge_f0_sub(f0, cp[sorted_order[0], 0],
                cp[sorted_order[0], 1],
                multichannel_f0[sorted_order[i], :],
                cp[sorted_order[i], 0],
                cp[sorted_order[i], 1], f0_candidates, f0_scores)
            f0 = res[0]
            rrange[sorted_order[0], 1] = res[1]
    return f0
def harvest_merge_f0_sub(f0_1, st1, ed1, f0_2, st2, ed2, f0_candidates,
        f0_scores):
    """Merge two overlapping contours, keeping the better-scoring one over the
    overlap. Returns (merged_f0, new_end_frame).

    NOTE(review): f0_1 is modified in place (merged_f0 aliases it).
    """
    merged_f0 = f0_1
    # f0_2 is completely contained in f0_1: nothing to merge
    if (st1 <= st2) and (ed1 >= ed2):
        new_ed = ed1
        return merged_f0, new_ed
    new_ed = ed2
    score1 = 0.
    score2 = 0.
    # accumulate candidate scores over the overlapping frames [st2, ed1]
    for i in range(int(st2), int(ed1) + 1):
        score1 = score1 + harvest_serach_score(f0_1[i], f0_candidates[:, i], f0_scores[:, i])
        score2 = score2 + harvest_serach_score(f0_2[i], f0_candidates[:, i], f0_scores[:, i])
    if score1 > score2:
        # f0_1 wins the overlap; take f0_2 only past ed1
        merged_f0[int(ed1):int(ed2) + 1] = f0_2[int(ed1):int(ed2) + 1]
    else:
        # f0_2 wins; take it over its whole span
        merged_f0[int(st2):int(ed2) + 1] = f0_2[int(st2):int(ed2) + 1]
    return merged_f0, new_ed
def harvest_serach_score(f0, f0_candidates, f0_scores):
    """Return the best score among candidates equal to f0 (0 when none match).

    Note: the name typo ("serach") is kept — callers use this spelling.
    """
    best = 0
    for idx, cand in enumerate(f0_candidates):
        if cand == f0 and f0_scores[idx] > best:
            best = f0_scores[idx]
    return best
def harvest_extend_f0(f0, origin, last_point, shift, f0_candidates,
        allowed_range):
    """Extend a contour from `origin` toward `last_point`, one frame at a time.

    shift is +1 (forward) or -1 (backward). At each step the best candidate
    near the previous F0 is chosen; extension stops after `threshold`
    consecutive frames without an acceptable candidate. Returns the extended
    contour and the last frame actually reached.
    """
    threshold = 4
    extended_f0 = f0.copy()
    tmp_f0 = extended_f0[origin]
    shifted_origin = origin
    count = 0
    for i in np.arange(origin, last_point + shift, shift):
        # off by 1 issues
        if (i + shift) >= f0_candidates.shape[1]:
            continue
        bf0, bs = harvest_select_best_f0(tmp_f0,
            f0_candidates[:, i + shift], allowed_range)
        extended_f0[i + shift] = bf0
        if extended_f0[i + shift] != 0:
            # found a usable candidate: reset failure counter, move the origin
            tmp_f0 = extended_f0[i + shift]
            count = 0
            shifted_origin = i + shift
        else:
            count = count + 1
        if count == threshold:
            break
    return extended_f0, shifted_origin
def harvest_get_multichannel_f0(f0, boundary_list):
    """Split f0 into one row per voiced section described by boundary_list."""
    n_sections = int(len(boundary_list) / 2)
    channels = np.zeros((n_sections, len(f0)))
    for section in range(n_sections):
        start = boundary_list[2 * section]
        stop = boundary_list[2 * section + 1] + 1
        channels[section, start:stop] = f0[start:stop]
    return channels
def harvest_get_boundary_list(f0):
    """Return voiced-section boundaries as a flat [st0, en0, st1, en1, ...] array.

    The first and last frames are forced unvoiced so every section is closed.
    """
    vuv = (f0 != 0).astype("float64")
    vuv[0] = 0.
    vuv[-1] = 0.
    edges = np.where(np.diff(vuv) != 0)[0]
    # even entries are rising edges: shift them onto the first voiced frame
    edges[::2] = edges[::2] + 1
    return edges
def harvest_fix_step_4(f0_step3, threshold):
    """Fix step 4: linearly bridge unvoiced gaps shorter than `threshold` frames."""
    f0_step4 = f0_step3.copy()
    boundary_list = harvest_get_boundary_list(f0_step3)
    for i in range(1, int(len(boundary_list) / 2.)):
        # gap between the end of section i and the start of section i+1
        distance = boundary_list[(2 * i)] - boundary_list[(2 * i) - 1] - 1
        if distance >= threshold:
            continue
        # interpolation endpoints, nudged by +/- 1 Hz toward each other
        boundary0 = f0_step3[boundary_list[(2 * i) - 1]] + 1
        boundary1 = f0_step3[boundary_list[(2 * i)]] - 1
        coefficient = (boundary1 - boundary0) / float((distance + 1))
        count = 1
        st = boundary_list[(2 * i) - 1] + 1
        ed = boundary_list[(2 * i)]
        # fill the gap with a straight line between the two endpoints
        for j in range(st, ed):
            f0_step4[j] = boundary0 + coefficient * count
            count = count + 1
    return f0_step4
def harvest_fix_f0_contour(f0_candidates, f0_scores):
    """Run the four contour-fixing passes; return (f0, vuv)."""
    base = harvest_search_f0_base(f0_candidates, f0_scores)
    step1 = harvest_fix_step_1(base, 0.008)  # optimized?
    step2 = harvest_fix_step_2(step1, 6)  # optimized?
    step3 = harvest_fix_step_3(step2, f0_candidates, 0.18, f0_scores)  # optimized?
    f0 = harvest_fix_step_4(step3, 9)  # optimized
    vuv = f0.copy()
    vuv[vuv != 0] = 1.
    return f0, vuv
def harvest_filter_f0_contour(f0, st, ed, b, a):
    """Zero-phase (forward-backward) filter f0 over [st, ed].

    Frames outside the section are padded with the boundary values before
    filtering and zeroed again afterwards.
    """
    padded = f0.copy()
    padded[:st] = padded[st]
    padded[ed + 1:] = padded[ed]
    forward = sg.lfilter(b, a, padded)
    backward = sg.lfilter(b, a, forward[::-1])
    smoothed = backward[::-1].copy()
    smoothed[:st] = 0.
    smoothed[ed + 1:] = 0.
    return smoothed
def harvest_smooth_f0_contour(f0):
    """Zero-phase low-pass each voiced section of the contour.

    The contour is padded with 300 zero frames on each side so sections near
    the edges filter cleanly; the padding is stripped before returning.
    """
    # biquad low-pass coefficients
    b = np.array([0.0078202080334971724, 0.015640416066994345, 0.0078202080334971724])
    a = np.array([1.0, -1.7347257688092754, 0.76600660094326412])
    smoothed_f0 = np.concatenate([np.zeros(300, ), f0, np.zeros(300, )])
    boundary_list = harvest_get_boundary_list(smoothed_f0)
    multichannel_f0 = harvest_get_multichannel_f0(smoothed_f0, boundary_list)
    # filter every voiced section independently
    for i in range(1, int(len(boundary_list) / 2) + 1):
        tmp_f0_contour = harvest_filter_f0_contour(multichannel_f0[i - 1, :],
            boundary_list[(2 * i) - 2], boundary_list[(2 * i) - 1], b, a)
        st = boundary_list[(2 * i) - 2]
        ed = boundary_list[(2 * i) - 1] + 1
        smoothed_f0[st:ed] = tmp_f0_contour[st:ed]
    smoothed_f0 = smoothed_f0[300:-300]
    return smoothed_f0
def _world_get_temporal_positions(x_len, fs):
frame_period = 5
basic_frame_period = 1
basic_temporal_positions = np.arange(0, x_len / float(fs), basic_frame_period / float(1000))
temporal_positions = np.arange(0,
x_len / float(fs),
frame_period / float(1000))
return basic_temporal_positions, temporal_positions
def harvest(x, fs):
    """Harvest F0 estimation.

    Parameters: x — input waveform; fs — sample rate in Hz.
    Returns (temporal_positions, f0, vuv, f0_candidates), with f0/vuv sampled
    on the 5 ms frame grid.
    Fix: removed the no-op self-assignment `f0_candidates = f0_candidates`.
    """
    f0_floor = 71
    f0_ceil = 800
    target_fs = 8000
    channels_in_octave = 40.
    basic_temporal_positions, temporal_positions = _world_get_temporal_positions(len(x), fs)
    # widen the search band slightly beyond [f0_floor, f0_ceil]
    adjusted_f0_floor = f0_floor * 0.9
    adjusted_f0_ceil = f0_ceil * 1.1
    boundary_f0_list = np.arange(1, np.ceil(
        np.log2(adjusted_f0_ceil / adjusted_f0_floor) * channels_in_octave) + 1) / float(channels_in_octave)
    boundary_f0_list = adjusted_f0_floor * 2.0 ** boundary_f0_list
    y, actual_fs = harvest_get_downsampled_signal(x, fs, target_fs)
    fft_size = 2. ** np.ceil(np.log2(len(y) + np.round(fs / f0_floor * 4) + 1))
    y_spectrum = np.fft.fft(y, int(fft_size))
    raw_f0_candidates = harvest_get_raw_f0_candidates(
        len(basic_temporal_positions),
        boundary_f0_list, len(y), basic_temporal_positions, actual_fs,
        y_spectrum, f0_floor, f0_ceil)
    f0_candidates, number_of_candidates = harvest_detect_official_f0_candidates(raw_f0_candidates)
    f0_candidates = harvest_overlap_f0_candidates(f0_candidates, number_of_candidates)
    f0_candidates, f0_scores = harvest_refine_candidates(y, actual_fs,
        basic_temporal_positions, f0_candidates, f0_floor, f0_ceil)
    f0_candidates, f0_scores = harvest_remove_unreliable_candidates(f0_candidates, f0_scores)
    connected_f0, vuv = harvest_fix_f0_contour(f0_candidates, f0_scores)
    smoothed_f0 = harvest_smooth_f0_contour(connected_f0)
    # resample the 1 ms contour onto the 5 ms frame grid
    idx = np.minimum(len(smoothed_f0) - 1, np.round(temporal_positions * 1000)).astype("int32")
    f0 = smoothed_f0[idx]
    vuv = vuv[idx]
    return temporal_positions, f0, vuv, f0_candidates
def cheaptrick_get_windowed_waveform(x, fs, current_f0, current_position):
    """Cut a 3-period segment around current_position, apply a Hanning-type
    window, and remove the window-weighted DC component."""
    half_len = np.round(1.5 * fs / float(current_f0))
    base_index = np.arange(-half_len, half_len + 1)
    raw_index = np.round(current_position * fs + 0.001) + base_index + 1
    # clip indices to the signal range (1-based), then shift to 0-based
    safe_index = np.minimum(len(x), np.maximum(1, np.round(raw_index))).astype("int32")
    safe_index = safe_index - 1
    segment = x[safe_index]
    time_axis = base_index / float(fs) / 1.5
    window1 = 0.5 * np.cos(np.pi * time_axis * float(current_f0)) + 0.5
    window1 = window1 / np.sqrt(np.sum(window1 ** 2))
    return segment * window1 - window1 * np.mean(segment * window1) / np.mean(window1)
def cheaptrick_get_power_spectrum(waveform, fs, fft_size, f0):
    """Power spectrum with sub-f0 correction and mirrored upper half.

    NOTE(review): mutates the locally-computed array and returns it; the
    boolean masks below are truncated to the replica length before indexing.
    """
    power_spectrum = np.abs(np.fft.fft(waveform, fft_size)) ** 2
    frequency_axis = np.arange(fft_size) / float(fft_size) * float(fs)
    # bins below f0 (+ one bin of margin) receive a mirrored replica
    ind = frequency_axis < (f0 + fs / fft_size)
    low_frequency_axis = frequency_axis[ind]
    low_frequency_replica = interp1d(f0 - low_frequency_axis,
        power_spectrum[ind], kind="linear",
        fill_value="extrapolate")(low_frequency_axis)
    p1 = low_frequency_replica[(frequency_axis < f0)[:len(low_frequency_replica)]]
    p2 = power_spectrum[(frequency_axis < f0)[:len(power_spectrum)]]
    power_spectrum[frequency_axis < f0] = p1 + p2
    # enforce the real-signal symmetry: upper half mirrors the lower half
    lb1 = int(fft_size / 2) + 1
    lb2 = 1
    ub2 = int(fft_size / 2)
    power_spectrum[lb1:] = power_spectrum[lb2:ub2][::-1]
    return power_spectrum
def cheaptrick_linear_smoothing(power_spectrum, f0, fs, fft_size):
    """Rectangular smoothing of width 2*f0/3 Hz, done via a cumulative integral."""
    doubled_axis = np.arange(2 * fft_size) / float(fft_size) * fs - fs
    doubled_spectrum = np.concatenate([power_spectrum, power_spectrum])
    integrated = np.cumsum(doubled_spectrum * (fs / float(fft_size)))
    centers = np.arange(int(fft_size / 2) + 1) / float(fft_size) * fs
    # the integral is sampled half a bin off-center on both sides of each bin
    shifted_axis = doubled_axis + fs / float(fft_size) / 2.
    low_levels = cheaptrick_interp1h(shifted_axis, integrated, centers - f0 / 3.)
    high_levels = cheaptrick_interp1h(shifted_axis, integrated, centers + f0 / 3.)
    return (high_levels - low_levels) * 1.5 / f0
def cheaptrick_interp1h(x, y, xi):
    """Fast linear interpolation of (x, y) at points xi, assuming x is
    uniformly spaced; xi is clipped to [x[0], x[-1]]."""
    step = float(x[1] - x[0])
    clipped = np.maximum(x[0], np.minimum(x[-1], xi))
    base = (np.floor((clipped - x[0]) / step)).astype("int32")
    frac = (clipped - x[0]) / step - base
    dy = np.zeros_like(y)
    dy[:-1] = y[1:] - y[:-1]
    return y[base] + dy[base] * frac
def cheaptrick_smoothing_with_recovery(smoothed_spectrum, f0, fs, fft_size, q1):
    """Lifter the log spectrum with a sinc smoothing lifter and the q1
    compensation lifter, returning the first fft_size/2 + 1 envelope bins."""
    quefrency_axis = np.arange(fft_size) / float(fs)
    # 0 is NaN  (0/0 at quefrency 0; replaced with the sinc limit 1. below)
    smoothing_lifter = np.sin(np.pi * f0 * quefrency_axis) / (np.pi * f0 * quefrency_axis)
    # mirror the lifter to make it symmetric (real, even cepstral weighting)
    p = smoothing_lifter[1:int(fft_size / 2)][::-1].copy()
    smoothing_lifter[int(fft_size / 2) + 1:] = p
    smoothing_lifter[0] = 1.
    compensation_lifter = (1 - 2. * q1) + 2. * q1 * np.cos(2 * np.pi * quefrency_axis * f0)
    p = compensation_lifter[1:int(fft_size / 2)][::-1].copy()
    compensation_lifter[int(fft_size / 2) + 1:] = p
    tandem_cepstrum = np.fft.fft(np.log(smoothed_spectrum))
    tmp_spectral_envelope = np.exp(np.real(np.fft.ifft(tandem_cepstrum * smoothing_lifter * compensation_lifter)))
    spectral_envelope = tmp_spectral_envelope[:int(fft_size / 2) + 1]
    return spectral_envelope
def cheaptrick_estimate_one_slice(x, fs, current_f0, current_position, fft_size, q1):
    """Spectral envelope at one frame: window -> power -> smooth -> lifter."""
    waveform = cheaptrick_get_windowed_waveform(x, fs, current_f0, current_position)
    power = cheaptrick_get_power_spectrum(waveform, fs, fft_size, current_f0)
    smoothed = cheaptrick_linear_smoothing(power, current_f0, fs, fft_size)
    mirrored = np.concatenate([smoothed, smoothed[1:-1][::-1]])
    return cheaptrick_smoothing_with_recovery(mirrored, current_f0, fs, fft_size, q1)
def cheaptrick(x, fs, temporal_positions, f0_sequence,
               vuv, fftlen="auto", q1=-0.15):
    """CheapTrick spectral envelope estimation.

    Returns (temporal_positions, spectrogram, fs) where spectrogram has one
    row per frame and fftlen/2 + 1 columns. Unvoiced or too-low F0 frames are
    analyzed with a default F0 of 500 Hz.
    """
    f0_seq = f0_sequence.copy()
    f0_low_limit = 71
    default_f0 = 500
    if fftlen == "auto":
        fftlen = int(2 ** np.ceil(np.log2(3. * float(fs) / f0_low_limit + 1)))
    # raise ValueError("Only fftlen auto currently supported")
    fft_size = fftlen
    # the usable lower F0 limit is set by the FFT resolution
    f0_low_limit = fs * 3.0 / (fft_size - 3.0)
    f0_seq[vuv == 0] = default_f0
    spectrogram = np.zeros((int(fft_size / 2.) + 1, len(f0_seq)))
    for frame, (f0_val, pos) in enumerate(zip(f0_seq, temporal_positions)):
        if f0_val < f0_low_limit:
            f0_val = default_f0
        spectrogram[:, frame] = cheaptrick_estimate_one_slice(x, fs, f0_val, pos, fft_size, q1)
    return temporal_positions, spectrogram.T, fs
def d4c_love_train(x, fs, current_f0, current_position, threshold):
    """Voicing check: return 1 when the sub-4 kHz share of band energy
    (100 Hz .. 7.9 kHz) exceeds `threshold`, else 0."""
    if current_f0 == 0:
        return 0
    lowest_f0 = 40
    f0_used = max([current_f0, lowest_f0])
    fft_size = int(2 ** np.ceil(np.log2(3. * fs / lowest_f0 + 1)))
    bin_hz = float(fs) / fft_size
    boundary0 = int(np.ceil(100 / bin_hz))
    boundary1 = int(np.ceil(4000 / bin_hz))
    boundary2 = int(np.ceil(7900 / bin_hz))
    waveform = d4c_get_windowed_waveform(x, fs, f0_used, current_position, 1.5, 2)
    power_spectrum = np.abs(np.fft.fft(waveform, int(fft_size)) ** 2)
    # ignore energy at and below ~100 Hz
    power_spectrum[0:boundary0 + 1] = 0.
    cumulative = np.cumsum(power_spectrum)
    return 1 if (cumulative[boundary1] / cumulative[boundary2]) > threshold else 0
def d4c_get_windowed_waveform(x, fs, current_f0, current_position, half_length,
                              window_type):
    """Window a 2*half_length-period segment of x around current_position.

    window_type 1 applies a Hanning window, 2 a Blackman window; the
    window-weighted DC component is removed. Raises ValueError otherwise.
    """
    half_window_length = int(np.round(half_length * fs / current_f0))
    base_index = np.arange(-half_window_length, half_window_length + 1)
    raw_index = np.round(current_position * fs + 0.001) + base_index + 1
    # clip to the valid 1-based range, then convert to 0-based
    safe_index = np.minimum(len(x), np.maximum(1, np.round(raw_index))).astype("int32") - 1
    segment = x[safe_index]
    time_axis = base_index / float(fs) / float(half_length)
    if window_type == 1:
        # Hanning
        window1 = 0.5 * np.cos(np.pi * time_axis * current_f0) + 0.5
    elif window_type == 2:
        # Blackman
        window1 = 0.08 * np.cos(np.pi * time_axis * current_f0 * 2)
        window1 += 0.5 * np.cos(np.pi * time_axis * current_f0) + 0.42
    else:
        raise ValueError("Unknown window type")
    return segment * window1 - window1 * np.mean(segment * window1) / np.mean(window1)
def d4c_get_static_centroid(x, fs, current_f0, current_position, fft_size):
    """Static spectral centroid: sum of two centroids taken a quarter period
    before and after current_position, then DC-corrected."""
    quarter_period = 1. / current_f0 / 4.
    waveform_ahead = d4c_get_windowed_waveform(x, fs, current_f0,
        current_position + quarter_period, 2, 2)
    waveform_behind = d4c_get_windowed_waveform(x, fs, current_f0,
        current_position - quarter_period, 2, 2)
    combined = d4c_get_centroid(waveform_ahead, fft_size) + d4c_get_centroid(waveform_behind, fft_size)
    return d4c_dc_correction(combined, fs, fft_size, current_f0)
def d4c_get_centroid(x, fft_size):
    """Group-delay-style centroid spectrum of the (energy-normalized) signal."""
    fft_size = int(fft_size)
    sig = x.copy()
    sig = sig / np.sqrt(np.sum(sig ** 2))
    time_axis = np.arange(1, len(sig) + 1)
    spectrum = np.fft.fft(sig, fft_size)
    weighted = np.fft.fft(-sig * 1j * time_axis, fft_size)
    # Im{S} * Re{W} - Im{W} * Re{S}
    return spectrum.imag * weighted.real - weighted.imag * spectrum.real
def d4c_dc_correction(signal, fs, fft_size, f0):
    """Add a mirrored low-frequency replica below f0 and re-impose the
    real-signal symmetry on the upper half.

    NOTE(review): `signal` is modified in place and also returned.
    """
    fft_size = int(fft_size)
    frequency_axis = np.arange(fft_size) / fft_size * fs
    # bins below f0 (+ one bin of margin) are used to build the replica
    low_frequency_axis = frequency_axis[frequency_axis < f0 + fs / fft_size]
    low_frequency_replica = interp1d(f0 - low_frequency_axis,
        signal[frequency_axis < f0 + fs / fft_size],
        kind="linear",
        fill_value="extrapolate")(low_frequency_axis)
    idx = frequency_axis < f0
    # the mask is truncated to the replica length before indexing
    signal[idx] = low_frequency_replica[idx[:len(low_frequency_replica)]] + signal[idx]
    # mirror the lower half onto the upper half
    signal[int(fft_size / 2.) + 1:] = signal[1: int(fft_size / 2.)][::-1]
    return signal
def d4c_linear_smoothing(group_delay, fs, fft_size, width):
    """Rectangular smoothing of width `width` Hz via a cumulative integral."""
    doubled_axis = np.arange(2 * fft_size) / float(fft_size) * fs - fs
    doubled = np.concatenate([group_delay, group_delay])
    integrated = np.cumsum(doubled * (fs / float(fft_size)))
    centers = np.arange(int(fft_size / 2) + 1) / float(fft_size) * fs
    # sample the integral half a bin off-center on both sides of each bin
    shifted_axis = doubled_axis + fs / float(fft_size) / 2.
    low_levels = cheaptrick_interp1h(shifted_axis, integrated, centers - width / 2.)
    high_levels = cheaptrick_interp1h(shifted_axis, integrated, centers + width / 2.)
    return (high_levels - low_levels) / width
def d4c_get_smoothed_power_spectrum(waveform, fs, f0, fft_size):
    """Power spectrum, DC-corrected and smoothed over an f0-wide band, mirrored."""
    power = np.abs(np.fft.fft(waveform, int(fft_size))) ** 2
    envelope = d4c_dc_correction(power, fs, fft_size, f0)
    envelope = d4c_linear_smoothing(envelope, fs, fft_size, f0)
    return np.concatenate([envelope, envelope[1:-1][::-1]])
def d4c_get_static_group_delay(static_centroid, smoothed_power_spectrum, fs, f0,
                               fft_size):
    """Static group delay: centroid normalized by power, band-smoothed and
    de-trended by a wider smoothing pass, mirrored to full length."""
    gd = static_centroid / smoothed_power_spectrum
    gd = d4c_linear_smoothing(gd, fs, fft_size, f0 / 2.)
    gd = np.concatenate([gd, gd[1:-1][::-1]])
    trend = d4c_linear_smoothing(gd, fs, fft_size, f0)
    gd = gd[:int(fft_size / 2) + 1] - trend
    return np.concatenate([gd, gd[1:-1][::-1]])
def d4c_get_coarse_aperiodicity(group_delay, fs, fft_size,
                                frequency_interval, number_of_aperiodicities, window1):
    """Coarse band aperiodicity (negative dB) from windowed group-delay spectra.

    For each band centered at multiples of frequency_interval, the sorted
    cumulative power distribution is compared near its tail.
    """
    boundary = np.round(fft_size / len(window1) * 8)
    half_window_length = np.floor(len(window1) / 2)
    coarse = np.zeros((number_of_aperiodicities, 1))
    for band in range(number_of_aperiodicities):
        center = np.floor(frequency_interval * (band + 1) / (fs / float(fft_size)))
        segment = group_delay[int(center - half_window_length):int(center + half_window_length + 1)] * window1
        power = np.abs(np.fft.fft(segment, int(fft_size))) ** 2
        cumulative = np.cumsum(np.sort(power[:int(fft_size / 2) + 1]))
        coarse[band] = -10 * np.log10(
            cumulative[int(fft_size / 2 - boundary) - 1] / cumulative[-1])
    return coarse
def d4c_estimate_one_slice(x, fs, current_f0, frequency_interval,
                           current_position, fft_size, number_of_aperiodicities, window1):
    """Coarse aperiodicity for one frame; all-zero column when unvoiced."""
    if current_f0 == 0:
        return np.zeros((number_of_aperiodicities, 1))
    centroid = d4c_get_static_centroid(x, fs, current_f0, current_position, fft_size)
    waveform = d4c_get_windowed_waveform(x, fs, current_f0, current_position, 2, 1)
    smoothed_power = d4c_get_smoothed_power_spectrum(waveform, fs, current_f0, fft_size)
    group_delay = d4c_get_static_group_delay(centroid, smoothed_power, fs,
        current_f0, fft_size)
    return d4c_get_coarse_aperiodicity(group_delay, fs, fft_size,
        frequency_interval, number_of_aperiodicities, window1)
def d4c(x, fs, temporal_positions_h, f0_h, vuv_h, threshold="default",
        fft_size="auto"):
    """D4C band aperiodicity estimation.

    Returns (temporal_positions, f0, vuv, aperiodicity, coarse_aperiodicity).
    Fix: the `threshold` argument was previously overwritten with 0.85
    unconditionally, so caller-provided values were ignored; it is now only
    defaulted when left as "default".
    """
    f0_low_limit = 47
    if fft_size == "auto":
        fft_size = 2 ** np.ceil(np.log2(4. * fs / f0_low_limit + 1.))
    else:
        raise ValueError("Only fft_size auto currently supported")
    f0_low_limit_for_spectrum = 71
    fft_size_for_spectrum = 2 ** np.ceil(np.log2(3 * fs / f0_low_limit_for_spectrum + 1.))
    if threshold == "default":
        threshold = 0.85
    upper_limit = 15000
    frequency_interval = 3000
    f0 = f0_h.copy()
    temporal_positions = temporal_positions_h.copy()
    f0[vuv_h == 0] = 0.
    number_of_aperiodicities = int(
        np.floor(np.min([upper_limit, fs / 2. - frequency_interval]) / float(frequency_interval)))
    window_length = np.floor(frequency_interval / (fs / float(fft_size))) * 2 + 1
    window1 = harvest_nuttall(window_length)
    aperiodicity = np.zeros((int(fft_size_for_spectrum / 2) + 1, len(f0)))
    coarse_ap = np.zeros((1, len(f0)))
    frequency_axis = np.arange(int(fft_size_for_spectrum / 2) + 1) * float(fs) / fft_size_for_spectrum
    coarse_axis = np.arange(number_of_aperiodicities + 2) * frequency_interval
    coarse_axis[-1] = fs / 2.
    for i in range(len(f0)):
        # frames that fail the voicing check get near-total aperiodicity
        r = d4c_love_train(x, fs, f0[i], temporal_positions_h[i], threshold)
        if r == 0:
            aperiodicity[:, i] = 1 - 0.000000000001
            continue
        current_f0 = max([f0_low_limit, f0[i]])
        coarse_aperiodicity = d4c_estimate_one_slice(x, fs, current_f0,
            frequency_interval, temporal_positions[i], fft_size,
            number_of_aperiodicities, window1)
        coarse_ap[0, i] = coarse_aperiodicity.ravel()[0]
        # floor the coarse values depending on f0 before interpolation
        coarse_aperiodicity = np.maximum(0, coarse_aperiodicity - (current_f0 - 100) * 2. / 100.)
        piece = np.concatenate([[-60], -coarse_aperiodicity.ravel(), [-0.000000000001]])
        part = interp1d(coarse_axis, piece, kind="linear")(frequency_axis) / 20.
        aperiodicity[:, i] = 10 ** part
    return temporal_positions_h, f0_h, vuv_h, aperiodicity.T, coarse_ap.squeeze()
def world_synthesis_time_base_generation(temporal_positions, f0, fs, vuv,
        time_axis, default_f0):
    """Generate pulse locations for synthesis from the frame-rate F0/VUV.

    F0 and VUV are upsampled onto time_axis; unvoiced samples get default_f0.
    A pulse is emitted wherever the accumulated phase wraps past 2*pi.
    Returns (pulse_locations, pulse_locations_index, vuv_interpolated).
    """
    f0_interpolated_raw = interp1d(temporal_positions, f0, kind="linear",
        fill_value="extrapolate")(time_axis)
    vuv_interpolated = interp1d(temporal_positions, vuv, kind="linear",
        fill_value="extrapolate")(time_axis)
    vuv_interpolated = vuv_interpolated > 0.5
    f0_interpolated = f0_interpolated_raw * vuv_interpolated.astype("float32")
    f0_interpolated[f0_interpolated == 0] = f0_interpolated[f0_interpolated == 0] + default_f0
    total_phase = np.cumsum(2 * np.pi * f0_interpolated / float(fs))
    core = np.mod(total_phase, 2 * np.pi)
    # a large jump in the wrapped phase marks a wrap, i.e. a pulse
    core = np.abs(core[1:] - core[:-1])
    # account for diff, avoid deprecation warning with [:-1]
    pulse_locations = time_axis[:-1][core > (np.pi / 2.)]
    pulse_locations_index = np.round(pulse_locations * fs).astype("int32")
    return pulse_locations, pulse_locations_index, vuv_interpolated
def world_synthesis_get_spectral_parameters(temporal_positions,
        temporal_position_index, spectrogram, amplitude_periodic,
        amplitude_random, pulse_locations):
    """Interpolate spectrum / periodic / aperiodic amplitude at one pulse.

    temporal_position_index is a (possibly fractional) 1-based frame index;
    the -1 below converts to 0-based. When it falls between frames, each
    parameter is linearly interpolated at the pulse time.
    """
    floor_index = int(np.floor(temporal_position_index) - 1)
    assert floor_index >= 0
    ceil_index = int(np.ceil(temporal_position_index) - 1)
    t1 = temporal_positions[floor_index]
    t2 = temporal_positions[ceil_index]
    if t1 == t2:
        # exactly on a frame: take its columns directly
        spectrum_slice = spectrogram[:, floor_index]
        periodic_slice = amplitude_periodic[:, floor_index]
        aperiodic_slice = amplitude_random[:, floor_index]
    else:
        cs = np.concatenate([spectrogram[:, floor_index][None],
            spectrogram[:, ceil_index][None]], axis=0)
        # clamp the pulse time into [t1, t2] before interpolating
        mmm = max([t1, min([t2, pulse_locations])])
        spectrum_slice = interp1d(np.array([t1, t2]), cs,
            kind="linear", axis=0)(mmm.copy())
        cp = np.concatenate([amplitude_periodic[:, floor_index][None],
            amplitude_periodic[:, ceil_index][None]], axis=0)
        periodic_slice = interp1d(np.array([t1, t2]), cp,
            kind="linear", axis=0)(mmm.copy())
        ca = np.concatenate([amplitude_random[:, floor_index][None],
            amplitude_random[:, ceil_index][None]], axis=0)
        aperiodic_slice = interp1d(np.array([t1, t2]), ca,
            kind="linear", axis=0)(mmm.copy())
    return spectrum_slice, periodic_slice, aperiodic_slice
"""
Filter data with an FIR filter using the overlap-add method.
from http://projects.scipy.org/scipy/attachment/ticket/837/fftfilt.py
"""
def nextpow2(x):
    """Return the first integer N such that 2**N >= abs(x)."""
    magnitude = np.abs(x)
    return np.ceil(np.log2(magnitude))
def fftfilt(b, x, *n):
    """Filter the signal x with the FIR filter described by the
    coefficients in b using the overlap-add method. If the FFT
    length n is not specified, it and the overlap-add block length
    are selected so as to minimize the computational cost of
    the filtering operation.

    Fixes: (1) take the real part of the IFFT explicitly instead of
    assigning a complex array into the float output (which discarded the
    imaginary part with a ComplexWarning); (2) guard against an empty
    candidate FFT-length set (argmin on an empty array raised) when N_b and
    N_x are close in magnitude.
    """
    N_x = len(x)
    N_b = len(b)
    # Determine the FFT length to use:
    if len(n):
        # Use the specified FFT length (rounded up to the nearest
        # power of 2), provided that it is no less than the filter
        # length:
        n = n[0]
        if n != int(n) or n <= 0:
            raise ValueError('n must be a nonnegative integer')
        if n < N_b:
            n = N_b
        N_fft = 2 ** nextpow2(n)
    else:
        if N_x > N_b:
            # When the filter length is smaller than the signal,
            # choose the FFT length and block size that minimize the
            # FLOPS cost. Since the cost for a length-N FFT is
            # (N/2)*log2(N) and the filtering operation of each block
            # involves 2 FFT operations and N multiplications, the
            # cost of the overlap-add method for 1 length-N block is
            # N*(1+log2(N)). For the sake of efficiency, only FFT
            # lengths that are powers of 2 are considered:
            N = 2 ** np.arange(np.ceil(np.log2(N_b)),
                               np.floor(np.log2(N_x)))
            if len(N) > 0:
                cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
                N_fft = N[np.argmin(cost)]
            else:
                # no power-of-2 candidate strictly between N_b and N_x:
                # fall back to a single block
                N_fft = 2 ** np.ceil(np.log2(N_b + N_x - 1))
        else:
            # When the filter length is at least as long as the signal,
            # filter the signal using a single block:
            N_fft = 2 ** nextpow2(N_b + N_x - 1)
    N_fft = int(N_fft)
    # Compute the block length:
    L = int(N_fft - N_b + 1)
    # Compute the transform of the filter:
    H = np.fft.fft(b, N_fft)
    y = np.zeros(N_x, dtype=np.float32)
    i = 0
    while i < N_x:
        il = min([i + L, N_x])
        k = min([i + N_fft, N_x])
        yt = np.fft.ifft(np.fft.fft(x[i:il], N_fft) * H, N_fft)  # Overlap..
        # the filtered signal is real; the residual imaginary part is
        # numerical noise, so drop it explicitly
        y[i:k] = y[i:k] + np.real(yt[:k - i])  # and add
        i += L
    return y
def world_synthesis(f0_d4c, vuv_d4c, aperiodicity_d4c,
                    spectrogram_ct, fs_ct, random_seed=1999):
    """WORLD synthesis from F0, VUV, aperiodicity and spectral envelope.

    Fixes: (1) `random_seed` was previously ignored (RandomState hardcoded to
    1999); (2) the clamp of the interpolated frame index was assigned to a
    misspelled dead variable (`temporal_postion_index`), so out-of-range
    indices were never clamped; (3) in the coarse-aperiodicity branch the
    f0-adjusted value was computed and then discarded — the raw coarse value
    was interpolated instead, inconsistent with d4c.
    """
    # swap 0 and 1 axis
    spectrogram_ct = spectrogram_ct.T
    fs = fs_ct
    # coarse -> fine aperiodicity
    if len(aperiodicity_d4c.shape) == 1 or aperiodicity_d4c.shape[1] == 1:
        print("Coarse aperiodicity detected - interpolating to full size")
        aper = np.zeros_like(spectrogram_ct)
        if len(aperiodicity_d4c.shape) == 1:
            aperiodicity_d4c = aperiodicity_d4c[None, :]
        else:
            aperiodicity_d4c = aperiodicity_d4c.T
        coarse_aper_d4c = aperiodicity_d4c
        frequency_interval = 3000
        upper_limit = 15000
        number_of_aperiodicities = int(
            np.floor(np.min([upper_limit, fs / 2. - frequency_interval]) / float(frequency_interval)))
        coarse_axis = np.arange(number_of_aperiodicities + 2) * frequency_interval
        coarse_axis[-1] = fs / 2.
        f0_low_limit_for_spectrum = 71
        fft_size_for_spectrum = 2 ** np.ceil(np.log2(3 * fs / f0_low_limit_for_spectrum + 1.))
        frequency_axis = np.arange(int(fft_size_for_spectrum / 2) + 1) * float(fs) / fft_size_for_spectrum
        for i in range(len(f0_d4c)):
            ca = coarse_aper_d4c[0, i]
            cf = f0_d4c[i]
            # apply the same f0-dependent floor d4c uses before interpolation
            coarse_aperiodicity = np.maximum(0, ca - (cf - 100) * 2. / 100.)
            piece = np.concatenate([[-60], -coarse_aperiodicity.ravel(), [-0.000000000001]])
            part = interp1d(coarse_axis, piece, kind="linear")(frequency_axis) / 20.
            aper[:, i] = 10 ** part
        aperiodicity_d4c = aper
    else:
        aperiodicity_d4c = aperiodicity_d4c.T
    default_f0 = 500.
    random_state = np.random.RandomState(random_seed)
    spectrogram = spectrogram_ct
    aperiodicity = aperiodicity_d4c
    # max 30s, if greater than thrown an error
    max_len = 5000000
    _, temporal_positions = _world_get_temporal_positions(max_len, fs)
    temporal_positions = temporal_positions[:spectrogram.shape[1]]
    vuv = vuv_d4c
    f0 = f0_d4c
    time_axis = np.arange(temporal_positions[0], temporal_positions[-1],
                          1. / fs)
    y = 0. * time_axis
    r = world_synthesis_time_base_generation(temporal_positions, f0, fs, vuv,
                                             time_axis, default_f0)
    pulse_locations, pulse_locations_index, interpolated_vuv = r
    fft_size = int((len(spectrogram) - 1) * 2)
    base_index = np.arange(-fft_size / 2, fft_size / 2) + 1
    y_length = len(y)
    tmp_complex_cepstrum = np.zeros((fft_size,), dtype=np.complex128)
    latter_index = np.arange(int(fft_size / 2) + 1, fft_size + 1) - 1
    # fractional 1-based frame index at each pulse ...
    temporal_position_index = interp1d(temporal_positions, np.arange(1, len(temporal_positions) + 1), kind="linear",
                                       fill_value="extrapolate")(pulse_locations)
    # ... clamped into the valid frame range (previously assigned to a dead,
    # misspelled variable, so extrapolated indices went through unclamped)
    temporal_position_index = np.maximum(1, np.minimum(len(temporal_positions),
                                                       temporal_position_index))
    amplitude_aperiodic = aperiodicity ** 2
    amplitude_periodic = np.maximum(0.001, (1. - amplitude_aperiodic))
    for i in range(len(pulse_locations_index)):
        spectrum_slice, periodic_slice, aperiodic_slice = world_synthesis_get_spectral_parameters(
            temporal_positions, temporal_position_index[i], spectrogram,
            amplitude_periodic, amplitude_aperiodic, pulse_locations[i])
        idx = min(len(pulse_locations_index), i + 2) - 1
        noise_size = pulse_locations_index[idx] - pulse_locations_index[i]
        output_buffer_index = np.maximum(1, np.minimum(y_length, pulse_locations_index[i] + 1 + base_index)).astype(
            "int32") - 1
        if interpolated_vuv[pulse_locations_index[i]] >= 0.5:
            # voiced: add a pulse response from the periodic spectrum
            tmp_periodic_spectrum = spectrum_slice * periodic_slice
            # eps in matlab/octave
            tmp_periodic_spectrum[tmp_periodic_spectrum == 0] = 2.2204E-16
            periodic_spectrum = np.concatenate([tmp_periodic_spectrum,
                tmp_periodic_spectrum[1:-1][::-1]])
            tmp_cepstrum = np.real(np.fft.fft(np.log(np.abs(periodic_spectrum)) / 2.))
            tmp_complex_cepstrum[latter_index] = tmp_cepstrum[latter_index] * 2
            tmp_complex_cepstrum[0] = tmp_cepstrum[0]
            response = np.fft.fftshift(np.real(np.fft.ifft(np.exp(np.fft.ifft(
                tmp_complex_cepstrum)))))
            y[output_buffer_index] += response * np.sqrt(
                max([1, noise_size]))
            tmp_aperiodic_spectrum = spectrum_slice * aperiodic_slice
        else:
            tmp_aperiodic_spectrum = spectrum_slice
        # always add the filtered-noise (aperiodic) component
        tmp_aperiodic_spectrum[tmp_aperiodic_spectrum == 0] = 2.2204E-16
        aperiodic_spectrum = np.concatenate([tmp_aperiodic_spectrum,
            tmp_aperiodic_spectrum[1:-1][::-1]])
        tmp_cepstrum = np.real(np.fft.fft(np.log(np.abs(aperiodic_spectrum)) / 2.))
        tmp_complex_cepstrum[latter_index] = tmp_cepstrum[latter_index] * 2
        tmp_complex_cepstrum[0] = tmp_cepstrum[0]
        rc = np.fft.ifft(tmp_complex_cepstrum)
        erc = np.exp(rc)
        response = np.fft.fftshift(np.real(np.fft.ifft(erc)))
        noise_input = random_state.randn(max([3, noise_size]), )
        y[output_buffer_index] = y[output_buffer_index] + fftfilt(noise_input - np.mean(noise_input), response)
    return y
def _mgc_b2c(wc, c, alpha):
    """Frequency-warp the cepstrum `c` by all-pass parameter `alpha`
    (Oppenheim-style recursion), producing len(wc) output coefficients.

    `wc` is used only for its length/dtype; a new array is returned.
    """
    wc_o = np.zeros_like(wc)
    desired_order = len(wc) - 1
    # process input coefficients from highest to lowest
    for i in range(0, len(c))[::-1]:
        prev = copy.copy(wc_o)
        wc_o[0] = c[i]
        if desired_order >= 1:
            wc_o[1] = (1. - alpha ** 2) * prev[0] + alpha * prev[1]
        for m in range(2, desired_order + 1):
            wc_o[m] = prev[m - 1] + alpha * (prev[m] - wc_o[m - 1])
    return wc_o
def _mgc_ptrans(p, m, alpha):
    """In-place all-pass transform of p[0..m] (helper for the Newton step).

    The loop runs from index m-1 down to 1; p[0] is updated last using the
    final carry. Statement order is significant.
    """
    d = 0.
    o = 0.
    d = p[m]
    for i in range(1, m)[::-1]:
        o = p[i] + alpha * d
        d = p[i]
        p[i] = o
    o = alpha * d
    p[0] = (1. - alpha ** 2) * p[0] + 2 * o
def _mgc_qtrans(q, m, alpha):
d = q[1]
for i in range(2, 2 * m + 1):
o = q[i] + alpha * d
d = q[i]
q[i] = o
def _mgc_gain(er, c, m, g):
t = 0.
if g != 0:
for i in range(1, m + 1):
t += er[i] * c[i]
return er[0] + g * t
else:
return er[0]
def _mgc_fill_toeplitz(A, t):
n = len(t)
for i in range(n):
for j in range(n):
A[i, j] = t[i - j] if i - j >= 0 else t[j - i]
def _mgc_fill_hankel(A, t):
n = len(t) // 2 + 1
for i in range(n):
for j in range(n):
A[i, j] = t[i + j]
def _mgc_ignorm(c, gamma):
if gamma == 0.:
c[0] = np.log(c[0])
return c
gain = c[0] ** gamma
c[1:] *= gain
c[0] = (gain - 1.) / gamma
def _mgc_gnorm(c, gamma):
if gamma == 0.:
c[0] = np.exp(c[0])
return c
gain = 1. + gamma * c[0]
c[1:] /= gain
c[0] = gain ** (1. / gamma)
def _mgc_b2mc(mc, alpha):
m = len(mc)
o = 0.
d = mc[m - 1]
for i in range(m - 1)[::-1]:
o = mc[i] + alpha * d
d = mc[i]
mc[i] = o
def _mgc_mc2b(mc, alpha):
itr = list(range(len(mc) - 1))[::-1]
for i in itr:
mc[i] = mc[i] - alpha * mc[i + 1]
def _mgc_gc2gc(src_ceps, src_gamma=0., dst_order=None, dst_gamma=0.):
    """Convert a generalized cepstrum between gamma values (and orders).

    Returns a new array of dst_order + 1 coefficients; src_ceps is not
    modified. The recursion below is index-shifted relative to the usual
    1-based formulation, so the loop bounds look unusual but are intentional.
    """
    if dst_order == None:
        dst_order = len(src_ceps) - 1
    dst_ceps = np.zeros((dst_order + 1,), dtype=src_ceps.dtype)
    dst_order = len(dst_ceps) - 1
    m1 = len(src_ceps) - 1
    dst_ceps[0] = copy.deepcopy(src_ceps[0])
    for m in range(2, dst_order + 2):
        ss1 = 0.
        ss2 = 0.
        min_1 = m1 if (m1 < m - 1) else m - 2
        itr = list(range(2, min_1 + 2))
        if len(itr) < 1:
            if min_1 + 1 == 2:
                itr = [2]
            else:
                itr = []
        """
        # old slower version
        for k in itr:
            assert k >= 1
            assert (m - k) >= 0
            cc = src_ceps[k - 1] * dst_ceps[m - k]
            ss2 += (k - 1) * cc
            ss1 += (m - k) * cc
        """
        # vectorized form of the commented loop above
        if len(itr) > 0:
            itr = np.array(itr)
            cc_a = src_ceps[itr - 1] * dst_ceps[m - itr]
            ss2 += ((itr - 1) * cc_a).sum()
            ss1 += ((m - itr) * cc_a).sum()
        if m <= m1 + 1:
            dst_ceps[m - 1] = src_ceps[m - 1] + (dst_gamma * ss2 - src_gamma * ss1) / (m - 1.)
        else:
            # beyond the source order: only the cross terms contribute
            dst_ceps[m - 1] = (dst_gamma * ss2 - src_gamma * ss1) / (m - 1.)
    return dst_ceps
def _mgc_newton(mgc_stored, periodogram, order, alpha, gamma,
                recursion_order, iter_number, y_fft, z_fft, cr, pr, rr, ri,
                qr, qi, Tm, Hm, Tm_plus_Hm, b):
    """One Newton iteration of mel-generalized cepstrum estimation.

    Mutates mgc_stored and all buffer arguments in place (mirroring the Julia
    reference implementation) and returns (log gain, copy of pr).
    Fix: `np.cast["float64"]` was removed in NumPy 2.0 — replaced with the
    equivalent `.astype("float64")`. The dead debug block was removed.
    """
    # a lot of inplace operations to match the Julia code
    cr[1:order + 1] = mgc_stored[1:order + 1]
    if alpha != 0:
        cr_res = _mgc_b2c(cr[:recursion_order + 1], cr[:order + 1], -alpha)
        cr[:recursion_order + 1] = cr_res[:]
    y = sp.fftpack.fft(cr.astype("float64"))
    c = mgc_stored
    x = periodogram
    if gamma != 0.:
        gamma_inv = 1. / gamma
    else:
        gamma_inv = np.inf
    # build pr/rr/ri/qr/qi depending on the gamma regime
    if gamma == -1.:
        pr[:] = copy.deepcopy(x)
        new_pr = copy.deepcopy(pr)
    elif gamma == 0.:
        pr[:] = copy.deepcopy(x) / np.exp(2 * np.real(y))
        new_pr = copy.deepcopy(pr)
    else:
        tr = 1. + gamma * np.real(y)
        ti = -gamma * np.imag(y)
        trr = tr * tr
        tii = ti * ti
        s = trr + tii
        t = x * np.power(s, (-gamma_inv))
        t /= s
        pr[:] = t
        rr[:] = tr * t
        ri[:] = ti * t
        t /= s
        qr[:] = (trr - tii) * t
        s = tr * ti * t
        qi[:] = (s + s)
        new_pr = copy.deepcopy(pr)
    y_fft[:] = copy.deepcopy(pr) + 0.j
    z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
    pr[:] = copy.deepcopy(np.real(z_fft))
    if alpha != 0.:
        idx_1 = pr[:2 * order + 1]
        idx_2 = pr[:recursion_order + 1]
        idx_3 = _mgc_b2c(idx_1, idx_2, alpha)
        pr[:2 * order + 1] = idx_3[:]
    if gamma == 0. or gamma == -1.:
        qr[:2 * order + 1] = pr[:2 * order + 1]
        rr[:order + 1] = copy.deepcopy(pr[:order + 1])
    else:
        for i in range(len(qr)):
            y_fft[i] = qr[i] + 1j * qi[i]
        z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
        qr[:] = np.real(z_fft)
        for i in range(len(rr)):
            y_fft[i] = rr[i] + 1j * ri[i]
        z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
        rr[:] = np.real(z_fft)
        if alpha != 0.:
            qr_new = _mgc_b2c(qr[:recursion_order + 1], qr[:recursion_order + 1], alpha)
            qr[:recursion_order + 1] = qr_new[:]
            rr_new = _mgc_b2c(rr[:order + 1], rr[:recursion_order + 1], alpha)
            rr[:order + 1] = rr_new[:]
    if alpha != 0:
        _mgc_ptrans(pr, order, alpha)
        _mgc_qtrans(qr, order, alpha)
    eta = 0.
    if gamma != -1.:
        eta = _mgc_gain(rr, c, order, gamma)
        c[0] = np.sqrt(eta)
    if gamma == -1.:
        qr[:] = 0.
    elif gamma != 0.:
        for i in range(2, 2 * order + 1):
            qr[i] *= 1. + gamma
    # assemble and solve the Toeplitz-plus-Hankel Newton system
    te = pr[:order]
    _mgc_fill_toeplitz(Tm, te)
    he = qr[2: 2 * order + 1]
    _mgc_fill_hankel(Hm, he)
    Tm_plus_Hm[:] = Hm[:] + Tm[:]
    b[:order] = rr[1:order + 1]
    res = np.linalg.solve(Tm_plus_Hm, b)
    b[:] = res[:]
    c[1:order + 1] += res[:order]
    if gamma == -1.:
        eta = _mgc_gain(rr, c, order, gamma)
        c[0] = np.sqrt(eta)
    return np.log(eta), new_pr
def _mgc_mgcepnorm(b_gamma, alpha, gamma, otype):
    """Post-process Newton output into output type 0 mel-generalized cepstrum.

    Works on a copy of b_gamma: inverse gain normalization followed by the
    b -> mel-cepstrum conversion. Only otype == 0 is supported.
    """
    if otype != 0:
        raise ValueError("Not yet implemented for otype != 0")
    result = copy.deepcopy(b_gamma)
    _mgc_ignorm(result, gamma)
    _mgc_b2mc(result, alpha)
    return result
def _sp2mgc(sp, order=20, alpha=0.35, gamma=-0.41, miniter=2, maxiter=30, criteria=0.001, otype=0, verbose=False):
    """Estimate mel-generalized cepstrum for one full-spectrum frame.

    First solves with gamma=-1 via _mgc_newton to get a starting point, then
    (if the requested gamma differs) converts that solution and iterates at
    the requested gamma until the relative change in eta drops below
    `criteria` (after at least `miniter` iterations, up to `maxiter`).
    """
    # Based on r9y9 Julia code
    # https://github.com/r9y9/MelGeneralizedCepstrums.jl
    periodogram = np.abs(sp) ** 2
    recursion_order = len(periodogram) - 1
    slen = len(periodogram)
    iter_number = 1

    # Scratch-buffer constructors; all buffers are reused across
    # _mgc_newton calls to avoid reallocation.
    def _z():
        return np.zeros((slen,), dtype="float64")

    def _o():
        return np.zeros((order,), dtype="float64")

    def _o2():
        return np.zeros((order, order), dtype="float64")
    cr = _z()
    pr = _z()
    rr = _z()
    # NOTE(review): "float128" is platform dependent (absent on some
    # builds, e.g. Windows) -- confirm portability requirements.
    ri = _z().astype("float128")
    qr = _z()
    qi = _z().astype("float128")
    Tm = _o2()
    Hm = _o2()
    Tm_plus_Hm = _o2()
    b = _o()
    y = _z() + 0j
    z = _z() + 0j
    b_gamma = np.zeros((order + 1,), dtype="float64")
    # Initial solve at gamma = -1 (LPC-like starting point).
    # return pr_new due to oddness with Julia having different numbers
    # in pr at end of function vs back in this scope
    eta0, pr_new = _mgc_newton(b_gamma, periodogram, order, alpha, -1.,
                               recursion_order, iter_number, y, z, cr, pr, rr,
                               ri, qr, qi, Tm, Hm, Tm_plus_Hm, b)
    pr[:] = pr_new
    # Inert debug block left over from the Julia port.
    """
    print(eta0)
    print(sum(b_gamma))
    print(sum(periodogram))
    print(order)
    print(alpha)
    print(recursion_order)
    print(sum(y))
    print(sum(cr))
    print(sum(z))
    print(sum(pr))
    print(sum(rr))
    print(sum(qi))
    print(Tm.sum())
    print(Hm.sum())
    print(sum(b))
    raise ValueError()
    """
    if gamma != -1.:
        # Convert the gamma = -1 solution into the requested gamma domain.
        d = np.zeros((order + 1,), dtype="float64")
        if alpha != 0.:
            _mgc_ignorm(b_gamma, -1.)
            _mgc_b2mc(b_gamma, alpha)
            d = copy.deepcopy(b_gamma)
            _mgc_gnorm(d, -1.)
            # numbers are slightly different here - numerical diffs?
        else:
            d = copy.deepcopy(b_gamma)
        b_gamma = _mgc_gc2gc(d, -1., order, gamma)
        if alpha != 0.:
            _mgc_ignorm(b_gamma, gamma)
            _mgc_mc2b(b_gamma, alpha)
            _mgc_gnorm(b_gamma, gamma)
    if gamma != -1.:
        # Newton iterations at the requested gamma with convergence check.
        eta_t = eta0
        for i in range(1, maxiter + 1):
            eta, pr_new = _mgc_newton(b_gamma, periodogram, order, alpha,
                                      gamma, recursion_order, i, y, z, cr, pr, rr,
                                      ri, qr, qi, Tm, Hm, Tm_plus_Hm, b)
            pr[:] = pr_new
            # Inert debug block left over from the Julia port.
            """
            print(eta0)
            print(sum(b_gamma))
            print(sum(periodogram))
            print(order)
            print(alpha)
            print(recursion_order)
            print(sum(y))
            print(sum(cr))
            print(sum(z))
            print(sum(pr))
            print(sum(rr))
            print(sum(qi))
            print(Tm.sum())
            print(Hm.sum())
            print(sum(b))
            raise ValueError()
            """
            # Relative change in the gain term drives convergence.
            err = np.abs((eta_t - eta) / eta)
            if verbose:
                print(("iter %i, criterion: %f" % (i, err)))
            if i >= miniter:
                if err < criteria:
                    if verbose:
                        print(("optimization complete at iter %i" % i))
                    break
            eta_t = eta
    mgc_arr = _mgc_mgcepnorm(b_gamma, alpha, gamma, otype)
    return mgc_arr
# Shared accumulator that Pool.map_async callbacks fill in sp2mgc; drained
# by sp2mgc after the pool finishes.
_sp_convert_results = []


def _sp_collect_result(result):
    """map_async callback: stash one chunked result list for later flattening."""
    _sp_convert_results.append(result)
def _sp_convert(c_i, order, alpha, gamma, miniter, maxiter, criteria,
                otype, verbose):
    """Pool worker for sp2mgc.

    Takes one (frame_index, total_frames, frame_spectrum) tuple and returns
    (frame_index, mgc) so results can be re-ordered after map_async.
    """
    frame_idx, _total, frame_sp = c_i
    return (frame_idx,
            _sp2mgc(frame_sp, order=order, alpha=alpha, gamma=gamma,
                    miniter=miniter, maxiter=maxiter, criteria=criteria,
                    otype=otype, verbose=verbose))
def sp2mgc(sp, order=20, alpha=0.35, gamma=-0.41, miniter=2,
           maxiter=30, criteria=0.001, otype=0, verbose=False):
    """
    Accepts 1D or 2D one-sided spectrum (complex or real valued).

    If 2D, assumes time is axis 0.

    Returns mel generalized cepstral coefficients.

    Based on r9y9 Julia code
    https://github.com/r9y9/MelGeneralizedCepstrums.jl
    """
    if len(sp.shape) == 1:
        # Mirror the one-sided spectrum (excluding DC) to rebuild the full
        # spectrum. BUGFIX: the original used 2D indexing (sp[:, 1:]) here,
        # which raises IndexError for any 1D input; the 1D slice below is
        # the exact analogue of the 2D branch.
        sp = np.concatenate((sp, sp[1:][::-1]), axis=0)
        return _sp2mgc(sp, order=order, alpha=alpha, gamma=gamma,
                       miniter=miniter, maxiter=maxiter, criteria=criteria,
                       otype=otype, verbose=verbose)
    else:
        sp = np.concatenate((sp, sp[:, 1:][:, ::-1]), axis=1)
        # Slooow, use multiprocessing to speed up a bit
        # http://blog.shenwei.me/python-multiprocessing-pool-difference-between-map-apply-map_async-apply_async/
        # http://stackoverflow.com/questions/5666576/show-the-progress-of-a-python-multiprocessing-pool-map-call
        c = [(i + 1, sp.shape[0], sp[i]) for i in range(sp.shape[0])]
        p = Pool()
        start = time.time()
        if verbose:
            print(("Starting conversion of %i frames" % sp.shape[0]))
            print("This may take some time...")
        # takes ~360s for 630 frames, 1 process
        itr = p.map_async(
            functools.partial(_sp_convert, order=order, alpha=alpha, gamma=gamma, miniter=miniter, maxiter=maxiter,
                              criteria=criteria, otype=otype, verbose=False), c, callback=_sp_collect_result)
        # Number of chunks map_async will hand back, for progress reporting.
        sz = len(c) // itr._chunksize
        if (sz * itr._chunksize) != len(c):
            sz += 1
        last_remaining = None
        while True:
            remaining = itr._number_left
            if verbose:
                if remaining != last_remaining:
                    last_remaining = remaining
                    print(("%i chunks of %i complete" % (sz - remaining, sz)))
            if itr.ready():
                break
            time.sleep(.5)
        p.close()
        p.join()
        stop = time.time()
        if verbose:
            print(("Processed %i frames in %s seconds" % (sp.shape[0], stop - start)))
        # map_async result comes in chunks; flatten, then restore frame
        # order using the indices attached by _sp_convert.
        flat = [a_i for a in _sp_convert_results for a_i in a]
        final = [o[1] for o in sorted(flat, key=lambda x: x[0])]
        # Drain the module-level accumulator for the next call.
        for i in range(len(_sp_convert_results)):
            _sp_convert_results.pop()
        return np.array(final)
def win2mgc(windowed_signal, order=20, alpha=0.35, gamma=-0.41, miniter=2,
            maxiter=30, criteria=0.001, otype=0, verbose=False):
    """
    Accepts 1D or 2D array of windowed signal frames.

    If 2D, assumes time is axis 0.

    Returns mel generalized cepstral coefficients.

    Based on r9y9 Julia code
    https://github.com/r9y9/MelGeneralizedCepstrums.jl
    """
    # Guard clause: only the single-frame (1D) path is implemented.
    if len(windowed_signal.shape) != 1:
        raise ValueError("2D input not yet complete for win2mgc")
    sp = np.fft.fft(windowed_signal)
    return _sp2mgc(sp, order=order, alpha=alpha, gamma=gamma,
                   miniter=miniter, maxiter=maxiter, criteria=criteria,
                   otype=otype, verbose=verbose)
def _mgc_freqt(wc, c, alpha):
    """In-place frequency transform (warping) of cepstrum c into wc.

    Standard freqt recurrence: the source coefficients are consumed from
    highest index to lowest, each pass updating the destination vector from
    a snapshot of its previous state.
    """
    dst_order = len(wc) - 1
    wc[:] = 0.
    last = np.zeros_like(wc)
    # c[::-1] visits c[m1], c[m1 - 1], ..., c[0], matching the original
    # range(-m1, 1) loop with c[-i] indexing.
    for src in c[::-1]:
        last[:] = wc
        if dst_order >= 0:
            wc[0] = src + alpha * last[0]
        if dst_order >= 1:
            wc[1] = (1. - alpha * alpha) * last[0] + alpha * last[1]
        for m in range(2, dst_order + 1):
            wc[m] = last[m - 1] + alpha * (last[m] - wc[m - 1])
def _mgc_mgc2mgc(src_ceps, src_alpha, src_gamma, dst_order, dst_gamma_alpha=None, dst_gamma=None):
    """placeholder"""
# Shared accumulator that Pool.map_async callbacks fill in mgc2sp; drained
# by mgc2sp after the pool finishes.
_mgc_convert_results = []


def _mgc_collect_result(result):
    """map_async callback: stash one chunked result list for later flattening."""
    _mgc_convert_results.append(result)
def _mgc_convert(c_i, alpha, gamma, fftlen):
    """Pool worker for mgc2sp.

    Takes one (frame_index, total_frames, mgc) tuple and converts the mgc
    frame to plain cepstrum (alpha = 0, gamma = 0), keeping the index so
    results can be re-ordered after map_async.
    """
    frame_idx, _total, frame_mgc = c_i
    converted = _mgc_mgc2mgc(frame_mgc, src_alpha=alpha, src_gamma=gamma,
                             dst_order=fftlen // 2, dst_alpha=0., dst_gamma=0.)
    return (frame_idx, converted)
def mgc2sp(mgc_arr, alpha=0.35, gamma=-0.41, fftlen="auto", fs=None,
           mode="world_pad", verbose=False):
    """
    Accepts 1D or 2D array of mgc

    If 2D, assume time is on axis 0

    Returns reconstructed smooth spectrum

    Based on r9y9 Julia code
    https://github.com/r9y9/MelGeneralizedCepstrums.jl
    """
    if mode != "world_pad":
        raise ValueError("Only currently supported mode is world_pad")
    if fftlen == "auto":
        # BUGFIX: `is None` instead of `== None`; None is a singleton and
        # equality comparison misbehaves for objects overriding __eq__.
        if fs is None:
            raise ValueError("fs must be provided for fftlen 'auto'")
        f0_low_limit = 71
        # Smallest power of two covering 3 periods of the lowest F0.
        fftlen = int(2 ** np.ceil(np.log2(3. * float(fs) / f0_low_limit + 1)))
        if verbose:
            print(("setting fftlen to %i" % fftlen))
    if len(mgc_arr.shape) == 1:
        # Single frame: convert to plain cepstrum, zero-pad, FFT.
        c = _mgc_mgc2mgc(mgc_arr, alpha, gamma, fftlen // 2, 0., 0.)
        buf = np.zeros((fftlen,), dtype=c.dtype)
        buf[:len(c)] = c[:]
        return np.fft.rfft(buf)
    else:
        # Slooow, use multiprocessing to speed up a bit
        # http://blog.shenwei.me/python-multiprocessing-pool-difference-between-map-apply-map_async-apply_async/
        # http://stackoverflow.com/questions/5666576/show-the-progress-of-a-python-multiprocessing-pool-map-call
        c = [(i + 1, mgc_arr.shape[0], mgc_arr[i]) for i in range(mgc_arr.shape[0])]
        p = Pool()
        start = time.time()
        if verbose:
            print(("Starting conversion of %i frames" % mgc_arr.shape[0]))
            print("This may take some time...")
        # 500.1 s for 630 frames process
        itr = p.map_async(functools.partial(_mgc_convert, alpha=alpha, gamma=gamma, fftlen=fftlen), c,
                          callback=_mgc_collect_result)
        # Number of chunks map_async will hand back, for progress reporting.
        sz = len(c) // itr._chunksize
        if (sz * itr._chunksize) != len(c):
            sz += 1
        last_remaining = None
        while True:
            remaining = itr._number_left
            if verbose:
                if last_remaining != remaining:
                    last_remaining = remaining
                    print(("%i chunks of %i complete" % (sz - remaining, sz)))
            if itr.ready():
                break
            time.sleep(.5)
        p.close()
        p.join()
        stop = time.time()
        if verbose:
            print(("Processed %i frames in %s seconds" % (mgc_arr.shape[0], stop - start)))
        # map_async result comes in chunks; flatten, then restore frame
        # order using the indices attached by _mgc_convert.
        flat = [a_i for a in _mgc_convert_results for a_i in a]
        final = [o[1] for o in sorted(flat, key=lambda x: x[0])]
        # Drain the module-level accumulator for the next call.
        for i in range(len(_mgc_convert_results)):
            _mgc_convert_results.pop()
        c = np.array(final)
        buf = np.zeros((len(c), fftlen), dtype=c.dtype)
        buf[:, :c.shape[1]] = c[:]
        return np.exp(np.fft.rfft(buf, axis=-1).real)
def implot(arr, scale=None, title="", cmap="gray"):
    """Display a matrix as an image with square aspect.

    With scale="specgram" the input is shown in dB, transposed so time runs
    along X, and flipped so low frequencies are at the bottom.
    """
    import matplotlib.pyplot as plt
    # BUGFIX: the original used `scale is "specgram"`, which compares
    # object identity with a string literal (implementation dependent and
    # a SyntaxWarning on modern Python); use equality.
    if scale == "specgram":
        # plotting part
        mag = 20. * np.log10(np.abs(arr))
        # Transpose so time is X axis, and invert y axis so
        # frequency is low at bottom
        mag = mag.T[::-1, :]
    else:
        mag = arr
    f, ax = plt.subplots()
    ax.matshow(mag, cmap=cmap)
    plt.axis("off")
    x1 = mag.shape[0]
    y1 = mag.shape[1]

    def autoaspect(x_range, y_range):
        """
        The aspect to make a plot square with ax.set_aspect in Matplotlib
        """
        mx = max(x_range, y_range)
        mn = min(x_range, y_range)
        if x_range <= y_range:
            return mx / float(mn)
        else:
            return mn / float(mx)

    asp = autoaspect(x1, y1)
    ax.set_aspect(asp)
    plt.title(title)
def test_lpc_to_lsf():
    """Round-trip LPC <-> LSF against MATLAB reference vectors (2D and 1D)."""
    # Matlab style vectors for testing
    # lsf = [0.7842 1.5605 1.8776 1.8984 2.3593]
    # a = [1.0000 0.6149 0.9899 0.0000 0.0031 -0.0082];
    lsf = [[0.7842, 1.5605, 1.8776, 1.8984, 2.3593],
           [0.7842, 1.5605, 1.8776, 1.8984, 2.3593]]
    a = [[1.0000, 0.6149, 0.9899, 0.0000, 0.0031, -0.0082],
         [1.0000, 0.6149, 0.9899, 0.0000, 0.0031, -0.0082]]
    a = np.array(a)
    lsf = np.array(lsf)
    # 2D (batched) round trip.
    lsf_r = lpc_to_lsf(a)
    assert_almost_equal(lsf, lsf_r, decimal=4)
    a_r = lsf_to_lpc(lsf)
    assert_almost_equal(a, a_r, decimal=4)
    # 1D (single frame) round trip.
    lsf_r = lpc_to_lsf(a[0])
    assert_almost_equal(lsf[0], lsf_r, decimal=4)
    a_r = lsf_to_lpc(lsf[0])
    assert_almost_equal(a[0], a_r, decimal=4)
def test_lpc_analysis_truncate():
    # Test that truncate doesn't crash and actually truncates
    [a, g, e] = lpc_analysis(np.random.randn(85), order=8, window_step=80,
                             window_size=80, emphasis=0.9, truncate=True)
    # 85 samples with an 80-sample window/step fits exactly one full frame.
    assert (a.shape[0] == 1)
def test_feature_build():
    """Smoke test: build a combined LPC/gain/excitation feature matrix."""
    samplerate, X = fetch_sample_music()
    # MATLAB wavread does normalization
    X = X.astype('float32') / (2 ** 15)
    wsz = 256
    wst = 128
    a, g, e = lpc_analysis(X, order=8, window_step=wst,
                           window_size=wsz, emphasis=0.9,
                           copy=True)
    v, p = voiced_unvoiced(X, window_size=wsz,
                           window_step=wst)
    c = compress(e, n_components=64)
    # First component of a is always 1
    combined = np.hstack((a[:, 1:], g, c[:a.shape[0]]))
    # Each frame's features go in the first or second half of the row,
    # selected by the voicing decision v (0 or 1).
    features = np.zeros((a.shape[0], 2 * combined.shape[1]))
    start_indices = v * combined.shape[1]
    start_indices = start_indices.astype('int32')
    end_indices = (v + 1) * combined.shape[1]
    end_indices = end_indices.astype('int32')
    for i in range(features.shape[0]):
        features[i, start_indices[i]:end_indices[i]] = combined[i]
def test_mdct_and_inverse():
    """MDCT followed by inverse MDCT should closely reconstruct the signal."""
    fs, X = fetch_sample_music()
    X_dct = mdct_slow(X)
    X_r = imdct_slow(X_dct)
    # Reconstruction is truncated to the original length before comparing.
    assert np.all(np.abs(X_r[:len(X)] - X) < 1E-3)
    assert np.abs(X_r[:len(X)] - X).mean() < 1E-6
def test_all():
    """Run the full (fast) test suite in one call."""
    test_lpc_analysis_truncate()
    test_feature_build()
    test_lpc_to_lsf()
    test_mdct_and_inverse()
def run_lpc_example():
    """LPC analysis/synthesis demo; writes several reconstructed wav files."""
    # ae.wav is from
    # http://www.linguistics.ucla.edu/people/hayes/103/Charts/VChart/ae.wav
    # Partially following the formant tutorial here
    # http://www.mathworks.com/help/signal/ug/formant-estimation-with-lpc-coefficients.html
    samplerate, X = fetch_sample_music()
    c = overlap_dct_compress(X, 200, 400)
    X_r = overlap_dct_uncompress(c, 400)
    wavfile.write('lpc_uncompress.wav', samplerate, soundsc(X_r))
    print("Calculating sinusoids")
    f_hz, m = sinusoid_analysis(X, input_sample_rate=16000)
    Xs_sine = sinusoid_synthesis(f_hz, m)
    orig_fname = 'lpc_orig.wav'
    sine_fname = 'lpc_sine_synth.wav'
    wavfile.write(orig_fname, samplerate, soundsc(X))
    wavfile.write(sine_fname, samplerate, soundsc(Xs_sine))
    # Parameter grid (single setting each by default).
    lpc_order_list = [8, ]
    dct_components_list = [200, ]
    window_size_list = [400, ]
    # Seems like a dct component size of ~2/3rds the step
    # (1/3rd the window for 50% overlap) works well.
    for lpc_order in lpc_order_list:
        for dct_components in dct_components_list:
            for window_size in window_size_list:
                # 50% overlap
                window_step = window_size // 2
                a, g, e = lpc_analysis(X, order=lpc_order,
                                       window_step=window_step,
                                       window_size=window_size, emphasis=0.9,
                                       copy=True)
                print("Calculating LSF")
                lsf = lpc_to_lsf(a)
                # Not window_size - window_step! Need to implement overlap
                print("Calculating compression")
                c = dct_compress(e, n_components=dct_components,
                                 window_size=window_step)
                co = overlap_dct_compress(e, n_components=dct_components,
                                          window_size=window_step)
                block_excitation = dct_uncompress(c, window_size=window_step)
                overlap_excitation = overlap_dct_uncompress(co,
                                                            window_size=window_step)
                a_r = lsf_to_lpc(lsf)
                f, m = lpc_to_frequency(a_r, g)
                block_lpc = lpc_synthesis(a_r, g, block_excitation,
                                          emphasis=0.9,
                                          window_step=window_step)
                overlap_lpc = lpc_synthesis(a_r, g, overlap_excitation,
                                            emphasis=0.9,
                                            window_step=window_step)
                v, p = voiced_unvoiced(X, window_size=window_size,
                                       window_step=window_step)
                # Synthesis driven by the voicing decision alone.
                noisy_lpc = lpc_synthesis(a_r, g, voiced_frames=v,
                                          emphasis=0.9,
                                          window_step=window_step)
                if dct_components is None:
                    dct_components = window_size
                noisy_fname = 'lpc_noisy_synth_%iwin_%ilpc_%idct.wav' % (
                    window_size, lpc_order, dct_components)
                block_fname = 'lpc_block_synth_%iwin_%ilpc_%idct.wav' % (
                    window_size, lpc_order, dct_components)
                overlap_fname = 'lpc_overlap_synth_%iwin_%ilpc_%idct.wav' % (
                    window_size, lpc_order, dct_components)
                wavfile.write(noisy_fname, samplerate, soundsc(noisy_lpc))
                wavfile.write(block_fname, samplerate,
                              soundsc(block_lpc))
                wavfile.write(overlap_fname, samplerate,
                              soundsc(overlap_lpc))
def run_fft_vq_example():
    """VQ in the STFT (magnitude + phase sin/cos) domain; writes demo wavs."""
    n_fft = 512
    time_smoothing = 4

    def _pre(list_of_data):
        # Stack STFT features; trim so frame count divides time_smoothing.
        f_c = np.vstack([stft(dd, n_fft) for dd in list_of_data])
        if len(f_c) % time_smoothing != 0:
            newlen = len(f_c) - len(f_c) % time_smoothing
            f_c = f_c[:newlen]
        f_mag = complex_to_abs(f_c)
        f_phs = complex_to_angle(f_c)
        f_sincos = angle_to_sin_cos(f_phs)
        f_r = np.hstack((f_mag, f_sincos))
        # Group time_smoothing consecutive frames into one VQ vector.
        f_r = f_r.reshape((len(f_r) // time_smoothing,
                           time_smoothing * f_r.shape[1]))
        return f_r, n_fft

    def preprocess_train(list_of_data, random_state):
        f_r, n_fft = _pre(list_of_data)
        clusters = f_r
        return clusters

    def apply_preprocess(list_of_data, clusters):
        # Quantize to nearest training vector, then invert back to audio.
        f_r, n_fft = _pre(list_of_data)
        memberships, distances = vq(f_r, clusters)
        vq_r = clusters[memberships]
        vq_r = vq_r.reshape((time_smoothing * len(vq_r),
                             vq_r.shape[1] // time_smoothing))
        f_mag = vq_r[:, :n_fft // 2 + 1]
        f_sincos = vq_r[:, n_fft // 2 + 1:]
        extent = f_sincos.shape[1] // 2
        f_phs = sin_cos_to_angle(f_sincos[:, :extent], f_sincos[:, extent:])
        vq_c = abs_and_angle_to_complex(f_mag, f_phs)
        d_k = istft(vq_c, fftsize=n_fft)
        return d_k

    random_state = np.random.RandomState(1999)
    """
    fs, d = fetch_sample_music()
    sub = int(.8 * d.shape[0])
    d1 = [d[:sub]]
    d2 = [d[sub:]]
    """
    # 7/8 of the fruit utterances for training, 1/8 for testing.
    fs, d = fetch_sample_speech_fruit()
    d1 = d[::8] + d[1::8] + d[2::8] + d[3::8] + d[4::8] + d[5::8] + d[6::8]
    d2 = d[7::8]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]
    clusters = preprocess_train(d1, random_state)
    # Training data
    vq_d1 = apply_preprocess(d1, clusters)
    vq_d2 = apply_preprocess(d2, clusters)
    # NOTE(review): a non-empty list is always truthy, so this assert can
    # never fail; it likely intended any()/all() over the comparison.
    assert [i != j for i, j in zip(vq_d1.ravel(), vq_d2.ravel())]
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    wavfile.write("fft_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("fft_test_no_agc.wav", fs, soundsc(fix_d2))
    wavfile.write("fft_vq_train_no_agc.wav", fs, soundsc(vq_d1, fs))
    wavfile.write("fft_vq_test_no_agc.wav", fs, soundsc(vq_d2, fs))
    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d1, freq_vq_d1, energy_vq_d1 = time_attack_agc(vq_d1, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
    wavfile.write("fft_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("fft_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("fft_vq_train_agc.wav", fs, soundsc(agc_vq_d1, fs))
    wavfile.write("fft_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
def run_dct_vq_example():
    """VQ in the MDCT domain on sample music; writes demo wavs."""
    def _pre(list_of_data):
        # Temporal window setting is crucial! - 512 seems OK for music, 256
        # fruit perhaps due to samplerates
        n_dct = 512
        f_r = np.vstack([mdct_slow(dd, n_dct) for dd in list_of_data])
        return f_r, n_dct

    def preprocess_train(list_of_data, random_state):
        f_r, n_dct = _pre(list_of_data)
        clusters = f_r
        return clusters

    def apply_preprocess(list_of_data, clusters):
        # Quantize to nearest training vector, then invert back to audio.
        f_r, n_dct = _pre(list_of_data)
        f_clust = f_r
        memberships, distances = vq(f_clust, clusters)
        vq_r = clusters[memberships]
        d_k = imdct_slow(vq_r, n_dct)
        return d_k

    random_state = np.random.RandomState(1999)
    # This doesn't work very well due to only taking a sample from the end as
    # test
    fs, d = fetch_sample_music()
    sub = int(.8 * d.shape[0])
    d1 = [d[:sub]]
    d2 = [d[sub:]]
    """
    fs, d = fetch_sample_speech_fruit()
    d1 = d[::8] + d[1::8] + d[2::8] + d[3::8] + d[4::8] + d[5::8] + d[6::8]
    d2 = d[7::8]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]
    """
    clusters = preprocess_train(d1, random_state)
    # Training data
    vq_d1 = apply_preprocess(d1, clusters)
    vq_d2 = apply_preprocess(d2, clusters)
    # NOTE(review): zips vq_d2 with itself (likely meant vq_d1), and a
    # non-empty list is always truthy, so this assert cannot fail.
    assert [i != j for i, j in zip(vq_d2.ravel(), vq_d2.ravel())]
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    wavfile.write("dct_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("dct_test_no_agc.wav", fs, soundsc(fix_d2))
    wavfile.write("dct_vq_train_no_agc.wav", fs, soundsc(vq_d1))
    wavfile.write("dct_vq_test_no_agc.wav", fs, soundsc(vq_d2))
    """
    import matplotlib.pyplot as plt
    plt.specgram(vq_d2, cmap="gray")
    plt.figure()
    plt.specgram(fix_d2, cmap="gray")
    plt.show()
    """
    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d1, freq_vq_d1, energy_vq_d1 = time_attack_agc(vq_d1, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
    wavfile.write("dct_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("dct_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("dct_vq_train_agc.wav", fs, soundsc(agc_vq_d1))
    wavfile.write("dct_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
def run_phase_reconstruction_example():
    """Rebuild a waveform from its magnitude spectrogram via iterative phase recovery."""
    fs, d = fetch_sample_speech_tapestry()
    # actually gives however many components you say! So double what .m file
    # says
    fftsize = 512
    step = 64
    # Magnitude-only two-sided spectrogram; phase is discarded on purpose.
    X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
                      compute_onesided=False))
    X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)
    """
    import matplotlib.pyplot as plt
    plt.specgram(d, cmap="gray")
    plt.savefig("1.png")
    plt.close()
    plt.imshow(X_s, cmap="gray")
    plt.savefig("2.png")
    plt.close()
    """
    wavfile.write("phase_original.wav", fs, soundsc(d))
    wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
def run_phase_vq_example():
    """VQ magnitude spectrograms, then invert with iterative phase recovery."""
    def _pre(list_of_data):
        # Temporal window setting is crucial! - 512 seems OK for music, 256
        # fruit perhaps due to samplerates
        n_fft = 256
        step = 32
        f_r = np.vstack([np.abs(stft(dd, n_fft, step=step, real=False,
                                     compute_onesided=False))
                         for dd in list_of_data])
        return f_r, n_fft, step

    def preprocess_train(list_of_data, random_state):
        f_r, n_fft, step = _pre(list_of_data)
        clusters = copy.deepcopy(f_r)
        return clusters

    def apply_preprocess(list_of_data, clusters):
        # Quantize each magnitude frame, then recover phase iteratively.
        f_r, n_fft, step = _pre(list_of_data)
        f_clust = f_r
        # Nondeterministic ?
        memberships, distances = vq(f_clust, clusters)
        vq_r = clusters[memberships]
        d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
        return d_k

    random_state = np.random.RandomState(1999)
    fs, d = fetch_sample_speech_fruit()
    d1 = d[::9]
    d2 = d[7::8][:5]
    # make sure d1 and d2 aren't the same!
    assert [len(di) for di in d1] != [len(di) for di in d2]
    clusters = preprocess_train(d1, random_state)
    fix_d1 = np.concatenate(d1)
    fix_d2 = np.concatenate(d2)
    vq_d2 = apply_preprocess(d2, clusters)
    wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
    wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))
    agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
    agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
    agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
    """
    import matplotlib.pyplot as plt
    plt.specgram(agc_vq_d2, cmap="gray")
    #plt.title("Fake")
    plt.figure()
    plt.specgram(agc_d2, cmap="gray")
    #plt.title("Real")
    plt.show()
    """
    wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
    wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
    wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
def run_cqt_example():
    """Constant-Q transform round trip; writes original and reconstruction."""
    try:
        # Hard-coded local path; falls back to bundled sample music below.
        fs, d = fetch_sample_file("/Users/User/cqt_resources/kempff1.wav")
    except ValueError:
        print("WARNING: Using sample music instead but kempff1.wav is the example")
        fs, d = fetch_sample_music()
    X = d[:44100]
    X_cq, c_dc, c_nyq, multiscale, shift, window_lens = cqt(X, fs)
    X_r = icqt(X_cq, c_dc, c_nyq, multiscale, shift, window_lens)
    # Reconstruction quality in dB (computed but not printed).
    # NOTE(review): norm(error)/norm(signal) is the error-to-signal ratio;
    # the name SNR suggests the inverse -- confirm intent.
    SNR = 20 * np.log10(np.linalg.norm(X - X_r) / np.linalg.norm(X))
    wavfile.write("cqt_original.wav", fs, soundsc(X))
    wavfile.write("cqt_reconstruction.wav", fs, soundsc(X_r))
def run_fft_dct_example():
    """Perturb DCT-of-STFT deltas with noise, invert, and print the ratio in dB."""
    random_state = np.random.RandomState(1999)
    fs, d = fetch_sample_speech_fruit()
    n_fft = 64
    X = d[0]
    X_stft = stft(X, n_fft)
    # View the complex STFT as interleaved real values so the DCT applies.
    X_rr = complex_to_real_view(X_stft)
    X_dct = fftpack.dct(X_rr, axis=-1, norm='ortho')
    # First-order difference along time, then add small relative noise.
    X_dct_sub = X_dct[1:] - X_dct[:-1]
    std = X_dct_sub.std(axis=0, keepdims=True)
    X_dct_sub += .01 * std * random_state.randn(
        X_dct_sub.shape[0], X_dct_sub.shape[1])
    # Undo the differencing by cumulative sum, then invert all transforms.
    X_dct_unsub = np.cumsum(X_dct_sub, axis=0)
    X_idct = fftpack.idct(X_dct_unsub, axis=-1, norm='ortho')
    X_irr = real_to_complex_view(X_idct)
    X_r = istft(X_irr, n_fft)[:len(X)]
    SNR = 20 * np.log10(np.linalg.norm(X - X_r) / np.linalg.norm(X))
    print(SNR)
    wavfile.write("fftdct_orig.wav", fs, soundsc(X))
    wavfile.write("fftdct_rec.wav", fs, soundsc(X_r))
def run_world_example():
    """WORLD vocoder round trip: harvest -> cheaptrick -> d4c -> synthesis."""
    fs, d = fetch_sample_speech_tapestry()
    d = d.astype("float32") / 2 ** 15
    temporal_positions_h, f0_h, vuv_h, f0_candidates_h = harvest(d, fs)
    temporal_positions_ct, spectrogram_ct, fs_ct = cheaptrick(d, fs,
                                                              temporal_positions_h, f0_h, vuv_h)
    temporal_positions_d4c, f0_d4c, vuv_d4c, aper_d4c, coarse_aper_d4c = d4c(d, fs,
                                                                             temporal_positions_h, f0_h, vuv_h)
    # Synthesis uses the coarse aperiodicity (full version kept commented).
    # y = world_synthesis(f0_d4c, vuv_d4c, aper_d4c, spectrogram_ct, fs_ct)
    y = world_synthesis(f0_d4c, vuv_d4c, coarse_aper_d4c, spectrogram_ct, fs_ct)
    wavfile.write("out.wav", fs, soundsc(y))
def run_mgc_example():
    """Plot one FFT frame's spectrum against its MGC-smoothed reconstruction."""
    import matplotlib.pyplot as plt
    fs, x = wavfile.read("test16k.wav")
    pos = 3000
    fftlen = 1024
    # Energy-normalized Blackman window.
    win = np.blackman(fftlen) / np.sqrt(np.sum(np.blackman(fftlen) ** 2))
    xw = x[pos:pos + fftlen] * win
    sp = 20 * np.log10(np.abs(np.fft.rfft(xw)))
    mgc_order = 20
    # NOTE(review): alpha/gamma magnitudes are swapped relative to the
    # win2mgc defaults (alpha=0.35, gamma=-0.41) -- confirm intentional.
    mgc_alpha = 0.41
    mgc_gamma = -0.35
    mgc_arr = win2mgc(xw, order=mgc_order, alpha=mgc_alpha, gamma=mgc_gamma, verbose=True)
    xwsp = 20 * np.log10(np.abs(np.fft.rfft(xw)))
    # Overwrites the dB spectrum computed above with the MGC reconstruction.
    sp = mgc2sp(mgc_arr, mgc_alpha, mgc_gamma, fftlen)
    plt.plot(xwsp)
    plt.plot(20. / np.log(10) * np.real(sp), "r")
    plt.xlim(1, len(xwsp))
    plt.show()
def run_world_mgc_example():
    """WORLD analysis -> MGC compression of the spectrogram -> resynthesis."""
    fs, d = fetch_sample_speech_tapestry()
    d = d.astype("float32") / 2 ** 15
    # harcoded for 16k from
    # https://github.com/CSTR-Edinburgh/merlin/blob/master/misc/scripts/vocoder/world/extract_features_for_merlin.sh
    mgc_alpha = 0.58
    # mgc_order = 59
    mgc_order = 59
    # this is actually just mcep
    mgc_gamma = 0.0
    # from sklearn.externals import joblib
    # mem = joblib.Memory("/tmp")
    # mem.clear()

    def enc():
        # Full WORLD analysis plus MGC encoding of the smooth spectrogram.
        temporal_positions_h, f0_h, vuv_h, f0_candidates_h = harvest(d, fs)
        temporal_positions_ct, spectrogram_ct, fs_ct = cheaptrick(d, fs,
                                                                  temporal_positions_h, f0_h, vuv_h)
        temporal_positions_d4c, f0_d4c, vuv_d4c, aper_d4c, coarse_aper_d4c = d4c(d, fs,
                                                                                 temporal_positions_h, f0_h, vuv_h)
        mgc_arr = sp2mgc(spectrogram_ct, mgc_order, mgc_alpha, mgc_gamma,
                         verbose=True)
        return mgc_arr, spectrogram_ct, f0_d4c, vuv_d4c, coarse_aper_d4c

    mgc_arr, spectrogram_ct, f0_d4c, vuv_d4c, coarse_aper_d4c = enc()
    sp_r = mgc2sp(mgc_arr, mgc_alpha, mgc_gamma, fs=fs, verbose=True)
    """
    import matplotlib.pyplot as plt
    plt.imshow(20 * np.log10(sp_r))
    plt.figure()
    plt.imshow(20 * np.log10(spectrogram_ct))
    plt.show()
    raise ValueError()
    """
    y = world_synthesis(f0_d4c, vuv_d4c, coarse_aper_d4c, sp_r, fs)
    # y = world_synthesis(f0_d4c, vuv_d4c, aper_d4c, sp_r, fs)
    wavfile.write("out_mgc.wav", fs, soundsc(y))
def get_frame(signal, winsize, no):
    """Return frame number `no` of `signal`, using a 50% (half-window) hop.

    The final frame may be shorter than winsize if the signal runs out.
    """
    hop = winsize // 2
    begin = no * hop
    return signal[begin:begin + winsize]
class LTSD():
    """
    LTSD VAD code from jfsantos

    Long-Term Spectral Divergence voice activity detector. Frames use a 50%
    hop (see get_frame); per-frame amplitude spectra are cached by index.
    """
    def __init__(self, winsize, window, order):
        self.winsize = int(winsize)
        self.window = window
        self.order = order
        # Cache: frame index -> magnitude spectrum.
        self.amplitude = {}

    def get_amplitude(self, signal, l):
        """Magnitude spectrum of frame l, memoized in self.amplitude."""
        if l in self.amplitude:
            return self.amplitude[l]
        else:
            # BUGFIX: np.absolute/np.fft.fft replace sp.absolute/sp.fft --
            # SciPy removed its NumPy-alias re-exports and scipy.fft is now
            # a module (not callable). Values are identical.
            amp = np.absolute(np.fft.fft(get_frame(signal, self.winsize, l) * self.window))
            self.amplitude[l] = amp
            return amp

    def compute_noise_avg_spectrum(self, nsignal):
        """Average magnitude spectrum over all frames of a noise signal."""
        windownum = int(len(nsignal) // (self.winsize // 2) - 1)
        avgamp = np.zeros(self.winsize)
        for l in range(windownum):
            avgamp += np.absolute(np.fft.fft(get_frame(nsignal, self.winsize, l) * self.window))
        return avgamp / float(windownum)

    def compute(self, signal):
        """Return the per-frame LTSD score array for the whole signal."""
        self.windownum = int(len(signal) // (self.winsize // 2) - 1)
        ltsds = np.zeros(self.windownum)
        # Calculate the average noise spectrum amplitude based 20 frames in the head parts of input signal.
        self.avgnoise = self.compute_noise_avg_spectrum(signal[0:self.winsize * 20]) ** 2
        for l in range(self.windownum):
            ltsds[l] = self.ltsd(signal, l, 5)
        return ltsds

    def ltse(self, signal, l, order):
        """Long-term spectral envelope: per-bin max over a +/- order frame window."""
        maxamp = np.zeros(self.winsize)
        for idx in range(l - order, l + order + 1):
            amp = self.get_amplitude(signal, idx)
            maxamp = np.maximum(maxamp, amp)
        return maxamp

    def ltsd(self, signal, l, order):
        """LTSD score (dB) for frame l; zero where the +/- order window doesn't fit."""
        if l < order or l + order >= self.windownum:
            return 0
        return 10.0 * np.log10(np.sum(self.ltse(signal, l, order) ** 2 / self.avgnoise) / float(len(self.avgnoise)))
def ltsd_vad(x, fs, threshold=9, winsize=8192):
    """Voice activity detection with LTSD.

    Rescales x into 16-bit integer range, computes per-frame LTSD scores,
    and builds a boolean per-sample mask of voiced regions.

    Returns (voiced_samples_in_original_scale, mask).
    """
    # winsize based on sample rate
    # 1024 for fs = 16000
    orig_dtype = x.dtype
    orig_scale_min = x.min()
    orig_scale_max = x.max()
    # Normalize to [0, 1], then scale to the 16-bit integer range.
    x = (x - x.min()) / (x.max() - x.min())
    # works with 16 bit
    x = x * (2 ** 15)
    x = x.astype("int32")
    # BUGFIX: np.hanning replaces sp.hanning (the alias was removed from
    # SciPy's namespace); values are identical.
    window = np.hanning(winsize)
    ltsd = LTSD(winsize, window, 5)
    s_vad = ltsd.compute(x)
    # LTSD is 50% overlap, so each "step" covers 4096 samples
    # +1 to cover the extra edge window
    n_samples = int(((len(s_vad) + 1) * winsize) // 2)
    time_s = n_samples / float(fs)
    time_points = np.linspace(0, time_s, len(s_vad))
    time_samples = (fs * time_points).astype(np.int32)
    # BUGFIX: builtin bool replaces np.bool, removed in NumPy 1.24.
    f_vad = np.zeros_like(x, dtype=bool)
    offset = winsize
    for n, (ss, es) in enumerate(zip(time_samples[:-1], time_samples[1:])):
        # Shift the window span back by one window, clamped at zero.
        sss = ss - offset
        if sss < 0:
            sss = 0
        ses = es - offset
        if ses < 0:
            ses = 0
        if s_vad[n + 1] < threshold:
            f_vad[sss:ses] = False
        else:
            f_vad[sss:ses] = True
    # `ses` is the last loop value: everything past the final span is off.
    f_vad[ses:] = False
    # Undo the 16-bit rescaling back to the original range and dtype.
    x = x.astype("float64")
    x = x / float(2 ** 15)
    x = x * (orig_scale_max - orig_scale_min) + orig_scale_min
    x = x.astype(orig_dtype)
    return x[f_vad], f_vad
def run_ltsd_example():
    """Trim leading/trailing silence from a sample with LTSD VAD; write result."""
    fs, d = fetch_sample_speech_tapestry()
    winsize = 1024
    d = d.astype("float32") / 2 ** 15
    d -= d.mean()
    # Prepend 3 s of low-level noise so the VAD has a noise-only head segment.
    pad = 3 * fs
    noise_pwr = np.percentile(d, 1) ** 2
    noise_pwr = max(1E-9, noise_pwr)
    d = np.concatenate((np.zeros((pad,)) + noise_pwr * np.random.randn(pad), d))
    _, vad_segments = ltsd_vad(d, fs, winsize=winsize)
    v_up = np.where(vad_segments == True)[0]
    s = v_up[0]
    # Keep half a second past the last voiced sample.
    st = v_up[-1] + int(.5 * fs)
    d = d[s:st]
    bname = "tapestry.wav".split(".")[0]
    wavfile.write("%s_out.wav" % bname, fs, soundsc(d))
# Script entry point: only the LTSD example runs by default; the inert
# string below lists the other examples, to be uncommented one at a time.
if __name__ == "__main__":
    run_ltsd_example()
"""
Trying to run all examples will seg fault on my laptop - probably memory!
Comment individually
run_ltsd_example()
run_world_mgc_example()
run_world_example()
run_mgc_example()
run_phase_reconstruction_example()
run_phase_vq_example()
run_dct_vq_example()
run_fft_vq_example()
run_lpc_example()
run_cqt_example()
run_fft_dct_example()
test_all()
"""
| {
"repo_name": "thorwhalen/ut",
"path": "sound/others/audio_tools.py",
"copies": "1",
"size": "158402",
"license": "mit",
"hash": 463913261562416450,
"line_mean": 35.709617613,
"line_max": 185,
"alpha_frac": 0.5754094014,
"autogenerated": false,
"ratio": 3.06695322180917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9139833732247071,
"avg_score": 0.0005057781924196731,
"num_lines": 4315
} |
import numpy
import numpy.fft
import scipy
def _bin_approx_search(lst, tg):
    """
    Find the index of the element in lst which is closest to the number tg

    Assumes lst is sorted ascending. Bisects until the bracket is a single
    step wide, then returns whichever endpoint is strictly nearer to tg
    (ties go to the lower index).
    """
    hi = len(lst) - 1
    lo = 0
    while hi > lo:
        mid = lo + (hi - lo) // 2
        if lst[mid] < tg:
            lo = mid
        else:
            hi = mid
        if hi - lo == 1:
            return hi if abs(lst[hi] - tg) < abs(lst[lo] - tg) else lo
    return hi
def fft_in_range(audiomatrix, startindex, endindex, channel):
    """
    Do an FFT in the specified range of indices

    The audiomatrix should have the first index as its time domain and
    second index as the channel number. The startindex and endindex
    select the time range to use, and the channel parameter selects
    which channel to do the FFT on.

    Returns a vector of data in the frequency domain
    """
    segment = audiomatrix[startindex:endindex, channel]
    n = endindex - startindex
    # Keep the first half of the spectrum (real input is conjugate
    # symmetric) and normalize by the window length.
    return numpy.fft.fft(segment)[:n // 2] / n
def get_x_axis(samplerate, samplelength):
    """
    Find the actual frequencies to include along the x axis

    The samplerate is the sample rate of the audio matrix in Hertz
    and the samplelength is the number of samples fed to the FFT.

    Returns a matrix with the x axis numbers.
    """
    time = samplelength / samplerate  # The sample time of a single fft
    # BUGFIX: scipy.arange was a NumPy re-export removed in SciPy 1.x;
    # numpy.arange is the drop-in equivalent.
    return numpy.arange(samplelength // 2) / time
def moving_fft(audiomatrix, sampletime, fps, samplerate, channel=0):
    """
    Get a number of FFT samples over the time of an audio sample

    This is basically like a moving average for DFTs

    Args:
        audiomatrix: A matrix of audio data with time for the first
            dimension and channel for the second dimension
        sampletime: The length of a sample in seconds
        fps: The number of output samples per second
        samplerate: The sample frequency of the audio in hertz
        channel: The audio channel to use

    Returns:
        A matrix where the first dimension is the frame number
        and the second dimension is the frequency
    """
    samplelength = int(sampletime * samplerate)
    frame_increment = samplerate // fps
    # NOTE(review): each FFT spans frame_increment samples, not
    # samplelength; samplelength only limits the start positions. This
    # matches the commented-out preallocation in the original, but worth
    # confirming against callers.
    return numpy.array([
        fft_in_range(audiomatrix, startindex, startindex + frame_increment, channel)
        for startindex in range(0, audiomatrix.shape[0] - samplelength, frame_increment)
    ])
def freq_range_graph(fftmatrix, freqrange, samplerate, sampletime):
    """
    Create a row vector of the average amplitude of a frequency range over time

    Args:
        fftmatrix: The moving_fft() output
        freqrange: A tuple of form (minimum frequency, maximum frequency)
        samplerate: The sample frequency of the audio in hertz
        sampletime: The length of a sample in seconds
    """
    axis = get_x_axis(samplerate, sampletime * samplerate)
    lo, hi = freqrange
    start = _bin_approx_search(axis, lo)
    stop = _bin_approx_search(axis, hi)
    # Average the selected frequency bins within each frame
    return numpy.average(fftmatrix[:, start:stop], 1)
def isolate_freq_range(fftmatrix, freqrange, samplerate, sampletime):
    """
    Create a moving DFT matrix with only the given frequency range

    Args:
        fftmatrix: The moving_fft() output
        freqrange: A tuple of form (minimum frequency, maximum frequency)
        samplerate: The sample frequency of the audio in hertz
        sampletime: The length of a sample in seconds
    """
    axis = get_x_axis(samplerate, sampletime * samplerate)
    lo_index = _bin_approx_search(axis, freqrange[0])
    hi_index = _bin_approx_search(axis, freqrange[1])
    return fftmatrix[:, lo_index:hi_index]
def extract_freq(fftmatrix, backgroundfreq, targetfreq, samplerate, sampletime):
    """
    Extract occurrences of a specific note or drum by comparing its
    frequencies to background frequencies

    Args:
        fftmatrix: The moving_fft() output
        backgroundfreq: Frequencies to look for background noise
        targetfreq: Frequency range of the instrument
        samplerate: The sample frequency of the audio in hertz
        sampletime: The length of a sample in seconds
    """
    instrument = freq_range_graph(fftmatrix, targetfreq, samplerate, sampletime)
    noise = freq_range_graph(fftmatrix, backgroundfreq, samplerate, sampletime)
    # The tiny epsilon keeps the ratio well-defined when background is silent
    return abs(instrument) / (abs(noise) + 1E-20)
| {
"repo_name": "twoodford/audiovisualizer",
"path": "audiovisualizer/movingfft.py",
"copies": "1",
"size": "5213",
"license": "apache-2.0",
"hash": -9000727107123308000,
"line_mean": 36.5035971223,
"line_max": 88,
"alpha_frac": 0.6840590831,
"autogenerated": false,
"ratio": 4.034829721362229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009674513710414258,
"num_lines": 139
} |
import numpy
from PIL import Image, ImageDraw
class Widget:
    def _ampl_color(self, amplitude, frame):
        """
        Get the appropriate color with alpha for the given amplitude

        Args:
            amplitude: The amplitude at the time
            frame: Frame number; picks the last color stop whose start
                frame is <= frame (defaults to black before any stop)
        """
        chosen = (0, 0, 0)
        for start_frame, rgb in self._color:
            if frame >= start_frame:
                chosen = rgb
        alpha = int(self._amplfactor * amplitude)
        return chosen + (alpha,)
class DrumCircle(Widget):
    """
    Visualizer for percussive sounds with a solid circle in
    the middle and concentric rings for past data.

    Takes a preprocessed 1D array for input. The extract_frequency()
    method in the movingfft module may be useful for getting this type
    of data.
    """
    def __init__(self, amplitude, centerpt, inner_rad, outer_rad, color):
        """
        Create a DrumCircle visualization widget

        Args:
            amplitude: 1D array of amplitude data to display
            centerpt: (x,y) tuple for the location of the circle's center
            inner_rad: The radius of the circle displaying the current amplitude
            outer_rad: Outer radius bounding the rings of past amplitude
            color: The circle color in RGB
        """
        self._amplitude = amplitude
        self._centerpt = centerpt
        self._inner_rad = inner_rad
        self._outer_rad = outer_rad
        self._color = color
        # Scale so the loudest amplitude maps to a usable alpha value
        peak = numpy.nanmax(amplitude)
        self._amplfactor = 256 / peak * 50

    def display(self, framenum, target):
        # Solid circle for the current frame's amplitude
        _draw_circle(target, self._centerpt, self._inner_rad,
                     fill=self._ampl_color(self._amplitude[framenum], framenum))
        # One ring per past frame, spaced two pixels of radius apart;
        # each ring is drawn at two consecutive radii so it looks solid
        ring_count = (self._outer_rad - self._inner_rad) // 2
        for age in range(ring_count):
            past = framenum - age
            ring_color = self._ampl_color(self._amplitude[past], past)
            base = self._inner_rad + age * 2
            _draw_circle(target, self._centerpt, base, outline=ring_color)
            _draw_circle(target, self._centerpt, base + 1, outline=ring_color)
class FrequencyPoints(Widget):
    """
    Shows the frequency data on the x axis and time on the y axis, with
    the current time in the center. Looks kind of like rain.
    """
    def __init__(self, fftmatrix, bounds, color, visframes=20):
        self._fftmatrix = fftmatrix
        self._bounds = bounds
        self._color = color
        self._visframes = visframes
        self._amplfactor = 256 * 5000

    def display(self, framenum, target):
        # Render one pixel per (frequency bin, past frame), then scale up
        width = self._fftmatrix.shape[1]
        height = self._visframes
        scratch = Image.new("RGBA", (width, height))
        pen = ImageDraw.Draw(scratch)
        for col in range(width):
            for row in range(height):
                magnitude = abs(self._fftmatrix[framenum - row, col])
                pen.point((col, row), self._ampl_color(magnitude, framenum - row))
        scaled = scratch.resize(self._rect_size(), resample=Image.BICUBIC)
        target.bitmap(self._bounds[0], scaled,
                      fill=self._ampl_color(1 / 5000, framenum))

    def _rect_size(self):
        """Returns the (width, height) of the draw rect"""
        (left, top), (right, bottom) = self._bounds
        return (right - left, bottom - top)
class MeterDisplay:
    def __init__(self, fftmatrix, fpb, center, radius, color):
        """
        Create a new meter display

        Args:
            fftmatrix: The fft data
            fpb: Frames per beat
            center: Center of the circle
            radius: Radius of the circle
            color: Color data
        """
        self._fftmatrix = fftmatrix
        self._fpb = fpb
        self._center = center
        self._radius = radius
        self._color = color

    def display(self, framenum, target):
        # Fill one quadrant of the circle per beat within the bar (1..4).
        # The 11-frame offset shifts where beat counting starts — sync
        # constant, purpose not documented here.
        cx, cy = self._center
        r = self._radius
        bbox = [(cx - r, cy - r), (cx + r, cy + r)]
        beat = ((framenum - 11) // self._fpb) % 4 + 1
        target.pieslice(bbox, 0, beat * 90, fill=self._color)
def _draw_circle(target, center, radius, fill=None, outline=None):
left = (center[0] - radius, center[1] - radius)
right = (center[0] + radius, center[1] + radius)
target.ellipse([left, right], fill, outline)
| {
"repo_name": "twoodford/audiovisualizer",
"path": "audiovisualizer/widgets.py",
"copies": "1",
"size": "5085",
"license": "apache-2.0",
"hash": -5245047185939257000,
"line_mean": 37.5227272727,
"line_max": 88,
"alpha_frac": 0.6098328417,
"autogenerated": false,
"ratio": 3.794776119402985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9883471069466125,
"avg_score": 0.004227578327371811,
"num_lines": 132
} |
import PIL.Image
import PIL.ImageDraw
def make_frames(visualizers, numframes, outdimen, background=None):
    """
    Returns an iterator over antialiased output images

    Args:
        visualizers: An iterable of visualizer widgets
        numframes: The number of output frames to generate
        outdimen: (width, height) tuple of the output image size
        background: An optional background image
    """
    # Render at double resolution, then downsample for antialiasing
    rendersize = (outdimen[0] * 2, outdimen[1] * 2)
    if background is None:
        # Default background is solid black
        background = PIL.Image.new("RGBA", rendersize)
        PIL.ImageDraw.Draw(background).rectangle([(0, 0), rendersize],
                                                 fill=(0, 0, 0, 255))
    for framenum in range(numframes):
        layer = PIL.Image.new("RGBA", rendersize)
        drawer = PIL.ImageDraw.Draw(layer)
        for widget in visualizers:
            widget.display(framenum, drawer)
        merged = PIL.Image.alpha_composite(background, layer)
        # Resample back down to the requested size for nice antialiasing
        yield merged.resize(outdimen, resample=PIL.Image.ANTIALIAS)
| {
"repo_name": "twoodford/audiovisualizer",
"path": "audiovisualizer/animator.py",
"copies": "1",
"size": "1661",
"license": "apache-2.0",
"hash": -4686251224579488000,
"line_mean": 36.75,
"line_max": 84,
"alpha_frac": 0.7031908489,
"autogenerated": false,
"ratio": 3.9360189573459716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5139209806245971,
"avg_score": null,
"num_lines": null
} |
''' Audit Log Models '''
import json
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django import dispatch
from django.conf import settings
from .utils import serialize_data, data_has_changes
# Fired to request audit logging for a model instance; create_audit()
# below is connected as the receiver.
# NOTE(review): `providing_args` is documentary-only and was removed in
# Django 4.0 — confirm the targeted Django version still accepts it.
audit = dispatch.Signal(
    providing_args=['instance', 'relations', 'user', 'force'])
class AuditItem(models.Model):
    """Point-in-time snapshot of a model instance's serialized state.

    Rows are written by create_audit() whenever serialize_data() output
    differs from the most recent stored snapshot (or force=True).
    """
    # Generic foreign key: any model type can be audited
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # JSON-encoded snapshot produced by serialize_data()
    serialized_data = models.TextField(null=True, blank=True)
    # Acting user; nullable, so system-initiated changes are permitted
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
    created = models.DateTimeField(auto_now_add=True)

    @property
    def audit_data(self):
        """Return the stored snapshot decoded from JSON."""
        return json.loads(self.serialized_data)

    class Meta:
        # newest first; .latest() resolves by created
        ordering = ('-created',)
        get_latest_by = 'created'
@dispatch.receiver(audit, weak=False, dispatch_uid='audit.create_audit')
def create_audit(sender, instance, relations, user, force=False, **kws):
    """Persist an AuditItem snapshot when the instance's state changed.

    Connected to the `audit` signal; force=True writes a snapshot even
    when nothing differs from the latest stored audit row.
    """
    serialized = serialize_data(instance, relations)
    content_type = ContentType.objects.get_for_model(instance)
    try:
        # NOTE(review): filtered by content type only, not object_id —
        # confirm comparing against the latest row of *any* instance of
        # this model is intentional.
        latest = AuditItem.objects.filter(content_type=content_type).latest()
    except AuditItem.DoesNotExist:
        latest = None
    if data_has_changes(instance, relations, latest) or force:
        AuditItem.objects.create(
            content_object=instance,
            user=user,
            serialized_data=json.dumps(serialized),
        )
| {
"repo_name": "analytehealth/chronicler",
"path": "chronicler/models.py",
"copies": "1",
"size": "1557",
"license": "bsd-2-clause",
"hash": -8220841440557252000,
"line_mean": 32.1276595745,
"line_max": 72,
"alpha_frac": 0.7026332691,
"autogenerated": false,
"ratio": 3.9417721518987343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144405420998734,
"avg_score": null,
"num_lines": null
} |
"""Audit log
Revision ID: cf0c99c08578
Revises:
Create Date: 2017-12-12 21:12:56.282095
"""
from datetime import datetime
from alembic import op
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy_continuum import version_class
from sqlalchemy_continuum import versioning_manager
from sqlalchemy_continuum.operation import Operation
from tracker import db
from tracker.model import CVE
from tracker.model import Advisory
from tracker.model import CVEGroup
from tracker.model import CVEGroupEntry
from tracker.model import CVEGroupPackage
# revision identifiers, used by Alembic.
revision = 'cf0c99c08578'
down_revision = None  # initial migration: no parent revision
branch_labels = None
depends_on = None
def upgrade():
    """Add created/changed audit columns and backfill version history.

    Steps:
      1. create the sqlalchemy-continuum transaction/version tables
      2. add created/changed to cve, backfilled with "now"
      3. add changed to cve_group, backfill from created, and synthesize
         initial version rows for each group's packages and CVE entries
      4. add changed to advisory, backfilled from the advisory's own
         created timestamp
      5. mark every existing version row as a full INSERT
    """
    # ensure new transaction/version tables exist
    db.create_all()
    # update CVE table
    op.add_column('cve',
                  Column('created',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    op.add_column('cve',
                  Column('changed',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    for cve in CVE.query.all():
        cve.created = datetime.utcnow()
        cve.changed = cve.created
    db.session.commit()
    db.session.flush()
    # update AVG table
    op.add_column('cve_group',
                  Column('changed',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    for group in CVEGroup.query.all():
        group.changed = group.created
    db.session.commit()
    db.session.flush()
    VersionClassGroup = version_class(CVEGroup)
    uow = versioning_manager.unit_of_work(db.session)
    uow.create_transaction(db.session)
    # Synthesize INSERT version rows for each versioned group's packages
    # and CVE entries, tied to the group's own transaction window
    for group in VersionClassGroup.query.all():
        for package in CVEGroupPackage.query.filter(
                CVEGroupPackage.group_id == group.id).all():
            package_version = uow.get_or_create_version_object(package)
            package_version.group_id = group.id
            package_version.pkgname = package.pkgname
            package_version.transaction_id = group.transaction_id
            package_version.end_transaction_id = group.end_transaction_id
            package_version.operation_type = Operation.INSERT
            package_version.group_id_mod = 1
            package_version.pkgname_mod = 1
            uow.process_operation(Operation(package, Operation.INSERT))
        for cve in CVEGroupEntry.query.filter(
                CVEGroupEntry.group_id == group.id).all():
            cve_version = uow.get_or_create_version_object(cve)
            cve_version.group_id = group.id
            cve_version.cve_id = cve.cve_id
            cve_version.transaction_id = group.transaction_id
            cve_version.end_transaction_id = group.end_transaction_id
            cve_version.operation_type = Operation.INSERT
            cve_version.group_id_mod = 1
            cve_version.cve_id_mod = 1
            uow.process_operation(Operation(cve, Operation.INSERT))
    uow.make_versions(db.session)
    db.session.commit()
    db.session.flush()
    with op.batch_alter_table('cve_group', schema=None) as batch_op:
        batch_op.alter_column('changed', nullable=False)
    # update advisory table
    op.add_column('advisory',
                  Column('changed',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    for advisory in Advisory.query.all():
        # Fix: previously `advisory.changed = group.created`, which leaked
        # the loop variable from the version backfill above and stamped
        # every advisory with the *last group's* creation time.
        advisory.changed = advisory.created
    db.session.commit()
    db.session.flush()
    with op.batch_alter_table('advisory', schema=None) as batch_op:
        batch_op.alter_column('changed', nullable=False)
    # set all fields to modified for initial insert
    VersionClassCVE = version_class(CVE)
    VersionClassCVE.query.update({
        VersionClassCVE.operation_type: Operation.INSERT,
        VersionClassCVE.issue_type_mod: 1,
        VersionClassCVE.description_mod: 1,
        VersionClassCVE.severity_mod: 1,
        VersionClassCVE.remote_mod: 1,
        VersionClassCVE.reference_mod: 1,
        VersionClassCVE.notes_mod: 1
    })
    VersionClassGroup = version_class(CVEGroup)
    VersionClassGroup.query.update({
        VersionClassGroup.operation_type: Operation.INSERT,
        VersionClassGroup.status_mod: 1,
        VersionClassGroup.severity_mod: 1,
        VersionClassGroup.affected_mod: 1,
        VersionClassGroup.fixed_mod: 1,
        VersionClassGroup.bug_ticket_mod: 1,
        VersionClassGroup.reference_mod: 1,
        VersionClassGroup.notes_mod: 1,
        VersionClassGroup.created_mod: 1,
        VersionClassGroup.changed_mod: 1,
        VersionClassGroup.advisory_qualified_mod: 1
    })
    VersionClassAdvisory = version_class(Advisory)
    VersionClassAdvisory.query.update({
        VersionClassAdvisory.operation_type: Operation.INSERT,
        VersionClassAdvisory.group_package_id_mod: 1,
        VersionClassAdvisory.advisory_type_mod: 1,
        VersionClassAdvisory.publication_mod: 1,
        VersionClassAdvisory.workaround_mod: 1,
        VersionClassAdvisory.impact_mod: 1,
        VersionClassAdvisory.content_mod: 1,
        VersionClassAdvisory.created_mod: 1,
        VersionClassAdvisory.changed_mod: 1,
        VersionClassAdvisory.reference_mod: 1
    })
    db.session.commit()
def downgrade():
    """Drop the audit columns and every sqlalchemy-continuum table."""
    with op.batch_alter_table('cve', schema=None) as batch_op:
        batch_op.drop_index('ix_cve_created')
        batch_op.drop_index('ix_cve_changed')
        batch_op.drop_column('created')
        batch_op.drop_column('changed')
    with op.batch_alter_table('cve_group', schema=None) as batch_op:
        batch_op.drop_index('ix_cve_group_changed')
        batch_op.drop_column('changed')
    with op.batch_alter_table('advisory', schema=None) as batch_op:
        batch_op.drop_index('ix_advisory_changed')
        batch_op.drop_column('changed')
    # Drop the version tables, then the shared transaction table last
    versioned = (
        version_class(CVE),
        version_class(CVEGroup),
        version_class(CVEGroupEntry),
        version_class(CVEGroupPackage),
        version_class(Advisory),
        versioning_manager.transaction_cls,
    )
    for model in versioned:
        model.__table__.drop(db.engine)
    db.session.commit()
| {
"repo_name": "jelly/arch-security-tracker",
"path": "migrations/versions/cf0c99c08578_.py",
"copies": "2",
"size": "6476",
"license": "mit",
"hash": -3687643759786266600,
"line_mean": 33.2645502646,
"line_max": 73,
"alpha_frac": 0.6348054355,
"autogenerated": false,
"ratio": 3.818396226415094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5453201661915094,
"avg_score": null,
"num_lines": null
} |
"""Audit Logs API is a set of APIs for monitoring what’s happening in your Enterprise Grid organization.
Refer to https://slack.dev/python-slack-sdk/audit-logs/ for details.
"""
import json
import logging
from ssl import SSLContext
from typing import Any
from typing import Dict, Optional
import aiohttp
from aiohttp import BasicAuth, ClientSession
from slack_sdk.errors import SlackApiError
from .internal_utils import (
_build_request_headers,
_debug_log_response,
get_user_agent,
)
from .response import AuditLogsResponse
from ...proxy_env_variable_loader import load_http_proxy_from_env
class AsyncAuditLogsClient:
    """Async (aiohttp-based) client for Slack's Audit Logs API v1.

    The endpoint helpers (schemas/actions/logs) all delegate to
    api_call(), which builds headers and wraps the HTTP response in an
    AuditLogsResponse.
    """
    BASE_URL = "https://api.slack.com/audit/v1/"
    # Declared attribute types; all assigned in __init__
    token: str
    timeout: int
    ssl: Optional[SSLContext]
    proxy: Optional[str]
    base_url: str
    session: Optional[ClientSession]
    trust_env_in_session: bool
    auth: Optional[BasicAuth]
    default_headers: Dict[str, str]
    logger: logging.Logger
    def __init__(
        self,
        token: str,
        timeout: int = 30,
        ssl: Optional[SSLContext] = None,
        proxy: Optional[str] = None,
        base_url: str = BASE_URL,
        session: Optional[ClientSession] = None,
        trust_env_in_session: bool = False,
        auth: Optional[BasicAuth] = None,
        default_headers: Optional[Dict[str, str]] = None,
        user_agent_prefix: Optional[str] = None,
        user_agent_suffix: Optional[str] = None,
        logger: Optional[logging.Logger] = None,
    ):
        """API client for Audit Logs API
        See https://api.slack.com/admins/audit-logs for more details
        Args:
            token: An admin user's token, which starts with `xoxp-`
            timeout: Request timeout (in seconds)
            ssl: `ssl.SSLContext` to use for requests
            proxy: Proxy URL (e.g., `localhost:9000`, `http://localhost:9000`)
            base_url: The base URL for API calls
            session: `aiohttp.ClientSession` instance
            trust_env_in_session: True/False for `aiohttp.ClientSession`
            auth: Basic auth info for `aiohttp.ClientSession`
            default_headers: Request headers to add to all requests
            user_agent_prefix: Prefix for User-Agent header value
            user_agent_suffix: Suffix for User-Agent header value
            logger: Custom logger
        """
        self.token = token
        self.timeout = timeout
        self.ssl = ssl
        self.proxy = proxy
        self.base_url = base_url
        self.session = session
        self.trust_env_in_session = trust_env_in_session
        self.auth = auth
        self.default_headers = default_headers if default_headers else {}
        self.default_headers["User-Agent"] = get_user_agent(
            user_agent_prefix, user_agent_suffix
        )
        self.logger = logger if logger is not None else logging.getLogger(__name__)
        # Fall back to proxy settings from environment variables when no
        # explicit proxy was supplied
        if self.proxy is None or len(self.proxy.strip()) == 0:
            env_variable = load_http_proxy_from_env(self.logger)
            if env_variable is not None:
                self.proxy = env_variable
    async def schemas(
        self,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> AuditLogsResponse:
        """Returns information about the kind of objects which the Audit Logs API
        returns as a list of all objects and a short description.
        Authentication not required.
        Args:
            query_params: Set any values if you want to add query params
            headers: Additional request headers
        Returns:
            API response
        """
        return await self.api_call(
            path="schemas",
            query_params=query_params,
            headers=headers,
        )
    async def actions(
        self,
        *,
        query_params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> AuditLogsResponse:
        """Returns information about the kind of actions that the Audit Logs API
        returns as a list of all actions and a short description of each.
        Authentication not required.
        Args:
            query_params: Set any values if you want to add query params
            headers: Additional request headers
        Returns:
            API response
        """
        return await self.api_call(
            path="actions",
            query_params=query_params,
            headers=headers,
        )
    async def logs(
        self,
        *,
        latest: Optional[int] = None,
        oldest: Optional[int] = None,
        limit: Optional[int] = None,
        action: Optional[str] = None,
        actor: Optional[str] = None,
        entity: Optional[str] = None,
        additional_query_params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> AuditLogsResponse:
        """This is the primary endpoint for retrieving actual audit events from your organization.
        It will return a list of actions that have occurred on the installed workspace or grid organization.
        Authentication required.
        The following filters can be applied in order to narrow the range of actions returned.
        Filters are added as query string parameters and can be combined together.
        Multiple filter parameters are additive (a boolean AND) and are separated
        with an ampersand (&) in the query string. Filtering is entirely optional.
        Args:
            latest: Unix timestamp of the most recent audit event to include (inclusive).
            oldest: Unix timestamp of the least recent audit event to include (inclusive).
                Data is not available prior to March 2018.
            limit: Number of results to optimistically return, maximum 9999.
            action: Name of the action.
            actor: User ID who initiated the action.
            entity: ID of the target entity of the action (such as a channel, workspace, organization, file).
            additional_query_params: Add anything else if you need to use the ones this library does not support
            headers: Additional request headers
        Returns:
            API response
        """
        query_params = {
            "latest": latest,
            "oldest": oldest,
            "limit": limit,
            "action": action,
            "actor": actor,
            "entity": entity,
        }
        if additional_query_params is not None:
            query_params.update(additional_query_params)
        # Drop unset filters so they don't appear in the query string
        query_params = {k: v for k, v in query_params.items() if v is not None}
        return await self.api_call(
            path="logs",
            query_params=query_params,
            headers=headers,
        )
    async def api_call(
        self,
        *,
        http_verb: str = "GET",
        path: str,
        query_params: Optional[Dict[str, Any]] = None,
        body_params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> AuditLogsResponse:
        """Issue one request against base_url + path and wrap the result."""
        url = f"{self.base_url}{path}"
        return await self._perform_http_request(
            http_verb=http_verb,
            url=url,
            query_params=query_params,
            body_params=body_params,
            headers=_build_request_headers(
                token=self.token,
                default_headers=self.default_headers,
                additional_headers=headers,
            ),
        )
    async def _perform_http_request(
        self,
        *,
        http_verb: str,
        url: str,
        query_params: Optional[Dict[str, Any]],
        body_params: Optional[Dict[str, Any]],
        headers: Dict[str, str],
    ) -> AuditLogsResponse:
        # Serialize the body and flag it as JSON when present
        if body_params is not None:
            body_params = json.dumps(body_params)
            headers["Content-Type"] = "application/json;charset=utf-8"
        if self.logger.level <= logging.DEBUG:
            # Never log the Authorization header value
            headers_for_logging = {
                k: "(redacted)" if k.lower() == "authorization" else v
                for k, v in headers.items()
            }
            self.logger.debug(
                f"Sending a request - "
                f"url: {url}, "
                f"params: {query_params}, "
                f"body: {body_params}, "
                f"headers: {headers_for_logging}"
            )
        session: Optional[ClientSession] = None
        # Reuse the caller-supplied session if it is still open; otherwise
        # create a throwaway one and close it in the finally block
        use_running_session = self.session and not self.session.closed
        if use_running_session:
            session = self.session
        else:
            session = aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=self.timeout),
                auth=self.auth,
                trust_env=self.trust_env_in_session,
            )
        resp: AuditLogsResponse
        try:
            request_kwargs = {
                "headers": headers,
                "params": query_params,
                "data": body_params,
                "ssl": self.ssl,
                "proxy": self.proxy,
            }
            async with session.request(http_verb, url, **request_kwargs) as res:
                # NOTE(review): initialized as a dict but replaced by the raw
                # text below; AuditLogsResponse presumably accepts either —
                # confirm against its raw_body handling.
                response_body = {}
                try:
                    response_body = await res.text()
                except aiohttp.ContentTypeError:
                    self.logger.debug(
                        f"No response data returned from the following API call: {url}."
                    )
                except json.decoder.JSONDecodeError as e:
                    # NOTE(review): res.text() itself does not parse JSON;
                    # this handler looks defensive — confirm intent.
                    message = f"Failed to parse the response body: {str(e)}"
                    raise SlackApiError(message, res)
                resp = AuditLogsResponse(
                    url=url,
                    status_code=res.status,
                    raw_body=response_body,
                    headers=res.headers,
                )
                _debug_log_response(self.logger, resp)
        finally:
            if not use_running_session:
                await session.close()
        return resp
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/audit_logs/v1/async_client.py",
"copies": "1",
"size": "10030",
"license": "mit",
"hash": -4104228790683250000,
"line_mean": 35.3333333333,
"line_max": 112,
"alpha_frac": 0.5748903071,
"autogenerated": false,
"ratio": 4.377127891750328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006365389147981688,
"num_lines": 276
} |
"""Audit Module"""
from datetime import datetime
from enum import Enum
from dateutil import parser
from flask import current_app
from ..database import db
from .reference import Reference
def lookup_version():
    """Return the running app's version string (Audit.version default)."""
    return current_app.config.metadata['version']
class Context(Enum):
    """Kinds of events an Audit row can describe.

    The database stores the member *name*; the integer values only fix
    declaration order, so the sequence below must never be reordered.
    """
    # only add new contexts to END of list, otherwise ordering gets messed up
    (other, login, assessment, authentication, intervention, account,
     consent, user, observation, organization, group, procedure,
     relationship, role, tou) = range(15)
class Audit(db.Model):
    """ORM class for audit data
    Holds meta info about changes in other tables, such as when and
    by whom the data was added. Several other tables maintain foreign
    keys to audit rows, such as `Observation` and `Procedure`.
    """
    id = db.Column(db.Integer, primary_key=True)
    # Actor: the authenticated user who performed the action
    user_id = db.Column(db.ForeignKey('users.id'), nullable=False)
    # Subject: the user the action was performed on
    subject_id = db.Column(db.ForeignKey('users.id'), nullable=False)
    # Stored as the Context enum member *name* (see property below)
    _context = db.Column('context', db.Text, default='other', nullable=False)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
    # Application version active when the row was written
    version = db.Column(db.Text, default=lookup_version, nullable=False)
    comment = db.Column(db.Text)
    def __str__(self):
        return (
            "Audit by user {0.user_id} on user {0.subject_id} at "
            "{0.timestamp}: {0.context}: {0.comment}".format(self))
    @property
    def context(self):
        """Context name string under which the event was recorded."""
        return self._context
    @context.setter
    def context(self, ct_string):
        # Raises AttributeError if ct_string isn't a Context member name
        self._context = getattr(Context, ct_string).name
    def as_fhir(self):
        """Typically included as *meta* data in containing FHIR resource"""
        # Imported here to avoid circular imports at module load time
        from .user import get_user
        from .fhir import FHIR_datetime
        d = {}
        d['version'] = self.version
        d['lastUpdated'] = FHIR_datetime.as_fhir(self.timestamp)
        d['by'] = Reference.patient(self.user_id).as_fhir()
        d['by']['display'] = get_user(self.user_id).display_name
        d['on'] = Reference.patient(self.subject_id).as_fhir()
        d['context'] = self.context
        if self.comment:
            d['comment'] = self.comment
        return d
    @classmethod
    def from_logentry(cls, entry):
        """Parse and create an Audit instance from audit log entry
        Prior to version v16.5.12, audit entries only landed in log.
        This may be used to convert old entries, but newer ones should
        already be there.
        """
        # Example entry: "2016-02-23 10:07:05,953: performed by 10033 on 10033: login: logout"
        fields = entry.split(':')
        # First two ':' fields rejoin to "<date> HH:MM" (seconds dropped)
        dt = parser.parse(':'.join(fields[0:2]))
        # fields[3] is " performed by <user_id> on <subject_id>"
        user_id = int(fields[3].split()[2])
        subject_id = int(fields[3].split()[4])
        context = fields[4].strip()
        # Everything after the context is the free-text message
        message = ':'.join(fields[5:])
        return cls(
            user_id=user_id, subject_id=subject_id, context=context,
            timestamp=dt, comment=message)
| {
"repo_name": "uwcirg/true_nth_usa_portal",
"path": "portal/models/audit.py",
"copies": "1",
"size": "2974",
"license": "bsd-3-clause",
"hash": -1594743055922069000,
"line_mean": 33.183908046,
"line_max": 79,
"alpha_frac": 0.6375252186,
"autogenerated": false,
"ratio": 3.7455919395465993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48831171581465993,
"avg_score": null,
"num_lines": null
} |
"""AUDIT module
Maintain a log exclusively used for recording auditable events.
Any action deemed an auditable event should make a call to
auditable_event()
Audit data is also persisted in the database *audit* table.
"""
import logging
import os
import sys
from flask import current_app
from .database import db
from .models.audit import Audit
# special log level for auditable events
# initial goal was to isolate all auditable events to one log handler
# revised to be a level less than ERROR, so auditable events aren't
# considered errors for error mail handling (see SMTPHandler)
AUDIT = int((logging.WARN + logging.ERROR) / 2)
def auditable_event(message, user_id, subject_id, context="other"):
    """Record auditable event

    message: The message to record, i.e. "log in via facebook"
    user_id: The authenticated user id performing the action
    subject_id: The user id upon which the action was performed
    context: Event category name (see models.audit.Context)
    """
    entry = "performed by {0} on {1}: {2}: {3}".format(
        user_id, subject_id, context, message)
    current_app.logger.log(AUDIT, entry)
    # Persist the same event to the audit table; autoflush disabled so
    # adding the row doesn't flush unrelated pending session changes
    with db.session.no_autoflush:
        db.session.add(Audit(
            user_id=user_id, subject_id=subject_id, comment=message,
            context=context))
        db.session.commit()
def configure_audit_log(app):  # pragma: no cover
    """Configure audit logging.

    The audit log is only active when running as a service (not during
    database updates, etc.)  It should only receive auditable events
    and never be rotated out.
    """
    # Skip config when running tests or maintenance
    if ((sys.argv[0].endswith('/bin/flask') and 'run' not in sys.argv) or
            app.testing):
        return
    # logging.addLevelName(level, levelName) takes the numeric level
    # first.  The previous call passed the arguments reversed, so the
    # custom level was never registered under the name 'AUDIT'.
    logging.addLevelName(AUDIT, 'AUDIT')
    audit_log_handler = logging.StreamHandler(sys.stdout)
    if app.config.get('LOG_FOLDER', None):
        # Prefer a dedicated file when a log folder is configured;
        # delay=True defers opening the file until the first emit
        audit_log = os.path.join(app.config['LOG_FOLDER'], 'audit.log')
        audit_log_handler = logging.FileHandler(audit_log, delay=True)
    audit_log_handler.setLevel(AUDIT)
    audit_log_handler.setFormatter(
        logging.Formatter('%(asctime)s: %(message)s'))
    app.logger.addHandler(audit_log_handler)
| {
"repo_name": "uwcirg/true_nth_usa_portal",
"path": "portal/audit.py",
"copies": "1",
"size": "2167",
"license": "bsd-3-clause",
"hash": 4630873641912835000,
"line_mean": 29.9571428571,
"line_max": 73,
"alpha_frac": 0.6940470697,
"autogenerated": false,
"ratio": 3.6359060402684564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4829953109968456,
"avg_score": null,
"num_lines": null
} |
#audit
import gspread
import datetime
from configuration import configuration
class audit:
    """Appends synonym-usage records to the "Audit" worksheet of a
    per-language Google Sheets document via gspread."""
    def __init__(self):
        # Google credentials are read from config.ini
        config = configuration("config.ini")
        self.googleUsername = config.getGoogleUsername()
        self.googlePassword = config.getGooglePassword()
    def save(self, language, synonym, network, message):
        """Append one audit row (timestamp, level, synonym fields,
        network, message) to the spreadsheet named *language*.

        NOTE(review): gspread.login() is email/password auth, removed in
        gspread 2.0 — confirm the pinned gspread version still has it.
        """
        googleSheets = gspread.login(self.googleUsername, self.googlePassword);
        worksheet = googleSheets.open(language).worksheet("Audit");
        audits = worksheet.get_all_values();
        # Append after the last populated row (gspread rows are 1-based)
        newAuditRow = len(audits) + 1
        d = datetime.datetime.now()
        # Hand-built, non-zero-padded timestamp, e.g. "2017-1-5 9:3:7"
        currentDateTimeString = "%s-%s-%s %s:%s:%s" % (d.year, d.month, d.day, d.hour, d.minute, d.second)
        worksheet.update_cell(newAuditRow, 1, currentDateTimeString)
        worksheet.update_cell(newAuditRow, 2, "Beginner")
        worksheet.update_cell(newAuditRow, 3, synonym.word)
        worksheet.update_cell(newAuditRow, 4, synonym.synonym)
        worksheet.update_cell(newAuditRow, 5, synonym.grammar)
        worksheet.update_cell(newAuditRow, 6, synonym.level)
        worksheet.update_cell(newAuditRow, 7, synonym.link)
        worksheet.update_cell(newAuditRow, 8, network)
worksheet.update_cell(newAuditRow, 9, message) | {
"repo_name": "jameslawler/synonymly-standalone",
"path": "src/audit.py",
"copies": "1",
"size": "1253",
"license": "mit",
"hash": -5144037166942141000,
"line_mean": 40.8,
"line_max": 106,
"alpha_frac": 0.6855546688,
"autogenerated": false,
"ratio": 3.6637426900584797,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.484929735885848,
"avg_score": null,
"num_lines": null
} |
# audit_orders.py
# Ronald L. Rivest
# July 10, 2017
# python3
"""
Routine to work with multi.py program for election audits.
Generates random audit orders from a ballot manifest
and an audit seed, for each paper ballot collection.
The overall algorithm is the "Fisher-Yates shuffle":
https://en.wikipedia.org/wiki/FisherYates_shuffle
The method used uses SHA256 in counter mode, as in
the program:
https://people.csail.mit.edu/rivest/sampler.py
"""
import hashlib
import os
import multi
import ids
import utils
def sha256(hash_input):
    """
    Return value of SHA256 hash of input
    bytearray hash_input, as a nonnegative integer.
    """
    assert isinstance(hash_input, bytearray)
    digest = hashlib.sha256(hash_input).hexdigest()
    return int(digest, 16)
def shuffle(L, seed):
    """ Return shuffled copy of list L, based on seed.

    Fisher-Yates shuffle driven by SHA256 in counter mode: the swap
    index for position i is SHA256("<seed>,<i>") mod (i+1), so the
    result is fully determined by seed.
    """
    shuffled = list(L)
    for i in range(len(shuffled)):
        material = bytearray("{},{}".format(seed, i), 'utf-8')
        value = int(hashlib.sha256(material).hexdigest(), 16)
        j = value % (i + 1)  # uniform position in [0, i]
        shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
    return shuffled
def test_shuffle(seed=1234567890):
    """Print three sample shufflings of range(20) for manual inspection."""
    for offset in range(3):
        print(shuffle(range(20), seed + offset))
"""
[12, 13, 2, 18, 3, 8, 9, 7, 17, 6, 16, 5, 11, 19, 1, 14, 10, 0, 4, 15]
[4, 2, 9, 8, 14, 6, 3, 5, 7, 15, 18, 10, 19, 1, 13, 11, 17, 12, 0, 16]
[13, 12, 1, 0, 3, 4, 19, 10, 11, 5, 7, 2, 17, 16, 18, 14, 8, 6, 9, 15]
"""
def compute_audit_orders(e):
    """Compute a random audit order for every paper ballot collection in e."""
    for pbcid in e.pbcids:
        compute_audit_order(e, pbcid)
def compute_audit_order(e, pbcid):
    """Shuffle collection pbcid's ballots, seeded by audit seed + pbcid.

    Stores the shuffled 1-based manifest positions and ballot ids on e.
    """
    numbered = zip(range(1, len(e.bids_p[pbcid]) + 1), e.bids_p[pbcid])
    ordering = shuffle(numbered, str(e.audit_seed) + "," + pbcid)
    e.shuffled_indices_p[pbcid] = [index for (index, _) in ordering]
    e.shuffled_bids_p[pbcid] = [bid for (_, bid) in ordering]
def write_audit_orders(e):
    """Write an audit-order CSV file for each paper ballot collection in e."""
    for collection_id in e.pbcids:
        write_audit_order(e, collection_id)
def write_audit_order(e, pbcid):
    # Write the shuffled audit order for paper ballot collection `pbcid`
    # to a date-stamped CSV file under .../3-audit/32-audit-orders/.
    dirpath = os.path.join(multi.ELECTIONS_ROOT, e.election_dirname,
                           "3-audit", "32-audit-orders")
    os.makedirs(dirpath, exist_ok=True)
    ds = utils.date_string()
    # pbcid may contain characters unsafe for filenames; sanitize it.
    safe_pbcid = ids.filename_safe(pbcid)
    filename = os.path.join(dirpath, "audit-order-"+safe_pbcid+"-"+ds+".csv")
    with open(filename, "w") as file:
        fieldnames = ["Ballot order",
                      "Collection",
                      "Box",
                      "Position",
                      "Stamp",
                      "Ballot id",
                      "Comments"]
        file.write(",".join(fieldnames))
        file.write("\n")
        # NOTE(review): rows are written with raw "," separators and no
        # quoting (and each row carries a trailing comma); a comma inside any
        # field would corrupt the row — confirm field contents, or switch to
        # the csv module.
        # NOTE(review): the "Ballot order" column is the enumeration counter
        # `i`; the shuffled 1-based `index` value is never written — confirm
        # this is intended.
        for i, index in enumerate(e.shuffled_indices_p[pbcid]):
            bid = e.shuffled_bids_p[pbcid][i]
            file.write("{},".format(i))
            file.write("{},".format(pbcid))
            file.write("{},".format(e.boxid_pb[pbcid][bid]))
            file.write("{},".format(e.position_pb[pbcid][bid]))
            file.write("{},".format(e.stamp_pb[pbcid][bid]))
            file.write("{},".format(bid))
            file.write("{},".format(e.comments_pb[pbcid][bid]))
            file.write("\n")
def test_audit_orders():
    """Smoke test: build a synthetic election, then compute and write
    its audit orders."""
    import syn2
    election = syn2.SynElection()
    compute_audit_orders(election)
    write_audit_orders(election)
if __name__=="__main__":
    # Exercise the shuffle and the full audit-order pipeline when run
    # as a script.
    test_shuffle()
    test_audit_orders()
| {
"repo_name": "ron-rivest/2017-bayes-audit",
"path": "2017-code/audit_orders.py",
"copies": "1",
"size": "3438",
"license": "mit",
"hash": -2236487544740560600,
"line_mean": 25.2442748092,
"line_max": 77,
"alpha_frac": 0.5596276905,
"autogenerated": false,
"ratio": 3.0614425645592163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.907509316280177,
"avg_score": 0.009195418451489426,
"num_lines": 131
} |
import sys, datetime, time
import csv
import sys
import os
from datetime import datetime
from time import strptime
from optparse import OptionParser
from lxml import etree
### Global Variables ###
# Dictionary of XML elements to parse from each audit type.
# Keys are MIR item tags; values list the child element names (output
# columns) extracted for that audit type, in column order.  parsePersistence
# relies on the positions of 'SignatureExists' (15) .. 'md5sum' (20) in
# d['PersistenceItem'].
d = {}
d['FileItem'] = ['FileName', 'FullPath', 'FileAttributes', 'SizeInBytes', 'Md5sum', 'Username', 'Created', 'Modified', 'Accessed', 'Changed', 'FilenameCreated', 'FilenameModified', 'FilenameAccessed', 'FilenameChanged', 'SecurityID', 'INode', 'DevicePath', 'PEInfo','StreamList']
d['PrefetchItem'] = ['FullPath', 'Created', 'SizeInBytes', 'ApplicationFileName', 'LastRun', 'TimesExecuted', 'ApplicationFullPath']
d['UserItem'] = ['Username', 'SecurityID', 'SecurityType', 'fullname', 'description', 'homedirectory', 'scriptpath', 'grouplist', 'LastLogin', 'disabled', 'lockedout', 'passwordrequired', 'userpasswordage']
d['RegistryItem'] = ['Username','SecurityID','Path','ValueName','Type','Modified','Text','NumSubKeys','NumValues']
d['PortItem'] = ['pid', 'process', 'path', 'state', 'localIP', 'remoteIP', 'localPort', 'remotePort', 'protocol']
d['UrlHistoryItem'] = ['Profile', 'BrowserName', 'BrowserVersion', 'Username', 'URL', 'LastVisitDate', 'VisitType']
d['ProcessItem'] = ['pid', 'parentpid', 'path', 'name', 'arguments', 'Username', 'SecurityID', 'SecurityType', 'startTime']
d['EventLogItem'] = ['EID', 'log', 'index', 'type', 'genTime', 'writeTime', 'source', 'machine', 'user', 'message']
d['ServiceItem'] = ['name', 'descriptiveName', 'description', 'mode', 'startedAs', 'path', 'arguments', 'pathmd5sum', 'pathSignatureExists', 'pathSignatureVerified', 'pathSignatureDescription', 'pathCertificateSubject', 'pathCertificateIssuer', 'serviceDLL', 'serviceDLLmd5sum', 'serviceDLLSignatureExists', 'serviceDLLSignatureVerified', 'serviceDLLSignatureDescription', 'serviceDLLCertificateSubject', 'serviceDLLCertificateIssuer', 'status', 'pid', 'type']
d['ModuleItem'] = ['ModuleAddress', 'ModuleInit', 'ModuleBase', 'ModuleSize', 'ModulePath', 'ModuleName']
d['DriverItem'] = ['DriverObjectAddress', 'ImageBase', 'ImageSize', 'DriverName', 'DriverInit', 'DriverStartIo', 'DriverUnload', 'Md5sum', 'SignatureExists', 'SignatureVerified', 'SignatureDescription', 'CertificateIssuer']
d['HiveItem'] = ['Name', 'Path']
d['HookItem'] = ['HookDescription', 'HookedFunction', 'HookedModule', 'HookingModule', 'HookingAddress', 'DigitalSignatureHooking', 'DigitalSignatureHooked']
d['VolumeItem'] = ['VolumeName', 'DevicePath', 'DriveLetter', 'Type', 'Name', 'SerialNumber', 'FileSystemFlags', 'FileSystemName', 'ActualAvailableAllocationUnits', 'TotalAllocationUnits', 'BytesPerSector', 'SectorsPerAllocationUnit', 'CreationTime', 'IsMounted']
d['ArpEntryItem'] = ['Interface', 'InterfaceType', 'PhysicalAddress', 'IPv4Address', 'IPv6Address', 'IsRouter', 'LastReachable', 'LastUnreachable', 'CacheType']
d['RouteEntryItem'] = ['Interface', 'Destination', 'Netmask', 'Gateway', 'RouteType', 'Protocol', 'RouteAge', 'Metric']
d['DnsEntryItem'] = ['Host', 'RecordName', 'RecordType', 'TimeToLive', 'Flags', 'DataLength', 'RecordData']
d['TaskItem'] = ['Name', 'VirtualPath', 'ExitCode', 'CreationDate', 'Comment', 'Creator', 'MaxRunTime', 'Flag', 'AccountName', 'AccountRunLevel', 'AccountLogonType', 'MostRecentRunTime','NextRunTime', 'Status', 'ActionList']
d['FileDownloadHistoryItem'] = ['Profile', 'BrowserName', 'BrowserVersion', 'username', 'DownloadType', 'FileName', 'SourceURL', 'TargetDirectory', 'LastAccessedDate', 'LastModifiedDate', 'BytesDownloaded', 'MaxBytes', 'CacheFlags', 'CacheHitCount', 'LastCheckedDate']
# BUGFIX: a missing comma between 'ExpirationDate' and 'LastAccessedDate'
# previously merged them into one bogus column name via implicit string
# concatenation ('ExpirationDateLastAccessedDate'), dropping a column.
d['CookieHistoryItem'] = ['Profile', 'BrowserName', 'BrowserVersion', 'Username', 'FileName', 'FilePath', 'CookiePath', 'CookieName', 'CookieValue', 'CreationDate', 'ExpirationDate', 'LastAccessedDate', 'LastModifiedDate']
d['SystemInfoItem'] = ['machine', 'totalphysical', 'availphysical', 'uptime', 'OS', 'OSbitness', 'hostname', 'date', 'user', 'domain', 'processor', 'patchLevel', 'buildNumber', 'procType', 'productID', 'productName', 'regOrg', 'regOwner', 'installDate' , 'MAC', 'timezoneDST', 'timezoneStandard', 'networkArray']
d['PersistenceItem'] = ['PersistenceType', 'ServiceName', 'RegPath', 'RegText', 'RegOwner', 'RegModified', 'ServicePath', 'serviceDLL', 'arguments', 'FilePath', 'FileOwner', 'FileCreated', 'FileModified', 'FileAccessed', 'FileChanged', 'SignatureExists', 'SignatureVerified', 'SignatureDescription', 'CertificateSubject', 'CertificateIssuer', 'md5sum']
# TODO: Add parsing for Disk and System Restore Point audits
# Global accumulator of timelineEntry objects built during parsing.
timelineData = []
### Class Definitions ###
# Generic timeline object definition
class timelineEntry:
    """One row of the output timeline: a parsed timestamp plus up to two
    description/data pairs and an optional user."""

    def __init__(self, timeStamp, rowType, entryDesc, entryData):
        # Parse the MIR timestamp format, e.g. "2017-06-01T12:00:00Z".
        self.timeObject = datetime.strptime(timeStamp, "%Y-%m-%dT%H:%M:%SZ")
        self.rowType = rowType
        self.entryDesc = entryDesc
        self.entryData = entryData
        self.entry2Desc = ""
        self.entry2Data = ""
        self.timeDesc = ""
        self.user = ""

    def addUser(self, user):
        """Attach the user associated with this entry."""
        self.user = user

    def addTimeDesc(self, timeDesc):
        """Record which timestamp field (e.g. 'Modified') this entry sorts on."""
        self.timeDesc = timeDesc

    def addEntry(self, entry2Desc, entry2Data):
        """Attach a secondary description/data pair."""
        self.entry2Desc = entry2Desc
        self.entry2Data = entry2Data

    def getTimelineRow(self):
        """Return this entry as a flat list suitable for csv.writer."""
        return [self.timeObject.isoformat(), self.timeDesc, self.rowType,
                self.user, self.entryDesc, self.entryData,
                self.entry2Desc, self.entry2Data]
### Methods ###
# Helper function to print column headers for parsed audits
def printHeaders(auditType):
    """Return the column-header row for the given audit type.

    auditType may be a string or a single-element list of one tag name;
    "".join() collapses either form into the dictionary key.
    """
    return [columnLabel for columnLabel in d["".join(auditType)]]
# Parse MIR agent XML input files into tab-delimited output
def parseXML(inFile,outFile):
    # Convert one MIR agent XML results file to a tab-delimited text file,
    # optionally collecting timeline entries along the way.
    #   inFile  -- path to the MIR XML results file
    #   outFile -- path of the tab-delimited output file to create
    # Reads globals: d (audit schema), doTimeline, startTime, endTime.
    outHandle = open(outFile,'wb')
    writer = csv.writer(outHandle, dialect=csv.excel_tab)
    rowCount = -1
    currentAudit = ""
    # Iterate through XML incrementally (iterparse avoids loading whole file)
    for event, elem in etree.iterparse(inFile):
        # Only proceed if element is in our parsing dictionary
        if elem.tag in d:
            row = []
            currentAudit = elem.tag
            # Write header row once, before the first data row
            if rowCount < 0:
                writer.writerow(printHeaders(elem.tag))
                rowCount += 1
            # Iterate through each sub-element and build an output row
            for i in d[elem.tag]:
                if(elem.find(i) is not None):
                    # Special case for nested DigSig data within FileItem audit results
                    if((elem.find(i).tag == "PEInfo") and (elem.tag == "FileItem")):
                        digSigList = []
                        for j in elem.find(i).iter():
                            if(j.tag == "DigitalSignature"):
                                subs = list(j)
                                for k in list(j):
                                    digSigList.append(k.tag + " : " + (k.text or "[]"))
                        separator = " | "
                        row.append(separator.join(digSigList).encode("utf-8"))
                    # Special case for nested Stream data within FileItem audit results
                    elif((elem.find(i).tag == "StreamList") and (elem.tag == "FileItem")):
                        streamList = []
                        for j in elem.find(i).iter():
                            if(j.tag == "Stream"):
                                subs = list(j)
                                for k in list(j):
                                    streamList.append(k.tag + " : " + (k.text or "[]"))
                        separator = " | "
                        row.append(separator.join(streamList).encode("utf-8"))
                    # Special case for nested network config data within System audit results
                    elif((elem.find(i).tag == "networkArray") and (elem.tag == "SystemInfoItem")):
                        networkAdapters = []
                        for j in elem.find(i).iter():
                            subs = list(j)
                            for k in list(j):
                                networkAdapters.append(k.tag + " : " + (k.text or "[]"))
                        separator = " | "
                        row.append(separator.join(networkAdapters))
                    # Special case for nested grouplist within UserItem audit results
                    elif((elem.find(i).tag == "grouplist") and (elem.tag == "UserItem")):
                        groupList = []
                        for j in elem.find(i).iter(tag="groupname"):
                            groupList.append(j.text)
                        separator = " | "
                        row.append(separator.join(groupList))
                    # Special case for nested RecordData within DNS Cache audit results
                    elif((elem.find(i).tag == "RecordData") and (elem.tag == "DnsEntryItem")):
                        recordList = []
                        for j in elem.find(i).iter():
                            if(j.tag != "RecordData"): recordList.append(j.tag + " : " + (j.text or ""))
                        separator = " | "
                        row.append(separator.join(recordList))
                    # Special case for nested DigSig data within HookItem audit results
                    elif((elem.find(i).tag == "DigitalSignatureHooking" or elem.find(i).tag =="DigitalSignatureHooked") and (elem.tag == "HookItem")):
                        digSigList = []
                        for j in elem.find(i).iter():
                            if(j.tag != "DigitalSignatureHooking" and j.tag != "DigitalSignatureHooked"): digSigList.append(j.tag + " : " + (j.text or ""))
                        separator = " | "
                        row.append(separator.join(digSigList))
                    # Special case for nested ActionList within Task audit results
                    elif((elem.find(i).tag == "ActionList") and (elem.tag == "TaskItem")):
                        actionList = []
                        for j in elem.find(i).iter():
                            if(j.tag != "Action" and j.tag != "ActionList"): actionList.append(j.tag + " : " + (j.text or ""))
                        separator = " | "
                        row.append(separator.join(actionList))
                    # Event log messages: flatten embedded newlines/tabs so the
                    # record stays on a single output line
                    elif((elem.find(i).tag == "message") and (elem.tag == "EventLogItem")):
                        if elem.find(i).text is not None:
                            strippedMessage = elem.find(i).text.replace('\r\n', ' ')
                            strippedMessage = strippedMessage.replace('\t',' ')
                            strippedMessage = strippedMessage.replace('\n', ' ')
                            row.append(strippedMessage.encode("utf-8"))
                        # NOTE(review): when the message text is empty nothing is
                        # appended, so subsequent columns shift left — confirm.
                    # For all other non-nested elements
                    else:
                        rowData = elem.find(i).text or ""
                        row.append(rowData.encode("utf-8"))
                # Write an empty string for empty elements
                else:
                    row.append("")
            # Commit row to tab-delim file
            writer.writerow(row)
            # NOTE(review): `and` binds tighter than `or`, so doTimeline only
            # guards the FileItem clause; the other clauses can fire even when
            # the timeline option is off.  The two RegistryItem clauses are
            # also exact duplicates.  Confirm intended before changing.
            if(doTimeline) and (currentAudit == "FileItem") or \
              ((currentAudit == "RegistryItem") and (startTime <= elem.find("Modified").text) and (endTime >= elem.find("Modified").text)) or \
              ((currentAudit == "RegistryItem") and (startTime <= elem.find("Modified").text) and (endTime >= elem.find("Modified").text)) or \
              ((currentAudit == "EventLogItem") and (startTime <= elem.find("genTime").text) and (endTime >= elem.find("genTime").text)) or \
              ((currentAudit == "UrlHistoryItem") and (startTime <= elem.find("LastVisitDate").text) and (endTime >= elem.find("LastVisitDate").text)) or \
              ((currentAudit == "ProcessItem") and (elem.find("startTime") is not None and startTime <= elem.find("startTime").text) and (endTime >= elem.find("startTime").text)):
                buildTimeline(elem)
            rowCount += 1
        # Free up memory by clearing no-longer needed XML element
        elem.clear()
    outHandle.close()
# Helper function to parse persistence audits, which require a different approach due to schema
def parsePersistence(inFile,outFile):
    # Convert a persistence audit XML file to tab-delimited text, collapsing
    # per-service / per-DLL signature data into the shared output columns.
    outHandle = open(outFile,'wb')
    writer = csv.writer(outHandle, dialect=csv.excel_tab)
    # Write header row
    writer.writerow(printHeaders(['PersistenceItem']))
    # Iterate through each top-level XML element
    tree = etree.parse(inFile)
    for subItem in tree.iter("PersistenceItem"):
        row = []
        for columnName in d['PersistenceItem']:
            if(subItem.find(columnName) is not None):
                rowData = subItem.find(columnName).text or ""
                row.append(rowData.encode("utf-8"))
            else: row.append("")
        # Hack to reduce and simplify schema for output file. MD5 and digital signature information
        # for Service ImagePaths and Service DLLs is "collapsed" into the existing columns for other
        # Persistence items (indices 15-20: SignatureExists .. md5sum).
        # NOTE(review): every ServiceDll branch guards on
        # serviceDLLSignatureExists but then reads a *different* serviceDLL*
        # field; if one of those is absent this raises AttributeError —
        # confirm the schema guarantees they appear together.
        if((row[0]=="ServiceDll") and (subItem.find("serviceDLLSignatureExists") is not None)):
            row[15] = subItem.find("serviceDLLSignatureExists").text
        if((row[0]=="ServiceDll") and (subItem.find("serviceDLLSignatureExists") is not None)):
            row[16] = subItem.find("serviceDLLSignatureVerified").text
        if((row[0]=="ServiceDll") and (subItem.find("serviceDLLSignatureExists") is not None)):
            row[17] = subItem.find("serviceDLLSignatureDescription").text
        if((row[0]=="ServiceDll") and (subItem.find("serviceDLLSignatureExists") is not None)):
            row[18] = subItem.find("serviceDLLCertificateSubject").text
        if((row[0]=="ServiceDll") and (subItem.find("serviceDLLSignatureExists") is not None)):
            row[19] = subItem.find("serviceDLLCertificateIssuer").text
        if((row[0]=="ServiceDll") and (subItem.find("serviceDLLmd5sum") is not None)):
            row[20] = subItem.find("serviceDLLmd5sum").text
        if((row[0]=="Service") and (subItem.find("pathSignatureExists") is not None)):
            row[15] = subItem.find("pathSignatureExists").text
        if((row[0]=="Service") and (subItem.find("pathSignatureVerified") is not None)):
            row[16] = subItem.find("pathSignatureVerified").text
        if((row[0]=="Service") and (subItem.find("pathSignatureDescription") is not None)):
            row[17] = subItem.find("pathSignatureDescription").text
        if((row[0]=="Service") and (subItem.find("pathCertificateSubject") is not None)):
            row[18] = subItem.find("pathCertificateSubject").text
        if((row[0]=="Service") and (subItem.find("pathCertificateIssuer") is not None)):
            row[19] = subItem.find("pathCertificateIssuer").text
        if((row[0]=="Service") and (row[1].find("ServiceDll") < 0) and (subItem.find("pathmd5sum") is not None)):
            row[20] = subItem.find("pathmd5sum").text
        # Fix errant unicode after substituting
        for i, rowValue in enumerate(row):
            if rowValue is not None:
                row[i] = rowValue.encode("utf-8")
        writer.writerow(row)
    outHandle.close()
# Helper function to parse prefetch audits, which require a different approach due to schema
def parsePrefetch(inFile,outFile):
    # Convert a prefetch audit XML file to tab-delimited text and, when the
    # timeline option is enabled, add in-range entries to the timeline.
    outHandle = open(outFile,'wb')
    writer = csv.writer(outHandle, dialect=csv.excel_tab)
    # Write header row
    writer.writerow(printHeaders(['PrefetchItem']))
    # Iterate through each top-level XML element
    tree = etree.parse(inFile)
    for subItem in tree.iter("PrefetchItem"):
        row = []
        for columnName in d['PrefetchItem']:
            if(subItem.find(columnName) is not None):
                rowData = subItem.find(columnName).text or ""
                row.append(rowData.encode("utf-8"))
            else: row.append("")
        writer.writerow(row)
        # Add to timeline if option enabled and LastRun or Created within range
        # NOTE(review): `and` binds tighter than `or`, so the Created clause
        # is not guarded by doTimeline (nor by the None checks) — confirm.
        if doTimeline and subItem.find("LastRun").text is not None and subItem.find("Created").text is not None \
          and ((startTime <= subItem.find("LastRun").text) and (endTime >= subItem.find("LastRun").text)) or \
          ((startTime <= subItem.find("Created").text) and (endTime >= subItem.find("Created").text)):
            buildTimeline(subItem)
    outHandle.close()
# Build a timeline object from a parsed element
def buildTimeline(elem):
    # Append one or more timelineEntry objects to the global timelineData
    # list for a parsed audit element; each audit type contributes its own
    # timestamp field(s) and descriptive columns.
    # Case 1: File item timeline object — one row per populated time field
    if(elem.tag == "FileItem"):
        timeFields = ['Created', 'Modified', 'Accessed', 'Changed', 'FilenameCreated', 'FilenameModified', 'FilenameAccessed', 'FilenameChanged']
        for field in timeFields:
            if(elem.find(field) is not None):
                timelineData.append(timelineEntry(elem.find(field).text, elem.tag, "FullPath", elem.find("FullPath").text.encode("utf-8")))
                timelineData[-1].addTimeDesc(field)
                if elem.find("Md5sum") is not None:
                    timelineData[-1].addEntry("MD5sum",elem.find("Md5sum").text)
                if elem.find("Username") is not None:
                    timelineData[-1].addUser(elem.find("Username").text.encode("utf-8"))
    # Case 2: Registry item timeline object
    elif(elem.tag == "RegistryItem"):
        timelineData.append(timelineEntry(elem.find("Modified").text, elem.tag, "Path", elem.find("Path").text.encode("utf-8")))
        timelineData[-1].addTimeDesc("Modified")
        if (elem.find("Text") is not None) and (elem.find("Text").text is not None):
            timelineData[-1].addEntry("Text",elem.find("Text").text.encode("utf-8"))
        if elem.find("Username") is not None:
            timelineData[-1].addUser(elem.find("Username").text.encode("utf-8"))
    # Case 3: Event log item timeline object
    elif(elem.tag == "EventLogItem"):
        if elem.find("message") is not None:
            # flatten newlines/tabs so the message stays on one row
            strippedMessage = elem.find("message").text.replace('\r\n', ' ')
            strippedMessage = strippedMessage.replace('\t',' ')
            strippedMessage = strippedMessage.replace('\n', ' ')
            timelineData.append(timelineEntry(elem.find("genTime").text, elem.tag, "Message", strippedMessage.encode("utf-8")))
        else: timelineData.append(timelineEntry(elem.find("genTime").text, elem.tag, "Message",""))
        timelineData[-1].addEntry("Log",elem.find("log").text)
        timelineData[-1].addTimeDesc("genTime")
        if elem.find("user") is not None:
            timelineData[-1].addUser(elem.find("user").text)
    # Case 4: URL History timeline object
    elif(elem.tag == "UrlHistoryItem"):
        timelineData.append(timelineEntry(elem.find("LastVisitDate").text, elem.tag, "URL", elem.find("URL").text.encode("utf-8")))
        timelineData[-1].addTimeDesc("LastVisitDate")
        timelineData[-1].addUser(elem.find("Username").text.encode("utf-8"))
    # Case 5: Process item timeline object
    elif(elem.tag == "ProcessItem") and (elem.find("path").text is not None):
        fullPath = elem.find("path").text+"\\"+elem.find("name").text
        timelineData.append(timelineEntry(elem.find("startTime").text, elem.tag, "FullPath", fullPath.encode("utf-8")))
        timelineData[-1].addTimeDesc("startTime")
        timelineData[-1].addEntry("pid",elem.find("pid").text)
        timelineData[-1].addUser(elem.find("Username").text.encode("utf-8"))
    # Case 6: Prefetch item — may contribute up to two rows (LastRun, Created)
    elif(elem.tag == "PrefetchItem") and (elem.find("LastRun") is not None) and (elem.find("Created") is not None) and (elem.find("ApplicationFullPath") is not None):
        # Need to check whether LastRun or Created falls inside the window.
        # NOTE(review): the LastRun test uses strict >/< while the Created
        # test uses >=/<= — confirm the boundary handling is intentional.
        if(elem.find("LastRun").text > startTime) and (elem.find("LastRun").text < endTime):
            timelineData.append(timelineEntry(elem.find("LastRun").text, elem.tag, "ApplicationFullPath", elem.find("ApplicationFullPath").text))
            timelineData[-1].addTimeDesc("LastRun")
            timelineData[-1].addEntry("FullPath", elem.find("FullPath").text)
        if(elem.find("Created").text >= startTime) and (elem.find("Created").text <= endTime):
            timelineData.append(timelineEntry(elem.find("Created").text, elem.tag, "ApplicationFullPath", elem.find("ApplicationFullPath").text))
            timelineData[-1].addTimeDesc("Created")
            timelineData[-1].addEntry("FullPath", elem.find("FullPath").text)
# Write the collected timeline as tab-delimited text
def printTimeline(timelineFile):
    """Sort the global timelineData chronologically and write the entries
    falling inside [startTime, endTime] to timelineFile."""
    handle = open(timelineFile, 'wb')
    writer = csv.writer(handle, dialect=csv.excel_tab)
    # Sort on the primary timestamp object.
    timelineData.sort(key=lambda entry: entry.timeObject)
    writer.writerow(["Timestamp", "Time Desc", "RowType", "User",
                     "EntryDesc", "EntryData", "Entry2Desc", "Entry2Data"])
    # Parse the window bounds once rather than per entry.
    windowStart = datetime.strptime(startTime, "%Y-%m-%dT%H:%M:%SZ")
    windowEnd = datetime.strptime(endTime, "%Y-%m-%dT%H:%M:%SZ")
    for entry in timelineData:
        if windowStart <= entry.timeObject <= windowEnd:
            writer.writerow(entry.getTimelineRow())
    handle.close()
def main():
# Handle arguments
parser = OptionParser()
parser.add_option("-i", "--input", help="XML input directory (req). NO TRAILING SLASHES", action="store", type="string", dest="inPath")
parser.add_option("-o", "--output", help="Output directory (req). NO TRAILING SLASHES", action="store", type="string", dest="outPath")
parser.add_option("-t", "--timeline", help="Build timeline, requires --starttime and --endtime", action="store_true", dest="doTimeline")
parser.add_option("--starttime", help="Start time, format yyyy-mm-ddThh:mm:ssZ", action="store", type="string", dest="startTime")
parser.add_option("--endtime", help="End time, format yyyy-mm-ddThh:mm:ssZ", action="store", type="string", dest="endTime")
(options, args) = parser.parse_args()
if(len(sys.argv) < 3) or (not options.inPath) or (not options.outPath):
parser.print_help()
sys.exit(-1)
global startTime
global endTime
global doTimeline
inPath = options.inPath
outPath = options.outPath
doTimeline = options.doTimeline or False
startTime = options.startTime
endTime = options.endTime
# Ensure user supplies time ranges for timeline option
if options.doTimeline and (not options.startTime or not options.endTime):
print "Timeline option requires --starttime and --endtime\n"
parser.print_help()
sys.exit(-1)
# Normalize input paths
if not inPath.endswith(os.path.sep):
inPath += os.path.sep
if not outPath.endswith(os.path.sep):
outPath += os.path.sep
# Iterate through and parse each input file
for filename in os.listdir(inPath):
#Simple match on filename to avoid having to open and parse initial XML to determine doctype
if (filename.find("issues") is -1) and (filename.find(".xml") is not -1) and (filename.find("BatchResults") is -1):
inFile = inPath + filename
outFile = outPath + filename+".txt"
# Parse XML into delimited text
print "Parsing input file: " + inFile
if (filename.find("persistence") > 0): parsePersistence(inFile, outFile)
elif (filename.find("prefetch") > 0): parsePrefetch(inFile, outFile)
else: parseXML(inFile,outFile)
#else: print "No more input XML files to parse!"
# Output timeline (if option enabled) once we're done processing
if(doTimeline):
print "Outputting timeline: " + outPath + "timeline.txt"
printTimeline(outPath+"timeline.txt")
if __name__ == "__main__":
main() | {
"repo_name": "mandiant/AuditParser",
"path": "AuditParser.py",
"copies": "2",
"size": "22433",
"license": "apache-2.0",
"hash": 1484268054679490800,
"line_mean": 45.5435684647,
"line_max": 460,
"alpha_frac": 0.6904560246,
"autogenerated": false,
"ratio": 3.346710428166493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5037166452766493,
"avg_score": null,
"num_lines": null
} |
# audit_payload.py
#
# Writes logs of jobs starting and stopping based on ClassAd updates.
#
# This works automatically in a condor-ce if worker nodes have the environment
# variable CONDORCE_COLLECTOR_HOST set to point to their site's condor-ce
# server, if the jobs are run with condor glideins such as with GlideinWMS.
#
# Otherwise a job should use
# condor_advertise -pool $CONDORCE_COLLECTOR_HOST UPDATE_STARTD_AD
# at the start of the job and
# condor_advertise -pool $CONDORCE_COLLECTOR_HOST INVALIDATE_STARTD_ADS
# at the end of the job, sending to both comannds' standard input at least
# these variables with format var = value, string values double-quoted:
# Name (string) - name identifying the worker node, typically in the
# format user@fully.qualified.domain.name
# SlotID (integer) - slot identifier number on the worker node
# MyType (string) - required to be set to the value of "Machine"
# The condor_advertise at the begining of the message must also contain
# GlobalJobId (string) - a globally unique identifier of the job
# RemoteOwner (string) - a string identifying the owner of the job
# and if the beginning of the message contains any of these they will
# also be logged:
# ClientMachine (string)
# ProjectName (string)
# Group (string)
# x509UserProxyVOName (string)
# x509userproxysubject (string)
# x509UserProxyFQAN (string)
# x509UserProxyEmail (string)
#
# There is one condor-ce configuration variable AUDIT_PAYLOAD_MAX_HOURS,
# which is optional and indicates the maximum number of hours any job
# is expected to run, default 72 hours (3 days). After that time the
# jobs will stop being tracked, in case a stop message was missed.
#
# Written by Dave Dykstra, June 2017
#
import htcondor
import time
import re
from collections import OrderedDict
# Dictionary containing all tracked running jobs.
# Each entry is for a 'master', which is either a pilot job/glidein or
# individual job.
# The index of the dictionary is a tuple of (mastername, slotid).
# The contents of each entry is a tuple of (starttime, jobs), where
# jobs is a dictionary of individual job names running in that master
# and each entry has a value of the GlobalJobId of that job.
# Insertion-ordered so the oldest masters are at the front, which lets
# startjob() scan for expired jobs from the beginning and stop early.
runningmasters = OrderedDict()
# Maximum hours any payload job is expected to run; after this a job is
# dropped from tracking in case its stop message was lost.
if 'AUDIT_PAYLOAD_MAX_HOURS' in htcondor.param:
    maxjobhours = int(htcondor.param['AUDIT_PAYLOAD_MAX_HOURS'])
else:
    maxjobhours = 3 * 24
htcondor.log(htcondor.LogLevel.Audit,
    "Audit payload maximum job hours: %d" % maxjobhours)
maxjobsecs = maxjobhours * 60 * 60
# a job may be being stopped
def stopjob(info):
    # Log "Job stop" for the job(s) matching the ClassAd dict `info` and
    # remove them from the runningmasters tracking structure.
    # For glideins, stopping a master name or a "slotN@" name fans out to
    # all contained slot names via a regular expression.
    global runningmasters
    if 'Name' not in info or 'SlotID' not in info:
        return
    name = info['Name']
    matchre = ""
    if 'GLIDEIN_MASTER_NAME' in info:
        idxname = info['GLIDEIN_MASTER_NAME']
        if idxname == name:
            # stop all jobs under this master
            matchre = '.*'
        else:
            # names of form "slotN@" stop that name and all "slotN_M@" names
            slotn = re.sub('^(slot[0-9]*)@.*', r'\1', name)
            if slotn != name:
                # match any name starting with slotN@ or slotN_
                matchre = '^' + slotn + '[@_]'
            # else take the default of matching only one name
    else:
        idxname = name
    idx = (idxname, info['SlotID'])
    if idx not in runningmasters:
        return
    runningjobs = runningmasters[idx][1]
    if matchre == "":
        # no match expression, just stop one
        if name not in runningjobs:
            return
        stopjobnames = [name]
    else:
        # select all jobs in this master
        # NOTE(review): Python 2 semantics — keys() returns a list and
        # filter() returns a list, so deleting from runningjobs below is
        # safe; under Python 3 these would be lazy views and the deletes
        # would break the iteration. Confirm target interpreter.
        stopjobnames = runningjobs.keys()
        if matchre != '.*':
            # restrict to the matching regular expression
            regex = re.compile(matchre)
            stopjobnames = filter(regex.search, stopjobnames)
    for stopjobname in stopjobnames:
        loginfo = {}
        loginfo['Name'] = stopjobname
        loginfo['SlotID'] = info['SlotID']
        loginfo['GlobalJobId'] = runningjobs[stopjobname]
        htcondor.log(htcondor.LogLevel.Audit, "Job stop: %s" % loginfo)
        del runningjobs[stopjobname]
    if len(runningjobs) == 0:
        # last job gone: forget the master entirely
        del runningmasters[idx]
# a job may be being started
def startjob(info):
    # Log "Job start" for the ClassAd dict `info`, record it under its
    # master in runningmasters, and opportunistically expire masters older
    # than maxjobsecs from the front of the (insertion-ordered) dict.
    global maxjobsecs
    global runningmasters
    if 'Name' not in info or 'SlotID' not in info or 'GlobalJobId' not in info:
        return
    name = info['Name']
    if 'GLIDEIN_MASTER_NAME' in info:
        # Glidein may be partitioned and sometimes tear down all contained
        # slots at once, so need to track those slots together
        idxname = info['GLIDEIN_MASTER_NAME']
    else:
        idxname = name
    idx = (idxname, info['SlotID'])
    globaljobid = info['GlobalJobId']
    now = 0
    if idx in runningmasters:
        thismaster = runningmasters[idx]
        runningjobs = thismaster[1]
        if name in runningjobs:
            if globaljobid == runningjobs[name]:
                # just an update to a running job, ignore
                return
            # first stop the existing job, the slot is being reused
            stopjob(info)
    # this may have removed the last job in thismaster, check again
    if idx not in runningmasters:
        # new master
        now = time.time()
        thismaster = (now, {})
        runningmasters[idx] = thismaster
    # add job to this master
    thismaster[1][name] = globaljobid
    # Log only the known-interesting keys that are present.
    # NOTE(review): 'x509UserProxyFQAN' is listed in the file header but is
    # missing from this keys list — confirm whether it should be logged.
    printinfo = {}
    keys = ['Name', 'SlotID', 'GlobalJobId',
        'RemoteOwner', 'ClientMachine', 'ProjectName', 'Group',
        'x509UserProxyVOName', 'x509userosubject' if False else 'x509userproxysubject', 'x509UserProxyEmail']
    for key in keys:
        if key in info:
            printinfo[key] = info[key]
    htcondor.log(htcondor.LogLevel.Audit, "Job start: %s" % printinfo)
    if now == 0:
        return
    # also look for expired jobs at the beginning of the list and stop them
    # NOTE(review): deleting from runningmasters inside this for-loop raises
    # RuntimeError ("changed size during iteration") on the iteration after
    # a delete unless the loop always breaks first — confirm/guard.
    for idx in runningmasters:
        thismaster = runningmasters[idx]
        deltasecs = int(now - thismaster[0])
        if deltasecs <= maxjobsecs:
            break
        loginfo = {}
        loginfo['SlotID'] = idx[1]
        runningjobs = thismaster[1]
        for jobname in runningjobs:
            loginfo['Name'] = jobname
            loginfo['GlobalJobId'] = runningjobs[jobname]
            htcondor.log(htcondor.LogLevel.Audit,
                "Cleaning up %d-second expired job: %s" % (deltasecs, loginfo))
        del runningmasters[idx]
# this is the primary entry point called by the API
def update(command, ad):
    """Handle a collector update: an Unclaimed slot means any previous job
    on that slot has stopped; anything else is a (possible) job start."""
    if command != "UPDATE_STARTD_AD":
        return
    if ad.get('State') == 'Unclaimed':
        # slot went idle; stop previous job on this slot if any
        stopjob(ad)
    else:
        startjob(ad)
# this can also be called from the API when a job or slot is deleted
def invalidate(command, ad):
    """Handle an invalidation: stop the job(s) for this ad; ignore any
    command other than INVALIDATE_STARTD_ADS."""
    if command == "INVALIDATE_STARTD_ADS":
        stopjob(ad)
| {
"repo_name": "djw8605/htcondor-ce",
"path": "src/audit_payloads.py",
"copies": "4",
"size": "6954",
"license": "apache-2.0",
"hash": -6933556926483869000,
"line_mean": 35.4083769634,
"line_max": 80,
"alpha_frac": 0.6463905666,
"autogenerated": false,
"ratio": 3.665788086452293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6312178653052293,
"avg_score": null,
"num_lines": null
} |
# audit.py
# Code to implement Bayes post-election audit
# Ronald L. Rivest and Emily Shen
# June 23, 2012
"""
----------------------------------------------------------------------
This code available under "MIT License" (open source).
Copyright (C) 2012 Ronald L. Rivest and Emily Shen.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
"""
# See notes.txt for documentation
import hashlib
import json
import string
import os
import random
import sys
import time
import bayes
import sampler
indent = " "  # prefix used when printing values beneath a "---" label
def printstr(s):
    """Print string s followed by a newline.

    (Will later be adjusted to also save a copy to a log file.)
    """
    print(s)
def printvarvalue(varname,value):
    """Print a variable name on one line and its value, indented, on the next."""
    for line in ("--- " + varname + ":", indent + str(value)):
        printstr(line)
# Usage summary printed when audit.py is invoked with no arguments.
usage = """
Usage: audit.py command parameters
command is one of: help set shuffle audit
try 'audit.py help' for more help
'audit.py help command' for help on specific command
"""
def main():
    # Dispatch on the first command-line argument; with no arguments just
    # print the usage message and exit.
    if len(sys.argv)==1:
        printstr(usage)
        quit()
    # Banner: version, start time, and the command being run.
    printstr("--- Bayes Post-Election Audit Utility (version 2012-06-04 by R.L. Rivest and E. Shen)")
    printstr("--- Start: "+time.asctime())
    printstr("--- Command:")
    printstr(indent+string.join(sys.argv[1:]," "))
    command = sys.argv[1]
    if command=="set":
        set_var()
    elif command=="shuffle":
        shuffle()
    elif command=="audit":
        audit()
    elif command=="help":
        help(sys.argv[2:])
    else:
        print "--- Error: unrecognized command:",sys.argv
    printstr("--- Done: "+time.asctime())
def hash_file(filename):
    """ Return SHA-256 hex digest (a string) of the contents of the file
    with the given filename.

    Returns 0 (historical behavior, preserved for callers) after printing
    an error if the file does not exist.
    """
    if not os.path.exists(filename):
        printstr("Error: file does not exist:"+filename)
        return 0
    # Fix: open in binary mode so the digest covers the raw bytes
    # (text mode would newline-translate on some platforms and feeds
    # str rather than bytes to hashlib), and use a with-statement so
    # the handle is always closed (it was previously leaked).
    with open(filename,"rb") as file:
        data = file.read()
    return hashlib.sha256(data).hexdigest()
# Per-command help texts; key "" holds the generic command overview.
help_txt = dict()
help_txt[""] = """
--- Available commands for audit.py:
--- help
--- help command
--- set dirname varname value
--- shuffle dirname
--- audit dirname
"""
help_txt["help"] = """
--- Command: help command
--- where command is one of: set shuffle audit
"""
help_txt["set"] = """
--- Command: set dirname varname value
--- where dirname is directory for desired contest
--- where varname is one of seed or audit_type
--- where value is arbitrary string if varname is seed (random number seed)
--- where value is one of N or P or NP if varname is audit_type
--- File with name varname.js created in the given directory
"""
help_txt["shuffle"] = """
--- Command: shuffle dirname
--- where dirname is directory for desired contest
--- Assumes that directory contains file reported.js
--- Creates new file with name shuffle.js that is shuffle of reported.js
--- Uses given random number seed from seed.js (produced with set)
--- Removes reported choices and replaces them with null string choices
--- shuffle.js can be renamed as actual.js and then filled in with actual choices
--- as auditing proceeds
"""
help_txt["audit"] = """
--- Command: audit dirname
--- where dirname is directory for desired contest
--- assumes directory contest contains reported.js and actual.js
--- file audit_type.js may optionally be present
--- Performs bayes audit of given contest, printing out for
--- each alternative an upper bound on probability that it is winner.
"""
def help(command_list):
    """ Print generic help or help for specific command.
    help -- print generic help
    help command -- print help for given command
    """
    # An empty argument list selects the generic overview entry "".
    command = command_list[0] if command_list else ""
    printstr(help_txt[command])
def set_var():
    """ audit.py set dirname varname value
    Create a file varname.js in the current directory
    and set contents to be "value" .
    As of now, only allowed varnames are "seed" and "audit_type"
    """
    allowed_varnames = [ "seed", "audit_type" ]
    if not len(sys.argv)==5:
        printstr("--- Error: incorrect number of arguments for set:"+str(len(sys.argv)-1))
        printstr("--- Usage: audit.py set dirname varname value")
        return
    dirname = os.path.realpath(sys.argv[2])
    varname = sys.argv[3]
    value = sys.argv[4]
    if not os.path.isdir(dirname):
        printstr("--- Error: first parameter not an existing directory:"+dirname)
        printstr("--- Usage: audit.py set dirname varname value")
        return
    contest = os.path.basename(dirname)
    printvarvalue("Contest",contest)
    printvarvalue("Contest directory",dirname)
    if varname not in allowed_varnames:
        printstr("--- Error: only the following varnames may be set: "+str(allowed_varnames))
        return
    printstr("--- Setting value for `%s' for contest `%s'"%(varname,contest))
    if varname=="audit_type" and value not in ["N","P","NP"]:
        printstr("""--- Error: value for audit_type must be one of N, P, or NP """)
        return
    filename = os.path.join(dirname,varname+".js")
    printvarvalue("Writing value to file with name",filename)
    value_data = json.dumps(value)
    # Fix: use a with-statement so the output file is flushed and closed
    # (the handle was previously never closed).
    with open(filename,"w") as file:
        file.write(value_data+"\n")
    printvarvalue("New value",value_data)
def shuffle():
    """
    audit.py shuffle dirname
    Produce an audit order for this audit.
    Assumes that seed.js has been set, e.g. by a command
    of the form "set seed 3544523"
    """
    if not len(sys.argv)==3:
        printstr("--- Error: incorrect number of arguments for shuffle:"+str(len(sys.argv)-1))
        # Fix: this usage line previously showed the `set' command's usage
        # (copy-paste error).
        printstr("--- Usage: audit.py shuffle dirname")
        return
    dirname = os.path.realpath(sys.argv[2])
    if not os.path.isdir(dirname):
        printstr("--- Error: not an existing directory:"+dirname)
        printstr("--- Usage: audit.py shuffle dirname")
        return
    contest = os.path.basename(dirname)
    printvarvalue("Contest",contest)
    printvarvalue("Contest directory",dirname)
    # Fix: with-statements so all three files are always closed
    # (seed_file and reported_file were previously leaked).
    seed_filename = os.path.join(dirname,"seed.js")
    with open(seed_filename,"r") as seed_file:
        seed = json.load(seed_file)
    printvarvalue("Seed",seed)
    reported_filename = os.path.join(dirname,"reported.js")
    with open(reported_filename,"r") as reported_file:
        reported = json.load(reported_file)
    n = len(reported)
    printvarvalue("Number of reported ballots",n)
    # Deterministic pseudo-random shuffle driven by the published seed.
    skip, sample = sampler.generate_outputs(n,False,0,n-1,seed,0)
    shuffled_filename = os.path.join(dirname,"shuffled.js")
    ids = sorted(reported.keys())
    shuffled_ids = [ ids[sample[i]] for i in xrange(len(sample)) ]
    with open(shuffled_filename,"w") as shuffled_file:
        # Write JSON by hand so each ballot id is on its own line,
        # with the reported choices replaced by empty strings.
        shuffled_file.write("{\n")
        for i,id in enumerate(shuffled_ids):
            shuffled_file.write(' "' + str(id) + '": ""')
            if i+1<len(shuffled_ids):
                shuffled_file.write(",")
            shuffled_file.write("\n")
        shuffled_file.write("}\n")
    printvarvalue("Filename for shuffled file written, and hash of shuffled file",shuffled_filename)
    printstr(indent+"hash:"+hash_file(shuffled_filename))
def audit():
    """
    audit.py audit dirname

    Run a Bayes audit for the contest in the given directory.  Reads
    actual.js (audited ballots) and, if present, reported.js; if
    reported.js is absent the audit is a `ballot-polling' audit.
    Prints, per alternative, an upper bound on its winning probability.
    """
    dirname = os.path.realpath(sys.argv[2])
    if not os.path.isdir(dirname):
        printstr("--- Error: not an existing directory:"+dirname)
        printstr("--- Usage: audit.py audit dirname")
        return
    contest = os.path.basename(dirname)
    printvarvalue("Contest",contest)
    printvarvalue("Contest directory",dirname)
    actual_filename = "actual.js"
    full_actual_filename = os.path.join(dirname,actual_filename)
    printvarvalue("Filename for Actual Ballots and Hash of Actual Ballots File",actual_filename)
    printstr(indent+"hash:"+hash_file(full_actual_filename))
    actual_file = open(full_actual_filename,"r")
    actual = json.load(actual_file)
    printvarvalue("Number of actual ballots",len(actual))
    distinct_actual_choices = sorted(set(actual.values()))
    printstr("--- Distinct actual choices (alphabetical order):")
    for choice in distinct_actual_choices:
        if choice == "":
            # "" marks a ballot not yet audited (no choice recorded).
            printstr(indent+'"" (no choice given)')
        else:
            printstr(indent+choice)
    reported_filename = "reported.js"
    full_reported_filename = os.path.join(dirname,reported_filename)
    if os.path.exists(full_reported_filename):
        # Comparison audit: both reported and actual votes available.
        printvarvalue("Filename for Reported Ballots and Hash of Reported Ballots File",reported_filename)
        printstr(indent+"hash:"+hash_file(full_reported_filename))
        reported_file = open(full_reported_filename,"r")
        reported = json.load(reported_file)
        printvarvalue("Number of reported ballots",len(reported))
        printstr("--- Both actual and reported ballot types available, so audit will be a `comparison' audit.")
        ballot_polling = False
        distinct_reported_choices = sorted(set(reported.values()))
        printstr("--- Distinct reported choices (alphabetical order):")
        for choice in distinct_reported_choices:
            printstr(indent+choice)
        all_choices = sorted(set(distinct_actual_choices).union(set(distinct_reported_choices)))
        # check that actual choices are also reported choices?
        for choice in distinct_actual_choices:
            if choice != "" and choice not in distinct_reported_choices:
                printstr("--- Warning:")
                printstr(' actual choice "%s" not in reported choices; possible typo?'%choice)
                printstr(" (no need to do anything if this is not a typo...)")
        # check that no ballots are added in actual
        for id in actual.keys():
            if not id in reported:
                if id == '':
                    # Re-use the loop variable purely for display purposes.
                    id = '""'
                printstr("--- Warning:")
                printstr(' Actual ballot id "%s" not in reported ballot ids!'%id)
                printstr(" (This ballot will be ignored in this audit.)")
    else:
        # Ballot-polling audit: only the audited (actual) votes exist.
        printstr( "--- No file of reported ballots (%s) given."%reported_filename)
        printstr("--- Audit will therefore be a `ballot-polling' audit.")
        ballot_polling = True
        all_choices = distinct_actual_choices
    # printstr("--- All choices (alphabetical order):")
    # for choice in all_choices:
    #     printstr(indent+choice)
    # set seed for package random from seed.js
    seed_filename = os.path.join(dirname,"seed.js")
    if not os.path.exists(seed_filename):
        printstr("Error: seed file doesn't exist at filename:"+seed_filename)
    seed_file = open(seed_filename,"r")
    seed = json.load(seed_file)
    random.seed(seed)
    # Audited ballots are those with a non-empty actual choice (and, for a
    # comparison audit, also present among the reported ballots).
    if not ballot_polling:
        audited_ids = set([ id for id in actual if id in reported and actual[id] != "" ])
        unaudited_ids = set([ id for id in reported if id not in audited_ids ])
    else:
        audited_ids = set([ id for id in actual if actual[id] != "" ])
        unaudited_ids = set([ id for id in actual if id not in audited_ids ])
    printvarvalue("Number of audited ballots",len(audited_ids))
    printvarvalue("Number of unaudited ballots",len(unaudited_ids))
    # give indices to all bids in audit
    # (1..s are audited ballots, s+1..n the unaudited remainder)
    i_to_id = dict()
    id_to_i = dict()
    i = 1
    for id in sorted(audited_ids):
        i_to_id[i] = id
        id_to_i[id] = i
        i += 1
    for id in sorted(unaudited_ids):
        i_to_id[i] = id
        id_to_i[id] = i
        i += 1
    # give indices to all choices
    j_to_choice = dict()
    choice_to_j = dict()
    j = 1
    for choice in all_choices:
        j_to_choice[j] = choice
        choice_to_j[choice] = j
        j = j + 1
    # now create r and a arrays
    # (1-indexed; position 0 holds the sentinel `dummy')
    dummy = -9
    t = len(all_choices)
    s = len(audited_ids)
    n = len(audited_ids)+len(unaudited_ids)
    # printvarvalue("n",n)
    # printvarvalue("s",s)
    # printvarvalue("t",t)
    r = [ dummy ]
    a = [ dummy ]
    if not ballot_polling:
        # count[j][k] = number of audited ballots reported j, actually k.
        count = [dummy]+[[dummy] + [0]*t for k in range(t+1)]
        for i in range(1,s+1):
            j = choice_to_j[reported[i_to_id[i]]]
            k = choice_to_j[actual[i_to_id[i]]]
            r.append(j)
            a.append(k)
            count[j][k] += 1
        for i in range(s+1,n+1):
            j = choice_to_j[reported[i_to_id[i]]]
            k = 1 # doesn't matter
            r.append(j)
            a.append(k)
        # printvarvalue("r",r)
        # printvarvalue("a",a)
    else: # ballot_polling
        # count[k] = number of audited ballots with actual choice k.
        count = [dummy] + [0]*t
        for i in range(1,s+1):
            k = choice_to_j[actual[i_to_id[i]]]
            r.append(1) # fixed value
            a.append(k)
            count[k] += 1
        for i in range(s+1,n+1):
            j = choice_to_j[actual[i_to_id[i]]]
            k = 1 # doesn't matter
            r.append(j)
            a.append(k)
    # Social choice function: plurality.
    f = bayes.f_plurality
    audit_type = "NP"
    audit_type_filename = os.path.join(dirname,"audit_type.js")
    if os.path.exists(audit_type_filename):
        audit_type_file=open(audit_type_filename,"r")
        audit_type=json.load(audit_type_file)
        if audit_type not in ["P","NP","N"]:
            printstr("Error: audit_type.js contains illegal audit.type,"+audit_type)
            printstr(' (assumed to be "NP")')
            audit_type = "NP"
    print "--- Audit type:"
    if ballot_polling:
        printstr(" ballot-polling audit")
    else:
        printstr(" comparison audit")
    printstr(" %s-type"%audit_type)
    if audit_type == "P":
        printstr(" "+"(Partisan priors)")
    elif audit_type == "N":
        printstr(indent+"(Nonpartisan prior)")
    else:
        printstr(indent+ "(Nonpartisan prior and also Partisan priors)")
    prior_list = bayes.make_prior_list(audit_type,t,ballot_polling)
    # print prior
    if not ballot_polling:
        # print out reported winner
        reported_winner = j_to_choice[f(bayes.tally(r,t))]
        printvarvalue("Reported winner",reported_winner)
    # print out win probabilities (assumes plurality voting)
    # Take, per choice, the max win probability over all priors.
    max_wins = dict()
    max_u = 0
    for prior in prior_list:
        wins,u,z = bayes.win_probs(r,a,t,s,n,count,ballot_polling,f,prior)
        for j in wins.keys():
            max_wins[j] = max(wins[j],max_wins.get(j,0))
        max_u = max(max_u,u)
    L = sorted([(max_wins[j],j_to_choice[j]) for j in max_wins.keys()],reverse=True)
    print "--- Estimated maximum winning probabilities:"
    for (maxp,c) in L:
        printstr(indent+"%6.4f"%maxp+" "+str(c))
    if ballot_polling:
        printstr("--- Estimated maximum probability that actual winner is not "+str(L[0][1])+":")
    else:
        printstr("--- Estimated maximum probability that actual winner is not "+str(reported_winner)+":")
    printstr(indent+str(max_u))
def status():
    """
    audit.py status dirname

    Give a status report on the named contest, including a note of any
    discrepancies between actual and reported ballots.
    """
    # TBD -- not implemented yet; intentionally a no-op.
    pass
# import cProfile
# cProfile.run("main()")
# Script entry point: runs unconditionally on import/execution.
main()
| {
"repo_name": "ron-rivest/2017-bayes-audit",
"path": "from-2012-bayes-audit/audit.py",
"copies": "1",
"size": "16272",
"license": "mit",
"hash": 4332667874857870300,
"line_mean": 35,
"line_max": 111,
"alpha_frac": 0.6198992134,
"autogenerated": false,
"ratio": 3.647612642905178,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47675118563051777,
"avg_score": null,
"num_lines": null
} |
# audit.py
# Ronald L. Rivest (with Karim Husayn Karimi)
# July 18, 2017
# python3
"""
Routines to work with multi.py on post-election audits.
"""
import os
import time
import multi
import csv_readers
import ids
import outcomes
import planner
import risk_bayes
import saved_state
import utils
##############################################################################
# Random number generation
##############################################################################
# see numpy.random.RandomState documentation and utils.RandomState
# Random states used in this program:
# auditRandomState -- controls random sampling and other audit aspects
##############################################################################
# Audit I/O and validation
##############################################################################
def draw_sample(e):
    """
    "Draw sample", tally it, save sample tally in
    e.sn_tcpra[stage_time][cid][pbcid].
    Update e.sn_tcpr
    Draw sample is in quotes since it just looks at the first
        e.sn_tp[stage_time][pbcid]
    elements of
        e.av_cpb[cid][pbcid].
    Code sets
        e.sn_tcpr[e.stage_time][cid][pbcid][r]
    to number of votes in sample with reported vote r.
    Code sets
        e.sn_tp
    to number of ballots sampled in each pbc (equal to plan).
    Note that in real life actual sampling number might be different than planned;
    here it will be the same. But code elsewhere allows for such differences.
    """
    if "plan_tp" in e.saved_state:
        # Resume: use the sample-size plan computed at the previous stage.
        e.sn_tp[e.stage_time] = e.saved_state["plan_tp"][e.saved_state["stage_time"]]
    else:
        # First stage: sample at each collection's maximum audit rate.
        e.sn_tp[e.stage_time] = { pbcid: int(e.max_audit_rate_p[pbcid])
                                  for pbcid in e.pbcids }
    e.sn_tcpr[e.stage_time] = {}
    for cid in e.cids:
        e.sn_tcpra[e.stage_time][cid] = {}
        e.sn_tcpr[e.stage_time][cid] = {}
        # Use "sorted" in next line to preserve deterministic operation.
        for pbcid in sorted(e.possible_pbcid_c[cid]):
            e.sn_tcpr[e.stage_time][cid][pbcid] = {}
            sample_size = int(e.sn_tp[e.stage_time][pbcid])
            # The "sample" is just a prefix of the ballot-id list.
            sample_bids = e.bids_p[pbcid][:sample_size]
            avs = []
            rvs = []
            for bid in sample_bids:
                # actual
                if bid in e.av_cpb[cid][pbcid]:
                    avs.append(e.av_cpb[cid][pbcid][bid])
                else:
                    avs.append(("-NoSuchContest",))
                # reported
                if bid in e.rv_cpb[cid][pbcid]:
                    rvs.append(e.rv_cpb[cid][pbcid][bid])
                else:
                    rvs.append(("-NoSuchContest",))
            arvs = list(zip(avs, rvs)) # list of (actual, reported) vote pairs
            e.sn_tcpra[e.stage_time][cid][pbcid] = outcomes.compute_tally2(arvs)
            # Per reported vote r, count how many sampled ballots reported r.
            for r in e.rn_cpr[cid][pbcid]:
                e.sn_tcpr[e.stage_time][cid][pbcid][r] = len(
                    [rr for rr in rvs if rr == r])
def show_sample_counts(e):
    """Print, per contest and collection, sampled tallies grouped by reported vote."""
    utils.myprint(" Total sample counts by Contest.PaperBallotCollection[reported selection]"
                  "and actual selection:")
    for cid in e.cids:
        for pbcid in sorted(e.possible_pbcid_c[cid]):
            tally2 = e.sn_tcpra[e.stage_time][cid][pbcid]
            for reported_vote in sorted(tally2.keys()):
                utils.myprint(" {}.{}[{}]".format(cid, pbcid, reported_vote), end='')
                actual_counts = tally2[reported_vote]
                for actual_vote in sorted(actual_counts.keys()):
                    utils.myprint(" {}:{}".format(actual_vote, actual_counts[actual_vote]),
                                  end='')
                utils.myprint(" total:{}".format(
                    e.sn_tcpr[e.stage_time][cid][pbcid][reported_vote]))
##############################################################################
# Compute status of each contest and of election
def compute_statuses(e):
    """
    Compute status of each measurement and of election, from
    already-computed measurement risks.
    """
    for mid in e.mids:
        # Measurement transition from Open to any of
        # Exhausted, Passed, or Upset, but not vice versa.
        # Start from the status recorded at the previous stage.
        e.status_tm[e.stage_time][mid] = \
            e.saved_state["status_tm"][e.saved_state["stage_time"]][mid]
        if e.status_tm[e.stage_time][mid] == "Open":
            # Exhausted: every relevant collection is fully sampled.
            if all([e.rn_p[pbcid] == e.sn_tp[e.stage_time][pbcid]
                    for cid in e.possible_pbcid_c
                    for pbcid in e.possible_pbcid_c[cid]]):
                e.status_tm[e.stage_time][mid] = "Exhausted"
            # Passed: risk fell below the risk limit.
            elif e.risk_tm[e.stage_time][mid] < e.risk_limit_m[mid]:
                e.status_tm[e.stage_time][mid] = "Passed"
            # Upset: risk exceeded the upset threshold.
            elif e.risk_tm[e.stage_time][mid] > e.risk_upset_m[mid]:
                e.status_tm[e.stage_time][mid] = "Upset"
    # Election status = sorted set of all measurement statuses.
    e.election_status_t[e.stage_time] = \
        sorted(list(set([e.status_tm[e.stage_time][mid]
                         for mid in e.mids])))
def show_risks_and_statuses(e):
    """
    Show election and contest statuses for current stage.
    """
    utils.myprint((" Risk (that reported outcome is wrong)"
                   "and measurement status per mid:"))
    for mid in e.mids:
        # One line per measurement: id, contest, method, mode, risk, limits, status.
        utils.myprint(" ",
                      mid,
                      e.cid_m[mid],
                      e.risk_method_m[mid],
                      e.sampling_mode_m[mid],
                      "Risk={}".format(e.risk_tm[e.stage_time][mid]),
                      "(limits {},{})".format(e.risk_limit_m[mid],
                                              e.risk_upset_m[mid]),
                      e.status_tm[e.stage_time][mid])
    utils.myprint(" Election status:", e.election_status_t[e.stage_time])
##############################################################################
# Audit spec
def set_audit_seed(e, new_audit_seed):
    """
    Set e.audit_seed to new value (but only if not already set).
    The idea is that the command line may set the audit seed to a non-None
    value first, in which case it is "sticky" and thus overrides any
    setting that might be in the audit seed file.
    This routine also sets the global auditRandomState.

    Note: the "only if not already set" policy is enforced by the caller
    (read_audit_spec_seed); this routine itself always assigns.
    """
    global auditRandomState
    e.audit_seed = new_audit_seed
    # audit_seed might be None if no command-line argument given
    auditRandomState = utils.RandomState(e.audit_seed)
    # if seed is None (which happens if no command line value is given),
    # utils.RandomState uses clock or other variable process-state
    # parameters (via np.random.RandomState)
def read_audit_spec(e, args):
    """Read the full audit spec (global, contest, collection, seed) and check it."""
    read_audit_spec_global(e, args)
    read_audit_spec_contest(e, args)
    read_audit_spec_collection(e, args)
    read_audit_spec_seed(e, args)
    check_audit_spec(e)
def read_audit_spec_global(e, args):
    """ Read 3-audit/31-audit-spec/audit-spec-global.csv

    Currently the only recognized parameter is "Max audit stage time",
    stored (as a string) in e.max_stage_time.
    """
    audit_spec_pathname = os.path.join(multi.ELECTIONS_ROOT,
                                       e.election_dirname,
                                       "3-audit",
                                       "31-audit-spec")
    filename = utils.greatest_name(audit_spec_pathname,
                                   "audit-spec-global",
                                   ".csv")
    file_pathname = os.path.join(audit_spec_pathname, filename)
    fieldnames = ["Global Audit Parameter", "Value"]
    for row in csv_readers.read_csv_file(file_pathname, fieldnames, varlen=False):
        if row["Global Audit Parameter"] == "Max audit stage time":
            e.max_stage_time = row["Value"]
def read_audit_spec_contest(e, args):
    """ Read 3-audit/31-audit-spec/audit-spec-contest.csv

    One row per risk measurement (mid).  Appends each mid to e.mids and
    fills the per-mid dicts: e.cid_m, e.risk_method_m, e.risk_limit_m,
    e.risk_upset_m, e.sampling_mode_m, e.initial_status_m, and
    e.risk_measurement_parameters_m.
    """
    election_pathname = os.path.join(multi.ELECTIONS_ROOT,
                                     e.election_dirname)
    audit_spec_pathname = os.path.join(election_pathname,
                                       "3-audit",
                                       "31-audit-spec")
    filename = utils.greatest_name(audit_spec_pathname,
                                   "audit-spec-contest",
                                   ".csv")
    file_pathname = os.path.join(audit_spec_pathname, filename)
    fieldnames = ["Measurement id",
                  "Contest",
                  "Risk Measurement Method",
                  "Risk Limit",
                  "Risk Upset Threshold",
                  "Sampling Mode",
                  "Initial Status",
                  "Param 1",
                  "Param 2"]
    rows = csv_readers.read_csv_file(file_pathname, fieldnames, varlen=False)
    # (Fix: removed a leftover debug print of e.mids; user-facing output in
    # this module goes through utils.myprint.)
    for row in rows:
        mid = row["Measurement id"]
        e.mids.append(mid)
        e.cid_m[mid] = row["Contest"]
        e.risk_method_m[mid] = row["Risk Measurement Method"]
        e.risk_limit_m[mid] = float(row["Risk Limit"])
        e.risk_upset_m[mid] = float(row["Risk Upset Threshold"])
        e.sampling_mode_m[mid] = row["Sampling Mode"]
        e.initial_status_m[mid] = row["Initial Status"]
        e.risk_measurement_parameters_m[mid] = (row["Param 1"], row["Param 2"])
def read_audit_spec_collection(e, args):
    """ Read 3-audit/31-audit-spec/audit-spec-collection.csv

    Fills e.max_audit_rate_p[pbcid] (int) for each listed collection.
    """
    audit_spec_pathname = os.path.join(multi.ELECTIONS_ROOT,
                                       e.election_dirname,
                                       "3-audit",
                                       "31-audit-spec")
    filename = utils.greatest_name(audit_spec_pathname,
                                   "audit-spec-collection",
                                   ".csv")
    file_pathname = os.path.join(audit_spec_pathname, filename)
    fieldnames = ["Collection", "Max audit rate"]
    for row in csv_readers.read_csv_file(file_pathname, fieldnames, varlen=False):
        e.max_audit_rate_p[row["Collection"]] = int(row["Max audit rate"])
def read_audit_spec_seed(e, args):
    """
    Read audit seed from 3-audit/31-audit-spec/audit-spec-seed.csv
    Do not overwrite e.audit_seed if it was non-None
    because this means it was already set from the command line.
    """
    election_pathname = os.path.join(multi.ELECTIONS_ROOT,
                                     e.election_dirname)
    audit_spec_pathname = os.path.join(election_pathname,
                                       "3-audit",
                                       "31-audit-spec")
    filename = utils.greatest_name(audit_spec_pathname,
                                   "audit-spec-seed",
                                   ".csv")
    file_pathname = os.path.join(audit_spec_pathname, filename)
    fieldnames = ["Audit seed"]
    rows = csv_readers.read_csv_file(file_pathname, fieldnames, varlen=False)
    for row in rows:
        new_audit_seed = row["Audit seed"]
        # Fix: identity comparison with None should use `is None` (was `== None`).
        if e.audit_seed is None:
            set_audit_seed(e, new_audit_seed)
def check_audit_spec(e):
    """Sanity-check the parsed audit spec; warn on bad entries, abort if any warnings."""
    if not isinstance(e.risk_limit_m, dict):
        utils.myerror("e.risk_limit_m is not a dict.")
    for mid, limit in e.risk_limit_m.items():
        if mid not in e.mids:
            utils.mywarning("e.risk_limit_m mid key `{}` is not in e.mids."
                            .format(mid))
        if not (0.0 <= float(limit) <= 1.0):
            utils.mywarning("e.risk_limit_m[{}] not in interval [0,1]".format(mid))
    if not isinstance(e.max_audit_rate_p, dict):
        utils.myerror("e.max_audit_rate_p is not a dict.")
    for pbcid, rate in e.max_audit_rate_p.items():
        if pbcid not in e.pbcids:
            utils.mywarning("pbcid `{}` is a key for e.max_audit_rate_p but not in e.pbcids."
                            .format(pbcid))
        if int(rate) < 0:
            utils.mywarning("e.max_audit_rate_p[{}] must be nonnegative.".format(pbcid))
    if utils.warnings_given > 0:
        utils.myerror("Too many errors; terminating.")
def show_audit_spec(e):
    """Print the audit spec: seed, per-mid parameters, rates, and hyperparameters."""
    utils.myprint("====== Audit spec ======")
    utils.myprint("Seed for audit pseudorandom number generation (e.audit_seed):")
    utils.myprint(" {}".format(e.audit_seed))
    utils.myprint(("Risk Measurement ids (e.mids) with contest,"
                   "method, risk limit, and upset threshold, and sampling mode:"))
    for mid in e.mids:
        utils.myprint(" {}: {}, {}, {}, {}, {}"
                      .format(mid,
                              e.cid_m[mid],
                              e.risk_method_m[mid],
                              e.risk_limit_m[mid],
                              e.risk_upset_m[mid],
                              e.sampling_mode_m[mid]))
    utils.myprint("Max number of ballots audited/day (e.max_audit_rate_p):")
    for pbcid in sorted(e.pbcids):
        utils.myprint(" {}:{}".format(pbcid, e.max_audit_rate_p[pbcid]))
    utils.myprint("Max allowed start time for any stage (e.max_stage_time):")
    utils.myprint(" {}".format(e.max_stage_time))
    utils.myprint("Number of trials used to estimate risk"
                  " in compute_contest_risk (e.n_trials):")
    utils.myprint(" {}".format(e.n_trials))
    # Dirichlet hyperparameters used by the Bayes risk measurement.
    utils.myprint("Dirichlet hyperparameter for base case or non-matching reported/actual votes")
    utils.myprint("(e.pseudocount_base):")
    utils.myprint(" {}".format(e.pseudocount_base))
    utils.myprint("Dirichlet hyperparameter for matching reported/actual votes")
    utils.myprint("(e.pseudocount_match):")
    utils.myprint(" {}".format(e.pseudocount_match))
def initialize_audit(e):
    """Hook for audit-initialization work; currently intentionally a no-op."""
    pass
def show_audit_stage_header(e):
    """Print the stage time and, per collection, the planned sample size
    together with its increase over the previous stage's sample size."""
    utils.myprint("audit stage time", e.stage_time)
    utils.myprint(" New target sample sizes by paper ballot collection:")
    for pbcid in e.pbcids:
        # Sample sizes recorded at the previous stage.
        last_s = e.saved_state["sn_tp"][e.saved_state["stage_time"]]
        utils.myprint(" {}: {} (+{})"
                      .format(pbcid,
                              e.saved_state["plan_tp"][e.saved_state["stage_time"]][pbcid],
                              e.saved_state["plan_tp"][e.saved_state["stage_time"]][pbcid] - \
                              last_s[pbcid]))
def read_audited_votes(e):
    """
    Read audited votes from 3-audit/33-audited-votes/audited-votes-PBCID.csv

    Fills e.av_cpb[cid][pbcid][bid] with the audited (actual) vote.
    """
    election_pathname = os.path.join(multi.ELECTIONS_ROOT,
                                     e.election_dirname)
    audited_votes_pathname = os.path.join(election_pathname,
                                          "3-audit",
                                          "33-audited-votes")
    for pbcid in e.pbcids:
        safe_pbcid = ids.filename_safe(pbcid)
        filename = utils.greatest_name(audited_votes_pathname,
                                       "audited-votes-"+safe_pbcid,
                                       ".csv")
        file_pathname = os.path.join(audited_votes_pathname, filename)
        fieldnames = ["Collection", "Ballot id", "Contest", "Selections"]
        rows = csv_readers.read_csv_file(file_pathname, fieldnames, varlen=True)
        for row in rows:
            # NOTE(review): this rebinds the outer loop variable `pbcid` to the
            # value found in the row -- presumably each file only contains rows
            # for its own collection; confirm against the file format.
            pbcid = row["Collection"]
            bid = row["Ballot id"]
            cid = row["Contest"]
            vote = row["Selections"]
            utils.nested_set(e.av_cpb, [cid, pbcid, bid], vote)
def audit_stage(e, stage_time):
    """
    Perform audit stage for the stage_time given.
    We represent stage with a datetime string
    (Historically, we used strings since json
    requires keys to be strings. We aren't using
    json now, but we might again later.)
    """
    ### TBD: filter file inputs by e.stage_time
    e.stage_time = "{}".format(stage_time)
    # Restore per-stage state left by the previous stage.
    saved_state.read_saved_state(e)
    # Fresh per-stage containers, keyed by the new stage time.
    e.status_tm[e.stage_time] = {}
    e.sn_tp[e.stage_time] = {}
    e.risk_tm[e.stage_time] = {}
    e.sn_tcpra[e.stage_time] = {}
    # this is global read, not just per stage, for now
    read_audited_votes(e)
    # Sample, measure risk, classify, persist, and report.
    draw_sample(e)
    risk_bayes.compute_risks(e, e.sn_tcpra)
    compute_statuses(e)
    write_audit_output_contest_status(e)
    write_audit_output_collection_status(e)
    show_audit_stage_header(e)
    show_sample_counts(e)
    show_risks_and_statuses(e)
def write_audit_output_contest_status(e):
    """
    Write audit_output_contest_status; same format as audit_spec_contest,
    except for status field.  One CSV file per stage time.
    """
    dirpath = os.path.join(multi.ELECTIONS_ROOT,
                           e.election_dirname,
                           "3-audit",
                           "34-audit-output")
    os.makedirs(dirpath, exist_ok=True)
    filename = os.path.join(dirpath,
                            "audit-output-contest-status-"+e.stage_time+".csv")
    fieldnames = ["Measurement id",
                  "Contest",
                  "Risk Measurement Method",
                  "Risk Limit",
                  "Risk Upset Threshold",
                  "Sampling Mode",
                  "Status",
                  "Param 1",
                  "Param 2"]
    with open(filename, "w") as file:
        file.write(",".join(fieldnames))
        file.write("\n")
        for mid in e.mids:
            # Assemble the row, then emit it comma-joined in one write.
            row = [mid,
                   e.cid_m[mid],
                   e.risk_method_m[mid],
                   e.risk_limit_m[mid],
                   e.risk_upset_m[mid],
                   e.sampling_mode_m[mid],
                   e.status_tm[e.stage_time][mid],
                   e.risk_measurement_parameters_m[mid][0],
                   e.risk_measurement_parameters_m[mid][1]]
            file.write(",".join("{}".format(value) for value in row))
            file.write("\n")
def write_audit_output_collection_status(e):
    """ Write 3-audit/34-audit-output/audit_output_collection_status.csv

    One row per paper ballot collection: total ballots, total sampled so
    far, and the number newly sampled this stage (blank on the first
    stage, when there is no prior saved sample state).
    """
    dirpath = os.path.join(multi.ELECTIONS_ROOT,
                           e.election_dirname,
                           "3-audit",
                           "34-audit-output")
    os.makedirs(dirpath, exist_ok=True)
    filename = os.path.join(dirpath,
                            "audit-output-collection-status-"+e.stage_time+".csv")
    with open(filename, "w") as file:
        # Fix: corrected header typos ("allots" -> "ballots",
        # "sample this stage." -> "sampled this stage").
        fieldnames = ["Collection",
                      "Number of ballots",
                      "Number of ballots sampled total",
                      "Number of ballots sampled this stage"]
        file.write(",".join(fieldnames))
        file.write("\n")
        for pbcid in e.pbcids:
            file.write("{},".format(pbcid))
            file.write("{},".format(len(e.bids_p[pbcid])))
            file.write("{},".format(e.sn_tp[e.stage_time][pbcid]))
            if "sn_tp" in e.saved_state:
                new_sample_size = e.sn_tp[e.stage_time][pbcid]
                old_sample_size = e.saved_state["sn_tp"] \
                    [e.saved_state["stage_time"]][pbcid]
                diff_sample_size = new_sample_size - old_sample_size
                file.write("{}".format(diff_sample_size))
            file.write("\n")
def stop_audit(e):
    """
    Return True if we should stop the audit, i.e. if NO measurement
    is simultaneously `Open' and `Active'.
    """
    statuses = e.status_tm[e.stage_time]
    return not any(statuses[mid] == "Open" and
                   e.sampling_mode_m[mid] == "Active"
                   for mid in e.mids)
def audit(e, args):
    """Run the main audit loop: one interactive stage per iteration,
    until every active measurement resolves or the stage-time limit passes."""
    read_audit_spec(e, args)
    initialize_audit(e)
    saved_state.write_initial_saved_state(e)
    show_audit_spec(e)
    utils.myprint("====== Audit ======")
    while True:
        # Stage times are datetime strings; comparison is lexicographic.
        stage_time = utils.datetime_string()
        if stage_time > e.max_stage_time:
            break
        audit_stage(e, stage_time)
        if stop_audit(e):
            break
        planner.compute_plan(e)
        print("Slack:", risk_bayes.compute_slack_p(e))
        mid = e.mids[0]
        risk_bayes.tweak_all(e, mid)
        # Interactive: ask the operator before starting another stage.
        if not input("Begin new audit stage? (y or n):").startswith('y'):
            break
        saved_state.write_intermediate_saved_state(e)
        time.sleep(2) # to ensure next stage_time is new
    show_audit_summary(e)
def show_audit_summary(e):
    """Print the end-of-audit summary: final statuses, why the audit
    stopped, and per-collection / total sample counts."""
    utils.myprint("=============")
    utils.myprint("Audit completed!")
    utils.myprint("All measurements have a status in the following list:",
                  e.election_status_t[e.stage_time])
    if all([e.sampling_mode_m[mid]!="Active" \
            or e.status_tm[e.stage_time][mid]!="Open" \
            for mid in e.mids]):
        utils.myprint("No `Active' measurement still has `Open' status.")
    if ("Active", "Upset") in \
       [(e.sampling_mode_m[mid], e.status_tm[e.stage_time][mid])
        for mid in e.mids]:
        utils.myprint(("At least one `Active' measurement signals"
                       " `Upset' (full recount needed)."))
    if e.stage_time > e.max_stage_time:
        utils.myprint("Maximum audit stage time ({}) reached."
                      .format(e.max_stage_time))
    utils.myprint("Number of ballots sampled, by paper ballot collection:")
    for pbcid in e.pbcids:
        utils.myprint(" {}:{}".format(pbcid, e.sn_tp[e.stage_time][pbcid]))
    # Force the grand total onto standard output regardless of print switches.
    utils.myprint_switches = ["std"]
    utils.myprint("Total number of ballots sampled: ", end='')
    utils.myprint(sum([e.sn_tp[e.stage_time][pbcid] for pbcid in e.pbcids]))
| {
"repo_name": "ron-rivest/2017-bayes-audit",
"path": "2017-code/audit.py",
"copies": "1",
"size": "21474",
"license": "mit",
"hash": 7597860329503809000,
"line_mean": 36.024137931,
"line_max": 97,
"alpha_frac": 0.5372543541,
"autogenerated": false,
"ratio": 3.5969849246231154,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9609626468280025,
"avg_score": 0.00492256208861817,
"num_lines": 580
} |
"""Audit trails for graphs and graph hierarchies.
This module contains a collection of utils for audit trails that
provide version control for transformations of graphs and graph hierarchies:
* `Versioning`, abstract class for in-memory versioning of objects;
* `VersionedGraph`, wrapper around graph objects in ReGraph that allows to track their audit trail;
* `VersionedHierarchy`, wrapper around hierarchy objects in ReGraph that allows to track their audit trail;
"""
from abc import ABC, abstractmethod
import copy
import datetime
import uuid
import warnings
import networkx as nx
from regraph.exceptions import RevisionError, RevisionWarning
from regraph.rules import (compose_rules, Rule,
_create_merging_rule,
_create_merging_rule_hierarchy,
compose_rule_hierarchies,
invert_rule_hierarchy)
from regraph.utils import keys_by_value
def _generate_new_commit_meta_data():
time = datetime.datetime.now()
commit_id = str(uuid.uuid4())
return time, commit_id
class Versioning(ABC):
"""Class for version control.
Attributes
----------
_current_branch
Name of the current branch
_deltas : dict
Dictionary with delta's to all other branches
_heads : dict
_revision_graph : networkx.DiGraph
Methods
-------
branches()
current_branch()
commit(graph, rule, instance)
branch(new_branch)
switch_branch(branch)
merge(branch1, branch2)
_compose_deltas
_invert_delta
_merge_into_current_branch
_create_identity_delta
_compose_delta_path
"""
    def __init__(self, init_branch="master", current_branch=None,
                 deltas=None, heads=None, revision_graph=None):
        """Initialize revision object.

        With no `heads` supplied, a fresh history is created: an initial
        commit on `init_branch` in a new revision graph.  Otherwise the
        supplied `heads`/`revision_graph` are adopted as-is (assumed to be
        provided together -- TODO confirm callers never pass one without
        the other).
        """
        if current_branch is None:
            self._current_branch = init_branch
        else:
            self._current_branch = current_branch
        if deltas is None:
            self._deltas = {}
        else:
            self._deltas = deltas
        if heads is None:
            # Create initial commit
            time, commit_id = _generate_new_commit_meta_data()
            self._heads = {}
            self._heads[init_branch] = commit_id
            self._revision_graph = nx.DiGraph()
            self._revision_graph.add_node(
                commit_id,
                branch=self._current_branch,
                message="Initial commit",
                time=time
            )
        else:
            self._heads = heads
            self._revision_graph = revision_graph
def initial_commit(self):
"""Return the id of the initial commit."""
for n in self._revision_graph.nodes():
if len(list(self._revision_graph.predecessors(n))) == 0:
commit = n
break
return commit
    @abstractmethod
    def _compose_deltas(self, delta1, delta2):
        """Abstract method for composing deltas.

        Subclasses return the delta equivalent to applying delta1 then delta2.
        """
        pass
    @staticmethod
    @abstractmethod
    def _invert_delta(self, delta1):
        """Abstract method for inverting deltas.

        NOTE(review): declared @staticmethod yet takes `self`; callers in
        this class invoke it as `self._invert_delta(delta)`, which only
        works if concrete subclasses override it as a regular method --
        confirm before changing the decorator.
        """
        pass
    @staticmethod
    @abstractmethod
    def _merge_into_current_branch(self, delta):
        """Abstract method for merging a branch into the current one.

        NOTE(review): same @staticmethod-with-`self` inconsistency as
        `_invert_delta`; subclasses appear expected to override it as a
        regular method -- confirm before changing.
        """
        pass
    @abstractmethod
    def _create_identity_delta(self):
        """Abstract method for creating an identity-delta (a no-op change)."""
        pass
def _compose_delta_path(self, path):
if len(path) > 1:
result_delta = self._revision_graph.adj[
path[0]][path[1]]["delta"]
previous_commit = path[1]
for current_commit in path[2:]:
result_delta = self._compose_deltas(
result_delta,
self._revision_graph.adj[
previous_commit][current_commit]["delta"])
d = self._revision_graph.adj[previous_commit][current_commit]["delta"]
previous_commit = current_commit
return result_delta
else:
return self._create_identity_delta()
def branches(self):
    """Return the names of all existing branches."""
    return [name for name in self._heads]
def current_branch(self):
    """Return the name of the currently checked-out branch."""
    return self._current_branch
def print_history(self, hide_id=False):
    """Print one line per commit: timestamp, id (unless hidden), branch
    and commit message."""
    for commit in self._revision_graph.nodes():
        attrs = self._revision_graph.nodes[commit]
        print(
            attrs["time"].strftime("%d/%m/%Y %H:%M:%S"),
            "" if hide_id else commit,
            attrs["branch"],
            attrs["message"])
def commit(self, delta, message=None, previous_commit=None, **kwargs):
    """Add a commit to the current branch.

    Parameters
    ----------
    delta : dict
        Delta transforming the previous head into the new commit.
    message : str, optional
        Commit message; stored as the empty string when omitted.
    previous_commit : hashable, optional
        Parent commit id; defaults to the head of the current branch.
    **kwargs
        Extra attributes stored on the new commit node.

    Returns
    -------
    Id of the newly created commit.
    """
    time, commit_id = _generate_new_commit_meta_data()
    if previous_commit is None:
        previous_commit = self._heads[self._current_branch]
    # Update heads and revision graph
    self._heads[self._current_branch] = commit_id
    self._revision_graph.add_node(
        commit_id,
        branch=self._current_branch,
        time=time,
        message=message if message is not None else "",
        **kwargs)
    self._revision_graph.add_edge(
        previous_commit, commit_id, delta=delta)
    # (the original re-read the edge delta into an unused local `d`;
    # that dead store is removed)
    # Re-express every other branch's delta relative to the new head
    for branch, branch_delta in self._deltas.items():
        self._deltas[branch] = self._compose_deltas(
            self._invert_delta(delta), branch_delta)
        self._refine_delta(self._deltas[branch])
    return commit_id
def switch_branch(self, branch):
    """Switch to another branch.

    Applies the stored delta to the versioned object and recomputes the
    deltas of all remaining branches relative to the new current branch.

    Raises
    ------
    RevisionError
        If `branch` does not exist.
    """
    if branch not in self.branches():
        raise RevisionError(
            "Branch '{}' does not exist".format(branch))
    if branch == self._current_branch:
        # Nothing to do.  The original fell through here and then
        # crashed with a KeyError, because the current branch never has
        # a stored delta; return early instead.
        warnings.warn("Already in branch '{}'".format(branch), RevisionWarning)
        return
    # Set as the current branch
    previous_branch = self._current_branch
    self._current_branch = branch
    # Apply delta to the versioned object
    delta = self._deltas[branch]
    self._apply_delta(delta)
    self._deltas[previous_branch] = self._invert_delta(delta)
    # Recompute deltas of the other branches relative to the new
    # current branch
    for name, another_delta in self._deltas.items():
        if name != previous_branch:
            self._deltas[name] = self._compose_deltas(
                self._deltas[previous_branch],
                another_delta
            )
    del self._deltas[self._current_branch]
def branch(self, new_branch, message=None):
    """Create a new branch pointing at an identity commit.

    The new branch becomes the current one; the previous branch keeps
    an identity delta to it.

    Raises
    ------
    RevisionError
        If a branch with this name already exists.
    """
    if new_branch in self.branches():
        raise RevisionError(
            "Branch '{}' already exists".format(new_branch))
    if message is None:
        message = "Created branch '{}'".format(new_branch)
    previous_branch = self._current_branch
    previous_commit = self._heads[previous_branch]
    # Make the new branch current and record an identity delta from
    # the previous branch to it
    self._current_branch = new_branch
    identity_delta = self._create_identity_delta()
    self._deltas[previous_branch] = identity_delta
    # Materialize the branch point as an identity commit
    commit_id = self.commit(
        identity_delta,
        message=message,
        previous_commit=previous_commit)
    self._heads[new_branch] = commit_id
    return commit_id
def merge_with(self, branch, message=None):
    """Merge the specified branch into the current one.

    Creates a merge commit on the current branch, adds an edge from the
    merged branch's head to it, then removes the merged branch.

    Raises
    ------
    RevisionError
        If `branch` does not exist.
    """
    if branch not in self.branches():
        raise RevisionError(
            "Branch '{}' does not exist".format(branch))
    if message is None:
        message = "Merged branch '{}' into '{}'".format(
            branch, self._current_branch)
    delta_to_current, delta_to_branch = self._merge_into_current_branch(
        self._deltas[branch])
    commit_id = self.commit(delta_to_current, message=message)
    # Record the merge edge from the merged branch's head
    self._revision_graph.add_edge(
        self._heads[branch], commit_id,
        delta=delta_to_branch)
    del self._heads[branch]
    del self._deltas[branch]
    return commit_id
def rollback(self, rollback_commit, message=None):
    """Rollback the current branch to a specific commit.

    Applies the inverse of the composed delta from `rollback_commit` to
    the current head, removes every commit reachable strictly after the
    rollback point, and recomputes the heads and deltas of all
    surviving branches as well as of any new heads created where merge
    commits were removed.

    Raises
    ------
    RevisionError
        If the commit does not exist, or the current branch has no path
        to it.
    """
    if rollback_commit not in self._revision_graph.nodes():
        raise RevisionError(
            "Commit '{}' does not exist in the revision graph".format(
                rollback_commit))
    # Find paths from the last commit of the current branch
    # to the commit with id 'rollback_commit'
    try:
        shortest_path = list(nx.shortest_path(
            self._revision_graph, rollback_commit, self._heads[self._current_branch]))
    except nx.NetworkXNoPath:
        raise RevisionError(
            "Branch '{}' does not contain a path to the commit '{}'".format(
                self._current_branch, rollback_commit))
    if message is None:
        message = "Rollback to commit '{}'".format(rollback_commit)
    # Generate a big rollback commit
    rollback_delta = self._invert_delta(
        self._compose_delta_path(shortest_path))
    # Apply the rollback commit
    self._apply_delta(rollback_delta)
    # Compute all paths from every head to the commit
    head_paths = {}
    for h in self._heads.values():
        head_paths[h] = list(nx.all_simple_paths(
            self._revision_graph, rollback_commit, h))
    # Compute new head commits (commits whose successors
    # are merge commits to be removed)
    new_heads = {}
    # Everything strictly after the rollback commit will be deleted
    removed_commits = set(
        [n for pp in head_paths.values() for p in pp for n in p if n != rollback_commit])
    for n in self._revision_graph.nodes():
        for s in self._revision_graph.successors(n):
            if n not in removed_commits and s in removed_commits:
                new_heads[self._revision_graph.nodes[n]["branch"]] = (n, s)
    # Recompute deltas
    new_current_branch = self._revision_graph.nodes[rollback_commit]["branch"]
    self._current_branch = new_current_branch
    self._heads[self._current_branch] = rollback_commit
    # Find a branching point from the rollback commit
    rollback_bfs_from_commit = nx.bfs_tree(
        self._revision_graph, rollback_commit, reverse=True)
    rollback_branching_point = None
    for n in rollback_bfs_from_commit.nodes():
        if self._revision_graph.nodes[n]["branch"] !=\
                self._current_branch:
            rollback_branching_point = n
            break
    # Update deltas of the preserved heads
    for head, commit in self._heads.items():
        if head != self._current_branch:
            # Find a branching point from the head
            head_bfs_from_commit = nx.bfs_tree(
                self._revision_graph, commit, reverse=True)
            head_branching_point = None
            for n in head_bfs_from_commit.nodes():
                if self._revision_graph.nodes[n]["branch"] != head:
                    head_branching_point = n
                    break
            if rollback_branching_point:
                # Rollback in a branched part of the revision graph
                try:
                    # Rollback happened before head
                    branching_to_head = nx.shortest_path(
                        self._revision_graph, rollback_branching_point, commit)
                    branching_to_rollback = nx.shortest_path(
                        self._revision_graph, rollback_branching_point, rollback_commit)
                    self._deltas[head] = self._compose_deltas(
                        self._invert_delta(self._compose_delta_path(branching_to_rollback)),
                        self._compose_delta_path(branching_to_head)
                    )
                except nx.NetworkXNoPath:
                    if head_branching_point:
                        try:
                            # Rollback happened after head
                            branching_to_rollback = nx.shortest_path(
                                self._revision_graph,
                                head_branching_point, rollback_commit)
                            branching_to_head = nx.shortest_path(
                                self._revision_graph,
                                head_branching_point, commit)
                            self._deltas[head] = self._compose_deltas(
                                self._invert_delta(self._compose_delta_path(
                                    branching_to_rollback)),
                                self._compose_delta_path(branching_to_head)
                            )
                        # NOTE(review): bare except silently swallows
                        # *any* error here, not only NetworkXNoPath --
                        # narrow it when this code is next touched.
                        except:
                            # Rollback and head are disjoint,
                            # so no delta to compute (no undirected path)
                            pass
            else:
                # Rollback in an unbranched part of the revision graph
                # So head can be only in a branched part and only before
                # the rollback commit (otherwise removed)
                if head_branching_point:
                    branching_to_head = nx.shortest_path(
                        self._revision_graph, head_branching_point, commit)
                    branching_to_rollback = nx.shortest_path(
                        self._revision_graph,
                        head_branching_point, rollback_commit)
                    delta_branching_to_rollback = self._compose_delta_path(
                        branching_to_rollback)
                    delta_branching_to_head = self._compose_delta_path(
                        branching_to_head)
                    self._deltas[head] = self._compose_deltas(
                        self._invert_delta(delta_branching_to_rollback),
                        delta_branching_to_head
                    )
            if head in self._deltas:
                self._refine_delta(self._deltas[head])
    # Compute deltas of the new heads
    for branch, (head_commit, merge_commit)in new_heads.items():
        path_to_merge = nx.shortest_path(
            self._revision_graph, rollback_commit, merge_commit)
        delta_to_merge = self._compose_delta_path(path_to_merge)
        head_to_merge = self._revision_graph.adj[
            head_commit][merge_commit]["delta"]
        self._deltas[branch] = self._compose_deltas(
            delta_to_merge,
            self._invert_delta(head_to_merge))
        self._refine_delta(self._deltas[branch])
        self._heads[branch] = head_commit
        print("Created the new head for '{}'".format(branch))
    # All paths to the heads originating from the commit to
    # which we rollback are removed
    for c in removed_commits:
        if c != rollback_commit:
            self._revision_graph.remove_node(c)
            if c in self._heads.values():
                for h in keys_by_value(self._heads, c):
                    print("Removed a head for '{}'".format(h))
                    del self._heads[h]
def _revision_graph_to_json(self):
    """Serialize the revision graph into a JSON-compatible dict with
    "nodes" and "edges" lists."""
    nodes = []
    for commit in self._revision_graph.nodes():
        attrs = self._revision_graph.nodes[commit]
        nodes.append({
            "id": commit,
            "branch": attrs["branch"],
            "time": attrs["time"].strftime("%d/%m/%Y %H:%M:%S"),
            "message": attrs["message"]
        })
    edges = []
    for source, target in self._revision_graph.edges():
        edges.append({
            "from": source,
            "to": target,
            "delta": self._delta_to_json(
                self._revision_graph.adj[source][target]["delta"])
        })
    return {"nodes": nodes, "edges": edges}
@classmethod
def _revision_graph_from_json(cls, json_data):
    """Rebuild a revision graph from its JSON representation (the
    inverse of `_revision_graph_to_json`)."""
    graph = nx.DiGraph()
    for node in json_data["nodes"]:
        graph.add_node(
            node["id"],
            branch=node["branch"],
            time=datetime.datetime.strptime(
                node["time"], "%d/%m/%Y %H:%M:%S"),
            message=node["message"])
    for edge in json_data["edges"]:
        graph.add_edge(
            edge["from"], edge["to"],
            delta=cls._delta_from_json(edge["delta"]))
    return graph
@staticmethod
@abstractmethod
def _delta_to_json(delta):
    """Abstract: serialize a delta into a JSON-compatible dict."""
    pass
@staticmethod
@abstractmethod
def _delta_from_json(json_data):
    """Abstract: deserialize a delta from its JSON representation."""
    pass
def to_json(self):
    """Convert the versioning object to JSON.

    Returns a dict with the current branch, per-branch deltas
    (serialized through `_delta_to_json`), the branch heads and the
    serialized revision graph.
    """
    # (the original assigned data["heads"] = {} and immediately
    # overwrote it; that dead store is removed)
    return {
        "current_branch": self._current_branch,
        "deltas": {
            k: self._delta_to_json(v)
            for k, v in self._deltas.items()
        },
        "heads": self._heads,
        "revision_graph": self._revision_graph_to_json()
    }
def from_json(self, json_data):
    """Restore the versioning object's state from JSON (the inverse of
    `to_json`)."""
    self._current_branch = json_data["current_branch"]
    deltas = {}
    for name, delta_json in json_data["deltas"].items():
        deltas[name] = self._delta_from_json(delta_json)
    self._deltas = deltas
    self._heads = json_data["heads"]
    self._revision_graph = self._revision_graph_from_json(
        json_data["revision_graph"])
class VersionedGraph(Versioning):
    """Class for versioned graphs.

    Concrete `Versioning` implementation whose versioned object is a
    single graph.  A delta is a dict with keys "rule" (a rewriting
    rule) and "lhs_instance"/"rhs_instance" (the matchings of the
    rule's lhs/rhs into the graph).
    """

    def __init__(self, graph, init_branch="master", current_branch=None,
                 deltas=None, heads=None, revision_graph=None):
        """Initialize versioned graph object."""
        self.graph = graph
        super().__init__(init_branch=init_branch,
                         current_branch=current_branch,
                         deltas=deltas, heads=heads,
                         revision_graph=revision_graph)

    def _refine_delta(self, delta):
        """Refine a delta in place against the current graph."""
        # Re-match the rule so that it is side-effect free
        lhs = delta["rule"].refine(self.graph, delta["lhs_instance"])
        delta["lhs_instance"] = lhs
        new_rhs = dict()
        for n in delta["rule"].rhs.nodes():
            if n not in delta["rhs_instance"].keys():
                # Nodes with no explicit rhs image inherit it from the
                # refined lhs instance via the rule's p_lhs/p_rhs maps
                new_rhs[n] = lhs[delta["rule"].p_lhs[
                    keys_by_value(delta["rule"].p_rhs, n)[0]]]
            else:
                new_rhs[n] = delta["rhs_instance"][n]
        delta["rhs_instance"] = new_rhs

    def _compose_deltas(self, delta1, delta2):
        """Computing composition of two deltas."""
        rule, lhs, rhs = compose_rules(
            delta1["rule"],
            delta1["lhs_instance"],
            delta1["rhs_instance"],
            delta2["rule"],
            delta2["lhs_instance"],
            delta2["rhs_instance"])
        return {
            "rule": rule,
            "lhs_instance": lhs,
            "rhs_instance": rhs
        }

    @staticmethod
    def _invert_delta(delta):
        """Reverse the direction of delta."""
        # Instances are deep-copied so the inverted delta does not
        # share mutable state with the original
        return {
            "rule": delta["rule"].get_inverted_rule(),
            "lhs_instance": copy.deepcopy(delta["rhs_instance"]),
            "rhs_instance": copy.deepcopy(delta["lhs_instance"])
        }

    @staticmethod
    def _create_identity_delta():
        """Create an identity-delta (identity rule, empty instances)."""
        rule = Rule.identity_rule()
        identity_delta = {
            "rule": rule,
            "lhs_instance": {},
            "rhs_instance": {}
        }
        return identity_delta

    def _apply_delta(self, delta, relabel=True):
        """Apply delta to the current graph version.

        When `relabel` is True the rewritten nodes are renamed to match
        the node ids stored in the delta's rhs instance.
        """
        rhs_instance = self.graph.rewrite(
            delta["rule"], delta["lhs_instance"])
        if relabel:
            # Relabel nodes to correspond to the stored rhs
            new_labels = {
                v: delta["rhs_instance"][k]
                for k, v in rhs_instance.items()
            }
            for n in self.graph.nodes():
                if n not in new_labels.keys():
                    new_labels[n] = n
            self.graph.relabel_nodes(new_labels)
            rhs_instance = {
                k: new_labels[v]
                for k, v in rhs_instance.items()
            }
        return rhs_instance

    def _merge_into_current_branch(self, delta):
        """Merge branch with delta into the current branch.

        Returns a pair of deltas leading from the current version and
        from the other branch's version to the merged version.
        """
        current_to_merged_rule, other_to_merged_rule =\
            _create_merging_rule(
                delta["rule"], delta["lhs_instance"], delta["rhs_instance"])
        rhs_instance = self.graph.rewrite(
            current_to_merged_rule, delta["lhs_instance"])
        current_to_merged_delta = {
            "rule": current_to_merged_rule,
            "lhs_instance": delta["lhs_instance"],
            "rhs_instance": rhs_instance
        }
        other_to_merged_delta = {
            "rule": other_to_merged_rule,
            "lhs_instance": delta["rhs_instance"],
            "rhs_instance": rhs_instance
        }
        return current_to_merged_delta, other_to_merged_delta

    def rewrite(self, rule, instance=None, message=None, **kwargs):
        """Rewrite the versioned graph and commit.

        Returns the rhs instance of the applied rule together with the
        id of the newly created commit.
        """
        # Refine a rule to be side-effect free
        refined_instance = rule.refine(self.graph, instance)
        rhs_instance = self.graph.rewrite(
            rule, refined_instance)
        commit_id = self.commit({
            "rule": rule,
            "lhs_instance": refined_instance,
            "rhs_instance": rhs_instance
        }, message=message, **kwargs)
        return rhs_instance, commit_id

    @staticmethod
    def _delta_to_json(delta):
        """Serialize a delta into a JSON-compatible dict."""
        data = {}
        data["rule"] = delta["rule"].to_json()
        data["lhs_instance"] = delta["lhs_instance"]
        data["rhs_instance"] = delta["rhs_instance"]
        return data

    @staticmethod
    def _delta_from_json(json_data):
        """Deserialize a delta from its JSON representation."""
        delta = {}
        delta["rule"] = Rule.from_json(json_data["rule"])
        delta["lhs_instance"] = json_data["lhs_instance"]
        delta["rhs_instance"] = json_data["rhs_instance"]
        return delta

    @classmethod
    def from_json(cls, graph, json_data):
        """Retrieve versioning object from JSON."""
        obj = cls(graph)
        # Explicit two-arg super() reaches the *instance* method
        # Versioning.from_json and applies it to the fresh object
        super(VersionedGraph, cls).from_json(obj, json_data)
        return obj
class VersionedHierarchy(Versioning):
    """Class for versioned hierarchies.

    Concrete `Versioning` implementation whose versioned object is a
    hierarchy of graphs.  A delta is a dict with keys "rule_hierarchy",
    "lhs_instances" and "rhs_instances" (per-graph matchings).
    """

    def __init__(self, hierarchy, init_branch="master", current_branch=None,
                 deltas=None, heads=None, revision_graph=None):
        """Initialize versioned hierarchy object."""
        self.hierarchy = hierarchy
        super().__init__(init_branch=init_branch, current_branch=current_branch,
                         deltas=deltas, heads=heads,
                         revision_graph=revision_graph)

    def _refine_delta(self, delta):
        """Refine a delta in place against the current hierarchy."""
        lhs_instances = self.hierarchy.refine_rule_hierarchy(
            delta["rule_hierarchy"],
            delta["lhs_instances"])
        delta["lhs_instances"] = lhs_instances
        for graph in delta["rule_hierarchy"]["rules"]:
            if graph not in delta["rhs_instances"]:
                # NOTE(review): this aliases the lhs-instance dict
                # rather than copying it -- confirm sharing is intended
                # before mutating either side.
                delta["rhs_instances"][graph] = delta[
                    "lhs_instances"][graph]
        for graph, rule in delta["rule_hierarchy"]["rules"].items():
            # NOTE(review): redundant -- `rule` was already unpacked
            # from this same dict by the loop header.
            rule = delta["rule_hierarchy"]["rules"][graph]
            rhs_instance = delta["rhs_instances"][graph]
            for n in rule.rhs.nodes():
                if n not in rhs_instance.keys():
                    # Missing rhs images are inherited from the refined
                    # lhs instance via the rule's p_lhs/p_rhs maps
                    rhs_instance[n] = delta["lhs_instances"][graph][
                        rule.p_lhs[keys_by_value(rule.p_rhs, n)[0]]]
            delta["rhs_instances"][graph] = rhs_instance

    def _compose_deltas(self, delta1, delta2):
        """Computing composition of two deltas."""
        rule, lhs, rhs = compose_rule_hierarchies(
            delta1["rule_hierarchy"],
            delta1["lhs_instances"],
            delta1["rhs_instances"],
            delta2["rule_hierarchy"],
            delta2["lhs_instances"],
            delta2["rhs_instances"])
        return {
            "rule_hierarchy": rule,
            "lhs_instances": lhs,
            "rhs_instances": rhs
        }

    @staticmethod
    def _invert_delta(delta):
        """Reverse the direction of delta."""
        # NOTE(review): unlike VersionedGraph._invert_delta, the
        # instance dicts are *not* deep-copied here.
        return {
            "rule_hierarchy": invert_rule_hierarchy(
                delta["rule_hierarchy"]),
            "lhs_instances": delta["rhs_instances"],
            "rhs_instances": delta["lhs_instances"]
        }

    @staticmethod
    def _create_identity_delta():
        """Create an identity-delta (empty rule hierarchy, empty
        instances)."""
        identity_delta = {
            "rule_hierarchy": {
                "rules": {},
                "rule_homomorphisms": {}
            },
            "lhs_instances": {},
            "rhs_instances": {}
        }
        return identity_delta

    def _apply_delta(self, delta, relabel=True):
        """Apply delta to the current hierarchy version.

        When `relabel` is True, rewritten nodes of every graph are
        renamed to match the node ids stored in the delta.
        """
        rhs_instances = self.hierarchy.apply_rule_hierarchy(
            delta["rule_hierarchy"], delta["lhs_instances"])
        if relabel:
            # Relabel nodes to correspond to the stored rhs
            for graph, rhs_instance in delta["rhs_instances"].items():
                old_rhs = rhs_instance
                new_rhs = rhs_instances[graph]
                # Map freshly produced node ids back to the stored ones
                new_labels = {
                    v: old_rhs[k]
                    for k, v in new_rhs.items()
                    if v != old_rhs[k]
                }
                if len(new_labels) > 0:
                    for n in self.hierarchy.get_graph(graph).nodes():
                        if n not in new_labels.keys():
                            new_labels[n] = n
                    self.hierarchy.relabel_nodes(graph, new_labels)
                    rhs_instances[graph] = old_rhs
        return rhs_instances

    def _merge_into_current_branch(self, delta):
        """Merge branch with delta into the current branch.

        Returns a pair of deltas leading from the current version and
        from the other branch's version to the merged version.
        """
        current_to_merged, other_to_merged =\
            _create_merging_rule_hierarchy(
                delta["rule_hierarchy"],
                delta["lhs_instances"],
                delta["rhs_instances"])
        rhs_instances = self.hierarchy.apply_rule_hierarchy(
            current_to_merged,
            delta["lhs_instances"])
        current_to_merged_delta = {
            "rule_hierarchy": current_to_merged,
            "lhs_instances": delta["lhs_instances"],
            "rhs_instances": rhs_instances
        }
        other_to_merged_delta = {
            "rule_hierarchy": other_to_merged,
            "lhs_instances": delta["rhs_instances"],
            "rhs_instances": rhs_instances
        }
        return current_to_merged_delta, other_to_merged_delta

    def rewrite(self, graph_id, rule, instance=None,
                p_typing=None, rhs_typing=None,
                strict=False, message="", **kwargs):
        """Rewrite the versioned hierarchy and commit.

        Returns the rhs instance in the rewritten graph `graph_id`
        together with the id of the newly created commit.

        NOTE(review): `strict` is accepted but not used in the visible
        body.
        """
        # Lift the rule to a side-effect-free rule hierarchy, apply it
        # and commit the resulting delta
        rule_hierarchy, lhs_instances = self.hierarchy.get_rule_hierarchy(
            graph_id, rule, instance, p_typing, rhs_typing)
        lhs_instances = self.hierarchy.refine_rule_hierarchy(
            rule_hierarchy, lhs_instances)
        rhs_instances = self.hierarchy.apply_rule_hierarchy(
            rule_hierarchy, lhs_instances)
        commit_id = self.commit({
            "rule_hierarchy": rule_hierarchy,
            "lhs_instances": lhs_instances,
            "rhs_instances": rhs_instances
        }, message=message, **kwargs)
        return rhs_instances[graph_id], commit_id

    @staticmethod
    def _delta_to_json(delta):
        """Serialize a delta into a JSON-compatible dict.

        Rule homomorphisms (keyed by (source, target) tuples) are
        flattened into a list of records, since tuples cannot be JSON
        dict keys.
        """
        rule_homs_json = []
        for (s, t), (lh, ph, rh) in delta[
                "rule_hierarchy"]["rule_homomorphisms"].items():
            rule_homs_json.append({
                "from": s,
                "to": t,
                "lhs_mapping": lh,
                "p_mapping": ph,
                "rhs_mapping": rh
            })
        data = {}
        data["rule_hierarchy"] = {
            "rules": {},
            "rule_homomorphisms": rule_homs_json
        }
        for graph, rule in delta["rule_hierarchy"]["rules"].items():
            data["rule_hierarchy"]["rules"][graph] = rule.to_json()
        data["lhs_instances"] = delta["lhs_instances"]
        data["rhs_instances"] = delta["rhs_instances"]
        return data

    @staticmethod
    def _delta_from_json(json_data):
        """Deserialize a delta from its JSON representation (inverse of
        `_delta_to_json`)."""
        delta = {}
        rule_homs = {}
        for record in json_data["rule_hierarchy"]["rule_homomorphisms"]:
            rule_homs[(record["from"], record["to"])] = (
                record["lhs_mapping"],
                record["p_mapping"],
                record["rhs_mapping"],
            )
        delta["rule_hierarchy"] = {
            "rules": {},
            "rule_homomorphisms": rule_homs
        }
        for graph, rule in json_data["rule_hierarchy"]["rules"].items():
            delta["rule_hierarchy"]["rules"][graph] = Rule.from_json(rule)
        delta["lhs_instances"] = json_data["lhs_instances"]
        delta["rhs_instances"] = json_data["rhs_instances"]
        return delta

    @classmethod
    def from_json(cls, hierarchy, json_data):
        """Retrieve versioning object from JSON."""
        obj = cls(hierarchy)
        # Explicit two-arg super() reaches the *instance* method
        # Versioning.from_json and applies it to the fresh object
        super(VersionedHierarchy, cls).from_json(obj, json_data)
        return obj
| {
"repo_name": "Kappa-Dev/ReGraph",
"path": "regraph/audit.py",
"copies": "1",
"size": "30208",
"license": "mit",
"hash": -7807191151811318000,
"line_mean": 35.884004884,
"line_max": 107,
"alpha_frac": 0.5367121292,
"autogenerated": false,
"ratio": 4.391335949992731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428048079192731,
"avg_score": null,
"num_lines": null
} |
''' Audit utilities '''
def get_data_from_field(obj, field):
    ''' Extract the serializable value of a single model field from obj.

    Foreign keys are read through the raw attribute name (the stored
    id), datetimes are stringified, and every other field value is
    returned unchanged.
    '''
    kind = field.get_internal_type()
    if kind == 'ForeignKey':
        return getattr(obj, field.attname)
    value = getattr(obj, field.name)
    return str(value) if kind == 'DateTimeField' else value
def serialize_data(instance, relations):
    ''' Build a plain dict snapshot of a model instance.

    Every concrete field is serialized through get_data_from_field;
    each name in `relations` is treated as a related manager whose
    items are serialized field-by-field into a list of dicts.
    '''
    data = {
        field.name: get_data_from_field(instance, field)
        for field in instance._meta.fields
    }
    for relation in relations:
        serialized_items = []
        for item in getattr(instance, relation).all():
            serialized_items.append({
                field.name: get_data_from_field(item, field)
                for field in item._meta.fields
            })
        data[relation] = serialized_items
    return data
def data_has_changes(obj, relations, prev_audit=None):
    ''' Return True when obj's serialized form differs from the last
    audit entry.

    With no previous audit there is nothing to compare against, so the
    object is always considered changed.
    '''
    if not prev_audit:
        return True
    return serialize_data(obj, relations) != prev_audit.audit_data
| {
"repo_name": "analytehealth/chronicler",
"path": "chronicler/utils.py",
"copies": "1",
"size": "1623",
"license": "bsd-2-clause",
"hash": -3703949068793442300,
"line_mean": 35.8863636364,
"line_max": 76,
"alpha_frac": 0.6487985213,
"autogenerated": false,
"ratio": 4.2265625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.53753610213,
"avg_score": null,
"num_lines": null
} |
"""A UDP server class for gevent"""
# Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details.
# Copyright (c) 2013 Russell Cloran
import sys
import errno
import traceback
from gevent import socket
from gevent import core
from gevent.baseserver import BaseServer
__all__ = ['DatagramServer']
class DatagramServer(BaseServer):
    """A generic UDP server.

    Receive UDP packets on a listening socket and spawn the
    user-provided *handle* for each datagram with 2 arguments: the
    client address and the received message (in that order -- see
    ``_do_recv``).

    Note that although the errors in a successfully spawned handler will not
    affect the server or other connections, the errors raised by :func:`accept`
    and *spawn* cause the server to stop accepting for a short amount of time.
    The exact period depends on the values of :attr:`min_delay` and
    :attr:`max_delay` attributes.

    The delay starts with :attr:`min_delay` and doubles with each successive
    error until it reaches :attr:`max_delay`. A successful :func:`accept`
    resets the delay to :attr:`min_delay` again.
    """

    # the number of seconds to sleep in case there was an error in recvfrom() call
    # for consecutive errors the delay will double until it reaches max_delay
    # when accept() finally succeeds the delay will be reset to min_delay again
    min_delay = 0.01
    max_delay = 1

    def __init__(self, listener, handle=None, backlog=None, spawn='default', **ssl_args):
        # NOTE(review): **ssl_args is accepted but never used in the
        # visible body -- presumably kept for signature compatibility.
        BaseServer.__init__(self, listener, handle=handle, backlog=backlog, spawn=spawn)
        self.delay = self.min_delay
        # Read event on the UDP socket (None while receiving is paused)
        self._recv_event = None
        # NOTE(review): attribute name misspells "receiving"; kept
        # as-is because it is referenced throughout the class.
        self._start_receving_timer = None

    def pre_start(self):
        # Lazily create and bind the UDP socket on first start
        if not hasattr(self, 'socket'):
            self.socket = _udp_listener(self.address, backlog=self.backlog, reuse_addr=self.reuse_addr)
            self.address = self.socket.getsockname()
        self._stopped_event.clear()

    def set_listener(self, listener, backlog=None):
        BaseServer.set_listener(self, listener, backlog=backlog)
        try:
            # Unwrap the raw _sock from a Python-level socket wrapper,
            # if there is one
            self.socket = self.socket._sock
        except AttributeError:
            pass

    def set_spawn(self, spawn):
        BaseServer.set_spawn(self, spawn)
        if self.pool is not None:
            # Resume receiving as soon as the pool has free slots again
            self.pool._semaphore.rawlink(self._start_receiving)

    def set_handle(self, handle):
        BaseServer.set_handle(self, handle)
        self._handle = handle

    def start_accepting(self):
        # Begin (or resume) reading datagrams from the socket
        if self._recv_event is None:
            self._recv_event = core.read_event(self.socket.fileno(), self._do_recv, persist=True)

    def _start_receiving(self, _event):
        # Pool-semaphore callback: resume reading if we still have a socket
        if self._recv_event is None:
            if 'socket' not in self.__dict__:
                return
            self._recv_event = core.read_event(self.socket.fileno(), self._do_recv, persist=True)

    def stop_accepting(self):
        # Cancel the read event and any pending resume timer
        if self._recv_event is not None:
            self._recv_event.cancel()
            self._recv_event = None
        if self._start_receving_timer is not None:
            self._start_receving_timer.cancel()
            self._start_receving_timer = None

    def _do_recv(self, event, _evtype):
        # Read-event callback: handle one datagram per invocation.
        assert event is self._recv_event
        address = None
        try:
            if self.full():
                # Spawn pool exhausted: pause reading; set_spawn() wired
                # the pool semaphore to _start_receiving to resume later
                self.stop_accepting()
                return
            try:
                msg, address = self.socket.recvfrom(8192)
            except socket.error, err:
                if err[0]==errno.EAGAIN:
                    # Spurious wakeup: nothing to read yet
                    sys.exc_clear()
                    return
                raise
            # Successful read resets the backoff delay
            self.delay = self.min_delay
            spawn = self._spawn
            if spawn is None:
                self._handle(address, msg)
            else:
                spawn(self._handle, address, msg)
            return
        except:
            traceback.print_exc()
            ex = sys.exc_info()[1]
            if self.is_fatal_error(ex):
                self.kill()
                sys.stderr.write('ERROR: %s failed with %s\n' % (self, str(ex) or repr(ex)))
                return
            try:
                if address is None:
                    sys.stderr.write('%s: Failed.\n' % (self, ))
                else:
                    sys.stderr.write('%s: Failed to handle request from %s\n' % (self, address, ))
            except Exception:
                traceback.print_exc()
            if self.delay >= 0:
                # Exponential backoff before accepting again
                self.stop_accepting()
                self._start_receving_timer = core.timer(self.delay, self.start_accepting)
                self.delay = min(self.max_delay, self.delay*2)
            sys.exc_clear()

    def is_fatal_error(self, ex):
        # Errors that indicate the listening socket itself is unusable
        return isinstance(ex, socket.error) and ex[0] in (errno.EBADF, errno.EINVAL, errno.ENOTSOCK)
def _udp_listener(address, backlog=50, reuse_addr=None):
    """A shortcut to create a listening UDP socket.

    `backlog` is accepted but not used in the body (UDP sockets do not
    listen) -- presumably kept for parity with a TCP listener helper;
    confirm before removing.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    if reuse_addr is not None:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, reuse_addr)
    # Allow receiving broadcast datagrams
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    try:
        sock.bind(address)
    except socket.error, exc:
        # Append the address to the error message to ease debugging
        strerror = getattr(exc, 'strerror', None)
        if strerror is not None:
            exc.strerror = strerror + ": " + repr(address)
        raise
    return sock
| {
"repo_name": "rcloran/SleepProxyServer",
"path": "sleepproxy/udp.py",
"copies": "1",
"size": "5306",
"license": "bsd-2-clause",
"hash": 1716105987697285600,
"line_mean": 33.9078947368,
"line_max": 103,
"alpha_frac": 0.6047870335,
"autogenerated": false,
"ratio": 3.9072164948453607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9972357868382535,
"avg_score": 0.007929131992565045,
"num_lines": 152
} |
# HTML fragment for the audio element; %(fname)s is filled with the
# selected track name.  NOTE(review): not referenced in the visible
# code -- presumably consumed by the 'playtest' template; confirm.
audTemp ="""
<audio id="audElement" controls class="media-object">
<source src="./doc/%(fname)s" type="audio/mpeg" />
<em>Sorry, your browser doesn't support HTML5 audio.</em>
</audio>
"""
from bottle import Bottle,route, run, template, static_file,request,redirect
import os,sched,time
from multiprocessing import Process
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3
import socket
import webbrowser
#import android
#droid = android.Android()
# Discover this machine's LAN IP by opening an outbound TCP connection
# and reading the local end of the socket
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect(("gmail.com",80))
hostip= sock.getsockname()[0]
sock.close()
print "hostip:",hostip
# Playback/server state shared by the route handlers below
temp='puriyavillai-hq.mp3'  # currently selected track
extip='98.235.224.147'  # external IP; not used in the visible code
porty = 18080  # HTTP port the bottle app listens on
timeset = 0  # saved playback position read from timeset.txt
currtime = 0.0
starttime = 0.0
audtime = 0.0
app = Bottle()
def showurl(url="http://localhost:18080"):
    # Open `url` in a webview after a short startup delay.
    # NOTE(review): `droid` is only defined by the commented-out
    # `android` import above, so calling this raises NameError unless
    # that import is restored -- presumably used only on-device.
    time.sleep(5)
    droid.webViewShow(url)
@app.route('/css/<scriptname>')
def getScript(scriptname):
    # Serve static stylesheet assets from the local ./css/ directory
    css_root = './css/'
    return static_file(scriptname, root=css_root)
@app.route('/')
def home_page():
    # Render the player page.  `temp` holds the currently selected
    # track; `timeset` is refreshed from timeset.txt on every request.
    global temp
    global hostip
    global extip
    global timeset
    print temp
    timeset= readTime()
    return template('playtest', AudioFile='http://'+hostip+':'+str(porty)+'/mplay/'+temp,audList=getAudioList(), time_set = timeset, playList = GenPlayList())
@app.route('/mplay/<fname>')
def play_file(fname):
    # Serve a media file from the local ./doc/ directory
    print fname
    return static_file( fname, root='./doc/')
@app.route('/', method='POST')
def do_upload():
    # Handle the two POST forms of the home page: a file upload
    # ('upfile') or a track selection ('filename'); then redirect home.
    global temp
    print "Post hit !"
    upload = request.files.get('upfile')
    if upload != None:
        name, ext = os.path.splitext(upload.filename)
        print upload.raw_filename
        print name,ext
        # Save only when not already present
        if not os.path.exists('./doc/'+upload.filename):
            upload.save('./doc/'+upload.filename) # appends upload.filename automatically
        # Remember the upload as the currently selected track
        temp = upload.filename
    elif request.forms.get('filename')!=None:
        temp = request.forms.get('filename')
    redirect('/')
def getAudioList():
    """Return the names of all .mp3 files inside ./doc/.

    Bug fix: the original tested ``os.path.isdir(name)`` against the
    bare file name (relative to the working directory, not ./doc/), so
    subdirectories of ./doc/ were never actually filtered out.  It also
    shadowed the builtin ``list``.
    """
    audio_files = []
    for name in os.listdir('./doc/'):
        full_path = os.path.join('./doc/', name)
        # Keep only regular entries carrying an .mp3 extension
        if not os.path.isdir(full_path) and name.lower().endswith('.mp3'):
            audio_files.append(name)
    return audio_files
def GenPlayList():
    """Build a comma-separated list of quoted stream URLs for every
    .mp3 in ./doc/.

    Returns a string like '"http://host:port/mplay/a.mp3","..."'
    without a trailing comma, suitable for splicing into a JS array.

    Bug fix: as in getAudioList, the original tested
    ``os.path.isdir(name)`` against the working directory instead of
    inside ./doc/, so subdirectories were never filtered out.
    """
    global hostip,porty
    base_url = 'http://' + hostip + ':' + str(porty) + '/mplay/'
    entries = []
    for name in os.listdir('./doc/'):
        full_path = os.path.join('./doc/', name)
        if not os.path.isdir(full_path) and name.lower().endswith('.mp3'):
            entries.append('"' + base_url + name + '"')
    return ','.join(entries)
def readTime():
    """Read the saved playback state from timeset.txt.

    The file holds the current track name on the first line and the
    playback position (seconds) on the second.  Updates the
    module-level `temp` (track name) and returns the position as a
    float.
    """
    global temp
    # Context manager guarantees the handle is closed even when float()
    # raises (the original leaked it on a parse error); text mode keeps
    # the track name a str.
    with open('timeset.txt') as f:
        temp = f.readline().rstrip()
        return float(f.readline())
if __name__ == "__main__" :
    # Open the player UI in the default browser, then start the bottle
    # server (blocks until the server stops)
    webbrowser.open_new_tab('http://'+hostip+':'+str(porty))
    run(app,host=hostip, port=porty)
print "app running !" | {
"repo_name": "vik1124/BottlePlayer",
"path": "tstbotplayer.py",
"copies": "1",
"size": "2614",
"license": "mit",
"hash": -4036349211193275000,
"line_mean": 23.9047619048,
"line_max": 155,
"alpha_frac": 0.6775057383,
"autogenerated": false,
"ratio": 2.8351409978308024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4012646736130802,
"avg_score": null,
"num_lines": null
} |
## Aug 31 '11 at 4:58
## By Nick ODell
## Modified by Jared Haer
##
import math
class Turtle():
    """Minimal headless stand-in for the stdlib turtle.

    Tracks position and heading (degrees, counter-clockwise) and
    records every point visited in `pointsVisited`, without any drawing
    backend.
    """

    def __init__(self):
        self.x, self.y, self.angle = 0.0, 0.0, 0.0
        self.pointsVisited = []
        self._visit()

    def position(self):
        """Return the current (x, y) position."""
        return self.x, self.y

    def xcor(self):
        """Return the current x coordinate."""
        return self.x

    def ycor(self):
        """Return the current y coordinate."""
        return self.y

    def forward(self, distance):
        """Move `distance` units along the current heading."""
        angle_radians = math.radians(self.angle)
        self.x += math.cos(angle_radians) * distance
        self.y += math.sin(angle_radians) * distance
        self._visit()

    def backward(self, distance):
        """Move `distance` units against the current heading."""
        self.forward(-distance)

    def right(self, angle):
        """Turn clockwise by `angle` degrees."""
        self.angle -= angle

    def left(self, angle):
        """Turn counter-clockwise by `angle` degrees."""
        self.angle += angle

    def setpos(self, x, y=None):
        """Jump to a position; accepts either a tuple or two numbers.

        Bug fix: the tuple branch previously read ``y[1]`` although `y`
        is None there, raising TypeError; it now reads ``x[1]``.
        """
        if y is None:
            self.x = x[0]
            self.y = x[1]
        else:
            self.x = x
            self.y = y
        self._visit()

    def _visit(self):
        """Add point to the list of points gone to by the turtle."""
        self.pointsVisited.append(self.position())

    # Now for some aliases. Everything that's implemented in this class
    # should be aliased the same way as the actual api.
    fd = forward
    bk = backward
    back = backward
    rt = right
    lt = left
    setposition = setpos
    goto = setpos
    pos = position
ut = Turtle() | {
"repo_name": "J216/gimp_be",
"path": "gimp_be/draw/UndrawnTurtle.py",
"copies": "1",
"size": "1444",
"license": "mit",
"hash": -5425279381624666000,
"line_mean": 21.578125,
"line_max": 71,
"alpha_frac": 0.555401662,
"autogenerated": false,
"ratio": 3.6743002544529264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47297019164529264,
"avg_score": null,
"num_lines": null
} |
# Auger legacy functions
'''TESTING
i=0 X=0 Y=0 elnum=0
[elem, order, peakind, lowind, highind, peakrange, lowrange, hirange, idealev, idealind,
idealnegpeak, integwidth, chargeshift, peakwidth, searchwidth]=Elemdata[i]
'''
def integmaps(specimage, backarray, Elemdata, shiftdict):
    ''' Return integrated counts over each specified peak
    Charging and any charge compensation
    specimage - raw spectral image data (rows x cols x energy channels)
    backarray is slope-intercept of background fits for each pixel
    Elemdata contains all data regions, integ widths, etc.
    shiftdict hold pixel and peak specific peak shift
    (calculated using 3x3 smoothed spectral image)
    peak out of range problem handled by double counting same region on other side of peak
    returns: dictionary w/ elem as key and np array containing [0] - total raw counts
    [1] background fit counts [2] subtracted counts integrated over element-specific
    integration width (usually 9eV wide)
    REPLACED BY FINDALLPEAKS METHOD
    '''
    sumdict={}
    for elnum, [elem, order, peakind, lowind, highind, peakrange, lowrange, hirange, idealev, idealind,
        idealnegpeak, integwidth, chargeshift, peakwidth, searchwidth] in enumerate(Elemdata):
        shiftimage=shiftdict.get(elem,[]) # per-pixel peak centers for this element
        xrange=np.arange(peakrange[0], peakrange[1]+1, 1.0) # same xrange for all pixels
        # np array holding integrated counts: [raw, background, subtracted]
        sumarray=np.empty([specimage.shape[0],specimage.shape[1],3])
        for X in range(0,specimage.shape[0]): # iterate over rows
            for Y in range(0,specimage.shape[1]): # iterate over cols
                # Get linear background over elem peak for given pixel
                # Backarray contains slope, intercepts stacked for all elements in elem order
                slope=backarray[X,Y,2*elnum]
                intercept=backarray[X,Y,2*elnum+1]
                backvals=slope*xrange+intercept # vectorized linear background (same values as per-channel loop)
                # select data subset using indices of this peak
                rawdata=specimage[X,Y,peakind[0]:peakind[1]+1]
                # Calculated background subtracted data in this region
                subdata=specimage[X,Y,peakind[0]:peakind[1]+1]-backvals
                thiscent=int((subdata.shape[0]-1)/2)+int(shiftimage[X,Y])
                # Inclusive integration limits low..high spanning integwidth channels
                low=thiscent-int((integwidth-1)/2)
                high=thiscent+int((integwidth-1)/2)
                # Mirror integration regions if out-of-range (assumes roughly symmetric peak)
                # BUGFIX: original used bitwise '|', which (by precedence) parsed as the
                # chained comparison low < (0|high) > len, so low<0 was never mirrored and
                # negative low silently sliced wrap-around data; also use >= since 'high'
                # is an inclusive channel index
                if low<0 or high>=subdata.shape[0]:
                    if low<0:
                        # number of missing data channels
                        numaddchan=-low
                        low=0 # reset lower integ limit
                        # compensate w/ double count of high-side counts
                        addintegrange=[high-numaddchan+1,high+1]
                    else:
                        # number of missing data channels
                        numaddchan=high-subdata.shape[0]+1
                        high=subdata.shape[0] # reset upper integ limit (slice below clips safely)
                        # Double count points on opposite side of peak
                        addintegrange=[low,low+numaddchan]
                else:
                    # full peak available in range
                    addintegrange=[]
                # Perform integrations ('high' is inclusive, so slice to high+1;
                # BUGFIX: original sliced [low:high], dropping one channel and
                # integrating integwidth-1 channels, which tripped the width check)
                rawcounts=np.sum(rawdata[low:high+1])
                backcounts=np.sum(backvals[low:high+1])
                subcounts=np.sum(subdata[low:high+1])
                if addintegrange:
                    rawcounts+=np.sum(rawdata[addintegrange[0]:addintegrange[1]])
                    backcounts+=np.sum(backvals[addintegrange[0]:addintegrange[1]])
                    subcounts+=np.sum(subdata[addintegrange[0]:addintegrange[1]])
                    # sanity check: mirrored + direct channels should total integwidth
                    if (rawdata[low:high+1].shape[0] + rawdata[addintegrange[0]:addintegrange[1]].shape[0]) != integwidth:
                        print('Number of integrated elements not equal to peak integwidth')
                sumarray[X,Y,0]=rawcounts
                sumarray[X,Y,1]=backcounts
                sumarray[X,Y,2]=subcounts
        sumdict.update({elem:sumarray})
    return sumdict
def calcshifts(specimage, Elemdata, AESquantparams):
    ''' Diagnostic look at direct-peak shifts across the spatial array.
    For every element in Elemdata, locate the peak maximum at each pixel of the
    spectral image and record its offset from the ideal peak position (can show
    position-dependent charging).
    specimage -- 3D spectral image (row, col, energy channel)
    Elemdata -- per-element parameter lists (peak indices, ideal positions, etc.)
    AESquantparams -- unused here; kept for interface consistency with callers
    Returns:
        shiftdict -- dict w/ elem as key, 2D numpy array of per-pixel shifts
        peakstats -- dict w/ elem as key, [mean, stdev, min, max] (mean/stdev as '%.3f' strings)
    '''
    #TODO modify this to handle charging shift determinations??
    peakstats={}
    shiftdict={}
    print('Peak shift averages by element')
    print('Element Mean Stdev Min Max')
    for params in Elemdata:
        elem = params[0]
        peakind, peakrange = params[2], params[5]
        idealev, idealind = params[8], params[9]
        xmin, xmax = peakind # unpack index range of elem/peak
        numrows, numcols = specimage.shape[0], specimage.shape[1]
        shiftarr = np.empty([numrows, numcols])
        # for charging samples the ideal index can be out of scan range (NaN);
        # in that case compare in eV instead of channel index
        compare_in_ev = str(idealind) == 'nan'
        for row in range(0, numrows):
            for col in range(0, numcols):
                peakpos = np.argmax(specimage[row, col, xmin:xmax]) # index of true peak within window
                if compare_in_ev:
                    shiftarr[row, col] = peakrange[0]+(peakrange[1]-peakrange[0])*peakpos/(xmax-xmin)-idealev
                else:
                    shiftarr[row, col] = peakpos+xmin-idealind
        shiftdict.update({elem: shiftarr})
        # Grab and return mean, stdev, min, max
        peakstats.update({elem: ["%.3f" % shiftarr.mean(), "%.3f" % shiftarr.std(), int(np.min(shiftarr)), int(np.max(shiftarr))]})
        print(elem,' ', "%.3f" % shiftarr.mean(), "%.3f" % shiftarr.std(),' ', int(np.min(shiftarr)),' ', int(np.max(shiftarr)))
    return shiftdict, peakstats
'''
Testing:
Augerslice.plot.scatter(x='Energy', y='Counts')
'''
def calcbackgrounds(specimage, energy, Elemdata):
    ''' Linear fit of e- background under each element peak using the low and
    high background index regions from Elemdata.
    specimage -- 3D spectral image (row, col, energy channel)
    energy -- 1D energy axis matching the channel dimension
    Elemdata -- per-element parameter lists (background index ranges at [3], [4])
    Returns 3D numpy array: per pixel, slope [2*i] and intercept [2*i+1] for
    each element i, in Elemdata order.
    can run on raw spectral image or perform adjacent averaging/filtering first
    '''
    # Z dimension holds slope & intercept of fit for each element from Elemdata
    nrows, ncols = specimage.shape[0], specimage.shape[1]
    backarray = np.empty([nrows, ncols, 2*len(Elemdata)])
    spectrum = pd.DataFrame()
    # index through the entire 2D image (X and Y pixel positions)
    for xpix in range(0, nrows):
        for ypix in range(0, ncols):
            spectrum['Energy'] = energy
            spectrum['Counts'] = specimage[xpix, ypix]
            # now loop through and fit each element
            for pos, peakparams in enumerate(Elemdata):
                lowind, highind = peakparams[3], peakparams[4]
                # assemble channel indices from both background regions (inclusive)
                fitchannels = [int(ch) for ch in range(lowind[0], lowind[1]+1)]
                fitchannels.extend([int(ch) for ch in range(highind[0], highind[1]+1)])
                fitslice = spectrum[spectrum.index.isin(fitchannels)]
                slope, intercept = np.polyfit(fitslice['Energy'], fitslice['Counts'], 1)
                backarray[xpix, ypix, 2*pos] = slope
                backarray[xpix, ypix, 2*pos+1] = intercept
    return backarray
# old tk interfaces (since combined to single multipurpose reporting function)
def countsbackreport_tk(spelist, Elements, Backfitlog, AESquantparams, **kwargs):
    ''' tk interface for args/kwargs of countsback plot reporting
    all args/dataframes must be passed through to plot functions
    spelist -- log df, one row per spe file (normally all spe filenumbers in folder)
    Elements -- list of peaks available for labelling
    Backfitlog, AESquantparams -- passed through to reportcountsback
    Options:
    1) filenums -- use all or choose subsets by filenumbers (spelist normally has all spe filenumbers in folder)
    2) xrange-- can be list of elements, list of ev ranges or combination (parsed by setplotboundaries)
    3) custom pdf name option
    4) plot background fits -bool
    Returns the kwargs dict built for this run (pass back in to restore defaults)
    TODO: subsetting of areas?
    '''
    # TODO use prior kwargs to set defaults
    # first print out existing info in various lines
    root = tk.Tk()
    filestr=tk.StringVar() # comma separated or range of filenumbers for plot
    # Variable for entering eV range(s).. optionally used
    xrangestr=tk.StringVar() # energy range in eV
    xrangestr.set('')
    now=datetime.datetime.now()
    PDFname='Countsback_report'+'_'+now.strftime('%d%b%y')+'.pdf'
    PDFnamestr=tk.StringVar() # date-stamped default PDF report name
    PDFnamestr.set(PDFname)
    # elements list only displayed for now... change with separate tk selector
    plotelemstr=tk.StringVar()
    mytext='Peaks to be labeled:'+', '.join(Elements)
    plotelemstr.set(mytext)
    rangechoice=tk.StringVar()
    # todo implement possible change of elements
    newelems=tk.StringVar() # string for new element choices if made
    newelems.set('')
    backfitbool=tk.BooleanVar() # Bool for plotting background (if counts plot)
    backfitptsbool=tk.BooleanVar() # Bool for plotting points used in background fits
    plotelemsbool=tk.BooleanVar() # optional labelling of elements
    plotelemsbool.set(True) # default to true
    choice=tk.StringVar() # plot or abort
    # set defaults based on prior run
    if 'addbackfit' in kwargs:
        backfitbool.set(True)
    if 'backfitpts' in kwargs:
        backfitptsbool.set(True)
    if 'plotelems' not in kwargs:
        # BUGFIX: previously cleared backfitptsbool (copy/paste error); the
        # 'plotelems' kwarg controls peak labelling, i.e. plotelemsbool
        plotelemsbool.set(False)
    a=tk.Label(root, text='Enter filenumbers for plot report (default all)').grid(row=0, column=0)
    b=tk.Entry(root, textvariable=filestr).grid(row=0, column=1)
    a=tk.Label(root, text='Optional xrange in eV (default 100-1900eV)').grid(row=1, column=0)
    b=tk.Entry(root, textvariable=xrangestr).grid(row=1, column=1)
    a=tk.Label(root, text='Enter PDF report name').grid(row=2, column=0)
    b=tk.Entry(root, textvariable=PDFnamestr).grid(row=2, column=1)
    # printout of peaks to be plotted (if chosen)
    a=tk.Label(root, text=plotelemstr.get()).grid(row=3, column=0)
    # Radio button for range (use element list, use ev strings or both)
    radio1 = tk.Radiobutton(root, text='Plot element ranges', value='elems', variable = rangechoice).grid(row=0, column=2)
    radio1 = tk.Radiobutton(root, text='Plot eV range', value='evrange', variable = rangechoice).grid(row=1, column=2)
    radio1 = tk.Radiobutton(root, text='Join elements and eV range', value='both', variable = rangechoice).grid(row=2, column=2)
    radio1 = tk.Radiobutton(root, text='Use full data range', value='full', variable = rangechoice).grid(row=3, column=2)
    d=tk.Checkbutton(root, variable=backfitbool, text='Plot background fits?')
    d.grid(row=4, column=0)
    d=tk.Checkbutton(root, variable=backfitptsbool, text='Plot points used to fit background?')
    d.grid(row=5, column=0)
    d=tk.Checkbutton(root, variable=plotelemsbool, text='Label element peaks?')
    d.grid(row=6, column=0)
    # option to reselect labeled elemental peaks
    # TODO fix this nested tk interface ... chosen elements are not changing
    def changeelems(event):
        # launch secondary element picker and refresh the displayed peak list
        newelemlist=AESutils.pickelemsGUI(AESquantparams) # get new elements/peaks list
        newelems.set(', '.join(newelemlist))
        newtext='Peaks to be labeled: '+', '.join(newelemlist)
        plotelemstr.set(newtext)
    def abort(event):
        choice.set('abort')
        root.destroy()
    def plot(event):
        choice.set('plot')
        root.destroy()
    d=tk.Button(root, text='Change labelled element peaks')
    d.bind('<Button-1>', changeelems)
    d.grid(row=7, column=0)
    d=tk.Button(root, text='Plot')
    d.bind('<Button-1>', plot)
    d.grid(row=7, column=1)
    d=tk.Button(root, text='Abort')
    d.bind('<Button-1>', abort)
    d.grid(row=7, column=2)
    root.mainloop()
    mychoice=choice.get()
    if mychoice=='plot':
        # Set up kwargs for plot (rebuilt from the dialog state)
        kwargs={}
        if filestr.get()!='': # optional subset of files based on entered number
            filenumlist=parsefilenums(filestr.get())
            kwargs.update({'filesubset':filenumlist})
        if backfitbool.get(): # plot backgrounds (stored in each Augerfile)
            kwargs.update({'addbackfit':True})
        if backfitptsbool.get(): # add points used for fitting background regions
            kwargs.update({'backfitpts':True})
        if plotelemsbool.get(): # label elemental peak positions
            kwargs.update({'plotelems':Elements})
        # Now create plotrange arg string (elems + evrange) based on radiobutton choice
        plotrange=[]
        if rangechoice.get()=='elems':
            plotrange.extend(Elements)
        elif rangechoice.get()=='evrange': # hyphenated ev range entered
            tempstr=xrangestr.get()
            plotrange.extend(tempstr.split(',')) # parse into strings if necessary (list must be passed)
        elif rangechoice.get()=='both':
            plotrange.extend(Elements) # start with elements list
            tempstr=xrangestr.get()
            plotrange.extend(tempstr.split(',')) # add ev ranges
        elif rangechoice.get()=='full':
            # set to maximum possible range.. will be reduced if larger than data range
            plotrange.append('0-2500') # parse into strings if necessary (list must be passed)
        # get and pass PDF name string as arg (default name is prefilled)
        myPDFname=PDFnamestr.get()
        '''
        if newelems!='': # use newly assigned values
            kwargs.update({'plotelems':newelems.get()})
        '''
        reportcountsback(spelist, plotrange, AESquantparams, Backfitlog, PDFname=myPDFname, **kwargs)
    return kwargs
def countsderivreport_tk(spelist, Elements, Smdifpeakslog, Backfitlog, AESquantparams, **kwargs):
    ''' tk interface for args/kwargs of countsderiv reports (deriv on top and counts below for all areas)
    all args/dataframes must be passed through to plot functions
    spelist -- log df, one row per spe file (normally all spe filenumbers in folder)
    Elements -- list of peaks available for labelling
    Smdifpeakslog, Backfitlog, AESquantparams -- passed through to reportderivcnt
    Options:
    1) filenums -- use all or choose subsets by filenumbers (spelist normally has all spe filenumbers in folder)
    2) xrange-- can be list of elements, list of ev ranges or combination (parsed by setplotboundaries)
    3) custom pdf name option
    4) plot background fits -bool
    5) plot smdifpeak amplitude points -bool (for deriv plot below)
    6) area subsets
    Returns the kwargs dict built for this run (pass back in to restore defaults)
    TODO: subsetting of areas?
    '''
    # TODO use prior kwargs to set defaults
    # first print out existing info in various lines
    root = tk.Tk()
    filestr=tk.StringVar() # comma separated or range of filenumbers for plot
    # Variable for entering eV range(s).. optionally used
    xrangestr=tk.StringVar() # energy range in eV
    xrangestr.set('')
    now=datetime.datetime.now()
    PDFname='Countsderiv_report'+'_'+now.strftime('%d%b%y')+'.pdf'
    PDFnamestr=tk.StringVar() # date-stamped default PDF report name
    PDFnamestr.set(PDFname)
    # elements list only displayed for now... change with separate tk selector
    plotelemstr=tk.StringVar()
    mytext='Peaks to be labeled:'+', '.join(Elements)
    plotelemstr.set(mytext)
    rangechoice=tk.StringVar()
    # todo implement possible change of elements
    newelems=tk.StringVar() # string for new element choices if made
    newelems.set('')
    backfitbool=tk.BooleanVar() # Bool for plotting background (if counts plot)
    backfitptsbool=tk.BooleanVar() # Bool for plotting points used in background fits
    smdifbool=tk.BooleanVar() # Bool for labelling amplitude points on sm-diff plot
    plotelemsbool=tk.BooleanVar() # optional labelling of elements
    plotelemsbool.set(True) # default to true
    choice=tk.StringVar() # plot or abort
    # set defaults based on prior run
    if 'addbackfit' in kwargs:
        backfitbool.set(True)
    if 'backfitpts' in kwargs:
        backfitptsbool.set(True)
    if 'plotelems' not in kwargs:
        # BUGFIX: previously cleared backfitptsbool (copy/paste error); the
        # 'plotelems' kwarg controls peak labelling, i.e. plotelemsbool
        plotelemsbool.set(False)
    a=tk.Label(root, text='Enter filenumbers for plot report (default all)').grid(row=0, column=0)
    b=tk.Entry(root, textvariable=filestr).grid(row=0, column=1)
    a=tk.Label(root, text='Optional xrange in eV (default 100-1900eV)').grid(row=1, column=0)
    b=tk.Entry(root, textvariable=xrangestr).grid(row=1, column=1)
    a=tk.Label(root, text='Enter PDF report name').grid(row=2, column=0)
    b=tk.Entry(root, textvariable=PDFnamestr).grid(row=2, column=1)
    # printout of peaks to be plotted (if chosen)
    a=tk.Label(root, text=plotelemstr.get()).grid(row=3, column=0)
    # Radio button for range (use element list, use ev strings or both)
    radio1 = tk.Radiobutton(root, text='Plot element ranges', value='elems', variable = rangechoice).grid(row=0, column=2)
    radio1 = tk.Radiobutton(root, text='Plot eV range', value='evrange', variable = rangechoice).grid(row=1, column=2)
    radio1 = tk.Radiobutton(root, text='Join elements and eV range', value='both', variable = rangechoice).grid(row=2, column=2)
    radio1 = tk.Radiobutton(root, text='Use full data range', value='full', variable = rangechoice).grid(row=3, column=2)
    d=tk.Checkbutton(root, variable=backfitbool, text='Plot background fits?')
    d.grid(row=4, column=0)
    d=tk.Checkbutton(root, variable=backfitptsbool, text='Plot points used to fit background?')
    d.grid(row=5, column=0)
    d=tk.Checkbutton(root, variable=plotelemsbool, text='Label element peaks?')
    d.grid(row=6, column=0)
    d=tk.Checkbutton(root, variable=smdifbool, text='Label amplitude pts in sm-diff?')
    d.grid(row=7, column=0)
    # option to reselect labeled elemental peaks
    # TODO fix this nested tk interface ... chosen elements are not changing
    def changeelems(event):
        # launch secondary element picker and refresh the displayed peak list
        newelemlist=AESutils.pickelemsGUI(AESquantparams) # get new elements/peaks list
        newelems.set(', '.join(newelemlist))
        newtext='Peaks to be labeled: '+', '.join(newelemlist)
        plotelemstr.set(newtext)
    def abort(event):
        choice.set('abort')
        root.destroy()
    def plot(event):
        choice.set('plot')
        root.destroy()
    d=tk.Button(root, text='Change labelled element peaks')
    d.bind('<Button-1>', changeelems)
    d.grid(row=8, column=0)
    d=tk.Button(root, text='Plot')
    d.bind('<Button-1>', plot)
    d.grid(row=8, column=1)
    d=tk.Button(root, text='Abort')
    d.bind('<Button-1>', abort)
    d.grid(row=8, column=2)
    root.mainloop()
    mychoice=choice.get()
    if mychoice=='plot':
        # Set up kwargs for plot (rebuilt from the dialog state)
        kwargs={}
        if filestr.get()!='': # optional subset of files based on entered number
            filenumlist=parsefilenums(filestr.get())
            kwargs.update({'filesubset':filenumlist})
        if backfitbool.get(): # plot backgrounds (stored in each Augerfile)
            kwargs.update({'addbackfit':True})
        if backfitptsbool.get(): # add points used for fitting background regions
            kwargs.update({'backfitpts':True})
        if plotelemsbool.get(): # label elemental peak positions
            kwargs.update({'plotelems':Elements})
        if smdifbool.get(): # label amplitude points on smooth-diff plot
            kwargs.update({'smdifpts':True})
        # Now create plotrange arg string (elems + evrange) based on radiobutton choice
        plotrange=[]
        if rangechoice.get()=='elems':
            plotrange.extend(Elements)
        elif rangechoice.get()=='evrange':
            tempstr=xrangestr.get()
            plotrange.extend(tempstr.split(',')) # parse into strings if necessary (list must be passed)
        elif rangechoice.get()=='both':
            plotrange.extend(Elements) # start with elements list
            tempstr=xrangestr.get()
            plotrange.extend(tempstr.split(',')) # add ev ranges
        elif rangechoice.get()=='full':
            # set to maximum possible range.. will be reduced if larger than data range
            plotrange.append('0-2500') # parse into strings if necessary (list must be passed)
        # get and pass PDF name string as arg (default name is prefilled)
        myPDFname=PDFnamestr.get()
        '''
        if newelems!='': # use newly assigned values
            kwargs.update({'plotelems':newelems.get()})
        '''
        reportderivcnt(spelist, plotrange, AESquantparams, Backfitlog, Smdifpeakslog, PDFname=myPDFname, **kwargs)
    return kwargs
# old version of scattercompplot before outlier determination/plotting
def plotcntsmajor(Params, areanum):
    ''' 2x3 plot of Si, Mg, S, Fe, C/Ca, and O counts regions with background fits
    Params -- log row (Series) for a single spe file; Params.Filename is the csv to load
    areanum -- spatial area number (selects CountsN / BackfitN columns)
    calls findevbreaks for multiplex energy-region breaks (plotted as red vlines);
    ideal peak positions are marked with blue vlines
    '''
    AugerFileName = Params.Filename # filename if logmatch is series
    #numareas=int(Params.Areas)
    #myareas=parseareas(areas, numareas) # set of areas for plotting
    Augerfile = pd.read_csv(AugerFileName) # reads entire spectra into df
    # myplotrange=(Augerfile['Energy'].min(),Augerfile['Energy'].max())
    fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16,9)) # 2 by 3 axes array
    colname = 'Counts' + str(areanum)
    backname = 'Backfit' + str(areanum)
    # find multiplex evbreaks
    energyvals = findevbreaks(Params, Augerfile) # x energy vals for evbreaks in this multiplex (if they exist) as float
    # (emin, emax, subplot position, ideal peak energies) for each element region
    regions = [
        (115, 200, (0, 0), [150.3]),              # S
        (225, 320, (1, 0), [271, 291]),           # C/Ca
        (475, 535, (0, 1), [508]),                # O
        (560, 750, (1, 1), [595, 647.2, 702.2]),  # Fe (3 peaks)
        (1140, 1220, (0, 2), [1181.7]),           # Mg
        (1550, 1650, (1, 2), [1609]),             # Si
    ]
    for emin, emax, (axrow, axcol), idealpeaks in regions:
        thisax = axes[axrow, axcol]
        dataslice = Augerfile[(Augerfile['Energy'] > emin) & (Augerfile['Energy'] < emax)]
        if not dataslice.empty: # only plottable if data exists in this range
            dataslice.plot(x='Energy', y=colname, ax=thisax)
            dataslice.plot(x='Energy', y=backname, ax=thisax)
            for idealev in idealpeaks: # blue vline at each ideal peak position
                thisax.axvline(x=idealev, color='b')
            for val in energyvals: # red vline at multiplex energy break if present
                if emin < val < emax:
                    thisax.axvline(x=val, color='r')
    return
# Legacy versions replaced by above generalized functions
def reportpeaksmajor(paramlog, addgauss=True):
    ''' 2x3 PDF plot report (Peak_report.pdf) of Si, Mg, S, Fe, C/Ca, and O direct peaks
    paramlog -- log df, one row per spe file (needs Filename, Areas, Filenumber cols)
    addgauss -- also overlay the Gaussian fit column (GaussN) when present in the csv
    pass list of files and selected background regions from automated fitting
    '''
    with PdfPages('Peak_report.pdf') as pdf:
        for index,row in paramlog.iterrows(): # iterrows avoids reindexing problems
            # BUGFIX: use the iterrows row directly instead of paramlog.iloc[index] --
            # iterrows yields index LABELS, so positional .iloc lookup returned the
            # wrong row whenever paramlog carried a non-default index
            # (sibling reportSDmajor already uses label-based access)
            AugerFileName=row['Filename']
            numareas=int(row['Areas'])
            Augerfile=pd.read_csv(AugerFileName) # reads entire spectra into df (all areas)
            myplotrange=(Augerfile['Energy'].min(),Augerfile['Energy'].max()) # same range for all areas in spe
            filenumber=row['Filenumber'] # retrieve filenumber
            for i in range(0,numareas): # create plot for each area
                areanum=i+1 #
                fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16,9)) # 2 by 3 axes array
                peakname='Peaks'+str(areanum)
                gaussname='Gauss'+str(areanum)
                mytitle=str(filenumber)+' area #'+str(areanum)
                plt.suptitle(mytitle)
                # S region
                if myplotrange[0] < 135 and myplotrange[1] > 165:
                    Augerslice=Augerfile[(Augerfile['Energy']>135) & (Augerfile['Energy']<165)]
                    axes[0,0].axvline(x=150.3, color='b') # at ideal direct peak S position
                    if not Augerslice.empty:
                        Augerslice.plot(x='Energy', y=peakname, ax=axes[0,0]) # S region
                        if addgauss==True and gaussname in Augerslice.dtypes.index: # ensure Gaussian fit col exists
                            Augerslice.plot(x='Energy', y=gaussname, ax=axes[0,0]) # S region
                # C/Ca region
                if myplotrange[0] < 275 and myplotrange[1] > 305:
                    Augerslice=Augerfile[(Augerfile['Energy']>275) & (Augerfile['Energy']<305)]
                    if not Augerslice.empty:
                        Augerslice.plot(x='Energy', y=peakname, ax=axes[1,0]) # C/Ca region
                        if addgauss==True and gaussname in Augerslice.dtypes.index:
                            Augerslice.plot(x='Energy', y=gaussname, ax=axes[1,0]) # C/Ca region
                        axes[1,0].axvline(x=291, color='b') # at ideal direct peak Ca position
                # Fe region
                if myplotrange[0] < 625 and myplotrange[1] > 725:
                    Augerslice=Augerfile[(Augerfile['Energy']>625) & (Augerfile['Energy']<725)]
                    if not Augerslice.empty:
                        Augerslice.plot(x='Energy', y=peakname, ax=axes[1,1]) # Fe region
                        if addgauss==True and gaussname in Augerslice.dtypes.index:
                            Augerslice.plot(x='Energy', y=gaussname, ax=axes[1,1])
                        axes[1,1].axvline(x=648, color='b') # at ideal direct peak Fe2 position
                        axes[1,1].axvline(x=702, color='b') # at ideal direct peak Fe3 position
                # Mg region
                if myplotrange[0] < 1165 and myplotrange[1] > 1200:
                    Augerslice=Augerfile[(Augerfile['Energy']>1165) & (Augerfile['Energy']<1200)]
                    if not Augerslice.empty:
                        Augerslice.plot(x='Energy', y=peakname, ax=axes[0,2]) # Mg region
                        if addgauss==True and gaussname in Augerslice.dtypes.index:
                            Augerslice.plot(x='Energy', y=gaussname, ax=axes[0,2])
                        axes[0,2].axvline(x=1185, color='b') # at ideal Mg position
                # Si region
                if myplotrange[0] < 1590 and myplotrange[1] > 1625:
                    Augerslice=Augerfile[(Augerfile['Energy']>1590) & (Augerfile['Energy']<1625)]
                    if not Augerslice.empty:
                        Augerslice.plot(x='Energy', y=peakname, ax=axes[1,2]) # Si2 region
                        if addgauss==True and gaussname in Augerslice.dtypes.index:
                            Augerslice.plot(x='Energy', y=gaussname, ax=axes[1,2])
                        axes[1,2].axvline(x=1614, color='b') # at ideal Si position
                pdf.savefig(fig)
            plt.close('all') # close all open figures
    return
def reportSDmajor(paramlog, Smdifpeakslog, PDFname='SDplots_report.pdf'):
    ''' 2x3 plot of Si, Mg, S, Fe, C/Ca, and O
    pass pre-sliced params and peaks
    paramlog -- log df, one row per spe file (needs Filename, Areas, Filenumber cols)
    Smdifpeakslog -- smooth-differentiated peak log (amplitudes/positions) for all files
    PDFname -- name of multi-page PDF report written to current folder
    Each spatial area gets one 2x3 page of S7D7 (smoothed derivative) plots; red
    scatter pts mark detected neg/pos peaks, blue vlines mark ideal peak positions,
    red vlines mark multiplex energy-region breaks and noise amplitude bars.
    '''
    with PdfPages(PDFname) as pdf:
        for index,row in paramlog.iterrows(): # iterrows avoids reindexing problems
            AugerFileName=paramlog.loc[index]['Filename']
            numareas=int(paramlog.loc[index]['Areas'])
            Augerfile=pd.read_csv(AugerFileName) # reads entire spectra into df (all areas)
            # myplotrange=(Augerfile['Energy'].min(),Augerfile['Energy'].max()) # same range for all areas in spe
            Params=paramlog.loc[index] # grab row for this spe file as Series
            filenumber=Params.Filenumber # retrieve filenumber
            mypeaks=Smdifpeakslog[(Smdifpeakslog['Filename']==AugerFileName)] # retrieve assoc. subset of peaks data
            energyvals=findevbreaks(Params, Augerfile) # get x energy vals for evbreaks in this multiplex (if they exist) as float
            for i in range(0,numareas): # create plot for each area
                areanum=i+1 #
                Peaks=mypeaks[(mypeaks['Areanumber']==areanum)] # then only this areanum
                fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16,9)) # 2 by 3 axes array
                colname='S7D7'+str(areanum) # smooth-diff data column for this area
                mytitle=str(filenumber)+' area #'+str(areanum)
                plt.suptitle(mytitle)
                # S region
                Augerslice=Augerfile[(Augerfile['Energy']>115) & (Augerfile['Energy']<200)]
                if not Augerslice.empty: # skip entire plot if no data
                    Augerslice.plot(x='Energy', y=colname, ax=axes[0,0]) # S region
                    plotpts=Peaks[(Peaks['Peakenergy']>115) & (Peaks['Peakenergy']<200)]
                    if not plotpts.empty:
                        # red scatter at detected negative and positive s7d7 peaks
                        plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[0,0], color='r')
                        plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[0,0], color='r')
                        titlestring=maketitlestring(plotpts)
                        axes[0,0].set_title(titlestring, fontsize=10)
                        axes[0,0].axvline(x=154, color='b') # at ideal S position
                        noisebars=makenoisebars(plotpts) # vert lines showing noise ampl at low and high energies (list of two 3-tuples)
                        axes[0,0].vlines(x=noisebars[0][0], ymin=noisebars[0][1], ymax = noisebars[0][2], linewidth=2, color='r')
                        axes[0,0].vlines(x=noisebars[1][0], ymin=noisebars[1][1], ymax = noisebars[1][2], linewidth=2, color='r')
                    # add red vert line at multiplex energy break if present
                    for j, val in enumerate(energyvals):
                        if val > 115 and val < 200:
                            axes[0,0].axvline(x=val, color='r') # on counts plot
                # C/Ca region
                Augerslice=Augerfile[(Augerfile['Energy']>225) & (Augerfile['Energy']<320)]
                if not Augerslice.empty:
                    Augerslice.plot(x='Energy', y=colname, ax=axes[1,0]) # C/Ca region
                    axes[1,0].axvline(x=276, color='b') # at ideal C position
                    axes[1,0].axvline(x=296, color='b') # at ideal Ca position
                    plotpts=Peaks[(Peaks['Peakenergy']>225) & (Peaks['Peakenergy']<320)]
                    if not plotpts.empty:
                        plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[1,0], color='r')
                        plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[1,0], color='r')
                        titlestring=maketitlestring(plotpts)
                        axes[1,0].set_title(titlestring, fontsize=10)
                        noisebars=makenoisebars(plotpts) # vert lines showing noise ampl at low and high energies (list of two 3-tuples)
                        axes[1,0].vlines(x=noisebars[0][0], ymin=noisebars[0][1], ymax = noisebars[0][2], linewidth=2, color='r')
                        axes[1,0].vlines(x=noisebars[1][0], ymin=noisebars[1][1], ymax = noisebars[1][2], linewidth=2, color='r')
                    # add red vert line at multiplex energy break if present
                    for j, val in enumerate(energyvals):
                        if val > 225 and val < 320:
                            axes[1,0].axvline(x=val, color='r') # on counts plot
                # O regions
                Augerslice=Augerfile[(Augerfile['Energy']>475) & (Augerfile['Energy']<535)]
                if not Augerslice.empty:
                    Augerslice.plot(x='Energy', y=colname, ax=axes[0,1]) # O region
                    axes[0,1].axvline(x=513, color='b') # at ideal O position
                    plotpts=Peaks[(Peaks['Peakenergy']>475) & (Peaks['Peakenergy']<535)]
                    if not plotpts.empty:
                        plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[0,1], color='r')
                        plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[0,1], color='r')
                        titlestring=maketitlestring(plotpts)
                        axes[0,1].set_title(titlestring, fontsize=10)
                        noisebars=makenoisebars(plotpts) # vert lines showing noise ampl at low and high energies (list of two 3-tuples)
                        axes[0,1].vlines(x=noisebars[0][0], ymin=noisebars[0][1], ymax = noisebars[0][2], linewidth=2, color='r')
                        axes[0,1].vlines(x=noisebars[1][0], ymin=noisebars[1][1], ymax = noisebars[1][2], linewidth=2, color='r')
                    for j, val in enumerate(energyvals):
                        if val > 475 and val < 535:
                            axes[0,1].axvline(x=val, color='r') # on counts plot
                # Fe region
                Augerslice=Augerfile[(Augerfile['Energy']>560) & (Augerfile['Energy']<750)]
                if not Augerslice.empty:
                    Augerslice.plot(x='Energy', y=colname, ax=axes[1,1]) # Fe region
                    axes[1,1].axvline(x=600, color='b') # at ideal Fe1 position
                    axes[1,1].axvline(x=654, color='b') # at ideal Fe2 position
                    axes[1,1].axvline(x=707, color='b') # at ideal Fe3 position
                    plotpts=Peaks[(Peaks['Peakenergy']>560) & (Peaks['Peakenergy']<750)]
                    if not plotpts.empty:
                        plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[1,1], color='r')
                        plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[1,1], color='r')
                        titlestring=maketitlestring(plotpts)
                        axes[1,1].set_title(titlestring, fontsize=10)
                        noisebars=makenoisebars(plotpts) # vert lines showing noise ampl at low and high energies (list of two 3-tuples)
                        axes[1,1].vlines(x=noisebars[0][0], ymin=noisebars[0][1], ymax = noisebars[0][2], linewidth=2, color='r')
                        axes[1,1].vlines(x=noisebars[1][0], ymin=noisebars[1][1], ymax = noisebars[1][2], linewidth=2, color='r')
                    for j, val in enumerate(energyvals):
                        if val > 560 and val < 750:
                            axes[1,1].axvline(x=val, color='r') # on counts plot
                # Mg region
                Augerslice=Augerfile[(Augerfile['Energy']>1140) & (Augerfile['Energy']<1220)]
                if not Augerslice.empty:
                    Augerslice.plot(x='Energy', y=colname, ax=axes[0,2]) # Mg region
                    axes[0,2].axvline(x=1185, color='b') # at ideal Mg position
                    plotpts=Peaks[(Peaks['Peakenergy']>1140) & (Peaks['Peakenergy']<1220)]
                    if not plotpts.empty:
                        plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[0,2], color='r')
                        plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[0,2], color='r')
                        titlestring=maketitlestring(plotpts)
                        axes[0,2].set_title(titlestring, fontsize=10)
                        noisebars=makenoisebars(plotpts) # vert lines showing noise ampl at low and high energies (list of two 3-tuples)
                        axes[0,2].vlines(x=noisebars[0][0], ymin=noisebars[0][1], ymax = noisebars[0][2], linewidth=2, color='r')
                        axes[0,2].vlines(x=noisebars[1][0], ymin=noisebars[1][1], ymax = noisebars[1][2], linewidth=2, color='r')
                    for j, val in enumerate(energyvals):
                        if val > 1140 and val < 1220:
                            axes[0,2].axvline(x=val, color='r') # on counts plot
                # Si region
                Augerslice=Augerfile[(Augerfile['Energy']>1550) & (Augerfile['Energy']<1650)]
                if not Augerslice.empty:
                    Augerslice.plot(x='Energy', y=colname, ax=axes[1,2]) # Si2 region
                    axes[1,2].axvline(x=1614, color='b') # at ideal Si position
                    plotpts=Peaks[(Peaks['Peakenergy']>1550) & (Peaks['Peakenergy']<1650)]
                    if not plotpts.empty:
                        plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[1,2], color='r')
                        plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[1,2], color='r')
                        titlestring=maketitlestring(plotpts)
                        axes[1,2].set_title(titlestring, fontsize=10)
                        noisebars=makenoisebars(plotpts) # vert lines showing noise ampl at low and high energies (list of two 3-tuples)
                        axes[1,2].vlines(x=noisebars[0][0], ymin=noisebars[0][1], ymax = noisebars[0][2], linewidth=2, color='r')
                        axes[1,2].vlines(x=noisebars[1][0], ymin=noisebars[1][1], ymax = noisebars[1][2], linewidth=2, color='r')
                    for j, val in enumerate(energyvals):
                        if val > 1550 and val < 1650:
                            axes[1,2].axvline(x=val, color='r') # on counts plot
                pdf.savefig(fig)
                plt.close(fig) # closes recently displayed figure (since it's saved in PDF report)
    return
def plotcntsmajor(Params, areanum):
    ''' 2x3 plot of counts and background fits for the major elements
    Si, Mg, S, Fe, C/Ca, and O

    Args:
        Params: single row (Series) from the spe parameter log; Params.Filename
            locates the csv with the spectral data
        areanum: spatial area number; selects the Counts<areanum> and
            Backfit<areanum> columns
    calls findevbreaks to mark multiplex energy-region breaks in red
    '''
    # (energy window in eV, axes cell, ideal peak energies in eV)
    # TODO look up these ideal peak positions from AESquantparams
    regions = [
        ((115, 200), (0, 0), [150.3]),              # S
        ((225, 320), (1, 0), [271, 291]),           # C, Ca
        ((475, 535), (0, 1), [508]),                # O
        ((560, 750), (1, 1), [595, 647.2, 702.2]),  # Fe1, Fe2, Fe3
        ((1140, 1220), (0, 2), [1181.7]),           # Mg
        ((1550, 1650), (1, 2), [1609]),             # Si
    ]
    AugerFileName = Params.Filename  # filename if logmatch is series
    Augerfile = pd.read_csv(AugerFileName)  # reads entire spectrum into df
    fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16, 9))  # 2 by 3 axes array
    colname = 'Counts' + str(areanum)
    backname = 'Backfit' + str(areanum)
    # find multiplex evbreaks (x energy values as float, if they exist)
    energyvals = findevbreaks(Params, Augerfile)
    for (emin, emax), (row, col), ideals in regions:
        ax = axes[row, col]
        Augerslice = Augerfile[(Augerfile['Energy'] > emin) & (Augerfile['Energy'] < emax)]
        if Augerslice.empty:  # only plottable if data exists in this range
            continue
        Augerslice.plot(x='Energy', y=colname, ax=ax)  # counts
        Augerslice.plot(x='Energy', y=backname, ax=ax)  # background fit
        for ideal in ideals:  # blue vertical lines at ideal peak positions
            ax.axvline(x=ideal, color='b')
        for val in energyvals:  # red lines at multiplex energy breaks in range
            if emin < val < emax:
                ax.axvline(x=val, color='r')
    return
def plotSDmajor(Params, Peaks, areanum):
    ''' 2x3 plot of smoothed-differentiated (S7D7) spectra for the major
    elements Si, Mg, S, Fe, C/Ca, and O, with smdif quant points overlaid

    Args:
        Params: single row (Series) from the spe parameter log; Params.Filename
            locates the csv with the spectral data
        Peaks: smdif peak log already sliced for this spectrum/area
        areanum: spatial area number; selects the S7D7<areanum> column
    '''
    # (energy window in eV, axes cell, ideal peak energies in eV)
    # TODO look up these ideal peak positions from AESquantparams
    regions = [
        ((115, 200), (0, 0), [154]),            # S
        ((225, 320), (1, 0), [276, 296]),       # C, Ca
        ((475, 535), (0, 1), [513]),            # O
        ((560, 750), (1, 1), [600, 654, 707]),  # Fe1, Fe2, Fe3
        ((1140, 1220), (0, 2), [1185]),         # Mg
        ((1550, 1650), (1, 2), [1614]),         # Si
    ]
    AugerFileName = Params.Filename  # filename if logmatch is series
    Augerfile = pd.read_csv(AugerFileName)  # reads entire spectrum into df
    fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16, 9))  # 2 by 3 axes array
    colname = 'S7D7' + str(areanum)
    # find multiplex evbreaks (x energy values as float, if they exist)
    energyvals = findevbreaks(Params, Augerfile)
    for (emin, emax), (row, col), ideals in regions:
        ax = axes[row, col]
        Augerslice = Augerfile[(Augerfile['Energy'] > emin) & (Augerfile['Energy'] < emax)]
        if Augerslice.empty:  # only plottable if data exists in this range
            continue
        Augerslice.plot(x='Energy', y=colname, ax=ax)
        for ideal in ideals:  # blue vertical lines at ideal peak positions
            ax.axvline(x=ideal, color='b')
        plotpts = Peaks[(Peaks['Peakenergy'] > emin) & (Peaks['Peakenergy'] < emax)]
        if not plotpts.empty:  # plot smdif quant points if present
            plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=ax, color='r')
            plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=ax, color='r')
            ax.set_title(maketitlestring(plotpts), fontsize=10)
            # vert lines showing noise amplitude at low and high energies
            # (makenoisebars returns a list of two (x, ymin, ymax) tuples)
            for xpos, ylow, yhigh in makenoisebars(plotpts):
                ax.vlines(x=xpos, ymin=ylow, ymax=yhigh, linewidth=2, color='r')
        for val in energyvals:  # red lines at multiplex energy breaks in range
            if emin < val < emax:
                ax.axvline(x=val, color='r')
    return
def reportcountsmajor(paramlog, Smdifpeakslog, PDFname='countsplot_report.pdf'):
    ''' Write a multi-page PDF of 2x3 counts plots (Si, Mg, S, Fe, C/Ca, O),
    one page per area of each spe file in the parameter log

    Args:
        paramlog: dataframe of spe files; uses Filename, Areas, Filenumber
        Smdifpeakslog: unused here; kept for call compatibility  # NOTE(review)
        PDFname: name of the output PDF report
    REDUNDANT... just use reportcountsback with background switch
    '''
    # (energy window in eV, axes cell, ideal peak energies in eV)
    # TODO look up these ideal peak positions from AESquantparams
    regions = [
        ((115, 200), (0, 0), [154]),            # S
        ((225, 320), (1, 0), [276, 296]),       # C, Ca
        ((475, 535), (0, 1), [513]),            # O
        ((560, 750), (1, 1), [600, 654, 707]),  # Fe1, Fe2, Fe3
        ((1140, 1220), (0, 2), [1185]),         # Mg
        ((1550, 1650), (1, 2), [1614]),         # Si
    ]
    with PdfPages(PDFname) as pdf:
        for index, row in paramlog.iterrows():  # iterrows avoids reindexing problems
            AugerFileName = paramlog.loc[index]['Filename']
            numareas = int(paramlog.loc[index]['Areas'])
            Augerfile = pd.read_csv(AugerFileName)  # entire spectrum (all areas)
            # same energy range for all areas in spe
            myplotrange = (Augerfile['Energy'].min(), Augerfile['Energy'].max())
            Params = paramlog.loc[index]  # row for this spe file as Series
            filenumber = Params.Filenumber
            # x energy vals for evbreaks in this multiplex (if they exist) as float
            energyvals = findevbreaks(Params, Augerfile)
            for i in range(0, numareas):  # create plot for each area
                areanum = i + 1
                fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16, 9))
                colname = 'Counts' + str(areanum)
                plt.suptitle(str(filenumber) + ' area #' + str(areanum))
                for (emin, emax), (r, c), ideals in regions:
                    # only plot a region fully covered by the acquired range
                    if myplotrange[0] < emin and myplotrange[1] > emax:
                        Augerslice = Augerfile[(Augerfile['Energy'] > emin) & (Augerfile['Energy'] < emax)]
                        if not Augerslice.empty:
                            Augerslice.plot(x='Energy', y=colname, ax=axes[r, c])
                            for ideal in ideals:
                                # blue for ideal peak positions (was red for most
                                # regions, which clashed with the red evbreak lines
                                # and disagreed with the other plot functions)
                                axes[r, c].axvline(x=ideal, color='b')
                            for val in energyvals:  # red lines at multiplex breaks
                                if emin < val < emax:
                                    axes[r, c].axvline(x=val, color='r')
                pdf.savefig(fig)
            plt.close('all')  # close all open figures
def plotSDmajor(Params, Peaks, areanum):
    ''' 2x3 plot of smoothed-differentiated (S7D7) spectra for the major
    elements Si, Mg, S, Fe, C/Ca, and O, with smdif quant points overlaid

    NOTE(review): this is a byte-for-byte re-definition of plotSDmajor that
    already appears earlier in this module; being later, this copy is the one
    that takes effect at import time — consider deleting one of the two.

    Args:
        Params: single row (Series) from the spe parameter log; Params.Filename
            locates the csv with the spectral data
        Peaks: smdif peak log already sliced for this spectrum/area
        areanum: spatial area number; selects the S7D7<areanum> column
    '''
    # (energy window in eV, axes cell, ideal peak energies in eV)
    # TODO look up these ideal peak positions from AESquantparams
    regions = [
        ((115, 200), (0, 0), [154]),            # S
        ((225, 320), (1, 0), [276, 296]),       # C, Ca
        ((475, 535), (0, 1), [513]),            # O
        ((560, 750), (1, 1), [600, 654, 707]),  # Fe1, Fe2, Fe3
        ((1140, 1220), (0, 2), [1185]),         # Mg
        ((1550, 1650), (1, 2), [1614]),         # Si
    ]
    AugerFileName = Params.Filename  # filename if logmatch is series
    Augerfile = pd.read_csv(AugerFileName)  # reads entire spectrum into df
    fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16, 9))  # 2 by 3 axes array
    colname = 'S7D7' + str(areanum)
    # find multiplex evbreaks (x energy values as float, if they exist)
    energyvals = findevbreaks(Params, Augerfile)
    for (emin, emax), (row, col), ideals in regions:
        ax = axes[row, col]
        Augerslice = Augerfile[(Augerfile['Energy'] > emin) & (Augerfile['Energy'] < emax)]
        if Augerslice.empty:  # only plottable if data exists in this range
            continue
        Augerslice.plot(x='Energy', y=colname, ax=ax)
        for ideal in ideals:  # blue vertical lines at ideal peak positions
            ax.axvline(x=ideal, color='b')
        plotpts = Peaks[(Peaks['Peakenergy'] > emin) & (Peaks['Peakenergy'] < emax)]
        if not plotpts.empty:  # plot smdif quant points if present
            plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=ax, color='r')
            plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=ax, color='r')
            ax.set_title(maketitlestring(plotpts), fontsize=10)
            # vert lines showing noise amplitude at low and high energies
            # (makenoisebars returns a list of two (x, ymin, ymax) tuples)
            for xpos, ylow, yhigh in makenoisebars(plotpts):
                ax.vlines(x=xpos, ymin=ylow, ymax=yhigh, linewidth=2, color='r')
        for val in energyvals:  # red lines at multiplex energy breaks in range
            if emin < val < emax:
                ax.axvline(x=val, color='r')
    return
def fitcubic(df, areanum, elem, AugerFileName):
    '''Pass appropriate chunk from Auger spectral dataframe, perform cubic fit
    and return chunk with backfit column filled.

    Args:
        df: dataframe slice with 'Energy' and 'Counts<areanum>' columns
        areanum: spatial area number; selects Counts/Backfit column pair
        elem: element name, used only in the fitting-error message
        AugerFileName: source filename, used only in the fitting-error message
    Returns:
        (df, fitparams): df with 'Backfit<areanum>' set to the evaluated cubic;
        fitparams is (A, B, C, D) for A*x**3 + B*x**2 + C*x + D, or a tuple of
        'n/a' strings if the fit failed (df then returned unchanged).
    '''
    colname = 'Counts' + str(areanum)
    backfitname = 'Backfit' + str(areanum)
    xcol = df['Energy']
    ycol = df[colname]  # Counts1, Counts2 or whatever
    try:
        A, B, C, D = np.polyfit(xcol, ycol, 3)
    except Exception:  # narrowed from bare except; polyfit can raise LinAlgError etc.
        print('Fitting error for', elem, ' in file ', AugerFileName)
        fitparams = ('n/a', 'n/a', 'n/a', 'n/a')  # return all n/a
        return df, fitparams
    fitparams = (A, B, C, D)  # tuple of cubic poly fit coefficients
    # vectorized evaluation; replaces deprecated per-row df.set_value loop
    df[backfitname] = np.polyval(fitparams, df['Energy'])
    return df, fitparams
def fitCapeak(df, areanum, elem, AugerFileName):
    '''Pass appropriate chunk from Auger spectral dataframe, perform parabolic
    (2nd order) fit and return chunk with backfit column filled.
    (replaced with curve_fit version elsewhere)

    Args:
        df: dataframe slice with 'Energy' and 'Counts<areanum>' columns
        areanum: spatial area number; selects Counts/Backfit column pair
        elem: element name, used only in the fitting-error message
        AugerFileName: source filename, used only in the fitting-error message
    Returns:
        (df, fitparams): df with 'Backfit<areanum>' set to the evaluated
        parabola; fitparams is (A, B, C) for A*x**2 + B*x + C, or a tuple of
        'n/a' strings if the fit failed (df then returned unchanged).
    '''
    colname = 'Counts' + str(areanum)
    backfitname = 'Backfit' + str(areanum)
    xcol = df['Energy']
    ycol = df[colname]  # Counts1, Counts2 or whatever
    try:
        A, B, C = np.polyfit(xcol, ycol, 2)
    except Exception:  # narrowed from bare except; polyfit can raise LinAlgError etc.
        print('Fitting error for', elem, ' in file ', AugerFileName)
        fitparams = ('n/a', 'n/a', 'n/a')  # return all n/a
        return df, fitparams
    fitparams = (A, B, C)  # tuple of 2nd order poly fit coefficients
    # vectorized evaluation; replaces deprecated per-row df.set_value loop
    df[backfitname] = np.polyval(fitparams, df['Energy'])
    return df, fitparams
def scattercompplot(comp1, comp2, elemlist, joinlist=['Sample','Areanum'], basis=False):
    '''Pass two versions of composition calculation (using different lines or
    whatever) and compare major elements using scatter graphs, with a single
    point for each sample/area and a linear regression per element.

    Args:
        comp1, comp2: composition dataframes from the two calculations
        elemlist: peak names (e.g. Fe2); trailing digits are stripped so
            columns are element names (Fe), not peak names (Fe2)
        joinlist: columns for the inner merge (use either sample or filenumber)
        basis: if False (default) plot atomic %% columns ('%Fe'); if True plot
            raw basis columns
    Returns:
        merged dataframe (inner join of comp1/comp2) used for the plots
    NOTE(review): joinlist is a mutable default argument; safe only because it
    is never mutated here.
    '''
    elemlist = [re.match(r'\D+', i).group(0) for i in elemlist]
    if basis == False:  # use atomic % (the default), not basis for each element
        elemlist = ['%' + s for s in elemlist]
    numareas = len(elemlist)
    # set nrows and ncols for figure of proper size (pairs of elements per column)
    cols = divmod(numareas, 2)[0] + divmod(numareas, 2)[1]
    rows = 2 if numareas > 1 else 1
    fig, axes = plt.subplots(nrows=rows, ncols=cols)  # axes is array
    # merge dfs with comp1 and comp2 using inner join
    df = pd.merge(comp1, comp2, how='inner', on=joinlist, suffixes=('', 'b'))
    mycols = df.dtypes.index  # same columns
    outliers = pd.DataFrame(columns=mycols)  # empty dataframe for outlying points
    # was a bare expression `outliers['Element']` (KeyError on a frame without
    # that column); create the column as evidently intended
    outliers['Element'] = ''
    for i, elem in enumerate(elemlist):
        # determine which subplot to use
        rownum = 0 if (i + 1) % 2 == 1 else 1
        colnum = int((i + 1) / 2.1)  # maps successive element pairs onto columns
        xcol = elem
        ycol = elem + 'b'  # same element from second dataset
        if numareas == 1:  # single subplot: axes has no [#,#] indexing
            df.plot.scatter(x=xcol, y=ycol, ax=axes)
        else:
            df.plot.scatter(x=xcol, y=ycol, ax=axes[rownum, colnum])
        # linear regression: fitting, plot and add labels
        data1 = df[elem]  # just this data column
        data2 = df[elem + 'b']
        slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(data1, data2)
        text1 = str(round(slope, 2)) + ' *x +' + str(round(intercept, 2))
        text2 = 'R = ' + str(round(r_value, 3)) + ' p = ' + str(round(p_value, 3))
        xmax = max(max(data1), max(data2)) * 1.1  # slightly larger than dataset max
        x = np.linspace(0, xmax, 100)  # x range for the regression line
        if numareas == 1:  # single subplot handled separately
            axes.text(0.025, 0.9, text1, fontsize=12, transform=axes.transAxes)
            axes.text(0.025, 0.8, text2, fontsize=12, transform=axes.transAxes)
            plt.plot(x, x * slope + intercept, color='r')
        else:  # typical multi-element plot
            axes[rownum, colnum].text(0.025, 0.9, text1, fontsize=12, transform=axes[rownum, colnum].transAxes)
            axes[rownum, colnum].text(0.025, 0.8, text2, fontsize=12, transform=axes[rownum, colnum].transAxes)
            plt.axes(axes[rownum, colnum])  # set correct axes as active
            plt.plot(x, x * slope + intercept, color='r')
        # NOTE(review): outliers is overwritten each pass and never returned —
        # presumably meant to be accumulated; confirm against returnoutliers
        outliers = returnoutliers(df, slope, intercept, elem, basis=False)
    return df
# old version when entire datachunk including evbreaks was passed
def savgol(counts, evbreaks):
    '''Perform python smooth-diff used to guide selection of background regions
    perform this in chunks between evbreaks, works for survey or multiplex
    returns list with smooth-diff columns

    Args:
        counts: full-spectrum counts as a sequence of numbers
        evbreaks: boundary indices into counts; region i is
            counts[evbreaks[i]:evbreaks[i+1]]
    Returns:
        list of Savitzky-Golay 2nd-derivative values, same length as counts
        (zero-padded at the end if the piecewise result comes up short)
    '''
    savgollist = []  # holds savgol data over the full spectrum
    for i in range(0, len(evbreaks) - 1):
        # slice into separate multiplex regions and process each separately
        thisreg = np.asarray(counts[evbreaks[i]:evbreaks[i + 1]])
        window_size = 11
        deriv = 2
        order = 2  # order of savgol fit
        rate = 1
        order_range = range(order + 1)
        half_window = (window_size - 1) // 2
        # precompute coefficients; plain ndarray replaces deprecated np.mat
        # (np.matrix is deprecated and removed in NumPy 2.x)
        b = np.array([[k**j for j in order_range]
                      for k in range(-half_window, half_window + 1)], dtype=float)
        # pinv row [deriv] gives the window-sized derivative filter
        m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
        # Pad the signal at the extremes with values taken from the signal itself
        firstvals = thisreg[0] - np.abs(thisreg[1:half_window + 1][::-1] - thisreg[0])
        lastvals = thisreg[-1] + np.abs(thisreg[-half_window - 1:-1][::-1] - thisreg[-1])
        thisreg = np.concatenate((firstvals, thisreg, lastvals))
        # Now convolve input signal and sav-gol kernel (1D array)
        thisreg = np.convolve(thisreg, m[::-1], mode='valid')
        savgollist.extend(thisreg.tolist())  # append region to full-spectrum list
    while len(savgollist) < len(counts):  # can be one element too short
        savgollist.append(0)
    return savgollist  # savitsky-golay smooth diff over same full region
# old (not generalized) version of counts report iwth backgrounds
def reportcountsback(paramlog, plotelems, AESquantparams, plotback=True, PDFname='countsback_report.pdf'):
    ''' 2x3 plot of Si, Mg, S, Fe, C/Ca, and O
    pass list of files and selected background regions from automated fitting
    background fits themselves stored with auger csv files
    optional pass of backfitlog (w/ points defining region boundary for background fitting useful for troubleshooting fits)
    evbreaks from multiplex plotted as red lines (often different dwell times for different elements)
    plotback switch -- whether or not to plot

    NOTE(review): legacy/dead code.  The body references `bflog` and `mypeaks`,
    neither of which is defined here (the switch argument is named `plotback`),
    so running this raises NameError.  Superseded by the generalized
    reportcountsback defined later in this module; kept for reference only.
    '''
    with PdfPages(PDFname) as pdf:
        for index,row in paramlog.iterrows(): # iterrows avoids reindexing problems
            AugerFileName=paramlog.loc[index]['Filename']
            numareas=int(paramlog.loc[index]['Areas'])
            Augerfile=pd.read_csv(AugerFileName) # reads entire spectra into df (all areas)
            myplotrange=(Augerfile['Energy'].min(),Augerfile['Energy'].max()) # same range for all areas in spe
            Params=paramlog.loc[index] # grab row for this spe file as Series
            filenumber=Params.Filenumber # retrieve filenumber
            energyvals=findevbreaks(Params, Augerfile) # get x energy vals for evbreaks in this multiplex (if they exist) as float
            for i in range(0,numareas): # create plot for each area
                areanum=i+1 #
                # NOTE(review): `mypeaks` is undefined in this scope -- presumably a
                # backfit/peak log df was meant to be passed in; confirm before reuse.
                # (Inner loops below also reuse `i`, shadowing this area index --
                # harmless since areanum is captured first.)
                Peaks=mypeaks[(mypeaks['Area']==areanum)] # get subset of peaks for this area number
                fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(16,9)) # 2 by 3 axes array
                colname='Counts'+str(areanum)
                backname='Backfit'+str(areanum)
                mytitle=str(filenumber)+' area #'+str(areanum)
                plt.suptitle(mytitle)
                # S region
                if myplotrange[0] < 115 and myplotrange[1] > 200:
                    Augerslice=Augerfile[(Augerfile['Energy']>115) & (Augerfile['Energy']<200)]
                    if not Augerslice.empty:
                        # find elemental lines in this keV range
                        thisrange='115-200'
                        elemlines=getelemenergy(plotelems, thisrange, AESquantparams)
                        # list of tuples with energy,elemname
                        for i, elemtuple in enumerate(elemlines):
                            # elemtuple[0] is energy and [1] is element symbol
                            axes[0,0].axvline(x=elemtuple[0], color='b') # O line
                            axes[0,0].text(elemtuple[0],-250, elemtuple[1],rotation=90) # use standard -250 y val
                    Augerslice.plot(x='Energy', y=colname, ax=axes[0,0]) # S region
                    # NOTE(review): `bflog` is undefined (the parameter is `plotback`);
                    # the `!=False` truthiness test also looks like a leftover -- verify.
                    if bflog!=False: # plotting of Auger background fits from integral quant method
                        Augerslice.plot(x='Energy', y=backname, ax=axes[0,0])
                        plotpts=Peaks[(Peaks['Element']=='S')]
                        indexvals=[] # list of single points from Augerfile to plot as different colored scatter
                        for i in range(0,len(plotpts)): # fit boundaries for S
                            indexvals.append(int(plotpts.iloc[i]['Lower1']))
                            indexvals.append(int(plotpts.iloc[i]['Lower2']))
                            indexvals.append(int(plotpts.iloc[i]['Upper1']))
                            indexvals.append(int(plotpts.iloc[i]['Upper2']))
                        Augerpts=Augerfile.iloc[indexvals] # boundaries of fitted regions
                        Augerpts.plot.scatter(x='Energy', y=colname, ax=axes[0,0])
                    for j, val in enumerate(energyvals): # any possible evbreaks in multiplex
                        if val > 115 and val < 200:
                            axes[0,0].axvline(x=val, color='r') # on counts plot
                # C/Ca region
                if myplotrange[0] < 225 and myplotrange[1] > 330:
                    Augerslice=Augerfile[(Augerfile['Energy']>225) & (Augerfile['Energy']<330)]
                    if not Augerslice.empty:
                        # find elemental lines in this keV range
                        thisrange='225-330'
                        elemlines=getelemenergy(plotelems, thisrange, AESquantparams)
                        # list of tuples with energy,elemname
                        for i, elemtuple in enumerate(elemlines):
                            # elemtuple[0] is energy and [1] is element symbol
                            axes[1,0].axvline(x=elemtuple[0], color='b') # O line
                            axes[1,0].text(elemtuple[0],-250, elemtuple[1],rotation=90) # use standard -250 y val
                    Augerslice.plot(x='Energy', y=colname, ax=axes[1,0]) # C/Ca region
                    if bflog!=False:
                        Augerslice.plot(x='Energy', y=backname, ax=axes[1,0])
                        plotpts=Peaks[(Peaks['Element']=='Ca')]
                        indexvals=[] # list of single points from Augerfile to plot as different colored scatter
                        for i in range(0,len(plotpts)): # fit boundaries for Ca
                            indexvals.append(int(plotpts.iloc[i]['Lower1']))
                            indexvals.append(int(plotpts.iloc[i]['Lower2']))
                            indexvals.append(int(plotpts.iloc[i]['Upper1']))
                            indexvals.append(int(plotpts.iloc[i]['Upper2']))
                        Augerpts=Augerfile.iloc[indexvals] # boundaries of fitted regions
                        Augerpts.plot.scatter(x='Energy', y=colname, ax=axes[1,0])
                    # add red vert line at multiplex energy break if present
                    for j, val in enumerate(energyvals):
                        if val > 225 and val < 330:
                            axes[1,0].axvline(x=val, color='r') # on counts plot
                # O regions (Currently skip background fitting for O )
                if myplotrange[0] < 475 and myplotrange[1] > 535:
                    Augerslice=Augerfile[(Augerfile['Energy']>475) & (Augerfile['Energy']<535)]
                    if not Augerslice.empty:
                        # find elemental lines in this keV range
                        thisrange='475-535'
                        elemlines=getelemenergy(plotelems, thisrange, AESquantparams)
                        # list of tuples with energy,elemname
                        for i, elemtuple in enumerate(elemlines):
                            # elemtuple[0] is energy and [1] is element symbol
                            # NOTE(review): labels drawn on axes[1,0] (C/Ca cell) while the
                            # O data plots to axes[0,1] below -- looks like a copy/paste
                            # slip in this legacy version; verify intended target axes.
                            axes[1,0].axvline(x=elemtuple[0], color='b') # O line
                            axes[1,0].text(elemtuple[0],-250, elemtuple[1],rotation=90) # use standard -250 y val
                    Augerslice.plot(x='Energy', y=colname, ax=axes[0,1]) # O region
                    for j, val in enumerate(energyvals):
                        if val > 475 and val < 535:
                            axes[0,1].axvline(x=val, color='r') # on counts plot
                # Fe region
                if myplotrange[0] < 560 and myplotrange[1] > 750:
                    Augerslice=Augerfile[(Augerfile['Energy']>560) & (Augerfile['Energy']<750)]
                    if not Augerslice.empty:
                        # find elemental lines in this keV range
                        thisrange='560-750'
                        elemlines=getelemenergy(plotelems, thisrange, AESquantparams)
                        # list of tuples with energy,elemname
                        for i, elemtuple in enumerate(elemlines):
                            # elemtuple[0] is energy and [1] is element symbol
                            axes[1,1].axvline(x=elemtuple[0], color='b') # O line
                            axes[1,1].text(elemtuple[0],-250, elemtuple[1],rotation=90) # use standard -250 y val
                    Augerslice.plot(x='Energy', y=colname, ax=axes[1,1]) # Fe region
                    if bflog!=False:
                        Augerslice.plot(x='Energy', y=backname, ax=axes[1,1])
                        plotpts=Peaks[Peaks['Element'].str.contains('Fe', na=False, case=False)] # match all Fe peaks
                        indexvals=[] # list of single points from Augerfile to plot as different colored scatter
                        for i in range(0,len(plotpts)): # should get values from Fe2 and Fe3
                            indexvals.append(int(plotpts.iloc[i]['Lower1']))
                            indexvals.append(int(plotpts.iloc[i]['Lower2']))
                            indexvals.append(int(plotpts.iloc[i]['Upper1']))
                            indexvals.append(int(plotpts.iloc[i]['Upper2']))
                        Augerpts=Augerfile.iloc[indexvals] # boundaries of fitted regions
                        Augerpts.plot.scatter(x='Energy', y=colname, ax=axes[1,1])
                    for j, val in enumerate(energyvals):
                        if val > 560 and val < 750:
                            axes[1,1].axvline(x=val, color='r') # on counts plot
                # Mg region
                if myplotrange[0] < 1140 and myplotrange[1] > 1220:
                    Augerslice=Augerfile[(Augerfile['Energy']>1140) & (Augerfile['Energy']<1220)]
                    if not Augerslice.empty:
                        # find elemental lines in this keV range
                        thisrange='1140-1220'
                        elemlines=getelemenergy(plotelems, thisrange, AESquantparams)
                        # list of tuples with energy,elemname
                        for i, elemtuple in enumerate(elemlines):
                            # elemtuple[0] is energy and [1] is element symbol
                            axes[0,2].axvline(x=elemtuple[0], color='b') # O line
                            axes[0,2].text(elemtuple[0],-250, elemtuple[1],rotation=90) # use standard -250 y val
                    Augerslice.plot(x='Energy', y=colname, ax=axes[0,2]) # Mg region
                    if bflog!=False:
                        Augerslice.plot(x='Energy', y=backname, ax=axes[0,2])
                        plotpts=Peaks[(Peaks['Element']=='Mg')]
                        indexvals=[] # list of single points from Augerfile to plot as different colored scatter
                        for i in range(0,len(plotpts)): # should get values from Fe2 and Fe3
                            indexvals.append(int(plotpts.iloc[i]['Lower1']))
                            indexvals.append(int(plotpts.iloc[i]['Lower2']))
                            indexvals.append(int(plotpts.iloc[i]['Upper1']))
                            indexvals.append(int(plotpts.iloc[i]['Upper2']))
                        Augerpts=Augerfile.iloc[indexvals] # boundaries of fitted regions
                        Augerpts.plot.scatter(x='Energy', y=colname, ax=axes[0,2])
                    for j, val in enumerate(energyvals):
                        if val > 1140 and val < 1220:
                            axes[0,2].axvline(x=val, color='r') # on counts plot
                # Si region
                if myplotrange[0] < 1550 and myplotrange[1] > 1650:
                    Augerslice=Augerfile[(Augerfile['Energy']>1550) & (Augerfile['Energy']<1650)]
                    if not Augerslice.empty: # find elemental lines in this keV range
                        thisrange='1550-1650'
                        elemlines=getelemenergy(plotelems, thisrange, AESquantparams)
                        # list of tuples with energy,elemname
                        for i, elemtuple in enumerate(elemlines):
                            # elemtuple[0] is energy and [1] is element symbol
                            axes[1,2].axvline(x=elemtuple[0], color='b') # O line
                            axes[1,2].text(elemtuple[0],-250, elemtuple[1],rotation=90) # use standard -250 y val
                    Augerslice.plot(x='Energy', y=colname, ax=axes[1,2]) # Si2 region
                    if bflog!=False:
                        Augerslice.plot(x='Energy', y=backname, ax=axes[1,2])
                        plotpts=Peaks[(Peaks['Element']=='Si')]
                        indexvals=[] # list of single points from Augerfile to plot as different colored scatter
                        for i in range(0,len(plotpts)): # should get values from Fe2 and Fe3
                            indexvals.append(int(plotpts.iloc[i]['Lower1']))
                            indexvals.append(int(plotpts.iloc[i]['Lower2']))
                            indexvals.append(int(plotpts.iloc[i]['Upper1']))
                            indexvals.append(int(plotpts.iloc[i]['Upper2']))
                        Augerpts=Augerfile.iloc[indexvals] # boundaries of fitted regions
                        Augerpts.plot.scatter(x='Energy', y=colname, ax=axes[1,2])
                    for j, val in enumerate(energyvals):
                        if val > 1550 and val < 1650:
                            axes[1,2].axvline(x=val, color='r') # on counts plot
                pdf.savefig(fig)
    plt.close('all') # close all open figures
    return
def setplotrange(plotrange, Augerfile):
    ''' Determine the (lower, upper) energy bounds for a plot.

    Accepts an explicit "low-high" string, one of the known element symbols,
    or anything else (falls back to the full energy range of the spectrum).
    Commonly called by the Auger plot functions below.
    LEGACY .. .replaced by getplotboundaries'''
    # fixed per-element plot windows (eV)
    elem_windows = {
        'C': (236, 316),
        'Ca': (236, 336),
        'O': (470, 540),
        'Fe': (560, 747),
        'Mg': (1145, 1225),
        'Al': (1350, 1430),
        'Si': (1570, 1650),
    }
    if '-' in plotrange:  # explicit numeric range such as "100-200"
        pieces = plotrange.split('-')
        return (int(pieces[0]), int(pieces[1]))
    if plotrange in elem_windows:
        return elem_windows[plotrange]
    # default: full data range of this spectrum
    return (Augerfile.Energy.min(), Augerfile.Energy.max())
# old version of countsbackreport before using tk interface for args/kwargs
def reportcountsback(paramlog, plotelems, AESquantparams, backfitdf=False, PDFname='countsback_report.pdf'):
    ''' Multi-page PDF report of counts (and optional background fits) for each
    spe file / spatial area in paramlog.

    For every row of paramlog the csv spectrum is loaded, the energy ranges
    containing the requested elements are determined, and one PDF page per
    spatial area is produced with counts, background fit, element labels and
    multiplex energy-break markers.

    Args:
        paramlog (pd.DataFrame): Auger parameter log; needs 'Filename' and 'Areas'
        plotelems: elements whose energy regions should be plotted/labelled
        AESquantparams (pd.DataFrame): line energies etc. for element labelling
        backfitdf: optional DataFrame of background-fit boundary points
            (Lower1/Lower2/Upper1/Upper2 index columns); pass False to skip
        PDFname (str): output PDF file name
    Returns:
        None (side effect: writes PDFname)

    NOTE: redefines the earlier legacy reportcountsback above; this version
    handles arbitrary element lists instead of the fixed 2x3 layout.
    '''
    plt.ioff()  # suppress interactive figure windows during batch plotting
    with PdfPages(PDFname) as pdf:
        for index, row in paramlog.iterrows():  # iterrows avoids reindexing problems
            AugerFileName = paramlog.loc[index]['Filename']
            numareas = int(paramlog.loc[index]['Areas'])
            try:
                Augerfile = pd.read_csv(AugerFileName)  # entire spectrum, all areas
            except:
                print(AugerFileName, ' skipped ... not found.')
                continue
            Params = paramlog.loc[index]  # row for this spe file as Series
            energyvals = findevbreaks(Params, Augerfile)  # x energies of evbreaks (if any)
            # plot ranges for all regions containing data from plotelems
            plotranges = getplotboundaries(Augerfile, plotelems, AESquantparams)
            # backfitlog boundaries (lower1/2, upper1/2 index #s) help visualize the fits
            if isinstance(backfitdf, pd.DataFrame):
                thisfilebackpts = backfitdf[backfitdf['Filename'] == AugerFileName]
                plotbackpts = True
            else:
                plotbackpts = False
            for i in range(0, numareas):  # separate plot page for each area
                areanum = i + 1
                if plotbackpts == True:
                    # collect every lower1/lower2/upper1/upper2 boundary index
                    indexptslist = []
                    thisareabackpts = thisfilebackpts[(thisfilebackpts['Areanumber'] == areanum)]
                    for bcol in ('Lower1', 'Lower2', 'Upper1', 'Upper2'):
                        indexptslist.extend(thisareabackpts[bcol].unique().tolist())
                    indexptslist = [int(v) for v in indexptslist]  # ensure plain ints
                    indexptslist.sort()
                # subplot grid sized for the number of element ranges
                if len(plotranges) == 1:
                    numcols = 1
                    numrows = 1
                else:
                    numcols = 2
                    numrows = math.ceil(len(plotranges) / 2)
                # new plot page for each spatial area
                try:
                    fig, axes = plt.subplots(nrows=numrows, ncols=numcols, figsize=(16, 9), squeeze=False)
                    colname = 'Counts' + str(areanum)
                    mytitle = AugerFileName + ' area #' + str(areanum)
                    plt.suptitle(mytitle)
                    # loop over the elemental plot ranges
                    for j, bounds in enumerate(plotranges):
                        [lower, upper] = bounds
                        thisrow = j % numrows
                        thiscol = j // numrows
                        axindex = thisrow, thiscol
                        # slice is already known to be non-empty (checked in getplotboundaries)
                        Augerslice = Augerfile[(Augerfile['Energy'] >= lower) & (Augerfile['Energy'] <= upper)]
                        Augerslice.plot(x='Energy', y=colname, ax=axes[axindex])  # counts
                        if plotbackpts == True:
                            # scatter points at the fit-region boundaries (this slice only)
                            backpts = Augerslice[Augerslice.index.isin(indexptslist)]
                            if not backpts.empty:
                                backpts.plot.scatter(x='Energy', y=colname, ax=axes[axindex])
                            backname = 'Backfit' + str(areanum)
                            Augerslice.plot(x='Energy', y=backname, ax=axes[axindex])
                        # label elemental lines within this range
                        elemlines = getelemenergy(plotelems, bounds, AESquantparams, deriv=False)
                        for k, elemtuple in enumerate(elemlines):  # (energy, symbol) tuples
                            try:
                                axes[axindex].axvline(x=elemtuple[0], color='b')
                                # place label at 90% of the y-range of this slice
                                yval = (Augerslice[colname].max() - Augerslice[colname].min()) * 0.9 + Augerslice[colname].min()
                                axes[axindex].text(elemtuple[0], yval, elemtuple[1], rotation=90, fontsize=18)
                            except:
                                print('Problem labeling elements')
                        # red vertical lines at multiplex energy breaks
                        for l, val in enumerate(energyvals):
                            if val > lower and val < upper:
                                axes[axindex].axvline(x=val, color='r')
                    # hide any unused grid cells, then save ONE page per area
                    # (BUG FIX: in the legacy version these statements sat inside the
                    # per-element loop, re-saving the page once per element range)
                    for subplt in range(0, numrows * numcols):
                        if subplt > len(plotranges) - 1:
                            axes[subplt % numrows, subplt // numrows].set_visible(False)
                    pdf.savefig(fig)
                    plt.close(fig)  # figure is stored in the PDF report
                    print(AugerFileName, ' area', areanum, 'plotted')
                except:
                    print('Problem plotting file ', AugerFileName, 'area', areanum, ' likely no data for specified elements.')
    plt.ion()
    return
# Old way with cloned filenumber areanumber rows in paramlog
def reportderivcnt(paramlog, plotelems, AESquantparams, **kwargs):
    ''' Comparison plots for both derivative and counts itself (don't have to worry about axes w/o indexing)
    plots selected filenumber + areanumber
    plots all spe files, associated quant points, and labels selected elemental lines
    Kwargs: both backfitlog (key bfl) and smdiflogs (key smd) are optional kwarg params
    pdf rename is also custom (defaults to derivcnts_report_3Mar17)

    Kwargs (as used below):
        smd (pd.DataFrame): smooth-diff quant log; required when addsmdif is set
        bfl (pd.DataFrame): background fit log (Lower1/2, Upper1/2 index cols)
        addsmdif (bool): overlay smooth-diff quant points on the deriv plots
        PDFname (str): output PDF name
    NOTE(review): `thisfilepeaks` is only bound when 'smd' is passed; calling
    with addsmdif=True but no 'smd' raises NameError (swallowed by the broad
    except below and reported as an "Unknown problem").
    '''
    plt.ioff()
    now=datetime.datetime.today()
    # Set up default PDF name (custom name is passable via kwargs)
    fname='Derivcnt_report_'+now.strftime('%d%b%y')+'.pdf'
    PDFname=kwargs.get('PDFname',fname)
    with PdfPages(PDFname) as pdf:
        for index,row in paramlog.iterrows():
            AugerFileName=paramlog.loc[index]['Filename']
            areanum=int(paramlog.loc[index]['Areanumber'])
            Augerfile=openspefile(AugerFileName)
            if Augerfile.empty: # file not found problem
                continue
            # myplotrange=(Augerfile['Energy'].min(),Augerfile['Energy'].max()) # same range for all areas in spe
            if 'smd' in kwargs:
                Smdifdf=kwargs.get('smd','')
                # retrieve assoc. subset of peaks data
                thisfilepeaks=Smdifdf[(Smdifdf['Filename']==AugerFileName)&(Smdifdf['Areanumber']==areanum)]
            # SKIP energyvals=findevbreaks(Params, Augerfile) # get x energy vals for evbreaks in multiplex
            # determine size of plot for this filenumber (same for all areas)
            # plotranges fnct sometimes combines certain plotelems (i.e. C and Ca together)
            plotranges=getplotboundaries(Augerfile, plotelems, AESquantparams) # returns plot ranges for all regions with data from plotelems
            # set plot rows and columns
            numrows=2
            numcols=len(plotranges)
            # for plotting background fit points used in integral method
            if 'bfl' in kwargs:
                backfitdf=kwargs.get('bfl','')
                thisfilebackpts=backfitdf[(backfitdf['Filename']==AugerFileName) & (backfitdf['Areanumber']==areanum)]
                indexptslist=[] # this gets all the lower1, lower2, upper1, upper2 index point boundaries
                thisarr=thisfilebackpts.Lower1.unique()
                thislist=np.ndarray.tolist(thisarr)
                indexptslist.extend(thislist)
                thisarr=thisfilebackpts.Lower2.unique()
                thislist=np.ndarray.tolist(thisarr)
                indexptslist.extend(thislist)
                thisarr=thisfilebackpts.Upper1.unique()
                thislist=np.ndarray.tolist(thisarr)
                indexptslist.extend(thislist)
                thisarr=thisfilebackpts.Upper2.unique()
                thislist=np.ndarray.tolist(thisarr)
                indexptslist.extend(thislist)
                indexptslist=[int(i) for i in indexptslist]
                indexptslist.sort()
            try:
                fig, axes = plt.subplots(nrows=numrows, ncols=numcols, figsize=(16,9), squeeze=False) # 2 by ? axes array
                mytitle=AugerFileName.replace('.csv','') +' area #'+str(areanum)
                plt.suptitle(mytitle)
                for j, bounds in enumerate(plotranges):
                    [lower, upper]=bounds
                    thiscol=j
                    # Augerslice already checked for empty set
                    Augerslice=Augerfile[(Augerfile['Energy']>lower) & (Augerfile['Energy']<upper)]
                    Augerslice.plot(x='Energy', y='S7D7'+str(areanum), ax=axes[0,thiscol]) # deriv in upper plot
                    Augerslice.plot(x='Energy', y='Counts'+str(areanum), ax=axes[1,thiscol]) # counts in lower
                    # Section for labeling plotelements (plot range is passable as lower, upper tuple)
                    elemlines=getelemenergy(plotelems, bounds, AESquantparams, deriv=True)
                    # list of tuples with energy,elemname
                    for k, elemtuple in enumerate(elemlines):
                        # elemtuple[0] is energy and [1] is element symbol
                        try:
                            axes[0,thiscol].axvline(x=elemtuple[0], color='b') # O line
                            axes[0,thiscol].text(elemtuple[0],-250, elemtuple[1],rotation=90, fontsize=18) # use standard -250 y val
                        except:
                            print('Problem labeling elements')
                    # Section for adding smooth-diff quant data (optional via kwarg)
                    if kwargs.get('addsmdif',False):
                        plotpts=thisfilepeaks[(thisfilepeaks['Peakenergy']>lower) & (thisfilepeaks['Peakenergy']<upper)]
                        if not plotpts.empty:
                            try:
                                plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[0,thiscol], color='r')
                                plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[0,thiscol], color='r')
                                titlestring=maketitlestring(plotpts)
                                axes[0,thiscol].set_title(titlestring, fontsize=10)
                            except:
                                print('Problem adding points from smdif quant calcs for ', AugerFileName,'area # ', areanum )
                    # add red vert line at multiplex energy break if present
                    # removed... however evbreaks could be retrieved from AugerParamLog if desired
                    '''
                    for l, val in enumerate(energyvals):
                        if val > lower and val < upper:
                            axes[0,thiscol].axvline(x=val, color='r') # on deriv plot
                            axes[1,thiscol].axvline(x=val, color='r') # on counts plot
                    '''
                    # Now plot counts and background fits in bottom row
                    if 'bfl' in kwargs: # flag to show points from which background was determined
                        # Now add scatter plot points at fit region boundaries
                        backpts=Augerslice[Augerslice.index.isin(indexptslist)] # gets background fitted pts but only from this data slice
                        if not backpts.empty: # show fitted pts from counts
                            backpts.plot.scatter(x='Energy', y='Counts'+str(areanum), ax=axes[1,thiscol])
                        Augerslice.plot(x='Energy', y='Backfit'+str(areanum), ax=axes[1,thiscol])
                    # now label elements for counts plot
                    elemlines=getelemenergy(plotelems, bounds, AESquantparams, deriv=False) # can pass plot range as lower,upper tuple
                    # list of tuples with energy,elemname
                    for k, elemtuple in enumerate(elemlines):
                        # elemtuple[0] is energy and [1] is element symbol
                        # axes[thisrow,thiscol].axvline(x=elemtuple[0], color='b') # O line
                        try:
                            axes[1,thiscol].axvline(x=elemtuple[0], color='b') # O line
                            yval=(Augerslice['Counts'+str(areanum)].max()-Augerslice['Counts'+str(areanum)].min())*0.9+Augerslice['Counts'+str(areanum)].min()
                            axes[1,thiscol].text(elemtuple[0],yval, elemtuple[1],rotation=90, fontsize=18) # use standard -250 y val
                        except:
                            print('Problem labeling elements')
                    # NOTE(review): savefig/close appear nested inside the per-element
                    # loop here, so each element range re-saves a (cumulative) PDF
                    # page -- presumably intended one level out; verify the output PDF.
                    pdf.savefig(fig)
                    plt.close(fig)
            except:
                print('Unknown problem plotting', AugerFileName,' area #', areanum)
    plt.ion()
    return
def reportderivcntall(paramlog, plotelems, AESquantparams, Smdifdf=False, backfitdf=False, PDFname='this_report.pdf'):
    ''' Comparison PDF report with smooth-diff derivative (top row) and raw
    counts (bottom row) for every spe file and every spatial area in paramlog.

    Args:
        paramlog (pd.DataFrame): Auger parameter log; needs 'Filename' and 'Areas'
        plotelems: elements whose energy regions should be plotted/labelled
        AESquantparams (pd.DataFrame): line energies etc. for element labelling
        Smdifdf: optional smooth-diff quant results DataFrame (adds peak
            markers + amplitude title to the derivative plots); False to skip
        backfitdf: optional background-fit boundary DataFrame
            (Lower1/Lower2/Upper1/Upper2 index columns); False to skip
        PDFname (str): output PDF file name
    Returns:
        None (side effect: writes PDFname)
    '''
    plt.ioff()  # suppress interactive figure windows during batch plotting
    with PdfPages(PDFname) as pdf:
        for index, row in paramlog.iterrows():
            AugerFileName = paramlog.loc[index]['Filename']
            numareas = int(paramlog.loc[index]['Areas'])
            Augerfile = openspefile(AugerFileName)
            if Augerfile.empty:  # file-not-found problem
                continue
            # BUG FIX: addsmdif must default to False; the original only bound it
            # when Smdifdf was a DataFrame, raising NameError at its use otherwise.
            addsmdif = False
            if isinstance(Smdifdf, pd.DataFrame):
                addsmdif = True  # flag to plot sm-diff quant points
                thisfilepeaks = Smdifdf[Smdifdf['Filename'] == AugerFileName]
            # plot ranges for all regions with data (may merge e.g. C and Ca)
            plotranges = getplotboundaries(Augerfile, plotelems, AESquantparams)
            for i in range(0, numareas):
                areanum = i + 1
                numrows = 2  # deriv on top, counts below
                numcols = len(plotranges)
                if isinstance(backfitdf, pd.DataFrame):  # background fit points (integral method)
                    thisfilebackpts = backfitdf[(backfitdf['Filename'] == AugerFileName) & (backfitdf['Areanumber'] == areanum)]
                    plotbackpts = True
                    # collect every lower1/lower2/upper1/upper2 boundary index
                    indexptslist = []
                    for bcol in ('Lower1', 'Lower2', 'Upper1', 'Upper2'):
                        indexptslist.extend(thisfilebackpts[bcol].unique().tolist())
                    indexptslist = [int(v) for v in indexptslist]
                    indexptslist.sort()
                else:
                    plotbackpts = False
                try:
                    fig, axes = plt.subplots(nrows=numrows, ncols=numcols, figsize=(16, 9), squeeze=False)
                    mytitle = AugerFileName.replace('.csv', '') + ' area #' + str(areanum)
                    if len(plotranges) > 4:
                        plt.tight_layout()  # shrink to fit axis labels
                    plt.suptitle(mytitle)
                    for j, bounds in enumerate(plotranges):
                        [lower, upper] = bounds
                        thiscol = j
                        # slice already known to be non-empty (checked in getplotboundaries)
                        Augerslice = Augerfile[(Augerfile['Energy'] > lower) & (Augerfile['Energy'] < upper)]
                        Augerslice.plot(x='Energy', y='S7D7' + str(areanum), ax=axes[0, thiscol])  # deriv (top)
                        Augerslice.plot(x='Energy', y='Counts' + str(areanum), ax=axes[1, thiscol])  # counts (bottom)
                        # label elemental lines on the derivative plot
                        elemlines = getelemenergy(plotelems, bounds, AESquantparams, deriv=True)
                        for k, elemtuple in enumerate(elemlines):  # (energy, symbol) tuples
                            try:
                                axes[0, thiscol].axvline(x=elemtuple[0], color='b')
                                axes[0, thiscol].text(elemtuple[0], -250, elemtuple[1], rotation=90, fontsize=18)
                            except:
                                print('Problem labeling elements')
                        # overlay smooth-diff quant markers (if Smdifdf passed)
                        if addsmdif:
                            thisareapeaks = thisfilepeaks[thisfilepeaks['Areanumber'] == areanum]
                            # BUG FIX: mask must be built from thisareapeaks, not
                            # thisfilepeaks (misaligned boolean index otherwise)
                            plotpts = thisareapeaks[(thisareapeaks['Peakenergy'] > lower) & (thisareapeaks['Peakenergy'] < upper)]
                            if not plotpts.empty:
                                try:
                                    plotpts.plot.scatter(x='Peakenergy', y='Negintensity', ax=axes[0, thiscol], color='r')
                                    plotpts.plot.scatter(x='Pospeak', y='Posintensity', ax=axes[0, thiscol], color='r')
                                    titlestring = maketitlestring(plotpts)
                                    axes[0, thiscol].set_title(titlestring, fontsize=10)
                                except:
                                    print('Problem adding points from smdif quant calcs for ', AugerFileName, 'area # ', areanum)
                        # (evbreak red-line plotting removed; evbreaks could be
                        # retrieved from AugerParamLog if desired)
                        # counts + background fits in bottom row
                        if plotbackpts == True:
                            # scatter points at fit-region boundaries (this slice only)
                            backpts = Augerslice[Augerslice.index.isin(indexptslist)]
                            if not backpts.empty:
                                backpts.plot.scatter(x='Energy', y='Counts' + str(areanum), ax=axes[1, thiscol])
                            Augerslice.plot(x='Energy', y='Backfit' + str(areanum), ax=axes[1, thiscol])
                        # label elements on the counts plot
                        elemlines = getelemenergy(plotelems, bounds, AESquantparams, deriv=False)
                        for k, elemtuple in enumerate(elemlines):
                            try:
                                axes[1, thiscol].axvline(x=elemtuple[0], color='b')
                                cname = 'Counts' + str(areanum)
                                # place label at 90% of the y-range of this slice
                                yval = (Augerslice[cname].max() - Augerslice[cname].min()) * 0.9 + Augerslice[cname].min()
                                axes[1, thiscol].text(elemtuple[0], yval, elemtuple[1], rotation=90, fontsize=18)
                            except:
                                print('Problem labeling elements')
                    # BUG FIX: removed the legacy hide-empty-subplots loop; with a
                    # 2 x len(plotranges) grid every cell is used, and the old
                    # condition (i > len(plotranges)-1) hid the entire bottom
                    # (counts) row of every page.
                    pdf.savefig(fig)
                    plt.close(fig)
                except:
                    print('Unknown problem plotting', AugerFileName, ' area #', areanum)
    plt.ion()
    return
| {
"repo_name": "tkcroat/Augerquant",
"path": "Development/Auger_legacy_functions.py",
"copies": "1",
"size": "116655",
"license": "mit",
"hash": -1386515949523438000,
"line_mean": 60.8190630048,
"line_max": 187,
"alpha_frac": 0.5640992671,
"autogenerated": false,
"ratio": 3.7376245554451955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4801723822545195,
"avg_score": null,
"num_lines": null
} |
"""Augmentation layers.
This module includes augmentation layers that can be applied to audio data and representations.
"""
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
from . import backend
from .backend import _CH_FIRST_STR, _CH_LAST_STR, _CH_DEFAULT_STR
import numpy as np
class ChannelSwap(Layer):
    """
    Randomly swap the channel

    Args:
        data_format (`str`): specifies the data format of batch input/output
        **kwargs: Keyword args for the parent keras layer (e.g., `name`)

    Example:
        ::

            input_shape = (2048, 2)  # stereo signal
            n_fft = 1024
            n_hop = n_fft // 2

            kwargs = {
                'sample_rate': 22050,
                'n_freq': n_fft // 2 + 1,
                'n_mels': 128,
                'f_min': 0.0,
                'f_max': 8000,
            }

            model = Sequential()
            model.add(kapre.STFT(n_fft=n_fft, hop_length=n_hop, input_shape=input_shape))
            model.add(Magnitude())
            # (batch, n_frame=3, n_freq=n_fft // 2 + 1, ch=1) and dtype is float
            model.add(ChannelSwap())
            # same shape but with randomly swapped channels.

        ::

            input_shape = (2048, 4)  # 4-channel signal
            model = Sequential()
            model.add(ChannelSwap(input_shape=input_shape))
            # still (2048, 4) but with randomly swapped channels
    """

    def __init__(
        self,
        data_format='default',
        **kwargs,
    ):
        backend.validate_data_format_str(data_format)

        # resolve 'default' to the globally configured Keras image data format
        if data_format == _CH_DEFAULT_STR:
            self.data_format = K.image_data_format()
        else:
            self.data_format = data_format

        super(ChannelSwap, self).__init__(**kwargs)

    def call(self, x, training=None):
        """
        Apply random channel-swap augmentation to `x`.

        Args:
            x (`Tensor`): A batch tensor of 1D (signals) or 2D (spectrograms) data
            training: Keras training-phase flag; augmentation is skipped
                (identity) unless training is truthy.
        """
        if training in (None, False):
            return x

        # figure out input data format
        if K.ndim(x) not in (3, 4):
            raise ValueError(
                'ndim of input tensor x should be 3 (batch signal) or 4 (batch spectrogram),'
                'but it is %d' % K.ndim(x)
            )
        if self.data_format == _CH_LAST_STR:
            ch_axis = 3 if K.ndim(x) == 4 else 2
        else:
            ch_axis = 1

        # get swap indices
        n_ch = K.int_shape(x)[ch_axis]
        if n_ch == 1:
            return x  # single channel: nothing to swap
        # BUG FIX: the original drew the permutation with numpy at Python
        # level, so under @tf.function the ordering was frozen into the traced
        # graph (one fixed "shuffle" reused for every batch). tf.random.shuffle
        # keeps the permutation random per call inside the graph; fall back to
        # the dynamic shape when the channel count is not statically known.
        dim = n_ch if n_ch is not None else tf.shape(x)[ch_axis]
        swap_indices = tf.random.shuffle(tf.range(dim))

        # swap and return
        return tf.gather(x, indices=swap_indices, axis=ch_axis)

    def get_config(self):
        # include data_format so the layer round-trips through serialization
        config = super(ChannelSwap, self).get_config()
        config.update(
            {
                'data_format': self.data_format,
            }
        )
        return config
| {
"repo_name": "keunwoochoi/kapre",
"path": "kapre/augmentation.py",
"copies": "1",
"size": "2970",
"license": "mit",
"hash": -8568890512612209000,
"line_mean": 27.8349514563,
"line_max": 95,
"alpha_frac": 0.530976431,
"autogenerated": false,
"ratio": 3.862158647594278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890389450078088,
"avg_score": 0.0005491257032383322,
"num_lines": 103
} |
""" Augment core.
Manager and Worker classes.
"""
import os
import multiprocessing as mp
from PIL import Image
class Manager(object):
    """Coordinates a pool of Worker processes over a source image tree.

    Walks `src`, enqueues every file path, then lets `num_workers` Worker
    processes apply `transformations` and write results under `dest`.
    """

    def __init__(self, src, dest, transformations, num_workers=4):
        self.src = src
        self.dest = dest
        self.num_workers = num_workers
        self.queue = mp.Queue()
        self.workers = [Worker(w, src, dest, self.queue, transformations)
                        for w in range(num_workers)]

    def process(self):
        """Fill the work queue, start all workers, and wait for completion."""
        self._fill_queue()
        for w in self.workers:
            w.start()
        for w in self.workers:
            w.join()

    def _fill_queue(self):
        """Enqueue every file path under self.src, skipping macOS .DS_Store files."""
        for (dirpath, dirnames, filenames) in os.walk(self.src):
            for f in filenames:
                # BUG FIX: original used `f is not ".DS_Store"` -- an identity
                # comparison that is always True, so the filter never fired;
                # equality (!=) is what was intended.
                if f != ".DS_Store":
                    self.queue.put(dirpath + "/" + f)
class Worker(mp.Process):
    """Process that consumes image paths from a queue, applies the configured
    transformations, and saves each result under the destination tree."""

    def __init__(self, pidx, src, dest, queue, transformations):
        mp.Process.__init__(self)
        self.pidx = pidx  # worker index, used only for log messages
        self.src = src
        self.dest = dest
        self.queue = queue
        self.transformations = transformations

    def run(self):
        """ Main worker loop. """
        print("[%d] Starting processing." % (self.pidx))
        img_no = 0
        while not self.queue.empty():
            src_path = self.queue.get()
            try:
                src_img = Image.open(src_path)
            except IOError as e:
                print("Cannot open: ", src_path)
                print("Error: ", e)
                # BUG FIX: skip unreadable files; the original fell through and
                # reused the previous image (or raised NameError on the first).
                continue
            prev_info = ""
            for t in self.transformations:
                dest_img = t.transform(src_img)
                info = prev_info + t.get_info()
                if isinstance(dest_img, list):
                    # transform produced several variants as (image, info) tuples
                    for img_tuple in dest_img:
                        img, info = img_tuple
                        self._save(img, src_path, prev_info + info)
                else:
                    self._save(dest_img, src_path, info)
                # keep this image for further processing
                # NOTE(review): if dest_img is a list AND t.mutable is set,
                # src_img becomes a list -- presumably mutable transforms never
                # return lists; confirm against the transformation contract.
                if t.mutable:
                    src_img = dest_img
                    prev_info += t.get_info()
            img_no += 1
        print("[%d] Finished processing %03d images." % (self.pidx, img_no))

    def _save(self, img, src_path, info=None):
        """Save `img` to the destination path derived from `src_path` + `info`."""
        dest_path = self._get_dest_path(src_path, info)
        img.save(dest_path)

    def _get_dest_path(self, src_path, info):
        """Map a source path into the dest tree, tagging the filename with `info`."""
        src_path, src_fn = os.path.split(src_path)
        dest_path = src_path.replace(self.src, self.dest)
        dest_fn = src_fn.replace(".jpg", (str(info) if info else "") + ".jpg")
        dest_path += ("/" + dest_fn)
        return dest_path
| {
"repo_name": "floringogianu/augment",
"path": "augment/core.py",
"copies": "1",
"size": "2700",
"license": "mit",
"hash": 3793789346854653000,
"line_mean": 29.6818181818,
"line_max": 78,
"alpha_frac": 0.5077777778,
"autogenerated": false,
"ratio": 3.896103896103896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4903881673903896,
"avg_score": null,
"num_lines": null
} |
#Augment data by x8 times by making scalings and small rotations
from util import shear_image
from util import rotate_image
from util import image_rotated_cropped
import glob
import cv2
import uuid
import os
from shutil import copyfile
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
input_folder = "../img/mouth_data"
output_folder = "../img/all_data"
input_data_set = [img for img in glob.glob(input_folder + "/*jpg")]
generate_random_filename = 1
for in_idx, img_path in enumerate(input_data_set):
    file_name = os.path.splitext(os.path.basename(img_path))[0]
    print(file_name)
    augmentation_number = 8
    initial_rot = -20
    rotation_step = 5
    # Keep an untouched copy of the original image in the output folder.
    copyfile(img_path, output_folder + "/" + file_name + ".jpg")
    # Seven rotated variants per image: -15, -10, ..., +15 degrees.
    for x in range(1, augmentation_number):
        total_rotation = initial_rot + rotation_step * x
        mouth_rotated = image_rotated_cropped(img_path, total_rotation)
        # Resize every variant to the 32x32 network input size.
        mouth_rotated = cv2.resize(mouth_rotated,
                                   (IMAGE_WIDTH, IMAGE_HEIGHT),
                                   interpolation=cv2.INTER_CUBIC)
        # "showing teeth" images keep their label marker in the filename.
        label_suffix = "_showingteeth" if 'showingteeth' in img_path else ""
        if generate_random_filename == 1:
            # uuid4().urn is "urn:uuid:<hex>"; drop the 9-char prefix.
            random_stem = uuid.uuid4().urn[9:]
            path = output_folder + "/" + random_stem + label_suffix + ".jpg"
        else:
            path = (output_folder + "/" + file_name
                    + "_rotated" + str(x) + label_suffix + ".jpg")
        cv2.imwrite(path, mouth_rotated)
| {
"repo_name": "juanzdev/TeethClassifierCNN",
"path": "src/data_augmentation.py",
"copies": "1",
"size": "1833",
"license": "mit",
"hash": 6982811367499585000,
"line_mean": 34.25,
"line_max": 111,
"alpha_frac": 0.6186579378,
"autogenerated": false,
"ratio": 3.407063197026022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45257211348260223,
"avg_score": null,
"num_lines": null
} |
"""Augmenters that are collections of other augmenters.
List of augmenters:
* :class:`RandAugment`
Added in 0.4.0.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from .. import parameters as iap
from .. import random as iarandom
from . import meta
from . import arithmetic
from . import flip
from . import pillike
from . import size as sizelib
class RandAugment(meta.Sequential):
    """Apply RandAugment to inputs as described in the corresponding paper.

    See paper::

        Cubuk et al.
        RandAugment: Practical automated data augmentation with a reduced
        search space

    .. note::

        The paper contains essentially no hyperparameters for the individual
        augmentation techniques. The hyperparameters used here come mostly
        from the official code repository, which however seems to only contain
        code for CIFAR10 and SVHN, not for ImageNet. So some guesswork was
        involved and a few of the hyperparameters were also taken from
        https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py .

        This implementation deviates from the code repository for all PIL
        enhance operations. In the repository these use a factor of
        ``0.1 + M*1.8/M_max``, which would lead to a factor of ``0.1`` for the
        weakest ``M`` of ``M=0``. For e.g. ``Brightness`` that would result in
        a basically black image. This definition is fine for AutoAugment (from
        where the code and hyperparameters are copied), which optimizes
        each transformation's ``M`` individually, but not for RandAugment,
        which uses a single fixed ``M``. We hence redefine these
        hyperparameters to ``1.0 + S * M * 0.9/M_max``, where ``S`` is
        randomly either ``1`` or ``-1``.

        We also note that it is not entirely clear which transformations
        were used in the ImageNet experiments. The paper lists some
        transformations in Figure 2, but names others in the text too (e.g.
        crops, flips, cutout). While Figure 2 lists the Identity function,
        this transformation seems to not appear in the repository (and in
        fact, the function ``randaugment(N, M)`` doesn't seem to exist in the
        repository either). So we also make a best guess here about what
        transformations might have been used.

    .. warning::

        This augmenter only works with image data, not e.g. bounding boxes.
        The used PIL-based affine transformations are not yet able to
        process non-image data. (This augmenter uses PIL-based affine
        transformations to ensure that outputs are as similar as possible
        to the paper's implementation.)

    Added in 0.4.0.

    **Supported dtypes**:

    minimum of (
        :class:`~imgaug.augmenters.flip.Fliplr`,
        :class:`~imgaug.augmenters.size.KeepSizeByResize`,
        :class:`~imgaug.augmenters.size.Crop`,
        :class:`~imgaug.augmenters.meta.Sequential`,
        :class:`~imgaug.augmenters.meta.SomeOf`,
        :class:`~imgaug.augmenters.meta.Identity`,
        :class:`~imgaug.augmenters.pillike.Autocontrast`,
        :class:`~imgaug.augmenters.pillike.Equalize`,
        :class:`~imgaug.augmenters.arithmetic.Invert`,
        :class:`~imgaug.augmenters.pillike.Affine`,
        :class:`~imgaug.augmenters.pillike.Posterize`,
        :class:`~imgaug.augmenters.pillike.Solarize`,
        :class:`~imgaug.augmenters.pillike.EnhanceColor`,
        :class:`~imgaug.augmenters.pillike.EnhanceContrast`,
        :class:`~imgaug.augmenters.pillike.EnhanceBrightness`,
        :class:`~imgaug.augmenters.pillike.EnhanceSharpness`,
        :class:`~imgaug.augmenters.arithmetic.Cutout`,
        :class:`~imgaug.augmenters.pillike.FilterBlur`,
        :class:`~imgaug.augmenters.pillike.FilterSmooth`
    )

    Parameters
    ----------
    n : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or None, optional
        Parameter ``N`` in the paper, i.e. number of transformations to apply.
        The paper suggests ``N=2`` for ImageNet.
        See also parameter ``n`` in :class:`~imgaug.augmenters.meta.SomeOf`
        for more details.

        Note that horizontal flips (p=50%) and crops are always applied. This
        parameter only determines how many of the other transformations
        are applied per image.

    m : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or None, optional
        Parameter ``M`` in the paper, i.e. magnitude/severity/strength of the
        applied transformations in interval ``[0 .. 30]`` with ``M=0`` being
        the weakest. The paper suggests for ImageNet ``M=9`` in case of
        ResNet-50 and ``M=28`` in case of EfficientNet-B7.
        This implementation uses a default value of ``(6, 12)``, i.e. the
        value is uniformly sampled per image from the interval ``[6 .. 12]``.
        This ensures greater diversity of transformations than using a single
        fixed value.

        * If ``int``: That value will always be used.
        * If ``tuple`` ``(a, b)``: A random value will be uniformly sampled per
          image from the discrete interval ``[a .. b]``.
        * If ``list``: A random value will be picked from the list per image.
        * If ``StochasticParameter``: For ``B`` images in a batch, ``B`` values
          will be sampled per augmenter (provided the augmenter is dependent
          on the magnitude).

    cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
        The constant value to use when filling in newly created pixels.
        See parameter `fillcolor` in
        :class:`~imgaug.augmenters.pillike.Affine` for details.

        The paper's repository uses an RGB value of ``125, 122, 113``.
        This implementation uses a single intensity value of ``128``, which
        should work better for cases where input images don't have exactly
        ``3`` channels or come from a different dataset than used by the
        paper.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.RandAugment(n=2, m=9)

    Create a RandAugment augmenter similar to the suggested hyperparameters
    in the paper.

    >>> aug = iaa.RandAugment(m=30)

    Create a RandAugment augmenter with maximum magnitude/strength.

    >>> aug = iaa.RandAugment(m=(0, 9))

    Create a RandAugment augmenter that applies its transformations with a
    random magnitude between ``0`` (very weak) and ``9`` (recommended for
    ImageNet and ResNet-50). ``m`` is sampled per transformation.

    >>> aug = iaa.RandAugment(n=(0, 3))

    Create a RandAugment augmenter that applies ``0`` to ``3`` of its
    child transformations to images. Horizontal flips (p=50%) and crops are
    always applied.

    """

    # Maximum magnitude value; magnitudes are scaled relative to this.
    _M_MAX = 30

    # according to paper:
    # N=2, M=9 is optimal for ImageNet with ResNet-50
    # N=2, M=28 is optimal for ImageNet with EfficientNet-B7
    # for cval they use [125, 122, 113]

    # Added in 0.4.0.
    def __init__(self, n=2, m=(6, 12), cval=128,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        # pylint: disable=invalid-name
        # `random_state` is the deprecated alias of `seed`; prefer it when
        # explicitly given so old callers keep working.
        seed = seed if random_state == "deprecated" else random_state
        rng = iarandom.RNG.create_if_not_rng_(seed)

        # we don't limit the value range to 10 here, because the paper
        # gives several examples of using more than 10 for M
        m = iap.handle_discrete_param(
            m, "m", value_range=(0, None),
            tuple_to_uniform=True, list_to_choice=True,
            allow_floats=False)
        self._m = m
        self._cval = cval

        # The paper says in Appendix A.2.3 "ImageNet", that they actually
        # always execute Horizontal Flips and Crops first and only then a
        # random selection of the other transformations.
        # Hence, we split here into two groups.
        # It's not really clear what crop parameters they use, so we
        # choose [0..M] here.
        initial_augs = self._create_initial_augmenters_list(m)
        main_augs = self._create_main_augmenters_list(m, cval)

        # assign random state to all child augmenters
        for lst in [initial_augs, main_augs]:
            for augmenter in lst:
                augmenter.random_state = rng

        super(RandAugment, self).__init__(
            [
                meta.Sequential(initial_augs,
                                seed=rng.derive_rng_()),
                meta.SomeOf(n, main_augs, random_order=True,
                            seed=rng.derive_rng_())
            ],
            seed=rng, name=name,
            random_state=random_state, deterministic=deterministic
        )

    # Added in 0.4.0.
    @classmethod
    def _create_initial_augmenters_list(cls, m):
        """Create the always-applied group (horizontal flip + crop)."""
        # pylint: disable=invalid-name
        return [
            flip.Fliplr(0.5),
            sizelib.KeepSizeByResize(
                # assuming that the paper implementation crops M pixels from
                # 224px ImageNet images, we crop here a fraction of
                # M*(M_max/224)
                sizelib.Crop(
                    percent=iap.Divide(
                        iap.Uniform(0, m),
                        224,
                        elementwise=True),
                    sample_independently=True,
                    keep_size=False),
                interpolation="linear"
            )
        ]

    # Added in 0.4.0.
    @classmethod
    def _create_main_augmenters_list(cls, m, cval):
        """Create the pool of transformations from which ``N`` are sampled."""
        # pylint: disable=invalid-name
        m_max = cls._M_MAX

        def _float_parameter(level, maxval):
            # scale `level` (in [0..m_max]) linearly to [0..maxval]
            maxval_norm = maxval / m_max
            return iap.Multiply(level, maxval_norm, elementwise=True)

        def _int_parameter(level, maxval):
            # paper applies just int(), so we don't round here
            return iap.Discretize(_float_parameter(level, maxval),
                                  round=False)

        # In the paper's code they use the definition from AutoAugment,
        # which is 0.1 + M*1.8/10. But that results in 0.1 for M=0, i.e. for
        # Brightness an almost black image, while M=5 would result in an
        # unaltered image. For AutoAugment that may be fine, as M is optimized
        # for each operation individually, but here we have only one fixed M
        # for all operations. Hence, we rather set this to 1.0 +/- M*0.9/10,
        # so that M=10 would result in 0.1 or 1.9.
        def _enhance_parameter(level):
            fparam = _float_parameter(level, 0.9)
            return iap.Clip(
                iap.Add(1.0, iap.RandomSign(fparam), elementwise=True),
                0.1, 1.9
            )

        def _subtract(a, b):
            return iap.Subtract(a, b, elementwise=True)

        def _affine(*args, **kwargs):
            kwargs["fillcolor"] = cval
            if "center" not in kwargs:
                kwargs["center"] = (0.0, 0.0)
            return pillike.Affine(*args, **kwargs)

        _rnd_s = iap.RandomSign
        shear_max = np.rad2deg(0.3)

        # we don't add vertical flips here, paper is not really clear about
        # whether they used them or not
        return [
            meta.Identity(),
            pillike.Autocontrast(cutoff=0),
            pillike.Equalize(),
            arithmetic.Invert(p=1.0),
            # they use Image.rotate() for the rotation, which uses
            # the image center as the rotation center
            _affine(rotate=_rnd_s(_float_parameter(m, 30)),
                    center=(0.5, 0.5)),
            # paper uses 4 - int_parameter(M, 4)
            pillike.Posterize(
                nb_bits=_subtract(
                    8,
                    iap.Clip(_int_parameter(m, 6), 0, 6)
                )
            ),
            # paper uses 256 - int_parameter(M, 256)
            pillike.Solarize(
                p=1.0,
                threshold=iap.Clip(
                    _subtract(256, _int_parameter(m, 256)),
                    0, 256
                )
            ),
            pillike.EnhanceColor(_enhance_parameter(m)),
            pillike.EnhanceContrast(_enhance_parameter(m)),
            pillike.EnhanceBrightness(_enhance_parameter(m)),
            pillike.EnhanceSharpness(_enhance_parameter(m)),
            _affine(shear={"x": _rnd_s(_float_parameter(m, shear_max))}),
            _affine(shear={"y": _rnd_s(_float_parameter(m, shear_max))}),
            _affine(translate_percent={"x": _rnd_s(_float_parameter(m, 0.33))}),
            _affine(translate_percent={"y": _rnd_s(_float_parameter(m, 0.33))}),
            # paper code uses 20px on CIFAR (i.e. size 20/32), no information
            # on ImageNet values so we just use the same values
            arithmetic.Cutout(1,
                              size=iap.Clip(
                                  _float_parameter(m, 20 / 32), 0, 20 / 32),
                              squared=True,
                              fill_mode="constant",
                              cval=cval),
            pillike.FilterBlur(),
            pillike.FilterSmooth()
        ]

    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        # self[1] is the SomeOf created in __init__ (self[0] is the
        # always-applied flip+crop Sequential).
        someof = self[1]
        return [someof.n, self._m, self._cval]
| {
"repo_name": "aleju/imgaug",
"path": "imgaug/augmenters/collections.py",
"copies": "2",
"size": "14361",
"license": "mit",
"hash": -3218769919617967600,
"line_mean": 41.1143695015,
"line_max": 173,
"alpha_frac": 0.6076874869,
"autogenerated": false,
"ratio": 3.8971506105834464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021848905401352926,
"num_lines": 341
} |
"""Augmenters that help with debugging.
List of augmenters:
* :class:`SaveDebugImageEveryNBatches`
Added in 0.4.0.
"""
from __future__ import print_function, division, absolute_import
from abc import ABCMeta, abstractmethod, abstractproperty
import os
import collections
import six
import numpy as np
import imageio
import imgaug as ia
from .. import dtypes as iadt
from . import meta
from . import size as sizelib
from . import blend as blendlib
_COLOR_PINK = (255, 192, 203)
_COLOR_GRID_BACKGROUND = _COLOR_PINK
def _resizepad_to_size(image, size, cval):
    """Resize and pad an image to a given size.

    This first resizes until one image side matches the corresponding side in
    `size` (while retaining the aspect ratio). Then it pads the other side
    until both sides match `size`.

    Added in 0.4.0.

    Parameters
    ----------
    image : ndarray
        Image to resize, ``(H,W)`` or ``(H,W,C)``.
    size : tuple of int
        Target ``(height, width)``.
    cval : number or tuple of number
        Color of the added padding.

    Returns
    -------
    tuple
        ``(padded image, (resized height, resized width),
        (pad_top, pad_right, pad_bottom, pad_left))``.
    """
    # resize to height H and width W while keeping aspect ratio
    height = size[0]
    width = size[1]
    height_im = image.shape[0]
    width_im = image.shape[1]
    aspect_ratio_im = width_im / height_im
    # Pick the limiting side: scale along whichever axis has the smaller
    # scale factor, so the resized image never exceeds (height, width).
    #   height/height_im <= width/width_im
    #   <=>  height * width_im <= width * height_im
    # (A previous revision compared the raw pixel differences
    # `height - height_im < width - width_im` instead, which selects the
    # wrong axis for some aspect ratios and then requires negative padding
    # below, e.g. a 2x10 image into a 4x13 target.)
    if height * width_im <= width * height_im:
        height_im_rs = height
        width_im_rs = height * aspect_ratio_im
    else:
        height_im_rs = width / aspect_ratio_im
        width_im_rs = width
    height_im_rs = max(int(np.round(height_im_rs)), 1)
    width_im_rs = max(int(np.round(width_im_rs)), 1)
    image_rs = ia.imresize_single_image(image, (height_im_rs, width_im_rs))
    # pad the remaining space, centering the resized image
    pad_y = height - height_im_rs
    pad_x = width - width_im_rs
    pad_top = int(np.floor(pad_y / 2))
    pad_right = int(np.ceil(pad_x / 2))
    pad_bottom = int(np.ceil(pad_y / 2))
    pad_left = int(np.floor(pad_x / 2))
    image_rs_pad = sizelib.pad(image_rs,
                               top=pad_top, right=pad_right,
                               bottom=pad_bottom, left=pad_left,
                               cval=cval)
    paddings = (pad_top, pad_right, pad_bottom, pad_left)
    return image_rs_pad, (height_im_rs, width_im_rs), paddings
# TODO rename to Grid
@six.add_metaclass(ABCMeta)
class _IDebugGridCell(object):
    """A single cell within a debug image's grid.

    Usually corresponds to one image, but can also be e.g. a title/description.

    Concrete cells expose ``min_width``/``min_height`` so that columns can be
    sized before any cell is drawn.

    Added in 0.4.0.
    """

    @abstractproperty
    def min_width(self):
        """Minimum width in pixels that the cell requires.

        Added in 0.4.0.

        Returns
        -------
        int
            Minimum width in pixels.
        """

    @abstractproperty
    def min_height(self):
        """Minimum height in pixels that the cell requires.

        Added in 0.4.0.

        Returns
        -------
        int
            Minimum height in pixels.
        """

    @abstractmethod
    def draw(self, height, width):
        """Draw the debug image grid cell's content.

        Added in 0.4.0.

        Parameters
        ----------
        height : int
            Expected height of the drawn cell image/array.

        width : int
            Expected width of the drawn cell image/array.

        Returns
        -------
        ndarray
            ``(H,W,3)`` Image.

        """
class _DebugGridBorderCell(_IDebugGridCell):
    """Wrap another grid cell and surround its drawn content with a
    constant-color border.

    Added in 0.4.0.
    """

    # Added in 0.4.0.
    def __init__(self, size, color, child):
        self.size = size
        self.color = color
        self.child = child

    # Added in 0.4.0.
    @property
    def min_height(self):
        # the border is added on top of the child's size, not reserved in it
        return self.child.min_height

    # Added in 0.4.0.
    @property
    def min_width(self):
        return self.child.min_width

    # Added in 0.4.0.
    def draw(self, height, width):
        border = self.size
        drawn_child = self.child.draw(height, width)
        return sizelib.pad(drawn_child,
                           top=border, right=border,
                           bottom=border, left=border,
                           mode="constant", cval=self.color)
class _DebugGridTextCell(_IDebugGridCell):
    """Grid cell that renders a block of text on a white background.

    Added in 0.4.0.
    """

    # Added in 0.4.0.
    def __init__(self, text):
        self.text = text

    # Added in 0.4.0.
    @property
    def min_height(self):
        # roughly 17px per text line, at least 20px
        nb_lines = len(self.text.split("\n"))
        return max(20, nb_lines * 17)

    # Added in 0.4.0.
    @property
    def min_width(self):
        lines = self.text.split("\n")
        if not lines:
            return 20
        # roughly 7px per character of the longest line, at least 20px
        longest = max(len(line) for line in lines)
        return max(20, int(7 * longest))

    # Added in 0.4.0.
    def draw(self, height, width):
        canvas = np.full((height, width, 3), 255, dtype=np.uint8)
        return ia.draw_text(canvas, 0, 0, self.text, color=(0, 0, 0),
                            size=12)
class _DebugGridImageCell(_IDebugGridCell):
    """Cell containing an image, possibly with a different-shaped overlay.

    Added in 0.4.0.

    Parameters
    ----------
    image : ndarray
        ``(H,W)`` or ``(H,W,C)`` image of any numeric or bool dtype.
    overlay : None or ndarray, optional
        Optional image (e.g. a drawn heatmap channel) blended on top of
        `image`; may differ from `image` in height/width.
    overlay_alpha : float, optional
        Opacity of `overlay` when blending it over `image`.
    """

    # Added in 0.4.0.
    def __init__(self, image, overlay=None, overlay_alpha=0.75):
        self.image = image
        self.overlay = overlay
        self.overlay_alpha = overlay_alpha

    # Added in 0.4.0.
    @property
    def min_height(self):
        return self.image.shape[0]

    # Added in 0.4.0.
    @property
    def min_width(self):
        return self.image.shape[1]

    # Added in 0.4.0.
    def draw(self, height, width):
        image = self.image
        kind = image.dtype.kind
        # Normalize any input dtype to uint8 in [0, 255] before drawing.
        if kind == "b":
            image = image.astype(np.uint8) * 255
        elif kind == "u":
            _, _, max_value = iadt.get_value_range_of_dtype(image.dtype)
            image = image.astype(np.float64) / max_value
        elif kind == "i":
            min_value, _, max_value = iadt.get_value_range_of_dtype(image.dtype)
            dynamic_range = (max_value - min_value)
            # Map the signed range [min_value, max_value] to [0.0, 1.0].
            # Bug fix: this previously computed (min_value + image), i.e.
            # image - |min_value|, which maps every value to <= 0 and --
            # after the clip below -- rendered all signed images black.
            image = (image.astype(np.float64) - min_value) / dynamic_range

        if image.dtype.kind == "f":
            image = (np.clip(image, 0, 1.0) * 255).astype(np.uint8)

        image_rsp, size_rs, paddings = _resizepad_to_size(
            image, (height, width), cval=_COLOR_GRID_BACKGROUND)

        blend = image_rsp
        if self.overlay is not None:
            # Bring the overlay first to the image's size, then through the
            # same resize+pad as the image so both align pixel-perfectly.
            overlay_rs = self._resize_overlay(self.overlay,
                                              image.shape[0:2])
            overlay_rsp = self._resize_overlay(overlay_rs, size_rs)
            overlay_rsp = sizelib.pad(overlay_rsp,
                                      top=paddings[0], right=paddings[1],
                                      bottom=paddings[2], left=paddings[3],
                                      cval=_COLOR_GRID_BACKGROUND)
            blend = blendlib.blend_alpha(overlay_rsp, image_rsp,
                                         alpha=self.overlay_alpha)
        return blend

    # Added in 0.4.0.
    @classmethod
    def _resize_overlay(cls, arr, size):
        """Resize `arr` to `size` with nearest-neighbour interpolation."""
        arr_rs = ia.imresize_single_image(arr, size, interpolation="nearest")
        return arr_rs
class _DebugGridCBAsOICell(_IDebugGridCell):
    """Cell visualizing a coordinate-based augmentable.

    CBAsOI = coordinate-based augmentables on images,
    e.g. ``KeypointsOnImage``.

    Added in 0.4.0.
    """

    # Added in 0.4.0.
    def __init__(self, cbasoi, image):
        self.cbasoi = cbasoi
        self.image = image

    # Added in 0.4.0.
    @property
    def min_height(self):
        return self.image.shape[0]

    # Added in 0.4.0.
    @property
    def min_width(self):
        return self.image.shape[1]

    # Added in 0.4.0.
    def draw(self, height, width):
        background, size_rs, pads = _resizepad_to_size(
            self.image, (height, width), cval=_COLOR_GRID_BACKGROUND)
        # project the coordinates onto the resized image, then shift them
        # by the padding added around it
        projected = self.cbasoi.deepcopy()
        projected = projected.on_(size_rs)
        projected = projected.shift_(y=pads[0], x=pads[3])
        projected.shape = background.shape
        return projected.draw_on_image(background)
class _DebugGridColumn(object):
"""A single column within the debug image grid.
Added in 0.4.0.
"""
def __init__(self, cells):
self.cells = cells
@property
def nb_rows(self):
"""Number of rows in the column, i.e. examples in batch.
Added in 0.4.0.
"""
return len(self.cells)
@property
def max_cell_width(self):
"""Width in pixels of the widest cell in the column.
Added in 0.4.0.
"""
return max([cell.min_width for cell in self.cells])
@property
def max_cell_height(self):
"""Height in pixels of the tallest cell in the column.
Added in 0.4.0.
"""
return max([cell.min_height for cell in self.cells])
def draw(self, heights):
"""Convert this column to an image array.
Added in 0.4.0.
"""
width = self.max_cell_width
return np.vstack([cell.draw(height=height, width=width)
for cell, height
in zip(self.cells, heights)])
class _DebugGrid(object):
"""A debug image grid.
Columns correspond to the input datatypes (e.g. images, bounding boxes).
Rows correspond to the examples within a batch.
Added in 0.4.0.
"""
# Added in 0.4.0.
def __init__(self, columns):
assert len(columns) > 0
self.columns = columns
def draw(self):
"""Convert this grid to an image array.
Added in 0.4.0.
"""
nb_rows_by_col = [column.nb_rows for column in self.columns]
assert len(set(nb_rows_by_col)) == 1
rowwise_heights = np.zeros((self.columns[0].nb_rows,), dtype=np.int32)
for column in self.columns:
heights = [cell.min_height for cell in column.cells]
rowwise_heights = np.maximum(rowwise_heights, heights)
return np.hstack([column.draw(heights=rowwise_heights)
for column in self.columns])
# TODO image subtitles
# TODO run start date
# TODO main process id, process id
# TODO warning if map aspect ratio is different from image aspect ratio
# TODO error if non-image shapes differ from image shapes
def draw_debug_image(images, heatmaps=None, segmentation_maps=None,
                     keypoints=None, bounding_boxes=None, polygons=None,
                     line_strings=None):
    """Generate a debug image grid of a single batch and various datatypes.

    The output contains one column per provided datatype (one column per
    channel for heatmaps and segmentation maps) and one row per example
    in the batch.

    Added in 0.4.0.

    **Supported dtypes**:

        * ``uint8``: yes; tested
        * other dtypes: untested

    Parameters
    ----------
    images : ndarray or list of ndarray
        Images in the batch. Must always be provided. Batches without images
        cannot be visualized.

    heatmaps : None or list of imgaug.augmentables.heatmaps.HeatmapsOnImage, optional
        Heatmaps on the provided images.

    segmentation_maps : None or list of imgaug.augmentables.segmaps.SegmentationMapsOnImage, optional
        Segmentation maps on the provided images.

    keypoints : None or list of imgaug.augmentables.kps.KeypointsOnImage, optional
        Keypoints on the provided images.

    bounding_boxes : None or list of imgaug.augmentables.bbs.BoundingBoxesOnImage, optional
        Bounding boxes on the provided images.

    polygons : None or list of imgaug.augmentables.polys.PolygonsOnImage, optional
        Polygons on the provided images.

    line_strings : None or list of imgaug.augmentables.lines.LineStringsOnImage, optional
        Line strings on the provided images.

    Returns
    -------
    ndarray
        Visualized batch as RGB image.
    """
    columns = [_create_images_column(images)]

    if heatmaps is not None:
        columns.extend(_create_heatmaps_columns(heatmaps, images))
    if segmentation_maps is not None:
        columns.extend(_create_segmap_columns(segmentation_maps, images))

    # one column per coordinate-based augmentable type, in fixed order
    cba_inputs = [(keypoints, "Keypoints"),
                  (bounding_boxes, "Bounding Boxes"),
                  (polygons, "Polygons"),
                  (line_strings, "Line Strings")]
    for cbasois, title in cba_inputs:
        if cbasois is not None:
            columns.append(_create_cbasois_column(cbasois, images, title))

    grid_image = _DebugGrid(columns).draw()
    # 1px background-colored frame around the whole grid
    return sizelib.pad(grid_image, top=1, right=1, bottom=1, left=1,
                       mode="constant", cval=_COLOR_GRID_BACKGROUND)
# Added in 0.4.0.
def _add_borders(cells):
    """Surround every given cell with a 1px grid-background-colored border."""
    return [_DebugGridBorderCell(1, _COLOR_GRID_BACKGROUND, c)
            for c in cells]
# Added in 0.4.0.
def _add_text_cell(title, cells):
    """Prepend a text cell containing `title` to the given cells."""
    header = _DebugGridTextCell(title)
    return [header] + cells
# Added in 0.4.0.
def _create_images_column(images):
    """Create the grid column visualizing the batch's images."""
    image_cells = [_DebugGridImageCell(image) for image in images]
    description = _generate_images_description(images)
    # title cell, then description cell, then one cell per image
    stacked = _add_text_cell("Images",
                             _add_text_cell(description, image_cells))
    return _DebugGridColumn(_add_borders(stacked))
# Added in 0.4.0.
def _create_heatmaps_columns(heatmaps, images):
    """Create one grid column per heatmap channel."""
    nb_channels = max(heatmap.arr_0to1.shape[2] for heatmap in heatmaps)
    cells_per_channel = [[] for _ in range(nb_channels)]
    for image, heatmap in zip(images, heatmaps):
        drawn = heatmap.draw()
        for channel_idx, drawn_channel in enumerate(drawn):
            cells_per_channel[channel_idx].append(
                _DebugGridImageCell(image, overlay=drawn_channel))
    columns = []
    for channel_idx, cells in enumerate(cells_per_channel):
        # verbose details only once, on the first channel's column
        description = _generate_heatmaps_description(
            heatmaps,
            channel_idx=channel_idx,
            show_details=(channel_idx == 0))
        stacked = _add_text_cell("Heatmaps",
                                 _add_text_cell(description, cells))
        columns.append(_DebugGridColumn(_add_borders(stacked)))
    return columns
# Added in 0.4.0.
def _create_segmap_columns(segmentation_maps, images):
    """Create one grid column per segmentation map channel."""
    nb_channels = max(segmap.arr.shape[2] for segmap in segmentation_maps)
    cells_per_channel = [[] for _ in range(nb_channels)]
    for image, segmap in zip(images, segmentation_maps):
        # TODO this currently draws the background in black, hence the
        #      resulting blended image is dark at class id 0
        drawn = segmap.draw()
        for channel_idx, drawn_channel in enumerate(drawn):
            cells_per_channel[channel_idx].append(
                _DebugGridImageCell(image, overlay=drawn_channel))
    columns = []
    for channel_idx, cells in enumerate(cells_per_channel):
        # verbose details only once, on the first channel's column
        description = _generate_segmaps_description(
            segmentation_maps,
            channel_idx=channel_idx,
            show_details=(channel_idx == 0))
        stacked = _add_text_cell("SegMaps",
                                 _add_text_cell(description, cells))
        columns.append(_DebugGridColumn(_add_borders(stacked)))
    return columns
# Added in 0.4.0.
def _create_cbasois_column(cbasois, images, column_name):
    """Create a grid column for coordinate-based augmentables."""
    cells = [_DebugGridCBAsOICell(cbasoi, image)
             for cbasoi, image in zip(cbasois, images)]
    description = _generate_cbasois_description(cbasois, images)
    stacked = _add_text_cell(column_name,
                             _add_text_cell(description, cells))
    return _DebugGridColumn(_add_borders(stacked))
# Added in 0.4.0.
def _generate_images_description(images):
    """Generate description for image columns.

    Parameters
    ----------
    images : ndarray or list of ndarray
        Batch images, either as one stacked array or a list of arrays.

    Returns
    -------
    str
        Multi-line textual summary of shapes, dtype(s) and value range.
    """
    if ia.is_np_array(images):
        # single stacked array: one shape/dtype for the whole batch
        shapes_str = "array, shape %11s" % (str(images.shape),)
        dtypes_str = "dtype %8s" % (images.dtype.name,)
        if len(images) == 0:
            value_range_str = ""
        elif images.dtype.kind in ["u", "i", "b"]:
            value_range_str = "value range: %3d to %3d" % (
                np.min(images), np.max(images))
        else:
            value_range_str = "value range: %7.4f to %7.4f" % (
                np.min(images), np.max(images))
    else:
        # list of arrays: shapes/dtypes may differ per image
        stats = _ListOfArraysStats(images)
        if stats.empty:
            shapes_str = ""
        elif stats.all_same_shape:
            shapes_str = (
                "list of %3d arrays\n"
                "all shape %11s"
            ) % (len(images), stats.shapes[0],)
        else:
            shapes_str = (
                "list of %3d arrays\n"
                "varying shapes\n"
                "smallest image: %11s\n"
                "largest image: %11s\n"
                "height: %3d to %3d\n"
                "width: %3d to %3d\n"
                "channels: %1s to %1s"
            ) % (len(images),
                 stats.smallest_shape, stats.largest_shape,
                 stats.height_min, stats.height_max,
                 stats.width_min, stats.width_max,
                 stats.get_channels_min("None"),
                 stats.get_channels_max("None"))
        if stats.empty:
            dtypes_str = ""
        elif stats.all_same_dtype:
            dtypes_str = "all dtype %8s" % (stats.dtypes[0],)
        else:
            dtypes_str = "dtypes: %s" % (", ".join(stats.unique_dtype_names),)
        if stats.empty:
            value_range_str = ""
        else:
            # integer-like dtypes get integer formatting, otherwise float
            value_range_str = "value range: %3d to %3d"
            if not stats.all_dtypes_intlike:
                value_range_str = "value range: %6.4f to %6.4f"
            value_range_str = value_range_str % (stats.value_min,
                                                 stats.value_max)
    strs = [shapes_str, dtypes_str, value_range_str]
    return _join_description_strs(strs)
# Added in 0.4.0.
def _generate_segmaps_description(segmaps, channel_idx, show_details):
    """Generate the text description for a segmentation map column."""
    if not segmaps:
        return "empty list"
    strs = _generate_sm_hm_description(segmaps, channel_idx, show_details)
    channel_arrs = [segmap.arr[:, :, channel_idx] for segmap in segmaps]
    channel_stats = _ListOfArraysStats(channel_arrs)
    value_range_str = (
        "value range: %3d to %3d\n"
        "number of unique classes: %2d"
    ) % (channel_stats.value_min, channel_stats.value_max,
         channel_stats.nb_unique_values)
    return _join_description_strs(strs + [value_range_str])
# Added in 0.4.0.
def _generate_heatmaps_description(heatmaps, channel_idx, show_details):
    """Generate the text description for a heatmap column."""
    if not heatmaps:
        return "empty list"
    strs = _generate_sm_hm_description(heatmaps, channel_idx, show_details)
    channel_arrs = [heatmap.arr_0to1[:, :, channel_idx]
                    for heatmap in heatmaps]
    channel_stats = _ListOfArraysStats(channel_arrs)
    value_range_str = (
        "value range: %6.4f to %6.4f\n"
        " (internal, max is [0.0, 1.0])"
    ) % (channel_stats.value_min, channel_stats.value_max)
    return _join_description_strs(strs + [value_range_str])
# Added in 0.4.0.
def _generate_sm_hm_description(augmentables, channel_idx, show_details):
    """Generate description for SegMap/Heatmap columns.

    Returns
    -------
    list of str
        ``[channel_str, shapes_str, on_shapes_str]`` fragments; parts that
        are suppressed (e.g. when `show_details` is ``False``) are empty
        strings.

    NOTE(review): for ``None``/empty inputs this returns a plain str instead
    of a list; the callers seen here guard against empty lists before
    calling, but the ``None`` branch's return type is inconsistent -- verify
    no caller concatenates that value with a list.
    """
    if augmentables is None:
        return ""
    if len(augmentables) == 0:
        return "empty list"
    arrs = [augmentable.get_arr() for augmentable in augmentables]
    stats = _ListOfArraysStats(arrs)
    # only label the channel if any array actually has a channel axis
    if stats.get_channels_max(-1) > -1:
        channel_str = "Channel %1d of %1d" % (channel_idx+1,
                                              stats.get_channels_max(-1))
    else:
        channel_str = ""
    if not show_details:
        shapes_str = ""
    elif stats.all_same_shape:
        shapes_str = (
            "items for %3d images\n"
            "all arrays of shape %11s"
        ) % (len(augmentables), stats.shapes[0],)
    else:
        shapes_str = (
            "items for %3d images\n"
            "varying array shapes\n"
            "smallest: %11s\n"
            "largest: %11s\n"
            "height: %3d to %3d\n"
            "width: %3d to %3d\n"
            "channels: %1s to %1s"
        ) % (len(augmentables),
             stats.smallest_shape, stats.largest_shape,
             stats.height_min, stats.height_max,
             stats.width_min, stats.width_max,
             stats.get_channels_min("None"),
             stats.get_channels_max("None"))
    if not show_details:
        on_shapes_str = ""
    else:
        on_shapes_str = _generate_on_image_shapes_descr(augmentables)
    return [channel_str, shapes_str, on_shapes_str]
# Added in 0.4.0.
def _generate_cbasois_description(cbasois, images):
    """Generate description for coordinate-based augmentable columns.

    Parameters
    ----------
    cbasois : list
        Coordinate-based "*OnImage" instances (e.g. keypoints or polygons
        on images), one per image. Items without an ``area`` or ``label``
        attribute are tolerated.
    images : list of ndarray
        Images corresponding to `cbasois`; their height/width are used to
        decide whether coordinates lie outside of the image planes.

    Returns
    -------
    str
        Multi-line description covering item counts, areas, labels and
        out-of-image coordinate statistics.
    """
    images_str = "items for %d images" % (len(cbasois),)

    # use [-1] as a placeholder so min()/max() below never see empty lists
    nb_items_lst = [len(cbasoi.items) for cbasoi in cbasois]
    nb_items_lst = nb_items_lst if len(cbasois) > 0 else [-1]
    nb_items = sum(nb_items_lst)
    items_str = (
        "fewest items on image: %3d\n"
        "most items on image: %3d\n"
        "total items: %6d"
    ) % (min(nb_items_lst), max(nb_items_lst), nb_items)

    # items without an "area" attribute (e.g. keypoints) count as area -1
    areas = [
        cba.area if hasattr(cba, "area") else -1
        for cbasoi in cbasois
        for cba in cbasoi.items]
    # fixed: guard on len(areas), not len(cbasois) -- previously a
    # non-empty cbasois list whose items lists were all empty produced
    # min([])/max([]) ValueErrors
    areas = areas if len(areas) > 0 else [-1]
    areas_str = (
        "smallest area: %7.4f\n"
        "largest area: %7.4f"
    ) % (min(areas), max(areas))

    labels = list(ia.flatten([item.label if hasattr(item, "label") else None
                              for cbasoi in cbasois
                              for item in cbasoi.items]))
    labels_ctr = collections.Counter(labels)
    labels_most_common = []
    # nb_items > 0 whenever this loop runs, since labels is non-empty then
    for label, count in labels_ctr.most_common(10):
        labels_most_common.append("\n - %s (%3d, %6.2f%%)" % (
            label, count, count/nb_items * 100))
    labels_str = (
        "unique labels: %2d\n"
        "most common labels:"
        "%s"
    ) % (len(labels_ctr.keys()), "".join(labels_most_common))

    # collect per-coordinate out-of-image flags and squared distances from
    # each image's center
    coords_ooi = []
    dists = []
    for cbasoi, image in zip(cbasois, images):
        h, w = image.shape[0:2]
        for cba in cbasoi.items:
            coords = cba.coords
            for coord in coords:
                x, y = coord
                dist = (x - w/2)**2 + (y - h/2) ** 2
                coords_ooi.append(not (0 <= x < w and 0 <= y < h))
                dists.append(((x, y), dist))

    if len(dists) > 0:
        # use x_ and y_ because otherwise we get a 'redefines x' error in
        # pylint
        coords_extreme = [(x_, y_)
                          for (x_, y_), _
                          in sorted(dists, key=lambda t: t[1])]
        nb_ooi = sum(coords_ooi)
        ooi_str = (
            "coords out of image: %d (%6.2f%%)\n"
            "most extreme coord: (%5.1f, %5.1f)"
            # TODO "items anyhow out of image: %d (%.2f%%)\n"
            # TODO "items fully out of image: %d (%.2f%%)\n"
        ) % (nb_ooi, nb_ooi / len(coords_ooi) * 100,
             coords_extreme[-1][0], coords_extreme[-1][1])
    else:
        # fixed: previously this case raised ZeroDivisionError/IndexError
        # when there were no coordinates at all
        ooi_str = "coords out of image: 0 (  0.00%)"

    # fixed: calling the helper with an empty list previously raised an
    # IndexError; skip it entirely in that case
    on_shapes_str = (_generate_on_image_shapes_descr(cbasois)
                     if len(cbasois) > 0 else "")

    return _join_description_strs([images_str, items_str, areas_str,
                                   labels_str, ooi_str, on_shapes_str])
# Added in 0.4.0.
def _generate_on_image_shapes_descr(augmentables):
    """Generate text block for non-image data describing their image shapes.

    Parameters
    ----------
    augmentables : list
        Augmentable instances with a ``.shape`` attribute denoting the
        shape of the image each instance is placed on.

    Returns
    -------
    str
        Description of the on-image shape(s); an empty string for empty
        input.
    """
    on_shapes = [augmentable.shape for augmentable in augmentables]
    stats_imgs = _ListOfArraysStats([np.empty(on_shape)
                                     for on_shape in on_shapes])
    if stats_imgs.empty:
        # fixed: for empty inputs, all_same_shape is True and the branch
        # below accessed shapes[0], raising an IndexError
        return ""
    if stats_imgs.all_same_shape:
        on_shapes_str = "all on image shape %11s" % (stats_imgs.shapes[0],)
    else:
        on_shapes_str = (
            "on varying image shapes\n"
            "smallest image: %11s\n"
            "largest image: %11s"
        ) % (stats_imgs.smallest_shape, stats_imgs.largest_shape)
    return on_shapes_str
# Added in 0.4.0.
def _join_description_strs(strs):
"""Join lines to a single string while removing empty lines."""
strs = [str_i for str_i in strs if len(str_i) > 0]
return "\n".join(strs)
class _ListOfArraysStats(object):
"""Class to derive aggregated values from a list of arrays.
E.g. shape of the largest array, number of unique dtypes etc.
Added in 0.4.0.
"""
def __init__(self, arrays):
self.arrays = arrays
# Added in 0.4.0.
@property
def empty(self):
return len(self.arrays) == 0
# Added in 0.4.0.
@property
def areas(self):
return [np.prod(arr.shape[0:2]) for arr in self.arrays]
# Added in 0.4.0.
@property
def arrays_by_area(self):
arrays_by_area = [
arr for arr, _
in sorted(zip(self.arrays, self.areas), key=lambda t: t[1])
]
return arrays_by_area
# Added in 0.4.0.
@property
def shapes(self):
return [arr.shape for arr in self.arrays]
# Added in 0.4.0.
@property
def all_same_shape(self):
if self.empty:
return True
return len(set(self.shapes)) == 1
# Added in 0.4.0.
@property
def smallest_shape(self):
if self.empty:
return tuple()
return self.arrays_by_area[0].shape
# Added in 0.4.0.
@property
def largest_shape(self):
if self.empty:
return tuple()
return self.arrays_by_area[-1].shape
# Added in 0.4.0.
@property
def area_max(self):
if self.empty:
return tuple()
return np.prod(self.arrays_by_area[-1][0:2])
# Added in 0.4.0.
@property
def heights(self):
return [arr.shape[0] for arr in self.arrays]
# Added in 0.4.0.
@property
def height_min(self):
heights = self.heights
return min(heights) if len(heights) > 0 else 0
# Added in 0.4.0.
@property
def height_max(self):
heights = self.heights
return max(heights) if len(heights) > 0 else 0
# Added in 0.4.0.
@property
def widths(self):
return [arr.shape[1] for arr in self.arrays]
# Added in 0.4.0.
@property
def width_min(self):
widths = self.widths
return min(widths) if len(widths) > 0 else 0
# Added in 0.4.0.
@property
def width_max(self):
widths = self.widths
return max(widths) if len(widths) > 0 else 0
# Added in 0.4.0.
def get_channels_min(self, default):
if self.empty:
return -1
if any([arr.ndim == 2 for arr in self.arrays]):
return default
return min([arr.shape[2] for arr in self.arrays if arr.ndim > 2])
# Added in 0.4.0.
def get_channels_max(self, default):
if self.empty:
return -1
if not any([arr.ndim > 2 for arr in self.arrays]):
return default
return max([arr.shape[2] for arr in self.arrays if arr.ndim > 2])
# Added in 0.4.0.
@property
def dtypes(self):
return [arr.dtype for arr in self.arrays]
# Added in 0.4.0.
@property
def dtype_names(self):
return [dtype.name for dtype in self.dtypes]
# Added in 0.4.0.
@property
def all_same_dtype(self):
return len(set(self.dtype_names)) in [0, 1]
# Added in 0.4.0.
@property
def all_dtypes_intlike(self):
if self.empty:
return True
return all([arr.dtype.kind in ["u", "i", "b"] for arr in self.arrays])
# Added in 0.4.0.
@property
def unique_dtype_names(self):
return sorted(list({arr.dtype.name for arr in self.arrays}))
# Added in 0.4.0.
@property
def value_min(self):
return min([np.min(arr) for arr in self.arrays])
# Added in 0.4.0.
@property
def value_max(self):
return max([np.max(arr) for arr in self.arrays])
# Added in 0.4.0.
@property
def nb_unique_values(self):
values_uq = set()
for arr in self.arrays:
values_uq.update(np.unique(arr))
return len(values_uq)
# Added in 0.4.0.
@six.add_metaclass(ABCMeta)
class _IImageDestination(object):
    """A destination which receives images to save.

    Interface class; concrete subclasses decide where and how received
    images are stored. Both methods are no-ops here.
    """

    def on_batch(self, batch):
        """Signal to the destination that a new batch is processed.

        This is intended to be used by the destination e.g. to count batches.

        Added in 0.4.0.

        Parameters
        ----------
        batch : imgaug.augmentables.batches._BatchInAugmentation
            A batch to which the next ``receive()`` call may correspond.

        """

    def receive(self, image):
        """Receive and handle an image.

        Added in 0.4.0.

        Parameters
        ----------
        image : ndarray
            Image to be handled by the destination.

        """
# Added in 0.4.0.
class _MultiDestination(_IImageDestination):
    """Fan-out destination forwarding every call to child destinations."""

    # Added in 0.4.0.
    def __init__(self, destinations):
        self.destinations = destinations

    # Added in 0.4.0.
    def on_batch(self, batch):
        for dest in self.destinations:
            dest.on_batch(batch)

    # Added in 0.4.0.
    def receive(self, image):
        for dest in self.destinations:
            dest.receive(image)
# Added in 0.4.0.
class _FolderImageDestination(_IImageDestination):
    """Destination that writes each received image into a directory."""

    # Added in 0.4.0.
    def __init__(self, folder_path,
                 filename_pattern="batch_{batch_id:06d}.png"):
        super(_FolderImageDestination, self).__init__()
        self.folder_path = folder_path
        self.filename_pattern = filename_pattern
        # batch counter; the first on_batch() call advances it to 0
        self._batch_id = -1
        # target filepath for the next receive(); set in on_batch()
        self._filepath = None

    # Added in 0.4.0.
    def on_batch(self, batch):
        self._batch_id += 1
        filename = self.filename_pattern.format(batch_id=self._batch_id)
        self._filepath = os.path.join(self.folder_path, filename)

    # Added in 0.4.0.
    def receive(self, image):
        imageio.imwrite(self._filepath, image)
# Added in 0.4.0.
@six.add_metaclass(ABCMeta)
class _IBatchwiseSchedule(object):
    """A schedule determining per batch whether a condition is met.

    Interface class; subclasses implement the actual decision logic.
    """

    def on_batch(self, batch):
        """Determine for the given batch whether the condition is met.

        Added in 0.4.0.

        Parameters
        ----------
        batch : _BatchInAugmentation
            Batch for which to evaluate the condition.

        Returns
        -------
        bool
            Signal whether the condition is met.

        """
# Added in 0.4.0.
class _EveryNBatchesSchedule(_IBatchwiseSchedule):
    """A schedule that generates a signal at every ``N`` th batch.

    This schedule must be called for *every* batch in order to count them.

    Added in 0.4.0.
    """

    def __init__(self, interval):
        self.interval = interval
        # batch counter; the first on_batch() call advances it to 0,
        # hence the very first batch always signals True
        self._batch_id = -1

    # Added in 0.4.0.
    def on_batch(self, batch):
        self._batch_id += 1
        return self._batch_id % self.interval == 0
class _SaveDebugImage(meta.Augmenter):
    """Augmenter that renders debug images and hands them to a destination.

    Whether an image is generated for a given batch is decided by the
    provided schedule.

    Added in 0.4.0.

    Parameters
    ----------
    destination : _IImageDestination
        The destination receiving debug images.

    schedule : _IBatchwiseSchedule
        The schedule to use to determine for which batches an image is
        supposed to be generated.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    """

    # Added in 0.4.0.
    def __init__(self, destination, schedule,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(_SaveDebugImage, self).__init__(
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        self.destination = destination
        self.schedule = schedule

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        # query the schedule first so it counts every batch, then let the
        # destination update its own batch-based state
        do_save = self.schedule.on_batch(batch)
        self.destination.on_batch(batch)
        if do_save:
            debug_image = draw_debug_image(
                images=batch.images,
                heatmaps=batch.heatmaps,
                segmentation_maps=batch.segmentation_maps,
                keypoints=batch.keypoints,
                bounding_boxes=batch.bounding_boxes,
                polygons=batch.polygons,
                line_strings=batch.line_strings)
            self.destination.receive(debug_image)
        return batch
class SaveDebugImageEveryNBatches(_SaveDebugImage):
    """Visualize data in batches and save corresponding plots to a folder.

    Added in 0.4.0.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.debug.draw_debug_image`.

    Parameters
    ----------
    destination : str or _IImageDestination
        Path to a folder. The saved images will follow a filename pattern
        of ``batch_<batch_id>.png``. The latest image will additionally be
        saved to ``batch_latest.png``.

    interval : int
        Interval in batches. If set to ``N``, every ``N`` th batch an
        image will be generated and saved, starting with the first observed
        batch.
        Note that the augmenter only counts batches that it sees. If it is
        executed conditionally or re-instantiated, it may not see all batches
        or the counter may be wrong in other ways.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> import tempfile
    >>> folder_path = tempfile.mkdtemp()
    >>> seq = iaa.Sequential([
    >>>     iaa.Sequential([
    >>>         iaa.Fliplr(0.5),
    >>>         iaa.Crop(px=(0, 16))
    >>>     ], random_order=True),
    >>>     iaa.SaveDebugImageEveryNBatches(folder_path, 100)
    >>> ])

    """

    # Added in 0.4.0.
    def __init__(self, destination, interval,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        schedule = _EveryNBatchesSchedule(interval)
        # A plain string is interpreted as a folder path and expanded to a
        # pair of destinations: one per-batch file plus one fixed
        # "batch_latest.png" file that is overwritten each time.
        if not isinstance(destination, _IImageDestination):
            assert os.path.isdir(destination), (
                "Expected 'destination' to be a string path to an existing "
                "directory. Got path '%s'." % (destination,))
            destination = _MultiDestination([
                _FolderImageDestination(destination),
                _FolderImageDestination(destination,
                                        filename_pattern="batch_latest.png")
            ])
        super(SaveDebugImageEveryNBatches, self).__init__(
            destination=destination, schedule=schedule,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)

    # Added in 0.4.0.
    def get_parameters(self):
        # NOTE(review): assumes `self.destination` is the _MultiDestination
        # built in __init__; a caller-supplied _IImageDestination instance
        # would make this raise AttributeError -- confirm intended.
        dests = self.destination.destinations
        return [
            dests[0].folder_path,
            dests[0].filename_pattern,
            dests[1].folder_path,
            dests[1].filename_pattern,
            self.schedule.interval
        ]
| {
"repo_name": "aleju/ImageAugmenter",
"path": "imgaug/augmenters/debug.py",
"copies": "2",
"size": "39118",
"license": "mit",
"hash": -1162588301609256000,
"line_mean": 29.5609375,
"line_max": 173,
"alpha_frac": 0.5741857968,
"autogenerated": false,
"ratio": 3.650429264650989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010006904148259945,
"num_lines": 1280
} |
#AugmentHaps.py
# Augment a set of known haplotypes (read from a VCF) with additional
# haplotypes given as decimal identifiers, writing a combined VCF.
#
# NOTE(review): Python 2 script (uses `xrange`, iterates a file opened in
# "rb" mode as text) depending on the project-local `Modules` package.

# Import necessary modules
import numpy as np
from argparse import ArgumentParser
from Modules.VCF_parser import *
from Modules.General import *
from Modules.IO import *

# Inputs:
parser = ArgumentParser()
parser.add_argument(
    '-i','--inputHaps',
    action="store",
    dest="knownHaps",
    help = "A VCF-formatted file containing the known haplotypes encoded \
in the GT field. GT must be present in the FORMAT field, and \
ploidy must be 1. ",
    required=True
    )
parser.add_argument(
    "-n", "--newHaps",
    action="store",
    dest="inNewHaps",
    help="A file containting the decimal identifiers for any new haplotypes \
to be added, one per line. ",
    required=True
    )
parser.add_argument(
    '-o','--outFile',
    action="store",
    dest="outFile",
    required=True,
    help="A name for the output file. "
    )
o = parser.parse_args()

# Debugging scaffold kept from development (hard-coded inputs):
#class o:
#    pass
## the initial haplotypes file
#o.knownHaps = "Lascal_DeNovoAlign_d600q20_Haps_Extended.vcf"
## A file containing those haplotypes to be added, one per line
#o.inNewHaps = "NewHapsToAdd.txt"
## A name for the output file
#o.outFile = "Rerun2/WithAddedHaps.vcf"

# Load haplotypes
KnownHaps, KnownNames = toNP_array(o.knownHaps, "GT")
# Invert haplotypes so that ref allele is 1
#KnownHaps = invertArray(KnownHaps)
# Find unique haplotypes
inHapArray = ExtendHaps(KnownHaps)
# Convert to np array
# Read in new haplotypes, new haplotype names
# NOTE(review): file handle is never closed; a `with open(...)` block
# would be safer.
inNewHaps = open(o.inNewHaps, "rb")
newHapNames = []
NewDecHaps = []
NewHaps = []
for iterLine in inNewHaps:
    # each line holds one decimal haplotype identifier; names get a
    # "NH_" prefix
    newHapNames.append("NH_%s" % iterLine.strip())
    NewDecHaps.append(int(iterLine.strip()))
NewHaps = [DecHapToNPHap(NewDecHaps[x])
           for x in xrange(len(NewDecHaps))]
# Append new haplotypes to old haplotypes
fullHaps = [np.copy(inHapArray[:,x]) for x in xrange(len(KnownNames))]
fullHaps.extend([invertArray(NewHaps[x].transpose()) for x in xrange(len(NewHaps))])
# Combine names fields
KnownNames.extend(newHapNames)
# Write out haplotypes: reuse the input VCF's header/site info and append
# the combined sample columns
tmpVCF = vcfReader(o.knownHaps)
output3 = vcfWriter(
    o.outFile,
    source="AugmentHaploypes.py",
    commandLine="",
    baseHead=tmpVCF.headInfo,
    FormatBlock=tmpVCF.headInfo["FORMAT"])
output3.writeHeader(KnownNames)
output3.setFormat("GT")
output3.importLinesInfo(
    tmpVCF.getData("chrom", lineTarget="a"),
    tmpVCF.getData("pos", lineTarget="a"),
    tmpVCF.getData("ref", lineTarget="a"),
    tmpVCF.getData("alt", lineTarget="a"),
    tmpVCF.getData("qual", lineTarget="a")
    )
for hapIter in xrange(len(fullHaps)):
    # Add predicted SNP frequencies to VCF output
    output3.importSampleValues(list(fullHaps[hapIter]), KnownNames[hapIter])
output3.writeSamples()
# Close output files
output3.close()
| {
"repo_name": "cruzan-lab/CallHap",
"path": "AugmentHaps.py",
"copies": "1",
"size": "2891",
"license": "mit",
"hash": 7183536166534940000,
"line_mean": 29.0860215054,
"line_max": 84,
"alpha_frac": 0.6727775856,
"autogenerated": false,
"ratio": 2.9804123711340207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.908127730762309,
"avg_score": 0.014382529822186192,
"num_lines": 93
} |
# Augmenting Face Image Database via Transformations (Flip, Rotate, Crop & Scale)
# Jay Narhan
# UserID: JN721
#
# This class is designed to apply a series of image transformations to a set of images. A transformation is simply a
# function. It is a function that maps the image to another version of the image.
#
# G(x,y) = T( f(x,y) )
#
# G will be the new image based on the Transformation T. For every image, f(x,y) there will be 10 transformations
# generated.
#
# Images (including the original) may be saved to disk, along with a reference file that tracks the labelled emotion for
# each original image. Saving to disk requires invocation of the script with the argument "-s" at the cmd line.
# As a Class of type NN_Images, the object can be used for in-memory processing as required.
# Usage: Import this Class via "from NN_images import *" and call the methods needed.
import math, os, sys
import numpy as np
import pandas as pd
from skimage.io import imread, imsave
from skimage import transform as tf
from skimage import img_as_float
class NN_Images(object):
    """Applies image transformations (flip, rotate, crop, scale) to a
    directory of face images, tracking each image's emotion label.

    NOTE(review): Python 2 code (print statements, dict.iteritems,
    floor-division crop indices) relying on skimage and pandas.
    """

    def __init__(self):
        # maps filename -> {'Image': ndarray, 'Emotion': str}
        self.images = dict()
        self.ROOT_DIR = os.getcwd()
        self.IMAGE_DIR = './images'
        self.DATA_DIR = './data'
        self.TRANS_DIR = './trans_res'
        self.IMG_WIDTH = 350
        self.IMG_HEIGHT = 350
        self.legends_file = 'legend.csv'

    def get_imgs(self):
        """Return the dict of all stored (original + transformed) images."""
        return self.images

    def transform_img__(self, img, fn, emotion):
        """Store `img` plus 10 transformed variants, all labelled `emotion`.

        Transformed copies are keyed "Trans<counter>_<fn>".
        """
        self.images[fn] = {'Image': img, 'Emotion': emotion}  # Store original
        counter = 0
        self.images["Trans" + str(counter) + "_" + fn] = {'Image': np.fliplr(img), 'Emotion': emotion}  # FLIP the image
        counter += 1
        for deg in range(-10, 15, 5):  # ROTATE to be robust to camera orientation
            if deg == 0:
                continue
            self.images["Trans" + str(counter) + "_" + fn] = {'Image': tf.rotate(img, deg), 'Emotion': emotion}
            counter += 1
        lenX, lenY = img.shape  # CROP based on rough heuristic
        # NOTE(review): relies on Python 2 integer division for the crop
        # margins; under Python 3 these slice indices would become floats.
        for crop_size in range(8, 14, 2):
            cropped = img[lenX / crop_size: - lenX / crop_size, lenY / crop_size: - lenY / crop_size]
            self.images["Trans" + str(counter) + "_" + fn] = {'Image': cropped, 'Emotion': emotion}
            counter += 1
        for i in range(2):  # SCALE down images (random factor btw 1.1 to 1.21)
            scale_factor = math.sqrt(1.1) ** np.random.randint(2, 5)
            scaled_img = tf.warp(img, tf.AffineTransform(scale=(scale_factor, scale_factor)))
            self.images["Trans" + str(counter) + "_" + fn] = {'Image': scaled_img, 'Emotion': emotion}
            counter += 1

    def process_imgs(self):
        """Read the legend, then load and transform every image on disk."""
        # Read the file that tracks the emotions against the original images. Each new transformed image, will carry the
        # same emotion label.
        try:
            os.chdir(self.DATA_DIR)
            legend = pd.read_csv(self.legends_file)
        except IOError as e:
            print "I/O Error ({0}).".format(e.args[0])
            sys.exit(2)
        except OSError as e:
            print "O/S Error({0}:{1})".format(e.args[1], self.DATA_DIR)
            sys.exit(2)
        finally:
            os.chdir(self.ROOT_DIR)
        os.chdir(self.IMAGE_DIR)
        processed_imgs = 0
        for filename in os.listdir(os.getcwd()):
            try:
                img = img_as_float(imread(filename))  # Read file as a float
                # Pre-process: force all images to a common size
                rows, cols = img.shape
                if cols != self.IMG_WIDTH or rows != self.IMG_HEIGHT:
                    print 'Resizing image ... '
                    img = tf.resize(img, output_shape=(self.IMG_WIDTH, self.IMG_HEIGHT))
                emotion = legend.loc[legend['image'] == filename, 'emotion'].iloc[0]  # Track the emotion of original
                self.transform_img__(img, filename, emotion)
                processed_imgs += 1
            except IOError as e:
                print "WARNING: {0} ... skipping this non-image file.".format(e.args[0])
        print 'Processed {0} images'.format(processed_imgs)
        os.chdir(self.ROOT_DIR)

    def S2D(self, userid):
        """Save all stored images and a legend CSV to the results folder."""
        os.chdir(self.ROOT_DIR)
        if not os.path.exists(self.TRANS_DIR):
            os.makedirs('trans_res')
        os.chdir(self.TRANS_DIR)
        legend = pd.DataFrame(columns=['user.id', 'image', 'emotion'])
        try:
            for name, data in self.images.iteritems():
                imsave(name, data['Image'])  # Save image to disk
                df = pd.DataFrame([[userid,
                                    name, data['Emotion']]],
                                  columns=['user.id', 'image', 'emotion'])
                legend = legend.append(df)
            legend = legend.sort_values(by='image')
            legend.to_csv('01_legend.csv', index=False)  # More efficient write to disk
        except:
            # NOTE(review): bare except swallows *all* errors (including
            # KeyboardInterrupt); narrowing to specific exceptions and
            # logging the original error would be safer.
            print 'Unknown Error in Saving to Disk'
            pass
        finally:
            os.chdir(self.ROOT_DIR)
| {
"repo_name": "muxspace/facial_expressions",
"path": "python/JN721.py",
"copies": "2",
"size": "5371",
"license": "apache-2.0",
"hash": -3925568868637191000,
"line_mean": 37.0921985816,
"line_max": 120,
"alpha_frac": 0.5440327686,
"autogenerated": false,
"ratio": 3.8640287769784174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5408061545578418,
"avg_score": null,
"num_lines": null
} |
''' augment __init__() '''
def func3():
    """Print a message identifying this module (Python 2 demo code)."""
    print "you are calling func3 from world module"
class MyClass(object):
    """Demo base class holding who/what/when and printing joke messages."""

    def __init__(self, who, what, when):
        self.who = who
        self.what = what
        self.when = when

    def hello(self):
        # "nice list" phrasing of the message
        print "if you are good, {} will bring presents instead of {} for {}".format(self.who, self.what, self.when)

    def not_hello(self):
        # "naughty list" phrasing of the message
        print "my performance review says i'm getting {} from {} for sure this {}".format(self.what, self.who, self.when)
class MyChildClass(MyClass):
    """Demo subclass augmenting __init__ with an extra `why` attribute."""

    def __init__(self, why, who, what, when):
        self.why = why
        # delegate the shared attributes to the parent initializer
        super(MyChildClass, self).__init__(who, what, when)

    def hello(self):
        # overrides the parent message, additionally using `why`
        print "i want {} to bring {} for my brother at {} because he's in {}".format(self.who, self.what, self.when, self.why)
def main():
    """Entry point used when the module is executed directly."""
    print "calling main function from world.main"
    print "function name __name__ is calling: {}".format(__name__)
if __name__ == "__main__":
    main()

''' validation test '''
# NOTE(review): ad-hoc manual checks exercising the classes above; these
# appear to run at module level (i.e. also on import) -- confirm intended.
my_xmas = MyClass("Santa Claus", "lumps of coal", "Christmas")
print
print my_xmas.who
print my_xmas.what
print my_xmas.when
print
my_xmas.hello()
print
my_xmas.not_hello()
print
my_bros_xmas = MyChildClass("Hawaii", "Santa Claus", "lumps of coal", "Christmas")
print
my_bros_xmas.hello()
print
my_bros_xmas.not_hello()
print
| {
"repo_name": "daveg999/automation_class",
"path": "class9/ex6/mytest/world.py",
"copies": "1",
"size": "1419",
"license": "apache-2.0",
"hash": -6353121979466090000,
"line_mean": 24.8,
"line_max": 126,
"alpha_frac": 0.5990133897,
"autogenerated": false,
"ratio": 3.3154205607476634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9386437415401993,
"avg_score": 0.005599307009134172,
"num_lines": 55
} |
""" A UI that allows the user to choose a view. """
# Enthought library imports.
from enthought.pyface.workbench.api import IView, WorkbenchWindow
from enthought.traits.api import Any, HasTraits, Instance, List, Str
from enthought.traits.api import TraitError, Undefined
from enthought.traits.ui.api import Item, TreeEditor, TreeNode, View
from enthought.traits.ui.menu import Action # fixme: Non-api import!
class Category(HasTraits):
    """ A view category.

    Used to group a window's views under a common name in the chooser
    tree.
    """

    # The name of the category.
    name = Str

    # The views in the category.
    views = List
class WorkbenchWindowTreeNode(TreeNode):
    """ A tree node for workbench windows that displays the window's views.

    The views are grouped by their category.
    """

    #### 'TreeNode' interface #################################################

    # List of object classes that the node applies to.
    node_for = [WorkbenchWindow]

    ###########################################################################
    # 'TreeNode' interface.
    ###########################################################################

    def get_children(self, object):
        """ Get the object's children.

        Returns the window's view categories, sorted by name.
        """

        # Collate the window's views into categories.
        categories_by_name = self._get_categories_by_name(object)

        # NOTE(review): relies on dict.values() returning a list
        # (Python 2); under Python 3 this would need sorted(...) instead
        # of in-place .sort().
        categories = categories_by_name.values()
        categories.sort(key=lambda category: category.name)

        return categories

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _get_categories_by_name(self, window):
        """ Return a dictionary containing all categories keyed by name. """

        categories_by_name = {}
        for view in window.views:
            category = categories_by_name.get(view.category)
            if category is None:
                # First view seen with this category name: create the group.
                category = Category(name=view.category)
                categories_by_name[view.category] = category

            category.views.append(view)

        return categories_by_name
class IViewTreeNode(TreeNode):
    """ A tree node for objects that implement the 'IView' interface.

    This node does *not* recognise objects that can be *adapted* to the 'IView'
    interface, only those that actually implement it. If we wanted to allow
    for adaptation we would have to work out a way for the rest of the
    'TreeNode' code to access the adapter, not the original object. We could,
    of course override every method, but that seems a little, errr, tedious.
    We could probably do with something like in the PyFace tree where there
    is a method that returns the actual object that we want to manipulate.
    """

    def is_node_for(self, obj):
        """ Returns whether this is the node that handles a specified object.
        """

        # By checking for 'is obj' here, we are *not* allowing adaptation (if
        # we were allowing adaptation it would be 'is not None'). See the class
        # doc string for details.
        return IView(obj, Undefined) is obj

    def get_icon(self, obj, is_expanded):
        """ Returns the icon for a specified object.

        Uses the view's own image when it has one, otherwise the editor's
        default leaf icon.
        """

        if obj.image is not None:
            icon = obj.image

        else:
            # fixme: A bit of magic here! Is there a better way to say 'use
            # the default leaf icon'?
            icon = '<item>'

        return icon
class ViewChooser(HasTraits):
    """ Allow the user to choose a view.

    This implementation shows views in a tree grouped by category. The
    chosen view (if any) is exposed via the ``view`` trait.
    """

    # The window that contains the views to choose from.
    window = Instance('enthought.pyface.workbench.api.WorkbenchWindow')

    # The currently selected tree item (at any point in time this might be
    # either None, a view category, or a view).
    selected = Any

    # The selected view (None if the selected item is not a view).
    view = Instance(IView)

    #### Traits UI views ######################################################

    traits_ui_view = View(
        Item(
            name = 'window',
            editor = TreeEditor(
                nodes = [
                    WorkbenchWindowTreeNode(
                        auto_open = True,
                        label = '=Views',
                        rename = False,
                        copy = False,
                        delete = False,
                        insert = False,
                        menu = None,
                    ),
                    TreeNode(
                        node_for = [Category],
                        auto_open = True,
                        children = 'views',
                        label = 'name',
                        rename = False,
                        copy = False,
                        delete = False,
                        insert = False,
                        menu = None,
                    ),
                    IViewTreeNode(
                        auto_open = False,
                        label = 'name',
                        rename = False,
                        copy = False,
                        delete = False,
                        insert = False,
                        menu = None,
                    )
                ],
                editable = False,
                hide_root = True,
                selected = 'selected',
                show_icons = True
            ),
            show_label = False
        ),

        buttons = [
            # 'OK' is only enabled once an actual view (not a category) is
            # selected.
            Action(name='OK', enabled_when='view is not None'), 'Cancel'
        ],

        resizable = True,
        style = 'custom',
        title = 'Show View',

        width = .2,
        height = .4
    )

    ###########################################################################
    # 'ViewChooser' interface.
    ###########################################################################

    def _selected_changed(self, old, new):
        """ Static trait change handler.

        Mirrors the tree selection into the ``view`` trait, or resets it
        to None when the selection is not a view.
        """

        # If the assignment fails then the selected object does *not* implement
        # the 'IView' interface.
        try:
            self.view = new

        except TraitError:
            self.view = None

        return
#### EOF ######################################################################
| {
"repo_name": "enthought/traitsgui",
"path": "enthought/pyface/workbench/action/view_chooser.py",
"copies": "1",
"size": "6433",
"license": "bsd-3-clause",
"hash": -2904724967289445000,
"line_mean": 31.3266331658,
"line_max": 79,
"alpha_frac": 0.4818902534,
"autogenerated": false,
"ratio": 5.073343848580442,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014467740008161525,
"num_lines": 199
} |
""" A UI that allows the user to choose a view. """
# Enthought library imports.
from pyface.workbench.api import IView, WorkbenchWindow
from traits.api import Any, HasTraits, Instance, List, Str
from traits.api import TraitError, Undefined
from traitsui.api import Item, TreeEditor, TreeNode, View
from traitsui.menu import Action # fixme: Non-api import!
class Category(HasTraits):
    """ A view category.

    Used to group a window's views under a common name in the chooser
    tree.
    """

    # The name of the category.
    name = Str

    # The views in the category.
    views = List
class WorkbenchWindowTreeNode(TreeNode):
    """ A tree node for workbench windows that displays the window's views.

    The views are grouped by their category.
    """

    #### 'TreeNode' interface #################################################

    # List of object classes that the node applies to.
    node_for = [WorkbenchWindow]

    ###########################################################################
    # 'TreeNode' interface.
    ###########################################################################

    def get_children(self, object):
        """ Get the object's children.

        Returns the window's view categories, sorted by name.
        """

        # Collate the window's views into categories.
        categories_by_name = self._get_categories_by_name(object)

        # NOTE(review): relies on dict.values() returning a list
        # (Python 2); under Python 3 this would need sorted(...) instead
        # of in-place .sort().
        categories = categories_by_name.values()
        categories.sort(key=lambda category: category.name)

        return categories

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _get_categories_by_name(self, window):
        """ Return a dictionary containing all categories keyed by name. """

        categories_by_name = {}
        for view in window.views:
            category = categories_by_name.get(view.category)
            if category is None:
                # First view seen with this category name: create the group.
                category = Category(name=view.category)
                categories_by_name[view.category] = category

            category.views.append(view)

        return categories_by_name
class IViewTreeNode(TreeNode):
    """ A tree node for objects that implement the 'IView' interface.

    This node does *not* recognise objects that can be *adapted* to the 'IView'
    interface, only those that actually implement it. If we wanted to allow
    for adaptation we would have to work out a way for the rest of the
    'TreeNode' code to access the adapter, not the original object. We could,
    of course override every method, but that seems a little, errr, tedious.
    We could probably do with something like in the PyFace tree where there
    is a method that returns the actual object that we want to manipulate.
    """

    def is_node_for(self, obj):
        """ Returns whether this is the node that handles a specified object.
        """

        # By checking for 'is obj' here, we are *not* allowing adaptation (if
        # we were allowing adaptation it would be 'is not None'). See the class
        # doc string for details.
        return IView(obj, Undefined) is obj

    def get_icon(self, obj, is_expanded):
        """ Returns the icon for a specified object.

        Uses the view's own image when it has one, otherwise the editor's
        default leaf icon.
        """

        if obj.image is not None:
            icon = obj.image

        else:
            # fixme: A bit of magic here! Is there a better way to say 'use
            # the default leaf icon'?
            icon = '<item>'

        return icon
class ViewChooser(HasTraits):
    """ Allow the user to choose a view.

    This implementation shows views in a tree grouped by category. The
    chosen view (if any) is exposed via the ``view`` trait.
    """

    # The window that contains the views to choose from.
    window = Instance('pyface.workbench.api.WorkbenchWindow')

    # The currently selected tree item (at any point in time this might be
    # either None, a view category, or a view).
    selected = Any

    # The selected view (None if the selected item is not a view).
    view = Instance(IView)

    #### Traits UI views ######################################################

    traits_ui_view = View(
        Item(
            name = 'window',
            editor = TreeEditor(
                nodes = [
                    WorkbenchWindowTreeNode(
                        auto_open = True,
                        label = '=Views',
                        rename = False,
                        copy = False,
                        delete = False,
                        insert = False,
                        menu = None,
                    ),
                    TreeNode(
                        node_for = [Category],
                        auto_open = True,
                        children = 'views',
                        label = 'name',
                        rename = False,
                        copy = False,
                        delete = False,
                        insert = False,
                        menu = None,
                    ),
                    IViewTreeNode(
                        auto_open = False,
                        label = 'name',
                        rename = False,
                        copy = False,
                        delete = False,
                        insert = False,
                        menu = None,
                    )
                ],
                editable = False,
                hide_root = True,
                selected = 'selected',
                show_icons = True
            ),
            show_label = False
        ),

        buttons = [
            # 'OK' is only enabled once an actual view (not a category) is
            # selected.
            Action(name='OK', enabled_when='view is not None'), 'Cancel'
        ],

        resizable = True,
        style = 'custom',
        title = 'Show View',

        width = .2,
        height = .4
    )

    ###########################################################################
    # 'ViewChooser' interface.
    ###########################################################################

    def _selected_changed(self, old, new):
        """ Static trait change handler.

        Mirrors the tree selection into the ``view`` trait, or resets it
        to None when the selection is not a view.
        """

        # If the assignment fails then the selected object does *not* implement
        # the 'IView' interface.
        try:
            self.view = new

        except TraitError:
            self.view = None

        return
#### EOF ######################################################################
| {
"repo_name": "pankajp/pyface",
"path": "pyface/workbench/action/view_chooser.py",
"copies": "4",
"size": "6371",
"license": "bsd-3-clause",
"hash": 1669395251280931300,
"line_mean": 31.0150753769,
"line_max": 79,
"alpha_frac": 0.4781039083,
"autogenerated": false,
"ratio": 5.12962962962963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7607733537929631,
"avg_score": null,
"num_lines": null
} |
"""A UI used to manipulate blendshapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import logging
import os
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
import maya.cmds as cmds
from cmt.ui.widgets.filepathwidget import FilePathWidget
from cmt.ui.stringcache import StringCache
import cmt.deform.blendshape as bs
import cmt.deform.np_mesh as np_mesh
reload(bs)
import cmt.shortcuts as shortcuts
from cmt.io.obj import import_obj, export_obj
logger = logging.getLogger(__name__)
_win = None
def show():
    """Display the Shapes window, replacing any previously opened instance."""
    global _win
    previous = _win
    if previous:
        previous.close()
    _win = ShapesWindow()
    _win.show()
class ShapesWindow(MayaQWidgetBaseMixin, QMainWindow):
    """Main window for browsing .obj shape files and moving them in/out of Maya.

    A file tree rooted at a user-chosen directory lists .obj files.  Double
    clicking or the context menu imports them (optionally as blendshape
    targets of the selected mesh); menu actions export selected meshes or
    propagate a neutral-mesh update onto selected targets.
    """

    def __init__(self, parent=None):
        """Build the window: actions, menu, root-path picker and file tree.

        :param parent: Optional parent widget.
        """
        super(ShapesWindow, self).__init__(parent)
        self.setWindowTitle("Shapes")
        self.resize(800, 600)
        self.create_actions()
        self.create_menu()
        main_widget = QWidget()
        self.setCentralWidget(main_widget)
        main_layout = QVBoxLayout()
        main_widget.setLayout(main_layout)
        self.file_model = QFileSystemModel(self)
        self.root_path = FilePathWidget(
            "Root: ", FilePathWidget.directory, name="cmt.shapes.rootpath", parent=self
        )
        self.root_path.path_changed.connect(self.set_root_path)
        main_layout.addWidget(self.root_path)
        self.file_tree_view = QTreeView()
        self.file_model.setFilter(QDir.NoDotAndDotDot | QDir.Files | QDir.AllDirs)
        self.file_model.setReadOnly(True)
        # Only .obj files are relevant; setNameFilterDisables(False) hides
        # non-matching files rather than just greying them out.
        self.file_model.setNameFilters(["*.obj"])
        self.file_model.setNameFilterDisables(False)
        self.file_tree_view.setModel(self.file_model)
        # Hide the Size and Type columns.
        self.file_tree_view.setColumnHidden(1, True)
        self.file_tree_view.setColumnHidden(2, True)
        self.file_tree_view.setContextMenuPolicy(Qt.CustomContextMenu)
        self.file_tree_view.customContextMenuRequested.connect(
            self.on_file_tree_context_menu
        )
        self.file_tree_view.doubleClicked.connect(self.on_file_tree_double_clicked)
        self.file_tree_view.setSelectionMode(QAbstractItemView.ExtendedSelection)
        main_layout.addWidget(self.file_tree_view)
        self.set_root_path(self.root_path.path)

    def create_actions(self):
        """Create the menu actions."""
        self.propagate_neutral_action = QAction(
            "Propagate Neutral Update",
            toolTip="Propagate updates to a neutral mesh to the selected targets.",
            triggered=self.propagate_neutral_update,
        )
        self.export_selected_action = QAction(
            "Export Selected Meshes",
            toolTip="Export the selected meshes to the selected directory",
            triggered=self.export_selected,
        )

    def create_menu(self):
        """Populate the menu bar with the Shapes menu."""
        menubar = self.menuBar()
        menu = menubar.addMenu("Shapes")
        menu.addAction(self.propagate_neutral_action)
        menu.addAction(self.export_selected_action)

    def set_root_path(self, path):
        """Point the file model and tree view at a new root directory.

        :param path: Directory to browse.
        """
        index = self.file_model.setRootPath(path)
        self.file_tree_view.setRootIndex(index)

    def on_file_tree_double_clicked(self, index):
        """Import the double-clicked .obj file (non-.obj entries are ignored).

        :param index: Model index of the double-clicked item.
        """
        path = self.file_model.fileInfo(index).absoluteFilePath()
        if not os.path.isfile(path) or not path.lower().endswith(".obj"):
            return
        self.import_selected_objs()

    def on_file_tree_context_menu(self, pos):
        """Show the import/export context menu for the file under the cursor.

        :param pos: Click position in tree-view coordinates.
        """
        index = self.file_tree_view.indexAt(pos)
        if not index.isValid():
            return
        path = self.file_model.fileInfo(index).absoluteFilePath()
        if not os.path.isfile(path) or not path.lower().endswith(".obj"):
            return
        sel = cmds.ls(sl=True)
        blendshape = bs.get_blendshape_node(sel[0]) if sel else None
        menu = QMenu()
        label = "Import as target" if blendshape else "Import"
        # Bug fix: QAction.triggered emits a 'checked' bool.  Connecting the
        # bound method directly passed that bool into import_selected_objs as
        # add_as_targets=False, silently disabling "Import as target".  The
        # lambda swallows the bool.
        menu.addAction(
            QAction(
                label,
                self,
                triggered=lambda checked=False: self.import_selected_objs(),
            )
        )
        if sel and shortcuts.get_shape(sel[0]):
            # Same 'checked' pitfall: partial(export_obj, mesh, path) would be
            # invoked with an extra positional argument, so bind and drop it.
            menu.addAction(
                QAction(
                    "Export selected",
                    self,
                    triggered=lambda checked=False, mesh=sel[0], out=path: export_obj(
                        mesh, out
                    ),
                )
            )
        menu.exec_(self.file_tree_view.mapToGlobal(pos))

    def get_selected_paths(self):
        """Return the absolute paths currently selected in the tree view."""
        indices = self.file_tree_view.selectedIndexes()
        if not indices:
            return []
        # selectedIndexes() yields one index per visible column; keep column 0
        # only so each row contributes a single path.
        paths = [
            self.file_model.fileInfo(idx).absoluteFilePath()
            for idx in indices
            if idx.column() == 0
        ]
        return paths

    def import_selected_objs(self, add_as_targets=True):
        """Import the selected shapes in the tree view.
        If a mesh with a blendshape is selected in the scene and
        add_as_targets is True, the imported shapes are added as targets and
        the temporary meshes deleted; otherwise the imported meshes are left
        in the scene and selected.

        :param add_as_targets: Whether imports become blendshape targets.
        :return: The list of imported meshes, or None if nothing was selected.
        """
        indices = self.file_tree_view.selectedIndexes()
        if not indices:
            return None
        paths = self.get_selected_paths()
        sel = cmds.ls(sl=True)
        blendshape = bs.get_blendshape_node(sel[0]) if sel else None
        meshes = [import_obj(path) for path in paths]
        if blendshape and add_as_targets:
            for mesh in meshes:
                bs.add_target(blendshape, mesh)
                cmds.delete(mesh)
        elif meshes:
            cmds.select(meshes)
        return meshes

    def export_selected(self):
        """Export each selected scene mesh as <name>.obj into the directory
        selected in the tree (or the root directory when none is selected).
        """
        sel = cmds.ls(sl=True)
        if not sel:
            return
        indices = self.file_tree_view.selectedIndexes()
        if indices:
            path = self.file_model.fileInfo(indices[0]).absoluteFilePath()
            directory = os.path.dirname(path) if os.path.isfile(path) else path
        else:
            directory = self.file_model.rootPath()
        for mesh in sel:
            path = os.path.join(directory, "{}.obj".format(mesh))
            export_obj(mesh, path)

    def propagate_neutral_update(self):
        """Propagate a neutral-mesh edit onto the .obj targets selected in
        the tree.

        Expects exactly two scene selections: the old neutral, then the new
        neutral.
        """
        sel = cmds.ls(sl=True)
        if len(sel) != 2:
            QMessageBox.critical(
                self, "Error", "Select the old neutral, then the new neutral."
            )
            return
        old_neutral, new_neutral = sel
        meshes = self.import_selected_objs(add_as_targets=False)
        if not meshes:
            return
        bs.propagate_neutral_update(old_neutral, new_neutral, meshes)
| {
"repo_name": "chadmv/cmt",
"path": "scripts/cmt/deform/shapesui.py",
"copies": "1",
"size": "6401",
"license": "mit",
"hash": 3626342756245635000,
"line_mean": 33.4139784946,
"line_max": 88,
"alpha_frac": 0.6255272614,
"autogenerated": false,
"ratio": 3.75425219941349,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.487977946081349,
"avg_score": null,
"num_lines": null
} |
"""A unified interface for performing and debugging optimization problems."""
import abc
import numpy as np
import scipy.sparse as sps
import scipy.optimize as opt
from scipy.optimize import minimize
class Optimizer(object):

    def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,
                 hess=None, hessp=None, bounds=None, constraints=(),
                 tol=None, callback=None, options=None, evolution=False):
        """ Minimize a scalar function of one or more variables.

        All arguments except ``evolution`` are forwarded unchanged to
        :func:`scipy.optimize.minimize`; see its documentation for the full
        description of ``fun``, ``x0``, ``args``, ``method``, ``jac``,
        ``hess``/``hessp``, ``bounds``, ``constraints``, ``tol``,
        ``callback`` and ``options``.

        Parameters
        ----------
        evolution : bool, optional
            When True, record the parameter vector at every iteration; the
            history is then available through the ``evolution`` property.
            (This replaces any user-supplied ``callback``.)

        See also
        ---------
        scipy.optimize.minimize
        """
        self.size_of_x = len(x0)
        self._evol_kx = None
        if evolution is True:
            self._evol_kx = []

            def record_iterate(kx):
                # Snapshot the current parameter vector each iteration.
                self._evol_kx.append(kx)

            chosen_callback = record_iterate
        else:
            chosen_callback = callback
        self.res = minimize(fun, x0, args, method, jac, hess, hessp,
                            bounds, constraints, tol,
                            callback=chosen_callback, options=options)

    @property
    def xopt(self):
        """The optimal parameter vector."""
        return self.res['x']

    @property
    def fopt(self):
        """Objective function value at the optimum."""
        return self.res['fun']

    @property
    def nit(self):
        """Number of iterations performed by the solver."""
        return self.res['nit']

    @property
    def nfev(self):
        """Number of objective-function evaluations."""
        return self.res['nfev']

    @property
    def message(self):
        """The solver's termination message."""
        return self.res['message']

    def print_summary(self):
        """Print the full scipy result object."""
        print(self.res)

    @property
    def evolution(self):
        """Per-iteration parameter history as an ndarray, or None."""
        if self._evol_kx is None:
            return None
        return np.asarray(self._evol_kx)
def spdot(A, B):
    """Matrix product of A and B that tolerates sparse operands.

    Behaves like ``np.dot(A, B)`` but also accepts scipy.sparse matrices for
    either argument.  If both operands are sparse the product is sparse;
    otherwise the result is dense, viewed as the dense operand's class.

    Parameters
    ----------
    A, B : arrays of shape (m, n), (n, k)

    Returns
    -------
    The matrix product AB.

    See discussion here:
    http://mail.scipy.org/pipermail/scipy-user/2010-November/027700.html
    """
    a_sparse = sps.issparse(A)
    b_sparse = sps.issparse(B)
    if a_sparse and b_sparse:
        return A * B
    if a_sparse:
        # Sparse * dense: multiply sparsely, present as the dense type.
        return (A * B).view(type=B.__class__)
    if b_sparse:
        # Dense * sparse: the transpose trick keeps the multiply sparse-aware.
        return (B.T * A.T).T.view(type=A.__class__)
    return np.dot(A, B)
def sparse_nnls(y, X,
                momentum=1,
                step_size=0.01,
                non_neg=True,
                check_error_iter=10,
                max_error_checks=10,
                converge_on_sse=0.99):
    """
    Solve y=Xh for h, using gradient descent, with X a sparse matrix.
    Parameters
    ----------
    y : 1-d array of shape (N)
        The data. Needs to be dense.
    X : ndarray. May be either sparse or dense. Shape (N, M)
        The regressors
    momentum : float, optional (default: 1).
        The persistence of the gradient.
    step_size : float, optional (default: 0.01).
        The increment of parameter update in each iteration
    non_neg : Boolean, optional (default: True)
        Whether to enforce non-negativity of the solution.
    check_error_iter : int (default:10)
        How many rounds to run between error evaluation for
        convergence-checking.
    max_error_checks : int (default: 10)
        Don't check errors more than this number of times if no improvement in
        r-squared is seen.
    converge_on_sse : float (default: 0.99)
        a percentage improvement in SSE that is required each time to say
        that things are still going well.
    Returns
    -------
    h_best : The best estimate of the parameters.
    """
    num_regressors = X.shape[1]
    # Initialize the parameters at the origin:
    h = np.zeros(num_regressors)
    # If nothing good happens, we'll return that:
    # NOTE(review): h_best is a *reference* to h, not a copy; all updates to
    # h below are in-place, so h_best always tracks the current h and the
    # return value is the final iterate, not a snapshot of the best one.
    h_best = h
    iteration = 1
    ss_residuals_min = np.inf  # This will keep track of the best solution
    sse_best = np.inf  # This will keep track of the best performance so far
    count_bad = 0  # Number of times estimation error has gone up.
    error_checks = 0  # How many error checks have we done so far
    while 1:
        if iteration > 1:
            # The gradient is (Kay 2008 supplemental page 27):
            gradient = spdot(X.T, spdot(X, h) - y)
            # NOTE(review): this scales the fresh gradient by (1 + momentum)
            # instead of blending in the previous iteration's gradient; since
            # the result is normalized to unit length just below, the scaling
            # has no effect on the update — confirm intended semantics.
            gradient += momentum * gradient
            # Normalize to unit-length
            unit_length_gradient = (gradient /
                                    np.sqrt(np.dot(gradient, gradient)))
            # Update the parameters in the direction of the gradient:
            h -= step_size * unit_length_gradient
            if non_neg:
                # Set negative values to 0:
                h[h < 0] = 0
        # Every once in a while check whether it's converged:
        # NOTE(review): this condition is truthy when iteration is NOT a
        # multiple of check_error_iter, so the error is evaluated on all but
        # every check_error_iter-th pass — verify this cadence is intended.
        if np.mod(iteration, check_error_iter):
            # This calculates the sum of squared residuals at this point:
            sse = np.sum((y - spdot(X, h)) ** 2)
            # Did we do better this time around?
            if sse < ss_residuals_min:
                # Update your expectations about the minimum error:
                ss_residuals_min = sse
                h_best = h  # This holds the best params we have so far
                # Are we generally (over iterations) converging on
                # sufficient improvement in r-squared?
                if sse < converge_on_sse * sse_best:
                    sse_best = sse
                    count_bad = 0
                else:
                    count_bad += 1
            else:
                count_bad += 1
            # Stop once the improvement has stalled often enough in a row.
            if count_bad >= max_error_checks:
                return h_best
            error_checks += 1
        iteration += 1
class SKLearnLinearSolver(object, metaclass=abc.ABCMeta):
    """
    A sklearn-like uniform interface to algorithms solving $y = Ax$ for $x$.

    Concrete subclasses implement ``fit(X, y)``, which must set a ``coef_``
    attribute of shape (X.shape[1],) so that predictions can be computed as
    ``y_hat = np.dot(X, coef_.T)``.
    """

    def __init__(self, *args, **kwargs):
        # Stash constructor arguments for use by concrete subclasses.
        self._args = args
        self._kwargs = kwargs

    @abc.abstractmethod
    def fit(self, X, y):
        """Implement for all derived classes """

    def predict(self, X):
        """
        Predict using the result of the model

        Parameters
        ----------
        X : array-like (n_samples, n_features)
            Samples.

        Returns
        -------
        C : array, shape = (n_samples,)
            Predicted values.
        """
        samples = np.asarray(X)
        return samples.dot(self.coef_.T)
class NonNegativeLeastSquares(SKLearnLinearSolver):
    """
    A sklearn-like interface to scipy.optimize.nnls
    """

    def fit(self, X, y):
        """
        Fit the NonNegativeLeastSquares linear model to data

        Parameters
        ----------
        """
        # nnls returns (solution, residual norm); only the solution is kept.
        self.coef_, _ = opt.nnls(X, y)
        return self
| {
"repo_name": "FrancoisRheaultUS/dipy",
"path": "dipy/core/optimize.py",
"copies": "9",
"size": "11096",
"license": "bsd-3-clause",
"hash": 703478615690414500,
"line_mean": 31.2558139535,
"line_max": 79,
"alpha_frac": 0.5603821197,
"autogenerated": false,
"ratio": 4.239969430645778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9300351550345778,
"avg_score": null,
"num_lines": null
} |
""" A unified interface for performing and debugging optimization problems.
Only L-BFGS-B and Powell is supported in this class for versions of
Scipy < 0.12. All optimizers are available for scipy >= 0.12.
"""
import abc
from distutils.version import LooseVersion
import numpy as np
import scipy
import scipy.sparse as sps
import scipy.optimize as opt
from dipy.utils.six import with_metaclass
SCIPY_LESS_0_12 = LooseVersion(scipy.__version__) < '0.12'
if not SCIPY_LESS_0_12:
from scipy.optimize import minimize
else:
from scipy.optimize import fmin_l_bfgs_b, fmin_powell
class Optimizer(object):

    def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,
                 hess=None, hessp=None, bounds=None, constraints=(),
                 tol=None, callback=None, options=None, evolution=False):
        """ A class for handling minimization of scalar function of one or more
        variables.
        Parameters
        ----------
        fun : callable
            Objective function.
        x0 : ndarray
            Initial guess.
        args : tuple, optional
            Extra arguments passed to the objective function and its
            derivatives (Jacobian, Hessian).
        method : str, optional
            Type of solver. Should be one of
            - 'Nelder-Mead'
            - 'Powell'
            - 'CG'
            - 'BFGS'
            - 'Newton-CG'
            - 'Anneal'
            - 'L-BFGS-B'
            - 'TNC'
            - 'COBYLA'
            - 'SLSQP'
            - 'dogleg'
            - 'trust-ncg'
        jac : bool or callable, optional
            Jacobian of objective function. Only for CG, BFGS, Newton-CG,
            dogleg, trust-ncg.
            If `jac` is a Boolean and is True, `fun` is assumed to return the
            value of Jacobian along with the objective function. If False, the
            Jacobian will be estimated numerically.
            `jac` can also be a callable returning the Jacobian of the
            objective. In this case, it must accept the same arguments
            as `fun`.
        hess, hessp : callable, optional
            Hessian of objective function or Hessian of objective function
            times an arbitrary vector p. Only for Newton-CG,
            dogleg, trust-ncg.
            Only one of `hessp` or `hess` needs to be given. If `hess` is
            provided, then `hessp` will be ignored. If neither `hess` nor
            `hessp` is provided, then the hessian product will be approximated
            using finite differences on `jac`. `hessp` must compute the Hessian
            times an arbitrary vector.
        bounds : sequence, optional
            Bounds for variables (only for L-BFGS-B, TNC and SLSQP).
            ``(min, max)`` pairs for each element in ``x``, defining
            the bounds on that parameter. Use None for one of ``min`` or
            ``max`` when there is no bound in that direction.
        constraints : dict or sequence of dict, optional
            Constraints definition (only for COBYLA and SLSQP).
            Each constraint is defined in a dictionary with fields:
                type : str
                    Constraint type: 'eq' for equality, 'ineq' for inequality.
                fun : callable
                    The function defining the constraint.
                jac : callable, optional
                    The Jacobian of `fun` (only for SLSQP).
                args : sequence, optional
                    Extra arguments to be passed to the function and Jacobian.
            Equality constraint means that the constraint function result is to
            be zero whereas inequality means that it is to be non-negative.
            Note that COBYLA only supports inequality constraints.
        tol : float, optional
            Tolerance for termination. For detailed control, use
            solver-specific options.
        callback : callable, optional
            Called after each iteration, as ``callback(xk)``, where ``xk`` is
            the current parameter vector. Only available using Scipy >= 0.12.
        options : dict, optional
            A dictionary of solver options. All methods accept the following
            generic options:
                maxiter : int
                    Maximum number of iterations to perform.
                disp : bool
                    Set to True to print convergence messages.
            For method-specific options, see
            `show_options('minimize', method)`.
        evolution : bool, optional
            save history of x for each iteration. Only available using Scipy
            >= 0.12.
        See also
        ---------
        scipy.optimize.minimize
        """
        self.size_of_x = len(x0)
        self._evol_kx = None
        # Machine epsilon: used to translate the unified 'ftol' option into
        # the legacy 'factr' parameter of fmin_l_bfgs_b (factr = ftol / eps).
        _eps = np.finfo(float).eps
        if SCIPY_LESS_0_12:
            # Fall back to the per-algorithm fmin_* interfaces and emulate
            # the result dict that scipy.optimize.minimize would return.
            if evolution is True:
                print('Saving history is available only with Scipy >= 0.12.')
            if method == 'L-BFGS-B':
                default_options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5,
                                   'eps': 1e-8, 'maxiter': 1000}
                # approx_grad asks fmin_l_bfgs_b to estimate the gradient
                # numerically when no Jacobian was supplied.
                if jac is None:
                    approx_grad = True
                else:
                    approx_grad = False
                if options is None:
                    options = default_options
                if options is not None:
                    # Overlay user-supplied options onto the defaults.
                    for key in options:
                        default_options[key] = options[key]
                    options = default_options
                try:
                    out = fmin_l_bfgs_b(fun, x0, args,
                                        approx_grad=approx_grad,
                                        bounds=bounds,
                                        m=options['maxcor'],
                                        factr=options['ftol']/_eps,
                                        pgtol=options['gtol'],
                                        epsilon=options['eps'],
                                        maxiter=options['maxiter'])
                except TypeError:
                    # Very old scipy has no 'maxiter' kwarg; approximate it
                    # via 'maxfun' (each iteration costs >= 1 evaluation).
                    msg = 'In Scipy ' + scipy.__version__ + ' `maxiter` '
                    msg += 'parameter is not available for L-BFGS-B. \n Using '
                    msg += '`maxfun` instead with value twice of maxiter.'
                    print(msg)
                    out = fmin_l_bfgs_b(fun, x0, args,
                                        approx_grad=approx_grad,
                                        bounds=bounds,
                                        m=options['maxcor'],
                                        factr=options['ftol']/_eps,
                                        pgtol=options['gtol'],
                                        epsilon=options['eps'],
                                        maxfun=options['maxiter'] * 2)
                # Repackage the tuple result into a minimize()-style dict.
                res = {'x': out[0], 'fun': out[1], 'nfev': out[2]['funcalls']}
                try:
                    res['nit'] = out[2]['nit']
                except KeyError:
                    # Some scipy versions do not report the iteration count.
                    res['nit'] = None
            elif method == 'Powell':
                default_options = {'xtol': 0.0001, 'ftol': 0.0001,
                                   'maxiter': None}
                if options is None:
                    options = default_options
                if options is not None:
                    # Overlay user-supplied options onto the defaults.
                    for key in options:
                        default_options[key] = options[key]
                    options = default_options
                out = fmin_powell(fun, x0, args,
                                  xtol=options['xtol'],
                                  ftol=options['ftol'],
                                  maxiter=options['maxiter'],
                                  full_output=True,
                                  disp=False,
                                  retall=True)
                xopt, fopt, direc, iterations, funcs, warnflag, allvecs = out
                res = {'x': xopt, 'fun': fopt,
                       'nfev': funcs, 'nit': iterations}
            else:
                msg = 'Only L-BFGS-B and Powell is supported in this class '
                msg += 'for versions of Scipy < 0.12.'
                raise ValueError(msg)
        if not SCIPY_LESS_0_12:
            if evolution is True:
                # Record every iterate via the callback hook; this replaces
                # any user-supplied callback.
                self._evol_kx = []
                def history_of_x(kx):
                    self._evol_kx.append(kx)
                res = minimize(fun, x0, args, method, jac, hess, hessp, bounds,
                               constraints, tol, callback=history_of_x,
                               options=options)
            else:
                res = minimize(fun, x0, args, method, jac, hess, hessp, bounds,
                               constraints, tol, callback, options)
        self.res = res

    @property
    def xopt(self):
        # The optimal parameter vector.
        return self.res['x']

    @property
    def fopt(self):
        # Objective value at the optimum.
        return self.res['fun']

    @property
    def nit(self):
        # Number of iterations (may be None on some old scipy versions).
        return self.res['nit']

    @property
    def nfev(self):
        # Number of objective-function evaluations.
        return self.res['nfev']

    @property
    def message(self):
        # NOTE(review): the legacy (<0.12) code paths never set 'message',
        # so this raises KeyError there — confirm callers only use it with
        # scipy >= 0.12.
        return self.res['message']

    def print_summary(self):
        # Print the full result dict/object for debugging.
        print(self.res)

    @property
    def evolution(self):
        # History of parameter vectors (scipy >= 0.12 with evolution=True),
        # or None when not recorded.
        if self._evol_kx is not None:
            return np.asarray(self._evol_kx)
        else:
            return None
def spdot(A, B):
    """Matrix product of A and B that tolerates sparse operands.

    Behaves like ``np.dot(A, B)`` but also accepts scipy.sparse matrices for
    either argument.  If both operands are sparse the product is sparse;
    otherwise the result is dense, viewed as the dense operand's class.

    Parameters
    ----------
    A, B : arrays of shape (m, n), (n, k)

    Returns
    -------
    The matrix product AB.

    See discussion here:
    http://mail.scipy.org/pipermail/scipy-user/2010-November/027700.html
    """
    a_sparse = sps.issparse(A)
    b_sparse = sps.issparse(B)
    if a_sparse and b_sparse:
        return A * B
    if a_sparse:
        # Sparse * dense: multiply sparsely, present as the dense type.
        return (A * B).view(type=B.__class__)
    if b_sparse:
        # Dense * sparse: the transpose trick keeps the multiply sparse-aware.
        return (B.T * A.T).T.view(type=A.__class__)
    return np.dot(A, B)
def sparse_nnls(y, X,
                momentum=1,
                step_size=0.01,
                non_neg=True,
                check_error_iter=10,
                max_error_checks=10,
                converge_on_sse=0.99):
    """
    Solve y=Xh for h, using gradient descent, with X a sparse matrix
    Parameters
    ----------
    y : 1-d array of shape (N)
        The data. Needs to be dense.
    X : ndarray. May be either sparse or dense. Shape (N, M)
        The regressors
    momentum : float, optional (default: 1).
        The persistence of the gradient.
    step_size : float, optional (default: 0.01).
        The increment of parameter update in each iteration
    non_neg : Boolean, optional (default: True)
        Whether to enforce non-negativity of the solution.
    check_error_iter : int (default:10)
        How many rounds to run between error evaluation for
        convergence-checking.
    max_error_checks : int (default: 10)
        Don't check errors more than this number of times if no improvement in
        r-squared is seen.
    converge_on_sse : float (default: 0.99)
        a percentage improvement in SSE that is required each time to say
        that things are still going well.
    Returns
    -------
    h_best : The best estimate of the parameters.
    """
    num_regressors = X.shape[1]
    # Initialize the parameters at the origin:
    h = np.zeros(num_regressors)
    # If nothing good happens, we'll return that:
    # NOTE(review): h_best is a *reference* to h, not a copy; all updates to
    # h below are in-place, so h_best always tracks the current h and the
    # return value is the final iterate, not a snapshot of the best one.
    h_best = h
    gradient = np.zeros(num_regressors)
    iteration = 1
    ss_residuals_min = np.inf  # This will keep track of the best solution
    sse_best = np.inf  # This will keep track of the best performance so far
    count_bad = 0  # Number of times estimation error has gone up.
    error_checks = 0  # How many error checks have we done so far
    while 1:
        if iteration > 1:
            # The sum of squared error given the current parameter setting:
            sse = np.sum((y - spdot(X, h)) ** 2)
            # The gradient is (Kay 2008 supplemental page 27):
            gradient = spdot(X.T, spdot(X, h) - y)
            # NOTE(review): this scales the fresh gradient by (1 + momentum)
            # instead of blending in the previous iteration's gradient; since
            # the result is normalized to unit length just below, the scaling
            # has no effect on the update — confirm intended semantics.
            gradient += momentum * gradient
            # Normalize to unit-length
            unit_length_gradient = (gradient /
                                    np.sqrt(np.dot(gradient, gradient)))
            # Update the parameters in the direction of the gradient:
            h -= step_size * unit_length_gradient
            if non_neg:
                # Set negative values to 0:
                h[h < 0] = 0
        # Every once in a while check whether it's converged:
        # NOTE(review): this condition is truthy when iteration is NOT a
        # multiple of check_error_iter, so the error is evaluated on all but
        # every check_error_iter-th pass — verify this cadence is intended.
        if np.mod(iteration, check_error_iter):
            # This calculates the sum of squared residuals at this point:
            sse = np.sum((y - spdot(X, h)) ** 2)
            # Did we do better this time around?
            if sse < ss_residuals_min:
                # Update your expectations about the minimum error:
                ss_residuals_min = sse
                h_best = h  # This holds the best params we have so far
                # Are we generally (over iterations) converging on
                # sufficient improvement in r-squared?
                if sse < converge_on_sse * sse_best:
                    sse_best = sse
                    count_bad = 0
                else:
                    count_bad += 1
            else:
                count_bad += 1
            # Stop once the improvement has stalled often enough in a row.
            if count_bad >= max_error_checks:
                return h_best
            error_checks += 1
        iteration += 1
class SKLearnLinearSolver(with_metaclass(abc.ABCMeta, object)):
    """
    A sklearn-like uniform interface to algorithms solving $y = Ax$ for $x$.

    Concrete subclasses implement ``fit(X, y)``, which must set a ``coef_``
    attribute of shape (X.shape[1],) so that predictions can be computed as
    ``y_hat = np.dot(X, coef_.T)``.
    """

    def __init__(self, *args, **kwargs):
        # Stash constructor arguments for use by concrete subclasses.
        self._args = args
        self._kwargs = kwargs

    @abc.abstractmethod
    def fit(self, X, y):
        """Implement for all derived classes """

    def predict(self, X):
        """
        Predict using the result of the model

        Parameters
        ----------
        X : array-like (n_samples, n_features)
            Samples.

        Returns
        -------
        C : array, shape = (n_samples,)
            Predicted values.
        """
        samples = np.asarray(X)
        return samples.dot(self.coef_.T)
class NonNegativeLeastSquares(SKLearnLinearSolver):
    """
    A sklearn-like interface to scipy.optimize.nnls
    """

    def fit(self, X, y):
        """
        Fit the NonNegativeLeastSquares linear model to data

        Parameters
        ----------
        """
        # nnls returns (solution, residual norm); only the solution is kept.
        self.coef_, _ = opt.nnls(X, y)
        return self
| {
"repo_name": "Messaoud-Boudjada/dipy",
"path": "dipy/core/optimize.py",
"copies": "3",
"size": "15193",
"license": "bsd-3-clause",
"hash": 4531625914953720300,
"line_mean": 32.9129464286,
"line_max": 79,
"alpha_frac": 0.5189890081,
"autogenerated": false,
"ratio": 4.3683151236342725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6387304131734273,
"avg_score": null,
"num_lines": null
} |
# A unit fraction contains 1 in the numerator. The decimal representation of the unit fractions with denominators 2 to 10 are given:
# 1/2 = 0.5
# 1/3 = 0.(3)
# 1/4 = 0.25
# 1/5 = 0.2
# 1/6 = 0.1(6)
# 1/7 = 0.(142857)
# 1/8 = 0.125
# 1/9 = 0.(1)
# 1/10 = 0.1
# Where 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. It can be seen that 1/7 has a 6-digit recurring cycle.
# Find the value of d < 1000 for which 1/d contains the longest recurring cycle in its decimal fraction part.
from itertools import count
def recur_len(n):
    """Length of the recurring cycle in the decimal expansion of 1/n.

    Returns 0 when the expansion terminates.  Performs the long-division
    remainders of 10/n and detects when a remainder repeats: the distance
    between the two occurrences is the cycle length.
    """
    remainder = 10  # initial remainder (10/n)/10
    first_seen = {}
    position = 0
    while True:
        if remainder == 0:
            return 0  # divides evenly
        if remainder in first_seen:
            # Current position minus where this remainder first appeared.
            return position - first_seen[remainder]
        first_seen[remainder] = position
        remainder = 10 * (remainder % n)
        position += 1
#recur_len(7)
# Find d < 1000 whose unit fraction 1/d has the longest recurring cycle.
# Renamed from `len, i = ...` — the original shadowed the builtin `len`.
cycle_length, best_d = max((recur_len(d), d) for d in range(2, 1000))
print(best_d)
# my slow way:
# from decimal import *
# d = max = temp = length = 1
# stringTemp = ""
# divisor = Decimal(1)
# getcontext().prec = 10000
# while d < 1000:
# temp = divisor/Decimal(d)
# # take only the remainder
# stringTemp = str(temp)[2:]
# strLen = len(stringTemp)
# if strLen > 6:
# #if stringTemp[0] != stringTemp[1] and stringTemp[1] != stringTemp[2]:
# i = 2
# while stringTemp[:i] != stringTemp[i:i+i]:
# #print(stringTemp[:i], stringTemp[i:i+i])
# i += 1
# if i == strLen:
# break
# if stringTemp[:i] == stringTemp[i:i+i]:
# #print("%d has this sequence: %s" % (d, stringTemp[:i]))
# if i > length:
# length = i
# max = d
# #print("%i has length %i" % (d, len(stringTemp)))
# d += 1
# print(max, length)
#983 | {
"repo_name": "ledbutter/ProjectEulerPython",
"path": "Problem26.py",
"copies": "1",
"size": "1739",
"license": "mit",
"hash": -222720630692340540,
"line_mean": 23.9850746269,
"line_max": 132,
"alpha_frac": 0.5710178263,
"autogenerated": false,
"ratio": 2.466666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8192790240329451,
"avg_score": 0.06897885052744313,
"num_lines": 67
} |
# A unival tree (which stands for "universal value") is a tree where all nodes under it have the same value.
# Given the root to a binary tree, count the number of unival subtrees.
# For example, the following tree has 5 unival subtrees:
# 0
# / \
# 1 0
# / \
# 1 0
# / \
# 1 1
class Node:
    """A binary-tree node.

    value is the payload; left/right are child Nodes or None.
    """

    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

    def __repr__(self):
        # Added for debuggability; recursively shows the whole subtree.
        return "Node({!r}, left={!r}, right={!r})".format(
            self.value, self.left, self.right
        )
def _count_univals(root):
"""
Count the amount of subtrees that are univals, as well as the value of unival tree the node is
a root of or null if it's not
"""
if not root:
return (0, None, None)
left = _count_univals(root.left)
right = _count_univals(root.right)
is_unival = (right[1] is None or right[1] == root.value) and (left[1] is None or left[1] == root.value)
return (
(1 if is_unival else 0) + left[0] + right[0],
root.value if is_unival else None
)
def count_univals(root):
    """Return the number of unival subtrees of the tree rooted at *root*."""
    count, *_ = _count_univals(root)
    return count
import pytest
# Regression cases: each pair is a tree and its expected unival-subtree
# count, mirroring the examples in the problem statement above.
@pytest.mark.parametrize(('tree', 'expected'), [
    (Node(1), 1),
    (Node(2), 1),
    (Node(0, Node(1)), 1),
    (Node(0, Node(1), Node(0)), 2),
    (Node(0, Node(1), Node(0, Node(1), Node(0))), 3),
    (Node(0, Node(1), Node(0, Node(1, Node(1), Node(1)), Node(0))), 5)
])
def test(tree, expected):
    assert count_univals(tree) == expected
| {
"repo_name": "marianosimone/interviewed",
"path": "dailycodingproblem/problems/20180725.py",
"copies": "1",
"size": "1417",
"license": "unlicense",
"hash": -3586627749343494700,
"line_mean": 26.25,
"line_max": 108,
"alpha_frac": 0.5906845448,
"autogenerated": false,
"ratio": 2.9644351464435146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4055119691243515,
"avg_score": null,
"num_lines": null
} |
""" A universal module with functions / classes without dependencies. """
import functools
import re
import os
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
    """Decorator: materialize the decorated function's iterable result as a list."""
    # functools.wraps preserves the wrapped function's name/docstring, which
    # the original wrapper lost.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return list(func(*args, **kwargs))
    return wrapper
def to_tuple(func):
    """Decorator: materialize the decorated function's iterable result as a tuple."""
    # functools.wraps preserves the wrapped function's name/docstring, which
    # the original wrapper lost.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return tuple(func(*args, **kwargs))
    return wrapper
def unite(iterable):
    """Flatten a two-dimensional iterable into a single set of its elements."""
    return {element for group in iterable for element in group}
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.

    Raised by ``reraise_uncaught`` with the original AttributeError as the
    constructor argument, so the error propagates out of attribute hooks
    instead of being swallowed.
    """
def safe_property(func):
    """A ``property`` whose getter re-raises AttributeError as UncaughtAttributeError."""
    guarded_getter = reraise_uncaught(func)
    return property(guarded_getter)
def reraise_uncaught(func):
    """
    Re-throw uncaught `AttributeError`.

    Usage: Put ``@rethrow_uncaught`` in front of the function
    which does **not** suppose to raise `AttributeError`.

    AttributeError is easily get caught by `hasattr` and another
    ``except AttributeError`` clause. This becomes problem when you use
    a lot of "dynamic" attributes (e.g., using ``@property``) because you
    can't distinguish if the property does not exist for real or some code
    inside of the "dynamic" attribute through that error. In a well
    written code, such error should not exist but getting there is very
    difficult. This decorator is to help us getting there by changing
    `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch.
    This helps us noticing bugs earlier and facilitates debugging.
    """
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AttributeError as exc:
            # Escape implicit AttributeError handling (hasattr/__getattr__)
            # while keeping the original exception chained as the cause.
            raise UncaughtAttributeError(exc) from exc
    return guarded
class PushBackIterator:
    """Iterator wrapper whose consumers can un-read values.

    Values handed to :meth:`push_back` are yielded again (most recent first)
    before the wrapped iterator is advanced.  ``current`` always holds the
    value most recently produced.
    """

    def __init__(self, iterator):
        self.pushes = []          # LIFO stack of pushed-back values
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        """Make *value* the result of the next ``next()`` call."""
        self.pushes.append(value)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.pushes.pop() if self.pushes else next(self.iterator)
        self.current = value
        return value
| {
"repo_name": "glenngillen/dotfiles",
"path": ".vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/jedilsp/jedi/inference/utils.py",
"copies": "3",
"size": "2706",
"license": "mit",
"hash": -8386216320932229000,
"line_mean": 29.0666666667,
"line_max": 82,
"alpha_frac": 0.6603843311,
"autogenerated": false,
"ratio": 4.069172932330827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6229557263430827,
"avg_score": null,
"num_lines": null
} |
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
import re
from ast import literal_eval
from jedi._compatibility import unicode, next, reraise
from jedi import settings
class MultiLevelStopIteration(Exception):
    """
    StopIteration's get catched pretty easy by for loops, let errors propagate.
    """
    # Deliberately NOT derived from StopIteration, so for-loops can't absorb it.
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    `hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.
    :param base: return values of sys.exc_info().
    """
    # Intentionally empty: the class only re-tags AttributeError (see
    # reraise_uncaught) so it can escape hasattr/__getattr__ suppression.
def safe_property(func):
    # Like @property, but an AttributeError raised inside the getter surfaces
    # as UncaughtAttributeError instead of looking like a missing attribute.
    return property(reraise_uncaught(func))
def reraise_uncaught(func):
    """
    Decorator that re-raises an escaping ``AttributeError`` from *func* as
    ``UncaughtAttributeError``.

    ``hasattr`` and ``__getattr__`` silently swallow ``AttributeError``, so a
    bug inside a dynamic attribute (e.g. a ``@property`` getter) can look like
    a merely missing attribute.  Re-tagging the exception keeps such bugs
    visible; the original traceback is preserved via ``reraise``.
    """
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AttributeError:
            _, value, traceback = sys.exc_info()
            reraise(UncaughtAttributeError(value), traceback)
    return guarded
class PushBackIterator(object):
    """Iterator wrapper whose consumers can un-read values.

    Values handed to :meth:`push_back` are yielded again (most recent first)
    before the wrapped iterator is advanced.  ``current`` always holds the
    value most recently produced.
    """

    def __init__(self, iterator):
        self.pushes = []          # LIFO stack of pushed-back values
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        """Make *value* the result of the next ``next()`` call."""
        self.pushes.append(value)

    def __iter__(self):
        return self

    def next(self):
        """Python 2 compatibility alias for :meth:`__next__`."""
        return self.__next__()

    def __next__(self):
        value = self.pushes.pop() if self.pushes else next(self.iterator)
        self.current = value
        return value
@contextlib.contextmanager
def scale_speed_settings(factor):
    """
    Context manager that temporarily multiplies jedi's speed-related limits
    by *factor* and restores the previous values on exit.

    The restore runs in a ``finally`` block so the settings are reset even
    when the managed block raises (the original leaked the scaled values on
    exception; compare the corrected variant elsewhere in this codebase).
    """
    a = settings.max_executions
    b = settings.max_until_execution_unique
    settings.max_executions *= factor
    settings.max_until_execution_unique *= factor
    try:
        yield
    finally:
        settings.max_executions = a
        settings.max_until_execution_unique = b
def indent_block(text, indention='    '):
    """Prefix every line of *text* with *indention*, keeping any trailing
    newlines exactly where they were."""
    stripped = text.rstrip('\n')
    trailing = text[len(stripped):]
    indented = '\n'.join(indention + line for line in stripped.split('\n'))
    return indented + trailing
@contextlib.contextmanager
def ignored(*exceptions):
    """
    Context manager that ignores all of the specified exceptions. This will
    be in the standard library starting with Python 3.4.
    """
    # Same as contextlib.suppress(*exceptions) on Python >= 3.4.
    try:
        yield
    except exceptions:
        pass
def source_to_unicode(source, encoding=None):
    """Return *source* as unicode; bytes are decoded with the detected
    encoding, replacing undecodable characters."""
    def detect_encoding():
        """
        For the implementation of encoding definitions in Python, look at:
        - http://www.python.org/dev/peps/pep-0263/
        - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations
        """
        byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'
        # PEP 263 allows the coding cookie only on the first two lines.
        first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0)
        possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                      first_two_lines)
        if possible_encoding:
            return possible_encoding.group(1)
        else:
            # the default if nothing else has been set -> PEP 263
            return encoding if encoding is not None else 'iso-8859-1'
    if isinstance(source, unicode):
        # only cast str/bytes
        return source
    # cast to unicode by default
    return unicode(source, detect_encoding(), 'replace')
| {
"repo_name": "syseleven/vim-config",
"path": ".vim/bundle/jedi-vim/jedi/jedi/common.py",
"copies": "3",
"size": "4718",
"license": "apache-2.0",
"hash": -1022404644993503700,
"line_mean": 30.8783783784,
"line_max": 88,
"alpha_frac": 0.6456125477,
"autogenerated": false,
"ratio": 4.190053285968029,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6335665833668028,
"avg_score": null,
"num_lines": null
} |
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
import re
from itertools import chain
from ast import literal_eval
from jedi._compatibility import unicode, reraise
from jedi import settings
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    `hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.
    :param base: return values of sys.exc_info().
    """
    # Intentionally empty: the class only re-tags AttributeError (see
    # reraise_uncaught) so it can escape hasattr/__getattr__ suppression.
def safe_property(func):
    # Like @property, but an AttributeError raised inside the getter surfaces
    # as UncaughtAttributeError instead of looking like a missing attribute.
    return property(reraise_uncaught(func))
def reraise_uncaught(func):
    """
    Decorator that re-raises an escaping ``AttributeError`` from *func* as
    ``UncaughtAttributeError``.

    ``hasattr`` and ``__getattr__`` silently swallow ``AttributeError``, so a
    bug inside a dynamic attribute (e.g. a ``@property`` getter) can look like
    a merely missing attribute.  Re-tagging the exception keeps such bugs
    visible; the original traceback is preserved via ``reraise``.
    """
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AttributeError:
            _, value, traceback = sys.exc_info()
            reraise(UncaughtAttributeError(value), traceback)
    return guarded
class PushBackIterator(object):
    """Iterator wrapper whose consumers can un-read values.

    Values handed to :meth:`push_back` are yielded again (most recent first)
    before the wrapped iterator is advanced.  ``current`` always holds the
    value most recently produced.
    """

    def __init__(self, iterator):
        self.pushes = []          # LIFO stack of pushed-back values
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        """Make *value* the result of the next ``next()`` call."""
        self.pushes.append(value)

    def __iter__(self):
        return self

    def next(self):
        """Python 2 compatibility alias for :meth:`__next__`."""
        return self.__next__()

    def __next__(self):
        value = self.pushes.pop() if self.pushes else next(self.iterator)
        self.current = value
        return value
@contextlib.contextmanager
def scale_speed_settings(factor):
    """Temporarily multiply jedi's speed-related limits by *factor*.

    The previous values are restored on exit, even when the managed block
    raises (hence the try/finally).
    """
    a = settings.max_executions
    b = settings.max_until_execution_unique
    settings.max_executions *= factor
    settings.max_until_execution_unique *= factor
    try:
        yield
    finally:
        settings.max_executions = a
        settings.max_until_execution_unique = b
def indent_block(text, indention='    '):
    """Prefix every line of *text* with *indention*, keeping any trailing
    newlines exactly where they were."""
    stripped = text.rstrip('\n')
    trailing = text[len(stripped):]
    indented = '\n'.join(indention + line for line in stripped.split('\n'))
    return indented + trailing
@contextlib.contextmanager
def ignored(*exceptions):
    """
    Context manager that ignores all of the specified exceptions. This will
    be in the standard library starting with Python 3.4.
    """
    # Same as contextlib.suppress(*exceptions) on Python >= 3.4.
    try:
        yield
    except exceptions:
        pass
def source_to_unicode(source, encoding=None):
    """Return *source* as unicode; bytes are decoded with the detected
    encoding, replacing undecodable characters."""
    def detect_encoding():
        """
        For the implementation of encoding definitions in Python, look at:
        - http://www.python.org/dev/peps/pep-0263/
        - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations
        """
        byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'
        # PEP 263 allows the coding cookie only on the first two lines.
        first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0)
        possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                      first_two_lines)
        if possible_encoding:
            return possible_encoding.group(1)
        else:
            # the default if nothing else has been set -> PEP 263
            return encoding if encoding is not None else 'utf-8'
    if isinstance(source, unicode):
        # only cast str/bytes
        return source
    # cast to unicode by default
    return unicode(source, detect_encoding(), 'replace')
def splitlines(string):
    """
    A splitlines for Python code. In contrast to Python's ``str.splitlines``,
    looks at form feeds and other special characters as normal text. Just
    splits ``\n`` and ``\r\n``.
    Also different: Returns ``['']`` for an empty string input.
    """
    # Alternation order is safe: at a '\r' the first branch can't match, so
    # '\r\n' is consumed as a single separator, never leaving a stray '\r'.
    return re.split('\n|\r\n', string)
def unite(iterable):
    """Flatten one nesting level: union of all inner iterables, as a set."""
    result = set()
    for group in iterable:
        result.update(group)
    return result
| {
"repo_name": "rfguri/vimfiles",
"path": "bundle/ycm/third_party/ycmd/third_party/JediHTTP/vendor/jedi/jedi/common.py",
"copies": "1",
"size": "5091",
"license": "mit",
"hash": -8839519003334778000,
"line_mean": 30.81875,
"line_max": 88,
"alpha_frac": 0.643095659,
"autogenerated": false,
"ratio": 4.162714636140638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5305810295140638,
"avg_score": null,
"num_lines": null
} |
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
import re
import os
from jedi._compatibility import reraise
_sep = os.path.sep
if os.path.altsep is not None:
    _sep += os.path.altsep  # e.g. on Windows include '/' as well as '\\'
# Matches a trailing file extension OR a path-separator + '__init__.py'
# suffix (used by dotted_from_fs_path below).
# Raw string: in a plain literal '\.' is an invalid escape sequence
# (DeprecationWarning since 3.6, SyntaxWarning from Python 3.12); the
# compiled pattern is unchanged.
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
    """Decorator: materialize *func*'s iterable result as a ``list``.

    ``functools.wraps`` preserves the wrapped function's metadata
    (``__name__``, ``__doc__``), which the original wrapper dropped.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return list(func(*args, **kwargs))
    return wrapper
def unite(iterable):
    """Flatten one nesting level: union of all inner iterables, as a set."""
    result = set()
    for group in iterable:
        result.update(group)
    return result
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    `hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.
    :param base: return values of sys.exc_info().
    """
    # Intentionally empty: the class only re-tags AttributeError (see
    # reraise_uncaught) so it can escape hasattr/__getattr__ suppression.
def safe_property(func):
    # Like @property, but an AttributeError raised inside the getter surfaces
    # as UncaughtAttributeError instead of looking like a missing attribute.
    return property(reraise_uncaught(func))
def reraise_uncaught(func):
    """
    Decorator that re-raises an escaping ``AttributeError`` from *func* as
    ``UncaughtAttributeError``.

    ``hasattr`` and ``__getattr__`` silently swallow ``AttributeError``, so a
    bug inside a dynamic attribute (e.g. a ``@property`` getter) can look like
    a merely missing attribute.  Re-tagging the exception keeps such bugs
    visible; the original traceback is preserved via ``reraise``.
    """
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AttributeError:
            _, value, traceback = sys.exc_info()
            reraise(UncaughtAttributeError(value), traceback)
    return guarded
class PushBackIterator(object):
    """Iterator wrapper whose consumers can un-read values.

    Values handed to :meth:`push_back` are yielded again (most recent first)
    before the wrapped iterator is advanced.  ``current`` always holds the
    value most recently produced.
    """

    def __init__(self, iterator):
        self.pushes = []          # LIFO stack of pushed-back values
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        """Make *value* the result of the next ``next()`` call."""
        self.pushes.append(value)

    def __iter__(self):
        return self

    def next(self):
        """Python 2 compatibility alias for :meth:`__next__`."""
        return self.__next__()

    def __next__(self):
        value = self.pushes.pop() if self.pushes else next(self.iterator)
        self.current = value
        return value
@contextlib.contextmanager
def ignored(*exceptions):
    """
    Context manager that ignores all of the specified exceptions. This will
    be in the standard library starting with Python 3.4.
    """
    # Same as contextlib.suppress(*exceptions) on Python >= 3.4.
    try:
        yield
    except exceptions:
        pass
def indent_block(text, indention='    '):
    """Prefix every line of *text* with *indention*, keeping any trailing
    newlines exactly where they were."""
    stripped = text.rstrip('\n')
    trailing = text[len(stripped):]
    indented = '\n'.join(indention + line for line in stripped.split('\n'))
    return indented + trailing
def dotted_from_fs_path(fs_path, sys_path):
    """
    Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e.
    compares the path with sys.path and then returns the dotted_path. If the
    path is not in the sys.path, just returns None.
    """
    if os.path.basename(fs_path).startswith('__init__.'):
        # We are calculating the path. __init__ files are not interesting.
        fs_path = os.path.dirname(fs_path)
    # prefer
    #   - UNIX
    #     /path/to/pythonX.Y/lib-dynload
    #     /path/to/pythonX.Y/site-packages
    #   - Windows
    #     C:\path\to\DLLs
    #     C:\path\to\Lib\site-packages
    # over
    #   - UNIX
    #     /path/to/pythonX.Y
    #   - Windows
    #     C:\path\to\Lib
    # Longest matching sys.path prefix wins.
    path = ''
    for s in sys_path:
        if (fs_path.startswith(s) and len(path) < len(s)):
            path = s
    # NOTE(review): if nothing in sys_path matches, path stays '' and the
    # whole fs_path gets converted -- despite the docstring, None is never
    # actually returned.
    # - Window
    # X:\path\to\lib-dynload/datetime.pyd => datetime
    module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/')
    # - Window
    # Replace like X:\path\to\something/foo/bar.py
    return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.')
| {
"repo_name": "edelvalle/SublimeJEDI",
"path": "dependencies/jedi/evaluate/utils.py",
"copies": "8",
"size": "4702",
"license": "mit",
"hash": -3999563322148045000,
"line_mean": 29.5324675325,
"line_max": 84,
"alpha_frac": 0.6314334326,
"autogenerated": false,
"ratio": 3.7980613893376414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8429494821937642,
"avg_score": null,
"num_lines": null
} |
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
import tokenizer as tokenize
from jedi._compatibility import next, reraise
from jedi import settings
# Keywords that open a control-flow block; NoErrorTokenizer uses these to
# decide whether a token begins a flow statement rather than a new scope.
FLOWS = ['if', 'else', 'elif', 'while', 'with', 'try', 'except', 'finally']
class MultiLevelStopIteration(Exception):
    """
    StopIteration's get catched pretty easy by for loops, let errors propagate.
    """
    # Deliberately NOT derived from StopIteration, so for-loops can't absorb it.
    pass
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    `hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.
    :param base: return values of sys.exc_info().
    """
    # Intentionally empty: the class only re-tags AttributeError (see
    # rethrow_uncaught) so it can escape hasattr/__getattr__ suppression.
def rethrow_uncaught(func):
    """
    Decorator that re-raises an escaping ``AttributeError`` from *func* as
    ``UncaughtAttributeError``.

    ``hasattr`` and ``__getattr__`` silently swallow ``AttributeError``, so a
    bug inside a dynamic attribute (e.g. a ``@property`` getter) can look like
    a merely missing attribute.  Re-tagging the exception keeps such bugs
    visible; the original traceback is preserved via ``reraise``.
    """
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AttributeError:
            _, value, traceback = sys.exc_info()
            reraise(UncaughtAttributeError(value), traceback)
    return guarded
class PushBackIterator(object):
    """Iterator wrapper whose consumers can un-read values.

    Values handed to :meth:`push_back` are yielded again (most recent first)
    before the wrapped iterator is advanced.  ``current`` always holds the
    value most recently produced.
    """

    def __init__(self, iterator):
        self.pushes = []          # LIFO stack of pushed-back values
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        """Make *value* the result of the next ``next()`` call."""
        self.pushes.append(value)

    def __iter__(self):
        return self

    def next(self):
        """Python 2 compatibility alias for :meth:`__next__`."""
        return self.__next__()

    def __next__(self):
        value = self.pushes.pop() if self.pushes else next(self.iterator)
        self.current = value
        return value
class NoErrorTokenizer(object):
    """Wrapper around ``tokenize`` that applies a line/column *offset* and,
    in fast-parser mode, stops (via MultiLevelStopIteration) at statement
    boundaries instead of erroring out.
    """
    def __init__(self, readline, offset=(0, 0), is_fast_parser=False):
        self.readline = readline
        self.gen = tokenize.generate_tokens(readline)
        self.offset = offset        # (row, column) added to token positions
        self.closed = False
        self.is_first = True
        self.push_backs = []        # FIFO queue of un-read tokens
        # fast parser options
        self.is_fast_parser = is_fast_parser
        self.current = self.previous = [None, None, (0, 0), (0, 0), '']
        self.in_flow = False
        self.new_indent = False
        self.parser_indent = self.old_parser_indent = 0
        self.is_decorator = False
        self.first_stmt = True
    def push_last_back(self):
        # Re-queue the most recent token so the next __next__ returns it.
        self.push_backs.append(self.current)
    def next(self):
        """ Python 2 Compatibility """
        return self.__next__()
    def __next__(self):
        if self.closed:
            raise MultiLevelStopIteration()
        if self.push_backs:
            return self.push_backs.pop(0)
        self.last_previous = self.previous
        self.previous = self.current
        self.current = next(self.gen)
        c = list(self.current)
        if c[0] == tokenize.ENDMARKER:
            # Roll back so current/previous still describe real tokens.
            self.current = self.previous
            self.previous = self.last_previous
            raise MultiLevelStopIteration()
        # this is exactly the same check as in fast_parser, but this time with
        # tokenize and therefore precise.
        breaks = ['def', 'class', '@']
        if self.is_first:
            # First token: offset applies to both row and column.
            c[2] = self.offset[0] + c[2][0], self.offset[1] + c[2][1]
            c[3] = self.offset[0] + c[3][0], self.offset[1] + c[3][1]
            self.is_first = False
        else:
            # Later tokens: only the row needs shifting.
            c[2] = self.offset[0] + c[2][0], c[2][1]
            c[3] = self.offset[0] + c[3][0], c[3][1]
        self.current = c
        def close():
            # End this parser slice unless nothing was consumed yet.
            if not self.first_stmt:
                self.closed = True
                raise MultiLevelStopIteration()
        # ignore indents/comments
        if self.is_fast_parser \
                and self.previous[0] in (tokenize.INDENT, tokenize.NL, None,
                                         tokenize.NEWLINE, tokenize.DEDENT) \
                and c[0] not in (tokenize.COMMENT, tokenize.INDENT,
                                 tokenize.NL, tokenize.NEWLINE, tokenize.DEDENT):
            # print c, tokenize.tok_name[c[0]]
            tok = c[1]
            indent = c[2][1]
            if indent < self.parser_indent:  # -> dedent
                self.parser_indent = indent
                self.new_indent = False
                if not self.in_flow or indent < self.old_parser_indent:
                    close()
                self.in_flow = False
            elif self.new_indent:
                self.parser_indent = indent
                self.new_indent = False
            if not self.in_flow:
                if tok in FLOWS or tok in breaks:
                    self.in_flow = tok in FLOWS
                    if not self.is_decorator and not self.in_flow:
                        close()
                    self.is_decorator = '@' == tok
                    if not self.is_decorator:
                        self.old_parser_indent = self.parser_indent
                        self.parser_indent += 1  # new scope: must be higher
                        self.new_indent = True
            if tok != '@':
                if self.first_stmt and not self.new_indent:
                    self.parser_indent = indent
                self.first_stmt = False
        return c
@contextlib.contextmanager
def scale_speed_settings(factor):
    """
    Context manager that temporarily multiplies jedi's speed-related limits
    by *factor* and restores the previous values on exit.

    The restore runs in a ``finally`` block so the settings are reset even
    when the managed block raises (the original leaked the scaled values on
    exception; compare the corrected variant elsewhere in this codebase).
    """
    a = settings.max_executions
    b = settings.max_until_execution_unique
    settings.max_executions *= factor
    settings.max_until_execution_unique *= factor
    try:
        yield
    finally:
        settings.max_executions = a
        settings.max_until_execution_unique = b
def indent_block(text, indention='    '):
    """Prefix every line of *text* with *indention*, keeping any trailing
    newlines exactly where they were."""
    stripped = text.rstrip('\n')
    trailing = text[len(stripped):]
    indented = '\n'.join(indention + line for line in stripped.split('\n'))
    return indented + trailing
@contextlib.contextmanager
def ignored(*exceptions):
    """Context manager that ignores all of the specified exceptions. This will
    be in the standard library starting with Python 3.4."""
    # Same as contextlib.suppress(*exceptions) on Python >= 3.4.
    try:
        yield
    except exceptions:
        pass
| {
"repo_name": "stevenbaker/dotfiles",
"path": ".vim/bundle/jedi-vim/jedi/jedi/common.py",
"copies": "3",
"size": "7146",
"license": "mit",
"hash": -7334574756159066000,
"line_mean": 32.8672985782,
"line_max": 81,
"alpha_frac": 0.5856423174,
"autogenerated": false,
"ratio": 4.137811233352634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000057796786498670675,
"num_lines": 211
} |
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
from jedi._compatibility import reraise
from jedi import settings
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    `hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.
    :param base: return values of sys.exc_info().
    """
    # Intentionally empty: the class only re-tags AttributeError (see
    # reraise_uncaught) so it can escape hasattr/__getattr__ suppression.
def safe_property(func):
    # Like @property, but an AttributeError raised inside the getter surfaces
    # as UncaughtAttributeError instead of looking like a missing attribute.
    return property(reraise_uncaught(func))
def reraise_uncaught(func):
    """
    Decorator that re-raises an escaping ``AttributeError`` from *func* as
    ``UncaughtAttributeError``.

    ``hasattr`` and ``__getattr__`` silently swallow ``AttributeError``, so a
    bug inside a dynamic attribute (e.g. a ``@property`` getter) can look like
    a merely missing attribute.  Re-tagging the exception keeps such bugs
    visible; the original traceback is preserved via ``reraise``.
    """
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AttributeError:
            _, value, traceback = sys.exc_info()
            reraise(UncaughtAttributeError(value), traceback)
    return guarded
class PushBackIterator(object):
    """Iterator wrapper whose consumers can un-read values.

    Values handed to :meth:`push_back` are yielded again (most recent first)
    before the wrapped iterator is advanced.  ``current`` always holds the
    value most recently produced.
    """

    def __init__(self, iterator):
        self.pushes = []          # LIFO stack of pushed-back values
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        """Make *value* the result of the next ``next()`` call."""
        self.pushes.append(value)

    def __iter__(self):
        return self

    def next(self):
        """Python 2 compatibility alias for :meth:`__next__`."""
        return self.__next__()

    def __next__(self):
        value = self.pushes.pop() if self.pushes else next(self.iterator)
        self.current = value
        return value
def indent_block(text, indention='    '):
    """Prefix every line of *text* with *indention*, keeping any trailing
    newlines exactly where they were."""
    stripped = text.rstrip('\n')
    trailing = text[len(stripped):]
    indented = '\n'.join(indention + line for line in stripped.split('\n'))
    return indented + trailing
@contextlib.contextmanager
def ignored(*exceptions):
    """
    Context manager that ignores all of the specified exceptions. This will
    be in the standard library starting with Python 3.4.
    """
    # Same as contextlib.suppress(*exceptions) on Python >= 3.4.
    try:
        yield
    except exceptions:
        pass
def unite(iterable):
    """Flatten one nesting level: union of all inner iterables, as a set."""
    result = set()
    for group in iterable:
        result.update(group)
    return result
def to_list(func):
    """Decorator: materialize *func*'s iterable result as a ``list``.

    ``functools.wraps`` preserves the wrapped function's metadata
    (``__name__``, ``__doc__``), which the original wrapper dropped.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return list(func(*args, **kwargs))
    return wrapper
| {
"repo_name": "NixaSoftware/CVis",
"path": "venv/lib/python2.7/site-packages/jedi/common.py",
"copies": "1",
"size": "3397",
"license": "apache-2.0",
"hash": -2103930164891915300,
"line_mean": 29.6036036036,
"line_max": 79,
"alpha_frac": 0.6555784516,
"autogenerated": false,
"ratio": 4.272955974842767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428534426442767,
"avg_score": null,
"num_lines": null
} |
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
from jedi._compatibility import reraise
def to_list(func):
    """Decorator: materialize *func*'s iterable result as a ``list``.

    ``functools.wraps`` preserves the wrapped function's metadata
    (``__name__``, ``__doc__``), which the original wrapper dropped.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return list(func(*args, **kwargs))
    return wrapper
def unite(iterable):
    """Flatten one nesting level: union of all inner iterables, as a set."""
    result = set()
    for group in iterable:
        result.update(group)
    return result
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    `hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.
    :param base: return values of sys.exc_info().
    """
    # Intentionally empty: the class only re-tags AttributeError (see
    # reraise_uncaught) so it can escape hasattr/__getattr__ suppression.
def safe_property(func):
    # Like @property, but an AttributeError raised inside the getter surfaces
    # as UncaughtAttributeError instead of looking like a missing attribute.
    return property(reraise_uncaught(func))
def reraise_uncaught(func):
    """
    Decorator that re-raises an escaping ``AttributeError`` from *func* as
    ``UncaughtAttributeError``.

    ``hasattr`` and ``__getattr__`` silently swallow ``AttributeError``, so a
    bug inside a dynamic attribute (e.g. a ``@property`` getter) can look like
    a merely missing attribute.  Re-tagging the exception keeps such bugs
    visible; the original traceback is preserved via ``reraise``.
    """
    @functools.wraps(func)
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AttributeError:
            _, value, traceback = sys.exc_info()
            reraise(UncaughtAttributeError(value), traceback)
    return guarded
class PushBackIterator(object):
    """Iterator wrapper whose consumers can un-read values.

    Values handed to :meth:`push_back` are yielded again (most recent first)
    before the wrapped iterator is advanced.  ``current`` always holds the
    value most recently produced.
    """

    def __init__(self, iterator):
        self.pushes = []          # LIFO stack of pushed-back values
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        """Make *value* the result of the next ``next()`` call."""
        self.pushes.append(value)

    def __iter__(self):
        return self

    def next(self):
        """Python 2 compatibility alias for :meth:`__next__`."""
        return self.__next__()

    def __next__(self):
        value = self.pushes.pop() if self.pushes else next(self.iterator)
        self.current = value
        return value
@contextlib.contextmanager
def ignored(*exceptions):
    """
    Context manager that ignores all of the specified exceptions. This will
    be in the standard library starting with Python 3.4.
    """
    # Same as contextlib.suppress(*exceptions) on Python >= 3.4.
    try:
        yield
    except exceptions:
        pass
def indent_block(text, indention='    '):
    """Prefix every line of *text* with *indention*, keeping any trailing
    newlines exactly where they were."""
    stripped = text.rstrip('\n')
    trailing = text[len(stripped):]
    indented = '\n'.join(indention + line for line in stripped.split('\n'))
    return indented + trailing
| {
"repo_name": "bazitur/brackets-python-tools",
"path": "pythonfiles/jedi/evaluate/utils.py",
"copies": "1",
"size": "3371",
"license": "mit",
"hash": 5764960438006678000,
"line_mean": 29.6454545455,
"line_max": 79,
"alpha_frac": 0.6541085731,
"autogenerated": false,
"ratio": 4.272496831432193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5426605404532192,
"avg_score": null,
"num_lines": null
} |
# A unix-oriented process dispatcher. Uses a single thread with select and
# waitpid to dispatch tasks. This avoids several deadlocks that are possible
# with fork/exec + threads + Python.
import errno, os, select
from datetime import datetime, timedelta
from results import TestOutput
class Task(object):
    """Book-keeping record for one spawned test child."""
    def __init__(self, test, pid, stdout, stderr):
        self.test = test
        self.cmd = test.get_command(test.js_cmd_prefix)
        self.pid = pid
        self.stdout = stdout  # fd: read end of the child's stdout pipe
        self.stderr = stderr  # fd: read end of the child's stderr pipe
        self.start = datetime.now()  # wall-clock start, used for timeouts
        self.out = []  # stdout fragments accumulated by flush_input
        self.err = []  # stderr fragments accumulated by flush_input
def spawn_test(test, passthrough=False):
    """Spawn one child, return a task struct."""
    # Without passthrough: capture the child's stdout/stderr through pipes
    # and hand the read ends to the parent's Task record.
    if not passthrough:
        (rout, wout) = os.pipe()
        (rerr, werr) = os.pipe()
        rv = os.fork()
        # Parent.
        if rv:
            os.close(wout)
            os.close(werr)
            return Task(test, rv, rout, rerr)
        # Child.
        os.close(rout)
        os.close(rerr)
        os.dup2(wout, 1)
        os.dup2(werr, 2)
    # Reached by the forked child (captured mode), or by the current process
    # itself when passthrough is set.
    # NOTE(review): with passthrough=True this exec's the *current* process
    # and never returns a Task -- confirm that is intended.
    cmd = test.get_command(test.js_cmd_prefix)
    os.execvp(cmd[0], cmd)
def total_seconds(td):
    """
    Return the total number of seconds contained in the duration as a float
    """
    whole_micros = (td.days * 24 * 3600 + td.seconds) * 10**6
    return (whole_micros + float(td.microseconds)) / 10**6
def get_max_wait(tasks, results, timeout):
    """
    Return the longest time (in seconds) we may sleep before some running
    task hits its deadline or the progress meter needs refreshing.
    """
    # The progress meter dictates the baseline wake-up cadence.
    wait = results.pb.update_granularity()
    if timeout:
        # Wake earlier if any task would time out before then.
        deadline_delta = timedelta(seconds=timeout)
        now = datetime.now()
        for task in tasks:
            remaining = task.start + deadline_delta - now
            wait = min(wait, remaining)
    # Seconds, clamped at zero so select() never sees a negative timeout.
    return max(total_seconds(wait), 0)
def flush_input(fd, frags):
    """
    Drain whatever is currently readable from file descriptor *fd* into the
    list *frags* without blocking.
    """
    while True:
        chunk = os.read(fd, 4096)
        frags.append(chunk)
        if len(chunk) < 4096:
            # Short read: the pipe is drained for now.
            return
        # A full buffer may or may not mean more data; poll before reading
        # again so we cannot block indefinitely.
        readable, _, _ = select.select([fd], [], [], 0)
        if not readable:
            return
def read_input(tasks, timeout):
    """
    Wait up to *timeout* seconds for output from any task, draining every
    readable stream into its matching fragment list.
    """
    watch = []
    deathwatch = []
    frags_for = {}  # fd -> fragment list that owns its output
    for task in tasks:
        for fd, frags in ((task.stdout, task.out), (task.stderr, task.err)):
            watch.append(fd)
            frags_for[fd] = frags
        # stdout appears in the exceptional set when the child dies, letting
        # us react immediately instead of idling until the timeout.
        deathwatch.append(task.stdout)
    ready, _, _ = select.select(watch, [], deathwatch, timeout)
    for fd in ready:
        flush_input(fd, frags_for[fd])
def remove_task(tasks, pid):
    """
    Remove the task whose pid is *pid* from *tasks* (in place) and return it.

    Raises ``KeyError`` if no task matches.  (The original docstring claimed
    a pair was returned; only the removed task ever was.)
    """
    for index, task in enumerate(tasks):
        if task.pid == pid:
            return tasks.pop(index)
    raise KeyError("No such pid: {}".format(pid))
def timed_out(task, timeout):
    """
    Has *task* been running for more than *timeout* seconds?  A falsy
    *timeout* means "no deadline" and always yields False.
    """
    if not timeout:
        return False
    elapsed = datetime.now() - task.start
    return elapsed > timedelta(seconds=timeout)
def reap_zombies(tasks, results, timeout):
    """
    Search for children of this process that have finished. If they are tasks,
    then this routine will clean up the child and send a TestOutput to the
    results channel. This method returns a new task list that has had the ended
    tasks removed.
    """
    # NOTE(review): despite the docstring, *tasks* is modified in place by
    # remove_task and the same list object is returned.
    while True:
        try:
            # Non-blocking poll for any finished child of this process.
            pid, status = os.waitpid(0, os.WNOHANG)
            if pid == 0:
                break
        except OSError as e:
            if e.errno == errno.ECHILD:
                # No children left at all; nothing to reap.
                break
            raise e
        ended = remove_task(tasks, pid)
        # Drain anything still buffered in the pipes before closing them.
        flush_input(ended.stdout, ended.out)
        flush_input(ended.stderr, ended.err)
        os.close(ended.stdout)
        os.close(ended.stderr)
        returncode = os.WEXITSTATUS(status)
        if os.WIFSIGNALED(status):
            # Negative return code mirrors the signal-death convention.
            returncode = -os.WTERMSIG(status)
        out = TestOutput(
            ended.test,
            ended.cmd,
            ''.join(ended.out),
            ''.join(ended.err),
            returncode,
            total_seconds(datetime.now() - ended.start),
            timed_out(ended, timeout))
        results.push(out)
    return tasks
def kill_undead(tasks, results, timeout):
    """
    Send signal 9 (SIGKILL) to every task running longer than |timeout|.

    |results| is accepted but unused here; it keeps the signature parallel
    with the other per-stage helpers.
    """
    overdue = (task for task in tasks if timed_out(task, timeout))
    for task in overdue:
        os.kill(task.pid, 9)
def run_all_tests(tests, results, options):
    """Drive the whole run: spawn, monitor and reap test tasks."""
    # Work on a reversed copy so each pop() takes the next test in O(1).
    pending = list(reversed(tests))
    running = []  # the set of currently executing tasks
    while pending or running:
        # Keep the worker pool topped up.
        while pending and len(running) < options.worker_count:
            running.append(spawn_test(pending.pop(), options.passthrough))
        timeout = get_max_wait(running, results, options.timeout)
        read_input(running, timeout)
        kill_undead(running, results, options.timeout)
        running = reap_zombies(running, results, options.timeout)
        results.pb.poke()
    return True
| {
"repo_name": "kostaspl/SpiderMonkey38",
"path": "js/src/tests/lib/tasks_unix.py",
"copies": "1",
"size": "6038",
"license": "mpl-2.0",
"hash": -5857604089124803000,
"line_mean": 28.7438423645,
"line_max": 80,
"alpha_frac": 0.5944021199,
"autogenerated": false,
"ratio": 3.807061790668348,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890562091046855,
"avg_score": 0.002180363904298432,
"num_lines": 203
} |
"""A Unstructred Grid file reader object.
"""
# Author: R.Sreekanth <sreekanth [at] aero.iitb.ac.in>
# Suyog Dutt Jain <suyog.jain [at] aero.iitb.ac.in>
# Copyright (c) 2009-2015, Enthought, Inc.
# License: BSD Style.
from os.path import basename
# Enthought library imports.
from traits.api import Instance, Str, Dict
from traitsui.api import View, Group, Item, Include
from tvtk.api import tvtk
# Local imports.
from tvtk.common import is_old_pipeline
from mayavi.core.file_data_source import FileDataSource
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.common import error
########################################################################
# `UnstructuredGridReader` class
########################################################################
class UnstructuredGridReader(FileDataSource):
    """A Mayavi file data source for unstructured grid datasets.

    The concrete tvtk reader instance is selected from the file
    extension via the private `_reader_dict` trait (see
    `__reader_dict_default` for the supported extensions).
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The UnstructuredGridAlgorithm data file reader.
    reader = Instance(tvtk.Object, allow_none=False, record=True)
    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['unstructured_grid'])
    ######################################################################
    # Private Traits
    # Maps a lower-cased file extension to the reader that handles it.
    _reader_dict = Dict(Str, Instance(tvtk.Object))
    # Our view.
    view = View(Group(Include('time_step_group'),
                      Item(name='base_file_name'),
                      Item(name='reader',
                           style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)
    ######################################################################
    # `object` interface
    ######################################################################
    def __set_pure_state__(self, state):
        """Restore this source from persisted state."""
        # The reader has its own file_name which needs to be fixed.
        state.reader.file_name = state.file_path.abs_pth
        # Now call the parent class to setup everything.
        super(UnstructuredGridReader, self).__set_pure_state__(state)
    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def update(self):
        """Re-run the reader and refresh the scene."""
        # NOTE(review): the reader is updated *before* the empty-file-name
        # guard below -- verify this ordering is intentional.
        self.reader.update()
        if len(self.file_path.get()) == 0:
            return
        self.render()
    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True
    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port
    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_path_changed(self, fpath):
        """Trait handler: pick and configure a reader for the new file."""
        value = fpath.get()
        if len(value) == 0:
            return
        # Extract the file extension
        splitname = value.strip().split('.')
        extension = splitname[-1].lower()
        # Select UnstructuredGridreader based on file type
        old_reader = self.reader
        if extension in self._reader_dict:
            self.reader = self._reader_dict[extension]
        else:
            error('Invalid file extension for file: %s'%value)
            return
        self.reader.file_name = value.strip()
        self.reader.update_information()
        if isinstance(self.reader, tvtk.ExodusIIReader):
            # Make sure the point fields are read during Update().
            for k in range(self.reader.number_of_point_result_arrays ):
                arr_name = self.reader.get_point_result_array_name( k )
                self.reader.set_point_result_array_status( arr_name, 1 )
        self.reader.update()
        # Re-wire render notifications from the old reader to the new one.
        if old_reader is not None:
            old_reader.on_trait_change(self.render, remove=True)
        self.reader.on_trait_change(self.render)
        old_outputs = self.outputs
        if isinstance(self.reader, tvtk.ExodusIIReader):
            # Exodus output is a multiblock dataset; unwrap the first block.
            self.outputs = [self.reader.output.get_block(0).get_block(0)]
        else:
            self.outputs = [self.reader.output]
        if self.outputs == old_outputs:
            # Assigning an equal value presumably fires no trait change
            # notification, so flag the data change explicitly -- verify.
            self.data_changed = True
        # Change our name on the tree view
        self.name = self._get_name()
    def _get_name(self):
        """ Returns the name to display on the tree view. Note that
        this is not a property getter.
        """
        fname = basename(self.file_path.get())
        ret = "%s"%fname
        if len(self.file_list) > 1:
            ret += " (timeseries)"
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
    def __reader_dict_default(self):
        """Default value for reader dict."""
        # Legacy VTK pipelines use ExodusReader ('exii'); newer ones use
        # ExodusIIReader ('ex2').
        if is_old_pipeline():
            rd = {'inp':tvtk.AVSucdReader(),
                  'neu':tvtk.GAMBITReader(),
                  'exii':tvtk.ExodusReader()
                  }
        else:
            rd = {'inp':tvtk.AVSucdReader(),
                  'neu':tvtk.GAMBITReader(),
                  'ex2':tvtk.ExodusIIReader()
                  }
        return rd
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/sources/unstructured_grid_reader.py",
"copies": "1",
"size": "5172",
"license": "bsd-3-clause",
"hash": 7626250273214116000,
"line_mean": 34.9166666667,
"line_max": 74,
"alpha_frac": 0.5146945089,
"autogenerated": false,
"ratio": 4.3572030328559395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.537189754175594,
"avg_score": null,
"num_lines": null
} |
"""A Unstructred Grid file reader object.
"""
# Author: R.Sreekanth <sreekanth [at] aero.iitb.ac.in>
# Suyog Dutt Jain <suyog.jain [at] aero.iitb.ac.in>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
from os.path import basename
# Enthought library imports.
from traits.api import Instance, Str, Dict
from traitsui.api import View, Group, Item, Include
from tvtk.api import tvtk
# Local imports.
from tvtk.common import is_old_pipeline
from mayavi.core.file_data_source import FileDataSource
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.common import error
########################################################################
# `UnstructuredGridReader` class
########################################################################
class UnstructuredGridReader(FileDataSource):
    """A Mayavi file data source for unstructured grid datasets.

    The concrete tvtk reader instance is selected from the file
    extension via the private `_reader_dict` trait (see
    `__reader_dict_default` for the supported extensions).
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The UnstructuredGridAlgorithm data file reader.
    reader = Instance(tvtk.Object, allow_none=False, record=True)
    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['unstructured_grid'])
    ######################################################################
    # Private Traits
    # Maps a lower-cased file extension to the reader that handles it.
    _reader_dict = Dict(Str, Instance(tvtk.Object))
    # Our view.
    view = View(Group(Include('time_step_group'),
                      Item(name='base_file_name'),
                      Item(name='reader',
                           style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)
    ######################################################################
    # `object` interface
    ######################################################################
    def __set_pure_state__(self, state):
        """Restore this source from persisted state."""
        # The reader has its own file_name which needs to be fixed.
        state.reader.file_name = state.file_path.abs_pth
        # Now call the parent class to setup everything.
        super(UnstructuredGridReader, self).__set_pure_state__(state)
    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def update(self):
        """Re-run the reader and refresh the scene."""
        # NOTE(review): the reader is updated *before* the empty-file-name
        # guard below -- verify this ordering is intentional.
        self.reader.update()
        if len(self.file_path.get()) == 0:
            return
        self.render()
    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True
    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port
    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_path_changed(self, fpath):
        """Trait handler: pick and configure a reader for the new file."""
        value = fpath.get()
        if len(value) == 0:
            return
        # Extract the file extension
        splitname = value.strip().split('.')
        extension = splitname[-1].lower()
        # Select UnstructuredGridreader based on file type.
        old_reader = self.reader
        # Fixed: `dict.has_key` no longer exists in Python 3; use the
        # `in` operator (works on Python 2 as well, and matches the
        # sibling copy of this class elsewhere in the codebase).
        if extension in self._reader_dict:
            self.reader = self._reader_dict[extension]
        else:
            error('Invalid file extension for file: %s'%value)
            return
        self.reader.file_name = value.strip()
        self.reader.update_information()
        if isinstance(self.reader, tvtk.ExodusIIReader):
            # Make sure the point fields are read during Update().
            # Fixed: `xrange` is Python-2-only; `range` is portable.
            for k in range(self.reader.number_of_point_result_arrays):
                arr_name = self.reader.get_point_result_array_name(k)
                self.reader.set_point_result_array_status(arr_name, 1)
        self.reader.update()
        # Re-wire render notifications from the old reader to the new one.
        if old_reader is not None:
            old_reader.on_trait_change(self.render, remove=True)
        self.reader.on_trait_change(self.render)
        old_outputs = self.outputs
        if isinstance(self.reader, tvtk.ExodusIIReader):
            # Exodus output is a multiblock dataset; unwrap the first block.
            self.outputs = [self.reader.output.get_block(0).get_block(0)]
        else:
            self.outputs = [self.reader.output]
        if self.outputs == old_outputs:
            # Assigning an equal value fires no trait notification, so
            # flag the data change explicitly.
            self.data_changed = True
        # Change our name on the tree view
        self.name = self._get_name()
    def _get_name(self):
        """ Returns the name to display on the tree view. Note that
        this is not a property getter.
        """
        fname = basename(self.file_path.get())
        ret = "%s"%fname
        if len(self.file_list) > 1:
            ret += " (timeseries)"
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
    def __reader_dict_default(self):
        """Default value for reader dict."""
        # Legacy VTK pipelines use ExodusReader ('exii'); newer ones use
        # ExodusIIReader ('ex2').
        if is_old_pipeline():
            rd = {'inp':tvtk.AVSucdReader(),
                  'neu':tvtk.GAMBITReader(),
                  'exii':tvtk.ExodusReader()
                  }
        else:
            rd = {'inp':tvtk.AVSucdReader(),
                  'neu':tvtk.GAMBITReader(),
                  'ex2':tvtk.ExodusIIReader()
                  }
        return rd
| {
"repo_name": "alexandreleroux/mayavi",
"path": "mayavi/sources/unstructured_grid_reader.py",
"copies": "2",
"size": "5178",
"license": "bsd-3-clause",
"hash": -1598316516054268200,
"line_mean": 34.9583333333,
"line_max": 74,
"alpha_frac": 0.5142912321,
"autogenerated": false,
"ratio": 4.354920100925147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005145702717786459,
"num_lines": 144
} |
"""Aurifere
Usage:
aurifere [-v] install <package>...
aurifere [-v] update
Options:
-h --help Show this screen.
-v --verbose
"""
from .vendor.docopt import docopt
from .vendor import colorama
from .install import Install
from .repository import default_repository
def comma_separated_package_list(pkgs):
    """Render the highlighted names of |pkgs| as one comma-separated string."""
    highlighted_names = [hl(pkg.name) for pkg in pkgs]
    return ', '.join(highlighted_names)
def hl(text):
    """Wrap |text| in ANSI codes so terminals render it bright/bold."""
    bright = colorama.Style.BRIGHT
    reset = colorama.Style.RESET_ALL
    return bright + text + reset
def confirm(message):
    """Ask the user a yes/no question; the default answer is yes.

    Returns False only for an explicit negative answer; anything else,
    including just pressing enter, counts as yes.
    """
    prompt = '{}? (Y/n)'.format(message)
    choice = input(prompt)
    # Fixed: previously only 'no', 'n' and 'N' were recognised, so
    # answers like 'No' or 'NO' silently counted as yes.  Normalise
    # case and whitespace so every spelling of "no" is treated alike.
    return choice.strip().lower() not in ('n', 'no')
def review_package(install, package):
    """Walk the user through reviewing one package's pending changes."""
    name = hl(package.name)
    version = hl(package.version())
    last_reviewed = hl(package._git.tag_for_ref('reviewed'))
    print('Reviewing {} {}. Last reviewed version was {}'.format(
        name, version, last_reviewed))
    if package in install.dependencies:
        dependents = comma_separated_package_list(install.dependencies[package])
        print('This package is required by {}'.format(dependents))
    input('About to show diff ...')
    package._git._git('diff', 'reviewed', '--color')
    approved = confirm('Validate review for {} '.format(name))
    if not approved:
        # TODO : maybe we can be a little more diplomatic
        print("Too bad, I'm gonna crash !")
        return
    package.validate_review()
def review_and_install(installer):
    """Summarise pending work, get confirmation, review, then install."""
    if not installer.to_install:
        print('Nothing to do')
        return
    installer.fetch_all()
    from_pacman = installer.pacman_dependencies()
    needs_review = installer.packages_to_review()
    to_install_names = comma_separated_package_list(installer.to_install)
    print('Packages to build and install : {}'.format(to_install_names))
    if from_pacman:
        print('Packages installed from pacman as dependencies :')
        for pkg_name, requirers in from_pacman.items():
            requirer_names = hl(comma_separated_package_list(requirers))
            print('{} from {}'.format(hl(pkg_name), requirer_names))
    if needs_review:
        print('Packages to review : {}'.format(
            comma_separated_package_list(needs_review)))
    if not confirm('Do you confirm'):
        return
    for pkg in needs_review:
        review_package(installer, pkg)
    installer.install()
def update():
    """Refresh every AUR package tracked in the default repository."""
    Install(default_repository()).update_aur()
def main():
    """Command-line entry point: parse arguments and dispatch."""
    args = docopt(__doc__)
    if args['--verbose']:
        import logging
        logging.basicConfig(level=logging.DEBUG)
    installer = Install(default_repository())
    if args['install']:
        installer.add_packages(args['<package>'])
    if args['update']:
        installer.update_aur()
    review_and_install(installer)
if __name__ == '__main__':
main()
| {
"repo_name": "madjar/aurifere",
"path": "aurifere/cli.py",
"copies": "1",
"size": "2773",
"license": "isc",
"hash": 1272272364031834000,
"line_mean": 24.9158878505,
"line_max": 79,
"alpha_frac": 0.6314460873,
"autogenerated": false,
"ratio": 3.90014064697609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.503158673427609,
"avg_score": null,
"num_lines": null
} |
'''aurora core -- provides the main classes
This mostly implements the Articles class that does all of the iteration
on the articles. It also implements the individual Article class.
'''
import os
import re
class Articles:
    '''Index of every Article found in a directory.

    Exposes several views over the same articles: by slug, by recency,
    grouped by year/month, and the short list shown on the index page.
    '''
    def __init__(self, path, formatter):
        self.by_slug = {}    # slug -> Article
        self.by_recent = []  # sorted oldest first (index 0) to newest (-1)
        self.by_date = {}    # year -> {month -> [Article, ...]}
        self.on_index = []   # newest articles, most recent first
        if not os.path.isdir(path):
            return
        for fn in os.listdir(path):
            ffn = os.path.join(path, fn)
            if not os.path.isfile(ffn):
                continue
            # Only files named like YYYY-MM-DD-slug.md are articles.
            match = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)-(.+)\.md$', fn)
            if match is None:
                continue
            yr, mo, da, slug = match.groups()
            # First file wins when two files share a slug.
            if slug in self.by_slug:
                continue
            yr, mo, da = int(yr), int(mo), int(da)
            art = Article(yr, mo, da, slug, ffn, formatter)
            if not art.publish:
                continue
            self.by_slug[slug] = art
            self.by_recent.append(art)
            self.by_date.setdefault(yr, {}).setdefault(mo, []).append(art)
        # Order every view chronologically (Article defines __lt__).
        self.by_recent.sort()
        for months in self.by_date.values():
            for arts in months.values():
                arts.sort()
        # The index page shows the three most recent, newest first.
        self.on_index = list(reversed(self.by_recent[-3:]))
class Article:
    '''A single article parsed from a markdown file.

    The file may begin with a header of `key: value` lines (title, time,
    categories, subtitle, publish).  The first line that does not look
    like a header ends the header; everything from there on is content,
    which is run through |formatter| to produce `self.content`.
    '''
    def __init__(self, yr, mo, da, slug, ffn, formatter):
        self.year = yr
        self.month = mo
        self.day = da
        self.slug = slug
        self.filename = ffn
        self.props = {}
        self.content = ''
        self.raw_content = ''
        self.title = 'no title'
        self.publish = True
        self.date = '%04d-%02d-%02d' % (yr, mo, da)
        # Default time of day; may be overridden by a 'time:' header.
        # (Fixed: a redundant earlier assignment of a date-like string to
        # self.time was removed -- it was immediately overwritten here.)
        self.time = '00:00'
        self.categories = []
        self.subtitle = ''
        with open(self.filename, 'r') as f:
            header = True
            for line in f:
                tline = line.strip()
                if not header or len(tline) <= 0 or not re.match(r'^.+?:.+?$', line):
                    # First non-header-looking line permanently ends the header.
                    self.raw_content += line.rstrip() + '\n'
                    header = False
                else:
                    k, v = tline.split(':', 1)
                    self._set_prop(k, v)
        self.content = formatter(self.raw_content)
    def _set_prop(self, prop, val):
        '''Given a property (such as from a post), set that on
        ourselves. This is an internal method.
        '''
        prop = prop.lower().strip()
        val = val.strip()
        if prop == 'publish':
            self.publish = True if val == 'yes' else False
        elif prop == 'time':
            self.time = val
        elif prop == 'categories':
            self.categories = [x.strip() for x in val.split(',')]
        elif prop == 'subtitle':
            self.subtitle = val
        elif prop == 'title':
            self.title = val
    def __lt__(self, other):
        '''Internal: for sorting articles chronologically (date, then time).
        '''
        if self.date == other.date:
            return self.time < other.time
        return self.date < other.date
| {
"repo_name": "zorkian/aurora",
"path": "aurora/core.py",
"copies": "1",
"size": "3657",
"license": "bsd-3-clause",
"hash": -2123723838344545800,
"line_mean": 30.2564102564,
"line_max": 88,
"alpha_frac": 0.4883784523,
"autogenerated": false,
"ratio": 3.693939393939394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4682317846239394,
"avg_score": null,
"num_lines": null
} |
import requests
import json
from pprint import pprint
class Aurora:
    """Thin client for the Nanoleaf Aurora HTTP API (v1).

    All requests go to http://<address>:<port>/api/v1/<auth_token>/...
    """
    def __init__(self, auth_token, address, port=16021):
        self.auth_token = auth_token
        self.address = address
        self.port = port
        # Base URI shared by every endpoint method below.
        self.uri_base = "http://%s:%d/api/v1/%s" % (address, port, auth_token)
    def _get_json(self, path):
        """GET `path` (relative to the base URI) and return decoded JSON.

        Raises requests.HTTPError on a non-2xx response.
        """
        uri = "%s%s" % (self.uri_base, path)
        r = requests.get(uri)
        if r.ok:
            return r.json()
        else:
            r.raise_for_status()
    def _put_json(self, path, data):
        """PUT JSON-encoded `data` to `path`; return True on success.

        Raises requests.HTTPError on a non-2xx response.
        """
        uri = "%s%s" % (self.uri_base, path)
        print("PUT'ing to URI %s with data %s" % (uri, json.dumps(data)))
        r = requests.put(uri, data=json.dumps(data))
        if r.ok:
            return True
        else:
            r.raise_for_status()
    def get_info(self):
        """Return the device's full info blob."""
        return self._get_json("")
    def get_effects(self):
        """Return the list of installed effects."""
        return self._get_json("/effects/effectsList")
    def get_state(self):
        """Return the whole state object."""
        return self._get_json("/state")
    def get_power(self):
        """Return whether the panels are switched on."""
        return self._get_json("/state/on")["value"]
    def get_brightness(self):
        """Return the current brightness value."""
        return self._get_json("/state/brightness")["value"]
    def get_brightness_max(self):
        """Return the maximum allowed brightness."""
        return self._get_json("/state/brightness")['max']
    def get_brightness_min(self):
        """Return the minimum allowed brightness."""
        return self._get_json("/state/brightness")['min']
    def set_brightness(self, new_brightness):
        """Set an absolute brightness and return the resulting value."""
        self._put_json("/state/brightness", {"brightness": {"value": int(new_brightness)}})
        return self.get_brightness()
    def increment_brightness(self, brightness_increment):
        """Adjust brightness by a relative amount; return the new value."""
        # Fixed: the payload key was misspelled "incrememnt", so the
        # device ignored the request.  "increment" matches the API and
        # the increment_hue() method below.
        self._put_json("/state/brightness", {"brightness": {"increment": int(brightness_increment)}})
        return self.get_brightness()
    def get_hue(self):
        """Return the current hue value."""
        return self._get_json("/state/hue")["value"]
    def get_hue_max(self):
        """Return the maximum allowed hue."""
        return self._get_json("/state/hue")["max"]
    def get_hue_min(self):
        """Return the minimum allowed hue."""
        return self._get_json("/state/hue")["min"]
    def set_hue(self, new_hue):
        """Set an absolute hue and return the resulting value."""
        self._put_json("/state/hue", {"hue": {"value": int(new_hue)}})
        return self.get_hue()
    def increment_hue(self, hue_increment):
        """Adjust hue by a relative amount; return the new value."""
        self._put_json("/state/hue", {"hue": {"increment": int(hue_increment)}})
        return self.get_hue()
    def delete_auth_token(self):
        """Revoke this client's auth token on the device; True on success."""
        r = requests.delete(self.uri_base)
        if r.ok:
            return True
        else:
            r.raise_for_status()
    @staticmethod
    def get_auth_token(address, port=16021):
        """Request a new auth token (device must be in pairing mode)."""
        uri = "http://%s:%d/api/v1/new" % (address, port)
        r = requests.post(uri)
        if r.ok:
            return r.json()["auth_token"]
        else:
            r.raise_for_status()
| {
"repo_name": "miriad/nanoleaf-aurora-python",
"path": "aurora.py",
"copies": "1",
"size": "3333",
"license": "apache-2.0",
"hash": 1019160575171354400,
"line_mean": 29.8611111111,
"line_max": 102,
"alpha_frac": 0.600060006,
"autogenerated": false,
"ratio": 3.5419766206163654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46420366266163654,
"avg_score": null,
"num_lines": null
} |
"""aurpackager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from aurpackager.settings import URL_PREFIX
def _application_directory():
    """Return URL_PREFIX normalised to the 'prefix/' form.

    A falsy URL_PREFIX is returned unchanged so the URL patterns below
    format it away to nothing.
    """
    if not URL_PREFIX:
        return URL_PREFIX
    return URL_PREFIX.strip('/') + '/'
# Route the REST API under <prefix>/api/ and everything else to the
# manager app; URL_PREFIX lets the whole site live under a subpath.
urlpatterns = [
    url(r'^{}api/'.format(_application_directory()), include('api.urls', namespace='api')),
    url(r'^{}'.format(_application_directory()), include('manager.urls', namespace='manager'))
]
| {
"repo_name": "colajam93/aurpackager",
"path": "aurpackager/urls.py",
"copies": "1",
"size": "1068",
"license": "mit",
"hash": 3811500732494090000,
"line_mean": 33.4516129032,
"line_max": 94,
"alpha_frac": 0.6825842697,
"autogenerated": false,
"ratio": 3.6203389830508477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48029232527508475,
"avg_score": null,
"num_lines": null
} |
# aus2.py
# July 30, 2016
# python3
# part of https://github.com/ron-rivest/2016-aus-senate-audit
"""
Framework for AUS senate audit by Bayesian audit method.
This code derives from aus.py, modified to utilize api.py
for interfacing with dividebatur code.
It has also been modified to use gamma-variates to draw from
Dirichlet posterior probability distribution, rather than using
Polya's urn method, for efficiency reasons.
"""
import collections
import random
# random.seed(1) # make deterministic
import time
import api
from itertools import chain, groupby
class RealElection(api.Election):
    """
    Class representing real election.
    """
    def __init__(self):
        super(RealElection, self).__init__()
        # NOTE(review): `contest_name` is not defined anywhere in this
        # module, so constructing RealElection raises NameError.  It
        # presumably needs to be a constructor parameter -- confirm.
        self.load_election(contest_name, max_tickets=5000)
    def draw_ballots(self, batch_size=100):
        """
        add interpretation of random sample of real ballots
        to election data structure
        """
        # NOTE(review): `batch_size` is accepted but never forwarded, and
        # the filename is a placeholder ("TBD") -- this path is unfinished.
        api.load_more_ballots(self, "filename-TBD")
    def get_outcome(self, new_ballot_weights):
        """
        Return result of scf (social choice function) on this election.
        """
        ### call Bowland's code here
        pass
class SimulatedElection(api.Election):
    """
    A synthetic election used to exercise the audit machinery.

    Ballots are biased so that (1, 2, ..., m) is the likely preference
    order, which makes the expected outcome easy to sanity-check.
    """
    def __init__(self, m, n):
        super(SimulatedElection, self).__init__()
        self.m = m                                   # number of candidates
        self.candidates = list(range(1, self.m+1))   # {1, 2, ..., m}
        self.candidate_ids = list(range(1, self.m+1))
        self.n = n                                   # number of cast ballots
        self.ballots_drawn = 0                       # cumulative count
        self.seats = int(m/2)                        # seat the best half
        self.electionID = "SimulatedElection " + time.asctime(time.localtime())
    def draw_ballots(self):
        """
        Draw a batch of simulated ballots and add them to the election.

        Candidate i receives the value i + v*U with U uniform in [0, 1)
        and v = m/2; sorting candidates by these values yields a ballot
        biased toward the order (1, 2, ..., m).  The default batch size
        is 100, capped so no more than the n cast ballots are ever drawn.
        """
        noise = self.m/2.0
        batch = min(100, self.n - self.ballots_drawn)
        for _ in range(batch):
            scored = [(position + noise*random.random(), candidate_id)
                      for position, candidate_id in enumerate(self.candidate_ids)]
            scored.sort()
            self.add_ballot(tuple(candidate_id for (_, candidate_id) in scored), 1.0)
        self.ballots_drawn += batch
    def get_outcome(self, new_ballot_weights):
        """
        Borda-count social choice function over the current sample.

        Each ballot contributes weight * position points per candidate
        (position 0 = most preferred, so fewer points is better).  The
        self.seats candidates with the lowest totals win; the outcome is
        returned as a sorted tuple of their candidate ids, so outcomes
        can be compared for equality.
        """
        scores = collections.Counter()
        for ballot in self.ballots:
            weight = new_ballot_weights[ballot]
            for position, candidate_id in enumerate(ballot):
                scores[candidate_id] += weight*position
        ranked = scores.most_common()
        ranked.reverse()
        winners = [candidate_id for (candidate_id, _) in ranked][:self.seats]
        return tuple(sorted(winners))
##############################################################################
# Drawing from Bayesian posterior (aka reweighting or fuzzing)
def get_new_ballot_weights(election, r):
"""
Return dict new_ballot_weights for this election, based
on using gamma variates to draw from Dirichlet distribution over
existing ballots, based on existing ballot weights.
Sum of new ballot weights should be r (approximately).
New weights are rounded down.
"""
new_ballot_weights = {}
for ballot in election.ballots:
old_weight = election.ballot_weights[ballot]
if old_weight > 0:
new_ballot_weights[ballot] = random.gammavariate(old_weight, 1.0)
else:
new_ballot_weights[ballot] = 0.0
total_weight = sum([new_ballot_weights[ballot]
for ballot in election.ballots])
for ballot in election.ballots:
new_ballot_weights[ballot] = int(r * new_ballot_weights[ballot] / total_weight)
return new_ballot_weights
##############################################################################
# Implementation of audit
def audit(election, alpha=0.05, k=4, trials=100):
    """
    Bayesian audit of given election.

    Repeatedly enlarges the ballot sample and, at each stage, runs
    `trials` posterior-based election simulations.  Stops when one
    outcome occurs in at least a (1 - alpha) fraction of the trials, or
    when every cast ballot has been examined.

    Input:
        election # election to audit
        alpha    # error tolerance (risk limit)
        k        # amount to increase sample size by (currently unused)
        trials   # trials (posterior simulations) per sample
    """
    print()
    print("Audit of simulated election")
    print("ElectionID:", election.electionID)
    print("Candidates are:", election.candidates)
    print("Number of ballots cast:", election.n)
    print("Number of trials per sample:", trials)
    print("Number of seats contested for:", election.seats)
    seed = int(random.random()*1000000000000)
    random.seed(seed)
    print("Random number seed:", seed) # for reproducibility or debugging if needed
    print()
    # cast one "prior" ballot for each candidate, to
    # establish Bayesian prior. The prior ballot is a length-one
    # partial ballot with just a first-choice vote for that candidate.
    for candidate_id in election.candidate_ids:
        election.add_ballot((candidate_id,), 1.0)
    start_time = time.time()
    # dictionary from candidates to a set of ballots that elected them
    candidate_ballot_map = {}
    # defines low frequency candidates
    low_freq = 0.03
    # Fixed: the original code later declared `global candidate_outcomes`
    # after this local assignment, which is a SyntaxError in Python 3
    # ("name assigned before global declaration").  The variable is only
    # used within this function, so it is simply kept local.
    candidate_outcomes = None
    # overall audit loop
    stage_counter = 0
    while True:
        stage_counter += 1
        print("Audit stage number:", stage_counter)
        # draw additional ballots and add them to election.ballots
        election.draw_ballots()
        print(" sample size (including prior ballots) is",
              election.total_ballot_weight)
        print(" last ballot drawn:")
        print(" ", election.ballots[-1])
        # run trials in Bayesian manner
        # Each outcome is a tuple of candidates who have been elected,
        # in a canonical order. (NOT the order in which they were elected, say.)
        # We can thus test for equality of outcomes.
        print(" doing",
              trials,
              "Bayesian trials (posterior-based election simulations) in this stage.")
        outcomes = []
        for _ in range(trials):
            new_ballot_weights = get_new_ballot_weights(election, election.n)
            outcome = election.get_outcome(new_ballot_weights)
            # Remember one posterior weight set that elected each
            # candidate, for the low-frequency report after the loop.
            for candidate_id in outcome:
                if candidate_id not in candidate_ballot_map:
                    candidate_ballot_map[candidate_id] = new_ballot_weights
            outcomes.append(outcome)
        # find most common outcome and its number of occurrences
        best, freq = collections.Counter(outcomes).most_common(1)[0]
        print(" most common outcome (", election.seats, "seats ):")
        print(" ", best)
        print(" frequency of most common outcome:", freq, "/", trials)
        candidate_outcomes = collections.Counter(chain(*outcomes))
        print(" " + "Fraction present in outcome by candidate: ")
        print(" " + ', '.join([str(candidate) + ": " + str(c_freq/trials)
                               for candidate, c_freq
                               in sorted(candidate_outcomes.items(),
                                         key=lambda x: (x[1], x[0]))]))
        # stop if best occurs almost always (more than 1-alpha of the time)
        if freq >= trials*(1.0-alpha):
            print()
            print("Stopping: audit confirms outcome:")
            print(" ", best)
            print("Total number of ballots examined:", election.ballots_drawn)
            break
        if election.ballots_drawn >= election.n:
            print("Audit has looked at all ballots. Done.")
            break
        print()
    # Report one electing weight set for every candidate that appeared
    # only rarely in the final stage's outcomes.
    if candidate_outcomes:
        for candidate, c_freq in sorted(candidate_outcomes.items(),
                                        key=lambda x: (x[1], x[0])):
            if c_freq/trials < low_freq:
                print(" " +
                      "One set of ballots that elected low frequency candidate: " +
                      str(candidate) + " which occured in outcome with percent: " +
                      str(c_freq))
                print(" " + str(candidate_ballot_map[candidate]))
    print("Elapsed time:", time.time()-start_time, "seconds.")
# Script entry point: audit a simulated election with 100 candidates and
# 1,000,000 cast ballots.
audit(SimulatedElection(100, 1000000))
| {
"repo_name": "ron-rivest/2016-aus-senate-audit",
"path": "rivest/aus2.py",
"copies": "1",
"size": "9416",
"license": "apache-2.0",
"hash": 8769722515381876000,
"line_mean": 38.3974895397,
"line_max": 92,
"alpha_frac": 0.5867672048,
"autogenerated": false,
"ratio": 4.060370849504096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013126959050311624,
"num_lines": 239
} |
"""A Usage vector implementation used in a DNC.
This Usage vector is implemented as defined in the DNC architecture in
DeepMind's Nature paper:
http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html
Author: Austin Derrow-Pinion
"""
import collections
import sonnet as snt
import tensorflow as tf
# Ensure values are greater than epsilon to avoid numerical instability.
# NOTE(review): _EPSILON is not referenced in the code visible here --
# presumably used elsewhere in this module; verify before removing.
_EPSILON = 1e-6
# Recurrent state for the Usage core: holds the single `usage_vector`
# tensor.  (The field spec is the string 'usage_vector' -- the trailing
# comma is missing from the intended 1-tuple -- but namedtuple accepts a
# plain string of field names, so this works as written.)
UsageState = collections.namedtuple('UsageState', ('usage_vector'))
class Usage(snt.RNNCore):
"""A Usage vector exposes to what extent each memory location is used.
Every time external memory is written at some location, the usage here
increases to a maximum of 1. Free gates are used to decrease the usage
values to a minimum of 0.
"""
def __init__(self,
memory_size=16,
name='usage'):
"""Initialize a Usage vector used in a DNC.
Args:
memory_size: The number of memory slots in the external memory.
Written as `N` in the DNC paper. Default value is 16.
name: The name of the module (default 'usage').
"""
super(Usage, self).__init__(name=name)
self._memory_size = memory_size
self._state_size = UsageState(
usage_vector=tf.TensorShape([self._memory_size]))
def _build(self,
prev_write_weightings,
prev_read_weightings,
free_gates,
prev_state):
"""Compute one timestep of computation for the Usage vector.
This updates the usage vector in the Usage state to the next timestep
iteration.
Args:
prev_write_weightings: A Tensor of shape
`[batch_size, memory_size]` containing the weights to write
with. Represented as `w_{t-1}^w` in the DNC paper for time'
`t-1`. If `w_{t-1}^w[i]` is 0 then nothing is written to memory
regardless of the other parameters. Therefore it can be used to
protect the external memory from unwanted modifications.
prev_read_weightings: A Tensor of shape
`[batch_size, num_reads, memory_size]` containing the previous
read weights. This is written in the DNC paper as
`w_{t-1}^{r,i}` for time `t-1` for read head `i`.
free_gates: A Tensor of shape `[batch_size, num_reads]` containing
a free gate value bounded in `[0, 1]` for each read head and
emitted from the controller. The DNC paper writes the free
gates as `f_t^i` for time `t` and read head `i`.
prev_state: An instance of `TemporalLinkageState` containing the
previous state of this Temporal Linkage.
Returns:
A tuple `(output, next_state)`. Where `output` is a Tensor of shape
`[batch_size, memory_size]` containing allocation weighting vector
for each batch. The `next_state` is an instance of `UsageState`
containing this timestep's updated usage vector.
"""
memory_retention_vector = self.memory_retention_vector(
prev_read_weightings, free_gates)
updated_usage_vector = self.updated_usage_vector(
prev_state.usage_vector, prev_write_weightings,
memory_retention_vector)
allocation_weighting = self.allocation_weighting(updated_usage_vector)
return (allocation_weighting,
UsageState(usage_vector=updated_usage_vector))
def updated_usage_vector(self,
                         prev_usage_vector,
                         prev_write_weightings,
                         memory_retention_vector):
    """Compute the updated usage vector for this timestep.

    The usage vector (`u_t` in the DNC paper) is:

        u_t = (u_{t-1} + w_{t-1}^w - u_{t-1} * w_{t-1}^w) * psi_t

    where `w_{t-1}^w` are the previous write weightings and `psi_t` is
    this timestep's memory retention vector.

    Args:
        prev_usage_vector: A Tensor of shape `[batch_size, memory_size]`
            holding the previous usage values (`u_{t-1}`).
        prev_write_weightings: A Tensor of shape
            `[batch_size, memory_size]` holding the previous write
            weights (`w_{t-1}^w`). A zero entry means that slot was not
            written to.
        memory_retention_vector: A Tensor of shape
            `[batch_size, memory_size]` holding this timestep's memory
            retention values (`psi_t`).

    Returns:
        A Tensor of shape `[batch_size, memory_size]` with the updated
        usage values for this timestep.
    """
    # Probabilistic OR of prior usage and the write weighting, scaled by
    # how strongly each slot is retained.
    written_or_used = (prev_usage_vector + prev_write_weightings -
                       prev_usage_vector * prev_write_weightings)
    return written_or_used * memory_retention_vector
def memory_retention_vector(self, prev_read_weightings, free_gates):
    """Compute the memory retention vector for this timestep.

    The retention vector (`psi_t` in the DNC paper) says how much each
    external-memory location will *not* be freed by the free gates:

        psi_t = PRODUCT_{i=1}^R (1 - f_t^i * w_{t-1}^{r,i})

    Args:
        prev_read_weightings: A Tensor of shape
            `[batch_size, num_reads, memory_size]` holding the previous
            read weights (`w_{t-1}^{r,i}` for read head `i`).
        free_gates: A Tensor of shape `[batch_size, num_reads]` with a
            free gate in `[0, 1]` per read head, emitted by the
            controller (`f_t^i`).

    Returns:
        A Tensor of shape `[batch_size, memory_size]` with the memory
        retention values for this timestep.
    """
    # Broadcast each head's gate over memory:
    # [batch, reads, 1] * [batch, reads, memory] -> [batch, reads, memory].
    freed_per_head = tf.expand_dims(free_gates, 2) * prev_read_weightings
    # Multiply the per-head retention factors over the read-head axis.
    return tf.reduce_prod(1 - freed_per_head, axis=1)
def allocation_weighting(self, usage_vector):
    """Compute the allocation weighting vector providing write locations.

    The allocation weighting vector is written in the DNC paper as `a_t`
    for time `t`. Since the vector is a weighting, the values sum to less
    than or equal to 1. If all usages are 1, every memory location is
    considered important, so the allocation weighting becomes 0 and no
    new information can be written to external memory.

    Let `phi_t` be the ascending-sorted indices of the usage vector. The
    allocation weighting is computed as:

        a_t[phi_t[j]] = (1 - u_t[phi_t[j]])
                        * MULTIPLY_{i=1}^{j-1}(u_t[phi_t[i]])

    Args:
        usage_vector: A Tensor of shape `[batch_size, memory_size]`
            containing the usage vector values from this timestep. Written
            in the DNC paper as `u_t` for time `t`.

    Returns:
        A Tensor of shape `[batch_size, memory_size]` containing the values
        for the allocation vector for each batch of input.
    """
    # Rescale usages into [_EPSILON, 1] to avoid NaN from tf.cumprod on
    # exact zeros.
    usage_vector = _EPSILON + (1 - _EPSILON) * usage_vector
    # Ascending sort (least used first) plus the permutation that sorts.
    sorted_usage, indices = self.sorted_indices(usage_vector)
    non_usage = 1 - sorted_usage
    # Exclusive cumulative product: for slot j, the product of the usages
    # of all slots that are *less* used than it.
    usage_cumprod = tf.cumprod(sorted_usage, axis=1, exclusive=True)
    sorted_allocation = tf.expand_dims(non_usage * usage_cumprod, 2)
    # reorder sorted_allocation back to original order in usage_vector by
    # flattening across the batch and gathering with per-batch offsets.
    flattened_allocation = tf.reshape(sorted_allocation, [-1])
    batch_size = tf.shape(usage_vector)[0]
    # Offset of each batch row within the flattened [batch*memory] tensor.
    index_offset = tf.tile(
        tf.expand_dims(self._memory_size * tf.range(0, batch_size), 1),
        [1, self._memory_size])
    flattened_index_offset = tf.reshape(index_offset, [-1])
    flattened_indices = tf.reshape(indices, [-1]) + flattened_index_offset
    # NOTE(review): tf.gather applies `indices` as a forward gather; undoing
    # a sort usually requires the *inverse* permutation (cf. DeepMind's
    # batch_invert_permutation) - verify this ordering against a reference
    # DNC implementation.
    ordered_allocation = tf.gather(flattened_allocation, flattened_indices)
    return tf.reshape(ordered_allocation, [-1, self._memory_size])
def sorted_indices(self, usage_vector):
    """Sort the usage vector ascending and report the permutation.

    Sorting is non-differentiable, so these discontinuities must be
    ignored when computing training gradients; the DNC paper claims this
    does not affect learning. The paper writes this sorted free list as
    `phi_t` for time `t` - since it is ascending, `phi_t[1]` is the index
    of the least-used location in external memory at time `t`.

    Args:
        usage_vector: A Tensor of shape `[batch_size, memory_size]`
            holding this timestep's usage values (`u_t`).

    Returns:
        A `(values, indices)` tuple of Tensors, each of shape
        `[batch_size, memory_size]`: the usage values sorted ascending,
        and the corresponding indices into the original vector.
    """
    # top_k yields a full descending sort when k == memory_size; flip the
    # last axis of both outputs to obtain ascending order.
    descending_values, descending_indices = tf.nn.top_k(
        usage_vector, k=self._memory_size)
    ascending_values = tf.reverse(descending_values, [-1])
    ascending_indices = tf.reverse(descending_indices, [-1])
    return ascending_values, ascending_indices
@property
def state_size(self):
    """The `UsageState` shape description for this module's state."""
    return self._state_size
| {
"repo_name": "derrowap/DNC-TensorFlow",
"path": "src/dnc/usage.py",
"copies": "1",
"size": "10813",
"license": "mit",
"hash": 579693369395257600,
"line_mean": 46.2723214286,
"line_max": 79,
"alpha_frac": 0.5988162397,
"autogenerated": false,
"ratio": 4.217238689547582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 224
} |
"""A USB vehicle interface data source."""
import logging
import usb.core
import usb.util
from .base import BytestreamDataSource, DataSourceError
LOG = logging.getLogger(__name__)
class UsbDataSource(BytestreamDataSource):
    """A source to receive data from an OpenXC vehicle interface via USB."""
    DEFAULT_VENDOR_ID = 0x1bc4
    DEFAULT_PRODUCT_ID = 0x0001
    DEFAULT_READ_REQUEST_SIZE = 512

    # If we don't get DEFAULT_READ_REQUEST_SIZE bytes within this number of
    # milliseconds, bail early and return whatever we have - could be zero,
    # could be just less than 512. If data is really pumpin' we can get better
    # throughput if the READ_REQUEST_SIZE is higher, but this delay has to be
    # low enough that a single request isn't held back too long.
    DEFAULT_READ_TIMEOUT = 200

    # Timeout error codes differ between the libusb/openusb backends.
    LIBUSB0_TIMEOUT_CODE = -116
    LIBUSB1_TIMEOUT_CODE = -7
    OPENUSB_TIMEOUT_CODE = -62

    DEFAULT_INTERFACE_NUMBER = 0

    # IN endpoint numbers on the device (0x80 is OR'd in when reading).
    VEHICLE_DATA_IN_ENDPOINT = 2
    LOG_IN_ENDPOINT = 11

    def __init__(self, vendor_id=None, product_id=None, **kwargs):
        """Initialize a connection to the USB device's IN endpoint.

        Kwargs:
            vendor_id (str or int) - optionally override the USB device vendor
                ID we will attempt to connect to, if not using the OpenXC
                hardware.
            product_id (str or int) - optionally override the USB device product
                ID we will attempt to connect to, if not using the OpenXC
                hardware.
            log_mode - optionally record or print logs from the USB device,
                which are on a separate channel.

        Raises:
            DataSourceError if the USB device with the given vendor ID is not
            connected.
        """
        super(UsbDataSource, self).__init__(**kwargs)
        if vendor_id is not None and not isinstance(vendor_id, int):
            # Base 0 accepts "0x1bc4" as well as decimal strings.
            vendor_id = int(vendor_id, 0)
        self.vendor_id = vendor_id or self.DEFAULT_VENDOR_ID

        if product_id is not None and not isinstance(product_id, int):
            product_id = int(product_id, 0)
        self.product_id = product_id or self.DEFAULT_PRODUCT_ID

        devices = usb.core.find(find_all=True, idVendor=self.vendor_id,
                idProduct=self.product_id)
        # Claim the first matching device that accepts a configuration.
        for device in devices:
            self.device = device
            try:
                self.device.set_configuration()
            except usb.core.USBError as e:
                # Logger.warn() is a deprecated alias; warning() is correct.
                LOG.warning("Skipping USB device: %s", e)
            else:
                return
        raise DataSourceError("No USB vehicle interface detected - is one plugged in?")

    def read(self, timeout=None):
        """Read a chunk of vehicle data; returns '' on timeout."""
        return self._read(self.VEHICLE_DATA_IN_ENDPOINT, timeout)

    def read_logs(self, timeout=None):
        """Read a chunk from the device's separate log channel."""
        return self._read(self.LOG_IN_ENDPOINT, timeout, 64)

    def stop(self):
        """Stop the source and release the claimed USB resources."""
        super(UsbDataSource, self).stop()
        usb.util.dispose_resources(self.device)

    def _read(self, endpoint_address, timeout=None,
            read_size=DEFAULT_READ_REQUEST_SIZE):
        """Read up to read_size bytes from the given IN endpoint.

        Returns the decoded text read, or "" if the read timed out.
        Raises DataSourceError on any other USB failure.
        """
        timeout = timeout or self.DEFAULT_READ_TIMEOUT
        try:
            raw_binary = self.device.read(0x80 + endpoint_address, read_size,
                    self.DEFAULT_INTERFACE_NUMBER, timeout)
            return str(raw_binary, 'utf-8', 'ignore') # Formerly - Causes byte tranlation str(temp, 'ISO-8859-1')
        except (usb.core.USBError, AttributeError) as e:
            # AttributeError instances have no backend_error_code attribute;
            # getattr prevents a secondary AttributeError inside this handler
            # (the original accessed e.backend_error_code unconditionally).
            if getattr(e, 'backend_error_code', None) in (
                    self.LIBUSB0_TIMEOUT_CODE, self.LIBUSB1_TIMEOUT_CODE,
                    self.OPENUSB_TIMEOUT_CODE):
                # Timeout, it may just not be sending
                return ""
            raise DataSourceError("USB device couldn't be read", e)
| {
"repo_name": "openxc/openxc-python",
"path": "openxc/sources/usb.py",
"copies": "1",
"size": "3690",
"license": "bsd-3-clause",
"hash": -6111960586269614000,
"line_mean": 38.2553191489,
"line_max": 121,
"alpha_frac": 0.6344173442,
"autogenerated": false,
"ratio": 3.9891891891891893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5123606533389189,
"avg_score": null,
"num_lines": null
} |
#A useful class for dealing with 2-D arrays
import array
class Array2D:
    """A fixed-size 2-D pixel grid backed by a flat `array('B')` buffer.

    Each pixel occupies `element_width` consecutive bytes, stored
    row-major. `null_array` must be called before any pixel access.
    """

    def __init__(self, w, h, element_width):
        self.width = w
        self.height = h
        self.element_width = element_width

    def null_array(self):
        """Allocate the backing buffer, filled with zero bytes."""
        total_bytes = self.width * self.height * self.element_width
        self.data = array.array('B', (0,) * total_bytes)

    def get_pixel(self, x, y):
        """Return the bytes of the pixel at (x, y) as an array slice."""
        start = self.element_width * (x + y * self.width)
        return self.data[start:start + self.element_width]

    def set_pixel(self, x, y, *data):
        """Overwrite the pixel at (x, y) with element_width byte values."""
        start = self.element_width * (x + y * self.width)
        for offset in range(self.element_width):
            self.data[start + offset] = data[offset]

    def iter_pixels(self):
        """Yield the first byte of every pixel, in row-major order."""
        step = self.element_width
        for start in range(0, self.width * self.height * step, step):
            yield self.data[start:start + step][0]

    def get_area(self, x, y, dx, dy):
        """Copy a dx-by-dy rectangle starting at (x, y) into a new array."""
        # Width in bytes of one output row.
        row_bytes = dx * self.element_width
        out = array.array('B', (0,) * (dy * row_bytes))
        # Source index into the image buffer.
        src = self.element_width * (x + y * self.width)
        # Destination index into the output buffer.
        dst = 0
        for _row in range(dy):
            out[dst:dst + row_bytes] = self.data[src:src + row_bytes]
            src += self.width * self.element_width
            dst += row_bytes
        return out

    def contiguous(self, x, y, scale):
        """Check whether the scale-by-scale square at (x, y) is uniform.

        Returns the colour's bytes (truthy) when every pixel in the
        square matches, otherwise False.
        """
        area = self.get_area(x, y, scale, scale)
        step = self.element_width
        previous = area[:step]
        for start in range(step, len(area), step):
            current = area[start:start + step]
            if current != previous:
                return False
            previous = current
        return previous

    def __eq__(self, other):
        same_shape = ((self.width, self.height, self.element_width) ==
                      (other.width, other.height, other.element_width))
        return same_shape and self.data == other.data
| {
"repo_name": "barneygale/mcocr",
"path": "mcocr/array2d.py",
"copies": "1",
"size": "2038",
"license": "bsd-3-clause",
"hash": 4473067598729724400,
"line_mean": 32.9666666667,
"line_max": 147,
"alpha_frac": 0.5628066732,
"autogenerated": false,
"ratio": 3.48972602739726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45525327005972605,
"avg_score": null,
"num_lines": null
} |
# a/user/briefing.py
import json
import os
import records
from .mission import Mission
from f1.s.core.persistent import Persistent
# from f1.a.user.session import Session
class Briefing(Persistent):
    """Loads a device's mission briefing from the persistent store.

    Acts as a lazily-created singleton: obtain the shared instance via
    `get_me` rather than constructing directly.
    """

    # Shared singleton instance, populated on first get_me() call.
    me = None

    def __init__(self, account, device_id, boot_config):
        """Fetch and unpack the stored briefing for `device_id`.

        Raises AssertionError when the store does not hold exactly one
        briefing for the device, or when the stored briefing is not a dict.
        """
        super().__init__(boot_config)
        self.account = account
        self.device_id = device_id
        self.briefing = None
        print('briefing.device_id', device_id)
        # from db (Persistent)
        # NOTE(review): self.briefing was just set to None, so this branch
        # always runs - the cache check appears vestigial; confirm.
        if not self.briefing:
            data = {'device_id': self.device_id}
            result = self.select_from_database(data, object_class='AUserBriefing')
            assert len(result) == 1, 'found {} briefing objects'.format(len(result))
            # print('briefing.result:')
            # print(type(result), result)
            # self.briefing = json.load(result[0])
            self.briefing = result[0]
            assert type(self.briefing) is dict, 'type(self.briefing) is dict'
            # assert type(self.briefing) == dict, 'type(self.briefing) == dict'
            print('briefing.briefing:')
            print(type(self.briefing), self.briefing['mission']['id'])
            # print('briefing.super().__init__(briefing)')
            # NOTE(review): re-initializes Persistent with the loaded
            # briefing; failures are only printed, never re-raised - confirm
            # this best-effort behavior is intended.
            try:
                super().__init__(briefing=self.briefing)
            except Exception as exc:
                print(exc)
        # Convenience attributes unpacked from the stored briefing dict.
        self.mission_id = self.briefing['mission']['id']
        self.details = self.briefing['details']
        self.mission = Mission(self.mission_id)

    @classmethod
    def get_me(cls, account, device_id, boot_config):
        """Return the shared Briefing, creating it on first use."""
        if cls.me is None:
            cls.me = Briefing(account, device_id, boot_config)
        return cls.me
| {
"repo_name": "filemakergarage/zeroclient",
"path": "f1/a/user/briefing.py",
"copies": "1",
"size": "1715",
"license": "mit",
"hash": -3683757618219622000,
"line_mean": 30.1818181818,
"line_max": 84,
"alpha_frac": 0.587755102,
"autogenerated": false,
"ratio": 3.712121212121212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797711505152717,
"avg_score": 0.0004329617936988596,
"num_lines": 55
} |
# a user-created video game database that allows the user to add and remove games from the database, as well as search
# their database by a variety of parameters
#
# by Matthew Ridderikhoff, created 06/08/2016
import sqlite3
class Database:
    """An interactive, SQLite-backed video game collection.

    Games are stored as (id, name, console, esrbRating) rows. All user
    interaction flows through nested text menus starting at mainMenu().
    User-supplied values (game names) are always bound as SQL parameters,
    never spliced into the SQL text.
    """

    def __init__(self, dbName, tableName, invalidResponse):
        """Open (or create) the database file and the game table.

        Args:
            dbName: base file name of the database; ".db" is appended.
            tableName: table where games are stored. This identifier is
                interpolated into SQL statements, so it must be trusted -
                never user input.
            invalidResponse: message printed on invalid menu selections.
        """
        self.dbName = dbName + ".db"  # name of the database file
        self.tableName = tableName  # name of the table where the data is stored
        self.invalidResponse = invalidResponse  # error message when response is not one of the options
        # create connection to database
        self.conn = sqlite3.connect(self.dbName)
        # create cursor to execute SQLite commands
        self.c = self.conn.cursor()
        # create new table (if it already doesn't exist) with rows: id, name, console, and ESRB rating
        self.c.execute('CREATE TABLE IF NOT EXISTS ' +
                       self.tableName + '(id INTEGER, name TEXT, console TEXT, esrbRating TEXT)')

    def mainMenu(self):
        """Show the top-level menu and dispatch on the user's selection.

        Invalid selections re-display the menu; option 3 commits and
        closes the connection.
        """
        print('Choose one of the following:')
        print('1 - add/remove game(s)')
        print('2 - search your game database')
        print('3 - exit myGameDatabase')
        print('')
        response = self.getSelection()  # get user selection
        if response == '1':
            self.addOrRemoveMenu()  # sends user to add/remove menu
        elif response == '2':
            self.searchMenu()  # sends user to search menu
        elif response == '3':
            print('Goodbye')  # exits myGameDatabase
            self.conn.commit()
            self.conn.close()
        else:
            print(self.invalidResponse)  # restart mainMenu() because of the invalid response
            self.mainMenu()

    def addOrRemoveMenu(self):
        """Menu for adding a game, removing a game, or returning to main."""
        print('Would you like to:')
        print('1 - add a game to your collection')
        print('2 - remove a game from your collection')
        print('3 - return to the main menu')
        print('')
        response = self.getSelection()
        if response == '1':
            self.addGame()
        elif response == '2':
            self.removeGame()  # send user to removeGame
        elif response == '3':
            self.mainMenu()  # send user to main menu
        else:
            print(self.invalidResponse)  # restart addOrRemove() because of an invalid response
            self.addOrRemoveMenu()

    def addGame(self):
        """Add a new game to the database, rejecting duplicate names."""
        print("What is the name of the game you'd like to add?")
        nameResponse = input()
        duplicate = self.isGameInDatabase(nameResponse)  # check if the name given is already listed in the database
        # perform action based on whether or not a duplicate was entered
        if duplicate:
            print('That game is already in your database')
            self.addOrRemoveMenu()
        else:
            print('What console is ' + nameResponse + ' on?')
            consoleResponse = self.pickConsole(nameResponse)
            print("What is the ESRB rating of " + nameResponse + "?")
            esrbRatingResponse = self.pickESRB(nameResponse)
            # Parameterized INSERT: the name is user input and must not be
            # spliced into the SQL string (SQL injection).
            self.c.execute(
                'INSERT INTO ' + self.tableName + ' VALUES(?, ?, ?, ?)',
                (self.getLowestAvailableId(), nameResponse,
                 consoleResponse, esrbRatingResponse))
            print(nameResponse + " has been added to your database")
            self.addOrRemoveMenu()

    def pickConsole(self, nameResponse):
        """Prompt until the user picks a supported console; return its code."""
        print('1 - Xbox One, 2 - Playstation 4, 3 - WiiU')
        invalidResponse = True
        while invalidResponse:
            response = input()
            if response == '1':
                invalidResponse = False
                return 'XONE'
            elif response == '2':
                invalidResponse = False
                return 'PS4'
            elif response == '3':
                invalidResponse = False
                return 'WiiU'
            # will only execute if the user inputs an invalid response, asks the question again
            print(self.invalidResponse)
            print('What console is ' + nameResponse + ' on?')
            print('1 - Xbox One, 2 - Playstation 4, 3 - WiiU')

    def pickESRB(self, nameResponse):
        """Prompt until the user picks a supported ESRB rating; return it."""
        print('1 - E10+ for Everyone 10 and up, 2 - T for Teen, 3 - M for Mature')
        invalidResponse = True
        while invalidResponse:
            response = input()
            if response == '1':
                invalidResponse = False
                return 'E10+'
            elif response == '2':
                invalidResponse = False
                return 'T'
            elif response == '3':
                invalidResponse = False
                return 'M'
            print(self.invalidResponse)
            print("What is the ESRB rating of " + nameResponse + "?")
            print('1 - E10+ for Everyone 10 and up, 2 - T for Teen, 3 - M for Mature')

    def addGameSQLCode(self, name, console, esrbRating):
        """Build the INSERT statement for a game as a plain SQL string.

        Retained for backward compatibility. WARNING: the arguments are
        interpolated into the SQL text - never call this with untrusted
        input; prefer a parameterized INSERT as addGame() now does.
        """
        return "INSERT INTO " + self.tableName + " VALUES(" + str(
            self.getLowestAvailableId()) + ", '" + name + "', '" + console + "', '" + esrbRating + "')"

    def getLowestAvailableId(self):
        """Return one more than the highest id currently in use.

        NOTE: ids of removed games are never reused (same behavior as the
        original scan over all ids, but computed with a single MAX query).
        """
        self.c.execute("SELECT MAX(id) FROM " + self.tableName)
        highest = self.c.fetchone()[0]  # None when the table is empty
        return (highest or 0) + 1

    def isGameInDatabase(self, gameName):
        """Return True when a game with exactly gameName is stored."""
        # Parameterized lookup; also avoids fetching every row just to scan.
        self.c.execute('SELECT 1 FROM ' + self.tableName + ' WHERE name = ?',
                       (gameName,))
        return self.c.fetchone() is not None

    def removeGame(self):
        """Remove a game by name, if present in the database."""
        print("What game would you like to remove?")
        nameResponse = input()
        # checks to see if the name is actually in the database
        canRemove = self.isGameInDatabase(nameResponse)
        if canRemove:
            # Parameterized DELETE: the name is user input.
            self.c.execute('DELETE FROM ' + self.tableName + ' WHERE name = ?',
                           (nameResponse,))
            print(nameResponse + " has been successfully removed")
            self.addOrRemoveMenu()
        else:
            print("That game is not in your database")
            self.addOrRemoveMenu()

    def searchMenu(self):
        """Menu for choosing which column to search by."""
        print('How would you like to search?')
        print('1 - by name')
        print('2 - by console')
        print('3 - by ESRB rating')
        print('4 - return to main menu')
        print('')
        response = self.getSelection()
        if response == '1':
            self.searchByName()
        elif response == '2':
            self.searchByConsole()
        elif response == '3':
            self.searchByESRB()
        elif response == '4':
            self.mainMenu()  # return to main menu
        else:
            print(self.invalidResponse)  # ask again
            self.searchMenu()

    def searchByName(self):
        """Search by a single name, a series prefix, or list all by name."""
        print('Do you want to search for:')
        print('1 - a single game')
        print('2 - a series of games')
        print('3 - all your games organized alphabetically')
        response = self.getSelection()
        if response == '1':
            print('What is the name of the game?')
            name = input()
            validName = self.isGameInDatabase(name)  # check if game is in the database
            if validName:
                # Parameterized exact-name lookup.
                self.c.execute('SELECT * FROM ' + self.tableName +
                               ' WHERE name = ?', (name,))
                self.printSearchResults(self.c.fetchall())
            else:
                print("That game is not in your database")
        elif response == '2':
            print("What is the name of the series?")
            name = input()
            # LIKE pattern is built in Python and bound as a parameter.
            self.c.execute('SELECT * FROM ' + self.tableName +
                           ' WHERE name like ?', (name + '%',))
            self.printSearchResults(self.c.fetchall())
        elif response == '3':
            self.c.execute('SELECT * FROM ' + self.tableName + ' ORDER BY name ASC')
            self.printSearchResults(self.c.fetchall())
        else:
            print(self.invalidResponse)
            self.searchByName()
        self.mainMenu()

    def searchByConsole(self):
        """Search by console (PS4, XONE, WiiU)."""
        print('Do you want to see games on the console')
        print('1 - Playstation 4')
        print('2 - Xbox One')
        print('3 - WiiU')
        response = self.getSelection()
        # The search parameters below are fixed constants, not user input.
        if response == '1':
            self.c.execute(self.createSearchSQL('console', 'PS4'))
            self.printSearchResults(self.c.fetchall())
        elif response == '2':
            self.c.execute(self.createSearchSQL('console', 'XONE'))
            self.printSearchResults(self.c.fetchall())
        elif response == '3':
            self.c.execute(self.createSearchSQL('console', 'WiiU'))
            self.printSearchResults(self.c.fetchall())
        else:
            print(self.invalidResponse)
            self.searchByConsole()
        self.mainMenu()

    def searchByESRB(self):
        """Search by ESRB rating (E10+, T, M)."""
        print('Do you want to see games with an ESRB rating of:')
        print('1 - E10+')
        print('2 - T')
        print('3 - M')
        response = self.getSelection()
        # The search parameters below are fixed constants, not user input.
        if response == '1':
            self.c.execute(self.createSearchSQL('esrbRating', 'E10+'))
            self.printSearchResults(self.c.fetchall())
        elif response == '2':
            self.c.execute(self.createSearchSQL('esrbRating', 'T'))
            self.printSearchResults(self.c.fetchall())
        elif response == '3':
            self.c.execute(self.createSearchSQL('esrbRating', 'M'))
            self.printSearchResults(self.c.fetchall())
        else:
            print(self.invalidResponse)
            self.searchByESRB()
        self.mainMenu()

    def createSearchSQL(self, category, searchParameter):
        """Build a SELECT filtered on one column as a plain SQL string.

        WARNING: the arguments are interpolated into the SQL text - only
        call with the fixed constants used internally, never user input.
        """
        return 'SELECT * FROM ' + self.tableName + ' WHERE "' + category + '" = "' + searchParameter + '"'

    def getSelection(self):
        """Prompt for and return one line of user input."""
        print('Your Selection:')
        response = input()
        return response

    def printSearchResults(self, results):
        """Display search result rows in a user-friendly format."""
        print('')
        for game in results:
            name = game[1]  # index 1 will always have the name (index 0 has the id)
            console = game[2]  # index 2 will always be the game's console
            esrb = game[3]  # index 3 will always be the game's ESRB rating
            print(name + ' : ' + console + ' : ' + esrb)
        print('')
# creates a new database with the given parameters, and sends the user to the main menu, which runs the application
if __name__ == "__main__":
    # Guarded so importing this module no longer starts the interactive
    # menu loop as a side effect.
    db = Database("myGameDatabase", "tableName", "Invalid Response")
    db.mainMenu()
| {
"repo_name": "MattRidderikhoff/My-Game-Database",
"path": "Database.py",
"copies": "1",
"size": "12860",
"license": "mit",
"hash": -2696803214163047000,
"line_mean": 35.7126099707,
"line_max": 122,
"alpha_frac": 0.5728615863,
"autogenerated": false,
"ratio": 4.404109589041096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5476971175341095,
"avg_score": null,
"num_lines": null
} |
"""A user-friendly Bash module for Python.
Built upon Alex Couper's `bash` package. (https://github.com/alexcouper/bash)
"""
# TODO-SDH It may be helpful to use psutil for pybasher.
# TODO-SDH Finish pybasher ASAP
# TODO-SDH Look at some examples for pybasher.
# TODO-SDH Update the README for pybasher
import platform
import sys
from OrthoEvol.utilities import FullUtilities
# Set up run cmd function
runcmd = FullUtilities().system_cmd
# True when the available subprocess module supports the `timeout` kwarg;
# flipped to False below if a Python 2 runtime lacks subprocess32.
SUBPROCESS_HAS_TIMEOUT = True

if "windows" in platform.system().lower():
    # Commands are run through a POSIX shell; Windows is unsupported.
    raise ImportError("PyBasher is currently only supported on linux and osx.")
else:
    from subprocess import PIPE, Popen
    if sys.version_info < (3, 0):
        try:
            # subprocess32 backports the Python 3 subprocess API (including
            # timeout support) to Python 2.
            from subprocess32 import PIPE, Popen
        except ImportError:
            # You haven't got subprocess32 installed. If you're running 2.X this
            # will mean you don't have access to things like timeout
            SUBPROCESS_HAS_TIMEOUT = False

#import os
#import configparser
# TODO-SDH use a config file to load/use a list or group of common commands.
class BaseBash(object):
    """Utilize bash commands within python.

    Wraps `subprocess.Popen` so a shell command can be run via `_bash`
    and its stripped stdout read back through `str()` and truthiness.
    """

    def __init__(self):
        """Initialize the process handle and captured streams."""
        self.process = None  # Popen handle of the most recent command
        self.stdout = None   # raw bytes captured from stdout
        self.stderr = None   # raw bytes captured from stderr
        self.code = None     # return code of the most recent command

    def _bash(self, cmd, env=None, stdout=PIPE, stderr=PIPE, timeout=None, _sync=True):
        """Use subprocess to run bash commands.

        :param cmd: The bash command to be run.
        :param env: environment for the child process (Default value = None)
        :param stdout: (Default value = PIPE)
        :param stderr: (Default value = PIPE)
        :param timeout: seconds passed to `_sync` (Default value = None)
        :param _sync: wait for completion immediately (Default value = True)
        :returns: self, so calls can be chained or inspected.
        """
        self.process = Popen(cmd, shell=True, stdout=stdout, stdin=PIPE,
                             stderr=stderr, env=env)
        if _sync:
            self._sync(timeout)
        return self

    def _sync(self, timeout=None):
        """Wait for the running process and capture its output.

        :param timeout: (Default value = None)
        :raises ValueError: if a timeout is given but the subprocess
            module in use does not support one.
        """
        kwargs = {'input': self.stdout}
        if timeout:
            kwargs['timeout'] = timeout
            if not SUBPROCESS_HAS_TIMEOUT:
                raise ValueError(
                    "Timeout given but subprocess doesn't support it. "
                    "Install subprocess32 and try again."
                )
        self.stdout, self.stderr = self.process.communicate(**kwargs)
        self.code = self.process.returncode
        return self

    def __repr__(self):
        return self._value()

    def __unicode__(self):
        return self._value()

    def __str__(self):
        return self._value()

    def __nonzero__(self):
        # Python 2 truthiness hook; delegate to __bool__.
        return self.__bool__()

    def __bool__(self):
        # BUG FIX: was `self.value()`, which does not exist and raised
        # AttributeError on every truthiness check; the accessor is _value.
        return bool(self._value())

    def _value(self):
        """Return the captured stdout stripped and decoded, or ''."""
        if self.stdout:
            return self.stdout.strip().decode(encoding='UTF-8')
        return ''
class PyBasher(BaseBash):
    """Convenience wrappers for common bash commands."""

    def __init__(self):
        super().__init__()

    def cp(self):
        """Copy file."""
        # The command string is still a stub awaiting implementation.
        command = ''
        self._bash(command)
| {
"repo_name": "datasnakes/Datasnakes-Scripts",
"path": "OrthoEvol/Tools/pybasher/bash.py",
"copies": "1",
"size": "3165",
"license": "mit",
"hash": -4535596205700652000,
"line_mean": 27.5135135135,
"line_max": 87,
"alpha_frac": 0.595892575,
"autogenerated": false,
"ratio": 4.0681233933161955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014615986838209059,
"num_lines": 111
} |
"""A user interface for interacting with the network."""
from tkinter import Tk, Frame, Button, Label, Entry, Menu, messagebox, Toplevel
from tomography import Tomography
from Node import Node, Server, EndUser
from Connection import Connection
from Link import Link
class Display(Frame):
"""Provide the user with a visualization of the network and a way to interact with it."""
def __init__(self, master):
Frame.__init__(self, master)
self.master = master
self.master.title("Tomography")
self.master.protocol("WM_DELETE_WINDOW", self.shutdown)
self.tomography = Tomography()
self.menu_creation()
def menu_creation(self):
"""
Helper method to define the menu bar
"""
self.menu_bar = Menu(self.master)
self.file_menu = Menu(self.menu_bar, tearoff=0)
self.edit_menu = Menu(self.menu_bar, tearoff=0)
self.help_menu = Menu(self.menu_bar, tearoff=0)
self.menu_bar.add_cascade(label="File", menu=self.file_menu)
self.menu_bar.add_cascade(label="Edit", menu=self.edit_menu)
self.menu_bar.add_cascade(label="Help", menu=self.help_menu)
self.edit_menu_create()
self.file_menu_create()
self.help_menu_create()
self.master.config(menu=self.menu_bar)
def edit_menu_create(self):
self.edit_menu.add_command(label="Node", command=self.node_popup)
self.edit_menu.add_command(label="Link", command=self.link_popup)
self.edit_menu.add_command(label="Connection", command=self.connection_popup)
self.edit_menu.add_separator
#self.edit_menu.add_command(label="Remove First", command=self.remove_first)
#self.edit_menu.add_command(label="Remove Last", command=self.remove_last)
self.edit_menu.add_command(label="Remove All", command=self.master_blaster)
def master_blaster(self):
"""Deletes all objects on the graph."""
#pass an empty list
self.tomography.connections = []
self.tomography.nodes = []
def help_menu_create(self):
self.help_menu.add_command(label="View Help", command=self.help_message_box)
def help_message_box(self):
#pause the animation
self.help_message_str = "Sam Doud needs to write this up"
#launch the message box
messagebox.showinfo("Help", self.help_message_str)
def file_menu_create(self):
#self.file_menu.add_command(label="Save", command=self.save)
#self.file_menu.add_command(label="Open", command=self.open)
self.file_menu.add_command(label="Exit", command=self.master.quit)
#self.file_menu.add_separator()
#self.file_menu.add_command(label="Save as GIF", command=self.save_gif_handler)
#self.file_menu.add_command(label="Save as Video", command=self.save_video_handler)
def node_popup(self):
self.width = 5
self.top = Toplevel(self.master)
self.master.wait_window(self.top)
self.type_label = Label(self.top, text="Select a node type")
self.type_entry = Entry(self.top, width=self.width, bd=self.bd)
self.create_node_submit = Button(self.top, text="Create Node", command=self.node_cleanup)
self.top.bind("<Return>", self.node_cleanup)
def node_cleanup(self):
if self.type_entry.get():
type_of_node = self.type_entry.get()
if type_of_node.lower is 'server':
new_node = Server()
if type_of_node.lower is 'enduser':
new_node = EndUser()
if new_node:
self.add_node(new_node)
self.top.destroy()
def link_popup(self):
self.width = 5
self.default_lag = 1
self.default_buffer = 1
self.top = Toplevel(self.master)
self.master.wait_window(self.top)
self.connection_label = Label(self.top, text="Select a Connection to add a link to")
self.connection_entry = Entry(self.top, width=self.width, bd=self.bd)
self.lag_label = Label(self.top, text="Select a lag time")
self.lag_entry = Entry(self.top, width=self.width, bd=self.bd)
self.buffer_label = Label(self.top, text="Select a buffer size")
self.buffer_entry = Entry(self.top, width=self.width, bd=self.bd)
self.create_node_submit = Button(self.top, text="Create link", command=self.link_cleanup)
self.top.bind("<Return>", self.node_cleanup)
def link_cleanup(self):
if self.connection_entry.get():
#get the connection. dummy code for now
c = Connection(1, 2)
link = Link(c.start_node, c.end_node,
self.lag_entry.get()
if self.lag_entry.get()
else self.default_lag,
self.buffer_entry.get()
if self.buffer_entry.get()
else self.default_buffer)
self.tomography.connections[self.tomography.connections.index(c)].add_link(link=link)
self.top.destroy()
def connection_popup(self):
self.width = 5
self.top = Toplevel(self.master)
self.master.wait_window(self.top)
if len(self.tomography.nodes) < 2:
#message that a connection cannot be made
self.top.destroy()
self.start_label = Label(self.top, text="Select a start node")
self.start_entry = Entry(self.top, width=self.width, bd=self.bd)
self.end_label = Label(self.top, text="Select a end node")
self.end_entry = Entry(self.top, width=self.width, bd=self.bd)
self.create_node_submit = Button(self.top, text="Create Node", command=self.node_cleanup)
self.top.bind("<Return>", self.node_cleanup)
def connection_cleanup(self):
if self.start_entry.get() and self.end_entry.get():
pass
self.top.destroy()
def tick(self, time=1):
"""Increment n units of time."""
if time < 1:
raise Exception("Must provide a positive real value for time" +
"(Although it really should be an integer you oaf.)")
for _counter in range(time):
self.tomography.tick()
self.draw()
def shutdown(self):
    """Closes the application 'gracefully'."""
    # quit() stops the Tk mainloop; destroy() then tears down the widget tree.
    self.master.quit()
    self.master.destroy()
def add_node(self, node):
    """Add a node to the Tomography.

    The Tomography will assign an address if needed.
    """
    self.tomography.add_node(node)
def remove_node(self, node):
    """Safely remove a node from the Tomography."""
    self.tomography.remove_node(node)
def connect_nodes(self, start_node, end_node):
    """Connect two Nodes. A start node should be upstream of the end_node."""
    self.tomography.add_connection(start_node, end_node)
def draw(self):
    """Draw all nodes and their connections (along with any notation about data flows)."""
    # Not yet implemented.
    pass
# Module-level entry point: build the Tk root window and run the UI event loop.
ROOT = Tk()
WINDOW = Display(master=ROOT)
WINDOW.mainloop()
| {
"repo_name": "SamuelDoud/tomography",
"path": "tomography/src/Display.py",
"copies": "1",
"size": "7084",
"license": "mit",
"hash": -5042831739538164000,
"line_mean": 40.6647058824,
"line_max": 97,
"alpha_frac": 0.6183820415,
"autogenerated": false,
"ratio": 3.6267281105990783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47451101520990785,
"avg_score": null,
"num_lines": null
} |
"""A user profile shows the publically available information about a user."""
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
import datetime
class UserProfile(models.Model):
    """Public user profile."""

    # NOTE(review): related_name='profile' on a ForeignKey makes user.profile a
    # manager (queryset), not a single object; no on_delete is given (pre-2.0
    # Django style) -- confirm the targeted Django version.
    user = models.ForeignKey(User, related_name='profile')
    # Optional display name (falls back to the user's first name on creation).
    name = models.CharField(max_length=1000, blank=True, null=True)
    # Optional profile image.
    image = models.ImageField(blank=True, null=True)
    # Callable default: evaluated at save time, not at class definition time.
    date_created = models.DateField(default=datetime.datetime.now)

    def image_url(self):
        """Return image URL."""
        # Empty string when no image has been uploaded.
        if self.image and self.image.url:
            return self.image.url
        else:
            return ''
def check_profile_exists(sender, instance, signal, *args, **kwargs):
    """Create a profile if a user does not have one.

    Connected to the post_save signal for User below.
    """
    if sender is User:
        # .exists() issues a cheap EXISTS query instead of counting all rows.
        if not UserProfile.objects.filter(user=instance).exists():
            user_profile = UserProfile()
            user_profile.user = instance
            user_profile.name = instance.first_name
            user_profile.save()


post_save.connect(check_profile_exists, sender=User)
| {
"repo_name": "CornerstoneLabs/club-prototype",
"path": "app-server/userprofile/models.py",
"copies": "1",
"size": "1136",
"license": "mit",
"hash": 2055803166582848800,
"line_mean": 32.4117647059,
"line_max": 77,
"alpha_frac": 0.6698943662,
"autogenerated": false,
"ratio": 4.057142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 34
} |
# aus.py
# July 27, 2016
# python3
# code sketches for AUS senate audit by Bayesian audit method
import collections
import copy
import random
# random.seed(1) # make deterministic
class Election:
    """Abstract base for election data sources (real or simulated)."""
    pass
class RealElection(Election):
    """Hooks for auditing a real election; the methods below are stubs."""

    def __init__(self):
        # NOTE(review): candidates starts empty, so prior_ballots is empty too
        # -- presumably to be populated from real election data later.
        self.candidates = []
        self.prior_ballots = [ (c,) for c in self.candidates ]

    def get_candidates(self):
        # Stub: returns a fresh empty list, not self.candidates.
        return []

    def draw_ballots(self, k):
        """
        return list of up to k paper ballots
        """
        # Stub: returns None; callers should expect a list once implemented.
        return None

    def scf(self, sample):
        """ Return result of scf (social choice function) on this sample. """
        ### call Bowland's code here
        return None
class SimulatedElection(Election):
    """Synthetic election used to exercise the audit machinery."""

    def __init__(self, m, n):
        self.m = m                                            # number of candidates
        self.candidates = list(range(1, self.m + 1))          # {1, 2, ..., m}
        self.n = n                                            # number of cast ballots
        self.prior_ballots = [(c,) for c in self.candidates]  # one per candidate
        self.ballots_drawn = 0                                # cumulative count

    def draw_ballots(self, k):
        """
        Return a list of up to k simulated ballots for testing purposes,
        or [] once all n ballots have been drawn.

        Ballots are biased so (1, 2, ..., m) is the likely winner: each
        candidate i gets score i + v*U with U ~ uniform(0, 1), and the
        ballot ranks candidates by that score.
        """
        v = 5.0  # noise level
        k = min(k, self.n - self.ballots_drawn)
        ballots = []
        for _ in range(k):
            scored = [(idx + v * random.random(), c)
                      for idx, c in enumerate(self.candidates)]
            ballots.append([c for (_score, c) in sorted(scored)])
        self.ballots_drawn += k
        return ballots

    def scf(self, sample):
        """
        Social choice function over the sample, here a Borda count.

        Returns a tuple of candidates in decreasing order of popularity
        (lower accumulated rank positions mean more popular).
        """
        tally = collections.Counter()
        for ballot in sample:
            for position, candidate in enumerate(ballot):
                tally[candidate] += position
        ranking = tally.most_common()
        ranking.reverse()
        return tuple(c for (c, _count) in ranking)
##############################################################################
# A ballot is an abstract blob.
# Here implemented as a tuple.
# The only operations we need on ballots are:
# -- obtaining them from election data
# -- putting them into a list
# -- copying one of them
# -- making up a list of "prior ballots" expressing
# our Bayesian prior
# -- (possibly re-weighting ballots?)
##############################################################################
# Implementation of polya's urn
def urn(election, sample, r):
    """
    Return a list of length r generated from sample and the prior ballots.

    Polya's urn: seed the urn with the prior ballots plus the sample, then
    repeatedly append a uniformly-drawn copy of an element already in the
    urn.  The prior ballots are stripped from the returned list, so the
    result starts with `sample`.  (Could potentially be optimized with
    gamma variates.)
    """
    pool = election.prior_ballots + sample
    draws_needed = r - len(sample)
    for _ in range(draws_needed):
        pool.append(random.choice(pool))
    return pool[len(election.prior_ballots):]
def test_urn(election):
    """Smoke-test urn(): draw a small sample and print one urn realization."""
    sample_size, urn_size = 5, 10
    print("test_urn", sample_size, urn_size)
    drawn = election.draw_ballots(sample_size)
    print(urn(election, drawn, urn_size))
# test_urn(SimulatedElection(3,36))
##############################################################################
# Implementation of audit
def audit(election, alpha=0.05, k=4, trials=100):
    """
    Bayesian audit of given election

    Input:
        election   # election to audit
        alpha      # error tolerance
        k          # amount to increase sample size by
        trials     # trials per sample
    """
    print("audit")
    print("Candidates are:", election.candidates)
    print("Number of ballots cast:", election.n)
    print("Number of trials per sample:", trials)
    # overall audit loop
    sample = []
    while True:
        # draw additional ballots and add them to sample
        sample_increment = election.draw_ballots(k)
        # BUG FIX: `sample_increment is []` compared identity against a brand
        # new list and was therefore always False, so ballot exhaustion was
        # never detected.  Test for emptiness (also handles a None return).
        if not sample_increment:
            print("Audit has looked at all ballots. Done.")
            break
        sample.extend(sample_increment)
        print("sample size is now", len(sample),":")
        # run trials in Bayesian manner
        # we assume that each outcome is
        # a list or tuple of candidates who have been elected,
        # in some sort of canonical or sorted order.
        # We can thus test for equality of outcomes.
        outcomes = [election.scf(urn(election, sample, election.n))
                    for _ in range(trials)]
        # find most common outcome and its number of occurrences
        best, freq = collections.Counter(outcomes).most_common(1)[0]
        print(" most common winner =",best, "freq =",freq)
        # stop if best occurs almost always
        if freq >= trials*(1.0-alpha):
            print("Stopping: audit confirms outcome:", best)
            break
# Run a simulated audit: 4 candidates, 10000 cast ballots (executes on import).
audit(SimulatedElection(4,10000))
| {
"repo_name": "ron-rivest/2016-aus-senate-audit",
"path": "rivest/aus.py",
"copies": "1",
"size": "5250",
"license": "apache-2.0",
"hash": 65468940656693610,
"line_mean": 30.4371257485,
"line_max": 87,
"alpha_frac": 0.5622857143,
"autogenerated": false,
"ratio": 4.057187017001546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5119472731301545,
"avg_score": null,
"num_lines": null
} |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import stocktime as st
import datetime as dt
from datetime import datetime
import time, requests, os, pickle, logging
__version__ = '0.0.1'
__license__ = 'MIT'
__author__ = 'Joshua Guo (1992gq@gmail.com)'
'''
Python get sh000001 day stock data for data analysis.
'''
def main():
    """Configure file logging, then run the index collector loop."""
    log_dir = os.curdir
    logging.basicConfig(filename=os.path.join(log_dir, 'log.txt'), level=logging.INFO)
    get_stock_index_5_min()
def get_stock_index_5_min():
    '''
    get stock index last price: 9:35~11:30(24) + 13:05~15:00(24) = 48 tick
    '''
    stockid = 's_sh000001' # SSE Composite Index (Shanghai)
    # initialze time data
    todaydate = st.todaydate
    opentime1 = st.opentime1 # NOTE: must be started before 9:35
    midclose = st.midclose
    opentime2 = st.opentime2
    closetime = st.closetime
    tick_shift = dt.timedelta(seconds=15)  # grace period past each session close
    tick_delta = dt.timedelta(minutes=5)   # one tick every five minutes
    tick_now = opentime1
    # If started after the morning session, begin ticking in the afternoon.
    if datetime.now() > midclose:
        tick_now = opentime2
    index_seq = []
    is_middle_saved = False
    # Polling loop: wakes every second, records one price per 5-minute tick.
    while True:
        try:
            now = datetime.now()
            mid_shift = midclose + tick_shift
            close_shift = closetime + tick_shift
            if (now >= opentime1 and now <= mid_shift) or (now >= opentime2 and now <= close_shift):
                # Within 3 seconds of the scheduled tick: sample and persist.
                if now.hour == tick_now.hour and now.minute == tick_now.minute:
                    if abs(now.second - tick_now.second) <= 3:
                        sindex = getStockIndexFromSina(stockid)
                        marketdatas = sindex.split(',')
                        # Field 1 of the quote string is the last price
                        # (assumed from usage -- TODO confirm Sina format).
                        index_seq.append(marketdatas[1])
                        tick_now = tick_now + tick_delta
                        save_list2file_pickle(index_seq, todaydate)
                        print('>>>>>>>>>>Save sequence to file by pickle : %s' % index_seq)
            else:
                # Lunch break: persist once, then resume at the afternoon open.
                if now > midclose and now < opentime2:
                    print('>>>>>>>>>>>>>>>Now it is in middle time!')
                    time.sleep(1)
                    if is_middle_saved == False:
                        print('>>>>>>>>>>Save sequence to file by pickle : %s' % index_seq)
                        save_list2file_pickle(index_seq, todaydate)
                        tick_now = opentime2
                        is_middle_saved = True
                    continue
                # Past the daily close: final save and exit the loop.
                elif tick_now > closetime:
                    print('>>>>>>>>save stock index to file by pickle : %s' % index_seq)
                    save_list2file_pickle(index_seq, todaydate)
                    break
        # Python 2 except syntax: any error is printed and the loop continues.
        except Exception, e:
            print(e)
        finally:
            print('>>>>>>>>>>>>refresh %s %d %d' % (tick_now, now.minute, now.second))
            time.sleep(1)
    print('>>>>>>>>>>>>>stock index sequence collector ending...')
def make_dir_if_not_exist(filepath):
    """Create directory *filepath* if it does not already exist.

    NOTE: exists()/mkdir() is subject to a race if another process creates
    the directory in between; acceptable for this single-process collector.
    """
    # Inverted the original "if exists: pass / else: mkdir" anti-pattern.
    if not os.path.exists(str(filepath)):
        os.mkdir(str(filepath))
def save_list2file_pickle(index_seq, subdir):
    '''
    save stock sequence to file by pickle
    '''
    filepath = './Stock Index/' + subdir
    make_dir_if_not_exist(filepath)
    filepath = filepath + '/'
    # FIX: use a context manager so the handle is closed even if dump() fails
    # (the original leaked the handle on an exception).
    with open(filepath + 'shindex_seq.pkl', 'wb') as output:
        # Pickle dictionary using protocol 0.
        pickle.dump(index_seq, output)
    logging.info('STOCKINDEX save stock sequence to file by pickle at %s' % datetime.now())
def getStockIndexFromSina(sid):
    """Fetch the raw quote payload for *sid* from Sina's quote API.

    Returns the comma-separated payload found between the double quotes of
    the response body, or "" when no quoted payload is present.
    """
    url = "http://hq.sinajs.cn/list=" + sid
    result = requests.get(url).content
    source = result.split("\"")
    # BUG FIX: the original tested `source.count >= 1`, which compares the
    # bound list method itself (always truthy) instead of calling anything.
    # source[1] only exists when the split produced at least two fragments.
    if len(source) > 1:
        return source[1]
    return ""
# Standard script entry guard.
if __name__ == '__main__':
    main()
| {
"repo_name": "JoshuaMichaelKing/Stock-SentimentAnalysis",
"path": "stockindex.py",
"copies": "1",
"size": "3813",
"license": "mit",
"hash": -7323772930541230000,
"line_mean": 33.7798165138,
"line_max": 100,
"alpha_frac": 0.5489316803,
"autogenerated": false,
"ratio": 3.669893514036786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4718825194336786,
"avg_score": null,
"num_lines": null
} |
#!/usr/bin/env python3
from lsh.model import LSHItem
import random
class LSHReader:
    """Parses lines of "<id> <signature>" text into LSHItem objects."""

    def __init__(self):
        # pyximport compiles the Cython 'bits' module on first import.
        import pyximport; pyximport.install()
        from lsh import bits
        # BUG FIX: 'bits' was previously only a local of __init__, so the
        # reference in process_line raised NameError at runtime.
        self._bits = bits

    def process_file(self, filelike):
        """Yield an LSHItem for every line of *filelike*."""
        for line in filelike:
            yield self.process_line(line.strip())

    def process_line(self, line):
        """Parse one "<id> <signature>" line into an LSHItem."""
        line = line.split(' ')
        assert len(line) == 2, '{} does not match expected format of space limited line'.format(line)
        id, signature = line
        id = int(id)
        # BUG FIX: this assignment was commented out, leaving 'sig' undefined
        # on the next line.  The first 32 characters are a binary bit string.
        sig = int(signature[:32], base=2)
        signature = self._bits.Hash(sig)
        item = LSHItem(id, signature)
        return item
class BinaryReader:
    """Reads fixed-width binary records: a 4-byte id plus a 16-byte signature."""

    def __init__(self):
        pass

    def process_file(self, filelike):
        # Yield LSHItem records until either field comes back empty (EOF).
        # NOTE(review): BitArray is not imported anywhere in this module, and
        # int(id) on raw bytes fails under Python 3 -- presumably this reader
        # is unused or expects helpers not visible here; confirm before use.
        while True:
            id, sig = filelike.read(4), filelike.read(16)
            if not len(id) or not len(sig):
                break
            else:
                yield LSHItem(int(id), BitArray(sig))
# Manual smoke test: parse the file named on the command line and print items.
if __name__ == '__main__':
    r = LSHReader()
    import sys
    for item in r.process_file(open(sys.argv[1], 'r')):
        print(item)
| {
"repo_name": "schwa-lab/lsh",
"path": "lsh/readers.py",
"copies": "1",
"size": "1163",
"license": "mit",
"hash": 6751838656523927000,
"line_mean": 23.7446808511,
"line_max": 101,
"alpha_frac": 0.5649183147,
"autogenerated": false,
"ratio": 3.668769716088328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4733688030788328,
"avg_score": null,
"num_lines": null
} |
"""Australian-specific Form helpers."""
from __future__ import unicode_literals
import re
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from localflavor.compat import EmptyValueCompatMixin
from localflavor.deprecation import DeprecatedPhoneNumberFormFieldMixin
from .au_states import STATE_CHOICES
from .validators import AUBusinessNumberFieldValidator, AUCompanyNumberFieldValidator, AUTaxFileNumberFieldValidator
PHONE_DIGITS_RE = re.compile(r'^(\d{10})$')
class AUPostCodeField(RegexField):
    """
    Australian post code field.

    Assumed to be 4 digits.
    Northern Territory 3-digit postcodes should have leading zero.
    """

    default_error_messages = {
        'invalid': _('Enter a 4 digit postcode.'),
    }

    def __init__(self, max_length=4, min_length=None, *args, **kwargs):
        # Exactly four digits; length bounds are forwarded positionally to
        # RegexField.
        super(AUPostCodeField, self).__init__(r'^\d{4}$',
            max_length, min_length, *args, **kwargs)
class AUPhoneNumberField(EmptyValueCompatMixin, CharField, DeprecatedPhoneNumberFormFieldMixin):
    """
    A form field that validates input as an Australian phone number.

    Valid numbers have ten digits.

    .. deprecated:: 1.4
        Use the django-phonenumber-field_ library instead.

    .. _django-phonenumber-field: https://github.com/stefanfoulis/django-phonenumber-field
    """

    default_error_messages = {
        'invalid': 'Phone numbers must contain 10 digits.',
    }

    def clean(self, value):
        """Validate a phone number. Strips parentheses, whitespace and hyphens."""
        super(AUPhoneNumberField, self).clean(value)
        if value in self.empty_values:
            return self.empty_value
        # BUG FIX: the pattern is now a raw string.  The original plain string
        # relied on invalid escape sequences ('\(' etc.), which raise a
        # DeprecationWarning on Python 3.6+ and will eventually be an error.
        value = re.sub(r'(\(|\)|\s+|-)', '', force_text(value))
        phone_match = PHONE_DIGITS_RE.search(value)
        if phone_match:
            return '%s' % phone_match.group(1)
        raise ValidationError(self.error_messages['invalid'])
class AUStateSelect(Select):
    """A Select widget that uses a list of Australian states/territories as its choices."""

    def __init__(self, attrs=None):
        # Choices come from localflavor's canonical STATE_CHOICES list.
        super(AUStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class AUBusinessNumberField(EmptyValueCompatMixin, CharField):
    """
    A form field that validates input as an Australian Business Number (ABN).

    .. versionadded:: 1.3

    .. versionchanged:: 1.4
    """

    default_validators = [AUBusinessNumberFieldValidator()]

    def to_python(self, value):
        """Normalize the ABN: uppercase with all spaces removed."""
        value = super(AUBusinessNumberField, self).to_python(value)
        if value not in self.empty_values:
            return value.upper().replace(' ', '')
        return self.empty_value

    def prepare_value(self, value):
        """Format the value for display (XX XXX XXX XXX)."""
        if value is None:
            return value
        compact = ''.join(value.split())
        groups = (compact[:2], compact[2:5], compact[5:8], compact[8:])
        return ' '.join(groups)
class AUCompanyNumberField(EmptyValueCompatMixin, CharField):
    """
    A form field that validates input as an Australian Company Number (ACN).

    .. versionadded:: 1.5
    """

    default_validators = [AUCompanyNumberFieldValidator()]

    def to_python(self, value):
        """Normalize the ACN: uppercase with all spaces removed."""
        value = super(AUCompanyNumberField, self).to_python(value)
        if value not in self.empty_values:
            return value.upper().replace(' ', '')
        return self.empty_value

    def prepare_value(self, value):
        """Format the value for display (XXX XXX XXX)."""
        if value is None:
            return value
        compact = ''.join(value.split())
        return ' '.join((compact[:3], compact[3:6], compact[6:]))
class AUTaxFileNumberField(CharField):
    """
    A form field that validates input as an Australian Tax File Number (TFN).

    .. versionadded:: 1.4
    """

    default_validators = [AUTaxFileNumberFieldValidator()]

    def prepare_value(self, value):
        """Format the value for display (XXX XXX XXX)."""
        if value is None:
            return value
        compact = ''.join(value.split())
        return ' '.join((compact[:3], compact[3:6], compact[6:]))
| {
"repo_name": "thor/django-localflavor",
"path": "localflavor/au/forms.py",
"copies": "2",
"size": "4295",
"license": "bsd-3-clause",
"hash": -8590122977940521000,
"line_mean": 30.3503649635,
"line_max": 116,
"alpha_frac": 0.6533178114,
"autogenerated": false,
"ratio": 3.936755270394134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008910980875932448,
"num_lines": 137
} |
a = u"String"
# Python 3.6
a = <error descr="Python version 2.7 does not support a 'F' prefix"><warning descr="Python version 3.5 does not support a 'F' prefix">f</warning></error>""
a = <error descr="Python version 2.7 does not support a 'F' prefix"><warning descr="Python version 3.5 does not support a 'F' prefix">F</warning></error>""
a = <error descr="Python version 2.7 does not support a 'RF' prefix"><warning descr="Python version 3.5 does not support a 'RF' prefix">rf</warning></error>""
a = <error descr="Python version 2.7 does not support a 'FR' prefix"><warning descr="Python version 3.5 does not support a 'FR' prefix">fr</warning></error>""
a = <error descr="Python version 2.7 does not support a 'FU' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'FU' prefix">fu</warning></error>""
a = <error descr="Python version 2.7 does not support a 'UF' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'UF' prefix">uf</warning></error>""
a = <error descr="Python version 2.7 does not support a 'BF' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'BF' prefix">bf</warning></error>""
a = <error descr="Python version 2.7 does not support a 'FB' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'FB' prefix">fb</warning></error>""
a = <error descr="Python version 2.7 does not support a 'UFR' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'UFR' prefix">ufr</warning></error>""
# python 3.3
a = u""
a = r""
a = b""
a = <error descr="Python version 2.7 does not support a 'RB' prefix">rb</error>""
a = br""
# python 3.2, 3.1
a = r""
a = b""
a = br""
# python 3.0
a = r""
a = b""
# python 2.7, 2.6
a = u""
a = r""
a = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'UR' prefix">ur</warning>""
a = b""
a = br""
# python 2.5
a = u""
a = r""
a = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'UR' prefix">ur</warning>""
# combined, PY-32321
b = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not allow to mix bytes and non-bytes literals">u"" b""</warning>
b = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not allow to mix bytes and non-bytes literals">r"" b""</warning>
b = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not allow to mix bytes and non-bytes literals"><error descr="Python version 2.7 does not support a 'F' prefix"><warning descr="Python version 3.5 does not support a 'F' prefix">f</warning></error>"" b""</warning>
# never was available
a = <error descr="Python version 2.7 does not support a 'RR' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'RR' prefix">rr</warning></error>""
a = <error descr="Python version 2.7 does not support a 'BB' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'BB' prefix">bb</warning></error>""
a = <error descr="Python version 2.7 does not support a 'UU' prefix"><warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a 'UU' prefix">uu</warning></error>"" | {
"repo_name": "dahlstrom-g/intellij-community",
"path": "python/testData/inspections/PyCompatibilityInspection/stringLiteralExpression.py",
"copies": "10",
"size": "3240",
"license": "apache-2.0",
"hash": 8809982432956743000,
"line_mean": 56.875,
"line_max": 281,
"alpha_frac": 0.6561728395,
"autogenerated": false,
"ratio": 2.781115879828326,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8437288719328326,
"avg_score": null,
"num_lines": null
} |
# Autelis Pool Control wrapper class
# Designed to work with Jandy TCP Serial Port Firmwares v. 1.6.9
# and higher and Pentair TCP Serial Port Firmwares v. 1.6.7 and higher
import requests
import re
import socket
import xml.etree.ElementTree as xml
import logging
import sys
# Parameters for Pool Control HTTP Command Interface
_STATUS_ENDPOINT = "status.xml"
_COMMAND_ENDPOINT = "set.cgi"
_AUTELIS_ON_VALUE = 1
_AUTELIS_OFF_VALUE = 0
# Parameters for Pool Control TCP Serial Port interface
_CONTROLLER_TCP_PORT = 6000
_TEST_TCP_MSG = "#OPMODE?\r"
_TEST_RTN_SUCCESS = "!00 OPMODE="
_STATUS_UPDATE_MATCH_PATTERN = r"!00 ([A-Z0-9]+)=([A-Z0-9]+) ?[FC]?\r\n"
_BUFFER_SIZE = 32
class AutelisInterface:
    """Thin HTTP client for the Autelis Pool Control bridge.

    Wraps the bridge's HTTP command interface: GET status.xml for state and
    GET set.cgi for commands, both over HTTP basic auth.
    """

    # Primary constructor method
    def __init__(self, controllerAddr, userName, password, logger=None):
        # declare instance variables
        self.controllerAddr = controllerAddr
        self._userName = userName
        self._password = password
        # setup basic console logger for debugging when none was supplied
        if logger == None:
            logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG)
            self._logger = logging.getLogger() # Root logger
        else:
            self._logger = logger

    # Gets the status XML from the Pool Controller
    def get_status(self):
        """Fetch and parse status.xml; return the root Element or None on failure."""
        self._logger.debug("In get_status()...")
        try:
            response = requests.get(
                "http://{host_addr}/{device_list_endpoint}".format(
                    host_addr=self.controllerAddr,
                    device_list_endpoint=_STATUS_ENDPOINT
                ),
                auth=(self._userName, self._password),
                timeout=3.05
            )
            response.raise_for_status() # Raise HTTP errors to be handled in exception handling
        # Allow timeout and connection errors to be ignored - log and return no XML
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
            # FIX: Logger.warn is a deprecated alias of Logger.warning.
            self._logger.warning("HTTP GET in get_status() failed - %s", str(e))
            return None
        except:
            # FIX: corrected "occured" typo in the log message.
            self._logger.error("Unexpected error occurred - %s", sys.exc_info()[0])
            raise
        statusXML = xml.fromstring(response.text)
        if statusXML.tag == "response":
            return statusXML
        else:
            self._logger.warning("%s returned invalid XML in response", response.url)
            return None

    # Set the named attribute of the named element to the specified value
    def send_command(self, element, label, value):
        """GET set.cgi?name=<element>&<label>=<value>; return True on success."""
        self._logger.debug("In send_command(): Element %s, Label %s, Value %s", element, label, value)
        try:
            response = requests.get(
                "http://{host_addr}/{device_set_endpoint}?name={name}&{label}={value}".format(
                    host_addr=self.controllerAddr,
                    device_set_endpoint=_COMMAND_ENDPOINT,
                    name=element,
                    label=label,
                    value=str(int(value)) # commands are sent as integers (truncates floats)
                ),
                auth=(self._userName, self._password),
                timeout=3.05
            )
            response.raise_for_status() # Raise HTTP errors to be handled in exception handling
        # Allow timeout and connection errors to be ignored - log and return false
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
            self._logger.warning("HTTP GET in send_command() failed - %s", str(e))
            return False
        except:
            self._logger.error("Unexpected error occurred - %s", sys.exc_info()[0])
            raise
        else:
            self._logger.debug("GET returned successfully - %s", response.text)
            return True

    def on(self, element):
        """Turn the named element on."""
        return self.send_command(element, "value", _AUTELIS_ON_VALUE)

    def off(self, element):
        """Turn the named element off."""
        return self.send_command(element, "value", _AUTELIS_OFF_VALUE)

    def set_temp(self, element, value):
        """Set the temperature setpoint for the named element."""
        return self.send_command(element, "temp", value)

    def set_heat_setting(self, element, value): # for Pentair compatibility
        """Set the heat setting ('hval') for the named element."""
        return self.send_command(element, "hval", value)
# Monitors the TCP connection for status updates from the Pool Controller and forwards
# to Node Server in real time - must be executed on seperate, non-blocking thread
def status_listener(controllerAddr, statusUpdateCallback=None, logger=None):
    """Blocking loop: receive status lines over TCP and forward them.

    Returns False on any connection failure; runs until the peer misbehaves.
    NOTE(review): exception subscripts like e[0]/e[1] and sending a str over
    the socket are Python 2 idioms; this function breaks under Python 3.
    """
    # setup basic console logger for debugging
    if logger == None:
        logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG)
        logger = logging.getLogger() # Root logger
    # Open a socket for communication with the Pool Controller
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        conn.connect((controllerAddr, _CONTROLLER_TCP_PORT))
    except (socket.error, socket.herror, socket.gaierror) as e:
        logger.error("Unable to establish TCP connection with Pool Controller. Socket error %d - %s", e[0], e[1])
        return False
    except:
        raise
    # Loop continuously and Listen for status messages over TCP connection
    while True:
        # Get next status message
        try:
            conn.settimeout(600) # If no messages in 10 minutes, then check connection
            msg = conn.recv(_BUFFER_SIZE)
        except socket.timeout:
            # Check connection by sending a probe and expecting a known reply.
            try:
                conn.settimeout(2)
                conn.send(_TEST_TCP_MSG)
                msg = conn.recv(_BUFFER_SIZE)
            except socket.timeout:
                logger.error("Pool Controller did not respond to test message - connection closed.")
                conn.close()
                return False
            except socket.error as e:
                logger.error("TCP Connection to Pool Controller unexpectedly closed. Socket error %d - %s", e[0], e[1])
                conn.close()
                return False
            except:
                conn.close()
                raise
            # check returned data for success
            if not _TEST_RTN_SUCCESS in msg:
                logger.error("Pool Controller returned invalid data ('%s') - connection closed.", msg)
                conn.close()
                return False
        except socket.error as e:
            logger.error("TCP Connection to Pool Controller unexpectedly closed. Socket error %d - %s", e[0], e[1])
            conn.close()
            return False
        except:
            conn.close()
            raise
        # If msg is not empty, process status request
        if len(msg) > 0:
            # See if the status update message matches our regex pattern
            matches = re.match(_STATUS_UPDATE_MATCH_PATTERN, msg)
            if matches:
                # pull the pertinent data out of the message
                cmd = matches.groups()[0]
                val = matches.groups()[1]
                logger.debug("Status update message received from Pool Controller: Command %s, Value %s", cmd, val)
                # call status update callback function
                if not statusUpdateCallback == None:
                    if not statusUpdateCallback(cmd_to_element(cmd), val_to_text(val)):
                        logger.warn("Unhandled status update from Pool Controller - %s", cmd)
            else:
                logger.warn("Invalid status message received from Pool Controller - %s", msg)
# Convert the TCP Serial Port Interface command words to
# element tags matching the HTTP Command Interface
def cmd_to_element(cmd):
    """Translate a TCP serial-port command word into the equivalent
    HTTP command interface element tag."""
    if cmd[:3] == "CIR":  # Pentair circuit numbering
        circuit = int(cmd[3:])
        # Circuits 41-50 map to feature1..feature10; others keep their number.
        if 41 <= circuit <= 50:
            return "feature" + str(circuit - 40)
        return "circuit" + cmd[3:]
    mapping = {
        "AIRTMP": "airtemp",
        "SPATMP": "spatemp",
        "SOLHT": "solarht",
        "SOLTMP": "solartemp",
        "WFALL": "waterfall",
        "CLEAN": "cleaner",
        "OPTIONS": "dip",
        "UNITS": "tempunits",
        "POOLTMP": "pooltemp",
        "POOLTMP2": "pooltemp",
    }
    # Unknown commands fall back to their lowercase form.
    return mapping.get(cmd, cmd.lower())
# Convert the TCP Serial Port Interface value to
# element text matching the HTTP Command Interface
def val_to_text(val):
    """Translate a TCP serial-port status value into the element text used
    by the HTTP command interface; unknown values pass through unchanged."""
    translation = {
        "AUTO": "0",
        "SERVICE": "1",
        "TIMEOUT": "2",
        "TRUE": "1",
        "FALSE": "0",
        "T": "1",
        "F": "0",
        "ON": "1",
        "OFF": "0",
        "HEATER": "1",   # for Pentair compatibility
        "SOLPREF": "2",  # for Pentair compatibility
        "SOLAR": "3",    # for Pentair compatibility
    }
    return translation.get(val, val)
| {
"repo_name": "Goose66/autelis-polyglot",
"path": "autelisapi.py",
"copies": "1",
"size": "9498",
"license": "mit",
"hash": -8552148698311785000,
"line_mean": 34.9571984436,
"line_max": 119,
"alpha_frac": 0.5727521583,
"autogenerated": false,
"ratio": 4.321201091901729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5393953250201728,
"avg_score": null,
"num_lines": null
} |
#Auteur: Rémi Pelletier
#Fichier: ForceBrute-RemiPelletier.py
#Problème: Force brute (Compétition informatique CEGInfo-CEGL 2016)
#Score: 45.45/50 (le dernier test échoue en raison d'un timeout)
import re
class Node:
    """A tree node holding an integer value, a subtree node count and children."""

    # FIX: removed the class-level attributes (value/nNodes/childs).  A
    # class-level mutable list is shared by every instance that has not yet
    # run __init__, a classic aliasing hazard; the instance attributes set
    # below are all that is actually used.
    def __init__(self):
        self.value = 0    # node weight parsed from the input
        self.nNodes = 1   # number of nodes in the subtree rooted here (incl. self)
        self.childs = []  # child Node instances
def multiplyRange(start, end):
    """Return the product of the integers start..end inclusive (1 if empty)."""
    product = 1
    for factor in range(start, end + 1):
        product *= factor
    return product
def computeNbTrees(length):
    """Number of distinct binary trees with `length` nodes (Catalan-style count)."""
    numerator = multiplyRange(length + 2, 2 * length)
    return numerator // multiplyRange(1, length)
def getWorstLength(node):
    """Return the cost of the worst root-to-leaf path below *node*.

    Leaves contribute 0; an internal node adds its own value to the worst
    of its one or two subtrees (nodes with more children contribute 0,
    matching the original fall-through behaviour).
    """
    child_count = len(node.childs)
    if child_count == 1:
        return node.value + getWorstLength(node.childs[0])
    if child_count == 2:
        return node.value + max(getWorstLength(node.childs[0]),
                                getWorstLength(node.childs[1]))
    return 0
def readNode(line, curIndex):
    """Recursively parse one node of the serialized tree starting at curIndex[0].

    curIndex is a one-element list used as a mutable cursor shared across the
    recursive calls.  '(' opens a child subtree, '.' introduces this node's
    integer value, ')' closes the node.  nNodes is set to the subtree size.
    """
    curNode = Node()
    while (curIndex[0] < len(line)) and (line[curIndex[0]] != ')'):
        c = line[curIndex[0]]
        curIndex[0] += 1
        if c == '(':
            # Nested '(' starts a child subtree.
            curNode.childs.append(readNode(line, curIndex))
        elif c == '.':
            # Grab the run of digits following the '.' as this node's value.
            subStr = re.search(r'\d+', line[curIndex[0]:]).group()
            curNode.value = int(subStr)
            # Advance past the digits plus one extra character.
            # NOTE(review): the +1 presumably skips a separator character in
            # the input format -- confirm against the contest's input spec.
            curIndex[0] += len(subStr) + 1
    # Consume this node's closing ')'.
    if(line[curIndex[0]] == ')'):
        curIndex[0] += 1
    # Subtree size = self (1, set in Node.__init__) + sizes of all children.
    for j in range(0, len(curNode.childs)):
        curNode.nNodes += curNode.childs[j].nNodes
    return curNode
# --- entry point: read the serialized tree from stdin and report results ----
line = input()
# Mutable one-element cursor shared with readNode; starts at 1 to skip the
# opening parenthesis.  FIX: removed the redundant duplicate `curIndex[0] = 1`
# assignment and the doubled parentheses around the final print argument.
curIndex = [1]
root = readNode(line, curIndex)
print(int(getWorstLength(root)))
print(computeNbTrees(root.nNodes))
| {
"repo_name": "PolyAlgo/PolyAlgo",
"path": "Documentation/Compétion informatique CEGInfo 2016/Force brute (1)/ForceBrute-RemiPelletier.py",
"copies": "2",
"size": "1772",
"license": "apache-2.0",
"hash": -2317679348799182300,
"line_mean": 26.0634920635,
"line_max": 97,
"alpha_frac": 0.5701357466,
"autogenerated": false,
"ratio": 3.112676056338028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46828118029380283,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.