text stringlengths 0 1.05M | meta dict |
|---|---|
from django.db import models
from django.conf import settings
import django.utils.timezone as timezone
from django.core.exceptions import ValidationError
import uuid
class Author(models.Model):
    """
    Profile model extending the Django auth user with social-network fields.

    The primary key is the author's full URL (http://hostname/author/<uuid>),
    which lets local and remote authors share one id scheme.
    """
    # One local Django user per author; deleting the user deletes the author.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE)
    url = models.URLField()  # public URL of this author (same form as id)
    host = models.URLField()  # base URL of the server this author lives on
    github = models.URLField(blank=True, default='')  # optional GitHub URL
    bio = models.TextField(blank=True, default='')  # optional free-text bio
    # http://hostname/author/<uuid>
    id = models.URLField(primary_key=True)

    def __str__(self):
        # Display as the underlying auth user's username.
        return self.user.get_username()
class Follow(models.Model):
    """
    Records that a local author follows another author.

    ``friend`` is stored as an id URL (not a foreign key) because the
    followed author may live on a remote server with no local Author row.
    """
    author = models.ForeignKey(Author, on_delete=models.CASCADE,
                               related_name='follow')
    friend = models.URLField()  # id URL of the followed author (may be remote)
    # Cached display name, since remote lookups are expensive.
    friendDisplayName = models.CharField(max_length=256, default="")

    def __str__(self):
        return '{} follows {}'.format(self.author, self.friendDisplayName)
class FriendRequest(models.Model):
    """
    A pending friend request sent to a local author.

    The requester may be remote, so only their id URL and a cached display
    name are stored; the requestee is always a local Author.
    """
    # this is the 'sender' of the request (an author id URL, possibly remote)
    requester = models.URLField()
    requestee = models.ForeignKey(Author, on_delete=models.CASCADE,
                                  related_name='request')
    requesterDisplayName = models.CharField(max_length=256, default="")
    # BUG FIX: auto_now=True bumped this timestamp on *every* save, so the
    # field recorded "last modified" rather than creation time. auto_now_add
    # sets it exactly once, on insert. (Needs a trivial migration.)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{} sent friend request for {}'.format(self.requesterDisplayName, self.requestee)
class Post(models.Model):
    """
    A post stored on this server.

    The primary key is the post's full URL (http://hostname/posts/<uuid>).
    Visibility values seen elsewhere in the project: PUBLIC, SERVERONLY,
    FRIENDS, FOAF, PRIVATE; PRIVATE posts may list allowed viewers via
    related CanSee rows.
    """
    class Meta:
        ordering = ['-published']  # newest first

    title = models.CharField(max_length=32)
    description = models.CharField(max_length=140) # why not Twitter?
    contentType = models.CharField(max_length=32)
    content = models.TextField()
    author = models.ForeignKey(Author, on_delete=models.CASCADE,
                               related_name='post_author')
    published = models.DateTimeField(default=timezone.now)
    # http://hostname/posts/<uuid>
    id = models.URLField(primary_key=True)
    visibility = models.CharField(max_length=10, default="PUBLIC")
    unlisted = models.BooleanField(default=False)  # hidden from streams

    def __str__(self):
        return '"{}" - {}'.format(self.title, self.author.user.get_username())

    def clean(self):
        """
        Custom validation.
        - Ensure visibility == PRIVATE if there's visibleTos
        """
        # .exists() avoids counting every row just to test for emptiness.
        if self.visibility != 'PRIVATE' and self.cansee_set.exists():
            # Fixed typos in the user-facing message ("Visibilty"/"visibileTo").
            raise ValidationError('Visibility must be private if visibleTo is'
                                  ' set')
class Category(models.Model):
    """
    Another container class, this one for post categories. This might be better
    off as a many-to-many relationship.
    """
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    category = models.CharField(max_length=32)  # free-form category label

    def __str__(self):
        return '"{}" in {}'.format(self.post.title, self.category)
class CanSee(models.Model):
    """
    Another container class, this one for users who can see private posts. This
    might be better off as a many-to-many relationship.
    """
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    visibleTo = models.URLField() # This is an author id, could be remote

    def __str__(self):
        return '{} sees {}'.format(self.visibleTo, self.post)
class RemoteCommentAuthor(models.Model):
    """
    Cache of author metadata for comments written by remote users.

    (Original author's note: "We need to cache remote comment authors. I think
    this is a terrible idea and we should do it better but right now fuck it.
    And probably until the end of time.")
    """
    authorId = models.URLField(primary_key=True)  # remote author's id URL
    host = models.URLField()  # base URL of the remote server
    displayName = models.CharField(max_length=256)
    github = models.URLField(blank=True, default='')

    def __str__(self):
        return '{}@{}'.format(self.displayName, self.authorId)
class Comment(models.Model):
    """
    A comment on a Post. The commenting author may be remote, so the author
    is stored as an id URL rather than a foreign key.
    """
    class Meta:
        ordering = ['published']  # oldest first

    # As it stands, this could be a remote user. We're currently sent info about
    # this user but we are not going to store it and will request it from the
    # remote server every time. We could start caching stuff later.
    author = models.URLField()
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment = models.TextField()  # the comment body
    contentType = models.CharField(max_length=32)
    published = models.DateTimeField(default=timezone.now)
    # The only id field that's a uuid because there's no way to directly access
    # a comment via URI
    # So says the Hindle
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)

    def __str__(self):
        # Resolve a local author's username; fall back for remote authors.
        try:
            localAuthor = Author.objects.get(id=self.author)
            name = localAuthor.user.get_username()
        except Author.DoesNotExist:
            name = "Remote user"
        return '{} on "{}"'.format(name,
                                   self.post.title)
| {
"repo_name": "CMPUT404W17T06/CMPUT404-project",
"path": "dash/models.py",
"copies": "1",
"size": "4865",
"license": "apache-2.0",
"hash": -7983634182781003000,
"line_mean": 35.3059701493,
"line_max": 96,
"alpha_frac": 0.6495375128,
"autogenerated": false,
"ratio": 4.077954735959765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5227492248759765,
"avg_score": null,
"num_lines": null
} |
from django.http import HttpResponse, JsonResponse, HttpResponseForbidden
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.decorators.http import require_POST, require_GET
from django.views import generic
from .models import Post, Category, Comment, CanSee, Author, Follow, FriendRequest
from django.contrib.auth.models import User
from django.db.models import Q
from .forms import PostForm, CommentForm
import base64
import uuid
import itertools
from django.views.generic.edit import CreateView
from rest.authUtils import createBasicAuthToken, parseBasicAuthToken, \
getRemoteCredentials
from rest.models import RemoteCredentials
from rest.serializers import PostSerializer, CommentSerializer, \
FollowSerializer, AuthorSerializer
from django.utils.dateparse import parse_datetime
from urllib.parse import urlsplit, urlunsplit
import requests
from rest.verifyUtils import NotFound, RequestExists
import datetime
import dateutil.parser
from requests.exceptions import ChunkedEncodingError
def postSortKey(postDict):
    """Sort key for serialized posts: the parsed ``published`` datetime."""
    published = postDict['published']
    return parse_datetime(published)
def _followsBack(candidateId, authorID):
    """
    Return True if ``candidateId`` follows ``authorID``.

    Checks the local Follow table when the candidate is a local author;
    otherwise asks the candidate's home server for their friends list
    (returning False when we hold no credentials for that server or the
    request fails).
    """
    try:
        # Raises DoesNotExist when the candidate is remote.
        Author.objects.get(id=candidateId)
    except Author.DoesNotExist:
        creds = getRemoteCredentials(candidateId)
        if not creds:
            # Might have friends with a server we don't have access to.
            return False
        r = requests.get(candidateId + 'friends/',
                         data={'query': 'friends'},
                         auth=(creds.username, creds.password))
        if r.status_code != 200:
            return False
        return authorID in r.json()['authors']
    # Local candidate: a reciprocal row in the Follow table decides it.
    followed = Follow.objects \
        .filter(author=candidateId) \
        .values_list('friend', flat=True)
    return authorID in followed


def getFriends(authorID):
    """
    Return the list of author ids that are mutual friends of ``authorID``
    (authors it follows who also follow it back).

    ``authorID`` may be local (Follow table) or remote (queried over HTTP).
    Returns an empty list when a remote author's server is unknown to us.
    """
    try:
        # Probe whether the author is local; DoesNotExist means remote.
        Author.objects.get(id=authorID)
        following = Follow.objects \
            .filter(author=authorID) \
            .values_list('friend', flat=True)
    except Author.DoesNotExist:
        # Remote author: ask their server whom they follow.
        creds = getRemoteCredentials(authorID)
        if not creds:
            return []
        r = requests.get(authorID + 'friends/',
                         data={'query': 'friends'},
                         auth=(creds.username, creds.password))
        following = r.json()['authors'] if r.status_code == 200 else []
    # A friend is someone followed who follows back. The reciprocity check
    # was previously duplicated verbatim in both branches; it now lives in
    # _followsBack.
    return [candidate for candidate in following
            if _followsBack(candidate, authorID)]
class StreamView(LoginRequiredMixin, generic.ListView):
    """
    Dashboard stream: every post the logged-in user is allowed to see,
    gathered from the local DB and from every remote server we hold
    credentials for, sorted newest first.
    """
    login_url = 'login'
    template_name = 'dashboard.html'
    context_object_name = 'latest_post_list'

    def get_queryset(self):
        """
        Build the stream as a list of serialized post dicts.

        Local posts are filtered by visibility rules in the DB; remote posts
        are fetched over HTTP and filtered in Python. NOTE(review): this makes
        many synchronous network calls per page load (including getFriends
        fan-out for FOAF checks) — presumably slow on real data; confirm.
        """
        # Return posts that are visible to everyone (Public, this server only,
        # self posted. Remove unlisted unless you are the creator of post)
        localVisible = Post.objects.filter(
            ((Q(visibility='PUBLIC') | Q(visibility='SERVERONLY'))
             & Q(unlisted=False)) | Q(author=self.request.user.author)
        )
        # list of all remote creditials we know about.
        # have host, username, password
        # does not contain our own server
        allRemotePosts = []
        hosts = RemoteCredentials.objects.all()
        for host in hosts:
            print('Getting from', host.host)
            # Technically, author/posts is all posts and posts/ is only PUBLIC
            # Will everyone follow that? who knows....
            try:
                r = requests.get(host.host + 'author/posts/',
                                 data={'query':'posts'},
                                 auth=(host.username, host.password))
                if r.status_code != 200:
                    # Fall back to the public-only endpoint.
                    r = requests.get(host.host + 'posts/',
                                     data={'query':'posts'},
                                     auth=(host.username, host.password))
                    if r.status_code != 200:
                        print('Error {} connecting while getting posts: {}'
                              .format(r.status_code, host.host))
                        print('Got response: {}'.format(r.text))
                        continue
            except ChunkedEncodingError as e:
                # Skip servers whose responses break mid-stream.
                print('{} got chunked encoding error...'.format(host.host))
                print(e)
                continue
            # Hacky things to make us work with remotes that follow the spec
            # "closely"
            posts = r.json()['posts']
            for post in posts:
                try:
                    uuid.UUID(post['id'])
                # If it fails it means it's (probably) a url
                except ValueError:
                    pass
                # If it succeeded we want to overwrite it with the url
                else:
                    origin = post['origin']
                    if origin[-1] != '/':
                        origin += '/'
                    post['id'] = origin
            allRemotePosts += posts
        # get local authors who follow you
        localFollowers = Follow.objects \
            .filter(friend=self.request.user.author.id) \
            .values_list('author', flat=True)
        # Of your local follwers, get those that you follow back, the "friends"
        localFriends = Follow.objects \
            .filter(author=self.request.user.author.id, friend__in=localFollowers) \
            .values_list('friend', flat=True)
        # Get posts marked FRIENDS visibility whose authors consider this author
        # a friend
        localFriendsPosts = Post.objects \
            .filter(visibility='FRIENDS', author__in=localFriends,
                    unlisted=False)
        # PURGE THE REMOTE POSTS
        following = Follow.objects \
            .filter(author=self.request.user.author.id) \
            .values_list('friend', flat=True)
        allLocalFOAFPosts = Post.objects \
            .filter(visibility='FOAF', unlisted=False)
        localFOAFPosts = []
        for FOAFPost in allLocalFOAFPosts:
            friends = getFriends(FOAFPost.author.id)
            if self.request.user.author.id in friends:
                # Grab this post. Somehow.
                localFOAFPosts.append(FOAFPost)
            # NOTE(review): this FOAF walk also runs for direct friends, so a
            # post can be appended twice — confirm intended de-duplication.
            for friend in friends:
                FOAF = getFriends(friend)
                if self.request.user.author.id in FOAF:
                    # Grab this post. Somehow.
                    localFOAFPosts.append(FOAFPost)
                    break
        remotePosts = []
        for remotePost in allRemotePosts:
            # Not in just default to False
            if 'unlisted' not in remotePost or remotePost['unlisted'] == False:
                # Not in, assume PUBLIC
                if 'visibility' not in remotePost:
                    remotePosts.append(remotePost)
                elif remotePost['visibility'] == 'PUBLIC':
                    remotePosts.append(remotePost)
                elif remotePost['visibility'] == 'FRIENDS':
                    # Check if you follow them.
                    if remotePost['author']['id'] in following:
                        # Huzzah, now check if they follow you.
                        host = getRemoteCredentials(remotePost['author']['id'])
                        r1 = requests.get(remotePost['author']['url'] + 'friends/',
                                          data={'query':'friends'},
                                          auth=(host.username, host.password))
                        if r1.status_code == 200:
                            friends = r1.json()['authors']
                            if self.request.user.author.id in friends:
                                remotePosts.append(remotePost)
                    else:
                        continue
                elif remotePost['visibility'] == 'FOAF':
                    # Same as above, if they're your friend you can just attach it.
                    authorsFriends = getFriends(remotePost['author']['id'])
                    if self.request.user.author.id in authorsFriends:
                        # YOU ARE A FRIEND, JUST RUN WITH IT.
                        remotePosts.append(remotePost)
                    else:
                        # YOU ARE NOT A FRIEND, CHECK THEIR FRIENDS
                        for authorFriend in authorsFriends:
                            FOAF = getFriends(authorFriend)
                            if self.request.user.author.id in FOAF:
                                remotePosts.append(remotePost)
                                # YOU ARE A FOAF, SO BREAK OUT OF LOOP
                                break
                elif remotePost['visibility'] == 'PRIVATE':
                    if self.request.user.author.url in remotePost['visibleTo']:
                        remotePosts.append(remotePost)
        # Get posts you can see
        authorCanSee = CanSee.objects \
            .filter(visibleTo=self.request.user.author.url) \
            .values_list('post', flat=True)
        visibleToPosts = Post.objects \
            .filter(id__in=authorCanSee, visibility="PRIVATE",
                    unlisted=False)
        finalQuery = itertools.chain(localVisible, visibleToPosts, localFriendsPosts, localFOAFPosts)
        postSerializer = PostSerializer(finalQuery, many=True)
        # postSerializer.data gives us a list of dicts that can be added to the remote posts lists
        posts = postSerializer.data + remotePosts
        posts = sorted(posts, key=postSortKey, reverse=True)
        for post in posts:
            # Templates need real datetimes, not ISO strings.
            post['published'] = dateutil.parser.parse(post['published'])
        return posts

    def get_context_data(self, **kwargs):
        """Add empty post/comment forms for the dashboard's inline forms."""
        context = generic.ListView.get_context_data(self, **kwargs)
        context['postForm'] = PostForm()
        context['commentForm'] = CommentForm()
        return context
@require_POST
@login_required(login_url="login")
def newPost(request):
    """
    Create a new post (and an optional companion image post) from the
    dashboard's PostForm, then redirect back to the dashboard.

    An invalid form silently redirects without creating anything.
    """
    # Get form data
    form = PostForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data
        host = 'http://' + request.get_host()
        data['author'] = request.user.author
        data['host'] = host
        # Make new post. It is saved first with only id/author so that
        # makePost can look it up by id and fill in the rest.
        post = Post()
        post.id = host + '/posts/' + uuid.uuid4().hex + '/'
        post.author = request.user.author
        post.save()
        # Did they upload an image?
        if 'attachImage' in request.FILES:
            makePost(post.id, data, request.FILES['attachImage'])
        else:
            makePost(post.id, data)
    # Redirect
    return redirect('dash:dash')
def makePost(pid, data, image=False):
    """
    Fill in an already-saved Post (looked up by ``pid``) from cleaned form
    data and persist it. Shared by newPost and editPost.

    If ``image`` is an uploaded file, a companion image post is also created
    via makeImagePost, stamped with this post's published time.
    """
    try:
        post = Post.objects.get(pk__contains=pid)
    except (Post.DoesNotExist, Post.MultipleObjectsReturned) as e:
        # NOTE(review): this returns a redirect from a non-view helper; the
        # callers ignore the return value, so this is effectively a bail-out.
        return redirect('dash:dash')
    # UNLISTED is stored as a PRIVATE post with the unlisted flag set.
    if data['visibility'] == "UNLISTED":
        data['visibility'] = "PRIVATE"
        data['unlisted'] = True
    else:
        data['unlisted'] = False
    # Fill in post
    post.title = data['title']
    post.contentType = data['contentType']
    post.content = data['content']
    post.visibility = data['visibility']
    post.unlisted = data['unlisted']
    post.description = data['description']
    post.save()
    if image:
        # Give the image post the parent's published time so the two sort
        # adjacently (makeImagePost subtracts one microsecond).
        data['published'] = post.published
        makeImagePost(data, image)
    handlePostLists(post, data['categories'], data['visibleTo'])
def makeImagePost(data, image):
    """
    Create a companion post holding an uploaded image as a base64 data URL.

    The image post copies the parent post's author, visibility, unlisted flag
    and description, and is published one microsecond before the parent so
    the pair sort together with the parent first.
    """
    # Build a bytes object from all of the image chunks (theoretically
    # only) one, but you never know
    b = bytes()
    for c in image.chunks():
        b += c
    # Encode it in b64
    encoded = base64.b64encode(b)
    encoded = encoded.decode('utf-8')
    # Turn it into a data url
    contentType = image.content_type + ';base64'
    encoded = 'data:' + contentType + ',' + encoded
    # Make the new post
    post = Post()
    imageId = uuid.uuid4().hex
    post.id = data['host'] + '/posts/' + imageId + '/'
    post.author = data['author']
    # Steal the parent post's title and description
    post.title = data['title'] + ' [IMAGE]'
    post.description = data['description']
    # Set up image content
    post.contentType = contentType
    post.content = encoded
    # Image posts are same Visibilty and unlisted-ness as parent post
    post.visibility = data['visibility']
    post.unlisted = data['unlisted']
    post.published = data['published'] - datetime.timedelta(microseconds=1)
    # Save the image post
    post.save()
    handlePostLists(post, data['categories'], data['visibleTo'])
def handlePostLists(post, categories, visibleTo):
    """
    Sync a post's Category and CanSee rows from comma-separated form strings.

    ``categories`` and ``visibleTo`` are raw form values ("a, b, c"); each is
    split, whitespace-stripped, de-duplicated, and turned into rows. Existing
    rows are reused rather than duplicated (important when editing a post).
    """
    # Were there any categories?
    if categories:
        # Normalize: split on commas, strip whitespace, drop duplicates.
        categorySet = {c.strip() for c in categories.split(',')}
        for categoryStr in categorySet:
            # get_or_create replaces the previous manual get/except dance and
            # avoids duplicating rows when a post is edited.
            Category.objects.get_or_create(post=post, category=categoryStr)
    # Were there any explicitly-permitted viewers?
    if visibleTo:
        visibilitySet = {a.strip() for a in visibleTo.split(',')}
        # Build canSee objects
        for author in visibilitySet:
            CanSee.objects.get_or_create(post=post, visibleTo=author)
@require_POST
@login_required(login_url="login")
def newComment(request):
    """
    Create a comment on a post.

    Comments on local posts are saved directly; comments on remote posts are
    serialized and POSTed to the owning server's comments endpoint. Either
    way the user is redirected back to the referring page (or the dash).
    """
    previous_page = request.META.get('HTTP_REFERER')
    # Get form data
    data = request.POST
    # Make new comment
    comment = Comment()
    # Fill in data
    comment.author = request.user.author.id
    comment.comment = data['comment']
    comment.contentType = data['contentType']
    comment.post_id = data['post_id']
    # Is it a local post? Compare the post id's host against our own host.
    hostAddress = urlsplit(data['post_id']).netloc
    userAddress = urlsplit(request.user.author.host).netloc
    if userAddress == hostAddress:
        # Save the new comment
        comment.save()
    else:
        # Post the new comment
        serialized_comment = CommentSerializer(comment).data
        # Try and ensure we have a decent URL
        hostUrl = data['post_id']
        if not (hostUrl.startswith('http://') or
                hostUrl.startswith('https://')):
            hostUrl = 'http://' + hostUrl
        # Get remote credentials for this host, just redirect if we fail I guess
        # TODO show error message on failure instead
        hostCreds = getRemoteCredentials(hostUrl)
        if hostCreds == None:
            print('Failed to find remote credentials for comment post: {}' \
                  .format(data['post_id']))
            return redirect(previous_page)
        # Ensure that the last character is a slash
        if not hostUrl.endswith('/'):
            hostUrl += '/'
        hostUrl += 'comments/'
        # Request body per the project's addComment API shape.
        data = {
            "query": "addComment",
            'post': data['post_id'],
            'comment': serialized_comment
        }
        # NOTE(review): the response status is never checked, so a rejected
        # remote comment fails silently.
        r = requests.post(hostUrl,
                          auth=(hostCreds.username, hostCreds.password),
                          json=data)
    # Redirect to the dash
    if (previous_page == None):
        return redirect('dash:dash')
    else:
        return redirect(previous_page)
@require_POST
@login_required(login_url="login")
def deletePost(request):
    """Delete one of the requesting user's posts, then show the manager."""
    postId = request.POST['post']
    try:
        target = Post.objects.get(pk__contains=postId)
    except (Post.DoesNotExist, Post.MultipleObjectsReturned):
        return redirect('dash:manager')
    # Only the post's own author may delete it.
    if target.author.id == request.user.author.id:
        target.delete()
    # Redirect to the manager
    return redirect('dash:manager')
@login_required(login_url="login")
def editPost(request, pid):
    """
    GET: return the post (resolved from the short ``pid``) as JSON for the
    edit form. Any other method: apply the submitted PostForm to the existing
    post via makePost, then redirect to the manager.
    """
    if request.method == 'GET':
        # Expand the short pid into the full id URL used as primary key.
        pid = request.get_host() + '/posts/' + pid
        post = get_object_or_404(Post, pk__contains=pid)
        post = PostSerializer(post, many=False).data
        return JsonResponse(post)
    else:
        print(pid)
        form = PostForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            data['author'] = request.user.author
            data['host'] = 'http://' + request.get_host()
            # Did they upload an image?
            if 'attachImage' in request.FILES:
                makePost(pid, data, request.FILES['attachImage'])
            else:
                makePost(pid, data)
        return redirect('dash:manager')
class ManagerView(LoginRequiredMixin, generic.ListView):
    """
    Post manager page: lists only the logged-in user's own posts so they can
    be edited or deleted.
    """
    login_url = 'login'
    template_name = 'manager.html'
    context_object_name = 'latest_post_list'

    def get_queryset(self):
        """Return the user's own posts as serialized dicts, newest first."""
        # Return posts that are visible to everyone (Public, this server only,
        # self posted)
        localVisible = Post.objects.filter(
            Q(author=self.request.user.author)
        )
        posts = PostSerializer(localVisible, many=True).data
        posts = sorted(posts, key=postSortKey, reverse=True)
        for post in posts:
            # Templates need real datetimes, not ISO strings.
            post['published'] = dateutil.parser.parse(post['published'])
        return posts

    def get_context_data(self, **kwargs):
        """Add empty post/comment forms for the page's inline forms."""
        context = generic.ListView.get_context_data(self, **kwargs)
        context['postForm'] = PostForm()
        context['commentForm'] = CommentForm()
        return context
@login_required(login_url="login")
def post(request, pid):
    """Render a single post's page, resolving the short pid to a full id."""
    fullId = request.get_host() + '/posts/' + pid
    found = get_object_or_404(Post, pk__contains=fullId)
    serialized = PostSerializer(found, many=False).data
    # The template expects a real datetime, not an ISO string.
    serialized['published'] = dateutil.parser.parse(serialized['published'])
    context = {'post': serialized, 'commentForm': CommentForm()}
    return render(request, 'post_page.html', context)
class ListFollowsAndFriends(LoginRequiredMixin, generic.ListView):
    ''' Lists whom you are following, who are following you and who are your friends '''
    context_object_name = 'following'
    template_name = 'following.html'

    def get_queryset(self):
        """Return the user's follow rows, keyed for the template."""
        # NOTE(review): 'Friends' runs the exact same filter as 'following',
        # so it is really the follow list again — the reciprocal-follow check
        # is missing. Compare with the classification done in DeleteFriends.
        following = Follow.objects.filter(author=self.request.user.author)
        Friends = Follow.objects.filter(author=self.request.user.author)
        return {'Following': following, 'Friends': Friends}
# Dead code: an earlier accept/reject friend-request view, kept only as a
# module-level string (it is never executed). Superseded by addFriend below.
"""
@login_required()
def friendRequest(request):
    ''' Accept or reject Friend requests '''
    friend_requests = FriendRequest.objects.filter(requestee = request.user.author)
    if request.method == 'POST':
        '''Accept will add a new row in follow and make them friends, then delete the friend request,
        this also checks if there are duplicate in follow table'''
        '''Reject will delete the friend request and do nothing to follow table'''
        if 'accept' in request.POST and len(Follow.objects.filter(author=request.user.author, friend=request.POST['accept'])) == 0:
            follow = Follow()
            follow.author = request.user.author
            follow.friend = request.POST['accept']
            follow.friendDisplayName = request.POST['accept1']
            follow.save()
            '''if this is a local author we create another row in follow table
            if Author.objects.get(url = request.POST['accept'] && not Follow.objects.get( ):
                follow = Follow()
                follow.author = Author.objects.get(url = request.POST['accept'])
                follow.friend = request.user.author.url
                follow.requesterDisplayName = User.get_short_name(request.user)
                follow.save()'''
            FriendRequest.objects.get(requestee = request.user.author,requester = request.POST['accept']).delete()
        elif 'reject' in request.POST:
            ''''if Author.objects.get(url = request.POST['accept']):
                follow = Follow()
                follow.author = Author.objects.get(url = request.POST['accept'])
                follow.friend = request.user.author.url
                follow.requesterDisplayName = User.get_short_name(request.user)
                follow.save()'''
            FriendRequest.objects.get(requestee = request.user.author,requester = request.POST['reject']).delete()
    return render(request, 'friendrequests.html', {'followers': friend_requests})
"""
@require_POST
@login_required(login_url="login")
def addFriend(request):
    """
    Resolve a pending friend request.

    'accept' follows the requester back (unless already followed) and removes
    the request; 'decline' just removes the request. Either way the user is
    redirected to the follow-requests page.
    """
    user = request.POST['user']
    displayName = request.POST['displayName']
    result = request.POST['result']
    if result == 'accept':
        # Don't duplicate an existing follow row.
        if not Follow.objects.filter(author=request.user.author,
                                     friend=user).exists():
            follow = Follow()
            follow.author = request.user.author
            follow.friend = user
            follow.friendDisplayName = displayName
            follow.save()
        # BUG FIX: the request row was previously only deleted when a new
        # follow row was created, so accepting while already following left
        # the request pending forever. filter().delete() also avoids a
        # DoesNotExist 500 on a double-submitted form (get().delete() raised).
        FriendRequest.objects.filter(requestee=request.user.author,
                                     requester=user).delete()
    elif result == 'decline':
        FriendRequest.objects.filter(requestee=request.user.author,
                                     requester=user).delete()
    return redirect('dash:follow_requests')
@require_POST
@login_required(login_url="login")
def SendFriendRequest(request):
    """
    Follow another author and send them a friend request.

    Local requestees get a FriendRequest row created directly; remote
    requestees get the request POSTed to their server's friendrequest
    endpoint. Always redirects back to the dash.
    """
    # Get form data
    data = request.POST
    # Make friend request and follow
    friendrequest = FriendRequest()
    # Get author, all local users have an author except for the initial super
    # user. Just don't use them.
    author = request.user.author
    # Get the requested id
    requestedId = data['author']
    if not requestedId.endswith('/'):
        requestedId += '/'
    # check user trying to send request to self
    if requestedId == author.id:
        return redirect('dash:dash')
    # Check if this user is already following the requested user. If they aren't
    # then follow the user
    localFollows = Follow.objects.filter(author=author,
                                         friend=requestedId)
    if len(localFollows) == 0:
        # Build the follow
        follow = Follow()
        follow.friend = data['author']
        follow.friendDisplayName = data['displayName']
        follow.author = author
        follow.save()
    # Are they a local user?
    localAuthorRequested = None
    try:
        localAuthorRequested = Author.objects.get(id=requestedId)
        # User can't send a friend request if they are friends already, this avoid the problem
        # where users can spam others sending friend requests
        # NOTE(review): the lookups below use url=requestedId while the line
        # above used id=requestedId — if an author's url and id ever differ,
        # these raise Author.DoesNotExist and the (local) requestee is then
        # wrongly treated as remote. Confirm id == url is invariant here.
        if len(Follow.objects.filter(author=author, friend=requestedId)) == 1 and len(
                Follow.objects.filter(author=Author.objects.get(url=requestedId), friend=author.url)):
            return redirect('dash:dash')
        # check if the friend is already the following requesting user, this avoid friend requests
        # being added into the table
        elif len(Follow.objects.filter(author=Author.objects.get(url=requestedId), friend=author.url)):
            return redirect('dash:dash')
    # If they aren't just leave it as None
    except Author.DoesNotExist:
        pass
    # Was the requested author local?
    if localAuthorRequested != None:
        # Don't duplicate friend requests
        localRequest = FriendRequest.objects \
            .filter(requestee=localAuthorRequested,
                    requester=author.id)
        # Just redirect and pretend we did something
        if len(localRequest) > 0:
            return redirect('dash:dash')
        # Save the new friend request to local
        friendrequest.requester = author.id
        friendrequest.requestee = localAuthorRequested
        friendrequest.requesterDisplayName = author.user.get_username()
        friendrequest.save()
    else:
        # Get remote credentials for this host, just redirect if we fail I guess
        # TODO show error message on failure instead
        hostCreds = getRemoteCredentials(requestedId)
        if hostCreds == None:
            #print('Failed to find remote credentials for comment post: {}'.format(data['post_id']))
            return redirect('dash:dash')
        # Build remote friend request url
        url = hostCreds.host + 'friendrequest/'
        # Build request data
        authorData = AuthorSerializer(author).data
        requestedAuthor = {
            'id': requestedId,
            'url': requestedId,
            'host': data['host'],
            'displayName': data['displayName']
        }
        data = {
            "query": "friendrequest",
            'author': authorData,
            'friend': requestedAuthor
        }
        # NOTE(review): the response status is never checked; a rejected
        # remote request fails silently.
        r = requests.post(url,
                          auth=(hostCreds.username, hostCreds.password),
                          json=data)
    # Redirect to the dash
    return redirect('dash:dash')
@login_required()
def DeleteFriends(request):
    """
    Handle unfriend/unfollow POSTs, then render the user's follow lists.

    Every follow row is classified as a mutual 'Friend' (the other author
    follows back, checked locally or via their server) or a plain
    'Following'. Pending friend requests are passed along for the template.
    """
    # delete or unfollow friend (both just remove the follow row)
    if request.method == 'POST':
        if 'unfriend' in request.POST:
            print(request.POST['unfriend'], request.user.author)
            Follow.objects.get(friendDisplayName=request.POST['unfriend'],
                               author=request.user.author).delete()
        elif 'unfollow' in request.POST:
            Follow.objects.get(friendDisplayName=request.POST['unfollow'],
                               author=request.user.author).delete()
    Friends = []
    Followings = []
    # get all follow list
    for follow in request.user.author.follow.all():
        # Is the followed author local?
        try:
            localFriend = Author.objects.get(url=follow.friend)
        except Author.DoesNotExist:
            localFriend = None
        if localFriend is not None:
            # A reciprocal local follow row makes it a friendship. (The
            # original ran this exact filter twice and appended once per
            # matching row, which could duplicate entries.)
            if Follow.objects.filter(friend=follow.author.url,
                                     author=localFriend).exists():
                Friends.append(follow)
            else:
                Followings.append(follow)
        else:
            # Remote author: ask their server for their friends list.
            try:
                host = getRemoteCredentials(follow.friend)
                r1 = requests.get(follow.friend + 'friends/',
                                  data={'query': 'friends'},
                                  auth=(host.username, host.password))
                remote_friend_list = []
                if r1.status_code == 200:
                    remote_friend_list = r1.json()["authors"]
                if follow.author.url in remote_friend_list:
                    Friends.append(follow)
                else:
                    Followings.append(follow)
            # Was a bare except: narrowed so Ctrl-C/SystemExit aren't
            # swallowed. Network failures and missing credentials (host is
            # None -> AttributeError) classify the row as a plain follow.
            except Exception:
                Followings.append(follow)
    print(Followings, Friends)
    friend_requests = FriendRequest.objects.filter(requestee=request.user.author)
    return render(request, 'following.html',
                  {'Followings': Followings, 'Friends': Friends,
                   'Requests': friend_requests})
| {
"repo_name": "CMPUT404W17T06/CMPUT404-project",
"path": "dash/views.py",
"copies": "1",
"size": "29985",
"license": "apache-2.0",
"hash": -414301183872413440,
"line_mean": 38.2988204456,
"line_max": 131,
"alpha_frac": 0.5768217442,
"autogenerated": false,
"ratio": 4.506989328122652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004567817433631652,
"num_lines": 763
} |
from rest_framework.views import APIView
from dash.models import Post, Author, Category, CanSee
from .serializers import PostSerializer
from .verifyUtils import postValidators, NotFound, ResourceConflict
from .dataUtils import validateData, pidToUrl, getPostData, getPost
from .httpUtils import JSONResponse
class PostView(APIView):
    """
    REST view of an individual Post (GET/POST/PUT/DELETE on a post id).
    """

    @staticmethod
    def _createCategories(post, categoryList):
        """Create one Category row per category string for ``post``."""
        for categoryStr in categoryList:
            category = Category()
            category.category = categoryStr
            category.post = post
            category.save()

    @staticmethod
    def _createCanSees(post, visibleToList):
        """Create one CanSee row per author id allowed to view ``post``."""
        for authorId in visibleToList:
            canSee = CanSee()
            canSee.post = post
            canSee.visibleTo = authorId
            canSee.save()

    def delete(self, request, pid=None):
        """
        Deletes a post. Returns {'deleted': <post id>}.
        """
        # Get the post
        post = getPost(request, pid)
        # Save the id for the return
        postId = post.id
        # Delete the post
        post.delete()
        # Return
        data = {'deleted': postId}
        return JSONResponse(data)

    def get(self, request, pid=None):
        """
        Gets a post, serialized as JSON.
        """
        # Get post
        post = getPost(request, pid)
        # Serialize post
        postSer = PostSerializer(post)
        postData = postSer.data
        # TODO: Add query?
        # postData['query'] = 'post'
        return JSONResponse(postData)

    def post(self, request, pid=None):
        """
        Creates a post. Raises ResourceConflict if it already exists.
        Returns {'created': <post id>}.
        """
        # BUG FIX: 'timezone' was used below but never imported in this
        # module, so omitting 'published' raised NameError instead of
        # defaulting to now().
        from django.utils import timezone
        try:
            # This has the potential to raise NotFound AND MalformedId
            # If it's MalformedId we want it to fail
            post = getPost(request, pid)
        # We WANT it to be not found
        except NotFound:
            pass
        # No error was raised which means it already exists
        else:
            raise ResourceConflict('post',
                                   request.build_absolute_uri(request.path))
        # Get and validate data
        data = getPostData(request)
        validateData(data, postValidators)
        # Get id url
        url = pidToUrl(request, pid)
        # Fill in required fields
        post = Post()
        post.id = url
        post.title = data['title']
        post.contentType = data['contentType']
        post.content = data['content']
        post.author = Author.objects.get(id=data['author'])
        post.visibility = data['visibility']
        # Fill in unrequired fields
        post.unlisted = data.get('unlisted', False)
        post.description = data.get('description', '')
        post.published = data.get('published', timezone.now())
        # Save
        post.save()
        # Were there any categories?
        if data.get('categories'):
            self._createCategories(post, data['categories'])
        # Were there any users this should be particularly visibleTo
        if data.get('visibleTo'):
            self._createCanSees(post, data['visibleTo'])
        # Return
        data = {'created': post.id}
        return JSONResponse(data)

    def put(self, request, pid=None):
        """
        Updates a post; fields omitted from the body keep their current
        values. Returns {'updated': <post id>}.
        """
        # Get post
        post = getPost(request, pid)
        # Get data from PUT, don't require any fields
        data = getPostData(request, require=False)
        validateData(data, postValidators)
        # Update fields as appropriate
        post.title = data.get('title', post.title)
        post.description = data.get('description', post.description)
        post.published = data.get('published', post.published)
        post.contentType = data.get('contentType', post.contentType)
        post.content = data.get('content', post.content)
        if 'author' in data:
            post.author = Author.objects.get(id=data['author'])
        post.visibility = data.get('visibility', post.visibility)
        post.unlisted = data.get('unlisted', post.unlisted)
        post.save()
        # Should we update categories? Replace them wholesale.
        if 'categories' in data:
            Category.objects.filter(post=post).delete()
            self._createCategories(post, data['categories'])
        # Should we update the visibleTos? Replace them wholesale.
        if 'visibleTo' in data:
            CanSee.objects.filter(post=post).delete()
            self._createCanSees(post, data['visibleTo'])
        # Return
        data = {'updated': post.id}
        return JSONResponse(data)
| {
"repo_name": "CMPUT404W17T06/CMPUT404-project",
"path": "rest/singlePostView.py",
"copies": "1",
"size": "5349",
"license": "apache-2.0",
"hash": 2672465103251961000,
"line_mean": 29.5657142857,
"line_max": 76,
"alpha_frac": 0.5593568891,
"autogenerated": false,
"ratio": 4.560102301790281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5619459190890281,
"avg_score": null,
"num_lines": null
} |
from urllib.parse import urlsplit, urlunsplit
from django.core.paginator import Paginator
from rest_framework import serializers
import requests
from dash.models import Post, Author, Comment, Category, CanSee, \
RemoteCommentAuthor
from .models import RemoteCredentials
from .authUtils import getRemoteCredentials
class FollowSerializer(serializers.BaseSerializer):
    """
    Serializes a Follow into the author-shaped dict required by the spec.

    For a local friend the data comes straight from our Author table; for a
    remote friend we fill in placeholder values and then try to replace them
    by querying the remote node with stored credentials.
    """

    def to_representation(self, follow):
        data = {}
        try:
            # Local author: serve directly from our own database
            author = Author.objects.get(id=follow.friend)
            data['id'] = author.id
            data['host'] = author.host
            data['displayName'] = author.user.get_username()
            data['url'] = author.id
        except Author.DoesNotExist:
            # Build the fallback host from the friend's URL
            split = urlsplit(follow.friend)
            split = (split.scheme, split.netloc, '', '', '')
            url = urlunsplit(split) + '/'

            # Set everything up with placeholder values; if we can
            # successfully get a user from remote then we'll update
            followId = follow.friend
            data['id'] = followId
            data['host'] = url
            data['displayName'] = 'UnkownRemoteUser'
            data['url'] = followId

            remoteCreds = getRemoteCredentials(followId)
            if remoteCreds is not None:
                req = requests.get(followId, auth=(remoteCreds.username,
                                                   remoteCreds.password))
                if req.status_code == 200:
                    try:
                        # Try to parse JSON out
                        reqData = req.json()
                        # We could just pass along everything, but the spec
                        # says pick and choose these
                        data['id'] = reqData['id']
                        data['host'] = reqData['host']
                        data['displayName'] = reqData['displayName']
                        data['url'] = reqData['url']
                    # Couldn't parse json, just give up
                    except ValueError:
                        print('Could not parse JSON from author follow request')
                else:
                    print(('Got status code {} while requesting follow user. ' \
                           'Using "{}" for {}.') \
                          .format(req.status_code, remoteCreds, followId))
                    print('TEXT\n', req.text)
            else:
                print('Could not get remote credentials for follow id: {}' \
                      .format(followId))
        # BUG FIX: the representation was built but never returned, so DRF
        # rendered every follow as null
        return data
class AuthorSerializer(serializers.ModelSerializer):
    """
    Serializes an Author's model fields plus displayName, optionally
    embedding the author's follows as 'friends' when the serializer context
    has addFriends set.
    """

    class Meta:
        model = Author
        fields = ('id', 'host', 'url', 'github')

    def to_representation(self, author):
        represented = serializers.ModelSerializer.to_representation(self, author)
        represented['displayName'] = author.user.get_username()
        # Did the caller want the friends added?
        if self.context.get('addFriends', False):
            # Right now, we're calling all of our follows our friends,
            # if this needs to be actually VERIFIED friends it's going to be
            # a lot more costly..
            follows = author.follow.all()
            represented['friends'] = FollowSerializer(follows, many=True).data
        return represented
class AuthorFromIdSerializer(serializers.BaseSerializer):
    """
    Serializes an author referenced only by id (a URL).

    Looks the id up in the local Author table first, then falls back to the
    RemoteCommentAuthor records; re-raises DoesNotExist if the id is unknown
    to both.
    """

    def to_representation(self, authorId):
        # NOTE: removed the leftover debug print that fired on every
        # serialization ("AUTHOR FROM ID REP ...")
        data = {}
        try:
            author = Author.objects.get(id=authorId)
            data['id'] = author.id
            data['host'] = author.host
            data['displayName'] = author.user.get_username()
            data['url'] = author.url
            data['github'] = author.github
        # No sweat, they could be remote user
        except Author.DoesNotExist:
            try:
                author = RemoteCommentAuthor.objects.get(authorId=authorId)
                data['id'] = author.authorId
                data['host'] = author.host
                data['displayName'] = author.displayName
                data['url'] = author.authorId
                data['github'] = author.github
            # We couldn't find a remote author either?!
            except RemoteCommentAuthor.DoesNotExist:
                # Print some reasonable debug and blow up
                print('Could not get remote credentials for author id: {}' \
                      .format(authorId))
                raise
        return data
class CategorySerializer(serializers.BaseSerializer):
    """Renders a Category row as its bare category string."""

    def to_representation(self, category):
        name = category.category
        return name
class CanSeeSerializer(serializers.BaseSerializer):
    """Renders a CanSee row as its bare visibleTo value."""

    def to_representation(self, canSee):
        viewer = canSee.visibleTo
        return viewer
class PostSerializer(serializers.ModelSerializer):
    """
    Serializes a Post, attaching its categories, comment count, the first
    page of comments, source/origin URLs, and the visibleTo list on top of
    the model fields.
    """

    class Meta:
        model = Post
        fields = '__all__'

    # Nested serializer so 'author' renders as a full author object rather
    # than a bare id
    author = AuthorSerializer()

    def to_representation(self, post):
        rv = serializers.ModelSerializer.to_representation(self, post)
        # Attach the post's categories as a list of plain strings
        categories = Category.objects.filter(post=post)
        catSer = CategorySerializer(categories, many=True)
        rv['categories'] = catSer.data
        # The source and the origin is the same as the id -- so says the Hindle
        rv['source'] = rv['id']
        rv['origin'] = rv['id']
        # Get comments and add count to rv
        comments = Comment.objects.filter(post=post)
        count = comments.count()
        rv['count'] = count
        # Get number of comments to attach and add to rv; the page size
        # comes from the serializer context and defaults to 50
        pageSize = self.context.get('commentPageSize', 50)
        rv['size'] = pageSize if count > pageSize else count
        # Serialize and attach the first page of comments
        pager = Paginator(comments, pageSize)
        commSer = CommentSerializer(pager.page(1), many=True)
        rv['comments'] = commSer.data
        # Serialize and attach list of visibileTo
        canSees = CanSee.objects.filter(post=post)
        canSer = CanSeeSerializer(canSees, many=True)
        rv['visibleTo'] = canSer.data
        return rv
class CommentSerializer(serializers.ModelSerializer):
    """
    Serializes a Comment; the stored author id is expanded into a full
    author dict by AuthorFromIdSerializer.
    """

    class Meta:
        model = Comment
        fields = ('author', 'comment', 'contentType', 'published', 'id')

    # Expand the author id into a full author representation
    author = AuthorFromIdSerializer()
| {
"repo_name": "CMPUT404W17T06/CMPUT404-project",
"path": "rest/serializers.py",
"copies": "1",
"size": "6263",
"license": "apache-2.0",
"hash": 7819549094870637000,
"line_mean": 37.6604938272,
"line_max": 80,
"alpha_frac": 0.5732077279,
"autogenerated": false,
"ratio": 4.60514705882353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5678354786723531,
"avg_score": null,
"num_lines": null
} |
"""
Django settings for stream project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v90-c%*alar)fzk=a95a_aqli%*ks3*)q6tt1zeen05!ooa9gd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1', '[::1]']
LOGIN_REDIRECT_URL = '/login/'
LOGIN_URL = '/login/'
# Application definition
INSTALLED_APPS = [
'website',
'dash.apps.DashConfig',
'rest.apps.RestConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'stream.urls'
# Add static folder to STATIC_DIRS
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ["templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'stream.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Update database configuration with $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Rest framework settings
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'rest.verifyUtils.exceptionHandler',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest.authUtils.nodeToNodeBasicAuth',
)
}
| {
"repo_name": "CMPUT404W17T06/CMPUT404-project",
"path": "stream/settings.py",
"copies": "1",
"size": "4316",
"license": "apache-2.0",
"hash": -6690968736740382000,
"line_mean": 25.6419753086,
"line_max": 91,
"alpha_frac": 0.6953197405,
"autogenerated": false,
"ratio": 3.4037854889589907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45991052294589907,
"avg_score": null,
"num_lines": null
} |
"""stream URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views
from website.forms import LoginForm
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^dash/', include('dash.urls', namespace='dash')),
url(r'^', include('rest.urls', namespace='rest')),
url(r'^', include('website.urls', namespace="website")),
url(r'^login/$', views.login, {'template_name': 'login.html', 'authentication_form': LoginForm}, name='login'),
url(r'^logout/$', views.logout, {'next_page': '/'}),
#url(r'^stream', views.StreamView.as_view(), name='stream'),
]
| {
"repo_name": "CMPUT404W17T06/CMPUT404-project",
"path": "stream/urls.py",
"copies": "1",
"size": "1283",
"license": "apache-2.0",
"hash": -9002874958720669000,
"line_mean": 41.7666666667,
"line_max": 115,
"alpha_frac": 0.680436477,
"autogenerated": false,
"ratio": 3.4864130434782608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9662848636304963,
"avg_score": 0.0008001768346595933,
"num_lines": 30
} |
#TODO: Add test cases
import subprocess, re, datetime
from dateutil.parser import *
#Warning: If ffprobe's output is ever changed, this part might break.
def getLength(filename):
    """
    Return the ffprobe output lines that contain the stream duration.

    :param filename: Path of the media file to probe.
    :return: A list of output lines containing "Duration" (empty when
             ffprobe reports no duration, e.g. for non-video files).
    """
    # universal_newlines=True makes stdout yield str instead of bytes, so
    # the "Duration" substring test works on Python 3 as well as Python 2
    proc = subprocess.Popen(["ffprobe", filename],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            universal_newlines=True)
    # communicate() drains the pipe and waits for the child, avoiding a
    # zombie process and an unclosed pipe left by reading stdout directly
    output, _ = proc.communicate()
    # keepends=True preserves the readlines()-style trailing newlines
    return [x for x in output.splitlines(True) if "Duration" in x]
def getDurationTimestamp(filename):
    """
    Return the media duration of *filename* as a datetime.timedelta.

    Non-video files (no "Duration" line from ffprobe) yield an empty
    timedelta. (Will add exception handling later.)
    """
    lines = getLength(filename)
    # Check for case of not a video file
    if not lines:
        return datetime.timedelta()
    # Regular expression extracts the HH:MM:SS timestamp string from the
    # first matching ffprobe line
    stamp = re.findall(r'\d\d:\d\d:\d\d', lines[0])[0]
    # Turn the timestamp string into a datetime object, then a timedelta
    parsed = parse(stamp)
    return datetime.timedelta(hours=parsed.hour,
                              minutes=parsed.minute,
                              seconds=parsed.second)
| {
"repo_name": "timezombi/lsvd",
"path": "video_duration.py",
"copies": "1",
"size": "1032",
"license": "apache-2.0",
"hash": 8151305958726048000,
"line_mean": 31.25,
"line_max": 109,
"alpha_frac": 0.7509689922,
"autogenerated": false,
"ratio": 3.4864864864864864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47374554786864864,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Brandon C. Kelly'
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve
from scipy.optimize import minimize
import samplers
import multiprocessing
import _carmcmc as carmcmcLib
class CarmaModel(object):
"""
Class for performing statistical inference assuming a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, p=1, q=0):
"""
Constructor for the CarmaModel class.
:param time: The observation times.
:param y: The measured time series.
:param ysig: The standard deviation in the measurements errors on the time series.
:param p: The order of the autoregressive (AR) polynomial. Default is p = 1.
:param q: The order of the moving average (MA) polynomial. Default is q = 0. Note that p > q.
"""
try:
p > q
except ValueError:
" Order of AR polynomial, p, must be larger than order of MA polynomial, q."
# check that time values are unique and in ascending ordered
s_idx = np.argsort(time)
t_unique, u_idx = np.unique(time[s_idx], return_index=True)
u_idx = s_idx[u_idx]
# convert input to std::vector<double> extension class
self._time = carmcmcLib.vecD()
self._time.extend(time[u_idx])
self._y = carmcmcLib.vecD()
self._y.extend(y[u_idx])
self._ysig = carmcmcLib.vecD()
self._ysig.extend(ysig[u_idx])
# save parameters
self.time = time[u_idx]
self.y = y[u_idx]
self.ysig = ysig[u_idx]
self.p = p
self.q = q
self.mcmc_sample = None
def run_mcmc(self, nsamples, nburnin=None, ntemperatures=None, nthin=1, init=None, tmax=None):
"""
Run the MCMC sampler. This is actually a wrapper that calls the C++ code that runs the MCMC sampler.
:param nsamples: The number of samples from the posterior to generate.
:param ntemperatures: Number of parallel MCMC chains to run in the parallel tempering algorithm. Default is 1
(no tempering) for p = 1 and max(10, p+q) for p > 1.
:param nburnin: Number of burnin iterations to run. The default is nsamples / 2.
:param nthin: Thinning interval for the MCMC sampler. Default is 1 (no thinning).
:return: Either a CarmaSample or Car1Sample object, depending on the values of self.p. The CarmaSample object
will also be stored as a data member of the CarmaModel object.
"""
if ntemperatures is None:
ntemperatures = max(10, self.p + self.q)
if nburnin is None:
nburnin = nsamples / 2
if init is None:
init = carmcmcLib.vecD()
if tmax is None:
tmax = max(100, self.t.shape[0])
if self.p == 1:
# Treat the CAR(1) case separately
cppSample = carmcmcLib.run_mcmc_car1(nsamples, nburnin, self._time, self._y, self._ysig,
nthin, init)
# run_mcmc_car1 returns a wrapper around the C++ CAR1 class, convert to python object
sample = Car1Sample(self.time, self.y, self.ysig, cppSample)
else:
cppSample = carmcmcLib.run_mcmc_carma(nsamples, nburnin, self._time, self._y, self._ysig,
self.p, self.q, ntemperatures, False, nthin, init, tmax)
# run_mcmc_car returns a wrapper around the C++ CARMA class, convert to a python object
sample = CarmaSample(self.time, self.y, self.ysig, cppSample, q=self.q)
self.mcmc_sample = sample
return sample
def get_mle(self, p, q, ntrials=100, njobs=1):
"""
Return the maximum likelihood estimate (MLE) of the CARMA model parameters. This is done by using the
L-BFGS-B algorithm from scipy.optimize on ntrials randomly distributed starting values of the parameters. This
this return NaN for more complex CARMA models, especially if the data are not well-described by a CARMA model.
In addition, the likelihood space can be highly multi-modal, and there is no guarantee that the global MLE will
be found using this procedure.
@param p: The order of the AR polynomial.
@param q: The order of the MA polynomial. Must be q < p.
@param ntrials: The number of random starting values for the optimizer. Default is 100.
@param njobs: The number of processors to use. If njobs = -1, then all of them are used. Default is njobs = 1.
@return: The scipy.optimize.Result object corresponding to the MLE.
"""
if njobs == -1:
njobs = multiprocessing.cpu_count()
args = [(p, q, self.time, self.y, self.ysig)] * ntrials
if njobs == 1:
MLEs = map(_get_mle_single, args)
else:
# use multiple processors
pool = multiprocessing.Pool(njobs)
# warm up the pool
pool.map(int, range(multiprocessing.cpu_count()))
MLEs = pool.map(_get_mle_single, args)
pool.terminate()
best_MLE = MLEs[0]
for MLE in MLEs:
if MLE.fun < best_MLE.fun: # note that MLE.fun is -loglik since we use scipy.optimize.minimize
# new MLE found, save this value
best_MLE = MLE
print best_MLE.message
return best_MLE
def choose_order(self, pmax, qmax=None, pqlist=None, njobs=1, ntrials=100):
"""
Choose the order of the CARMA model by minimizing the AICc(p,q). This first computes the maximum likelihood
estimate on a grid of (p,q) values using self.get_mle, and then choosing the value of (p,q) that minimizes
the AICc. These values of p and q are stored as self.p and self.q.
@param pmax: The maximum order of the AR(p) polynomial to search over.
@param qmax: The maximum order of the MA(q) polynomial to search over. If none, search over all possible values
of q < p.
@param pqlist: A list of (p,q) tuples. If supplied, the (p,q) pairs are used instead of being generated from the
values of pmax and qmax.
@param njobs: The number of processors to use for calculating the MLE. A value of njobs = -1 will use all
available processors.
@param ntrials: The number of random starts to use in the MLE, the default is 100.
@return: A tuple of (MLE, pqlist, AICc). MLE is a scipy.optimize.Result object containing the maximum-likelihood
estimate. pqlist contains the values of (p,q) used in the search, and AICc contains the values of AICc for
each (p,q) pair in pqlist.
"""
try:
pmax > 0
except ValueError:
"Order of AR polynomial must be at least 1."
if qmax is None:
qmax = pmax - 1
try:
pmax > qmax
except ValueError:
" Order of AR polynomial, p, must be larger than order of MA polynimial, q."
if pqlist is None:
pqlist = []
for p in xrange(1, pmax+1):
for q in xrange(p):
pqlist.append((p, q))
MLEs = []
for pq in pqlist:
MLE = self.get_mle(pq[0], pq[1], ntrials=ntrials, njobs=njobs)
MLEs.append(MLE)
best_AICc = 1e300
AICc = []
best_MLE = MLEs[0]
print 'p, q, AICc:'
for MLE, pq in zip(MLEs, pqlist):
nparams = 2 + pq[0] + pq[1]
deviance = 2.0 * MLE.fun
this_AICc = 2.0 * nparams + deviance + 2.0 * nparams * (nparams + 1.0) / (self.time.size - nparams - 1.0)
print pq[0], pq[1], this_AICc
AICc.append(this_AICc)
if this_AICc < best_AICc:
# new optimum found, save values
best_MLE = MLE
best_AICc = this_AICc
self.p = pq[0]
self.q = pq[1]
print 'Model with best AICc has p =', self.p, ' and q = ', self.q
return best_MLE, pqlist, AICc
def _get_mle_single(args):
    """
    Compute one MLE trial for a CARMA(p,q) model from a random start.

    :param args: A tuple (p, q, time, y, ysig) — the polynomial orders and
        the time series arrays. Packed as one tuple so this can be used
        with (multiprocessing) map.
    :return: The scipy.optimize result from L-BFGS-B for this trial.
    """
    p, q, time, y, ysig = args

    nsamples = 1
    nburnin = 25
    nwalkers = 10

    # get a CARMA process object by running the MCMC sampler for a very short period. This will provide the initial
    # guess and the function to compute the log-posterior
    tvec = arrayToVec(time)  # convert to std::vector<double> object for input into C++ wrapper
    yvec = arrayToVec(y)
    ysig_vec = arrayToVec(ysig)
    if p == 1:
        # Treat the CAR(1) case separately
        CarmaProcess = carmcmcLib.run_mcmc_car1(nsamples, nburnin, tvec, yvec, ysig_vec, 1)
    else:
        CarmaProcess = carmcmcLib.run_mcmc_carma(nsamples, nburnin, tvec, yvec, ysig_vec,
                                                 p, q, nwalkers, False, 1)

    initial_theta = CarmaProcess.getSamples()
    initial_theta = np.array(initial_theta[0])
    initial_theta[1] = 1.0  # initial guess for measurement error scale parameter

    # set bounds on parameters
    ysigma = y.std()
    dt = time[1:] - time[:-1]
    max_freq = 1.0 / dt.min()
    max_freq = 0.9 * max_freq
    min_freq = 1.0 / (time.max() - time.min())

    # bounds: amplitude, measurement-error scale, unbounded mean
    theta_bnds = [(ysigma / 10.0, 10.0 * ysigma)]
    theta_bnds.append((0.9, 1.1))
    theta_bnds.append((None, None))

    if p == 1:
        theta_bnds.append((np.log(min_freq), np.log(max_freq)))
    else:
        # monte carlo estimates of bounds on quadratic term parameterization of AR(p) roots
        qterm_lbound = min(min_freq ** 2, 2.0 * min_freq)
        qterm_lbound = np.log(qterm_lbound)
        qterm_ubound = max(max_freq ** 2, 2.0 * max_freq)
        qterm_ubound = np.log(qterm_ubound)
        theta_bnds.extend([(qterm_lbound, qterm_ubound)] * p)
        # no bounds on MA coefficients
        if q > 0:
            theta_bnds.extend([(None, None)] * q)

        CarmaProcess.SetMLE(True)  # ignore the prior bounds when calculating CarmaProcess.getLogDensity in C++ code

    # make sure initial guess of theta does not violate bounds
    for j in xrange(len(initial_theta)):
        if theta_bnds[j][0] is not None:
            if (initial_theta[j] < theta_bnds[j][0]) or (initial_theta[j] > theta_bnds[j][1]):
                initial_theta[j] = np.random.uniform(theta_bnds[j][0], theta_bnds[j][1])

    thisMLE = minimize(_carma_loglik, initial_theta, args=(CarmaProcess,), method="L-BFGS-B", bounds=theta_bnds)

    return thisMLE
def _carma_loglik(theta, args):
    """
    Negative log-density of a CARMA process at parameter vector theta.

    :param theta: Sequence of CARMA parameter values.
    :param args: The C++ CARMA process object exposing getLogDensity.
    :return: The negated log-density, suitable for scipy.optimize.minimize.
    """
    carma_process = args
    packed = carmcmcLib.vecD()
    packed.extend(theta)
    return -carma_process.getLogDensity(packed)
class CarmaSample(samplers.MCMCSample):
"""
Class for storing and analyzing the MCMC samples of a CARMA(p,q) model.
"""
    def __init__(self, time, y, ysig, sampler, q=0, filename=None, MLE=None, trace=None, logpost=None, loglike=None):
        """
        Constructor for the CarmaSample class. In general a CarmaSample object should never be constructed directly,
        but should be constructed by calling CarmaModel.run_mcmc().

        @param time: The array of time values for the time series.
        @param y: The array of measured values for the time series.
        @param ysig: The array of measurement noise standard deviations for the time series.
        @param sampler: A C++ object return by _carmcmcm.run_carma_mcmc(). In general this should not be obtained
            directly, but a CarmaSample object should be obtained by running CarmaModel.run_mcmc().
        @param q: The order of the MA polynomial.
        @param filename: A string of the name of the file containing the MCMC samples generated by the C++ carpack.
        @param MLE: The maximum-likelihood estimate, obtained as a scipy.optimize.Result object.
        @param trace: Optional pre-computed array of MCMC samples; pulled from the sampler when None.
        @param logpost: Optional pre-computed log-posterior values; pulled from the sampler when None.
        @param loglike: Optional pre-computed log-likelihood values; recomputed via the sampler when None.
        """
        self.time = time  # The time values of the time series
        self.y = y  # The measured values of the time series
        self.ysig = ysig  # The standard deviation of the measurement errors of the time series
        self.q = q  # order of moving average polynomial

        # fall back to the sampler for the raw chains when not supplied
        if logpost is None:
            logpost = np.array(sampler.GetLogLikes())
        if trace is None:
            trace = np.array(sampler.getSamples())

        self._trace = trace

        super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)

        # now calculate the AR(p) characteristic polynomial roots, coefficients, MA coefficients, and amplitude of
        # driving noise and add them to the MCMC samples
        print "Calculating PSD Lorentzian parameters..."
        self._ar_roots()
        print "Calculating coefficients of AR polynomial..."
        self._ar_coefs()
        if self.q > 0:
            print "Calculating coefficients of MA polynomial..."
            self._ma_coefs(trace)
        print "Calculating sigma..."
        self._sigma_noise()
        if loglike is None:
            # add the log-likelihoods, evaluated per MCMC sample via the C++ sampler
            print "Calculating log-likelihoods..."
            loglik = np.empty(logpost.size)
            sampler.SetMLE(True)
            for i in xrange(logpost.size):
                std_theta = carmcmcLib.vecD()
                std_theta.extend(trace[i, :])
                # loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
                loglik[i] = sampler.getLogDensity(std_theta)
        else:
            loglik = loglike

        self._samples['loglik'] = loglik
        # make the parameter names (i.e., the keys) public so the user knows how to get them
        self.parameters = self._samples.keys()
        self.newaxis()

        self.mle = {}
        if MLE is not None:
            # add maximum a posteriori estimate
            self.add_mle(MLE)
    @property
    def trace(self):
        """The raw array of MCMC samples, one row per sample."""
        return self._trace
    def add_mle(self, MLE):
        """
        Add the maximum-likelihood estimate to the CarmaSample object. This will convert the MLE to a dictionary, and
        add it as a data member of the CarmaSample object. The values can be accessed as self.mle['parameter']. For
        example, the MLE of the CARMA process variance is accessed as self.mle['var'].

        @param MLE: The maximum-likelihood estimate, returned by CarmaModel.get_mle() or CarmaModel.choose_order().
        """
        self.mle = {'loglik': -MLE.fun, 'var': MLE.x[0] ** 2, 'measerr_scale': MLE.x[1], 'mu': MLE.x[2]}

        # add AR polynomial roots and PSD lorentzian parameters
        # (the AR polynomial is parameterized by log quadratic coefficients,
        # stored in MLE.x[3:p+3])
        quad_coefs = np.exp(MLE.x[3:self.p + 3])
        ar_roots = np.zeros(self.p, dtype=complex)
        psd_width = np.zeros(self.p)
        psd_cent = np.zeros(self.p)

        # each quadratic factor contributes a conjugate/real pair of roots
        for i in xrange(self.p / 2):
            quad1 = quad_coefs[2 * i]
            quad2 = quad_coefs[2 * i + 1]

            discriminant = quad2 ** 2 - 4.0 * quad1
            if discriminant > 0:
                sqrt_disc = np.sqrt(discriminant)
            else:
                sqrt_disc = 1j * np.sqrt(np.abs(discriminant))

            ar_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
            ar_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
            psd_width[2 * i] = -np.real(ar_roots[2 * i]) / (2.0 * np.pi)
            psd_cent[2 * i] = np.abs(np.imag(ar_roots[2 * i])) / (2.0 * np.pi)
            psd_width[2 * i + 1] = -np.real(ar_roots[2 * i + 1]) / (2.0 * np.pi)
            psd_cent[2 * i + 1] = np.abs(np.imag(ar_roots[2 * i + 1])) / (2.0 * np.pi)

        if self.p % 2 == 1:
            # p is odd, so add in root from linear term
            ar_roots[-1] = -quad_coefs[-1]
            psd_cent[-1] = 0.0
            psd_width[-1] = quad_coefs[-1] / (2.0 * np.pi)

        self.mle['ar_roots'] = ar_roots
        self.mle['psd_width'] = psd_width
        self.mle['psd_cent'] = psd_cent
        self.mle['ar_coefs'] = np.poly(ar_roots).real

        # now calculate the moving average coefficients
        if self.q == 0:
            self.mle['ma_coefs'] = 1.0
        else:
            # MA quadratic coefficients follow the AR ones in MLE.x
            quad_coefs = np.exp(MLE.x[3 + self.p:])
            ma_roots = np.empty(quad_coefs.size, dtype=complex)
            for i in xrange(self.q / 2):
                quad1 = quad_coefs[2 * i]
                quad2 = quad_coefs[2 * i + 1]

                discriminant = quad2 ** 2 - 4.0 * quad1
                if discriminant > 0:
                    sqrt_disc = np.sqrt(discriminant)
                else:
                    sqrt_disc = 1j * np.sqrt(np.abs(discriminant))

                ma_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
                ma_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)

            if self.q % 2 == 1:
                # q is odd, so add in root from linear term
                ma_roots[-1] = -quad_coefs[-1]

            ma_coefs = np.poly(ma_roots)
            # normalize so constant in polynomial is unity, and reverse order to be consistent with MA
            # representation
            self.mle['ma_coefs'] = np.real(ma_coefs / ma_coefs[self.q])[::-1]

        # finally, calculate sigma, the standard deviation in the driving white noise
        unit_var = carma_variance(1.0, self.mle['ar_roots'], np.atleast_1d(self.mle['ma_coefs']))
        self.mle['sigma'] = np.sqrt(self.mle['var'] / unit_var.real)
    def set_logpost(self, logpost):
        """
        Add the input log-posterior MCMC values to the CarmaSample parameter dictionary.

        @param logpost: The values of the log-posterior obtained from the MCMC sampler.
        """
        self._samples['logpost'] = logpost  # log-posterior of the CAR(p) model
    def generate_from_trace(self, trace):
        """
        Generate the dictionary of MCMC samples for the CARMA process parameters from the input array.

        @param trace: An array containing the MCMC samples, one row per sample; columns are
            (sigma, measerr_scale, mu, log quad_coefs..., MA terms...).
        """
        # Figure out how many AR terms we have
        self.p = trace.shape[1] - 3 - self.q
        names = ['var', 'measerr_scale', 'mu', 'quad_coefs']
        if names != self._samples.keys():
            idx = 0  # NOTE(review): unused; looks like leftover code
            # Parameters are not already in the dictionary, add them.
            self._samples['var'] = (trace[:, 0] ** 2)  # Variance of the CAR(p) process
            self._samples['measerr_scale'] = trace[:, 1]  # Measurement errors are scaled by this much.
            self._samples['mu'] = trace[:, 2]  # model mean of time series
            # AR(p) polynomial is factored as a product of quadratic terms:
            #   alpha(s) = (quad_coefs[0] + quad_coefs[1] * s + s ** 2) * ...
            self._samples['quad_coefs'] = np.exp(trace[:, 3:self.p + 3])
    def generate_from_file(self, filename):
        """
        Build the dictionary of parameter samples from an ascii file of MCMC samples from carpack.

        :param filename: The name of the file containing the MCMC samples generated by carpack
            (passed as a one-element sequence; only filename[0] is read).
        """
        # TODO: put in exceptions to make sure files are ready correctly

        # Grab the MCMC output; the last column is the log-posterior
        trace = np.genfromtxt(filename[0], skip_header=1)

        self.generate_from_trace(trace[:, 0:-1])
        self.set_logpost(trace[:, -1])
    def _ar_roots(self):
        """
        Calculate the roots of the CARMA(p,q) characteristic polynomial and add them to the MCMC samples,
        along with the corresponding PSD Lorentzian centroids and widths.
        """
        var = self._samples['var']
        quad_coefs = self._samples['quad_coefs']
        self._samples['ar_roots'] = np.empty((var.size, self.p), dtype=complex)
        self._samples['psd_centroid'] = np.empty((var.size, self.p))
        self._samples['psd_width'] = np.empty((var.size, self.p))

        # each quadratic factor of the AR polynomial contributes a pair of roots
        for i in xrange(self.p / 2):
            quad1 = quad_coefs[:, 2 * i]
            quad2 = quad_coefs[:, 2 * i + 1]

            # complex roots when the discriminant is negative
            discriminant = quad2 ** 2 - 4.0 * quad1
            sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
            self._samples['ar_roots'][:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
            self._samples['ar_roots'][:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
            self._samples['psd_width'][:, 2 * i] = -np.real(self._samples['ar_roots'][:, 2 * i]) / (2.0 * np.pi)
            self._samples['psd_centroid'][:, 2 * i] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i])) / \
                (2.0 * np.pi)
            self._samples['psd_width'][:, 2 * i + 1] = -np.real(self._samples['ar_roots'][:, 2 * i + 1]) / (2.0 * np.pi)
            self._samples['psd_centroid'][:, 2 * i + 1] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i + 1])) / \
                (2.0 * np.pi)

        if self.p % 2 == 1:
            # p is odd, so add in root from linear term
            self._samples['ar_roots'][:, -1] = -quad_coefs[:, -1]
            self._samples['psd_centroid'][:, -1] = 0.0
            self._samples['psd_width'][:, -1] = quad_coefs[:, -1] / (2.0 * np.pi)
    def _ma_coefs(self, trace):
        """
        Calculate the CARMA(p,q) moving average coefficients and add them to the MCMC samples.

        @param trace: The raw MCMC sample array; MA quadratic coefficients are read from
            columns 3 + p onward.
        """
        nsamples = trace.shape[0]
        if self.q == 0:
            # no MA terms: the coefficient is identically one
            self._samples['ma_coefs'] = np.ones((nsamples, 1))
        else:
            quad_coefs = np.exp(trace[:, 3 + self.p:])
            roots = np.empty(quad_coefs.shape, dtype=complex)
            # each quadratic factor of the MA polynomial contributes a pair of roots
            for i in xrange(self.q / 2):
                quad1 = quad_coefs[:, 2 * i]
                quad2 = quad_coefs[:, 2 * i + 1]

                discriminant = quad2 ** 2 - 4.0 * quad1
                sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
                roots[:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
                roots[:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)

            if self.q % 2 == 1:
                # q is odd, so add in root from linear term
                roots[:, -1] = -quad_coefs[:, -1]

            coefs = np.empty((nsamples, self.q + 1), dtype=complex)
            for i in xrange(nsamples):
                coefs_i = np.poly(roots[i, :])
                # normalize so constant in polynomial is unity, and reverse order to be consistent with MA
                # representation
                coefs[i, :] = (coefs_i / coefs_i[self.q])[::-1]

            self._samples['ma_coefs'] = coefs.real
    def _ar_coefs(self):
        """
        Calculate the CARMA(p,q) autoregressive coefficients and add them to the MCMC samples.
        """
        roots = self._samples['ar_roots']
        # expand the roots into polynomial coefficients, one row per MCMC sample
        coefs = np.empty((roots.shape[0], self.p + 1), dtype=complex)
        for i in xrange(roots.shape[0]):
            coefs[i, :] = np.poly(roots[i, :])

        self._samples['ar_coefs'] = coefs.real
    def _sigma_noise(self):
        """
        Calculate the MCMC samples of the standard deviation of the white noise driving process and add them to the
        MCMC samples.
        """
        # get the CARMA(p,q) model variance of the time series
        var = self._samples['var']

        # get the roots of the AR(p) characteristic polynomial
        ar_roots = self._samples['ar_roots']

        # get the moving average coefficients
        ma_coefs = self._samples['ma_coefs']

        # calculate the variance of a CAR(p) process, assuming sigma = 1.0
        sigma1_variance = np.zeros_like(var) + 0j
        for k in xrange(self.p):
            # denominator: product over the other roots
            denom = -2.0 * ar_roots[:, k].real + 0j
            for l in xrange(self.p):
                if l != k:
                    denom *= (ar_roots[:, l] - ar_roots[:, k]) * (np.conjugate(ar_roots[:, l]) + ar_roots[:, k])

            # numerator: MA polynomial evaluated at +/- the k-th root
            ma_sum1 = np.zeros_like(ar_roots[:, 0])
            ma_sum2 = ma_sum1.copy()
            for l in xrange(ma_coefs.shape[1]):
                ma_sum1 += ma_coefs[:, l] * ar_roots[:, k] ** l
                ma_sum2 += ma_coefs[:, l] * (-1.0 * ar_roots[:, k]) ** l
            numer = ma_sum1 * ma_sum2
            sigma1_variance += numer / denom

        # scale the requested variance by the unit-sigma variance to recover sigma^2
        sigsqr = var / sigma1_variance.real

        # add the white noise sigmas to the MCMC samples
        self._samples['sigma'] = np.sqrt(sigsqr)
def plot_power_spectrum(self, percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None,
                        doShow=True):
    """
    Plot the posterior median and the credibility interval corresponding to percentile of the CARMA(p,q) PSD. This
    function returns a tuple containing the lower and upper PSD credibility intervals as a function of frequency,
    the median PSD as a function of frequency, and the frequencies.

    :rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies). If no subplot axes object
        is supplied (i.e., if sp = None), then the figure object created here will also be returned as the last
        element of the tuple.
    :param percentile: The percentile of the PSD credibility interval to plot.
    :param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
        of them. Use less samples for increased speed.
    :param plot_log: A boolean. If true, then a logarithmic plot is made.
    :param color: The color of the shaded credibility region.
    :param alpha: The transparency level.
    :param sp: A matplotlib subplot axes object to use.
    :param doShow: If true, call plt.show()
    :raises ValueError: If nsamples exceeds the number of MCMC samples available.
    """
    sigmas = self._samples['sigma']
    ar_coefs = self._samples['ar_coefs']
    ma_coefs = self._samples['ma_coefs']
    if nsamples is None:
        # Use all of the MCMC samples
        nsamples = sigmas.shape[0]
    else:
        # the original try/except check was a no-op (a bare comparison never raises),
        # so validate explicitly
        if nsamples > sigmas.shape[0]:
            raise ValueError("nsamples must be less than the total number of MCMC samples.")
        nsamples0 = sigmas.shape[0]
        # thin the chain with an integer (floor-division) stride so the indices are ints
        index = np.arange(nsamples) * (nsamples0 // nsamples)
        sigmas = sigmas[index]
        ar_coefs = ar_coefs[index]
        ma_coefs = ma_coefs[index]

    nfreq = 1000
    dt_min = self.time[1:] - self.time[0:self.time.size - 1]
    dt_min = dt_min.min()
    dt_max = self.time.max() - self.time.min()

    # Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
    # maximum time scales probed by the time series.
    freq_max = 0.5 / dt_min
    freq_min = 1.0 / dt_max

    # logarithmically-spaced frequency grid
    frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
    frequencies = np.exp(frequencies)
    psd_credint = np.empty((nfreq, 3))

    lower = (100.0 - percentile) / 2.0  # lower and upper intervals for credible region
    upper = 100.0 - lower

    # Compute the PSDs from the MCMC samples
    omega = 2.0 * np.pi * 1j * frequencies
    ar_poly = np.zeros((nfreq, nsamples), dtype=complex)
    ma_poly = np.zeros_like(ar_poly)
    for k in range(self.p):
        # Here we compute:
        #   alpha(omega) = ar_coefs[0] * omega^p + ar_coefs[1] * omega^(p-1) + ... + ar_coefs[p]
        # Note that ar_coefs[0] = 1.0.
        argrid, omgrid = np.meshgrid(ar_coefs[:, k], omega)
        ar_poly += argrid * (omgrid ** (self.p - k))
    # add in the constant term of alpha(omega), broadcast over frequencies
    ar_poly += ar_coefs[:, self.p]
    for k in range(ma_coefs.shape[1]):
        # Here we compute:
        #   delta(omega) = ma_coefs[0] + ma_coefs[1] * omega + ... + ma_coefs[q] * omega^q
        magrid, omgrid = np.meshgrid(ma_coefs[:, k], omega)
        ma_poly += magrid * (omgrid ** k)

    psd_samples = np.squeeze(sigmas) ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2

    # Now compute credibility interval for power spectrum
    psd_credint[:, 0] = np.percentile(psd_samples, lower, axis=1)
    psd_credint[:, 2] = np.percentile(psd_samples, upper, axis=1)
    psd_credint[:, 1] = np.median(psd_samples, axis=1)

    # Plot the power spectra
    fig = None
    if sp is None:
        # no axes supplied: make our own figure and remember to return it to the caller
        fig = plt.figure()
        sp = fig.add_subplot(111)

    if plot_log:
        # plot the posterior median first
        sp.loglog(frequencies, psd_credint[:, 1], color=color)
    else:
        sp.plot(frequencies, psd_credint[:, 1], color=color)

    sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
    sp.set_xlim(frequencies.min(), frequencies.max())
    sp.set_xlabel('Frequency')
    sp.set_ylabel('Power Spectrum')

    if doShow:
        plt.show()

    if fig is not None:
        # the original re-tested "sp == None" here, but sp had been reassigned above, so
        # the figure was never returned despite the docstring; track it with fig instead
        return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
    else:
        return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
def makeKalmanFilter(self, bestfit):
    """
    Construct the wrapped C++ Kalman filter for a point estimate of the CARMA(p,q) parameters.

    :param bestfit: How to define the point estimate: 'map' (maximum a posteriori),
        'median', 'mean', or anything else for a random draw from the MCMC samples.
    :rtype : A tuple (kfilter, mu) containing the wrapped C++ KalmanFilterp object and the
        mean value used to center the time series.
    """
    if bestfit == 'map':
        # use maximum a posteriori estimate
        max_index = self._samples['logpost'].argmax()
        sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
        mu = self._samples['mu'][max_index][0]
        ar_roots = self._samples['ar_roots'][max_index]
        ma_coefs = self._samples['ma_coefs'][max_index]
    elif bestfit == 'median':
        # use posterior median estimate
        sigsqr = np.median(self._samples['sigma']) ** 2
        mu = np.median(self._samples['mu'])
        ar_roots = np.median(self._samples['ar_roots'], axis=0)
        ma_coefs = np.median(self._samples['ma_coefs'], axis=0)
    elif bestfit == 'mean':
        # use posterior mean as the best-fit
        sigsqr = np.mean(self._samples['sigma'] ** 2)
        mu = np.mean(self._samples['mu'])
        ar_roots = np.mean(self._samples['ar_roots'], axis=0)
        ma_coefs = np.mean(self._samples['ma_coefs'], axis=0)
    else:
        # use a random draw from the posterior. np.random.randint excludes its upper
        # bound, fixing the off-by-one of the deprecated random_integers(0, N) call,
        # which was inclusive and could index one past the end of the sample arrays.
        # list(...) keeps the .values() access working on both Python 2 and 3.
        nsamples = list(self._samples.values())[0].shape[0]
        random_index = np.random.randint(nsamples)
        sigsqr = (self._samples['sigma'][random_index] ** 2)[0]
        mu = self._samples['mu'][random_index][0]
        ar_roots = self._samples['ar_roots'][random_index]
        ma_coefs = self._samples['ma_coefs'][random_index]

    # expose C++ Kalman filter class to python
    kfilter = carmcmcLib.KalmanFilterp(arrayToVec(self.time),
                                       arrayToVec(self.y - mu),
                                       arrayToVec(self.ysig),
                                       sigsqr,
                                       arrayToVec(ar_roots, carmcmcLib.vecC),
                                       arrayToVec(ma_coefs))
    return kfilter, mu
def assess_fit(self, bestfit="map", nplot=256, doShow=True):
    """
    Display plots and provide useful information for assessing the quality of the CARMA(p,q) model fit.

    Produces a 2x2 figure: the interpolated time series with 1-sigma bands, the
    standardized residuals with a histogram against the standard normal, and the
    autocorrelation functions of the residuals and squared residuals against the
    95% white-noise bands.

    :param bestfit: A string specifying how to define 'best-fit'. Can be the maximum a posteriori value ("map"),
        the posterior mean ("mean"), or the posterior median ("median").
    :param nplot: The number of interpolated time series values to plot.
    :param doShow: If true, call pyplot.show(). Else if false, return the matplotlib figure object.
    :raises ValueError: If bestfit is not one of 'map', 'median', or 'mean'.
    """
    bestfit = bestfit.lower()
    # the original try/except validation here could never fire; check explicitly
    if bestfit not in ['map', 'median', 'mean']:
        raise ValueError("bestfit must be one of 'map', 'median', or 'mean'")

    fig = plt.figure()
    # compute the marginal mean and variance of the predicted values
    time_predict = np.linspace(self.time.min(), self.time.max(), nplot)
    predicted_mean, predicted_var = self.predict(time_predict, bestfit=bestfit)
    predicted_low = predicted_mean - np.sqrt(predicted_var)
    predicted_high = predicted_mean + np.sqrt(predicted_var)

    # plot the time series and the marginal 1-sigma error bands
    plt.subplot(221)
    plt.fill_between(time_predict, predicted_low, predicted_high, color='cyan')
    plt.plot(time_predict, predicted_mean, '-b', label='Interpolation')
    plt.plot(self.time, self.y, 'k.', label='Data')
    plt.xlabel('Time')
    plt.xlim(self.time.min(), self.time.max())
    #plt.legend()

    # plot the standardized residuals and compare with the standard normal
    kfilter, mu = self.makeKalmanFilter(bestfit)
    kfilter.Filter()
    kmean = np.asarray(kfilter.GetMean())
    kvar = np.asarray(kfilter.GetVar())
    standardized_residuals = (self.y - mu - kmean) / np.sqrt(kvar)
    plt.subplot(222)
    plt.xlabel('Time')
    plt.ylabel('Standardized Residuals')
    plt.xlim(self.time.min(), self.time.max())

    # Now add the histogram of values to the standardized residuals plot
    pdf, bin_edges = np.histogram(standardized_residuals, bins=10)
    bin_edges = bin_edges[0:pdf.size]
    # Stretch the PDF so that it is readable on the residual plot when plotted horizontally
    pdf = pdf / float(pdf.max()) * 0.4 * self.time.max()
    # Add the histogram to the plot
    plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0])
    # now overplot the expected standard normal distribution
    expected_pdf = np.exp(-0.5 * bin_edges ** 2)
    expected_pdf = expected_pdf / expected_pdf.max() * 0.4 * self.time.max()
    plt.plot(expected_pdf, bin_edges, 'DarkOrange', lw=2)
    plt.plot(self.time, standardized_residuals, '.k')

    # plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white
    # noise
    plt.subplot(223)
    maxlag = min(50, self.time.size - 1)
    wnoise_upper = 1.96 / np.sqrt(self.time.size)
    wnoise_lower = -1.96 / np.sqrt(self.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(standardized_residuals, maxlags=maxlag, lw=2)
    plt.xlim(0, maxlag)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Residuals')

    # plot the autocorrelation function of the squared residuals and compare with the 95% confidence intervals for
    # white noise
    plt.subplot(224)
    squared_residuals = standardized_residuals ** 2
    wnoise_upper = 1.96 / np.sqrt(self.time.size)
    wnoise_lower = -1.96 / np.sqrt(self.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(squared_residuals - squared_residuals.mean(), maxlags=maxlag,
                                                    lw=2)
    plt.xlim(0, maxlag)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Sqrd. Resid.')
    plt.tight_layout()

    if doShow:
        plt.show()
    else:
        return fig
def predict(self, time, bestfit='map'):
    """
    Return the predicted value of the time series and its standard deviation at the input time(s) given the best-fit
    value of the CARMA(p,q) model and the measured time series.

    :param time: A scalar or numpy array containing the time values to predict the time series at.
    :param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior ("map"), the posterior
        mean ("mean"), the posterior median ("median"), or a random sample from the MCMC sampler ("random").
    :rtype : A tuple of numpy arrays containing the expected value and variance of the time series at the input
        time values.
    :raises ValueError: If bestfit is not one of 'map', 'median', 'mean', or 'random'.
    """
    bestfit = bestfit.lower()
    # the original try/except validation here could never fire; check explicitly
    if bestfit not in ['map', 'median', 'mean', 'random']:
        raise ValueError("bestfit must be one of 'map', 'median', 'mean', or 'random'")

    # note that KalmanFilter class assumes the time series has zero mean
    kfilter, mu = self.makeKalmanFilter(bestfit)
    kfilter.Filter()
    if np.isscalar(time):
        pred = kfilter.Predict(time)
        yhat = pred.first
        yhat_var = pred.second
    else:
        yhat = np.empty(time.size)
        yhat_var = np.empty(time.size)
        for i in range(time.size):
            pred = kfilter.Predict(time[i])
            yhat[i] = pred.first
            yhat_var[i] = pred.second

    yhat += mu  # add mean back into time series

    return yhat, yhat_var
def simulate(self, time, bestfit='map'):
    """
    Simulate a time series at the input time(s) given the best-fit value of the CARMA(p,q) model and the measured
    time series.

    :param time: A scalar or numpy array containing the time values to simulate the time series at.
    :param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior ("map"), the posterior
        mean ("mean"), the posterior median ("median"), or a random sample from the MCMC sampler ("random").
    :rtype : The time series values simulated at the input values of time.
    :raises ValueError: If bestfit is not one of 'map', 'median', 'mean', or 'random'.
    """
    bestfit = bestfit.lower()
    # the original try/except validation here could never fire; check explicitly
    if bestfit not in ['map', 'median', 'mean', 'random']:
        raise ValueError("bestfit must be one of 'map', 'median', 'mean', or 'random'")

    # note that KalmanFilter class assumes the time series has zero mean
    kfilter, mu = self.makeKalmanFilter(bestfit)
    kfilter.Filter()

    # package the requested times as the wrapped C++ vector type
    vtime = carmcmcLib.vecD()
    if np.isscalar(time):
        vtime.append(time)
    else:
        vtime.extend(time)

    ysim = np.asarray(kfilter.Simulate(vtime))
    ysim += mu  # add mean back into time series

    return ysim
def DIC(self):
    """
    Return the Deviance Information Criterion for the model.

    The deviance is D = -2 * log-likelihood, and the DIC is
        DIC = mean(D) + 0.5 * var(D),
    where the second term estimates the effective number of parameters.
    """
    deviance = -2.0 * self._samples['loglik']
    # effective number of parameters, estimated from the spread of the deviance
    effective_npar = 0.5 * np.var(deviance, axis=0)
    return np.mean(deviance, axis=0) + effective_npar
def arrayToVec(array, arrType=carmcmcLib.vecD):
    """
    Convert the input numpy array to a python wrapper of a C++ std::vector<double> object.

    :param array: The array-like of values to copy.
    :param arrType: The wrapped C++ vector type to construct; defaults to carmcmcLib.vecD.
    """
    cpp_vector = arrType()
    cpp_vector.extend(array)
    return cpp_vector
class Car1Sample(CarmaSample):
    """MCMC samples for a CAR(1) process, the p = 1, q = 0 special case of CarmaSample."""

    def __init__(self, time, y, ysig, sampler, filename=None):
        """
        Constructor for a CAR(1) sample. This is a special case of the CarmaSample class for p = 1. As with the
        CarmaSample class, this class should never be constructed directly. Instead, one should obtain a Car1Sample
        class by calling CarmaModel.run_mcmc().

        @param time: The array of time values for the time series.
        @param y: The array of measured time series values.
        @param ysig: The standard deviation in the measurement noise for the time series.
        @param sampler: A wrapper for an instantiated C++ Car1 object.
        @param filename: The name of an ascii file containing the MCMC samples.
        """
        self.time = time  # The time values of the time series
        self.y = y  # The measured values of the time series
        self.ysig = ysig  # The standard deviation of the measurement errors of the time series
        self.p = 1  # How many AR terms
        self.q = 0  # How many MA terms

        logpost = np.array(sampler.GetLogLikes())
        trace = np.array(sampler.getSamples())

        # NOTE(review): this names CarmaSample (not Car1Sample) on purpose, so that
        # CarmaSample.__init__ is skipped and its base-class constructor is invoked
        # directly -- confirm before "fixing".
        super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)

        # print() function form works on both Python 2 and Python 3
        print("Calculating sigma...")
        self._sigma_noise()

        # add the log-likelihoods
        print("Calculating log-likelihoods...")
        loglik = np.empty(logpost.size)
        for i in range(logpost.size):
            std_theta = carmcmcLib.vecD()
            std_theta.extend(trace[i, :])
            loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)

        self._samples['loglik'] = loglik
        # make the parameter names (i.e., the keys) public so the user knows how to get them
        self.parameters = self._samples.keys()
        self.newaxis()

    def generate_from_trace(self, trace):
        """
        Unpack the MCMC trace columns into the samples dictionary, but only if it has not
        already been populated. Column order is (variance, measurement error scale, mean,
        log mean-reversion rate).
        """
        names = ['sigma', 'measerr_scale', 'mu', 'log_omega']
        if names != self._samples.keys():
            self._samples['var'] = trace[:, 0]
            self._samples['measerr_scale'] = trace[:, 1]
            self._samples['mu'] = trace[:, 2]
            self._samples['log_omega'] = trace[:, 3]

    def _ar_roots(self):
        """AR roots are not computed for the CAR(1) parameterization; this is a no-op."""
        print("_ar_roots not supported for CAR1")
        return

    def _ar_coefs(self):
        """AR coefficients are not computed for the CAR(1) parameterization; this is a no-op."""
        print("_ar_coefs not supported for CAR1")
        return

    def _sigma_noise(self):
        """
        Compute the driving white-noise standard deviation from the sampled process
        variance and mean-reversion rate, storing it under 'sigma'.
        """
        self._samples['sigma'] = np.sqrt(2.0 * self._samples['var'] * np.exp(self._samples['log_omega']))

    def makeKalmanFilter(self, bestfit):
        """
        Construct the wrapped C++ CAR(1) Kalman filter for a point estimate of the parameters.

        :param bestfit: 'map', 'median', or anything else for the posterior mean.
        :rtype : A tuple (kfilter, mu) of the wrapped C++ KalmanFilter1 object and the mean
            used to center the time series.
        """
        if bestfit == 'map':
            # use maximum a posteriori estimate
            max_index = self._samples['logpost'].argmax()
            sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
            mu = self._samples['mu'][max_index][0]
            log_omega = self._samples['log_omega'][max_index][0]
        elif bestfit == 'median':
            # use posterior median estimate
            sigsqr = np.median(self._samples['sigma']) ** 2
            mu = np.median(self._samples['mu'])
            log_omega = np.median(self._samples['log_omega'])
        else:
            # use posterior mean as the best-fit
            sigsqr = np.mean(self._samples['sigma'] ** 2)
            mu = np.mean(self._samples['mu'])
            log_omega = np.mean(self._samples['log_omega'])

        kfilter = carmcmcLib.KalmanFilter1(arrayToVec(self.time),
                                           arrayToVec(self.y - mu),
                                           arrayToVec(self.ysig),
                                           sigsqr,
                                           np.exp(log_omega))
        return kfilter, mu

    def plot_power_spectrum(self, percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None,
                            doShow=True):
        """
        Plot the posterior median and the credibility interval corresponding to percentile of the CAR(1) PSD. This
        function returns a tuple containing the lower and upper PSD credibility intervals as a function of
        frequency, the median PSD as a function of frequency, and the frequencies.

        :rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies). If no subplot axes object
            is supplied (i.e., if sp = None), then the figure object created here will also be returned as the last
            element of the tuple.
        :param percentile: The percentile of the PSD credibility interval to plot.
        :param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
            of them. Use less samples for increased speed.
        :param plot_log: A boolean. If true, then a logarithmic plot is made.
        :param color: The color of the shaded credibility region.
        :param alpha: The transparency level.
        :param sp: A matplotlib subplot axes object to use.
        :param doShow: If true, call plt.show()
        :raises ValueError: If nsamples exceeds the number of MCMC samples available.
        """
        sigmas = self._samples['sigma']
        log_omegas = self._samples['log_omega']
        if nsamples is None:
            # Use all of the MCMC samples
            nsamples = sigmas.shape[0]
        else:
            # the original try/except validation was a no-op; check explicitly
            if nsamples > sigmas.shape[0]:
                raise ValueError("nsamples must be less than the total number of MCMC samples.")
            nsamples0 = sigmas.shape[0]
            # thin the chain with an integer (floor-division) stride so indices are ints
            index = np.arange(nsamples) * (nsamples0 // nsamples)
            sigmas = sigmas[index]
            log_omegas = log_omegas[index]

        nfreq = 1000
        dt_min = self.time[1:] - self.time[0:self.time.size - 1]
        dt_min = dt_min.min()
        dt_max = self.time.max() - self.time.min()

        # Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
        # maximum time scales probed by the time series.
        freq_max = 0.5 / dt_min
        freq_min = 1.0 / dt_max

        frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
        frequencies = np.exp(frequencies)
        psd_credint = np.empty((nfreq, 3))

        lower = (100.0 - percentile) / 2.0  # lower and upper intervals for credible region
        upper = 100.0 - lower

        # Lorentzian PSD of a CAR(1) process, evaluated per frequency
        numer = sigmas ** 2
        omegasq = np.exp(log_omegas) ** 2
        for i in range(nfreq):
            denom = omegasq + (2. * np.pi * frequencies[i]) ** 2
            psd_samples = numer / denom

            # Now compute credibility interval for power spectrum
            psd_credint[i, 0] = np.percentile(psd_samples, lower, axis=0)
            psd_credint[i, 2] = np.percentile(psd_samples, upper, axis=0)
            psd_credint[i, 1] = np.median(psd_samples, axis=0)

        # Plot the power spectra
        fig = None
        if sp is None:
            # no axes supplied: make our own figure and remember to return it
            fig = plt.figure()
            sp = fig.add_subplot(111)

        if plot_log:
            # plot the posterior median first
            sp.loglog(frequencies, psd_credint[:, 1], color=color)
        else:
            sp.plot(frequencies, psd_credint[:, 1], color=color)

        sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
        sp.set_xlim(frequencies.min(), frequencies.max())
        sp.set_xlabel('Frequency')
        sp.set_ylabel('Power Spectrum')

        if doShow:
            plt.show()

        if fig is not None:
            # the original re-tested "sp == None" after reassigning sp, so the figure was
            # never returned despite the docstring; track it with fig instead
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
        else:
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
def get_ar_roots(qpo_width, qpo_centroid):
    """
    Return the roots of the characteristic AR(p) polynomial of the CARMA(p,q) process, given the lorentzian widths and
    centroids.

    :rtype : The roots of the autoregressive polynomial, a numpy array.
    :param qpo_width: The widths of the lorentzian functions defining the PSD.
    :param qpo_centroid: The centroids of the lorentzian functions defining the PSD. For all values of qpo_centroid
        that are greater than zero, the complex conjugate of the root will also be added.
    """
    ar_roots = []
    # range() replaces the Python-2-only xrange() so this also runs under Python 3
    for i in range(len(qpo_centroid)):
        ar_roots.append(qpo_width[i] + 1j * qpo_centroid[i])
        if qpo_centroid[i] > 1e-10:
            # lorentzian is centered at a frequency > 0, so add complex conjugate of this root
            ar_roots.append(np.conjugate(ar_roots[-1]))
    if len(qpo_width) - len(qpo_centroid) == 1:
        # odd number of lorentzian functions, so add in low-frequency component
        ar_roots.append(qpo_width[-1] + 1j * 0.0)
    ar_roots = np.array(ar_roots)

    # scale by -2*pi so the roots have negative real parts (stationary process)
    return -2.0 * np.pi * ar_roots
def power_spectrum(freq, sigma, ar_coef, ma_coefs=[1.0]):
    """
    Return the power spectrum for a CARMA(p,q) process calculated at the input frequencies.

    :param freq: The frequencies at which to calculate the PSD.
    :param sigma: The standard deviation of the driving white noise.
    :param ar_coef: The CARMA model autoregressive coefficients, highest degree first
        (np.polyval convention).
    :param ma_coefs: Coefficients of the moving average polynomial, lowest degree first.
    :rtype : The power spectrum at the input frequencies, a numpy array.
    :raises ValueError: If more MA coefficients than AR coefficients are supplied.
    """
    # the original try/except validation here was a no-op (a bare comparison never
    # raises, and the string literal did nothing); perform the intended check for real
    if len(ma_coefs) > len(ar_coef):
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")

    ma_poly = np.polyval(ma_coefs[::-1], 2.0 * np.pi * 1j * freq)  # Evaluate the polynomial in the PSD numerator
    ar_poly = np.polyval(ar_coef, 2.0 * np.pi * 1j * freq)  # Evaluate the polynomial in the PSD denominator
    return sigma ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
def carma_variance(sigsqr, ar_roots, ma_coefs=[1.0], lag=0.0):
    """
    Return the autocovariance function of a CARMA(p,q) process.

    :param sigsqr: The variance in the driving white noise.
    :param ar_roots: The roots of the AR characteristic polynomial, a numpy array.
    :param ma_coefs: The moving average coefficients, lowest degree first.
    :param lag: The lag at which to calculate the autocovariance function.
    :raises ValueError: If more MA coefficients than AR roots are supplied.
    """
    # the original try/except validation here was a no-op; check explicitly
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")

    if len(ma_coefs) < len(ar_roots):
        # add extra zeros to end of ma_coefs so there is one coefficient per root
        nmore = len(ar_roots) - len(ma_coefs)
        ma_coefs = np.append(ma_coefs, np.zeros(nmore))

    sigma1_variance = 0.0 + 0j
    p = ar_roots.size
    for k in range(p):
        # denominator: -2 * Re(root_k) times the product over the other roots
        denom_product = 1.0 + 0j
        for l in range(p):
            if l != k:
                denom_product *= (ar_roots[l] - ar_roots[k]) * (np.conjugate(ar_roots[l]) + ar_roots[k])
        denom = -2.0 * denom_product * ar_roots[k].real

        # numerator: MA polynomial at root_k and at -root_k, damped by exp(root_k * |lag|)
        ma_sum1 = 0.0 + 0j
        ma_sum2 = 0.0 + 0j
        for l in range(p):
            ma_sum1 += ma_coefs[l] * ar_roots[k] ** l
            ma_sum2 += ma_coefs[l] * (-1.0 * ar_roots[k]) ** l
        numer = ma_sum1 * ma_sum2 * np.exp(ar_roots[k] * abs(lag))

        sigma1_variance += numer / denom

    # the imaginary parts cancel in the sum over conjugate root pairs
    return sigsqr * sigma1_variance.real
def car1_process(time, sigsqr, tau):
"""
Generate a CAR(1) process.
:param time: The time values at which to generate the CAR(1) process at.
:param sigsqr: The variance in the driving white noise term.
:param tau: The e-folding (mean-reversion) time scale of the CAR(1) process. Note that tau = -1.0 / ar_root.
:rtype : A numpy array containing the simulated CAR(1) process values at time.
"""
marginal_var = sigsqr * tau / 2.0
y = np.zeros(len(time))
y[0] = np.sqrt(marginal_var) * np.random.standard_normal()
for i in range(1, len(time)):
dt = time[i] - time[i-1]
rho = np.exp(-dt / tau)
conditional_var = marginal_var * (1.0 - rho ** 2)
y[i] = rho * y[i-1] + np.sqrt(conditional_var) * np.random.standard_normal()
return y
def carma_process(time, sigsqr, ar_roots, ma_coefs=[1.0]):
    """
    Generate a CARMA(p,q) process.

    :param time: The time values at which to generate the CARMA(p,q) process at.
        NOTE: this array is sorted in place.
    :param sigsqr: The variance in the driving white noise term.
    :param ar_roots: The roots of the autoregressive characteristic polynomial.
    :param ma_coefs: The moving average coefficients, lowest degree first.
    :rtype : A numpy array containing the simulated CARMA(p,q) process values at time.
    :raises ValueError: If more MA coefficients than AR roots are supplied, if the
        process is not stationary, or if the AR roots are not unique.
    """
    # The try/except blocks in the original were no-ops (bare comparisons never raise);
    # perform the intended validations explicitly.
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less or equal to size of ar_roots.")

    ar_roots = np.asarray(ar_roots)
    p = len(ar_roots)

    if p == 1:
        # generate a CAR(1) process
        return car1_process(time, sigsqr, -1.0 / np.asscalar(ar_roots))

    if len(ma_coefs) < p:
        # add extra zeros to end of ma_coefs
        q = len(ma_coefs)
        ma_coefs = np.resize(np.array(ma_coefs), len(ar_roots))
        ma_coefs[q:] = 0.0
    else:
        # make sure .dot() below works even when ma_coefs was passed as a plain list
        ma_coefs = np.asarray(ma_coefs)

    time.sort()

    # make sure process is stationary
    if np.any(ar_roots.real >= 0):
        raise ValueError("Process is not stationary, real part of roots must be negative.")

    # make sure the roots are unique
    tol = 1e-8
    roots_grid = np.meshgrid(ar_roots, ar_roots)
    roots_grid1 = roots_grid[0].ravel()
    roots_grid2 = roots_grid[1].ravel()
    diff_roots = np.abs(roots_grid1 - roots_grid2) / np.abs(roots_grid1 + roots_grid2)
    # the grid contains the p diagonal (root vs. itself) pairs, which are identically
    # zero, so more than p near-zero differences means at least two roots coincide
    if np.sum(diff_roots < tol) > p:
        raise ValueError("Roots are not unique.")

    # Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
    # quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
    EigenMat = np.ones((p, p), dtype=complex)
    EigenMat[1, :] = ar_roots
    for k in range(2, p):
        EigenMat[k, :] = ar_roots ** k

    # Input vector under the original state space representation
    Rvector = np.zeros(p, dtype=complex)
    Rvector[-1] = 1.0

    # Input vector under rotated state space representation
    Jvector = solve(EigenMat, Rvector)  # J = inv(E) * R

    # Compute the vector of moving average coefficients in the rotated state.
    rotated_MA_coefs = ma_coefs.dot(EigenMat)

    # Calculate the stationary covariance matrix of the state vector
    StateVar = np.empty((p, p), dtype=complex)
    for j in range(p):
        StateVar[:, j] = -sigsqr * Jvector * np.conjugate(Jvector[j]) / (ar_roots + np.conjugate(ar_roots[j]))

    # Initialize variance in one-step prediction error and the state vector
    PredictionVar = StateVar.copy()
    StateVector = np.zeros(p, dtype=complex)

    # Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
    StateVector = np.matrix(StateVector).T
    StateVar = np.matrix(StateVar)
    PredictionVar = np.matrix(PredictionVar)
    rotated_MA_coefs = np.matrix(rotated_MA_coefs)  # this is a row vector, so no transpose
    StateTransition = np.zeros_like(StateVector)
    KalmanGain = np.zeros_like(StateVector)

    # Initialize the Kalman mean and variance. These are the forecasted values and their variances.
    kalman_mean = 0.0
    kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))

    # simulate the first time series value
    y = np.empty_like(time)
    y[0] = np.random.normal(kalman_mean, np.sqrt(kalman_var))

    # Initialize the innovations, i.e., the KF residuals
    innovation = y[0]

    for i in range(1, time.size):
        # First compute the Kalman gain
        KalmanGain = PredictionVar * rotated_MA_coefs.H / kalman_var
        # update the state vector
        StateVector += innovation * KalmanGain
        # update the state one-step prediction error variance
        PredictionVar -= kalman_var * (KalmanGain * KalmanGain.H)
        # predict the next state, do element-wise multiplication
        dt = time[i] - time[i - 1]
        StateTransition = np.matrix(np.exp(ar_roots * dt)).T
        StateVector = np.multiply(StateVector, StateTransition)
        # update the predicted state covariance matrix
        PredictionVar = np.multiply(StateTransition * StateTransition.H, PredictionVar - StateVar) + StateVar
        # now predict the observation and its variance
        kalman_mean = np.real(np.asscalar(rotated_MA_coefs * StateVector))
        kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))
        # simulate the next time series value
        y[i] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
        # finally, update the innovation
        innovation = y[i] - kalman_mean

    return y
##################
# Deprecated
class KalmanFilterDeprecated(object):
def __init__(self, time, y, yvar, sigsqr, ar_roots, ma_coefs=[1.0]):
    """
    Constructor for Kalman Filter class.

    :param time: The time values of the time series.
    :param y: The mean-subtracted time series.
    :param yvar: The variance in the measurement errors on the time series.
    :param sigsqr: The variance of the driving white noise term in the CAR(p) process.
    :param ar_roots: The roots of the autoregressive characteristic polynomial, a numpy array.
    :param ma_coefs: The moving average coefficients of the CARMA(p,q) process, lowest degree first.
    :raises ValueError: If the MA polynomial order exceeds the AR polynomial order.
    """
    # the original try/except validation here was a no-op (a bare comparison never
    # raises, and the string literal did nothing); perform the intended check for real
    if len(ma_coefs) > ar_roots.size:
        raise ValueError("Order of MA polynomial cannot be larger than order of AR polynomial.")

    self.time = time
    self.y = y
    self.yvar = yvar
    self.sigsqr = sigsqr
    self.ar_roots = ar_roots
    self.p = ar_roots.size  # order of the CARMA(p,q) process
    self.q = len(ma_coefs)
    # pad the MA coefficients with zeros so there is one coefficient per AR root
    self.ma_coefs = np.append(ma_coefs, np.zeros(self.p - self.q))
def reset(self):
    """
    Reset the Kalman Filter to its initial state.

    Rebuilds the rotated state-space representation from the current model parameters
    (ar_roots, ma_coefs, sigsqr) and initializes the state vector, its covariance, and
    the forecast mean/variance for the first data point.
    """
    # Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
    # quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
    EigenMat = np.ones((self.p, self.p), dtype=complex)
    EigenMat[1, :] = self.ar_roots
    for k in xrange(2, self.p):
        EigenMat[k, :] = self.ar_roots ** k

    # Input vector under the original state space representation
    Rvector = np.zeros(self.p, dtype=complex)
    Rvector[-1] = 1.0

    # Input vector under rotated state space representation
    Jvector = solve(EigenMat, Rvector)  # J = inv(E) * R

    # Compute the vector of moving average coefficients in the rotated state.
    rotated_MA_coefs = self.ma_coefs.dot(EigenMat)

    # Calculate the stationary covariance matrix of the state vector
    StateVar = np.empty((self.p, self.p), dtype=complex)
    for j in xrange(self.p):
        StateVar[:, j] = -self.sigsqr * Jvector * np.conjugate(Jvector[j]) / \
                         (self.ar_roots + np.conjugate(self.ar_roots[j]))

    # Initialize variance in one-step prediction error and the state vector
    PredictionVar = StateVar.copy()
    StateVector = np.zeros(self.p, dtype=complex)

    # Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
    self._StateVector = np.matrix(StateVector).T
    self._StateVar = np.matrix(StateVar)
    self._PredictionVar = np.matrix(PredictionVar)
    self._rotated_MA_coefs = np.matrix(rotated_MA_coefs)  # this is a row vector, so no transpose
    self._StateTransition = np.zeros_like(self._StateVector)
    self._KalmanGain = np.zeros_like(self._StateVector)

    # Initialize the Kalman mean and variance. These are the forecasted values and their variances.
    self.kalman_mean = np.empty_like(self.time)
    self.kalman_var = np.empty_like(self.time)
    self.kalman_mean[0] = 0.0
    # the forecast variance of the first point includes the measurement noise variance
    self.kalman_var[0] = np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) \
        + self.yvar[0]

    # Initialize the innovations, i.e., the KF residuals
    self._innovation = self.y[0]

    # the next call to update() will advance the filter to index 1
    self._current_index = 1
def update(self):
    """
    Perform one iteration (update) of the Kalman Filter.

    Advances the filter from index self._current_index - 1 to self._current_index,
    storing the one-step forecast mean and variance for the new index and updating the
    internal state vector, its covariance, and the innovation.
    """
    # First compute the Kalman gain
    self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[self._current_index - 1]
    # update the state vector
    self._StateVector += self._innovation * self._KalmanGain
    # update the state one-step prediction error variance
    self._PredictionVar -= self.kalman_var[self._current_index - 1] * (self._KalmanGain * self._KalmanGain.H)
    # predict the next state, do element-wise multiplication
    dt = self.time[self._current_index] - self.time[self._current_index - 1]
    self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
    self._StateVector = np.multiply(self._StateVector, self._StateTransition)
    # update the predicted state covariance matrix
    self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
                                      self._PredictionVar - self._StateVar) + self._StateVar
    # now predict the observation and its variance
    self.kalman_mean[self._current_index] = np.real(np.asscalar(self._rotated_MA_coefs * self._StateVector))
    self.kalman_var[self._current_index] = \
        np.real(np.asscalar(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
    # the forecast variance includes the measurement noise variance at this point
    self.kalman_var[self._current_index] += self.yvar[self._current_index]
    # finally, update the innovation
    self._innovation = self.y[self._current_index] - self.kalman_mean[self._current_index]
    self._current_index += 1
def filter(self):
    """
    Perform the Kalman Filter on all points of the time series. The kalman mean and variance are returned upon
    completion, and are stored in the instantiated KalmanFilter object.
    """
    self.reset()
    # reset() already initialized index 0, so time.size - 1 updates remain.
    # range() replaces the Python-2-only xrange() so this also runs under Python 3.
    for _ in range(self.time.size - 1):
        self.update()
    return self.kalman_mean, self.kalman_var
def predict(self, time_predict):
"""
Return the predicted value of a time series and its standard deviation at the input time given the input
values of the CARMA(p,q) model parameters and a measured time series.
:rtype : A tuple containing the predicted value and its variance.
:param time_predict: The time at which to predict the time series.
"""
try:
self.time.min() > time_predict
except ValueError:
"backcasting currently not supported: time_predict must be greater than self.time.min()"
self.reset()
# find the index where time[ipredict-1] < time_predict < time[ipredict]
ipredict = np.max(np.where(self.time < time_predict)) + 1
for i in xrange(ipredict - 1):
# run the kalman filter for time < time_predict
self.update()
# predict the value of y[time_predict]
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[ipredict - 1]
self._StateVector += self._innovation * self._KalmanGain
self._PredictionVar -= self.kalman_var[ipredict - 1] * (self._KalmanGain * self._KalmanGain.H)
dt = time_predict - self.time[ipredict - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
self._StateVector = np.multiply(self._StateVector, self._StateTransition)
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
ypredict_mean = np.asscalar(np.real(self._rotated_MA_coefs * self._StateVector))
ypredict_var = np.asscalar(np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
# start the running statistics for the conditional mean and precision of the predicted time series value, given
# the measured time series
cprecision = 1.0 / ypredict_var
cmean = cprecision * ypredict_mean
if ipredict >= self.time.size:
# we are forecasting (extrapolating) the value, so no need to run interpolation steps below
return ypredict_mean, ypredict_var
# for time > time_predict we need to compute the coefficients for the linear filter, i.e., at time[j]:
# E(y[j]|{y[i]; j<i}) = alpha[j] + beta[j] * ypredict. we do this using recursions similar to the kalman
# filter.
# first set the initial values.
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / ypredict_var
# initialize the coefficients for predicting the state vector at coefs(time_predict|time_predict)
const_state = self._StateVector - self._KalmanGain * ypredict_mean
slope_state = self._KalmanGain
# update the state one-step prediction error variance
self._PredictionVar -= ypredict_var * (self._KalmanGain * self._KalmanGain.H)
# do coefs(time_predict|time_predict) --> coefs(time[i+1]|time_predict)
dt = self.time[ipredict] - time_predict
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
const_state = np.multiply(const_state, self._StateTransition)
slope_state = np.multiply(slope_state, self._StateTransition)
# update the predicted state covariance matrix
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# compute the coefficients for the linear filter at time[ipredict], and compute the variance in the predicted
# y[ipredict]
const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
self.kalman_var[ipredict] = \
np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
self.yvar[ipredict]
# update the running conditional mean and variance of the predicted time series value
cprecision += slope ** 2 / self.kalman_var[ipredict]
cmean += slope * (self.y[ipredict] - const) / self.kalman_var[ipredict]
self.const = np.zeros(self.time.size)
self.slope = np.zeros(self.time.size)
self.const[ipredict] = const
self.slope[ipredict] = slope
# now repeat for time > time_predict
for i in xrange(ipredict + 1, self.time.size):
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[i - 1]
# update the state prediction coefficients: coefs(i|i-1) --> coefs(i|i)
const_state += self._KalmanGain * (self.y[i - 1] - const)
slope_state -= self._KalmanGain * slope
# update the state one-step prediction error variance
self._PredictionVar -= self.kalman_var[i - 1] * (self._KalmanGain * self._KalmanGain.H)
# compute the one-step state prediction coefficients: coefs(i|i) --> coefs(i+1|i)
dt = self.time[i] - self.time[i - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
const_state = np.multiply(const_state, self._StateTransition)
slope_state = np.multiply(slope_state, self._StateTransition)
# compute the state one-step prediction error variance
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# compute the coefficients for predicting y[i]|y[j],j<i as a function of ypredict
const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
# compute the variance in predicting y[i]|y[j],j<i
self.kalman_var[i] = \
np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
self.yvar[i]
# finally, update the running conditional mean and variance of the predicted time series value
cprecision += slope ** 2 / self.kalman_var[i]
cmean += slope * (self.y[i] - const) / self.kalman_var[i]
self.const[i] = const
self.slope[i] = slope
cvar = 1.0 / cprecision
cmean *= cvar
return cmean, cvar
def simulate(self, time_simulate):
    """
    Simulate a time series at the input time values of time_simulate, given the measured time series and input
    CARMA(p,q) parameters.

    :rtype : A scalar or numpy array, depending on type of time_simulate.
    :param time_simulate: The time(s) at which to simulate a random draw of the time series conditional on the
        measured time series and the input parameters. If array-like, it is sorted in place.
    """
    if np.isscalar(time_simulate):
        cmean, cvar = self.predict(time_simulate)
        ysimulated = np.random.normal(cmean, np.sqrt(cvar))
        return ysimulated
    else:
        # input is array-like, need to simulate values sequentially, adding each value to the measured time series
        # as they are simulated so later draws are conditioned on the earlier ones
        time0 = self.time  # save original values
        y0 = self.y
        yvar0 = self.yvar
        ysimulated = np.empty(time_simulate.size)
        time_simulate.sort()  # NOTE: sorts the caller's array in place (kept for backward compatibility)
        for i in xrange(time_simulate.size):
            cmean, cvar = self.predict(time_simulate[i])
            ysimulated[i] = np.random.normal(cmean, np.sqrt(cvar))  # simulate the time series value
            # insertion index such that time[isimulate-1] < time_simulate[i] <= time[isimulate].
            # np.searchsorted is equivalent to the old np.max(np.where(self.time < t)) + 1, but also
            # handles the case where t precedes every measured time (the old expression raised on the
            # resulting empty array).
            isimulate = np.searchsorted(self.time, time_simulate[i])
            # insert the simulated value into the time series array
            self.time = np.insert(self.time, isimulate, time_simulate[i])
            self.y = np.insert(self.y, isimulate, ysimulated[i])
            self.yvar = np.insert(self.yvar, isimulate, 0.0)
        # reset measured time series to original values
        self.y = y0
        self.time = time0
        self.yvar = yvar0
        return ysimulated
| {
"repo_name": "farr/carma_pack",
"path": "src/carmcmc/carma_pack.py",
"copies": "1",
"size": "68810",
"license": "mit",
"hash": -131876566897171710,
"line_mean": 43.5949449125,
"line_max": 120,
"alpha_frac": 0.5952768493,
"autogenerated": false,
"ratio": 3.6120734908136485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47073503401136485,
"avg_score": null,
"num_lines": null
} |
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = '0.1.0'
__author__ = 'Brandon Hamilton <brandon.hamilton@gmail.com>'
__copyright__ = "Copyright (c) 2015 Brandon Hamilton"
__license__ = "MIT"
import os
import requests
BASE_URI = 'https://updown.io/api/checks'
# Read the API key from the environment. The trailing `or None` preserves the original
# and/or-idiom semantics exactly: an unset *or empty* UPDOWN_API_KEY yields None, not ''.
API_KEY = os.environ.get('UPDOWN_API_KEY') or None
def _performRequest(method, uri='', data=None):
    """Issue an HTTP request against the updown.io checks API and return the decoded JSON body.

    Raises requests.HTTPError (via raise_for_status) on a non-2xx response.
    """
    headers = {'X-API-KEY': API_KEY}
    if data:
        response = requests.request(method, BASE_URI + uri, headers=headers, data=data)
    else:
        response = requests.request(method, BASE_URI + uri, headers=headers)
    response.raise_for_status()
    return response.json()
def checks():
    """Fetch every check on the account and return them as a dict keyed by monitored URL."""
    listing = _performRequest('GET')
    by_url = {}
    for entry in listing:
        by_url[entry['url']] = Check._fromObject(entry)
    return by_url
def add(url, period=60, apdex_t=0.25, enabled=True, published=False):
    """Create a new check on updown.io and return the synced Check instance."""
    check = Check(url, period=period, apdex_t=apdex_t, enabled=enabled, published=published)
    check.sync()
    return check
class Check:
    """A single updown.io check (one monitored URL) mirrored as a local object."""

    def __init__(self, url, period=60, apdex_t=0.25, enabled=True, published=False):
        """
        :param url: The URL to monitor.
        :param period: Polling interval in seconds.
        :param apdex_t: Apdex target response time in seconds.
        :param enabled: Whether the check is active.
        :param published: Whether the status page is public.
        """
        self.token = None  # server-assigned identifier; stays None until first sync()
        self.url = url
        self.period = period
        self.apdex_t = apdex_t
        self.enabled = enabled
        self.published = published

    @staticmethod
    def _fromObject(obj):
        """Build a Check from a raw API response dictionary."""
        c = Check(obj['url'])
        # items() behaves identically on Python 2 and 3; the original iteritems() is Python-2-only
        for attr, value in obj.items():
            setattr(c, attr, value)
        return c

    def _toObject(self):
        """Serialize all non-callable, non-dunder attributes to a plain dictionary."""
        return {attr: getattr(self, attr) for attr in dir(self)
                if not callable(getattr(self, attr)) and not attr.startswith("__")}

    def sync(self):
        """Create (POST) or update (PUT) this check on the server and refresh local attributes."""
        if self.token:
            r = _performRequest('PUT', uri='/' + self.token, data=self._toObject())
        else:
            r = _performRequest('POST', data=self._toObject())
        # refresh local state from the server response (single loop replaces the duplicated ones)
        for attr, value in r.items():
            setattr(self, attr, value)

    def delete(self):
        """Delete the check on the server; a check that was never synced is trivially 'deleted'."""
        if self.token:
            r = _performRequest('DELETE', uri='/' + self.token)
            return r['deleted']
        else:
            return True

    def downtimes(self, page=1):
        """Return one page of recorded downtimes for this check."""
        r = _performRequest('GET', '/' + self.token + '/downtimes', data={'page': page})
        return r

    def __repr__(self):
        return repr(self._toObject())

    def __str__(self):
        return str(self._toObject())
| {
"repo_name": "brandonhamilton/updown-python",
"path": "updown/__init__.py",
"copies": "1",
"size": "3415",
"license": "mit",
"hash": -4784523748264064000,
"line_mean": 33.8469387755,
"line_max": 132,
"alpha_frac": 0.6442166911,
"autogenerated": false,
"ratio": 3.675995694294941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48202123853949413,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
from distutils.core import setup, Extension
import numpy.distutils.misc_util
import os
import platform
# Platform detection drives library search paths and compiler flags below.
system_name= platform.system()
#desc = open("README.rst").read()
extension_version = "0.1.0"
extension_url = "https://github.com/bckelly80/big_data_combine"
# Locations of the third-party dependencies; these environment variables must be
# set before building (a KeyError here means the build environment is not configured).
BOOST_DIR = os.environ["BOOST_DIR"]
ARMADILLO_DIR = os.environ["ARMADILLO_DIR"]
NUMPY_DIR = os.environ["NUMPY_DIR"]
include_dirs = [NUMPY_DIR + "/include", BOOST_DIR + "/include", ARMADILLO_DIR + "/include",
                "/usr/include/", "include"]
# needed to add "include" in order to build
for include_dir in numpy.distutils.misc_util.get_numpy_include_dirs():
    include_dirs.append(include_dir)
library_dirs = [NUMPY_DIR + "/lib", BOOST_DIR + "/lib", ARMADILLO_DIR + "/lib", "/usr/lib/"]
if system_name != 'Darwin':
    # /usr/lib64 does not exist under Mac OS X
    library_dirs.append("/usr/lib64")
# C++ compiler flags for the extension modules built below.
compiler_args = ["-O3"]
if system_name == 'Darwin':
    compiler_args.append("-std=c++11")
    # need to build against libc++ for Mac OS X
    compiler_args.append("-stdlib=libc++")
else:
    # older GCC spelling of the C++11 flag
    compiler_args.append("-std=c++0x")
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for the hmlinmae_gibbs package."""
    # http://docs.scipy.org/doc/numpy/reference/distutils.html#numpy.distutils.misc_util.Configuration
    from numpy.distutils.misc_util import Configuration
    config = Configuration("hmlinmae_gibbs", parent_package, top_path)
    config.version = extension_version
    config.add_data_dir((".", "python/hmlin_mae"))
    # libraries shared by the static library and the boost.python extension module
    base_libs = ["boost_python", "boost_filesystem", "boost_system", "armadillo", "yamcmcpp"]
    # note: add_library build_info uses the key 'extra_compiler_args', while
    # add_extension uses 'extra_compile_args' -- both spellings are intentional
    config.add_library("hmlinmae_gibbs",
                       sources=["linmae_parameters.cpp", "MaeGibbs.cpp"],
                       include_dirs=include_dirs, library_dirs=library_dirs,
                       libraries=base_libs,
                       extra_compiler_args=compiler_args)
    config.add_extension("lib_hmlinmae",
                         sources=["boost_python_wrapper.cpp", "MaeGibbs.cpp"],
                         include_dirs=include_dirs,
                         library_dirs=library_dirs,
                         libraries=base_libs + ["hmlinmae_gibbs"],
                         extra_compile_args=compiler_args)
    return config
if __name__ == '__main__':
    # use numpy.distutils' setup (not plain distutils) so the Configuration object is understood
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| {
"repo_name": "brandonckelly/BDC",
"path": "HMLinMAE/python/hmlin_mae/setup.py",
"copies": "2",
"size": "2409",
"license": "mit",
"hash": -7518694493696822000,
"line_mean": 42.0178571429,
"line_max": 121,
"alpha_frac": 0.6508924865,
"autogenerated": false,
"ratio": 3.3181818181818183,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49690743046818187,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import numpy as np
from numba import jit
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import time
@jit  # if you don't have numba, then comment out this line but this routine will be slow!
def dynamic_time_warping(tseries1, tseries2):
    """
    Compute the dynamic time warping (DTW) distance between two time series. It is assumed that the time series are
    evenly sampled, but they can have different lengths. Numba is used to speed up the computation, so you must have
    Numba installed. Note that the time series can be multivariate.

    NOTE(review): despite the "1-D or 2-D" wording below, the shape unpacking on the first
    line requires both inputs to be 2-D arrays of shape (ntime, nfeatures); pass a column
    vector (e.g. x[:, np.newaxis]) for univariate series.

    :param tseries1: The first time series, a 1-D or 2-D numpy array.
    :param tseries2: The second time series, a 1-D or 2-D numpy array.
    :return: A tuple containing the DTW distance, the DTW matrix, and the path matrix taken by the algorithm.
    """
    ntime1, nfeatures = tseries1.shape
    ntime2 = tseries2.shape[0]
    dtw = np.zeros((ntime1, ntime2), dtype=np.float)  # matrix of coordinate distances
    path = np.zeros((ntime1, ntime2), dtype=np.int)  # path of algorithm
    # initialize the first row and column
    for k in range(nfeatures):
        dtw[0, 0] += (tseries1[0, k] - tseries2[0, k]) ** 2
    path[0, 0] = -1
    # first column: can only arrive from above (move code 2)
    for i in range(1, ntime1):
        dist = 0.0
        for k in range(nfeatures):
            dist += (tseries1[i, k] - tseries2[0, k]) ** 2
        dtw[i, 0] = dtw[i-1, 0] + dist
        path[i, 0] = 2
    # first row: can only arrive from the left (move code 1)
    for j in range(1, ntime2):
        dist = 0.0
        for k in range(nfeatures):
            dist += (tseries1[0, k] - tseries2[j, k]) ** 2
        dtw[0, j] = dtw[0, j-1] + dist
        path[0, j] = 1
    # main loop of the DTW algorithm
    for i in range(1, len(tseries1)):
        for j in range(1, len(tseries2)):
            # manual 3-way min (instead of np.argmin) keeps this loop numba-friendly and fast
            a = dtw[i-1, j-1]
            b = dtw[i, j-1]
            c = dtw[i-1, j]
            if a < b:
                if a < c:
                    idx = 0  # a is the minimum
                    delta = a
                else:
                    idx = 2  # c is the minimum
                    delta = c
            else:
                if b < c:
                    idx = 1  # b is the minimum
                    delta = b
                else:
                    idx = 2  # c is the minimum
                    delta = c
            # neighbors = np.array([dtw[i-1, j-1], dtw[i, j-1], dtw[i-1, j]])
            # idx = np.argmin(neighbors)
            # delta = neighbors[idx]
            dist = 0.0
            for k in range(nfeatures):
                dist += (tseries1[i, k] - tseries2[j, k]) ** 2
            dtw[i, j] = dist + delta
            path[i, j] = idx
    return dtw[-1, -1], dtw, path
class DBA(object):
    # DTW Barycenter Averaging: iteratively refines an "average" time series that
    # minimizes the within-group sum of DTW distances to a set of input series.
    def __init__(self, max_iter, tol=1e-4, verbose=False):
        """
        Constructor for the DBA class. This class computes the dynamic time warping (DTW) barycenter averaging (DBA)
        strategy for averaging a set of time series. The method is described in

        "A global averaging method for dynamic time warping, with applications to clustering." Petitjean, F.,
            Ketterlin, A., & Gancarski, P. 2011, Pattern Recognition, 44, 678-693.

        :param max_iter: The maximum number of iterations for the DBA algorithm.
        :param tol: The tolerance level for the algorithm. The algorithm terminates once the fractional difference in
            the within-group sum of squares between successive iterations is less than tol. The algorithm will also
            terminate if the maximum number of iterations is exceeded, or if the sum of squares increases.
        :param verbose: If true, then provide helpful output.
        """
        self.max_iter = max_iter
        self.tol = tol
        self.average = np.zeros(1)  # the current DBA average, set by compute_average()
        self.wgss = 0.0  # the within-group sum of squares, called the inertia in the clustering literature
        self.verbose = verbose

    def compute_average(self, tseries, nstarts=1, initial_value=None, dba_length=None):
        """
        Perform the DBA algorithm to compute the average for a set of time series. The algorithm is a local optimization
        strategy and thus depends on the initial guess for the average. Improved results can be obtained by using
        multiple random initial starts.

        :param tseries: The list of time series, a list of numpy arrays. Can be multivariate time series.
        :param nstarts: The number of random starts to use for the DBA algorithm. The average time series that minimizes
            the within-group sum of squares over the random starts is returned and saved.
        :param initial_value: The initial value for the DBA algorithm, a numpy array. If None, then the initial values
            will be drawn randomly from the set of input time series (recommended). Note that is an initial guess is
            supplied, then the nstarts parameter is ignored.
        :param dba_length: The length of the DBA average time series. If None, this will be set to the length of the
            initial_value array. Otherwise, the initial value array will be linearly interpolated to this length.
        :return: The estimated average of the time series, defined to minimize the within-group sum of squares of the
            input set of time series.
        """
        if initial_value is not None:
            nstarts = 1  # a supplied initial guess makes multiple random starts pointless
        if initial_value is None:
            # initialize the average as a random draw from the set of inputs
            start_idx = np.random.permutation(len(tseries))[:nstarts]
        best_wgss = 1e300
        if self.verbose:
            print 'Doing initialization iteration:'
        for i in range(nstarts):
            # NOTE(review): this progress print is not guarded by self.verbose
            print i, '...'
            if initial_value is None:
                iseries = tseries[start_idx[i]]
            else:
                iseries = initial_value
            if dba_length is not None:
                # linearly interpolate initial average value to the requested length
                iseries0 = np.atleast_2d(iseries)
                if iseries0.shape[0] == 1:
                    iseries0 = iseries0.T  # vector, so transpose to shape (ntime, 1)
                nfeatures = iseries0.shape[1]
                iseries = np.zeros((dba_length, nfeatures))
                for k in range(nfeatures):
                    lininterp = interp1d(np.arange(iseries0.shape[0]), iseries0[:, k])
                    iseries[:, k] = lininterp(np.linspace(0.0, iseries0.shape[0]-1.01, num=dba_length))
            self._run_dba(tseries, iseries)
            if self.wgss < best_wgss:
                # found better average, save it
                if self.verbose:
                    print 'New best estimate found for random start', i
                best_wgss = self.wgss
                best_average = self.average
        self.wgss = best_wgss
        self.average = best_average
        return best_average

    def associate_segments(self, tseries):
        """
        Identify the indices of the inputs time series that are associated with each element of the average time series.

        :param tseries: The times series for which the indices associated with the average are desired. A numpy array.
        :return: A list-of-lists containing the indices of the input time series that are associated with the elements
            of the DBA average. Call this assoc_table. Then assoc_table[i] will return a list of the indices of the
            input time series that are associated with the element i of the DBA average (i.e., self.average[i]).
        """
        dtw_dist, dtw, path = dynamic_time_warping(self.average, tseries)
        # table telling us which elements of the time series are identified with a specific element of the DBA average
        assoc_table = []
        for i in range(self.average.shape[0]):
            assoc_table.append([])
        # walk the optimal warping path backwards from the bottom-right corner
        i = self.average.shape[0] - 1
        j = tseries.shape[0] - 1
        while i >= 0 and j >= 0:
            assoc_table[i].append(j)
            if path[i, j] == 0:
                # diagonal move
                i -= 1
                j -= 1
            elif path[i, j] == 1:
                # move left
                j -= 1
            elif path[i, j] == 2:
                # move up
                i -= 1
            else:
                # should not happen, but just in case make sure we bail once path[i, j] = -1
                break
        return assoc_table

    def _run_dba(self, tseries, initial_value):
        """ Perform the DBA algorithm. """
        nseries = len(tseries)
        self.average = initial_value
        # first iteration: get initial within-group sum of squares
        if self.verbose:
            print 'Doing iteration'
            print ' ', '0', '...'
        wgss = self._dba_iteration(tseries)
        # main DBA loop
        for i in range(1, self.max_iter):
            if self.verbose:
                print ' ', i, '...', 'WGSS:', wgss
            wgss_old = wgss
            # WGSS is actually from previous iteration, but don't compute again because it is expensive
            wgss = self._dba_iteration(tseries)
            if wgss > wgss_old:
                # sum of squares should be non-increasing
                print 'Warning! Within-group sum of squares increased at iteration', i, 'terminating algorithm.'
                break
            elif np.abs(wgss - wgss_old) / wgss_old < self.tol:
                # convergence
                break
        # compute final within-group sum of squares
        wgss = 0.0
        for k in range(nseries):
            wgss += dynamic_time_warping(tseries[k], self.average)[0]
        self.wgss = wgss

    def _dba_iteration(self, tseries):
        """ Perform a single iteration of the DBA algorithm. """
        ntime = self.average.shape[0]
        # table telling us which elements of the time series are identified with a specific element of the DBA average
        assoc_table = []
        for i in range(ntime):
            assoc_table.append([])
        wgss = 0.0  # within group sum of squares from previous iteration, compute here so we don't have to repeat
        for series in tseries:
            if self.average.shape[1] == 1:
                # univariate input: promote to a column vector so indexing below is uniform
                series = series[:, np.newaxis]
            dtw_dist, dtw, path = dynamic_time_warping(self.average, series)
            wgss += dtw_dist
            # walk the warping path backwards, collecting the series values mapped to each average element
            i = ntime - 1
            j = series.shape[0] - 1
            while i >= 0 and j >= 0:
                assoc_table[i].append(series[j])
                if path[i, j] == 0:
                    i -= 1
                    j -= 1
                elif path[i, j] == 1:
                    j -= 1
                elif path[i, j] == 2:
                    i -= 1
                else:
                    # should not happen, but just in case make sure we bail once path[i, j] = -1
                    break
        # update the average: each element becomes the mean of its associated values
        for i, cell in enumerate(assoc_table):
            cell_array = np.array(cell)
            self.average[i] = cell_array.mean(axis=0)
        return wgss
if __name__ == "__main__":
    # run on some test data: a set of noisy, phase- and period-jittered bivariate sinusoids
    nseries = 40
    ntime0 = 1000
    phase1 = 0.1 + 0.2 * np.random.uniform(0.0, 1.0, nseries) - 0.1
    period1 = np.pi / 4.0 + np.pi / 100.0 * np.random.standard_normal(nseries)
    phase2 = np.pi / 2 + 0.2 * np.random.uniform(0.0, 1.0, nseries) - 0.1
    period2 = np.pi / 2.0 + np.pi / 100.0 * np.random.standard_normal(nseries)
    noise_amplitude = 0.0
    t_list = []
    ts_list = []
    for i in range(nseries):
        # each series gets a random length within +/- 10% of ntime0
        ntime = np.random.random_integers(ntime0 * 0.9, ntime0 * 1.1)
        t = np.linspace(0.0, 10.0, ntime)
        t_list.append(t)
        tseries = np.zeros((ntime, 2))
        tseries[:, 0] = np.sin(t / period1[i] + phase1[i]) + noise_amplitude * np.random.standard_normal(ntime)
        tseries[:, 1] = np.sin(t / period2[i] + phase2[i]) + noise_amplitude * np.random.standard_normal(ntime)
        ts_list.append(tseries)
    niter = 30
    dba = DBA(niter, verbose=True, tol=1e-4)
    # time the DBA computation with 5 random starts, compressing the average to 10 points
    t1 = time.clock()
    dba_avg = dba.compute_average(ts_list, nstarts=5, dba_length=10)
    t2 = time.clock()
    print 'DBA algorithm took', t2 - t1, 'seconds.'
    # plot each feature of the inputs against the DBA average (black circles)
    plt.subplot(221)
    for i in range(nseries):
        plt.plot(t_list[i], ts_list[i][:, 0], '.', ms=2)
    t = np.linspace(0.0, 10.0, len(dba_avg))
    plt.plot(t, dba_avg[:, 0], 'ko')
    plt.subplot(222)
    for i in range(nseries):
        plt.plot(t_list[i], ts_list[i][:, 1], '.', ms=2)
    t = np.linspace(0.0, 10.0, len(dba_avg))
    plt.plot(t, dba_avg[:, 1], 'ko')
    plt.subplot(223)
    for ts in ts_list:
        plt.plot(ts[:, 0], ts[:, 1], '.', ms=2)
    plt.plot(dba_avg[:, 0], dba_avg[:, 1], 'ko')
    plt.show()
    plt.close()
    # find the segments of the first time series identified with each element of the average
    assoc = dba.associate_segments(ts_list[0])
    plt.subplot(221)
    t = t_list[0]
    ts = ts_list[0]
    for i, a in enumerate(assoc):
        plt.plot(t[a], ts[a, 0], '.', label=str(i))
        plt.plot(np.median(t[a]), dba_avg[i, 0], 'ko')
    plt.subplot(222)
    for i, a in enumerate(assoc):
        plt.plot(t[a], ts[a, 1], '.', label=str(i))
        plt.plot(np.median(t[a]), dba_avg[i, 1], 'ko')
    plt.subplot(223)
    for i, a in enumerate(assoc):
        plt.plot(ts[a, 0], ts[a, 1], '.', label=str(i))
        plt.plot(dba_avg[i, 0], dba_avg[i, 1], 'ko')
plt.show() | {
"repo_name": "brandonckelly/bck_stats",
"path": "bck_stats/dba.py",
"copies": "1",
"size": "13381",
"license": "mit",
"hash": -720592700124811400,
"line_mean": 40.0490797546,
"line_max": 120,
"alpha_frac": 0.5677453105,
"autogenerated": false,
"ratio": 3.6312075983717773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9691616299656587,
"avg_score": 0.0014673218430382086,
"num_lines": 326
} |
__author__ = 'brandonkelly'
import numpy as np
from sklearn.isotonic import IsotonicRegression
class REACT(object):
    """
    Risk Estimation and Adaptation after Coordinate Transformation (REACT) smoother.

    The data are expanded in an orthogonal basis (the discrete cosine transform by default),
    the basis coefficients are shrunk toward zero based on their estimated signal-to-noise
    ratio, and the smoothed signal is reconstructed from the shrunken coefficients.
    """

    def __init__(self, basis='DCT', n_components=None, method='monotone'):
        """
        :param basis: The basis, either 'DCT' (discrete cosine transform) or 'manual'
            (a basis matrix supplied directly to fit()).
        :param n_components: The number of basis functions to use. If None, set from the data in fit().
        :param method: The shrinkage method, either 'monotone' (monotone shrinkage of the
            coefficients) or 'nss' (nested subset selection).
        :raises ValueError: If basis or method is not a supported value. (The original
            try/except blocks discarded the membership test and never raised.)
        """
        if basis.lower() not in ('dct', 'manual'):
            raise ValueError('Input basis must be either DCT or manual.')
        if method.lower() not in ('monotone', 'nss'):
            raise ValueError('method must be either monotone or nss.')
        self.basis = basis
        self.nfreq = 1  # number of data points used to build the DCT basis
        self.ncomp = 1  # number of basis functions actually used
        self.n_components = n_components
        self.method = method
        self.coefs = np.zeros(1)  # basis coefficients, set by fit()
        self.shrinkage_factors = np.zeros(1)  # per-coefficient shrinkage in [0, 1]

    def fit(self, y, X=None, sigsqr=None):
        """
        Fit the smoother and return the smoothed version of y.

        :param y: The data to smooth, a 1-D numpy array.
        :param X: Optional basis matrix of shape (len(y), n_components). If None, the DCT
            basis is built automatically.
        :param sigsqr: The noise variance. If None, estimated with the first-difference estimator.
        :return: The smoothed y values.
        :raises ValueError: If the number of basis components exceeds len(y). (The original
            try/except silently ignored this condition.)
        """
        if X is None:
            # build the discrete cosine basis
            if self.n_components is None:
                n_components = len(y)
            else:
                n_components = self.n_components
            X = self.build_dct(len(y), n_components)
            self.nfreq = len(y)
            self.ncomp = n_components
        else:
            if self.n_components is None:
                n_components = X.shape[1]
            else:
                n_components = self.n_components
            self.ncomp = n_components
        if n_components > len(y):
            raise ValueError('Number of components must be less than the length of y.')
        self.coefs = np.dot(X.T, y)
        if sigsqr is None:
            # estimate noise variance using first difference estimator
            sigsqr = np.sum((y[1:] - y[:-1]) ** 2) / (2.0 * (len(y) - 1))
        if self.method == 'monotone':
            # use monotone shrinkage on the basis coefficients
            self._set_shrinkage_factors(sigsqr)
        else:
            # use nested subset selection to choose the order of the basis expansion
            self._set_nss_order(sigsqr)
        self.coefs *= self.shrinkage_factors
        ysmooth = X.dot(self.coefs)
        return ysmooth

    @staticmethod
    def build_dct(n, p):
        """
        Build the (n x p) normalized discrete cosine transform (DCT-I) basis matrix.

        :param n: The number of rows (data points).
        :param p: The number of basis functions, p <= n.
        :return: The basis matrix U.
        """
        rows, columns = np.mgrid[:n, :p]
        U = np.cos(np.pi * rows * columns / (n - 1.0))
        # endpoint rows/columns receive half weight in the DCT-I normalization
        row_norm = 2 * np.ones(n)
        row_norm[0] = 1.0
        row_norm[-1] = 1.0
        col_norm = 2 * np.ones(p)
        col_norm[0] = 1.0
        if p == n:
            col_norm[-1] = 1.0
        U *= 0.5 * np.sqrt(2.0 * np.outer(row_norm, col_norm) / (n - 1))
        return U

    def interpolate(self, x_idx):
        """
        Evaluate the smoothed signal at (possibly fractional) index positions.

        NOTE(review): the normalization below builds weights of length self.nfreq, which
        broadcasts against U only when len(x_idx) == self.nfreq -- confirm intended usage.

        :param x_idx: The index positions at which to evaluate the smoothed signal.
        :return: The interpolated smoothed values.
        :raises ValueError: If the basis is not the DCT. (The original code compared
            self.method instead of self.basis inside a dead try/except and never raised.)
        """
        if self.basis.lower() != 'dct':
            raise ValueError('Interpolation only available for DCT basis.')
        n = self.nfreq
        p = self.ncomp
        cols = np.arange(p)
        row_norm = 2 * np.ones(n)
        row_norm[0] = 1.0
        row_norm[-1] = 1.0
        col_norm = 2 * np.ones(p)
        col_norm[0] = 1.0
        U = np.cos(np.pi * np.outer(x_idx / n, cols))
        U *= 0.5 * np.sqrt(2.0 * np.outer(row_norm, col_norm) / (n - 1))
        y_interp = U.dot(self.coefs)
        return y_interp

    def _set_shrinkage_factors(self, sigsqr):
        """Set shrinkage factors via monotone (isotonic) regression of the coefficient SNRs."""
        coefs_snr = (self.coefs ** 2 - sigsqr) / self.coefs ** 2  # signal-to-noise ratio of the coefficients
        coefs_snr[coefs_snr < 0] = 0.0
        x = np.arange(len(coefs_snr))
        weights = self.coefs ** 2
        self.shrinkage_factors = \
            IsotonicRegression(y_min=0.0, y_max=1.0, increasing=False).fit_transform(x, coefs_snr, weights)

    def _set_nss_order(self, sigsqr):
        """Choose the expansion order minimizing estimated risk (nested subset selection)."""
        coefs_snr = (self.coefs ** 2 - sigsqr) / self.coefs ** 2  # signal-to-noise ratio of the coefficients
        coefs_snr[coefs_snr < 0] = 0.0
        risk = np.empty(len(coefs_snr))
        shrinkage_factor = np.zeros(len(coefs_snr))
        # range behaves identically to the Python-2-only xrange used originally
        for j in range(len(risk)):
            shrinkage_factor[:j + 1] = 1.0
            risk[j] = np.mean((shrinkage_factor - coefs_snr) ** 2 * self.coefs ** 2)
        best_order = risk.argmin()
        self.shrinkage_factors = np.ones(len(coefs_snr))
        self.shrinkage_factors[best_order:] = 0.0  # only keep first best_order basis coefficients
class REACT2D(REACT):
    """2-D REACT smoother using a tensor-product discrete cosine basis."""

    def __init__(self, max_order=None, method='monotone'):
        """
        :param max_order: The maximum 1-D order of the tensor-product basis (n_components).
        :param method: The shrinkage method, 'monotone' or 'nss'.
        """
        # currently only support the DCT for 2-D data
        super(REACT2D, self).__init__('DCT', max_order, method)
        self.row_order = np.zeros(1)  # row orders of the basis images, set by fit()
        self.col_order = np.zeros(1)  # column orders of the basis images, set by fit()

    def interpolate(self, x_idx):
        # Interpolation is not implemented for the 2-D basis; the 1-D parent implementation
        # does not apply, so just warn the caller (the original guarded the parent call with
        # a dead `if True: ... else:` branch).
        print('Interpolation not currently available for REACT2D')

    @staticmethod
    def build_dct(nrows, ncols, p):
        """
        Build the (nrows*ncols x p*p) tensor-product DCT basis, columns sorted by the
        sum of squares of their (row, column) orders.

        :param nrows: Number of image rows.
        :param ncols: Number of image columns.
        :param p: Maximum 1-D order along each axis.
        :return: The 2-D basis matrix with flattened basis images as columns.
        """
        # first build 1-D basis vectors
        Urows = super(REACT2D, REACT2D).build_dct(nrows, p)
        Ucols = super(REACT2D, REACT2D).build_dct(ncols, p)
        # now build 2-d basis as outer products of 1-d basis vectors
        row_order, col_order = np.mgrid[:p, :p]
        row_order = row_order.ravel() + 1
        col_order = col_order.ravel() + 1
        # sort the basis images by the sum of squares of their orders
        sqr_order = row_order ** 2 + col_order ** 2
        s_idx = np.argsort(sqr_order)
        row_order = row_order[s_idx]
        col_order = col_order[s_idx]
        U = np.empty((nrows * ncols, len(row_order)))
        for j in range(len(row_order)):  # range: identical to the Python-2-only xrange used originally
            U[:, j] = np.outer(Urows[:, row_order[j] - 1], Ucols[:, col_order[j] - 1]).ravel()
        return U

    def fit(self, y, sigsqr):
        """
        Fit the 2-D smoother and return the smoothed image.

        :param y: The 2-D data to smooth.
        :param sigsqr: The noise variance.
        :return: The smoothed image, same shape as y.
        :raises ValueError: If n_components exceeds min(y.shape). (The original try/except
            silently ignored this condition.)
        """
        if self.n_components is None:
            components_from_y = True
            self.n_components = min(y.shape)
        else:
            components_from_y = False
        if self.n_components > min(y.shape):
            raise ValueError('Number of components must be less than the length of y.')
        # build the 2-D DCT here and then feed into REACT.fit()
        X = self.build_dct(y.shape[0], y.shape[1], self.n_components)
        ysmooth = super(REACT2D, self).fit(y.ravel(), X, sigsqr)
        # save the orders of the basis functions
        row_order, col_order = np.mgrid[:self.n_components, :self.n_components]
        row_order = row_order.ravel() + 1
        col_order = col_order.ravel() + 1
        # sort the basis images by the sum of squares of their orders
        sqr_order = row_order ** 2 + col_order ** 2
        s_idx = np.argsort(sqr_order)
        self.row_order = row_order[s_idx]
        self.col_order = col_order[s_idx]
        if components_from_y:
            # return n_components to value from constructor
            self.n_components = None
        return np.reshape(ysmooth, y.shape)
| {
"repo_name": "brandonckelly/bck_stats",
"path": "bck_stats/react.py",
"copies": "1",
"size": "6675",
"license": "mit",
"hash": -5587184589497234000,
"line_mean": 34.1315789474,
"line_max": 109,
"alpha_frac": 0.5546067416,
"autogenerated": false,
"ratio": 3.3814589665653494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44360657081653493,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import numpy as np
import abc
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, GradientBoostingRegressor, \
RandomForestRegressor
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import accuracy_score, make_scorer, mean_absolute_error, mean_squared_error
from sklearn.cross_validation import KFold
from sklearn.base import clone
# Numeric type groups used when refining tuning-parameter grids (int grids stay integral,
# float grids become logarithmic). NOTE(review): np.float and np.int are deprecated aliases
# removed in modern NumPy, and np.float128 is platform-specific -- confirm the target NumPy
# version before upgrading.
float_types = (float, np.float, np.float32, np.float64, np.float_, np.float128, np.float16)
int_types = (int, np.int, np.int8, np.int16, np.int32, np.int64)
class GbcAutoNtrees(GradientBoostingClassifier):
    """
    Same as GradientBoostingClassifier, but the number of estimators is chosen automatically by maximizing the
    out-of-bag score.
    """
    def __init__(self, subsample, loss='deviance', learning_rate=0.01, n_estimators=500, min_samples_split=2,
                 min_samples_leaf=1, max_depth=3, init=None, random_state=None, max_features=None, verbose=0):
        # subsample is required (no default) -- presumably because the oob_improvement_
        # used in fit() is only defined when subsample < 1; confirm against sklearn docs.
        super(GbcAutoNtrees, self).__init__(loss, learning_rate, n_estimators, subsample, min_samples_split,
                                            min_samples_leaf, max_depth, init, random_state, max_features, verbose)

    def fit(self, X, y):
        """
        Fit the boosted classifier, then truncate the ensemble at the iteration count that
        maximizes the cumulative out-of-bag improvement.

        :param X: The feature matrix.
        :param y: The class labels.
        :return: self, with n_estimators and estimators_ truncated to the chosen size.
        """
        super(GbcAutoNtrees, self).fit(X, y)
        # cumulative OOB improvement as a function of the number of boosting iterations
        oob_score = np.cumsum(self.oob_improvement_)
        ntrees = oob_score.argmax() + 1
        if self.verbose:
            print 'Chose', ntrees, 'based on the OOB score.'
        self.n_estimators = ntrees
        self.estimators_ = self.estimators_[:ntrees]
        # plt.plot(oob_score)
        # plt.show()
        return self
class GbrAutoNtrees(GradientBoostingRegressor):
    """
    Same as GradientBoostingRegressor, but the number of estimators is chosen automatically by maximizing the
    out-of-bag score.
    """
    def __init__(self, subsample, loss='ls', learning_rate=0.1, n_estimators=100, min_samples_split=2,
                 min_samples_leaf=1, max_depth=3, init=None, random_state=None, max_features=None, alpha=0.9,
                 verbose=0):
        # subsample is required (no default) -- presumably because the oob_improvement_
        # used in fit() is only defined when subsample < 1; confirm against sklearn docs.
        super(GbrAutoNtrees, self).__init__(loss, learning_rate, n_estimators, subsample, min_samples_split,
                                            min_samples_leaf, max_depth, init, random_state, max_features, alpha,
                                            verbose)

    def fit(self, X, y):
        """
        Fit the boosted regressor, then truncate the ensemble at the iteration count that
        maximizes the cumulative out-of-bag improvement.

        :param X: The feature matrix.
        :param y: The regression targets.
        :return: self, with n_estimators and estimators_ truncated to the chosen size.
        """
        super(GbrAutoNtrees, self).fit(X, y)
        # cumulative OOB improvement as a function of the number of boosting iterations
        oob_score = np.cumsum(self.oob_improvement_)
        ntrees = oob_score.argmax() + 1
        if self.verbose:
            print 'Chose', ntrees, 'based on the OOB score.'
        self.n_estimators = ntrees
        self.estimators_ = self.estimators_[:ntrees]
        # plt.plot(oob_score)
        # plt.show()
        return self
class BasePredictorSuite(object):
""" Base class for running a suite of estimators from scikit-learn. """
__metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def __init__(self, tuning_ranges=None, models=None, cv=None, njobs=1, pre_dispatch='2*n_jobs', stack=True,
                 verbose=False):
        """
        Initialize a pipeline to run a suite of scikit-learn estimators. The tuning parameters are chosen through
        cross-validation or the out-of-bags score (for Random Forests) as part of the fitting process.

        :param tuning_ranges: A nested dictionary containing the ranges of the tuning parameters. It should be of the
            format {model name 1: {parameter name 1: list(value range 1), parameter name 2: list(value range 2), ...} }.
        :param models: A list of instantiated scikit-learn estimator classes to fit. If None, these are taken from
            the models listed in tuning_range.
        :param cv: The number of CV folds to use, or a CV generator.
        :param njobs: The number of processes to run in parallel.
        :param pre_dispatch: Passed to sklearn.grid_search.GridSearchCV, see documentation for GridSearchCV for further
            details.
        :param stack: If true, then the predict() method will return a stacked (averaged) value over the estimators.
            Otherwise, if false, then predict() will return the predictions for each estimator.
        :param verbose: If true, print out helpful information.
        """
        super(BasePredictorSuite, self).__init__()
        self.verbose = verbose
        if tuning_ranges is None:
            tuning_ranges = dict()
        self.tuning_ranges = tuning_ranges
        if models is None:
            models = []
        self.models = models
        self.model_names = []
        for model in self.models:
            # store the names of the sklearn classes used
            self.model_names.append(model.__class__.__name__)
            # make sure the model names are in the dictionary of tuning parameters
            # NOTE(review): ValueError is given two args here, so the message renders as a tuple
            if model.__class__.__name__ not in tuning_ranges:
                raise ValueError('Could not find tuning parameters for', model.__class__.__name__)
        if cv is None:
            cv = 3
        self.cv = cv
        self.njobs = njobs
        self.pre_dispatch = pre_dispatch
        self.scorer = None  # concrete subclasses are expected to assign a scorer
        self.stack = stack
        self.best_scores = dict()  # best CV/OOB score per model name, filled during fitting
        self.nfeatures = None  # number of features, set when data are first seen
    def refine_grid(self, best_params, model_name):
        """
        Refine the tuning parameter grid to zoom in on the region near the current maximum.

        For each tuning parameter the new grid depends on where the current best value
        sits in the old grid: at the low edge the grid is extended downward, at the
        high edge it is extended upward (by a factor of ~2), and for interior values
        the grid is re-centered between the neighboring grid points. Integer-valued
        parameters get integer grids, floats get logarithmic grids, and a trailing
        ``None`` entry (e.g. max_depth for trees) is preserved as a special case.
        The refined grids are written back into self.tuning_ranges in place.

        :param best_params: A dictionary containing the set of best tuning parameter names and their values. Should be
            of the form {'parameter 1': value1, 'parameter 2', value2, ... }. The tuning parameter grid will be refined
            in the region of these parameter values.
        :param model_name: The name of the estimator corresponding to the tuning parameters in best_params.
        """
        for param_name in best_params:
            pvalue_list = self.tuning_ranges[model_name][param_name]
            best_value = best_params[param_name]
            # find the grid index of the current best value; raises ValueError if
            # best_value is not an element of the current grid
            idx = pvalue_list.index(best_value)
            ngrid = len(pvalue_list)
            if idx == 0:
                # first element of grid, so expand below it
                # NOTE: int_types is defined at module level (not shown in this chunk)
                if type(pvalue_list[0]) in int_types:
                    pv_min = pvalue_list[0] / 2  # reduce minimum grid value by a factor of 2
                    pv_min = max(1, pv_min)  # assume integer tuning parameters are never less than 1.
                    pv_max = pvalue_list[1]
                    self.tuning_ranges[model_name][param_name] = \
                        list(np.unique(np.linspace(pv_min, pv_max, ngrid).astype(np.int)))
                else:
                    # use logarithmic grids for floats; extend downward by one old grid spacing (in log10)
                    dp = np.log10(pvalue_list[1]) - np.log10(pvalue_list[0])
                    pv_min = np.log10(pvalue_list[0]) - dp
                    pv_max = np.log10(pvalue_list[1])
                    self.tuning_ranges[model_name][param_name] = list(np.logspace(pv_min, pv_max, ngrid))
                if self.verbose:
                    print self.tuning_ranges[model_name][param_name]
            elif idx == ngrid - 1:
                # last element of grid, so expand above it
                if pvalue_list[idx] is None:
                    # special situation for some estimators, like the DecisionTreeClassifier:
                    # None means "no limit", so grow the largest finite value instead
                    pv_min = pvalue_list[idx-1]  # increase the maximum grid value by a factor of 2
                    pv_max = 2 * pv_min
                    self.tuning_ranges[model_name][param_name] = \
                        list(np.unique(np.linspace(pv_min, pv_max, ngrid-1).astype(np.int)))
                    # make sure we keep None as the last value in the list
                    self.tuning_ranges[model_name][param_name].append(None)
                elif type(pvalue_list[idx]) in int_types:
                    pv_min = np.log10(pvalue_list[idx-1])
                    pv_max = np.log10(2 * pvalue_list[idx])  # increase the maximum grid value by a factor of 2
                    if param_name == 'max_features':
                        # can't have max_features > nfeatures
                        pv_max = min(2 * pvalue_list[idx], self.nfeatures)
                        pv_max = np.log10(pv_max)
                    self.tuning_ranges[model_name][param_name] = \
                        list(np.unique(np.logspace(pv_min, pv_max, ngrid).astype(np.int)))
                else:
                    # use logarithmic grids for floats; extend upward by one old grid spacing (in log10)
                    dp = np.log10(pvalue_list[idx]) - np.log10(pvalue_list[idx-1])
                    pv_min = np.log10(pvalue_list[idx-1])
                    pv_max = np.log10(pvalue_list[idx]) + dp
                    self.tuning_ranges[model_name][param_name] = list(np.logspace(pv_min, pv_max, ngrid))
                if self.verbose:
                    print self.tuning_ranges[model_name][param_name]
            else:
                # inner element of grid: re-center between the two neighboring grid points
                if pvalue_list[idx + 1] is None:
                    # special situation for some estimators, like the DecisionTreeClassifier:
                    # the next value is None ("no limit"), so expand above the best finite value
                    pv_min = pvalue_list[idx-1]
                    pv_max = 2 * pvalue_list[idx]
                    self.tuning_ranges[model_name][param_name] = \
                        list(np.unique(np.linspace(pv_min, pv_max, ngrid-1).astype(np.int)))
                    # make sure we keep None as the last value in the list
                    self.tuning_ranges[model_name][param_name].append(None)
                elif type(pvalue_list[idx]) in int_types:
                    pv_min = pvalue_list[idx-1]
                    pv_max = pvalue_list[idx+1]
                    # switch to linear spacing for interior integer grid values
                    self.tuning_ranges[model_name][param_name] = \
                        list(np.unique(np.linspace(pv_min, pv_max, ngrid).astype(np.int)))
                else:
                    # use logarithmic grids for floats
                    pv_min = np.log10(pvalue_list[idx-1])
                    pv_max = np.log10(pvalue_list[idx+1])
                    self.tuning_ranges[model_name][param_name] = list(np.logspace(pv_min, pv_max, ngrid))
                if self.verbose:
                    print self.tuning_ranges[model_name][param_name]
        # print 'New Grid:', self.tuning_ranges[model_name][param_name]
    def cross_validate(self, X, model_idx, y):
        """
        Fit the tuning parameters for an estimator on a grid using cross-validation.

        :param X: The array of predictors, shape (n_samples, n_features).
        :param model_idx: The index of the estimator to fit.
        :param y: The array of response values, shape (n_samples) or (n_samples, n_outputs) depending on the estimator.
        :return: A tuple containing the scikit-learn estimator object with the best tuning parameters, the score
            corresponding to the best tuning parameters, and a dictionary containing the best tuning parameter values.
        """
        if self.verbose:
            print 'Doing cross-validation for model', self.model_names[model_idx], '...'
        # exhaustive grid search over this model's tuning ranges, using the
        # suite-level scorer, CV iterator, and parallelism settings
        grid = GridSearchCV(self.models[model_idx], self.tuning_ranges[self.model_names[model_idx]],
                            scoring=self.scorer, n_jobs=self.njobs, cv=self.cv, pre_dispatch=self.pre_dispatch)
        grid.fit(X, y)
        if self.verbose:
            print 'Best', self.model_names[model_idx], 'has:'
            for tuning_parameter in self.tuning_ranges[self.model_names[model_idx]]:
                print ' ', tuning_parameter, '=', grid.best_params_[tuning_parameter]
            print ' CV Score of', grid.best_score_
        return grid.best_estimator_, grid.best_score_, grid.best_params_
    def oob_validate(self, X, model_idx, y):
        """
        Fit the tuning parameters for a Random Forest estimator on a grid by maximizing the score of the out-of-bag
        samples. This is faster than using cross-validation.

        The estimator at model_idx must expose an ``oob_score_`` attribute after
        fitting (i.e. it was constructed with oob_score=True).

        :param X: The array of predictors, shape (n_samples, n_features).
        :param model_idx: The index of the estimator to fit.
        :param y: The array of response values, shape (n_samples) or (n_samples, n_outputs) depending on the estimator.
        :return: A tuple containing the scikit-learn estimator object with the best tuning parameters, the score
            corresponding to the best tuning parameters, and a dictionary containing the best tuning parameter values.
        """
        if self.verbose:
            print 'Doing OOB-validation for model', self.model_names[model_idx], '...'
        # enumerate every point of the tuning parameter grid
        tune_grid = list(ParameterGrid(self.tuning_ranges[self.model_names[model_idx]]))
        best_estimator = None
        best_score = -1e30  # sentinel; any real OOB score beats this
        # fit random forest
        for point in tune_grid:
            # clone so each grid point is fit from a fresh, unfitted estimator
            estimator = clone(self.models[model_idx])
            for tpar in point:
                # set the tuning parameters
                estimator.__setattr__(tpar, point[tpar])
            estimator.fit(X, y)
            if estimator.oob_score_ > best_score:
                # new best values, save them
                best_score = estimator.oob_score_
                best_estimator = estimator
                best_params = estimator.get_params()
        # best_params holds *all* estimator parameters; reduce to the tuned ones
        best_tparams = dict()
        for tpar in self.tuning_ranges[self.model_names[model_idx]]:
            best_tparams[tpar] = best_params[tpar]  # only grab the values of the best tuning parameter
        if self.verbose:
            print 'Best', self.model_names[model_idx], 'has:'
            for tuning_parameter in self.tuning_ranges[self.model_names[model_idx]]:
                print ' ', tuning_parameter, '=', best_tparams[tuning_parameter]
            print ' OOB Score of', best_score
        return best_estimator, best_score, best_tparams
    def fit(self, X, y, n_refinements=1):
        """
        Fit the suite of estimators. The tuning parameters are estimated using cross-validation.

        :param X: The array of predictors, shape (n_samples, n_features).
        :param y: The array of response values, shape (n_samples) or (n_samples, n_outputs), depending on the estimator.
        :param n_refinements: The number of time to refine the grid of tuning parameter values. Must be an integer or
            dictionary. If an integer, the grid for all models will be refined this many times. If a dictionary, should
            have (key value) pairs given by (estimator name, n_refinements).
        :return: Returns self.
        """
        self.nfeatures = X.shape[1]
        ndata = len(y)
        if X.shape[0] != ndata:
            raise ValueError('X and y must have same number of rows.')
        if np.isscalar(n_refinements):
            # use same number of refinements for all models
            n_refinements = {name: n_refinements for name in self.model_names}
        if type(self.cv) in int_types:
            # construct cross-validation iterator.
            # NOTE(review): KFold(n, n_folds=...) and the .n / .n_folds attributes
            # below are the pre-0.18 sklearn API -- confirm against the pinned version.
            self.cv = KFold(ndata, n_folds=self.cv)
        elif self.cv.n != ndata:
            # need to reconstruct cross-validation iterator since we have different data
            self.cv = KFold(ndata, n_folds=self.cv.n_folds)
        for k in range(len(self.models)):
            if 'RandomForest' in self.model_names[k]:
                # use out-of-bag error for validation error
                best_estimator, best_score, best_params = self.oob_validate(X, k, y)
            else:
                # use cross-validation for validation error
                best_estimator, best_score, best_params = self.cross_validate(X, k, y)
            # keep the tuned estimator and record its validation score
            self.models[k] = best_estimator
            self.best_scores[self.model_names[k]] = best_score
            for i in range(n_refinements[self.model_names[k]]):
                if self.verbose:
                    print 'Refining Grid...'
                old_score = best_score
                # now refine the grid and refit
                self.refine_grid(best_params, self.model_names[k])
                if 'RandomForest' in self.model_names[k]:
                    # use out-of-bag error for validation error
                    best_estimator, best_score, best_params = self.oob_validate(X, k, y)
                else:
                    # use cross-validation for validation error
                    best_estimator, best_score, best_params = self.cross_validate(X, k, y)
                if self.verbose:
                    print ' New Validation Score of', best_score, 'is an improvement of', \
                        100.0 * (best_score - old_score) / np.abs(old_score), '%.'
                # NOTE(review): the refined fit is kept even if its score is worse
                # than the pre-refinement fit -- confirm this is intended.
                self.models[k] = best_estimator
                self.best_scores[self.model_names[k]] = best_score
        return self
def predict_all(self, X):
"""
Predict the outputs as a function of the inputs for each model.
:param X: The array of predictor values, shape (n_samples, n_features).
:return: A dictionary containing the values of the response predicted at the input values for each model.
"""
y_predict_all = {name: model.predict(X) for name, model in zip(self.model_names, self.models)}
return y_predict_all
    @abc.abstractmethod
    def predict(self, X, weights='auto'):
        # Abstract: subclasses decide how to combine the per-model predictions.
        # This default simply returns the dictionary produced by predict_all().
        return self.predict_all(X)
class ClassificationSuite(BasePredictorSuite):
def __init__(self, n_features=None, tuning_ranges=None, models=None, cv=None, njobs=1, pre_dispatch='2*n_jobs',
stack=True, verbose=False):
"""
Initialize a pipeline to run a suite of scikit-learn classifiers. The tuning parameters are chosen through
cross-validation or the out-of-bags score (for Random Forests) as part of the fitting process. The score
function used is the accuracy score (fraction of correct classifications).
:param verbose: Provide helpful output.
:param n_features: The number of features that will be used when performing the fit. Must supply either
n_features or tuning_ranges. This is necessary because the tuning parameter for the RandomForestClassifier
is max_features, and max_features must be less than the number of features in the input array. So, in order
to automatically construct the tuning_ranges dictionary it is necessary to know n_features in order to
ensure max_features <= n_features.
:param tuning_ranges: A nested dictionary containing the ranges of the tuning parameters. It should be of the
format {model name 1: {parameter name 1: list(value range 1), parameter name 2: list(value range 2), ...} }.
If n_features is not supplied, then tuning_ranges must be provided.
:param models: A list of instantiated scikit-learn classifier classes to fit. If None, these are taken from
the models listed in tuning_range.
:param cv: The number of CV folds to use, or a CV generator.
:param njobs: The number of processes to run in parallel.
:param pre_dispatch: Passed to sklearn.grid_search.GridSearchCV, see documentation for GridSearchCV for further
details.
:param stack: If true, then the predict() method will return a stacked (averaged) value over the estimators.
Otherwise, if false, then predict() will return the predictions for each estimator.
"""
if tuning_ranges is None:
try:
n_features is not None
except ValueError:
'Must supply one of n_features or tuning_ranges.'
# use default values for grid search over tuning parameters for all models
tuning_ranges = {'LogisticRegression': {'C': list(np.logspace(-2.0, 1.0, 5))},
'DecisionTreeClassifier': {'max_depth': [5, 10, 20, 50, None]},
'LinearSVC': {'C': list(np.logspace(-2.0, 1.0, 5))},
'SVC': {'C': list(np.logspace(-2.0, 1.0, 5)),
'gamma': list(np.logspace(np.log10(1.0 / n_features),
np.log10(1000.0 / n_features), 5))},
'RandomForestClassifier': {'max_features':
list(np.unique(np.linspace(2, n_features, 5).astype(np.int)))},
'GbcAutoNtrees': {'max_depth': [1, 2, 3, 5, 10]}}
if models is None:
# initialize the list of sklearn objects corresponding to different statistical models
models = []
if 'LogisticRegression' in tuning_ranges:
models.append(LogisticRegression(penalty='l1', class_weight='auto'))
if 'DecisionTreeClassifier' in tuning_ranges:
models.append(DecisionTreeClassifier())
if 'LinearSVC' in tuning_ranges:
models.append(LinearSVC(penalty='l1', loss='l2', dual=False, class_weight='auto'))
if 'SVC' in tuning_ranges:
models.append(SVC(class_weight='auto'))
if 'RandomForestClassifier' in tuning_ranges:
models.append(RandomForestClassifier(n_estimators=500, oob_score=True, n_jobs=njobs))
if 'GbcAutoNtrees' in tuning_ranges:
models.append(GbcAutoNtrees(subsample=0.75, n_estimators=500, learning_rate=0.01))
super(ClassificationSuite, self).__init__(tuning_ranges=tuning_ranges, models=models, cv=cv, njobs=njobs,
pre_dispatch=pre_dispatch, stack=stack, verbose=verbose)
self.scorer = make_scorer(accuracy_score)
self.nfeatures = n_features
self.classes = None
def predict(self, X, weights='auto'):
"""
Predict the classes as a function of the inputs. If self.stack is true, then the predictions for each data point
are computed based on a weighted majority vote of the estimators. Otherwise, a dictionary containing the
predictions for each estimator are returns.
:param X: The array of predictor values, shape (n_samples, n_features).
:param weights: The weights to use when combining the predictions for the individual estimators. If 'auto', then
the weights are given by the validation scores. If 'uniform', then uniform weights are used. Otherwise
weights must be a dictionary with (model name, weight) as the (key, value) pair.
No effect if self.stack = False.
:return: The values of the response predicted at the input values.
"""
y_predict_all = super(ClassificationSuite, self).predict_all(X)
if weights is 'uniform':
# just use uniform weighting
weights = {name: 1.0 for name in self.model_names}
if weights is 'auto':
# weight based on validation score
weights = self.best_scores
if self.stack:
# combine the model outputs
y_votes = np.zeros((X.shape[0], len(self.model_names)))
for name in y_predict_all:
vote = y_predict_all[name]
idx_1d = vote + np.arange(len(vote)) * y_votes.shape[1]
# compute weighted vote for each class
y_votes[np.unravel_index(idx_1d, y_votes.shape)] += weights[name]
y_predict = self.classes[y_votes.argmax(axis=1)] # output is winner of majority vote
else:
y_predict = y_predict_all
return y_predict
def fit(self, X, y, n_refinements=1):
classes, y = np.unique(y, return_inverse=True)
self.classes = classes
return super(ClassificationSuite, self).fit(X, y, n_refinements)
class RegressionSuite(BasePredictorSuite):
def __init__(self, n_features=None, tuning_ranges=None, models=None, cv=None, njobs=1, pre_dispatch='2*n_jobs',
stack=True, verbose=False, metric='lad'):
if metric.lower() not in ['lad', 'mse']:
raise ValueError('Metric must be either lad or mse.')
if tuning_ranges is None:
try:
n_features is not None
except ValueError:
'Must supply one of n_features or tuning_ranges.'
# use default values for grid search over tuning parameters for all models
tuning_ranges = {'DecisionTreeClassifier': {'max_depth': [5, 10, 20, 50, None]},
'RandomForestRegressor': {'max_features':
list(np.unique(np.linspace(2, n_features, 5).astype(np.int)))},
'GbrAutoNtrees': {'max_depth': [1, 2, 3, 5, 10]}}
if models is None:
# initialize the list of sklearn objects corresponding to different statistical models
models = []
if 'DecisionTreeRegressor' in tuning_ranges:
models.append(DecisionTreeRegressor())
if 'RandomForestRegressor' in tuning_ranges:
models.append(RandomForestRegressor(n_estimators=500, oob_score=True, n_jobs=njobs))
if 'GbrAutoNtrees' in tuning_ranges:
models.append(GbrAutoNtrees(subsample=0.75, n_estimators=500, learning_rate=0.01))
super(RegressionSuite, self).__init__(tuning_ranges, models, cv, njobs, pre_dispatch, stack, verbose)
self.scorer = make_scorer(accuracy_score)
self.nfeatures = n_features
self.metric = metric.lower()
if self.metric == 'lad':
self.scorer = make_scorer(mean_absolute_error, greater_is_better=False)
elif self.metric == 'mse':
self.scorer = make_scorer(mean_squared_error, greater_is_better=False)
def predict(self, X, weights='auto'):
y_predict_all = super(RegressionSuite, self).predict_all(X)
if weights is 'uniform':
# just use uniform weighting
weights = {name: 1.0 for name in self.model_names}
if weights is 'auto':
# weight based on validation score
weights = self.best_scores
if self.stack:
# combine the model outputs
y_predict = 0.0
wsum = 0.0
for name in y_predict_all:
y_predict += weights[name] * y_predict_all[name]
wsum += weights[name]
y_predict /= wsum
else:
y_predict = y_predict_all
return y_predict | {
"repo_name": "brandonckelly/bck_stats",
"path": "build/lib/bck_stats/sklearn_estimator_suite.py",
"copies": "2",
"size": "26762",
"license": "mit",
"hash": 735642944689450400,
"line_mean": 50.3685220729,
"line_max": 120,
"alpha_frac": 0.5966295494,
"autogenerated": false,
"ratio": 4.035283474065139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5631913023465138,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import numpy as np
import carmcmc as cm
import matplotlib.pyplot as plt
from os import environ
import cPickle
from astropy.io import fits
import multiprocessing
from matplotlib.mlab import detrend_mean
base_dir = environ['HOME'] + '/Projects/carma_pack/src/paper/'
data_dir = base_dir + 'data/'
nthreads = multiprocessing.cpu_count()
def make_sampler_plots(time, y, ysig, pmax, file_root, title, do_mags=False, njobs=-1):
    """
    Fit a CARMA model to a light curve and save diagnostic plots.

    Cleans the input arrays, chooses the CARMA(p,q) order by minimizing AICc,
    runs the MCMC sampler, and saves order-selection, power-spectrum, and
    fit-quality plots under base_dir + 'plots/'.

    :param time: Array of observation times.
    :param y: Array of measured values (magnitudes or log fluxes).
    :param ysig: Array of 1-sigma measurement uncertainties.
    :param pmax: Maximum AR order p to consider in the order search.
    :param file_root: Root of the output plot file names.
    :param title: Title placed on the plots.
    :param do_mags: If True, label and orient axes for magnitudes.
    :param njobs: Number of parallel jobs for the order search (-1 = all cores).
    :return: The carma sample object returned by the MCMC run.
    """
    froot = base_dir + 'plots/' + file_root
    # clean data: drop samples with non-increasing time stamps.
    # NOTE(review): dt has len(time)-1 elements, so the boolean masks below are
    # one element short of time/y/ysig; this relies on old NumPy behavior and
    # raises on modern NumPy -- verify.
    dt = time[1:] - time[0:-1]
    if np.sum(dt <= 0) > 0:
        time = time[dt > 0]
        y = y[dt > 0]
        ysig = ysig[dt > 0]
    # drop any samples with non-finite times, values, or uncertainties
    good = np.where(np.isfinite(time))[0]
    time = time[good]
    y = y[good]
    ysig = ysig[good]
    good = np.where(np.isfinite(y))[0]
    time = time[good]
    y = y[good]
    ysig = ysig[good]
    good = np.where(np.isfinite(ysig))[0]
    time = time[good]
    y = y[good]
    ysig = ysig[good]
    print 'Getting maximum-likelihood estimates...'
    carma_model = cm.CarmaModel(time, y, ysig)
    # choose (p, q) by minimizing the corrected AIC over orders up to pmax
    MAP, pqlist, AIC_list = carma_model.choose_order(pmax, njobs=njobs)
    # convert lists to a numpy arrays, easier to manipulate
    pqarray = np.array(pqlist)
    pmodels = pqarray[:, 0]
    qmodels = pqarray[:, 1]
    AICc = np.array(AIC_list)
    # plot AICc vs p, one curve per q
    plt.clf()
    plt.subplot(111)
    for i in xrange(qmodels.max()+1):
        plt.plot(pmodels[qmodels == i], AICc[qmodels == i], 's-', label='q=' + str(i), lw=2)
    plt.legend(loc='best')
    plt.xlabel('p')
    plt.ylabel('AICc(p,q)')
    plt.xlim(0, pmodels.max() + 1)
    plt.title(title)
    plt.savefig(froot + 'aic.eps')
    plt.close()
    # make sure to change these back!!!!
    # carma_model.p = 7
    # carma_model.q = 3
    nsamples = 50000
    carma_sample = carma_model.run_mcmc(nsamples)
    carma_sample.add_mle(MAP)
    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    # posterior power spectrum with 95% credibility band, plus the MLE spectrum
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                            color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    dt = time[1:] - time[0:-1]
    # approximate measurement-noise floor of the PSD
    noise_level = 2.0 * np.median(dt) * np.mean(ysig ** 2)
    ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
    ax.set_ylim(bottom=noise_level / 100.0)
    # NOTE(review): do_s82 is hard-coded True, so the annotation position is
    # always the Stripe-82-specific one -- confirm this is intended.
    do_s82 = True
    if do_s82:
        ax.annotate("Measurement Noise Level", (3.0e-2, 1e-2))
    else:
        ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency [1 / day]')
    if do_mags:
        ax.set_ylabel('Power Spectral Density [mag$^2$ day]')
    else:
        ax.set_ylabel('Power Spectral Density [flux$^2$ day]')
    plt.title(title)
    plt.savefig(froot + 'psd.eps')
    print 'Assessing the fit quality...'
    fig = carma_sample.assess_fit(doShow=False)
    ax_again = fig.add_subplot(2, 2, 1)
    ax_again.set_title(title)
    if do_mags:
        # flip the y-axis so brighter (smaller magnitude) is up
        ylims = ax_again.get_ylim()
        ax_again.set_ylim(ylims[1], ylims[0])
        ax_again.set_ylabel('magnitude')
    else:
        ax_again.set_ylabel('ln Flux')
    plt.savefig(froot + 'fit_quality.eps')
    return carma_sample
def do_simulated_regular():
    """
    Simulate a CARMA(5,3) process on a regular time grid, fit it with the CARMA
    MCMC sampler, and save time-series, AICc, PSD, and fit-quality plots plus a
    pickle of the MCMC samples.
    """
    # first generate some data assuming a CARMA(5,3) process on a uniform grid
    sigmay = 2.3  # dispersion in lightcurve
    p = 5  # order of AR polynomial
    # Lorentzian widths and centroids defining the AR roots (two QPOs + one low-frequency component)
    qpo_width = np.array([1.0/100.0, 1.0/100.0, 1.0/500.0])
    qpo_cent = np.array([1.0/5.0, 1.0/50.0])
    ar_roots = cm.get_ar_roots(qpo_width, qpo_cent)
    ma_coefs = np.zeros(p)
    ma_coefs[0] = 1.0
    ma_coefs[1] = 4.5
    ma_coefs[2] = 1.25
    # scale the white-noise variance so the process variance equals sigmay**2
    sigsqr = sigmay ** 2 / cm.carma_variance(1.0, ar_roots, ma_coefs=ma_coefs)
    ny = 1028
    time = np.arange(0.0, ny)
    y0 = cm.carma_process(time, sigsqr, ar_roots, ma_coefs=ma_coefs)
    ysig = np.ones(ny) * np.sqrt(1e-2)
    # ysig = np.ones(ny) * np.sqrt(1e-6)
    y = y0 + ysig * np.random.standard_normal(ny)
    froot = base_dir + 'plots/car5_regular_'
    # plot the noiseless process and the noisy observations
    plt.subplot(111)
    plt.plot(time, y0, 'k-')
    plt.plot(time, y, '.')
    plt.xlim(time.min(), time.max())
    plt.xlabel('Time')
    plt.ylabel('CARMA(5,3) Process')
    plt.savefig(froot + 'tseries.eps')
    ar_coef = np.poly(ar_roots)  # true AR polynomial coefficients, for the PSD comparison
    print 'Getting maximum-likelihood estimates...'
    carma_model = cm.CarmaModel(time, y, ysig)
    pmax = 7
    MAP, pqlist, AIC_list = carma_model.choose_order(pmax, njobs=-1)
    # convert lists to a numpy arrays, easier to manipulate
    pqarray = np.array(pqlist)
    pmodels = pqarray[:, 0]
    qmodels = pqarray[:, 1]
    AICc = np.array(AIC_list)
    # plot AICc vs p, one curve per q
    plt.clf()
    plt.subplot(111)
    for i in xrange(qmodels.max()+1):
        plt.plot(pmodels[qmodels == i], AICc[qmodels == i], 's-', label='q=' + str(i), lw=2)
    plt.legend()
    plt.xlabel('p')
    plt.ylabel('AICc(p,q)')
    plt.xlim(0, pmodels.max() + 1)
    plt.savefig(froot + 'aic.eps')
    plt.close()
    nsamples = 50000
    carma_sample = carma_model.run_mcmc(nsamples)
    carma_sample.add_mle(MAP)
    # raw periodogram of the regularly-sampled data, for comparison with the model PSD
    plt.subplot(111)
    pgram, freq = plt.psd(y)
    plt.clf()
    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    ax.loglog(freq / 2.0, pgram, 'o', color='DarkOrange')
    # overplot the true PSD (solid black) and the MLE PSD (dashed blue)
    psd = cm.power_spectrum(frequencies, np.sqrt(sigsqr), ar_coef, ma_coefs=ma_coefs)
    ax.loglog(frequencies, psd, 'k', lw=2)
    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    noise_level = 2.0 * np.mean(ysig ** 2)
    ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
    ax.set_ylim(bottom=noise_level / 100.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency')
    ax.set_ylabel('Power Spectral Density')
    plt.savefig(froot + 'psd.eps')
    print 'Assessing the fit quality...'
    carma_sample.assess_fit(doShow=False)
    plt.savefig(froot + 'fit_quality.eps')
    # NOTE(review): froot already contains base_dir + 'plots/', so this pickle
    # path concatenates two absolute-ish paths -- verify the intended location.
    pfile = open(data_dir + froot + '.pickle', 'wb')
    cPickle.dump(carma_sample, pfile)
    pfile.close()
def do_simulated_irregular():
    """
    Simulate a CARMA(5,3) process on an irregular, gappy time grid (three
    seasons of 90 points separated by 180-unit gaps), fit it with the CARMA
    MCMC sampler, and save time-series, AICc, PSD, fit-quality, and
    interpolation/forecast plots.
    """
    # first generate some data assuming a CARMA(5,3) process on a uniform grid
    sigmay = 2.3  # dispersion in lightcurve
    p = 5  # order of AR polynomial
    mu = 17.0  # mean of time series
    # Lorentzian widths and centroids defining the AR roots
    qpo_width = np.array([1.0/100.0, 1.0/300.0, 1.0/200.0])
    qpo_cent = np.array([1.0/5.0, 1.0/25.0])
    ar_roots = cm.get_ar_roots(qpo_width, qpo_cent)
    ma_coefs = np.zeros(p)
    ma_coefs[0] = 1.0
    ma_coefs[1] = 4.5
    ma_coefs[2] = 1.25
    # scale the white-noise variance so the process variance equals sigmay**2
    sigsqr = sigmay ** 2 / cm.carma_variance(1.0, ar_roots, ma_coefs=ma_coefs)
    # build 3 "observing seasons" of 90 points with random spacings and 180-unit gaps
    ny = 270
    time = np.empty(ny)
    dt = np.random.uniform(1.0, 3.0, ny)
    time[0:90] = np.cumsum(dt[0:90])
    time[90:2*90] = 180 + time[90-1] + np.cumsum(dt[90:2*90])
    time[2*90:] = 180 + time[2*90-1] + np.cumsum(dt[2*90:])
    y = mu + cm.carma_process(time, sigsqr, ar_roots, ma_coefs=ma_coefs)
    ysig = np.ones(ny) * y.std() / 5.0
    # ysig = np.ones(ny) * 1e-6
    y0 = y.copy()
    y += ysig * np.random.standard_normal(ny)
    froot = base_dir + 'plots/car5_irregular_'
    # plot the true process (black) and noisy observations (blue), season by season
    plt.subplot(111)
    for i in xrange(3):
        plt.plot(time[90*i:90*(i+1)], y0[90*i:90*(i+1)], 'k', lw=2)
        plt.plot(time[90*i:90*(i+1)], y[90*i:90*(i+1)], 'bo')
    plt.xlim(time.min(), time.max())
    plt.xlabel('Time')
    plt.ylabel('CARMA(5,3) Process')
    plt.savefig(froot + 'tseries.eps')
    ar_coef = np.poly(ar_roots)  # true AR polynomial coefficients, for the PSD comparison
    print 'Getting maximum-likelihood estimates...'
    carma_model = cm.CarmaModel(time, y, ysig)
    pmax = 7
    MAP, pqlist, AIC_list = carma_model.choose_order(pmax, njobs=1)
    # convert lists to a numpy arrays, easier to manipulate
    pqarray = np.array(pqlist)
    pmodels = pqarray[:, 0]
    qmodels = pqarray[:, 1]
    AICc = np.array(AIC_list)
    # plot AICc vs p, one curve per q
    plt.clf()
    plt.subplot(111)
    for i in xrange(qmodels.max()+1):
        plt.plot(pmodels[qmodels == i], AICc[qmodels == i], 's-', label='q=' + str(i), lw=2)
    plt.legend()
    plt.xlabel('p')
    plt.ylabel('AICc(p,q)')
    plt.xlim(0, pmodels.max() + 1)
    plt.savefig(froot + 'aic.eps')
    plt.close()
    nsamples = 50000
    carma_sample = carma_model.run_mcmc(nsamples)
    carma_sample.add_mle(MAP)
    plt.clf()
    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    # overplot the MLE PSD (dashed blue) and the true PSD (solid black)
    psd = cm.power_spectrum(frequencies, np.sqrt(sigsqr), ar_coef, ma_coefs=ma_coefs)
    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    ax.loglog(frequencies, psd, 'k', lw=2)
    dt = np.median(time[1:] - time[0:-1])
    # approximate measurement-noise floor of the PSD
    noise_level = 2.0 * dt * np.mean(ysig ** 2)
    ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
    ax.set_ylim(bottom=noise_level / 100.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency')
    ax.set_ylabel('Power Spectral Density')
    plt.savefig(froot + 'psd.eps')
    print 'Assessing the fit quality...'
    carma_sample.assess_fit(doShow=False)
    plt.savefig(froot + 'fit_quality.eps')
    # compute the marginal mean and variance of the predicted values on a dense
    # grid extending 25% beyond the last observation (interpolation + forecast)
    nplot = 1028
    time_predict = np.linspace(time.min(), 1.25 * time.max(), nplot)
    time_predict = time_predict[1:]
    predicted_mean, predicted_var = carma_sample.predict(time_predict, bestfit='map')
    predicted_low = predicted_mean - np.sqrt(predicted_var)
    predicted_high = predicted_mean + np.sqrt(predicted_var)
    # plot the time series and the marginal 1-sigma error bands
    plt.clf()
    plt.subplot(111)
    plt.fill_between(time_predict, predicted_low, predicted_high, color='cyan')
    plt.plot(time_predict, predicted_mean, '-b', label='Predicted')
    plt.plot(time[0:90], y0[0:90], 'k', lw=2, label='True')
    plt.plot(time[0:90], y[0:90], 'bo')
    for i in xrange(1, 3):
        plt.plot(time[90*i:90*(i+1)], y0[90*i:90*(i+1)], 'k', lw=2)
        plt.plot(time[90*i:90*(i+1)], y[90*i:90*(i+1)], 'bo')
    plt.xlabel('Time')
    plt.ylabel('CARMA(5,3) Process')
    plt.xlim(time_predict.min(), time_predict.max())
    plt.legend()
    plt.savefig(froot + 'interp.eps')
def do_simulated_irregular_nonstationary():
    """
    Simulate a non-stationary light curve by concatenating two different
    CARMA(5,3) processes (each sampled on an irregular, gappy grid of three
    90-point seasons), fit a single CARMA model to the whole series, and save
    time-series, AICc, PSD, fit-quality, and interpolation plots plus a pickle
    of the MCMC samples.
    """
    # generate first half of lightcurve assuming a CARMA(5,3) process on a uniform grid
    sigmay1 = 2.3  # dispersion in lightcurve
    p = 5  # order of AR polynomial
    qpo_width1 = np.array([1.0/100.0, 1.0/300.0, 1.0/200.0])
    qpo_cent1 = np.array([1.0/5.0, 1.0/25.0])
    ar_roots1 = cm.get_ar_roots(qpo_width1, qpo_cent1)
    ma_coefs1 = np.zeros(p)
    ma_coefs1[0] = 1.0
    ma_coefs1[1] = 4.5
    ma_coefs1[2] = 1.25
    # scale the white-noise variance so the first process has variance sigmay1**2
    sigsqr1 = sigmay1 ** 2 / cm.carma_variance(1.0, ar_roots1, ma_coefs=ma_coefs1)
    # three 90-point "seasons" with random spacings and 180-unit gaps
    ny = 270
    time = np.zeros(ny)
    dt = np.random.uniform(1.0, 3.0, ny)
    time[0:90] = np.cumsum(dt[0:90])
    time[90:2*90] = 180 + time[90-1] + np.cumsum(dt[90:2*90])
    time[2*90:] = 180 + time[2*90-1] + np.cumsum(dt[2*90:])
    y = cm.carma_process(time, sigsqr1, ar_roots1, ma_coefs=ma_coefs1)
    # second half: a different CARMA(5,3) process (different dispersion and QPOs)
    sigmay2 = 4.5  # dispersion in lightcurve
    p = 5  # order of AR polynomial
    qpo_width2 = np.array([1.0/100.0, 1.0/100.0, 1.0/500.0])
    qpo_cent2 = np.array([1.0/5.0, 1.0/50.0])
    ar_roots2 = cm.get_ar_roots(qpo_width2, qpo_cent2)
    ma_coefs2 = np.zeros(p)
    ma_coefs2[0] = 1.0
    ma_coefs2[1] = 4.5
    ma_coefs2[2] = 1.25
    sigsqr2 = sigmay2 ** 2 / cm.carma_variance(1.0, ar_roots2, ma_coefs=ma_coefs2)
    ny = 270
    time2 = np.zeros(ny)
    dt = np.random.uniform(1.0, 3.0, ny)
    time2[0:90] = np.cumsum(dt[0:90])
    time2[90:2*90] = 180 + time2[90-1] + np.cumsum(dt[90:2*90])
    time2[2*90:] = 180 + time2[2*90-1] + np.cumsum(dt[2*90:])
    # append the second segment after another 180-unit gap
    time = np.append(time, time.max() + 180 + time2)
    y2 = cm.carma_process(time2, sigsqr2, ar_roots2, ma_coefs=ma_coefs2)
    y = np.append(y, y2)
    ysig = np.ones(len(y)) * y.std() / 8.0
    # ysig = np.ones(ny) * 1e-6
    y0 = y.copy()
    y += ysig * np.random.standard_normal(len(y))
    froot = base_dir + 'plots/car5_nonstationary_'
    # plot all six seasons: true process (black) and noisy data (blue)
    plt.subplot(111)
    for i in xrange(6):
        plt.plot(time[90*i:90*(i+1)], y0[90*i:90*(i+1)], 'k', lw=2)
        plt.plot(time[90*i:90*(i+1)], y[90*i:90*(i+1)], 'bo')
    plt.xlim(time.min(), time.max())
    plt.xlabel('Time')
    plt.ylabel('Non-Stationary Process')
    plt.savefig(froot + 'tseries.eps')
    plt.show()
    # true AR polynomial coefficients of each half, for the PSD comparison
    ar_coef1 = np.poly(ar_roots1)
    ar_coef2 = np.poly(ar_roots2)
    print 'Getting maximum-likelihood estimates...'
    carma_model = cm.CarmaModel(time, y, ysig)
    pmax = 7
    MAP, pqlist, AIC_list = carma_model.choose_order(pmax, njobs=1)
    # convert lists to a numpy arrays, easier to manipulate
    pqarray = np.array(pqlist)
    pmodels = pqarray[:, 0]
    qmodels = pqarray[:, 1]
    AICc = np.array(AIC_list)
    # plot AICc vs p, one curve per q
    plt.clf()
    plt.subplot(111)
    for i in xrange(qmodels.max()+1):
        plt.plot(pmodels[qmodels == i], AICc[qmodels == i], 's-', label='q=' + str(i), lw=2)
    plt.legend()
    plt.xlabel('p')
    plt.ylabel('AICc(p,q)')
    plt.xlim(0, pmodels.max() + 1)
    plt.savefig(froot + 'aic.eps')
    plt.close()
    nsamples = 50000
    carma_sample = carma_model.run_mcmc(nsamples)
    carma_sample.add_mle(MAP)
    cPickle.dump(carma_sample, open(data_dir + 'nonstationary.pickle', 'wb'))
    plt.clf()
    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    # overplot the true PSDs of both halves (solid / dashed black)
    psd1 = cm.power_spectrum(frequencies, np.sqrt(sigsqr1), ar_coef1, ma_coefs=ma_coefs1)
    psd2 = cm.power_spectrum(frequencies, np.sqrt(sigsqr2), ar_coef2, ma_coefs=ma_coefs2)
    # ax.loglog(frequencies, psd_mle, '--b', lw=2)
    ax.loglog(frequencies, psd1, 'k', lw=2)
    ax.loglog(frequencies, psd2, '--k', lw=2)
    dt = np.median(time[1:] - time[0:-1])
    # approximate measurement-noise floor of the PSD
    noise_level = 2.0 * dt * np.mean(ysig ** 2)
    ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
    ax.set_ylim(bottom=noise_level / 100.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency')
    ax.set_ylabel('Power Spectral Density')
    plt.savefig(froot + 'psd.eps')
    print 'Assessing the fit quality...'
    carma_sample.assess_fit(doShow=False)
    plt.savefig(froot + 'fit_quality.eps')
    # compute the marginal mean and variance of the predicted values
    nplot = 1028
    time_predict = np.linspace(time.min(), 1.25 * time.max(), nplot)
    time_predict = time_predict[1:]
    predicted_mean, predicted_var = carma_sample.predict(time_predict, bestfit='map')
    predicted_low = predicted_mean - np.sqrt(predicted_var)
    predicted_high = predicted_mean + np.sqrt(predicted_var)
    # plot the time series and the marginal 1-sigma error bands.
    # NOTE(review): only seasons 0-2 of the six are drawn here (the loop below
    # stops at 3, copied from the 3-season version) -- confirm whether all six
    # segments should be plotted.
    plt.clf()
    plt.subplot(111)
    plt.fill_between(time_predict, predicted_low, predicted_high, color='cyan')
    plt.plot(time_predict, predicted_mean, '-b', label='Predicted')
    plt.plot(time[0:90], y0[0:90], 'k', lw=2, label='True')
    plt.plot(time[0:90], y[0:90], 'bo')
    for i in xrange(1, 3):
        plt.plot(time[90*i:90*(i+1)], y0[90*i:90*(i+1)], 'k', lw=2)
        plt.plot(time[90*i:90*(i+1)], y[90*i:90*(i+1)], 'bo')
    plt.xlabel('Time')
    plt.ylabel('CARMA(5,3) Process')
    plt.xlim(time_predict.min(), time_predict.max())
    plt.legend()
    plt.savefig(froot + 'interp.eps')
def do_AGN_Stripe82():
    """Fit/plot a CARMA model for the r-band light curve of an SDSS Stripe 82 quasar.

    With load_pickle=True, reloads a previously-run sampler from disk and
    regenerates the fit-quality figure; otherwise reruns the MCMC via
    make_sampler_plots and pickles the result.
    """
    s82_id = '1627677'  # Stripe 82 object identifier; also the data file name
    data = np.genfromtxt(data_dir + s82_id)
    # do r-band data
    jdate = data[:, 6]  # Julian date column
    rmag = data[:, 7]   # r-band magnitude
    rerr = data[:, 8]   # r-band magnitude uncertainty
    load_pickle = True  # toggle: reload saved sampler vs. rerun the MCMC
    if load_pickle:
        time = jdate - jdate.min()
        ysig = rerr
        carma_sample = cPickle.load(open(data_dir + s82_id + '.pickle', 'rb'))
        froot = base_dir + 'plots/' + s82_id + '_'
        print 'Assessing the fit quality...'
        fig = carma_sample.assess_fit(doShow=False)
        # relabel the four diagnostic panels produced by assess_fit
        ax_again = fig.add_subplot(2, 2, 1)
        ax_again.set_title("S82 Quasar, r-band")
        ylims = ax_again.get_ylim()
        # invert the y-axis: smaller magnitudes are brighter
        ax_again.set_ylim(ylims[1], ylims[0])
        ax_again.set_ylabel('magnitude')
        ax_again.set_xlabel('Time [days]')
        ax_again = fig.add_subplot(2, 2, 2)
        ax_again.set_xlabel('Time [days]')
        ax_again = fig.add_subplot(2, 2, 3)
        ax_again.set_xlabel('Lag')
        ax_again = fig.add_subplot(2, 2, 4)
        ax_again.set_xlabel('Lag')
        plt.savefig(froot + 'fit_quality.eps')
    else:
        carma_sample = make_sampler_plots(jdate - jdate.min(), rmag, rerr, 7, s82_id + '_', 'S82 Quasar, r-band',
                                          do_mags=True)
        pfile = open(data_dir + '1627677.pickle', 'wb')
        cPickle.dump(carma_sample, pfile)
        pfile.close()
def do_AGN_Kepler():
    """Fit/plot a CARMA model for the Kepler light curve of the Seyfert galaxy Zw 229-15.

    Cleans the light curve (non-finite epochs, flux-jump outliers), optionally
    reruns the MCMC, then plots the model PSD against a periodogram of the
    end-matched flux and pickles the sampler.
    """
    sname = 'Zw 229-15'
    data = fits.open(data_dir + 'kepler_zw229_Q7.fits')[1].data
    jdate = data['time']
    flux = np.array(data['SAP_FLUX'], dtype=float)
    ferr = np.array(data['SAP_FLUX_ERR'], dtype=float)
    # keep only epochs with finite time stamps and fluxes
    keep = np.where(np.logical_and(np.isfinite(jdate), np.isfinite(flux)))[0]
    jdate = jdate[keep]
    jdate -= jdate.min()
    flux = flux[keep]
    ferr = ferr[keep]
    df = flux[1:] - flux[0:-1]  # remove outliers
    # NOTE(review): 56.0 looks like a hand-tuned flux-jump threshold -- confirm
    keep = np.where(np.abs(df) < 56.0)
    jdate = jdate[keep]
    flux = flux[keep]
    ferr = ferr[keep]
    load_pickle = True  # toggle: reload saved sampler vs. rerun the MCMC
    if load_pickle:
        carma_sample = cPickle.load(open(data_dir + 'zw229.pickle', 'rb'))
        # rerun MLE
        # carma_model = cm.CarmaModel(jdate, flux, ferr, p=carma_sample.p, q=carma_sample.q)
        # mle = carma_model.get_map(carma_sample.p, carma_sample.q)
        # carma_sample.add_map(mle)
    else:
        carma_sample = make_sampler_plots(jdate, flux, ferr, 7, 'zw229_', sname, njobs=1)
    # transform the flux through end matching (removes the linear trend between endpoints)
    tflux = flux - flux[0]
    slope = (tflux[-1] - tflux[0]) / (jdate[-1] - jdate[0])
    tflux -= slope * jdate
    plt.subplot(111)
    dt = jdate[1] - jdate[0]
    pgram, freq = plt.psd(tflux, 512, 2.0 / dt, detrend=detrend_mean)
    plt.clf()
    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    # overplot the periodogram points (freq from plt.psd is divided by 2 here --
    # presumably converting to the code's frequency convention; confirm)
    ax.loglog(freq / 2.0, pgram, 'o', color='DarkOrange')
    # reference power law fit to the periodogram above the noise level
    psd_slope = 3.14
    above_noise = np.where(freq / 2.0 < 1.0)[0]
    psd_norm = np.mean(np.log(pgram[above_noise[1:]]) + 3.14 * np.log(freq[above_noise[1:]] / 2.0))
    psd_plaw = np.exp(psd_norm) / (freq[1:] / 2.0) ** psd_slope
    ax.loglog(freq[1:] / 2.0, psd_plaw, '-', lw=2, color='DarkOrange')
    ax.loglog(frequencies, psd_mid, '--b', lw=2)
    noise_level = 2.0 * dt * np.mean(ferr ** 2)
    ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
    ax.set_ylim(bottom=noise_level / 100.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency [1 / day]')
    ax.set_ylabel('Power Spectral Density [flux$^2$ day]')
    plt.title(sname)
    plt.savefig(base_dir + 'plots/zw229_psd.eps')
    plt.clf()
    carma_sample.plot_1dpdf('measerr_scale')
    plt.savefig(base_dir + 'plots/zw229_measerr_scale.eps')
    measerr_scale = carma_sample.get_samples('measerr_scale')
    print "95% credibility interval on Kepler measurement error scale parameter:", np.percentile(measerr_scale, 2.5), \
        np.percentile(measerr_scale, 97.5)
    pfile = open(data_dir + 'zw229.pickle', 'wb')
    cPickle.dump(carma_sample, pfile)
    pfile.close()
def do_AGN_Xray():
    """Fit/plot a CARMA model for the combined RXTE + XMM X-ray light curve of MCG-6-30-15.

    The two instruments are separated by their sampling cadence (RXTE: coarse,
    XMM: fine); only the RXTE segment is fit, but noise levels for both are
    drawn on the PSD figure.
    """
    sname = 'MCG-6-30-15, X-ray'
    data = np.genfromtxt(data_dir + 'mcg-6-30-15_rxte_xmm.txt')
    jdate = data[:, 0]
    flux = data[:, 1] * np.log(10.0)  # convert to natural logarithm
    ferr = data[:, 2] * np.log(10.0)
    jdate = jdate - jdate.min()
    time = jdate * 86.4e3  # convert to seconds
    dt = time[1:] - time[0:-1]
    # split the epochs by cadence: gaps > 50 s -> RXTE, < 50 s -> XMM
    rxte = np.where(dt > 50.0)[0]
    dt_rxte = np.median(dt[rxte])
    xmm = np.where(dt < 50.0)[0]
    dt_xmm = 48.0  # fixed XMM cadence in seconds -- TODO confirm
    # NOTE(review): ferr is divided by 1e6 here; presumably to force the sampler
    # to treat the data as effectively noise-free -- confirm intent
    carma_sample = make_sampler_plots(time[rxte], flux[rxte], ferr[rxte] / 1e6, 7, 'mcg63015_rxte_', sname, njobs=4)
    measerr_scale = carma_sample.get_samples('measerr_scale')
    # NOTE(review): message says "Kepler" but this is the X-ray fit -- looks copy-pasted
    print "95% credibility interval on Kepler measurement error scale parameter:", np.percentile(measerr_scale, 2.5), \
        np.percentile(measerr_scale, 97.5)
    pfile = open(data_dir + 'mcg63015.pickle', 'wb')
    cPickle.dump(carma_sample, pfile)
    pfile.close()
    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    # per-instrument measurement noise levels over their sampled frequency ranges
    noise_level_rxte = 2.0 * dt_rxte * np.mean(ferr[rxte] ** 2)
    noise_level_xmm = 2.0 * dt_xmm * np.mean(ferr[xmm] ** 2)
    rxte_frange = np.array([1.0 / time[rxte].max(), 1.0 / dt_rxte])
    xmm_frange = np.array([1.0 / (time[xmm].max() - time[xmm].min()), 1.0 / dt_xmm])
    ax.loglog(rxte_frange, np.ones(2) * noise_level_rxte, color='grey', lw=2)
    ax.loglog(xmm_frange, np.ones(2) * noise_level_xmm, color='grey', lw=2)
    noise_level = np.min([noise_level_rxte, noise_level_xmm])
    ax.set_ylim(bottom=noise_level / 100.0)
    ax.annotate("Measurement Noise Level, RXTE", (2.0 * ax.get_xlim()[0], noise_level_rxte / 2.5))
    ax.annotate("Noise Level, XMM", (xmm_frange[0], noise_level_xmm / 2.5))
    ax.set_xlabel('Frequency [Hz]')
    ax.set_ylabel('Power Spectral Density [fraction$^2$ / Hz]')
    plt.title(sname)
    plt.savefig(base_dir + 'plots/mcg63015_psd.eps')
def do_RRLyrae():
    """Fit/plot a CARMA model for the g-band light curve of an RR Lyrae variable.

    With load_pickle=True, reloads a saved sampler and regenerates the PSD and
    fit-quality figures (the PSD panel is annotated with the A/B/C features);
    otherwise reruns the MCMC and pickles the result.
    """
    dtype = np.dtype([("mjd", np.float), ("filt", np.str, 1), ("mag", np.float), ("dmag", np.float)])
    data = np.loadtxt(data_dir + 'RRLyrae.txt', comments="#", dtype=dtype)
    # do g-band light curve
    gIdx = np.where(data["filt"] == "g")[0]
    jdate = data['mjd'][gIdx]
    gmag = data['mag'][gIdx]
    gerr = data['dmag'][gIdx]
    load_pickle = True  # toggle: reload saved sampler vs. rerun the MCMC
    if load_pickle:
        time = jdate - jdate.min()
        ysig = gerr
        carma_sample = cPickle.load(open(data_dir + 'RRLyrae.pickle', 'rb'))
        froot = base_dir + 'plots/' + 'RRLyrae_'
        ax = plt.subplot(111)
        print 'Getting bounds on PSD...'
        psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                                 color='SkyBlue', nsamples=5000)
        psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                    ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
        ax.loglog(frequencies, psd_mle, '--b', lw=2)
        dt = time[1:] - time[0:-1]
        noise_level = 2.0 * np.median(dt) * np.mean(ysig ** 2)
        ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
        ax.set_ylim(bottom=noise_level / 100.0)
        ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
        # label PSD features at fixed (frequency, power) positions;
        # frequencies correspond to periods of 0.5, 1.3 and 2.49 days
        ax.annotate("C", (1.0 / 0.5, 1e3))
        ax.annotate("B", (1.0 / 1.3, 2.0))
        ax.annotate("A", (1.0 / 2.49, 1.0))
        ax.set_xlabel('Frequency [1 / day]')
        ax.set_ylabel('Power Spectral Density [mag$^2$ day]')
        plt.title("RR Lyrae, g-band")
        plt.savefig(froot + 'psd.eps')
        print 'Assessing the fit quality...'
        fig = carma_sample.assess_fit(doShow=False)
        # relabel the four diagnostic panels produced by assess_fit
        ax_again = fig.add_subplot(2, 2, 1)
        ax_again.set_title("RR Lyrae, g-band")
        ylims = ax_again.get_ylim()
        # invert the y-axis: smaller magnitudes are brighter
        ax_again.set_ylim(ylims[1], ylims[0])
        ax_again.set_ylabel('magnitude')
        ax_again.set_xlabel('Time [days]')
        ax_again = fig.add_subplot(2, 2, 2)
        ax_again.set_xlabel('Time [days]')
        ax_again = fig.add_subplot(2, 2, 3)
        ax_again.set_xlabel('Lag')
        ax_again = fig.add_subplot(2, 2, 4)
        ax_again.set_xlabel('Lag')
        plt.savefig(froot + 'fit_quality.eps')
    else:
        carma_sample = make_sampler_plots(jdate - jdate.min(), gmag, gerr, 7, 'RRLyrae_', 'RR Lyrae, g-band', do_mags=True,
                                          njobs=1)
        pfile = open(data_dir + 'RRLyrae.pickle', 'wb')
        cPickle.dump(carma_sample, pfile)
        pfile.close()
def do_OGLE_LPV():
    """Fit/plot a CARMA model for the i-band light curve of an OGLE long-period variable (RGB).

    With load_pickle=True, reloads a saved sampler and regenerates the PSD plot;
    otherwise (the current setting) reruns the MCMC and pickles the result.
    """
    sname = 'LPV, RGB, i-band'
    data = np.genfromtxt(data_dir + 'OGLE-LMC-LPV-00007.dat')
    jdate = data[:, 0]
    imag = data[:, 1]
    ierr = data[:, 2]
    load_pickle = False  # toggle: reload saved sampler vs. rerun the MCMC
    if load_pickle:
        time = jdate
        ysig = ierr
        carma_sample = cPickle.load(open(data_dir + 'ogle_lpv_rgb.pickle', 'rb'))
        froot = base_dir + 'plots/' + 'ogle_lpv_rgb_'
        ax = plt.subplot(111)
        print 'Getting bounds on PSD...'
        psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                                 color='SkyBlue', nsamples=5000)
        psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                    ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
        ax.loglog(frequencies, psd_mle, '--b', lw=2)
        dt = time[1:] - time[0:-1]
        noise_level = 2.0 * np.median(dt) * np.mean(ysig ** 2)
        ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
        ax.set_ylim(bottom=noise_level / 100.0)
        ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
        # label PSD features at periods of 25 and 16 days
        ax.annotate("A", (1.0 / 25.0, 2.5e-3))
        ax.annotate("B", (1.0 / 16.0, 2.5e-3))
        ax.set_xlabel('Frequency [1 / day]')
        ax.set_ylabel('Power Spectral Density [mag$^2$ day]')
        plt.title(sname)
        plt.savefig(froot + 'psd.eps')
    else:
        carma_sample = make_sampler_plots(jdate - jdate.min(), imag, ierr, 7, 'ogle_lpv_rgb_', sname, do_mags=True,
                                          njobs=1)
        pfile = open(data_dir + 'ogle_lpv_rgb.pickle', 'wb')
        cPickle.dump(carma_sample, pfile)
        pfile.close()
def do_XRB():
    """Fit/plot a CARMA model for a down-sampled RXTE light curve of the X-ray binary XTE 1550-564.

    Extracts one contiguous segment of the light curve, randomly down-samples
    it to make the MCMC tractable, fits (or reloads) the sampler, then plots
    the PSD against a periodogram of the full segment and residual-ACF
    diagnostics of the fit.
    """
    sname = 'XTE 1550-564'
    data_file = data_dir + 'LC_B_3.35-12.99keV_1div128s_total.fits'
    data = fits.open(data_file)[1].data
    tsecs = data['TIME']
    flux = data['RATE']
    # keep only the contiguous segment between the first two gaps > 1 second,
    # truncated to the first 40000 samples
    dt = tsecs[1:] - tsecs[:-1]
    gap = np.where(dt > 1)[0]
    tsecs = tsecs[gap[0]+1:gap[1]][:40000]
    flux = flux[gap[0]+1:gap[1]][:40000]
    tsecs0 = tsecs.copy()  # keep the full segment for the periodogram below
    flux0 = flux.copy()
    # randomly down-sample to 4000 epochs (sorted to keep time increasing)
    ndown_sample = 4000
    idx = np.random.permutation(len(flux0))[:ndown_sample]
    idx.sort()
    tsecs = tsecs[idx]
    logflux = np.log(flux[idx])
    ferr = np.sqrt(flux[idx])  # assumes Poisson counting noise -- TODO confirm
    logf_err = ferr / flux[idx]  # error propagated to log-flux
    # # high-frequency sampling lightcurve
    # high_cutoff = 10000
    # tsecs_high = tsecs[:high_cutoff]
    # logflux_high = np.log(flux[:high_cutoff])
    # ferr_high = np.sqrt(flux[:high_cutoff])
    # logferr_high = ferr_high / flux[:high_cutoff]
    #
    # ndown_sample_high = 1000
    # idx_high = np.random.permutation(len(logflux_high))[:ndown_sample_high]
    # idx_high.sort()
    #
    # # middle-frequency sampling lightcurve
    # tsecs_mid = tsecs[high_cutoff:]
    # logflux_mid = np.log(flux[high_cutoff:])
    # ferr_mid = np.sqrt(flux[high_cutoff:])
    # logf_err_mid = ferr_mid / flux[high_cutoff:]
    # # logf_err = np.sqrt(0.00018002985939372774 / 2.0 / np.median(dt)) # eyeballed from periodogram
    # # logf_err = np.ones(len(tsecs)) * logf_err
    #
    # ndown_sample_mid = 4000 - ndown_sample_high
    # idx_mid = np.random.permutation(len(logflux_mid))[:ndown_sample_mid]
    # idx_mid.sort()
    #
    # tsecs = np.concatenate((tsecs_high[idx_high], tsecs_mid[idx_mid]))
    # logflux = np.concatenate((logflux_high[idx_high], logflux_mid[idx_mid]))
    # logf_err = np.concatenate((logferr_high[idx_high], logf_err_mid[idx_mid]))
    # idx = np.concatenate((idx_high, idx_mid))
    # quick-look plots of the full and down-sampled light curves
    plt.plot(tsecs0, np.log(flux0))
    plt.errorbar(tsecs, logflux, yerr=logf_err)
    print 'Measurement errors are', np.mean(logf_err) / np.std(logflux) * 100, ' % of observed standard deviation.'
    print 'Mean time spacing:', np.mean(tsecs[1:] - tsecs[:-1])
    # print 'Mean time spacing for high-frequency sampling:', np.mean(tsecs_high[idx_high[1:]]-tsecs_high[idx_high[:-1]])
    # print 'Mean time spacing for low-frequency sampling:', np.mean(tsecs_mid[idx_mid[1:]]-tsecs_mid[idx_mid[:-1]])
    plt.show()
    plt.clf()
    plt.plot(tsecs, logflux)
    plt.show()
    plt.hist(logflux, bins=100, normed=True)
    plt.xlabel('log Flux')
    print 'Standard deviation in lightcurve:', np.std(logflux)
    print 'Typical measurement error:', np.mean(logf_err)
    plt.show()
    plt.clf()
    # sanity checks on the down-sampled light curve before fitting
    assert np.all(np.isfinite(tsecs))
    assert np.all(np.isfinite(logflux))
    assert np.all(np.isfinite(logf_err))
    dt_idx = tsecs[1:] - tsecs[:-1]
    assert np.all(dt_idx > 0)
    load_pickle = True  # toggle: reload saved sampler vs. rerun the MCMC
    if load_pickle:
        carma_sample = cPickle.load(open(data_dir + 'xte1550_p5q4.pickle', 'rb'))
    else:
        carma_sample = make_sampler_plots(tsecs, logflux, logf_err, 7, 'xte1550_', sname, njobs=7)
    # periodogram of the full (non-down-sampled) segment for comparison
    plt.subplot(111)
    pgram, freq = plt.psd(np.log(flux0), 512, 2.0 / np.median(dt), detrend=detrend_mean)
    plt.clf()
    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    ax.loglog(freq / 2, pgram, 'o', color='DarkOrange')
    nyquist_freq = np.mean(0.5 / dt_idx)  # mean Nyquist frequency of the irregular sampling
    nyquist_idx = np.where(frequencies <= nyquist_freq)[0]
    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    # noise_level = 2.0 * np.mean(dt_idx) * np.mean(logf_err ** 2)
    noise_level0 = 0.00018002985939372774 / 2.0  # scale the noise level seen in the PSD
    noise_level = noise_level0 * (0.5 / np.median(dt)) / nyquist_freq
    ax.loglog(frequencies[nyquist_idx], np.ones(len(nyquist_idx)) * noise_level, color='grey', lw=2)
    # ax.loglog(frequencies, np.ones(len(frequencies)) * noise_level0)
    ax.set_ylim(bottom=noise_level0 / 10.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 1.5))
    ax.set_xlabel('Frequency [Hz]')
    ax.set_ylabel('Power Spectral Density [fraction$^2$ Hz$^{-1}$]')
    plt.title(sname)
    plt.savefig(base_dir + 'plots/xte1550_psd.eps')
    # plot the standardized residuals and compare with the standard normal
    plt.clf()
    kfilter, mu = carma_sample.makeKalmanFilter('map')
    kfilter.Filter()
    kmean = np.asarray(kfilter.GetMean())
    kvar = np.asarray(kfilter.GetVar())
    standardized_residuals = (carma_sample.y - mu - kmean) / np.sqrt(kvar)
    plt.hist(standardized_residuals, bins=100, normed=True, color='SkyBlue', histtype='stepfilled')
    plt.xlabel('Standardized Residuals')
    plt.ylabel('Probability Distribution')
    xlim = plt.xlim()
    xvalues = np.linspace(xlim[0], xlim[1], num=100)
    expected_pdf = np.exp(-0.5 * xvalues ** 2) / np.sqrt(2.0 * np.pi)  # standard normal density
    plt.plot(xvalues, expected_pdf, 'k', lw=3)
    plt.title(sname)
    plt.savefig(base_dir + 'plots/xte1550_resid_dist.eps')
    # plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white
    # noise
    plt.clf()
    maxlag = 50
    wnoise_upper = 1.96 / np.sqrt(carma_sample.time.size)
    wnoise_lower = -1.96 / np.sqrt(carma_sample.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(standardized_residuals, maxlags=maxlag, lw=3)
    plt.xlim(0, maxlag)
    plt.ylim(-0.2, 0.2)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Residuals')
    plt.savefig(base_dir + 'plots/xte1550_resid_acf.eps')
    # plot the autocorrelation function of the squared residuals and compare with the 95% confidence intervals for
    # white noise
    plt.clf()
    squared_residuals = standardized_residuals ** 2
    wnoise_upper = 1.96 / np.sqrt(carma_sample.time.size)
    wnoise_lower = -1.96 / np.sqrt(carma_sample.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(squared_residuals - squared_residuals.mean(), maxlags=maxlag,
                                                    lw=3)
    plt.xlim(0, maxlag)
    plt.ylim(-0.2, 0.2)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Sqrd. Resid.')
    plt.savefig(base_dir + 'plots/xte1550_sqrres_acf.eps')
    if not load_pickle:
        pfile = open(data_dir + 'xte1550_nonoise.pickle', 'wb')
        cPickle.dump(carma_sample, pfile)
        pfile.close()
if __name__ == "__main__":
    # run one analysis at a time; uncomment the desired case
    # do_simulated_regular()
    # do_simulated_irregular()
    # do_simulated_irregular_nonstationary()
    # do_AGN_Stripe82()
    # do_AGN_Kepler()
    # do_RRLyrae()
    do_OGLE_LPV()
    # do_AGN_Xray()
    # do_XRB()
| {
"repo_name": "brandonckelly/carma_pack",
"path": "src/paper/carma_paper.py",
"copies": "2",
"size": "35307",
"license": "mit",
"hash": 5748780017111876000,
"line_mean": 37.7989010989,
"line_max": 123,
"alpha_frac": 0.5956892401,
"autogenerated": false,
"ratio": 2.78490298154283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9375359971954749,
"avg_score": 0.0010464499376164253,
"num_lines": 910
} |
"""Compare ZCARMA(5) and CARMA(5,4) MCMC fits on simulated test data against the true PSD."""
__author__ = 'brandonkelly'
import numpy as np
import matplotlib.pyplot as plt
from os import environ
from scipy.misc import comb
import carmcmc
# true values
p = 5  # order of AR polynomial
sigmay = 2.3  # true standard deviation of the process
# quasi-periodic oscillation widths and centroids (in frequency units)
qpo_width = np.array([1.0/100.0, 1.0/100.0, 1.0/500.0])
qpo_cent = np.array([1.0/5.0, 1.0/50.0])
ar_roots = carmcmc.get_ar_roots(qpo_width, qpo_cent)
# calculate moving average coefficients under z-transform of Belcher et al. (1994)
kappa = 3.0
ma_coefs = comb(p-1 * np.ones(p), np.arange(p)) / kappa ** np.arange(p)
# normalize the driving noise variance so the process variance equals sigmay**2
sigsqr = sigmay ** 2 / carmcmc.carma_variance(1.0, ar_roots, ma_coefs=ma_coefs)
data_dir = environ['HOME'] + '/Projects/carma_pack/cpp_tests/data/'
fname = data_dir + 'zcarma5_mcmc.dat'
data = np.genfromtxt(data_dir + 'zcarma5_test.dat')
# ZCARMA(5) fit: load the MCMC trace produced by the C++ test program
Zcarma = carmcmc.ZCarmaSample(data[:, 0], data[:, 1], data[:, 2], filename=fname)
Zcarma.assess_fit()
print "True value of log10(kappa) is: ", np.log10(kappa)
plt.hist(Zcarma.get_samples('kappa'), bins=100)
plt.show()
Zcarma.plot_parameter('kappa', doShow=True)
psd_low, psd_high, psd_mid, freq = Zcarma.plot_power_spectrum(percentile=95.0, nsamples=10000, doShow=False)
ar_coef = np.poly(ar_roots)
# overplot the true PSD for comparison
true_psd = carmcmc.power_spectrum(freq, np.sqrt(sigsqr), ar_coef, ma_coefs=ma_coefs)
plt.loglog(freq, true_psd, 'r', lw=2)
plt.xlabel('Frequency')
plt.ylabel('PSD, ZCARMA(5)')
plt.show()
# CARMA(5,4) fit on the same data
fname = data_dir + 'carma_mcmc.dat'
Carma = carmcmc.CarmaSample(data[:, 0], data[:, 1], data[:, 2], filename=fname, q=p-1)
Carma.assess_fit()
print "True values of MA coefs are:", ma_coefs
Carma.plot_1dpdf('ma_coefs', doShow=True)
Carma.posterior_summaries('ma_coefs')
print ''
print "True values of log_widths are", np.log(qpo_width)
Carma.posterior_summaries('log_width')
print ''
print "True values of log_centroids are", np.log(qpo_cent)
Carma.posterior_summaries('log_centroid')
psd_low, psd_high, psd_mid, freq = Carma.plot_power_spectrum(percentile=95.0, nsamples=10000, doShow=False)
true_psd = carmcmc.power_spectrum(freq, np.sqrt(sigsqr), ar_coef, ma_coefs=ma_coefs)
plt.loglog(freq, true_psd, 'r', lw=2)
plt.xlabel('Frequency')
plt.ylabel('PSD, CARMA(5,4)')
plt.show() | {
"repo_name": "farr/carma_pack",
"path": "cpp_tests/analyze_test_data.py",
"copies": "2",
"size": "2137",
"license": "mit",
"hash": -1445658283519073300,
"line_mean": 32.9365079365,
"line_max": 108,
"alpha_frac": 0.7061300889,
"autogenerated": false,
"ratio": 2.4935822637106186,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.41997123526106184,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import cdist
from scipy import linalg
import multiprocessing
def distance_matrix(Xvals):
    """Return the pairwise Mahalanobis distance matrix for the rows of Xvals.

    The inverse of the sample covariance of the columns of Xvals defines the
    metric, so distances are scale-free and account for correlations among
    the predictors.
    """
    inverse_cov = linalg.inv(np.cov(Xvals, rowvar=0))
    return cdist(Xvals, Xvals, metric='mahalanobis', VI=inverse_cov)
def impact_single_theta(args):
    """Compute the (signed, absolute) impact of one predictor for a single parameter sample.

    Arguments are packed into one tuple so this can be used with
    multiprocessing.Pool.map:
        predict : callable predict(X, theta, *predict_args) returning a
            prediction for each row of X.
        theta : one sample of the model parameters.
        X : (ndata, nfeat) array of predictor values.
        p_idx : column index of the active predictor.
        weights : (ndata, ndata) weight matrix for the transitions.
        predict_args : tuple of extra positional arguments for predict.

    Returns (signed_impact, absolute_impact), each normalized by the total weight.
    """
    predict, theta, X, p_idx, weights, predict_args = args
    # first compute the matrix of model predictions:
    # y_predict[i, j] = E(y|u_i, v_j, theta)
    ndata = X.shape[0]
    X_copy = X.copy()
    u = X[:, p_idx]  # the active predictor
    y_predict = np.zeros((ndata, ndata))
    for i in range(ndata):
        X_copy[:, p_idx] = u[i]
        y_predict[i] = predict(X_copy, theta, *predict_args)
    # sign of each transition in the active predictor:
    # transition_sign[i, j] = sign(u[i] - u[j])
    # (the original also built this with an O(ndata^2) Python loop whose result
    # was immediately overwritten by this vectorized version; dead loop removed)
    u1, u2 = np.meshgrid(u, u)
    transition_sign = np.sign(u2 - u1)
    # change in the prediction relative to each point's own predictor value
    y_predict_diff = y_predict - np.outer(np.ones(ndata), y_predict.diagonal())
    numer = np.sum(weights * y_predict_diff * transition_sign)  # signed version
    abs_numer = np.sum(weights * np.abs(y_predict_diff))  # absolute version
    denom = np.sum(weights)
    return numer / denom, abs_numer / denom
def impact(predict, theta, X, predictors=None, predict_args=None, nneighbors=None, nx=None, ntheta=None,
           mahalanobis_constant=1.0, n_jobs=1):
    """Estimate the average predictive comparison ("impact") of each predictor.

    For each requested predictor column and each parameter sample theta[s],
    computes weighted signed and absolute impacts via impact_single_theta,
    with weights based on Mahalanobis proximity restricted to the K nearest
    neighbors. Returns (impacts, impact_sigmas, abs_impacts, abs_impact_sigma),
    the posterior means and standard deviations over the theta samples.

    NOTE(review): predict_args defaults to None but impact_single_theta
    unpacks it with *predict_args, which fails for None -- callers appear
    expected to pass a tuple; verify.
    """
    if n_jobs < 0:
        # negative n_jobs means "use all available cores"
        n_jobs = multiprocessing.cpu_count()
    if n_jobs > 1:
        pool = multiprocessing.Pool(n_jobs)
    if predictors is None:
        # calculate the impact for all the predictors
        predictors = np.arange(X.shape[1])
    if nx is not None:
        # use only a subset of the data points
        subset_idx = np.random.permutation(X.shape[0])[:nx]
        X = X[subset_idx]
    else:
        nx = X.shape[0]
    if ntheta is not None:
        # use only a subset of the theta samples
        # (shuffled here; truncation to ntheta happens via range(ntheta) below)
        subset_idx = np.random.permutation(theta.shape[0])
        theta = theta[subset_idx]
    else:
        ntheta = theta.shape[0]
    if nneighbors is None:
        # use all of the neighbors when computing the weights
        nneighbors = X.shape[0]
    # first compute the distance matrix
    Dmat = distance_matrix(X)
    weights0 = 1.0 / (mahalanobis_constant + Dmat)
    # get the sets of nearest neighbors
    knn = NearestNeighbors(n_neighbors=nneighbors)
    knn.fit(X)
    nn_idx = knn.kneighbors(X, return_distance=False)
    weights = np.zeros_like(weights0)
    for i in range(weights.shape[0]):
        # data points outside of K nearest neighbors have weight of zero
        weights[nn_idx[i], i] = weights0[nn_idx[i], i]
    weights /= weights.sum(axis=0)  # normalize weights to contribution to impact for each data point is the same
    impacts = np.zeros(len(predictors))
    abs_impacts = np.zeros_like(impacts)
    impact_sigmas = np.zeros_like(impacts)
    abs_impact_sigma = np.zeros_like(impacts)
    print 'Doing predictor'
    for p_idx in predictors:
        print p_idx, '...'
        # one work item per theta sample, evaluated serially or in the pool
        args = []
        for s in range(ntheta):
            args.append([predict, theta[s], X, p_idx, weights, predict_args])
        if n_jobs == 1:
            results = map(impact_single_theta, args)
        else:
            results = pool.map(impact_single_theta, args)
        results = np.array(results)
        # column 0: signed impact; column 1: absolute impact
        impacts[p_idx] = np.mean(results[:, 0])
        impact_sigmas[p_idx] = np.std(results[:, 0])
        abs_impacts[p_idx] = np.mean(results[:, 1])
        abs_impact_sigma[p_idx] = np.std(results[:, 1])
    # impact_theta = np.zeros(theta.shape)
    # impact_theta_abs = np.zeros_like(impact_theta)
    # for s in range(ntheta):
    #     impact_s, abs_impact_s = impact_single_theta(predict, theta[s], X, p_idx, weights, predict_args=predict_args)
    #     impact_theta[s] = impact_s
    #     impact_theta_abs[s] = abs_impact_s
    # impacts[p_idx] = np.mean(impact_theta)
    # impact_sigmas[p_idx] = np.std(impact_theta)
    # abs_impacts[p_idx] = np.mean(impact_theta_abs)
    # abs_impact_sigma[p_idx] = np.std(impact_theta_abs)
    return impacts, impact_sigmas, abs_impacts, abs_impact_sigma
if __name__ == "__main__":
    # test and example usage: linear regression with known coefficients
    ndata = 200
    beta = np.array([1.0, 2.0, -0.6, 0.1])  # true coefficients (incl. intercept)
    sigma = 0.1  # noise standard deviation
    X = np.column_stack((np.ones(ndata), np.random.standard_normal(ndata), np.random.uniform(0.0, 5.0, ndata),
                         np.random.standard_cauchy(ndata)))
    y = X.dot(beta) + sigma * np.random.standard_normal(ndata)
    # ordinary least squares fit and coefficient covariance
    XX_inv = linalg.inv(X.T.dot(X))
    bhat = XX_inv.dot(X.T.dot(y))
    bcov = XX_inv * sigma * sigma
    # draw posterior-like samples of the coefficients
    nsamples = 100
    betas = np.random.multivariate_normal(bhat, bcov, nsamples)
    betas = betas[:, 1:]  # ignore constant term

    def linear_mean(X, beta, constant):
        # prediction function passed to impact(): linear model with fixed intercept
        ymean = X.dot(beta) + constant
        return ymean

    # don't include constant term
    impacts, isigmas, abs_impacts, aisigmas = \
        impact(linear_mean, betas, X[:, 1:], predict_args=(bhat[0],), nneighbors=20, n_jobs=4)
    print impacts
    # bar chart of impacts sorted by absolute size, with 1-sigma error bars
    sorted_idx = np.argsort(np.abs(impacts))
    labels = np.array(['x1', 'x2', 'x3'])[sorted_idx]
    pos = np.arange(sorted_idx.shape[0]) + .5
    plt.barh(pos, impacts[sorted_idx], align='center', xerr=isigmas[sorted_idx], alpha=0.5)
    plt.yticks(pos, labels)
    plt.xlabel('Impact')
plt.show() | {
"repo_name": "brandonckelly/bck_stats",
"path": "bck_stats/avg_pred_comp.py",
"copies": "1",
"size": "5610",
"license": "mit",
"hash": -6113874521818590000,
"line_mean": 34.5126582278,
"line_max": 123,
"alpha_frac": 0.6178253119,
"autogenerated": false,
"ratio": 3.090909090909091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42087344028090906,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import numpy as np
import matplotlib.pyplot as plt
import lib_hmlinmae as maeLib
import yamcmcpp
class LinMAESample(yamcmcpp.MCMCSample):
    """Container for MCMC samples from the hierarchical linear MAE regression model.

    Holds the per-object responses and design matrices; the posterior samples
    themselves are filled in directly by run_gibbs via self._samples.
    """

    def __init__(self, y, X):
        super(LinMAESample, self).__init__()
        self.y = y  # list of response arrays, one per object
        self.X = X  # list of design matrices, one per object
        self.mfeat = X[0].shape[1]  # number of features (columns of each design matrix)
        self.nobjects = len(y)

    def generate_from_file(self, filename):
        # samples are populated directly by run_gibbs, so this hook is a no-op
        pass

    def generate_from_trace(self, trace):
        pass

    def set_logpost(self, logpost):
        pass

    def predict(self, x_predict, obj_idx, nsamples=None):
        """Return posterior samples of the predicted response for one object.

        Parameters
        ----------
        x_predict : array of predictor values (length mfeat).
        obj_idx : index of the object whose coefficients to use.
        nsamples : number of MCMC samples to use; None means all. When fewer
            than the full chain is requested, the chain is thinned evenly.

        Returns
        -------
        (y_predict, sigsqr) : samples of the predicted mean and of the noise level.
        """
        if nsamples is None:
            # Use all of the MCMC samples
            nsamples = self.nsamples
            index = np.arange(nsamples)
        else:
            # BUG FIX: the original wrapped a bare comparison in try/except,
            # which never raises, so the request was never validated.
            if nsamples > self.nsamples:
                raise ValueError("nsamples must be less than the total number of MCMC samples.")
            nsamples0 = self.nsamples
            # evenly thin the chain to spread the requested samples over the full trace
            index = np.arange(nsamples) * (nsamples0 / nsamples)
        beta = self.get_samples('coefs ' + str(obj_idx))[index, :]
        sigsqr = self.get_samples('sigsqr ' + str(obj_idx))[index]
        y_predict = beta.dot(x_predict)
        return y_predict, sigsqr
def run_gibbs(y, X, nsamples, nburnin, nthin=1, tdof=8):
    """Run the C++ Gibbs sampler for the hierarchical linear MAE model.

    Parameters
    ----------
    y : list of per-object response arrays.
    X : list of per-object design matrices, each (ndata_i, mfeat).
    nsamples : number of MCMC samples to keep.
    nburnin : number of burn-in iterations.
    nthin : thinning interval.
    tdof : degrees of freedom of the Student's t error model -- TODO confirm.

    Returns a LinMAESample holding the traces for the population-level
    ('coefs mean', 'sigsqr mean') and per-object parameters.
    """
    nobjects = len(y)
    mfeat = X[0].shape[1]
    # convert from numpy to vec3D format needed for C++ extension
    X3d = maeLib.vec3D()
    y2d = maeLib.vec2D()
    for i in xrange(nobjects):
        y_i = y[i]
        X_i = X[i]
        # store response in std::vector<std::vector<double> >
        y1d = maeLib.vec1D()
        y1d.extend(y_i)
        y2d.append(y1d)
        X2d = maeLib.vec2D()
        ndata = y_i.size
        for j in xrange(ndata):
            # store predictors in std::vector<std::vector<std::vector<double> > >
            X1d = maeLib.vec1D()
            X1d.extend(X_i[j, :])
            X2d.append(X1d)
        X3d.append(X2d)
    # run the gibbs sampler
    Sampler = maeLib.MaeGibbs(tdof, y2d, X3d)  # C++ gibbs sampler object
    Sampler.RunMCMC(nsamples, nburnin, nthin)
    print "Getting MCMC samples..."
    # grab the MCMC samples and store them in a python class
    samples = LinMAESample(y, X)
    # pre-allocate the trace arrays (overwritten below by the C++ traces)
    samples._samples['coefs mean'] = np.empty((nsamples, mfeat))
    samples._samples['sigsqr mean'] = np.empty(nsamples)
    for d in xrange(nobjects):
        samples._samples['weights ' + str(d)] = np.empty(nsamples)
        samples._samples['coefs ' + str(d)] = np.empty((nsamples, mfeat))
        samples._samples['sigsqr ' + str(d)] = np.empty(nsamples)
    samples.parameters = samples._samples.keys()
    samples.nsamples = nsamples
    print "Storing MCMC samples..."
    # population-level traces
    trace = Sampler.GetCoefsMean()
    samples._samples['coefs mean'] = np.asarray(trace)
    trace = Sampler.GetNoiseMean()
    samples._samples['sigsqr mean'] = np.asarray(trace)
    # per-object traces
    for d in xrange(nobjects):
        if d % 100 == 0:
            print "...", d, "..."
        trace = Sampler.GetWeights(d)
        samples._samples['weights ' + str(d)] = np.asarray(trace)
        trace = Sampler.GetCoefs(d)
        samples._samples['coefs ' + str(d)] = np.asarray(trace)
        trace = Sampler.GetSigSqr(d)
        samples._samples['sigsqr ' + str(d)] = np.asarray(trace)
    return samples
| {
"repo_name": "brandonckelly/BDC",
"path": "HMLinMAE/hmlin_mae.py",
"copies": "2",
"size": "3300",
"license": "mit",
"hash": 8837069948865763000,
"line_mean": 28.7297297297,
"line_max": 81,
"alpha_frac": 0.5857575758,
"autogenerated": false,
"ratio": 3.2384690873405297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.977720228684786,
"avg_score": 0.009404875258533793,
"num_lines": 111
} |
"""Generate simulated modified-black-body SED data for the CUDAHM dust model tests."""
__author__ = 'brandonkelly'
import numpy as np
import matplotlib.pyplot as plt
import os
# physical constants, cgs
clight = 2.99792458e10    # speed of light [cm/s]
hplanck = 6.6260755e-27   # Planck constant [erg s]
kboltz = 1.380658e-16     # Boltzmann constant [erg/K]
wavelength = np.asarray([100.0, 160.0, 250.0, 350.0, 500.0])  # observational wavelengths in microns
# convert wavelengths (microns -> cm) to frequencies, sorted ascending
nu = clight / (wavelength / 1e4)
nu.sort()
print nu
nu_ref = 2.3e11  # 230 GHz
def modified_bbody(nu, const, beta, temp):
    """Return a modified black-body SED at frequency nu.

    The Planck function B_nu(temp) is scaled by const and by a power-law
    emissivity (nu / nu_ref) ** beta. Relies on the module-level constants
    hplanck, clight, kboltz and nu_ref.
    """
    planck = 2.0 * hplanck * nu ** 3 / (clight * clight) / (np.exp(hplanck * nu / (kboltz * temp)) - 1.0)
    return const * (nu / nu_ref) ** beta * planck
ndata = 100000
# population distribution of (log const, beta, log temp): correlated Gaussian
cbt_mean = np.asarray([15.0, 2.0, np.log(15.0)])
cbt_sigma = np.asarray([1.0, 0.1, 0.3])
cbt_corr = np.asarray([[1.0, -0.5, 0.0],
                       [-0.5, 1.0, 0.25],
                       [0.0, 0.25, 1.0]])
# covariance = diag(sigma) * corr * diag(sigma)
cbt_cov = np.dot(np.diag(cbt_sigma), cbt_corr.dot(np.diag(cbt_sigma)))
cbt = np.random.multivariate_normal(cbt_mean, cbt_cov, ndata)
data_dir = os.environ['HOME'] + '/Projects/CUDAHM/dusthm/data/'
np.savetxt(data_dir + 'true_cbt_' + str(ndata) + '.dat', cbt, fmt='%10.6e')
# evaluate the noiseless SED of every source at each observed frequency
sed = np.zeros((ndata, len(nu)))
for j in range(len(nu)):
    sed[:, j] = modified_bbody(nu[j], np.exp(cbt[:, 0]), cbt[:, 1], np.exp(cbt[:, 2]))
# add Gaussian noise at 0.1% of the median SED per band
# (original comment said "median S/N of 200", which disagrees with the
# / 1000.0 factor below -- TODO confirm intended S/N)
fnu_sig = np.median(sed, axis=0) / 1000.0
fnu_sig = np.outer(np.ones(ndata), fnu_sig)
fnu = sed + fnu_sig * np.random.standard_normal(fnu_sig.shape)
# interleave flux and uncertainty columns: (f0, sig0, f1, sig1, ...)
data = np.zeros((ndata, 2*len(nu)))
data[:, 0] = fnu[:, 0]
data[:, 1] = fnu_sig[:, 0]
data[:, 2] = fnu[:, 1]
data[:, 3] = fnu_sig[:, 1]
data[:, 4] = fnu[:, 2]
data[:, 5] = fnu_sig[:, 2]
data[:, 6] = fnu[:, 3]
data[:, 7] = fnu_sig[:, 3]
data[:, 8] = fnu[:, 4]
data[:, 9] = fnu_sig[:, 4]
data_dir = os.environ['HOME'] + '/Projects/CUDAHM/dusthm/data/'
header = 'nu = '
for j in range(len(nu)):
    header += str(nu[j]) + ', '
np.savetxt(data_dir + 'cbt_sed_' + str(ndata) + '.dat', data, fmt='%10.6e', header=header)
# spot-check: plot the SED of one randomly chosen source
idx = np.random.random_integers(0, fnu.shape[0]-1)
plt.errorbar(nu, fnu[idx], yerr=fnu_sig[idx])
plt.xscale('log')
nurange = nu.max() - nu.min()
plt.xlim(nu.min() - 0.05 * nurange, nu.max() + 0.05 * nurange)
plt.xlabel('Frequency [Hz]')
plt.ylabel(r'$f_{\nu}$ [arbitrary]')
plt.title('Index ' + str(idx))
plt.show() | {
"repo_name": "brandonckelly/CUDAHM",
"path": "dusthm/src/python/make_dusthm_data.py",
"copies": "1",
"size": "2257",
"license": "mit",
"hash": 555827005448047700,
"line_mean": 27.9487179487,
"line_max": 102,
"alpha_frac": 0.5923792645,
"autogenerated": false,
"ratio": 2.286727456940223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33791067214402226,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import hmlinmae_gibbs as hmlin
import os
import multiprocessing as mp
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import cross_validation
from sklearn.metrics import mean_absolute_error
import cPickle
base_dir = os.environ['HOME'] + '/Projects/Kaggle/big_data_combine/'
plags = 3   # number of lagged stock values used as autoregressive predictors
ndays = 510  # total number of days in the data set
ntime = 54   # number of intra-day time slices per day
# get the data
fname = base_dir + 'data/BDC_dataframe.p'
df = pd.read_pickle(fname)
train_file = base_dir + 'data/trainLabels.csv'
train = pd.read_csv(train_file)
train = train.drop(train.columns[0], axis=1)  # drop the leading index column
ntrain = len(train)  # number of training days
# global MCMC parameters
nsamples = 10000
nburnin = 10000
nthin = 5
tdof = 10000  # large t-dof: error model is effectively Gaussian -- TODO confirm
def build_submission_file(yfit, snames, filename):
    """Write a Kaggle submission CSV for the predicted stock values.

    yfit : (ndays_predict, nstocks) array of predictions; rows map to file
        IDs 201..510. Mutated in place for zero-variance stocks.
    snames : list of stock column names ('O*' columns of df).
    filename : output file name under base_dir/data/submissions/.
    """
    header = 'FileID'
    for s in snames:
        header += ',' + s
    header += '\n'
    # check for zero standard deviation
    for i in xrange(yfit.shape[0]):
        for j in xrange(yfit.shape[1]):
            # stock j on day i
            ystd = np.std(df[snames[j]].ix[200 + i + 1][1:])
            if ystd < 1e-6:
                # flat intra-day series: just carry forward the last observed value
                yfit[i, j] = df[snames[j]].ix[200 + i + 1, 54]
    # prepend the FileID column (IDs 201..510)
    submission = np.insert(yfit, 0, np.arange(201, 511), axis=1)
    submission_file = base_dir + 'data/submissions/' + filename
    sfile = open(submission_file, 'w')
    sfile.write(header)
    for i in xrange(submission.shape[0]):
        this_row = str(submission[i, 0])
        for j in xrange(1, submission.shape[1]):
            this_row += ',' + str(submission[i, j])
        this_row += '\n'
        sfile.write(this_row)
    sfile.close()
def boost_residuals(args):
    """Fit a gradient-boosted regression to one stock's residuals.

    args : (resid, stock_idx) tuple so this can be used with Pool.map.
    Returns (predictions on the out-of-sample design matrix, CV errors,
    normalized feature importances). Also pickles the fitted model.
    """
    resid, stock_idx = args
    X, Xpredict = build_design_matrix(stock_idx)
    # LAD loss: boost toward the median, robust to outliers
    gbr = GradientBoostingRegressor(loss='lad', max_depth=2, subsample=0.5, learning_rate=0.001,
                                    n_estimators=400)
    gbr.fit(X, resid)
    # cumulative out-of-bag improvement; its minimum picks the tree count
    oob_error = -np.cumsum(gbr.oob_improvement_)
    #plt.plot(oob_error)
    #plt.show()
    ntrees = np.max(np.array([np.argmin(oob_error) + 1, 5]))  # use at least 5 trees
    # NOTE(review): `cnames` is not defined at module scope (it is local to
    # build_design_matrix) -- presumably df.columns was intended; verify
    print "Using ", ntrees, " trees for stock ", cnames[stock_idx]
    gbr.n_estimators = ntrees
    # get cross-validation accuracy
    print "Getting CV error..."
    cv_error = cross_validation.cross_val_score(gbr, X, y=resid, score_func=mean_absolute_error,
                                                cv=10)
    gbr.fit(X, resid)
    fimportance = gbr.feature_importances_
    fimportance /= fimportance.max()
    pfile_name = base_dir + 'data/GBR_O' + str(stock_idx+1) + '.p'
    pfile = open(pfile_name, 'wb')
    cPickle.dump(gbr, pfile)
    pfile.close()
    return gbr.predict(Xpredict), cv_error, fimportance
def build_design_matrix(stock_idx):
    """
    Build the boosting design matrices of 2pm input-feature values.

    Uses the module-level data frame df; predictors are the 2pm (time index 54)
    values of every input-feature ('I*') column.

    @param stock_idx: index of the stock. Currently unused, but kept so every
        stock shares the same call signature in boost_residuals.
    @return: (thisX, thisXpredict) where thisX covers the training days
        (with day 22 removed, matching the training responses) and
        thisXpredict covers the remaining (test) days.
    """
    # construct array of predictors from the input-feature columns
    fnames = [f for f in df.columns if f[0] == 'I']
    npredictors = len(fnames)
    thisX = np.empty((ntrain, npredictors))
    thisXpredict = np.empty((ndata - ntrain, npredictors))
    for j in xrange(npredictors):
        thisX[:, j] = df[fnames[j]].ix[:, 54][:ntrain]
        thisXpredict[:, j] = df[fnames[j]].ix[:, 54][ntrain:]
    # remove day 22, which is also dropped from the training residuals
    thisX = np.delete(thisX, 21, axis=0)
    return thisX, thisXpredict
if __name__ == "__main__":
    # get the stock labels ('O*' columns are the securities)
    snames = []
    for c in df.columns:
        if c[0] == 'O':
            snames.append(c)
    nstocks = len(snames)
    ndata = ndays
    # construct the response arrays: 4pm values for the training days,
    # 2pm values for the remaining days
    y = []
    for i in xrange(nstocks):
        thisy = np.empty(ndata)
        thisy[:ntrain] = train[snames[i]]
        thisy[ntrain:] = df[snames[i]].ix[:, 54][ntrain:]
        # drop days 22 and 422 (rows 21 and 421)
        thisy = np.delete(thisy, (21, 421), axis=0)
        y.append(thisy)
    # construct the predictor arrays
    two_hours = 24  # 24 time samples back; presumably ~two hours of data -- confirm sampling cadence
    args = []
    print 'Building data arrays...'
    mfeat = 1 + plags  # constant term plus plags lagged values
    X = []
    for i in xrange(nstocks):
        thisX = np.empty((ndata, mfeat))
        thisX[:, 0] = 1.0  # first column corresponds to constant
        for j in xrange(plags):
            # training days predict 4pm from 2pm values; test days predict
            # 2pm from 12pm values (two hours earlier)
            thisX[:ntrain, j + 1] = df[snames[i]].ix[:, 54 - j][:ntrain]
            thisX[ntrain:, j + 1] = df[snames[i]].ix[:, 54 - two_hours - j][ntrain:]
        thisX = np.delete(thisX, (21, 421), axis=0)
        X.append(thisX)
    # run the MCMC sampler to get linear predictors based on previous values
    samples = hmlin.run_gibbs(y, X, nsamples, nburnin, nthin, tdof)
    sfile = open(base_dir + 'data/linmae_samples.p', 'wb')
    cPickle.dump(samples, sfile)
    sfile.close()
    print 'Getting predictions from MCMC samples ...'
    # boost residuals from predicted values at 2pm and 4pm for the training set, and 2pm for the test set
    y = np.empty((ndays + ntrain, nstocks))
    yfit = np.empty((ndays + ntrain, nstocks))
    ysubmit = np.empty((ndays - ntrain, nstocks))
    Xsubmit = np.empty((ndays - ntrain, mfeat, nstocks))
    Xfit = np.empty((ndays + ntrain, mfeat, nstocks))
    # Xfit[0:ndata, :, :] = the predictors for the 2pm values for the entire data set
    # Xfit[ndata:, :, :] = the predictors for the 4pm values for the training data set
    ndata = ndays
    for i in xrange(nstocks):
        print '... ', snames[i], ' ...'
        y[:ndays, i] = df[snames[i]].ix[:, 54]  # value at 2pm
        y[ndays:, i] = train[snames[i]]  # value at 4pm
        Xfit[:, 0, i] = 1.0  # first column corresponds to constant
        Xsubmit[:, 0, i] = 1.0
        for j in xrange(plags):
            # value at 12pm
            Xfit[:ndays, j + 1, i] = df[snames[i]].ix[:, 54 - j - two_hours]
            # value at 2pm
            Xfit[ndays:, j + 1, i] = df[snames[i]].ix[:, 54 - j][:ntrain]
            Xsubmit[:, j + 1, i] = df[snames[i]].ix[:, 54 - j][ntrain:]
    # point predictions: median of the posterior predictive draws per day/stock
    for d in xrange(len(snames)):
        for k in xrange(yfit.shape[0]):
            ypredict, ypvar = samples.predict(Xfit[k, :, d], d)
            yfit[k, d] = np.median(ypredict)
        for k in xrange(ysubmit.shape[0]):
            ypredict, ypvar = samples.predict(Xsubmit[k, :, d], d)
            ysubmit[k, d] = np.median(ypredict)
    # submission from the linear model alone (before boosting)
    build_submission_file(ysubmit, snames, 'hmlin_mae.csv')
    resid = y - yfit
    # keep only the 4pm training-day residuals for boosting
    resid = resid[ndata:, :]
    # remove day 22 (row 21); NOTE(review): the original comment said
    # "days 22 and 422" but day 422 lies outside the training rows
    resid = np.delete(resid, 21, axis=0)
    # compare histogram of residuals with expected distribution
    print 'Comparing histogram of residuals against model distributions...'
    for d in xrange(len(snames)):
        this_resid = y[:, d] - yfit[:, d]
        # rmax = np.percentile(this_resid, 0.99)
        # rmin = np.percentile(this_resid, 0.01)
        # rrange = rmax - rmin
        # rmax += 0.05 * rrange
        # rmin -= 0.05 * rrange
        plt.clf()
        n, bins, patches = plt.hist(this_resid, bins=30, normed=True)
        bins = np.linspace(np.min(bins), np.max(bins), 100)
        sigsqr = samples.get_samples('sigsqr ' + str(d))
        # posterior-averaged Laplace density implied by the MAE (LAD) model
        pdf = np.zeros(len(bins))
        for b in xrange(len(bins)):
            pdf[b] = np.mean(1.0 / 2.0 / np.sqrt(sigsqr) * np.exp(-np.abs(bins[b]) / np.sqrt(sigsqr)))
        plt.plot(bins, pdf, 'r-', lw=2)
        plt.savefig(base_dir + 'plots/residual_distribution_' + snames[d] + '.png')
        plt.close()
    # plot model values vs true values at 4pm
    sidx = 0
    for s in snames:
        plt.clf()
        plt.plot(yfit[ndata:, sidx], train[s], 'b.')
        # clip axes to the central 98% of the true values
        xlower = np.percentile(train[s], 1.0)
        xupper = np.percentile(train[s], 99.0)
        xr = xupper - xlower
        plt.xlim(xlower - 0.05 * xr, xupper + 0.05 * xr)
        plt.ylim(xlower - 0.05 * xr, xupper + 0.05 * xr)
        # the identity line: perfect predictions would fall on it
        plt.plot(plt.xlim(), plt.xlim(), 'r-', lw=2)
        sidx += 1
        plt.xlabel('Estimated value at 4pm')
        plt.ylabel('True value at 4pm')
        plt.savefig(base_dir + 'plots/model_vs_true_' + s + '.png')
        plt.close()
    # construct array of predictors
    fnames = []
    for f in df.columns:
        if f[0] == 'I':
            fnames.append(f)
    # now gradient boost the residuals, one worker process per stock
    print "Fitting gradient boosted trees..."
    cnames = df.columns
    npredictors = 2 * (len(df.columns) - 1)
    args = []
    for i in xrange(nstocks):
        args.append((resid[:, i], i))
    pool = mp.Pool(mp.cpu_count()-1)
    results = pool.map(boost_residuals, args)
    #print np.mean(abs(resid[:, 1]))
    #results = boost_residuals(args[1])
    print 'Training error:', np.mean(abs(resid))
    # average the CV error and feature importances over all stocks
    cverror = 0.0
    fimportance = 0.0
    for r in results:
        cverror += np.mean(r[1])
        fimportance += r[2]
    fimportance /= nstocks
    sort_idx = fimportance.argsort()
    print "Sorted feature importances: "
    for s in sort_idx:
        print fnames[s], fimportance[s]
    print ''
    print 'CV error is: ', cverror / len(results)
    # add AR(p) contribution back in to form the final predictions
    idx = 0
    for r in results:
        ysubmit[:, idx] = r[0] + ysubmit[:, idx]
        idx += 1
    subfile = 'hmlin_mae_boost.csv'
    build_submission_file(ysubmit, snames, subfile)
    # compare submission file with last observed value as a sanity check
    dfsubmit = pd.read_csv(base_dir + 'data/submissions/' + subfile)
| {
"repo_name": "brandonckelly/BDC",
"path": "boost_hmlin_residuals.py",
"copies": "1",
"size": "9181",
"license": "mit",
"hash": -8890185631665146000,
"line_mean": 29.7056856187,
"line_max": 105,
"alpha_frac": 0.5826162727,
"autogenerated": false,
"ratio": 3.0964586846543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9173981780515073,
"avg_score": 0.0010186353678453923,
"num_lines": 299
} |
__author__ = 'brandonkelly'
import numpy as np
import matplotlib.pyplot as plt
class GcvExpSmoother(object):
    """
    Exponentially-weighted moving-average smoother for 1-D data.

    The e-folding length of the exponential kernel can be chosen automatically
    by minimizing the generalized cross-validation (GCV) error over a grid;
    see choose_efold().
    """

    def __init__(self, lookback=30):
        """
        Constructor for class to perform exponentially-weighted average smoothing of a 1-D data set.

        @param lookback: The maximum look-back length to use in the smoothing. Only the data points in
            y[idx - lookback:idx] are used to compute the smoothed estimate of y[idx+1].
        """
        # support of exponential smoother, only use this many data points in computation
        self.lookback = int(lookback)
        self.efold = 1.0
        # placeholder grids, overwritten by choose_efold(); np.zeros requires an
        # integer size (the original passed a float, which modern numpy rejects)
        self.gcv_grid = np.zeros(2 * self.lookback)
        self.efold_grid = np.zeros(2 * self.lookback)

    def smooth(self, y):
        """
        Return a smoothed estimate of y, using the current value of self.efold for the e-folding length.

        @param y: The data, a 1-D array with more than self.lookback elements.
        @return: The smoothed estimate of y, a 1-D numpy array.
        """
        ysmooth, peff = self._smooth(self.efold, y)
        return ysmooth

    def weights(self, efold, lookback=None):
        """
        Return the normalized exponential kernel weights, time-reversed so the
        last element (the most recent data point) carries the largest weight.

        @param efold: e-folding length of the exponential kernel.
        @param lookback: number of weights; defaults to self.lookback.
        """
        if lookback is None:
            lookback = self.lookback
        xvalues = np.arange(0.0, lookback)
        weights = np.exp(-xvalues / efold)
        return weights[::-1] / np.sum(weights)

    def choose_efold(self, y, verbose=False):
        """
        Choose the optimal e-folding length of the exponential smoothing kernel using generalized
        cross-validation.

        @param y: The training set, a 1-D array.
        @param verbose: If true, then print the chosen smoothing length.
        """
        ngrid = 20
        efold_grid = np.logspace(-1.0, np.log10(self.lookback * 2.0), ngrid)
        gcv_grid = np.zeros(efold_grid.size)
        for i in range(efold_grid.size):
            smoothed_y, peffective = self._smooth(efold_grid[i], y)
            gcv_grid[i] = gcv_error(y, smoothed_y, peffective)
        # choose e-folding length of smoother to minimize the generalized cross-validation error
        self.efold = efold_grid[gcv_grid.argmin()]
        if verbose:
            print('E-folding length chosen to be %s' % self.efold)
        # save the grids for later inspection/plotting
        self.efold_grid = efold_grid
        self.gcv_grid = gcv_grid

    def _smooth(self, efold, y):
        """
        Smooth y with an exponential kernel of e-folding length efold.

        @param efold: e-folding length of the kernel.
        @param y: the data, a 1-D array with more than self.lookback elements.
        @return: (ysmooth, peffective) where peffective is the trace of the
            smoothing matrix (its effective number of parameters), needed by
            the GCV criterion.
        @raise ValueError: if y has self.lookback or fewer elements.
        """
        # the original "try: y.size > self.lookback" comparison silently did
        # nothing; actually enforce the documented requirement
        if y.size <= self.lookback:
            raise ValueError('Y must have at least self.lookback elements.')
        ysmooth = np.zeros(y.size)
        ysmooth[0] = y[0]
        peffective = 0.0  # trace of the smoothing matrix, the effective number of parameters
        # treat the first self.lookback data points separately, since the base-line is shorter
        for i in range(1, self.lookback):
            weights = self.weights(efold, lookback=i)
            ysmooth[i] = weights.dot(y[0:i])
            peffective += weights[-1]
        weights = self.weights(efold)
        for i in range(y.size - self.lookback - 1):
            idx = self.lookback + i
            # estimate current y as exponentially-weighted average of previous self.lookback y-values
            ysmooth[idx] = weights.dot(y[idx - self.lookback:idx])
            peffective += weights[-1]
        ysmooth[-1] = weights.dot(y[y.size - self.lookback - 1:-1])
        peffective += weights[-1]
        return ysmooth, peffective
def gcv_error(y, ysmooth, peffective):
    """
    Compute generalized cross-validation error.

    @param y: The numpy array of y-values.
    @param ysmooth: The smoothed numpy array of y-values.
    @param peffective: The effective number of parameters of the smoothing matrix, given by its trace.
    @return: The generalized cross-validation error (L2-loss function).
    """
    residual_mse = np.mean((y - ysmooth) ** 2)
    shrinkage = 1.0 - peffective / y.size
    return residual_mse / shrinkage ** 2
if __name__ == "__main__":
    # example usage: smooth a noisy cosine test signal
    x = np.arange(500)
    y = np.cos(x / 15.0) + 0.1 * np.random.standard_normal(500)
    gcv = GcvExpSmoother()
    gcv.choose_efold(y, verbose=True)
    ysmooth = gcv.smooth(y)
    # GCV error as a function of the candidate smoothing length
    plt.semilogy(gcv.efold_grid, gcv.gcv_grid)
    plt.xlabel('E-folding length')
    plt.ylabel('GCV Error')
    plt.show()
    plt.clf()
    # data vs. the smoothed estimate
    plt.plot(x, y, '.', label='Data')
    plt.plot(x, ysmooth, label='Smoothed', lw=2)
    plt.legend()
    plt.show()
| {
"repo_name": "brandonckelly/bck_stats",
"path": "bck_stats/gcv_smoother.py",
"copies": "1",
"size": "4289",
"license": "mit",
"hash": -4714377831514575000,
"line_mean": 34.4462809917,
"line_max": 119,
"alpha_frac": 0.6152949405,
"autogenerated": false,
"ratio": 3.4505229283990344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9561342246296696,
"avg_score": 0.0008951245204679116,
"num_lines": 121
} |
__author__ = 'brandonkelly'
import numpy as np
import pandas as pd
import os
base_dir = os.environ['HOME'] + '/Projects/Kaggle/big_data_combine/'
def boxcox(x):
    """
    Apply a variance-stabilizing power transform to feature values.

    Values containing negatives are returned untouched; inputs containing a
    zero get the lambda = 0.5 Box-Cox power transform; strictly positive
    inputs get the log transform (the lambda -> 0 Box-Cox limit).
    """
    if np.any(x < 0):
        return x
    if np.any(x == 0):
        lamb = 0.5
        return (x ** lamb - 1.0) / lamb
    return np.log(x)
def build_dataframe(file=None):
# grab the data
df = pd.read_csv(base_dir + 'data/' + '1.csv')
df['day'] = pd.Series(len(df[df.columns[0]]) * [1])
df['time index'] = pd.Series(df.index)
df = df.set_index(['day', 'time index'])
files = [str(d) + '.csv' for d in range(2, 511)]
for f in files:
print 'Getting data for day ' + f.split('.')[0] + '...'
this_df = pd.read_csv(base_dir + 'data/' + f)
this_df['day'] = pd.Series(len(df[this_df.columns[0]]) * [int(f.split('.')[0])])
this_df['time index'] = pd.Series(this_df.index)
this_df = this_df.set_index(['day', 'time index'])
df = df.append(this_df)
# find the columns corresponding to the securities and predictors
colnames = df.columns
feature_index = [c[0] == 'I' for c in colnames]
nfeatures = np.sum(feature_index)
security_index = [c[0] == 'O' for c in colnames]
nsecurities = np.sum(security_index)
feature_labels = []
for c in colnames:
if c[0] == 'I':
feature_labels.append(c)
for f in feature_labels:
print 'Transforming data for ', f
df[f] = df[f].apply(boxcox)
if file is not None:
df.to_pickle(file)
return df
if __name__ == "__main__":
    # build the full data frame and cache it as a pickle for the other scripts
    build_dataframe(base_dir + 'data/BDC_dataframe.p')
"repo_name": "brandonckelly/BDC",
"path": "get_data.py",
"copies": "1",
"size": "1654",
"license": "mit",
"hash": 4036594477273187300,
"line_mean": 26.5833333333,
"line_max": 88,
"alpha_frac": 0.5562273277,
"autogenerated": false,
"ratio": 2.958855098389982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4015082426089982,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import unittest
import numpy as np
from scipy import stats, integrate
from tree import *
import matplotlib.pyplot as plt
from test_tree_parameters import build_test_data, SimpleBartStep
class ProposalTestCase(unittest.TestCase):
    """Tests for BartProposal: grow/prune draws, transition densities, and MCMC node counts."""

    def setUp(self):
        # build a single-tree test problem with known node means and noise variance
        nsamples = 500
        nfeatures = 4
        self.alpha = 0.95
        self.beta = 2.0
        self.X = np.random.standard_cauchy((nsamples, nfeatures))
        self.true_sigsqr = 0.7 ** 2
        tree, mu = build_test_data(self.X, self.true_sigsqr)
        self.true_mu = mu
        self.y = tree.y
        self.mtrees = 1  # single tree model
        self.mu = BartMeanParameter("mu", 1)
        # Rescale y to lie between -0.5 and 0.5
        self.true_mu -= self.y.min()
        self.y -= self.y.min()  # minimum = 0
        self.true_mu /= self.y.max()
        self.true_sigsqr /= self.y.max() ** 2
        self.y /= self.y.max()  # maximum = 1
        self.true_mu -= 0.5
        self.y -= 0.5  # range is -0.5 to 0.5
        tree.y = self.y
        # Tree parameter object, note that this is different from a BaseTree object
        self.tree = BartTreeParameter('tree', self.X, self.y, self.mtrees, self.alpha, self.beta,
                                      self.mu.mubar, self.mu.prior_var)
        self.tree.value = tree
        self.mu.treeparam = self.tree
        # update moments of y-values in each terminal node since we transformed the data
        for leaf in self.tree.value.terminalNodes:
            self.tree.value.filter(leaf)
        self.mu.sigsqr = BartVariance(self.X, self.y)
        self.mu.sigsqr.bart_step = SimpleBartStep()
        self.mu.sigsqr.value = self.true_sigsqr
        self.tree.sigsqr = self.mu.sigsqr
        self.tree_proposal = BartProposal()

    def tearDown(self):
        # release the fixtures between tests
        del self.X
        del self.y
        del self.mu
        del self.tree

    def test_draw(self):
        """Grow/prune draws must change the terminal-node count by +/-1, or 0 when no valid split exists."""
        # make sure grow/prune operations return a tree with correct # of terminal nodes
        current_tree = self.tree.value
        ntrials = 1000
        for i in xrange(ntrials):
            new_tree = self.tree_proposal.draw(current_tree)
            nleafs_new = len(new_tree.terminalNodes)
            nleafs_old = len(current_tree.terminalNodes)
            if self.tree_proposal._node is None or self.tree_proposal._node.feature is None:
                # make sure tree configuration is not updated
                self.assertEqual(nleafs_new, nleafs_old)
            elif self.tree_proposal._operation == 'grow':
                # make sure there is one more terminal node
                self.assertEqual(nleafs_new, nleafs_old + 1)
            else:
                # make sure there is one less terminal node
                self.assertEqual(nleafs_new, nleafs_old - 1)
            current_tree = new_tree

    def test_logdensity(self):
        """Compare logdensity() against the transition-kernel ratio computed directly."""
        # make sure ratio of transition kernels matches the values computed directly
        current_tree = self.tree.value
        ntrials = 1000
        for i in xrange(ntrials):
            new_tree = self.tree_proposal.draw(current_tree)
            logratio = self.tree_proposal.logdensity(current_tree, new_tree, True)
            logratio = -logratio  # sign of output agrees with MetroStep.accept, reverse for convention of this test
            nleafs_new = len(new_tree.terminalNodes)
            nleafs_old = len(current_tree.terminalNodes)
            if self.tree_proposal._node is None or self.tree_proposal._node.feature is None:
                # tree configuration is not updated
                self.assertAlmostEqual(logratio, 0.0)
                continue
            elif self.tree_proposal._operation == 'grow':
                # forward kernel: choose a leaf, a feature, and a split point
                log_forward = -np.log(nleafs_old) - np.log(current_tree.n_features) - \
                              np.log(self.tree_proposal._node.npts)
                # reverse is the prune update
                log_backward = -np.log(len(new_tree.get_terminal_parents()))
            else:
                log_forward = -np.log(len(current_tree.get_terminal_parents()))
                # reverse mode is grow update
                log_backward = -np.log(nleafs_new) - np.log(new_tree.n_features) - np.log(self.tree_proposal._node.npts)
            logratio_direct = self.tree.logprior(new_tree) - log_forward - \
                              (self.tree.logprior(current_tree) - log_backward)
            self.assertAlmostEqual(logratio, logratio_direct)
            current_tree = new_tree

    def test_mcmc(self):
        """Run a short Metropolis sampler and check the posterior mass on the true node counts."""
        # run a simple MCMC sampler for the tree configuration to make sure that we correctly constrain the number of
        # internal and terminal nodes
        burnin = 1000
        niter = 5000
        true_nleaves = len(self.tree.value.terminalNodes)
        true_ninodes = len(self.tree.value.internalNodes)
        metro_step = steps.MetroStep(self.tree, self.tree_proposal, niter)
        nleaves = np.zeros(niter)
        ninodes = np.zeros(niter)
        naccepted = 0
        naccept_grow = 0
        naccept_prune = 0
        print 'Doing burnin...'
        for i in xrange(burnin):
            # burnin phase
            if i % 100 == 0:
                print i, '...'
            old_nleaves = len(self.tree.value.terminalNodes)
            old_nbranches = len(self.tree.value.internalNodes)
            metro_step.do_step()
            new_nleaves = len(self.tree.value.terminalNodes)
            new_nbranches = len(self.tree.value.internalNodes)
            # do_step() increments naccept on acceptance; detect it by the delta
            accepted = (metro_step.naccept - naccepted) == 1
            if accepted:
                if self.tree_proposal._operation == 'grow':
                    naccept_grow += 1
                elif self.tree_proposal._operation == 'prune':
                    naccept_prune += 1
                if not self.tree_proposal._prohibited_proposal:
                    # make sure the tree configuration has changed
                    self.assertNotEqual(old_nleaves, new_nleaves)
                    self.assertNotEqual(old_nbranches, new_nbranches)
                else:
                    # proposal results in prohibited tree structure, so structure is unchanged
                    self.assertEqual(old_nbranches, new_nbranches)
                    self.assertEqual(old_nleaves, new_nleaves)
                naccepted += 1
            else:
                # proposal rejected, make sure tree is not updated
                self.assertEqual(old_nleaves, new_nleaves)
                self.assertEqual(old_nbranches, new_nbranches)
        print 'Sampling tree structures...'
        for i in xrange(niter):
            if i % 100 == 0:
                print i, '...'
            # now save the number of nodes sampled from their posterior
            metro_step.do_step()
            nleaves[i] = len(self.tree.value.terminalNodes)
            ninodes[i] = len(self.tree.value.internalNodes)
        print 'Number of accepted grow proposals:', naccept_grow
        print 'Number of accepted prune proposals:', naccept_prune
        plt.plot(nleaves, '.')
        plt.plot(ninodes, 'r.')
        plt.ylabel('Number of nodes')
        plt.show()
        ntrue = np.sum(ninodes[nleaves == true_nleaves] == true_ninodes)
        ntrue_fraction = ntrue / float(niter)
        # posterior probability of correct number of internal and terminal nodes should be at least 5%
        print ntrue_fraction
        self.assertGreater(ntrue_fraction, 0.05)
if __name__ == "__main__":
    # run the test suite when executed directly
    unittest.main()
| {
"repo_name": "acbecker/BART",
"path": "tests/test_bart_proposal.py",
"copies": "1",
"size": "7492",
"license": "mit",
"hash": 9188560527729688000,
"line_mean": 40.3922651934,
"line_max": 120,
"alpha_frac": 0.5858248799,
"autogenerated": false,
"ratio": 3.8381147540983607,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49239396339983604,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
import unittest
import numpy as np
from scipy import stats, integrate
from tree import *
import matplotlib.pyplot as plt
# generate test data from an ensemble of trees
def build_test_data(X, sigsqr, ngrow=5, mtrees=1):
    """
    Generate regression test data from a randomly grown tree ensemble.

    @param X: (nsamples, nfeatures) array of predictors.
    @param sigsqr: noise variance of the simulated response.
    @param ngrow: number of grow operations per tree (scalar, or one per tree).
    @param mtrees: number of trees in the ensemble.
    @return: (forest, mu_list); unwrapped to a single tree and its mu array
        when mtrees == 1. Each tree's y attribute holds the simulated response.
    """
    if np.isscalar(ngrow):
        ngrow = [ngrow] * mtrees
    # temporary response, used only to construct the tree topology
    ytemp = np.random.standard_normal(X.shape[0])
    forest = []
    mu_list = []
    mu_map = np.zeros(X.shape[0])
    for m in xrange(mtrees):
        tree = BaseTree(X, ytemp, min_samples_leaf=1)
        for i in xrange(ngrow[m]):
            tree.grow()
        # draw one mean value per terminal node
        mu = np.random.normal(2.0, 1.3, len(tree.terminalNodes))
        forest.append(tree)
        mu_list.append(mu)
        n_idx = 0
        # map each data point to the mean of the leaf it falls in
        this_mu_map = np.zeros(mu_map.size)
        for leaf in tree.terminalNodes:
            y_in_node = tree.filter(leaf)[1]
            this_mu_map[y_in_node] = mu[n_idx]
            n_idx += 1
        mu_map += this_mu_map
    # simulated response: ensemble mean plus Gaussian noise
    y = mu_map + np.sqrt(sigsqr) * np.random.standard_normal(X.shape[0])
    for tree in forest:
        tree.y = y
        # rerun filter to update the y-means and variances in each terminal node
        for leaf in tree.terminalNodes:
            x_in_node, y_in_node = tree.filter(leaf)
    if mtrees == 1:
        forest = forest[0]
        mu_list = mu_list[0]
    return forest, mu_list
class SimpleBartStep(object):
    """Minimal stand-in for a BART MCMC step object; exposes only the residuals."""

    def __init__(self):
        nsamples = 500
        self.nsamples = nsamples
        # standard-normal placeholder residuals; tests overwrite these as needed
        self.resids = np.random.standard_normal(nsamples)
class VarianceTestCase(unittest.TestCase):
    """Tests for BartVariance: its data-driven prior scale and conditional-posterior draws."""

    def setUp(self):
        # single-tree test problem with known noise variance
        nsamples = 2000
        nfeatures = 2
        self.X = np.random.standard_cauchy((nsamples, nfeatures))
        self.true_sigsqr = 0.7 ** 2
        tree, mu = build_test_data(self.X, self.true_sigsqr)
        self.y = tree.y
        self.sigsqr = BartVariance(self.X, self.y)
        self.sigsqr.bart_step = SimpleBartStep()
        # get residuals by subtracting each leaf's true mean from its data
        mu_map = np.zeros(nsamples)
        n_idx = 0
        for node in tree.terminalNodes:
            y_in_node = tree.filter(node)[1]
            mu_map[y_in_node] = mu[n_idx]
            n_idx += 1
        self.sigsqr.bart_step.resids = self.y - mu_map

    def tearDown(self):
        # release the fixtures between tests
        del self.X
        del self.y
        del self.true_sigsqr
        del self.sigsqr

    def test_prior(self):
        """The prior scale estimated from the data should land near the known noise level."""
        nsamples = self.X.shape[0]
        y = 2.0 + self.X[:, 0] + np.sqrt(self.true_sigsqr) * np.random.standard_normal(nsamples)
        SigSqr = BartVariance(self.X, y)
        SigSqr.bart_step = SimpleBartStep()
        nu = 3.0  # Degrees of freedom for error variance prior; should always be > 3
        q = 0.90  # The quantile of the prior that the sigma2 estimate is placed at
        qchi = stats.chi2.interval(q, nu)[1]
        # scale parameter for error variance scaled inverse-chi-square prior
        lamb = self.true_sigsqr * qchi / nu
        # is the prior scale parameter within 5% of the expected value?
        frac_diff = np.abs(SigSqr.lamb - lamb) / lamb
        prior_msg = "Fractional difference in prior scale parameter for variance parameter is greater than 10%"
        self.assertLess(frac_diff, 0.10, msg=prior_msg)

    def test_random_posterior(self):
        """Posterior draws should match the analytic inverse-gamma moments and cover the truth."""
        ndraws = 100000
        ssqr_draws = np.empty(ndraws)
        for i in xrange(ndraws):
            ssqr_draws[i] = self.sigsqr.random_posterior()
        nu = self.sigsqr.nu
        prior_ssqr = self.sigsqr.lamb
        # conjugate scaled-inverse-chi-square posterior parameters
        post_dof = nu + len(self.y)
        post_ssqr = (nu * prior_ssqr + self.y.size * np.var(self.sigsqr.bart_step.resids)) / post_dof
        igam_shape = post_dof / 2.0
        igam_scale = post_dof * post_ssqr / 2.0
        igamma = stats.distributions.invgamma(igam_shape, scale=igam_scale)
        # test draws from conditional posterior by comparing 1st and 2nd moments to true values
        true_mean = igamma.moment(1)
        frac_diff = np.abs(true_mean - ssqr_draws.mean()) / true_mean
        rpmsg = "Fractional difference in mean from BartVariance.random_posterior() is greater than 2%"
        self.assertLess(frac_diff, 0.02, msg=rpmsg)
        true_ssqr = igamma.moment(2)
        frac_diff = np.abs(true_ssqr - (ssqr_draws.var() + ssqr_draws.mean() ** 2)) / true_ssqr
        rpmsg = "Fractional difference in 2nd moment from BartVariance.random_posterior() is greater than 2%"
        self.assertLess(frac_diff, 0.02, msg=rpmsg)
        # make sure gibbs sampler constrains the correct value
        ssqr_low = np.percentile(ssqr_draws, 1.0)
        ssqr_high = np.percentile(ssqr_draws, 99.0)
        rpmsg = "Value of Variance parameter returned by Gibbs sampler is outside of 99% credibility interval."
        self.assertGreater(self.true_sigsqr, ssqr_low, msg=rpmsg)
        self.assertLess(self.true_sigsqr, ssqr_high, msg=rpmsg)
class MuTestCase(unittest.TestCase):
    """Tests for BartMeanParameter: conditional-posterior draws of the terminal-node means."""

    def setUp(self):
        # single-tree test problem with known node means and noise variance
        nsamples = 2000
        nfeatures = 2
        self.alpha = 0.95
        self.beta = 2.0
        self.X = np.random.standard_cauchy((nsamples, nfeatures))
        self.true_sigsqr = 0.7 ** 2
        tree, mu = build_test_data(self.X, self.true_sigsqr)
        self.true_mu = mu
        self.tree = tree
        self.y = tree.y
        self.mtrees = 1  # single tree model
        self.mu = BartMeanParameter("mu", 1)
        # Rescale y to lie between -0.5 and 0.5
        self.true_mu -= self.y.min()
        self.y -= self.y.min()  # minimum = 0
        self.true_mu /= self.y.max()
        self.true_sigsqr /= self.y.max() ** 2
        self.y /= self.y.max()  # maximum = 1
        self.true_mu -= 0.5
        self.y -= 0.5  # range is -0.5 to 0.5
        tree.y = self.y
        # update moments of y-values in each terminal node since we transformed the data
        for leaf in self.tree.terminalNodes:
            self.tree.filter(leaf)
        self.mu.sigsqr = BartVariance(self.X, self.y)
        self.mu.sigsqr.bart_step = SimpleBartStep()
        self.mu.sigsqr.value = self.true_sigsqr
        self.tree_param = BartTreeParameter('tree', self.X, self.y, self.mtrees, prior_mu=self.mu.mubar,
                                            prior_var=self.mu.prior_var)
        self.tree_param.value = tree
        self.mu.treeparam = self.tree_param
        self.mu.set_starting_value()

    def tearDown(self):
        # release the fixtures between tests
        del self.X
        del self.y
        del self.mu
        del self.tree

    def test_random_posterior(self):
        """Compare sampled node means against the analytic conjugate-normal posterior."""
        # first get values of mu drawn from their conditional posterior
        ndraws = 100000
        nleaves = len(self.mu.value)
        mu_draws = np.empty((ndraws, nleaves))
        for i in xrange(ndraws):
            mu_draws[i, :] = self.mu.random_posterior()
        l_idx = 0
        for leaf in self.mu.treeparam.value.terminalNodes:
            ny = leaf.npts
            ybar = leaf.ybar
            # conjugate normal posterior for this leaf's mean
            post_var = 1.0 / (1.0 / self.mu.prior_var + ny / self.mu.sigsqr.value)
            post_mean = post_var * (self.mu.mubar / self.mu.prior_var + ny * ybar / self.mu.sigsqr.value)
            # test draws from conditional posterior by comparing 1st and 2nd moments to true values
            zscore = np.abs((post_mean - mu_draws[:, l_idx].mean())) / np.sqrt(post_var / ndraws)
            rpmsg = "Sample mean from BartMeanParameter.random_posterior() differs by more than 3-sigma."
            self.assertLess(zscore, 3.0, msg=rpmsg)
            frac_diff = np.abs(np.sqrt(post_var) - mu_draws[:, l_idx].std()) / np.sqrt(post_var)
            rpmsg = "Fractional difference in standard deviation from BartMeanParameter.random_posterior() is greater" \
                    + " than 2%"
            self.assertLess(frac_diff, 0.02, msg=rpmsg)
            # make sure gibbs sampler constrains the correct value
            mu_low = np.percentile(mu_draws[:, l_idx], 1.0)
            mu_high = np.percentile(mu_draws[:, l_idx], 99.0)
            rpmsg = "Value of Terminal Node output parameter returned by Gibbs sampler is outside of 99% credibility" \
                    + " interval.\n Violated: " + str(mu_low) + ' < ' + str(self.true_mu[l_idx]) + ' < ' + str(mu_high)
            self.assertGreater(self.true_mu[l_idx], mu_low, msg=rpmsg)
            self.assertLess(self.true_mu[l_idx], mu_high, msg=rpmsg)
            l_idx += 1
class TreeTestCase(unittest.TestCase):
    """Tests for BartTreeParameter: the marginal log-likelihood of a tree configuration."""

    def setUp(self):
        nsamples = 20  # ensure a small number of data points in each node so we don't get underflows below
        nfeatures = 2
        self.alpha = 0.95
        self.beta = 2.0
        self.X = np.random.standard_cauchy((nsamples, nfeatures))
        self.true_sigsqr = 0.7 ** 2
        tree, mu = build_test_data(self.X, self.true_sigsqr)
        self.true_mu = mu
        self.y = tree.y
        self.mtrees = 1  # single tree model
        self.mu = BartMeanParameter("mu", 1)
        # Rescale y to lie between -0.5 and 0.5
        self.true_mu -= self.y.min()
        self.y -= self.y.min()  # minimum = 0
        self.true_mu /= self.y.max()
        self.true_sigsqr /= self.y.max() ** 2
        self.y /= self.y.max()  # maximum = 1
        self.true_mu -= 0.5
        self.y -= 0.5  # range is -0.5 to 0.5
        tree.y = self.y
        # update moments of y-values in each terminal node since we transformed the data
        for leaf in tree.terminalNodes:
            tree.filter(leaf)
        self.mu.sigsqr = BartVariance(self.X, self.y)
        self.mu.sigsqr.bart_step = SimpleBartStep()
        self.mu.sigsqr.value = self.true_sigsqr
        self.tree_param = BartTreeParameter('tree', self.X, self.y, self.mtrees, prior_mu=self.mu.mubar,
                                            prior_var=self.mu.prior_var)
        self.tree_param.value = tree
        self.tree_param.sigsqr = self.mu.sigsqr
        self.mu.treeparam = self.tree_param
        self.mu.set_starting_value()

    def tearDown(self):
        # release the fixtures between tests
        del self.X
        del self.y
        del self.mu
        del self.tree_param

    def test_logdens(self):
        """Compare the analytic marginal log-likelihood against numerical integration over mu."""
        loglik_direct = 0.0
        mugrid = np.linspace(-5.0, 5.0, 1001)
        for leaf in self.mu.treeparam.value.terminalNodes:
            # compute log-likelihood numerically
            in_node = self.mu.treeparam.value.filter(leaf)[1]  # find the data that end up in this node
            lik = 1.0
            for i in xrange(leaf.npts):
                lik *= stats.distributions.norm(self.y[in_node][i], np.sqrt(self.true_sigsqr)).pdf(mugrid)
            # add in prior contribution
            lik *= stats.distributions.norm(self.mu.mubar, np.sqrt(self.mu.prior_var)).pdf(mugrid)
            # marginalize the node mean numerically via Simpson's rule
            loglik_direct += np.log(integrate.simps(lik, mugrid))
        # make sure numerical and analytical calculation agree
        loglik = self.mu.treeparam.logdensity(self.mu.treeparam.value)
        frac_diff = np.abs(loglik_direct - loglik) / np.abs(loglik)
        tree_msg = "Fractional difference between numerical calculation of tree loglik and analytical calculation is" \
                   + " greater than 1%"
        self.assertLess(frac_diff, 0.01, msg=tree_msg)
if __name__ == "__main__":
    # run the test suite when executed directly
    unittest.main()
| {
"repo_name": "acbecker/BART",
"path": "tests/test_tree_parameters.py",
"copies": "1",
"size": "11055",
"license": "mit",
"hash": 386242495227616800,
"line_mean": 38.3416370107,
"line_max": 120,
"alpha_frac": 0.5952057892,
"autogenerated": false,
"ratio": 3.2774977764601245,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43727035656601243,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brandonkelly'
__notes__ = "Adapted from Dan Foreman-Mackey triangle.py module."
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def multiclass_triangle(xs, classes, labels=None, verbose=True, fig=None, **kwargs):
    """
    Make a triangle ("corner") plot of the samples in xs, color-coded by class.

    Diagonal panels show per-class histograms of each dimension; lower-triangle
    panels show pairwise scatter plots with one color per class.

    @param xs: sample array of shape (nsamples, ndim), or 1-D for a single dimension.
    @param classes: array of per-sample class labels, length nsamples.
    @param labels: optional list of axis labels, one per dimension.
    @param verbose: unused; kept for backwards compatibility with existing callers.
    @param fig: optional existing matplotlib figure with ndim*ndim axes to draw into.
    @param kwargs: extra options; 'bins' sets the number of histogram bins (default 50).
    @return: the matplotlib figure.
    """
    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    K = len(xs)
    factor = 2.0  # size of one side of one panel
    lbdim = 0.5 * factor  # size of left/bottom margin
    trdim = 0.05 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim

    if fig is None:
        fig, axes = plt.subplots(K, K, figsize=(dim, dim))
    else:
        # catch only the reshape failure; the original bare except would also
        # have masked unrelated programming errors
        try:
            axes = np.array(fig.axes).reshape((K, K))
        except ValueError:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))

    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
                        wspace=whspace, hspace=whspace)

    extents = [[x.min(), x.max()] for x in xs]
    # Check for parameters that never change.
    m = np.array([e[0] == e[1] for e in extents], dtype=bool)
    if np.any(m):
        raise ValueError(("It looks like the parameter(s) in column(s) "
                          "{0} have no dynamic range. Please provide an "
                          "`extent` argument.")
                         .format(", ".join(map("{0}".format,
                                               np.arange(len(m))[m]))))

    class_labels = np.unique(classes)
    # one color per class, cycled if there are more classes than colors
    color_list = ["Black", "DodgerBlue", "DarkOrange", "Green", "Magenta", "Red", "Brown", "Cyan"] * 10

    for i, x in enumerate(xs):
        ax = axes[i, i]
        # Plot the histograms.
        n = []
        for l, k in enumerate(class_labels):
            n_k, b_k, p_k = ax.hist(x[classes == k], bins=kwargs.get("bins", 50),
                                    range=extents[i], histtype="step",
                                    color=color_list[l], lw=2, normed=True)
            n.append(n_k)
        # Set up the axes.
        ax.set_xlim(extents[i])
        ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        ax.xaxis.set_major_locator(MaxNLocator(5))
        # Not so DRY.
        if i < K - 1:
            ax.set_xticklabels([])
        else:
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                ax.set_xlabel(labels[i])
                ax.xaxis.set_label_coords(0.5, -0.3)

        for j, y in enumerate(xs):
            ax = axes[i, j]
            if j > i:
                # hide the upper triangle
                ax.set_visible(False)
                ax.set_frame_on(False)
                continue
            elif j == i:
                continue

            # pairwise scatter plot, one color per class
            for l, k in enumerate(class_labels):
                ax.plot(y[classes == k], x[classes == k], 'o', ms=1.5, color=color_list[l],
                        rasterized=True, alpha=0.25)
            extent = [[y.min(), y.max()], [x.min(), x.max()]]
            ax.set_xlim(extent[0])
            ax.set_ylim(extent[1])
            ax.xaxis.set_major_locator(MaxNLocator(5))
            ax.yaxis.set_major_locator(MaxNLocator(5))

            if i < K - 1:
                ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j])
                    ax.xaxis.set_label_coords(0.5, -0.3)

            if j > 0:
                ax.set_yticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    ax.set_ylabel(labels[i])
                    ax.yaxis.set_label_coords(-0.3, 0.5)

    return fig
"repo_name": "brandonckelly/bck_stats",
"path": "bck_stats/multiclass_triangle_plot.py",
"copies": "1",
"size": "4254",
"license": "mit",
"hash": 5972071105419535000,
"line_mean": 35.6810344828,
"line_max": 120,
"alpha_frac": 0.4974141984,
"autogenerated": false,
"ratio": 3.4726530612244897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9458682673662566,
"avg_score": 0.0022769171923848856,
"num_lines": 116
} |
__author__ = 'brcinko'
# URL routing for the squid-proxy configuration API.
# Uses the old-style ``patterns()`` routing (Django <= 1.7) plus DRF's
# format-suffix support.
from django.conf.urls import patterns, include, url
from django.contrib import admin
from rest_framework.urlpatterns import format_suffix_patterns
# NOTE(review): implicit relative import (Python 2 only); Python 3 would need
# ``from . import views``.
import views

urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
    url(r'^admin/', include(admin.site.urls)),
    # acl rules (CRUD on individual squid acl definitions)
    url(r'^acl_rules/(?P<pk>\d+)/$', views.acl_rule_detail, name='acl_rule_detail'),
    url(r'^acl_rules/$', views.acl_rules_list, name='acl_rules_list'),
    # http_access
    url(r'^acl_list/$', views.acl_list, name='acl_list'),
    url(r'^acl_list/(?P<pk>\d+)/$', views.acl_list_detail, name='acl_list_detail'),
    # authentication
    url(r'^authentication/$', views.authentication, name='authentication'),
    url(r'^authentication_db/$', views.authentication_db, name='authentication_db'),
    url(r'^authentication/(?P<pk>\d+)/$', views.authentication_detail, name='authentication_detail'),
    url(r'^authentication_db/(?P<pk>\d+)/$', views.authentication_db_detail, name='authentication_db_detail'),
    # reconfigure
    url(r'^update_config/$', views.update_config, name='update_config'),
)
# Allow clients to request explicit response formats (e.g. a ``.json`` suffix).
urlpatterns = format_suffix_patterns(urlpatterns)
| {
"repo_name": "erigones/api_squid",
"path": "urls.py",
"copies": "1",
"size": "1446",
"license": "bsd-3-clause",
"hash": 6763217094894526000,
"line_mean": 56.84,
"line_max": 129,
"alpha_frac": 0.5456431535,
"autogenerated": false,
"ratio": 4.3293413173652695,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003096130788601922,
"num_lines": 25
} |
__author__ = 'brcinko'
import os
import psycopg2
import psycopg2.extras
import json
import fileinput
import string
from settings import *
"""
This file contains method to update a reconfigure squid proxy server
"""
"""
example of JSON in aclrule.values:
{"values":[
"192.168.0.0/24",
"127.0.0.0/24"
]
}
EVERY RECORD IN THE DATABASE HAS TO BE IN THIS FORM!
"""
# Column names used when reading ACL values from the database.
# NOTE: the trailing comma is required — without it the parentheses are just
# grouping and this would be the plain string 'acl_values', not a tuple.
columns = (
    'acl_values',
)
def update_rules(rules):
    """Render squid ``acl`` configuration lines for the given ACL rules.

    Each rule contributes one ``acl <name> <type> <value>`` line per entry of
    its ``acl_values`` payload, which must have the shape
    ``{"values": [...]}`` (see the module docstring).

    :param rules: iterable of rule objects with ``acl_name``, ``acl_type``
        and ``acl_values`` attributes.
    :return: concatenated configuration text (empty string for no rules).
    """
    data = ""
    for rule in rules:
        acl_string = ""
        help_acl_string = "acl " + rule.acl_name + " " + rule.acl_type + " "
        # acl_values may arrive either as a JSON string or as an already
        # decoded dict, depending on how the row was fetched.
        # NOTE(review): ``basestring`` is Python 2 only; replace with ``str``
        # when porting to Python 3.
        if isinstance(rule.acl_values, basestring):
            values = json.loads(rule.acl_values)
            for value in values["values"]:
                acl_string += help_acl_string + value + '\n'
        else:
            for value in rule.acl_values['values']:
                acl_string += help_acl_string + value + '\n'
        data += acl_string
    return data
def update_list(patterns):
    """Render squid ``http_access`` allow/deny lines for the given patterns.

    :param patterns: iterable of objects with a boolean ``deny_value`` and an
        ``acl_rules`` manager whose ``all()`` yields ACL rule objects.
    :return: concatenated configuration text, one line per pattern.
    """
    lines = []
    for pattern in patterns:
        action = "deny" if pattern.deny_value is True else "allow"
        names = "".join(acl.acl_name + " " for acl in pattern.acl_rules.all())
        lines.append("http_access " + action + " " + names + "\n")
    return "".join(lines)
def get_db_string(db):
    """Build the squid basic-auth DB helper argument string for *db*.

    :param db: object carrying ``database_name``, ``user``, ``password``,
        ``table``, ``username_column``, ``password_column`` and ``encryption``.
    :return: the newline-terminated command-line fragment.
    """
    db_string = ' --dsn "DSN:mysql:' + db.database_name + '" '
    # Bug fix: the original emitted '--table' with no trailing space, fusing
    # the flag with the table name (e.g. ``--tableusers``).
    db_string += '--user ' + db.user + ' --password ' + db.password + ' --table ' + db.table
    db_string += ' --usercol ' + db.username_column + ' --passwdcol ' + db.password_column + ' --' + db.encryption
    db_string += "\n"
    return db_string
def update_authentication(auth):
    """Render the ``auth_param basic`` squid configuration section for *auth*.

    :param auth: authentication settings object (``enabled``, ``program``,
        ``children``, ``realm``, ``credentialsttl``, ``case_sensitive``,
        ``utf8`` and an ``authenticationdb`` relation).
    :return: the configuration text, or ``False`` when authentication is
        disabled (callers test the result with ``is not False``).
    """
    if auth.enabled is False:
        return False
    # Only build the DB connection string when authentication is actually
    # enabled (the original computed it unconditionally).
    db_string = get_db_string(auth.authenticationdb)
    auth_string = "auth_param basic program " + auth.program + db_string
    auth_string += "auth_param basic children " + str(auth.children) + " \n"
    auth_string += "auth_param basic realm " + auth.realm + " \n"
    auth_string += "auth_param basic credentialsttl " + str(auth.credentialsttl) + " \n"
    if auth.case_sensitive is True:
        auth_string += "auth_param basic case_sensitive on" + " \n"
    else:
        auth_string += "auth_param basic case_sensitive off" + " \n"
    if auth.utf8 is True:
        # Bug fix: the original emitted a stray leading space before this
        # directive, inconsistent with every sibling line.
        auth_string += "auth_param basic utf8 on" + " \n"
    else:
        auth_string += "auth_param basic utf8 off" + " \n"
    return auth_string
def generate_file(data_rules, data_patterns, data_auth, inputfile):
    """Render the squid configuration from a template.

    Reads *inputfile*, replaces the placeholder flags (``flag_rules``,
    ``flag_patterns``, ``flag_auth``, ... — presumably defined by
    ``from settings import *``; confirm against settings.py) with the
    generated acl / http_access / auth sections, and writes the result to
    ``SQUID_CONF_FILE``.
    """
    auth_str = update_authentication(data_auth)
    rules_str = update_rules(data_rules)
    # return rules_str
    patterns_str = update_list(data_patterns)
    with open(inputfile, "r") as myfile:
        file_string = myfile.read()
        file_string = file_string.replace(flag_rules, rules_str)
        file_string = file_string.replace(flag_patterns, patterns_str)
        # Auth section is optional: update_authentication() returns False
        # when authentication is disabled.
        if auth_str is not False:
            file_string = file_string.replace(flag_auth, auth_str)
            file_string = file_string.replace(flag_auth_acl, "acl api_auth proxy_auth REQUIRED")
            file_string = file_string.replace(flag_auth_pattern, "http_access allow db-auth")
        with open(SQUID_CONF_FILE, "w") as text_file:
            text_file.write(file_string)
def main():
    """Placeholder CLI entry point; config generation is driven via the API views."""
    pass


if __name__ == "__main__":
    main()
"repo_name": "erigones/api_squid",
"path": "helpers.py",
"copies": "1",
"size": "3668",
"license": "bsd-3-clause",
"hash": 888139865856307300,
"line_mean": 28.5887096774,
"line_max": 114,
"alpha_frac": 0.5932388222,
"autogenerated": false,
"ratio": 3.3962962962962964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489535118496296,
"avg_score": null,
"num_lines": null
} |
__author__ = 'breddels'
# due to 32 bit limitations in numpy, we cannot use astropy's fits module for writing colfits,
# so the FITS headers and data layout below are written by hand.
import sys
import math
import vaex.dataset
import astropy.io.fits
import numpy as np
import logging
logger = logging.getLogger("vaex.file.colfits")
def empty(filename, length, column_names, data_types, data_shapes, ucds, units, null_values=None):
    """Write the skeleton of a column-oriented ("colfits") FITS file.

    Writes a primary HDU plus a BINTABLE extension header in which every
    column is a single cell of ``length`` values (``TDIM (1, length)``), then
    reserves — without writing — the space for the column data, so the
    columns can later be filled in place / memory-mapped.

    :param filename: output path (overwritten).
    :param length: number of rows per column.
    :param column_names: sequence of column names.
    :param data_types: numpy dtypes, parallel to column_names.
    :param data_shapes: shapes, parallel to column_names (currently unused
        per column beyond iteration; rank-1 arrays are a TODO).
    :param ucds: per-column UCD strings or None.
    :param units: per-column unit strings or None.
    :param null_values: optional mapping column name -> TNULL value.
    """
    # Bug fix: avoid the shared-mutable-default pitfall of ``null_values={}``.
    null_values = {} if null_values is None else null_values
    with open(filename, "wb") as f:
        logger.debug("preparing empty fits file: %s", filename)

        class Scope(object):
            pass

        def write(key, value, comment=""):
            # One fixed-width 80-character FITS header card: "KEY     = value / comment".
            first_part = "{key:8}= {value:20} / ".format(key=key, value=value)
            f.write(first_part.encode("ascii"))
            leftover = 80 - len(first_part)
            f.write(("{comment:"+str(leftover) +"}").format(comment=comment).encode("ascii"))
            logger.debug("at pos: %s", f.tell())

        def finish_header():
            # END card, then pad with spaces to the FITS 2880-byte record size.
            f.write("{end:80}".format(end="END").encode("ascii"))
            offset = f.tell()
            bytes_over_padding = offset % 2880
            # Bug fix: pass lazy format args, not a tuple, to logger.debug.
            logger.debug("bytes_over_padding: %s", bytes_over_padding)
            if bytes_over_padding > 0:
                padding = 2880 - bytes_over_padding
                f.write((" "*padding).encode("ascii"))

        def finish_data():
            # Data units pad with NUL bytes to the 2880-byte boundary.
            offset = f.tell()
            bytes_over_padding = offset % 2880
            if bytes_over_padding > 0:
                padding = 2880 - bytes_over_padding
                f.write(("\0"*padding).encode("ascii"))

        byte_size = sum([length * type.itemsize for type in data_types])
        # Primary (empty) HDU.
        write("SIMPLE", "T", "file conforms to FITS standard")
        write("BITPIX", 8, "number of bits per data pixel")
        write("NAXIS", 0, "number of array dimensions")
        finish_header()
        # BINTABLE extension: one row whose cells are whole columns.
        write("XTENSION", repr("BINTABLE"), "binary table extension")
        write("BITPIX", 8, "number of bits per data pixel")
        write("NAXIS", 2, "number of array dimensions")
        write("NAXIS1", byte_size, "length of dim 1")
        write("NAXIS2", 1, "length of dim 2")
        write("PCOUNT", 0, "number of group parameters")
        write("GCOUNT", 1, "number of groups")
        write("TFIELDS", len(column_names), "number of columns")
        for i, (column_name, type, shape) in enumerate(zip(column_names, data_types, data_shapes)):
            i += 1  # 1 based index
            #column = dataset.columns[column_name]
            write("TTYPE%d" % i, repr(str(column_name)), "column name %i" % (i))
            numpy_type_name = type.descr[0][1][1:]  # i4, f8 etc
            if numpy_type_name[0] == 'S':
                # Fixed-width string column: one cell of string_length*length chars.
                string_length = numpy_type_name[1:]
                fits_type = str(int(string_length)*length)+"A"
                logger.debug("type for %s: numpy=%r, fits=%r, string_length=%r length=%r", column_name, numpy_type_name, fits_type, string_length, length)
                # TODO: support rank1 arrays
                write("TFORM%d" % i , repr("{type}".format(type=fits_type)), "")
                write("TDIM%d" % i, repr("({string_length},{length})".format(string_length=string_length, length=length)), "")
            else:
                fits_type = astropy.io.fits.column.NUMPY2FITS[numpy_type_name]
                logger.debug("type for %s: numpy=%r, fits=%r", column_name, numpy_type_name, fits_type)
                # TODO: support rank1 arrays
                write("TFORM%d" % i , repr("{length}{type}".format(length=length, type=fits_type)), "")
                write("TDIM%d" % i, repr("(1,{length})".format(length=length)), "")
            ucd = ucds[i-1]
            if ucd:
                write("TUCD%d" % i, repr(str(ucd)))
            unit = units[i-1]
            if unit:
                write("TUNIT%d" % i, repr(str(unit)))
            if column_name in null_values:
                write("TNULL%d" % i, str(null_values[column_name]))
        finish_header()
        # Reserve space for each column's data without writing it.
        # NOTE(review): seek() alone does not extend the file; the trailing
        # padding written by finish_data() does — but if the total data size
        # is an exact multiple of 2880 bytes nothing is written and the file
        # ends short. Confirm callers always fill the columns afterwards.
        for i, (column_name, type, shape) in enumerate(zip(column_names, data_types, data_shapes)):
            byte_size = length * type.itemsize
            f.seek(f.tell() + byte_size)
        finish_data()
def write_colfits(dataset, path, selection=False):
    """Write *dataset* to *path* as a column-oriented ("colfits") FITS file.

    Each column is stored as one contiguous BINTABLE cell (``TDIM (1, N)``)
    and converted to big-endian, so columns can be memory-mapped later.

    NOTE(review): unlike ``empty()`` above, this writes header strings
    without ``.encode()`` into a binary file and omits the PCOUNT/GCOUNT
    cards — it looks like the older, unported twin of ``empty()``; confirm
    it still works on the targeted Python version.
    """
    with open(path, "wb") as f:
        class Scope(object):
            pass
        vars = Scope()
        #vars.
        def write(key, value, comment=""):
            # One fixed-width 80-character FITS header card.
            f.write("{key:8}= {value:20} / {comment:47}".format(key=key, value=value, comment=comment))
            print(("at pos", f.tell()))
        def finish_header():
            # END card, then pad with spaces to the 2880-byte FITS record size.
            print((f.write("{end:80}".format(end="END"))))
            offset = f.tell()
            bytes_over_padding = offset % 2880
            print(("bytes_over_padding", bytes_over_padding))
            if bytes_over_padding > 0:
                padding = 2880 - bytes_over_padding
                f.write(" "*padding)
        def finish_data():
            # Data units pad with NUL bytes to the 2880-byte boundary.
            offset = f.tell()
            bytes_over_padding = offset % 2880
            if bytes_over_padding > 0:
                padding = 2880 - bytes_over_padding
                f.write("\0"*padding)
        # Primary (empty) HDU, then the BINTABLE extension header.
        write("SIMPLE", "T", "file conforms to FITS standard")
        write("BITPIX", 8, "number of bits per data pixel")
        write("NAXIS", 0, "number of array dimensions")
        finish_header()
        write("XTENSION", repr("BINTABLE"), "binary table extension")
        write("BITPIX", 8, "number of bits per data pixel")
        write("NAXIS", 2, "number of array dimensions")
        write("NAXIS1", dataset.byte_size(selection=selection), "length of dim 1")
        write("NAXIS2", 1, "length of dim 2")
        write("TFIELDS", len(dataset.column_names), "number of columns")
        for i, column_name in enumerate(dataset.column_names):
            i += 1  # 1 based index
            column = dataset.columns[column_name]
            write("TTYPE%d" % i, repr(str(column_name)), "column name %i" % (i))
            numpy_type_name = column.dtype.descr[0][1][1:]  # i4, f8 etc
            fits_type = astropy.io.fits.column.NUMPY2FITS[numpy_type_name]
            # TODO: support rank1 arrays
            write("TFORM%d" % i , repr("{length}{type}".format(length=len(dataset), type=fits_type)), "")
            write("TDIM%d" % i, repr("(1,{length})".format(length=len(dataset))), "")
        finish_header()
        # Write the column data itself, chunked, big-endian as FITS requires.
        for i, column_name in enumerate(dataset.column_names):
            column = dataset.columns[column_name]
            numpy_type_name = column.dtype.descr[0][1][1:]  # i4, f8 etc
            fits_type = astropy.io.fits.column.NUMPY2FITS[numpy_type_name]
            chunk_size = 1024**2  # 1 meg at a time
            chunks = int(math.ceil(len(dataset)/float(chunk_size)))
            # NOTE(review): this loop reuses ``i``, shadowing the enumerate
            # index above — harmless here but confusing.
            for i in range(chunks):
                i1 = i * chunk_size
                i2 = min(len(dataset), (i+1) * chunk_size)
                data_big_endian = column[i1:i2].astype(">" + numpy_type_name)
                f.write(data_big_endian)
            print((f.tell(), f.tell() / 1024**2, "mb", len(dataset)))
            assert i2 == len(dataset)
        finish_data()
if __name__ == "__main__":
input = sys.argv[1]
output = sys.argv[2]
dataset_in = vaex.dataset.load_file(input)
write_colfits(dataset_in, output) | {
"repo_name": "maartenbreddels/vaex",
"path": "packages/vaex-core/vaex/file/colfits.py",
"copies": "1",
"size": "6157",
"license": "mit",
"hash": 6014363751789437000,
"line_mean": 37.248447205,
"line_max": 142,
"alpha_frac": 0.6495046289,
"autogenerated": false,
"ratio": 2.8757589911256423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8877335216630831,
"avg_score": 0.02958568067896225,
"num_lines": 161
} |
__author__ = 'breddels'
from ctypes import *
import h5py
import sys
import numpy as np
import mmap
import vaex
import vaex.vaexfast
import timeit
import threading
# Benchmark reading one float64 column both via plain file reads and via mmap.
# Usage: vaex_benchmark_mmap.py <hdf5-file> <column-path>   (Python 2 script)
filename = sys.argv[1]
h5file = h5py.File(filename, "r")
column = h5file[sys.argv[2]]
length = len(column)
assert column.dtype == np.float64
# Byte offset of the column's contiguous data inside the HDF5 file; this lets
# us read the raw floats without going through h5py.
offset = column.id.get_offset()
h5file.close()

file = open(filename)  # NOTE: shadows the Python 2 builtin ``file``
fileno = file.fileno()
dtype = np.float64
mapping = mmap.mmap(fileno, 0, prot=mmap.PROT_READ)
mmapped_array = np.frombuffer(mapping, dtype=dtype, count=length, offset=offset)
N = 3  # timeit ``number=`` repetitions per measurement
byte_size = length * 8
thread_local = threading.local()

import ctypes.util
libc = cdll.LoadLibrary(ctypes.util.find_library('c'))
# Leftover experiments calling libc read routines directly:
#fopen = libc.fopen
#fread = libc.fread
#cread = libc.read
#fread.argtypes = [c_void_p, c_size_t, c_size_t, c_void_p]
#fdopen = libc.fdopen
#fh = fdopen(fileno, "r")
def sum_read_part(i1, i2):
    """Sum column elements [i1, i2) by reading the bytes from disk.

    Runs on a worker thread; per-thread buffers and file handles live on
    ``thread_local`` so threads do not contend on a shared file position.
    """
    if not hasattr(thread_local, "buffer"):
        #thread_local.buffer = np.zeros((i2-i1), dtype=np.float64)
        thread_local.c_buffer = ctypes.create_string_buffer((i2-i1)*8)
        thread_local.buffer = np.frombuffer(thread_local.c_buffer)
        # opening the file for each thread avoids having mutexes slow us down in the c code
        #thread_local.file = open(filename)
        #thread_local.fileno = thread_local.file.fileno()
        thread_local.file = open(filename, "r", 1)
        #thread_local.fileno = thread_local.file.fileno()
        #print "creating buffer"
    c_buffer = thread_local.c_buffer
    buffer = thread_local.buffer
    buffer = buffer[:i2-i1] # clip it if needed
    thread_local.file.seek(offset+i1)
    # NOTE(review): np.fromfile with count=i2-i1+1 reads one element more than
    # the slice, and seek(offset+i1) treats i1 as a byte offset while the
    # caller subdivides in elements — confirm intended; this is a throwaway
    # benchmark, so the sums are not checked for correctness.
    buffer = np.fromfile(thread_local.file, count=i2-i1+1)
    #thread_local.read()
    #fread(c_buffer, 8, (i2-i1), fh)
    #libc.read(thread_local.fileno, c_buffer, (i2-i1)*8)
    #data = file.read((i2-i1)*8)
    #buffer = np.fromstring(data, dtype=np.float64)
    return np.sum(buffer)
    #return vaex.vaexfast.sum(mmapped_array[i1:i2])
import concurrent.futures
def sum_read():
    # Fan the read out over 9 workers in 2e5-element chunks, combine sums.
    total = sum([future.result() for future in vaex.utils.submit_subdivide(9, sum_read_part, length, int(2e5))])
    return total
#for i in range(3):
#    print sum_read()
# NOTE(review): the label says "read mmap" but this section benchmarks plain
# file reads (sum_read), not the mmap path below.
print "benchmarking read mmap", sum_read()
expr = "sum_read()"
print sum_read()
times = timeit.repeat(expr, setup="from __main__ import sum_read", repeat=5, number=N)
print "minimum time", min(times)/N
# GiB/s over the whole column for each repetition; report the best run.
bandwidth = [byte_size/1024.**3/(time/N) for time in times]
print "%f GiB/s" % max(bandwidth)
def sum_mmap_part(i1, i2):
    # Sum a slice of the memory-mapped array; runs on pool workers.
    return np.sum(mmapped_array[i1:i2])
    #return vaex.vaexfast.sum(mmapped_array[i1:i2])
import concurrent.futures
def sum_mmap():
    # Fan out over 8 workers in 1e6-element chunks and combine partial sums.
    total = sum([future.result() for future in vaex.utils.submit_subdivide(8, sum_mmap_part, length, int(1e6))])
    return total
print "benchmarking sum mmap", sum_mmap(), sum_mmap(), sum_mmap()
expr = "sum_mmap()"
print sum_mmap()
times = timeit.repeat(expr, setup="from __main__ import sum_mmap", repeat=5, number=N)
print "minimum time", min(times)/N
# GiB/s over the whole column for each repetition; report the best run.
bandwidth = [byte_size/1024.**3/(time/N) for time in times]
print "%f GiB/s" % max(bandwidth)
| {
"repo_name": "maartenbreddels/vaex",
"path": "bin/vaex_benchmark_mmap.py",
"copies": "1",
"size": "2992",
"license": "mit",
"hash": 1518520504967281400,
"line_mean": 26.2,
"line_max": 109,
"alpha_frac": 0.7072192513,
"autogenerated": false,
"ratio": 2.676207513416816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8747835120020888,
"avg_score": 0.027118328939185744,
"num_lines": 110
} |
__author__ = 'breddels'
import javaobj
import sys
import io
if __name__ == "__main__":
import logging
javaobj._log.setLevel(logging.DEBUG)
jobj = file(sys.argv[1]).read()[16 + 5:]
print((repr(jobj[:100])))
pobj, index = javaobj.loads(jobj)
rest = jobj[index:]
import zlib
print((repr(rest[:100])))
datastr = zlib.decompress(rest, -15)
stream = io.StringIO(datastr)
m = javaobj.JavaObjectUnmarshaller(stream)
data = m.readObject()
while len(data) == 2:
print((data[0].classdesc.name))
# datastr = datastr[data[1]:]
x = stream.read(3)
print(("data left", [hex(ord(k)) for k in x]))
stream.seek(-3, 1)
# x = stream.read(10)
# print "data left", [hex(ord(k)) for k in x]
# stream.seek(-10, 1)
# data = javaobj.loads(datastr)
data = m.readObject()
print(data)
if data[0] == "END":
print("end...")
x = stream.read(10)
i
print(("data left", [hex(ord(k)) for k in x]))
print(data)
# print pobj, index
| {
"repo_name": "maartenbreddels/vaex",
"path": "packages/vaex-ui/vaex/ui/gbin.py",
"copies": "1",
"size": "1100",
"license": "mit",
"hash": -4504849823928897000,
"line_mean": 27.2051282051,
"line_max": 58,
"alpha_frac": 0.5354545455,
"autogenerated": false,
"ratio": 3.206997084548105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4242451630048105,
"avg_score": null,
"num_lines": null
} |
__author__ = 'breddels'
import logging
logger = logging.getLogger("vaex.file")

# Registry of opener classes; external code adds entries via register().
opener_classes = []
# Keep a handle on the builtin before this module's open() shadows it.
normal_open = open
def register(cls):
    """Register an opener class (must provide can_open()/open())."""
    opener_classes.append(cls)
import vaex.file.other
try:
    import vaex.hdf5 as hdf5
except ImportError:
    # hdf5 support is optional.
    hdf5 = None
if hdf5:
    import vaex.hdf5.dataset
def can_open(path, *args, **kwargs):
    """Return True when some registered dataset class can open *path*.

    NOTE(review): unlike :func:`open`, this only consults
    ``vaex.file.other.dataset_type_map`` — neither ``opener_classes`` nor the
    hdf5 openers are checked; confirm whether that is intentional.
    """
    for name, class_ in list(vaex.file.other.dataset_type_map.items()):
        if class_.can_open(path, *args):
            return True
    # Previously fell off the end returning None; make the negative result
    # explicit (None and False are equivalent for callers testing truthiness).
    return False
def open(path, *args, **kwargs):
    """Open *path* as a vaex DataFrame, trying every registered opener.

    Registered ``opener_classes`` get first chance; otherwise the hdf5 and
    generic dataset type maps are probed in order.

    :return: a ``vaex.dataframe.DataFrameLocal``, or ``None`` when no opener
        recognises the path. (The original fell off the end with
        ``return dataset`` on an unassigned local, raising ``NameError``.)
    """
    for opener in opener_classes:
        if opener.can_open(path, *args, **kwargs):
            return opener.open(path, *args, **kwargs)
    openers = []
    if hdf5:
        openers.extend(hdf5.dataset.dataset_type_map.items())
    openers.extend(vaex.file.other.dataset_type_map.items())
    for name, class_ in list(openers):
        logger.debug("trying %r with class %r" % (path, class_))
        if class_.can_open(path, *args, **kwargs):
            logger.debug("can open!")
            dataset = class_(path, *args, **kwargs)
            # NOTE(review): vaex.dataframe is not imported in this module's
            # visible imports — presumably provided via the vaex package.
            return vaex.dataframe.DataFrameLocal(dataset)
    return None
def dup(file):
    """Duplicate a file like object, s3 or cached file supported"""
    # NOTE(review): vaex.file.cache / vaex.file.s3 / vaex.file.gcs are not
    # imported in this module's visible imports — presumably imported
    # elsewhere in the package; confirm, otherwise this raises AttributeError.
    if isinstance(file, vaex.file.cache.CachedFile):
        return file.dup()
    elif vaex.file.s3.s3fs is not None and isinstance(file, vaex.file.s3.s3fs.core.S3File):
        return vaex.file.s3.dup(file)
    elif vaex.file.gcs.gcsfs is not None and isinstance(file, vaex.file.gcs.gcsfs.core.GCSFile):
        return vaex.file.gcs.dup(file)
    else:
        # Plain filesystem file: reopen by name with the original builtin open
        # (this module shadows ``open``).
        return normal_open(file.name, file.mode)
| {
"repo_name": "maartenbreddels/vaex",
"path": "packages/vaex-core/vaex/file/__init__.py",
"copies": "1",
"size": "1718",
"license": "mit",
"hash": 5187195840335908000,
"line_mean": 30.2363636364,
"line_max": 96,
"alpha_frac": 0.6426076834,
"autogenerated": false,
"ratio": 3.2476370510396975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4390244734439697,
"avg_score": null,
"num_lines": null
} |
__author__ = 'breddels'
"""
Demonstrates combining Qt and tornado, both which want to have their own event loop.
The solution is to run tornado in a thread, the issue is that callbacks will then also be executed in this thread, and Qt doesn't like that.
To fix this, I show how to execute the callback in the main thread, using a Qt signal/event in combination with Promises.
The output of the program is:
fetch page, we are in thread <_MainThread(MainThread, started 47200787479520)>
response is 191548 bytes, we are in thread <Thread(Thread-1, started daemon 47201018689280)>
the other thread should fulfil the result to this promise, we are in thread <Thread(Thread-1, started daemon 47201018689280)>
we received a promise, let us fulfill it, and are in thread <_MainThread(MainThread, started 47200787479520)>
let us set the background to black, we are in thread <_MainThread(MainThread, started 47200787479520)>
The magic happens in this line:
.then(self.move_to_gui_thread)
Without it, you'll see something like this:
fetch page, we are in thread <_MainThread(MainThread, started 47822588292064)>
response is 191604 bytes, we are in thread <Thread(Thread-1, started daemon 47822819497728)>
let us set the background to black, we are in thread <Thread(Thread-1, started daemon 47822819497728)>
QPixmap: It is not safe to use pixmaps outside the GUI thread
"""
from aplus import Promise # https://github.com/xogeny/aplus
import threading
import tornado
from tornado.httpclient import AsyncHTTPClient
from PyQt4 import QtGui
from PyQt4 import QtCore
import sys
# tornado works with futures, this wraps it in a promise
# tornado works with futures; this adapts one into an aplus Promise.
def wrap_future_with_promise(future):
    """Return a Promise that settles when *future* completes.

    The promise is rejected with the future's exception if one was raised,
    otherwise fulfilled with the future's result.
    """
    promise = Promise()

    def on_done(completed):
        error = completed.exception()
        if error:
            promise.reject(error)
        else:
            promise.fulfill(completed.result())

    future.add_done_callback(on_done)
    return promise
class Window(QtGui.QMainWindow):
    """Main window: one button that fetches a page via tornado and then
    recolors the window — the recoloring forced back onto the GUI thread."""

    # Emitted from the tornado thread; the connected slot runs in the GUI
    # thread and fulfills the promise there (the thread hand-off trick).
    signal_promise = QtCore.pyqtSignal(object, object)

    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.button = QtGui.QPushButton("Async fetch using tornado", self)
        self.button.resize(self.button.sizeHint())
        self.button.clicked.connect(self.on_click)
        self.signal_promise.connect(self.on_signal_promise)

    def on_click(self, *args):
        # Kick off the async fetch; callbacks run on the tornado thread.
        print "fetch page, we are in thread", threading.currentThread()
        client = AsyncHTTPClient()
        future = client.fetch("http://www.google.com/")
        promise = wrap_future_with_promise(future)
        # without .then(self.move_to_gui_thread), Qt will complain
        promise.then(self.show_output)\
            .then(self.move_to_gui_thread)\
            .then(self.do_gui_stuff)\
            .then(None, self.on_error)

    def move_to_gui_thread(self, value):
        # Runs on the tornado thread: create a promise the GUI thread will
        # fulfill via the queued Qt signal, and chain on it.
        promise = Promise()
        print "the other thread should fulfil the result to this promise, we are in thread", threading.currentThread()
        self.signal_promise.emit(promise, value)
        return promise

    def on_signal_promise(self, promise, value):
        # Qt slot: executes in the GUI thread, completing the hand-off.
        print "we received a promise, let us fulfill it, and are in thread", threading.currentThread()
        promise.fulfill(value)

    def on_error(self, error):
        print "error", error

    def show_output(self, response):
        # Still on the tornado thread here — safe, no Qt calls.
        print "response is", len(response.body), "bytes, we are in thread", threading.currentThread()

    def do_gui_stuff(self, response):
        print "let us set the background to orange, we are in thread", threading.currentThread()
        # this Qt call should only be done from the main thread
        self.setStyleSheet("background-color: orange;")
# Run the tornado IO loop in a separate daemon thread so it does not block
# the Qt event loop started below.
thread = threading.Thread(target=lambda : tornado.ioloop.IOLoop.current().start())
thread.setDaemon(True)
thread.start()

app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
app.exec_()
| {
"repo_name": "maartenbreddels/vaex",
"path": "misc/gist/qt_and_tornado.py",
"copies": "1",
"size": "3697",
"license": "mit",
"hash": 8335357420540310000,
"line_mean": 37.1134020619,
"line_max": 140,
"alpha_frac": 0.7541249662,
"autogenerated": false,
"ratio": 3.333633904418395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9501307768536562,
"avg_score": 0.017290220416366433,
"num_lines": 97
} |
__author__ = 'breddels'
import unittest
import vaex as vx
import vaex.utils
import vaex.image
import numpy as np
# Tiny test images: 2x2 backgrounds are enough for the blend assertions below.
default_size = 2
default_shape = (default_size, default_size)
class TestImage(unittest.TestCase):
    """Smoke tests for vaex.image blending and PIL conversion helpers."""

    def test_blend(self):
        # A fully opaque top layer must completely hide the bottom layer; a
        # semi-transparent black over white must land strictly in between.
        black = vaex.image.background(default_shape, "black")
        white = vaex.image.background(default_shape, "white")
        transparant_black = black * 1
        transparant_black[...,3] = 100
        for mode in vaex.image.modes:
            # Bug fix: self.assert_ is a deprecated alias (removed in
            # Python 3.12); use assertTrue.
            self.assertTrue(np.all(vaex.image.blend([black, white]) == white))
            self.assertTrue(np.all(vaex.image.blend([white, black]) == black))
            grey = vaex.image.blend([white, transparant_black], mode)
            self.assertTrue(np.all(grey[...,0:3] < white[...,0:3]))
            self.assertTrue(np.all(grey[...,0:3] > black[...,0:3]))

    def test_pil(self):
        # Round-trip through PIL; not much to test beyond "does not raise".
        white = vaex.image.background(default_shape, "white")
        im = vaex.image.rgba_2_pil(white)
        raw_data = vaex.image.pil_2_data(im)
        url = vaex.image.rgba_to_url(white)
class TestAttrDict(unittest.TestCase):
    """Tests for vaex.utils.AttrDict attribute-style access."""

    def test_attrdict(self):
        # Attribute assignment and readback; missing keys raise KeyError.
        d = vx.utils.AttrDict()
        d.a = 1
        self.assertEqual(d.a, 1)
        with self.assertRaises(KeyError):
            a = d.doesnotexist
        # Constructor keyword arguments must behave like attribute assignment.
        # (Removed a leftover debug print of d.__dict__.)
        d = vx.utils.AttrDict(a=1)
        self.assertEqual(d.a, 1)
        with self.assertRaises(KeyError):
            a = d.doesnotexist
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"repo_name": "maartenbreddels/vaex",
"path": "packages/vaex-core/vaex/test/misc.py",
"copies": "1",
"size": "1519",
"license": "mit",
"hash": 955847813265572400,
"line_mean": 28.7843137255,
"line_max": 75,
"alpha_frac": 0.5911784068,
"autogenerated": false,
"ratio": 3.273706896551724,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43648853033517243,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Brenda'
from egat.testset import SequentialTestSet
from webdriver_resource import WebDriverResource
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from egat.execution_groups import execution_group
from selenium.webdriver.common.action_chains import ActionChains
@execution_group("test7")
class Test7(SequentialTestSet):
def testStep1(self):
# We can access the configuration parameters from inside any test function.
base_url = self.configuration["base_url"]
port = self.configuration["port"]
@WebDriverResource.decorator
def testStep2(self):
# Test setup step
if self.environment.get('browser', '') == "Chrome":
self.browser = webdriver.Chrome()
elif self.environment.get('browser', '') == "Firefox":
self.browser = webdriver.Firefox()
else:
self.browser = webdriver.Firefox()
self.browser.maximize_window()
self.browser.get("http://www.amazon.com")
time.sleep(5)
# Veirfy that the page is displayed as expected
if self.browser.find_element_by_link_text('Amazon'):
assert(True)
else:
assert(False)
def testStep3(self):
# Verify that user is not signed into the system
all_spans = self.browser.find_elements_by_xpath('//*[@id="nav-signin-text"]')
for span in all_spans:
if "Sign in" in span.text:
span.send_keys(Keys.ALT,Keys.ARROW_LEFT)
assert(True)
else:
assert(False)
self.browser.quit()
def testStep4(self):
# Navigate to the directory and verify text is displayed
variable = self.browser.find_element_by_id('nav-link-shopall')
actions = ActionChains(self.browser)
actions.move_to_element(variable)
actions.double_click(variable)
actions.perform()
directory = self.browser.find_element_by_id("siteDirectoryHeading").text
if "EARTH'S BIGGEST SELECTION" in directory:
assert(True)
else:
assert(False)
def testStep5(self):
# Navigate to Trade in Your Electronics
for span in self.browser.find_elements_by_xpath('//*[@id="shopAllLinks"]/tbody/tr/td[2]/div[5]/h2'):
self.browser.find_element_by_link_text('Trade In Your Electronics').click()
for span in self.browser.find_elements_by_class_name('tradein-search-widget-heading'):
if "Find the Items You'd Like to Trade In" in span.text:
self.browser.get_screenshot_as_file('Screenshots/trade_in.png')
assert(True)
else:
assert(False)
def testStep6(self):
# Navigate to Laptops page
self.browser.find_element_by_xpath('//*[@id="center"]/div[5]/div[1]/div[3]/div/a').click()
for span in self.browser.find_elements_by_xpath('//*[@id="ref_541966"]/li[5]/a/span[1]'):
if 'Laptops' in span.text:
span.click()
assert(True)
for span in self.browser.find_elements_by_xpath('//*[@id="s-result-count"]/span/span'):
time.sleep(10)
self.browser.get_screenshot_as_file('Screenshots/laptops.png')
if 'Laptops' in span.text:
assert(True)
else:
assert(False)
else:
assert(False)
def testStep7(self):
# Navigate to the Top Brands page
self.browser.find_element_by_xpath('//*[@id="ref_562215011"]/li[10]/a').click()
self.browser.get_screenshot_as_file('Screenshots/expand_make.png')
time.sleep(5)
for span in self.browser.find_elements_by_xpath('//*[@id="ref_2528832011"]/li[8]/a/span'):
if 'See more' in span.text:
span.click()
for span in self.browser.find_elements_by_xpath('//*[@id="breadCrumb"]'):
self.browser.get_screenshot_as_file('Screenshots/top_brands.png')
time.sleep(5)
if 'Top Brands' in span.text:
assert(True)
else:
assert(False)
assert(True)
else:
assert(False)
def testStep8(self):
# Select Toshiba and verify that Toshiba is displayed
for span in self.browser.find_elements_by_xpath('//*[@id="ref_2528832011"]/ul[3]/li[5]/a/span[1]'):
if 'Toshiba' in span.text:
assert(True)
span.click()
self.browser.get_screenshot_as_file('Screenshots/toshiba.png')
for span in self.browser.find_elements_by_xpath('//*[@id="s-result-count"]/span/span'):
if 'Toshiba' in span.text:
assert(True)
else:
assert(False)
else:
assert(False)
time.sleep(10)
def testStep9(self):
# Tear down step
self.browser.quit() | {
"repo_name": "scotlowery/egat",
"path": "examples/example_amazon/trade_in.py",
"copies": "2",
"size": "5181",
"license": "mit",
"hash": -2070476349811414500,
"line_mean": 39.8031496063,
"line_max": 108,
"alpha_frac": 0.5651418645,
"autogenerated": false,
"ratio": 3.8664179104477614,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5431559774947761,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Brenda'
from egat.testset import SequentialTestSet
from webdriver_resource import WebDriverResource
from selenium import webdriver
import time
class Test5(SequentialTestSet):
    """Sequential Selenium checks of the jQuery UI "Toggle Class" demo.

    Steps share ``self.driver`` and use fixed ``time.sleep`` waits plus the
    old ``find_element_by_*`` / ``switch_to_frame`` Selenium API.
    """

    def testStep1(self):
        # We can access the configuration parameters from inside any test function.
        base_url = self.configuration["base_url"]
        port = self.configuration["port"]

    @WebDriverResource.decorator
    def testStep2(self):
        # Verifying that the page is loaded and exists by checking for a specific meta content identifier
        self.driver = webdriver.Firefox()
        self.driver.get("http://jqueryui.com")
        if self.driver.find_element_by_css_selector("meta[name='author']"):
            assert(True)
        else:
            assert(False)

    def testStep3(self):
        # Verify the Toggle Class demo at its starting position (inside the
        # demo iframe, the element carries 'newClass').
        self.driver.find_element_by_link_text('Toggle Class').click()
        time.sleep(3)
        self.driver.get_screenshot_as_file('Screenshots/Toggle_class/toggleclass_pagedisplayed.png')
        # NOTE(review): switch_to_frame is deprecated in newer Selenium
        # (use switch_to.frame); kept for the pinned Selenium version.
        self.driver.switch_to_frame(self.driver.find_element_by_css_selector('#content > iframe'))
        if self.driver.find_element_by_class_name('newClass'):
            assert(True)
        else:
            assert(False)

    def testStep4(self):
        # Click the button to toggle the class; expect the jQuery UI styling.
        self.driver.find_element_by_id('button').click()
        time.sleep(3)
        self.driver.get_screenshot_as_file('Screenshots/Toggle_class/toggleclass_toggled.png')
        if self.driver.find_element_by_class_name('ui-corner-all'):
            assert(True)
        else:
            assert(False)

    def testStep5(self):
        # Toggle again; expect the original 'newClass' styling back.
        self.driver.find_element_by_id('button').click()
        time.sleep(3)
        self.driver.get_screenshot_as_file('Screenshots/Toggle_class/toggleclass_toggled_again.png')
        if self.driver.find_element_by_class_name('newClass'):
            assert(True)
        else:
            assert(False)

    def testStep6(self):
        # Tear down: close the browser session.
        self.driver.quit()
"repo_name": "scotlowery/egat",
"path": "examples/example_jqueryui/toggle_class.py",
"copies": "2",
"size": "2093",
"license": "mit",
"hash": -8654669389129581000,
"line_mean": 36.3928571429,
"line_max": 105,
"alpha_frac": 0.65169613,
"autogenerated": false,
"ratio": 3.883116883116883,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023478494414099086,
"num_lines": 56
} |
__author__ = 'Brenda'
from egat.testset import SequentialTestSet
from webdriver_resource import WebDriverResource
from selenium import webdriver
class Test6(SequentialTestSet):
    """Sequential Selenium checks of the jQuery UI "Selectmenu" demo.

    Verifies the default values of the three select menus and that changing
    each selection updates the displayed text. Uses hard-coded ui-id XPaths,
    so it is fragile against demo-markup changes.
    """

    def testStep1(self):
        # We can access the configuration parameters from inside any test function.
        base_url = self.configuration["base_url"]
        port = self.configuration["port"]

    @WebDriverResource.decorator
    def testStep2(self):
        # Verifying that the page is loaded and exists by checking for a specific meta content identifier
        self.browser = webdriver.Firefox()
        self.browser.get("http://jqueryui.com")
        if self.browser.find_element_by_css_selector("meta[name='author']"):
            assert(True)
        else:
            assert(False)

    def testStep3(self):
        # "Select a Speed" menu defaults to Medium (inside the demo iframe).
        self.browser.find_element_by_link_text('Selectmenu').click()
        self.browser.get_screenshot_as_file('Screenshots/Selectmenu/selectmenu_pagedisplayed.png')
        self.browser.switch_to_frame(self.browser.find_element_by_css_selector('#content > iframe'))
        if self.browser.find_element_by_class_name('ui-selectmenu-text').text == "Medium":
            assert(True)
        else:
            assert(False)

    def testStep4(self):
        # Changing Select a Speed from Medium to Slow
        self.browser.find_element_by_class_name('ui-selectmenu-text').click()
        self.browser.find_element_by_xpath('//*[@id="ui-id-2"]').click()
        if self.browser.find_element_by_class_name('ui-selectmenu-text').text == "Slow":
            assert(True)
            self.browser.get_screenshot_as_file('Screenshots/Selectmenu/speed_change.png')
        else:
            assert(False)

    def testStep5(self):
        # "Select a File" menu defaults to jQuery.js.
        if self.browser.find_element_by_xpath('//*[@id="files-button"]/span[2]').text == "jQuery.js":
            assert(True)
        else:
            assert(False)

    def testStep6(self):
        # Changing the Select a File from jQuery.js to Some unknown file
        self.browser.find_element_by_xpath('//*[@id="files-button"]/span[2]').click()
        self.browser.find_element_by_xpath('//*[@id="ui-id-8"]').click()
        if self.browser.find_element_by_xpath('//*[@id="files-button"]/span[2]').text == "Some unknown file":
            assert(True)
            self.browser.get_screenshot_as_file('Screenshots/Selectmenu/file_change.png')
        else:
            assert(False)

    def testStep7(self):
        # "Select a number" menu defaults to 2.
        if self.browser.find_element_by_xpath('//*[@id="number-button"]/span[2]').text == "2":
            assert(True)
        else:
            assert(False)

    def testStep8(self):
        # changing the Select a number from 2 to 6
        self.browser.find_element_by_xpath('//*[@id="number-button"]/span[2]').click()
        self.browser.find_element_by_xpath('//*[@id="ui-id-15"]').click()
        if self.browser.find_element_by_xpath('//*[@id="number-button"]/span[2]').text == '6':
            assert(True)
            self.browser.get_screenshot_as_file('Screenshots/Selectmenu/number_change.png')
        else:
            assert(False)

    def testStep9(self):
        # Tear down: close the browser session.
        self.browser.quit()
"repo_name": "egineering-llc/egat",
"path": "examples/example_jqueryui/selectmenu.py",
"copies": "2",
"size": "3332",
"license": "mit",
"hash": 8634309000428778000,
"line_mean": 41.7307692308,
"line_max": 109,
"alpha_frac": 0.6278511405,
"autogenerated": false,
"ratio": 3.677704194260486,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5305555334760486,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import endpoints
from protorpc import message_types
from protorpc import messages
from google.appengine.ext import ndb
DEBUG = True  # toggles test vs. live third-party credentials (see get_stripe_api_key)
# Request schema: authenticate an existing user with email + password.
USER_AUTH_RC = endpoints.ResourceContainer(message_types.VoidMessage,
                                           email=messages.StringField(1, required=True),
                                           password=messages.StringField(2, required=True))
# Request schema: authenticated queries -- token-based auth plus optional
# pagination cursor and AdmissionsOfficer filters.
USER_RC = endpoints.ResourceContainer(message_types.VoidMessage,
                                      user_id=messages.IntegerField(1, required=True),
                                      user_token=messages.StringField(2, required=True),
                                      last_cursor=messages.StringField(3, required=False),
                                      school_type=messages.StringField(4, required=False),
                                      college_rank=messages.StringField(5, required=False))
# Request schema: free-trial signup form fields (all required).
USER_NEW_RC = endpoints.ResourceContainer(message_types.VoidMessage,
                                          email=messages.StringField(1, required=True),
                                          first_name=messages.StringField(2, required=True),
                                          last_name=messages.StringField(3, required=True),
                                          phone=messages.StringField(4, required=True),
                                          school_type=messages.StringField(5, required=True))
def get_stripe_api_key():
    """Return the Stripe secret API key for the current environment.

    The module-level DEBUG flag selects the test key; otherwise the live
    key is used.  NOTE(review): secret keys are hard-coded in source --
    consider moving them to environment variables or a secrets store.
    """
    return "sk_test_5sR4GHddXZ1EK8kQ98t5Heuw" if DEBUG else "sk_live_HYGtqSOL9p66j235jkMAofVY"
def get_mail_username():
    """Return the SendGrid account username used for outbound mail.

    NOTE(review): credential is hard-coded in source -- consider moving it
    to configuration.
    """
    username = "yury191"
    return username
def get_mail_pass():
return "Grodno123" | {
"repo_name": "boneil3/hyperAdmit",
"path": "backend/utils.py",
"copies": "2",
"size": "1692",
"license": "mit",
"hash": -7233083410390446000,
"line_mean": 43.5526315789,
"line_max": 93,
"alpha_frac": 0.5561465721,
"autogenerated": false,
"ratio": 4.524064171122995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6080210743222996,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import Quandl
import pandas as pd
#secs = ['EURUSD', 'GBPUSD', 'EURGBP', 'AUDUSD', 'USDMXN', 'USDINR', 'USDBRL', 'USDCAD', 'USDZAR']
#datas = []
#for i, sec in enumerate(secs):
# data = pd.DataFrame(Quandl.get('CURRFX/' + sec, authtoken='ZoAeCkDnkL4oFQs1z2_u')['Rate'])
# data = data.loc['2000-01-03':]
# data.columns = [sec]
# print(data)
# if i == 0:
# ret_data = data
# continue
# ret_data = ret_data.join(data, how='left')
#ret_data.to_csv('CCY_2000.csv')
# Quandl API token shared by all requests below.
# NOTE(review): secret token is hard-coded in source -- consider an env var.
authtoken = 'ZoAeCkDnkL4oFQs1z2_u'
'''da = []
data_names = ['US_PCE', 'UK_CPI', 'EUR_CPI', 'JP_CPI', 'US_GDP_Q', 'UK_GDP_Q', 'EUR_GDP_Q', 'JP_GDP_A', 'US_CP',
'US_ER', 'UK_ER', 'ITA_ER', 'FRA_ER', 'GER_ER', 'JP_ER', 'US_M2', 'UK_M2', 'GER_M2', 'ITA_M2',
'FRA_M2', 'JP_M2', 'US_CA', 'UK_CA', 'GER_CA', 'ITA_CA', 'FRA_CA', 'JP_CA', 'US_LF', 'UK_LF', 'GER_LF',
'ITA_LF', 'FRA_LF', 'JP_LF', 'US_POP', 'UK_POP', 'GER_POP', 'ITA_POP', 'FRA_POP', 'JP_POP', 'US_POP_65',
'UK_POP_65', 'GER_POP_65', 'ITA_POP_65', 'FRA_POP_65', 'JP_POP_65']
# Inflation Monthly
US_PCE = pd.DataFrame(Quandl.get('FRED/PCETRIM6M680SFRBDAL', authtoken=authtoken))
UK_CPI = pd.DataFrame(Quandl.get('UKONS/MM23_D7G7_M', authtoken=authtoken))
EUR_CPI = pd.DataFrame(Quandl.get('RATEINF/INFLATION_EUR', authtoken=authtoken))
JP_CPI = pd.DataFrame(Quandl.get('RATEINF/INFLATION_JPN', authtoken=authtoken))
da.extend([US_PCE, UK_CPI, EUR_CPI, JP_CPI])
# GDP Aggregate
US_GDP_Q = pd.DataFrame(Quandl.get('FRED/GDP', authtoken=authtoken))
UK_GDP_Q = pd.DataFrame(Quandl.get('UKONS/QNA_BKTL_Q', authtoken=authtoken))
EUR_GDP_Q = pd.DataFrame(Quandl.get('ECB/RTD_Q_S0_S_G_GDPM_TO_U_E', authtoken=authtoken))
JP_GDP_A = pd.DataFrame(Quandl.get('ODA/JPN_NGDP', authtoken=authtoken)) # Annual
da.extend([US_GDP_Q, UK_GDP_Q, EUR_GDP_Q, JP_GDP_A])
# Wage growth - YoY CANNOT FIND
# Corporate Profits Unadjusted
US_CP = pd.DataFrame(Quandl.get('FRED/CP', authtoken=authtoken))
da.append(US_CP)
# Employment ratio
US_ER = pd.DataFrame(Quandl.get('FRED/EMRATIO', authtoken=authtoken))
UK_ER = pd.DataFrame(Quandl.get('FRED/GBREPRNA', authtoken=authtoken))
ITALY_ER = pd.DataFrame(Quandl.get('FRED/ITAEPRNA', authtoken=authtoken))
FRANCE_ER = pd.DataFrame(Quandl.get('FRED/FRAEPRNA', authtoken=authtoken))
GER_ER = pd.DataFrame(Quandl.get('FRED/DEUEPRNA', authtoken=authtoken))
JAPAN_ER = pd.DataFrame(Quandl.get('FRED/JPNEPRNA', authtoken=authtoken))
da.extend([US_ER, UK_ER, ITALY_ER, FRANCE_ER, GER_ER, JAPAN_ER])
# M2
US_M2 = pd.DataFrame(Quandl.get('WORLDBANK/USA_FM_LBL_MQMY_GD_ZS', authtoken=authtoken))
UK_M2 = pd.DataFrame(Quandl.get('WORLDBANK/GBR_FM_LBL_MQMY_GD_ZS', authtoken=authtoken))
GER_M2 = pd.DataFrame(Quandl.get('WORLDBANK/DEU_FM_LBL_MQMY_GD_ZS', authtoken=authtoken))
ITA_M2 = pd.DataFrame(Quandl.get('WORLDBANK/ITA_FM_LBL_MQMY_GD_ZS', authtoken=authtoken))
FRA_M2 = pd.DataFrame(Quandl.get('WORLDBANK/FRA_FM_LBL_MQMY_GD_ZS', authtoken=authtoken))
JP_M2 = pd.DataFrame(Quandl.get('WORLDBANK/JPN_FM_LBL_MQMY_GD_ZS', authtoken=authtoken))
da.extend([US_M2, UK_M2, GER_M2, ITA_M2, FRA_M2, JP_M2])
# Current Account as % of GDP
US_CA = pd.DataFrame(Quandl.get('ODA/USA_BCA_NGDPD', authtoken=authtoken))
UK_CA = pd.DataFrame(Quandl.get('ODA/GBR_BCA_NGDPD', authtoken=authtoken))
GER_CA = pd.DataFrame(Quandl.get('ODA/DEU_BCA_NGDPD', authtoken=authtoken))
ITA_CA = pd.DataFrame(Quandl.get('ODA/ITA_BCA_NGDPD', authtoken=authtoken))
FRA_CA = pd.DataFrame(Quandl.get('ODA/FRA_BCA_NGDPD', authtoken=authtoken))
JP_CA = pd.DataFrame(Quandl.get('ODA/JPN_BCA_NGDPD', authtoken=authtoken))
da.extend([US_CA, UK_CA, GER_CA, ITA_CA, FRA_CA, JP_CA])
# Labor Force
US_LF = pd.DataFrame(Quandl.get('ODA/USA_LE', authtoken=authtoken))
UK_LF = pd.DataFrame(Quandl.get('ODA/GBR_LE', authtoken=authtoken))
GER_LF = pd.DataFrame(Quandl.get('ODA/DEU_LE', authtoken=authtoken))
ITA_LF = pd.DataFrame(Quandl.get('ODA/ITA_LE', authtoken=authtoken))
FRA_LF = pd.DataFrame(Quandl.get('ODA/FRA_LE', authtoken=authtoken))
JP_LF = pd.DataFrame(Quandl.get('ODA/JPN_LE', authtoken=authtoken))
da.extend([US_LF, UK_LF, GER_LF, ITA_LF, FRA_LF, JP_LF])
# Population
US_POP = pd.DataFrame(Quandl.get('ODA/USA_LP', authtoken=authtoken))
UK_POP = pd.DataFrame(Quandl.get('ODA/USA_LP', authtoken=authtoken))
GER_POP = pd.DataFrame(Quandl.get('ODA/USA_LP', authtoken=authtoken))
ITA_POP = pd.DataFrame(Quandl.get('ODA/USA_LP', authtoken=authtoken))
FRA_POP = pd.DataFrame(Quandl.get('ODA/USA_LP', authtoken=authtoken))
JP_POP = pd.DataFrame(Quandl.get('ODA/USA_LP', authtoken=authtoken))
da.extend([US_POP, UK_POP, GER_POP, ITA_POP, FRA_POP, JP_POP])
US_POP_65 = pd.DataFrame(Quandl.get('WORLDBANK/USA_SP_POP_65UP_TO_ZS', authtoken=authtoken))
UK_POP_65 = pd.DataFrame(Quandl.get('WORLDBANK/GBR_SP_POP_65UP_TO_ZS', authtoken=authtoken))
GER_POP_65 = pd.DataFrame(Quandl.get('WORLDBANK/DEU_SP_POP_65UP_TO_ZS', authtoken=authtoken))
ITA_POP_65 = pd.DataFrame(Quandl.get('WORLDBANK/ITA_SP_POP_65UP_TO_ZS', authtoken=authtoken))
FRA_POP_65 = pd.DataFrame(Quandl.get('WORLDBANK/FRA_SP_POP_65UP_TO_ZS', authtoken=authtoken))
JP_POP_65 = pd.DataFrame(Quandl.get('WORLDBANK/JPN_SP_POP_65UP_TO_ZS', authtoken=authtoken))
da.extend([US_POP_65, UK_POP_65, GER_POP_65, ITA_POP_65, FRA_POP_65, JP_POP_65])
data = da[0]
data.columns = ['US_PCE']
for i, df in enumerate(da):
if i != 0:
df.columns = [data_names[i]]
data = data.join(df, how='outer')
print(data)
data.to_csv('MACRO_DATA.csv')
'''
# Daily price series pulled from Quandl: equity index levels per country plus
# crude oil and Treasury futures.  Each is wrapped in a one-column DataFrame.
US_EQ = pd.DataFrame(Quandl.get('CHRIS/CME_ES1', authtoken=authtoken)['Settle'])  # S&P 500 e-mini, front month settle
UK_EQ = pd.DataFrame(Quandl.get('YAHOO/INDEX_FTSE', authtoken=authtoken)['Close'])
GER_EQ = pd.DataFrame(Quandl.get('YAHOO/INDEX_GDAXI', authtoken=authtoken)['Close'])
ITA_EQ = pd.DataFrame(Quandl.get('YAHOO/INDEX_FTSEMIB_MI', authtoken=authtoken)['Close'])
FRA_EQ = pd.DataFrame(Quandl.get('YAHOO/INDEX_FCHI', authtoken=authtoken)['Close'])
JP_EQ = pd.DataFrame(Quandl.get('NIKKEI/INDEX', authtoken=authtoken)['Close Price'])
CL = pd.DataFrame(Quandl.get('CHRIS/CME_CL1', authtoken=authtoken)['Settle'])  # WTI crude, front month
TY = pd.DataFrame(Quandl.get('CHRIS/CME_TY1', authtoken=authtoken)['Settle'])  # 10y Treasury note future
TU = pd.DataFrame(Quandl.get('CHRIS/CME_TU1', authtoken=authtoken)['Settle'])  # 2y Treasury note future
| {
"repo_name": "boneil3/backtest",
"path": "data.py",
"copies": "1",
"size": "6251",
"license": "mit",
"hash": 8071876989895362000,
"line_mean": 56.3486238532,
"line_max": 118,
"alpha_frac": 0.6938089906,
"autogenerated": false,
"ratio": 2.1971880492091387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.836876519818539,
"avg_score": 0.004446368324749786,
"num_lines": 109
} |
__author__ = 'Brendan'
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
from endpoints_proto_datastore.ndb.model import EndpointsModel
from endpoints_proto_datastore.ndb.properties import EndpointsAliasProperty
from endpoints_proto_datastore.ndb import EndpointsDateTimeProperty
from endpoints_proto_datastore.ndb import utils
from google.appengine.ext import ndb
import endpoints
import webapp2_extras.appengine.auth.models
from webapp2_extras import security
import time
class ModelUtils(object):
    """Mixin customizing ndb ``to_dict`` serialization.

    Strips credential/bookkeeping fields from the wire format and appends
    the entity's key id and creation time as plain strings.
    """
    def to_dict(self):
        # Exclude sensitive and internal fields from the serialized form.
        payload = super(ModelUtils, self).to_dict(
            exclude=['password', 'updated', 'auth_ids', 'email'])
        payload['user_id'] = str(self.key.id())
        payload['userCreatedAt'] = str(self.created)
        return payload
class Appointments(EndpointsModel):
    """A consultation appointment between a requesting user and an officer."""
    start_dt = EndpointsDateTimeProperty(required=True)   # appointment start
    end_dt = EndpointsDateTimeProperty(required=True)     # appointment end
    cancelled = ndb.BooleanProperty(default=False)        # soft-cancel flag; record is kept
    request_user = ndb.KeyProperty(kind='User', required=True)               # who booked
    admission_officer = ndb.KeyProperty(kind='AdmissionsOfficer', required=True)  # who consults
    scheduled = EndpointsDateTimeProperty(auto_now_add=True)  # set once at creation
class FreeUser(ModelUtils, EndpointsModel):
    """ Free Signup User Model

    Lead captured from the free-trial signup form; not an authenticated
    account (contrast with User below).
    """
    # All contact fields are required by the signup endpoint.
    email = ndb.StringProperty(required=True)
    first_name = ndb.StringProperty(required=True)
    last_name = ndb.StringProperty(required=True)
    phone = ndb.StringProperty(required=True)
    school_type = ndb.StringProperty(required=True)
class User(ModelUtils, EndpointsModel, webapp2_extras.appengine.auth.models.User):
    """ User Base Model

    Authenticated account: combines the webapp2 auth user (password/token
    handling) with Endpoints serialization and the ModelUtils to_dict mixin.
    """
    email = ndb.StringProperty(required=True)
    first_name = ndb.StringProperty(required=True)
    last_name = ndb.StringProperty(required=True)
    phone = ndb.StringProperty(required=True)
    stripeCustId = ndb.StringProperty(default=None)  # Stripe customer id, if billing is set up
    alias = ndb.StringProperty(default=None)         # display alias
    appointments = ndb.KeyProperty(kind='Appointments', default=None, repeated=True)
    def id_setter(self, value):
        # Allocate IDs if DNE
        if value == '' or value is None or value == 'None':
            first, last = User.allocate_ids(2)
            # Bind this entity to a freshly allocated integer key.
            self.UpdateFromKey(ndb.Key('User', int(first)))
        elif not isinstance(value, basestring) and not isinstance(value, int):
            raise endpoints.BadRequestException('ID not string or int')
        else:
            # Normalize string ids ("123") to int so keys are consistent.
            self.UpdateFromKey(ndb.Key('User', int(value)))
    @EndpointsAliasProperty(setter=id_setter, required=True)
    def id(self):
        # String form of the datastore key id; None when the key is unset.
        if self.key is not None:
            return str(self.key.id())
    def set_password(self, raw_password):
        """Sets the password for the current user
        :param raw_password:
            The raw password which will be hashed and stored
        """
        self.password = security.generate_password_hash(raw_password, length=12)
    @classmethod
    def get_by_auth_token(cls, user_id, token, subject='auth'):
        """Returns a user object based on a user ID and token.
        :param user_id:
            The user_id of the requesting user.
        :param token:
            The token string to be verified.
        :returns:
            A tuple ``(User, timestamp)``, with a user object and
            the token timestamp, or ``(None, None)`` if both were not found.
        """
        token_key = cls.token_model.get_key(user_id, subject, token)
        user_key = ndb.Key(cls, user_id)
        # Use get_multi() to save a RPC call.
        valid_token, user = ndb.get_multi([token_key, user_key])
        if valid_token and user:
            timestamp = int(time.mktime(valid_token.created.timetuple()))
            # NOTE(review): when the entity carries a 'force_login' attribute
            # the flag is cleared, persisted, and auth is REJECTED (None, None)
            # -- presumably a forced re-login mechanism; confirm intent.
            if hasattr(user, 'force_login'):
                user.force_login = False
                user.put()
                return None, None
            else:
                return user, timestamp
        return None, None
class University(EndpointsModel):
    """ University Institution Model """
    # NOTE(review): UNIV_TYPES is empty, so choices=UNIV_TYPES will reject
    # every value assigned to 'type' -- the valid types were presumably never
    # filled in; confirm before use.
    UNIV_TYPES = []
    rank = ndb.IntegerProperty(required=True)   # e.g. national ranking position
    name = ndb.StringProperty(required=True)
    type = ndb.StringProperty(required=True, choices=UNIV_TYPES)
    major = ndb.StringProperty(required=False)
class AdmissionsOfficer(EndpointsModel, webapp2_extras.appengine.auth.models.User):
    """ Admissions Officer Model

    Publicly listable consultant profile.  Only the fields named in
    _message_fields_schema are exposed over the Endpoints API.
    """
    _message_fields_schema = ('id', 'verified', 'school', 'school_type', 'location', 'rating', 'alias',
                              'hours_consulted',
                              'last_active', 'knowledge_areas', 'whoami', 'job_title', 'howcanihelp', 'college_rank')
    email = ndb.StringProperty(required=True)
    first_name = ndb.StringProperty(required=True)
    last_name = ndb.StringProperty(required=True)
    phone = ndb.StringProperty(required=True)
    stripeCustId = ndb.StringProperty(default=None)  # Stripe customer id, if billing is set up
    alias = ndb.StringProperty(default=None)         # display alias
    appointments = ndb.KeyProperty(kind='Appointments', default=None, repeated=True)
    verified = ndb.BooleanProperty(default=False)    # profile verification flag
    paymentStuff = ndb.StringProperty(default='')
    school = ndb.StringProperty(default='')
    school_type = ndb.StringProperty(default='')
    location = ndb.StringProperty(default='')
    rating = ndb.FloatProperty(default=5.0)
    hours_consulted = ndb.IntegerProperty(default=0)
    last_active = EndpointsDateTimeProperty(auto_now=True)  # refreshed on every put()
    knowledge_areas = ndb.StringProperty(repeated=True)
    whoami = ndb.StringProperty(default='')
    howcanihelp = ndb.StringProperty(default='')
    job_title = ndb.StringProperty(default='')
    college_rank = ndb.StringProperty(default='Top 40')
    def id_setter(self, value):
        """Alias-property setter: bind this entity to an integer datastore id.

        Allocates a fresh id when none is supplied; otherwise validates and
        reuses the given one.  Raises BadRequestException for non-str/int ids.
        """
        # Allocate IDs if DNE
        if value == '' or value is None or value == 'None':
            first, last = AdmissionsOfficer.allocate_ids(2)
            # Cast to int for consistency with User.id_setter: keys built
            # from str vs int address different entities.
            self.UpdateFromKey(ndb.Key('AdmissionsOfficer', int(first)))
        elif not isinstance(value, basestring) and not isinstance(value, int):
            raise endpoints.BadRequestException('ID not string or int')
        else:
            # Normalize string ids ("123") to int keys, matching User.id_setter.
            self.UpdateFromKey(ndb.Key('AdmissionsOfficer', int(value)))
    @EndpointsAliasProperty(setter=id_setter, required=True)
    def id(self):
        # String form of the datastore key id; None when the key is unset.
        if self.key is not None:
            return str(self.key.id())
class Customer(User):
    """ Client Model

    Applicant account; inherits auth/contact fields from User and adds
    application-specific data.
    """
    gpa = ndb.FloatProperty(required=True)
    tests = ndb.KeyProperty(repeated=True)     # presumably Test entity keys -- confirm
    essays = ndb.StringProperty(repeated=True)
    colleges = ndb.KeyProperty(repeated=True)  # presumably University entity keys -- confirm
    helpMeWith = ndb.StringProperty(repeated=True)
class Test(EndpointsModel):
    """ Test Model """
    # NOTE(review): TEST_CHOICES is empty, so choices=TEST_CHOICES rejects
    # every value for 'type' -- same unfinished pattern as University.UNIV_TYPES.
    TEST_CHOICES = []
    type = ndb.StringProperty(required=True, choices=TEST_CHOICES)
    score = ndb.IntegerProperty(required=False)
| {
"repo_name": "Yury191/hyperAdmit",
"path": "backend/models.py",
"copies": "2",
"size": "6715",
"license": "mit",
"hash": 4853666161414979000,
"line_mean": 36.3055555556,
"line_max": 117,
"alpha_frac": 0.6731198809,
"autogenerated": false,
"ratio": 3.8837478311162523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007690174943273335,
"num_lines": 180
} |
__author__ = 'Brendan'
import sys
sys.path.insert(0, 'lib')
sys.path.insert(0, 'stripe')
from webapp2_extras.auth import InvalidPasswordError, InvalidAuthIdError
from protorpc import remote
from backend.models import AdmissionsOfficer
from backend.models import User
from backend.models import FreeUser
from backend.utils import *
from sendgrid import SendGridClient
from sendgrid import Mail
import stripe
# Cloud Endpoints API root.  Only the API Explorer client id is whitelisted,
# so the API is effectively private until real client ids are added.
centralparkedu = endpoints.api(name='centralparkedu', version='v1',
                               allowed_client_ids=[endpoints.API_EXPLORER_CLIENT_ID],
                               scopes=[endpoints.EMAIL_SCOPE])
@centralparkedu.api_class(resource_name='hyperadmit', path='hyperadmit')
class HyperAdmit(remote.Service):
    """ HyperAdmit API v1 """
    # Creates a FreeUser lead from the signup form fields and echoes it back.
    @endpoints.method(USER_NEW_RC,
                      FreeUser.ProtoModel(),
                      path='freetrialsignup', http_method='POST', name='free_trial_signup')
    def free_trial_signup(self, request):
        new_user = FreeUser(email=request.email, first_name=request.first_name, last_name=request.last_name,
                            phone=request.phone, school_type=request.school_type)
        ret_user_key = new_user.put()
        # Re-fetch through the key so the response reflects what was stored.
        ret_user = ret_user_key.get()
        return FreeUser.ToMessage(ret_user)
    # Sends a fixed test email through SendGrid; the request body is ignored.
    @endpoints.method(path='sendemail', http_method='POST', name='send_email')
    def send_email(self, request):
        # make a secure connection to SendGrid
        sg = SendGridClient(get_mail_username(), get_mail_pass(), secure=True)
        # make a message object
        message = Mail()
        message.set_subject('message subject')
        message.set_html('<strong>HTML message body</strong>')
        message.set_text('plaintext message body')
        message.set_from('from@example.com')
        # add a recipient
        message.add_to('John Doe <yury191@gmail.com>')
        # use the Web API to send your message
        sg.send(message)
        return message_types.VoidMessage()
    @endpoints.method(USER_AUTH_RC,
                      AdmissionsOfficer.ProtoModel(),
                      path='authuser', http_method='POST', name='auth_user')
    def auth_user(self, request):
        try:
            user = User.get_by_auth_password(request.email, request.password)
        except (InvalidPasswordError, InvalidAuthIdError):
            raise endpoints.ForbiddenException('NAW GET OUT')
        token, ts = User.create_auth_token(user.key.id())
        # NOTE(review): the declared response type is AdmissionsOfficer.ProtoModel()
        # but nothing is returned, and the freshly created token/ts are
        # discarded -- this endpoint looks unfinished; confirm intended behavior.
    # Paged listing of AdmissionsOfficer entities, optionally filtered by
    # school_type and/or college_rank; requires a valid user auth token.
    @endpoints.method(USER_RC,
                      AdmissionsOfficer.ProtoCollection(),
                      path='getalladdOffs', http_method='POST', name='get_all_addOffs')
    def get_all_adOffs(self, request):
        user, ts = User.get_by_auth_token(int(request.user_id), request.user_token)
        if user is None:
            raise endpoints.ForbiddenException('User auth failed')
        # Resume pagination from the client-supplied cursor, if any.
        cursor = None
        if request.last_cursor:
            cursor = ndb.Cursor.from_websafe_string(request.last_cursor)
        # Treat missing/empty filter strings as "no filter".
        school_type = None
        if request.school_type and request.school_type != '':
            school_type = request.school_type
        college_rank = None
        if request.college_rank and request.college_rank != '':
            college_rank = request.college_rank
        # Build the query for the filter combination; all variants share the
        # same (-created, key) ordering so cursors stay stable across pages.
        if school_type and college_rank:
            ad_off_query = AdmissionsOfficer.query(AdmissionsOfficer.college_rank == college_rank,
                                                   AdmissionsOfficer.school_type == school_type).order(-AdmissionsOfficer.created,
                                                                                                       AdmissionsOfficer.key)
        elif school_type and not college_rank:
            ad_off_query = AdmissionsOfficer.query(AdmissionsOfficer.school_type == school_type).order(-AdmissionsOfficer.created,
                                                                                                       AdmissionsOfficer.key)
        elif not school_type and college_rank:
            ad_off_query = AdmissionsOfficer.query(AdmissionsOfficer.college_rank == college_rank).order(-AdmissionsOfficer.created,
                                                                                                         AdmissionsOfficer.key)
        else:
            ad_off_query = AdmissionsOfficer.query().order(-AdmissionsOfficer.created, AdmissionsOfficer.key)
        ad_offs, next_cursor, more = ad_off_query.fetch_page(10, start_cursor=cursor)
        ret_ad_off = AdmissionsOfficer.ToMessageCollection(ad_offs, next_cursor=next_cursor)
        return ret_ad_off
    # Ad Off methods
    # Fetch one officer by id; 404 when the id does not resolve to an entity.
    @AdmissionsOfficer.method(request_fields=('id',),
                              path='getadoff/{id}', http_method='GET', name='get_ad_off')
    def get_ad_off(self, ad_off):
        if not ad_off.from_datastore:
            raise endpoints.NotFoundException('MyModel not found.')
        return ad_off
    # Create an officer from minimal contact fields, filling the rest of the
    # profile with placeholder defaults.
    @AdmissionsOfficer.method(request_fields=('email', 'last_name', 'first_name', 'phone'),
                              path='insertadoff', http_method='POST', name='insert_ad_off')
    def insert_ad_off(self, ad_off):
        if ad_off.from_datastore:
            raise endpoints.NotFoundException('BLAH')
        # NOTE(review): hard-coded placeholder profile values -- presumably
        # stand-ins for real onboarding data; confirm before shipping.
        ad_off.school = 'UPenn'
        ad_off.school_type = 'Undergrad'
        ad_off.location = 'NYC'
        ad_off.hours_consulted = 0
        ad_off.knowledge_areas = ['Resume', 'Essays']
        ad_off.whoami = 'Im The BEST!'
        ad_off.howcanihelp = 'Being Awesome'
        ad_off.job_title = 'Admissions Director'
        ad_off.college_rank = 'Top 40'
        ad_off.alias = ad_off.email
        new_ad_off = AdmissionsOfficer(email=ad_off.email, first_name=ad_off.first_name, last_name=ad_off.last_name,
                                       phone=ad_off.phone,
                                       verified=ad_off.verified, school=ad_off.school, school_type=ad_off.school_type,
                                       location=ad_off.location, rating=ad_off.rating,
                                       hours_consulted=ad_off.hours_consulted,
                                       knowledge_areas=ad_off.knowledge_areas, whoami=ad_off.whoami,
                                       job_title=ad_off.job_title, howcanihelp=ad_off.howcanihelp,
                                       college_rank=ad_off.college_rank, alias=ad_off.alias)
        ret_ad_off_key = new_ad_off.put()
        ret_ad_off = ret_ad_off_key.get()
        return ret_ad_off
    # Unauthenticated query-style listing (endpoints-proto-datastore handles
    # the filtering/paging from query_fields).
    @AdmissionsOfficer.query_method(user_required=False,
                                    query_fields=('school_type', 'college_rank', 'limit', 'order', 'pageToken'),
                                    path='getadoffs', name='get_ad_off_list')
    def get_ad_off_list(self, query):
        return query
| {
"repo_name": "Yury191/hyperAdmit",
"path": "backend/endpoint_classes.py",
"copies": "2",
"size": "6823",
"license": "mit",
"hash": -2486036300456606700,
"line_mean": 45.7328767123,
"line_max": 132,
"alpha_frac": 0.5846401876,
"autogenerated": false,
"ratio": 3.811731843575419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5396372031175418,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import helper_functions
import networkx as nx
import matplotlib.pyplot as plt
import datetime
from operator import itemgetter
import numpy as np
import os
proj_cwd = os.path.dirname(os.getcwd())  # parent of the working directory
data_dir = proj_cwd + r'/data'           # root of the court data tree
###############
# BUILD A GRAPH
###############
# Load data from the CSVs
# (trailing args 1, 0 are csv_to_list parameters -- presumably rows/header
# to skip; confirm against helper_functions)
edgelist_data = helper_functions.csv_to_list(data_dir + r'/scotus',
                                             'citations_sublist.csv',
                                             1, 0)
node_data = helper_functions.csv_to_list(data_dir + r'/scotus',
                                         'consolidation.csv',
                                         1, 0)
# Instantiate a directed graph object, D
D = nx.DiGraph()
# Add our nodes to D
for row in node_data:
    # It is really easy to add arbitrary info about each node or edge. For example, here, I load each node with a
    # date, judges and citation_id attribute.
    case_number = int(row[0])
    # BUG FIX: compare strings with == (value equality), not 'is' (identity).
    # The old "row[3] is ''" only worked by accident of CPython's interning
    # of the empty-string literal.
    month, day, year = ['', '', ''] if row[3] == '' else [int(element) for element in row[3].rsplit('/')]
    file_date = '' if month == '' else datetime.date(year=year, month=month, day=day)
    judges = row[4]
    citation_id = '' if row[5] == '' else int(row[5])
    D.add_node(case_number,
               date=file_date,
               judges=judges,
               citation_id=citation_id)
# Add our edges to D
for row in edgelist_data:
    # Parse once and actually use the named values (they were previously
    # assigned as raw strings and then ignored).
    citer = int(row[0])
    cited = int(row[1])
    # Edges point from cited to citer -- so the node with the highest out degree represents the most cited decision
    D.add_edge(cited, citer, random_attribute='random_string')
###############################
# EXPLORE STUFF ABOUT OUR GRAPH
###############################
# NOTE(review): degree()/in_degree() used as dicts and *_iter() methods are
# networkx 1.x APIs; they were removed in networkx 2.x -- pin the version.
print '10 HIGHEST DEGREES:', sorted(nx.degree(D).values(), reverse=True)[:10]  # Gives 10 highest degrees in D
print '10 HIGHEST IN-DEGREES:', sorted(D.in_degree().values(), reverse=True)[:10]  # Gives 10 highest IN-degrees in D
# sorted_node_indegree_tuples is a list of tuples of the form (node, in-degree) of all nodes in D
sorted_node_indegree_tuples = sorted(D.in_degree_iter(), key=itemgetter(1), reverse=True)
# sorted_node_outdegree_tuples is a list of tuples of the form (node, out-degree) of all nodes in D
sorted_node_outdegree_tuples = sorted(D.out_degree_iter(), key=itemgetter(1), reverse=True)
############################
# VISUALIZE ASPECTS OF GRAPH
############################
# Make a subgraph of the network
S = D.copy()  # Create a copy of D
nodes_to_delete = [tup[0] for tup in sorted_node_outdegree_tuples[1000:]]  # Keep only 1000 highest out-degree nodes
for node in nodes_to_delete:
    S.remove_node(node)
nx.draw(S, arrows=True)
plt.show()
# Make a out-degree rank plot of netowrk
degree_sequence = sorted([tup[1] for tup in sorted(D.out_degree_iter(), reverse=True)], reverse=True)  # degree sequence
dmax = max(degree_sequence)
plt.subplot(121)
plt.loglog(degree_sequence, 'b-', marker='o')
plt.title("Degree rank plot")
plt.ylabel("in degree")
plt.xlabel("rank")
# Make a histogram of the out-degrees
plt.subplot(122)
data = degree_sequence
# fixed bin size
bins = np.arange(min(degree_sequence),
                 max(degree_sequence),
                 5)  # fixed bin size
plt.xlim([min(data)-5, max(data)+5])
plt.hist(data, bins=bins, alpha=0.5)
plt.title('In degree histogram')
plt.xlabel('variable X (bin size = 5)')
plt.ylabel('count')
plt.show()
| {
"repo_name": "brschneidE3/LegalNetworks",
"path": "python_code/main.py",
"copies": "1",
"size": "3440",
"license": "mit",
"hash": -8358682998653153000,
"line_mean": 35.5957446809,
"line_max": 120,
"alpha_frac": 0.6220930233,
"autogenerated": false,
"ratio": 3.310875842155919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4432968865455919,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import helper_functions
import os
import csv
import datetime
import matplotlib.pyplot as plt
import networkx as nx
proj_cwd = os.path.dirname(os.getcwd())  # parent of the working directory
data_dir = proj_cwd + r'/data'           # root of the court data tree
def consolidate(court_name):
"""
Given court_name, a string representing a CourtListener court, consolidate will iterate through every file in the
'clusters' and 'opinions' subdirectories, beginning with 'clusters'. For every case, the following information is
grabbed:
-Case number: INT -- note: this is not the same as the citation id
-Cluster file: BOOLEAN True if we have a cluster file for this case
-Opinion file: BOOLEAN True if we have an opinion file for this case
-Date: DATETIME object based on CourtListener's 'file_date'
-Judges: LIST of judges found in the cluster file
-citation_id: INT representing the citation id found in the cluster file
A summary of all of this information is then exported to a CSV in the court parent directory.
"""
court_data_dir = data_dir + r'/%s' % court_name
cluster_cases = {case: None for case in os.listdir(court_data_dir + r'/clusters')}
num_clust = len(cluster_cases.keys())
opinion_cases = {case: None for case in os.listdir(court_data_dir + r'/opinions')}
num_op = len(opinion_cases.keys())
print '%s cluster files, %s opinion files detected.' % (num_clust, num_op)
data = {}
print 'Consolidating cluster files...'
clust_checked = 0
for case in cluster_cases.keys():
case_number = case.rsplit('.')[0] # Drop '.json'
has_cluster_file = True
if case in opinion_cases.keys():
has_opinion_file = True
del opinion_cases[case]
else:
has_opinion_file = False
cluster_file_data = helper_functions.json_to_dict(court_data_dir + r'/clusters/%s' % case)
year, month, day = [int(element) for element in cluster_file_data['date_filed'].rsplit('-')]
file_date = datetime.date(year=year, month=month, day=day)
judges = cluster_file_data['judges']
citation_id = "" if cluster_file_data['citation_id'] is None \
else int(cluster_file_data['citation_id'])
data[case_number] = [str(case_number), has_cluster_file, has_opinion_file,
'%s/%s/%s' % (file_date.month, file_date.day, file_date.year),
judges, citation_id]
clust_checked += 1
if clust_checked % 100 == 0:
print '...%s of %s clusters consolidated...' % (clust_checked, num_clust)
num_op = len(opinion_cases.keys())
op_checked = 0
for case in opinion_cases.keys()[:5]:
case_number = case.rsplit('.')[0] # Drop '.json'
data[case_number] = [case_number, False, True, "", "", ""]
op_checked += 1
if op_checked % 100 == 0:
print '...%s of %s opinions consolidated...' % (op_checked, num_op)
consolidated_data = [['case_no', 'cluster_file', 'opinion_file', 'date', 'judges', 'citation_id']]
for case in data.keys():
consolidated_data.append(data[case])
helper_functions.list_to_csv(data_dir + '/%s/node_metadata.csv'
% court_name,
consolidated_data)
def get_master_edge_dicts():
"""
Produces two dictionaries from our master edge list: citer_as_key and cited_as_key, where the keys are citation ids
and the values lists of the corresponding citation ids
"""
with open(data_dir + r'/citations.csv') as masterfile:
csv_reader = csv.reader(masterfile)
next(csv_reader) # Skip header
citer_as_key = {}
cited_as_key = {}
row_i = 0
for row in csv_reader:
row_i += 1
citer = int(row[0])
cited = int(row[1])
try:
citer_as_key[citer].append(cited)
except KeyError:
citer_as_key[citer] = [cited]
try:
cited_as_key[cited].append(citer)
except KeyError:
cited_as_key[cited] = [citer]
if row_i % 100000 == 0:
print '%s rows loaded.' % row_i
return citer_as_key, cited_as_key
def create_edge_sublist(court_name, master_cited_as_key):
"""
Given a court name (and a corresponding consolidation file) and a master edge list, create_edge_sublist will create
a citations.csv file in the court's directory, representing the subset of edges in which both nodes are in
court_name's court.
"""
court_dir = data_dir + r'/%s' % court_name
court_data = helper_functions.csv_to_list(court_dir,
'consolidation.csv', 1, 0)
print 'finding IDs in court...'
citation_ids_in_court = []
for row in court_data:
opinion_id = int(row[0])
citation_ids_in_court.append(opinion_id)
edge_sublist = [['citing', 'cited']]
num_ids = len(citation_ids_in_court)
id = 0
for opinion_id in citation_ids_in_court:
try:
list_of_citers = master_cited_as_key[opinion_id]
except KeyError:
list_of_citers = []
for citer in list_of_citers:
if citer in citation_ids_in_court:
edge_sublist.append([citer, opinion_id])
id += 1
if id % 1000 == 0:
print '%s of %s IDs checked (%s)' % (id, num_ids, float(id)/num_ids)
helper_functions.list_to_csv(court_dir + r'/citations_sublist.csv', edge_sublist)
# master_citer_as_key, master_cited_as_key = get_master_edge_dicts()
# create_edge_sublist('scotus', master_cited_as_key)
| {
"repo_name": "brschneidE3/LegalNetworks",
"path": "python_code/consolidate_data.py",
"copies": "1",
"size": "5733",
"license": "mit",
"hash": 8513154163254786000,
"line_mean": 35.5159235669,
"line_max": 119,
"alpha_frac": 0.5954997384,
"autogenerated": false,
"ratio": 3.3983402489626555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44938399873626556,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import helper_functions
import os
import tarfile
proj_cwd = os.path.dirname(os.getcwd())
data_dir = proj_cwd + r'/data'
def download_url(url, destination_path, curl_path=r'C:/Users/brendan/Downloads/curl-7.38.0-win64/bin/curl'):
"""
This is a quick and easy function that simulates clicking a link in your browser that initiates a download.
It requires downloading the program CURL. Then the curl_path argument must point to whever your curl.exe executable
is located.
url:: the url from which data is to be downloaded.
destination_path:: the downloaded file to be created.
"""
os_string = '%s "%s" > %s' % (curl_path, url, destination_path)
print os_string
os.system(os_string)
def download_court_data(court_name, curl_path):
    """
    This function proceeds as follows:
    1) Given court_name, a string representing a CourtListener court, download_court_data first checks that there
    exists a subdirectory for court_name. This directory should contain within it a 'clusters' and 'opinions'
    sub-subdirectory. If these don't exist, they are created.
    2) download_court_data then compares how many files are in the 'clusters' sub-subdirectory to what is on the
    CourtListener server. If these numbers are not the same, all locally-saved files are deleted and re-downloaded
    and extracted to the 'clusters' sub-subdirectory.
    3) This process is then repeated for 'opinions'.
    """
    court_data_dir = data_dir + r'/%s' % court_name
    # Make a court data directory (and its sub-directories) if we don't have one already
    if not os.path.exists(court_data_dir):
        os.makedirs(court_data_dir)
    for subdir in ('clusters', 'opinions'):
        subdir_path = court_data_dir + r'/%s' % subdir
        if not os.path.exists(subdir_path):
            os.makedirs(subdir_path)
    # The clusters and opinions flows are identical except for naming, so
    # both delegate to one helper. The label arguments preserve the exact
    # wording of the original progress messages.
    _sync_court_files('clusters', 'cluster', 'cluster', court_name,
                      court_data_dir + r'/clusters', curl_path)
    _sync_court_files('opinions', 'opinions', 'opinion', court_name,
                      court_data_dir + r'/opinions', curl_path)
def _sync_court_files(api_name, redownload_label, done_label, court_name, target_dir, curl_path):
    """
    Mirror one CourtListener bulk-data set ('clusters' or 'opinions') for
    court_name into target_dir.
    Compares the local file count against the server's reported count; on a
    mismatch, deletes everything local, downloads the bulk .tar.gz with
    curl, extracts it, and removes the archive.
    api_name:: REST/bulk-data endpoint name ('clusters' or 'opinions').
    redownload_label / done_label:: words used in the progress messages.
    """
    court_metadata_url = 'https://www.courtlistener.com/api/rest/v3/%s/?docket__court=%s' % (api_name, court_name)
    court_metadata = helper_functions.url_to_dict(court_metadata_url)
    num_files_on_server = court_metadata['count']
    files_in_dir = os.listdir(target_dir)
    num_files_in_dir = len(files_in_dir)
    # If the number of files downloaded isn't the same as the number on the server
    if num_files_on_server != num_files_in_dir:
        print('Re-downloading %s data for court %s...' % (redownload_label, court_name.upper()))
        # Delete the files we currently have
        print('...deleting files...')
        for filename in files_in_dir:
            os.remove(r'%s/%s' % (target_dir, filename))
        # Download the .tar.gz file
        print('...downloading new .tar.gz file...')
        download_url(url='https://www.courtlistener.com/api/bulk-data/%s/%s.tar.gz' % (api_name, court_name),
                     destination_path=target_dir + r'/%s.tar.gz' % court_name,
                     curl_path=curl_path)
        # Extract it ('archive' rather than 'TarFile' to avoid shadowing the
        # tarfile module's class name)
        print('...extracting files...')
        with tarfile.open(target_dir + r'/%s.tar.gz' % court_name) as archive:
            archive.extractall(path=target_dir)
        # And delete .tar.gz file
        os.remove(r'%s/%s.tar.gz' % (target_dir, court_name))
        print('...done.')
    else:
        print("All server (%s) files accounted for." % done_label)
# download_court_data('scotus', r'C:/Users/brendan/Downloads/curl-7.38.0-win64/bin/curl')
| {
"repo_name": "brschneidE3/LegalNetworks",
"path": "python_code/download_data_batch.py",
"copies": "1",
"size": "5208",
"license": "mit",
"hash": -3995542414378783000,
"line_mean": 41.6885245902,
"line_max": 119,
"alpha_frac": 0.6401689708,
"autogenerated": false,
"ratio": 3.329923273657289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4470092244457289,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import json
from webapp2_extras.appengine.auth.models import UserToken
from webapp2_extras.auth import InvalidAuthIdError
from webapp2_extras.auth import InvalidPasswordError
from backend.models import User
from backend.basehandlers import BaseHandler
import sys
sys.path.insert(0, 'stripe')
import stripe
from backend import utils
# Decorators
def user_required(handler):
    """
    Decorator that checks if there's a user associated with
    the current session. Will also fail if there's no
    session available.

    Responds with a 401 (and does not invoke ``handler``) when the
    user id/token pair is missing, malformed or unknown.
    """
    def check_login(self, *args, **kwargs):
        self.response.headers['Content-Type'] = "application/json"
        user_id = self.request.get('user_id')
        token = self.request.get('token')
        # A missing or non-numeric user_id can never authenticate; reject it
        # up front instead of letting int() raise and surface as a 500.
        try:
            user_id = int(user_id)
        except (TypeError, ValueError):
            self.send_response(self.RESPONSE_CODE_401,
                               "User not authenticated",
                               "")
            return
        # Does the user id and token exist in the datastore
        response_tuple = User.get_by_auth_token(user_id, token)
        # Response is a None tuple then the user id & token do not exist
        if response_tuple == (None, None):
            self.send_response(self.RESPONSE_CODE_401,
                               "User not authenticated",
                               "")
        else:
            return handler(self, *args, **kwargs)
    return check_login
# Helpers
def json_response(error_code, status, response_msg, json_object):
    """
    Global function used to structure JSON output.

    Packs the four pieces of a standard API reply into a single
    JSON-encoded string with fixed keys: code, status, message, response.
    """
    return json.dumps({
        'code': error_code,
        'status': status,
        'message': response_msg,
        'response': json_object,
    })
class SignupHandler(BaseHandler):
    """Signup New User"""
    def post(self):
        """
        Create a new account from a JSON body with keys: email, password,
        first_name, last_name, phone.

        Responds 400 if the email is already registered (checked both by a
        datastore query and by create_user's uniqueness constraint), else
        200 with the new user's dict including a fresh auth token.
        """
        self.response.headers['Content-Type'] = "application/json"
        # Does e-mail already exist?
        jsn = json.loads(self.request.body)
        email = jsn['email']
        password = jsn['password']
        first_name = jsn['first_name']
        last_name = jsn['last_name']
        phone = jsn['phone']
        query = User.query(User.email == email)
        users = query.fetch()
        if users:
            msg = 'Unable to create user. Duplicate email: %s' % email
            self.send_response(self.RESPONSE_CODE_400, msg, "")
            return
        # Create Stripe customer
        stripe.api_key = utils.get_stripe_api_key()
        stripe_customer = stripe.Customer.create()
        stripe_customer_id = stripe_customer.id
        # If stripe customer Id doesn't exist, set to None
        if not stripe_customer_id:
            stripe_customer_id = None
        # NOTE(review): stripe_customer_id is never passed to create_user (the
        # kwarg below is commented out), so the Stripe customer created above
        # appears to be orphaned — confirm whether this is intentional.
        # Create a user
        unique_properties = ['email']
        user_data = self.user_model.create_user(email,
                                                unique_properties,
                                                email=email,
                                                password_raw=password,
                                                first_name=first_name,
                                                last_name=last_name,
                                                phone=phone)
        #stripeCustomerId=stripe_customer_id
        # If user was not created, probably a duplicate email
        if not user_data[0]: # user_data is a tuple
            msg = 'Unable to create user. Duplicate email: %s' % email
            self.send_response(self.RESPONSE_CODE_400, msg, "")
            return
        # New user created. Get user at index 1
        user = user_data[1]
        user_dict = user.to_dict()
        user_id = user.get_id()
        token = UserToken.create(user_id, subject='auth', token=None)
        user_dict['token'] = str(token.token)
        user_dict['email'] = email
        # Strip datastore timestamp fields before returning the dict.
        del user_dict['created']
        del user_dict['updated']
        # NOTE(review): Python 2 debug print; this writes user data to the
        # logs — consider removing.
        print user_dict
        self.send_response(self.RESPONSE_CODE_200, "User Signed Up", user_dict)
class LoginHandler(BaseHandler):
    """Authenticate new users"""
    def post(self):
        """
        Log a user in from a JSON body with keys: email, password.

        On success responds 200 with the merged auth-session dict and user
        model dict; on bad credentials responds 400.
        """
        #self.response.headers['Access-Control-Allow-Origin'] = 'https://sandboxx.herokuapp.com'
        # TODO: Change in basehandlers.py
        #self.response.headers['Access-Control-Allow-Origin'] = 'http://localhost:9000'
        self.response.headers['Content-Type'] = "application/json"
        jsn = json.loads(self.request.body)
        email = jsn['email']
        password = jsn['password']
        # Login with email and password
        try:
            u = self.auth.get_user_by_password(email,
                                               password,
                                               remember=True,
                                               save_session=True)
            # Fetch our own User entity for the same email to enrich the reply.
            # NOTE(review): assumes the entity exists once auth succeeded;
            # users[0] raises IndexError otherwise — confirm.
            query = User.query(User.email == email)
            users = query.fetch()
            user = users[0]
            '''
            # Create Stripe customerID if one doesn't exist
            if not user.stripeCustomerId:
                stripe.api_key = stripe.api_key
                stripe_customer = stripe.Customer.create()
                stripe_customer_id = stripe_customer.id
                user.stripeCustomerId = stripe_customer_id
                user.put()
            '''
            # Merge both objects: auth user object and custom user model
            user_dict = user.to_dict()
            # NOTE(review): 'updated' is kept here but deleted in SignupHandler —
            # confirm the asymmetry is intentional. Also note that
            # dict(u.items() + user_dict.items()) is Python 2-only; dict views
            # cannot be concatenated with + under Python 3.
            del user_dict['created']
            results = dict(u.items() + user_dict.items())
            results['email'] = email
            print(results)
            self.send_response(self.RESPONSE_CODE_200, "", results)
        except (InvalidAuthIdError, InvalidPasswordError) as e:
            error_message = 'Login failed for user %s' % email
            self.send_response(self.RESPONSE_CODE_400, error_message, "")
class LogoutHandler(BaseHandler):
    """Logout users"""
    @user_required
    def post(self):
        """
        Log the user out: clear the device token on the User entity and
        delete the auth token supplied in the JSON body (token, user_id).
        """
        self.response.headers['Content-Type'] = "application/json"
        jsn = json.loads(self.request.body)
        token = jsn['token']
        user_id = jsn['user_id']
        # NOTE(review): @user_required reads user_id/token via self.request.get
        # (query/form params) while this body reads them from the JSON payload —
        # confirm clients supply both.
        # Reset current user's device token
        user = User.get_by_auth_token(int(user_id), token)
        user = user[0]
        user.deviceToken = None
        user.put()
        self.user_model.delete_auth_token(user_id, token)
        # NOTE(review): the trailing "| {" on the next line is corruption from
        # the data dump this file was extracted into, not part of the handler.
        self.send_response(self.RESPONSE_CODE_200, "User logged out", "") | {
"repo_name": "boneil3/hyperAdmit",
"path": "backend/auth.py",
"copies": "2",
"size": "6302",
"license": "mit",
"hash": -9197112624926563000,
"line_mean": 31.4896907216,
"line_max": 96,
"alpha_frac": 0.5547445255,
"autogenerated": false,
"ratio": 4.198534310459694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5753278835959694,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
# Flat analysis script: compares UK and euro-area current-account and
# financial-account flows as a fraction of GDP, plotting both.
# NOTE(review): uses legacy pandas APIs (resample(..., how=...),
# pd.rolling_mean, .loc slicing on quarterly labels) that were removed in
# later pandas versions — this script runs only on the pandas version it
# was written against.
# UK balance-of-payments series: first 240 rows / 4 columns of the raw CSV,
# re-indexed onto a fixed quarterly calendar.
cols = ['BoP FA Net', 'BoP FA OI Net', 'BoP FA PI Net', 'CA % GDP']
raw_data = pd.read_csv('raw_data/BoP_UK.csv', index_col=0, parse_dates=True)
data = pd.DataFrame(raw_data.iloc[:240, :4].fillna(0)).astype(float)
data.columns = cols
data.index = pd.date_range('1955-01-01', '2014-12-31', freq='Q')
# Euro-area current account: file is newest-first, so reverse before
# stamping a monthly index and summing to quarters.
raw_eur = pd.read_csv('raw_data/EUR_CA.csv', index_col=0, parse_dates=True)
raw_eur = raw_eur[::-1]
raw_eur.index = pd.date_range('1999-01-01', '2015-03-01', freq='M')
raw_eur.index.name = 'Date'
raw_eur = raw_eur.resample('Q', how='sum')
# Euro-area quarterly GDP, padded forward by repeating the last observation
# through 2015Q1 so the CA/GDP division lines up.
data_eur_gdp_q = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['EUR_GDP_Q'].dropna()
data_eur_gdp_q.columns = ['EUR_GDP_Q']
data_eur_gdp_q.index.name = 'Date'
data_eur_gdp_q = data_eur_gdp_q.loc['1999-03-31':]
end_gdp = pd.DataFrame(data=[data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1],
                             data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1]],
                       index=pd.date_range('2014-06-30', '2015-03-31', freq='Q'))
eur_gdp = pd.concat([data_eur_gdp_q, end_gdp])
eur_gdp.columns = ['EUR_CA']
# Current account as a fraction of GDP for both regions.
eur_ca = raw_eur.div(eur_gdp)
eur_ca.columns = ['EUR CA']
uk_ca = data['CA % GDP'] / 100.0
uk_ca.columns = ['UK CA']
# UK financial-account components scaled by GDP (GDP padded with two
# repeated quarters to match the BoP index length).
uk_fa = pd.DataFrame(data.iloc[:, :3])
uk_gdp = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['UK_GDP_Q'].dropna()
uk_gdp_final = pd.concat([uk_gdp, pd.DataFrame(data=[uk_gdp.iloc[-1], uk_gdp.iloc[-1]],
                                               index=pd.date_range('2014-09-01', '2014-12-31', freq='Q'))])
uk_fa_gdp = pd.DataFrame(index=uk_gdp_final.index)
uk_fa_gdp['UK FA Net'] = uk_fa['BoP FA Net'] / uk_gdp_final
uk_fa_gdp['UK FA OI'] = uk_fa['BoP FA OI Net'] / uk_gdp_final
uk_fa_gdp['UK FA PI'] = uk_fa['BoP FA PI Net'] / uk_gdp_final
print(eur_gdp)
# Euro-area financial account (monthly, newest-first) summed to quarters
# and scaled by GDP from 2009Q1 onward.
eur_fa = pd.read_csv('raw_data/EUR_FA.csv', index_col=0, header=0, parse_dates=True).dropna().astype(float)
eur_fa = eur_fa.iloc[::-1]
print(eur_fa)
eur_fa.index = pd.date_range('2009-01-01', '2015-02-28', freq='M')
eur_fa = eur_fa.resample('Q', how='sum')
print(eur_fa)
eur_fa_gdp = pd.DataFrame(index=eur_gdp.index)
eur_fa_gdp['EUR FA Net'] = eur_fa['EUR FA Net'] / eur_gdp['EUR_CA'].loc['2009-03-31':]
eur_fa_gdp['EUR FA OI'] = eur_fa['EUR FA OI'] / eur_gdp['EUR_CA'].loc['2009-03-31':]
eur_fa_gdp['EUR FA PI'] = eur_fa['EUR FA PI'] / eur_gdp['EUR_CA'].loc['2009-03-31':]
print(eur_fa_gdp)
# Plot 1: UK vs EUR current account % GDP.
fig, ax = plt.subplots()
uk_ca.plot(ax=ax, label='UK CA')
eur_ca.plot(ax=ax, label='EUR CA')
ax.set_title('Current Account %GDP')
plt.legend()
# Plot 2: 4-quarter rolling average of UK financial-account ratios.
uk_fa_gdp_4q = pd.rolling_mean(uk_fa_gdp, window=4)
fig2, ax2 = plt.subplots()
uk_fa_gdp_4q.plot(ax=ax2)
#eur_fa_gdp.plot(ax=ax2)
plt.legend()
ax2.set_title('UK Financial Account % GDP (4Q Avg.)')
#plt.show()
# NOTE(review): this writes only an empty quarterly date index to the US
# BoP file — presumably scaffolding for a later step; confirm.
dates = pd.DataFrame(index=pd.date_range('1960-03-31', '2015-01-01', freq='Q'))
print(dates)
dates.to_csv('raw_data/US_BoP.csv') | {
"repo_name": "boneil3/backtest",
"path": "BoP.py",
"copies": "1",
"size": "3114",
"license": "mit",
"hash": -5144292381117619000,
"line_mean": 36.987804878,
"line_max": 108,
"alpha_frac": 0.6380860629,
"autogenerated": false,
"ratio": 2.1945031712473573,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.33325892341473573,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
from main import Backtest
import random
import itertools
import time
import dateutil
import sys
sys.path.append('raw_data')
# Flat analysis script: loads the TIC (Treasury International Capital)
# history, smooths it with a 3-month rolling mean, builds SSA forecasts for
# four headline flow series via Backtest.get_ssa_prediction, and plots
# forecast vs smoothed actuals in a 2x2 grid.
# Rows 0-12 of the CSV are header/preamble; data begins at row 13 with the
# date in column 0.
raw_data = pd.read_csv('raw_data/npr_history.csv')
data = pd.DataFrame(data=raw_data.iloc[13:, 1:].values, index=pd.to_datetime(raw_data.iloc[13:, 0]),
                    columns=[i for i in range(raw_data.shape[1] - 1)])
data.columns = ['Purchases of Domestic US', 'Sales of Domestic US', 'Net Domestic US Purchases', 'Net Private',
                'Net Private Treasuries', 'Net Private Agency Bonds', 'Net Private Corporate Bonds',
                'Net Private Equities', 'Net Official', 'Net Official Treasuries', 'Net Official Agency Bonds',
                'Net Official Corporate Bonds', 'Net Official Equities', 'Purchases of Foreign by US',
                'Sales of Foreign to US', 'Net Foreign Purchases by US', 'Net Foreign Bonds Purchases by US',
                'Net Foreign Equity Purchases by US', 'Net Transactions in Securities', 'Other Acq. of Securities',
                'Net Foreign Acq. of Securities', 'Change Foreign Owning Dollar ST Debt', 'Change Foreign Owning Bills',
                'Change in Foreign Private Owning Bills', 'Change in Foreign Official Owning Bills',
                'Change in Foreign Owning Other', 'Change in Foreign Private Owning Other',
                'Change in Foreign Official Owning Other', 'Change in Bank Net Dollar Liabilities', 'Net TIC Flow',
                'Net Private TIC Flow', 'Net Official TIC Flow']
# 3-period rolling mean for display smoothing.
# NOTE(review): pd.rolling_mean and .ix below are legacy pandas APIs removed
# in later versions — this script targets the pandas version it was written
# against.
data_MA3 = pd.rolling_mean(data, window=3, min_periods=1)
data_MA3.index.name = 'Date'
preds = []
data_names = ['Net Foreign Acq. of Securities', 'Change Foreign Owning Dollar ST Debt',
              'Change in Bank Net Dollar Liabilities', 'Net TIC Flow']
x = Backtest()
# 24-step SSA forecast for each series, skipping the first 24 observations.
for i, name in enumerate(data_names):
    preds.append(pd.DataFrame(x.get_ssa_prediction(pd.DataFrame(data.ix[24:, name]), M=24), index=data.index))
# 2x2 grid: one panel per forecast series.
fig, ax = plt.subplots(2, 2)
for i in range(len(preds)):
    if i == 0:
        preds[i].plot(ax=ax[0][0])
    if i == 1:
        preds[i].plot(ax=ax[1][0])
    if i == 2:
        preds[i].plot(ax=ax[0][1])
    if i == 3:
        preds[i].plot(ax=ax[1][1])
# Overlay the smoothed actuals on each forecast panel.
data_MA3['Net Foreign Acq. of Securities'].plot(ax=ax[0][0])
ax[0][0].set_title('Net Foreign Acq. of Securities')
data_MA3['Change Foreign Owning Dollar ST Debt'].plot(ax=ax[1][0])
ax[1][0].set_title('Change Foreign Owning Dollar ST Debt')
data_MA3['Change in Bank Net Dollar Liabilities'].plot(ax=ax[0][1])
ax[0][1].set_title('Change in Bank Net Dollar Liabilities')
data_MA3['Net TIC Flow'].plot(ax=ax[1][1])
ax[1][1].set_title('Net TIC Flow')
plt.tight_layout()
plt.show() | {
"repo_name": "boneil3/backtest",
"path": "tic.py",
"copies": "1",
"size": "2796",
"license": "mit",
"hash": -4084143796034819000,
"line_mean": 41.3787878788,
"line_max": 120,
"alpha_frac": 0.6552217454,
"autogenerated": false,
"ratio": 3.022702702702703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41779244481027034,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brendan'
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
import sys
sys.path.append('raw_data')
class Backtest():
    """
    Singular-spectrum-analysis (SSA) backtester.

    Loads a price history CSV (CCY_2000.csv — presumably currency prices;
    confirm), produces SSA forecasts of a single series, derives long/short
    exposures from forecast extrema with a stop-loss/lever-up overlay, and
    compounds exposures into a PnL curve.
    """
    def __init__(self):
        # Price history: one column per security, indexed by parsed dates.
        self.prices = pd.read_csv('raw_data/CCY_2000.csv', index_col=0, parse_dates=True)
    def get_ssa_prediction(self, es, M=200):
        """ Generate prediction from prices.

        es: single-column pandas DataFrame of the series to forecast.
        M:  number of forecast steps appended after the reconstruction.
        Returns a length-(N + M) numpy array: SSA reconstruction of the
        input followed by M recursively generated forecast values.
        NOTE(review): uses time.clock() (removed in Python 3.8) and .ix
        (removed in later pandas) — targets the interpreter/pandas version
        this was written for.
        """
        # ES OF TYPE DATAFRAME PANDAS
        d = 20          # number of SVD components kept
        bewl = False    # set True to plot the leading singular vectors
        N = es.shape[0]
        timeStart = time.clock()
        # NOTE(review): under Python 2 integer division, N / 2 is already an
        # int, so this condition is always true and one observation is always
        # dropped; the apparent intent (drop only when N is even, to make N
        # odd) only holds under Python 3 float division — confirm.
        if int(N / 2) == N / 2:
            es = es[1:]
            N -= 1
        L = int(N / 2) + 1   # window length
        K = N - L + 1        # number of lagged columns
        # N-K+1 = L
        #L = N/2
        #N/2 = N-K+1
        #K = N/2+1 = L+1
        #2K -N - 1= 1
        esix = es.iloc
        # Trajectory (Hankel) matrix: row i is the length-L window starting at i.
        X = pd.DataFrame([esix[i:i + L, 0].tolist() for i in range(K)])
        #for i in range(K):
        #XTix[i] = esix[i:i+L]
        XT = X.T
        #H = np.mat(X)*np.mat(XT)
        #H = X.dot(X.T)
        #sigma,V = scipy.linalg.eigh(H)
        U, sigma, V = np.linalg.svd(X, full_matrices=False)
        U = pd.DataFrame(U)
        V = pd.DataFrame(V)
        UT = U.T
        #Potential Pairs
        #### 0-14 ###
        #periodogram analysis
        # Rank-one elementary matrices for the first d components.
        Xtilde = [np.array(sigma[i] * np.outer(U.ix[:, i].values, V.ix[i].values))
                  for i in range(d)]
        ##create X1 X2 X3 GROUPED
        ##possible pairs on eigenvalue magnitude analysis
        # Hard-coded grouping of components into three sub-signals:
        # [0:3], [3:5], [5:d].
        XX = np.zeros((3, L, L))
        XX[0] = np.sum(Xtilde[0:3], axis=0, out=np.zeros((L, L)))
        XX[1] = np.sum(Xtilde[3:5], axis=0, out=np.zeros((L, L)))
        XX[2] = np.sum(Xtilde[5:], axis=0, out=np.zeros((L, L)))
        # Diagonal averaging (hankelization) of each grouped matrix back
        # into a series, split over the three anti-diagonal regimes.
        XXsum0 = [[1.0 / (k + 1) * (np.sum([XX[j, i, k - i] for i in range(k + 1)]))
                   for k in range(L - 1)] for j in range(3)]
        XXsum1 = [[1.0 / L * (np.sum([XX[j, i, k - i] for i in range(L)]))
                   for k in range(L - 1, K)] for j in range(3)]
        XXsum2 = [[1.0 / (N - k) * (np.sum([XX[j, i - 1, k - i + 1] for i in range(k - K + 2, N - K + 2)]))
                   for k in range(K, N)] for j in range(3)]
        #k = L-1 XX[0,L-1] [1,L-2], [
        #k = K -> 1/(L-1)*np.sum([XX[1,L-1]
        #k=N -> sum(XX[
        #N-1-
        #N-K=L-1
        #K-N-K-1 = 2K-N-1 = 0
        #K-1=L
        g0 = np.concatenate((XXsum0[0], XXsum1[0], XXsum2[0]))
        g1 = np.concatenate((XXsum0[1], XXsum1[1], XXsum2[1]))
        g2 = np.concatenate((XXsum0[2], XXsum1[2], XXsum2[2]))
        g = g0 + g1 + g2
        #k = N-1
        #N-K+1,N-K+1
        # L-1,L
        #K-N+K-1
        #2K-N-1 = 2(N-L+1)-N-1 = N - 2L + 1 = 0
        gPrime = g
        Uiloc = U.iloc
        # Recurrent forecasting coefficients from the last components of
        # the leading left singular vectors.
        pi = np.zeros(d)
        pi = [Uiloc[-1, i] for i in range(d)]
        #for i in range(d):
        #    pi[i] = U.ix[-1,i]
        vS = np.linalg.norm(pi, ord=2, axis=0) ** 2
        Rp = np.zeros((d, L - 1))
        Rp = [pi[i] * (Uiloc[:L - 1, i]) for i in range(d)]
        R = 1 / (1 - vS) * np.sum(Rp, axis=0)
        R = R[::-1]
        #How many predictions? M
        # Reconstruction followed by M recursively forecast points.
        g2 = np.zeros(N + M)
        g2[:N] = gPrime
        if bewl:
            # Diagnostic plot of the first nine left singular vectors.
            fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(3, 3)
            axes = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9]
            for i, ax in enumerate(axes):
                ax.plot(U.ix[:, i])
            plt.show()
        for i in range(N, N + M):
            g2[i] = np.sum([R[j] * g2[i - j - 1] for j in range(L - 1)])
        print("SSALoop " + str(es.index[-1]) + ": " + str(time.clock() - timeStart) + " sec")
        return g2
    def generate_exposure(self, sec_num, m=500, w=20):
        """
        Build a length-N exposure array for security column ``sec_num``.

        Every ``w`` days in the last ``m`` days an SSA forecast is made; if
        the next two forecast extrema both lie more than ``min_delta`` above
        (below) the current forecast, exposure goes long (short). Between
        rebalances a stop-loss / lever-up overlay adjusts the position.
        :return: exposure to calculate PnL
        """
        prices = self.prices
        N = prices.shape[0]
        # m = number of days out of sample, N-m = days in sample
        # w = days between predictions
        min_delta = .02  # min prediction move to change exposure
        max_loss = .0125   # stop-loss threshold (fractional move against position)
        max_lev = 2        # maximum absolute leverage
        if not isinstance(prices, pd.DataFrame):
            raise AssertionError('prices must be a DataFrame')
        # array of dataframes of predictions
        oo_sample = range(N - m, N - 1, w)
        oo_sample_idx = [prices.index[i] for i in oo_sample]
        predictions = [self.get_ssa_prediction(pd.DataFrame(prices.iloc[:i-1, sec_num]), M=m - j * w + 1) for j, i in
                       enumerate(oo_sample)]
        pred_iter = zip(oo_sample_idx, predictions)
        pred_dfs = []
        for idx, pred in pred_iter:
            ret_pred = pd.DataFrame(pred, index=prices.index, columns=[str(idx)])
            pred_dfs.append(ret_pred)
        exposure = np.zeros(N)
        for i, idx in enumerate(prices.index):
            if i == 0:
                pass
            elif idx in oo_sample_idx:
                # Rebalance date: locate the forecast made on this date.
                inter_pred = None
                for pred in pred_dfs:
                    if pred.columns == [str(idx)]:
                        inter_pred = pred
                p_data = inter_pred.iloc[i:, 0].values
                # Indices of local extrema of the forecast path.
                a = np.diff(np.sign(np.diff(p_data))).nonzero()[0] + 1
                pred_0 = inter_pred.iloc[i, 0]
                val_0 = prices.iloc[i, sec_num]
                # Only trade when the forecast starts within 1% of the
                # actual price and at least two extrema exist.
                if a.size >= 2 and abs((pred_0 - val_0) / val_0) < .01:
                    extrema_1 = inter_pred.iloc[i + a[0], 0]
                    extrema_2 = inter_pred.iloc[i + a[1], 0]
                    if (extrema_1 - pred_0) / pred_0 > min_delta and (extrema_2 - pred_0) / pred_0 > min_delta:
                        exposure[i] = 1
                    elif (extrema_1 - pred_0) / pred_0 < -min_delta and (extrema_2 - pred_0) / pred_0 < -min_delta:
                        exposure[i] = -1
                    else:
                        exposure[i] = exposure[i - 1]
                else:
                    exposure[i] = exposure[i - 1]
                # Stop Loss Check
                # First find last local extremum
            elif i > N-m:
                # Stop loss
                # NOTE(review): val_0 is only assigned inside the rebalance
                # branch above; if this branch runs before any rebalance date
                # has been seen, val_0 is unbound (UnboundLocalError) and
                # otherwise refers to the price at the *last* rebalance —
                # confirm intent.
                if (exposure[i-1] > 0 and (self.prices.iloc[i-1, sec_num] - val_0) / val_0 < -max_loss) or \
                        (exposure[i-1] < 0 and (self.prices.iloc[i-1, sec_num] - val_0) / val_0 > max_loss):
                    exposure[i] = 0
                # Lever up
                elif exposure[i-1] > 0 and self.prices.iloc[i-1, sec_num] > val_0 and \
                        exposure[i-1] < max_lev:
                    exposure[i] = exposure[i-1] + 0.1
                elif exposure[i-1] < 0 and self.prices.iloc[i-1, sec_num] < val_0 and \
                        exposure[i-1] > -max_lev:
                    exposure[i] = exposure[i-1] - 0.1
                else:
                    exposure[i] = exposure[i - 1]
        return exposure
    def generate_pnl(self, exposure, sec_num):
        """
        Compound daily returns of column ``sec_num`` scaled by ``exposure``
        into a cumulative value series starting at 1.0.

        Returns a single-column ('Value') DataFrame on the price index.
        """
        delta_price = self.prices.pct_change()
        delta_portfolio = pd.DataFrame(delta_price.iloc[:, sec_num].values * exposure, index=self.prices.index)
        pnl = pd.DataFrame(data=np.ones_like(exposure), index=self.prices.index, columns=['Value'])
        for i, val in enumerate(delta_portfolio.iloc[:, 0].values):
            if i == 0:
                pass
            else:
                pnl.iloc[i] = pnl.iloc[i - 1] * (1.0 + val)
        return pnl
    def run(self, security, start_idx=0, end_idx=-1, len_sample=200, delta_sample=20, plot_bool=True):
        """
        Backtest one security (int column / str name), a list of them, or
        'all'; optionally restrict prices to [start_idx:end_idx] and plot
        the individual and averaged PnL curves.
        """
        if not isinstance(start_idx, int):
            raise TypeError('Start index not int')
        if not isinstance(len_sample, int):
            raise TypeError('Length of sample not int')
        if not isinstance(delta_sample, int):
            raise TypeError('Length of sample not int')
        if not isinstance(plot_bool, bool):
            raise TypeError('Length of sample not bool')
        if not isinstance(security, str) and not isinstance(security, int) and not isinstance(security, list):
            raise TypeError('Security must be string or int or list of string/ints')
        # Slice the price history to the requested window (mutates self.prices).
        if end_idx - start_idx > self.prices.shape[0] or (end_idx - start_idx < 0 and end_idx > 0):
            raise ValueError('Start and end indices do not work')
        elif start_idx != 0 and end_idx != -1:
            self.prices = self.prices.iloc[start_idx:end_idx]
        elif end_idx != -1:
            self.prices = self.prices.iloc[:end_idx]
        elif start_idx != 0:
            self.prices = self.prices.iloc[start_idx:]
        sec_list = []
        if isinstance(security, str):
            if security.lower() == 'all':
                sec_list = self.prices.columns
                pnls = []
                for s in range(len(self.prices.columns)):
                    exp = self.generate_exposure(s, m=len_sample, w=delta_sample)
                    pnl = self.generate_pnl(exp, s)
                    pnls.append(pnl)
            else:
                # NOTE(review): pandas Index has no .index() method (that is a
                # list method) — this likely should be columns.get_loc(security);
                # confirm against the pandas version in use.
                sec = self.prices.columns.index(security)
                exp = self.generate_exposure(sec, m=len_sample, w=delta_sample)
                pnls = [self.generate_pnl(exp, sec)]
                sec_list = security
        elif isinstance(security, list):
            pnls = []
            for s in security:
                if isinstance(s, str):
                    sec = self.prices.columns.index(s)
                else:
                    sec = s
                sec_list.append(self.prices.columns[sec])
                exp = self.generate_exposure(sec, m=len_sample, w=delta_sample)
                pnl = self.generate_pnl(exp, sec)
                pnls.append(pnl)
        else:
            exp = self.generate_exposure(security, m=len_sample, w=delta_sample)
            pnls = [self.generate_pnl(exp, security)]
            sec_list = [self.prices.columns[security]]
        if plot_bool:
            # One chart with every security's PnL, plus an equal-weight average.
            total_pnl = pnls[0]
            fig, ax = plt.subplots()
            for i, p in enumerate(pnls):
                p.columns = [sec_list[i]]
                p.plot(ax=ax)
                p.columns = ['Value' for i in range(len(p.columns))]
                if i > 0:
                    total_pnl = total_pnl + p
            total_pnl = total_pnl / float(len(pnls))
            fig2, ax2 = plt.subplots()
            total_pnl.plot(ax=ax2)
            plt.show()
#x = Backtest()
#x.run(0, start_idx=1000, len_sample=500, delta_sample=10)
| {
"repo_name": "boneil3/backtest",
"path": "main.py",
"copies": "1",
"size": "10391",
"license": "mit",
"hash": 8060628764924463000,
"line_mean": 34.7079037801,
"line_max": 117,
"alpha_frac": 0.4849388894,
"autogenerated": false,
"ratio": 3.1893799877225293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4174318877122529,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brendan O'Connor (anyall.org, brenocon@gmail.com)"
#### Modified by Satish Palaniappan
### Insert Current Path
import os, sys, inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import re,sys
# Helper: compile a pattern with UNICODE so character classes also match
# non-ASCII text.
mycompile = lambda pat: re.compile(pat, re.UNICODE)
#SMILEY = mycompile(r'[:=].{0,1}[\)dpD]')
#MULTITOK_SMILEY = mycompile(r' : [\)dp]')
# Emoticon building blocks: eye, optional nose, and mouth character classes.
NormalEyes = r'[:=]'
Wink = r'[;]'
NoseArea = r'(|o|O|-)' ## rather tight precision, \S might be reasonable...
HappyMouths = r'[D\)\]d]'
SadMouths = r'[\(\[]'
Tongue = r'[pP]'
OtherMouths = r'[oO/\\]' # remove forward slash if http://'s aren't cleaned
# Composed matchers: eyes + optional nose + one mouth class ('^_^' is a
# special-cased happy face).
Happy_RE = mycompile( '(\^_\^|' + NormalEyes + NoseArea + HappyMouths + ')')
Sad_RE = mycompile(NormalEyes + NoseArea + SadMouths)
Wink_RE = mycompile(Wink + NoseArea + HappyMouths)
Tongue_RE = mycompile(NormalEyes + NoseArea + Tongue)
Other_RE = mycompile( '('+NormalEyes+'|'+Wink+')' + NoseArea + OtherMouths )
# Catch-all pattern covering every eye/mouth combination above.
Emoticon = (
    "("+NormalEyes+"|"+Wink+")" +
    NoseArea +
    "("+Tongue+"|"+OtherMouths+"|"+SadMouths+"|"+HappyMouths+")"
)
Emoticon_RE = mycompile(Emoticon)
#Emoticon_RE = "|".join([Happy_RE,Sad_RE,Wink_RE,Tongue_RE,Other_RE])
#Emoticon_RE = mycompile(Emoticon_RE)
def analyze_tweet(text):
    """Classify text by emoticon content: 'happy', 'sad', 'neutral'
    (both kinds present) or 'nill' (neither found)."""
    found_happy = Happy_RE.search(text) is not None
    found_sad = Sad_RE.search(text) is not None
    if found_happy and found_sad:
        return "neutral"
    if found_happy:
        return "happy"
    if found_sad:
        return "sad"
    return "nill"
# more complex & harder
def analyze_tweetHeavy(text):
    """Finer-grained emoticon classifier: 'happy', 'sad', 'other', 'tongue',
    'neutral' (more than one kind matched) or 'nill' (no match)."""
    matched = {
        "happy": bool(Happy_RE.search(text)),
        "sad": bool(Sad_RE.search(text)),
        "wink": bool(Wink_RE.search(text)),
        "tongue": bool(Tongue_RE.search(text)),
        "other": bool(Other_RE.search(text)),
    }
    hits = sum(matched.values())
    if hits > 1:
        return "neutral"
    if hits == 1:
        # Exactly one category matched; winks count as happy.
        if matched["happy"] or matched["wink"]:
            return "happy"
        if matched["sad"]:
            return "sad"
        if matched["other"]:
            return "other"
        return "tongue"
    return "nill"
| {
"repo_name": "tpsatish95/SocialTextFilter",
"path": "Twokenize/emoticons.py",
"copies": "1",
"size": "1928",
"license": "apache-2.0",
"hash": 489043959897261500,
"line_mean": 28.2121212121,
"line_max": 105,
"alpha_frac": 0.6415975104,
"autogenerated": false,
"ratio": 2.342648845686513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.828971255162964,
"avg_score": 0.038906760891374466,
"num_lines": 66
} |
__author__ = "Brendan O'Connor (anyall.org, brenocon@gmail.com)"
'''
Copyright 2015 Serendio Inc.
Modified By - Satish Palaniappan
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
### Insert Current Path
import os, sys, inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import re,sys
# Helper: compile a pattern with UNICODE so character classes also match
# non-ASCII text.
mycompile = lambda pat: re.compile(pat, re.UNICODE)
#SMILEY = mycompile(r'[:=].{0,1}[\)dpD]')
#MULTITOK_SMILEY = mycompile(r' : [\)dp]')
# Emoticon building blocks: eye, optional nose, and mouth character classes.
NormalEyes = r'[:=]'
Wink = r'[;]'
NoseArea = r'(|o|O|-)' ## rather tight precision, \S might be reasonable...
HappyMouths = r'[D\)\]d]'
SadMouths = r'[\(\[]'
Tongue = r'[pP]'
OtherMouths = r'[oO/\\]' # remove forward slash if http://'s aren't cleaned
# Composed matchers: eyes + optional nose + one mouth class ('^_^' is a
# special-cased happy face).
Happy_RE = mycompile( '(\^_\^|' + NormalEyes + NoseArea + HappyMouths + ')')
Sad_RE = mycompile(NormalEyes + NoseArea + SadMouths)
Wink_RE = mycompile(Wink + NoseArea + HappyMouths)
Tongue_RE = mycompile(NormalEyes + NoseArea + Tongue)
Other_RE = mycompile( '('+NormalEyes+'|'+Wink+')' + NoseArea + OtherMouths )
# Catch-all pattern covering every eye/mouth combination above.
Emoticon = (
    "("+NormalEyes+"|"+Wink+")" +
    NoseArea +
    "("+Tongue+"|"+OtherMouths+"|"+SadMouths+"|"+HappyMouths+")"
)
Emoticon_RE = mycompile(Emoticon)
#Emoticon_RE = "|".join([Happy_RE,Sad_RE,Wink_RE,Tongue_RE,Other_RE])
#Emoticon_RE = mycompile(Emoticon_RE)
def analyze_tweet(text):
    """Classify text by emoticon content: 'happy', 'sad', 'neutral'
    (both kinds present) or 'nill' (neither found)."""
    found_happy = Happy_RE.search(text) is not None
    found_sad = Sad_RE.search(text) is not None
    if found_happy and found_sad:
        return "neutral"
    if found_happy:
        return "happy"
    if found_sad:
        return "sad"
    return "nill"
# more complex & harder
def analyze_tweetHeavy(text):
    """Finer-grained emoticon classifier: 'happy', 'sad', 'other', 'tongue',
    'neutral' (more than one kind matched) or 'nill' (no match)."""
    matched = {
        "happy": bool(Happy_RE.search(text)),
        "sad": bool(Sad_RE.search(text)),
        "wink": bool(Wink_RE.search(text)),
        "tongue": bool(Tongue_RE.search(text)),
        "other": bool(Other_RE.search(text)),
    }
    hits = sum(matched.values())
    if hits > 1:
        return "neutral"
    if hits == 1:
        # Exactly one category matched; winks count as happy.
        if matched["happy"] or matched["wink"]:
            return "happy"
        if matched["sad"]:
            return "sad"
        if matched["other"]:
            return "other"
        return "tongue"
    return "nill"
| {
"repo_name": "tpsatish95/Python-Workshop",
"path": "Python Scripts/social-text-parser/Twokenize/emoticons.py",
"copies": "2",
"size": "2484",
"license": "apache-2.0",
"hash": -4516260468148241000,
"line_mean": 30.8461538462,
"line_max": 168,
"alpha_frac": 0.6751207729,
"autogenerated": false,
"ratio": 2.6174920969441517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4292612869844152,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brett Bowman"
"""
import ConsensusCore as cc
from BarcodeAnalysis.utils import (arrayFromDataset,
asFloatFeature,
QUIVER_FEATURES)
class ConsensusCoreRead(object):
def __init__(self, bax, holeNum, start, end, chemistry):
self._bax = bax
self._holeNum = holeNum
self._offset = self._getOffset()
self._start = start
self._end = end
self._chemistry = chemistry
self._sequence = self._getSequence()
self._qvs = self._getQvFeatures()
self._sequenceFeatures = self._getSequenceFeatures()
self._read = cc.Read(self._sequenceFeatures, self.name, self.chemistry)
def _getOffset(self):
return self._bax._offsetsByHole[self._holeNum][0]
def _getSequenceFeature( self, feature ):
return arrayFromDataset(self._bax._basecallsGroup[feature],
self.absStart, self.absEnd)
def _getQvFeatures(self):
qvs = {}
for feature in QUIVER_FEATURES:
qvs[feature] = self._getSequenceFeature( feature )
return qvs
def _getSequence(self):
return self._getSequenceFeature( "Basecall" ).tostring()
def _getSequenceFeatures(self):
features = [self.sequence]
for feature in QUIVER_FEATURES:
features.append( asFloatFeature( self._qvs[feature] ) )
return cc.QvSequenceFeatures(*features)
@property
def movieName(self):
return self._bax.movieName
@property
def absStart(self):
return self._offset + self._start
@property
def absEnd(self):
return self._offset + self._end
@property
def name(self):
return "{0}/{1}/{2}_{3}".format(self.movieName, self._holeNum, self._start, self._end)
@property
def sequence(self):
return self._sequence
@property
def chemistry(self):
return self._chemistry
@property
def read(self):
return self._read
def _to_csv_segment(self, pos):
segment = ',' + self._sequence[pos]
for feature in QUIVER_FEATURES:
segment += ',' + str(self._qvs[feature][pos])
return segment
def __len__(self):
return len(self.sequence)
@property
def to_csv(self):
line = self.name
for i in range(len(self._sequence)):
line += self._to_csv_segment(i)
return line
def _makeSequenceFeatures( self, holeNum, start, end ):
absStart = zmwOffsetStart + start
absEnd = zmwOffsetStart + end
# Initialize the Seq features with the raw sequence
sequenceFeatures = [arrayFromDataset(bax._basecallsGroup["Basecall"],
absStart, absEnd).tostring()]
# Add each feature from the required feature set
for feature in REQ_FEATURES:
arrayFeature = arrayFromDataset(bax._basecallsGroup[feature], absStart, absEnd)
floatFeature = asFloatFeature( arrayFeature )
sequenceFeatures.append( floatFeature )
"""
| {
"repo_name": "bnbowman/BarcodeAnalysis",
"path": "BarcodeAnalysis/ConsensusCoreRead.py",
"copies": "1",
"size": "3102",
"license": "mit",
"hash": 6517154028440666000,
"line_mean": 32.3548387097,
"line_max": 94,
"alpha_frac": 0.6009026435,
"autogenerated": false,
"ratio": 4.018134715025907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023006323663295685,
"num_lines": 93
} |
__author__ = 'brett'
from monopyly import *
from monopyly.utility import Logger
from monopyly.game.board import Board
from monopyly.squares.property import Property
from monopyly.squares.property_set import PropertySet
from monopyly.squares.station import Station
from monopyly.squares.street import Street
from monopyly.squares.utility import Utility
import random
import operator
# --- Auction behaviour switches (note: "MORTAGED" typo is preserved; the
# --- name is referenced below). ----------------------------------------
DONT_BID_ON_AUCTIONS_WITH_MORTAGED_PROPS = True   # no serious bids while we hold mortgaged properties
DONT_BID_ON_AUCTIONS_WITH_CASH_LT_HWM = True      # no serious bids when cash is below the high-water mark
DONT_BID_ON_AUCTIONS_WITH_CASH_LT_RESERVE = True  # no serious bids when cash is below the reserve
DONT_MORTGAGE_ANY_PROPERTIES = False              # master switch disabling mortgaging entirely
PROPOSE_DEALS = True                              # master switch for proposing deals
#PROBABILITY_SQUARE_LANDING_FACTOR = 0.14
# Per-square landing-probability factors used when valuing properties.
PROBABILITY_SQUARE_LANDING_FACTOR_MAX = 0.05
PROBABILITY_SQUARE_LANDING_FACTOR_MIN = 0.03
# When cash drops below this fraction of the reserve, switch the turn's
# behaviour to selling properties (see start_of_turn).
CASH_RESERVE_FRACTION_SELL_TRIGGER=0.4
# PROBS_TO_12[k] = probability that two dice sum to k; indices 0 and 1
# are impossible rolls.
PROBS_TO_12 = [
    0.0,
    0.0,
    1.0 / 36.0, # 2
    2.0 / 36.0, # 3
    3.0 / 36.0, # 4
    4.0 / 36.0, # 5
    5.0 / 36.0, # 6
    6.0 / 36.0, # 7
    5.0 / 36.0, # 8
    4.0 / 36.0, # 9
    3.0 / 36.0, # 10
    2.0 / 36.0, # 11
    1.0 / 36.0, # 12
]
# Order in which property sets are raided when houses must be sold.
HOUSE_PROP_SET_SELL_ORDER = [
    PropertySet.BROWN,
    PropertySet.DARK_BLUE,
    PropertySet.LIGHT_BLUE,
    PropertySet.PURPLE,
    PropertySet.GREEN,
    PropertySet.YELLOW,
    PropertySet.ORANGE,
    PropertySet.RED,
]
# Per-turn behaviour flags (see Buffy.behaviour_for_turn).
BEHAVIOUR_NONE = 0
BEHAVIOUR_SELL_PROPERTY = 1
class PropertyRequestInfo:
    """Tracks how often another player has asked us for a property."""

    def __init__(self, prop, player):
        self.property = prop    # the property that was requested
        self.player = player    # the player who asked for it
        self.num_times = 0      # distinct turns on which it was requested
        self.last_turn = 0      # most recent turn it was requested on

    def update_for_turn(self, turn_num):
        """Record a request, counting at most one per turn."""
        if turn_num > self.last_turn:
            self.num_times += 1
            self.last_turn = turn_num
class DealProposalInfo:
    """Value pair tying a property to the price asked for it."""

    def __init__(self, prop, price):
        self.property = prop
        self.price = price
class Buffy(PlayerAIBase):
def __init__(self):
    """Initialise the bot's persistent (cross-game) state."""
    self.cash_reserve = 0                  # minimum cash we try to keep on hand
    self.high_water_mark = 0               # comfortable-cash threshold for spending
    self.num_turns = 0
    self.num_get_out_of_jail_cards = 0
    self.propose_deal_turn_min = 10        # do not propose deals before this turn...
    self.propose_deal_turn_max = 500       # ...nor after this one
    self.sum_life_of_bot = 500
    self.num_games_played = 1
    self.behaviour_for_turn = BEHAVIOUR_NONE
def get_name(self):
    """Name shown for this AI in the game."""
    return "Buffy"
def start_of_game(self):
    """Reset all per-game state; called once when a game begins."""
    Logger.log("# Start of Game.", Logger.INFO)
    self.num_turns = 0
    self.num_get_out_of_jail_cards = 0
    self.amount_to_raise = 0.0
    self.mortgaged_properties = []   # properties we currently hold mortgaged
    self.properties_requested = {}   # property name -> PropertyRequestInfo
def start_of_turn(self, game_state, player):
    '''
    Called when an AI's turn starts. All AIs receive this notification.
    No response is required.
    '''
    # Only act on our own turn.
    if player.ai != self:
        return
    self.num_turns += 1
    # Reset per-turn scratch state.
    self.turn_properties_in_deal = set()   # properties already in a deal this turn
    self.deals_proposed_this_turn = []
    self.cash_spent_in_turn = 0
    self.behaviour_for_turn = BEHAVIOUR_NONE
    self.deal_proposals_for_turn = [] # used if behaviour is to sell properties
    self.propose_deal_turn_num = 0
    # Re-estimate how much cash we want to keep on hand this turn.
    ( self.cash_reserve, self.high_water_mark) = self._calc_cash_reserve(game_state, player)
    if self.cash_reserve > player.state.cash:
        Logger.log("# {0}: SOT {1} - cash_reserve = {2}, HWM = {3}, Cash = {4}.".format(self.get_name(), self.num_turns, self.cash_reserve, self.high_water_mark, player.state.cash), Logger.INFO)
        # Deep below the reserve: switch to selling properties this turn.
        if int(self.cash_reserve * CASH_RESERVE_FRACTION_SELL_TRIGGER) >= player.state.cash:
            self.behaviour_for_turn = BEHAVIOUR_SELL_PROPERTY
    self.amount_to_raise = 0.0
    Logger.log("# Start of Turn {0} - cash_reserve = {1}, HWM = {2}.".format(self.num_turns, self.cash_reserve, self.high_water_mark), Logger.INFO)
    return
def player_landed_on_square(self, game_state, square, player):
    """All-AIs notification: log the square when the landing player is us."""
    if player.ai == self:
        Logger.log("# Landed on Square {0}".format(square), Logger.INFO)
def landed_on_unowned_property(self, game_state, player, property):
    """Buy the property iff we can pay and still stay above the cash reserve."""
    decision = PlayerAIBase.Action.DO_NOT_BUY
    act = "not buying"
    if player.state.cash > (self.cash_reserve + property.price):
        decision = PlayerAIBase.Action.BUY
        act = "buying"
        self.cash_spent_in_turn += property.price
        Logger.log("# {0}: Turn {1}, landed on unowned property and buying for {2}, cash = {3}".format(self.get_name(), self.num_turns, property.price, player.state.cash), Logger.INFO)
    Logger.log("# Landed on unowned property and {0}".format(act), Logger.INFO)
    return decision
def money_will_be_taken(self, player, amount):
    """Warning that *amount* is about to be charged.

    If the charge exceeds (or equals) our cash, record the shortfall
    plus £1 as money to raise via the later sell/mortgage callbacks; a
    large shortfall flips this turn into property-selling mode.
    """
    if amount >= player.state.cash:
        self.amount_to_raise = amount - player.state.cash + 1
        Logger.log("# {0}: Turn {1} - Money will be taken and amount to raise = {2}".format(self.get_name(), self.num_turns, self.amount_to_raise), Logger.INFO)
        if self.amount_to_raise >= 150:
            self.behaviour_for_turn = BEHAVIOUR_SELL_PROPERTY
def money_taken(self, player, amount):
    """Notification that money was taken; log it only when it concerns us."""
    if player.name == self.get_name():
        Logger.log("# Money taken : {0}".format(amount), Logger.INFO)
def money_given(self, player, amount):
    """Notification that we received money; shrink any outstanding shortfall."""
    if player.name != self.get_name():
        return
    if self.amount_to_raise > 0:
        self.amount_to_raise -= amount
        if self.amount_to_raise < 0:
            self.amount_to_raise = 0
    Logger.log("# {0}: Money given {1}, amount_to_raise: {2}".format(self.get_name(), amount, self.amount_to_raise), Logger.INFO)
def got_get_out_of_jail_free_card(self):
    """Count the Get Out Of Jail Free cards we are holding."""
    self.num_get_out_of_jail_cards += 1
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
    """Handle the 'Pay a £10 fine or take a Chance' card.

    Take a Chance when we are already raising cash (nothing to lose) or
    when we are flush enough to absorb a bad card; otherwise pay the
    safe £10 fine.
    """
    strapped = self.amount_to_raise > 0
    flush = player.state.cash > self.high_water_mark + 500
    if strapped or flush:
        return PlayerAIBase.Action.TAKE_A_CHANCE
    return PlayerAIBase.Action.PAY_TEN_POUND_FINE
def property_offered_for_auction(self, game_state, player, property):
    '''
    Called when a property is put up for auction.
    Properties are auctioned when a player lands on an unowned square but does
    not want to buy it. All players take part in the auction, including the
    player who landed on the square.
    The property will be sold to the highest bidder using the eBay rule,
    ie, for £1 more than the second-highest bid.
    Return the amount you bid. To put in a bid this must be a positive integer.
    Zero means that you are not bidding (it does not mean that you are bidding
    zero).
    The default behaviour is not to bid.

    NOTE(review): a second definition of this same method appears later
    in the class and silently replaces this one, so this body is dead
    code as the file stands -- confirm which implementation is intended.
    '''
    if player.ai is not self:
        Logger.log("# !!! ERROR player is NOT me in property_offered_for_auction")
    # While raising cash: lowball (half price + 1) or bid nearly all cash.
    if self.amount_to_raise > 0.0:
        if player.state.cash > (property.price / 2):
            return int((property.price / 2) + 1)
        elif player.state.cash > 0:
            return player.state.cash - 1
        return 0
    # Holding mortgaged properties: same lowball pattern if configured.
    if len(self.mortgaged_properties) > 0 and DONT_BID_ON_AUCTIONS_WITH_MORTAGED_PROPS:
        if player.state.cash > (property.price / 2):
            return int((property.price / 2) + 1)
        elif player.state.cash > 0:
            return player.state.cash - 1
        return 0
    # Cash below the reserve / high-water mark: lowball if configured.
    # NOTE(review): these two branches can return cash - 1, which is -1
    # when cash is 0 -- confirm the engine treats negative bids as no-bid.
    if player.state.cash < self.cash_reserve and DONT_BID_ON_AUCTIONS_WITH_CASH_LT_RESERVE:
        if player.state.cash > (property.price / 2):
            return int((property.price / 2) + 1)
        return player.state.cash - 1
    if player.state.cash < self.high_water_mark and DONT_BID_ON_AUCTIONS_WITH_CASH_LT_HWM:
        if player.state.cash > (property.price / 2):
            return int((property.price / 2) + 1)
        return player.state.cash - 1
    # Healthy finances: bid our valuation if we can afford it above reserve.
    price_to_bid = 0.0
    (bid_price, ask_price) = self._calc_value_of_properties(game_state, player, [ property ])
    if player.state.cash > bid_price + self.cash_reserve:
        price_to_bid = bid_price
    Logger.log("# {0}: Property being auctioned and bidding {1}".format(self.get_name(), price_to_bid), Logger.INFO)
    return price_to_bid
def auction_result(self, status, property, player, amount_paid):
    """Log the outcome of an auction that involves us.

    status is AUCTION_SUCCEEDED or AUCTION_FAILED; on failure *player*
    is None and *amount_paid* is 0.  Other players' results are ignored.
    """
    if player is None or player.name != self.get_name():
        return
    if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
        Logger.log("# {0}: Property {1} won at auction".format(self.get_name(), property), Logger.INFO)
    else:
        Logger.log("# Property lost at auction", Logger.INFO)
def build_houses(self, game_state, player):
    """Optionally build houses near the start of our turn.

    Returns a list of (Property, additional_houses) tuples; an empty
    list builds nothing.  We only build when there is no shortfall to
    raise, no mortgaged properties, and cash above the high-water mark.
    We then put one house on each street of the first buildable owned
    set we can afford while keeping the cash reserve intact (this keeps
    the set 'balanced', as the engine requires).
    """
    blocked = (self.amount_to_raise > 0.0
               or len(self.mortgaged_properties) > 0
               or player.state.cash < self.high_water_mark)
    if blocked:
        return []
    houses_to_build = []
    for owned_set in player.state.owned_unmortgaged_sets:
        # Stations/utilities (and fully hoteled sets) cannot take houses.
        if not owned_set.can_build_houses:
            continue
        # Price of one house on every property in the set.
        cost = owned_set.house_price * owned_set.number_of_properties
        if player.state.cash > (self.cash_reserve + cost):
            houses_to_build = [(p, 1) for p in owned_set.properties]
            break
    if houses_to_build:
        Logger.log("# {0}: Building the following houses: {1}".format(self.get_name(), str(houses_to_build)), Logger.INFO)
    return houses_to_build
def sell_houses(self, game_state, player):
    """Sell houses to raise cash when a debt must be paid.

    Called whenever rent/tax/fines are due, just before
    mortgage_properties.  Each house sold returns half its purchase
    price.  Property sets are raided in HOUSE_PROP_SET_SELL_ORDER, one
    house per street per pass so each set stays balanced.

    Returns a list of (street, houses_to_sell) tuples (Property objects);
    the default empty list sells nothing.

    Bug fix: the while condition was `num_houses < houses_sold`, which
    is false on entry (houses_sold starts at 0, num_houses >= 0), so
    the loop never ran and no house was ever sold.  It is now the
    intended `houses_sold < num_houses`.
    """
    houses_to_sell = []
    if self.amount_to_raise > 0.0:
        money_generated = 0
        for prop_set in HOUSE_PROP_SET_SELL_ORDER:
            (num_houses, owned_prop_list) = self._get_owned_houses_in_property_set(game_state, player, prop_set)
            # Houses to sell from each owned street in this set.
            num_house_list = [ 0 for p in owned_prop_list ]
            houses_sold = 0
            # Sell one house per street per pass until the set is empty
            # or we have raised enough.
            while houses_sold < num_houses and money_generated < self.amount_to_raise:
                for i in range(0, len(owned_prop_list)):
                    if num_house_list[i] < owned_prop_list[i].number_of_houses:
                        num_house_list[i] += 1
                        houses_sold += 1
                        money_generated += int(owned_prop_list[i].house_price / 2)
            if houses_sold > 0:
                for i in range(0, len(owned_prop_list)):
                    if num_house_list[i] > 0:
                        houses_to_sell.append( (owned_prop_list[i], num_house_list[i], ) )
        # Reduce the outstanding shortfall by what these sales raise.
        self.amount_to_raise -= money_generated
        if self.amount_to_raise < 0:
            self.amount_to_raise = 0.0
    if len(houses_to_sell) > 0:
        Logger.log("# {0}: Selling the following houses: {1}".format(self.get_name(), str(houses_to_sell)), Logger.INFO)
    return houses_to_sell
def _get_owned_houses_in_property_set(self, game_state, player, set_enum):
    """Return (total_house_count, owned_streets_with_houses) for *set_enum*."""
    all_props = game_state.board.get_properties_for_set(set_enum)
    owned_prop_list = [p for p in all_props
                       if p.owner == player and p.number_of_houses > 0]
    num_houses = sum(p.number_of_houses for p in owned_prop_list)
    return (num_houses, owned_prop_list)
def mortgage_properties(self, game_state, player):
    """Mortgage properties (board order) to cover an outstanding shortfall.

    Called before any debt is collected; each mortgage yields half the
    property's face value.  Mortgaging is skipped entirely when the
    global switch disables it, or when this turn's plan is to sell
    properties instead.  Returns the list of Property objects to
    mortgage (empty to do nothing).
    """
    if DONT_MORTGAGE_ANY_PROPERTIES:
        return []
    if self.behaviour_for_turn == BEHAVIOUR_SELL_PROPERTY:
        Logger.log("{0}: Behaviour is to sell properties - Not Mortgaging!".format(self.get_name()), Logger.INFO)
        return []
    properties_to_mortage = []
    if self.amount_to_raise > 0.0:
        money_generated = 0
        for sq in game_state.board.squares:
            is_ours = isinstance(sq, Property) and sq.owner == player
            if is_ours and sq.is_mortgaged == False:
                money_generated += int(sq.price / 2)
                properties_to_mortage.append(sq)
                if money_generated > self.amount_to_raise:
                    break
        self.amount_to_raise -= money_generated
        if self.amount_to_raise < 0.0:
            self.amount_to_raise = 0.0
    if len(properties_to_mortage) > 0:
        Logger.log("# {0}: Mortgaging the following properties: {1}".format(self.get_name(), str(properties_to_mortage)), Logger.INFO)
        self.mortgaged_properties.extend(properties_to_mortage)
    return properties_to_mortage
def unmortgage_properties(self, game_state, player):
    """Unmortgage properties, cheapest first, with cash spare above the reserve.

    Called near the start of our turn.  Unmortgaging costs half the
    face value plus a 10% premium; if we cannot cover everything we
    requested the whole transaction is rolled back by the engine.
    Returns the list of Property objects to unmortgage.

    Bug fix: the budgeted cost was `price * 0.5`, omitting the 10%
    premium the rules (and this docstring's contract) specify, so the
    bot could request unmortgages it cannot actually pay for and have
    the whole transaction aborted.  The cost is now
    `price * 0.5 * 1.1`.
    """
    props_to_unmortgage = []
    if len(self.mortgaged_properties) > 0:
        cash_to_spend = player.state.cash - self.cash_reserve
        mortgaged = sorted(self.mortgaged_properties, key = lambda p: p.price)
        while cash_to_spend > 0.0 and len(mortgaged) > 0:
            # Cost to unmortgage: half the face value plus 10% premium.
            mc = int(mortgaged[0].price * 0.5 * 1.1)
            cash_to_spend -= mc
            if cash_to_spend >= 0.0:
                props_to_unmortgage.append(mortgaged[0])
            mortgaged.pop(0)
        if len(props_to_unmortgage) > 0:
            Logger.log("# {0}: Unmortgaging: {1}".format(self.get_name(), str(props_to_unmortgage)), Logger.INFO)
            for prop in props_to_unmortgage:
                self.mortgaged_properties.remove(prop)
    return props_to_unmortgage
def get_out_of_jail(self, game_state, player):
    """Decide how to leave jail (card, pay £50, or stay).

    Leave early -- card first, then paying -- only while plenty of
    property (>= 8 squares) is still unowned and our cash clears the
    relevant threshold; otherwise stay put.
    """
    worth_leaving = (self._count_unowned_property(game_state) >= 8
                     and player.state.cash > self.cash_reserve)
    if worth_leaving:
        if self.num_get_out_of_jail_cards > 0:
            self.num_get_out_of_jail_cards -= 1
            return PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
        if player.state.cash > self.high_water_mark:
            return PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
    return PlayerAIBase.Action.STAY_IN_JAIL
def _does_player_own_set(self, player, property):
    """True when *player* owns (essentially all of) the property's set."""
    return any(owner_tuple[0] == player and owner_tuple[2] >= 0.99
               for owner_tuple in property.property_set.owners)
def _get_deal_proposals_for_property(self, game_state, player, property, propose_to_player):
    """Build three sale proposals for *property* at descending prices.

    Prices run from our ask price, through the midpoint, down to the
    bare shortfall we need to cover; returns the proposals as
    (max_price, mid_price, min_price).
    """
    floor_price = self.amount_to_raise
    (bid_price, ask_price) = self._calc_value_of_properties(game_state, player, [property])
    ceiling_price = ask_price
    midpoint_price = int((ceiling_price + floor_price) / 2.0)
    proposals = tuple(
        DealProposal(
            properties_offered=[property],
            minimum_cash_wanted=price,
            propose_to_player=propose_to_player
        )
        for price in (ceiling_price, midpoint_price, floor_price)
    )
    return proposals
def propose_deal(self, game_state, player):
    '''
    Called to allow the player to propose a deal.
    You return a DealProposal object.
    If you do not want to make a deal, return None.
    If you want to make a deal, you provide this information:
    - The player number of the player you are proposing the deal to
    - A list of properties offered
    - A list of properties wanted
    - Maximum cash offered as part of the deal
    - Minimum cash wanted as part of the deal.
    Properties offered and properties wanted are passed as lists of
    Property objects.
    If you offer money as part of the deal, set the cash wanted to zero
    and vice versa.
    Note that the cash limits will not be shown to the proposed-to player.
    When the deal is offered to them, they set their own limits for accepting
    the deal without seeing your limits. If the limits are acceptable to both
    players, the deal will be done at the halfway point.
    For example, Player1 proposes:
    Propose to: Player2
    Properties offered: Mayfair
    Properties wanted: (none)
    Maximum cash offered: 0
    Minimum cash wanted: 500
    Player2 accepts with these limits:
    Maximum cash offered: 1000
    Minimum cash wanted: 0
    The deal will be done with Player2 receiving Mayfair and paying £750
    to Player1.
    The only 'negotiation' is in the managing of cash along with the deal
    as discussed above. There is no negotiation about which properties are
    part of the deal. If a deal is rejected because it does not contain the
    right properties, another deal can be made at another time with different
    lists of properties.
    Example construction and return of a DealProposal object:
    return DealProposal(
    propose_to_player_number=2,
    properties_offered=[vine_street, bow_street],
    properties_wanted=[park_lane],
    maximum_cash_offered=200)
    The default is for no deal to be proposed.
    '''
    if not PROPOSE_DEALS:
        return None
    # Only deal during the configured turn window.
    if self.num_turns < self.propose_deal_turn_min or self.num_turns > self.propose_deal_turn_max:
        return None
    # --- Sell-property mode: offer our own properties to the players
    # --- who previously asked for them (see deal_proposed).
    if self.behaviour_for_turn == BEHAVIOUR_SELL_PROPERTY:
        self.propose_deal_turn_num += 1
        if self.propose_deal_turn_num == 1:
            # First call this turn: build the queue of sale proposals.
            self.deal_proposals_for_turn = []
            props_to_sell = []
            # Sorted by how often each property was requested; insert(0)
            # puts non-set members (cheaper to part with) at the front.
            prop_sell_order = sorted(self.properties_requested.values(), key=operator.attrgetter('num_times'))
            for prop_info in prop_sell_order:
                if not prop_info.property.is_mortgaged and prop_info.property.owner is player:
                    # if we own the set, put it at the back
                    if self._does_player_own_set(player, prop_info.property):
                        props_to_sell.append(prop_info)
                    else:
                        props_to_sell.insert(0, prop_info)
            # if we have no properties to sell, switch back to mortgage mode
            if len(props_to_sell) == 0:
                self.behaviour_for_turn = BEHAVIOUR_NONE
                return None
            if len(props_to_sell) == 1:
                # generate 3 different deal proposals
                # from offer price to amount_needed
                prop_info = props_to_sell[0]
                ( deal_proposal_max, deal_proposal_mid, deal_proposal_min ) = \
                    self._get_deal_proposals_for_property(game_state, player, prop_info.property, prop_info.player)
                self.deal_proposals_for_turn.append(deal_proposal_max)
                self.deal_proposals_for_turn.append(deal_proposal_mid)
                self.deal_proposals_for_turn.append(deal_proposal_min)
            elif len(props_to_sell) == 2:
                prop_info1 = props_to_sell[0]
                prop_info2 = props_to_sell[1]
                ( deal_proposal1_max, deal_proposal1_mid, deal_proposal1_min ) = \
                    self._get_deal_proposals_for_property(game_state, player, prop_info1.property, prop_info1.player)
                ( deal_proposal2_max, deal_proposal2_mid, deal_proposal2_min ) = \
                    self._get_deal_proposals_for_property(game_state, player, prop_info2.property, prop_info2.player)
                self.deal_proposals_for_turn.append(deal_proposal1_max)
                self.deal_proposals_for_turn.append(deal_proposal1_mid)
                # NOTE(review): property 2's mid-price proposal is queued
                # here while deal_proposal2_max / deal_proposal1_min are
                # never used -- confirm this mix is intentional.
                self.deal_proposals_for_turn.append(deal_proposal2_mid)
            elif len(props_to_sell) >= 2:
                # NOTE(review): only reachable when len >= 3 (the == 2
                # case is handled above) although the condition reads >= 2.
                prop_info1 = props_to_sell[0]
                prop_info2 = props_to_sell[1]
                prop_info3 = props_to_sell[2]
                ( deal_proposal1_max, deal_proposal1_mid, deal_proposal1_min ) = \
                    self._get_deal_proposals_for_property(game_state, player, prop_info1.property, prop_info1.player)
                ( deal_proposal2_max, deal_proposal2_mid, deal_proposal2_min ) = \
                    self._get_deal_proposals_for_property(game_state, player, prop_info2.property, prop_info2.player)
                ( deal_proposal3_max, deal_proposal3_mid, deal_proposal3_min ) = \
                    self._get_deal_proposals_for_property(game_state, player, prop_info3.property, prop_info3.player)
                self.deal_proposals_for_turn.append(deal_proposal1_mid)
                self.deal_proposals_for_turn.append(deal_proposal2_mid)
                self.deal_proposals_for_turn.append(deal_proposal3_mid)
        # Offer the queued proposals one per call; leave sell mode once
        # the queue is drained.
        if len(self.deal_proposals_for_turn) > 0:
            deal_proposal = self.deal_proposals_for_turn.pop(0)
            if len(self.deal_proposals_for_turn) == 0:
                self.behaviour_for_turn = BEHAVIOUR_NONE
            Logger.log("# {0}: Turn {1} - Selling property {2} for {3}".format(self.get_name(), self.num_turns, deal_proposal.properties_offered[0].name, deal_proposal.minimum_cash_wanted), Logger.INFO)
            return deal_proposal
        return None
    # TODO Potentially sell stuff if we need to
    if self.amount_to_raise > 0.0:
        return None
    if len(self.mortgaged_properties) > 0:
        return None
    # --- Buying mode: find another player's property worth offering for.
    properties_we_like = []
    if len(player.state.properties) == 0:
        # hell, we'll bid on anything!
        board = game_state.board
        for sq in board.squares:
            if isinstance(sq, Property) and sq.owner is not None and sq.owner != player:
                properties_we_like.append(sq.name)
    else:
        # OK, pick out some good properties to bid on: everything that
        # completes a set we already have a stake in.
        properties_we_like = set()
        for p in player.state.properties:
            properties_we_like.update(p.property_set.properties)
        properties_we_like = [ p.name for p in properties_we_like ]
    Logger.log("# Propose deal called!", Logger.INFO)
    deal_proposal = DealProposal()
    random.shuffle(properties_we_like)
    # We check to see if any of the properties we like is owned
    # by another player...
    for property_name in properties_we_like:
        property = game_state.board.get_square_by_name(property_name)
        if (property.owner is player or property.owner is None):
            # The property is either not owned, or owned by us...
            continue
        if property_name in self.turn_properties_in_deal:
            continue
        # The property is owned by another player, so we make them an
        # offer for it...
        (bid_price, ask_price) = self._calc_value_of_properties(game_state, player, [ property ])
        price_offered = bid_price
        if player.state.cash > price_offered + self.high_water_mark:
            self.turn_properties_in_deal.add(property_name)
            return DealProposal(
                properties_wanted=[property],
                maximum_cash_offered=price_offered,
                propose_to_player=property.owner)
    return None
def deal_proposed(self, game_state, player, deal_proposal):
    '''
    Called when a deal is proposed by another player.
    '''
    # Remember every property another player asks us for; this feeds the
    # sell-property behaviour in propose_deal.
    if len(deal_proposal.properties_wanted) > 0:
        for p in deal_proposal.properties_wanted:
            prop_info = None
            if p.name in self.properties_requested:
                prop_info = self.properties_requested[p.name]
            else:
                prop_info = PropertyRequestInfo(p, deal_proposal.proposed_by_player)
                self.properties_requested[p.name] = prop_info
            prop_info.update_for_turn(self.num_turns)
    #Logger.log("##### DEAL_PROPOSED: player {0}".format(deal_proposal.proposed_by_player.name), Logger.INFO)
    # Pure sale request (they want our properties, offering none of
    # theirs): accept at our ask price, floored at any shortfall we owe.
    if len(deal_proposal.properties_wanted) > 0 and len(deal_proposal.properties_offered) == 0:
        (bid_price, ask_price) = self._calc_value_of_properties(game_state, player, deal_proposal.properties_wanted)
        if ask_price < self.amount_to_raise:
            ask_price = self.amount_to_raise
        # NOTE(review): unlike every other call in this class this log
        # omits the Logger.INFO level argument -- confirm Logger.log's default.
        Logger.log("# Accepted proposed deal of wanted properties {0} for {1}".format(str(deal_proposal.properties_wanted), ask_price))
        return DealResponse(
            action=DealResponse.Action.ACCEPT,
            minimum_cash_wanted = ask_price
        )
    # While raising cash we do not buy anything.
    if self.amount_to_raise > 0.0:
        return DealResponse(DealResponse.Action.REJECT)
    # We only accept deals for single properties wanted from us...
    if len(deal_proposal.properties_offered) > 0 and len(deal_proposal.properties_wanted) == 0:
        (bid_price, ask_price) = self._calc_value_of_properties(game_state, player, deal_proposal.properties_offered)
        if player.state.cash > bid_price + self.high_water_mark:
            Logger.log("# Accepted proposed deal of offered properties {0} for {1}".format(str(deal_proposal.properties_offered), bid_price))
            return DealResponse(
                action=DealResponse.Action.ACCEPT,
                maximum_cash_offered = bid_price
            )
    return DealResponse(DealResponse.Action.REJECT)
def deal_result(self, deal_info):
    """Notification of how a deal we were involved in ended.

    deal_info is a PlayerAIBase.DealInfo 'enum'.  On success, any
    queued sell-property proposals are discarded and the turn's
    behaviour reverts to normal.
    """
    if deal_info != PlayerAIBase.DealInfo.SUCCEEDED:
        return
    Logger.log("# {0}: Deal Result: {1}".format(self.get_name(), 'SUCCEEDED'), Logger.INFO)
    if len(self.deal_proposals_for_turn) > 0:
        self.deal_proposals_for_turn = []
        self.behaviour_for_turn = BEHAVIOUR_NONE
def deal_completed(self, deal_result):
    """All-players notification with the details of a completed deal.

    deal_result is a DealResult object; note that
    cash_transferred_from_proposer_to_proposee can be negative when
    cash flowed toward the proposer.  Nothing to do here.
    """
    pass
def property_offered_for_auction(self, game_state, player, property):
    # NOTE(review): this is a SECOND definition of
    # property_offered_for_auction in the same class; it silently
    # replaces the far more elaborate version defined earlier.  Confirm
    # which implementation is intended and remove the other.
    # Never bid while we still have a shortfall to cover.
    if self.amount_to_raise > 0.0:
        return 0
    (bid_price, ask_price) = self._calc_value_of_properties(game_state, player, [ property ])
    if bid_price < self.high_water_mark:
        # NOTE(review): bidding (high_water_mark - cash_reserve) whenever
        # our valuation sits between reserve and HWM ignores the
        # property's own value -- confirm this is intentional.
        if bid_price >= self.cash_reserve:
            bid_price = (self.high_water_mark - self.cash_reserve)
        else:
            bid_price = 0
    return bid_price
def players_birthday(self):
    """Greeting returned for the 'It is your birthday' event."""
    return "Happy Birthday!"
def _calc_reserve_buffers(self):
    """Return (cash_reserve_buffer, hwm_buffer) safety margins.

    Both are quadratic functions of the turn number, with the turn
    count capped at 90 so the buffers stay constant afterwards.
    """
    t = min(self.num_turns, 90)
    cash_reserve_buffer = int((25 * t) - (0.25 * t * t))
    hwm_buffer = int((50 * t) - (0.5 * t * t))
    return (cash_reserve_buffer, hwm_buffer)
def _calc_cash_reserve(self, game_state, player):
    """Estimate (cash_reserve, high_water_mark) for the current position.

    The reserve approximates one turn's expected outgoings from the
    current square: probability-weighted rents, the expected
    Chance-card penalty, the expected £50 jail cost, and expected tax,
    plus a turn-dependent safety buffer.  The high-water mark uses the
    same expectation with a larger buffer, and is raised to at least
    the sum of the two highest rents within reach (plus buffer) so one
    unlucky landing cannot wipe us out.

    Cleanup: removed locals that were initialised but never used
    (reserve, num_owned_properties, num_owned_stations,
    num_owned_utilities, jail_reserve) and the redundant
    pre-initialisation of rents_times_probs.
    """
    ( rents_times_probs, chance_penalty, all_rents, go_to_jail_prob, tax_penalty ) = \
        self._calc_expected_cost_of_turn(game_state, player)
    (cash_reserve_buffer, hwm_buffer) = self._calc_reserve_buffers()
    expected_cost = int(rents_times_probs + chance_penalty + (go_to_jail_prob * 50.0) + tax_penalty)
    cash_reserve = expected_cost + cash_reserve_buffer
    high_water_mark = expected_cost + hwm_buffer
    # Ensure the HWM covers the one or two worst rents we might hit.
    all_rents.sort(reverse = True)
    if len(all_rents) > 0:
        highest_rent = all_rents[0] + hwm_buffer
        if len(all_rents) > 1:
            highest_rent += all_rents[1]
        if high_water_mark < highest_rent:
            high_water_mark = highest_rent
    return ( cash_reserve, high_water_mark )
def _calc_rent_on_station(self, game_state, station):
    """Rent currently due on *station*: 25/50/100/200 for 1-4 owned stations."""
    board = game_state.board
    owned_stations = board.get_property_set(PropertySet.STATION).intersection(station.owner.state.properties)
    rent_by_count = {1: 25, 2: 50, 3: 100, 4: 200}
    return rent_by_count.get(len(owned_stations), 0)
def _calc_prob_rent_on_utility(self, game_state, uty):
    """Expected rent on *uty*: dice-sum expectation times 4 (one utility
    owned) or times 10 (both owned); 0.0 otherwise."""
    board = game_state.board
    owned_utilities = board.get_property_set(PropertySet.UTILITY).intersection(uty.owner.state.properties)
    multiplier = {1: 4, 2: 10}.get(len(owned_utilities), 0)
    if multiplier == 0:
        return 0.0
    return sum(PROBS_TO_12[i] * multiplier for i in range(2, len(PROBS_TO_12)))
def _calc_chance_penalty(self, player):
    """Expected cost of drawing a Chance card (16 cards, drawn uniformly).

    Accounts for the per-house repairs card (£25 per house) and the
    £15, £20 and £150 fixed-cost cards.

    Bug fix: the house count used plain assignment inside the loop, so
    only the last street's houses were counted; it now accumulates
    across all owned streets.
    """
    num_houses = 0
    for p in player.state.properties:
        if isinstance(p, Street):
            num_houses += p.number_of_houses
    penalty = 0.0
    penalty += (1.0 / 16.0) * num_houses * 25.0
    penalty += (1.0 / 16.0) * 15.0
    penalty += (1.0 / 16.0) * 20.0
    penalty += (1.0 / 16.0) * 150.0
    return penalty
def _calc_expected_cost_of_turn(self, game_state, player, cur_square = None, cur_square_prob = 1.0):
    """Expected costs of one dice roll starting from *cur_square*.

    Returns (rents_times_probs, chance_penalty, all_rents,
    go_to_jail_prob, tax_penalty): probability-weighted rent owed to
    other players, expected Chance penalty, the list of raw rents
    within reach, the probability of hitting Go To Jail, and expected
    tax.  The top-level call also recurses once per possible double to
    approximate the follow-up roll.

    Bug fixes:
    - the doubles loop computed the landing square with `dice_roll`
      (left over from the previous loop, always 12) instead of
      `double_roll`, so every recursive call started from the same
      square; it now advances by `double_roll`.
    - the doubles guard was `cur_square_prob < 0.5`, which made the
      branch unreachable from the top-level call (probability 1.0) and
      would recurse without bound if it ever fired (recursive calls use
      probability 1/36 < 0.5); the guard is now `>= 0.5` so only the
      top-level call expands doubles.

    NOTE(review): chance_penalty is accumulated without the dice
    probability weight (the 1/16 card odds are inside
    _calc_chance_penalty), and the recursive calls' go_to_jail_prob
    (tup[3]) is discarded -- confirm both are intentional.
    """
    if cur_square is None:
        cur_square = player.state.square
    rents_times_probs = 0.0
    chance_penalty = 0.0
    tax_penalty = 0.0
    go_to_jail_prob = 0.0
    all_rents = []
    for dice_roll in range(0, len(PROBS_TO_12)):
        # Wrap the landing square around the end of the board.
        bd = cur_square + dice_roll
        if bd >= Board.NUMBER_OF_SQUARES:
            bd -= Board.NUMBER_OF_SQUARES
        sq = game_state.board.squares[bd]
        if sq.name == Square.Name.GO_TO_JAIL:
            go_to_jail_prob = PROBS_TO_12[dice_roll] * cur_square_prob
        elif isinstance(sq, Street):
            if sq.owner is not None and sq.owner != player:
                # calculate the current rent
                rent = sq.calculate_rent(None, player)
                all_rents.append(rent)
                # should maybe calculate the rent with 1 more house added...
                rent_prob = (rent * PROBS_TO_12[dice_roll] * cur_square_prob)
                rents_times_probs += rent_prob
        elif isinstance(sq, Station):
            if sq.owner is not None and sq.owner != player:
                # calculate the current rent
                rent = self._calc_rent_on_station(game_state, sq)
                all_rents.append(rent)
                rent_prob = (rent * PROBS_TO_12[dice_roll] * cur_square_prob)
                rents_times_probs += rent_prob
        elif isinstance(sq, Utility):
            if sq.owner is not None and sq.owner != player:
                # calculate the current rent
                rent = self._calc_prob_rent_on_utility(game_state, sq)
                all_rents.append(rent)
                rent_prob = (rent * PROBS_TO_12[dice_roll] * cur_square_prob)
                rents_times_probs += rent_prob
        elif isinstance(sq, Chance):
            chance_penalty += self._calc_chance_penalty(player)
        elif isinstance(sq, Tax):
            tax_penalty += (sq.tax * PROBS_TO_12[dice_roll] * cur_square_prob)
    # Only the top-level call expands the doubles follow-up roll.
    if cur_square_prob >= 0.5:
        for double_roll in [ 2, 4, 6, 8, 10, 12]:
            bd = cur_square + double_roll
            if bd >= Board.NUMBER_OF_SQUARES:
                bd -= Board.NUMBER_OF_SQUARES
            tup = self._calc_expected_cost_of_turn(game_state, player, bd, 1.0/36.0)
            rents_times_probs += tup[0]
            chance_penalty += tup[1]
            tax_penalty += tup[4]
    return ( rents_times_probs, chance_penalty, all_rents, go_to_jail_prob, tax_penalty )
def _calc_value_of_properties(self, game_state, player, properties):
    """Estimate what this bot should pay/ask for a set of properties.

    Returns a (bid_price, ask_price) tuple of ints. The estimate blends an
    expected-future-rent valuation with the guaranteed mortgage value as the
    game approaches its end.
    """
    # The value of the street is (roughly)
    # the number of players * expected_future_rent - expected_future_cost
    max_rent = 0.0
    min_rent = 0.0
    price = 0.0
    house_price_cost = 0.0
    min_value = 0.0   # guaranteed floor: mortgage value plus half house cost
    for sq in properties:
        price += sq.price
        min_value += sq.mortgage_value
        # How many properties of this square's set we already own — owning
        # more of a set makes building on it more likely.
        num_props_owned = 0
        for op in sq.property_set.properties:
            if op.owner is not None and op.owner == player:
                num_props_owned += 1
        prob_building = 0.1
        if num_props_owned > 0:
            prob_building = (0.20 * num_props_owned)
        # Late in the (expected) game life there is no time left to build.
        if self.num_turns > self._average_life_of_bot() - 1:
            prob_building = 0.0
        else:
            # NOTE(review): on Python 2 `self.num_turns / self._average_life_of_bot()`
            # is integer division (usually 0), making this factor 1.0; on
            # Python 3 it is true division — confirm the intended runtime.
            prob_building *= (1.0 - float(self.num_turns / self._average_life_of_bot()))
        if isinstance(sq, Street):
            if len(sq.rents) > 0:
                # Interpolate between the unimproved rent and the top
                # (fully-built) rent by the probability of building.
                top_rent = sq.rents[len(sq.rents)-1]
                max_rent += (sq.rents[0] + ((top_rent - sq.rents[0]) * prob_building))
                min_rent += sq.rents[0]
                house_price_cost += (sq.house_price * ((5 - sq.number_of_houses) * prob_building))
                min_value += int(sq.house_price/2.0 * sq.number_of_houses)
        elif isinstance(sq, Utility):
            # Utilities pay 4x or 10x the dice roll; 4.0/10.0 are rough
            # per-landing expectations used as bounds.
            min_rent += 4.0
            max_rent += (4.0 + (6.0 * prob_building))
        elif isinstance(sq, Station):
            # Station rent ranges from 25 (one owned) to 200 (all four).
            min_rent += 25.0
            max_rent += (25 + (175.0 * prob_building))
    # 500 is the assumed maximum game length in turns.
    remaining_turns = 500 - self.num_turns
    if remaining_turns < 0:
        # this shouldn't happen!
        remaining_turns = -remaining_turns
    # no houses
    min_expected_value = remaining_turns * (len(game_state.players)-1) * min_rent * PROBABILITY_SQUARE_LANDING_FACTOR_MIN
    max_expected_value = remaining_turns * (len(game_state.players)-1) * max_rent * PROBABILITY_SQUARE_LANDING_FACTOR_MAX - house_price_cost
    if max_expected_value < min_expected_value:
        max_expected_value = min_expected_value
    expected_value = (min_expected_value + max_expected_value) / 2.0
    # Blend toward the mortgage floor as the game runs down.
    factor = float(remaining_turns / 500.0)
    fair_price = (expected_value * factor) + (min_value * (1.0 - factor))
    Logger.log("### (minp, maxp, fair, min) = ({0}, {1}, {2}, {3})".format(min_expected_value, max_expected_value, fair_price, min_value), Logger.INFO)
    bid_price = min_expected_value
    ask_price = max_expected_value
    # Never bid/ask below the guaranteed mortgage value.
    if bid_price < min_value:
        bid_price = min_value + 10
    if ask_price < min_value:
        ask_price = min_value * 2.0
    bid_price = int(bid_price)
    ask_price = int(ask_price)
    # NOTE(review): unlike the call above, no log-level argument is passed
    # here — confirm Logger.log's default level is acceptable.
    Logger.log("*** Calculated value for {0} properties of {1}, {2}".format(len(properties), bid_price, ask_price))
    return (bid_price, ask_price)
def _count_unowned_property(self, game_state):
    """Return how many purchasable squares on the board have no owner yet."""
    return sum(
        1
        for sq in game_state.board.squares
        if isinstance(sq, Property) and sq.owner is None
    )
def player_went_bankrupt(self, player):
    """Log our own bankruptcy; other players' bankruptcies are ignored."""
    if player.name != self.get_name():
        return
    Logger.log(
        "# {0}: We went bankrupt at turn {1} - cash_reserve = {2}, HWM = {3}, Cash = {4}.".format(
            self.get_name(), self.num_turns, self.cash_reserve,
            self.high_water_mark, player.state.cash),
        Logger.INFO)
def game_over(self, winner, maximum_rounds_played):
    """Record this game's length and log the running average bot lifetime."""
    self.sum_life_of_bot += self.num_turns
    self.num_games_played += 1
    avg_life = float(self.sum_life_of_bot / self.num_games_played)
    Logger.log(
        "# {0}: GAME OVER at turn {1}. Average life of bot is {2}".format(
            self.get_name(), self.num_turns, avg_life),
        Logger.INFO)
def _average_life_of_bot(self):
    """Average observed game length in turns, floored at 50 as a safe default."""
    observed = float(self.sum_life_of_bot) / float(self.num_games_played)
    return max(50, int(observed))
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Brett Hutley/buffy.py",
"copies": "1",
"size": "45035",
"license": "mit",
"hash": -8352565394576279000,
"line_mean": 37.25913339,
"line_max": 219,
"alpha_frac": 0.5828429304,
"autogenerated": false,
"ratio": 3.6706064558200198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47534493862200194,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BrianAguirre'
from DataStructures.Lists.Itr import Itr
from DataStructures.Lists.Itr import Node
class List:
    """Doubly linked list framed by head/tail sentinel nodes.

    BUG FIX: the original kept node_list, head, tail, itr and size as CLASS
    attributes, so every List instance shared one chain (and `size` started
    at -2). All state now lives on the instance.
    """
    def __init__(self):
        self.length = 0
        self.size = 0                    # number of data nodes (sentinels excluded)
        self.head = Node()               # sentinel before the first element
        self.tail = Node()               # sentinel after the last element
        self.head.next = self.tail
        self.tail.prev = self.head
        self.node_list = [self.head, self.tail]   # flat bookkeeping list, chain order
        self.itr = Itr(self.head)
    def insert_end(self, new_val):
        """Append a new node holding new_val just before the tail sentinel."""
        new_node = Node()
        new_node.data = new_val
        # Keep node_list in chain order: data nodes sit between the sentinels.
        self.node_list.insert(len(self.node_list) - 1, new_node)
        self.tail.prev.next = new_node
        new_node.prev = self.tail.prev
        new_node.next = self.tail
        self.tail.prev = new_node
        self.size += 1
    def insert_front(self, n):
        """Insert node n just after the head sentinel.

        NOTE(review): the original only added n to node_list and never linked
        it into the chain; n is assumed to be a Node — confirm with callers.
        """
        self.node_list.insert(1, n)
        n.next = self.head.next
        n.prev = self.head
        self.head.next.prev = n
        self.head.next = n
        self.size += 1
    #GET METHODS:
    def get_first(self):
        """Return the first data node (or the tail sentinel when empty)."""
        return self.head.next
    def get_last(self):
        """Return the last data node (or the head sentinel when empty)."""
        return self.tail.prev
    def get_itr(self):
        return self.itr
    def move_itr_first(self):
        # NOTE(review): mirrors the original, which rebinds self.itr to a
        # Node (not an Itr) — after this call get_itr() returns a Node.
        self.itr = self.head.next
    def move_itr_last(self):
        self.itr = self.tail.prev
    def print_list(self):
        """Print every element's data from first to last."""
        self.move_itr_first()
        while self.itr != self.tail:
            print(self.itr.get_data())
            self.itr = self.itr.next
| {
"repo_name": "brianaguirre/SampleCodingInterviews",
"path": "DataStructures/Lists/List.py",
"copies": "1",
"size": "1311",
"license": "mit",
"hash": -2344312290060280300,
"line_mean": 17.7285714286,
"line_max": 50,
"alpha_frac": 0.5675057208,
"autogenerated": false,
"ratio": 3.1820388349514563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4249544555751456,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BrianAguirre'
__twitter__ = 'bnap48'
'''
REQUIREMENTS:
Given four int numbers, calculate the latest valid time you can write out of them.
EX: (1, 2, 3, 4) -> 23:41
It has to be valid. If no valid time can be made with the four numbers, return 'NO SOLUTION'
Print should be in format AB:CD, where AB and CD are both string numbers.
Assume numbers are positive integers.
Thus A, B, C, D >=0
NOTE: Don't worry about time or memory complexity, concentrate on correctness.
'''
#Consider the following brute force combination:
'''
AB CD
AB DC
AC BD
AC DB
AD BC
AD CB
BA CD
BA DC
BC AD
BC DA
BD AC
BD CA
CA BD
CA DB
CB AD
CB DA
CD AB
CD BA
DA BC
DA CB
DB AC
DB CA
DC AB
DC BA
'''
def lateTimeCalculator(A, B, C, D):
    """Return the latest valid 24-hour time writable with digits A, B, C, D.

    @param A, B, C, D: non-negative single digits.
    @return: the time as a zero-padded "HH:MM" string, or "NO SOLUTION." when
        no arrangement yields a valid time (hours < 24, minutes < 60).

    The original enumerated all 24 digit orderings by hand; permutations
    covers the same search space. BUG FIX: hours and minutes are now
    zero-padded, so e.g. (0,0,0,0) yields "00:00" (required AB:CD format)
    instead of "0:0".
    """
    from itertools import permutations

    best = None
    for h1, h2, m1, m2 in permutations((A, B, C, D)):
        hours = h1 * 10 + h2
        minutes = m1 * 10 + m2
        if hours < 24 and minutes < 60:
            candidate = (hours, minutes)
            if best is None or candidate > best:
                best = candidate
    if best is None:
        return "NO SOLUTION."
    return "{0:02d}:{1:02d}".format(best[0], best[1])
# Quick manual checks of representative digit sets.
for _digits in ((2, 2, 3, 5), (1, 1, 2, 4), (1, 1, 1, 1), (9, 9, 5, 4)):
    print(lateTimeCalculator(*_digits))
print(lateTimeCalculator(0,0,0,0)) | {
"repo_name": "brianaguirre/SampleCodingInterviews",
"path": "ValidLateTimes.py",
"copies": "1",
"size": "5176",
"license": "mit",
"hash": 397480870556093630,
"line_mean": 22.9675925926,
"line_max": 95,
"alpha_frac": 0.4333462133,
"autogenerated": false,
"ratio": 3.3675992192582953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9276093081372201,
"avg_score": 0.004970470237218867,
"num_lines": 216
} |
__author__ = 'briana'
import sys
import os
import math
import skimage.io
import skimage.exposure
import numpy as np
# Mean exoatmospheric solar spectral irradiance per sensor and band, keyed by
# Landsat scene-ID prefix (LT4/LT5 = Landsat 4/5 TM, LE7 = Landsat 7 ETM+)
# and then by band number. Presumably these are the ESUN values in
# W/(m^2 * um) from the Landsat handbooks — TODO confirm against the tables.
SOLAR_IRRADIANCE = {
    'LT4': {5: 214.700, 4: 1033.000, 3: 1554.000},
    'LT5': {5: 214.900, 4: 1036.000, 3: 1551.000},
    'LE7': {5: 225.700, 4: 1044.000, 3: 1547.000}
}
def get_value_from_file(filename, content):
    """Return the float value of the first metadata line containing *content*.

    Lines are expected in Landsat MTL "KEY = value" form. Returns None when
    no line matches. Matching is a substring test, so callers must pass a key
    that cannot be a prefix of another key on the same file.

    BUG FIX: the original iterated a bare open() and leaked the file handle;
    the with-block closes it deterministically.
    """
    with open(filename, 'r') as f:
        for line in f:
            if content in line:
                return float(line.split('=')[1].strip())
    return None
def process_scene(sceneID, sun_elevation, d):
    """Convert raw Landsat DNs (bands 5/4/3) to top-of-atmosphere reflectance
    and write one colour-balanced calibrated TIF per band.

    @param sceneID: Landsat scene ID; files are read from temp/<sceneID>/.
    @param sun_elevation: sun elevation angle in degrees (string or number).
    @param d: earth-sun distance term (string or number).
    """
    sun_elevation = float(sun_elevation)
    d = float(d)
    bands = {5: [], 4: [], 3: []}
    metadata = os.path.join('temp', sceneID, "{sceneID}_MTL.txt".format(sceneID=sceneID))
    for band in bands:
        filename = os.path.join('temp', sceneID, "{sceneID}_B{band}.TIF".format(sceneID=sceneID, band=band))
        im = skimage.io.imread(filename)
        #
        #im = transform.resize(im, (500, 500))
        solar_irradiance = SOLAR_IRRADIANCE[sceneID[0:3]][band]
        gain = get_value_from_file(metadata, "RADIANCE_MULT_BAND_{}".format(band))
        bias = get_value_from_file(metadata, "RADIANCE_ADD_BAND_{}".format(band))
        # Cosine of the solar zenith angle (zenith = 90 - elevation).
        sun_zenith_angle = math.cos(math.radians(90 - sun_elevation))
        #convert calibrated numbers back to radiance,
        radiance = im * gain + bias
        # TOA reflectance = pi * d * L / (ESUN * cos(theta_z)).
        # NOTE(review): the standard formula uses d squared — presumably the
        # caller passes d already squared; TODO confirm.
        reflectance = math.pi * d * radiance / (solar_irradiance * sun_zenith_angle)
        #make sure all values are positive
        if np.min(reflectance.flat) < 0:
            reflectance = reflectance - np.min(reflectance.flat)
        # Gamma-stretch for display.
        gamma = 1.2
        final = 255 * reflectance**(1/gamma)
        bands[band] = final
    #merge and scale bands to 0-255 together to maintain color balance
    # BUG FIX: np.float (an alias for the builtin float, i.e. float64) was
    # removed in NumPy 1.24; np.float64 is the identical dtype.
    img = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.float64)
    img[:, :, 0] = bands[5]
    img[:, :, 1] = bands[4]
    img[:, :, 2] = bands[3]
    img = skimage.exposure.rescale_intensity(img)
    skimage.io.imsave(os.path.join('temp', sceneID, "{sceneID}_B5_calibrated.TIF".format(sceneID=sceneID)), img[:, :, 0])
    skimage.io.imsave(os.path.join('temp', sceneID, "{sceneID}_B4_calibrated.TIF".format(sceneID=sceneID)), img[:, :, 1])
    skimage.io.imsave(os.path.join('temp', sceneID, "{sceneID}_B3_calibrated.TIF".format(sceneID=sceneID)), img[:, :, 2])
if __name__ == "__main__":
process_scene(*sys.argv[1:])
#process_scene(*['LE70400372000195EDC00', '64.97591318', '1.0165049']) | {
"repo_name": "zooniverse/kelp",
"path": "import-pipeline/color_calibration.py",
"copies": "1",
"size": "2418",
"license": "apache-2.0",
"hash": -2871293246245683700,
"line_mean": 33.0704225352,
"line_max": 121,
"alpha_frac": 0.6162117452,
"autogenerated": false,
"ratio": 2.959608323133415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40758200683334145,
"avg_score": null,
"num_lines": null
} |
from Devices.Input import Input
from Devices.Timer import Timer
from Devices.AnalogInput import AnalogInput
from Devices.Output import Output
class DeviceManager:
    """Registry of named input and output devices.

    Inputs and outputs live in separate name->device maps. The add* helpers
    refuse to overwrite an existing registration; read/turnOn/turnOff refuse
    to operate on unknown names.
    """
    def __init__(self):
        self.inputs = {}
        self.outputs = {}
    def _ensure_unregistered(self, registry, name, kind):
        # Shared duplicate-name guard for all the add* helpers below.
        if name in registry:
            raise KeyError('Cannot create device with name %s because %s with that name already exists' % (name, kind))
    def addSimpleInput(self, name, location, invert = False):
        """Register a plain digital input."""
        self._ensure_unregistered(self.inputs, name, 'input')
        self.inputs[name] = Input(name, location, invert)
    def addTimer(self, name, interval = 's'):
        """Register a timer pseudo-input ticking at the given interval."""
        self._ensure_unregistered(self.inputs, name, 'input')
        self.inputs[name] = Timer(name, interval)
    def addAnalogInput(self, name, location):
        """Register an analog input."""
        self._ensure_unregistered(self.inputs, name, 'input')
        self.inputs[name] = AnalogInput(name, location)
    def addOutput(self, name, location, invert = False):
        """Register an output."""
        self._ensure_unregistered(self.outputs, name, 'output')
        self.outputs[name] = Output(name, location, invert)
    def read(self, name):
        """Return the current value of the named input."""
        if name not in self.inputs:
            raise KeyError('Cannot find input with name %s, unable to read' % name)
        return self.inputs[name].read()
    def turnOn(self, name):
        """Switch the named output on."""
        if name not in self.outputs:
            raise KeyError('Cannot find output with name %s, unable to turn on' % name)
        self.outputs[name].on()
    def turnOff(self, name):
        """Switch the named output off."""
        if name not in self.outputs:
            raise KeyError('Cannot find output with name %s, unable to turn off' % name)
        self.outputs[name].off()
| {
"repo_name": "dillmann/rscs",
"path": "lib/DeviceManager.py",
"copies": "1",
"size": "1683",
"license": "mit",
"hash": -9088539949999225000,
"line_mean": 35.5869565217,
"line_max": 106,
"alpha_frac": 0.7272727273,
"autogenerated": false,
"ratio": 3.455852156057495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4683124883357495,
"avg_score": null,
"num_lines": null
} |
import re
class Evaluator:
    """Evaluates simple condition strings such as '"device name" > 42'
    against the current reading of the quoted input device."""
    # devices should be a DeviceManager Object
    def __init__(self, devices):
        self.devices = devices
    def evaluate(self, condition):
        """Substitute the quoted device's reading into condition and return
        the boolean result of the comparison.

        Raises ValueError for malformed conditions and KeyError for unknown
        devices. (BUG FIX: the original wrapped the legality check in a bare
        except that re-raised an undefined name `e`, referenced the
        misspelled `devicename`, evaluated the undefined `evalCondition`,
        and its checkLegalExpression was missing `self` and a colon.)
        """
        # Raises ValueError unless condition looks like "<name>" <op> <number>.
        self.checkLegalExpression(condition)
        # extract device name from quotes in condition
        deviceNameExtract = re.findall('"([^"]*)"', condition)
        if not len(deviceNameExtract):
            raise ValueError('Unable to extract device name from condition')
        deviceName = deviceNameExtract[0]
        if not self.devices.hasInput(deviceName):
            raise KeyError('Cannot execute conditions. Input by name %s does not exist' % deviceName)
        # Remove device name and replace with the input value; str() because
        # re.sub requires a string replacement and readings may be numeric.
        inputVal = self.devices.read(deviceName)
        condition = re.sub(r'"([^"]*)"', str(inputVal), condition)
        # eval() is acceptable only because checkLegalExpression has already
        # restricted the input to '"name" <op> <number>'.
        return eval(condition)
    def checkLegalExpression(self, condition):
        """Raise ValueError unless condition matches "<device>" <op> <number>."""
        # [A-Za-z] instead of the original [A-z], which also matched [ \ ] ^ _ `.
        rg = r'\"[A-Za-z 0-9\-_]+\" [!=<>]{1,2} [0-9.]'
        matchObj = re.match(rg, condition)
        if not matchObj:
            raise ValueError('Illegal Condition. Refuse to eval: %s' % condition)
| {
"repo_name": "dillmann/rscs",
"path": "lib/Graph/Evaluator.py",
"copies": "1",
"size": "1059",
"license": "mit",
"hash": 3869587469151834600,
"line_mean": 29.2571428571,
"line_max": 92,
"alpha_frac": 0.7053824363,
"autogenerated": false,
"ratio": 3.372611464968153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.918779202419653,
"avg_score": 0.07804037541432456,
"num_lines": 35
} |
__author__ = 'Brian Farrell'
__date_created__ = '1/17/14'
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email import encoders
import mimetypes
from email.message import Message
import sys
import os
import getpass
import traceback
import StringIO
from inspect import currentframe, getframeinfo
class Messenger:
    """Sends status and crash-report emails through a Gmail account.

    Constructing a Messenger also installs a sys.excepthook so that any
    unhandled exception is emailed before the default handler runs.
    NOTE: the hook must be re-installed in each thread of a threaded program.
    """
    def __init__(self, username, password, application_name, recipients=None):
        #set up email to/from information
        """
        This constructor sets the information needed to send an email and the computer information (neither of which
        should change during program execution).
        It also overrides the system exception handler to force it to send an email before handling the exception
        as normal.
        NOTE: overriding must be done in each thread of a threaded program.
        @param username: The username of the gmail account.
        @param password: The password of the gmail account.
        @param recipients: String list of email addresses to send to. Will send a copy to self.
        """
        self.fromaddr = username
        # BUG FIX: the original signature used a mutable default (recipients=[])
        # and appended to it, so the sender address accumulated on the shared
        # default list — and on any list the caller passed in. Copy instead.
        recipients = list(recipients) if recipients is not None else []
        recipients.append(self.fromaddr)
        self.to = recipients
        self.uname = self.fromaddr
        self.pwd = password
        self.application_name = application_name
        #Store the computer and user name
        # COMPUTERNAME only exists on Windows; fall back so construction does
        # not raise KeyError on other platforms.
        self.computername = os.environ.get('COMPUTERNAME', os.environ.get('HOSTNAME', 'unknown'))
        self.computeruser = getpass.getuser()
        self.__override_exception_handler()
    def __override_exception_handler(self):
        """
        This method sends an email when the script encounters an unexpected error. Then it returns control to the normal
        error handling exception.
        NOTE: This will not work in threaded programs.
        """
        def myexcepthook(exctype, value, tb):
            # Walk to the innermost frame for file/line reporting.
            frame = tb
            while frame.tb_next is not None:
                frame = frame.tb_next
            msg = self.__build_message("An unhandled exception occurred", etype=exctype, evalue=value, etb=tb, stackframe=frame)
            self.__send_email(msg)
            # BUG FIX: the original passed the `traceback` MODULE here
            # instead of the traceback object `tb`.
            sys.__excepthook__(exctype, value, tb)
        sys.excepthook = myexcepthook
    def __build_message(self, message, subject="Python Script Message", exception=None, etype=None, evalue=None, etb=None, stackframe=None, include_traceback=True):
        """
        @param message: A human friendly message that you want emailed.
        @param subject: Email subject; prefixed with the application name.
        @param exception: The exception object relevant to this message.
        @param etype: The type of the exception.
        @param evalue: The value of the exception.
        @param etb: The exception traceback object.
        @param stackframe: The stackframe from where you should get line number and file names.
        @param include_traceback: When True, append machine/traceback details to the body.
        @return: A MIMEMultipart, with a plain text body, ready for attachments.
        """
        #Collect exception information
        if exception:
            etype, evalue, etb = sys.exc_info()
        output = StringIO.StringIO()
        traceback.print_exception(etype, evalue, etb, file=output)
        #Collect the line number and file name where this method was called
        lineno = "Unknown"
        filename = "Unknown"
        if stackframe is not None:
            lineno = getframeinfo(stackframe).lineno
            filepath = getframeinfo(stackframe).filename.split('/')
            filename = filepath[len(filepath) - 1]
        body_text = message + "\r\n"
        if include_traceback:
            body_text += "\r\n".join([
                "File: " + filename,
                "Line: " + str(lineno),
                "Computer Name: " + self.computername,
                "Computer User: " + self.computeruser,
                "Error Information: " + output.getvalue()
            ])
        COMMASPACE = ', '
        msg = MIMEMultipart()
        msg['Subject'] = "[" + self.application_name + "] "+subject
        msg['To'] = COMMASPACE.join(self.to)
        msg['From'] = self.fromaddr
        body = MIMEMultipart('alternative')
        body_content = MIMEText(body_text, 'plain')
        body.attach(body_content)
        msg.attach(body)
        return msg
    def __send_email(self, msg):
        """
        Deliver msg through Gmail's SMTP relay (STARTTLS on port 587).
        @param msg: The message to be put in the body of the email.
        """
        server = smtplib.SMTP('smtp.gmail.com:587')
        server.ehlo()
        server.starttls()
        server.login(self.uname,self.pwd)
        composed = msg.as_string()
        server.sendmail(self.fromaddr, self.to, composed)
        server.quit()
    @staticmethod
    def __add_attachments(msg, attachments):
        """
        Attach each file with a MIME type guessed from its filename.
        @param msg: The MIMEMultipart message.
        @param attachments: A list of strings; the filepaths to all email attachments.
        """
        for attachment_path in attachments:
            ctype, encoding = mimetypes.guess_type(attachment_path)
            if ctype is None or encoding is not None:
                # No guess could be made, or the file is encoded (compressed), so
                # use a generic bag-of-bits type.
                ctype = 'application/octet-stream'
            maintype, subtype = ctype.split('/', 1)
            if maintype == 'text':
                fp = open(attachment_path)
                # Note: we should handle calculating the charset
                attachment = MIMEText(fp.read(), _subtype=subtype)
                fp.close()
            elif maintype == 'image':
                fp = open(attachment_path, 'rb')
                attachment = MIMEImage(fp.read(), _subtype=subtype)
                fp.close()
            elif maintype == 'audio':
                fp = open(attachment_path, 'rb')
                attachment = MIMEAudio(fp.read(), _subtype=subtype)
                fp.close()
            else:
                fp = open(attachment_path, 'rb')
                attachment = MIMEBase(maintype, subtype)
                attachment.set_payload(fp.read())
                fp.close()
                # Encode the payload using Base64
                encoders.encode_base64(attachment)
            head, tail = os.path.split(attachment_path)
            attachment.add_header('Content-Disposition', 'attachment', filename=tail)
            msg.attach(attachment)
    def email_error(self, message, exception, attachments=None):
        """
        Email an error report including the traceback of *exception*.
        @param message: The message to be put in the body of the email.
        @param exception: The exception object (if none, use email_message).
        @param attachments: Optional list of filepaths to attach.
        """
        msg = self.__build_message(message, exception=exception, stackframe=currentframe().f_back)
        if(attachments):
            self.__add_attachments(msg, attachments)
        self.__send_email(msg)
    def email_message(self, message, subject, attachments=None):
        """
        Email a plain informational message (no traceback details).
        @param message: The message to be put in the body of the email.
        @param subject: Email subject line.
        @param attachments: A list of strings; the filepaths to all email attachments.
        """
        msg = self.__build_message(message, subject, stackframe=currentframe().f_back, include_traceback=False,)
        if(attachments):
            self.__add_attachments(msg, attachments)
        self.__send_email(msg)
"repo_name": "vtcgit/ArcPy_Messenger",
"path": "ScriptMessaging.py",
"copies": "1",
"size": "7303",
"license": "mit",
"hash": -4401430514029833700,
"line_mean": 38.2688172043,
"line_max": 164,
"alpha_frac": 0.6115295084,
"autogenerated": false,
"ratio": 4.3756740563211505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.548720356472115,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brianhoffman'
import logging
class Node(object):
    """Single element of a singly linked chain."""
    def __init__(self, value):
        self.value = value          # payload
        self._child_node = None     # next node, None while detached
# TODO: do note that this is an ordered linked-list. Perhaps this class should
# be renamed to better indicate that.
# TODO: use a generator/iterator pattern for looping through the list.
class LinkedList(object):
    """Ordered singly linked list with an internal cursor.

    Ordering is decided by the order_by(new, existing) comparator supplied at
    construction: <= -1 sorts new before existing, >= 1 after, 0 means equal
    (duplicates are rejected).
    """
    # Possible results of insert().
    insertion_result = {'INSERTED': 'inserted', 'EXISTS': 'exists', 'FAILED': 'failed'}
    def __init__(self, order_by):
        self._child_node = None      # first node of the chain
        self._order_by = order_by    # comparator: (new, existing) -> int
        self._current_node = None    # cursor used by seek()/currentValue()
        self.log = logging.getLogger(
            '%s' % self.__class__.__name__
        )
    def insert(self, obj):
        """Insert obj in order; returns 'inserted', 'exists' or 'failed'."""
        if not self._child_node:
            self._child_node = Node(obj)
            self._current_node = self._child_node
            return LinkedList.insertion_result.get('INSERTED')
        return self._insert(self, obj)
    def clear(self):
        """Drop all nodes and reset the cursor."""
        self._child_node = None
        self._current_node = None
    def resetIndex(self):
        """Move the cursor back to the first node."""
        self._current_node = self._child_node
    def seek(self):
        """Advance the cursor one node; False when there is nowhere to go."""
        if not self._current_node:
            return False
        if not self._current_node._child_node:
            return False
        self._current_node = self._current_node._child_node
        return True
    def currentValue(self):
        """Value under the cursor; raises LookupError on an empty list."""
        if not self._current_node:
            raise LookupError('There aren\'t any nodes on the list.')
        return self._current_node.value
    def hasValue(self):
        return bool(self._child_node)
    def nextValue(self):
        """Value one past the cursor; raises LookupError when unavailable."""
        if not self._current_node:
            raise LookupError('There aren\'t any nodes on the list.')
        elif not self._current_node._child_node:
            raise LookupError('The current node does not have any child nodes')
        return self._current_node._child_node.value
    def hasNext(self):
        # BUG FIX: the original dereferenced self._current_node
        # unconditionally and raised AttributeError on an empty list.
        return bool(self._current_node and self._current_node._child_node)
    def toArray(self):
        return self._toArray(self, [])
    def toArrayValue(self):
        return self._toArray(self, [], True)
    def _toArray(self, node, accum, value=False):
        """Collect values from node's chain into accum (in order).

        NOTE(review): both branches of the original appended VALUES, so
        toArray() and toArrayValue() have always behaved identically; that
        behaviour is preserved (the value flag is now propagated but unused —
        confirm whether toArray() was meant to return the Node objects).
        """
        if not node._child_node:
            return accum
        return self._toArray(node._child_node, accum + [node._child_node.value], value)
    def _insert(self, parentNode, obj):
        """Recursive ordered insert below parentNode."""
        if not parentNode._child_node:
            parentNode._child_node = Node(obj)
            return LinkedList.insertion_result.get('INSERTED')
        order = self._order_by(obj, parentNode._child_node.value)
        if order <= -1:
            node = Node(obj)
            node._child_node = parentNode._child_node
            parentNode._child_node = node
            return LinkedList.insertion_result.get('INSERTED')
        elif order >= 1:
            return self._insert(parentNode._child_node, obj)
        elif order == 0:
            return LinkedList.insertion_result.get('EXISTS')
        # Comparator returned something in (-1, 0) or (0, 1) — e.g. a float.
        return LinkedList.insertion_result.get('FAILED')
| {
"repo_name": "freebazaar/FreeBazaar",
"path": "rudp/linkedlist.py",
"copies": "3",
"size": "3140",
"license": "mit",
"hash": -289130382915534100,
"line_mean": 28.9047619048,
"line_max": 87,
"alpha_frac": 0.5977707006,
"autogenerated": false,
"ratio": 3.969658659924147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6067429360524147,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian Lenihan <brian.lenihan@gmail.com"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import logging
import android
from pyxmpp2.jid import JID
from pyxmpp2.client import Client
from pyxmpp2.settings import XMPPSettings
from pyxmpp2.interfaces import XMPPFeatureHandler
from pyxmpp2.interfaces import EventHandler, event_handler, QUIT
from pyxmpp2.interfaces import message_stanza_handler
from pyxmpp2.streamevents import DisconnectedEvent
from pyxmpp2.ext.version import VersionProvider
logging.basicConfig(level = logging.INFO)
xmpp_trace = False
class SayChat(EventHandler, XMPPFeatureHandler):
    """XMPP client that speaks incoming Google Talk messages aloud via the
    Android text-to-speech API. Handlers are registered through pyxmpp2's
    decorator interfaces, so the method decorators must stay intact."""
    def __init__(self):
        # Prompt for credentials through Android input dialogs.
        self.droid = android.Android()
        settings = XMPPSettings({"software_name": "Say Chat"})
        settings["jid"] = self.droid.dialogGetInput("Google Talk Username").result
        settings["password"] = self.droid.dialogGetInput("Google Talk Password").result
        settings["server"] = "talk.google.com"
        settings["starttls"] = True
        # This object handles events itself (EventHandler base) and also
        # provides the version responder.
        self.client = Client(
            JID(settings["jid"]),
            [self, VersionProvider(settings)],
            settings)
    def connect(self):
        # run() blocks, driving the event loop until disconnect/QUIT.
        self.client.connect()
        self.client.run()
    def disconnect(self):
        # Give the stream up to 2 seconds to shut down cleanly.
        self.client.disconnect()
        self.client.run(timeout = 2)
    @message_stanza_handler()
    def handle_message(self, stanza):
        # Speak "<sender> says <body>" for every incoming message stanza.
        self.droid.ttsSpeak(
            "{!s} says {!s}".format(stanza.from_jid.as_unicode(),
                stanza.body))
        return ""
    @event_handler(DisconnectedEvent)
    def handle_disconnected(self, event):
        # Stop the event loop once the stream is gone.
        return QUIT
    @event_handler()
    def handle_all(self, event):
        """If it's not logged, it didn't happen."""
        logging.info("-- {}".format(event))
    def run(self):
        # Blocks until disconnected; Ctrl-C triggers a clean shutdown.
        try:
            self.connect()
        except KeyboardInterrupt:
            self.disconnect()
# Optional wire-level tracing: mirror pyxmpp2's stream I/O loggers to stderr.
if xmpp_trace:
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
        logger = logging.getLogger(logger)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        logger.propagate = False
# Start the client immediately when this script runs.
saychat = SayChat()
saychat.run()
| {
"repo_name": "louietsai/python-for-android",
"path": "python3-alpha/python3-src/android-scripts/say_chat.py",
"copies": "46",
"size": "2159",
"license": "apache-2.0",
"hash": 5032170297765571000,
"line_mean": 28.5753424658,
"line_max": 85,
"alpha_frac": 0.6956924502,
"autogenerated": false,
"ratio": 3.6043405676126876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian Lenihan <brian.lenihan@gmail.com"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import logging
import sl4a
from pyxmpp2.jid import JID
from pyxmpp2.client import Client
from pyxmpp2.settings import XMPPSettings
from pyxmpp2.interfaces import XMPPFeatureHandler
from pyxmpp2.interfaces import EventHandler, event_handler, QUIT
from pyxmpp2.interfaces import message_stanza_handler
from pyxmpp2.streamevents import DisconnectedEvent
from pyxmpp2.ext.version import VersionProvider
logging.basicConfig(level = logging.INFO)
xmpp_trace = False
class SayChat(EventHandler, XMPPFeatureHandler):
    """XMPP client that speaks incoming Google Talk messages aloud via the
    SL4A text-to-speech API. Handlers are registered through pyxmpp2's
    decorator interfaces, so the method decorators must stay intact."""
    def __init__(self):
        # Prompt for credentials through SL4A input dialogs.
        self.droid = sl4a.Android()
        settings = XMPPSettings({"software_name": "Say Chat"})
        settings["jid"] = self.droid.dialogGetInput("Google Talk Username").result
        settings["password"] = self.droid.dialogGetInput("Google Talk Password").result
        settings["server"] = "talk.google.com"
        settings["starttls"] = True
        # This object handles events itself (EventHandler base) and also
        # provides the version responder.
        self.client = Client(
            JID(settings["jid"]),
            [self, VersionProvider(settings)],
            settings)
    def connect(self):
        # run() blocks, driving the event loop until disconnect/QUIT.
        self.client.connect()
        self.client.run()
    def disconnect(self):
        # Give the stream up to 2 seconds to shut down cleanly.
        self.client.disconnect()
        self.client.run(timeout = 2)
    @message_stanza_handler()
    def handle_message(self, stanza):
        # Speak "<sender> says <body>" for every incoming message stanza.
        self.droid.ttsSpeak(
            "{!s} says {!s}".format(stanza.from_jid.as_unicode(),
                stanza.body))
        return ""
    @event_handler(DisconnectedEvent)
    def handle_disconnected(self, event):
        # Stop the event loop once the stream is gone.
        return QUIT
    @event_handler()
    def handle_all(self, event):
        """If it's not logged, it didn't happen."""
        logging.info("-- {}".format(event))
    def run(self):
        # Blocks until disconnected; Ctrl-C triggers a clean shutdown.
        try:
            self.connect()
        except KeyboardInterrupt:
            self.disconnect()
# Optional wire-level tracing: mirror pyxmpp2's stream I/O loggers to stderr.
if xmpp_trace:
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
        logger = logging.getLogger(logger)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        logger.propagate = False
# Start the client immediately when this script runs.
saychat = SayChat()
saychat.run()
| {
"repo_name": "tomMoulard/python-projetcs",
"path": "scripts3/say_chat.py",
"copies": "1",
"size": "2153",
"license": "apache-2.0",
"hash": -4344941393585683500,
"line_mean": 28.4931506849,
"line_max": 85,
"alpha_frac": 0.6948444032,
"autogenerated": false,
"ratio": 3.570480928689884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4765325331889884,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian Lenihan <brian.lenihan@gmail.com"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import os
import logging
import android
"""
Create and set a new Tasker variable, display the variable's value in a Tasker
popup, and then clear the variable.
Misc / Allow External Access must be set in Tasker's prefs.
Tasker action code reference:
http://tasker.dinglisch.net/ActionCodes.java
"""
SET_VARIABLE = 547
CLEAR_VARIABLE = 549
POPUP = 550
logging.basicConfig(level=logging.INFO)
class Tasker(object):
    """Builds and broadcasts a Tasker ACTION_TASK intent carrying a queue of
    numbered actions ('action1', 'action2', ...)."""
    def __init__(self):
        self.droid = android.Android()
        # Task-level extras; individual actions are appended by bundle().
        self.extras = dict(
            version_number = '1.0',
            task_name = 'tasker_demo.{}'.format(os.getpid()),
            task_priority = 9)
        self.actions = 0
    def bundle(self, action, *args):
        """Append one action (its code plus up to six arguments) to the task."""
        # Tasker expects exactly six argument slots; pad missing ones with
        # False (extra arguments beyond six are ignored).
        padded = list(args) + [False] * (6 - len(args))
        self.actions += 1
        entry = {'action': action}
        for slot in range(6):
            entry['arg:{}'.format(slot + 1)] = padded[slot]
        self.extras['action{}'.format(self.actions)] = entry
    def broadcast_intent(self):
        """Fire the assembled task at Tasker's external-access receiver."""
        intent = self.droid.makeIntent(
            'net.dinglisch.android.tasker.ACTION_TASK', None, None, self.extras).result
        logging.debug("-- {}".format(intent))
        self.droid.sendBroadcastIntent(intent)
if __name__ == "__main__":
tasker = Tasker()
tasker.bundle(SET_VARIABLE, "%PY4A_DEMO", "Hello from python")
# Popup: String title, String text, String background image, Scene layout,
# Integer timeout, Boolean show over keyguard, Boolean condition
tasker.bundle(POPUP, "Tasker", "%PY4A_DEMO", "", "Popup", 5, True, False)
tasker.bundle(CLEAR_VARIABLE, "%PY4A_DEMO")
tasker.broadcast_intent()
| {
"repo_name": "kmonsoor/python-for-android",
"path": "python3-alpha/python3-src/android-scripts/tasker_example.py",
"copies": "46",
"size": "1883",
"license": "apache-2.0",
"hash": 6468423920425834000,
"line_mean": 28.8888888889,
"line_max": 81,
"alpha_frac": 0.6431226766,
"autogenerated": false,
"ratio": 3.2409638554216866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian Lenihan <brian.lenihan@gmail.com"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import os
import logging
import sl4a
"""
Create and set a new Tasker variable, display the variable's value in a Tasker
popup, and then clear the variable.
Misc / Allow External Access must be set in Tasker's prefs.
Tasker action code reference:
http://tasker.dinglisch.net/ActionCodes.java
"""
SET_VARIABLE = 547
CLEAR_VARIABLE = 549
POPUP = 550
logging.basicConfig(level=logging.INFO)
class Tasker(object):
    """Builds and broadcasts a Tasker ACTION_TASK intent carrying a queue of
    numbered actions ('action1', 'action2', ...)."""
    def __init__(self):
        self.droid = sl4a.Android()
        # Task-level extras; individual actions are appended by bundle().
        self.extras = dict(
            version_number = '1.0',
            task_name = 'tasker_demo.{}'.format(os.getpid()),
            task_priority = 9)
        self.actions = 0
    def bundle(self, action, *args):
        """Append one action (its code plus up to six arguments) to the task."""
        # Tasker expects exactly six argument slots; pad missing ones with
        # False (extra arguments beyond six are ignored).
        padded = list(args) + [False] * (6 - len(args))
        self.actions += 1
        entry = {'action': action}
        for slot in range(6):
            entry['arg:{}'.format(slot + 1)] = padded[slot]
        self.extras['action{}'.format(self.actions)] = entry
    def broadcast_intent(self):
        """Fire the assembled task at Tasker's external-access receiver."""
        intent = self.droid.makeIntent(
            'net.dinglisch.sl4a.tasker.ACTION_TASK', None, None, self.extras).result
        logging.debug("-- {}".format(intent))
        self.droid.sendBroadcastIntent(intent)
if __name__ == "__main__":
    # Demo: set a Tasker variable, pop it up on screen, then clear it.
    tasker = Tasker()
    tasker.bundle(SET_VARIABLE, "%PY4A_DEMO", "Hello from python")
    # Popup: String title, String text, String background image, Scene layout,
    # Integer timeout, Boolean show over keyguard, Boolean condition
    tasker.bundle(POPUP, "Tasker", "%PY4A_DEMO", "", "Popup", 5, True, False)
    tasker.bundle(CLEAR_VARIABLE, "%PY4A_DEMO")
    # Send all three bundled actions to Tasker in a single broadcast.
    tasker.broadcast_intent()
| {
"repo_name": "tomMoulard/python-projetcs",
"path": "scripts3/tasker_example.py",
"copies": "1",
"size": "1874",
"license": "apache-2.0",
"hash": 3777273291583605000,
"line_mean": 28.746031746,
"line_max": 78,
"alpha_frac": 0.6414087513,
"autogenerated": false,
"ratio": 3.192504258943782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9261512547598115,
"avg_score": 0.014480092529133338,
"num_lines": 63
} |
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
##############################################################################
# Physical constants (SI units unless noted)
echarge = float("1.6022e-19") # Coulombs (elementary charge)
evjoule = float("1.6022e-19") # eV -> Joule conversion factor
eperm = float("8.854e-12") # vacuum permittivity, s^4 A^2 m^-3 kg^-1
m_elec = float("9.109e-31") # electron mass, kg
kb = float("1.381e-23") # Boltzmann constant, m^2 kg s^-2 K^-1
##############################################################################
# Functions
def debyeL(kT,n,q):
    """Debye length [m] for temperature kT [eV], density n [m^-3], charge q [C]."""
    energy_density = eperm * kT * evjoule
    return math.sqrt(energy_density / (n * q * q))
def larmorR(m,v,q,B):
    """Larmor (gyro) radius [m]: momentum m*v over charge q times field B."""
    momentum = m * v
    return momentum / (q * B)
def vth(kT,m):
    """Mean thermal speed [m/s] of a Maxwellian at temperature kT [eV], mass m [kg]."""
    energy = 8.0 * kT * evjoule
    return math.sqrt(energy / (math.pi * m))
# Citation for this electron saturation current reduction factor
#   author = D. Bohm
#   title  = The Characteristics of Electrical Discharges in Magnetic Fields
#   year   = 1949
def deltaCorr(rp,dl,beta):
    """Bohm reduction factor for the electron saturation current.

    rp: probe/dust radius [m]; dl: Debye length [m];
    beta: rp over Larmor radius (scalar or ndarray).
    """
    root = np.sqrt(1.0 + dl * dl * np.multiply(beta, beta) / rp**2)
    return 1.0 / (1.0 + math.pi * rp * root / (8.0 * dl))
###############################################################################
# Magnetic field parameters
B = 2.0 # Tesla (NOTE(review): unused below — arrayB sweeps the field instead)
# Electron parameters
kTe = 2.0 # eV, Electron temperature
ne = float("1.0e15") # m^-3, Electron density
etS = vth(kTe,m_elec) # m/s, Electron thermal speed
eDL = debyeL(kTe,ne,echarge) # m Electron Debye Length
# Dust parameters
dustR = float("4.5e-6") # m Dust Radius
##############################################################################
# Start the plotting
# evenly sampled B in 0.05 T intervals
arrayB = np.arange(0.05, 4.0, 0.05)
arrayLar = larmorR(m_elec,etS,echarge,arrayB)
arrayBeta = np.divide(dustR,arrayLar)  # dust radius / electron Larmor radius
arrayBetaInv = np.divide(1.0,arrayBeta)
arrayDelta = deltaCorr(dustR,eDL,arrayBeta)  # Bohm current-reduction factor
# make the plot
plt.plot(arrayBetaInv,arrayDelta)
plt.grid(True, which='both')
plt.xlabel(r'$\beta^{-1}$')
plt.ylabel('$\delta$')
plt.title('Electron Current Reduction in Magnetic Field')
# increase the font size
mpl.rcParams.update({'font.size': 16})
testname = 'electron_collection_reduction.png'
plt.savefig(str(testname))
plt.show()
| {
"repo_name": "brianrlynch85/PlasmaScaling",
"path": "src/electron_collection_reduction.py",
"copies": "1",
"size": "2569",
"license": "mit",
"hash": 6015472095649605000,
"line_mean": 32.8026315789,
"line_max": 79,
"alpha_frac": 0.5165434021,
"autogenerated": false,
"ratio": 3.1755253399258345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4192068742025834,
"avg_score": null,
"num_lines": null
} |
import math
import numpy as np
import matplotlib.pyplot as plt
##############################################################################
# Physical constants
echarge = float("1.6022e-19") # Coloumbs
evjoule = float("1.6022e-19") # eV = ev_to_joule Joules
eperm = float("8.854e-12") # s^4 A^2 m^-3 kg^-1
##############################################################################
# Functions
def vav(kT,m):
    """Average speed [m/s] of a Maxwellian: sqrt(8 kT / (pi m)), kT in eV, m in kg."""
    joules = kT * evjoule
    return math.sqrt(8.0 * joules / (m * math.pi))
def gyro_rad(m,v,q,B):
    """Gyro (Larmor) radius: momentum m*v over charge q times field B."""
    momentum = m * v
    return momentum / (q * B)
##############################################################################
# Simulation Parameters
r_d = 0.25 # microns, Dust diameter (NOTE(review): used as a radius below
           # when computing R_d and the plot title — confirm radius vs diameter)
n_d = 2.2 # g/cm^3, Dust mass density
kTe = 2.0 # eV, Electron temperature
U_d = 1.5 * kTe # eV, Dust Potential
ne = float("1.0e15") # m^-3, Electron density
##############################################################################
# Important Calculated Global Constants
debL = math.sqrt(eperm * kTe * evjoule / (ne * (echarge**2))) # m
R_d = r_d * float("1.0e-6") # m
Vol = 4.0 * math.pi * (R_d**3) / 3.0 # m^3, dust grain volume
den_kgperm = n_d * (10**3) # kg/m^3 (g/cm^3 -> kg/m^3)
m_d = den_kgperm * Vol # kg, dust grain mass
# Dust charge from capacitance model with Debye shielding correction:
q_d = 4.0 * math.pi * eperm * U_d * R_d * (1.0 + R_d / debL) # C
# Evenly sampled B 0.1 T intervals
# NOTE: B starts at 0.0, so gyro_rad divides by zero at the first sample
# (inf on the log plot).
B = np.arange(0.0, 4.0, 0.1)
# Plot a variety of dust temperatures
kT_sil_list = [0.025,0.100,1.000,5.000]
plt.rc('lines', linewidth = 2)
plt.xticks(np.arange(0,5,0.5))
plt.yticks(np.arange(0,100,5))
plt.grid(True, which='both')
# Make the plot (gyro radius converted m -> cm via the factor 100)
for i in range (0,4,1):
    v_itr = vav(kT_sil_list[i],m_d)
    plt.plot(B,100*gyro_rad(m_d,v_itr,q_d,B),label='T = '
             + str(round(kT_sil_list[i],2)) +
             '[eV], $v_{avg}$ = ' + str(round(100*v_itr,2)) + '[cm/s]')
plt.yscale('log')
plt.legend(loc=1,ncol=1,borderaxespad=0.,prop={'size':11})
plt.xlabel('B [T]')
plt.ylabel('gyro radius [cm]')
plt.title('Silica Dust: r = ' + str(r_d) + ' [$\mu$m], ' + 'Density = '
          + str(den_kgperm/10**3) + ' [g/$cm^3$], '
          + 'Q = ' + '{:3.0f}'.format(q_d/echarge) + ' [#e]')
testname = 'SilicaDust_r_' + str(r_d) + 'Density_' + str(den_kgperm) + '.png'
plt.savefig(str(testname))
plt.show()
| {
"repo_name": "brianrlynch85/PlasmaScaling",
"path": "src/larmor_dust.py",
"copies": "1",
"size": "2482",
"license": "mit",
"hash": -2438656808747705300,
"line_mean": 36.0447761194,
"line_max": 78,
"alpha_frac": 0.4443996777,
"autogenerated": false,
"ratio": 2.7304730473047303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.367487272500473,
"avg_score": null,
"num_lines": null
} |
import math
import numpy as np
import matplotlib.pyplot as plt
import plasma_parameters as plasma
# Plasma parameters
T_Ar = 0.025 * plasma.evjoule # eV
v_Ar = math.sqrt(T_Ar / plasma.m_Ar) # m s^-1
vt_Ar = math.sqrt(8.0 / math.pi) * v_Ar # m s^-1
##############################################################################
# Functions
def omega_p(n,m,q):
    """Plasma frequency [rad/s] for density n, mass m, charge q."""
    ratio = n * q**2 / (m * plasma.eperm)
    return math.sqrt(ratio)
def omega_B(q,B,m):
    """Cyclotron frequency [rad/s]: q*B/m."""
    charge_field = q * B
    return charge_field / m
def debL(kT,n,q):
    """Debye length [m] for temperature kT [eV], density n, charge q."""
    energy_density = plasma.eperm * kT * plasma.evjoule
    return math.sqrt(energy_density / (n * q**2))
def vav(kT,m):
    """Average Maxwellian speed [m/s]: sqrt(8 kT / (pi m)), kT in eV."""
    joules = kT * plasma.evjoule
    return math.sqrt(8.0 * joules / (m * math.pi))
def gyro_rad(m,v,q,B):
    """Gyro (Larmor) radius: momentum m*v over charge q times field B."""
    momentum = m * v
    return momentum / (q * B)
##############################################################################
# Begin the plotting
# Evenly sampled B 0.1 T intervals
B = np.arange(0.0, 4.0, 0.1)
mfp_array = np.empty(40)  # constant-valued line for each mean-free-path level
# Plot for a several different ion temperatures
kTs = np.array([0.025, 0.05, 0.10, 0.5000])
plt.rc('lines', linewidth = 2)
plt.xticks(np.arange(0,5,0.5)); plt.yticks(np.arange(0,100,5))
plt.grid(True, which='both')
# Neutral pressures and ion temperatures
P_Ar = np.array([100.0, 50.0, 5.0]) #mTorr
n_Ar = np.divide(P_Ar * plasma.TorrConv / 1000.0, T_Ar) # # m^-3
mfp_Ar = np.divide(1.0, plasma.A_Ar * n_Ar)  # neutral collision mean free path
marks = ['r--', 'g--' ,'b--']
# Plot the mean free paths at several different pressures.
# BUG FIX: xrange() does not exist in Python 3 (this script already uses the
# print() function form); range() behaves identically here on both versions.
for i in range(0, len(P_Ar), 1):
    mfp_array.fill(100*mfp_Ar[i])
    print('P = ' + '{:3.2e}'.format(P_Ar[i]) + ' [mTorr]' + ';' + 'dust mfp = ' + \
          '{:3.2e}'.format(mfp_Ar[i]) + ' [m]')
    plt.plot(B,mfp_array,marks[i],label='Neutral Collision Mean Free Path at ' + \
             '{:3.0f}'.format(P_Ar[i]) + '[mTorr]')
# Plot the gyro-radius for several different ion thermal speeds
# (the factor 100 converts m -> cm).
for i in range (0,4,1):
    v_itr = vav(kTs[i],plasma.m_Ar)
    plt.plot(B,100*gyro_rad(plasma.m_Ar,v_itr,plasma.echarge,B),label='T = '
             + str(round(kTs[i],3)) +
             '[eV], $v_{avg}$ = ' + str(round(v_itr,2)) + '[m/s]')
plt.axis([B[0],B[-1],0.001,20])
plt.yscale('log')
plt.legend(loc=1, ncol=1, borderaxespad=0., prop={'size':11})
plt.xlabel('B [T]'); plt.ylabel('gyro radius [cm]'); plt.title('Argon Ions')
figname = 'Argon_ion_gyro.png'
plt.savefig(figname)
plt.show()
| {
"repo_name": "brianrlynch85/PlasmaScaling",
"path": "src/larmor_ion.py",
"copies": "1",
"size": "2474",
"license": "mit",
"hash": 5927120969690822000,
"line_mean": 31.9866666667,
"line_max": 82,
"alpha_frac": 0.5226354082,
"autogenerated": false,
"ratio": 2.5505154639175256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35731508721175254,
"avg_score": null,
"num_lines": null
} |
import math
import numpy as np
import matplotlib.pyplot as plt
##############################################################################
# Physical constants (SI units unless noted)
echarge = float("1.6022e-19") # Coulombs (elementary charge)
evjoule = float("1.6022e-19") # eV -> Joule conversion factor
eperm = float("8.854e-12") # vacuum permittivity, s^4 A^2 m^-3 kg^-1
m_elec = float("9.109e-31") # electron mass, kg
kb = float("1.381e-23") # Boltzmann constant, m^2 kg s^-2 K^-1
##############################################################################
# Functions
def vav(kT,m):
    """Average Maxwellian speed [m/s]: sqrt(8 kT / (pi m)), kT in eV."""
    joules = kT * evjoule
    return math.sqrt(8.0 * joules / (m * math.pi))
def gyro_rad(m,v,q,B):
    """Gyro (Larmor) radius: momentum m*v over charge q times field B."""
    momentum = m * v
    return momentum / (q * B)
def vth(kT,m):
    """Most-probable thermal speed [m/s]: sqrt(2 kT / m), kT in eV."""
    joules = kT * evjoule
    return math.sqrt(2.0 * joules / m)
# Evenly sampled B 0.1 T intervals
# NOTE: B starts at 0.0, so gyro_rad divides by zero at the first sample
# (inf on the log plot).
B = np.arange(0.0, 4.0, 0.1)
# Plot for a variety of electron temperatures
kT_sil_list = [0.1,0.5,2.0,4.0]
plt.rc('lines', linewidth = 2)
plt.xticks(np.arange(0,5,0.5))
plt.yticks(np.arange(0,10,5))
plt.grid(True, which='both')
# Make the plot (gyro radius converted m -> cm via the factor 100)
for i in range (0,4,1):
    v_itr = vav(kT_sil_list[i],m_elec)
    plt.plot(B,100*gyro_rad(m_elec,v_itr,echarge,B),label='T = '
             + str(round(kT_sil_list[i],3)) +
             '[eV], $v_{avg}$ = ' + str(round(v_itr,2)) + '[m/s]')
plt.yscale('log')
plt.legend(loc=1,ncol=1,borderaxespad=0.,prop={'size':11})
plt.xlabel('B [T]')
plt.ylabel('gyro radius [cm]')
plt.title('Electrons')
testname = 'electron_gyro.png'
plt.savefig(str(testname))
plt.show()
| {
"repo_name": "brianrlynch85/PlasmaScaling",
"path": "src/larmor_elec.py",
"copies": "1",
"size": "1658",
"license": "mit",
"hash": 639673620895497900,
"line_mean": 29.1454545455,
"line_max": 78,
"alpha_frac": 0.4794933655,
"autogenerated": false,
"ratio": 2.726973684210526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3706467049710526,
"avg_score": null,
"num_lines": null
} |
from numpy.testing import assert_array_equal, assert_raises
import numpy as np
from scipy.optimize import linear_sum_assignment
def test_linear_sum_assignment():
    """linear_sum_assignment yields sorted row indices and the optimal
    per-row costs for square, rectangular and empty matrices, and
    (up to ordering) for their transposes."""
    cases = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),
        # n == 2, m == 0 matrix
        ([[], []], []),
    ]
    for raw, expected_cost in cases:
        cost = np.array(raw)
        rows, cols = linear_sum_assignment(cost)
        assert_array_equal(rows, np.sort(rows))
        assert_array_equal(expected_cost, cost[rows, cols])
        transposed = cost.T
        rows, cols = linear_sum_assignment(transposed)
        assert_array_equal(rows, np.sort(rows))
        assert_array_equal(np.sort(expected_cost),
                           np.sort(transposed[rows, cols]))
def test_linear_sum_assignment_input_validation():
    """1-D input raises ValueError; list, ndarray and np.matrix inputs agree."""
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
    C = [[1, 2, 3], [4, 5, 6]]
    baseline = linear_sum_assignment(C)
    assert_array_equal(baseline, linear_sum_assignment(np.asarray(C)))
    assert_array_equal(baseline, linear_sum_assignment(np.matrix(C)))
| {
"repo_name": "Shaswat27/scipy",
"path": "scipy/optimize/tests/test_hungarian.py",
"copies": "48",
"size": "1742",
"license": "bsd-3-clause",
"hash": -1567834453632790000,
"line_mean": 27.5573770492,
"line_max": 72,
"alpha_frac": 0.5212399541,
"autogenerated": false,
"ratio": 3.262172284644195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from numpy.testing import assert_array_equal
from pytest import raises as assert_raises
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.sparse.sputils import matrix
def test_linear_sum_assignment():
    """Minimization (sign=1) and maximization (sign=-1) agree with the
    expected optimal per-row costs on square, rectangular, empty and
    inf-containing matrices, and their transposes."""
    for sign in [-1, 1]:
        for cost_matrix, expected_cost in [
            # Square
            ([[400, 150, 400],
              [400, 450, 600],
              [300, 225, 300]],
             [150, 400, 300]
             ),
            # Rectangular variant
            ([[400, 150, 400, 1],
              [400, 450, 600, 2],
              [300, 225, 300, 3]],
             [150, 2, 300]),
            # Square
            ([[10, 10, 8],
              [9, 8, 1],
              [9, 7, 4]],
             [10, 1, 7]),
            # Rectangular variant
            ([[10, 10, 8, 11],
              [9, 8, 1, 1],
              [9, 7, 4, 10]],
             [10, 1, 4]),
            # n == 2, m == 0 matrix
            ([[], []],
             []),
            # Square with positive infinities
            ([[10, float("inf"), float("inf")],
              [float("inf"), float("inf"), 1],
              [float("inf"), 7, float("inf")]],
             [10, 1, 7]),
        ]:
            # Negating the matrix turns the max problem into the min problem.
            maximize = sign == -1
            cost_matrix = sign * np.array(cost_matrix)
            expected_cost = sign * np.array(expected_cost)
            row_ind, col_ind = linear_sum_assignment(cost_matrix,
                                                     maximize=maximize)
            assert_array_equal(row_ind, np.sort(row_ind))
            assert_array_equal(expected_cost, cost_matrix[row_ind, col_ind])
            # Transpose: same optimum, but row/col roles swap so only the
            # multiset of selected costs is checked.
            cost_matrix = cost_matrix.T
            row_ind, col_ind = linear_sum_assignment(cost_matrix,
                                                     maximize=maximize)
            assert_array_equal(row_ind, np.sort(row_ind))
            assert_array_equal(np.sort(expected_cost),
                               np.sort(cost_matrix[row_ind, col_ind]))
def test_linear_sum_assignment_input_validation():
    """Reject 1-D input and non-finite entries; accept list/ndarray/matrix
    and boolean matrices."""
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
    C = [[1, 2, 3], [4, 5, 6]]
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.asarray(C)))
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(matrix(C)))
    I = np.identity(3)
    # BUG FIX: np.bool was a deprecated alias of the builtin bool
    # (deprecated in NumPy 1.20, removed in 1.24); use bool directly.
    assert_array_equal(linear_sum_assignment(I.astype(bool)),
                       linear_sum_assignment(I))
    assert_raises(ValueError, linear_sum_assignment, I.astype(str))
    I[0][0] = np.nan
    assert_raises(ValueError, linear_sum_assignment, I)
    I = np.identity(3)
    I[1][1] = -np.inf
    assert_raises(ValueError, linear_sum_assignment, I)
    I = np.identity(3)
    I[:, 0] = np.inf
    assert_raises(ValueError, linear_sum_assignment, I)
def test_constant_cost_matrix():
    """Regression test for gh-11602: an all-equal cost matrix yields the
    identity assignment."""
    n = 8
    rows, cols = linear_sum_assignment(np.ones((n, n)))
    identity = np.arange(n)
    assert_array_equal(rows, identity)
    assert_array_equal(cols, identity)
| {
"repo_name": "person142/scipy",
"path": "scipy/optimize/tests/test_linear_assignment.py",
"copies": "4",
"size": "3150",
"license": "bsd-3-clause",
"hash": -3455006542766542300,
"line_mean": 29.8823529412,
"line_max": 76,
"alpha_frac": 0.5038095238,
"autogenerated": false,
"ratio": 3.5512965050732808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001278772378516624,
"num_lines": 102
} |
import numpy as np
from numpy.testing import assert_array_equal, assert_raises
from clustering_metrics.hungarian import linear_sum_assignment
from clustering_metrics.entropy import assignment_cost
from nose.tools import assert_equal, assert_almost_equal
def test_linear_sum_assignment():
    """linear_sum_assignment returns sorted row indices and the optimal
    per-row costs for square, rectangular and empty cost matrices
    (and, up to ordering, their transposes)."""
    for cost_matrix, expected_cost in [
            # Square
            ([[400, 150, 400],
              [400, 450, 600],
              [300, 225, 300]],
             [150, 400, 300]),
            # Rectangular variant
            ([[400, 150, 400, 1],
              [400, 450, 600, 2],
              [300, 225, 300, 3]],
             [150, 2, 300]),
            # Square
            ([[10, 10, 8],
              [9, 8, 1],
              [9, 7, 4]],
             [10, 1, 7]),
            # Rectangular variant
            ([[10, 10, 8, 11],
              [9, 8, 1, 1],
              [9, 7, 4, 10]],
             [10, 1, 4]),
            # n == 2, m == 0 matrix
            ([[], []],
             []),
    ]:
        cost_matrix = np.array(cost_matrix)
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        assert_array_equal(row_ind, np.sort(row_ind))
        assert_array_equal(expected_cost, cost_matrix[row_ind, col_ind])
        cost_matrix = cost_matrix.T
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        assert_array_equal(row_ind, np.sort(row_ind))
        assert_array_equal(np.sort(expected_cost),
                           np.sort(cost_matrix[row_ind, col_ind]))
def test_assignment_score():
    """assignment_cost returns the optimal total cost for int and float
    matrices, invariant under transposition."""
    for cost_matrix, expected_cost in [
            # Square
            ([[400, 150, 400],
              [400, 450, 600],
              [300, 225, 300]],
             [150, 400, 300]),
            # Rectangular variant
            ([[400, 150, 400, 1],
              [400, 450, 600, 2],
              [300, 225, 300, 3]],
             [150, 2, 300]),
            # Square
            ([[10, 10, 8],
              [9, 8, 1],
              [9, 7, 4]],
             [10, 1, 7]),
            # Rectangular variant
            ([[10, 10, 8, 11],
              [9, 8, 1, 1],
              [9, 7, 4, 10]],
             [10, 1, 4]),
            # n == 2, m == 0 matrix
            ([[], []],
             []),
    ]:
        cost_matrix = np.array(cost_matrix)
        cost_matrix_T = cost_matrix.T
        expected_sum = np.sum(expected_cost)
        score = assignment_cost(cost_matrix)
        score_T = assignment_cost(cost_matrix_T)
        assert_equal(score, expected_sum)
        assert_equal(score_T, expected_sum)
        # Same check on float inputs, using approximate comparison.
        score_dbl = assignment_cost(np.asarray(cost_matrix, dtype=float))
        score_T_dbl = assignment_cost(np.asarray(cost_matrix_T, dtype=float))
        assert_almost_equal(score_dbl, float(expected_sum))
        assert_almost_equal(score_T_dbl, float(expected_sum))
def test_linear_sum_assignment_input_validation():
    """1-D input raises ValueError; list, ndarray and np.matrix inputs agree."""
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
    C = [[1, 2, 3], [4, 5, 6]]
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.asarray(C)))
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.matrix(C)))
| {
"repo_name": "escherba/clustering-metrics",
"path": "tests/test_hungarian.py",
"copies": "1",
"size": "3103",
"license": "bsd-3-clause",
"hash": -8287419173181598000,
"line_mean": 27.7314814815,
"line_max": 77,
"alpha_frac": 0.5188527232,
"autogenerated": false,
"ratio": 3.3258306538049305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43446833770049303,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from numpy.testing import assert_array_equal, assert_raises
from lsh_hdc.hungarian import linear_sum_assignment
from lsh_hdc.entropy import assignment_cost
from nose.tools import assert_equal, assert_almost_equal
def test_linear_sum_assignment():
    """linear_sum_assignment returns sorted row indices and the optimal
    per-row costs for square, rectangular and empty cost matrices
    (and, up to ordering, their transposes)."""
    for cost_matrix, expected_cost in [
            # Square
            ([[400, 150, 400],
              [400, 450, 600],
              [300, 225, 300]],
             [150, 400, 300]),
            # Rectangular variant
            ([[400, 150, 400, 1],
              [400, 450, 600, 2],
              [300, 225, 300, 3]],
             [150, 2, 300]),
            # Square
            ([[10, 10, 8],
              [9, 8, 1],
              [9, 7, 4]],
             [10, 1, 7]),
            # Rectangular variant
            ([[10, 10, 8, 11],
              [9, 8, 1, 1],
              [9, 7, 4, 10]],
             [10, 1, 4]),
            # n == 2, m == 0 matrix
            ([[], []],
             []),
    ]:
        cost_matrix = np.array(cost_matrix)
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        assert_array_equal(row_ind, np.sort(row_ind))
        assert_array_equal(expected_cost, cost_matrix[row_ind, col_ind])
        cost_matrix = cost_matrix.T
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        assert_array_equal(row_ind, np.sort(row_ind))
        assert_array_equal(np.sort(expected_cost),
                           np.sort(cost_matrix[row_ind, col_ind]))
def test_assignment_score():
    """assignment_cost returns the optimal total cost for int and float
    matrices, invariant under transposition."""
    for cost_matrix, expected_cost in [
            # Square
            ([[400, 150, 400],
              [400, 450, 600],
              [300, 225, 300]],
             [150, 400, 300]),
            # Rectangular variant
            ([[400, 150, 400, 1],
              [400, 450, 600, 2],
              [300, 225, 300, 3]],
             [150, 2, 300]),
            # Square
            ([[10, 10, 8],
              [9, 8, 1],
              [9, 7, 4]],
             [10, 1, 7]),
            # Rectangular variant
            ([[10, 10, 8, 11],
              [9, 8, 1, 1],
              [9, 7, 4, 10]],
             [10, 1, 4]),
            # n == 2, m == 0 matrix
            ([[], []],
             []),
    ]:
        cost_matrix = np.array(cost_matrix)
        cost_matrix_T = cost_matrix.T
        expected_sum = np.sum(expected_cost)
        score = assignment_cost(cost_matrix)
        score_T = assignment_cost(cost_matrix_T)
        assert_equal(score, expected_sum)
        assert_equal(score_T, expected_sum)
        # Same check on float inputs, using approximate comparison.
        score_dbl = assignment_cost(np.asarray(cost_matrix, dtype=float))
        score_T_dbl = assignment_cost(np.asarray(cost_matrix_T, dtype=float))
        assert_almost_equal(score_dbl, float(expected_sum))
        assert_almost_equal(score_T_dbl, float(expected_sum))
def test_linear_sum_assignment_input_validation():
    """1-D input raises ValueError; list, ndarray and np.matrix inputs agree."""
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
    C = [[1, 2, 3], [4, 5, 6]]
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.asarray(C)))
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.matrix(C)))
| {
"repo_name": "escherba/lsh-hdc",
"path": "tests/test_hungarian.py",
"copies": "2",
"size": "3081",
"license": "bsd-3-clause",
"hash": -1475381451024048400,
"line_mean": 27.5277777778,
"line_max": 77,
"alpha_frac": 0.5154170724,
"autogenerated": false,
"ratio": 3.288153681963714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48035707543637135,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from numpy.testing import assert_array_equal, assert_raises
from scipy.optimize import linear_sum_assignment
def test_linear_sum_assignment():
    """Check optimal assignments on square, rectangular and empty cost
    matrices, and (up to ordering) on their transposes."""
    fixtures = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),
        # n == 2, m == 0 matrix
        ([[], []], []),
    ]
    for raw, optimum in fixtures:
        matrix_arr = np.array(raw)
        r_idx, c_idx = linear_sum_assignment(matrix_arr)
        assert_array_equal(r_idx, np.sort(r_idx))
        assert_array_equal(optimum, matrix_arr[r_idx, c_idx])
        flipped = matrix_arr.T
        r_idx, c_idx = linear_sum_assignment(flipped)
        assert_array_equal(r_idx, np.sort(r_idx))
        assert_array_equal(np.sort(optimum),
                           np.sort(flipped[r_idx, c_idx]))
def test_linear_sum_assignment_input_validation():
    """1-D input raises ValueError; list, ndarray and np.matrix inputs agree."""
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])
    C = [[1, 2, 3], [4, 5, 6]]
    reference = linear_sum_assignment(C)
    assert_array_equal(reference, linear_sum_assignment(np.asarray(C)))
    assert_array_equal(reference, linear_sum_assignment(np.matrix(C)))
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scipy-master/scipy/optimize/tests/test_hungarian.py",
"copies": "1",
"size": "1740",
"license": "mit",
"hash": 576177658038243600,
"line_mean": 28.4915254237,
"line_max": 72,
"alpha_frac": 0.5218390805,
"autogenerated": false,
"ratio": 3.2706766917293235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42925157722293233,
"avg_score": null,
"num_lines": null
} |
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    """_hungarian returns index pairs whose selected costs sum to the
    known optimum, for each matrix and its transpose."""
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),
        # n == 2, m == 0 matrix
        ([[], []],
         0
         ),
    ]
    for raw, expected_total in matrices:
        cost_matrix = np.array(raw)
        pairs = _hungarian(cost_matrix)
        assert expected_total == sum(cost_matrix[r, c] for r, c in pairs)
        # Transposed input swaps the roles of the returned indices.
        pairs = _hungarian(cost_matrix.T)
        assert expected_total == sum(cost_matrix[r, c] for c, r in pairs)
| {
"repo_name": "phdowling/scikit-learn",
"path": "sklearn/utils/tests/test_linear_assignment.py",
"copies": "421",
"size": "1349",
"license": "bsd-3-clause",
"hash": -2570489902927458000,
"line_mean": 21.4833333333,
"line_max": 55,
"alpha_frac": 0.447739066,
"autogenerated": false,
"ratio": 3.540682414698163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 60
} |
# TODO #0.23: Remove this test module as the methods being tested
# have been replaced by SciPy methods
import numpy as np
import pytest
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_hungarian():
    """Deprecated sklearn _hungarian solver returns index pairs whose
    selected costs sum to the known optimum, for each matrix and its
    transpose."""
    from sklearn.utils.linear_assignment_ import _hungarian
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),
        # n == 2, m == 0 matrix
        ([[], []],
         0
         ),
    ]
    for cost_matrix, expected_total in matrices:
        cost_matrix = np.array(cost_matrix)
        indexes = _hungarian(cost_matrix)
        total_cost = 0
        for r, c in indexes:
            x = cost_matrix[r, c]
            total_cost += x
        assert expected_total == total_cost
        # Transposed input swaps the roles of the returned indices.
        indexes = _hungarian(cost_matrix.T)
        total_cost = 0
        for c, r in indexes:
            x = cost_matrix[r, c]
            total_cost += x
        assert expected_total == total_cost
| {
"repo_name": "chrsrds/scikit-learn",
"path": "sklearn/utils/tests/test_linear_assignment.py",
"copies": "5",
"size": "1482",
"license": "bsd-3-clause",
"hash": 4975567760992042000,
"line_mean": 22.5238095238,
"line_max": 65,
"alpha_frac": 0.4777327935,
"autogenerated": false,
"ratio": 3.605839416058394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6583572209558394,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Brian M Wilcox'
__version__ = '0.1.3'
"""
Copyright 2014 Brian M Wilcox
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import quandl
import collections
import time
import sys
import pandas_datareader
from pandas_datareader import Options
import multiprocessing
from multiprocessing import Process, Manager
from multiprocessing.pool import ThreadPool
from random import randrange
def data_worker(**kwargs):
    """
    Function to be spawned concurrently,
    consume data keys from input queue, and push the resulting dataframes to output map

    Required kwargs: 'function' (downloader callable), 'input' (queue of
    keys), 'output' (shared map). Quandl downloads additionally need
    'token'; pandas_datareader downloads need 'source' and optionally
    'begin'/'end'.
    """
    if kwargs is None:
        # NOTE: **kwargs is always a dict; guard kept for parity with the
        # original code.
        raise Exception("Invalid Arguments")
    # BUG FIX: the original built these Exception objects without raising
    # them, so missing arguments fell through to NameErrors later.
    if "function" not in kwargs:
        raise Exception("Invalid arguments, no function specified")
    function = kwargs["function"]
    if "input" not in kwargs:
        raise Exception("Invalid Arguments, no input queue")
    input_queue = kwargs["input"]
    if "output" not in kwargs:
        raise Exception("Invalid Arguments, no output map")
    output_map = kwargs["output"]
    if "token" in kwargs:
        argsdict = {"quandl_token": kwargs["token"]}
    elif all(key in kwargs for key in ("source", "begin", "end")):
        # BUG FIX: the original tested ("source" and "begin" and "end") in
        # kwargs, which only checks "end" because of `and` short-circuiting.
        argsdict = {"data_source": kwargs["source"], "begin": kwargs["begin"], "end": kwargs["end"]}
    elif "source" in kwargs:
        argsdict = {"data_source": kwargs["source"]}
    else:
        # Case-insensitive: the Quandl package has been imported both as
        # "Quandl" (legacy) and "quandl".
        if "quandl" in function.__module__.lower():
            raise Exception("Invalid Arguments, no Quandl token")
        if "pandas_datareader" in function.__module__:
            raise Exception("Invalid Arguments, no pandas data source specified")
        raise Exception("Invalid Arguments")
    retries = 5
    while not input_queue.empty():
        data_key = input_queue.get()
        get_data(function, data_key, output_map, retries, argsdict)
def get_data(data_get, data_key, output_map, retries_left, argdict):
    """
    Function to use Python Pandas and / or Quandl to download a dataframe
    Insert resulting dataframe into output map

    Retries with a progressively longer random backoff as retries_left
    approaches zero.
    """
    if retries_left <= 0:
        print(data_key + " Failed to download.")
        return
    # Identify type of function to use, insert result into output map.
    # Case-insensitive: the Quandl package has been imported both as
    # "Quandl" (legacy) and "quandl".
    if "quandl" in data_get.__module__.lower():
        output_map[data_key] = data_get(data_key, authtoken=argdict["quandl_token"])
        return
    if "pandas_datareader" in data_get.__module__:
        # Verify we are not dealing with options
        if 'get_call_data' not in dir(data_get):
            # BUG FIX: the original tested ("source" and "begin" and "end")
            # in argdict, which only checks "end"; also the key produced by
            # data_worker is "data_source", not "source".
            if all(key in argdict for key in ("data_source", "begin", "end")):
                try:
                    output_map[data_key] = data_get(data_key, argdict["data_source"], argdict["begin"], argdict["end"])
                    return
                except Exception:
                    # Narrowed from a bare except so KeyboardInterrupt /
                    # SystemExit are not swallowed.
                    print(data_key + " failed to download. Retrying up to " + retries_left.__str__() + " more times...")
            else:
                try:
                    output_map[data_key] = data_get(data_key, argdict["data_source"])
                    return
                except Exception:
                    print(data_key + " failed to download. Retrying up to " + retries_left.__str__() + " more times...")
        # Verify we are dealing with options
        if 'get_call_data' in dir(data_get):
            try:
                # Note options data will always be pulled from yahoo
                temp = data_get(data_key, 'yahoo')
                # For simplicities sake assume user wants all options data
                output_map[data_key] = temp.get_all_data()
                return
            except Exception:
                print(data_key + " options failed to download. Retrying up to " + retries_left.__str__() + " more times...")
                print("WARNING: If your version of Pandas is not up to date this may fail!")
    # Retry at random times progressively slower in case of failures when
    # number of retries remaining gets low.
    if (retries_left == 3):
        time.sleep(randrange(0, 5))
    if (retries_left == 2):
        time.sleep(randrange(5, 15))
    if (retries_left == 1):
        time.sleep(randrange(30, 90))
    get_data(data_get, data_key, output_map, (retries_left-1), argdict)
class ConcurrentPandas:
"""
Concurrent Pandas is a class for concurrent asynchronous data downloads
from a variety of sources using either threads, or processes.
"""
    def __init__(self):
        # Manager-backed containers so worker processes can share state.
        self.output_map = Manager().dict()    # data key -> downloaded dataframe
        self.input_queue = Manager().Queue()  # data keys waiting to be fetched
        self.data_worker = None   # worker function, set by a set_source_*() call
        self.worker_args = None   # kwargs handed to the worker
        self.source_name = None   # human-readable label of the active source
    def consume_keys(self):
        """
        Work through the keys to look up sequentially
        """
        # Runs the worker once in the current process/thread; it drains
        # the queue itself.
        print("\nLooking up " + self.input_queue.qsize().__str__() + " keys from " + self.source_name + "\n")
        self.data_worker(**self.worker_args)
def consume_keys_asynchronous_processes(self):
"""
Work through the keys to look up asynchronously using multiple processes
"""
print("\nLooking up " + self.input_queue.qsize().__str__() + " keys from " + self.source_name + "\n")
jobs = multiprocessing.cpu_count()*4 if (multiprocessing.cpu_count()*4 < self.input_queue.qsize()) \
else self.input_queue.qsize()
pool = multiprocessing.Pool(processes=jobs, maxtasksperchild=10)
for x in range(jobs):
pool.apply(self.data_worker, [], self.worker_args)
pool.close()
pool.join()
def consume_keys_asynchronous_threads(self):
"""
Work through the keys to look up asynchronously using multiple threads
"""
print("\nLooking up " + self.input_queue.qsize().__str__() + " keys from " + self.source_name + "\n")
jobs = multiprocessing.cpu_count()*4 if (multiprocessing.cpu_count()*4 < self.input_queue.qsize()) \
else self.input_queue.qsize()
pool = ThreadPool(jobs)
for x in range(jobs):
pool.apply(self.data_worker, [], self.worker_args)
pool.close()
pool.join()
def return_map(self):
"""
Return hashmap consisting of key string -> data frame
"""
return self.output_map
def return_input_queue(self):
"""
Return input Queue
"""
return self.input_queue
def insert_keys(self, *args):
"""
Unpack each key and add to queue
"""
for key in args:
self.unpack(key)
def unpack(self, to_unpack):
"""
Unpack is a recursive function that will unpack anything that inherits
from abstract base class Container provided it is not also inheriting from Python basestring.
Raise Exception if resulting object is neither a container or a string
Code working in both Python 2 and Python 3
"""
# Python 3 lacks basestring type, work around below
try:
isinstance(to_unpack, basestring)
except NameError:
basestring = str
# Base Case
if isinstance(to_unpack, basestring):
self.input_queue.put(to_unpack)
return
for possible_key in to_unpack:
if isinstance(possible_key, basestring):
self.input_queue.put(possible_key)
elif sys.version_info >= (3, 0):
if isinstance(possible_key, collections.abc.Container) and not isinstance(possible_key, basestring):
self.unpack(possible_key)
else:
raise Exception("A type that is neither a string or a container was passed to unpack. "
"Aborting!")
else:
if isinstance(possible_key, collections.Container) and not isinstance(possible_key, basestring):
self.unpack(possible_key)
else:
raise Exception("A type that is neither a string or a container was passed to unpack. "
"Aborting!")
def set_source_quandl(self, quandl_token):
"""
Set data source to Quandl
"""
self.data_worker = data_worker
self.worker_args = {"function": Quandl.get, "input": self.input_queue, "output": self.output_map,
"token": quandl_token}
self.source_name = "Quandl"
def set_source_yahoo_finance(self):
"""
Set data source to Yahoo Finance
"""
self.data_worker = data_worker
self.worker_args = {"function": pandas_datareader.DataReader, "input": self.input_queue, "output": self.output_map,
"source": 'yahoo'}
self.source_name = "Yahoo Finance"
def set_source_google_finance(self):
"""
Set data source to Google Finance
"""
self.data_worker = data_worker
self.worker_args = {"function": pandas_datareader.DataReader, "input": self.input_queue, "output": self.output_map,
"source": 'google'}
self.source_name = "Google Finance"
def set_source_federal_reserve_economic_data(self):
"""
Set data source to Federal Reserve Economic Data
"""
self.data_worker = data_worker
self.worker_args = {"function": pandas_datareader.DataReader, "input": self.input_queue, "output": self.output_map,
"source": 'fred'}
self.source_name = "Federal Reserve Economic Data"
def set_source_yahoo_options(self):
"""
Set data source to yahoo finance, specifically to download financial options data
"""
self.data_worker = data_worker
self.worker_args = {"function": Options, "input": self.input_queue, "output": self.output_map,
"source": 'yahoo'}
self.source_name = "Yahoo Finance Options"
| {
"repo_name": "briwilcox/Concurrent-Pandas",
"path": "concurrentpandas.py",
"copies": "1",
"size": "10839",
"license": "apache-2.0",
"hash": -3338542504680345000,
"line_mean": 37.1654929577,
"line_max": 128,
"alpha_frac": 0.5760679029,
"autogenerated": false,
"ratio": 4.320047827819849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017105943888292671,
"num_lines": 284
} |
__author__ = 'briannelson'
import numpy as np
import pyaudio
import datetime
import sys
class Utilities:
    """Byte-level helpers for turning raw little-endian PCM buffers into numpy arrays."""

    def __init__(self):
        """
        Constructor
        """

    @staticmethod
    def _decode_sample(byte_array, data_type):
        """Decode one little-endian sample from byte_array into a number.

        BUG FIX: the original called float.from_bytes(), which does not exist
        (only int has from_bytes), so float32 capture crashed.  Float samples
        are now reinterpreted through numpy instead.
        """
        if data_type == np.int16 or data_type == np.int32:
            return int.from_bytes(byte_array, byteorder='little', signed=True)
        # Reinterpret the raw little-endian bytes as the target float dtype.
        return np.frombuffer(bytes(byte_array),
                             dtype=np.dtype(data_type).newbyteorder('<'))[0]

    @staticmethod
    def array_from_bytes(data_chunk, sample_width, data_type):
        """Convert a mono byte buffer into {'Channel1': ndarray}.

        Returns None when len(data_chunk) is not a multiple of sample_width.
        sample_width may be smaller than the dtype (e.g. 3-byte samples into
        int32), which is why decoding is done sample by sample.
        """
        data_length = len(data_chunk)
        if data_length % sample_width != 0:
            return None
        reading_count = data_length // sample_width
        channel1 = np.zeros(reading_count, dtype=data_type)
        current_position = 0
        byte_array = bytearray(sample_width)  # reused; fully overwritten each sample
        for x in range(0, reading_count):
            for y in range(0, sample_width):
                byte_array[y] = data_chunk[current_position]
                current_position += 1
            channel1[x] = Utilities._decode_sample(byte_array, data_type)
        return {'Channel1': channel1}

    @staticmethod
    def deinterleave(data, sample_width, data_type):
        """Split an interleaved 2-channel buffer into {'Channel1', 'Channel2'}.

        Returns None when the buffer length is not a whole number of frames.
        """
        channel_count = 2
        data_length = len(data)
        if data_length % (sample_width * channel_count) != 0:
            return None
        reading_count = data_length // (sample_width * channel_count)
        channel1 = np.zeros(reading_count, dtype=data_type)
        channel2 = np.zeros(reading_count, dtype=data_type)
        current_position = 0
        byte_array = bytearray(sample_width)  # reused; fully overwritten each sample
        for x in range(0, reading_count):
            # Frames are stored as [ch1 sample][ch2 sample] pairs.
            for channel in (channel1, channel2):
                for y in range(0, sample_width):
                    byte_array[y] = data[current_position]
                    current_position += 1
                channel[x] = Utilities._decode_sample(byte_array, data_type)
        return {'Channel1': channel1, 'Channel2': channel2}

    @staticmethod
    def get_second_of_audio(audio_config):
        """Capture one second of mono audio from the default input device.

        :return: {"Time": capture start (UTC), "Data": ndarray of samples},
                 or {"Time": None, "Data": None} if the stream cannot open.
        """
        CHUNK = 8000
        audio_device = pyaudio.PyAudio()
        CHUNKS_PER_SECOND = int(audio_config.SamplingRate / CHUNK)
        audio_output = np.zeros(audio_config.SamplingRate, audio_config.NumberFormat)
        stream = None
        time = None
        try:
            audio_device = pyaudio.PyAudio()
            stream = audio_device.open(format=audio_config.AudioFormat,
                                       channels=1,
                                       rate=audio_config.SamplingRate,
                                       input=True,
                                       frames_per_buffer=CHUNK)
            time = datetime.datetime.utcnow()
            current_sample_position = 0
            # Read a second worth of data
            for i in range(0, CHUNKS_PER_SECOND):
                try:
                    data_chunk = stream.read(CHUNK)
                    result = Utilities.array_from_bytes(data_chunk, audio_config.SampleBytes, audio_config.NumberFormat)
                    channel1 = result['Channel1']
                    for j in range(0, CHUNK):
                        audio_output[current_sample_position] = channel1[j]
                        current_sample_position += 1
                except Exception:  # was a bare except; keep best-effort per-chunk recovery
                    print(sys.exc_info())
                    current_sample_position += CHUNK
            return {"Time": time, "Data": audio_output}
        except Exception:  # was a bare except (also swallowed KeyboardInterrupt)
            print("Unable to open audio stream.")
            return {"Time": None, "Data": None}
        finally:
            if stream is not None:
                # close the audio stream
                stream.stop_stream()
                stream.close()
            # BUG FIX: terminate the PyAudio instance even when the stream
            # never opened, instead of leaking the device handle.
            audio_device.terminate()
| {
"repo_name": "SidWatch/pySIDWatch",
"path": "Source/Audio/Utilities.py",
"copies": "1",
"size": "4631",
"license": "mit",
"hash": 4397064531992239000,
"line_mean": 33.3037037037,
"line_max": 120,
"alpha_frac": 0.5227812567,
"autogenerated": false,
"ratio": 4.368867924528302,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5391649181228302,
"avg_score": null,
"num_lines": null
} |
__author__ = 'briannelson'
import numpy as np
class Logging:
    """Logging configuration section: filename pattern, folder, trace level."""

    def __init__(self, values_dictionary):
        """Populate the section from its parsed settings dictionary."""
        for field in ("FilenameFormat", "Folder", "TraceLevel"):
            setattr(self, field, values_dictionary[field])
class Config:
    """Top-level server configuration assembled from a parsed settings dictionary."""

    def __init__(self, values_dictionary):
        """Build each nested configuration section from its sub-dictionary."""
        for attr, factory in (("Logging", Logging),
                              ("SidWatchServer", SidWatchServer),
                              ("SidWatchDatabase", SidWatchDatabase)):
            setattr(self, attr, factory(values_dictionary[attr]))
class SidWatchServer:
    """SidWatch server transfer settings (S3 buckets, credentials, FFT size)."""

    def __init__(self, values_dictionary):
        """Populate the section; NFFT falls back to 1024 when missing (None) or zero.

        :param values_dictionary: parsed "SidWatchServer" config section
        :return:
        """
        for field in ("SourceBucketName", "DestinationBucketName", "AccessKey",
                      "SecretKey", "TempFolder", "NFFT"):
            setattr(self, field, values_dictionary[field])
        if self.NFFT in (None, 0):
            self.NFFT = 1024
class SidWatchDatabase:
    """Database connection settings for the SidWatch server."""

    def __init__(self, values_dictionary):
        """Copy host and credential fields from the parsed config dictionary.

        :param values_dictionary: parsed "SidWatchDatabase" config section
        :return:
        """
        for field in ("Host", "Database", "User", "Password"):
            setattr(self, field, values_dictionary[field])
class Station:
    """A monitored VLF transmitter station record."""

    # Column order shared by __init__ defaults and load_from_row.
    _FIELDS = ("Id", "Callsign", "Country", "Location", "Notes",
               "Frequency", "Latitude", "Longitude", "CreatedAt", "UpdatedAt")

    def __init__(self):
        """Initialize all columns to their empty defaults."""
        self.Id = 0
        self.Callsign = None
        self.Country = None
        self.Location = None
        self.Notes = None
        self.Frequency = 0
        self.Latitude = None
        self.Longitude = None
        self.CreatedAt = None
        self.UpdatedAt = None

    def load_from_row(self, row):
        """Populate every column positionally from a database row."""
        for position, field in enumerate(self._FIELDS):
            setattr(self, field, row[position])
class StationReading:
    """A station signal-strength reading (no file reference).

    NOTE(review): a second StationReading class later in this module
    redefines this name, so the later definition is the one in effect at
    import time.
    """

    def __init__(self):
        """Initialize all columns to their empty defaults."""
        self.Id = 0
        self.SiteId = 0
        self.ReadingDateTime = None
        self.StationId = 0
        self.ReadingMagnitude = None
        self.CreatedAt = None
        self.UpdatedAt = None

    def load_from_row(self, row):
        """Populate every column positionally from a database row."""
        for position, field in enumerate(("Id", "SiteId", "ReadingDateTime",
                                          "StationId", "ReadingMagnitude",
                                          "CreatedAt", "UpdatedAt")):
            setattr(self, field, row[position])
class Site:
    """A monitoring-site record (location, timezone, monitor identity)."""

    _FIELDS = ("Id", "MonitorId", "Name", "Timezone", "UtcOffset",
               "Latitude", "Longitude", "CreatedAt", "UpdatedAt")

    def __init__(self):
        """Initialize all columns to their empty defaults."""
        self.Id = 0
        self.MonitorId = None
        self.Name = None
        self.Timezone = None
        self.UtcOffset = None
        self.Latitude = None
        self.Longitude = None
        self.CreatedAt = None
        self.UpdatedAt = None

    def load_from_row(self, row):
        """Populate every column positionally from a database row."""
        for position, field in enumerate(self._FIELDS):
            setattr(self, field, row[position])
class File:
    """An uploaded data-file record."""

    def __init__(self):
        """Initialize all columns to their empty defaults."""
        self.Id = 0
        self.SiteId = 0
        self.DateTime = None
        self.FileName = None
        self.Processed = False
        self.Archived = False
        self.Available = False
        self.CreatedAt = None
        self.UpdatedAt = None

    def load_from_row(self, row):
        """Populate columns positionally from a database row.

        NOTE(review): FileName is never read from the row here, while
        to_insert_array() writes it between DateTime and Processed — if the
        SELECT column order mirrors the INSERT order these indices are
        shifted by one.  Verify against the actual query before changing.
        """
        for position, field in enumerate(("Id", "SiteId", "DateTime",
                                          "Processed", "Archived", "Available",
                                          "CreatedAt", "UpdatedAt")):
            setattr(self, field, row[position])

    def to_insert_array(self):
        """Return the column values in INSERT parameter order."""
        return (self.SiteId,
                self.DateTime,
                self.FileName,
                self.Processed,
                self.Archived,
                self.Available,
                self.CreatedAt,
                self.UpdatedAt)
class StationReading:
    """A station signal-strength reading tied to a source file.

    NOTE(review): this redefines the StationReading class declared earlier in
    the module (that one lacks FileId); this later definition wins at import
    time.
    """

    def __init__(self):
        """Initialize all columns to their empty defaults."""
        self.Id = 0
        self.SiteId = 0
        self.StationId = 0
        self.ReadingDateTime = None
        self.ReadingMagnitude = None
        self.FileId = 0
        self.CreatedAt = None
        self.UpdatedAt = None

    def load_from_row(self, row):
        """Populate every column positionally from a database row."""
        self.Id = row[0]
        self.SiteId = row[1]
        self.StationId = row[2]
        self.ReadingDateTime = row[3]
        self.ReadingMagnitude = row[4]
        self.FileId = row[5]
        self.CreatedAt = row[6]
        self.UpdatedAt = row[7]

    def to_insert_array(self):
        """Return the column values in INSERT parameter order.

        BUG FIX: np.asscalar() was deprecated in NumPy 1.16 and removed in
        1.23; float() performs the same numpy-scalar -> Python float
        conversion for the magnitude.
        """
        return (self.SiteId,
                self.StationId,
                self.ReadingDateTime,
                float(self.ReadingMagnitude),
                self.FileId,
                self.CreatedAt,
                self.UpdatedAt)
class SiteSpectrumReading:
    """One (frequency, magnitude) point of a stored site spectrum."""

    def __init__(self):
        """Initialize all columns to their zero defaults."""
        self.Id = 0
        self.SiteSpectrumId = 0
        self.Frequency = 0.0
        self.ReadingMagnitude = 0.0

    def load_from_row(self, row):
        """Populate every column positionally from a database row."""
        self.Id = row[0]
        self.SiteSpectrumId = row[1]
        self.Frequency = row[2]
        self.ReadingMagnitude = row[3]

    def to_insert_array(self):
        """Return the column values in INSERT parameter order, rounded to 2 dp.

        BUG FIX: np.asscalar() was deprecated in NumPy 1.16 and removed in
        1.23; float() performs the same numpy-scalar -> Python float
        conversion before rounding.
        """
        return (self.SiteSpectrumId,
                round(float(self.Frequency), 2),
                round(float(self.ReadingMagnitude), 2))
class SiteSpectrum:
    """Metadata for one stored frequency-spectrum snapshot."""

    def __init__(self):
        """Initialize all columns to their empty defaults."""
        self.Id = 0
        self.SiteId = 0
        self.ReadingDateTime = None
        self.SamplesPerSeconds = 0
        self.NFFT = 0
        self.SamplingFormat = 0
        self.FileId = 0
        self.CreatedAt = None
        self.UpdatedAt = None

    def load_from_row(self, row):
        """Populate every column positionally from a database row."""
        for position, field in enumerate(("Id", "SiteId", "ReadingDateTime",
                                          "SamplesPerSeconds", "NFFT",
                                          "SamplingFormat", "FileId",
                                          "CreatedAt", "UpdatedAt")):
            setattr(self, field, row[position])
def to_insert_array(self):
insert_array = (self.SiteId,
self.ReadingDateTime,
self.SamplesPerSeconds,
self.NFFT,
self.SamplingFormat,
self.FileId,
self.CreatedAt,
self.UpdatedAt)
return insert_array | {
"repo_name": "SidWatch/pySIDServerDataProcessor",
"path": "source/SIDServer/Objects.py",
"copies": "1",
"size": "6997",
"license": "mit",
"hash": -7395448491553022000,
"line_mean": 26.4431372549,
"line_max": 87,
"alpha_frac": 0.5370873231,
"autogenerated": false,
"ratio": 3.9089385474860334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9943291408155253,
"avg_score": 0.0005468924861559964,
"num_lines": 255
} |
__author__ = 'briannelson'
import pyaudio
import numpy as np
class Site:
    """Monitoring-site identity and location settings."""

    def __init__(self, values_dictionary):
        """Copy the site settings from the parsed config dictionary."""
        for field in ("MonitorId", "Name", "Latitude", "Longitude",
                      "UtcOffset", "Timezone"):
            setattr(self, field, values_dictionary[field])
class Station:
    """A VLF station to monitor: call sign, plot color, carrier frequency."""

    def __init__(self, values_dictionary):
        """Copy the station settings from the parsed config dictionary."""
        for field in ("CallSign", "Color", "Frequency"):
            setattr(self, field, values_dictionary[field])
class Logging:
    """Configuration for log output: filename pattern, target folder, trace level."""

    def __init__(self, values_dictionary):
        """Copy the logging settings out of the parsed config dictionary."""
        settings = values_dictionary
        self.FilenameFormat = settings["FilenameFormat"]
        self.Folder = settings["Folder"]
        self.TraceLevel = settings["TraceLevel"]
class Config:
    """Root configuration for the SID monitor, assembled from a parsed dictionary."""

    def __init__(self, values_dictionary):
        """Build each configuration section and precompute per-station FFT bins."""
        self.Site = Site(values_dictionary["Site"])
        self.Audio = Audio(values_dictionary["Audio"])
        self.Logging = Logging(values_dictionary["Logging"])
        self.SidWatch = SidWatch(values_dictionary["SidWatch"])
        self.Stations = []
        for station_dictionary in values_dictionary["Station"]:
            station = Station(station_dictionary)
            # FFT bin index containing this station's carrier frequency.
            station.MonitoredBin = int((int(station.Frequency) * self.SidWatch.NFFT) / self.Audio.SamplingRate)
            self.Stations.append(station)
class Audio:
    """Audio capture settings; derives pyaudio/numpy formats from the bit depth."""

    def __init__(self, values_dictionary):
        """Copy sampling settings and derive the capture format triple.

        SamplingFormat 16 -> paInt16 / int16 / 2 bytes,
        24 -> paInt24 / int32 / 3 bytes,
        anything else -> paFloat32 / float32 / 4 bytes.
        """
        self.SamplingRate = values_dictionary["SamplingRate"]
        self.SamplingFormat = values_dictionary["SamplingFormat"]
        profiles = {
            16: (pyaudio.paInt16, np.int16, 2),
            24: (pyaudio.paInt24, np.int32, 3),
        }
        default = (pyaudio.paFloat32, np.float32, 4)
        self.AudioFormat, self.NumberFormat, self.SampleBytes = profiles.get(
            self.SamplingFormat, default)
class SidWatch:
    # Client-side SidWatch settings: upload behavior, local data layout,
    # which data products to save, and server credentials.
    def __init__(self, values_dictionary):
        """
        Constructor
        :param values_dictionary:
        :return:
        """
        self.AutoUpload = values_dictionary["AutoUpload"]
        self.DeleteAfterUpload = values_dictionary["DeleteAfterUpload"]
        self.DataFolder = values_dictionary["DataFolder"]
        self.ReadingPerFile = values_dictionary["ReadingPerFile"]
        self.NFFT = values_dictionary["NFFT"]
        self.SaveRawData = values_dictionary["SaveRawData"]
        self.SaveFrequencies = values_dictionary["SaveFrequencies"]
        self.SaveStationData = values_dictionary["SaveStationData"]
        self.Username = values_dictionary["Username"]
        self.Password = values_dictionary["Password"]
        # Default to a 1024-point FFT when the config omits NFFT or sets it
        # to zero (the fallback assignment follows on the next line).
        if (self.NFFT is None) or (self.NFFT == 0) :
self.NFFT = 1024 | {
"repo_name": "SidWatch/pySIDWatch",
"path": "Source/SID/Objects.py",
"copies": "1",
"size": "3323",
"license": "mit",
"hash": -3272782816073355000,
"line_mean": 29.495412844,
"line_max": 113,
"alpha_frac": 0.6136021667,
"autogenerated": false,
"ratio": 4.2657252888318355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5379327455531835,
"avg_score": null,
"num_lines": null
} |
__author__ = 'briannelson'
import yaml
import io
import h5py
from SID import Objects
import datetime as dt
import math
import numpy as np
from scipy import signal
class DateUtility:
    """Helpers for scheduling capture runs on 5-second boundaries."""

    def __init__(self):
        """
        Constructor
        """

    @staticmethod
    def get_next_run_time(current_date_time):
        """Return the next 5-second boundary after flooring the given time.

        Sub-second precision is discarded; e.g. 12:00:03.9 -> 12:00:05.
        """
        floored_second = int(math.floor(current_date_time.second / 5)) * 5
        anchor = dt.datetime(current_date_time.year,
                             current_date_time.month,
                             current_date_time.day,
                             current_date_time.hour,
                             current_date_time.minute,
                             floored_second)
        return anchor + dt.timedelta(seconds=5)
class ConfigUtility:
    """Loads the application configuration from a YAML file."""

    def __init__(self):
        """
        Constructor
        """

    @staticmethod
    def load(filename):
        """Parse *filename* as YAML and wrap the result in an Objects.Config.

        BUG FIXES: uses yaml.safe_load — a config file should never be able
        to construct arbitrary Python objects, and yaml.load without an
        explicit Loader is deprecated/unsafe in PyYAML >= 5.1.  The file
        handle is now closed via a context manager instead of being leaked.
        """
        with io.open(filename, mode="r") as stream:
            config_dictionary = yaml.safe_load(stream)
        return Objects.Config(config_dictionary)
class HDF5Utility:
    """Helpers for laying out SidWatch capture data inside an HDF5 file."""

    def __init__(self):
        """
        Constructor
        """

    @staticmethod
    def add_raw_data_set(group, name, time, data_type, sample_data):
        """Store a block of raw samples as a dataset stamped with its capture time."""
        ds = group.create_dataset(name, (len(sample_data), ), dtype=data_type, data=sample_data)
        ds.attrs["Time"] = time.isoformat()
        return ds

    @staticmethod
    def close_file(file):
        """Flush pending writes and close the HDF5 file handle."""
        file.flush()
        file.close()

    @staticmethod
    def open_file(filename, current_time, config):
        """Open (or create) the capture file and ensure expected groups exist.

        Site metadata is written as file-level attributes.  The raw-data,
        frequency-spectrum and per-station groups are created (with their
        attributes) only when the corresponding Save* flag is enabled and the
        group does not already exist.

        :return: dict with the file handle plus the three (possibly None) groups.
        """
        site_config = config.Site
        audio_config = config.Audio
        data_file = h5py.File(filename, "a")
        data_file.attrs["MonitorId"] = site_config.MonitorId
        data_file.attrs["StationName"] = site_config.Name
        data_file.attrs["Latitude"] = site_config.Latitude
        data_file.attrs["Longitude"] = site_config.Longitude
        data_file.attrs["UtcOffset"] = site_config.UtcOffset
        data_file.attrs["Timezone"] = site_config.Timezone
        data_file.attrs["CreatedDateTime"] = current_time.isoformat()
        raw_data_group = None
        frequency_spectrum_data_group = None
        stations_group = None
        if config.SidWatch.SaveRawData:
            raw_data_group = data_file.get("raw_sid_data")
            if raw_data_group is None:
                raw_data_group = data_file.create_group("raw_sid_data")
                raw_data_group.attrs["SamplingRate"] = audio_config.SamplingRate
                raw_data_group.attrs["SamplingFormat"] = audio_config.SamplingFormat
        if config.SidWatch.SaveFrequencies:
            frequency_spectrum_data_group = data_file.get("frequency_spectrum_data")
            if frequency_spectrum_data_group is None:
                frequency_spectrum_data_group = data_file.create_group("frequency_spectrum_data")
                frequency_spectrum_data_group.attrs["NFFT"] = config.SidWatch.NFFT
                frequency_spectrum_data_group.attrs["SamplingRate"] = audio_config.SamplingRate
                frequency_spectrum_data_group.attrs["SamplingFormat"] = audio_config.SamplingFormat
        if config.SidWatch.SaveStationData:
            stations_group = data_file.get("monitored_stations")
            if stations_group is None:
                stations_group = data_file.create_group("monitored_stations")
            for station in config.Stations:
                station_group = stations_group.get(station.CallSign)
                if station_group is None:
                    station_group = stations_group.create_group(station.CallSign)
                    station_group.attrs["CallSign"] = station.CallSign
                    station_group.attrs["Frequency"] = station.Frequency
                    station_group.attrs["MonitoredBin"] = station.MonitoredBin
                    station_group.attrs["NFFT"] = config.SidWatch.NFFT
        return { "File": data_file,
                 "RawDataGroup": raw_data_group,
                 "StationsGroup": stations_group,
                 "FrequencySpectrumDataGroup": frequency_spectrum_data_group }

    @staticmethod
    def add_signal_strength(station, stations_group, dataset_name, time, signal_strength):
        """Store one signal-strength sample under the station's group.

        Silently does nothing when the station group is missing.
        BUG FIX: removed the dead statement `name = time.isoformat`, which
        bound the method without calling it and never used the result.
        """
        station_group = stations_group.get(station.CallSign)
        if station_group is not None:
            ds = station_group.create_dataset(dataset_name, (1, ), data=signal_strength)
            ds.attrs["Time"] = time.isoformat()

    @staticmethod
    def add_frequency_spectrum(frequency_spectrum_data_group, dataset_name, time, frequencies, Pxx):
        """Store a 2 x N dataset: row 0 = frequencies (real part), row 1 = PSD values."""
        joined_array = np.vstack([frequencies.real, Pxx])
        ds = frequency_spectrum_data_group.create_dataset(dataset_name,
                                                          shape=(2, len(frequencies)),
                                                          dtype=np.float64,
                                                          data=joined_array)
        ds.attrs["Time"] = time.isoformat()
class FrequencyUtility:
    """Spectral-analysis helpers."""

    def __init__(self):
        """
        Constructor
        """

    @staticmethod
    def process_psd(data, nfft=1024, audio_sampling_rate=96000):
        """Estimate the power spectral density of *data* using Welch's method.

        :param data: 1-D sequence of audio samples
        :param nfft: number of FFT points passed to scipy.signal.welch
        :param audio_sampling_rate: sample rate in Hz
        :return: (pxx, f) — PSD values and the matching frequency bins
        """
        frequencies, power = signal.welch(x=data, nfft=nfft, fs=audio_sampling_rate)
        return power, frequencies
class FileUtility:
    """Plain-text persistence helpers for audio samples and PSD dumps."""

    def __init__(self):
        """
        Constructor
        :return:
        """

    @staticmethod
    def dump_audio(data, filename):
        """Write one sample per line so read_audio() can parse it back.

        BUG FIX: file objects have no printline() method (the original raised
        AttributeError on the first sample); write str(sample) + newline
        instead.  Context managers guarantee the handle is closed on error.
        """
        with open(filename, 'w') as f:
            for element in data:
                f.write(str(element) + '\n')

    @staticmethod
    def read_audio(filename):
        """Read samples written by dump_audio back as a list of floats."""
        with open(filename, 'r') as f:
            return [float(line) for line in f]

    @staticmethod
    def dump_psd(filename, frequencies, pxx):
        """Write tab-separated frequency/power pairs, one per line."""
        with open(filename, 'w') as f:
            for x in range(0, frequencies.shape[0]):
                freq = frequencies[x]
                power = pxx[x]
                f.write(str(freq) + '\t' + str(power) + "\n")
| {
"repo_name": "SidWatch/pySIDWatch",
"path": "Source/SID/Utilities.py",
"copies": "1",
"size": "6245",
"license": "mit",
"hash": -2843440281644529000,
"line_mean": 30.7005076142,
"line_max": 100,
"alpha_frac": 0.5681345076,
"autogenerated": false,
"ratio": 4.092398427260813,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160532934860813,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Brian'
# My first neural net!!!
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Consider implementing feature scaling/whitening (scipy.cluster.vq.whiten?)
# Consider implementing PCA whitening
# Consider implementing an autoencoder
# Consider implementing other optimization algorithms besides vanilla gradient descent, such as stochastic gradient descent,
# Adagrad, Adadelta, Adam, Nesterov's accelerated gradient descent, momentum, RMSprop
# Involve learning rate decay?
# Consider implementing dropout and maxout
# Consider implementing other activation functions (any others?)
# Consider implementing k-fold cross-validation and confusion matrix for classification to validate model performance
# Consider implementing a RNN
# Consider implementing a Reinforcement Learning agent
# Consider implementing a genetic algorithm or other evolutionary algorithms
# Consider implementing a Hidden Markov Model
# Consider implementing a SVM
# Consider implementing a SOM
# Consider implementing Attention Mechanisms
# Consider using deep learning frameworks like TensorFlow, Theano, Caffe, Torch, Neon, Keras, etc.
# Consider making a model with SyntaxNet
# Sigmoid function to get "activations" in [0, 1] for nodes in hidden layer:
# g(z) = 1/(1+e^(-z))
def sigmoid(z):
    """Logistic activation g(z) = 1/(1+e^-z): maps any real input into (0, 1)."""
    return 1 / (1 + np.exp(-z))
# Tanh function to get "activations" in [-1, 1] for nodes in the hidden layer:
# g(z) = 2/(1+e^(-2z)) - 1
def tanh(z):
    """Hyperbolic-tangent activation g(z) = 2/(1+e^-2z) - 1: maps into (-1, 1)."""
    scaled = np.exp(-2 * z)
    return 2 / (1 + scaled) - 1
# Computes leaky ReLU ( max(0, z) ) (normal RelU uses alpha = 0)
def relu(z, alpha=0.01):
    """Leaky rectified linear unit: z for z >= 0, alpha*z otherwise.

    Generalized: alpha was a hard-coded local; it is now a keyword parameter
    defaulting to the original 0.01, so existing callers are unaffected.
    Pass alpha=0 for a standard (non-leaky) ReLU.
    """
    return alpha * z if z < 0 else z
# Softmax function to get "activations" in [, ] for nodes in the hidden layer:
# P(y=k|x;theta) = e^(thetak*x)/sumK(e^(theta*x)) where k in {1, 2,..., K}
# g(z) = e^z[k]/sum(e^z)
def softmax(z, k):
    """Probability of class k (1-indexed, per the comment above) under softmax.

    BUG FIX (numerical stability): the scores are shifted by max(z) before
    exponentiating.  Mathematically identical — the shift cancels in the
    ratio — but it avoids exp() overflow (nan) for large score values.
    """
    shifted = np.exp(z - np.max(z))
    return shifted[k - 1] / np.sum(shifted)
# Softplus function to get "activations" ( "softer" RelU, which is max(0, z) )
# g(z) = log(1+e^z)
# derivative of softplus is simply the sigmoid function
def softplus(z):
    """Softplus activation log(1 + e^z), a smooth ("softer") ReLU.

    BUG FIX (numerical stability): implemented as logaddexp(0, z), which is
    the same quantity but does not overflow exp() for large z.  Its
    derivative is the sigmoid function.
    """
    return np.logaddexp(0, z)
# Derivative of sigmoid function to compute gradient terms in the hidden layer:
# g'(z) = sigmoid(z)*(1-sigmoid(z)) for sigmoid function
def dsigmoid(z):
    """Derivative of the sigmoid: g'(z) = g(z) * (1 - g(z)).

    The sigmoid is computed inline so the gradient helper stands alone.
    """
    activation = 1 / (1 + np.exp(-z))
    return np.multiply(activation, 1 - activation)
# Derivative of tanh function to compute gradient terms in the hidden layer:
# g'(z) = (1+tanh(z))*(1-tanh(z)) for tanh function
def dtanh(z):
    """Derivative of tanh: g'(z) = (1 + g(z)) * (1 - g(z)).

    The tanh is computed inline so the gradient helper stands alone.
    """
    activation = 2 / (1 + np.exp(-2 * z)) - 1
    return np.multiply(1 + activation, 1 - activation)
# Derivative of ReLU
def drelu(z):
alpha = 0.01
if z < 0:
return alpha
else:
return 1
# Calculate error term of hidden layer:
# # error2 = (theta2.T*error3) .* g'(z2)
def calcErrorTerm(theta, error, z):
    """Backpropagate an error term one layer: (theta[:, 1:].T * error) .* g'(z).

    Drops the bias column of theta (column 0) before propagating, then
    multiplies element-wise by the tanh derivative of the pre-activations.
    NOTE(review): `theta[:, 1:].T * error` is matrix multiplication only if
    these are np.matrix objects; with plain ndarrays the `*` is element-wise.
    Confirm the array type used by callers before refactoring.
    """
    return np.multiply((theta[:, 1:].T * error), dtanh(z))
# Calculate the regularized cost function for logistic regression:
# J(theta) = (1/m)*sum(-y*log(h)-(1-y)*log(1-h)) + (lambda/2m)*(sum(theta1^2)+sum(theta2^2))
def calcCostLg(h, y, theta1, theta2, regLambda=1):
    """Regularized logistic-regression (cross-entropy, base-10 log) cost.

    BUG FIX: regLambda was a free global that is never defined at module
    level (it only exists as a local inside trainPerceptron), so calling
    this raised NameError.  It is now a parameter defaulting to 1, matching
    the value trainPerceptron uses, so existing call sites keep working.

    NOTE(review): the formula comment above mentions a 1/m factor on the
    data term that the code never applied; behavior is kept as-is.
    """
    m = y.shape[0]
    cost = np.sum(-np.multiply(y, np.log10(h)) - np.multiply((1 - y), np.log10(1 - h))) \
        + (regLambda / (2 * m)) * (np.sum(np.square(theta1)) + np.sum(np.square(theta2)))
    return cost
# Calculate the regularized cost function for linear regression:
# J(theta) = (1/2)*(sum(h - y)^2 + lambda*(sum(theta1^2)+sum(theta2^2))
def calcCostLr(h, y, theta1, theta2, regLambda=1):
    """Regularized linear-regression cost: 0.5*(sum((h-y)^2) + lambda*reg).

    BUG FIX: regLambda was a free, never-defined global (NameError at call
    time); it is now a parameter defaulting to 1.  Also removed the unused
    local `m = y.shape[0]`, which the original computed but never read.
    """
    J = 1/2 * (np.sum(np.square(h - y)) + (regLambda * (np.sum(np.dot(theta1.T, theta1)) + np.sum(np.dot(theta2.T, theta2)))))
    return J
"""
Multilayer perceptron
"""
# Train the neural net
def trainPerceptron():
    """Train a 3-layer perceptron with tanh hidden units via batch gradient descent.

    BUG FIX: the forward/backward pass referenced undefined names a1b, a2b
    and error3; they now use the bias-augmented activations a1/a2 and the
    output error delta3 that are actually computed just above each use.

    NOTE(review): further issues left untouched pending the author's intent:
    - `data[:, :-4]` on a pandas DataFrame needs .iloc-style indexing;
    - gradient1 is allocated (j2, j1+1) while theta1 is (j2-1, j1), so the
      update shapes do not line up as written;
    - `input` shadows the builtin of the same name;
    - numLayers is unused.
    """
    # Read in data
    filename = "file"
    data = pd.read_csv(filename)
    input = data[:, :-4]
    y = data[:, -4:]
    # Initialize key values
    m = input.shape[0]
    j1 = input.shape[1] + 1
    j2 = 6
    j3 = 4
    epsilon = 0.13
    numLayers = 3
    targetCost = 0.0001
    cost = 99999999
    alpha = 0.01
    regLambda = 1
    # Initialize weights uniformly in [-epsilon, epsilon]
    theta1 = np.random.rand(j2-1, j1) * (2*epsilon) - epsilon
    theta2 = np.random.rand(j3, j2) * (2*epsilon) - epsilon
    while (cost >= targetCost):
        # for j in range(1000):
        # initialize a matrix to store the predictions
        h = np.zeros((m, j3))
        # initialize a count to accumulate adjustments to the weights
        gradient1 = np.zeros((j2, j1+1))
        gradient2 = np.zeros((j3, j2+1))
        # Determine delta matrix for each layer
        for i in range(m):
            # Forward propagation
            a1 = input[i].T
            a1 = np.vstack((np.ones((1, 1)), a1))
            z2 = np.dot(theta1, a1)  # was a1b (undefined)
            a2 = tanh(z2)
            a2 = np.vstack((np.ones((1, 1)), a2))
            z3 = np.dot(theta2, a2)  # was a2b (undefined)
            a3 = tanh(z3)
            h[i, :] = a3
            # Backpropagation
            actual = y[i].T
            delta3 = a3 - actual
            delta2 = calcErrorTerm(theta2, delta3, z2)  # was error3 (undefined)
            # Calculate adjustments for weights for this iteration
            adjustments1 = np.dot(delta2, a1.T)  # careful, bias term doesn't get multiplied through
            adjustments2 = np.dot(delta3, a2.T)  # careful, bias term doesn't get multiplied through
            # Accumulate adjustments
            gradient1 += adjustments1
            gradient2 += adjustments2
        # Adjust weights using regularization (bias column is not regularized)
        adjustBias = alpha * (gradient1[:, 0] / m)
        adjustWeights = alpha * (gradient1[:, 1:] / m + ((regLambda/m) * theta1[:, 1:]))
        theta1[:, 0] -= adjustBias
        theta1[:, 1:] -= adjustWeights
        adjustBias = alpha * (gradient2[:, 0] / m)
        adjustWeights = alpha * (gradient2[:, 1:] / m + ((regLambda/m) * theta2[:, 1:]))
        theta2[:, 0] -= adjustBias
        theta2[:, 1:] -= adjustWeights
        cost = calcCostLg(h, y, theta1, theta2)
"""
Convolutional neural network (LeNet)
"""
# It may be a lot easier to learn something like Theano or TensorFlow and use it for functions like convolution and pooling
# Flatten image into a one-dimensional vector to reduce dimensions of tensors by one?
# Does deconvolution actually need to be implemented by dividing the fourier transforms of delta by W then taking the inverse fourier transform?
##-> means that the corresponding operation is run here, likely using a machine learning library
def trainCNN():
    """Sketch of LeNet-style CNN training (forward pass, backprop, gradients).

    Most of the heavy math is pseudo-code: lines starting with ##-> mark
    operations intended to be supplied by an ML library (convolution,
    pooling, deconvolution, upsampling).

    BUG FIXES:
    - loops whose bodies contained only comments were SyntaxErrors (a suite
      cannot be empty); each now carries an explicit `pass` placeholder;
    - pooled-map shapes used `/` (float division), which np.zeros rejects on
      Python 3; changed to `//` (same value under Python 2 int division).
    """
    images = []
    images.append("all images in np.matrix form")
    y = ["correct labels"]
    alpha = 0.01
    regLambda = 1
    epsilon = 0.13
    channels = 3  # RGB or grayscale
    kernelSize = (5, 5)  # size of convolution kernel (could be different for various layers depending on image size)
    maxPool = (2, 2)  # stride of subsampling pool (could be different for various layers, and could be mean or L^p pooling)
    imageShape = images[0].shape  # dimensions of input images (assume 32x32)
    c1 = 4  # number of convolved feature maps in layer 1
    s1 = c1  # number of pooled feature maps in layer 1
    c2 = 12  # number of convolved feature maps in layer 2
    s2 = c2  # number of pooled feature maps in layer 2
    n1 = 20  # number of nodes in fully connected layer 1 (there could be more hidden layers)
    n2 = 10  # number of nodes in fully connected layer 2 (output layer)
    W1 = np.random.rand(c1, 1, kernelSize[0], kernelSize[1], channels) * (2*epsilon) - epsilon  # convolution kernels connecting input image to c1
    b1 = np.random.rand(c1, 1) * (2*epsilon) - epsilon  # biases for convolution kernels connecting input image to c1
    W2 = np.random.rand(c2, s1, kernelSize[0], kernelSize[1], channels) * (2*epsilon) - epsilon  # convolution kernels connecting s1 to c2
    b2 = np.random.rand(c2, s1) * (2*epsilon) - epsilon  # biases for convolution kernels connecting s1 to c2
    W3 = np.random.rand(n1, s2, kernelSize[0], kernelSize[1], channels) * (2*epsilon) - epsilon  # convolution kernels connecting s2 to n1
    b3 = np.random.rand(n1, s2) * (2*epsilon) - epsilon  # biases for convolution kernels connecting s2 to n1
    W4 = np.random.rand(n2, n1) * (2*epsilon) - epsilon  # weights connecting n1 to n2
    b4 = np.random.rand(n2) * (2*epsilon) - epsilon  # weights for n1 bias term
    for p in range(len(images)):
        # Is there a better way to vectorize all this?
        # Reshape dimensions of tensors to be consistent with TensorFlow?
        image = images[p]  # should be (32, 32, 3)
        c1Convolved = np.zeros((c1, imageShape[0]-kernelSize[0]+1, imageShape[1]-kernelSize[1]+1, channels))  # should be (4, 28, 28, 3)
        c1Activated = np.zeros(c1Convolved.shape)  # should be (4, 28, 28, 3)
        c1Pooled = np.zeros((c1Convolved.shape[0], c1Convolved.shape[1]//maxPool[0], c1Convolved.shape[2]//maxPool[1], channels))  # should be (4, 14, 14, 3)
        c2Convolved = np.zeros((c2, c1Pooled.shape[0]-kernelSize[0]+1, c1Pooled.shape[1]-kernelSize[1]+1, channels))  # should be (12, 10, 10, 3)
        c2Activated = np.zeros(c2Convolved.shape)  # should be (12, 10, 10, 3)
        c2Pooled = np.zeros((c2Convolved.shape[0], c2Convolved.shape[1]//maxPool[0], c2Convolved.shape[2]//maxPool[1], channels))  # should be (12, 5, 5, 3)
        n1Convolved = np.zeros((n1))
        n1Activated = np.zeros((n1))
        n2Convolved = np.zeros((n2))
        n2Activated = np.zeros((n2))
        delta1Convolved = np.zeros(c1Convolved.shape)  # should be (4, 28, 28, 3)
        delta1Pooled = np.zeros(c1Pooled.shape)  # should be (4, 14, 14, 3)
        delta2Convolved = np.zeros(c2Convolved.shape)  # should be (12, 10, 10, 3)
        delta2Pooled = np.zeros(c2Pooled.shape)  # should be (12, 5, 5, 3)
        delta3 = np.zeros(n1)
        delta4 = np.zeros(n2)
        # initialize an array to store predictions
        h = np.zeros((n2))
        # Forward propagation layer 1
        for i in range(c1):
            ##-> convolve image with W1[i, 0, :, :, :], add b1[i, 0], and store it in c1Convolved[i, :, :, :]
            pass  # placeholder for library convolution
        ##-> run activation function on c1Convolved[:, :, :, :] for each pixel and channel, and store it in c1Activated[:, :, :, :]
        ##-> run max pooling on c1Activated[:, :, :, :] and store it in c1Pooled[:, :, :, :]
        # Forward propagation layer 2
        for i in range(c2):
            for j in range(c1):
                ##-> convolve c1Pooled[j, :, :, :] with W2[i, j, :, :, :], add b2[i, j], and add it to c2Convolved[i, :, :, :]
                pass  # placeholder for library convolution
        ## run activation function on c2Convolved[:, :, :, :] for each pixel and channel, and store it in c2Activated[:, :, :, :]
        ## run max pooling on c2Activated[:, :, :, :] and store it in c2Pooled[:, :, :, :]
        # Forward propagation layer 3
        for i in range(n1):
            for j in range(c2):
                ##-> convolve c2Pooled[j, :, :, :] with W3[i, j, :, :, :], add b3[i, j], average the channels (yes?), and add the resulting number to n1Convolved[i]
                pass  # placeholder for library convolution
        ##-> run activation function on n1Convolved and store it in n1Activated
        # Forward propagation layer 4
        n2Convolved += np.dot(W4, n1Activated)
        n2Convolved += b4
        ##-> run softmax activation function on n2Convolved and store it n2Activated
        # Backpropagation layer 4
        delta4 = n2Activated - y
        # Backpropagation layer 3
        delta3 = calcErrorTerm(W4, delta4, n1Convolved)  # don't need to factor in b4 to calculating delta3
        # Backpropagation layer 2
        for i in range(c2):
            for j in range(n1):
                ##-> deconvolve delta3[j] with W3[j, i, :, :, :] and add it to delta2Pooled[i, :, :, :]
                # expands shape to that of delta2Pooled, and means error is being distributed through all (3) channels
                pass  # placeholder for library deconvolution
        ##-> upsample delta2Pooled[:, :, :, :] and store it in delta2Convolved[:, :, :, :]
        ##-> multiply element-wise delta2Convolved[:, :, :, :] with the result of running c2Convolved[:, :, :, :]
        # through the derivative of the activation function and store it in delta2Convolved[:, :, :, :]
        # Backpropagation layer 1
        for i in range(c1):
            for j in range(c2):
                ##-> deconvolve delta2Convolved[j, :, :, :] with W2[j, i, :, :, :] and add it to delta1Pooled[i, :, :, :]
                # expands shape to that of delta1Pooled, and means error is continuing to be distributed through all (3) channels
                pass  # placeholder for library deconvolution
        ##-> upsample delta1Pooled[:, :, :, :] and store it in delta1Convolved[:, :, :, :]
        ##-> multiply element-wise delta1Convolved[:, :, :, :] with the result of running c1Convolved[:, :, :, :]
        # through the derivative of the activation function and store it in delta1Convolved[:, :, :, :]
        # Compute gradients for layer 1
        for i in range(c1):
            ##-> convolve image with delta1Convolved[i, :, :, :] and subtract that (times alpha) from W1[i, 0, :, :, :]
            ##-> average three channels of delta1Convolved[i, :, :, :] and subtract the width and height dimensions from b1[i, 0]
            # TODO: Regularization
            pass  # placeholder for library gradient update
        # Compute gradients for layer 2
        for i in range(c2):
            for j in range(c1):
                ##-> convolve c1Pooled[j, :, :, :] with delta2Convolved[i, :, :, :] and subtract that (times alpha) from W2[i, j, :, :, :]
                ##-> average three channels of delta2Convolved[i, :, :, :] and subtract the width and height dimensions from b2[i, j]
                # TODO: Regularization
                pass  # placeholder for library gradient update
        # Compute gradients for layer 3
        for i in range(n1):
            for j in range(c2):
                ##-> convolve c2Pooled[j, :, :, :] with delta3[i] and subtract that (times alpha) from W3[i, j, :, :, :]
                ##-> subtract delta3[i] from b3[i, j]
                # TODO: Regularization
                pass  # placeholder for library gradient update
        # Compute gradients for layer 4
        W4 -= alpha * (np.outer(delta4, n1Activated) + (regLambda * W4))  # is regLambda correct? What about m?
        b4 -= delta4
        # FIXME: Fix biases, right now their operations don't make sense at all
        # Biases should have one vector component for each output node of a given layer
"""
Implement a convolutional neural network in the machine learning Python API TensorFlow.
"""
mnist = input_data.read_data_sets('MNIST_data', one_hot=True) # holds pointer to MNIST data
# Define some functions that make code more concise and modular so we don't have type out
# TensorFlow operations a bunch of times
# Initialize weights in a Variable tensor
def weight(shape):
    """Create a trainable weight Variable of the given shape, initialized
    from a truncated normal distribution (mean 0.0, stddev 0.1)."""
    return tf.Variable(
        initial_value=tf.truncated_normal(shape=shape, mean=0.0, stddev=0.1))
# Initialize biases in a Variable tensor
def bias(shape):
    """Create a trainable bias Variable of the given shape, every entry 0.1
    (a small positive constant, so ReLU units start out active)."""
    initial_value = tf.constant(value=0.1, shape=shape)
    return tf.Variable(initial_value=initial_value)
# Create an Operation for convolution
def convolve(x, W):
    """Return the 2-D convolution of `x` with kernel `W`, stride 1 in every
    dimension and zero ('SAME') padding, so spatial size is preserved."""
    stride_spec = [1, 1, 1, 1]  # [batch, height, width, channel]
    return tf.nn.conv2d(x, W, strides=stride_spec, padding='SAME')
# Create an Operation for 2x2 max pooling
def maxpool(x):
    """2x2 max pooling with stride 2 and 'SAME' padding -- halves the
    spatial dimensions of `x`."""
    window = [1, 2, 2, 1]  # [batch, height, width, channel]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Build the computational graph in a TensorFlow Session
sess = tf.Session()

# Weights and biases for convolutional layer 1
W_conv1 = weight([5, 5, 1, 32])  # 5x5 kernels, 1 input channel, 32 filters (800 weights)
# BUG FIX: b_conv1 was previously created with weight() (truncated-normal init);
# use bias() so it starts at the constant 0.1, consistent with b_conv2/b_fc1/b_fc2.
b_conv1 = bias([32])
# Create a Placeholder tensor for the input data and true output labels
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])  # flat 784 -> NHWC image batch
y_label = tf.placeholder(dtype=tf.float32, shape=[None, 10])
# Convolution and pooling Operation for convolutional layer 1
h_conv1 = tf.nn.relu(convolve(x_image, W_conv1) + b_conv1) # 28x28x1 -> 28x28x32
h_pool1 = maxpool(h_conv1) # 28x28x32 -> 14x14x32
# Weights and biases for convolutional layer 2
W_conv2 = weight([5, 5, 32, 64])
b_conv2 = bias([64])
# Convolution and pooling Operation for convolutional layer 2
h_conv2 = tf.nn.relu(convolve(h_pool1, W_conv2) + b_conv2) # 14x14x32 -> 14x14x64
h_pool2 = maxpool(h_conv2) # 14x14x64 -> 7x7x64
# Weights and biases for fully connected layer 1
W_fc1 = weight([7*7*64, 1024])
b_fc1 = bias([1024])
# Activation function for fully connected layer 1
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) # 7*7*64 = 3,136 neurons flattened
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # 3,136 -> 1,024 (3,211,264 weights)
# Implement dropout, TensorFlow takes care of the details in the computational graph
keep_probability = tf.placeholder(dtype=tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_probability)
# Weights and biases for fully connected layer 2
W_fc2 = weight([1024, 10])
b_fc2 = bias([10])
# Predicted output
y_prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) # 1024 -> 10 (10,240 weights)
# Build out the final steps of the computational graph so the model can be automatically
# trained via backpropagation.
# NOTE(review): tf.log(y_prediction) produces NaN if a softmax output underflows
# to exactly 0; tf.nn.softmax_cross_entropy_with_logits on the pre-softmax logits
# is the numerically stable formulation -- consider switching.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(y_prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_prediction, 1), tf.argmax(y_label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Prepare the Session to be run by initializing all Variables
# (tf.initialize_all_variables is the old TF name; later 1.x releases renamed it
# tf.global_variables_initializer)
sess.run(tf.initialize_all_variables())
# Train the model
for i in range(20000):
    batch = mnist.train.next_batch(50)
    # Print train accuracy every 100 iterations
    if i % 100 == 0:
        train_accuracy = accuracy.eval(session=sess, feed_dict={x: batch[0],
                                                                y_label: batch[1],
                                                                keep_probability: 1.0})
        print("Step %d, training accuracy %g"%(i, train_accuracy))
    # Run one gradient step on this minibatch with dropout at 50% keep probability
    train_step.run(session=sess, feed_dict={x: batch[0],
                                            y_label: batch[1],
                                            keep_probability: 0.5})
# Print test accuracy (the MNIST loader provides separate train and test splits)
print("Test accuracy %g"%accuracy.eval(session=sess, feed_dict={x: mnist.test.images,
                                                                y_label: mnist.test.labels,
                                                                keep_probability: 1.0}))
| {
"repo_name": "bhwester/neural-network",
"path": "neuralnet.py",
"copies": "1",
"size": "18251",
"license": "mit",
"hash": 1912024338889718000,
"line_mean": 43.4063260341,
"line_max": 164,
"alpha_frac": 0.6410607638,
"autogenerated": false,
"ratio": 3.2486650053399786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9348555293263489,
"avg_score": 0.008234095175297908,
"num_lines": 411
} |
# Module metadata: author credit and the package-level docstring
# (assigned explicitly to __doc__ rather than via a leading string literal).
__author__ = "Brian O'Neill" # BTO
__doc__ = """
Configurable decorator for debugging and profiling that writes
caller name(s), args+values, function return values, execution time,
number of call, to stdout or to a logger. log_calls can track
call history and provide it in CSV format and Pandas DataFrame format.
NOTE: for CPython only -- this uses internals of stack frames
which may well differ in other interpreters.
See docs/log_calls.md for details, usage info and examples.
Argument logging is based on the Python 2 decorator:
https://wiki.python.org/moin/PythonDecoratorLibrary#Easy_Dump_of_Function_Arguments
with changes for Py3 and several enhancements, as described in docs/log_calls.md.
"""
from log_calls.version import __version__
import inspect
import functools
from functools import wraps, partial
import logging
import sys
import os
import io # so we can refer to io.TextIOBase
import time
import datetime
from collections import namedtuple, deque, OrderedDict
# 0.3.0b23
from reprlib import recursive_repr
import fnmatch # 0.3.0 for omit, only
from .deco_settings import (DecoSetting,
DecoSetting_bool, DecoSetting_int, DecoSetting_str,
DecoSettingsMapping)
from .helpers import (no_duplicates, get_args_pos, get_args_kwargs_param_names,
difference_update, restrict_keys,
get_defaulted_kwargs_OD, get_explicit_kwargs_OD,
get_file_of_object,
dict_to_sorted_str, prefix_multiline_str,
is_quoted_str, any_match)
from .proxy_descriptors import ClassInstanceAttrProxy
from .used_unused_kwds import used_unused_keywords
__all__ = ['log_calls', 'CallRecord', '__version__', '__author__']
#-----------------------------------------------------------------------------
# DecoSetting subclasses with pre-call handlers.
# The `context` arg for pre_call_handler methods has these keys:
# decorator
# settings # self._deco_settings (of decorator)
# stats # self._stats (" " )
# prefixed_fname
# fparams
# call_list
# args
# kwargs
# indent
# output_fname # prefixed_fname + possibly num_calls_logged (if log_call_numbers true)
#
# argcount
# argnames # argcount-long
# argvals # argcount-long
# varargs
# varargs_name
# kwargs_name
# defaulted_kwargs
# explicit_kwargs
# implicit_kwargs
#-----------------------------------------------------------------------------
# Note: stats (data) attributes are all r/o (but method clear_history isn't!),
# . so wrapped function can't trash 'em;
# . settings - could pass settings.as_dict();
# . the rest (*_kwargs, call_list) could be mucked with,
# . so we'd have to deepcopy() to prevent that.
# . OR just document that wrapped functions shouldn't write to these values,
# . as they're "live" and altering them could cause confusion/chaos/weirdness/crashes.
class DecoSettingEnabled(DecoSetting_int):
    """The `enabled` setting: an int (bools accepted), default True.
    Its pre-call handler writes the '<== called by ...' entry line."""

    def __init__(self, name, **kwargs):
        # v0.3.0b25 Let's try default=True, see what tests break.
        # It sucks having the real default be False.
        # super().__init__(name, int, False, allow_falsy=True, **kwargs)
        super().__init__(name, int, True, allow_falsy=True, **kwargs)

    def pre_call_handler(self, context):
        """Return the message logged on entry to a decorated callable."""
        return ("%s <== called by %s"
                % (context['output_fname'],
                   ' <== '.join(context['call_list'])))

    def value_from_str(self, s):
        """Virtual method for use by _deco_base._read_settings_file.
        0.2.4.post1
        Parse an int if possible, else the boolean literals 'True'/'False',
        else fall back to the setting's default.
        BUG FIX: the old fallback `bool(s)` never raises ValueError and is
        True for ANY nonempty string -- so 'False' parsed as True and the
        default was unreachable. Parse the two literals explicitly instead."""
        try:
            return int(s)
        except ValueError:
            if s == 'True':
                return True
            if s == 'False':
                return False
            return self.default
class DecoSettingArgs(DecoSetting_bool):
    """Bool setting (default True) whose pre-call handler builds the
    'arguments: ...' line(s) listing the wrapped callable's arguments."""

    def __init__(self, name, **kwargs):
        # bool-valued; False is a legal (falsy) value.
        super().__init__(name, bool, True, allow_falsy=True, **kwargs)

    @staticmethod
    def _get_all_ids_of_instances_in_progress(context, *, skipframes):
        """Return the set of id()s of instances whose __init__ is currently
        executing somewhere on the call stack (including the wrapped callable's
        own instance when it IS an __init__). Callers use this to avoid running
        a user-defined __repr__ on a half-constructed object.
        :param context: the pre-call `context` dict (keys listed in module comments)
        :param skipframes: number of stack frames to skip before scanning upward
        """
        in_progress = set()
        # First, deal with wrapper/the function it wraps
        deco = context['decorator']
        if deco.f.__name__ == '__init__' and deco._classname_of_f:
            argvals = context['argvals']
            if argvals and not inspect.isclass(argvals[0]):     # not interested in metaclass __init__
                in_progress.add(id(argvals[0]))
        # Walk the stack upward until module level, collecting every `self`
        # of a frame named '__init__'.
        frame = sys._getframe(skipframes)
        while 1:
            funcname = frame.f_code.co_name
            if funcname == '<module>':
                break
            if funcname == '__init__':
                # so if it's really an instance __init__,
                # eval('self.__init__', frame.f_globals, frame.f_locals)
                # is a bound method, so .__func__ will be underlying function
                # and .__self__ is the instance it's bound to :)
                try:
                    init_method = eval('self.__init__', frame.f_globals, frame.f_locals)
                except Exception as e:
                    # best-effort: a frame named '__init__' needn't have a 'self'
                    pass
                else:
                    if inspect.ismethod(init_method):
                        func = init_method.__func__
                        instance = init_method.__self__
                        if not inspect.isclass(instance):       # not interested in metaclass __init__
                            in_progress.add(id(instance))
            frame = frame.f_back
        return in_progress

    def pre_call_handler(self, context: dict):
        """Build and return the 'arguments: ...' message (or None if the
        wrapped callable takes no parameters).
        Alert:
        this class's handler knows the keyword of another handler (args_sep),
        # whereas it shouldn't even know its own (it should use self.name)"""
        if not context['fparams']:
            return None
        # Make msg
        args_sep = context['settings'].get_final_value(
            'args_sep', context['kwargs'], fparams=context['fparams'])
        indent = context['indent']
        # ~Kludge / incomplete treatment of seps that contain \n
        end_args_line = ''
        if args_sep[-1] == '\n':
            args_sep = '\n' + (indent * 2)
            end_args_line = args_sep
        msg = indent + "arguments: " + end_args_line

        # Two convenience functions
        def map_to_arg_eq_val_strs(pairs):
            # Plain '%s=%r' formatting -- safe only for fully-constructed values.
            return map(lambda pair: '%s=%r' % pair, pairs)

        def map_to_arg_eq_val_strs_safe(pairs):
            """
            :param pairs: sequence of (arg, val) pairs
            :return: list of strings `arg=val_str` where val_str is
            object.__repr__(val) if val is an instance currently being constructed,
            repr(val) otherwise
            """
            # Get all active instances whose __init__s are on call stack
            # caller-of-caller-of-caller's frame
            # caller is pre_call_handler, called by wrapper;
            # we want to start with caller of wrapper, so skipframes=4
            ids_objs_in_progress = self._get_all_ids_of_instances_in_progress(context, skipframes=4)
            arg_eq_val_strs = []
            for pair in pairs:
                arg, val = pair
                if id(val) in ids_objs_in_progress:
                    # object.__repr__ can't recurse into user code
                    arg_eq_val_str = '%s=%s' % (arg, object.__repr__(val))
                else:
                    #### Note, the '%r' can trigger indirectly recursive __repr__ calls,
                    #### . which is why we use now reprlib.recursive_repr (v0.3.0b23) :
                    arg_eq_val_str = '%s=%r' % pair
                arg_eq_val_strs.append(arg_eq_val_str)
            return arg_eq_val_strs

        # Assemble (name, value) pairs: positionals, *varargs, explicit
        # keywords, then **kwargs (implicit keywords), in that order.
        args_vals = list(zip(context['argnames'], context['argvals']))
        if context['varargs']:
            args_vals.append( ("*%s" % context['varargs_name'], context['varargs']) )
        args_vals.extend( context['explicit_kwargs'].items() )
        if context['implicit_kwargs']:
            args_vals.append( ("**%s" % context['kwargs_name'], context['implicit_kwargs']) )
        if args_vals:
            msg += args_sep.join(
                map_to_arg_eq_val_strs_safe(args_vals))
        else:
            msg += "<none>"
        # The defaulted kwargs are kw args in self.f_params which
        # are NOT in implicit_kwargs, and their vals are defaults
        # of those parameters. Write these on a separate line.
        # Don't just print the OrderedDict -- cluttered appearance.
        if context['defaulted_kwargs']:
            msg += ('\n' + indent + "defaults: " + end_args_line
                    + args_sep.join(
                        map_to_arg_eq_val_strs(context['defaulted_kwargs'].items()))
            )
        return msg
#-----------------------------------------------------------------------------
# DecoSetting subclasses with post-call handlers.
# The `context` for post_call_handler methods has these additional keys:
# elapsed_secs
# process_secs
# timestamp
# retval
#-----------------------------------------------------------------------------
class DecoSettingRetval(DecoSetting_bool):
    """Bool setting (default False) whose post-call handler reports the
    wrapped callable's return value, truncated to MAXLEN_RETVALS chars."""
    MAXLEN_RETVALS = 77

    def __init__(self, name, **kwargs):
        super().__init__(name, bool, False, allow_falsy=True, **kwargs)

    def post_call_handler(self, context: dict):
        text = str(context['retval'])
        if len(text) > self.MAXLEN_RETVALS:
            text = text[:self.MAXLEN_RETVALS] + "..."
        return "%s%s return value: %s" % (
            context['indent'], context['output_fname'], text)
class DecoSettingElapsed(DecoSetting_bool):
    """Bool setting (default False) whose post-call handler reports
    wall-clock and process (CPU) time for the call."""
    def __init__(self, name, **kwargs):
        super().__init__(name, bool, False, allow_falsy=True, **kwargs)

    def post_call_handler(self, context: dict):
        timing = ("elapsed time: %f [secs], process time: %f [secs]"
                  % (context['elapsed_secs'], context['process_secs']))
        return context['indent'] + timing
class DecoSettingExit(DecoSetting_bool):
    """Bool setting (default True) whose post-call handler writes the
    '==> returning to ...' exit line."""
    def __init__(self, name, **kwargs):
        super().__init__(name, bool, True, allow_falsy=True, **kwargs)

    def post_call_handler(self, context: dict):
        chain = ' ==> '.join(context['call_list'])
        return "%s ==> returning to %s" % (context['output_fname'], chain)
class DecoSettingHistory(DecoSetting_bool):
    """Bool setting (default False): when true, each logged call is appended
    to the decorator's call history. Produces no output of its own."""
    def __init__(self, name, **kwargs):
        super().__init__(name, bool, False, allow_falsy=True, **kwargs)

    def post_call_handler(self, context: dict):
        deco = context['decorator']
        deco._add_to_history(
            context['argnames'],
            context['argvals'],
            context['varargs'],
            context['explicit_kwargs'],
            context['defaulted_kwargs'],
            context['implicit_kwargs'],
            context['retval'],
            elapsed_secs=context['elapsed_secs'],
            process_secs=context['process_secs'],
            timestamp_secs=context['timestamp'],
            prefixed_func_name=context['prefixed_fname'],
            caller_chain=context['call_list'],
        )
        return None     # nothing to write -- recording only
#-----------------------------------------------------------------------------
# DecoSetting subclasses overriding value_from_str and has_acceptable_type
#-----------------------------------------------------------------------------
class DecoSettingFile(DecoSetting):
    """The `file` setting: the stream decorated-function output is written to."""

    def value_from_str(self, s):
        """Virtual method for use by _deco_base._read_settings_file.
        Map the literal 'sys.stderr' to the live stream object; everything
        else is delegated to the base class.
        0.2.4.post1"""
        if s == 'sys.stderr':
            ### print("DecoSettingFile.value_from_str, s=%s, returning %r (sys.stderr?)" % (s, sys.stderr))
            return sys.stderr
        # 'sys.stdout' ultimately becomes None via this:
        return super().value_from_str(s)

    def has_acceptable_type(self, value):
        """Accommodate IPython, whose sys.stderr is of type IPython.kernel.zmq.iostream.OutStream.
        """
        if not value:
            return False
        if super().has_acceptable_type(value):
            return True
        # Hmmm ok maybe we're running under IPython:
        try:
            import IPython
            return isinstance(value, IPython.kernel.zmq.iostream.OutStream)
        except (ImportError, AttributeError):
            # BUG FIX: newer IPython releases no longer expose IPython.kernel.zmq,
            # so the attribute chain raises AttributeError (not ImportError);
            # treat that the same as IPython being absent.
            return False
class DecoSettingLogger(DecoSetting):
    """The `logger` setting: in a settings file its value may be a logger
    name enclosed in quotes."""
    def value_from_str(self, s):
        """Virtual method for use by _deco_base._read_settings_file.
        s is the name of a logger, enclosed in quotes, or something bad.
        0.2.4.post1"""
        return s[1:-1] if is_quoted_str(s) else super().value_from_str(s)
#-----------------------------------------------------------------------------
# CallRecord namedtuple, for history
#-----------------------------------------------------------------------------
# One recorded call of a decorated callable, as kept in call history.
# caller_chain: list of fn names, possibly "prefixed",
# from most-recent (immediate caller) to least-recent if len > 1.
CallRecord = namedtuple(
    "CallRecord",
    'call_num '
    'argnames argvals '
    'varargs '
    'explicit_kwargs defaulted_kwargs implicit_kwargs '
    'retval '
    'elapsed_secs process_secs '
    'timestamp '
    'prefixed_func_name '
    'caller_chain'
)
#-----------------------------------------------------------------------------
# useful lil lookup tables
#-----------------------------------------------------------------------------
# OrderedDicts instead of dicts for the sake of
# tests of <cls>.log_calls_omit, <cls>.log_calls_only

# Keys: what users can suffix property names with in omit & only lists;
# Vals: the corresponding attributes of `property` objects.
PROPERTY_USER_SUFFIXES_to_ATTRS = OrderedDict(
    zip(('getter', 'setter', 'deleter'),
        ('fget', 'fset', 'fdel'))
)
# The inverse mapping: property attributes -> user-facing suffixes.
PROPERTY_ATTRS_to_USER_SUFFIXES = OrderedDict(
    zip(('fget', 'fset', 'fdel'),
        ('getter', 'setter', 'deleter'))
)

# Name of *** local variable *** of _deco_base_f_wrapper_,
# accessed by callstack-chaseback routine and (0.3.0) _get_own_deco_wrapper
STACKFRAME_HACK_DICT_NAME = '_deco_base__active_call_items__'
#-----------------------------------------------------------------------------
# _get_underlying_function
#-----------------------------------------------------------------------------
def _get_underlying_function(item, actual_item):
"""Factors out some code used 2x, in _get_deco_wrapper and in _deco_base._class__call__
For some class cls, and some name,
:param item: vars(cls)[name] == cls.__dict__[name]
:param actual_item: getattr(cls, name)
:return: func, as per body
"""
func = None
if type(item) == staticmethod:
func = actual_item # == item.__func__
elif type(item) == classmethod:
func = actual_item.__func__ # == item.__func__
elif inspect.isfunction(item):
func = actual_item # == item
return func
#-----------------------------------------------------------------------------
# _get_deco_wrapper kls |-->
# _get_own_deco_wrapper
# _get_own_deco_obj New 0.3.1
# _get_own_deco_wrapper_and_obj New 0.3.1
#
# The method deco_base._add_class_attrs sets attributes # on a deco'd class
# 'get_log_calls_wrapper', 'get_own_record_history_wrapper',
# whose values are e.g.
# staticmethod(partial(_get_deco_wrapper, deco_class))
#-----------------------------------------------------------------------------
def _get_own_deco_wrapper(deco_class, _extra_frames=0) -> 'function':
    """Return just the wrapper from _get_own_deco_wrapper_and_obj
    (adding one frame for this call itself)."""
    return _get_own_deco_wrapper_and_obj(
        deco_class, extra_frames=(1 + _extra_frames))[0]
def _get_own_deco_obj(deco_class, _extra_frames=0) -> 'deco_class obj':
    """Return just the decorator object from _get_own_deco_wrapper_and_obj
    (adding one frame for this call itself)."""
    return _get_own_deco_wrapper_and_obj(
        deco_class, extra_frames=(1 + _extra_frames))[1]
def _get_own_deco_wrapper_and_obj(deco_class, extra_frames=0) -> ('function', 'deco_class obj'):
    """Return deco wrapper of caller of ... of caller,
    [1+extra_frames many levels up the stack frame]
    IFF
    caller of ... of caller is deco'd.
    :return: (wrapper function, decorator object) pair
    v 0.3.1, omitted last arg `cls` (unused); made exposed method *staticmethod* not classmethod
    Raises AttributeError on error -- if any of many redundant consistency checks fail.
    CPython-only: relies on sys._getframe and frame internals.
    """
    # Error messages. We append a code to better determine cause of error.
    ERR_NOT_DECORATED = "'%s' is not decorated [%d]"
    ERR_BYPASSED_OR_NOT_DECORATED = "'%s' is true-bypassed (enabled < 0) or not decorated [%d]"
    ERR_INCONSISTENT_DECO = "inconsistent %s decorator object for '%s' [%d]"
    # caller is function whose wrapper we want
    # ITs caller should be the wrapper
    func_frame = sys._getframe(1 + extra_frames)    # v0.3.1, was (1)
    code = func_frame.f_code
    funcname = code.co_name
    wrapper_frame = func_frame.f_back
    wrapper_funcname = wrapper_frame.f_code.co_name
    # wrapper_funcname should be '_deco_base_f_wrapper_'
    if wrapper_funcname != '_deco_base_f_wrapper_':
        raise AttributeError(ERR_NOT_DECORATED % (funcname, 1))
    # look in its f_locals :) [stackframe hack] for STACKFRAME_HACK_DICT_NAME
    hack_dict = wrapper_frame.f_locals.get(STACKFRAME_HACK_DICT_NAME, None)
    if not hack_dict:
        raise AttributeError(ERR_BYPASSED_OR_NOT_DECORATED % (funcname, 2))
    # value for key '_wrapper_deco' is the deco object
    try:
        deco_obj = hack_dict['_wrapper_deco']
    except (TypeError, KeyError):
        deco_obj = None
    if not (deco_obj and type(deco_obj) == deco_class):
        raise AttributeError(ERR_NOT_DECORATED % (funcname, 3))
    # we've almost surely found a true wrapper
    try:
        wrapped_f = deco_obj.f
    except AttributeError:
        # Come here e.g. if deco_obj is None
        raise AttributeError(ERR_INCONSISTENT_DECO % (deco_class.__name__, funcname, 4))
    # more consistency checks:
    # wrapped_f nonempty and has same name and identical code to our function
    if not wrapped_f:
        raise AttributeError(ERR_INCONSISTENT_DECO % (deco_class.__name__, funcname, 5))
    if not (funcname == wrapped_f.__name__ and
            wrapped_f.__code__ is code):
        raise AttributeError(ERR_INCONSISTENT_DECO % (deco_class.__name__, funcname, 6))
    # access its attr deco_obj._sentinels['WRAPPER_FN_OBJ'] --
    # THAT, at long last, is (alllmost surely) the wrapper
    wrapper = getattr(wrapped_f, deco_obj._sentinels['WRAPPER_FN_OBJ'], None)
    # if wrapper is None then getattr returns None, so != deco_obj
    if deco_obj != getattr(wrapper, deco_obj._sentinels['DECO_OF'], None):
        raise AttributeError(ERR_INCONSISTENT_DECO % (deco_class.__name__, funcname, 7))
    return wrapper, deco_obj
#-----------------------------------------------------------------------------
# _get_deco_wrapper
# classmethod(partial(_get_deco_wrapper, deco_class))
# added as attribute '<deco_name>_wrapper' to decorated classes,
# so that they can easily access the added attributes
# of methods and properties.
# <deco_name> = 'log_calls', 'record_history', ...
#-----------------------------------------------------------------------------
# @used_unused_keywords()
def _get_deco_wrapper(deco_class, cls, fname: str) -> "function":
    """
    deco_class: log_calls, record_history, ...
    cls is (supposed to be) a decorated class.
    fname is name of a method (instance, static or class),
    or name of a property (and then, we return the getter),
    or name of a property + '.getter' or + '.setter' or + '.deleter'
    Note: if a property is defined using the `property` constructor
    as in
    x = property(getx, setx, delx)
    where getx, setx, delx are methods of a class (or None),
    then e.g. setx can be accessed via either e.g.
    x.log_calls_wrapper('setx')
    or
    x.log_calls_wrapper('x.setter')
    where x is a decorated class or an instance thereof.
    No need for qualnames. If A is decorated and has an inner class I,
    then I is decorated too, so use A.I.log_calls_wrapper(fname)
    Return wrapper if fname is decorated, None if it isn't;
    raise exception if fname is crazy or doesn't exist in cls or etc.
    Raise ValueError or TypeError on error:
    ValueError
    Raised when a built-in operation or function receives an argument
    that has the right type but an inappropriate value, and the situation
    is not described by a more precise exception such as IndexError.
    """
    # Sentinel attribute whose presence on a function marks it as a deco wrapper.
    sentinel = deco_class._sentinels['DECO_OF']
    if not isinstance(fname, str):
        raise TypeError("expecting str for argument 'fname', got %r of type %s"
                        % (fname, type(fname).__name__))
    # Split an optional '.getter'/'.setter'/'.deleter' qualifier off the name.
    parts = fname.split('.')
    if len(parts) > 2:
        raise ValueError("no such method specifier '%s'" % fname)
    prop_suffix = None
    if len(parts) == 2:
        fname, prop_suffix = parts
        if not (fname and prop_suffix):
            raise ValueError("bad method specifier '%s.%s'"
                             % (fname, prop_suffix))
    cls_dict = cls.__dict__ # = vars(cls) but faster
    if fname not in cls_dict:
        raise ValueError("class '%s' has no such attribute as '%s'"
                         % (cls.__name__, fname))
    item = cls_dict[fname]
    # Guard against '.getter' etc appended to non-properties,
    # unknown things appended to property names
    # surely these deserves complaints (exceptions)
    if prop_suffix:
        if type(item) != property:
            raise ValueError("%s.%s -- '%s' is not a property of class '%s'"
                             % (fname, prop_suffix, fname, cls.__name__))
        if prop_suffix not in PROPERTY_USER_SUFFIXES_to_ATTRS:
            raise ValueError("%s.%s -- unknown qualifier '%s'"
                             % (fname, prop_suffix, prop_suffix))
    actual_item = getattr(cls, fname)
    func = _get_underlying_function(item, actual_item)
    if func:
        # func is an instance-, class- or static-method
        # Return func if it's a deco wrapper, else return None
        return func if getattr(func, sentinel, None) else None
    # not func: item isn't any kind of method.
    # If it's not a property either, raise error
    if type(item) != property:
        raise TypeError("item '%s' of class '%s' is of type '%s' and can't be decorated"
                        % (fname, cls.__name__, type(item).__name__))
    # item is a property
    if not prop_suffix:
        # unqualified property name ==> we assume the user means the 'getter'
        prop_suffix = 'getter'
    func = getattr(item, PROPERTY_USER_SUFFIXES_to_ATTRS[prop_suffix])
    if func:
        return func if getattr(func, sentinel, None) else None
    else:
        # property has no such attribute (no 'setter', for example)
        raise ValueError("property '%s' has no '%s' in class '%s'"
                         % (fname, prop_suffix, cls.__name__))
#-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
# _deco_base
# Fat base class for log_calls and record_history decorators
#-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
class _deco_base():
"""
Base class for decorators that records history and optionally write to
the console or logger by supplying their own settings (DecoSetting subclasses)
with pre_call_handler and post_call_handler methods.
The wrapper of the wrapped function collects a lot of information,
saved in a dict `context`, which is passed to the handlers.
This and derived decorators take various keyword arguments, same as settings keys.
Every parameter except prefix and max_history can take two kinds of values,
direct and indirect. Briefly, if the value of any of those parameters
is a string that ends in in '=', then it's treated as the name of a keyword
arg of the wrapped function, and its value when that function is called is
the final, indirect value of the decorator's parameter (for that call).
See deco_settings.py docstring for details.
Settings/keyword params to __init__ that this base class knows about,
and uses in __call__ (in wrapper for wrapped function): ... see docs.
"""
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# constants for the `mute` setting
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    class MUTE():
        """Namespace of constants for the `mute` setting."""
        assert False == 0 and True == 1 # a friendly reminder
        NOTHING = False # (default -- all output produced)
        CALLS = True # (mute output from decorated functions & methods & properties,
                     #  but log_calls.print() and log_calls.print_exprs()
                     #  (log_message and thus log_exprs) produce output;
                     #  call # recording, history recording continue if enabled)
        ALL = 2 # (no output at all; but call # recording, history recording continue if enabled)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# sentinels, for identifying functions on the calls stack
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
_sentinels_proto = {
'SENTINEL_ATTR': '_$_%s_sentinel_', # name of attr
'SENTINEL_VAR': "_$_%s-deco'd",
'WRAPPER_FN_OBJ': '_$_f_%s_wrapper_-BACKPTR', # LATE ADDITION
'DECO_OF': '_$_f_%s_wrapper_-or-cls-DECO' # value = self (0.3.0)
}
_version = __version__
    @classmethod
    def version(cls):
        """Return the version string of the log_calls package."""
        return cls._version
@classmethod
def _set_class_sentinels(cls):
""" 'virtual', called from __init__
"""
sentinels = cls._sentinels_proto.copy()
for sk in sentinels:
sentinels[sk] = sentinels[sk] % cls.__name__
return sentinels
# placeholder! _set_class_sentinels called from __init__
_sentinels = None
INDENT = 4 # number of spaces to __ by at a time
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# # *** DecoSettingsMapping "API" --
# # (1) initialize: Subclasses must call register_class_settings
# # with a sequence of DecoSetting objs containing at least these:
# # (Yes it's an odd lot of required DecoSetting objs)
#
# _setting_info_list = (
# DecoSettingEnabled('enabled'),
# DecoSetting_bool( 'indent', bool, False, allow_falsy=True),
# DecoSetting_bool( 'log_call_numbers', bool, False, allow_falsy=True),
# DecoSetting_str( 'prefix', str, '', allow_falsy=True, allow_indirect=False)
# # and (example args; record_history adds visible=False):
# DecoSetting( 'mute', int, False, allow_falsy=True, allow_indirect=False)
# )
# DecoSettingsMapping.register_class_settings('_deco_base',
# _setting_info_list)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# call history and stats stuff
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
_data_descriptor_names = (
'num_calls_logged',
'num_calls_total',
'elapsed_secs_logged',
'process_secs_logged',
'history',
'history_as_csv',
'history_as_DataFrame',
)
_method_descriptor_names = (
'clear_history',
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# virtual classmethods
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @classmethod
    def get_logging_fn(cls, _get_final_value_fn):
        """Virtual. Return the callable used to emit output; base default
        is the builtin print. `_get_final_value_fn` is unused here but
        available to subclasses."""
        return print
    # 0.3.0
    @classmethod
    def allow_repr(cls) -> bool:
        """Subclass must say yay or nay"""
        raise NotImplementedError
    # 0.3.0
    @classmethod
    def fixup_for_init(cls, some_settings: dict):
        """Default: do nothing.
        Hook letting subclasses adjust the settings dict before __init__
        consumes it."""
        return
    # 0.3.0
    @classmethod
    def global_mute(cls) -> bool:
        """Default: False (never globally muted)"""
        return False
#----------------------------------------------------------------
# history stuff
#----------------------------------------------------------------
# A few generic properties, internal logging, and exposed
# as descriptors on the stats (ClassInstanceAttrProxy) obj
    @property
    def num_calls_logged(self):
        """Number of calls that were actually logged."""
        return self._num_calls_logged

    @property
    def num_calls_total(self):
        """All calls, logged and not logged"""
        return self._num_calls_total

    @property
    def elapsed_secs_logged(self):
        """Total wall-clock seconds accumulated over logged calls."""
        # This value is accumulated for logged calls
        # whether or not history is being recorded.
        return self._elapsed_secs_logged

    @property
    def process_secs_logged(self):
        """Total process (CPU) seconds accumulated over logged calls."""
        # This value is accumulated for logged calls
        # whether or not history is being recorded.
        return self._process_secs_logged

    @property
    def history(self):
        """Immutable snapshot (tuple) of the recorded CallRecords."""
        return tuple(self._call_history)
    @property
    def history_as_csv(self):
        """Render call history as '|'-separated CSV text (headings + one line
        per recorded call).
        Headings (columns) are:
        call_num
        each-arg *
        varargs (str)
        implicit_kwargs (str)
        retval (repr?)
        elapsed_secs
        process_secs
        timestamp (format somehow? what is it anyway)
        function (it's a name/str)
        """
        csv_sep = '|'
        all_args = list(self.f_params)
        varargs_name, kwargs_name = get_args_kwargs_param_names(self.f_params)
        csv = ''
        # Write column headings line (append to csv str)
        fields = ['call_num']
        fields.extend(all_args)
        fields.extend(['retval', 'elapsed_secs', 'process_secs', 'timestamp', 'prefixed_fname', 'caller_chain'])
        # 0.2.1 - use str not repr, get rid of quotes around column names
        csv = csv_sep.join(map(str, fields))
        csv += '\n'
        # Write data lines
        for rec in self._call_history:
            fields = [str(rec.call_num)]
            # Do arg vals.
            # make dict of ALL args/vals
            # (defaulted/explicit kwargs override positional bindings of the same name)
            all_args_vals_dict = {a: repr(v) for (a, v) in zip(rec.argnames, rec.argvals)}
            all_args_vals_dict.update(
                {a: repr(v) for (a, v) in rec.explicit_kwargs.items()}
            )
            all_args_vals_dict.update(
                {a: repr(v) for (a, v) in rec.defaulted_kwargs.items()}
            )
            for arg in all_args:
                if arg == varargs_name:
                    fields.append(str(rec.varargs))
                elif arg == kwargs_name:
                    fields.append(dict_to_sorted_str(rec.implicit_kwargs)) # str(rec.implicit_kwargs)
                else:
                    fields.append(all_args_vals_dict[arg])
            # and now the remaining fields
            fields.append(repr(rec.retval))
            fields.append(str(rec.elapsed_secs))
            fields.append(str(rec.process_secs))
            fields.append(rec.timestamp) # it already IS a formatted str
            fields.append(repr(rec.prefixed_func_name))
            fields.append(repr(rec.caller_chain))
            csv += csv_sep.join(fields)
            csv += '\n'
        return csv
@property
def history_as_DataFrame(self):
try:
import pandas as pd
except ImportError:
return None
import io
df = pd.DataFrame.from_csv(io.StringIO(self.history_as_csv),
sep='|',
infer_datetime_format=True)
return df
def _make_call_history(self):
return deque(maxlen=(self.max_history if self.max_history > 0 else None))
def clear_history(self, max_history=0):
"""Using clear_history it's possible to change max_history"""
self._num_calls_logged = 0
self._num_calls_total = 0
self._elapsed_secs_logged = 0.0
self._process_secs_logged = 0.0
self.max_history = int(max_history) # set before calling _make_call_history
self._call_history = self._make_call_history()
self._settings_mapping.__setitem__('max_history', max_history, _force_mutable=True)
def _add_call(self, *, logged):
self._num_calls_total += 1
if logged:
self._num_calls_logged += 1
    def _add_to_elapsed(self, elapsed_secs, process_secs):
        """Accumulate wall-clock and process (CPU) time for one logged call."""
        self._elapsed_secs_logged += elapsed_secs
        self._process_secs_logged += process_secs
def _add_to_history(self,
argnames, argvals,
varargs,
explicit_kwargs, defaulted_kwargs, implicit_kwargs,
retval,
elapsed_secs, process_secs,
timestamp_secs,
prefixed_func_name,
caller_chain
):
"""Only called for *logged* calls, with record_history true.
Call counters are already bumped."""
# Convert timestamp_secs to datetime
timestamp = datetime.datetime.fromtimestamp(timestamp_secs).\
strftime('%x %X.%f') # or '%Y-%m-%d %I:%M:%S.%f %p'
## 0.2.3+ len(argnames) == len(argvals)
## assert len(argnames) == len(argvals)
# n = min(len(argnames), len(argvals))
# argnames = argnames[:n]
# argvals = argvals[:n]
self._call_history.append(
CallRecord(
self._num_calls_logged,
argnames, argvals,
varargs,
explicit_kwargs, defaulted_kwargs, implicit_kwargs,
retval,
elapsed_secs, process_secs,
timestamp,
prefixed_func_name=prefixed_func_name,
caller_chain=caller_chain)
)
#----------------------------------------------------------------
# log_* output methods
#----------------------------------------------------------------
# 0.3.0
LoggingState = namedtuple("LoggingState",
('logging_fn',
'indent_len',
'output_fname',
'mute'))
    # 0.3.0
    def _enabled_state_push(self, enabled):
        """Push an `enabled` value for the call now being entered."""
        self._enabled_stack.append(enabled)
# 0.3.0
def _enabled_state_pop(self):
self._enabled_stack.pop()
def _logging_state_push(self, logging_fn, global_indent_len, output_fname, mute):
self.logging_state_stack.append(
self.LoggingState(logging_fn, global_indent_len, output_fname, mute)
)
def _logging_state_pop(self, enabled_too=False):
self.logging_state_stack.pop()
if enabled_too:
self._enabled_state_pop()
#----------------------------------------------------------------
# `log_calls`-aware debug-message writers
#----------------------------------------------------------------
def _log_exprs(self, *exprs,
sep=', ',
extra_indent_level=1,
prefix_with_name=False,
prefix='',
suffix='',
_extra_frames=0):
"""Evaluates each expression (str) in exprs in the context of the caller;
makes string from each, expr = val,
pass those strs to _log_message.
:param exprs: exprs to evaluate and log with value
:param sep: default ', '
:param extra_indent_level: as for _log_message
:param prefix_with_name: as for _log_message
:param prefix: additional text to prepend to output message
:param suffix: additional text to append to output message (0.3.1)
"""
if not exprs:
return
msgs = []
caller_frame = sys._getframe(1 + _extra_frames)
for expr in exprs:
try:
val = eval(expr, caller_frame.f_globals, caller_frame.f_locals)
except Exception as e: # (SyntaxError, NameError, IndexError, ...)
val = '<** ' + str(e) + ' **>'
msgs.append('%s = %r' % (expr, val))
self._log_message(*msgs,
sep=sep,
extra_indent_level=extra_indent_level,
prefix_with_name=prefix_with_name,
_prefix=prefix,
_suffix=suffix)
def _log_message(self, *msgs,
                 sep=' ',
                 extra_indent_level=1,
                 prefix_with_name=False,
                 _prefix='',
                 _suffix=''):
    """Signature much like that of print, such is the intent.
    "log" one or more "messages", which can be anything - a string,
    an int, object with __str__ method... all get str()'d.
    sep: what to separate the messages with
    extra_indent_level: self.INDENT * this number is
        an offset from the (absolute) column in which
        the entry/exit messages for the function are written.
        I.e. an offset from the visual frame of log_calls output,
        in increments of 4 (cols) from its left margin.
        log_calls itself explicitly provides extra_indent_level=0.
        The given default value, extra_indent_level=1, is what users
        *other* than log_calls itself want: this aligns the message(s)
        with the "arguments:" part of log_calls output, rather than
        with the function entry/exit messages.
        Negative values of extra_indent_level have their place:
            me.log_message("*** An important message", extra_indent_level=-1)
            me.log_message("An ordinary message").
    prefix_with_name: bool. If True, prepend
        self._output_fname[-1] + ": "
        to the message ultimately written.
        self._output_fname[-1] is the function's possibly prefixed name,
        + possibly [its call #]
    _prefix: (for log_exprs, callers of log_message won't need to use it)
        additional text to prepend to output message
    _suffix: (0.3.1) (for log_exprs, callers of log_message won't need to use it)
        additional text to append to output message
    """
    if not msgs:
        return
    # do nothing unless enabled! if disabled, the other 'stack' accesses
    # below (logging_state_stack[-1]) will blow up -- the stacks are empty
    # when the wrapped call is bypassed.
    if self._enabled_stack[-1] <= 0:    # disabled
        return
    # 0.3.0 -- the logging context pushed for the call currently in progress
    logging_state = self.logging_state_stack[-1]
    # Write nothing if output is stifled (caller is NOT _deco_base_f_wrapper_)
    # NOTE: only check global_mute() IN REALTIME, like so:
    # (the effective mute is the stricter of the per-call and global levels)
    mute = max(logging_state.mute, self.global_mute())
    if mute == self.MUTE.ALL:
        return
    # adjust for calls not being logged -- don't indent an extra level
    # (no 'log_calls frame', no 'arguments:' to align with),
    # and prefix with display name cuz there's no log_calls "frame"
    # NOTE, In this case we force "prefix_with_name = True" <<<
    ####if mute == self.MUTE.CALLS:
    if mute >= self.log_message_auto_prefix_threshold():
        extra_indent_level -= 1
        prefix_with_name = True
    indent_len = (logging_state.indent_len
                  + (extra_indent_level * self.INDENT)
                 )
    if indent_len < 0:
        indent_len = 0   # clamp -- negative extra_indent_level can undershoot
    the_msg = sep.join(map(str, msgs))
    if prefix_with_name:
        the_msg = logging_state.output_fname + ': ' + the_msg
    assert isinstance(_prefix, str) and isinstance(_suffix, str)
    the_msg = _prefix + the_msg + _suffix
    # prefix_multiline_str indents every line of a multiline message.
    logging_state.logging_fn(prefix_multiline_str(' ' * indent_len, the_msg))
#---------------------------------
# 3.1 new:
#     log_calls.print(....)
#     log_calls.print_exprs(....)
#---------------------------------
# Allows you to keep calls to log_calls.print*
# in production code (enclosing callable isn't decorated) -- it does nothing
# except waste a few cycles each call -- as opposed to having to delete
# the call or comment it out.
# If this class attribute is set True, print/print_exprs instead re-raise
# when no deco instance can be found for the calling function.
print_methods_raise_if_no_deco = False
@classmethod
def print(cls, *msgs,
          sep=' ',
          extra_indent_level=1,
          prefix_with_name=False,
          _prefix=''):
    """Write `msgs` via the `_log_message` of the deco instance wrapping
    the *calling* function, aligned with that function's log_calls output.

    If the caller isn't decorated (e.g. production code with decos removed),
    this silently does nothing -- unless cls.print_methods_raise_if_no_deco
    is true, in which case the failure propagates.

    :param msgs: things to write; each is str()'d, as with builtin print.
    :param sep: separator between msgs (default ' ').
    :param extra_indent_level: indent offset, in units of the deco's INDENT,
        relative to the visual frame of log_calls output (see _log_message).
    :param prefix_with_name: if true, prefix output with the calling
        function's (possibly prefixed) display name.
    :param _prefix: additional text prepended to the final message
        (used by print_exprs; ordinary callers won't need it).
    :return: None
    """
    # Get the associated deco_obj, as we need to call
    # ITS instance method _log_message.
    # cls is "deco_class" (`log_calls`, `record_history`)
    try:
        deco_obj = _get_own_deco_obj(cls, _extra_frames=1)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception still covers every "caller not deco'd" error.
        if cls.print_methods_raise_if_no_deco: raise    # default False/don't
        else: return
    deco_obj._log_message(*msgs,
                          sep=sep,
                          extra_indent_level=extra_indent_level,
                          prefix_with_name=prefix_with_name,
                          _prefix=_prefix)
@classmethod
def print_exprs(cls, *exprs,
                sep=', ',
                extra_indent_level=1,
                prefix_with_name=False,
                prefix='',
                suffix=''):
    """Evaluate each expression (str) in `exprs` in the calling function's
    context and write "expr = value" lines via the caller's deco instance
    (its `_log_exprs`), aligned with that function's log_calls output.

    If the caller isn't decorated, this silently does nothing -- unless
    cls.print_methods_raise_if_no_deco is true, in which case the failure
    propagates.

    :param exprs: expressions (strings) to evaluate and log with values.
    :param sep: separator between the formatted messages (default ', ').
    :param extra_indent_level: as for _log_message.
    :param prefix_with_name: as for _log_message.
    :param prefix: additional text to prepend to the output message.
    :param suffix: additional text to append to the output message.
    :return: None
    """
    # Get the associated deco_obj, as we need to call
    # ITS instance method _log_exprs.
    # cls is "deco_class" (`log_calls`, `record_history`)
    try:
        deco_obj = _get_own_deco_obj(cls, _extra_frames=1)
    except Exception:
        # Was a bare `except:` -- don't swallow KeyboardInterrupt/SystemExit.
        if cls.print_methods_raise_if_no_deco: raise
        else: return
    deco_obj._log_exprs(*exprs,
                        sep=sep,
                        extra_indent_level=extra_indent_level,
                        prefix_with_name=prefix_with_name,
                        prefix=prefix,
                        suffix=suffix,
                        _extra_frames=1)
#----------------------------------------------------------------
# settings
#----------------------------------------------------------------
# v0.3.0b24
@classmethod
def get_factory_defaults_OD(cls) -> OrderedDict:
    """Return the factory (as-shipped) default settings for this deco class,
    as an OrderedDict keyed by setting name (delegates to DecoSettingsMapping)."""
    return DecoSettingsMapping.get_factory_defaults_OD(cls.__name__)
# v0.3.0b24
@classmethod
def get_defaults_OD(cls) -> OrderedDict:
    """Return the *current* default settings for this deco class,
    as an OrderedDict keyed by setting name (delegates to DecoSettingsMapping)."""
    return DecoSettingsMapping.get_defaults_OD(cls.__name__)
@classmethod
def reset_defaults(cls):
    """Restore this deco class's current defaults to its factory defaults
    (delegates to DecoSettingsMapping)."""
    DecoSettingsMapping.reset_defaults(cls.__name__)
@classmethod
def _get_settings_dict(cls, *,
                       settings=None,
                       deco_settings_keys=None,
                       extra_settings_dict=None
                      ) -> dict:
    """Resolve `settings` (dict, or settings-file path as for
    _read_settings_file, or None) into a plain dict, then overlay any
    entries from extra_settings_dict, and return the result.
    :param settings: dict, or str as for _read_settings_file (or None)
    :param deco_settings_keys: seq or set of keys naming settings for this deco class `cls`
    :param extra_settings_dict: more settings, restricted to deco_settings_keys (any others ignored)
    :return: merged dict of settings
    """
    if not deco_settings_keys:
        # Default to every setting registered for this deco class.
        deco_settings_keys = set(DecoSettingsMapping.get_deco_class_settings_dict(cls.__name__))
    if isinstance(settings, dict):
        result = restrict_keys(settings, deco_settings_keys)
    elif isinstance(settings, str):
        result = cls._read_settings_file(settings_path=settings)
    else:
        result = {}
    if extra_settings_dict:
        result.update(extra_settings_dict)
    return result
@classmethod
def set_defaults(cls, settings=None, **more_defaults):
    """Install new current defaults for this deco class.
    :param settings: a dict,
        or a str specifying a "settings file"
        such as _read_settings_file accepts (its `settings_path` parameter):
        * a directory name (dir containing settings file '.' + self.__class__.__name__),
        * or a path to a (text) settings file
    :param more_defaults: keyword params where every key is a "setting".
        These override any default settings provided by `settings`.
    """
    combined = cls._get_settings_dict(settings=settings,
                                      extra_settings_dict=more_defaults)
    DecoSettingsMapping.set_defaults(cls.__name__, combined)
@classmethod
def _read_settings_file(cls, settings_path=''):
"""If settings_path names a file that exists,
load settings from that file.
If settings_path names a directory, load settings from
settings_path + '.' + cls.__name__
e.g. the file '.log_calls' in directory specified by settings_path.
If not settings_path or it doesn't exist, return {}.
Format of settings file - zero or more lines of the form:
setting_name=setting_value
with possible whitespace around *_name.
Blank lines are ok & ignored; lines whose first non-whitespace char is '#'
are treated as comments & ignored.
v0.3.0 -- special-case handling for pseudo-setting `NO_DECO`
"""
if not settings_path:
return {}
if os.path.isdir(settings_path):
settings_path = os.path.join(settings_path, '.' + cls.__name__)
if not os.path.isfile(settings_path):
return {}
d = {} # returned
try:
with open(settings_path) as f:
lines = f.readlines()
except BaseException: # FileNotFoundError?!
return d
settings_dict = DecoSettingsMapping.get_deco_class_settings_dict(cls.__name__)
for line in lines:
line = line.strip()
# Allow blank lines & comments
if not line or line[0] == '#':
continue
try:
setting, val_txt = line.split('=', 1) # only split at first '='
except ValueError:
# fail silently. (Or, TODO: report error? ill-formed line)
continue # bad line
setting = setting.strip()
val_txt = val_txt.strip()
if setting not in settings_dict or not val_txt:
# fail silently. (Or, TODO: report error? ill-formed line)
continue
# special case: None
if val_txt == 'None':
if settings_dict[setting].allow_falsy:
d[setting] = None
continue
# If val_txt is enclosed in quotes (single or double)
# and ends in '=' (indirect value) then let val = val_txt;
# otherwise, defer to settings_dict[setting].value_from_str
is_indirect = (is_quoted_str(val_txt) and
len(val_txt) >= 3 and
val_txt[-2] == '=')
if is_indirect:
val = val_txt[1:-1] # remove quotes
else:
try:
val = settings_dict[setting].value_from_str(val_txt)
except ValueError as e:
# fail silently. (Or, TODO: report error? bad value)
continue # bad line
d[setting] = val
return d
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# __init__, __call__
# & helpers
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def __init__(self,
             settings=None,
             _omit=(),                # 0.3.0 class deco'ing: str or seq - omit these methods/proper; not a setting
             _only=(),                # 0.3.0 class deco'ing: str or seq - deco only these (sans any in omit); not a setting
             _name_param=None,        # 0.3.0 name or oldstyle fmt str for f_display_name of fn; not a setting
             _override=False,         # 0.3.0b18: new settings override existing ones. NOT a "setting"
             _used_keywords_dict={},  # 0.2.4 new parameter, but NOT a "setting"
                                      # NOTE: mutable default is deliberate -- never mutated (see docstring)
             enabled=True,
             log_call_numbers=False,
             indent=True,             # 0.3.0 changed default
             prefix='',
             mute=False,
             ** other_values_dict):
    """(See class docstring)
    _used_keywords_dict: passed by subclass via super().__init__:
        the *explicit* keyword args of subclass that the user actually passed,
        not ones that are implicit keyword args,
        and not ones that the user did not pass and which have default values.
        (It's default value is mutable, but we don't change it.)
    """
    #--------------------------------------------------------------------
    # 0.2.4 `settings` stuff, rejiggered in 0.3.0
    # Set/save self._changed_settings =
    #     `settings` (param -- dict or file)
    #     updated with actual keyword parameters supplied to deco call
    # set/save self._effective_settings -
    #     static defaults (for self.__class__.__name__)
    #     updated with self._changed_settings
    #--------------------------------------------------------------------

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Initialize effective_settings_dict with log_calls's defaults - the static ones:
    # self.__class__.__name__ is name *of subclass*, clsname,
    # which we trust has already called
    # DecoSettingsMapping.register_class_settings(clsname, list-of-deco-setting-objs)
    # Special-case handling of 'enabled' (ugh, eh), whose DecoSetting obj
    # has .default = False, for "technical" reasons
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    deco_settings_map = DecoSettingsMapping.get_deco_class_settings_dict(self.__class__.__name__)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Get settings from dict | read settings from file, if given, as a dict
    # Update with _used_keywords_dict, save as self._changed_settings,
    # so that these can be reapplied by any outer class deco --
    # (a copy of a class deco's _effective_settings is updated with these
    #  in class case of __call__)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    self._changed_settings = self._get_settings_dict(
        settings=settings,
        deco_settings_keys=set(deco_settings_map),
        extra_settings_dict=_used_keywords_dict
    )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Initialize effective_settings_dict with log_calls's defaults - the static ones.
    #
    # update effective_settings_dict with settings *explicitly* passed to caller
    # of subclass's __init__, and save *that* (used in __call__)
    # as self._effective_settings, which are the final settings used
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    effective_settings_dict = {k: deco_settings_map[k].default for k in deco_settings_map}
    # 'enabled' DecoSetting's .default is False for "technical" reasons (above);
    # the real default is True.
    effective_settings_dict['enabled'] = True
    effective_settings_dict.update(self._changed_settings)
    self._effective_settings = effective_settings_dict

    def _make_token_sequence(names) -> tuple:
        """names is either a string of space- and/or comma-separated umm tokens,
        or is already a sequence of tokens.
        Return tuple of tokens."""
        if isinstance(names, str):
            names = names.replace(',', ' ').split()
        return tuple(map(str, names))

    # _omit_ex/_only_ex are the "extended" specs; _class__call__ later
    # augments them with property-function names.
    self._omit_ex = self._omit = _make_token_sequence(_omit)
    self._only_ex = self._only = _make_token_sequence(_only)

    self.prefix = prefix                # special case
    self._name_param = _name_param
    self._other_values_dict = other_values_dict     # 0.3.0
    self._override = _override          # 0.3.0b18
    # initialize sentinel strings (once per deco class)
    if not self.__class__._sentinels:
        self.__class__._sentinels = self._set_class_sentinels()
    # 0.3.0 Factored out rest of __init__ to function case of __call__
@property
def omit(self):
    """The effective 'omit' method-specs: the user-supplied specs plus any
    property-function names added by _add_property_method_names."""
    return self._omit_ex
@property
def only(self):
    """The effective 'only' method-specs: the user-supplied specs plus any
    property-function names added by _add_property_method_names."""
    return self._only_ex
@staticmethod
def _is_a_function_in_class(xname, cls) -> bool:
if xname in cls.__dict__:
# Get 'raw' item for xname from cls;
# if it's a function, return True
xitem = cls.__getattribute__(cls, xname)
if inspect.isfunction(xitem):
return True
return False
def _add_property_method_names(self, cls, method_specs: tuple) -> tuple:
    """For each name in method_specs (a tuple),
    if name is of the form propertyname.suffix
    where suffix is in ('getter', 'setter', 'deleter'),
    add to method_specs the name of the corresponding function
    (.fget, .fset, .fdel) of attribute with name propertyname,
    provided that function is in the class dict and is a function.
    More generally, a name in method_specs is of the form
        expr [. suffix]
    where expr is a method or property name, possibly prefixed
    by classname + '.', AND WHICH MAY CONTAIN WILDCARDS AND
    CHARACTER RANGES (to match or reject). Classname can name
    inner classes & so can contain dots; Wildcards can match dot.
    Wildcards/char classes are as in "globs" --
    matching is done with fnmatch.fnmatchcase.
    :param cls: class being deco'd, some of whose methods/fns
                are in method_specs
    :param method_specs: self._omit or self._only
                members are names of methods/fns,
                or propertyname.suffix as described above
    :return: tuple - method_specs_ex, consisting of the method specs
                in method_specs, each followed by any & all added
                property functions, with no duplicates
    """
    cls_prefix = cls.__qualname__ + '.'
    # Make list/collection of properties in cls,
    # plus their names
    # Note that item.__qualname__ == cls_prefix + item.__name__
    cls_properties = []
    for name, item in cls.__dict__.items():
        if type(item) == property:
            # properties don't HAVE __name__s or __qualname__s,
            # so carry the dict key alongside the property object
            cls_properties.append((name, item))

    # return value; method_specs_ex will contain method_specs
    method_specs_ex = []
    for method_spec in method_specs:
        method_specs_ex.append(method_spec)    # method_specs_ex contains method_specs
        # Split off a trailing '.getter'/'.setter'/'.deleter' suffix, if any.
        dot_pos = method_spec.rfind('.')
        suffix = ''
        if dot_pos != -1:
            suffix = method_spec[dot_pos+1:]
        if suffix in PROPERTY_USER_SUFFIXES_to_ATTRS:
            pattern = method_spec[:dot_pos]
            suffixes = (suffix,)
        else:
            # No recognized suffix: the whole spec is the pattern,
            # and it can match any of the three property roles.
            pattern = method_spec
            suffixes = tuple(PROPERTY_USER_SUFFIXES_to_ATTRS.keys())
        # Glob-match the pattern against each property's plain name,
        # or failing that, its class-qualified name.
        matching_props_suffixes_and_flags = []
        for name, prop in cls_properties:
            for sfx in suffixes:
                if fnmatch.fnmatchcase(name, pattern):
                    matching_props_suffixes_and_flags.append((prop, sfx, False))
                elif fnmatch.fnmatchcase(cls_prefix + name, pattern):
                    matching_props_suffixes_and_flags.append((prop, sfx, True))
        if not matching_props_suffixes_and_flags:
            continue
        # For each (prop, sfx, matches_qualname) in matching_props_suffixes_and_flags,
        # get attribute (function) of prop corresponds to sfx;
        # if it exists & is a function in cls, add its matching name
        # (its .__name__ if not matches_qualname, else cls_prefix + its .__name__)
        for prop, sfx, matches_qualname in matching_props_suffixes_and_flags:
            func = getattr(prop, PROPERTY_USER_SUFFIXES_to_ATTRS[sfx], None)
            if not func:
                continue
            # Is func, by name, actually a function of class cls?
            # (This is false if @property etc decos were used to create prop,
            #  true if property ctor was used.)
            # If so, add its (possibly cls-prefix'd) name to list
            func_name = func.__name__
            if self._is_a_function_in_class(func_name, cls):
                if matches_qualname:
                    func_name = cls_prefix + func_name
                method_specs_ex.append(func_name)

    return tuple(no_duplicates(method_specs_ex))
### 0.3.0b18
def _update_settings(self, new: dict, old: dict, override_existing: bool):
new.update({k: v
for (k,v) in old.items()
if (not override_existing or k not in old)
})
def _class__call__(self, klass):
    """
    :param klass: class to decorate ALL the methods of,
                  including properties and methods of inner classes.
    :return: decorated class (klass - modified/operated on)

    Operate on each function in klass.
    Use __getattribute__ to determine whether a function is (/will be)
    an instance method, staticmethod or classmethod;
    if either of the latter, get wrapped actual function (.__func__);
    if wrapped function is itself decorated by <this decorator>
            that is, self.__class__.__name__
            that is, self.__class__
        (look for 'signature' attribute on function,
         hasattr 'DECO_OF')
        *** GET THE INSTANCE of this deco class *** for that function,
        using sentinel deco_obj = getattr(func, 'DECO_OF')
        get deco_obj._changed_settings of that instance,
        THEN make its settings = self._effective_settings (for this klass)
                                 updated with those deco_obj._changed_settings
    otherwise, (a non-wrapped function that will be an instance method)
        we already have the function.
    Properties are different:
        if type(item) == property,
        getattr(item, '__get__').__self__ is a property object,
        with attributes fget, fset, fdel,
        and each of these yields the function to deal with (or None).
    """
    # Convenience function
    _any_match = partial(any_match, fnmatch.fnmatchcase)

    # Fixup self._only, self._omit,
    # so that if a named method (function) of the class is specified
    # via propertyname.getter or .setter or .deleter
    # and then the method's name is added to the list too.
    # Otherwise, if the function gets enumerated after the property
    # in loop through klass.__dict__ below, it won't be recognized
    # by name as something to omit or decorate-only.
    self._omit_ex = self._add_property_method_names(klass, self._omit)
    self._only_ex = self._add_property_method_names(klass, self._only)

    ## Equivalently,
    # for name in klass.__dict__:
    #     item = klass.__getattribute__(klass, name)
    for name, item in klass.__dict__.items():
        actual_item = getattr(klass, name)
        # If item is a staticmethod or classmethod,
        # actual_item is the underlying function;
        # if item is a function (instance method) or class, actual_item is item.
        # In all these cases, actual_item is callable.
        # If item is a property, it's not callable, and actual_item is item.
        if not (callable(actual_item) or type(item) == property):
            continue

        #-------------------------------------------------------
        # Handle inner classes
        #-------------------------------------------------------
        if inspect.isclass(item):
            # item is an inner class.
            # decorate it, using self._changed_settings
            # Use sentinel 'DECO_OF' attribute on klass to get those
            new_settings = self._changed_settings.copy()
            new_only = self._only
            new_omit = self._omit
            deco_obj = getattr(item, self._sentinels['DECO_OF'], None)
            if deco_obj:    # klass is already decorated
                # It IS already deco'd, so we want its settings to be
                # (copy of) self._changed_settings updated with its _changed_settings
                ### 0.3.0b18 -- Use self._override
                self._update_settings(new=new_settings,
                                      old=deco_obj._changed_settings,
                                      override_existing=self._override)
                # NOTICE WHAT THIS DOES (if override == False):
                # inner "only" is what was originally given IF SOMETHING WAS GIVEN
                #   -- DON'T add outer ones -- otherwise, use the outer ones;
                # inner "omit" is cumulative, union -- DO add outer ones
                new_only = deco_obj._only or self._only
                new_omit += deco_obj._omit
            # Re-decorate the inner class with the merged settings/specs.
            new_class = self.__class__(
                settings=new_settings,
                only=new_only,
                omit=new_omit
            )(item)
            # and replace in class dict
            setattr(klass, name, new_class)
            continue    # for name, item in ...

        #-------------------------------------------------------
        # Handle properties
        #
        # Caller can specify, in omit or only parameters,
        #     property_name  (matches all prop fns, get set del)
        #     property_name + '.getter' or '.setter' or '.deleter'
        #     name of function supplied as fget, fset, fdel arg
        #         to property() function/constructor
        # and/or
        #     any of the above three, prefixed with class name
        #         (INCLUDING possibly inner function qualifiers,
        #          thus e.g. X.method.innerfunc.<locals>.Cls.prop.getter
        # If property_name is given, it matches any/all of the
        # property functions (get/set/del).
        #-------------------------------------------------------
        if type(item) == property:
            # item == actual_item is a property object,
            # also == getattr(item, '__get__').__self__  :)
            new_funcs = {}      # or {'fget': None, 'fset': None, 'fdel': None}
            change = False
            for attr in PROPERTY_ATTRS_to_USER_SUFFIXES:    # ('fget', 'fset', 'fdel')
                func = getattr(item, attr)
                # put this func in new_funcs[attr]
                # in case any change gets made. func == None is ok
                new_funcs[attr] = func
                if not func:
                    continue    # for attr in (...)

                func_name = func.__name__
                # Filter -- `omit` and `only`
                # 4 maybe 6 names to check
                # (4 cuz func.__name__ == name if @property and @propname.xxxer decos used)
                dont_decorate = False
                namelist = [pre + fn
                            for pre in ('',
                                        klass.__qualname__ + '.')
                            for fn in {name,                # varies faster than pre
                                       name + '.' + PROPERTY_ATTRS_to_USER_SUFFIXES[attr],
                                       func_name}]
                if _any_match(namelist, self._omit_ex):
                    dont_decorate = True
                if self._only and not _any_match(namelist, self._only_ex):
                    dont_decorate = True

                # get a fresh copy for each attr
                new_settings = self._changed_settings.copy()    # updated below
                # either func is deco'd, or it isn't
                deco_obj = getattr(func, self._sentinels['DECO_OF'], None)
                if dont_decorate:
                    if deco_obj:
                        new_funcs[attr] = deco_obj.f    # Undecorate
                        change = True
                    continue

                if deco_obj:    # it IS decorated
                    # Tweak its deco settings
                    ### 0.3.0b18 -- Use self._override
                    self._update_settings(new=new_settings,
                                          old=deco_obj._changed_settings,
                                          override_existing=self._override)
                    # update func's settings (_force_mutable=True to handle `max_history` properly)
                    deco_obj._settings_mapping.update(new_settings, _force_mutable=True)
                    # ...
                    # and use same func ( = wrapper)
                    # We already did this above:
                    #   new_funcs[attr] = func
                else:   # not deco'd
                    # so decorate it
                    new_func = self.__class__(** new_settings)(func)
                    # update property
                    new_funcs[attr] = new_func
                    # Possibly update klass definition of func with new_func
                    # NOTE: if `property` ctor used to create property (item),
                    # then func (its name) is in class dict, ** as a function **,
                    # BUT IT MAY NOT BE DECO'd YET: despite the order of declarations
                    # in the class body, we get them ~randomly via klass.__dict__.
                    # SO in that case we ALSO have to update klass with new decorated func,
                    # otherwise we'll create a another, new wrapper for it,
                    # and THAT will be found by log_calls_wrapper(func.__name__)
                    # but log_calls_wrapper(property_name + '.___ter') will find this wrapper,
                    # and bad things can happen (_log_message can use "the other" wrapper).
                    # So: Is func also in class klass dict *** as a function ***?
                    # NOT the case if @property decorator used to create property (item),
                    # but it IS the case if some random methods (including func)
                    # have been fed to 'property' ctor to create the property.
                    if self._is_a_function_in_class(func_name, klass):
                        setattr(klass, func_name, new_func)
                    change = True

            # Make new property object if anything changed
            if change:
                # Replace property object in klass
                setattr(klass,
                        name,
                        property(new_funcs['fget'], new_funcs['fset'], new_funcs['fdel']))
            continue    # for name, item in ...

        #-------------------------------------------------------
        # Handle instance, static, class methods.
        # All we know is, actual_item is callable
        #-------------------------------------------------------
        # Filter with self._only and self._omit.
        dont_decorate = False
        namelist = [name, klass.__qualname__ + '.' + name]
        if _any_match(namelist, self._omit_ex):
            dont_decorate = True
        if self._only and not _any_match(namelist, self._only_ex):
            dont_decorate = True

        func = _get_underlying_function(item, actual_item)
        # not hasattr(func, '__name') and etc: assume it's <deco_name>_wrapper
        # SO if user creates a classmethod that's a partial,
        # it can't & won't be deco'd. No tragedy.
        if not func or (not hasattr(func, '__name__')
                        and type(func) == functools.partial
                        and type(item) == classmethod):     # nothing we're interested in (whatever it is)
            continue

        # It IS a method; func is the corresponding function
        deco_obj = getattr(func, self._sentinels['DECO_OF'], None)
        if dont_decorate:
            if deco_obj:
                setattr(klass, name, deco_obj.f)    # Undecorate
            continue

        new_settings = self._changed_settings.copy()    # updated below
        # __init__ fixup, a nicety:
        # By default, don't log retval for __init__.
        # If user insists on it with 'log_retval=True' in __init__ deco,
        # that will override this.
        if name == '__init__':
            self.fixup_for_init(new_settings)

        if deco_obj:    # is func deco'd by this decorator?
            # Yes. Figure out settings for func,
            ### 0.3.0b18 -- Use self._override
            self._update_settings(new=new_settings,
                                  old=deco_obj._changed_settings,
                                  override_existing=self._override)
            # update func's settings (_force_mutable=True to handle `max_history` properly)
            deco_obj._settings_mapping.update(new_settings, _force_mutable=True)
        else:
            # func is not deco'd.
            # decorate it, using self._changed_settings
            # record_history doesn't know from 'settings' param,
            # cuz it really doesn't need one, so instead we do:
            new_func = self.__class__(** new_settings)(func)
            # if necessary, rewrap with @classmethod or @staticmethod
            if type(item) == staticmethod:
                new_func = staticmethod(new_func)
            elif type(item) == classmethod:
                new_func = classmethod(new_func)
            # and replace in class dict
            setattr(klass, name, new_func)

    return klass
def __call__(self, f_or_klass):
"""Because there are decorator arguments, __call__() is called
only once, and it can take only a single argument: the function
or class to decorate. The return value of __call__ is called
subsequently. So, this method *returns* the decorator proper.
(~ Bruce Eckel in a book, ___) TODO ref.
"""
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*****
# 0.3.0
# -- implement "kill switch", NO_DECO
# -- handle decorating both functions and classes
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*****
# 0.3.0b16: if it isn't callable, scram'
if not callable(f_or_klass):
return f_or_klass
# Special-case handling for ``NO_DECO``: remove from settings of ``self``
if self._effective_settings.get('NO_DECO'):
return f_or_klass
# else, delete that item wherever it might be
if 'NO_DECO' in self._effective_settings:
del self._effective_settings['NO_DECO']
if 'NO_DECO' in self._changed_settings:
del self._changed_settings['NO_DECO']
f = f_or_klass if inspect.isfunction(f_or_klass) else None
klass = f_or_klass if inspect.isclass(f_or_klass) else None
self.f = f
self.cls = klass
if klass:
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
# 0.3.0 -- case "f_or_klass is a class" -- namely, klass
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
self._class__call__(klass) # modifies klass (methods & inner classes) (if not builtin)
self._add_class_attrs(klass) # v0.3.0v20 traps TypeError for builtins
return klass
elif not f:
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
# 0.3.0 -- case "f_or_klass is a callable but not a function"
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
# functools.partial objects are callable, have no __name__ much less __qualname__,
# and trying to deco __call__ gets messy.
# Callable builtins e.g. len are not functions in the isfunction sense,
# can't deco anyway. Just give up (quietly):
return f_or_klass
else: # not a class, f nonempty is a function of f_or_klass callable
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
# 0.3.0 -- case "f_or_klass is a function" -- namely, f
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
#----------------------------------------------------------------
# Don't double-decorate -- don't wanna, & it doesn't work anyway!
#----------------------------------------------------------------
# Note: As with methods of classes,
# . if f is deco'd, its existing EXPLICITLY GIVEN settings take precedence.
# # From _class__call__, props & methods cases, w/a few name changes
deco_obj = getattr(f, self._sentinels['DECO_OF'], None) # type: _deco_base
# get a fresh copy for each attr
new_settings = self._changed_settings.copy() # updated below
# __init__ fixup, a nicety:
# By default, don't log retval for __init__.
# If user insists on it with 'log_retval=True' in __init__ deco,
# that will override this.
if f.__name__ == '__init__':
self.fixup_for_init(new_settings)
if deco_obj: # f is deco'd by this decorator
# Yes. Figure out settings for f,
### 0.3.0b18 -- Use self._override
self._update_settings(new=new_settings,
old=deco_obj._changed_settings,
override_existing=self._override)
# update func's settings (_force_mutable=True to handle `max_history` properly)
deco_obj._settings_mapping.update(new_settings, _force_mutable=True)
return f
#----------------------------------------------------------------
# f is a function & is NOT already deco'd
#----------------------------------------------------------------
# 0.3.0.x -- f may not have a .__qualname__
try:
self._classname_of_f = '.'.join( f.__qualname__.split('.')[:-1] )
except AttributeError as e:
self._classname_of_f = ''
# Special-case '__repr__' handling, if deco subclass doesn't allow it.
if f.__name__ == '__repr__' and self._classname_of_f and not self.allow_repr():
# v0.3.0b23 -- Instead of refusing to deco, use recursive_repr
# return f
return recursive_repr(fillvalue="...")(f)
# 0.3.0
# Use __qualname__ ALL the time, unless user provides `name=display_name_str`
# where `display_name_str` is either the name to be used for the fn in logged output,
# or is an oldstyle format str into which f.__name__ will be substituted
# to obtain the display name.
# We require Py3.3+, so __qualname__ is available.
# setup f_display_name
if self._name_param:
try:
self.f_display_name = (self._name_param % f.__name__)
except TypeError:
self.f_display_name = self._name_param
else:
self.f_display_name = f.__qualname__
# TODO TRY THIS -- anything break?
# 0.3.1 Inspired by fractions.Fraction.__sub__ et al:
# __name__ may be very different from __qualname__;
# if so, show both
if f.__name__ not in f.__qualname__:
self.f_display_name += " (" + f.__name__ + ")"
#================================================================
# 0.3.0 -- Init things (migrated from __init__)
#----------------------------------------------------------------
# set up pseudo-dict (DecoSettingsMapping),
# using settings given by self._effective_settings.
#
# *** DecoSettingsMapping "API" --
# (2) construct DecoSettingsMapping object
# that will provide mapping & attribute access to settings, & more
#----------------------------------------------------------------
self._settings_mapping = DecoSettingsMapping(
deco_class=self.__class__,
# DecoSettingsMapping calls the rest ** values_dict
** self._effective_settings # 0.3.0 set by __init__
)
#----------------------------------------------------------------
# Init more stuff
#----------------------------------------------------------------
self._stats = ClassInstanceAttrProxy(
class_instance=self,
data_descriptor_names=self.__class__._data_descriptor_names,
method_descriptor_names=self.__class__._method_descriptor_names)
# Accessed by descriptors on the stats obj
self._num_calls_total = 0
self._num_calls_logged = 0
# max_history > 0 --> size of self._call_history; <= 0 --> unbounded
# Set before calling _make_call_history
# 0.3.0 self._other_values_dict set by __init__
self.max_history = self._other_values_dict.get('max_history', 0) # <-- Nota bene
self._call_history = self._make_call_history()
# Accumulate this (for logged calls only)
# even when record_history is false:
self._elapsed_secs_logged = 0.0
self._process_secs_logged = 0.0
# 0.2.2.post1
# stack(s), pushed & popped wrapper of deco'd function
# by _logging_state_push, _logging_state_pop
# 0.3.0 convert to pushing/popping single namedtuples
self.logging_state_stack = [] # 0.3.0 stack of LoggingState namedtuples
self._enabled_stack = [] # 0.3.0 - um, stack, of 'enabled's
#----------------------------------------------------------------
# end of Init passage
#================================================================
# Save signature and parameters of f
self.f_signature = inspect.signature(f) # Py >= 3.3
self.f_params = self.f_signature.parameters
# 0.3.0 We assume Py3.3 so we use perf_counter, process_time all the time
wall_time_fn = time.perf_counter
process_time_fn = time.process_time
#############################
# The wrapper of a callable
#############################
        @wraps(f)
        def _deco_base_f_wrapper_(*args, **kwargs):
            """Wrapper around the wrapped function f.
            When this runs, f has been called, so we can now resolve
            any indirect values for the settings/keyword-params
            of log_calls, using info in kwargs and self.f_params.

            Overall flow:
              1. resolve 'enabled'; a value < 0 means "true bypass" -- call f
                 with no logging and no stats bookkeeping;
              2. bump call counters, compute the call chain & indent level;
              3. plant the stackframe-hack dict in this frame's locals --
                 call_chain_to_next_log_calls_fn() of *callee* wrappers
                 reads it back out of the stack;
              4. if not enabled, just call f (logging state pushed/popped
                 around the call so callees still behave);
              5. otherwise: build `context`, run pre-call handlers, time the
                 call to f, run post-call handlers, pop state, return retval.
            """
            # *** Part of the DecoSettingsMapping "API" --
            #     (4) using self._settings_mapping.get_final_value in wrapper
            # [[[ This/these is/are 4th chronologically ]]]
            # inner/local fn -- save a few cycles and characters -
            # we call this a lot (<= 9x).
            def _get_final_value(setting_name):
                "Use outer scope's kwargs and self.f_params"
                return self._settings_mapping.get_final_value(
                    setting_name, kwargs, fparams=self.f_params)
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # if nothing to do, hurry up & don't do it.
            # NOTE: call_chain_to_next_log_calls_fn looks in stack frames
            # to find (0.2.4) STACKFRAME_HACK_DICT_NAME (really!)
            # It and its values (the following _XXX variables)
            # must be set before calling f.
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            _enabled = _get_final_value('enabled')
            # 0.3.0 in case f calls log_message (no output if f disabled)
            self._enabled_state_push(_enabled)
            # 0.2.4.post5 "true bypass": if 'enabled' < 0 then scram
            if _enabled < 0:
                ret = f(*args, **kwargs)
                self._enabled_state_pop()
                return ret
            # Bump call counters, before calling fn.
            # Note: elapsed_secs, process_secs not reflected yet of course
            self._add_call(logged=_enabled)
            _log_call_numbers = _get_final_value('log_call_numbers')
            # counters just got bumped
            _active_call_number = (self._stats.num_calls_logged
                                   if _log_call_numbers else
                                   0)
            # Get list of callers up to & including first log_call's-deco'd fn
            # (or just caller, if no such fn)
            call_list, prev_indent_level = self.call_chain_to_next_log_calls_fn()
            # Bump _extra_indent_level if last fn on call_list is deco'd AND enabled,
            # o/w it's the _extra_indent_level which that fn 'inherited'.
            # _extra_indent_level: prev_indent_level, or prev_indent_level + 1
            do_indent = _get_final_value('indent')
            _extra_indent_level = (prev_indent_level +
                                   int(not not do_indent and not not _enabled))
            # 0.3.0
            ########## prefixed_fname = _get_final_value('prefix') + f.__name__
            prefixed_fname = _get_final_value('prefix') + self.f_display_name
            # Stackframe hack:
            # the local variable's *name* is what callee wrappers search frames
            # for, so it must equal STACKFRAME_HACK_DICT_NAME -- hence the assert.
            assert '_deco_base__active_call_items__' == STACKFRAME_HACK_DICT_NAME
            _deco_base__active_call_items__ = {
                '_enabled': _enabled,
                '_log_call_numbers': _log_call_numbers,
                '_prefixed_fname': prefixed_fname,    # Hack alert (Pt 1)
                '_active_call_number': _active_call_number,
                '_extra_indent_level': _extra_indent_level,
                # 0.3.0 for _get_own_deco_wrapper
                '_wrapper_deco': self
            }
            # Get logging function IF ANY.
            # For the benefit of callees further down the call chain,
            # if this f is not enabled (_enabled <= 0).
            # Subclass can return None to suppress printed/logged output.
            logging_fn = self.get_logging_fn(_get_final_value)
            # Only do global indentation for print, not for loggers
            global_indent_len = max(_extra_indent_level, 0) * self.INDENT
            # 0.2.2.post1 - save output_fname for log_message use
            call_number_str = ((' [%d]' % _active_call_number)
                               if _log_call_numbers else '')
            output_fname = prefixed_fname + call_number_str
            # 0.3.0
            # Note: DON'T combine with global_mute(),
            # cuz this value will be pushed,
            # and when popped any realtime changes to global mute
            # made during call to f would be ignored.
            mute = _get_final_value('mute')
            # 0.2.2 -- self._log_message() will use
            # the logging_fn, indent_len and output_fname at top of these stacks;
            # thus, verbose functions should use log_calls.print (~ log_message)
            # to write their blather.
            # There's a stack of logging-state tuples,
            # used by self._log_message(), maintained in this wrapper.
            self._logging_state_push(logging_fn, global_indent_len, output_fname, mute)
            # (_xxx variables set, ok to call f)
            if not _enabled:
                ret = f(*args, **kwargs)
                self._logging_state_pop(enabled_too=True)
                return ret
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Set up context, for pre-call handlers
            # (after calling f, add to it for post-call handlers)
            # THIS is the time sink - 23x slower than other 'blocks'
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Key/values of "context" whose values we know so far:
            context = {
                'decorator': self,
                'settings': self._settings_mapping,
                'stats': self._stats,
                'prefixed_fname': prefixed_fname,
                'fparams': self.f_params,
                'call_list': call_list,
                'args': args,
                'kwargs': kwargs,
                'indent': " " * self.INDENT,    # our unit of indentation
                'output_fname': output_fname,
            }
            # Gather all the things we need (for log output, & for history)
            # Use inspect module's Signature.bind method.
            # bound_args.arguments -- contains only explicitly bound arguments
            # 0.2.4.post5 - using
            #     inspect.signature(f).bind(*args, **kwargs)
            # took 45% of execution time of entire wrapper; this takes 23%:
            # 0.3.1 TODO BUG No args is a problem?!
            bound_args = self.f_signature.bind(*args, **kwargs)
            """
            File "/Users/brianoneill/Desktop/Programming/Python-package-staging/log_calls/log_calls/tests/_temp.py", line 12, in <module>
            g(f())
            File "/Users/brianoneill/Desktop/Programming/Python-package-staging/log_calls/log_calls/log_calls.py", line 1935, in _deco_base_f_wrapper_
            bound_args = self.f_signature.bind(*args, **kwargs)
            File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/inspect.py", line 2646, in bind
            return args[0]._bind(args[1:], kwargs)
            File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/inspect.py", line 2571, in _bind
            raise TypeError('too many positional arguments') from None
            TypeError: too many positional arguments
            """
            varargs_pos = get_args_pos(self.f_params)   # -1 if no *args in signature
            argcount = varargs_pos if varargs_pos >= 0 else len(args)
            context['argcount'] = argcount
            # The first argcount-many things in bound_args
            context['argnames'] = list(bound_args.arguments)[:argcount]
            context['argvals'] = args[:argcount]
            context['varargs'] = args[argcount:]
            (context['varargs_name'],
             context['kwargs_name']) = get_args_kwargs_param_names(self.f_params)
            # These 3 statements = 31% of execution time of wrapper
            context['defaulted_kwargs'] = get_defaulted_kwargs_OD(self.f_params, bound_args)
            context['explicit_kwargs'] = get_explicit_kwargs_OD(self.f_params, bound_args, kwargs)
            # context['implicit_kwargs'] = {
            #     k: kwargs[k] for k in kwargs if k not in context['explicit_kwargs']
            # }
            # At least 2x as fast:
            context['implicit_kwargs'] = \
                difference_update(kwargs.copy(), context['explicit_kwargs'])
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Call pre-call handlers, collect nonempty return values
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # only consult global mute in r/t
            if not (mute or self.global_mute()):    # 0.3.0
                pre_msgs = []
                for setting_name in self._settings_mapping._pre_call_handlers:  # keys
                    if _get_final_value(setting_name):
                        info = self._settings_mapping._get_DecoSetting(setting_name)
                        msg = info.pre_call_handler(context)
                        if msg:
                            pre_msgs.append(msg)
                # Write pre-call messages
                if logging_fn:
                    for msg in pre_msgs:
                        self._log_message(msg, extra_indent_level=0)
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Call f(*args, **kwargs) and get its retval; time it.
            # Add timestamp, elapsed time(s) and retval to context.
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # No dictionary overhead between timer(s) start & stop.
            t0 = time.time()    # for timestamp
            t0_wall = wall_time_fn()
            t0_process = process_time_fn()
            retval = f(*args, **kwargs)
            t_end_wall = wall_time_fn()
            t_end_process = process_time_fn()
            context['elapsed_secs'] = (t_end_wall - t0_wall)
            context['process_secs'] = (t_end_process - t0_process)
            context['timestamp'] = t0
            context['retval'] = retval
            self._add_to_elapsed(context['elapsed_secs'], context['process_secs'])
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Call post-call handlers, collect nonempty return values
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # only consult global mute in r/t
            if not (mute or self.global_mute()):    # 0.3.0
                post_msgs = []
                for setting_name in self._settings_mapping._post_call_handlers:  # keys
                    if _get_final_value(setting_name):
                        info = self._settings_mapping._get_DecoSetting(setting_name)
                        msg = info.post_call_handler(context)
                        if msg:
                            post_msgs.append(msg)
                # Write post-call messages
                if logging_fn:
                    for msg in post_msgs:
                        self._log_message(msg, extra_indent_level=0)
            # v0.3.0b22 -- if recording history, add record of call even if we're muted(!)
            elif _get_final_value('record_history'):
                info = self._settings_mapping._get_DecoSetting('record_history')
                _ = info.post_call_handler(context)
            self._logging_state_pop(enabled_too=True)
            return retval
self._add_function_attrs(f, _deco_base_f_wrapper_)
return _deco_base_f_wrapper_
#-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# end else (case "f_or_klass is a function",
# subcase "f is a function & is NOT already deco'd")
#+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
def _add_function_attrs(self, f, f_wrapper):
# Add a sentinel as an attribute to f_wrapper
# so we can in theory chase back to any previous log_calls-decorated fn
setattr(
f_wrapper, self._sentinels['SENTINEL_ATTR'], self._sentinels['SENTINEL_VAR']
)
# A back-pointer
setattr(
f, self._sentinels['WRAPPER_FN_OBJ'], f_wrapper
)
# 0.3.0 -- pointer to self
setattr(
f_wrapper, self._sentinels['DECO_OF'], self
)
# stats objects (attr of wrapper)
setattr(
f_wrapper, 'stats', self._stats
)
setattr(
f_wrapper, self.__class__.__name__ + '_settings', self._settings_mapping
)
# Note: Next two are deprecated as of 0.3.1.
# 0.2.1a
setattr(
f_wrapper, 'log_message', self._log_message,
)
# 0.3.0
setattr(
f_wrapper, 'log_exprs', self._log_exprs,
)
def _add_class_attrs(self, klass):
"""Add attribute(s) to klass: key is useful as sentinel, value is this deco
v0.3.0b20 - trap builtin/extension type failure
TypeError: can't set attributes of built-in/extension type 'dict'
"""
# It's only necessary to trap builtin failure for the first `setattr`
try:
setattr(
klass,
self._sentinels['DECO_OF'],
self
)
except TypeError as e:
# Note, this is fragile (if errormessage changes, `errmsg in str(e)` may fail).
# . There's a test for this situation.
# E.g. log_calls(only='update')(dict)
# or decorate_class(dict, only='update')
errmsg = "can't set attributes of built-in/extension type"
if errmsg in str(e):
return
# klass is not a builtin or extension type
# .....................................................
# NOTE: Next two are deprecated as of 0.3.1.
# Make it easy for user to find the log_calls wrapper of a method,
# given its name, via `get_log_calls_wrapper(fname)`
# or `get_record_history_wrapper(fname)`
# This can be called on a deco'd class or on an instance thereof.
this_deco_class = self.__class__
this_deco_class_name = this_deco_class.__name__
setattr(
klass,
'get_' + this_deco_class_name + '_wrapper',
classmethod(partial(_get_deco_wrapper, this_deco_class))
)
# Make it even easier for methods to find their own log_calls wrappers,
# via `get_own_log_calls_wrapper(fname)`
# or `get_own_record_history_wrapper(fname)`
# This can be called on a deco'd class or on an instance thereof.
this_deco_class = self.__class__
setattr(
klass,
'get_own_' + this_deco_class_name + '_wrapper',
staticmethod(partial(_get_own_deco_wrapper, this_deco_class)) # TODO new 3.1 verify
)
# . END deprecation.
# .....................................................
# largely for testing (by the time anyone gets to see these,
# they're no longer used... 'cept outer class at class level
# can manipulate inner classes' omit and only, but so what)
setattr(klass, this_deco_class_name + '_omit', self.omit)
setattr(klass, this_deco_class_name + '_only', self.only)
# ---------------------------------------------
# call-chain chaser. kludgefest.
# ---------------------------------------------
    @classmethod
    def call_chain_to_next_log_calls_fn(cls):
        """Return list of callers (names) on the call chain
        from caller of caller to first log_calls-deco'd function inclusive,
        if any. If there's no log_calls-deco'd function on the stack,
        or anyway if none are discernible, return [caller_of_caller].

        Returns a 2-tuple (call_list, prev_indent_level):
        call_list -- function names, nearest caller first; the last entry
            is the display name of the nearest *enabled* deco'd fn found
            (if any), read out of that wrapper's stackframe-hack dict;
        prev_indent_level -- that fn's _extra_indent_level, or -1 if no
            deco'd fn was found.
        Walks frames via sys._getframe / f_back -- CPython-specific.
        """
        curr_frame = sys._getframe(2)   # caller-of-caller's frame
        call_list = []
        prev_indent_level = -1
        found = False
        found_enabled = False
        hit_bottom = False      # break both loops: reached <module>
        # Outer loop: keep walking past *disabled* deco'd fns
        # until an enabled one (or <module>) is reached.
        while not found_enabled and not hit_bottom:
            while 1:    # until found a deco'd fn or <module> reached
                curr_funcname = curr_frame.f_code.co_name
                if curr_funcname == '_deco_base_f_wrapper_':
                    # Previous was decorated inner fn, fixup; overwrite '_deco_base_f_wrapper_'
                    # with name of wrapped function
                    inner_fn = curr_frame.f_locals['f']
                    call_list[-1] = inner_fn.__name__   # ~ placeholder
                    wrapper_frame = curr_frame
                    found = True
                    break   # inner loop
                call_list.append(curr_funcname)
                if curr_funcname == '<module>':
                    hit_bottom = True
                    break   # inner loop
                globs = curr_frame.f_back.f_globals
                curr_fn = None
                if curr_funcname in globs:
                    wrapper_frame = curr_frame.f_back
                    curr_fn = globs[curr_funcname]
                # If curr_funcname is a decorated inner function,
                # then it's not in globs. If it's called from outside
                # its enclosing function, its caller is '_deco_base_f_wrapper_'
                # so we'll see that on next iteration.
                else:
                    try:
                        # if it's a decorated inner function that's called
                        # by its enclosing function, detect that:
                        locls = curr_frame.f_back.f_back.f_locals
                    except AttributeError:  # "never happens"
                        # f_back was None -- no grandparent frame
                        # print("**** %s not found (inner fn?)" % curr_funcname)    # <<<DEBUG>>>
                        pass
                    else:
                        wrapper_frame = curr_frame.f_back
                        if curr_funcname in locls:
                            curr_fn = locls[curr_funcname]
                            # print("**** %s found in locls = curr_frame.f_back.f_back.f_locals, "
                            #       "curr_frame.f_back.f_back.f_code.co_name = %s"
                            #       % (curr_funcname, curr_frame.f_back.f_back.f_locals))    # <<<DEBUG>>>
                # Sentinel attribute marks a deco'd (wrapped) function.
                if hasattr(curr_fn, cls._sentinels['SENTINEL_ATTR']):
                    found = True
                    break   # inner loop
                curr_frame = curr_frame.f_back
            # If found, then call_list[-1] is log_calls-wrapped
            if found:
                # Look in stack frame (!) for (0.2.4) STACKFRAME_HACK_DICT_NAME
                # and use its values
                # _enabled, _log_call_numbers, _active_call_number, _extra_indent_level, _prefixed_fname
                if wrapper_frame.f_locals.get(STACKFRAME_HACK_DICT_NAME):
                    active_call_items = wrapper_frame.f_locals[STACKFRAME_HACK_DICT_NAME]
                    enabled = active_call_items['_enabled']     # it's >= 0
                    log_call_numbers = active_call_items['_log_call_numbers']
                    active_call_number = active_call_items['_active_call_number']
                    call_list[-1] = active_call_items['_prefixed_fname']    # Hack alert (Pt 3)
                    # only change prev_indent_level once, for nearest deco'd fn
                    if prev_indent_level < 0:
                        prev_indent_level = active_call_items['_extra_indent_level']
                    if enabled and log_call_numbers:
                        call_list[-1] += (' [%d]' % active_call_number)
                    found_enabled = enabled     # done with outer loop too if enabled
                else:   # bypassed
                    # wrapper never set the hack dict ("true bypass"): treat as disabled
                    enabled = False
                if not enabled:
                    curr_frame = curr_frame.f_back
            else:   # not found
                # if not found, truncate call_list to first element.
                hit_bottom = True
        if hit_bottom:
            call_list = call_list[:1]
        return call_list, prev_indent_level
# ---------------------------------------------
# decorate_* methods
# None deco builtins. deco_module only decos
# things with source in that module.
# ---------------------------------------------
@classmethod
def decorate_hierarchy(cls, baseclass: type, **setting_kwds) -> None:
"""Decorate baseclass and, recursively, all of its descendants.
If any subclasses are directly decorated, their explicitly given setting_kwds,
EXCEPT ``omit`` and ``only``, override those in ``setting_kwds``
UNLESS ``override=True`` is in ``setting_kwds``.
"""
# Filter out builtins. Better than failing to deco all subclasses too.
if not get_file_of_object(baseclass):
return
cls.decorate_class(baseclass, decorate_subclasses=True, **setting_kwds)
@classmethod
def decorate_class(cls, klass: type, decorate_subclasses=False, **setting_kwds) -> None:
"""Decorate klass and, optionally, all of its descendants recursively.
(If decorate_subclasses == True, and if any subclasses are decorated,
their explicitly given setting_kwds, EXCEPT `omit` and `only`,
override those in `setting_kwds` UNLESS 'override=True' is in `setting_kwds`.)
"""
assert isinstance(klass, type) # in "debug" mode only
if not isinstance(klass, type): # in either mode, have the same awareness at the same time
return
# Filter out builtins.
if not get_file_of_object(klass):
return
def _deco_class(kls: type):
t = cls(**setting_kwds)
_ = t(kls)
# assert _ == kls
def _deco_class_rec(kls: type):
_deco_class(kls)
for subclass in kls.__subclasses__():
_deco_class_rec(subclass)
if decorate_subclasses:
_deco_class_rec(klass)
else:
_deco_class(klass)
# (_deco_class_rec if decorate_subclasses else _deco_class)(klass)
@classmethod
def decorate_package_function(cls, f: 'Callable', **setting_kwds) -> None:
"""Wrap ``f`` with decorator ``cls`` (e..g ``log_calls``) using settings in ``settings_kwds``;
replace definition of ``f.__name__`` with that decorated function in the ``__dict__``
of the module of ``f``.
:param cls: decorator class e.g. log_calls
:param f: a function object, qualified with package, e.g. mypackage.myfunc,
however it would be referred to in code at the point of a call
to `decorate_package_function`.
:param setting_kwds: settings for decorator
inspect.getmodule(f).__name__
'sklearn.cluster.k_means_'
inspect.getmodulename('sklearn/cluster/k_means_.py')
'k_means_'
SO
* fmodname = inspect.getmodule(f).__name__
'sklearn.cluster.k_means_'
* replace '.' with '/' in fmodname
fmodname = 'sklearn/cluster/k_means_'
* inspect.getmodulename('sklearn/cluster/k_means_.py')
'k_means_'
VS
inspect.getmodulename('sklearn.cluster')
None
So call inspect.getmodulename(fmodname + '.py')
If it returns None, leave alone, f was called through module.
If it's NOT None, then trim off last bit from path
fmodname = '.'.join(fmodname.split('/')[:-1])
eval(fmodname + '.' + f.__name__
"""
# Filter out builtins.
if not get_file_of_object(f):
return
f_deco = cls(**setting_kwds)(f)
namespace = vars(inspect.getmodule(f))
fmodname = inspect.getmodule(f).__name__
# 'sklearn.cluster.k_means_'
basic_modname = inspect.getmodulename(fmodname.replace('.', '/') + '.py')
# 'k_means_' or 'some_module', or None
if basic_modname and '.' in fmodname:
fpackagename = namespace['__package__'] # '.'.join(fmodname.split('.')[:-1])
exec("import " + fpackagename)
package_dict = eval("vars(%s)" % fpackagename)
package_dict[f.__name__] = f_deco
namespace[f.__name__] = f_deco
@classmethod
def decorate_module_function(cls, f: 'Callable', **setting_kwds) -> None:
"""Wrap ``f`` with decorator ``cls`` (e..g ``log_calls``) using settings in ``settings_kwds``;
replace definition of ``f.__name__`` with that decorated function in the ``__dict__``
of the module of ``f``.
:param cls: decorator class e.g. log_calls
:param f: a function object, qualified with module, e.g. mymodule.myfunc,
however it would be referred to in code at the point of a call to `decorate_module_function`.
:param setting_kwds: settings for decorator
"""
# Filter out builtins.
if not get_file_of_object(f):
return
namespace = vars(inspect.getmodule(f))
namespace[f.__name__] = cls(**setting_kwds)(f)
@classmethod
def decorate_function(cls, f: 'Callable', **setting_kwds) -> None:
"""Wrap f with decorator `cls` using settings, replace definition of f.__name__
with that decorated function in the global namespace OF THE CALLER.
:param cls: decorator class e.g. log_calls
:param f: a function object, with no package/module qualifier.
However it would be referred to in code at the point of the call
to `decorate_function`.
Typically, one which is defined in the calling module,
or imported/in namespace.
:param setting_kwds: settings for decorator
"""
# Filter out builtins.
if not get_file_of_object(f):
return
caller_frame = sys._getframe(1) # caller's frame
namespace = caller_frame.f_globals
namespace[f.__name__] = cls(**setting_kwds)(f)
@classmethod
def decorate_module(cls, mod: 'module',
functions=True, classes=True,
**setting_kwds) -> None:
"""
:param cls: the decorator class (``log_calls`` or ``record_history``
Can't decorate builtins, attempting
log_calls.decorate_class(dict, only='update')
gives:
TypeError: can't set attributes of built-in/extension type 'dict'
Only decorate things with sourcecode in module.
As ever, don't try to deco builtins.
"""
module_filename = get_file_of_object(mod)
if not (module_filename and inspect.ismodule(mod)):
return # refuse, SILENTLY
# Functions
if functions:
for name, f in inspect.getmembers(mod, inspect.isfunction):
if get_file_of_object(f) == module_filename:
vars(mod)[name] = cls(**setting_kwds)(f)
### Note, vars(mod) also has key __package__,
### . e.g. 'sklearn.cluster' for mod = 'sklearn.cluster.k_means_'
# Classes
if classes:
for name, kls in inspect.getmembers(mod, inspect.isclass):
if get_file_of_object(kls) == module_filename:
_ = cls(**setting_kwds)(kls)
# assert _ == kls
#----------------------------------------------------------------------------
# log_calls
#----------------------------------------------------------------------------
class log_calls(_deco_base):
    """
    This decorator logs the caller of a decorated function, and optionally
    the arguments passed to that function, before calling it; after calling
    the function, it optionally writes the return value (default: it doesn't),
    and optionally prints a 'closing bracket' message on return (default:
    it does).
    "logs" means: prints to stdout, or, optionally, to a logger.

    The decorator takes various keyword arguments, all with sensible defaults.
    Every parameter except prefix and max_history can take two kinds of values,
    direct and indirect. Briefly, if the value of any of those parameters
    is a string that ends in '=', then it's treated as the name of a keyword
    arg of the wrapped function, and its value when that function is called is
    the final, indirect value of the decorator's parameter (for that call).
    See deco_settings.py docstring for details.

        enabled:          If true, then logging will occur. (Default: True)
        args_sep:         str used to separate args. The default is ', ', which lists
                          all args on the same line. If args_sep ends in a newline '\n',
                          additional spaces are appended to that to make for a neater
                          display. Other separators in which '\n' occurs are left
                          unchanged, and are untested -- experiment/use at your own risk.
        log_args:         Arguments passed to the (decorated) function will be logged,
                          if true (Default: True)
        log_retval:       Log what the wrapped function returns, if true (truthy).
                          At most MAXLEN_RETVALS chars are printed. (Default: False)
        log_exit:         If true, the decorator will log an exiting message after
                          calling the function, and before returning what the function
                          returned. (Default: True)
        log_call_numbers: If truthy, display the (1-based) number of the function call,
                          e.g.   f [n] <== <module>   for n-th logged call.
                          This call would correspond to the n-th record
                          in the functions call history, if record_history is true.
                          (Default: False)
        log_elapsed:      If true, display how long it took the function to execute,
                          in seconds. (Default: False)
        indent:           if true, log messages for each level of log_calls-decorated
                          functions will be indented by 4 spaces, when printing
                          and not using a logger (default: True (0.3.0))
        prefix:           str to prefix the function name with when it is used
                          in logged messages: on entry, in reporting return value
                          (if log_retval) and on exit (if log_exit). (Default: '')
        file:             If `logger` is `None`, a stream (an instance of type `io.TextIOBase`)
                          to which `log_calls` will print its messages. This value is
                          supplied to the `file` keyword parameter of the `print` function.
                          (Default: sys.stdout)
        logger:           If not None (the default), a Logger which will be used
                          (instead of the print function) to write all messages.
        loglevel:         logging level, if logger != None. (Default: logging.DEBUG)
        mute:             setting. 3-valued:
                            log_calls.MUTE.NOTHING (default -- all output produced)
                                alias False
                            log_calls.MUTE.CALLS   (mute output from decorated functions
                                                    & methods & properties, but log_message
                                                    and log_exprs produce output;
                                                    call # recording, history recording continue
                                                    if enabled)
                                alias True
                            log_calls.MUTE.ALL     (no output at all; but call # recording,
                                                    history recording continue if enabled)
                                alias -1
                          mutable, but NOT allow_indirect: log_message has to be able
                          to get the value, and then doesn't have access to the args to f
                          (if f is not enabled, and only kludgily, if f is enabled)
                          When `mute` is True (log_calls.MUTE.CALLS),
                          log_expr and log_message adjust for calls not being logged:
                          because there's no log_calls "frame",
                          -- they don't indent an extra level (no 'arguments:' to align with), and
                          -- they automatically prefix messages with function's display name
        record_history:   If true, an array of records will be kept, one for each
                          call to the function; each holds call number (1-based),
                          arguments and defaulted keyword arguments, return value,
                          time elapsed, time of call, caller (call chain), prefixed
                          function name. (Default: False)
        max_history:      An int. value > 0 --> store at most value-many records,
                          oldest records overwritten;
                          value <= 0 --> unboundedly many records are stored.

    Parameters that aren't *settings* (0.3.0):

        omit=()
            When decorating a class, specifies the methods that will NOT be decorated.
            As for `field_names` parameter of namedtuples:
            a single string with each name separated by whitespace and/or commas,
            for example 'x y' or 'x, y', or a tuple/list/sequence of strings.
            The strings themselves can be globs, i.e. can contain wildcards:
                Pattern     Meaning
                *           matches everything
                ?           matches any single character
                [seq]       matches any character in seq
                [!seq]      matches any character not in seq
            * and ? can match dots, seq can be a range e.g. 0-9, a-z
            Matching is case-sensitive, of course.
            See https://docs.python.org/3/library/fnmatch.html
            Can be class-prefixed e.g. C.f, or D.DI.foo,
            or unprefixed (and then any matching method of outermost or inner classes
            will be omitted).
            Ignored when decorating a function.
        only=()
            As for `field_names` parameter of namedtuples:
            a single string with each name separated by whitespace and/or commas,
            for example 'x y' or 'x, y', or a tuple/list/sequence of strings.
            When decorating a class, ONLY this/these methods, minus any in omit,
            will be decorated.
            Can be class-prefixed e.g. D.DI.foo,
            or unprefixed (and then any matching method of outermost or inner classes
            will be deco'd).
            Ignored when decorating a function.
        name
            We now use __qualname__ ALL the time as the display name of a function or method
            (the name used for the fn in logged output),
            UNLESS the user provides `name=display_name_str`
            where `display_name_str` is either the name to be used for the fn in logged output,
            or is an oldstyle format str into which f.__name__ will be substituted
            to obtain the display name.
            Useful e.g. to suppress the clutter of qualnames of inner functions and methods:
            to use just, say, "inner_fn" instead of "outer_fn.<locals>.inner_fn",
            supply `name='%s'`.
            Ignored when decorating a class.
    """
    # *** DecoSettingsMapping "API" --
    # (1) initialize: call register_class_settings
    # allow indirection for all except prefix and max_history, which also isn't mutable
    _setting_info_list = (
        # indirect_default='False': a user attr which constructor knows about
        DecoSettingEnabled('enabled', indirect_default=False),
        DecoSetting_str('args_sep', str, ', ', allow_falsy=False),
        DecoSettingArgs('log_args'),
        DecoSettingRetval('log_retval'),
        DecoSettingElapsed('log_elapsed'),
        DecoSettingExit('log_exit'),
        DecoSetting_bool('indent', bool, True, allow_falsy=True),
        DecoSetting_bool('log_call_numbers', bool, False, allow_falsy=True),
        DecoSetting_str('prefix', str, '', allow_falsy=True,
                        allow_indirect=False, mutable=True),    # 0.3.0; was mutable=False
        DecoSettingFile('file', io.TextIOBase, None, allow_falsy=True),
        DecoSettingLogger('logger', (logging.Logger,
                                     str), None, allow_falsy=True),
        DecoSetting_int('loglevel', int, logging.DEBUG, allow_falsy=False),
        DecoSetting_int('mute', int, False, allow_falsy=True,
                        allow_indirect=True, mutable=True),
        DecoSettingHistory('record_history'),
        DecoSetting_int('max_history', int, 0, allow_falsy=True,
                        allow_indirect=False, mutable=False),
        DecoSetting_bool('NO_DECO', bool, False, allow_falsy=True, mutable=False,
                         pseudo_setting=True
                        ),
    )
    DecoSettingsMapping.register_class_settings('log_calls',    # name of this class. DRY - oh well.
                                                _setting_info_list)

    @used_unused_keywords()
    def __init__(self,
                 settings=None,     # 0.2.4.post2. A dict or a pathname
                 omit=(),           # 0.3.0 class deco'ing: omit these methods or properties; not a setting
                 only=(),           # 0.3.0 class deco'ing: deco only these methods or props (sans any in omit); not a setting
                 name='',           # 0.3.0 name or oldstyle fmt str for f_display_name of fn; not a setting
                 override=False,    # 0.3.0b18: new settings override existing ones. NOT a "setting"
                 enabled=True,
                 args_sep=', ',
                 log_args=True,
                 log_retval=False,
                 log_elapsed=False,
                 log_exit=True,
                 indent=True,       # 0.3.0, this seems the better default
                 log_call_numbers=False,
                 prefix='',
                 file=None,         # detectable value so we late-bind to sys.stdout
                 logger=None,
                 loglevel=logging.DEBUG,
                 mute=False,
                 record_history=False,
                 max_history=0,
                 NO_DECO=False,
                ):
        """(See base class docstring)
        """
        # 0.2.4 settings stuff:
        # determine which keyword arguments were actually passed by caller!
        used_keywords_dict = log_calls.__dict__['__init__'].get_used_keywords()
        # remove non-"settings"
        for kwd in ('settings', 'omit', 'only', 'name', 'override'):
            if kwd in used_keywords_dict:
                del used_keywords_dict[kwd]

        super().__init__(
            settings=settings,
            _omit=omit,                 # 0.3.0 class deco'ing: tuple - omit these methods/inner classes
            _only=only,                 # 0.3.0 class deco'ing: tuple - decorate only these methods/inner classes (sans omit)
            _name_param=name,           # 0.3.0 name or oldstyle fmt str etc.
            _override=override,         # 0.3.0b18: new settings override existing ones. NOT a "setting"
            _used_keywords_dict=used_keywords_dict,
            enabled=enabled,
            args_sep=args_sep,
            log_args=log_args,
            log_retval=log_retval,
            log_elapsed=log_elapsed,
            log_exit=log_exit,
            indent=indent,
            log_call_numbers=log_call_numbers,
            prefix=prefix,
            file=file,
            logger=logger,
            loglevel=loglevel,
            mute=mute,
            record_history=record_history,
            max_history=max_history,
            NO_DECO=NO_DECO,
        )

    # 0.3.0
    @classmethod
    def allow_repr(cls) -> bool:
        """log_calls never uses repr() on arguments (see base class)."""
        return False

    # 0.3.0
    @classmethod
    def fixup_for_init(cls, some_settings: dict):
        """Adjust settings dict used when decorating __init__:
        logging the return value of __init__ (always None) is just noise."""
        some_settings['log_retval'] = False

    mute = False    # CLASS level attribute

    # 0.3.0
    @classmethod
    def global_mute(cls) -> bool:
        return cls.mute

    # 0.3.0
    # (0.3.1: this method was accidentally defined twice, verbatim;
    #  the duplicate has been removed.)
    @classmethod
    def log_message_auto_prefix_threshold(cls) -> int:
        """:return: one of the "constants" of _deco_base.MUTE
        The log_* functions will automatically prefix their output
        with the function's display name if max of
            the function's mute setting, global_mute()
        is this mute level or higher.
        """
        return cls.MUTE.CALLS

    @classmethod
    def get_logging_fn(cls, _get_final_value_fn):
        """Return logging_fn or None.
        cls: unused. Present so this method can be overridden."""
        outfile = _get_final_value_fn('file')
        if not outfile:
            outfile = sys.stdout    # possibly rebound by doctest

        logger = _get_final_value_fn('logger')
        # 0.2.4 logger can also be a name of a logger
        if logger and isinstance(logger, str):  # not None, not ''
            # We can't first check if there IS such a logger.
            # This creates one (with no handlers) if it doesn't exist:
            logger = logging.getLogger(logger)
        # If logger has no handlers then it can't write anything,
        # so we'll fall back on print
        if logger and not logger.hasHandlers():
            logger = None
        loglevel = _get_final_value_fn('loglevel')
        # Establish logging function
        logging_fn = (partial(logger.log, loglevel)
                      if logger else
                      lambda msg: print(msg, file=outfile, flush=True))
        # 0.2.4 - Everybody can indent.
        # loggers: just use formatters with '%(message)s'.
        return logging_fn
| {
"repo_name": "Twangist/log_calls",
"path": "log_calls/log_calls.py",
"copies": "1",
"size": "124843",
"license": "mit",
"hash": -3856040721634263000,
"line_mean": 44.7635630499,
"line_max": 155,
"alpha_frac": 0.5155355126,
"autogenerated": false,
"ratio": 4.200356638180472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215892150780472,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian O'Neill" # BTO
__doc__ = """
Module version = '0.2.4'
Slightly ad-hoc decorator
used_unused_keywords
for `__init__` function of `log_calls`. It's not *totally* ad-hoc:
`log_calls.__init__` only uses half the functionality of this decorator ;/
This decorator allows a function to determine which of its keyword arguments
were actually supplied by its caller, and which were not supplied and therefore
receive default values. It makes these two collections available as OrderedDicts.
Given
@used_unused_keywords()
def h(a='yes', b='no', c=17, **kwargs):
print("h, used keywords: ", h.get_used_keywords())
print("h, unused keywords: ", h.get_unused_keywords())
each time `h` is called, it can get `OrderedDict`s of used and unused
(defaulted) keyword arguments together with their values,
using the methods `get_used_keywords()` and `get_unused_keywords()`
that the decorator adds (to the wrapper of `h`).
Only keyword arguments that appear explicitly in the decorated function's
signature occur in the dictionaries of "used" and "unused" keywords;
"implicit" keywords that show up in a function's **kwargs do not occur
in those dictionaries.
Mandatory keyword-only arguments that have no default value are of course
included in the "used" dictionary.
See the doctests in function main() below for examples/tests.
"""
from functools import wraps
import inspect
from .helpers import (get_explicit_kwargs_OD, get_defaulted_kwargs_OD)
class used_unused_keywords():
    """Decorator that records, per call, which of the decorated function's
    explicit keyword parameters were supplied by the caller ("used") and
    which received their defaults ("unused").

    The two collections (OrderedDicts, as of the most recent call) are
    exposed through ``get_used_keywords()`` and ``get_unused_keywords()``,
    which are attached to the wrapper of the decorated function.
    """
    def __init__(self, enabled=True):
        _ = enabled  # accepted for interface compatibility; not used
        self._used_kwds = {}
        self._unused_kwds = {}

    def get_used_keywords(self):
        """OrderedDict of keyword args explicitly supplied on the last call."""
        return self._used_kwds

    def get_unused_keywords(self):
        """OrderedDict of keyword args that received defaults on the last call."""
        return self._unused_kwds

    def __call__(self, f):
        # (nada)
        @wraps(f)
        def f_used_unused_keywords_wrapper_(*args, **kwargs):
            # Partition f's explicit keyword parameters into those supplied
            # by the actual call and those that were 'defaulted'.
            sig = inspect.signature(f)
            bound = sig.bind(*args, **kwargs)
            # These return OrderedDicts
            self._used_kwds = get_explicit_kwargs_OD(sig.parameters, bound, kwargs)
            self._unused_kwds = get_defaulted_kwargs_OD(sig.parameters, bound)
            return f(*args, **kwargs)

        f_used_unused_keywords_wrapper_.get_used_keywords = self.get_used_keywords
        f_used_unused_keywords_wrapper_.get_unused_keywords = self.get_unused_keywords
        return f_used_unused_keywords_wrapper_
#----------------------------------------------------------------------------
# doctests
#----------------------------------------------------------------------------
def main():
    """
    Here's how a global function can access dictionaries of used and unused
    (defaulted) keyword arguments together with their values, per-call.
    This function `f` has no **kwargs:
    >>> @used_unused_keywords()
    ... def f(x=1, y=2, z=3):
    ...     print("f, used keywords: ", f.get_used_keywords())
    ...     print("f, unused keywords: ", f.get_unused_keywords())
    >>> f(x=101, z=2003)
    f, used keywords: OrderedDict([('x', 101), ('z', 2003)])
    f, unused keywords: OrderedDict([('y', 2)])
    >>> f(z='a string')
    f, used keywords: OrderedDict([('z', 'a string')])
    f, unused keywords: OrderedDict([('x', 1), ('y', 2)])
    >>> f()
    f, used keywords: OrderedDict()
    f, unused keywords: OrderedDict([('x', 1), ('y', 2), ('z', 3)])
    In the next example, `g` does have **kwargs, so it can be passed any
    old keyword argument and value; however, only explicit keyword parameters
    occur in the used and unused dictionaries. Each call to `g` below
    passes `extra_kwd` with a value, but as this parameter isn't an explicit
    keyword parameter of `g`, it doesn't show up in either the "used" or the
    "unused" dictionary:
    >>> @used_unused_keywords()
    ... def g(x=1, y=2, z=3, **kwargs):
    ...     print("g, used keywords: ", g.get_used_keywords())
    ...     print("g, unused keywords: ", g.get_unused_keywords())
    >>> g(x=101, z=2003, extra_kwd='wtf')
    g, used keywords: OrderedDict([('x', 101), ('z', 2003)])
    g, unused keywords: OrderedDict([('y', 2)])
    >>> g(z='a string', extra_kwd='wtf')
    g, used keywords: OrderedDict([('z', 'a string')])
    g, unused keywords: OrderedDict([('x', 1), ('y', 2)])
    >>> g(extra_kwd='wtf')
    g, used keywords: OrderedDict()
    g, unused keywords: OrderedDict([('x', 1), ('y', 2), ('z', 3)])
    Mandatory keyword-only arguments that have no default value are of course
    included in the "used" dictionary:
    >>> @used_unused_keywords()
    ... def t(*, u, v, w, x=1, y=2, z=3, **kwargs):
    ...     print("t, used keywords: ", t.get_used_keywords())
    ...     print("t, unused keywords: ", t.get_unused_keywords())
    >>> t(u='a', v='b', w='c', x=101, z=2003, extra_kwd='wtf')
    t, used keywords: OrderedDict([('u', 'a'), ('v', 'b'), ('w', 'c'), ('x', 101), ('z', 2003)])
    t, unused keywords: OrderedDict([('y', 2)])
    Here's how a decorated `__init__` instance method accesses the dictionaries:
    >>> class C():
    ...     @used_unused_keywords()
    ...     def __init__(self, x=1, y=2, z=3, **kwargs):
    ...         wrapper = C.__dict__['__init__']
    ...         print("__init__, used keywords: ", wrapper.get_used_keywords())
    ...         print("__init__, unused keywords: ", wrapper.get_unused_keywords())
    >>> c1 = C(x=101, z=2003, extra_kwd='wtf')
    __init__, used keywords: OrderedDict([('x', 101), ('z', 2003)])
    __init__, unused keywords: OrderedDict([('y', 2)])
    >>> c2 = C(z='a string', extra_kwd='wtf')
    __init__, used keywords: OrderedDict([('z', 'a string')])
    __init__, unused keywords: OrderedDict([('x', 1), ('y', 2)])
    >>> c3 = C(extra_kwd='wtf')
    __init__, used keywords: OrderedDict()
    __init__, unused keywords: OrderedDict([('x', 1), ('y', 2), ('z', 3)])
    """
    # This function exists only to carry the doctests above; there is
    # nothing to execute. Run them with `python -m doctest` or a test runner.
    pass
| {
"repo_name": "Twangist/log_calls",
"path": "log_calls/used_unused_kwds.py",
"copies": "1",
"size": "6209",
"license": "mit",
"hash": -6162236203628143000,
"line_mean": 36.1796407186,
"line_max": 97,
"alpha_frac": 0.5872121115,
"autogenerated": false,
"ratio": 3.637375512595196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4724587624095196,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian O'Neill" # BTO
__doc__ = """
Module version = '0.3.0'
"""
from .deco_settings import DecoSetting, DecoSettingsMapping, DecoSetting_bool
from .log_calls import _deco_base, DecoSettingHistory
from .used_unused_kwds import used_unused_keywords
class record_history(_deco_base):
    """Decorator (a ``_deco_base`` subclass) that records call history for the
    decorated callable but produces no output (contrast with ``log_calls``).

    User-visible settings: ``enabled`` (aliased ``record_history`` in
    log_calls), ``prefix`` and ``max_history``; ``indent``,
    ``log_call_numbers`` and ``mute`` are fixed, hidden settings.
    """
    # allow indirection for all except prefix and max_history, which also isn't mutable
    _setting_info_list = (
        DecoSetting('log_call_numbers', bool, True, allow_falsy=True, visible=False),
        DecoSetting('indent', bool, False, allow_falsy=True, visible=False),
        # visible:
        DecoSettingHistory('enabled'),  # alias "record_history" in log_calls
        DecoSetting('prefix', str, '', allow_falsy=True, allow_indirect=False),
        DecoSetting('mute', int, False, allow_falsy=True, visible=False),  # 0.3.0
        DecoSetting('max_history', int, 0, allow_falsy=True, mutable=False),
        DecoSetting_bool('NO_DECO', bool, False, allow_falsy=True, mutable=False),
    )
    # NOTE: runs at class-definition time -- registers this class's settings
    # in DecoSettingsMapping's global registry.
    DecoSettingsMapping.register_class_settings('record_history',  # name of this class. DRY - oh well.
                                                _setting_info_list)

    # 0.2.6 Fix: use decorator:
    @used_unused_keywords()
    def __init__(self,
                 settings=None,     # 0.3.0b18 added: A dict or a pathname
                 omit=tuple(),      # 0.3.0 class deco'ing: omit these methods/inner classes
                 only=tuple(),      # 0.3.0 class deco'ing: decorate only these methods/inner classes (minus any in omit)
                 name=None,         # 0.3.0 name or oldstyle fmt str for f_display_name of fn; not a setting
                 enabled=True,
                 prefix='',
                 max_history=0,
                 NO_DECO=False,
                 ):
        """Collect the keyword args the caller actually passed (via the
        ``used_unused_keywords`` decorator on this method), strip the ones
        that aren't settings, and forward everything to ``_deco_base``.
        """
        # 0.2.6 get used_keywords_dict and pass to super().__init__
        # (access through __dict__ to get the wrapper, not a bound method)
        used_keywords_dict = record_history.__dict__['__init__'].get_used_keywords()
        # 0.3.0 but first, ditch parameters that aren't settings
        for kwd in ('omit', 'only', 'name'):
            if kwd in used_keywords_dict:
                del used_keywords_dict[kwd]
        super().__init__(
            settings=settings,          # 0.3.0b18 added: A dict or a pathname
            _omit=omit,                 # 0.3.0 class deco'ing: tuple - omit these methods/inner classes
            _only=only,                 # 0.3.0 class deco'ing: tuple - decorate only these methods/inner classes (minus omit)
            _name_param=name,           # 0.3.0 name or oldstyle fmt str etc.
            _used_keywords_dict=used_keywords_dict,
            enabled=enabled,
            prefix=prefix,
            mute=False,
            max_history=max_history,
            indent=False,               # p.i.t.a. that this is here :|
            log_call_numbers=True,      # for call chain in history record
            NO_DECO=NO_DECO,
        )

    # 0.3.0
    @classmethod
    def allow_repr(cls) -> bool:
        # record_history may log repr of the decorated callable's owner.
        return True

    # 0.3.0
    @classmethod
    def log_message_auto_prefix_threshold(cls) -> int:
        """:return: one of the "constants" of _deco_base.MUTE
        The log_* functions will automatically prefix their output
        with the function's display name if max of
        the function's mute setting, global_mute()
        is this mute level or higher.
        Returning _deco_base.MUTE.NOTHING means, always prefix.
        """
        return cls.MUTE.NOTHING

    #### 0.3.0.beta12+ try letting record_history use log_* functions
    # @classmethod
    # def get_logging_fn(cls, _get_final_value_fn):
    #     """Return None: no output.
    #     cls: unused.."""
    #     return None

    # def __call__(self, f):
    #     return super().__call__(f)
| {
"repo_name": "Twangist/log_calls",
"path": "log_calls/record_history.py",
"copies": "1",
"size": "3808",
"license": "mit",
"hash": -5299845316110112000,
"line_mean": 42.2727272727,
"line_max": 121,
"alpha_frac": 0.5701155462,
"autogenerated": false,
"ratio": 3.606060606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9662466746605854,
"avg_score": 0.0027418811309503324,
"num_lines": 88
} |
__author__ = "Brian O'Neill" # BTO
__version__ = '0.1.14'
__doc__ = """
100% coverage of deco_settings.py
"""
from unittest import TestCase
from log_calls import DecoSetting, DecoSettingsMapping
from log_calls.log_calls import DecoSettingEnabled, DecoSettingHistory
from collections import OrderedDict
import inspect
import logging # not to use, just for the logging.Logger type
import sys
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# helper
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import re
def collapse_whitespace(s):
    """Return *s* with newlines turned into spaces, runs of whitespace
    collapsed to a single space, and surrounding whitespace stripped.
    Used to compare strings irrespective of layout."""
    no_newlines = s.replace('\n', ' ')
    return re.sub(r'\s\s+', ' ', no_newlines).strip()
##############################################################################
# DecoSetting tests
##############################################################################
class TestDecoSetting(TestCase):
    """Tests for the DecoSetting 'struct': constructor attribute handling
    (including extra user attributes) and __repr__."""
    # placeholders, populated in setUpClass:
    info_plain = None
    info_extended = None

    @classmethod
    def setUpClass(cls):
        # immutable int setting
        cls.info_plain = DecoSetting('set_once', int, 15,
                                     allow_falsy=True, mutable=False)
        # invisible setting (forces allow_indirect=False)
        cls.hidden = DecoSetting('hidden', bool, True,
                                 allow_falsy=True, visible=False)
        # setting whose final_type is a tuple of types
        cls.twotype = DecoSetting('twotype', (logging.Logger, str), None,
                                  allow_falsy=True)
        # with extra fields
        cls.info_extended = DecoSetting('extended', tuple, ('Joe', "Schmoe"),
                                        allow_falsy=True, allow_indirect=False,
                                        extra1='Tom', extra2='Dick', extra3='Harry')

    def test___init__1(self):
        """Plain setting, without any additional attributes."""
        self.assertEqual(self.info_plain.name, 'set_once')
        self.assertEqual(self.info_plain.final_type, int)
        self.assertEqual(self.info_plain.default, 15)
        self.assertEqual(self.info_plain.allow_falsy, True)
        self.assertEqual(self.info_plain.allow_indirect, True)
        self.assertEqual(self.info_plain.mutable, False)
        self.assertEqual(self.info_plain.visible, True)
        self.assertEqual(self.info_plain._user_attrs, [])

    def test___init__2(self):
        """Invisible setting, without any additional attributes."""
        self.assertEqual(self.hidden.name, 'hidden')
        self.assertEqual(self.hidden.final_type, bool)
        self.assertEqual(self.hidden.default, True)
        self.assertEqual(self.hidden.allow_falsy, True)
        self.assertEqual(self.hidden.allow_indirect, False)    # because visible=False
        self.assertEqual(self.hidden.mutable, True)
        self.assertEqual(self.hidden.visible, False)

    def test___init__3(self):
        """Two-type setting, without any additional attributes."""
        self.assertEqual(self.twotype.name, 'twotype')
        self.assertEqual(self.twotype.final_type, (logging.Logger, str))
        self.assertEqual(self.twotype.default, None)
        self.assertEqual(self.twotype.allow_falsy, True)
        self.assertEqual(self.twotype.allow_indirect, True)    # visible=True, so indirection allowed
        self.assertEqual(self.twotype.mutable, True)
        self.assertEqual(self.twotype.visible, True)

    def test___init__4(self):
        """WITH additional attributes."""
        self.assertEqual(self.info_extended.name, 'extended')
        self.assertEqual(self.info_extended.final_type, tuple)
        self.assertEqual(self.info_extended.default, ('Joe', "Schmoe"))
        self.assertEqual(self.info_extended.allow_falsy, True)
        self.assertEqual(self.info_extended.allow_indirect, False)
        self.assertEqual(self.info_extended.mutable, True)
        self.assertEqual(self.info_extended.visible, True)
        # extra keyword args become attributes, recorded (sorted) in _user_attrs
        self.assertEqual(self.info_extended._user_attrs, ['extra1', 'extra2', 'extra3'])
        self.assertEqual(self.info_extended.extra1, 'Tom')
        self.assertEqual(self.info_extended.extra2, 'Dick')
        self.assertEqual(self.info_extended.extra3, 'Harry')

    def test___repr__1(self):
        plain_repr = "DecoSetting('set_once', int, 15, allow_falsy=True, " \
                     "allow_indirect=True, mutable=False, visible=True, pseudo_setting=False, indirect_default=15)"
        self.assertEqual(repr(self.info_plain), plain_repr)

    def test___repr__2(self):
        hidden_repr = "DecoSetting('hidden', bool, True, allow_falsy=True, " \
                      "allow_indirect=False, mutable=True, visible=False, pseudo_setting=False, indirect_default=True)"
        self.assertEqual(repr(self.hidden), hidden_repr)

    def test___repr__3(self):
        twotype_repr = "DecoSetting('twotype', (Logger, str), None, allow_falsy=True, " \
                       "allow_indirect=True, mutable=True, visible=True, pseudo_setting=False, indirect_default=None)"
        self.assertEqual(repr(self.twotype), twotype_repr)

    def test___repr__4(self):
        ext_repr = "DecoSetting('extended', tuple, ('Joe', 'Schmoe'), " \
                   "allow_falsy=True, allow_indirect=False, " \
                   "mutable=True, visible=True, pseudo_setting=False, " \
                   "indirect_default=('Joe', 'Schmoe'), " \
                   "extra1='Tom', extra2='Dick', extra3='Harry')"
        self.assertEqual(repr(self.info_extended), ext_repr)
##############################################################################
# DecoSetting tests
##############################################################################
class TestDecoSettingsMapping(TestCase):
    """Tests for DecoSettingsMapping: registration, mapping/attribute access,
    mutation rules (mutable=False, visible=False), repr/str, update(),
    and direct vs. indirect final-value resolution.

    A fresh mapping is built in setUp for every test; class settings are
    registered once in setUpClass.
    """
    # placeholders:
    _settings_mapping = OrderedDict()

    @classmethod
    def setUpClass(cls):
        cls._settings = (
            DecoSettingEnabled('enabled', indirect_default=False),
            DecoSetting('folderol', str, '', allow_falsy=True, allow_indirect=False),
            DecoSetting('my_setting', str, 'on', allow_falsy=False, allow_indirect=True),
            DecoSetting('your_setting', str, 'off', allow_falsy=False, allow_indirect=False,
                        mutable=False),
            DecoSettingHistory('history', visible=False),
        )
        DecoSettingsMapping.register_class_settings('TestDecoSettingsMapping',
                                                    cls._settings)

    def setUp(self):
        """__init__(self, *, deco_class, **values_dict)"""
        self._settings_mapping = DecoSettingsMapping(
            deco_class=self.__class__,
            # the rest are what DecoSettingsMapping calls **values_dict
            enabled=True,
            folderol='bar',
            my_setting='eek',       # str but doesn't end in '=' --> not indirect
            your_setting='Howdy',
            history=False
        )

    def test_register_class_settings(self):
        self.assertIn('TestDecoSettingsMapping', DecoSettingsMapping._classname2SettingsData_dict)
        od = DecoSettingsMapping._classname2SettingsData_dict['TestDecoSettingsMapping']
        self.assertIsInstance(od, OrderedDict)  # implies od, od is not None, etc.
        # self.assertEqual(len(od), len(self._settings))
        names = tuple(map(lambda s: s.name, self._settings))
        self.assertEqual(tuple(od), names)

    def test___init__(self):
        """setUp does what __init__ ordinarily does:
        __init__(self, *, deco_class, **values_dict)"""
        self.assertEqual(self._settings_mapping.deco_class, self.__class__)

    ## TO DO - howdya test THIS?
    ## Implicitly, it gets tested and the descriptors it creates get tested.
    ## make_setting_descriptor is a classmethod.
    # def test_make_setting_descriptor(self):
    #     descr = DecoSettingsMapping.make_setting_descriptor('key')
    #     self.fail()

    def test__deco_class_settings_dict(self):
        """property. Can't use/call/test _deco_class_settings_dict till __init__"""
        od = self._settings_mapping._deco_class_settings_dict
        self.assertIs(od, self._settings_mapping._classname2SettingsData_dict[
                          self._settings_mapping.deco_class.__name__]
        )
        # iteration yields only the *visible* settings ('history' is hidden)
        self.assertEqual(list(self._settings_mapping),
                         ['enabled', 'folderol', 'my_setting', 'your_setting']
        )

    def test_registered_class_settings_repr(self):
        settings_repr = """
        DecoSettingsMapping.register_class_settings(
            TestDecoSettingsMapping,
            [DecoSetting('enabled', int, True, allow_falsy=True, allow_indirect=True, mutable=True, visible=True,
                         pseudo_setting=False, indirect_default=False),
            DecoSetting('folderol', str, '', allow_falsy=True, allow_indirect=False, mutable=True, visible=True,
                        pseudo_setting=False, indirect_default=''),
            DecoSetting('my_setting', str, 'on', allow_falsy=False, allow_indirect=True, mutable=True, visible=True,
                        pseudo_setting=False, indirect_default='on'),
            DecoSetting('your_setting', str, 'off', allow_falsy=False, allow_indirect=False, mutable=False, visible=True,
                        pseudo_setting=False, indirect_default='off'),
            DecoSetting('history', bool, False, allow_falsy=True, allow_indirect=False, mutable=True, visible=False,
                        pseudo_setting=False, indirect_default=False)
            ])
        """
        # compare modulo whitespace/layout
        self.assertEqual(
            collapse_whitespace(self._settings_mapping.registered_class_settings_repr()),
            collapse_whitespace(settings_repr)
        )

    def test__handlers(self):
        self.assertEqual(self._settings_mapping._handlers,
                         (('enabled',), ('history',)))

    def test__pre_call_handlers(self):
        self.assertEqual(self._settings_mapping._pre_call_handlers,
                         ('enabled',))

    def test__post_call_handlers(self):
        self.assertEqual(self._settings_mapping._post_call_handlers,
                         ('history',))

    def test__get_DecoSetting(self):
        for key in self._settings_mapping._deco_class_settings_dict:
            self.assertEqual(self._settings_mapping._get_DecoSetting(key),
                             self._settings_mapping._deco_class_settings_dict[key])
            self.assertEqual(key, self._settings_mapping._get_DecoSetting(key).name)

    def test___getitem__(self):
        """Test descriptors too"""
        mapping = self._settings_mapping
        self.assertEqual(mapping['enabled'], True)
        self.assertEqual(mapping['folderol'], 'bar')
        self.assertEqual(mapping['my_setting'], 'eek')
        self.assertEqual(mapping['your_setting'], "Howdy")
        self.assertEqual(mapping.enabled, True)
        self.assertEqual(mapping.folderol, 'bar')
        self.assertEqual(mapping.my_setting, 'eek')
        self.assertEqual(mapping.your_setting, "Howdy")

        # local helpers for assertRaises: bad / hidden lookups
        def get_bad_item(bad_key):
            return mapping[bad_key]
        def get_bad_attr(bad_attr):
            return getattr(mapping, bad_attr)
        def get_hidden_item():
            return mapping['history']
        def get_hidden_attr():
            return mapping.history
        self.assertRaises(KeyError, get_bad_item, 'no_such_key')
        self.assertRaises(AttributeError, get_bad_attr, 'no_such_attr')
        self.assertRaises(KeyError, get_hidden_item)
        self.assertRaises(AttributeError, get_hidden_attr)

    def test___setitem__(self):
        """Test descriptors too.
        Test your_setting -- mutable=False"""
        mapping = self._settings_mapping
        mapping['enabled'] = False
        mapping['folderol'] = 'BAR'
        mapping['my_setting'] = 'OUCH'
        def set_item_not_mutable(s):
            mapping['your_setting'] = s
        self.assertRaises(ValueError, set_item_not_mutable, "HARK! Who goes there?")
        self.assertEqual(mapping['enabled'], False)
        self.assertEqual(mapping['folderol'], 'BAR')
        self.assertEqual(mapping['my_setting'], 'OUCH')
        self.assertEqual(mapping['your_setting'], "Howdy")    # not mutable, so not changed!
        # Now set back to mostly 'orig' values using descriptors
        mapping.enabled = True
        mapping.folderol = 'bar'
        mapping.my_setting = 'eek'
        def set_attr_not_mutable(s):
            mapping.your_setting = s
        self.assertRaises(ValueError, set_attr_not_mutable, "This won't work either.")
        self.assertEqual(mapping.enabled, True)
        self.assertEqual(mapping.folderol, 'bar')
        self.assertEqual(mapping.my_setting, 'eek')
        self.assertEqual(mapping.your_setting, "Howdy")
        # _force_mutable=True bypasses the mutable=False guard
        mapping.__setitem__('your_setting', 'not howdy', _force_mutable=True)
        self.assertEqual(mapping.your_setting, "not howdy")
        # Now test setting a nonexistent key, & a nonexistent attr/descr
        def set_bad_item(bad_key, val):
            mapping[bad_key] = val
        self.assertRaises(KeyError, set_bad_item, 'no_such_key', 413)
        ## BUT The following does NOT raise an AttributeError
        # def set_bad_attr(bad_attr, val):
        #     setattr(mapping, bad_attr, val)
        #
        # self.assertRaises(AttributeError, set_bad_attr, 'no_such_attr', 495)
        mapping.no_such_attr = 495
        self.assertEqual(mapping.no_such_attr, 495)
        # Test setting settings with visible=False
        def set_hidden_item():
            mapping['history'] = False
        def set_hidden_attr():
            mapping.history = False
        self.assertRaises(KeyError, set_hidden_item)
        # Get value of history setting
        history_val = mapping.get_final_value('history', fparams=None)
        # You CAN add an attribute called 'history'
        # BUT it is *not* the 'history' setting:
        mapping.history = not history_val
        self.assertEqual(mapping.history, not history_val)
        # The setting is unchanged:
        self.assertEqual(history_val, mapping.get_final_value('history', fparams=None))
        # Actually change the value:
        mapping.__setitem__('history', not history_val, _force_visible=True)
        # get new val of history
        new_val = mapping.get_final_value('history', fparams=None)
        self.assertEqual(new_val, not history_val)

    def test___len__(self):
        # only the 4 visible settings are counted
        self.assertEqual(len(self._settings_mapping), 4)

    def test___iter__(self):
        names = [name for name in self._settings_mapping]
        self.assertEqual(names, ['enabled', 'folderol', 'my_setting', 'your_setting'])

    def test_items(self):
        items = [item for item in self._settings_mapping.items()]
        self.assertEqual(items,
                         [('enabled', self._settings_mapping['enabled']),
                          ('folderol', self._settings_mapping['folderol']),
                          ('my_setting', self._settings_mapping['my_setting']),
                          ('your_setting', self._settings_mapping['your_setting'])]
        )

    def test___contains__(self):
        self.assertIn('enabled', self._settings_mapping)
        self.assertIn('folderol', self._settings_mapping)
        self.assertIn('my_setting', self._settings_mapping)
        self.assertIn('your_setting', self._settings_mapping)
        self.assertNotIn('history', self._settings_mapping)       # hidden
        self.assertNotIn('no_such_key', self._settings_mapping)

    def test___repr__(self):
        """
        Split into cases because this bug got fixed in Python 3.5:
        http://bugs.python.org/issue23775
        "Fix pprint of OrderedDict.
        Currently pprint prints the repr of OrderedDict if it fits in one line,
        and prints the repr of dict if it is wrapped.
        Proposed patch makes pprint always produce an output compatible
        with OrderedDict's repr.
        "
        The bugfix also affected tests in test_log_calls_more.py (see docstring there).
        """
        if (sys.version_info.major == 3 and sys.version_info.minor >= 5
           ) or sys.version_info.major > 3:     # :)
            the_repr = """
            DecoSettingsMapping(
                deco_class=TestDecoSettingsMapping,
                ** OrderedDict([
                    ('enabled', True),
                    ('folderol', 'bar'),
                    ('my_setting', 'eek'),
                    ('your_setting', 'Howdy')]) )
            """
        else:   # Py <= 3.4
            the_repr = """
            DecoSettingsMapping(
                deco_class=TestDecoSettingsMapping,
                ** {
                    'enabled': True,
                    'folderol': 'bar',
                    'my_setting': 'eek',
                    'your_setting': 'Howdy'} )
            """
        self.assertEqual(
            collapse_whitespace(repr(self._settings_mapping)),
            collapse_whitespace(the_repr),
        )

    def test___str__(self):
        ## print("self._settings_mapping str: %s" % str(self._settings_mapping))
        ## {'folderol': 'bar', 'my_setting': 'eek', 'your_setting': 'Howdy', 'enabled': True}
        # str() yields a dict literal; compare via eval to ignore key order
        self.assertDictEqual(
            eval(str(self._settings_mapping)),
            {'folderol': 'bar', 'my_setting': 'eek', 'your_setting': 'Howdy', 'enabled': True}
        )

    def test_update(self):
        mapping = self._settings_mapping
        d = {'enabled': False, 'folderol': 'tomfoolery', 'my_setting': 'balderdash=', 'your_setting': "Goodbye."}
        mapping.update(**d)     # pass as keywords
        self.assertEqual(mapping.enabled, False)
        self.assertEqual(mapping.folderol, 'tomfoolery')
        self.assertEqual(mapping.my_setting, 'balderdash=')
        self.assertEqual(mapping.your_setting, 'Howdy')     # NOT changed, and no exception
        self.assertEqual(len(mapping), 4)
        mapping.enabled = not mapping.enabled
        mapping.folderol = 'nada'
        mapping.my_setting = "something-new"
        mapping.update(d)       # pass as dict
        self.assertEqual(mapping.enabled, False)
        self.assertEqual(mapping.folderol, 'tomfoolery')
        self.assertEqual(mapping.my_setting, 'balderdash=')
        self.assertEqual(mapping.your_setting, 'Howdy')     # NOT changed, and no exception
        self.assertEqual(len(mapping), 4)
        # multiple dicts: later dicts win for overlapping keys
        d1 = {'enabled': False, 'folderol': 'gibberish'}
        d2 = {'enabled': True, 'my_setting': 'hokum='}
        mapping.update(d1, d2)
        self.assertEqual(mapping.enabled, True)
        self.assertEqual(mapping.folderol, 'gibberish')
        self.assertEqual(mapping.my_setting, 'hokum=')
        mapping.update(d1, d2, **d)
        self.assertEqual(mapping.enabled, False)
        self.assertEqual(mapping.folderol, 'tomfoolery')
        self.assertEqual(mapping.my_setting, 'balderdash=')
        self.assertRaises(
            KeyError,
            mapping.update,
            no_such_setting=True
        )
        self.assertRaises(
            KeyError,
            mapping.update,
            history=True
        )

    def test_as_OD(self):
        self.assertDictEqual(
            OrderedDict([('enabled', True), ('folderol', 'bar'), ('my_setting', 'eek'), ('your_setting', 'Howdy')]),
            self._settings_mapping.as_OD()
        )

    def test_as_dict(self):
        self.assertDictEqual(self._settings_mapping.as_dict(),
                             {'folderol': 'bar', 'my_setting': 'eek', 'your_setting': 'Howdy', 'enabled': True})

    def test__get_tagged_value(self):
        # _get_tagged_value returns (is_indirect, value) pairs;
        # a str value ending in '=' (for a setting that allows indirection) is indirect.
        mapping = self._settings_mapping
        mapping['enabled'] = "enabled_kwd"
        mapping['folderol'] = 'my_setting_kwd='
        mapping['my_setting'] = 'OUCH'
        self.assertEqual(mapping._get_tagged_value('enabled'), (True, 'enabled_kwd'))
        self.assertEqual(mapping._get_tagged_value('folderol'), (False, 'my_setting_kwd='))
        self.assertEqual(mapping._get_tagged_value('my_setting'), (False, 'OUCH'))
        self.assertEqual(mapping._get_tagged_value('your_setting'), (False, 'Howdy'))
        def bad_key():
            mapping._get_tagged_value('no_such_key')
        self.assertRaises(KeyError, bad_key)

    def test_get_final_value(self):
        mapping = self._settings_mapping
        # direct value
        v = mapping.get_final_value('enabled', fparams=None)
        self.assertEqual(v, True)
        # indirect value resolved from an explicit kwargs dict
        mapping['enabled'] = 'enabled_kwd='
        d = {'enabled_kwd': 17}
        v = mapping.get_final_value('enabled', d, fparams=None)
        self.assertEqual(v, 17)
        # indirect value resolved from a function's default parameter value
        def f(a, enabled_kwd=3):
            pass
        fparams = inspect.signature(f).parameters
        v = mapping.get_final_value('enabled', fparams=fparams)
        self.assertEqual(v, 3)
        # named keyword absent --> indirect_default (False)
        def g(a, wrong_kwd='nevermind'):
            pass
        gparams = inspect.signature(g).parameters
        v = mapping.get_final_value('enabled', fparams=gparams)
        self.assertEqual(v, False)
        # falsy default ([]) --> indirect_default (False)
        def h(a, enabled_kwd=[]):
            pass
        hparams = inspect.signature(h).parameters
        v = mapping.get_final_value('enabled', fparams=hparams)
        self.assertEqual(v, False)
import logging
class TestDecoSettingsMapping_set_reset_defaults(TestCase):
    """Tests for DecoSettingsMapping.set_defaults / reset_defaults:
    type- and falsy-checking of new defaults, rejection of unknown and
    hidden settings, and restoration of the registered defaults."""

    @classmethod
    def setUpClass(cls):
        cls._settings = (
            DecoSettingEnabled('enabled', indirect_default=False),
            DecoSetting('number', (str, int), '12', allow_falsy=True, allow_indirect=False),
            DecoSetting('my_logger', (str, logging.Logger), 'nix', allow_falsy=False, allow_indirect=True),
            DecoSetting('your_setting', str, 'off', allow_falsy=False, allow_indirect=False,
                        mutable=False),
            DecoSettingHistory('history', visible=False),
        )
        DecoSettingsMapping.register_class_settings('TestDecoSettingsMapping_set_reset_defaults',
                                                    cls._settings)
        # # "'enabled' setting default value = False"
        # print("'enabled' setting default value =",
        #       cls._settings[0].default)

    def setUp(self):
        """
        """
        # nothing per-test to set up
        pass

    def test_set_reset_defaults(self):
        clsname = self.__class__.__name__
        settings_map = DecoSettingsMapping.get_deco_class_settings_dict(clsname)
        # try set 'my_logger' = '' ==> no effect (setting doesn't .allow_falsy)
        DecoSettingsMapping.set_defaults(clsname, {'my_logger': ''})
        self.assertEqual(settings_map['my_logger'].default, 'nix')
        # try setting 'your_setting' = 500 ==> no effect (not acceptable type)
        DecoSettingsMapping.set_defaults(clsname, {'your_setting': 500})
        self.assertEqual(settings_map['your_setting'].default, 'off')
        # try setting 'no_such_setting' = 0 ==> KeyError
        def set_no_such_setting():
            DecoSettingsMapping.set_defaults(clsname, {'no_such_setting': 0})
        self.assertRaises(KeyError, set_no_such_setting)
        # try setting 'history' = False ==> KeyError (setting not visible)
        def set_history():
            DecoSettingsMapping.set_defaults(clsname, {'history': False})
        self.assertRaises(KeyError, set_history)
        # set enabled=False, number=17 (int);
        # check that .default of things in settings_map reflect this
        DecoSettingsMapping.set_defaults(clsname, dict(enabled=False, number=17))
        self.assertEqual(settings_map['enabled'].default, False)
        self.assertEqual(settings_map['number'].default, 17)
        self.assertEqual(settings_map['my_logger'].default, 'nix')
        self.assertEqual(settings_map['your_setting'].default, 'off')
        # self.assertEqual(settings_map['history'].default, 'True')
        # set enabled=True, number='100', your_setting='Howdy';
        # check that .default of things in settings_map reflect this
        DecoSettingsMapping.set_defaults(clsname, dict(enabled=True, number='100', your_setting='Howdy'))
        self.assertEqual(settings_map['enabled'].default, True)
        self.assertEqual(settings_map['number'].default, '100')
        self.assertEqual(settings_map['my_logger'].default, 'nix')
        self.assertEqual(settings_map['your_setting'].default, 'Howdy')
        # reset, see that defaults are correct
        DecoSettingsMapping.reset_defaults(clsname)
        self.assertEqual(settings_map['enabled'].default, True)     # the default for DecoSettingEnabled
        self.assertEqual(settings_map['number'].default, '12')
        self.assertEqual(settings_map['my_logger'].default, 'nix')
        self.assertEqual(settings_map['your_setting'].default, 'off')
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_deco_settings.py",
"copies": "1",
"size": "24702",
"license": "mit",
"hash": -952965135023368700,
"line_mean": 42.2609457093,
"line_max": 126,
"alpha_frac": 0.5885758238,
"autogenerated": false,
"ratio": 3.9764971023824853,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001691008441121366,
"num_lines": 571
} |
__author__ = "Brian O'Neill" # BTO
# __version__ = '0.3.0'
__doc__ = """
DecoSettingsMapping -- class that's usable with any class-based decorator
that has several keyword parameters; this class makes it possible for
a user to access the collection of settings as an attribute
(object of type DecoSettingsMapping) of the decorated function.
The attribute/obj of type DecoSettingsMapping provides
(*) a mapping interface for the decorator's keyword params
(*) an attribute interface for its keyword params
i.e. attributes of the same names,
as well as 'direct' and 'indirect' values for its keyword params
q.v.
Using this class, any setting under its management can take two kinds of values:
direct and indirect, which you can think of as static and dynamic respectively.
Direct/static values are actual values used when the decorated function is
interpreted, e.g. enabled=True, args_sep=" / ". Indirect/dynamic values are
strings that name keyword arguments of the decorated function; when the
decorated function is called, the arguments passed by keyword and the
parameters of the decorated function are searched for the named parameter,
and if it is found, its value is used. Parameters whose normal type is str
(args_sep) indicate an indirect value by appending an '='.
Thus, in:
@log_calls(args_sep='sep=', prefix="MyClass.")
def f(a, b, c, sep='|'): pass
args_sep has an indirect value, and prefix has a direct value. A call can
dynamically override the default value in the signature of f by supplying
a value:
f(1, 2, 3, sep=' $ ')
or use func's default by omitting the sep argument.
A decorated function doesn't have to explicitly declare the named parameter,
if its signature includes **kwargs. Consider:
@log_calls(enabled='enable')
def func1(a, b, c, **kwargs): pass
@log_calls(enabled='enable')
def func2(z, **kwargs): func1(z, z+1, z+2, **kwargs)
When the following statement is executed, the calls to both func1 and func2
will be logged:
func2(17, enable=True)
whereas neither of the following two statements will trigger logging:
func2(42, enable=False)
func2(99)
For consistency, any parameter value that names a keyword
parameter of the decorated function can also end in a trailing '=', which
is stripped. Thus, enabled='enable_=' indicates an indirect value supplied
by the keyword 'enable_' of the decorated function.
"""
from collections import OrderedDict, defaultdict
import pprint
import warnings # v0.3.0b23
from .helpers import is_keyword_param, is_quoted_str
__all__ = ['DecoSetting', 'DecoSettingsMapping']
#----------------------------------------------------------------------------
# DecoSetting & basic subclasses
#----------------------------------------------------------------------------
class DecoSetting():
    """Static descriptor ("a little struct") for one decorator setting
    (keyword parameter) -- metadata only, no current value.

    Extra keyword arguments passed to the constructor become attributes of the
    object; a sorted list of their names is kept in ``_user_attrs`` so that
    ``__repr__`` renders them in a stable, reproducible order.

    ``indirect_default`` (v0.3.0b25) is one such recognized extra: if absent
    from the keyword args, it falls back to ``default``. It is the value used
    when an indirect value of a setting (e.g. "enabled") can't be resolved.

    Subclasses may supply ``pre_call_handler(self, context)`` and/or
    ``post_call_handler(self, context)`` methods returning a str (or empty),
    e.g.::

        def pre_call_handler(self, context: dict):
            return ("%s <== called by %s"
                    % (context['output_fname'],
                       ' <== '.join(context['call_list'])))

    ``context`` carries keys such as: decorator, settings, indent,
    prefixed_fname, output_fname, fparams, argcount, argnames, argvals,
    varargs, explicit_kwargs, implicit_kwargs, defaulted_kwargs, call_list,
    args, kwargs; for a post_call_handler also elapsed_secs, timestamp,
    retval.
    """
    def __init__(self, name, final_type, default, *,
                 allow_falsy, allow_indirect=True, mutable=True, visible=True,
                 pseudo_setting=False,              # v0.3.0b24
                 **more_attributes):
        """An invisible setting can never take indirect values."""
        assert not default or isinstance(default, final_type)
        self.name = name                            # the setting's key
        self.final_type = final_type                # bool, int, str, logging.Logger, ... or a tuple of types
        self.default = default
        self.allow_falsy = allow_falsy              # may the final value of the setting be falsy?
        self.allow_indirect = allow_indirect and visible    # not visible => not indirect
        self.mutable = mutable
        self.visible = visible
        self.pseudo_setting = pseudo_setting        # v0.3.0b24
        # v0.3.0b25 -- fall back to .default if the caller didn't supply one
        self.indirect_default = more_attributes.pop('indirect_default', self.default)
        # more_attributes isn't ordered, but repr must write fields the same
        # way every time, so fix an order (sorted) and stick to it.
        self._user_attrs = sorted(list(more_attributes))
        self.__dict__.update(more_attributes)

    def __repr__(self):
        # Render final_type as a bare name, or "(name1, name2, ...)" for a tuple of types.
        if isinstance(self.final_type, tuple):
            type_text = '(' + ', '.join(t.__name__ for t in self.final_type) + ')'
        else:
            type_text = self.final_type.__name__
        parts = [
            "DecoSetting(%r, %s, %r, allow_falsy=%s, allow_indirect=%s, "
            "mutable=%s, visible=%s, pseudo_setting=%r, indirect_default=%r"
            %
            (self.name, type_text, self.default, self.allow_falsy, self.allow_indirect,
             self.mutable, self.visible, self.pseudo_setting, self.indirect_default)
        ]
        # Append the user-supplied attributes, in the fixed sorted order.
        parts.extend(", %s=%r" % (attr, self.__dict__[attr])
                     for attr in self._user_attrs)
        parts.append(")")
        return ''.join(parts)

    def value_from_str(self, s):
        """Virtual hook for _deco_base._read_settings_file (0.2.4.post1);
        the base implementation accepts no string at all."""
        raise ValueError()

    def has_acceptable_type(self, value):
        """True iff value is an instance of final_type (a type, or tuple of types)."""
        return isinstance(value, self.final_type)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# DecoSetting subclasses
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class DecoSetting_bool(DecoSetting):
    def value_from_str(self, s):
        """Map 'true'/'false' (any letter case) to the corresponding bool;
        any other string yields this setting's default.
        Used by _deco_base._read_settings_file. (0.2.4.post1)"""
        return {'TRUE': True, 'FALSE': False}.get(s.upper(), self.default)
class DecoSetting_int(DecoSetting):
    def value_from_str(self, s):
        """Parse s as an int; on failure defer to the base class,
        which raises ValueError.
        Used by _deco_base._read_settings_file. (0.2.4.post1)"""
        try:
            parsed = int(s)
        except ValueError:
            return super().value_from_str(s)
        return parsed
class DecoSetting_str(DecoSetting):
    def value_from_str(self, s):
        """Return the contents of s with its enclosing quotes stripped;
        s must be quoted with the same quote char at each end, otherwise
        defer to the base class, which raises ValueError. (0.2.4.post1)"""
        if not is_quoted_str(s):
            return super().value_from_str(s)
        return s[1:-1]
#----------------------------------------------------------------------------
# DecoSettingsMapping
#----------------------------------------------------------------------------
class DecoSettingsMapping():
    """Usable with any class-based decorator that wants to implement
    a mapping interface and attribute interface for its keyword params,
    as well as 'direct' and 'indirect' values for its keyword params."""
    # Class-level mapping: classname |-> OrderedDict of class's settings (info 'structs')
    _classname2SettingsData_dict = {}
    # Class-level mapping: classname |-> OrderedDict of the settings' original ("factory") defaults
    _classname2SettingsDataOrigDefaults_dict = {}
    # Class-level mapping: classname |-> pair of tuples:
    #     (pre-call handler settings names,
    #      post-call handler settings names)
    _classname2handlers = {}

    # When this is the last char of a parameter value (to the decorator),
    # interpret the value of the parameter to be the name of
    # a keyword parameter ** of the wrapped function f **
    INDIRECT_VALUE_MARKER = '='

    @classmethod
    def register_class_settings(cls, deco_classname, settings_iter):
        """
        Called before __init__, presently - by deco classes.
        Client classes should call this *** from class level ***
        e.g.
            DecoSettingsMapping.register_class_settings('log_calls', _setting_info_list)
        Add item (deco_classname, od) to _classname2SettingsData_dict
        where od is an ordered dict built from items of settings_iter.

        cls: this class
        deco_classname: key for dict produced from settings_iter
        settings_iter: iterable of DecoSetting objs
        """
        # Only do once per deco class
        if deco_classname in cls._classname2SettingsData_dict:
            return

        od = OrderedDict()
        pre_handlers = []
        post_handlers = []
        for setting in settings_iter:
            od[setting.name] = setting
            # Record which settings' classes define call handlers.
            # Deliberately looked up on the class __dict__, not via inheritance.
            if setting.__class__.__dict__.get('pre_call_handler'):
                pre_handlers.append(setting.name)
            if setting.__class__.__dict__.get('post_call_handler'):
                post_handlers.append(setting.name)

        cls._classname2SettingsData_dict[deco_classname] = od
        # v0.3.0b23 -- keep the factory defaults in an OrderedDict too,
        # so reset_defaults can restore them later.
        cls._classname2SettingsDataOrigDefaults_dict[deco_classname] = OrderedDict(
            [(name, od[name].default) for name in od]
        )
        cls._classname2handlers[deco_classname] = (
            tuple(pre_handlers), tuple(post_handlers))

        # <<<attributes>>> Set up descriptors -- OMIT .pseudo_setting !
        for name in od:
            if od[name].visible and not od[name].pseudo_setting:
                setattr(cls, name, cls.make_setting_descriptor(name))

    # v0.3.0b24
    @classmethod
    def get_factory_defaults_OD(cls, deco_classname) -> OrderedDict:
        """Return an OrderedDict of the original (pre-set_defaults) defaults,
        restricted to visible, non-pseudo settings."""
        class_settings = cls._classname2SettingsData_dict[deco_classname]
        return OrderedDict(
            [(name, value)
             for name, value in cls._classname2SettingsDataOrigDefaults_dict[deco_classname].items()
             if class_settings[name].visible and not class_settings[name].pseudo_setting
            ]
        )

    # v0.3.0b24
    @classmethod
    def get_defaults_OD(cls, deco_classname) -> OrderedDict:
        """Return an OrderedDict of the current (possibly set_defaults-modified)
        defaults, restricted to visible, non-pseudo settings."""
        return OrderedDict(
            [(name, setting.default)
             for name, setting in cls._classname2SettingsData_dict[deco_classname].items()
             if setting.visible and not setting.pseudo_setting
            ]
        )

    @classmethod
    def set_defaults(cls, deco_classname, defaults: dict):
        """Change global default values for all (subsequent) uses of decorator
        with name deco_classname.
        Only settings that are *visible* for cls can be changed.
        Raises KeyError if any key in defaults isn't actually a setting,
        or is not "visible". In both cases no changes are made.
        Ignores any items in `defaults` whose values are of incorrect type,
        or whose value is 'falsy' but the setting has .allow_falsy == False.
        These behaviors are what __setitem__ & __getitem__ do.
        :param deco_classname: name of decorator class, subclass of _deco_base
        :param defaults: dict of setting-name keys and new default values
        """
        # Change defaults of items in cls._classname2SettingsData_dict[deco_classname]
        deco_settings = cls._classname2SettingsData_dict[deco_classname]
        # Integrity check FIRST, so that no changes are made if any key is bad:
        # if setting_name is not a "setting" or it's not a "visible" setting for cls,
        # raise KeyError: that's what __getitem__/__setitem__ do
        for setting_name in defaults:
            if setting_name not in deco_settings:
                raise KeyError(
                    "set_defaults: no such setting (key) as '%s'" % setting_name)
            elif not deco_settings[setting_name].visible:
                raise KeyError(
                    "set_defaults: setting (key) '%s' is not visible in class %s."
                    % (setting_name, deco_classname))

        # TODO 'indirect' values -- Disallow? anyway, prevent? Somehow.
        #  |    Perhaps just get rid of any trailing INDIRECT_VALUE_MARKER ('=')
        # Change working default values; silently skip values of wrong type
        # or disallowed falsy values (mirrors __setitem__'s fixup policy).
        for setting_name in defaults:
            deco_setting = deco_settings[setting_name]
            new_default_val = defaults[setting_name]
            if ((new_default_val or deco_setting.allow_falsy)
                and deco_setting.has_acceptable_type(new_default_val)
               ):
                # set working default value = new_default_val
                deco_setting.default = new_default_val

    @classmethod
    def reset_defaults(cls, deco_classname):
        """Revert to initial defaults as per documentation & static declarations in code.
        """
        # v0.3.0b24 -- restore every setting's default from the saved originals
        orig_defaults = cls._classname2SettingsDataOrigDefaults_dict[deco_classname]
        settings_map = cls._classname2SettingsData_dict[deco_classname]
        for name in settings_map:
            settings_map[name].default = orig_defaults[name]

    # <<<attributes>>>
    @classmethod
    def make_setting_descriptor(cls, name):
        class SettingDescr():
            """A little data descriptor which just delegates
            to __getitem__ and __setitem__ of instance"""
            def __get__(self, instance, owner):
                """
                instance: a DecoSettingsMapping
                owner: class DecoSettingsMapping(?)"""
                return instance[name]

            def __set__(self, instance, value):
                """
                instance: a DecoSettingsMapping
                value: what to set"""
                # __setitem__ raises KeyError unless name is a legit setting name
                # (for this obj, as per this obj's initialization)
                instance[name] = value

        return SettingDescr()

    @property
    def _handlers(self) -> tuple:
        """Can't use/call till self.deco_class set in __init__.
        Return: duple of tuples (pre-call-handler setting keys, post-call-handler setting keys).
        """
        return self._classname2handlers[self.deco_class.__name__]

    @property
    def _pre_call_handlers(self) -> tuple:
        """Can't use/call till self.deco_class set in __init__"""
        return self._handlers[0]

    @property
    def _post_call_handlers(self) -> tuple:
        """Can't use/call till self.deco_class set in __init__"""
        return self._handlers[1]

    @property
    def _deco_class_settings_dict(self) -> OrderedDict:
        """Can't use/call till self.deco_class set in __init__"""
        return self._classname2SettingsData_dict[self.deco_class.__name__]

    @classmethod
    def get_deco_class_settings_dict(cls, clsname) -> OrderedDict:
        """For use when loading settings files -
        decorator's DecoSettingsMapping doesn't exist yet."""
        return cls._classname2SettingsData_dict[clsname]

    def _get_DecoSetting(self, key) -> DecoSetting:
        """
        :param key: a setting key.
        :return: the corresponding DecoSetting.
        """
        return self._deco_class_settings_dict[key]

    def _is_visible(self, key) -> bool:
        """key - a setting name."""
        return self._get_DecoSetting(key).visible

    @property
    def _visible_setting_names_gen(self):
        # NOTE: returns a generator, not a list (was mis-annotated `-> list`).
        return (name for name in self._tagged_values_dict if self._is_visible(name))

    def __init__(self, *, deco_class, **values_dict):
        """deco_class: class that has already stored its settings
        by calling register_class_settings(cls, classname, settings_iter).
        values_dict: setting-name/value pairs, values such as are passed
        to log_calls.__init__ -- each either 'direct' or 'indirect'.
        Assumption: every name in values_dict is the .name
        of some registered DecoSetting."""
        self.deco_class = deco_class
        class_settings_dict = self._deco_class_settings_dict

        # Insert values in the proper order - as given by caller,
        # both visible and not visible ones.
        self._tagged_values_dict = OrderedDict()    # stores pairs inserted by __setitem__
        for k in class_settings_dict:
            if k in values_dict:    # allow k to be set later
                self.__setitem__(k, values_dict[k],
                                 info=class_settings_dict[k],
                                 _force_mutable=True,
                                 _force_visible=True)

    def registered_class_settings_repr(self) -> str:
        """Return Python-ish source for the register_class_settings call
        that would recreate this mapping's registered settings."""
        list_of_settingsinfo_reprs = []
        for k, info in self._deco_class_settings_dict.items():
            list_of_settingsinfo_reprs.append(repr(info))

        return ("DecoSettingsMapping.register_class_settings("
                "    " + self.deco_class.__name__ + ",\n"
                "    [%s\n"
                "])") % ',\n     '.join(list_of_settingsinfo_reprs)

    def __setitem__(self, key, value,
                    info=None, _force_mutable=False, _force_visible=False):
        """
        key: name of setting, e.g. 'prefix';
             must be in self._deco_class_settings_dict()
        value: something passed to __init__ (of log_calls),
        info: self.deco_class_settings_dict[key] or None
        _force_mutable: if key is already in self._tagged_values_dict and
                        it's not mutable, attempting to __setitem__ on it
                        raises ValueError, unless _force_mutable is True
                        in which case it will be written.
        Store pair (is_indirect, modded_val) at key in self._tagged_values_dict[key]
        where
            is_indirect: bool,
            modded_val = val if kind is direct (not is_indirect),
                       = keyword of wrapped fn if is_indirect
                         (sans any trailing '=')
        THIS method assumes that the values in self._deco_class_settings_dict()
        are DecoSetting objects -- all fields of that class are used.
        You can only set visible settings.
        """
        # Blithely assuming that if info is not None then it's DecoSetting for key
        if not info:
            if key not in self._deco_class_settings_dict:
                raise KeyError(
                    "no such setting (key) as '%s'" % key)
            info = self._get_DecoSetting(key)
        if not info.visible and not _force_visible:
            raise KeyError(
                "setting (key) '%s' is not visible in class '%s'."
                % (key, self.deco_class.__name__))

        final_type = info.final_type
        default = info.default
        allow_falsy = info.allow_falsy      # 0.2.4 was info.default :| FIXED.
        allow_indirect = info.allow_indirect

        # if the setting is immutable (/not mutable/set-once-only),
        # raise ValueError unless _force_mutable:
        if not info.mutable and not _force_mutable:
            # BUGFIX: error message was missing the opening quote around %s
            raise ValueError("'%s' is write-once (current value: %r)"
                             % (key, self._tagged_values_dict[key][1]))
        if not allow_indirect:
            self._tagged_values_dict[key] = False, value
            return

        # Detect/fixup direct/static values, except for final_type == str
        if not isinstance(value, str) or not value:
            indirect = False
            # value not a str, or == '', so use value as-is if valid, else default
            if (not value and not allow_falsy) or not info.has_acceptable_type(value):
                value = default
        else:                           # val is a nonempty str
            if final_type != str and \
               (not isinstance(final_type, tuple) or str not in final_type):
                # It IS indirect, and val designates a keyword of f
                indirect = True
                # Remove trailing self.INDIRECT_VALUE_MARKER if any
                if value[-1] == self.INDIRECT_VALUE_MARKER:
                    value = value[:-1]
            else:
                # final_type == str, or
                # isinstance(final_type, tuple) and str in final_type:
                # val denotes an indirect value (an f-keyword)
                # IFF its last char is INDIRECT_VALUE_MARKER
                indirect = (value[-1] == self.INDIRECT_VALUE_MARKER)
                if indirect:
                    value = value[:-1]

        self._tagged_values_dict[key] = indirect, value

    def __getitem__(self, key):
        """You can only get visible settings."""
        if not self._is_visible(key):
            raise KeyError(
                "setting (key) '%s' is not visible in class '%s'."
                % (key, self.deco_class.__name__))
        indirect, value = self._tagged_values_dict[key]
        # Indirect values are surfaced with the trailing marker restored.
        return value + self.INDIRECT_VALUE_MARKER if indirect else value

    def __len__(self):
        """Return # of visible settings."""
        return len(list(self._visible_setting_names_gen))

    def __iter__(self):
        """Return iterable of names of visible settings."""
        return self._visible_setting_names_gen

    def items(self):
        """Return iterable of items of visible settings."""
        return ((name, self.__getitem__(name)) for name in self._visible_setting_names_gen)

    def __contains__(self, key):
        """True iff key is a visible setting."""
        return key in self._tagged_values_dict and self._is_visible(key)

    def __repr__(self):
        return ("DecoSettingsMapping( \n"
                "    deco_class=%s,\n"
                "    ** %s\n"
                ")") % \
               (self.deco_class.__name__,
                pprint.pformat(self.as_OD(), indent=8)
               )

    def __str__(self):
        return str(self.as_dict())

    def as_OD(self) -> OrderedDict:
        """Return OrderedDict of visible settings (only).
        v0.3.0b23
        Renamed ``as_OrderedDict`` ==> ``as_OD`` -- to match new classmethods
        ``log_calls.get_factory_defaults_OD()``, ``log_calls.get_defaults_OD()``.
        ``as_OrderedDict`` deprecated.
        """
        od = OrderedDict()
        for k, v in self._tagged_values_dict.items():
            if self._is_visible(k):
                od[k] = v[1]
        return od

    def as_OrderedDict(self) -> OrderedDict:
        """Deprecated alias for ``as_OD`` -- v0.3.0b23."""
        # Issue a warning. (and don't do it ALL the time.)
        # In Py3.2+ "DeprecationWarning is now ignored by default"
        # (https://docs.python.org/3/library/warnings.html),
        # so to see it, you have to run the Python interpreter
        # with the -W switch, e.g. `python -W default run_tests.py`
        # [equivalently: `python -Wd run_tests.py`]
        warnings.warn("Warning: 'as_OrderedDict()' method is deprecated, use 'as_OD()' instead.",
                      DeprecationWarning,
                      stacklevel=2)     # refer to stackframe of caller
        return self.as_OD()

    def as_dict(self):
        """Return dict of visible settings only."""
        return dict(self.as_OD())

    def update(self, *dicts, _force_mutable=False, **d_settings):
        """Do __setitem__ for every key/value pair in every dictionary
        in dicts + (d_settings,).
        Allow but ignore attempts to write to immutable keys!
        This permits the user to get the settings as_dict() or as_OrderedDict(),
        make changes & use them,
        and then restore the original settings, which will contain items
        for immutable settings too. Otherwise the user would have to
        remove all the immutable keys before doing update - ugh.
        0.3.0 added `_force_mutable` keyword param.
        """
        for d in dicts + (d_settings,):
            for k, v in d.items():
                info = self._deco_class_settings_dict.get(k)
                # skip immutable settings
                if info and not self._deco_class_settings_dict[k].mutable and not _force_mutable:
                    continue
                # if not info, KeyError from __setitem__
                self.__setitem__(k, v, info=info, _force_mutable=_force_mutable)

    def _get_tagged_value(self, key):
        """Return (indirect, value) for key"""
        return self._tagged_values_dict[key]

    def get_final_value(self, name, *dicts, fparams):
        """
        name:    key into self._tagged_values_dict, self._setting_info_list
        *dicts:  varargs, usually just kwargs of a call to some function f,
                 but it can also be e.g. *(explicit_kwargs, defaulted_kwargs,
                 implicit_kwargs, with fparams=None) of that function f,
        fparams: inspect.signature(f).parameters of that function f
        THIS method assumes that the objs stored in self._deco_class_settings_dict
        are DecoSetting objects -- this method uses every attribute of that class
        except allow_indirect.
        A very (deco-)specific method, it seems.
        """
        indirect, di_val = self._tagged_values_dict[name]   # di_ - direct or indirect
        if not indirect:
            return di_val

        # di_val designates a (potential) f-keyword
        setting_info = self._deco_class_settings_dict[name]
        final_type = setting_info.final_type
        # v0.3.0b25 -- unresolved indirect settings fall back to indirect_default
        default = setting_info.indirect_default
        allow_falsy = setting_info.allow_falsy

        # If di_val is in any of the dictionaries, get corresponding value
        found = False
        for d in dicts:
            if di_val in d:             # actually passed to f
                val = d[di_val]
                found = True
                break

        if not found:
            if fparams and is_keyword_param(fparams.get(di_val)):   # not passed; explicit f-kwd?
                # yes, explicit param of f, so use f's default value
                val = fparams[di_val].default
            else:
                val = default

        # fixup: "loggers" that aren't loggers (or strs), "strs" that aren't strs, etc
        if (not val and not allow_falsy) or \
           (type(final_type) == type and not isinstance(val, final_type)) or \
           (type(final_type) == tuple and all((not isinstance(val, t) for t in final_type))):
            val = default

        return val
| {
"repo_name": "Twangist/log_calls",
"path": "log_calls/deco_settings.py",
"copies": "1",
"size": "27911",
"license": "mit",
"hash": 3632666086308834000,
"line_mean": 41.8082822086,
"line_max": 119,
"alpha_frac": 0.5869012217,
"autogenerated": false,
"ratio": 4.1441722345953975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016287309374215667,
"num_lines": 652
} |
__author__ = 'brianoneill'
from log_calls import log_calls
##############################################################################
def test_double_func_deco():
    # NOTE: the docstring below is executable doctest material -- the
    # expected-output lines must match log_calls's actual output exactly.
    """
    Double-decorating a function doesn't raise:

    >>> @log_calls()
    ... @log_calls()
    ... def f(): pass
    >>> f()
    f <== called by <module>
    f ==> returning to <module>

    The inner settings take precedence over the outer ones:
    the outer explicitly given settings are updated with the inner explicitly given settings,
    and the result becomes the settings of the decorated function.
    Here, the resulting settings of g are: args_sep='; ', log_retval=True:

    >>> @log_calls(args_sep=' | ', log_retval=True)
    ... @log_calls(args_sep='; ')
    ... def g(x, y): pass
    >>> g('a', 'b')
    g <== called by <module>
        arguments: x='a'; y='b'
        g return value: None
    g ==> returning to <module>

    Finally, a function decorated multiple times is wrapped just once, not multiple times;
    after the first/innermost decoration, subsequent decorations only affect its settings.
    Here's a similar example, this time using `log_calls` as a higher-order
    function so we can access both the wrapped function and its wrapper(s).
    Recall how the @ syntactic sugar works:
        @log_calls(args_sep='; ')
        def h(x, y): ...
    is shorthand for:
        def h(x, y): ...
        h = log_calls(args_sep='; ')(h)
    so h is set to log_calls(args_sep='; ')(h), which is a wrapper function that writes
    log_calls output before and after calling the inner/original/wrapped h.
    The following lines achieve the same effect but allow us to save the original h:

    >>> def h(x, y): return x + y
    >>> orig_h = h
    >>> h_lc1 = log_calls(args_sep='; ')(h)
    >>> h_lc1 is not orig_h
    True

    orig_h is the original undecorated function:

    >>> orig_h(3, 4)
    7

    and h_lc1 is the wrapper:

    >>> h_lc1(3, 4)
    h <== called by <module>
        arguments: x=3; y=4
    h ==> returning to <module>
    7

    Now "decorate h again" -- that is, decorate h_lc1, yielding h_lc2.
    Note that h_lc1 IS h_lc2, so there is NO additional wrapper,
    and only h_lc1's settings have changed:

    >>> h_lc2 = log_calls(args_sep=' | ', log_retval=True)(h_lc1)
    >>> h_lc1 is h_lc2
    True
    >>> h_lc2(3, 4)
    h <== called by <module>
        arguments: x=3; y=4
        h return value: 7
    h ==> returning to <module>
    7
    """
    pass
##############################################################################
# end of tests.
##############################################################################
import doctest
# For unittest integration
def load_tests(loader, tests, ignore):
    # unittest's load_tests protocol: add this module's doctests to the suite
    # so `python -m unittest` / test runners execute them.
    tests.addTests(doctest.DocTestSuite())
    return tests

if __name__ == "__main__":
    # Run the doctests directly when this file is executed as a script.
    doctest.testmod()   # (verbose=True)
    # unittest.main()
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_double_func_deco.py",
"copies": "1",
"size": "2809",
"license": "mit",
"hash": 2229902743336150000,
"line_mean": 27.6632653061,
"line_max": 89,
"alpha_frac": 0.5738697045,
"autogenerated": false,
"ratio": 3.564720812182741,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4638590516682741,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brianoneill'
from log_calls import log_calls, record_history
import doctest
def test_():
    # NOTE: doctest material below -- expected output must match record_history /
    # log_calls output exactly; CSV timing columns are elided with ELLIPSIS.
    """
    ``record_history`` is equivalent to ``log_calls`` with the settings:
        record_history=True
        log_call_numbers=True
        mute=log_calls.MUTE.CALLS
    This example, ``f``, doesn't use ``log_message`` or ``log_exprs``
    so the absence of ``log_call_numbers`` won't be noticed.
    ``record_history`` *can* use the ``log_*`` methods.

    Test with both ``mute=log_calls.MUTE.CALLS`` and ``mute=log_calls.MUTE.ALL``:
    both should record history:

    >>> @log_calls(record_history=True, mute=log_calls.MUTE.CALLS)
    ... def f(n):
    ...     for i in range(n): pass
    >>> f(1); f(2); f(3)
    >>> print(f.stats.history_as_csv)       # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    call_num|n|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
    1|1|None|...|...|...|'f'|['<module>']
    2|2|None|...|...|...|'f'|['<module>']
    3|3|None|...|...|...|'f'|['<module>']
    >>> f.stats.clear_history()
    >>> f.mute = log_calls.MUTE.ALL
    >>> f(1); f(2); f(3)
    >>> print(f.stats.history_as_csv)       # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    call_num|n|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
    1|1|None|...|...|...|'f'|['<module>']
    2|2|None|...|...|...|'f'|['<module>']
    3|3|None|...|...|...|'f'|['<module>']

    Without ``log_call_numbers=True``, call numbers won't be included in `log_*` output
    using `log_calls`:

    >>> @log_calls(record_history=True, mute=log_calls.MUTE.CALLS)
    ... def g(n):
    ...     g.log_exprs("n")
    ...     for i in range(n): pass
    >>> g(1); g(2)  # No call numbers, by default
    g: n = 1
    g: n = 2

    For reference, call numbers *are* included in `log_*` output
    when using `record_history`:

    >>> @record_history()
    ... def h(n):
    ...     h.log_exprs("n")
    ...     for i in range(n): pass
    >>> h(1); h(2)  # call numbers
    h [1]: n = 1
    h [2]: n = 2
    """
    pass
#############################################################################
# For unittest integration
def load_tests(loader, tests, ignore):
    # unittest's load_tests protocol: run this module's doctests under unittest.
    tests.addTests(doctest.DocTestSuite())
    return tests

if __name__ == "__main__":
    # Run the doctests directly when this file is executed as a script.
    doctest.testmod()   # (verbose=True)
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_log_calls_as_record_history.py",
"copies": "1",
"size": "2287",
"license": "mit",
"hash": -2907061340422337000,
"line_mean": 29.0921052632,
"line_max": 86,
"alpha_frac": 0.5557498907,
"autogenerated": false,
"ratio": 3.02113606340819,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9002053232383453,
"avg_score": 0.01496654434494725,
"num_lines": 76
} |
__author__ = 'brianoneill'
from log_calls import record_history
#-----------------------------------------------------
# record_history.print, record_history.print_exprs
# Test in methods, in functions
#-----------------------------------------------------
def test_rh_log_message__output_expected():
    # NOTE: doctest material -- exercises record_history.print / print_exprs on
    # methods of all kinds (instance, class, static, properties) and functions;
    # expected-output lines must match record_history's actual output exactly.
    """
    ------------------------------------------------
    log_message
    ------------------------------------------------
    >>> @record_history(omit='not_decorated')
    ... class B():
    ...     def __init__(self):
    ...         record_history.print('Hi')
    ...         # Test that the old version still works! It shares code.
    ...         wrapper = self.get_own_record_history_wrapper()
    ...         wrapper.log_message("Hi from original log_message")
    ...
    ...     def method(self):
    ...         record_history.print('Hi')
    ...     def not_decorated(self):
    ...         record_history.print('Hi')
    ...     @classmethod
    ...     def clsmethod(cls):
    ...         record_history.print('Hi')
    ...     @staticmethod
    ...     def statmethod():
    ...         record_history.print('Hi')
    ...
    ...     @property
    ...     def prop(self):
    ...         record_history.print('Hi')
    ...     @prop.setter
    ...     @record_history(name='B.%s.setter')
    ...     def prop(self, val):
    ...         record_history.print('Hi')
    ...
    ...     def setx(self, val):
    ...         record_history.print('Hi from setx alias x.setter')
    ...     def delx(self):
    ...         record_history.print('Hi from delx alias x.deleter')
    ...     x = property(None, setx, delx)
    >>> b = B()
    B.__init__ [1]: Hi
    B.__init__ [1]: Hi from original log_message
    >>> b.method()
    B.method [1]: Hi
    >>> # NO OUTPUT from this, nor an exception,
    >>> # because by default
    >>> record_history.print_methods_raise_if_no_deco
    False
    >>> b.not_decorated()
    >>> b.statmethod()
    B.statmethod [1]: Hi
    >>> b.clsmethod()
    B.clsmethod [1]: Hi
    >>> b.prop
    B.prop [1]: Hi
    >>> b.prop = 17
    B.prop.setter [1]: Hi
    >>> b.x = 13
    B.setx [1]: Hi from setx alias x.setter
    >>> del b.x
    B.delx [1]: Hi from delx alias x.deleter

    ------------------------------------------------
    log_exprs
    ------------------------------------------------
    >>> @record_history(omit='not_decorated')
    ... class D():
    ...     def __init__(self):
    ...         x = 2
    ...         y = 3
    ...         # Original first:
    ...         wrapper = self.get_own_record_history_wrapper()
    ...         wrapper.log_exprs('x', 'y', 'x+y')
    ...
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...
    ...     def method(self):
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...
    ...     def not_decorated(self):
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...
    ...     @classmethod
    ...     def clsmethod(cls):
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'cls.__name__')
    ...
    ...     @staticmethod
    ...     def statmethod():
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...
    ...     @property
    ...     def prop(self):
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...
    ...     @prop.setter
    ...     @record_history(name='D.%s.setter')
    ...     def prop(self, val):
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...
    ...     def setx(self, val):
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...
    ...     def delx(self):
    ...         x = 2; y = 3
    ...         record_history.print_exprs('x', 'y', 'x+y')
    ...     x = property(None, setx, delx)
    >>> d = D()
    D.__init__ [1]: x = 2, y = 3, x+y = 5
    D.__init__ [1]: x = 2, y = 3, x+y = 5
    >>> d.method()
    D.method [1]: x = 2, y = 3, x+y = 5

    # NO OUTPUT from this, NOR AN EXCEPTION,
    # because by default
    # record_history.log_methods_raise_if_no_deco == False
    >>> d.not_decorated()
    >>> d.statmethod()
    D.statmethod [1]: x = 2, y = 3, x+y = 5
    >>> d.clsmethod()
    D.clsmethod [1]: x = 2, y = 3, cls.__name__ = 'D'
    >>> d.prop
    D.prop [1]: x = 2, y = 3, x+y = 5
    >>> d.prop = 17
    D.prop.setter [1]: x = 2, y = 3, x+y = 5
    >>> d.x = 13
    D.setx [1]: x = 2, y = 3, x+y = 5
    >>> del d.x
    D.delx [1]: x = 2, y = 3, x+y = 5

    ------------------------------------------------
    functions
    ------------------------------------------------
    >>> @record_history()
    ... def bar(x, y, z):
    ...     record_history.print("Hi", "there")
    ...     pass
    >>> bar(1, 2, 3)
    bar [1]: Hi there
    """
    pass
#-----------------------------------------------------
# Test record_history.log_methods_raise_if_no_deco (bool)
# On undecorated functions/methods,
# and deco'd but with NO_DECO=True parameter
#-----------------------------------------------------
def test_rh_log_message__no_output_no_exceptions_expected():
    # NOTE: doctest material -- with print_methods_raise_if_no_deco == False
    # (the default), calling record_history.print / print_exprs from an
    # undecorated callable is a silent no-op: no output, no exception.
    """
    >>> record_history.print_methods_raise_if_no_deco = False   # the default
    >>> def nodeco(x, y, z):
    ...     record_history.print("Hi", "from", "function nodeco")
    ...     pass
    >>> nodeco(11, 12, 13)  # no output, NO EXCEPTION
    >>> @record_history(omit='not_decorated')
    ... class A():
    ...     def __init__(self):
    ...         record_history.print('Hi')
    ...     def not_decorated(self):
    ...         record_history.print('Hi')
    >>> a = A()
    A.__init__ [1]: Hi
    >>> a.not_decorated()   # no output, NO EXCEPTION
    >>> @record_history(NO_DECO=True)
    ... class C():
    ...     def __init__(self):
    ...         record_history.print('Hi')
    ...     def cmethod(self, x):
    ...         record_history.print('Hi')
    ...         record_history.print_exprs('x + 10')
    >>> c = C()             # no output, no exception
    >>> c.cmethod(5)        # no output, no exception
    >>> def schmoe(x):
    ...     record_history.print("Yo, schmoe")
    ...     pass
    >>> schmoe(170)     # no output, no exception
    """
    pass
def test_rh_log_message__exceptions_expected():
    # With record_history.print_methods_raise_if_no_deco set True (not the
    # default), record_history.print / print_exprs must raise AttributeError
    # when called from a callable that isn't decorated -- whether omitted,
    # disabled via NO_DECO=True, or simply never decorated.
    """
    >>> record_history.print_methods_raise_if_no_deco = True    # not the default

    >>> @record_history(omit='not_decorated')
    ... class A():
    ...     def __init__(self):
    ...         record_history.print('Hi')
    ...     def not_decorated(self):
    ...         record_history.print('Hi')
    >>> a = A()
    A.__init__ [1]: Hi
    >>> a.not_decorated()   # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
    Traceback (most recent call last):
        ...
    AttributeError: ... is not decorated...

    >>> @record_history(NO_DECO=True)
    ... class B():
    ...     def __init__(self):
    ...         # Comment out so we can create a B object!
    ...         # record_history.print('Hi')
    ...         pass
    ...     def bmethod1(self):
    ...         record_history.print('Hi')
    ...     def bmethod2(self, z):
    ...         record_history.print_exprs('z * 3')
    >>> b = B()             # no harm, noop
    >>> b.bmethod1()        # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
    Traceback (most recent call last):
        ...
    AttributeError: ... is not decorated...
    >>> b.bmethod2(1)       # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
    Traceback (most recent call last):
        ...
    AttributeError: ... is not decorated...

    >>> @record_history(NO_DECO=True)
    ... def foo(x, y, z):
    ...     record_history.print("Hi", "from", "function foo")
    ...     pass
    >>> foo(1, 2, 3)        # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
    Traceback (most recent call last):
        ...
    AttributeError: ... is not decorated...

    Undecorated, ever

    >>> def schmoe(x):
    ...     record_history.print("Yo, schmoe")
    ...     pass
    >>> schmoe(100)         # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
    Traceback (most recent call last):
        ...
    AttributeError: ... is not decorated...
    """
    pass
###############################################################
import doctest
# For unittest integration
def load_tests(loader, tests, ignore):
    """unittest 'load_tests' protocol hook: attach this module's doctests."""
    suite = doctest.DocTestSuite()
    tests.addTests(suite)
    return tests
if __name__ == '__main__':
    # Run this module's doctests when executed directly as a script.
    doctest.testmod()
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_record_history_log_methods.py",
"copies": "1",
"size": "8533",
"license": "mit",
"hash": -5525261937872755000,
"line_mean": 27.5384615385,
"line_max": 80,
"alpha_frac": 0.4522442283,
"autogenerated": false,
"ratio": 3.5028735632183907,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44551177915183904,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brianoneill'
import doctest
from log_calls import log_calls
# from log_calls.tests.settings_with_NO_DECO import g_DECORATE, g_settings_dict
from settings_with_NO_DECO import g_DECORATE, g_settings_dict
def test_no_deco__via_dict():
    # Settings supplied via a settings *dict* whose NO_DECO entry is true:
    # log_calls must be a complete no-op -- wrapped callables behave exactly
    # as undecorated, and none of them grow a 'log_calls_settings' attribute.
    """
    >>> @log_calls(settings=g_settings_dict)
    ... def f(n, m):
    ...     return 3*n*n*m + 4*n*m*m

    >>> @log_calls(log_exit=False, settings=g_settings_dict)
    ... def g(x, y):
    ...     if g_DECORATE:
    ...         g.log_message("Some expressions and their values:")
    ...         g.log_exprs('x', 'y', 'f(x,y)')
    ...     return f(x, y) - 20

    >>> @log_calls(only='method', settings=g_settings_dict)
    ... class C():
    ...     def __init__(self, prefix=''):
    ...         self.prefix = prefix
    ...
    ...     def method(self, s):
    ...         return self.prefix + s

    >>> print(f(1, 2))
    22
    >>> print(g(3, 4))
    280
    >>> print(C('Hello, ').method('world!'))
    Hello, world!

    >>> hasattr(f, 'log_calls_settings')
    False
    >>> hasattr(g, 'log_calls_settings')
    False
    >>> hasattr(C.method, 'log_calls_settings')
    False
    """
    pass
#-----------------------------------------------------------------------------
# For unittest integration
#-----------------------------------------------------------------------------
def load_tests(loader, tests, ignore):
    """unittest 'load_tests' protocol hook: attach this module's doctests."""
    suite = doctest.DocTestSuite()
    tests.addTests(suite)
    return tests
#-----------------------------------------------------------------------------
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    doctest.testmod()   # (verbose=True)
    # unittest.main()
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_no_deco__via_dict.py",
"copies": "1",
"size": "1618",
"license": "mit",
"hash": 4791455272864588000,
"line_mean": 26.8965517241,
"line_max": 79,
"alpha_frac": 0.4703337454,
"autogenerated": false,
"ratio": 3.611607142857143,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4581940888257143,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brianoneill'
import doctest
from log_calls import log_calls
##############################################################################
def test_deco_lambda():
    # log_calls applied functionally to a lambda: the wrapper identifies
    # the callable as '<lambda>' in its call/return trace.
    """
    >>> f = log_calls()(lambda x: 2 * x)
    >>> f(3)
    <lambda> <== called by <module>
        arguments: x=3
    <lambda> ==> returning to <module>
    6
    """
    pass
def test_cant_deco_callables():
    # log_calls must *harmlessly* decline to decorate builtins, builtin
    # classes, and callable instances (objects with __call__), returning
    # each object unchanged -- while still allowing the instance's class
    # itself to be decorated.
    """
    Builtins aren't / can't be decorated:

    >>> len is log_calls()(len)     # No ""wrapper"" around `len` -- not deco'd
    True
    >>> len('abc')                  # Redundant check that `len` isn't deco'd
    3

    Similarly,

    >>> dict.update is log_calls()(dict.update)
    True

    Builtin classes aren't deco'd (v0.3.0b20 fix to _add_class_attrs() makes it harmless)
    (Best to have a test for this: fix in `_add_class_attrs` looks for a substring
    in TypeError error message, so be able to detect if that changes.)
    None of these three lines raise TypeError:

    >>> dict is log_calls(only='update')(dict)
    True
    >>> dict is log_calls()(dict)
    True
    >>> log_calls.decorate_class(dict, only='update')
    >>> d = dict(x=1, y=2)      # no output, dict.__init__ not deco'd
    >>> d.update(x=500)         # no output, dict.update not deco'd

    Objects that are callable by virtue of implementing the `__call__` method
    can't themselves be decorated -- anyway, `log_calls` declines to do so:

    >>> from functools import partial
    >>> def h(x, y): return x + y
    >>> h2 = partial(h, 2)      # so
    >>> callable(h2)
    True
    >>> h2 is log_calls()(h2)   # not deco'd
    True
    >>> h2(3)
    5

    Another example of that:

    >>> class Rev():
    ...     def __call__(self, s): return s[::-1]
    >>> rev = Rev()
    >>> callable(rev)
    True
    >>> rev is log_calls()(rev)     # not deco'd
    True
    >>> rev('ABC')
    'CBA'

    However, the class whose instances are callables can be decorated, and then,
    `log_calls` produces output when instances are called.

    Let's use log_calls() as a function, applying it to the `Rev` class already
    defined, instead of using @log_calls() and redefining the class.
    All we have to do is call `log_calls()(Rev)`. But that returns a value,
    and since this is a doctest, that would be a failed test. We'd have to say
    `_ = log_calls()(Rev)` to suppress the value. So, better to show and test
    what the returned value really is.

    When called on a class, `log_calls` alters the class and some of its members
    but returns the same class object:

    >>> # Save Rev, just in case you suspect log_calls might change
    >>> # the binding of 'Rev' (! -- it doesn't)
    >>> T = Rev
    >>> # All three of these things are identical (`is`-chaining):
    >>> T is Rev is log_calls()(Rev)
    True

    Now, instances of Rev have a decorated `__call__` method:

    >>> rev2 = Rev()
    >>> rev2('XYZ')     # doctest: +ELLIPSIS
    Rev.__call__ <== called by <module>
        arguments: self=<__main__.Rev object at 0x...>, s='XYZ'
    Rev.__call__ ==> returning to <module>
    'ZYX'
    """
    pass
# SURGERY:
# The doctest output above hard-codes '__main__' in the Rev instance's
# repr; rewrite the docstring so the tests also pass when this module is
# imported (e.g. by a unittest runner) instead of run as a script.
test_cant_deco_callables.__doc__ = \
    test_cant_deco_callables.__doc__.replace('__main__', __name__)
##############################################################################
# end of tests.
##############################################################################
# For unittest integration
def load_tests(loader, tests, ignore):
    """unittest 'load_tests' protocol hook: attach this module's doctests."""
    suite = doctest.DocTestSuite()
    tests.addTests(suite)
    return tests
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    doctest.testmod()   # (verbose=True)
    # unittest.main()
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_deco_lambda_cant_deco_callables.py",
"copies": "1",
"size": "3571",
"license": "mit",
"hash": 3510669529132237000,
"line_mean": 27.7983870968,
"line_max": 85,
"alpha_frac": 0.5642677121,
"autogenerated": false,
"ratio": 3.5781563126252505,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.464242402472525,
"avg_score": null,
"num_lines": null
} |
__author__ = 'brianoneill'
import doctest
from log_calls import log_calls
#-----------------------------------------------------------------------------
def test_dont_decorate__via_file():
    # Settings loaded from a settings *file* whose NO_DECO entry is true:
    # log_calls must be a complete no-op -- wrapped callables behave exactly
    # as undecorated, and none of them grow a 'log_calls_settings' attribute.
    """
    >>> @log_calls(settings='settings-with-NO_DECO.txt')
    ... def f(n, m):
    ...     return 3*n*n*m + 4*n*m*m

    >>> @log_calls(log_exit=False, settings='settings-with-NO_DECO.txt')
    ... def g(x, y):
    ...     return f(x, y) - 20

    >>> @log_calls(only='method', settings='settings-with-NO_DECO.txt')
    ... class C():
    ...     def __init__(self, prefix=''):
    ...         self.prefix = prefix
    ...
    ...     def method(self, s):
    ...         return self.prefix + s

    >>> print(f(1, 2))
    22
    >>> print(g(3, 4))
    280
    >>> print(C('Hello, ').method('world!'))
    Hello, world!

    >>> hasattr(f, 'log_calls_settings')
    False
    >>> hasattr(g, 'log_calls_settings')
    False
    >>> hasattr(C.method, 'log_calls_settings')
    False
    """
    pass
#-----------------------------------------------------------------------------
# For unittest integration
#-----------------------------------------------------------------------------
def load_tests(loader, tests, ignore):
    """unittest 'load_tests' protocol hook: attach this module's doctests."""
    suite = doctest.DocTestSuite()
    tests.addTests(suite)
    return tests
#-----------------------------------------------------------------------------
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    doctest.testmod()   # (verbose=True)
    # unittest.main()
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_no_deco__via_file.py",
"copies": "1",
"size": "1454",
"license": "mit",
"hash": -7544700846073288000,
"line_mean": 25.9259259259,
"line_max": 78,
"alpha_frac": 0.4215955983,
"autogenerated": false,
"ratio": 3.9510869565217392,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4872682554821739,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian O'Neill"
__version__ = '0.3.0'
from log_calls import log_calls
import doctest
#-----------------------------------------------------------------------------
# main__test__get_own_log_calls_wrapper
# test methods accessing their OWN wrappers via utility function/classmethod
# Aiming for complete coverage of the function
#-----------------------------------------------------------------------------
def main__test__get_own_log_calls_wrapper():
    # Coverage-oriented doctests for get_own_log_calls_wrapper:
    #  * normal use from every method kind -- instance, static, class,
    #    @property, and property() created from plain methods;
    #  * failure modes for omitted / disabled / true-bypassed methods;
    #  * the numbered internal-error branches ([1]-[7]), provoked by
    #    defining caller functions whose names/locals spoof the markers
    #    get_own_log_calls_wrapper looks for on the stack.
    """
    Class we'll use through this entire set of tests:

    >>> @log_calls(omit='no_deco', mute=log_calls.MUTE.CALLS)
    ... class B():
    ...     def __init__(self):
    ...         wrapper = self.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi')
    ...     def method(self):
    ...         wrapper = self.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi')
    ...     def no_deco(self):
    ...         wrapper = self.get_own_log_calls_wrapper()    # raises ValueError
    ...         wrapper.log_message('Hi')
    ...     @staticmethod
    ...     def statmethod():
    ...         wrapper = B.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi')
    ...
    ...     @classmethod
    ...     def clsmethod(cls):
    ...         wrapper = B.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi')
    ...
    ...     @property
    ...     def prop(self):
    ...         wrapper = self.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi')
    ...     @prop.setter
    ...     @log_calls(name='B.%s.setter')
    ...     def prop(self, val):
    ...         wrapper = self.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi')
    ...
    ...     def setx(self, val):
    ...         wrapper = self.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi from setx alias x.setter')
    ...     def delx(self):
    ...         wrapper = self.get_own_log_calls_wrapper()
    ...         wrapper.log_message('Hi from delx alias x.deleter')
    ...     x = property(None, setx, delx)

    >>> b = B()
    B.__init__: Hi
    >>> b.method()
    B.method: Hi
    >>> b.no_deco()     # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    AttributeError: ...
    >>> b.statmethod()
    B.statmethod: Hi
    >>> b.clsmethod()
    B.clsmethod: Hi
    >>> b.prop
    B.prop: Hi
    >>> b.prop = 17
    B.prop.setter: Hi
    >>> b.x = 13
    B.setx: Hi from setx alias x.setter
    >>> del b.x
    B.delx: Hi from delx alias x.deleter

    This won't work/is wrong:
        try:
            b.method.get_own_log_calls_wrapper()
        except AttributeError as e:
            # 'function' object has no attribute 'get_own_log_calls_wrapper'
            print(e)

    >>> try:
    ...     b.no_deco()
    ... except AttributeError as e:
    ...     print(e)
    'no_deco' is not decorated [1]

    >>> b.method.log_calls_settings.enabled = 0
    >>> # no log_* output if enabled <= 0, but method can still call get_own_log_calls_wrapper
    >>> b.method()

    >>> b.method.log_calls_settings.enabled = -1
    >>> # "true bypass" -- method can't call get_own_log_calls_wrapper
    >>> try:
    ...     b.method()
    ... except AttributeError as e:
    ...     print(e)
    'method' is true-bypassed (enabled < 0) or not decorated [2]

    Induce more errors

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper for a few clauses
    ...     # No local named _deco_base__active_call_items__
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    'no_deco' is true-bypassed (enabled < 0) or not decorated [2]

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper longer
    ...     _deco_base__active_call_items__ = 17    # exists but isn't a dict
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    'no_deco' is not decorated [3]

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper still longer
    ...     _deco_base__active_call_items__ = {    # exists, is a dict, no key '_wrapper_deco'
    ...         'a': 45
    ...     }
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    'no_deco' is not decorated [3]

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper even longer
    ...     _deco_base__active_call_items__ = {    # exists, is a dict, has key '_wrapper_deco', but type != log_calls
    ...         '_wrapper_deco': 45
    ...     }
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    'no_deco' is not decorated [3]

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper even longer
    ...     _deco_base__active_call_items__ = {    # exists, is a dict, has key '_wrapper_deco', but type != log_calls
    ...         '_wrapper_deco': log_calls()    # correct type, but not hooked up properly
    ...     }
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    inconsistent log_calls decorator object for 'no_deco' [4]

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper even longer
    ...     lc = log_calls()    # correct type, but still not hooked up properly
    ...     lc.f = None
    ...     _deco_base__active_call_items__ = {    # exists, is a dict, has key '_wrapper_deco', but type != log_calls
    ...         '_wrapper_deco': lc
    ...     }
    ...
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    inconsistent log_calls decorator object for 'no_deco' [5]

    NOTE(review): the next example is an exact duplicate of the [5] case
    just above -- presumably a copy/paste leftover; confirm before pruning.

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper even longer
    ...     lc = log_calls()    # correct type, but still not hooked up properly
    ...     lc.f = None
    ...     _deco_base__active_call_items__ = {    # exists, is a dict, has key '_wrapper_deco', but type != log_calls
    ...         '_wrapper_deco': lc
    ...     }
    ...
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    inconsistent log_calls decorator object for 'no_deco' [5]

    >>> def _deco_base_f_wrapper_():    # note name -- fake out get_own_log_calls_wrapper even longer
    ...     lc = log_calls()    # correct type, lc.f correct, but STILL not hooked up properly
    ...     lc.f = B.no_deco
    ...     _deco_base__active_call_items__ = {    # exists, is a dict, has key '_wrapper_deco', but type != log_calls
    ...         '_wrapper_deco': lc
    ...     }
    ...
    ...     try:
    ...         b.no_deco()
    ...     except AttributeError as e:
    ...         print(e)
    >>> _deco_base_f_wrapper_()
    inconsistent log_calls decorator object for 'no_deco' [7]
    """
    pass
##############################################################################
# end of tests.
##############################################################################
#-----------------------------------------------------------------------------
# For unittest integration
#-----------------------------------------------------------------------------
def load_tests(loader, tests, ignore):
    """unittest 'load_tests' protocol hook: attach this module's doctests."""
    suite = doctest.DocTestSuite()
    tests.addTests(suite)
    return tests
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    doctest.testmod()   # (verbose=True)
    # unittest.main()
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_get_own_log_calls_wrapper.py",
"copies": "1",
"size": "7870",
"license": "mit",
"hash": -6184044619503666000,
"line_mean": 35.1009174312,
"line_max": 118,
"alpha_frac": 0.493519695,
"autogenerated": false,
"ratio": 3.5482416591523895,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45417613541523894,
"avg_score": null,
"num_lines": null
} |
__author__ = "Brian O'Neill"
__version__ = '0.3.0'
from log_calls import record_history
import doctest
#-----------------------------------------------------------------------------
# main__record_history_class_deco
#-----------------------------------------------------------------------------
def main__record_history_class_deco():
    # record_history as a *class* decorator: records calls on all methods
    # (including __repr__, which log_calls itself refuses to decorate),
    # honors omit= and per-method name= overrides, and exposes the
    # wrapper-lookup helpers get_record_history_wrapper (by name/property
    # suffix) and get_own_record_history_wrapper (from inside a method).
    """
    # [The *record_history* decorator as a class decorator](id:record_history-decorator-class-deco)
    The `record_history` decorator

    ## Basic example

    illustrating that `record_history` works as a class decorator too, and,
    unlike `log_calls`, can record calls to `__repr__`:

    >>> from log_calls import record_history

    A not very interesting class:

    >>> @record_history(omit='h')
    ... class RecordThem():
    ...     def __init__(self, a):
    ...         self._a = a
    ...     def f(self, x):
    ...         return self.a * x
    ...     @record_history(name='RT.gee')
    ...     def g(self, x, y):
    ...         return self.f(x) + y
    ...     def h(self, x, y, z):
    ...         pass
    ...     @property
    ...     def a(self):
    ...         return self._a
    ...     def __repr__(self):
    ...         return '<A(%r) at 0x%x>' % (self._a, id(self))

    >>> rt = RecordThem(10)

    `RecordThem.__init__` is decorated:

    >>> rt.__init__.stats.num_calls_logged
    1

    Unlike `log_calls`, `record_history` *can* decorate `__repr__`.
    This will call `RecordThem.__repr__` once:

    >>> print(rt)   # doctest: +ELLIPSIS
    <A(10) at 0x...>

    `__repr` is decorated in `RecordThem`:

    >>> rt.__repr__.stats.num_calls_logged
    1

    `RecordThem.f` and `RecordThem.g` are decorated:

    >>>
    >>> for i in range(5):
    ...     _ = rt.f(i), rt.g(i, 2*i)   # _ = ... : suppress doctest output
    >>> rt.f.stats.num_calls_logged, rt.g.stats.num_calls_logged
    (10, 5)
    >>> print(rt.f.stats.history_as_csv)    # doctest: +ELLIPSIS
    call_num|self|x|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
    1|<A(10) at 0x...>|0|0|...|...|...|'RecordThem.f'|['<module>']
    2|<A(10) at 0x...>|0|0|...|...|...|'RecordThem.f'|['RT.gee [1]']
    3|<A(10) at 0x...>|1|10|...|...|...|'RecordThem.f'|['<module>']
    4|<A(10) at 0x...>|1|10|...|...|...|'RecordThem.f'|['RT.gee [2]']
    5|<A(10) at 0x...>|2|20|...|...|...|'RecordThem.f'|['<module>']
    6|<A(10) at 0x...>|2|20|...|...|...|'RecordThem.f'|['RT.gee [3]']
    7|<A(10) at 0x...>|3|30|...|...|...|'RecordThem.f'|['<module>']
    8|<A(10) at 0x...>|3|30|...|...|...|'RecordThem.f'|['RT.gee [4]']
    9|<A(10) at 0x...>|4|40|...|...|...|'RecordThem.f'|['<module>']
    10|<A(10) at 0x...>|4|40|...|...|...|'RecordThem.f'|['RT.gee [5]']
    <BLANKLINE>

    `RecordThem.h` is not decorated:

    >>> hasattr(rt.h, 'stats')
    False

    ---------------------------------------------------------
    ## `get_record_history_wrapper` method of a decorated class

    Attributes of properties defined by the @property decorator can be accessed
    using the `get_record_history_wrapper` classmethod:

    >>> RecordThem.get_record_history_wrapper('a.getter').stats.num_calls_logged
    10

    We could have also used `rt.get_record_history_wrapper('a.getter')`. You pass
    `get_record_history_wrapper` the name of a method, or a property suffixed with
    `.getter`, '.setter' or '.deleter'. If you pass just the name of a property,
    `.getter` is assumed:

    >>> RecordThem.get_record_history_wrapper('a').stats.num_calls_logged
    10

    `get_record_history_wrapper` returns None if the method exists but isn't decorated:

    >>> print(rt.get_record_history_wrapper('h'))
    None

    and raises an exception for other arguments (see tests and description for `log_calls_wrapper`).

    ---------------------------------------------------------
    ## `record_history_omit` `record_history_only`

    >>> rt.record_history_omit
    ('h',)

    ---------------------------------------------------------
    ## Methods can also use the `get_own_record_history_wrapper` classmethod
    of a decorated class to access their `record_history` wrappers,
    without having to pass their own names as a string.

    >>> @record_history()
    ... class XYZ():
    ...     def __init__(self, a):
    ...         self.a = a
    ...     def f(self, x):
    ...         wrapper = self.get_own_record_history_wrapper()
    ...         wrapper.log_exprs('x', 'self.a * x')
    ...         return self.a * x
    >>> xyz = XYZ(7)
    >>> xyz.f(3)
    XYZ.f [1]: x = 3, self.a * x = 21
    21
    """
    pass
# SURGERY:
# Rewrite any '__main__' occurrences in the doctest text to the actual
# module name, so the tests also pass when this module is imported
# (e.g. by a unittest runner) instead of run as a script.
main__record_history_class_deco.__doc__ = \
    main__record_history_class_deco.__doc__.replace("__main__", __name__)
#-----------------------------------------------------------------------------
# main__record_history_class_deco__
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# main__record_history_class_deco__
#-----------------------------------------------------------------------------
##############################################################################
# end of tests.
##############################################################################
#-----------------------------------------------------------------------------
# For unittest integration
#-----------------------------------------------------------------------------
def load_tests(loader, tests, ignore):
    """unittest 'load_tests' protocol hook: attach this module's doctests."""
    suite = doctest.DocTestSuite()
    tests.addTests(suite)
    return tests
if __name__ == "__main__":
    # Manual smoke test mirroring the XYZ example from the doctests,
    # followed by a full doctest run of this module.
    @record_history()
    class XYZ():
        def __init__(self, a):
            # BUGFIX: was `self._a = a`, but f() reads `self.a` (and the
            # logged expression 'self.a * x' evaluates it too), so
            # xyz.f(3) raised AttributeError. The doctest version of XYZ
            # assigns `self.a`; make this copy consistent with it.
            self.a = a

        def f(self, x):
            self.get_own_record_history_wrapper().log_exprs('x', 'self.a * x')
            return self.a * x

    xyz = XYZ(7)
    xyz.f(3)

    doctest.testmod()   # (verbose=True)
| {
"repo_name": "Twangist/log_calls",
"path": "tests/test_record_history__class_deco.py",
"copies": "1",
"size": "5829",
"license": "mit",
"hash": 1046654000542091100,
"line_mean": 30.5081081081,
"line_max": 96,
"alpha_frac": 0.4712643678,
"autogenerated": false,
"ratio": 3.5051112447384245,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44763756125384246,
"avg_score": null,
"num_lines": null
} |
"""
TODO notes:
- provide getters, setters, adders, and deleters for both the raw and numeric formats?
- add an update method to reconcile the raw data with the numeric data? Call it from the read method?
- how should NaNs be handled?
"""
import sys
import copy
import operator
import numpy as np
import csv
import analysis
class Data:
    """Container for a tabular data set read from a CSV file.

    Two parallel representations are maintained:

    * ``raw_data``    -- list of rows of strings, exactly as read
    * ``matrix_data`` -- numpy matrix holding only the numeric columns,
                         converted to float64

    ``header2raw`` maps a column header to its column index in the raw
    data; ``header2matrix`` maps a header to its column in
    ``matrix_data`` (numeric columns only).
    """

    def __init__(self, filename=None, dataset=None):
        """Initialize from a CSV file or an in-memory dataset.

        dataset, when given, is a list of rows: row 0 = headers,
        row 1 = types, remaining rows = string data. On that path the
        numeric matrix is NOT built and filename is ignored (preserving
        the original early-return behavior).
        """
        self.raw_headers = []
        self.raw_types = []
        self.raw_data = []
        self.header2raw = {}
        self.matrix_data = np.matrix([])
        self.header2matrix = {}
        if dataset is not None:
            self.raw_headers = dataset[0]
            self.raw_types = dataset[1]
            self.raw_data = dataset[2:]
            for i, header in enumerate(self.raw_headers):
                self.header2raw[header] = i
            return  # NOTE: filename is intentionally ignored on this path
        if filename is not None:
            self.read(filename)

    def read(self, filename):
        """Read a CSV file: row 0 = headers, row 1 = types, rest = data.

        Fills raw_data (strings) and matrix_data (float64 for columns
        typed 'numeric', 'int' or 'float').
        """
        # BUGFIX: mode 'rU' was removed in Python 3.11; newline='' is the
        # csv-module-recommended way to open, and `with` closes the file
        # (the original leaked the handle).
        with open(filename, 'r', newline='') as fp:
            reader = csv.reader(fp, delimiter=',', skipinitialspace=True)
            self.raw_headers = next(reader)
            self.raw_types = next(reader)
            for row in reader:
                self.raw_data.append(row)
        # map the headers to their column index in the raw data
        for i, header in enumerate(self.raw_headers):
            self.header2raw[header] = i
        # convert numeric columns to floats; map headers to their column
        # index in the numeric matrix
        numeric_matrix = np.zeros((len(self.raw_data), len(self.raw_headers)),
                                  dtype='float64')
        cols = 0
        for i, col_type in enumerate(self.raw_types):
            if col_type in ('numeric', 'int', 'float'):
                self.header2matrix[self.raw_headers[i]] = cols
                for j in range(len(self.raw_data)):
                    numeric_matrix[j, cols] = self.raw_data[j][i]
                cols += 1
        # drop the columns allocated for non-numeric fields
        if cols < len(self.raw_headers):
            numeric_matrix = numeric_matrix[:, :cols]
        self.matrix_data = np.matrix(numeric_matrix)

    def get_raw_headers(self):
        """Return the list of all headers in the raw data."""
        return self.raw_headers

    def get_raw_types(self):
        """Return the list of all column types in the raw data."""
        return self.raw_types

    def get_raw_num_columns(self):
        """Return the number of columns in the raw data set."""
        return len(self.raw_headers)

    def get_raw_num_rows(self):
        """Return the number of rows in the raw data set."""
        return len(self.raw_data)

    def get_raw_row(self, row):
        """Return the raw-data row (a list of strings) at index *row*."""
        return self.raw_data[row]

    def get_raw_column(self, colHeader):
        """Return the raw-data column (a list of strings) for *colHeader*."""
        col = self.header2raw[colHeader]
        return [row[col] for row in self.raw_data]

    def get_raw_value(self, row, colHeader):
        """Return the raw (string) value at (*row*, *colHeader*)."""
        return self.raw_data[row][self.header2raw[colHeader]]

    def get_raw_data(self, colHeaders, rows=None):
        """Return a float array of the raw data for *colHeaders*.

        *rows*, when given, may be any sequence of row indices;
        otherwise all rows are used. Assumes the selected raw values
        parse as numbers -- TODO confirm callers only pass numeric
        columns here.
        """
        if rows is not None:
            rowRange = np.array(rows)
        else:
            rowRange = np.arange(len(self.raw_data))
        raw_data_matrix = np.zeros((len(rowRange), len(colHeaders)))
        # BUGFIX: the original indexed output rows as `i - rowRange[0]`,
        # which is wrong (out of bounds / overwriting) for non-contiguous
        # row selections.
        for out_i, i in enumerate(rowRange):
            for j, header in enumerate(colHeaders):
                raw_data_matrix[out_i, j] = self.raw_data[i][self.header2raw[header]]
        return raw_data_matrix

    def get_headers(self):
        """Return the numeric-data headers, ordered by matrix column."""
        return [h for h, _ in sorted(self.header2matrix.items(),
                                     key=operator.itemgetter(1))]

    def get_num_columns(self):
        """Return the number of columns in the numeric data set."""
        # BUGFIX: was shape[0] (the row count); columns are shape[1].
        return self.matrix_data.shape[1]

    def get_num_rows(self):
        """Return the number of rows in the numeric data set."""
        # BUGFIX: was shape[1] (the column count); rows are shape[0].
        return self.matrix_data.shape[0]

    def get_row(self, row):
        """Return numeric-data row *row* as a 1xN matrix."""
        return self.matrix_data[row, :]

    def get_column(self, colHeader):
        """Return a numeric-data column as an Nx1 matrix.

        *colHeader* may be a header string or a matrix column index;
        any other type returns None (preserved behavior).
        """
        if isinstance(colHeader, str):
            return self.matrix_data[:, self.header2matrix[colHeader]]
        if isinstance(colHeader, int):
            return self.matrix_data[:, colHeader]
        return None

    def get_value(self, row, colHeader):
        """Return the numeric (float) value at (*row*, *colHeader*)."""
        return self.matrix_data[row, self.header2matrix[colHeader]]

    def get_data(self, colHeaders, rows=None):
        """Return the submatrix of *colHeaders*, for all rows or the rows given."""
        if rows is not None:
            rowRange = np.array(rows)
        else:
            rowRange = np.arange(self.matrix_data.shape[0])
        cols = [self.header2matrix[h] for h in colHeaders]
        # BUGFIX: np.ix_ selects the row x column cross product, giving the
        # documented (n_rows, n_cols) shape; the old np.hstack(<generator>)
        # call fails on modern numpy and mis-shaped the result.
        return self.matrix_data[np.ix_(rowRange, cols)]

    def set_raw_row(self, data, row):
        """Overwrite raw-data row *row* with the values in *data*."""
        try:
            for i in range(len(data)):
                self.raw_data[row][i] = data[i]
            print("Row %i updated in raw data." % row)
        except Exception:
            print("Error: index out of bounds. Row %i not updated." % row)

    def set_row(self, data, row):
        """Overwrite numeric-data row *row* with *data*."""
        try:
            self.matrix_data[row, :] = data
            print("Row %i updated in numeric data." % row)
        except Exception:
            print("Error: index out of bounds. Row %i not updated." % row)

    def set_column(self, data, colHeader, type=None):
        """Overwrite column *colHeader* in both representations.

        *data* is expected to be an Nx1 matrix-like; *type*, when given,
        replaces the recorded raw type for the column.
        """
        numCol = self.header2matrix[colHeader]
        rawCol = self.header2raw[colHeader]
        if type is not None:
            self.raw_types[rawCol] = type
        try:
            for i in range(len(self.matrix_data)):
                self.raw_data[i][rawCol] = str(np.array(data[i])[0, 0])
            self.matrix_data[:, numCol] = data
            # BUGFIX: the success message lacked its % argument.
            print("Column %s updated." % colHeader)
        except Exception:
            print("Error: improper column title. Column '%s' not updated." % colHeader)

    def set_value(self, value, row, colHeader):
        """Set the value at (*row*, *colHeader*) in both representations."""
        numCol = self.header2matrix[colHeader]
        rawCol = self.header2raw[colHeader]
        try:
            self.matrix_data[row, numCol] = value
            self.raw_data[row][rawCol] = str(value)
            # BUGFIX: the original print mixed %-formatting with stray
            # arguments ('.' % row) and raised TypeError on every call,
            # which the bare except then mis-reported as a failure.
            print("Value (%i, '%s') updated to %s." % (row, colHeader, value))
        except Exception:
            print("Error: index out of bounds or improper column title. "
                  "Value ", value, " not updated.")

    def add_raw_row(self, data):
        """Append a row (values stringified) to the raw data."""
        # TODO: type-check each column against raw_types before adding
        try:
            if len(data) != len(self.raw_headers):
                print("Error: improper data dimensions. Row not added.")
                return
            self.raw_data.append([str(col) for col in data])
            print("Row added to raw data.")
        except Exception:
            print("Error: improper data dimensions. Row not added.")

    def add_row(self, data):
        """Append a row to the numeric data."""
        try:
            self.matrix_data = np.vstack((self.matrix_data, np.array(data)))
            print("Row added to numeric data.")
        except Exception:
            print("Error: improper data dimensions. Row not added.")

    def add_column(self, data, colHeader, type):
        """Append a column; numeric types are also added to matrix_data."""
        try:
            self.raw_headers.append(colHeader)
            self.raw_types.append(type)
            self.header2raw[colHeader] = len(self.header2raw)
            if type in ('numeric', 'int', 'float'):
                self.header2matrix[colHeader] = len(self.header2matrix)
                newdata = np.array(data).reshape(len(self.matrix_data), 1)
                self.matrix_data = np.hstack([self.matrix_data, newdata])
            # TODO (was "ADD BACK IN"): also append string values to each
            # raw_data row so both representations stay in sync.
            print("Column %s added." % colHeader)
        except Exception:
            print("Error: improper data dimensions. Column %s not added." % colHeader)

    def delete_row(self, row):
        """Delete row *row* from both representations."""
        try:
            self.matrix_data = np.delete(self.matrix_data, row, axis=0)
            del self.raw_data[row]
            print("Row %i deleted." % row)
        except Exception:
            print("Error: index out of bounds. Row %i not deleted." % row)

    def delete_column(self, colHeader):
        """Delete column *colHeader* from both representations."""
        try:
            try:
                numCol = self.header2matrix[colHeader]
                del self.header2matrix[colHeader]
                self.matrix_data = np.delete(self.matrix_data, numCol, axis=1)
                # BUGFIX: shift the matrix-column indices of later columns
                # down so they stay valid.
                for h, c in self.header2matrix.items():
                    if c > numCol:
                        self.header2matrix[h] = c - 1
            except KeyError:
                pass  # non-numeric column: nothing to remove from the matrix
            rawCol = self.header2raw[colHeader]
            del self.raw_headers[rawCol]
            del self.raw_types[rawCol]
            for row in self.raw_data:
                del row[rawCol]
            # BUGFIX: rebuild the raw-header map; the original left stale
            # indices for every column after the deleted one.
            self.header2raw = {h: i for i, h in enumerate(self.raw_headers)}
            print("Column %s deleted." % colHeader)
        except Exception:
            print("Error: improper column title. Column %s not deleted." % colHeader)

    def printData(self, numRows=999999):
        """Print headers, types and up to *numRows* rows of both representations."""
        print("\n\nData:")
        print(self.raw_headers)
        print(self.raw_types)
        print(self.matrix_data[:numRows, :])
        print(self.raw_data[:numRows])

    def writeHeaders(self, filename, headers=None):
        """Write *headers* to *filename* (comma-separated if given a list)."""
        if headers is None:
            print("No headers given")
            return
        with open(filename, 'w') as f:
            # BUGFIX: file.write() needs a string; the original passed the
            # headers list straight through and raised TypeError.
            f.write(headers if isinstance(headers, str) else ','.join(headers))
""" Holds information for data in PCA space"""
class PCAData(Data):
    """Data subclass holding a dataset projected into PCA space, together
    with the eigen-decomposition that produced the projection."""

    def __init__(self, data, evecs, evals, means, headers):
        """Store the projected matrix plus eigenvectors/eigenvalues/means,
        and rebuild the header and raw-data bookkeeping of the base class."""
        Data.__init__(self)
        self.matrix_data = data    # numpy matrix of projected data
        self.evecs = evecs         # numpy matrix, one eigenvector per row
        self.evals = evals         # one-row numpy matrix of eigenvalues
        self.means = means         # one-row numpy matrix of column means
        self.headers = headers     # list of column names
        for col, name in enumerate(headers):
            self.header2matrix[name] = col
        for row in range(self.matrix_data.shape[0]):
            self.raw_data.append(self.matrix_data[row, :].tolist()[0])

    def get_matrix_data(self):
        """Return the projected data matrix."""
        return self.matrix_data

    def get_eigenvectors(self):
        """Return the eigenvector matrix (one eigenvector per row)."""
        return self.evecs

    def get_eigenvalues(self):
        """Return the one-row matrix of eigenvalues."""
        return self.evals

    def get_data_means(self):
        """Return the one-row matrix of original column means."""
        return self.means

    def get_data_headers(self):
        """Return the list of column headers."""
        return self.headers
if __name__ == "__main__":
    # Demo driver: load one dataset, print a battery of summary statistics,
    # then exercise the mutation and accessor API of the Data object.
    data = Data(filename='GOOG-NASDAQ_TSLA.csv')
    cols = ['Open', 'Close', 'Volume']
    # print out some analyses
    print("\n\nDescriptive statistics of Tesla's stock data (daily open and close prices and trading volume:")
    stat_printers = (("Mean: ", analysis.mean),
                     ("Standard deviation: ", analysis.stdev),
                     ("Ranges: ", analysis.dataRange),
                     ("Normalized columns: ", analysis.normalizeColumnsSeparately),
                     ("Normalized globally: ", analysis.normalizeColumnsTogether),
                     ("Variance: ", analysis.variance),
                     ("Median: ", analysis.median),
                     ("Mode value: ", analysis.modeValue),
                     ("Mode frequency: ", analysis.modeFreq))
    for label, fn in stat_printers:
        print(label, fn(cols, data))
    print("Range value: ", analysis.rangeDiff(cols, data), "\n")
    data.printData(20)
    # manipulate the data to show their efficacy
    data.set_value(0.0001, 5, 'Open')
    data.set_column(data.get_column('Open'), 'Close')
    data.add_column(data.get_column('Volume'), 'Volume2', 'numeric')
    data.add_raw_row(['6/28/10', 2.0, 3.0, 4.0, 5.0, 1000.0, 3])
    data.add_row([1.0, 2.0, 3.0, 4.0, 5.0, 6])
    # print out the whole data set again after the edits
    data.printData(20)
    # run some more methods to test
    print(data.get_data(['High', 'Low'], range(30, 50)))
    data.delete_column('Date')
    data.delete_row(0)
    data.printData(20)
    # print out some more stats about the dataset
    print("Extra stats:")
    print("Headers: ", data.get_headers())
    print("Types: ", data.get_raw_types())
    print("Column numbers: ", data.get_num_columns())
    print("Row numbers: ", data.get_num_rows())
    print("Row 3: ", data.get_row(2))
    print("Column 'Open': ", data.get_column('Open'))
    data.set_raw_row(data.get_raw_row(5), 6)
    data.set_row(data.get_row(3), 4)
data.printData(20) | {
"repo_name": "bhwester/computer-science-projects",
"path": "data_analysis_and_visualization_system/data.py",
"copies": "1",
"size": "15719",
"license": "mit",
"hash": 70542518007010830,
"line_mean": 35.6433566434,
"line_max": 127,
"alpha_frac": 0.6071633056,
"autogenerated": false,
"ratio": 3.7813326918450807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48884959974450803,
"avg_score": null,
"num_lines": null
} |
import numpy as np
class View:
# constructor
def __init__(self):
    # automatically resets the view: all state lives in reset()'s defaults
    self.reset()
def reset(self,
          vrp=None,
          vpn=None,
          vup=None,
          u=None,
          extent=None,
          screen=None,
          offset=None):
    """Reset the viewing parameters to defaults, or to the values supplied.

    The original used numpy arrays as default argument values; those are
    built once at function-definition time and shared by every call (the
    classic mutable-default pitfall), so any in-place edit of one View's
    defaults would leak into every later reset. Defaults are now created
    fresh per call via None sentinels -- callers passing explicit values
    are unaffected.
    """
    self.vrp = np.matrix([0.5, 0.5, 1]) if vrp is None else vrp      # view reference point
    self.vpn = np.matrix([0, 0, -1]) if vpn is None else vpn         # view plane normal
    self.vup = np.matrix([0, 1, 0]) if vup is None else vup          # view "up" vector
    self.u = np.matrix([-1, 0, 0]) if u is None else u               # view "right" vector
    self.extent = np.array([1, 1, 1]) if extent is None else extent  # view volume extents
    self.screen = np.array([400, 400]) if screen is None else screen # screen size in pixels
    self.offset = np.array([20, 20]) if offset is None else offset   # screen-edge buffer
    # 2D pan state in screen coordinates
    self.translationX = 0
    self.translationY = 0
def build(self):
    """Assemble and return the 4x4 view transformation matrix (vtm).

    Pipeline (each stage premultiplies vtm): translate the VRP to the
    origin -> rotate the view reference axes into alignment -> translate
    the view-volume corner -> scale to the screen -> apply the screen
    offset.
    """
    # Generate a 4x4 identity matrix, which will be the basis for the view matrix
    vtm = np.identity(4, dtype=float)
    # Generate a translation matrix to move the VRP to the origin and then premultiply the vtm by the translation matrix
    t1 = np.matrix([[1, 0, 0, -self.vrp[0, 0]],
                    [0, 1, 0, -self.vrp[0, 1]],
                    [0, 0, 1, -self.vrp[0, 2]],
                    [0, 0, 0, 1]])
    vtm = t1 * vtm
    # Calculate the view reference axes tu, tvup, tvpn
    # tu is the cross product (np.cross) of the vup and vpn vectors
    tu = np.cross(self.vup, self.vpn)
    # tvup is the cross product of the vpn and tu vectors
    tvup = np.cross(self.vpn, tu)
    # tvpn is a copy of the vpn vector
    tvpn = self.vpn
    # Normalize the view axes tu, tvup, and tvpn to unit length
    # du, dv, and dz are all a part of the normalization process, make explicit?
    # Bruce's edits: (didn't work, caused an error, and rotation/translation/scaling still work fine for axes and data)
    # tu /= np.linalg.norm(tu)
    # tvup /= np.linalg.norm(tvup)
    # tvpn /= np.linalg.norm(tvpn)
    # NOTE(review): these three calls discard their results, so the axes are
    # NOT actually normalized here (the in-place version above is commented
    # out as having caused an error). Confirm unnormalized axes are acceptable.
    np.linalg.norm(tu)
    np.linalg.norm(tvup)
    np.linalg.norm(tvpn)
    # Copy the orthonormal axes tu, tvup, and tvpn back to self.u, self.vup and self.vpn
    self.u = tu
    self.vup = tvup
    self.vpn = tvpn
    # align the axes
    r1 = np.matrix([[tu[0,0], tu[0, 1], tu[0, 2], 0.0],
                    [tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0],
                    [tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0],
                    [0.0, 0.0, 0.0, 1.0]])
    vtm = r1 * vtm
    # Perspective view transformation goes here
    #p = np.matrix([[1, 0, 0, 0],
    #               [0, 1, 0, 0],
    #               [0, 0, 1, 0],
    #               [0, 0, 1/d, 0]])
    #p = p ####
    # Translate the lower left corner of the view space to the origin. Since the axes are aligned, this is just a
    # translation by half the extent of the view volume in the X and Y view axes
    t2 = np.matrix([[1, 0, 0, 0.5*self.extent[0]],
                    [0, 1, 0, 0.5*self.extent[1]],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
    vtm = t2 * vtm
    # Use the extent and screen size values to scale to the screen
    s1 = np.matrix([[-self.screen[0]/self.extent[0], 0, 0, 0],
                    [0, -self.screen[1]/self.extent[1], 0, 0],
                    [0, 0, 1.0/self.extent[2], 0],
                    [0, 0, 0, 1]])
    vtm = s1 * vtm
    # Translate the lower left corner to the origin and add the view offset, which gives a little buffer around the
    # top and left edges of the window
    t3 = np.matrix([[1, 0, 0, self.screen[0]+self.offset[0]],
                    [0, 1, 0, self.screen[1]+self.offset[1]],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
    vtm = t3 * vtm
    return vtm
def clone(self):
    """Return an independent copy of this View.

    Array-valued fields are duplicated with np.copy so mutating the clone
    cannot affect the original; the scalar pan offsets are copied by value.
    """
    dup = View()
    for field in ('vrp', 'vpn', 'vup', 'u', 'extent', 'screen', 'offset'):
        setattr(dup, field, np.copy(getattr(self, field)))
    dup.translationX = self.translationX
    dup.translationY = self.translationY
    return dup
def rotateVRC(self, thetaU, thetaVUP, thetaVPN):
    """Rotate the view reference coordinates by the three given angles
    (radians) about the U, VUP, and VPN axes respectively.

    Packs VRP and the three axes into one matrix, translates the rotation
    center (middle of the extent volume) to the origin, rotates in the
    view frame, then translates back and unpacks the results.
    """
    # translate the center of rotation (the middle of the extent volume) to the origin, rotate around the Y axis,
    # rotate around the X axis, then translate back by the opposite of the first translation
    # row 0 is VRP (homogeneous coord 1: a point); rows 1-3 are U/VUP/VPN (coord 0: directions)
    tvrc = np.matrix([[self.vrp[0, 0], self.vrp[0, 1], self.vrp[0, 2], 1],
                      [self.u[0, 0], self.u[0, 1], self.u[0, 2], 0],
                      [self.vup[0, 0], self.vup[0, 1], self.vup[0, 2], 0],
                      [self.vpn[0, 0], self.vpn[0, 1], self.vpn[0, 2], 0]])
    # rotation center: half the view depth along VPN from the VRP
    point = np.matrix(self.vrp + self.vpn * self.extent[2] * 0.5)
    t1 = np.matrix([[1, 0, 0, -point[0, 0]],
                    [0, 1, 0, -point[0, 1]],
                    [0, 0, 1, -point[0, 2]],
                    [0, 0, 0, 1]])
    # Rxyz maps world axes onto the current view axes (its transpose maps back)
    Rxyz = np.matrix([[self.u[0,0], self.u[0, 1], self.u[0, 2], 0.0],
                      [self.vup[0, 0], self.vup[0, 1], self.vup[0, 2], 0.0],
                      [self.vpn[0, 0], self.vpn[0, 1], self.vpn[0, 2], 0.0],
                      [0.0, 0.0, 0.0, 1.0]])
    # elementary rotations about X (thetaU), Y (thetaVUP), and Z (thetaVPN)
    r1 = np.matrix([[1, 0, 0, 0],
                    [0, np.cos(thetaU), -np.sin(thetaU), 0],
                    [0, np.sin(thetaU), np.cos(thetaU), 0],
                    [0, 0, 0, 1]])
    r2 = np.matrix([[np.cos(thetaVUP), 0, np.sin(thetaVUP), 0],
                    [0, 1, 0, 0],
                    [-np.sin(thetaVUP), 0, np.cos(thetaVUP), 0],
                    [0, 0, 0, 1]])
    r3 = np.matrix([[np.cos(thetaVPN), -np.sin(thetaVPN), 0, 0],
                    [np.sin(thetaVPN), np.cos(thetaVPN), 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
    t2 = np.matrix([[1, 0, 0, point[0, 0]],
                    [0, 1, 0, point[0, 1]],
                    [0, 0, 1, point[0, 2]],
                    [0, 0, 0, 1]])
    # full pipeline applied to the transposed packed matrix, then transposed back
    tvrc = (t2 * Rxyz.T * r3 * r2 * r1 * Rxyz * t1 * tvrc.T).T
    # Copy values from tvrc back into VRP, U, VUP, and VPN
    self.vrp = tvrc[0, 0:3]
    self.u = tvrc[1, 0:3]
    self.vup = tvrc[2, 0:3]
    self.vpn = tvrc[3, 0:3]
    # Normalize U, VUP, and VPN
    # NOTE(review): as in build(), these norm calls discard their results,
    # so the axes are not actually renormalized here -- confirm intended.
    np.linalg.norm(self.u)
    np.linalg.norm(self.vup)
np.linalg.norm(self.vpn) | {
"repo_name": "bhwester/computer-science-projects",
"path": "data_analysis_and_visualization_system/view.py",
"copies": "1",
"size": "6811",
"license": "mit",
"hash": 4936978279960080000,
"line_mean": 38.3757225434,
"line_max": 124,
"alpha_frac": 0.4698282191,
"autogenerated": false,
"ratio": 3.200657894736842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9160910858978344,
"avg_score": 0.0019150509716996877,
"num_lines": 173
} |
import numpy as np
import pandas as pd
import sklearn.linear_model as sklearnLinearModel
import sklearn.svm as sklearnSVM
import matplotlib.pyplot as plt
# Read in data
# NOTE(review): hard-coded absolute path to one user's Downloads folder --
# this script only runs on that machine; parameterize before reuse.
# NOTE(review): `sheetname=` is the pre-0.21 pandas keyword (newer pandas
# uses `sheet_name=`) -- confirm the pinned pandas version.
fundedCompanies = pd.read_excel("/Users/Brian/Downloads/cb_data_xlsx_sample.xlsx", \
                                sheetname="Funded Companies")
rounds = pd.read_excel("/Users/Brian/Downloads/cb_data_xlsx_sample.xlsx", \
                       sheetname="Rounds")
# Keep only the features of interest
fundedCompaniesVars = fundedCompanies[['funding_rounds', 'funding_total_usd']]
roundsVars = rounds[['funding_round_type', 'raised_amount_usd', 'investor_count']]
# Clean up the data a bit
fundedCompaniesVars = fundedCompaniesVars.dropna(axis=0) # get rid of any rows with NaN
roundsVars = roundsVars.dropna(axis=0)
# Initialize the machine learning models
linearModel = sklearnLinearModel.LinearRegression()
classifierSVM = sklearnSVM.SVC()
# Fit the models to the data
# regression: predict total funding (y) from number of rounds (x); both
# reshaped to column vectors via transpose
inputRegression = np.matrix(fundedCompaniesVars['funding_rounds']).T
outputRegression = np.matrix(fundedCompaniesVars['funding_total_usd']).T
linearModel.fit(inputRegression, outputRegression)
# classification: predict round type from investor count and amount raised
inputClassification = np.matrix(roundsVars[['investor_count', 'raised_amount_usd']])
outputClassification = np.array(roundsVars['funding_round_type'])
classifierSVM.fit(inputClassification, outputClassification)
# Evaluate performance of supervised learning models
# NOTE(review): both models are scored on their own training data, so R^2
# and accuracy here measure fit, not generalization.
predictionsRegression = linearModel.predict(inputRegression)
predictionsClassification = classifierSVM.predict(inputClassification)
r2 = linearModel.score(np.matrix(fundedCompaniesVars['funding_rounds']).T, np.matrix(fundedCompaniesVars['funding_total_usd']).T)
accuracy = classifierSVM.score(np.matrix(roundsVars[['investor_count', 'raised_amount_usd']]), np.array(roundsVars['funding_round_type']))
print("R^2 of regression: ", r2)
print("Accuracy of classifier: ", accuracy)
# Plot the models
plt.figure(1) # makes a new figure
plt.title('Linear Regression of Total Funding vs. Funding Rounds')
plt.xlabel('Funding rounds')
plt.ylabel('Total funding in hundreds of millions of dollars')
plt.axis([0, 10, 0, 100000000])
plt.scatter(inputRegression, outputRegression)
# overlay the fitted regression line y = m*x + b
m = linearModel.coef_
b = linearModel.intercept_
plt.plot(inputRegression, inputRegression*m+b, color='red')
plt.figure(2) # makes a new figure
plt.title('Support Vector Machine Classification of Funding Round Type from Amount Raised and Investor Count')
plt.xlabel('Number of investors')
plt.ylabel('Amount raised in hundreds of millions of dollars')
plt.axis([0, 14, 0, 250000000])
# one scatter layer per predicted class, colored via the lookup tables below
classes = {0:'venture', 1:'seed', 2:'angel', 3:'private_equity'}
colors = {'venture':'red', 'seed':'green', 'angel':'blue', 'private_equity':'yellow'}
for i in range(4):
    # boolean mask selecting the samples predicted as this class
    label = predictionsClassification == classes[i]
    color = colors[classes[i]]
    plt.scatter(inputClassification[label, 0], inputClassification[label, 1], color=color)
# Display the plots
plt.show()
| {
"repo_name": "bhwester/computer-science-projects",
"path": "data_analysis_and_visualization_system/finalproject.py",
"copies": "1",
"size": "2965",
"license": "mit",
"hash": 4568199451218487000,
"line_mean": 38.5333333333,
"line_max": 138,
"alpha_frac": 0.7679595278,
"autogenerated": false,
"ratio": 3.357870894677237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9600723910603962,
"avg_score": 0.005021302374654951,
"num_lines": 75
} |
__author__ = 'brock'
"""
Taken from: https://gist.github.com/1094140
"""
from functools import wraps
from flask import request, current_app
from werkzeug.routing import BaseConverter
from werkzeug.exceptions import HTTPException
def jsonp(func):
    """Wraps JSONified output for JSONP requests.

    When the request carries a `callback` query argument, the wrapped
    view's response body is wrapped as `callback(<body>)` and served as
    JavaScript; otherwise the view runs untouched.
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        callback = request.args.get('callback', False)
        if not callback:
            # plain (non-JSONP) request: pass straight through
            return func(*args, **kwargs)
        data = str(func(*args, **kwargs).data)
        content = str(callback) + '(' + data + ')'
        mimetype = 'application/javascript'
        return current_app.response_class(content, mimetype=mimetype)
    return decorated_function
def getParamAsInt(request, key, default):
    """
    Safely pulls a key from the request and converts it to an integer
    @param request: The HttpRequest object
    @param key: The key from request.args containing the desired value
    @param default: The value to return if the key does not exist
    @return: The value matching the key, or if it does not exist (or is
        not a string of digits), the default value provided.
    """
    raw = request.args[key] if key in request.args else None
    if raw is not None and raw.isdigit():
        return int(raw)
    return default
def getClientIP(request):
    """
    Pull the requested client IP address from the X-Forwarded-For request
    header. If there is more than one IP address in the value, it will return
    the first one.
    For more info, see: 'def access_route' in
    https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/wrappers.py
    :param request:
    :return str: The client IP address, or none if neither the X-Forwarded-For
        header, nor REMOTE_ADDR are present in the environment.
    """
    # The original tested `request.access_route > 0`, comparing a list to an
    # int: a TypeError on Python 3 and a meaningless type-name comparison on
    # Python 2. The intended check is simply "is the route list non-empty".
    if request.access_route:
        ip = request.access_route[0]
    else:
        ip = None
    return ip
class RegexConverter(BaseConverter):
    # URL-map converter whose matching pattern is supplied by the route itself.
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # first positional item is the regular expression used for matching
        self.regex = items[0]
class ShHTTPException(HTTPException):
    # Base HTTP exception for this app: body is the bare description (no
    # HTML wrapper page) and the Content-Type is forced to JSON.
    def get_body(self, environ):
        """Get the HTML body."""
        return ('%(description)s') % {'description': self.get_description(environ)}
    def get_headers(self, environ):
        """Always return errors as json"""
        return [('Content-Type', 'application/json')]
class BadRequest(ShHTTPException):
    """*400* `Bad Request`
    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    # status code and default body consumed by the HTTPException machinery
    code = 400
    description = (
        '<p>The browser (or proxy) sent a request that this server could '
        'not understand.</p>'
    )
class Unauthorized(ShHTTPException):
    """*401* `Unauthorized`
    Raise if the user is not authorized. Also used if you want to use HTTP
    basic auth.
    """
    # status code and default body consumed by the HTTPException machinery
    code = 401
    description = (
        '<p>The server could not verify that you are authorized to access '
        'the URL requested. You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.</p><p>In case you are allowed to request '
        'the document, please check your user-id and password and try '
        'again.</p>'
    )
class Forbidden(ShHTTPException):
    """*403* `Forbidden`
    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """
    # status code and default body consumed by the HTTPException machinery
    code = 403
    description = (
        '<p>You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.</p>'
    )
class NotFound(ShHTTPException):
    """*404* `Not Found`
    Raise if a resource does not exist and never existed.
    """
    # status code and default body consumed by the HTTPException machinery
    code = 404
    description = (
        '<p>The requested URL was not found on the server.</p>'
        '<p>If you entered the URL manually please check your spelling and '
        'try again.</p>'
    )
class Conflict(ShHTTPException):
    """*409* `Conflict`
    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.
    .. versionadded:: 0.7
    """
    # status code and default body consumed by the HTTPException machinery
    code = 409
    # NOTE(review): the description is missing its closing </p> tag
    description = (
        '<p>A conflict happened while processing the request. The resource '
        'might have been modified while the request was being processed.'
    )
# NOTE(review): this class name shadows the builtin NotImplemented singleton
# within this module; rename with care since callers may import it by name.
class NotImplemented(ShHTTPException):
    """*501* `Not Implemented`
    Raise if the application does not support the action requested by the
    browser.
    """
    # status code and default body consumed by the HTTPException machinery
    code = 501
    description = (
        '<p>The server does not support the action requested by the '
        'browser.</p>'
    )
def paginate(request, objects, total, offset, limit):
    """Build a paginated API response: the page of `objects` plus a `meta`
    block with total count, window parameters, and next/previous URLs."""
    def get_url(offset, limit):
        # Rebuild a page URL on the caller's path.
        # NOTE(review): the '?&' (stray ampersand right after '?') is kept
        # as-is since clients may already depend on the exact URL shape.
        return '{}?&offset={}&limit={}'.format(request.path, offset, limit)
    response = {
        'meta': {
            'total': total,
            'limit': limit,
            # next page only when more items remain beyond this window
            'next': get_url(offset + limit, limit)
            if offset + limit < total else None,
            # previous page only when a full step back stays non-negative
            'previous': get_url(offset - limit, limit)
            if offset - limit >= 0 else None,
            'offset': offset,
        },
        'objects': objects
    }
return response | {
"repo_name": "Sendhub/flashk_util",
"path": "request.py",
"copies": "1",
"size": "5285",
"license": "bsd-3-clause",
"hash": -978435764751360800,
"line_mean": 29.7325581395,
"line_max": 93,
"alpha_frac": 0.6344370861,
"autogenerated": false,
"ratio": 4.184481393507522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5318918479607522,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
def __init__(self):
self.nick = ('read', 'write')
def check(self):
if not os.path.exists('/proc/fs/lustre/llite'):
raise Exception, 'Lustre filesystem not found'
info(1, 'Module %s is still experimental.' % self.filename)
def name(self):
return [mount for mount in os.listdir('/proc/fs/lustre/llite')]
def vars(self):
return [mount for mount in os.listdir('/proc/fs/lustre/llite')]
def extract(self):
for name in self.vars:
for l in open(os.path.join('/proc/fs/lustre/llite', name, 'stats')).splitlines():
if len(l) < 6: continue
if l[0] == 'read_bytes':
read = long(l[6])
elif l[0] == 'write_bytes':
write = long(l[6])
self.set2[name] = (read, write)
self.val[name] = map(lambda x, y: (y - x) * 1.0 / elapsed, self.set1[name], self.set2[name])
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_lustre.py",
"copies": "2",
"size": "1158",
"license": "apache-2.0",
"hash": 877481199610314600,
"line_mean": 34.0909090909,
"line_max": 104,
"alpha_frac": 0.5483592401,
"autogenerated": false,
"ratio": 3.190082644628099,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9725716807020972,
"avg_score": 0.0025450155414255466,
"num_lines": 33
} |
""" author: Brogan Ross
A simple to do list app build using kivy.
@todo:
1 - move todo item storage to a better location, ie database, or new internal storage location.
2 - test layouts on mobile devices.
3 - Fix weird layout issues with the PopupDialog's content
Edit Icon from https://www.iconfinder.com/designmodo
license at: http://creativecommons.org/licenses/by/3.0/
changed the color from black to white
"""
__version__ = "0.0.1"
from datetime import datetime
from uuid import uuid4
import kivy
kivy.require("1.8.0")
from kivy.adapters.listadapter import ListAdapter
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.dropdown import DropDown
from kivy.uix.listview import SelectableView
from kivy.uix.popup import Popup
from kivy.uix.widget import Widget
from kivy.properties import BooleanProperty
from kivy.properties import DictProperty
from kivy.properties import ListProperty
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from uix.popupdialog import ButtonFlags
from uix.popupdialog import PopupDialog
from models.item import Item
from settings import DATE_FORMAT
from settings import STORAGE
class OrderDropDown(DropDown):
    """ Simple DropDown with options for sorting.

    The option widgets are presumably declared in the kv file -- TODO confirm.
    """
    pass
class TodoListItem(SelectableView, BoxLayout):
    """Row widget for one todo item: dispatches save/delete/edit events and
    owns the edit popup for its item."""
    selected_color = ListProperty([1., 0., 0., 1])
    '''
    :attr:`selected_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [1., 0., 0., 1].
    '''
    deselected_color = ListProperty([0., 1., 0., 1])
    '''
    :attr:`deselected_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [0., 1., 0., 1].
    '''
    item = ObjectProperty(None)
    """
    :attr:`item` is a :class:`~models.item.Item` and defaults to None
    """
    label = ObjectProperty(None)
    delete_button = ObjectProperty(None)
    # custom events this widget can dispatch; default handlers below are no-ops
    __events__ = ("on_item_save", "on_item_delete", "on_item_edit")
    def on_item_save(self):
        # default handler: callers bind their own via kwargs/bind()
        pass
    def on_item_delete(self):
        # default handler: callers bind their own via kwargs/bind()
        pass
    def _delete_item(self):
        # remove the item from storage, then tell listeners to refresh
        self.item.delete()
        self.dispatch("on_item_delete")
    def on_item_edit(self):
        """ When the item is pressed, display an edit popup
        """
        content = ItemDetailBase(item=self.item)
        btns = ButtonFlags.Ok | ButtonFlags.Cancel
        self._popup = PopupDialog(title="Edit details",
                                  content=content,
                                  buttons=btns)
        self._popup.bind(on_accept=self._save_item)
        self._popup.bind(on_reject=self._popup.dismiss)
        self._popup.open()
    def _save_item(self, button):
        """ If the ok button is pressed on the edit popup,
            update the item in storage.
        """
        # check that the input is valid
        # if not don't close the popup (returning True keeps it open)
        if not self._popup.content.is_valid():
            return True
        self.item.update(
            title = self._popup.content.title_input.text,
            content = self._popup.content.content_input.text,
            complete = self._popup.content.completed_input.active
        )
        self.item.save()
        self._popup.dismiss()
        self.dispatch("on_item_save")
class ItemDetailBase(BoxLayout):
    """
    Base widget for editing a todo item
    """
    completed_input = ObjectProperty(None)
    """
    The input item for whether the item has been completed or not
    :attr:`completed_input` is an :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    """
    content_input = ObjectProperty(None)
    """
    The input object for the item's content
    :attr: `content_input` is a :class:`~kivy.properties.ObjectProperty` and defaults to None
    """
    title_input = ObjectProperty(None)
    """
    The input object for the item's title
    :attr: `title_input` is a :class: `~kivy.properties.ObjectProperty` and defaults to None
    """
    created_group = ObjectProperty(None)
    """
    The layout containing the labels of the item's creation data
    :attr: `created_group` is a :class: `~kivy.properties.ObjectProperty` and defaults to None
    """
    completed_group = ObjectProperty(None)
    """
    The layout containing the label and check box for the item's completion data
    :attr: `completed_group` is a :class: `~kivy.properties.ObjectProperty` and defaults to None
    """
    item = ObjectProperty(None)
    """
    Stores the item that is being edited or created
    :attr: `item` is a :class: `~kivy.properties.ObjectProperty` and defaults to None
    """
    def is_valid(self):
        """ Determine whether the input contents are valid
        (a non-empty title is currently the only requirement).
        """
        return self.title_input.text != ""
    def validate_title(self):
        """ Determine whether the title input is valid, flagging an empty
        title with a red "Title Required" hint.
        """
        title = self.title_input.text
        if title == "":
            self.title_input.hint_text_color = [1,0,0,1]
            self.title_input.hint_text = "Title Required"
    def on_title_focus(self, value):
        """ When defocusing the title_input validate it, and shift focus to the content_input
        """
        # value is the new focus state: False means the field just lost focus
        if value == False:
            self.validate_title()
            self.content_input.focus = True
class NewTaskWidget(ItemDetailBase):
    """ Widget specifically for creating a new todo item.
    Same as :class: `~ItemDetailBase` but with the created_group removed.
    """
    def __init__(self, **kwargs):
        super(NewTaskWidget, self).__init__(**kwargs)
        # a brand-new item has no creation metadata yet, so drop that group
        self.remove_widget(self.created_group)
# class ItemDetailWidget(ItemDetailBase):
# item = ObjectProperty(None)
class SortListAdapter(ListAdapter):
    """ List adapter that automatically sorts the data. Sorts happen whenever
        :attr:`order_by` or :attr:`data` are set.
    """
    order_by = StringProperty("")
    """ String used to define the ordering of the data.
    """
    order_map = DictProperty({})
    """ Stores a mapping of the order_by display name and the attribute name used to sort the todo items.
    """
    def __init__(self, **kwargs):
        # pop order_by out before delegating so the base class never sees it
        # NOTE(review): dict.has_key is Python 2 only -- this module breaks
        # on Python 3 ('order_by' in kwargs is the portable spelling).
        if kwargs.has_key("order_by"):
            self.order_by = kwargs.pop("order_by")
        super(SortListAdapter, self).__init__(**kwargs)
        self.bind(order_by=self.sort)
        self.bind(data=self.sort)
    def sort(self, *args):
        # temporarily unbind so assigning self.data below does not re-trigger
        # sort() recursively; rebind once the sorted list is in place
        self.unbind(data=self.sort)
        self.data = sorted(self.data, key=lambda x: getattr(x, self.order_map[self.order_by]))
        self.bind(data=self.sort)
class TodoWidget(Widget):
    """ Base widget for the app
    """
    list_view = ObjectProperty(None)
    """ The :class:`~kivy.uix.listview.ListView` for displaying the todo items
        :attr:`list_view` is a :class:`~kivy.properties.ObjectProperty`
    """
    include_hidden = BooleanProperty(False)
    """ :class:`~kivy.properties.BooleanProperty` for whether items marked as completed are displayed. Defaults to False
    """
    list_adapter = ObjectProperty(None)
    """ :class:`~kivy.adapter.ListAdapter` for the :attr:`list_view`
    """
    def __init__(self, *args, **kwargs):
        # build the sorting adapter before the base __init__ so the kv rules
        # (which reference list_adapter) see it fully configured
        self.list_adapter = SortListAdapter(
            data=[],
            cls=TodoListItem,
            args_converter=self.args_converter,
            selection_mode="none",
            order_by="created",
            order_map={"created" : "created",
                       "title" : "title",
                       "completed" : "complete_date"})
        self.order_drop = OrderDropDown()
        self.order_drop.bind(on_select=self._order_selected)
        super(TodoWidget, self).__init__(*args, **kwargs)
        # add_button is presumably bound in the kv file -- TODO confirm
        self.add_button.bind(on_press=self.add_task)
    def _order_selected(self, button, order):
        """ Wrapped method for when an item in the :attr:`OrderDropDown` gets changed
        """
        self.list_adapter.order_by = order
        self.list_view.populate()
    def on_include_hidden(self, instance, value):
        """ when the :attr:`include_hidden` is changed reload the items from storage
        """
        self.load_items()
    def args_converter(self, row_index, rec):
        """ Argument converter method for the :attr:`list_adapter`:
        maps one stored item to the constructor kwargs of a TodoListItem row.
        """
        return {"text" : rec.title,
                "item" : rec,
                "size_hint_y" : None,
                "height" : "40sp",
                "font_size" : "24sp",
                "on_item_save" : self.load_items,
                "on_item_delete" : self.load_items
                }
    def load_items(self, *args):
        """ Loads the todo items from storage and populates the :attr:`list_adapter`,
        skipping completed items unless :attr:`include_hidden` is set.
        """
        todos = []
        for k in STORAGE:
            data = STORAGE[k]
            if self.include_hidden == False and data.get("complete", False) == True:
                continue
            i = Item(k, **data)
            todos.append(i)
        self.list_adapter.data = todos
        self.list_view.populate()
    def add_task(self, touch):
        """ When the :attr:`add_button` is pressed open a PopupDialog for creating the new
            item
        """
        content = NewTaskWidget()
        btns = ButtonFlags.Ok | ButtonFlags.Cancel
        self._popup = PopupDialog(title="Add Task",
                                  content=content,
                                  buttons=btns,
                                  size_hint=(.9,.9))
        self._popup.bind(on_accept=self._add_task_button)
        self._popup.bind(on_reject=self._popup.dismiss)
        self._popup.open()
    def _add_task_button(self, button):
        """ when the popup's ok button is pressed, validate and create the new item
        """
        # invalid input: keep the popup open by refusing to proceed
        if self._popup.content.is_valid() == False:
            return False
        content = self._popup.content.content_input.text
        title = self._popup.content.title_input.text
        complete = self._popup.content.completed_input.active
        if title != "":
            # store creation time as formatted UTC with an explicit suffix
            dt = datetime.utcnow().strftime(DATE_FORMAT) + "UTC" # cause datetime is stupid
            complete_date = ""
            if complete == True:
                complete_date = dt
            i = Item(str(uuid4()), content=content,
                     created=dt, title=title, complete=complete,
                     complete_date=complete_date)
            i.save()
        self._popup.dismiss()
        self._popup = None
        self.load_items()
class TodoApp(App):
    """Kivy application entry point: builds the root TodoWidget with its
    items already loaded from storage."""
    def build(self):
        root = TodoWidget()
        root.load_items()
        return root
if __name__ == "__main__":
    # construct and start the app only when run as a script
    TodoApp().run()
| {
"repo_name": "broganross/kivy_tests",
"path": "todo/main.py",
"copies": "1",
"size": "10816",
"license": "mit",
"hash": 2513866079621651000,
"line_mean": 31.7757575758,
"line_max": 121,
"alpha_frac": 0.5953217456,
"autogenerated": false,
"ratio": 3.9691743119266056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5064496057526605,
"avg_score": null,
"num_lines": null
} |
__author__ = 'broglea'
import hashlib
import string
import itertools
def hash_value(type=None, value=None):
    """Return the hex digest of `value` under the named hash algorithm.

    Mirrors the module's contract of returning an explanatory string (not
    raising) when an argument is missing or the algorithm is unknown.
    """
    if type is None:
        return 'You must specify a type'
    if value is None:
        return 'You must specify a value'
    algorithms = {
        'MD5': hashlib.md5,
        'SHA1': hashlib.sha1,
        'SHA256': hashlib.sha256,
        'SHA512': hashlib.sha512,
    }
    if type in algorithms:
        return algorithms[type](value).hexdigest()
    return 'Specified type not supported'
# rotational cipher encoder/decoder
def rot(shift, value, encode):
    """Encode or decode `value` with a rotational (Caesar) cipher.

    @param shift: rotation amount as a base-10 string
    @param value: text to transform (lowercased first; non-letters pass through)
    @param encode: the string "True" to encode; anything else decodes
    @return: the transformed text, or "An error occurred" on bad input

    Fixes over the original: the shift is parsed once instead of once per
    alphabet letter; output is built with str.join instead of quadratic
    ``+=``; and the bare ``except:`` no longer swallows SystemExit /
    KeyboardInterrupt.
    """
    try:
        alphabet = string.ascii_lowercase
        n = len(alphabet)
        k = int(shift, 10)
        if encode != "True":
            # decoding is just encoding with the complementary shift
            k = 26 - (k % 26)
        dic = {}
        for i in range(n):
            dic[alphabet[i]] = alphabet[(i + k) % n]
        # map each letter through the table; anything else is kept verbatim
        return ''.join(dic.get(ch, ch) for ch in value.lower())
    except Exception:
        return "An error occurred"
# main base conversion function
def base_conversions(value=None, base=None, currBase=10):
    """Convert `value` from base `currBase` (default decimal) to `base`.

    Returns an explanatory string on missing or invalid input rather than
    raising, matching the rest of this module.

    The original special-cased currBase == 10 with two branches that did
    the same thing (int(str(value), 10) is the decimal parse); they are
    merged. The bare ``except:`` is narrowed to Exception.
    """
    try:
        if base is None:
            return 'You must specify a base'
        if value is None:
            return 'You must specify a value'
        if base < 2:
            return 'Base must be greater than 1'
        base = int(str(base), 10)
        currBase = int(str(currBase), 10)
        # parse the incoming value in its current base, then re-render
        return int_to_base(int(str(value), currBase), base)
    except Exception:
        return "An error occurred"
# converts any integer to any base; only used internally, should never be called from the actual site
def int_to_base(value, base):
    """Render the integer `value` in `base` (2..36) using digits 0-9a-z.

    Returns "An error occurred" on failure, matching the module's contract.
    Fix: the original used ``value /= base``, which is float division on
    Python 3 and breaks the digit indexing (infinite fractional loop);
    floor division is intended and is identical on Python 2 ints.
    """
    try:
        alphanum = string.digits + string.ascii_lowercase
        if value < 0:
            sign = -1
        elif value == 0:
            return '0'
        else:
            sign = 1
        value *= sign
        digits = []
        while value:
            digits.append(alphanum[value % base])
            value //= base  # floor division -- see docstring
        if sign < 0:
            digits.append('-')
        digits.reverse()
        return ''.join(digits)
    except Exception:
        return "An error occurred"
def xor_tool(val=None, xor_key=None):
    """XOR `val` character-wise with `xor_key`, cycling the key to match
    the length of `val`.

    Fix: the original's error messages were copy-pasted from
    base_conversions and named the wrong parameters ('base'/'value');
    they now describe the actual arguments.
    """
    if val is None:
        return 'You must specify a value'
    if xor_key is None:
        return 'You must specify an xor key'
    return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(val, itertools.cycle(xor_key)))
| {
"repo_name": "HackUCF/collabCTF",
"path": "tools/crypto.py",
"copies": "1",
"size": "2974",
"license": "mit",
"hash": 5064461042047061000,
"line_mean": 27.0566037736,
"line_max": 101,
"alpha_frac": 0.5625420309,
"autogenerated": false,
"ratio": 3.986595174262735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004166625879086289,
"num_lines": 106
} |
__author__ = 'brooksc'
from pprint import pprint
import requests
import json, time
from requests.auth import HTTPBasicAuth
# import sys
import collections
import json
import time
API_BASE_URL = 'http://www.bugherd.com/api_v2/{api}'
class Error(Exception):
    # Base exception type for this BugHerd API client module.
    pass
class Response(object):
    """Thin wrapper around an HTTP response body: keeps the original text
    in `raw` and the JSON-decoded form in `body`. Pagination attributes
    (page/pages/paged) are attached later by BaseAPI._request."""
    def __init__(self, body):
        self.raw = body
        self.body = json.loads(body)
class BaseAPI(object):
    # Shared HTTP plumbing for the BugHerd API clients (Python 2 module:
    # uses print statements). Subclasses must provide self.api_key,
    # self.debug, and (optionally) self.proxies before _request is called.
    def __init__(self, token=None):
        self.token = token
    def _request(self, method, api, **kwargs):
        # Perform one authenticated request via the given requests.* method,
        # retrying once after a pause on HTTP 429 (rate limited), and wrap
        # the result in a Response with pagination metadata attached.
        # if self.token:
        #     kwargs.setdefault('params', {})['token'] = self.token
        # NOTE(review): page_num is computed but never used below -- the
        # 'page' kwarg is silently dropped. Confirm whether paging by
        # request parameter was ever wired up.
        if 'page' in kwargs:
            page_num = kwargs['page']
            del kwargs['page']
        else:
            page_num = 1
        # serialize dict payloads to pretty-printed JSON for the API
        if 'data' in kwargs and (type(kwargs['data']) == type(dict()) or type(kwargs['data']) == type(collections.defaultdict())):
            kwargs['data'] = json.dumps(kwargs['data'], sort_keys=True, indent=4)
        if 'headers' not in kwargs:
            kwargs['headers'] = dict()
        kwargs['headers']['Content-Type'] = 'application/json'
        # NOTE(review): self.proxies is an *attribute* access, which raises
        # AttributeError when missing -- this except KeyError will not catch
        # it. Confirm all subclasses set self.proxies (both here do).
        try:
            kwargs['proxies'] = self.proxies
        except KeyError:
            pass
        if self.debug:
            print "%s %s" % (method.__name__.upper(), api)
            print
            if 'data' in kwargs:
                print kwargs['data']
        requests_response = method(API_BASE_URL.format(api=api), auth=HTTPBasicAuth(self.api_key, 'x'),
                                   **kwargs)
        # one blind retry after a fixed pause when rate-limited
        if requests_response.status_code == 429:
            time.sleep(3.0)
            requests_response = method(API_BASE_URL.format(api=api), auth=HTTPBasicAuth(self.api_key, 'x'),
                                       **kwargs)
        # NOTE(review): assert is stripped under `python -O`; a real check
        # would raise Error on non-2xx instead.
        assert requests_response.status_code >= 200 and requests_response.status_code <= 299
        response = Response(requests_response.text)
        # derive page count from the API's meta.count (100 items per page);
        # bodies without that key are treated as unpaged single results
        try:
            response.page = 1
            response.pages = int(response.body['meta']['count'])/100 + 1
            response.paged = True
        except KeyError:
            # print "No tasks found"
            # print response.body.keys()
            response.paged = False
            response.page = 1
            response.pages = 1
        if self.debug:
            print "Response: %s" % requests_response.status_code
            print
            if requests_response.json():
                print json.dumps(requests_response.json(), sort_keys=True, indent=4)
        # if not requests_response.successful:
        #     raise Error(requests_response.error)
        # return requests_response
        return response
    # thin verb-specific wrappers over _request
    def _get(self, api, **kwargs):
        return self._request(requests.get, api, **kwargs)
    def _post(self, api, **kwargs):
        return self._request(requests.post, api, **kwargs)
    def _put(self, api, **kwargs):
        return self._request(requests.put, api, **kwargs)
    def _delete(self, api, **kwargs):
        return self._request(requests.delete, api, **kwargs)
# Usage Examples
# bh = BugHerd(api_key)
# bh = BugHerd(api_key, True) # enable debugging
# bh.organization()
# bh.projects()
# bh.projects_active()
# bh.users()
# bh.members()
# bh.guests()
class BugHerd(BaseAPI):
    """Top-level entry point for the BugHerd v2 API.

    Usage:
        bh = BugHerd(api_key)
        bh = BugHerd(api_key, True)   # enable debugging
        bh.organization()
        bh.users(); bh.members(); bh.guests()
        bh.project(123)
    """

    def __init__(self, api_key, debug=False):
        self.api_key = api_key
        self.url = API_BASE_URL
        # No proxying by default; populate to route through a debug proxy.
        self.proxies = {}
        self.debug = debug

    def project(self, project_id=None):
        """Return a Project helper sharing this account's credentials."""
        return Project(self.api_key, project_id, self.debug)

    def organization(self):
        """GET /api_v2/organization.json -- detail about the account."""
        return self._get('organization.json')

    def users(self):
        """GET /api_v2/users.json -- all people in the account."""
        return self._get('users.json')

    def members(self):
        """GET /api_v2/users/members.json -- members only."""
        return self._get('users/members.json')

    def guests(self):
        """GET /api_v2/users/guests.json -- guests only."""
        return self._get('users/guests.json')
# bh = BugHerd(api_key)
# project = bh.Project()
# project.list()
# project = bh.Project(123)
# project.id # 123
# project.show()
# project.update??
class Project(BaseAPI):
    """Operations on BugHerd projects.

    Usage:
        project = bh.project()       # unbound: list() / list_active() / create()
        project = bh.project(123)    # bound:   details() / delete() / task(...)
    """

    def __init__(self, api_key, project_id=None, debug=False):
        self.api_key = api_key
        self.project_id = project_id
        self.id = project_id
        self.debug = debug
        self.proxies = {}

    def list(self):
        """GET /api_v2/projects.json -- every project in the account."""
        return self._get('projects.json')

    def list_active(self):
        """GET /api_v2/projects/active.json -- active projects only."""
        return self._get('projects/active.json')

    def details(self):
        """GET /api_v2/projects/#{project_id}.json -- one project's detail.

        Tasks are not included; use ``task()`` for those.
        Raises ValueError when this helper is not bound to a project.
        """
        if not self.project_id:
            raise ValueError('details() requires a project_id')
        return self._get('projects/%s.json' % self.project_id)

    def task(self, task_id=None):
        """Return a Task helper bound to this project."""
        return Task(self.api_key, self.project_id, task_id, self.debug)

    def create(self, name, devurl, is_active=True, is_public=False):
        """POST /api_v2/projects.json -- create a project (no members yet).

        Request shape:
            {"project": {"name": ..., "devurl": ...,
                         "is_active": true, "is_public": false}}
        Raises ValueError when name or devurl is missing.
        """
        if not name or not devurl:
            raise ValueError('create() requires both name and devurl')
        tree = lambda: collections.defaultdict(tree)
        data = tree()
        data['project']['name'] = name
        # devurl is already validated above; the original re-checked it.
        data['project']['devurl'] = devurl
        data['project']['is_active'] = is_active
        data['project']['is_public'] = is_public
        return self._post('projects.json', data=data)

    def add_member(self, user_id):
        """POST /api_v2/projects/#{project_id}/add_member.json

        Request data: {"user_id": 123}
        """
        # TODO: implement
        pass

    def add_guest(self, user_id=None, email=None):
        """POST /api_v2/projects/#{project_id}/add_guest.json

        Add an existing guest by id, or invite someone by email address.
        Request data: {"user_id": 123} or {"email": "someone@example.com"}
        """
        url = "projects/%s/add_guest.json" % (self.project_id)
        data = dict()
        if user_id:
            data['user_id'] = user_id
        if email:
            data['email'] = email
        return self._post(url, data=data)

    def update(self, is_public=None):
        """PUT /api_v2/projects/#{project_id}.json -- update settings.

        Only projects you own can be updated.
        Example request data: {"project": {"is_public": true}}
        """
        # TODO: implement
        pass

    def delete(self):
        """DELETE /api_v2/projects/#{project_id}.json

        Deletes the project and ALL associated data; cannot be undone.
        Raises ValueError when this helper is not bound to a project.
        """
        if not self.project_id:
            raise ValueError('delete() requires a project_id')
        return self._delete('projects/%s.json' % self.project_id)
# task = bh.project(123).Task()
# task.create(description, requester_id, assigned_to_id, status, priority, tags)
# task.list()
# task = bh.project(123).Task(123)
# task.id
# task.update??
# task.detail()
class Task(BaseAPI):
    """Operations on tasks within a single BugHerd project.

    Usage:
        task = bh.project(123).task()     # unbound: list() / create()
        task = bh.project(123).task(456)  # bound:   detail() / update()
    """

    # GET parameters accepted by the task-list endpoint.
    _LIST_FILTERS = ['updated_since', 'created_since', 'status', 'priority',
                     'tag', 'assigned_to_id', 'external_id', 'page']

    def __init__(self, api_key, project_id, task_id=None, debug=False):
        self.api_key = api_key
        self.project_id = project_id
        self.id = task_id
        self.task_id = task_id
        self.proxies = {}
        self.debug = debug

    def list(self, **kwargs):
        """GET /api_v2/projects/#{project_id}/tasks.json

        Full task list for the project, including archived tasks.  Any of
        the filters in ``_LIST_FILTERS`` may be passed as keyword
        arguments; every supplied filter is applied.  (The original broke
        out of its loop after the first matching key, silently dropping
        all further filters.)
        """
        url = "projects/%s/tasks.json" % (self.project_id)
        params = ['%s=%s' % (key, kwargs[key])
                  for key in self._LIST_FILTERS if key in kwargs]
        if params:
            url += '?' + '&'.join(params)
        return self._get(url)

    def detail(self):
        """GET /api_v2/projects/#{project_id}/tasks/#{task_id}.json

        Full detail for one task, including comments, attachments, etc.
        """
        url = "projects/%s/tasks/%s.json" % (self.project_id, self.task_id)
        return self._get(url)

    def create(self, description=None, requester_id=None, assigned_to_id=None,
               status=None, priority=None, tags=None):
        """POST /api_v2/projects/#{project_id}/tasks.json -- create a task.

        priority -- not set, critical, important, normal, or minor.
        status   -- backlog, todo, doing, done, or closed; omit/None sends
                    the task to the Feedback panel.
        tags     -- list of tag names (sent as "tag_names").
        """
        url = "projects/%s/tasks.json" % (self.project_id)
        tree = lambda: collections.defaultdict(tree)
        data = tree()
        data['task']['description'] = description
        if requester_id:
            data['task']['requester_id'] = requester_id
        if assigned_to_id:
            data['task']['assigned_to_id'] = assigned_to_id
        if tags:
            data['task']['tag_names'] = tags
        if status:
            data['task']['status'] = status
        if priority:
            data['task']['priority'] = priority
        return self._post(url, data=data)

    def update(self, data):
        """PUT /api_v2/projects/#{project_id}/tasks/#{task_id}.json

        ``data`` is the full request payload, e.g.
            {"task": {"priority": "normal", "status": "backlog"}}
        Only status and assigned_to_id may be unset (pass null).  Include
        "updater_email" to attribute the change to a project member for
        audit logging.
        """
        url = "projects/%s/tasks/%s.json" % (self.project_id, self.task_id)
        return self._put(url, data=data)
# attachments = bh.Project(123).Task(456).attachments
# attachments.list()
# attachments(123).show()
# attachments().create()
# attachments(123).delete()
# TODO: implement
# class Attachments(BaseAPI):
# List attachments
#
# Get a paginated list of attachments for a task.
#
# GET /api_v2/projects/#{project_id}/tasks/#{task_id}/attachments.json
#
#
# Show attachment
#
# Get detail for specific attachment.
#
# GET /api_v2/projects/#{project_id}/tasks/#{task_id}/attachments/#{id}.json
#
#
# Create attachment
#
# Adds a new attachment to the specified task using an existing URL.
#
# POST /api_v2/projects/#{project_id}/tasks/#{task_id}/attachments.json
# Request data:
#
# {"comment":{
# "file_name":"resolution.gif",
# "url":"http://i.imgur.com/U9h3jZI.gif"
# }}
#
# Upload attachment
#
# Upload a new attachment and add it to the specified task. The file contents need to be specified as the POST data on this request.
#
# Note that your upload needs to be reasonable in size as the maximum time the request may take is around 30 seconds. If you have larger uploads please create arrange your own file upload and create the attachment from a URL instead.
#
# POST /api_v2/projects/#{project_id}/tasks/#{task_id}/attachments/upload
# Note in the sample below please specify an existing file name.
#
#
# Delete attachment
#
# Delete an attachment from a task. Note that this action is permanent and cannot be undone.
#
# DELETE /api_v2/projects/#{project_id}/tasks/#{task_id}/attachments/#{id}.json
#
class Comments(BaseAPI):
    """Operations on comments attached to a task."""

    # TODO: list comments --
    # GET /api_v2/projects/#{project_id}/tasks/#{task_id}/comments.json

    def create_comment(self, project_id, task_id, comment, user_id):
        """POST /api_v2/projects/#{project_id}/tasks/#{task_id}/comments.json

        Request data: {"comment": {"text": ..., "user_id": ...}}
        (The API also accepts "email" in place of "user_id"; not
        supported here.)
        Raises ValueError when any argument is missing.
        """
        if not project_id or not task_id or not comment or not user_id:
            raise ValueError('create_comment() requires project_id, task_id, '
                             'comment and user_id')
        url = "projects/%s/tasks/%s/comments.json" % (project_id, task_id)
        tree = lambda: collections.defaultdict(tree)
        data = tree()
        data['comment']['text'] = comment
        data['comment']['user_id'] = user_id
        return self._post(url, data=data)
# class Webhook(BaseAPI):
# def list_webhooks(self):
# return self._get('webhooks.json')
#
# def create_webhook(self, target_url, event, project_id=None):
# url = "%s/webhooks.json" % (self.url)
# tree = lambda: collections.defaultdict(tree)
# data = tree()
# if project_id:
# data['project_id'] = project_id
# data['target_url'] = target_url
# data['event'] = event
# return self._post(url, data)
#
# def delete_webhooks(self, webhook_id):
# url = "%s/webhooks/%s.json" % (self.url, webhook_id)
# return self._delete(url)
#
# def list_comments(self, project_id, task_id):
# url = "%s/projects/%s/tasks/%s/comments.json" % (self.url, project_id, task_id)
# return self._get(url)
| {
"repo_name": "brooksc/bugherd",
"path": "bugherd/__init__.py",
"copies": "1",
"size": "15558",
"license": "mit",
"hash": 8345657850478032000,
"line_mean": 29.5058823529,
"line_max": 233,
"alpha_frac": 0.5903072374,
"autogenerated": false,
"ratio": 3.5103790613718413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4600686298771841,
"avg_score": null,
"num_lines": null
} |
"""
Check that promotion of read replicas and renaming instances works as expected
"""
import unittest
import time
from boto.rds import RDSConnection
class PromoteReadReplicaTest(unittest.TestCase):
rds = True
def setUp(self):
self.conn = RDSConnection()
self.mainDB_name = "boto-db-%s" % str(int(time.time()))
self.replicaDB_name = "replica-%s" % self.mainDB_name
self.renamedDB_name = "renamed-replica-%s" % self.mainDB_name
def tearDown(self):
instances = self.conn.get_all_dbinstances()
for db in [self.mainDB_name, self.replicaDB_name, self.renamedDB_name]:
for i in instances:
if i.id == db:
self.conn.delete_dbinstance(db, skip_final_snapshot=True)
def test_promote(self):
print '--- running RDS promotion & renaming tests ---'
self.mainDB = self.conn.create_dbinstance(self.mainDB_name, 5, 'db.t1.micro', 'root', 'bototestpw')
# Wait up to 15 minutes for the mainDB to become available
print '--- waiting for "%s" to become available ---' % self.mainDB_name
wait_timeout = time.time() + (15 * 60)
time.sleep(60)
instances = self.conn.get_all_dbinstances(self.mainDB_name)
inst = instances[0]
while wait_timeout > time.time() and inst.status != 'available':
time.sleep(15)
instances = self.conn.get_all_dbinstances(self.mainDB_name)
inst = instances[0]
self.assertTrue(inst.status == 'available')
self.replicaDB = self.conn.create_dbinstance_read_replica(self.replicaDB_name, self.mainDB_name)
# Wait up to 15 minutes for the replicaDB to become available
print '--- waiting for "%s" to become available ---' % self.replicaDB_name
wait_timeout = time.time() + (15 * 60)
time.sleep(60)
instances = self.conn.get_all_dbinstances(self.replicaDB_name)
inst = instances[0]
while wait_timeout > time.time() and inst.status != 'available':
time.sleep(15)
instances = self.conn.get_all_dbinstances(self.replicaDB_name)
inst = instances[0]
self.assertTrue(inst.status == 'available')
# Promote the replicaDB and wait for it to become available
self.replicaDB = self.conn.promote_read_replica(self.replicaDB_name)
# Wait up to 15 minutes for the replicaDB to become available
print '--- waiting for "%s" to be promoted and available ---' % self.replicaDB_name
wait_timeout = time.time() + (15 * 60)
time.sleep(60)
instances = self.conn.get_all_dbinstances(self.replicaDB_name)
inst = instances[0]
while wait_timeout > time.time() and inst.status != 'available':
time.sleep(15)
instances = self.conn.get_all_dbinstances(self.replicaDB_name)
inst = instances[0]
# Verify that the replica is now a standalone instance and no longer
# functioning as a read replica
self.assertTrue(inst)
self.assertTrue(inst.status == 'available')
self.assertFalse(inst.status_infos)
# Verify that the main no longer has any read replicas
instances = self.conn.get_all_dbinstances(self.mainDB_name)
inst = instances[0]
self.assertFalse(inst.read_replica_dbinstance_identifiers)
print '--- renaming "%s" to "%s" ---' % ( self.replicaDB_name, self.renamedDB_name )
self.renamedDB = self.conn.modify_dbinstance(self.replicaDB_name, new_instance_id=self.renamedDB_name, apply_immediately=True)
# Wait up to 15 minutes for the mainDB to become available
print '--- waiting for "%s" to exist ---' % self.renamedDB_name
wait_timeout = time.time() + (15 * 60)
time.sleep(60)
# Wait up to 15 minutes until the new name shows up in the instance table
found = False
while found == False and wait_timeout > time.time():
instances = self.conn.get_all_dbinstances()
for i in instances:
if i.id == self.renamedDB_name:
found = True
if found == False:
time.sleep(15)
self.assertTrue(found)
print '--- waiting for "%s" to become available ---' % self.renamedDB_name
instances = self.conn.get_all_dbinstances(self.renamedDB_name)
inst = instances[0]
# Now wait for the renamed instance to become available
while wait_timeout > time.time() and inst.status != 'available':
time.sleep(15)
instances = self.conn.get_all_dbinstances(self.renamedDB_name)
inst = instances[0]
self.assertTrue(inst.status == 'available')
# Since the replica DB was renamed...
self.replicaDB = None
print '--- tests completed ---'
| {
"repo_name": "KaranToor/MA450",
"path": "google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds/test_promote_modify.py",
"copies": "2",
"size": "5397",
"license": "apache-2.0",
"hash": -8197522717942799000,
"line_mean": 38.1086956522,
"line_max": 134,
"alpha_frac": 0.6283120252,
"autogenerated": false,
"ratio": 3.866045845272206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5494357870472206,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.