index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
40,025
|
sumitskr/action-repo
|
refs/heads/main
|
/db.py
|
# getting mongodb database
import pymongo
import datetime
import pytz
# from wtforms import SubmitField,ValidationError
# NOTE(review): database credentials are hard-coded in this connection string
# and exposed to anyone who can read the file — move them to an environment
# variable or a config file kept out of version control, and rotate them.
myclient = pymongo.MongoClient(
    "mongodb+srv://sumit:sumitsarkar@cluster0.c5xwu.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
# "git" database / "req" collection: destination for the activity documents below.
git = myclient.get_database("git")
activity = git.get_collection('req')  # getting collection
def timeStamp():
    """Return the current UTC time as a display string.

    Format: '<DD><ordinal suffix> <Mon> <Year> - <HH:MM> <AM/PM> UTC',
    e.g. '03rd Jan 2024 - 07:15 PM UTC' (day keeps %d zero padding).
    """
    now = datetime.datetime.now(datetime.timezone.utc)
    day_num = int(now.strftime("%d"))
    # 4th-20th and 24th-30th all take 'th'; the rest follow last-digit rules
    if 4 <= day_num <= 20 or 24 <= day_num <= 30:
        ordinal = "th"
    else:
        ordinal = ("st", "nd", "rd")[day_num % 10 - 1]
    stamped = now.strftime("%d") + ordinal
    stamped += now.strftime(" %b %Y - %I:%M %p ") + 'UTC'
    return stamped
class Pull:
    """A pull-request activity event destined for the 'req' collection."""

    def __init__(self, request_id, author, action, from_branch, to_branch):
        self.request_id = request_id
        self.author = author
        self.action = action
        self.from_branch = from_branch
        self.to_branch = to_branch
        self.timestamp = timeStamp()  # human-readable UTC creation time

    def commit(self):
        """Persist this event to MongoDB.

        Inserts a *copy* of the attribute dict: pymongo's insert_one injects
        an '_id' key into the document it is given, which would otherwise
        mutate this instance and make a second commit() raise a duplicate-key
        error.
        """
        activity.insert_one(dict(self.__dict__))
|
{"/app.py": ["/db.py"]}
|
40,029
|
gracetuxy/popular-music-analysis
|
refs/heads/main
|
/spot.py
|
import pandas as pd
from dataclasses import dataclass, field, asdict
from typing import List, Tuple
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import json
import billboard
from collections import defaultdict, Counter
from models import *
# spotipy wraps the official spotify api providing simple python functions.
# TODO: Replace these two variables with the client_id and client_secret that you generated
# NOTE(review): real-looking credentials are committed here — rotate them and
# load from environment variables instead of hard-coding them in source.
CLIENT_ID = "44dfa19c82854a37bc9d6385bf4cca4f"
CLIENT_SECRET = "cfe60a68a11a48c29cf20cd80b04a5c3"
# https://developer.spotify.com/dashboard/applications to get client_id and client_secret
# Module-level Spotify client (client-credentials flow) used by every function below.
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id=CLIENT_ID,
                                                           client_secret=CLIENT_SECRET))
def getPlaylist(id: str) -> List[Track]:
    '''
    Given a playlist ID, returns a list of Track objects corresponding to the
    songs on the playlist. See models.py for the definitions of the Track,
    Artist, and AudioFeatures dataclasses.

    Audio features populate each track's audio_features field; each artist's
    genres populate the artists list.
    '''
    # fetch tracks data from spotify given a playlist id
    playlistdata = sp.playlist(id)
    tracks = playlistdata['tracks']['items']
    # fetch audio features based on the data stored in the playlist result
    track_ids = [x['track']['id'] for x in tracks]
    audio_features = sp.audio_features(track_ids)
    # Audio features list might not be in the same order as the track list,
    # so key them by track id.
    audio_info = {}
    for af in audio_features:
        audio_info[af['id']] = AudioFeatures(
            af['danceability'], af['energy'], af['key'], af['loudness'],
            af['mode'], af['speechiness'], af['acousticness'],
            af['instrumentalness'], af['liveness'], af['valence'],
            af['tempo'], af['duration_ms'], af['time_signature'], af['id'])
    # collect unique artist ids, preserving first-seen order
    artist_ids = []
    for t in tracks:
        for artist in t['track']['artists']:
            if artist['id'] not in artist_ids:
                artist_ids.append(artist['id'])
    artists = {}
    # The artists endpoint accepts at most 50 ids per request. Ceil-divide so
    # that an exact multiple of 50 does not trigger an extra request with an
    # empty id list (the old `1 + len // 50` bound did exactly that).
    for k in range((len(artist_ids) + 49) // 50):
        artists_response = sp.artists(artist_ids[k * 50:(k + 1) * 50])
        for a in artists_response['artists']:
            artists[a['id']] = Artist(a['id'], a['name'], a['genres'])
    # populate track dataclass
    trackList = [Track(id=t['track']['id'],
                       name=t['track']['name'],
                       artists=[artists[a['id']] for a in t['track']['artists']],
                       audio_features=audio_info[t['track']['id']])
                 for t in tracks]
    return trackList
''' this function is just a way of naming the list we're using. You can write
additional functions like "top Canadian hits!" if you want.'''
def getHot100() -> List[Track]:
    """Fetch the Billboard Hot 100 mirror playlist as a list of Tracks."""
    # Spotify playlist URI id for the Billboard Hot 100 mirror playlist
    return getPlaylist("6UeSakyzhiEt4NB3UAd6NQ")
# ---------------------------------------------------------------------
# part1: implement helper functions to organize data into DataFrames
def getGenres(t: Track) -> List[str]:
    '''
    Produce the list of unique genres across all artists of *t*,
    preserving first-seen order.
    '''
    # dict.fromkeys de-duplicates while keeping insertion order
    seen = dict.fromkeys(genre for artist in t.artists for genre in artist.genres)
    return list(seen)
def doesGenreContains(t: Track, genre: str) -> bool:
    '''
    Checks if the genres of a track contains the key string specified.
    For example, if a Track's unique genres are ['pop', 'country pop', 'dance pop']
    doesGenreContains(t, 'dance') == True
    doesGenreContains(t, 'pop') == True
    doesGenreContains(t, 'hip hop') == False
    '''
    # substring match against each of the track's unique genres
    return any(genre in g for g in getGenres(t))
def getTrackDataFrame(tracks: List[Track]) -> pd.DataFrame:
    '''
    Prepare a dataframe for a list of tracks.
    audio-features: 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
                    'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
                    'duration_ms', 'time_signature', 'id',
    track & artist: 'track_name', 'artist_ids', 'artist_names', 'genres',
                    'is_pop', 'is_rap', 'is_dance', 'is_country'
    '''
    def record_for(t):
        # start from the audio-features fields, then append track/artist columns
        rec = asdict(t.audio_features)
        rec["track_name"] = t.name
        rec["artist_ids"] = [a.id for a in t.artists]
        rec["artist_names"] = [a.name for a in t.artists]
        rec["genres"] = getGenres(t)
        for flag, key in (("is_pop", "pop"), ("is_rap", "rap"),
                          ("is_dance", "dance"), ("is_country", "country")):
            rec[flag] = doesGenreContains(t, key)
        return rec

    return pd.DataFrame.from_records([record_for(t) for t in tracks])
# minor testing code:
# NOTE(review): this runs at import time and hits the Spotify/Billboard APIs —
# consider guarding it with `if __name__ == "__main__":`.
top100Tracks = getHot100()
df = getTrackDataFrame(top100Tracks)
# boolean-mask slices of the chart by genre flag
df_rap=df[df['is_rap']]
df_notrap=df[~df['is_rap']]
df_pop=df[df['is_pop']]
df_dance=df[df['is_dance']]
df_country=df[df['is_country']]
# you may want to experiment with the dataframe now!
# ---------------------------------------------------------------------
# Part2: The most popular artist of the week
def artist_with_most_tracks(tracks: List[Track]) -> (Artist, int):
    '''
    List of tracks -> (artist, number of tracks the artist has)
    Finds the artist with the most tracks on the list; ties are broken
    arbitrarily. Returns None when *tracks* is empty (nothing to count),
    matching the original implicit behaviour.
    '''
    tally = Counter(art.name for t in tracks for art in t.artists)
    if not tally:
        return None
    # unpack the single (name, count) winner directly instead of building
    # two throwaway lists around most_common(1)
    top_name, top_count = tally.most_common(1)[0]
    # the Counter only knows the name; scan back for the Artist object itself
    for t in tracks:
        for art in t.artists:
            if art.name == top_name:
                return (art, top_count)
# minor testing code:
artist, num_track = artist_with_most_tracks(top100Tracks)
print("%s has the most number of tracks on this week's Hot 100 at a whopping %d tracks!" % (artist.name, num_track))
# Part3: Data Visualization
# Each pair below overlays two genre slices on one Axes, saves a PNG, then shows it.
# 3.1 scatter plot of dancability-tempo colored by genre is_rap
ax=df_rap.plot.scatter(x="danceability", y="tempo",c='orange', label="is rap")
df_notrap.plot.scatter(x="danceability", y="tempo",c='grey',label="not rap", ax=ax)
plt.savefig('dancability-tempo.png')
plt.show()
# 3.1 scatter plot of dancability-speechiness colored by genre is_rap
ax=df_rap.plot.scatter(x="danceability", y="speechiness",c='orange', label="is rap")
df_notrap.plot.scatter(x="danceability", y="speechiness",c='grey',label="not rap", ax=ax)
plt.savefig('dancability-speechiness.png')
plt.show()
# 3.2 scatter plot (which genre is most similar to pop in terms in speechiness and energy?)
axi=df_pop.plot.scatter(x="speechiness", y="energy", c='pink', marker='o', label='pop')
df_dance.plot.scatter(x="speechiness", y="energy", c='cyan', marker='.', label='dance', ax=axi)
plt.savefig('pop-dance.png')
plt.show()
axi=df_pop.plot.scatter(x="speechiness", y="energy", c='pink', marker='o', label='pop')
df_country.plot.scatter(x="speechiness", y="energy", c='yellow', marker='.', label='country', ax=axi)
plt.savefig('pop-country.png')
plt.show()
axi=df_pop.plot.scatter(x="speechiness", y="energy", c='pink', marker='o', label='pop')
df_rap.plot.scatter(x="speechiness", y="energy", c='red', marker='.', label='rap', ax=axi)
plt.savefig('pop-rap.png')
plt.show()
# (Bonus) Part4:
# take a song that's not on the list, compute distance with the songs on the list and see if we get the same artist
def genres_in_top10(tracks: List[Track]) -> dict:
    '''
    Assuming tracks are ordered top-to-bottom by rank, count how often each
    genre appears among the artists of the top 10 tracks.
    '''
    counts = Counter()
    for t in tracks[:10]:
        for art in t.artists:
            counts.update(art.genres)
    return counts
|
{"/spot.py": ["/models.py"]}
|
40,030
|
gracetuxy/popular-music-analysis
|
refs/heads/main
|
/models.py
|
from dataclasses import dataclass, field
from typing import List, Tuple
@dataclass
class Artist:
    """A Spotify artist: id, display name, and genre labels."""
    id: str
    name: str
    genres: List[str]


@dataclass
class AudioFeatures:
    """Spotify audio-features vector for one track.

    Field order matters: callers (see spot.py) construct this positionally
    from the audio-features API response.
    """
    danceability: float
    energy: float
    key: int
    loudness: float
    mode: int
    speechiness: float
    acousticness: float
    instrumentalness: float
    liveness: float
    valence: float
    tempo: float
    duration_ms: int
    time_signature: int
    id: str  # Spotify track id this feature vector belongs to


@dataclass
class Track:
    """A playlist entry: track id/name plus its artists and audio features."""
    id: str
    name: str
    artists: List[Artist]
    audio_features: AudioFeatures
|
{"/spot.py": ["/models.py"]}
|
40,032
|
Kubster96/narcotic_sound_generator
|
refs/heads/master
|
/bar.py
|
from note import Note
class Bar:
    """One bar (measure) of a song, filled with randomly generated notes.

    All state is per-instance: the previous version declared `notes`/`scale`
    as mutable *class* attributes, so every Bar shared one notes list and
    each new bar re-appended all earlier bars' notes.
    """

    def __init__(self, scale, key, time, meter, octaves_range):
        self.notes = []          # generated [time, pitch, velocity, duration] rows
        self.scale = scale       # scale degrees available in this bar
        self.key = key           # semitone offset of the song's key
        self.time = time         # start time of the next note within the song
        self.length = meter      # remaining beats left to fill in this bar
        self.octaves_range = octaves_range  # how many octaves of pitches to use (1-3)

    def generate_tact(self):
        """Fill the bar with random notes until its length is used up."""
        while self.length > 0:
            note = Note(self.length, self.scale, self.time, self.key, self.octaves_range)
            note.generate_note()
            # copy the row so later mutation of note.note cannot alias ours
            new_note = list(note.note)
            self.notes.append(new_note)
            self.time += note.duration
            self.length -= note.duration
|
{"/bar.py": ["/note.py"], "/main.py": ["/song.py"], "/song.py": ["/bar.py"]}
|
40,033
|
Kubster96/narcotic_sound_generator
|
refs/heads/master
|
/main.py
|
import argparse
from song import Song
from miditime.miditime import MIDITime
import sys
def main():
    """Parse CLI arguments, validate them, and generate a random MIDI song."""
    parser = argparse.ArgumentParser(description='Provide parameters!')
    parser.add_argument('tempo', help="Provide tempo of the song!", type=int)
    parser.add_argument('key', help="Provide key of the song!", type=int)
    parser.add_argument('scale', help="Provide scale of the song!", type=int)
    parser.add_argument('number_of_bars', help="Provide number of bars!", type=int)
    parser.add_argument('meter', help="Provide the meter of the song!", type=int)
    parser.add_argument('octaves_range', help="Provide the range of octaves", type=int)
    parser.add_argument('song_name', help="Provide song name", type=str)
    args = parser.parse_args()
    tempo = args.tempo
    key = args.key
    scale = args.scale
    number_of_bars = args.number_of_bars
    meter = args.meter
    octaves_range = args.octaves_range
    song_name = "resources/" + args.song_name + ".mid"
    # scales are in scales.txt file, one comma-separated scale per line
    with open('resources/scales.txt', 'r') as f:
        x = f.readlines()
    # validate everything before doing any work; scale is 1-based, and the
    # lower bound is now checked too (scale=0 previously slipped through and
    # silently selected the last line via x[-1])
    if scale < 1 or scale > len(x):
        print("Scale number is out of range")
        sys.exit()
    if key > 11 or key < 0:
        print("Key number should be between 0 - 11")
        sys.exit()
    if meter < 1 or meter > 7:
        print("Meter should be between 1 - 7")
        sys.exit()
    if tempo < 1 or tempo > 300:
        print("Tempo should be between 1 - 300")
        sys.exit()
    if number_of_bars < 1 or number_of_bars > 50:
        print("Number of bars should be between 1 - 50")
        sys.exit()
    if octaves_range < 1 or octaves_range > 3:
        print("Number of octaves should be between 1 - 3")
        sys.exit()
    scale_list = x[scale-1].replace('\n', '')
    scale_list = scale_list.split(", ")
    # scale both by 4 before use — units assumed by Song/MIDITime (TODO confirm)
    tempo *= 4
    meter *= 4
    song = Song(number_of_bars, scale_list, tempo, key, meter, octaves_range)
    song.generate_song()
    midi = MIDITime(tempo, song_name)
    song_notes = song.notes
    midi.add_track(song_notes)
    midi.save_midi()


if __name__ == "__main__":
    main()
|
{"/bar.py": ["/note.py"], "/main.py": ["/song.py"], "/song.py": ["/bar.py"]}
|
40,034
|
Kubster96/narcotic_sound_generator
|
refs/heads/master
|
/note.py
|
import math
from random import randint
class Note:
    """A single randomly generated note.

    After generate_note(), `self.note` holds [start_time, pitch, velocity,
    duration] and `self.duration` holds the chosen duration. The previous
    version only set a *local* `duration` inside generate_note, so the
    caller's `note.duration` access (Bar.generate_tact) raised
    AttributeError; `note` was also a class attribute shared by every
    instance. Both are per-instance now.
    """

    def __init__(self, length, scale, time, key, octaves_range):
        self.note = [0, 0, 0, 0]   # [time, pitch, velocity, duration]
        self.max_length = length   # longest duration that still fits the bar
        self.scale = scale         # scale degrees as strings (from scales.txt)
        self.time = time           # start time of this note within the song
        self.key = key             # semitone offset added to every pitch
        self.octaves_range = octaves_range
        self.duration = 0          # set by generate_note(); read by Bar

    def generate_note(self):
        # candidate durations: powers of two that fit in the remaining bar
        durations = []
        i = 0
        while 2**i <= self.max_length:
            durations += [2**i]
            i += 1
        self.duration = durations[randint(0, len(durations)-1)]
        # candidate pitches: scale degrees, duplicated into extra octaves.
        # The last degree is skipped — presumably the octave repeat in
        # scales.txt; TODO confirm against the data file.
        pitches = []
        if self.octaves_range == 3:
            for i in range(0, len(self.scale)-1):
                pitches += [int(self.scale[i])-12]
        if self.octaves_range >= 2:
            for i in range(0, len(self.scale) - 1):
                pitches += [int(self.scale[i])+12]
        if self.octaves_range >= 1:
            for i in range(0, len(self.scale) - 1):
                pitches += [int(self.scale[i])]
        # 60 is middle C; key shifts the whole scale
        pitch = 60 + self.key + pitches[randint(0, len(pitches)-1)]
        velocity = 127  # fixed maximum velocity
        self.note[0] = self.time
        self.note[1] = pitch
        self.note[2] = velocity
        self.note[3] = self.duration
|
{"/bar.py": ["/note.py"], "/main.py": ["/song.py"], "/song.py": ["/bar.py"]}
|
40,035
|
Kubster96/narcotic_sound_generator
|
refs/heads/master
|
/song.py
|
from bar import Bar
class Song:
    """A whole song: a sequence of randomly generated bars.

    State is per-instance: the previous version used mutable *class*
    attributes (notes = [], scale = []) shared by every Song instance.
    """

    def __init__(self, number_of_bars, scale_list, tempo, key, meter, octaves_range):
        self.notes = []   # accumulated [time, pitch, velocity, duration] rows
        self.number_of_bars = number_of_bars
        self.scale = scale_list
        self.tempo = tempo
        self.key = key
        self.time = 0     # running start time handed to each new bar
        self.meter = meter
        self.octaves_range = octaves_range

    def generate_song(self):
        """Generate every bar, collecting its notes and advancing time."""
        while self.number_of_bars > 0:
            bar = Bar(self.scale, self.key, self.time, self.meter, self.octaves_range)
            bar.generate_tact()
            self.notes = self.notes + bar.notes
            self.time += self.meter
            self.number_of_bars -= 1
|
{"/bar.py": ["/note.py"], "/main.py": ["/song.py"], "/song.py": ["/bar.py"]}
|
40,036
|
gasman/telepath-poc
|
refs/heads/master
|
/formfields/views.py
|
import json
from django.shortcuts import render
from telepath import JSContext
from .forms import MyForm
def index(request):
    """Render the form-fields demo: pack every widget on MyForm for the
    client-side telepath unpacker, along with some fixed initial data."""
    form = MyForm()
    packer = JSContext()
    # pack each field's widget into its JS-constructible representation
    packed_widgets = {
        name: packer.pack(field.widget)
        for name, field in form.fields.items()
    }
    initial_data = {
        'title': "Matthew",
        'description': "just this guy, y'know?",
        'office': "charlbury",
        'profile_page': {
            'id': 3,
            'parentId': 2,
            'title': 'Matthew',
            'editUrl': '/cms/pages/3/',
        },
    }
    return render(request, 'formfields/index.html', {
        'media': packer.media,
        'widgets': json.dumps(packed_widgets),
        'form_data': json.dumps(initial_data),
    })
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,037
|
gasman/telepath-poc
|
refs/heads/master
|
/collage/apps.py
|
from django.apps import AppConfig
class CollageConfig(AppConfig):
    """Django app configuration for the collage demo app."""
    name = 'collage'
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,038
|
gasman/telepath-poc
|
refs/heads/master
|
/streamfield/apps.py
|
from django.apps import AppConfig
class StreamfieldConfig(AppConfig):
    """Django app configuration for the streamfield demo app."""
    name = 'streamfield'
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,039
|
gasman/telepath-poc
|
refs/heads/master
|
/formfields/apps.py
|
from django.apps import AppConfig
class FormfieldsConfig(AppConfig):
    """Django app configuration for the formfields demo app."""
    name = 'formfields'
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,040
|
gasman/telepath-poc
|
refs/heads/master
|
/telepath/apps.py
|
from django.apps import AppConfig
class TelepathConfig(AppConfig):
    """Django app configuration for the telepath packing library app."""
    name = 'telepath'
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,041
|
gasman/telepath-poc
|
refs/heads/master
|
/formfields/models.py
|
from django.db import models
from wagtail.admin.edit_handlers import PageChooserPanel
from wagtail.core.models import Page
class PersonPage(Page):
    """Wagtail page type with an optional link to another page, edited via a
    PageChooserPanel (drives the page-chooser widget in this demo)."""
    # nullable so the link is optional; SET_NULL keeps this page alive if the
    # target page is deleted
    other_page = models.ForeignKey(Page, blank=True, null=True, on_delete=models.SET_NULL, related_name='+')

    content_panels = Page.content_panels + [
        PageChooserPanel('other_page'),
    ]
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,042
|
gasman/telepath-poc
|
refs/heads/master
|
/telepath/__init__.py
|
from django import forms
from django.forms import MediaDefiningClass
# Global registry mapping a Python class to the adapter instance that knows
# how to serialise that class for the client side.
adapters = {}


def register(adapter, cls):
    """Make *adapter* the packer used for instances of *cls*."""
    adapters[cls] = adapter
class JSContext:
    """Accumulates the JS media and packed representations needed to rebuild
    server-side objects in the browser."""

    def __init__(self):
        # every page using telepath needs the base unpacker script
        self.media = forms.Media(js=['telepath.js'])
        self.objects = {}

    def pack(self, obj):
        """Serialise *obj* using the first adapter registered for a class in
        its MRO; raises when no adapter matches."""
        adapter = None
        for klass in type(obj).__mro__:
            adapter = adapters.get(klass)
            if adapter:
                break
        if adapter is None:
            raise Exception("don't know how to add object to JS context: %r" % obj)
        # accumulate this adapter's JS/CSS requirements on the context
        self.media += adapter.media
        return [adapter.js_constructor, *adapter.js_args(obj, self)]
class Adapter(metaclass=MediaDefiningClass):
    """Base class for packers. MediaDefiningClass turns an inner `class Media`
    declaration into a `media` property, the same way Django widgets work."""
    pass
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,043
|
gasman/telepath-poc
|
refs/heads/master
|
/collage/views.py
|
import json
from django.shortcuts import render
from telepath import JSContext
from .shapes import Circle, Rectangle
def index(request):
    """Render the collage demo: pack a few sample shapes for the client."""
    sample_shapes = [
        Circle(30, 'red'),
        Circle(50, 'blue'),
        Rectangle(100, 50, 'yellow'),
    ]
    packer = JSContext()
    packed = [packer.pack(shape) for shape in sample_shapes]
    return render(request, 'collage/index.html', {
        'shapes_json': json.dumps(packed),
        'media': packer.media,
    })
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,044
|
gasman/telepath-poc
|
refs/heads/master
|
/formfields/forms.py
|
from django import forms
from wagtail.admin.widgets import AdminPageChooser
from wagtail.core.models import Page
from telepath import register, Adapter
class MyForm(forms.Form):
    """Demo form exercising one widget of each kind that the adapters below
    know how to pack: text input, textarea, radio select, and the Wagtail
    page chooser."""
    title = forms.CharField()
    description = forms.CharField(widget=forms.Textarea)
    office = forms.ChoiceField(choices=[('charlbury', "Charlbury"), ('bristol', "Bristol")], widget=forms.RadioSelect)
    profile_page = forms.ModelChoiceField(queryset=Page.objects.all(), widget=AdminPageChooser(can_choose_root=False))
class WidgetAdapter(Adapter):
    """Packs a plain Django form widget: ships its rendered HTML (with
    placeholder name/id) plus the label id, for client-side rebinding."""
    js_constructor = 'formfields.Widget'

    def js_args(self, widget, context):
        return [
            # '__NAME__'/'__ID__' are placeholders the JS side substitutes
            widget.render('__NAME__', None, attrs={'id': '__ID__'}),
            widget.id_for_label('__ID__'),
        ]

    class Media:
        js = ['formfields/js/widget.js']


# Input covers the text-style widgets; subclasses resolve via the MRO lookup
# in JSContext.pack.
register(WidgetAdapter(), forms.widgets.Input)
register(WidgetAdapter(), forms.Textarea)
register(WidgetAdapter(), forms.Select)
class RadioSelectAdapter(WidgetAdapter):
    """Same packed payload as WidgetAdapter, but with a dedicated JS
    constructor — presumably radio groups need different client-side wiring."""
    js_constructor = 'formfields.RadioSelect'


register(RadioSelectAdapter(), forms.RadioSelect)
class PageChooserAdapter(Adapter):
    """Packs Wagtail's AdminPageChooser widget, which needs render_html and
    extra chooser configuration beyond a plain widget."""
    js_constructor = 'formfields.PageChooser'

    def js_args(self, widget, context):
        return [
            widget.render_html('__NAME__', None, attrs={'id': '__ID__'}),
            widget.id_for_label('__ID__'),
            widget.model_names, widget.can_choose_root, widget.user_perms
        ]

    @property
    def media(self):
        # combine the chooser's own assets with this app's widget bootstrap JS
        return AdminPageChooser().media + forms.Media(js=['formfields/js/widget.js'])


register(PageChooserAdapter(), AdminPageChooser)
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,045
|
gasman/telepath-poc
|
refs/heads/master
|
/streamfield/views.py
|
import json
from django.shortcuts import render
from telepath import JSContext
from wagtail.core import blocks
from streamfield import blocks as foo # load adapters
def index(request):
    """Render the streamfield demo: a two-field StructBlock plus a value."""
    name_block = blocks.StructBlock([
        ('first_name', blocks.CharBlock()),
        ('surname', blocks.CharBlock()),
    ])
    name_block.set_name("title")
    packer = JSContext()
    context = {
        'media': packer.media,
        'block_json': json.dumps(packer.pack(name_block)),
        'value_json': json.dumps({'first_name': "Spongebob", 'surname': "Squarepants"}),
    }
    return render(request, 'streamfield/index.html', context)
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,046
|
gasman/telepath-poc
|
refs/heads/master
|
/collage/shapes.py
|
import json
from django.db import models
from telepath import register, Adapter
class Circle:
    """A circle shape, defined by its radius and colour."""

    def __init__(self, radius, colour):
        self.radius, self.colour = radius, colour
class CircleAdapter(Adapter):
    """Packs Circle instances for the client-side 'shapes.Circle' constructor."""
    js_constructor = 'shapes.Circle'

    def js_args(self, obj, context):
        # constructor arguments, in the order the JS class expects
        return [obj.radius, obj.colour]

    class Media:
        js = ['collage/js/shapes/circle.js']


register(CircleAdapter(), Circle)
class Rectangle:
    """An axis-aligned rectangle, defined by width, height and colour."""

    def __init__(self, width, height, colour):
        self.width, self.height, self.colour = width, height, colour
class RectangleAdapter(Adapter):
    """Packs Rectangle instances for the client-side 'shapes.Rectangle' constructor."""
    js_constructor = 'shapes.Rectangle'

    def js_args(self, obj, context):
        # constructor arguments, in the order the JS class expects
        return [obj.width, obj.height, obj.colour]

    class Media:
        js = ['collage/js/shapes/rectangle.js']


register(RectangleAdapter(), Rectangle)
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,047
|
gasman/telepath-poc
|
refs/heads/master
|
/streamfield/blocks.py
|
from wagtail.core import blocks
from telepath import register, Adapter
from formfields import forms # load field adapters
class FieldBlockAdapter(Adapter):
    """Packs a wagtail FieldBlock: its name, its packed form widget, and the
    display metadata the client needs to render it."""
    js_constructor = 'streamfield.FieldBlock'

    def js_args(self, block, context):
        return [
            block.name,
            # recursively pack the underlying form widget with the same context
            context.pack(block.field.widget),
            {'label': block.label, 'required': block.required, 'icon': block.meta.icon},
        ]

    class Media:
        js = ['streamfield/js/blocks.js']


register(FieldBlockAdapter(), blocks.FieldBlock)
class StructBlockAdapter(Adapter):
    """Packs a wagtail StructBlock: its name, its packed child blocks, and
    the display metadata the client needs to render it."""
    js_constructor = 'streamfield.StructBlock'

    def js_args(self, block, context):
        return [
            block.name,
            # pack every child block recursively with the same context
            [context.pack(child) for child in block.child_blocks.values()],
            {
                'label': block.label, 'required': block.required, 'icon': block.meta.icon,
                # help_text may be absent from meta, hence getattr with default
                'classname': block.meta.form_classname, 'helpText': getattr(block.meta, 'help_text', None),
            },
        ]

    class Media:
        js = ['streamfield/js/blocks.js']


register(StructBlockAdapter(), blocks.StructBlock)
|
{"/formfields/views.py": ["/telepath/__init__.py", "/formfields/forms.py"], "/collage/views.py": ["/telepath/__init__.py", "/collage/shapes.py"], "/formfields/forms.py": ["/telepath/__init__.py"], "/streamfield/views.py": ["/telepath/__init__.py"], "/collage/shapes.py": ["/telepath/__init__.py"], "/streamfield/blocks.py": ["/telepath/__init__.py"]}
|
40,049
|
lns/deep-rl
|
refs/heads/master
|
/agents/tabular_q_agent.py
|
from collections import defaultdict
import numpy as np
from gym import spaces
import copy
from agents.base_agent import BaseAgent
class TabularQAgent(BaseAgent):
    '''Tabular Q-learning agent.

    Keeps a table (dict of observation -> per-action value array) and updates
    it with the standard one-step Q-learning rule.
    '''

    def __init__(self,
                 action_space,
                 observation_space,
                 q_init=0.0,
                 learning_rate=0.1,
                 discount=1.0,
                 epsilon=0.05):
        if not isinstance(action_space, spaces.Discrete):
            raise TypeError("Action space type should be Discrete.")
        if not isinstance(observation_space, spaces.Discrete):
            raise TypeError("Observation space type should be Discrete.")
        self._action_space = action_space
        self._learning_rate = learning_rate
        self._discount = discount
        self._epsilon = epsilon
        # unseen observations start with every action valued at q_init
        self._q = defaultdict(lambda: q_init * np.ones(action_space.n))

    def act(self, observation, greedy=False):
        """Epsilon-greedy action selection; remembers (observation, action)
        for the next learn() call."""
        greedy_action = np.argmax(self._q[observation])
        if greedy or np.random.random() >= self._epsilon:
            action = greedy_action
        else:
            action = self._action_space.sample()
        self._observation = observation
        self._action = action
        return action

    def learn(self, reward, next_observation, done):
        """One-step Q-learning update for the last (observation, action).

        The previous version computed the TD expression three times and left
        two dead locals (`before`, and a `target` pre-multiplied by the
        learning rate); the update itself was correct and is unchanged.
        """
        future = np.max(self._q[next_observation]) if not done else 0.0
        # td_error = r + gamma * max_a' Q(s', a') - Q(s, a)
        td_error = (reward + self._discount * future
                    - self._q[self._observation][self._action])
        self._q[self._observation][self._action] += self._learning_rate * td_error
|
{"/agents/conv_dqn_agent.py": ["/agents/memory.py"], "/run.py": ["/agents/dqn_agent.py", "/agents/conv_dqn_agent.py", "/agents/random_agent.py", "/agents/tabular_q_agent.py"], "/agents/dqn_agent.py": ["/agents/memory.py"]}
|
40,050
|
lns/deep-rl
|
refs/heads/master
|
/agents/conv_dqn_agent.py
|
import copy
import numpy as np
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from gym import wrappers
from gym import spaces
from agents.base_agent import BaseAgent
from agents.memory import ReplayMemory, Transition
class ConvNet(nn.Module):
    """Four stride-2 conv layers over a 42x42 input, then a linear head
    producing num_output values."""

    def __init__(self, num_channel_input, num_output):
        super(ConvNet, self).__init__()
        # each stride-2, pad-1, 3x3 conv shrinks: 42 -> 21 -> 11 -> 6 -> 3
        self.conv1 = nn.Conv2d(num_channel_input, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.fc = nn.Linear(32 * 3 * 3, num_output)

    def forward(self, x):
        # the fc layer's input size is only valid for 42x42 spatial input
        assert x.size(2) == 42 and x.size(3) == 42
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.elu(conv(x))
        flattened = x.view(x.size(0), -1)
        return self.fc(flattened)
class ConvDQNAgent(BaseAgent):
    '''Deep Q-learning agent.'''
    # NOTE(review): written against the pre-0.4 PyTorch API (Variable and the
    # `volatile` flag) — the comments below describe the behaviour under that
    # API; porting to modern torch (no_grad/detach) would change this code.

    def __init__(self,
                 action_space,
                 observation_space,
                 batch_size=128,
                 learning_rate=1e-3,
                 discount=1.0,
                 epsilon=0.05):
        # only discrete action spaces are supported (one Q-value per action)
        if not isinstance(action_space, spaces.Discrete):
            raise TypeError("Action space type should be Discrete.")
        self._action_space = action_space
        self._batch_size = batch_size
        self._discount = discount
        self._epsilon = epsilon
        # observation_space.shape[0] is used as the channel count of a 42x42
        # image (see ConvNet.forward's assert) — assumes the Atari wrappers
        # upstream produce that shape; TODO confirm.
        self._q_network = ConvNet(
            num_channel_input=observation_space.shape[0],
            num_output=action_space.n)
        self._optimizer = optim.RMSprop(
            self._q_network.parameters(), lr=learning_rate)
        self._memory = ReplayMemory(100000)

    def act(self, observation, greedy=False):
        # epsilon-greedy: exploit the argmax Q-value unless exploring
        q_values = self._q_network(
            Variable(torch.FloatTensor(np.expand_dims(observation, 0))))
        _, action = q_values[0].data.max(0)
        greedy_action = action[0]
        if greedy or np.random.random() >= self._epsilon:
            action = greedy_action
        else:
            action = self._action_space.sample()
        # remember the pair for the next learn() call
        self._observation = observation
        self._action = action
        return action

    def learn(self, reward, next_observation, done):
        # experience replay
        self._memory.push(self._observation, self._action, reward,
                          next_observation, done)
        # wait until a full batch is available before training
        if len(self._memory) < self._batch_size:
            return
        transitions = self._memory.sample(self._batch_size)
        # transpose a list of Transitions into a Transition of batched fields
        batch = Transition(*zip(*transitions))
        # convert to torch variable (volatile=True: no graph for the target)
        next_observation_batch = Variable(
            torch.from_numpy(np.stack(batch.next_observation)), volatile=True)
        observation_batch = Variable(
            torch.from_numpy(np.stack(batch.observation)))
        reward_batch = Variable(torch.FloatTensor(batch.reward))
        action_batch = Variable(torch.LongTensor(batch.action))
        done_batch = Variable(torch.Tensor(batch.done))
        # compute max-q target; terminal transitions contribute no future value
        q_values_next = self._q_network(next_observation_batch)
        futures = q_values_next.max(dim=1)[0] * (1 - done_batch)
        target_q = reward_batch + self._discount * futures
        # clear the volatile flag so the loss below accepts the target (old API)
        target_q.volatile = False
        # compute gradient
        q_values = self._q_network(observation_batch)
        loss_fn = torch.nn.MSELoss()
        loss = loss_fn(q_values.gather(1, action_batch.view(-1, 1)), target_q)
        self._optimizer.zero_grad()
        loss.backward()
        # update q-network
        self._optimizer.step()
|
{"/agents/conv_dqn_agent.py": ["/agents/memory.py"], "/run.py": ["/agents/dqn_agent.py", "/agents/conv_dqn_agent.py", "/agents/random_agent.py", "/agents/tabular_q_agent.py"], "/agents/dqn_agent.py": ["/agents/memory.py"]}
|
40,051
|
lns/deep-rl
|
refs/heads/master
|
/agents/memory.py
|
from collections import namedtuple
import random
# A single environment step stored in replay memory.
Transition = namedtuple(
    'Transition',
    ('observation', 'action', 'reward', 'next_observation', 'done'))


class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transitions."""

    def __init__(self, capacity):
        self._capacity = capacity
        self._memory = []
        self._position = 0  # index of the slot the next push writes to

    def push(self, *args):
        """Store a Transition, overwriting the oldest entry once full."""
        entry = Transition(*args)
        if len(self._memory) < self._capacity:
            self._memory.append(entry)
        else:
            self._memory[self._position] = entry
        self._position = (self._position + 1) % self._capacity

    def sample(self, batch_size):
        """Uniformly sample batch_size stored transitions without replacement."""
        return random.sample(self._memory, batch_size)

    def __len__(self):
        return len(self._memory)
|
{"/agents/conv_dqn_agent.py": ["/agents/memory.py"], "/run.py": ["/agents/dqn_agent.py", "/agents/conv_dqn_agent.py", "/agents/random_agent.py", "/agents/tabular_q_agent.py"], "/agents/dqn_agent.py": ["/agents/memory.py"]}
|
40,052
|
lns/deep-rl
|
refs/heads/master
|
/run.py
|
import os
import argparse
import yaml
import gym
from gym import wrappers
import matplotlib.pyplot as plt
from agents.dqn_agent import DQNAgent
from agents.conv_dqn_agent import ConvDQNAgent
from agents.random_agent import RandomAgent
from agents.tabular_q_agent import TabularQAgent
from envs.pong_env import PongSinglePlayerEnv
from wrappers.process_frame import AtariRescale42x42Wrapper
from wrappers.process_frame import NormalizeWrapper
def create_env(conf):
    """Build the gym environment named in conf['env'].

    Optionally wraps it with a Monitor recorder (when conf['monitor_dir'] is
    set) and with the Atari rescale + normalize wrappers (when
    conf['use_atari_wrapper'] is truthy).
    """
    env = gym.make(conf['env'])
    if conf['monitor_dir']:
        env = wrappers.Monitor(env, conf['monitor_dir'], force=True)
    if conf['use_atari_wrapper']:
        env = AtariRescale42x42Wrapper(env)
        env = NormalizeWrapper(env)
    return env
def create_agent(conf, action_space, observation_space):
    '''Instantiate the agent named by conf['agent'].

    Supported values: "dqn", "conv_dqn", "tabular_q", "random".

    Raises:
        ValueError: if conf['agent'] is not one of the supported types.
            (The original raised `ArgumentError`, an undefined name, so the
            error branch actually produced a NameError.)
    '''
    if conf['agent'] == "dqn":
        return DQNAgent(
            action_space,
            observation_space,
            batch_size=conf['batch_size'],
            learning_rate=conf['learning_rate'],
            discount=conf['discount'],
            epsilon=conf['random_explore'])
    elif conf['agent'] == "conv_dqn":
        return ConvDQNAgent(
            action_space,
            observation_space,
            batch_size=conf['batch_size'],
            learning_rate=conf['learning_rate'],
            discount=conf['discount'],
            epsilon=conf['random_explore'])
    elif conf['agent'] == "tabular_q":
        return TabularQAgent(
            action_space,
            observation_space,
            q_init=conf['q_value_init'],
            learning_rate=conf['learning_rate'],
            discount=conf['discount'],
            epsilon=conf['random_explore'])
    elif conf['agent'] == "random":
        return RandomAgent(action_space, observation_space)
    else:
        raise ValueError("Agent type [%s] is not supported." %
                         conf['agent'])
def plot_return(return_list, filepath):
    '''Plot per-episode cumulative reward and save the figure to *filepath*.

    Creates the destination directory (including parents) if needed.
    '''
    plt.plot(return_list)
    plt.xlabel('Episode')
    plt.ylabel('Cummulative Rewards Per Episode')
    plt.grid()
    dirname = os.path.dirname(filepath)
    if dirname:
        # makedirs handles nested paths and is race-free with exist_ok,
        # unlike the original exists()+mkdir pair which only created one level
        os.makedirs(dirname, exist_ok=True)
    plt.savefig(filepath)
def run_sync(conf):
    '''Run conf['num_episodes'] episodes of the configured agent/env pair
    synchronously and return the list of per-episode cumulative rewards.'''
    print("----- Running job [%s] ----- " % conf['job_name'])
    env = create_env(conf)
    env.seed(0)  # fixed seed for reproducible episodes
    agent = create_agent(conf, env.action_space, env.observation_space)
    return_list = []
    # py3 fix: `xrange` no longer exists; `range` is lazy in Python 3
    for episode in range(conf['num_episodes']):
        cum_return = 0.0
        observation = env.reset()
        done = False
        while not done:
            action = agent.act(observation)
            next_observation, reward, done, _ = env.step(action)
            agent.learn(reward, next_observation, done)
            observation = next_observation
            cum_return += reward
        return_list.append(cum_return)
        print("Episode %d/%d Return: %f." %
              (episode + 1, conf['num_episodes'], cum_return))
    env.close()
    return return_list
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--config",
        type=str,
        required=True,
        help="Configuration file in yaml format.")
    args = parser.parse_args()
    # py3 fix: the `file` builtin is gone; use open() in a context manager so
    # the handle is closed.  safe_load avoids arbitrary object construction
    # from untrusted YAML (yaml.load without a Loader is unsafe/deprecated).
    with open(args.config, 'r') as config_file:
        conf = yaml.safe_load(config_file)
    return_list = run_sync(conf)
    plot_return(return_list, os.path.join('figures', conf['job_name']))
|
{"/agents/conv_dqn_agent.py": ["/agents/memory.py"], "/run.py": ["/agents/dqn_agent.py", "/agents/conv_dqn_agent.py", "/agents/random_agent.py", "/agents/tabular_q_agent.py"], "/agents/dqn_agent.py": ["/agents/memory.py"]}
|
40,053
|
lns/deep-rl
|
refs/heads/master
|
/agents/random_agent.py
|
from agents.base_agent import BaseAgent
class RandomAgent(BaseAgent):
    '''Random agent.

    Baseline policy: ignores observations and samples actions uniformly
    from the action space; learn() is a deliberate no-op.
    '''
    def __init__(self, action_space, observation_space):
        # observation_space is accepted for signature parity with the other
        # agents but is not used by a random policy
        self._action_space = action_space
    def act(self, observation, greedy=False):
        # `greedy` has no effect: there is no learned policy to exploit
        return self._action_space.sample()
    def learn(self, reward, next_observation, done):
        pass
|
{"/agents/conv_dqn_agent.py": ["/agents/memory.py"], "/run.py": ["/agents/dqn_agent.py", "/agents/conv_dqn_agent.py", "/agents/random_agent.py", "/agents/tabular_q_agent.py"], "/agents/dqn_agent.py": ["/agents/memory.py"]}
|
40,054
|
lns/deep-rl
|
refs/heads/master
|
/agents/dqn_agent.py
|
import copy
import numpy as np
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from gym import wrappers
from gym import spaces
from agents.base_agent import BaseAgent
from agents.memory import ReplayMemory, Transition
class FCNet(nn.Module):
    '''Two-layer fully connected network: input -> 1024 (ReLU) -> output.'''

    def __init__(self, input_size, output_size):
        super(FCNet, self).__init__()
        self.fc1 = nn.Linear(input_size, 1024)  # hidden layer
        self.fc2 = nn.Linear(1024, output_size)  # output layer (Q-values)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)
class DQNAgent(BaseAgent):
    '''Deep Q-learning agent: epsilon-greedy exploration over a fully
    connected Q-network, trained with uniform experience replay.'''

    def __init__(self,
                 action_space,
                 observation_space,
                 batch_size=128,
                 learning_rate=1e-3,
                 discount=1.0,
                 epsilon=0.05):
        '''Args:
            action_space: gym Discrete action space.
            observation_space: gym space; its shape is flattened into the
                network input dimension.
            batch_size: replay minibatch size.
            learning_rate: RMSprop learning rate.
            discount: TD discount factor (gamma).
            epsilon: probability of a uniformly random action.

        Raises:
            TypeError: if action_space is not Discrete.
        '''
        if not isinstance(action_space, spaces.Discrete):
            raise TypeError("Action space type should be Discrete.")
        # py3 fix: `reduce` is no longer a builtin
        from functools import reduce
        self._action_space = action_space
        self._batch_size = batch_size
        self._discount = discount
        self._epsilon = epsilon
        # flatten the observation shape into a single input dimension
        self._q_network = FCNet(
            input_size=reduce(lambda x, y: x * y, observation_space.shape),
            output_size=action_space.n)
        self._optimizer = optim.RMSprop(
            self._q_network.parameters(), lr=learning_rate)
        self._memory = ReplayMemory(100000)

    def act(self, observation, greedy=False):
        '''Return an action for *observation*; epsilon-greedy unless greedy=True.'''
        # inference only: torch.no_grad() replaces the removed
        # Variable(..., volatile=True) API
        with torch.no_grad():
            q_values = self._q_network(
                torch.FloatTensor(observation).view(1, -1))
        # .item() works on 0-dim tensors, unlike the old `action[0]` indexing
        greedy_action = int(q_values[0].argmax().item())
        if greedy or np.random.random() >= self._epsilon:
            action = greedy_action
        else:
            action = self._action_space.sample()
        # remember the (observation, action) pair for the next learn() call
        self._observation = observation
        self._action = action
        return action

    def learn(self, reward, next_observation, done):
        '''Store the latest transition and run one minibatch TD update.'''
        self._memory.push(self._observation, self._action, reward,
                          next_observation, done)
        if len(self._memory) < self._batch_size:
            return  # not enough experience for a full minibatch yet
        transitions = self._memory.sample(self._batch_size)
        batch = Transition(*zip(*transitions))
        observation_batch = torch.FloatTensor(batch.observation).view(
            self._batch_size, -1)
        reward_batch = torch.FloatTensor(batch.reward)
        action_batch = torch.LongTensor(batch.action)
        done_batch = torch.Tensor(batch.done)
        # compute the one-step TD target without tracking gradients
        # (replaces the volatile=True / .volatile = False dance)
        with torch.no_grad():
            next_observation_batch = torch.FloatTensor(
                batch.next_observation).view(self._batch_size, -1)
            q_values_next = self._q_network(next_observation_batch)
            # terminal transitions contribute no future value
            futures = q_values_next.max(dim=1)[0] * (1 - done_batch)
            target_q = reward_batch + self._discount * futures
        q_values = self._q_network(observation_batch)
        # squeeze to shape (batch,) so the loss does not broadcast (B,1)
        # against (B,) into a (B,B) matrix as the original did
        predicted_q = q_values.gather(1, action_batch.view(-1, 1)).squeeze(1)
        loss = F.mse_loss(predicted_q, target_q)
        self._optimizer.zero_grad()
        loss.backward()
        # update q-network
        self._optimizer.step()
|
{"/agents/conv_dqn_agent.py": ["/agents/memory.py"], "/run.py": ["/agents/dqn_agent.py", "/agents/conv_dqn_agent.py", "/agents/random_agent.py", "/agents/tabular_q_agent.py"], "/agents/dqn_agent.py": ["/agents/memory.py"]}
|
40,085
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/mutators.py
|
import itertools, json, string, sys, math
from functools import wraps, partial
from contextlib import suppress
from collections import deque, defaultdict
import operator
from base64 import b16encode, b32encode, b64encode, b64encode, a85encode
from generators import G, chain, window, first
# stderr printer that flushes immediately (used for progress and warnings)
eprint = partial(print, file=sys.stderr, flush=True)
# the builtin types this module knows how to mutate between
standard_types = { bool, bytearray, bytes, complex, dict, float, int, list, set, str, tuple }
# one default-constructed instance of each standard type
standard_defaults = [i() for i in standard_types]
# index -> printable character lookup table
printables = {k:v for k,v in enumerate(string.printable)}
# deterministic pseudo-random int streams built from interfering small cycles;
# zip(it, it) below pairs consecutive values from the *same* iterator
small_int_cyclers = zip(*(itertools.cycle(range(1, i)) for i in range(2, 7)))
kinda_random_small_int = map(sum, small_int_cyclers)
kinda_random_medium_int = (a*b for a,b in zip(kinda_random_small_int, kinda_random_small_int))
kinda_random_big_int = (a*b for a,b in zip(kinda_random_medium_int, kinda_random_medium_int))
# binary-to-text encoders used when harvesting strings
# NOTE(review): b64encode appears twice (also duplicated in the import line) —
# possibly one occurrence was meant to be b85encode; confirm before changing.
encoders = b16encode, b32encode, b64encode, b64encode, a85encode
# str.encode / bytes.decode variants that silently drop offending characters
str_encode_or_ignore = partial(str.encode, errors='ignore')
bytes_decode_or_ignore = partial(bytes.decode, errors='ignore')
def cached_uniq(pipe):
    '''Yield items from *pipe*, dropping any item equal to one of the last
    four already-yielded items of the same type.'''
    recent = defaultdict(partial(deque, maxlen=4))
    for item in pipe:
        seen = recent[type(item)]
        if item in seen:
            continue
        seen.append(item)
        yield item
def uniq(pipe):
    '''Collapse runs of consecutive equal items (of the same type) to one item.

    Fixes two defects of the original: the first item was consumed by
    `next(pipe)` but never yielded, and an empty *pipe* raised RuntimeError
    (PEP 479) because that StopIteration leaked out of the generator.
    '''
    _missing = object()  # sentinel: no previous item seen yet
    prev = _missing
    for i in pipe:
        if prev is not _missing and type(i) is type(prev) and i == prev:
            continue
        yield i
        prev = i
def hashable(o):
    '''Return True when *o* supports hash(), else False.'''
    try:
        hash(o)
    except:
        return False
    else:
        return True
def hashable_or_none(o):
    '''returns an object if it is hashable or just None'''
    try:
        hash(o)
    except:
        return None
    return o
def flipped(fn):
    '''this decorator allows generators to yield their output and their flipped output'''
    assert callable(fn), fn

    def _reverse(o):
        # strings/bytes reverse via slicing; other sequences rebuild their type
        if isinstance(o, str):
            return o[::-1]
        if isinstance(o, bytes):
            return o[::-1]
        if isinstance(o, (bytearray, list, set, tuple)):
            return type(o)(reversed(o))
        raise Exception('this wasnt worth flipping: {}'.format(o))

    @wraps(fn)
    def wrapper(*a, **k):
        for item in fn(*a, **k):
            yield item
            yield _reverse(item)
    return wrapper
def map_attempt(fn, iterable):
    ''' this works just like map but filters out crashes '''
    assert callable(fn), fn
    iterator = iter(iterable)
    while True:
        try:
            item = next(iterator)
        except StopIteration:
            return
        except Exception:
            # a source iterator that raises is treated as exhausted; the
            # original retried such iterators and could spin forever on a
            # non-generator iterator that raises the same error repeatedly
            return
        try:
            yield fn(item)
        except Exception:
            # fn crashed on this item: skip it, keep consuming
            continue
def harvest_bool_from_bool(o):
    '''Yield the negation of *o* followed by *o* itself.'''
    assert type(o) is bool, o
    yield from (not o, o)
def harvest_bytearray_from_bool(o):
assert type(o) is bool, o
yield bytearray(o)
yield bytearray(not o)
def harvest_bytes_from_bool(o):
assert type(o) is bool, o
yield bytes(o)
yield bytes(not o)
def harvest_complex_from_bool(o):
assert type(o) is bool, o
yield complex(o)
yield complex(not o)
def harvest_dict_from_bool(o):
assert type(o) is bool, o
global standard_defaults
for i in map(hashable_or_none, standard_defaults):
yield {o:i}
yield {i:o}
yield {i:o, o:i}
def harvest_float_from_bool(o):
assert type(o) is bool, o
yield float(o)
yield float(not o)
def harvest_int_from_bool(o):
assert type(o) is bool, o
yield int(o)
yield int(not o)
def harvest_list_from_bool(o):
assert type(o) is bool, o
for i in range(1, 8):
yield [o] * i
def harvest_set_from_bool(o):
assert type(o) is bool, o
yield {o}
yield {not o}
yield {o, not o}
def harvest_str_from_bool(o):
assert type(o) is bool, o
yield json.dumps(o)
yield repr(o)
int_o = int(o)
yield str(int_o)
yield bin(int_o)
yield bytes(int_o).decode()
def harvest_tuple_from_bool(o):
assert type(o) is bool, o
yield from map(tuple, harvest_list_from_bool(o))
def harvest_bool_from_bytearray(o):
assert type(o) is bytearray, o
yield from harvest_bool_from_bool(bool(o))
for i in harvest_list_from_bytearray(o):
if isinstance(i, int):
yield from harvest_bool_from_int(i)
def harvest_bytearray_from_bytearray(o):
    '''Yield repeated copies of *o* (1x..8x), each followed by its reversal.

    The original called `tmp.reverse()` on the object it had just yielded,
    so any consumer that held a reference (anything that materializes or
    caches the stream) saw both yields mutate into the reversed value.
    Each yield is now an independent object.
    '''
    assert type(o) is bytearray, o
    for n in range(1, 9):
        repeated = o * n
        yield repeated
        yield repeated[::-1]  # reversed copy, not an in-place mutation
def harvest_bytes_from_bytearray(o):
assert type(o) is bytearray, o
yield from map(bytes, harvest_bytearray_from_bytearray(o))
def harvest_complex_from_bytearray(o):
assert type(o) is bytearray, o
yield complex(len(o), len(o))
yield from G(harvest_bytearray_from_bytearray(o)
).chain(
).window(2
).map(lambda i:[complex(i[0]), complex(i[1]), complex(*i)]
).chain()
def harvest_dict_from_bytearray(o):
assert type(o) is bytearray, o
yield from harvest_dict_from_list(list(o))
yield from harvest_dict_from_list(list(map(chr, o)))
def harvest_float_from_bytearray(o):
    '''Yield floats derived from *o*: length-based, hex-parsed, and per-byte.'''
    assert type(o) is bytearray, o
    yield from harvest_float_from_float(float(len(o) * len(o)))
    if o:
        for variant in harvest_bytearray_from_bytearray(o):
            yield float.fromhex(o.hex())
            for byte in variant:
                # original passed the whole bytearray (`i`, with loop var `ii`
                # unused) into harvest_float_from_int, whose type assert then
                # failed on every non-empty input
                yield from harvest_float_from_int(byte)
def harvest_int_from_bytearray(o):
    '''Yield little/big-endian integer readings of *o* and its case variants.'''
    assert type(o) is bytearray, o
    for variant in [o, o.upper(), o.lower()]:
        # original ignored the loop variable and decoded `o` three times,
        # so the upper/lower case variants never contributed distinct values
        yield int.from_bytes(variant, 'little')
        yield int.from_bytes(variant, 'big')
@flipped
def harvest_list_from_bytearray(o):
assert type(o) is bytearray, o
for x in range(-1, 2):
yield [i+x for i in o]
yield [(i+x)%2 for i in o]
yield [i+x for i in o if i%2]
yield [i+x for i in o if not i%2]
def harvest_set_from_bytearray(o):
assert type(o) is bytearray, o
yield from map(set, harvest_list_from_bytearray(o))
def harvest_str_from_bytearray(o):
assert type(o) is bytearray, o
for l in harvest_list_from_bytearray(o):
with suppress(Exception):
yield ''.join(map(chr, l))
def harvest_tuple_from_bytearray(o):
assert type(o) is bytearray, o
yield from map(tuple, harvest_list_from_bytearray(o))
def harvest_bool_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_bool_from_int(len(o))
for i in o:
yield from (x=='1' for x in bin(i)[2:])
def harvest_bytearray_from_bytes(o):
assert type(o) is bytes, o
yield from map(bytearray, harvest_bytes_from_bytes(o))
def harvest_bytes_from_bytes(o):
assert type(o) is bytes, o
yield bytes(o)
byte_pipe = lambda:map(lambda i:i%256, harvest_int_from_bytes(o))
yield from map(bytes, byte_pipe())
for ints in window(byte_pipe(), 8):
for i in range(1, 8):
yield bytes(ints[:i])
yield bytes(ints[:i]) * i
def harvest_complex_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_complex_from_int(len(o))
for a, b in window(harvest_int_from_bytes(o), 2):
yield complex(a, b)
def harvest_dict_from_bytes(o):
assert type(o) is bytes, o
for l in harvest_list_from_bytes(o):
yield from harvest_dict_from_list(l)
def harvest_float_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_float_from_list(list(o))
for a, b in window(harvest_int_from_bytes(o), 2):
yield float(a * b)
def harvest_int_from_bytes(o):
assert type(o) is bytes, o
yield from harvest_int_from_list(list(o))
for i in o:
yield from harvest_int_from_int(i)
@flipped
def harvest_list_from_bytes(o):
assert type(o) is bytes, o
yield [i for i in o]
yield [bool(i) for i in o]
yield [str(i) for i in o]
yield [float(i) for i in o]
def harvest_set_from_bytes(o):
assert type(o) is bytes, o
yield from map(set, harvest_list_from_bytes(o))
def harvest_str_from_bytes(o):
assert type(o) is bytes, o
for b in harvest_bytes_from_bytes(o):
yield bytes_decode_or_ignore(b)
def harvest_tuple_from_bytes(o):
assert type(o) is bytes, o
yield from map(tuple, harvest_list_from_bytes(o))
def harvest_bool_from_complex(o):
assert type(o) is complex, o
yield from harvest_bool_from_float(o.imag)
yield from harvest_bool_from_float(o.real)
def harvest_bytearray_from_complex(o):
assert type(o) is complex, o
yield from harvest_bytearray_from_float(o.imag)
yield from harvest_bytearray_from_float(o.real)
def harvest_bytes_from_complex(o):
assert type(o) is complex, o
yield from harvest_bytes_from_float(o.imag)
yield from harvest_bytes_from_float(o.real)
def harvest_complex_from_complex(o):
assert type(o) is complex, o
for a, b in window(harvest_int_from_float(o.imag), 2):
yield complex(a, b)
for a, b in window(harvest_int_from_float(o.real), 2):
yield complex(a, b)
def harvest_dict_from_complex(o):
assert type(o) is complex, o
yield from harvest_dict_from_float(o.imag)
yield from harvest_dict_from_float(o.real)
def harvest_float_from_complex(o):
assert type(o) is complex, o
yield from harvest_float_from_float(o.imag)
yield from harvest_float_from_float(o.real)
def harvest_int_from_complex(o):
assert type(o) is complex, o
yield from harvest_int_from_float(o.imag)
yield from harvest_int_from_float(o.real)
def harvest_list_from_complex(o):
assert type(o) is complex, o
yield from harvest_list_from_float(o.imag)
yield from harvest_list_from_float(o.real)
def harvest_set_from_complex(o):
assert type(o) is complex, o
yield from harvest_set_from_float(o.imag)
yield from harvest_set_from_float(o.real)
def harvest_str_from_complex(o):
assert type(o) is complex, o
yield from harvest_str_from_float(o.imag)
yield from harvest_str_from_float(o.real)
def harvest_tuple_from_complex(o):
assert type(o) is complex, o
yield from map(tuple, harvest_list_from_complex(o))
def remutate_dict(o, output_type):
    '''Feed every key and value of *o* that is a standard type back through
    mutate() to harvest objects of *output_type*.'''
    assert type(o) is dict, o
    assert output_type in standard_types
    if not o:
        # empty dict: fall back to the default-constructed output value
        yield output_type()
    for k, v in o.items():
        if type(k) in standard_types:
            yield from mutate(k, output_type)
        if not isinstance(v, dict) and type(v) in standard_types: # prevent infinite mutations
            yield from mutate(v, output_type)
def harvest_bool_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, bool)
def harvest_bytearray_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, bytearray)
def harvest_bytes_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, bytes)
def harvest_complex_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, complex)
def harvest_dict_from_dict(o):
assert type(o) is dict, o
for key_subset in harvest_list_from_list(list(o.keys())):
yield {k:o[k] for k in key_subset}
def harvest_float_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, float)
def harvest_int_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, int)
@flipped
def harvest_list_from_dict(o):
assert type(o) is dict, o
yield list(o.keys())
yield list(o.values())
def harvest_tuple_from_dict(o):
assert type(o) is dict, o
yield from map(tuple, harvest_list_from_dict(o))
def harvest_set_from_dict(o):
assert type(o) is dict, o
yield set(o.keys())
yield from harvest_set_from_list(list(o.values()))
def harvest_str_from_dict(o):
assert type(o) is dict, o
yield from remutate_dict(o, str)
def harvest_bool_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_bool_from_int(i)
def harvest_bytearray_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_bytearray_from_int(i)
def harvest_bytes_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_bytes_from_int(i)
def harvest_complex_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_complex_from_int(i)
def harvest_dict_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield from harvest_dict_from_int(i)
def harvest_float_from_float(o):
assert type(o) is float, o
for i in harvest_int_from_float(o):
yield o * i
yield o + i
yield o - i
yield i - o
def harvest_int_from_float(o):
assert type(o) is float, o
try:
o = o.as_integer_ratio()
yield from chain(map(harvest_int_from_int, o))
except (ValueError, OverflowError) as e:
yield from harvest_int_from_int(1)
def harvest_list_from_float(o):
assert type(o) is float, o
try:
a, b = o.as_integer_ratio()
except (ValueError, OverflowError) as e:
a, b = 1, 2
aa = abs(min(512, a))
bb = abs(min(512, b))
yield from harvest_list_from_list([o])
try:
yield [o] * aa
yield [o] * aa
yield [a] * bb
yield [b] * aa
yield [([o] * aa)] * bb
yield [([o] * bb)] * aa
yield [([o*a] * aa)] * bb
yield [([o*a] * bb)] * aa
yield [([o*b] * aa)] * bb
yield [([o*b] * bb)] * aa
except MemoryError:
pass
def harvest_set_from_float(o):
assert type(o) is float, o
for l in harvest_list_from_float(o):
yield from harvest_set_from_list(l)
def harvest_str_from_float(o):
assert type(o) is float, o
yield str(o)
yield repr(o)
yield from map(chr, map(lambda i:i%1114112, harvest_int_from_float(o)))
def harvest_tuple_from_float(o):
assert type(o) is float, o
yield from map(tuple, harvest_list_from_float(o))
def harvest_bool_from_int(o):
assert type(o) is int, o
yield o % 2 == 1
yield o % 2 == 0
yield from (x=='1' for x in bin(o))
yield from (x=='1' for x in bin(o**2))
def harvest_bytearray_from_int(o):
assert type(o) is int, o
yield from map(bytearray, harvest_bytes_from_int(o))
def harvest_bytes_from_int(o):
assert type(o) is int, o
for ints in window(map(lambda i:i%256, harvest_int_from_int(o)), 8):
yield from (bytes(ints[:i]) for i in range(1, 8))
def harvest_complex_from_int(o):
assert type(o) is int, o
for a, b in window(harvest_int_from_int(o), 2):
yield complex(a, b)
def harvest_dict_from_int(o):
assert type(o) is int, o
for k, v in zip(harvest_str_from_int(o), harvest_int_from_int(o)):
yield {k:v for _,k,v in zip(range(min(16, max(1, v))), harvest_str_from_str(k), harvest_int_from_int(v))}
def harvest_float_from_int(o):
assert type(o) is int, o
for a, b in window(harvest_int_from_int(o), 2):
if a != 0:
yield b / a
if b != 0:
yield a / b
yield float(a * b)
def harvest_int_from_int(o):
    '''Yield integer variations of *o*: offsets, floor divisions,
    multiplications, and modulos over small ranges.'''
    assert type(o) is int, o
    for delta in range(-10, 11):
        yield o + delta
    for d in range(-10, -1):
        yield o // d
    for d in range(1, 11):
        yield o // d
    for m in range(-10, 11):
        yield int(o * m)
    for d in range(-10, -1):
        yield o % d
    for d in range(1, 11):
        yield o % d
@flipped
def harvest_list_from_int(o):
assert type(o) is int, o
bin_o = bin(o)[2:]
yield list(bin_o)
as_bools = [i=='1' for i in bin_o]
for i in range(1, len(as_bools)):
yield as_bools[:i]
yield as_bools[i:]
yield [(not x) for x in as_bools[:i]]
yield [(not x) for x in as_bools[i:]]
def harvest_set_from_int(o):
assert type(o) is int, o
yield from map(set, harvest_list_from_int(o))
def harvest_str_from_int(o):
assert type(o) is int, o
yield bin(o)
yield json.dumps(o)
chars = filter(bool, map_attempt(lambda i:(printables[i%len(printables)]), harvest_int_from_int(o)))
for l in kinda_random_small_int:
out = ''.join(c for _,c in zip(range(l), chars))
if out:
yield out
else:
break
def harvest_tuple_from_int(o):
assert type(o) is int, o
for i in harvest_list_from_int(o):
yield tuple(i)
yield tuple(set(i))
def harvest_bool_from_list(o):
assert type(o) is list, o
yield bool(o)
len_o = len(o)
for i in range(2,10):
yield bool(len_o % i)
as_bools = list(map(bool, o))
yield from as_bools
for i in as_bools:
yield not i
def harvest_bytearray_from_list(o):
assert type(o) is list, o
yield from map(bytearray, harvest_bytes_from_list(o))
def harvest_bytes_from_list(o):
assert type(o) is list, o
yield from map_attempt(str_encode_or_ignore, harvest_str_from_list(o))
def harvest_complex_from_list(o):
assert type(o) is list, o
for a, b in window(harvest_int_from_list(o)):
yield complex(a, b)
def harvest_dict_from_list(o):
    '''Yield dicts built from *o*: simple wrappers first, then key/value
    pairings cycled from the list's own items.'''
    assert type(o) is list, o
    size = len(o)
    yield {size: None}
    yield {None: size}
    yield {'data': o}
    yield {'result': o}
    cycler = itertools.cycle(o)
    for count in range(1, int(size * 2)):
        # suppress unhashable keys / exhausted cycles of an empty list
        with suppress(Exception):
            yield {next(cycler): next(cycler) for _ in range(count)}
def harvest_float_from_list(o):
assert type(o) is list, o
yield float(len(o))
pipe = iter(harvest_int_from_list(o))
for a, b in zip(pipe, pipe):
yield float(a * b)
if b and a:
yield a/b
yield b/a
def harvest_int_from_list(o):
assert type(o) is list, o
yield from harvest_int_from_int(len(o))
for fn in [len, int, ord]:
yield from map_attempt(fn, o)
yield from str_encode_or_ignore(repr(o))
@flipped
def harvest_list_from_list(o):
assert type(o) is list, o
yield o
if o:
for i in range(1, int(math.sqrt(len(o)))+1):
yield [v for ii,v in enumerate(o) if not ii%i]
yield [v for ii,v in enumerate(o) if ii%i]
yield [i for i in o if i]
yield [i for i in o if not i]
def harvest_set_from_list(o):
assert type(o) is list, o
for l in harvest_list_from_list(o):
s = set(map(hashable_or_none, l))
yield {i for i in s if i is not None}
yield {i for i in s if i}
yield s
def harvest_str_from_list(o):
assert type(o) is list, o
yield repr(o)
for i in o:
with suppress(Exception):
yield i.decode() if isinstance(i, bytes) else str(i)
yield from map(repr, o)
for i in o:
with suppress(Exception):
as_bytes = bytes(i) if isinstance(i, int) else bytes(str(i), encoding='utf-8')
for encoder in encoders:
yield encoder(as_bytes).decode()
for i in o:
with suppress(Exception):
yield json.dumps(i)
def harvest_tuple_from_list(o):
assert type(o) is list, o
yield from map(tuple, harvest_list_from_list(o))
yield from map(tuple, harvest_set_from_list(o))
def harvest_bool_from_set(o):
assert type(o) is set, o
yield from harvest_bool_from_list(list(o))
def harvest_bytearray_from_set(o):
assert type(o) is set, o
yield from harvest_bytearray_from_list(list(o))
def harvest_bytes_from_set(o):
assert type(o) is set, o
yield from harvest_bytes_from_list(list(o))
def harvest_complex_from_set(o):
assert type(o) is set, o
yield from harvest_complex_from_list(list(o))
def harvest_dict_from_set(o):
assert type(o) is set, o
yield from harvest_dict_from_list(list(o))
def harvest_float_from_set(o):
assert type(o) is set, o
yield from harvest_float_from_list(list(o))
def harvest_int_from_set(o):
assert type(o) is set, o
yield from harvest_int_from_list(list(o))
@flipped
def harvest_list_from_set(o):
assert type(o) is set, o
yield from harvest_list_from_list(list(o))
def harvest_set_from_set(o):
assert type(o) is set, o
yield from harvest_set_from_list(list(o))
def harvest_str_from_set(o):
assert type(o) is set, o
yield from harvest_str_from_list(list(o))
def harvest_tuple_from_set(o):
assert type(o) is set, o
yield from map(tuple, harvest_list_from_set(o))
def harvest_bool_from_str(o):
assert type(o) is str, o
yield from harvest_bool_from_list(list(o))
yield from (bool(ord(ch)%2) for ch in o)
def harvest_bytearray_from_str(o):
assert type(o) is str, o
yield from map(bytearray, harvest_bytes_from_str(o))
def harvest_bytes_from_str(o):
assert type(o) is str, o
yield from map(str.encode, harvest_str_from_str(o))
def harvest_complex_from_str(o):
assert type(o) is str, o
yield from harvest_complex_from_list(list(o))
for a, b in window(harvest_int_from_str(o), 2):
yield complex(a, b)
def harvest_dict_from_str(o):
assert type(o) is str, o
yield {o: None}
yield {None: o}
yield {o: o}
yield {o: {o: None}}
yield {o: {o: o}}
yield from harvest_dict_from_dict({a:b for a,b in zip(*([iter(o)]*2))})
def harvest_float_from_str(o):
    '''Yield floats derived from the length and character-code pairs of *o*.'''
    assert type(o) is str, o
    yield from harvest_float_from_float(float(len(o)))
    # ord() never returns 0 for real characters, but filter(bool, ...) keeps
    # the division below safe regardless
    for a, b in window(filter(bool, map(ord, o)), 2):
        # the original yielded the bare int product here, violating the
        # float-only contract that test_all_mutations asserts
        yield float(a * b)
        yield a / b
        yield b / a
def harvest_int_from_str(o):
assert type(o) is str, o
yield from harvest_int_from_int(len(o))
yield from map(ord, o)
@flipped
def harvest_list_from_str(o):
assert type(o) is str, o
yield from harvest_list_from_list(list(o))
yield from harvest_list_from_list(list(map(ord, o)))
def harvest_set_from_str(o):
assert type(o) is str, o
for l in harvest_list_from_str(o):
yield from harvest_set_from_list(l)
def harvest_str_from_str(o):
    '''Yield case/strip variants, punctuation swaps, and numeric/non-numeric
    splits of *o*.'''
    assert type(o) is str, o
    yield o.upper()
    yield o.lower()
    yield o.strip()
    common_chars = ['\n', '"', "'", ' ', '\t', '.', ',', ':']
    for old_char, new_char in itertools.combinations(common_chars, 2):
        if old_char in o:
            yield o.replace(old_char, new_char)
    yield ''.join(x for x in o if x.isnumeric())
    yield ''.join(x for x in o if not x.isnumeric())
def harvest_tuple_from_str(o):
assert type(o) is str, o
yield from map(tuple, harvest_list_from_str(o))
def harvest_bool_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_bool_from_bool(bool(o))
yield from map(bool, o)
def harvest_bytearray_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_bytearray_from_list(list(o))
def harvest_bytes_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_bytes_from_list(list(o))
def harvest_complex_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_complex_from_list(list(o))
def harvest_dict_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_dict_from_list(list(o))
def harvest_float_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_float_from_list(list(o))
def harvest_int_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_int_from_list(list(o))
@flipped
def harvest_list_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_list_from_list(list(o))
def harvest_set_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_set_from_list(list(o))
def harvest_str_from_tuple(o):
assert type(o) is tuple, o
yield from harvest_str_from_list(list(o))
def harvest_tuple_from_tuple(o):
assert type(o) is tuple, o
yield from map(tuple, harvest_list_from_tuple(o))
mutation_map = {
(bool, bool): harvest_bool_from_bool,
(bool, bytearray): harvest_bytearray_from_bool,
(bool, bytes): harvest_bytes_from_bool,
(bool, complex): harvest_complex_from_bool,
(bool, dict): harvest_dict_from_bool,
(bool, float): harvest_float_from_bool,
(bool, int): harvest_int_from_bool,
(bool, list): harvest_list_from_bool,
(bool, set): harvest_set_from_bool,
(bool, str): harvest_str_from_bool,
(bool, tuple): harvest_tuple_from_bool,
(bytearray, bool): harvest_bool_from_bytearray,
(bytearray, bytearray): harvest_bytearray_from_bytearray,
(bytearray, bytes): harvest_bytes_from_bytearray,
(bytearray, complex): harvest_complex_from_bytearray,
(bytearray, dict): harvest_dict_from_bytearray,
(bytearray, float): harvest_float_from_bytearray,
(bytearray, int): harvest_int_from_bytearray,
(bytearray, list): harvest_list_from_bytearray,
(bytearray, set): harvest_set_from_bytearray,
(bytearray, str): harvest_str_from_bytearray,
(bytearray, tuple): harvest_tuple_from_bytearray,
(bytes, bool): harvest_bool_from_bytes,
(bytes, bytearray): harvest_bytearray_from_bytes,
(bytes, bytes): harvest_bytes_from_bytes,
(bytes, complex): harvest_complex_from_bytes,
(bytes, dict): harvest_dict_from_bytes,
(bytes, float): harvest_float_from_bytes,
(bytes, int): harvest_int_from_bytes,
(bytes, list): harvest_list_from_bytes,
(bytes, set): harvest_set_from_bytes,
(bytes, str): harvest_str_from_bytes,
(bytes, tuple): harvest_tuple_from_bytes,
(complex, bool): harvest_bool_from_complex,
(complex, bytearray): harvest_bytearray_from_complex,
(complex, bytes): harvest_bytes_from_complex,
(complex, complex): harvest_complex_from_complex,
(complex, dict): harvest_dict_from_complex,
(complex, float): harvest_float_from_complex,
(complex, int): harvest_int_from_complex,
(complex, list): harvest_list_from_complex,
(complex, set): harvest_set_from_complex,
(complex, str): harvest_str_from_complex,
(complex, tuple): harvest_tuple_from_complex,
(dict, bool): harvest_bool_from_dict,
(dict, bytearray): harvest_bytearray_from_dict,
(dict, bytes): harvest_bytes_from_dict,
(dict, complex): harvest_complex_from_dict,
(dict, dict): harvest_dict_from_dict,
(dict, float): harvest_float_from_dict,
(dict, int): harvest_int_from_dict,
(dict, list): harvest_list_from_dict,
(dict, set): harvest_set_from_dict,
(dict, str): harvest_str_from_dict,
(dict, tuple): harvest_tuple_from_dict,
(float, bool): harvest_bool_from_float,
(float, bytearray): harvest_bytearray_from_float,
(float, bytes): harvest_bytes_from_float,
(float, complex): harvest_complex_from_float,
(float, dict): harvest_dict_from_float,
(float, float): harvest_float_from_float,
(float, int): harvest_int_from_float,
(float, list): harvest_list_from_float,
(float, set): harvest_set_from_float,
(float, str): harvest_str_from_float,
(float, tuple): harvest_tuple_from_float,
(int, bool): harvest_bool_from_int,
(int, bytearray): harvest_bytearray_from_int,
(int, bytes): harvest_bytes_from_int,
(int, complex): harvest_complex_from_int,
(int, dict): harvest_dict_from_int,
(int, float): harvest_float_from_int,
(int, int): harvest_int_from_int,
(int, list): harvest_list_from_int,
(int, set): harvest_set_from_int,
(int, str): harvest_str_from_int,
(int, tuple): harvest_tuple_from_int,
(list, bool): harvest_bool_from_list,
(list, bytearray): harvest_bytearray_from_list,
(list, bytes): harvest_bytes_from_list,
(list, complex): harvest_complex_from_list,
(list, dict): harvest_dict_from_list,
(list, float): harvest_float_from_list,
(list, int): harvest_int_from_list,
(list, list): harvest_list_from_list,
(list, set): harvest_set_from_list,
(list, str): harvest_str_from_list,
(list, tuple): harvest_tuple_from_list,
(set, bool): harvest_bool_from_set,
(set, bytearray): harvest_bytearray_from_set,
(set, bytes): harvest_bytes_from_set,
(set, complex): harvest_complex_from_set,
(set, dict): harvest_dict_from_set,
(set, float): harvest_float_from_set,
(set, int): harvest_int_from_set,
(set, list): harvest_list_from_set,
(set, set): harvest_set_from_set,
(set, str): harvest_str_from_set,
(set, tuple): harvest_tuple_from_set,
(str, bool): harvest_bool_from_str,
(str, bytearray): harvest_bytearray_from_str,
(str, bytes): harvest_bytes_from_str,
(str, complex): harvest_complex_from_str,
(str, dict): harvest_dict_from_str,
(str, float): harvest_float_from_str,
(str, int): harvest_int_from_str,
(str, list): harvest_list_from_str,
(str, set): harvest_set_from_str,
(str, str): harvest_str_from_str,
(str, tuple): harvest_tuple_from_str,
(tuple, bool): harvest_bool_from_tuple,
(tuple, bytearray): harvest_bytearray_from_tuple,
(tuple, bytes): harvest_bytes_from_tuple,
(tuple, complex): harvest_complex_from_tuple,
(tuple, dict): harvest_dict_from_tuple,
(tuple, float): harvest_float_from_tuple,
(tuple, int): harvest_int_from_tuple,
(tuple, list): harvest_list_from_tuple,
(tuple, set): harvest_set_from_tuple,
(tuple, str): harvest_str_from_tuple,
(tuple, tuple): harvest_tuple_from_tuple
}
for type_combo in itertools.product(standard_types, repeat=2):
assert type_combo in mutation_map, type_combo
def mutate(o, output_type):
    ''' takes an input object and runs mutations on it to harvest inputs of the
        specified output type. this allows battle_tested to create more test
        inputs without needing to rely on random generation '''
    global mutation_map
    assert isinstance(mutation_map, dict), mutation_map
    assert all(type(k) is tuple for k in mutation_map), mutation_map
    assert all(len(k) is 2 for k in mutation_map), mutation_map
    assert all(all(type(t)==type for t in k) for k in mutation_map), mutation_map
    assert o is not type, o
    assert output_type in standard_types, output_type
    # None has no harvesters of its own, so treat it as False
    seed = False if o is None else o
    def generate():
        harvester = mutation_map[type(seed), output_type]
        if isinstance(seed, output_type):
            # already the requested type: a single-level harvest is enough
            yield from harvester(seed)
        else:
            # harvest once, then mutate each harvested value one level deeper
            for value in harvester(seed):
                yield value
                yield from mutation_map[type(value), output_type](value)
    return cached_uniq(generate())
def warn_about_duplicates(pipe):
    ''' passthrough generator that re-yields 'pipe' unchanged while printing a
        warning to stderr whenever it sees runs of consecutive items that are
        equal in both value and type '''
    last = None
    count = 0
    current_dup = None
    saw_any = False  # tracks whether the pipe produced at least one window
    for a, b in window(pipe, 2):
        saw_any = True
        if a == b and type(a) == type(b):
            current_dup = a
            count += 1
        elif count > 0:
            eprint('WARNING: found', count, 'duplicates of', repr(current_dup))
            count = 0
        yield a
        last = b
    # bug fix: a duplicate run that reached the very end of the pipe was
    # silently dropped before - flush its warning here
    if count > 0:
        eprint('WARNING: found', count, 'duplicates of', repr(current_dup))
    # bug fix: an empty pipe used to yield a spurious trailing None
    # NOTE(review): a single-item pipe still yields nothing because window(pipe, 2)
    # presumably produces no pairs for it - confirm against window's implementation
    if saw_any:
        yield last
def test_all_mutations():
    ''' smoke test: verifies that every standard default value can be mutated
        into every standard output type, and that every harvested value really
        has the requested type '''
    tests = len(standard_types) * len(standard_defaults)
    done = 0
    count = 0
    for start_variable in standard_defaults:
        for output_type in standard_types:
            ran = False
            done += 1
            eprint(done, '/', tests, 'testing harvest_{}_from_{}'.format(output_type.__name__, type(start_variable).__name__))
            # first() caps the stream so an infinite mutator cannot hang the test
            for v in first(mutate(start_variable, output_type), 10000000):
                ran = True
                assert type(v) is output_type, v
                count += 1
            # every combination must have produced at least one value
            assert ran, locals()
    eprint('success: created', count, 'inputs')
# manual smoke-test driver: exercises a few harvesters/mutators (output is
# commented out) and then runs the full mutation test suite
if __name__ == '__main__':
    for _ in range(1):
        for c,i in enumerate(harvest_complex_from_bytearray(bytearray(b'hi'))):
            continue #print('-', i, type(i))
        for i in mutate({'name':'billy'}, int):
            continue #print(i)
        #print(c)
    for test in "hello world why don't we get some waffles or something? 7777".split(' '):
        for _type in (str, dict, list, bool, int, float):
            for i,v in enumerate(warn_about_duplicates(mutate(test, _type))):
                continue #print(repr(v))
            #print(i)
    test_all_mutations()
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,086
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/CallTree.py
|
from pdb import Pdb
from io import StringIO
from functools import partial
class CallStep:
    ''' parses one pdb step report (a "> path(line)name()" header line followed
        by a "-> code" line) into structured fields: path, name, line, code,
        and an optional return_value '''
    __slots__ = ['pdb_text', 'hash']
    def __init__(self, pdb_text):
        assert pdb_text.startswith('> '), 'first line starts with > right????'
        assert pdb_text.splitlines()[1].startswith('-> '), 'second line starts with -> right??????'
        self.pdb_text = pdb_text
    def __str__(self):
        return 'CallStep({})'.format({
            'path':self.path,
            'name':self.name,
            'line':self.line,
            'return_value':self.return_value,
            'code':self.code
        })
    __repr__ = __str__
    def __hash__(self):
        # lazily computed and cached - CallStep is immutable after __init__
        if not hasattr(self, 'hash'):
            self.hash = hash(tuple((self.path,self.name,self.line,self.code)))
        return self.hash
    def __eq__(self, target):
        # bug fix: this previously compared hash(target) == hash(target), which
        # is always True, so any two CallSteps of the same type compared equal
        return type(target) == type(self) and hash(self) == hash(target)
    @property
    def _first_line(self):
        # the "> path(line)name()" header
        return self.pdb_text.splitlines()[0]
    @property
    def _second_line(self):
        # the "-> code" line
        return self.pdb_text.splitlines()[1]
    @property
    def _third_line(self):
        # optional trailing line (e.g. pdb prompt residue); '' when absent
        return self.pdb_text.splitlines()[2] if len(self.pdb_text.splitlines())>2 else ''
    @property
    def path(self):
        # file path: everything between "> " and the first "("
        return self.pdb_text.split('(')[0].split('> ')[1]
    @property
    def line(self):
        # line number: the int between the first "(" and ")"
        return int(self.pdb_text.split('(')[1].split(')')[0])
    @property
    def code(self):
        # source text of the line being executed
        return self._second_line.split('-> ')[1]
    @property
    def name(self):
        # function name: between the first ")" and the next "("
        return self.pdb_text.split(')')[1].split('(')[0]
    @property
    def path_and_name(self):
        ''' shorthand helper for
            self.path + '-' + self.name
        '''
        return self.path + '-' + self.name
    @property
    def return_value(self):
        if '->' in self._first_line and len(self._first_line.split('->')[1]):
            # NOTE(review): eval on pdb's repr of the return value - only safe
            # for values whose repr round-trips; kept from the original on purpose
            return eval(self._first_line.split('->')[1]) # only doing this to see if it breaks anything
            #return self._first_line.split('->')[1]
        else:
            return None
    @property
    def has_return_value(self):
        return self.return_value is not None
class CallTree:
    ''' runs a zero-argument callable under pdb with automatic 'step' commands
        and captures the step-by-step trace for coverage-style inspection '''
    def __init__(self, fn):
        ''' generate a CallTree with the given function '''
        assert callable(fn), 'CallTree needs a function as its argument, not {}'.format(fn)
        sio = StringIO()
        p = Pdb(stdout=sio)
        # queue up enough 'step' commands to walk the whole call, then quit
        for i in range(512):
            p.cmdqueue.append('s')
        p.cmdqueue.append('q')
        p.runcall(fn)
        sio.seek(0)
        self.target_fn = fn
        self.pdb_text = sio.read()
    @property
    def call_steps(self):
        ''' takes the pdb_chunks and converts them to CallStep objects '''
        for i in self.pdb_chunks:
            yield CallStep(i)
    def print_pdb_chunks(self):
        ''' displays the steps extracted from running in pdb '''
        for i in self.pdb_chunks:
            print('')
            print(i)
    @property
    def pdb_chunks(self):
        ''' splits the pdb_output into chunked strings for each step '''
        for i in self.pdb_text.split('\n> '):
            # re-attach the "> " prefix that split() consumed
            yield i if i.startswith('> ') else '> {}'.format(i)
    def print_coverage(self):
        ''' prints a sorted "path-name-line" record for every executed line
            (fix: previously also built an unused dict and function set) '''
        unique_lines = set()
        for s in self.call_steps:
            unique_lines.add('{}-{}-{:06d}'.format(s.path,s.name,s.line))
        for i in sorted(unique_lines):
            print(i)
    @property
    def first_step(self):
        # the initial stop, i.e. the entry into the traced function
        return next(self.call_steps)
    @property
    def target_call_path(self):
        # only the steps that happened inside the originally traced function
        return (i for i in self.call_steps if i.name == self.first_step.name)
    @property
    def target_lines_ran(self):
        return set(i.line for i in self.target_call_path)
    def __str__(self):
        return self.pdb_text.__str__()
# demo: trace a small recursive function and print which of its lines executed
if __name__ == '__main__':
    def my_function(i):
        # convert the input to a string
        i = str(i)
        if i: # if i is not an empty string
            # run this function again, with one less character
            return my_function(i[:-1])
        else:
            return i
    def function_lines_ran(fn):
        # helper: line numbers executed inside fn's own body
        return CallTree(fn).target_lines_ran
    print(function_lines_ran(partial(my_function,329104)))
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,087
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/input_type_combos.py
|
from itertools import product
from unittest import TestCase, main
from battle_tested.beta.ammo import standard
''' This code converts user provided type combos to combos that can be split
out for a fuzz plan.
'''
def has_nested_combos(input_types):
    ''' True when at least one member of the collection is something other than a bare type '''
    assert isinstance(input_types, (list, tuple, set)), input_types
    return not all(isinstance(member, type) for member in input_types)
def flatten_types(input_types):
    ''' yields every bare type found anywhere inside a (possibly nested) structure '''
    assert isinstance(input_types, (list, tuple, set, type)), input_types
    if isinstance(input_types, type):
        # base case: a bare type is yielded as-is
        yield input_types
        return
    # recursive case: walk each member depth-first, left to right
    for member in input_types:
        yield from flatten_types(member)
class IllegalTypeComboSettings(ValueError):
    ''' raised when the supplied type_combos cannot legally satisfy the requested arg_count '''
class TooManyNestedInputTypesForArgCount(ValueError):
    ''' raised when more nested input type combos are supplied than the target function has arguments '''
def input_type_combos(input_types, arg_count):
    ''' expands all combinations generated from the user's given settings '''
    assert isinstance(input_types, (list, tuple, set)), input_types
    assert isinstance(arg_count, int), arg_count
    assert arg_count > 0, arg_count
    if arg_count == 1:
        # single-arg functions: flatten everything and yield the unique product
        yield from product(set(flatten_types(input_types)))
        return
    if not has_nested_combos(input_types):
        # a flat list of types
        if len(input_types) == arg_count:
            yield tuple(input_types)
        else:
            # every type is a candidate for every argument position
            yield from product(input_types, repeat=arg_count)
        return
    # nested combos from here on
    if len(input_types) > arg_count:
        raise TooManyNestedInputTypesForArgCount(str(locals()))
    elif len(input_types) == arg_count:
        # normalize: every position becomes a tuple of candidate types
        normalized = (
            (entry,) if isinstance(entry, type) else entry
            for entry in input_types
        )
        # yield out the product of those tuples
        yield from product(*normalized)
    else:
        # not enough input_types for the function's args - pad the remaining
        # positions with the full set of standard types and recurse
        padded = tuple(input_types) + tuple(
            tuple(standard.types) for _ in range(arg_count - len(input_types))
        )
        assert len(padded) == arg_count, locals()
        yield from input_type_combos(input_types=padded, arg_count=arg_count)
class Test_input_type_combos(TestCase):
    ''' unit tests for input_type_combos covering flat, nested, padded,
        and error-raising expansion cases '''
    def test_basic_one_type_one_arg(self):
        self.assertEqual(set(input_type_combos((int, ), 1)), {(int, )})
    def test_basic_two_types_one_arg(self):
        self.assertEqual(set(input_type_combos((int, bool), 1)),
                         {(int, ), (bool, )})
    def test_basic_one_type_two_args(self):
        self.assertEqual(set(input_type_combos((int, ), 2)), {(int, int)})
    def test_basic_two_types_two_args(self):
        self.assertEqual(set(input_type_combos((int, bool), 2)), {(int, bool)})
    def test_one_nested_two_args(self):
        self.assertEqual(set(input_type_combos((int, (bool, str)), 2)),
                         {(int, bool), (int, str)})
    def test_two_nested_three_args(self):
        self.assertEqual(
            set(input_type_combos(((int, float), bool, (bool, str)), 3)), {
                (float, bool, bool),
                (float, bool, str),
                (int, bool, bool),
                (int, bool, str)
            })
    def test_two_nested_three_args_different_sizes(self):
        self.assertEqual(
            set(input_type_combos(((int, float), bool, (bool, str, int)), 3)),
            {
                (float, bool, bool),
                (float, bool, int),
                (float, bool, str),
                (int, bool, bool),
                (int, bool, int),
                (int, bool, str)
            })
    def test_three_nested_three_args_different_sizes(self):
        self.assertEqual(
            set(
                input_type_combos(
                    ((int, float), (bool, int), (bool, str, int)), 3)), {
                        (float, bool, bool),
                        (float, bool, int),
                        (float, bool, str),
                        (float, bool, str),
                        (float, int, bool),
                        (float, int, int),
                        (float, int, str),
                        (float, int, str),
                        (int, bool, bool),
                        (int, bool, int),
                        (int, bool, str),
                        (int, bool, str),
                        (int, int, bool),
                        (int, int, int),
                        (int, int, str),
                        (int, int, str)
                    })
    def test_flatten_nested_types_for_one_arg(self):
        # single-arg functions flatten any nesting away
        self.assertEqual(
            set(input_type_combos((int, float, (str, bool)), 1)),
            {(int,), (float,), (str,), (bool,)}
        )
    def test_not_enough_args_for_types(self):
        # more nested entries than args must raise
        with self.assertRaises(ValueError):
            set(
                input_type_combos(
                    (
                        int,
                        float,
                        (
                            str, bool
                        )  # no nested cases here since everything will apply to every arg when types outnumbers args
                    ),
                    2))
    def test_multiply_types_for_not_enough_args(self):
        # a flat list shorter than arg_count is expanded to the full product
        self.assertEqual(
            set(input_type_combos((
                int,
                float,
                str
            ), 2)), {(int, int), (int, float), (int, str), (float, int),
                     (float, float), (float, str), (str, int), (str, float),
                     (str, str)})
if __name__ == '__main__':
    # run the unittest suite above with per-test verbose output
    main(verbosity=2)
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,088
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/generate-db-from-beta.py
|
#!/usr/bin/env python3
# by: Cody Kochmann
# fix: shebang previously pointed at /usr/local/bin/env, which does not exist
# on most systems - env(1) lives in /usr/bin

# import beta fuzz
from battle_tested.beta import fuzz

# define a target we will fuzz
def my_adder(a, b):
    return a + b

# run the fuzz
result = fuzz(my_adder, max_tests=65536)

# save the result to a file
result.save_to_file('fuzz-result.db')

print('finished writing fuzz-result.db')
print('currently I am using sqlitebrowser to explore this database')
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,089
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/function_arg_count.py
|
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2019-04-30 07:20:45
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2019-04-30 07:44:37
from typing import Callable
from re import findall
from functools import lru_cache
@lru_cache(64) # this can be an expensive operation, so use caching
def function_arg_count(fn:Callable) -> int:
    """ finds how many args a function has

        - normal functions: read co_argcount off the code object
        - functools.partial: wrapped count minus already-bound args/kwargs
        - anything else: brute force call with 1..63 args and watch for
          arg-count TypeErrors
    """
    assert callable(fn), 'function_arg_count needed a callable function, not {0}'.format(repr(fn))
    # normal functions expose their arg count directly on the code object
    if ( hasattr(fn, '__code__')
         and hasattr(fn.__code__, 'co_argcount')
         and isinstance(fn.__code__.co_argcount, int)
         and fn.__code__.co_argcount >= 0):
        return fn.__code__.co_argcount
    # partials
    elif ( hasattr(fn, 'args')
           and hasattr(fn.args, '__len__')
           and hasattr(fn, 'func')
           and callable(fn.func)
           and hasattr(fn, 'keywords')
           and hasattr(fn.keywords, '__len__')):
        # remaining fuzzable args = wrapped function's count minus bound ones
        return function_arg_count(fn.func) - (len(fn.args) + len(fn.keywords))
    # brute force
    else:
        # attempts to brute force and find how many args work for the function
        number_of_args_that_work = []
        for i in range(1, 64):
            try:
                fn(*range(i))
            except TypeError as ex:
                # arg-count complaints look like "takes exactly 2" / "missing 1";
                # fix: run the regex once and reuse the result (it previously ran
                # twice with the first result discarded)
                matches = findall(r'((takes (exactly )?(one|[0-9]{1,}))|(missing (one|[0-9]{1,})))', repr(ex))
                if len(repr(matches)) <= 10:
                    # some other TypeError: the arg count itself was accepted
                    number_of_args_that_work.append(i)
            except Exception:
                #number_of_args_that_work.append(i)
                pass
            else:
                number_of_args_that_work.append(i)
        # if brute forcing worked, return the smallest count that did
        if len(number_of_args_that_work):
            return min(number_of_args_that_work)
    #logging.warning('using backup plan')
    return 1 # not universal, but for now, enough... :/
# self-test suite: runs only when this module is executed directly
if __name__ == '__main__':
    from functools import partial
    # ensure it works with its basic functionality
    def test_0():
        pass
    assert function_arg_count(test_0) == 0
    def test_1(a):
        pass
    assert function_arg_count(test_1) == 1
    def test_2(a, b):
        pass
    assert function_arg_count(test_2) == 2
    # ensure it works with lambdas
    assert function_arg_count(lambda: None) == 0
    assert function_arg_count(lambda a: None) == 1
    assert function_arg_count(lambda a,b: None) == 2
    assert function_arg_count(lambda a,b,c: None) == 3
    assert function_arg_count(lambda a,b,c,d: None) == 4
    # ensure it works on itself
    assert function_arg_count(function_arg_count) == 1
    # function_arg_count returns how many args are left to fuzz. partials imply
    # that one of the args needs to be locked down to a specific value
    assert function_arg_count(partial(test_2, 'pancakes')) == 1
    assert function_arg_count(partial(
        lambda a, b: None,
        'waffles'
    )) == 1
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,090
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/ammo.py
|
import gc, sys, random, copy
from functools import wraps, partial
# print to stderr with an immediate flush (keeps progress output ordered
# even when stdout is being captured or buffered)
eprint = partial(print, file=sys.stderr, flush=True)
class standard:
    ''' canonical groupings of the builtin types the fuzzer works with '''
    # every builtin type the fuzzer knows how to generate
    types = { bool, bytearray, bytes, complex, dict, float, int, list, set, str, tuple }
    # types that hold other objects (fix: 'dict' was listed twice in the literal)
    containers = { bytearray, dict, list, set, tuple }
    # scalar leaf types
    objects = { bool, bytes, complex, float, int, str }
    # one default-constructed instance of each type (order follows set iteration)
    defaults = [i() for i in types]
class never_repeat_ids(set):
    ''' generator decorator/cache: the wrapped generator yields nothing when
        called with an argument whose id() has already been seen '''
    def __call__(self, fn):
        assert callable(fn), fn
        @wraps(fn)
        def guarded(argument):
            key = id(argument)
            if key in self:
                return
            self.add(key)
            yield from fn(argument)
        # expose a way to forget everything that was seen so far
        guarded.clear_cache = self.clear
        return guarded
# yields the scalar leaves of o (None or any standard.objects value); the
# decorator makes repeat calls with the same object (by id) yield nothing
@never_repeat_ids()
def extract_objects(o):
    type_o = type(o)
    if o is None:
        yield None
    elif type(o) in standard.objects:
        yield o
    # NOTE(review): the container-recursion branch below is intentionally
    # disabled - kept verbatim for reference
    #elif type_o in standard.types:
    #    if type_o in standard.containers:
    #        if type_o is dict:
    #            yield {k:v for k,v in o.items() if type(k) in standard.objects and type(v) in standard.objects}
    #            for k in o:
    #                yield from extract_objects(k)
    #                yield from extract_objects(o[k])
    #        else:
    #            yield type_o(i for i in o if type(i) in standard.objects)
    #            for i in o:
    #                yield from extract_objects(i)
    #    else:
    #        if type_o in standard.objects:
    #            yield o
def ammo_from_gc():
    ''' harvests live objects from the garbage collector as fuzz inputs:
        yields standard scalar objects as-is, and for containers yields a
        sanitized copy plus its scalar members '''
    #{tuple({type(ii) for ii in i if type(ii) in standard.objects}) for i in (x for x in gc.get_objects() if type(x) in standard.containers)}
    containers = standard.containers
    objects = standard.objects
    # NOTE(review): gc.get_objects() is a snapshot of the whole interpreter heap,
    # so what this yields varies from run to run by design
    for obj in gc.get_objects():
        if type(obj) in objects:
            yield obj
        if type(obj) in containers:
            if type(obj) is dict:
                # keep keys, but blank out values that are not scalar objects
                items = [[k,v] if type(v) in objects else [k,None] for k,v in obj.items()]
                yield dict(items)
                yield items
                for k,v in items:
                    yield k
                    yield v
            else:
                # copy only the scalar members into a fresh container of the same type
                items = [i for i in obj if type(i) in objects]
                yield type(obj)(items)
                yield from items
def infinite_gc_ammo():
    ''' endless stream of fuzz inputs: repeatedly takes a fresh pass over the
        garbage collector's live objects '''
    while True:
        yield from ammo_from_gc()
# manual smoke test: walk one full gc pass and report which standard types
# were (or were not) collected
if __name__ == '__main__':
    collected_types = set()
    eprint('running through ammo_from_gc to get initial test variables')
    for i,v in enumerate(ammo_from_gc()):
        collected_types.add(type(v))
        eprint(i, type(v).__name__, v)
    eprint('validating that at least one of every standard type was collected')
    for t in standard.types:
        if t not in collected_types:
            print('couldnt find:', t.__name__)
    eprint('success!')
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,091
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/setup.py
|
# fix: distutils is deprecated (removed in python 3.12) - setuptools, which was
# already imported here, provides a drop-in setup()
from setuptools import find_packages, setup

version = '2023.8.20'
package_name = 'battle_tested'

packages = find_packages()
# sanity check: the package itself must be discoverable before publishing
assert package_name in packages, packages

setup(
    name = package_name,
    packages = packages,
    version = version,
    install_requires = ["hypothesis", "stricttuple", "prettytable", "generators", "strict_functions"],
    description = 'automated function and api fuzzer for easy testing of production code',
    author = 'Cody Kochmann',
    author_email = 'kochmanncody@gmail.com',
    url = 'https://github.com/CodyKochmann/battle_tested',
    download_url = f'https://github.com/CodyKochmann/battle_tested/tarball/{version}',
    keywords = ['battle_tested', 'test', 'hypothesis', 'fuzzing', 'fuzz', 'production', 'unittest', 'api', 'fuzzer', 'stress'],
    classifiers = []
)
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,092
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/__init__.py
|
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2017-04-27 12:49:17
# @Last Modified 2018-03-12
# @Last Modified time: 2020-04-05 11:01:47
"""
battle_tested - automated function fuzzing library to quickly test production
code to prove it is "battle tested" and safe to use.
Examples of Primary Uses:
from battle_tested import fuzz
def test_function(a,b,c):
return c,b,a
fuzz(test_function)
# or to collect tests
fuzz(test_function, keep_testing=True)
Or:
from battle_tested import battle_tested
@battle_tested()
def test_function(a,b,c):
return c,b,a
"""
from __future__ import print_function, unicode_literals
import builtins
from collections import deque
from functools import wraps, partial
from gc import collect as gc
from generators.inline_tools import attempt
from hypothesis import given, strategies as st, settings, Verbosity
from hypothesis.errors import HypothesisException
from itertools import product, cycle, chain, islice
from multiprocessing import Process, Queue, cpu_count as multi_cpu_count
from prettytable import PrettyTable
from random import choice, randint
from re import findall
from stricttuple import stricttuple
from string import ascii_letters, digits
from time import sleep
from time import time
import generators as gen
import logging
import os
import signal
import sys
import traceback
# public API surface of the package
__all__ = 'battle_tested', 'fuzz', 'disable_traceback', 'enable_traceback', 'garbage', 'crash_map', 'success_map', 'results', 'stats', 'print_stats', 'function_versions', 'time_all_versions_of', 'easy_street', 'run_tests', 'multiprocess_garbage'
# try to set the encoding (python 2 only: reload()/setdefaultencoding no longer
# exist on python 3, where attempt() just swallows the resulting NameError)
attempt(lambda: (reload(sys), sys.setdefaultencoding('utf8')))
class hardware:
    ''' single reference of what hardware the system is working with '''
    # get the count of cpu cores, if it fails, assume 1 for safety
    cpu_count = attempt(multi_cpu_count, default_output=1)
    # True on single-core machines; used below to skip cpu pinning
    single_core = cpu_count == 1
class float(float): # patched repr so eval(repr(x)) reconstructs the value
    def __repr__(self):
        raw = builtins.float.__repr__(self)
        # plain digit strings can be passed to float() unquoted; anything with
        # a sign, exponent, inf or nan needs to go through a string literal
        plain = all(ch in '1234567890.' for ch in raw)
        return 'float({})'.format(raw) if plain else 'float("{}")'.format(raw)
class complex(complex): # patched repr so eval(repr(x)) reconstructs the value
    def __repr__(self):
        raw = builtins.complex.__repr__(self)
        return 'complex("{}")'.format(raw)
def compilable(src):
    ''' True when src compiles as a python module, False otherwise '''
    def check():
        compile(src, 'waffles', 'exec')
        return True
    return attempt(check, False)
def runnable(src):
    ''' True when src compiles AND executes without raising, False otherwise '''
    def check():
        eval(compile(src, 'waffles', 'exec'))
        return True
    return attempt(check, False)
def runs_fine(src):
    ''' True when src evaluates as an expression without raising, False otherwise '''
    def check():
        eval(src)
        return True
    return attempt(check, False)
def valid_repr(o):
    ''' returns true if the object has a valid repr '''
    def check():
        # first try equality, then fall back to identity (two separate evals,
        # matching the original short-circuit behavior)
        if eval(repr(o)) == o:
            return True
        return eval(repr(o)) is o
    return attempt(check, False)
class unittest_builder(object):
    ''' namespace of string templates that assemble a runnable unittest script
        out of recorded fuzz results '''
    @staticmethod
    def test_body(fn, test_code):
        ''' call this to add the code needed for a full unittest script '''
        d = {
            'function_path':fn.__code__.co_filename,
            'function_name':fn.__name__,
            'module_name':'.'.join(os.path.basename(fn.__code__.co_filename).split('.')[:-1]),
            'test_code': test_code
        }
        # the template below is emitted verbatim into the generated test file
        return '''#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from uuid import UUID
from fractions import Fraction
import sys
import os.path

sys.path.append(os.path.dirname("{function_path}"))

from {module_name} import {function_name}

class Test_{function_name}(unittest.TestCase):
    """ automated unittest generated by battle_tested """{test_code}

if __name__ == '__main__':
    unittest.main()
'''.format(**d)
    @staticmethod
    def equal_test(test_name, invocation_code, output):
        ''' generate tests that assert that the input equals the output '''
        return '''
    def test_{}(self):
        self.assertEqual({}, {})'''.format(test_name, invocation_code, repr(output))
    @staticmethod
    def raises_test(test_name, invocation_code, ex_type):
        ''' generate a unittest that asserts that a certain input raises the given exception '''
        # nan has no literal, so it is rewritten into an eval-able expression
        return '''
    def test_{}(self):
        with self.assertRaises({}):
            {}'''.format(test_name, ex_type.__name__, invocation_code.replace('nan', 'float("""nan""")'))
def getsource(fn):
    ''' basically just inspect.getsource, only this one doesn't crash as much:
        falls back to the object's str() form (or '') when source is unavailable '''
    from inspect import getsource
    try:
        return getsource(fn)
    except Exception:
        # fix: was a bare except, which also swallowed KeyboardInterrupt/SystemExit
        return attempt(lambda: '{}'.format(fn), default_output='')
def pin_to_cpu(core_number):
    ''' pin the current process to one cpu core so its L1 cache is not dumped '''
    assert type(core_number) == int, 'pin_to_cpu needs an int as the argument'
    # best effort only: sched_setaffinity does not exist on every platform
    set_affinity = lambda: os.sched_setaffinity(os.getpid(), (core_number,))
    attempt(set_affinity)
def renice(new_niceness):
    ''' renice the current process calling this function to the new input '''
    assert type(new_niceness) == int, 'renice needs an int as its argument'
    # best effort only: os.nice is unavailable on some platforms
    change_niceness = lambda: os.nice(new_niceness)
    attempt(change_niceness)
pin_to_cpu(0) # pin this main process to the first core (workers get the rest)
renice(15) # renice this main process, idk why 15, but it gives room for priorities above and below
def shorten(string, max_length=80, trailing_chars=3):
    ''' trims 'string' to at most 'max_length' characters, replacing the cut
        middle with '...' while keeping 'trailing_chars' from the end '''
    assert type(string).__name__ in {'str', 'unicode'}, 'shorten needs string to be a string, not {}'.format(type(string))
    assert type(max_length) == int, 'shorten needs max_length to be an int, not {}'.format(type(max_length))
    assert type(trailing_chars) == int, 'shorten needs trailing_chars to be an int, not {}'.format(type(trailing_chars))
    assert max_length > 0, 'shorten needs max_length to be positive, not {}'.format(max_length)
    assert trailing_chars >= 0, 'shorten needs trailing_chars to be greater than or equal to 0, not {}'.format(trailing_chars)
    if len(string) <= max_length:
        return string
    head = string[:max_length - (trailing_chars + 3)]
    tail = string[-trailing_chars:] if trailing_chars > 0 else ''
    return '{before:}...{after:}'.format(before=head, after=tail)
class easy_street:
    ''' This is a namespace for high speed test generation of various types.
        Every method is an infinite generator; callers are expected to islice
        or next() what they need. '''
    @staticmethod
    def chars():
        # endless stream of single ascii letters/digits
        test_chars = ascii_letters + digits
        for _ in gen.loop():
            for combination in product(test_chars, repeat=4):
                for i in combination:
                    yield i
    @staticmethod
    def strings():
        # seed strings: empty, a canary that screams if blindly exec'd,
        # plus every word scraped out of the module's docstrings
        test_strings = [
            '',
            'exit("######## WARNING this code is executing strings blindly ########")'
        ]
        # this snippet rips out every word from doc strings
        test_strings += list(set(findall(
            r'[a-zA-Z\_]{1,}',
            [v.__doc__ for v in globals().values() if hasattr(v, '__doc__')].__repr__()
        )))
        for _ in gen.loop():
            for combination in product(test_strings, repeat=4):
                for i in combination:
                    yield i
    @staticmethod
    def bools():
        booleans = (True, False)
        for _ in gen.loop():
            for combination in product(booleans, repeat=4):
                for i in combination:
                    yield i
    @staticmethod
    def ints():
        # small signed ints; range chosen to cover common boundary values
        numbers = tuple(range(-33,65))
        for _ in gen.loop():
            for combination in product(numbers, repeat=3):
                for i in combination:
                    yield i
    @staticmethod
    def floats():
        # divide two desynchronized non-zero int streams to get varied floats
        non_zero_ints = (i for i in easy_street.ints() if i != 0)
        stream1 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 10))
        stream2 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 12))
        for i in stream1:
            yield next(stream2)/(1.0*i)
    @staticmethod
    def lists():
        # lists of mixed scalar strategies with lengths cycling 0..20
        strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
        strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
        lengths = cycle(list(range(0, 21)))
        for _ in gen.loop():
            for length in lengths:
                for strat in strategies:
                    yield [st for st in islice(strat, length)]
    @staticmethod
    def tuples():
        for i in easy_street.lists():
            yield tuple(i)
    @staticmethod
    def dicts():
        # dicts built by pairing consecutive items from the scalar strategies
        strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
        strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
        lengths = cycle(list(range(0, 21)))
        for _ in gen.loop():
            for length in lengths:
                for strat in strategies:
                    yield { k:v for k,v in gen.chunks(islice(strat,length*2), 2) }
    @staticmethod
    def sets():
        strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
        strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
        lengths = cycle(list(range(0, 21)))
        for _ in gen.loop():
            for length in lengths:
                for strat in strategies:
                    yield {i for i in islice(strat, length)}
    @staticmethod
    def garbage():
        # round-robins single values from every strategy above, forever
        while 1:
            strategies = (
                easy_street.strings(),
                easy_street.ints(),
                easy_street.floats(),
                easy_street.bools(),
                easy_street.dicts(),
                easy_street.sets(),
                easy_street.lists(),
                easy_street.tuples()
            )
            for strat in gen.chain(product(strategies, repeat=len(strategies))):
                yield next(strat)
class StrategyNotPopulatingBufferException(Exception):
    ''' raised if a hypothesis strategy fails to populate its buffer with any values at all '''
def strategy_stream(strat, buffer_limit=64):
    ''' emulates what strategy.example() use to do but now with a buffered
        generator: repeatedly runs a throwaway hypothesis test whose only job
        is to fill a deque with generated examples, then drains it '''
    # define a buffer to work with
    buffer = deque(maxlen=buffer_limit)
    # loop forever, clearing the buffer every time (deque.clear() returns None)
    while buffer.clear() == None:
        # populate the buffer with a hypothesis test that runs enough to fill the buffer
        @settings(database=None, max_examples=buffer_limit)
        @given(strat)
        def buffer_append(data):
            nonlocal buffer
            return buffer.append(data)
        # run the hypothesis test
        buffer_append()
        # a strategy that generated nothing would spin here forever - fail loudly instead
        if len(buffer) == 0:
            raise StrategyNotPopulatingBufferException(f'buffer was found empty, it appears strategy[{strat}] is not producing anything')
        # empty the buffer out
        yield from buffer
def background_strategy(strats, q):
    ''' worker-process entry point: pins itself to the core number received on
        the queue, then endlessly pushes generated examples into the queue '''
    # the first queue item is the cpu core this worker should pin to
    target_core = q.get()
    renice(20) # maximize niceness
    if not hardware.single_core:
        pin_to_cpu(target_core)
    # bind the method locally for speed in the hot loop below
    q_put = q.put
    # create a stream that cycles through the provided strategies
    stream = cycle(strategy_stream(s) for s in strats)
    # rotate through the strategies appending their outputs into the queue
    for strat in stream:
        try:
            q_put(next(strat))
        except:
            # NOTE(review): deliberate best-effort - a full queue or an exhausted
            # strategy is silently skipped so the worker never dies
            pass
def background_manager(child_queues, q):
    """Collector-process loop: funnels items from `child_queues` into `q`.

    Pinned to core 1 on multi-core hosts, runs forever at maximum niceness.
    """
    if not hardware.single_core:
        pin_to_cpu(1)
    renice(20)
    q_put = q.put
    for cq in cycle(child_queues):
        try:
            item = cq.get_nowait()
            q_put(item)
        except:
            # nothing ready on this child queue (or output queue full);
            # back off briefly before trying the next one
            sleep(0.0001)
def multiprocess_garbage():
    """Yields endless fuzzing inputs generated by background worker processes.

    Spawns one generator worker per spare core (each runs a share of the
    hypothesis strategies and pushes values into its own queue) plus a
    collector process that funnels everything into one gather queue.  When
    the gather queue is empty, values are served from the cheaper local
    easy_street.garbage() stream instead so the consumer never blocks.
    All child processes are terminated when the generator is closed.
    """
    basics = (
        st.binary(),
        st.booleans(),
        st.characters(),
        st.complex_numbers(),
        st.floats(),
        st.uuids(),
        st.fractions(),
        st.integers(),
        st.decimals(),
        st.dates(),
        st.datetimes(),
        st.dates().map(str),
        st.datetimes().map(str),
        st.none(),
        st.text(),
        st.dictionaries(keys=st.text(), values=st.text())
    )
    # subset of basics whose examples can be hashed (usable as set members / dict keys)
    hashables = tuple(s for s in basics if hashable_strategy(s))
    lists = tuple(st.lists(elements=i) for i in basics)
    tuples = tuple(st.lists(elements=i).map(tuple) for i in basics)
    sets = tuple(st.sets(elements=i) for i in hashables)
    dictionaries = tuple(st.dictionaries(keys=st.one_of(*hashables), values=i) for i in basics)
    strats = basics + lists + tuples + sets + dictionaries
    # core layout: master process keeps core 0, collector gets core 1,
    # generator workers share the rest
    if hardware.cpu_count > 2: # logic for 3 or more cores
        cores_used_for_generation = hardware.cpu_count - 2
        specified_cores = cycle(range(2, hardware.cpu_count))
    else:
        cores_used_for_generation = 1
        if hardware.cpu_count == 2:
            # dual core has second core do generation
            specified_cores = cycle([1])
        else:
            # single core systems do everything on the same core
            specified_cores = cycle([0])
    # deal the strategies round-robin into one job bucket per generator worker
    jobs = cycle([[] for _ in range(cores_used_for_generation)])
    for s in strats:
        next(jobs).append(s)
    jobs = [(next(jobs), Queue(4)) for _ in range(cores_used_for_generation)]
    # prime each job's queue with its target core; the worker reads it first
    for job, q in jobs:
        q.put(next(specified_cores))
    processes = [
        Process(target=background_strategy, args=j)
        for j in jobs
    ]
    for p in processes:
        p.start()
    gather_queue = Queue(16)
    gather_process = Process(target=background_manager, args=([q for _, q in jobs], gather_queue))
    gather_process.start()
    try:
        fast_alternative = easy_street.garbage()
        # bind hot-path lookups to locals once
        gather_queue_full = gather_queue.full
        gather_queue_get = gather_queue.get_nowait
        fast_alternative_next = getattr(fast_alternative, ('next' if hasattr(fast_alternative, 'next') else '__next__'))
        for _ in gen.loop(): # loop forever
            try:
                yield gather_queue_get()
            except:
                # gather queue empty; serve a locally generated value instead
                yield fast_alternative_next()
    except (KeyboardInterrupt, SystemExit, GeneratorExit, StopIteration):
        # NOTE(review): this cleanup is redundant -- the finally block below
        # performs the identical terminate/join sequence afterwards
        gather_process.terminate()
        gather_process.join()
        for p in processes:
            p.terminate()
            p.join()
    finally:
        # always tear down the collector and every generator worker
        gather_process.terminate()
        gather_process.join()
        for p in processes:
            p.terminate()
            p.join()
class MaxExecutionTimeError(Exception):
    """Raised when a max_execution_time block exceeds its time budget."""

class max_execution_time:
    """Context manager that raises if its body runs longer than `seconds`.

    Uses SIGALRM, so it only works on Unix and only in the main thread.

        with max_execution_time(5):
            do_something_slow()

    Parameters:
        seconds - timeout in seconds; values below 1 are clamped to 1
                  because signal.alarm only accepts whole seconds
        ex_type - exception type raised when the alarm fires
    """
    def signal_handler(self, signum, frame):
        # invoked by the interpreter when the SIGALRM alarm fires
        raise self.ex_type('operation timed out')
    def __init__(self, seconds, ex_type=MaxExecutionTimeError):
        self.seconds = 1 if seconds < 1 else seconds
        self.ex_type = ex_type
    def __enter__(self):
        # remember the previous handler so nested users / other libraries
        # get their handler back when we exit (the original leaked ours)
        self._previous_handler = signal.signal(signal.SIGALRM, self.signal_handler)
        signal.alarm(self.seconds)
    def __exit__(self, *a):
        signal.alarm(0)  # 0 cancels any pending alarm
        # restore whatever SIGALRM handler was installed before __enter__
        signal.signal(signal.SIGALRM, self._previous_handler)
def hashable_strategy(s):
    """ Predicate stating a hash-able hypothesis strategy

    Samples 32 examples from the strategy and returns False if any sample
    fails to hash; True otherwise.
    """
    assert hasattr(s, 'example'), 'hashable_strategy needs a strategy argument'
    stream = strategy_stream(s)
    for i in range(32):
        sample = next(stream)
        try:
            hash(sample)
            # dicts are rejected explicitly, though hash() above would already
            # raise for them -- belt and suspenders
            assert type(sample) != dict
        except:
            return False
    return True
def replace_strategy_repr(strat, new_repr):
    """ replaces a strategy's repr and str functions with a custom one

    Works by subclassing the strategy's own class; assumes `strat` is a
    one_of-style strategy exposing `original_strategies` -- TODO confirm
    for other strategy kinds.
    """
    class custom_repr_strategy(type(strat)):
        __repr__ = new_repr
        __str__ = new_repr
    return custom_repr_strategy(strategies=strat.original_strategies)
def build_garbage_strategy():
    ''' builds battle_tested's primary strategy

    Combines basic scalar strategies with iterable strategies built from
    them into a single one_of strategy that produces "garbage" inputs.
    '''
    basics = (
        st.binary(),
        st.booleans(),
        st.characters(),
        st.complex_numbers(),
        st.floats(),
        st.fractions(),
        st.integers(),
        st.none(),
        st.text(),
        st.uuids(),
        st.dictionaries(keys=st.text(), values=st.text())
    )
    # subset of basics whose examples can actually be hashed
    hashables = tuple(s for s in basics if hashable_strategy(s))
    # returns a strategy with only basic values
    any_basics = partial(st.one_of, *basics)
    # returns a strategy with only hashable values
    any_hashables = partial(st.one_of, *hashables)
    # returns a strategy of lists with basic values
    basic_lists = partial(st.lists, elements=any_basics())
    # returns a strategy of lists with hashable values
    # (bugfix: previously drew from any_basics(), which can yield unhashable
    # values and would break set construction downstream)
    hashable_lists = partial(st.lists, elements=any_hashables())
    iterable_strategies = (
        # iterables with the same type inside
        st.builds(lambda a:[i for i in a if type(a[0])==type(i)], basic_lists(min_size=3)),
        st.builds(lambda a:tuple(i for i in a if type(a[0])==type(i)), basic_lists(min_size=3)),
        #st.builds(lambda a:{i for i in a if type(a[0])==type(i)}, hashable_lists(min_size=3)),
        st.iterables(elements=any_basics()),
        #st.builds(lambda a:(i for i in a if type(a[0])==type(i)), basic_lists(min_size=3)),
        # garbage filled iterables
        st.builds(tuple, basic_lists()),
        #st.builds(set, hashable_lists()),
        st.dictionaries(keys=any_hashables(), values=any_basics())
    )
    # returns a strategy with only iterable values
    any_iterables = partial(st.one_of, *iterable_strategies)
    return st.one_of(any_basics(), any_iterables())
garbage = replace_strategy_repr(build_garbage_strategy(), lambda s:'<garbage>')
class storage():
    """ where battle_tested stores things """
    # pre-generated fuzzing inputs, consumed by generate_examples
    test_inputs = deque()
    # maps each tested function to its collected results
    results = {}
    @staticmethod
    def build_new_examples(how_many=100):
        """ use this to add new examples to battle_tested's pre-loaded examples in storage.test_inputs """
        assert type(how_many) == int, 'build_new_examples needs a positive int as the argument'
        assert how_many > 0, 'build_new_examples needs a positive int as the argument'
        # throwaway hypothesis test whose only job is to append examples
        @settings(max_examples=how_many)
        @given(garbage)
        def garbage_filler(i):
            try:
                storage.test_inputs.append(i)
            except:
                pass
        try:
            garbage_filler()
        except:
            # example generation is best-effort; hypothesis errors are ignored
            pass
    @staticmethod
    def refresh_test_inputs():
        """ wipe battle_tested test_inputs and cache new examples """
        storage.test_inputs.clear()
        try:
            # just fill test inputs with something to start with
            storage.test_inputs.append('waffles') # easter egg :)
            for i in islice(easy_street.garbage(), 64):
                storage.test_inputs.append(i)
            storage.build_new_examples()
        except Exception as e:
            # best-effort refresh: partial fills are acceptable
            pass
storage.build_new_examples.garbage = garbage
class io_example(object):
    """ demonstrates the behavior of input and output

    Stores one observed call as ``input -> output`` and hashes/compares on
    that textual form so examples can be deduplicated in sets.
    """
    def __init__(self, input_args, output):
        self.input = input_args
        self.output = output
    def __repr__(self):
        return '{} -> {}'.format(self.input, self.output)
    def __str__(self):
        # reuse __repr__ instead of duplicating the format logic
        return self.__repr__()
    def __hash__(self):
        # salt with the class name so it cannot collide with a plain string hash
        return hash('io_example') + hash(self.__repr__())
    def __eq__(self, target):
        # equal iff the other object hashes identically (same "input -> output").
        # bugfix: unhashable targets (e.g. lists) have __hash__ set to None, so
        # the old hasattr() check passed and then crashed calling None.
        return callable(getattr(target, '__hash__', None)) and self.__hash__() == target.__hash__()
class suppress():
    """Context manager that swallows the given exception types.

        with suppress(KeyError):
            del d['maybe-missing']
    """
    def __init__(self, *exceptions):
        self._exceptions = exceptions
    def __enter__(self):
        pass
    def __exit__(self, exctype, excinst, exctb):
        # swallow only when an exception occurred AND it is one of ours
        if exctype is None:
            return False
        return issubclass(exctype, self._exceptions)
def is_py3():
    """Return True when running under Python 3 or newer."""
    return sys.version_info.major >= 3
class UniqueCrashContainer(tuple):
    ''' a pretty printable container for crashes '''
    def __repr__(self):
        # renders the crashes as a PrettyTable, one row per crash; falls back
        # to the plain tuple repr if anything goes wrong (e.g. malformed rows)
        try:
            table = PrettyTable(('exception type','arg types','location','crash message'), sortby='exception type')
            table.align["exception type"] = "l"
            table.align["arg types"] = "l"
            table.align["location"] = "l"
            table.align["crash message"] = "l"
            for i in self:
                # location column = the last 'line N' fragment found in the trace
                table.add_row((i.err_type.__name__,repr(tuple(i.__name__ for i in i.arg_types)),[x for x in i.trace.split(', ') if x.startswith('line ')][-1],i.message))
            return table.get_string()
        except:
            return tuple.__repr__(self)
class PrettyTuple(tuple):
    ''' tuples with better pretty printing '''
    def __repr__(self):
        # renders members one per PrettyTable row; type objects are shown by
        # bare name instead of "<class 'int'>"; falls back to tuple.__repr__
        if len(self) > 0:
            try:
                table = PrettyTable(None)
                try:
                    # attempt to sort the tuple (repr order keeps mixed types comparable)
                    tup = tuple(sorted(self, key=repr))
                except:
                    tup = self
                # build up the PrettyTable
                for i in tup:
                    # logic for nested tuples: one cell per element
                    if isinstance(i, tuple):
                        # replace the <class 'int'> format for types with just the name of the type
                        t = tuple(x.__name__ if isinstance(x,type) and hasattr(x,'__name__') else repr(x) for x in i)
                        table.add_row(t)
                    else:
                        # replace the <class 'int'> format for types with just the name of the type
                        if isinstance(i, type):
                            if hasattr(i, '__name__'):
                                i = i.__name__
                        else:
                            i = repr(i)
                        table.add_row((i,))
                # drop PrettyTable's first two border/header lines
                return '\n'.join(table.get_string().splitlines()[2:])
            except:
                return tuple.__repr__(self)
        else:
            return '()'
class tb_controls():
    """ module-level switches for silencing and restoring python tracebacks """
    # state saved at import time so enable_traceback can restore it exactly
    old_excepthook = sys.excepthook
    no_tracebacklimit_on_sys = 'tracebacklimit' not in dir(sys)
    old_tracebacklimit = (sys.tracebacklimit if 'tracebacklimit' in dir(sys) else None)
    traceback_disabled = False
    @staticmethod
    def disable_traceback():
        if is_py3():
            # NOTE(review): None is not a documented value for
            # sys.tracebacklimit (an int is expected) -- confirm this
            # actually suppresses tracebacks on all supported versions
            sys.tracebacklimit = None
        else:
            # py2: drop the traceback object before passing to the old hook
            sys.excepthook = lambda t, v, n:tb_controls.old_excepthook(t, v, None)
        tb_controls.traceback_disabled = True
    @staticmethod
    def enable_traceback():
        if tb_controls.traceback_disabled:
            if is_py3():
                if tb_controls.no_tracebacklimit_on_sys:
                    # there was no limit before, so remove ours entirely
                    del sys.tracebacklimit
                else:
                    sys.tracebacklimit = tb_controls.old_tracebacklimit
            else:
                sys.excepthook = tb_controls.old_excepthook
            tb_controls.traceback_disabled = False
def enable_traceback():
    """ re-enables tracebacks being added to exception raises """
    tb_controls.enable_traceback()
def disable_traceback():
    """ disables tracebacks from being added to exception raises """
    tb_controls.disable_traceback()
def traceback_file_lines(trace_text=None):
    """ this returns a list of lines that start with File in the given traceback

    Parameters:
        trace_text - traceback text to scan; defaults to the current
                     exception's formatted traceback
    usage:
        traceback_file_lines(traceback.format_exc())
    """
    # bugfix: the documented default of None previously crashed on
    # .splitlines(); mirror traceback_steps and format the current exception
    if trace_text is None:
        trace_text = traceback.format_exc()
    # keep only the '  File "...", line N' marker lines
    return [i for i in trace_text.splitlines() if i.startswith('  File "') and '", line' in i]
def traceback_steps(trace_text=None):
    """ this generates the steps in a traceback

    Yields one chunk per stack frame: the '  File "...", line N' marker line
    joined with the source line(s) that follow it.
    usage:
        traceback_steps(traceback.format_exc())
    """
    if trace_text is None:
        trace_text = traceback.format_exc()
    # drop the leading 'Traceback ...' line and the trailing exception line
    lines = trace_text.splitlines()[1:-1]
    out = []
    for line in lines:
        # a new frame starts at every '  File "...", line N' marker
        if line.startswith('  File "') and '", line' in line:
            if len(out):
                yield '\n'.join(out)
            out = [line]
        else:
            out.append(line)
    yield '\n'.join(out)
def traceback_text():
    """Return the current exception's traceback steps joined into one text block."""
    return '\n'.join(traceback_steps())
def format_error_message(f_name, err_msg, trace_text, evil_args):
    """ builds the banner battle_tested prints when a function crashes

    Includes the error message, the breakpoint file/line (with one line of
    source context above and below when the source file is readable) and a
    snippet showing how to reproduce the crash.
    """
    # center the title inside dashes until the line is at least 79 chars wide
    top_line = " battle_tested crashed {f_name:}() ".format(f_name=f_name)
    while len(top_line) < 79:
        top_line = "-{}-".format(top_line)
    top_line = '\n\n{}'.format(top_line)
    bottom_line = '-'*len(top_line)
    # pull the path and line number out of the first traceback step
    break_path = trace_text.split('"')[1]
    break_line_number = int(trace_text.split(',')[1].split(' ')[-1])
    break_line_number_up = break_line_number-1
    break_line_number_down = break_line_number+1
    out = """{top_line:}
Error Message:
{err_msg:}
Breakpoint: {break_path:} - line {break_line_number:}""".format(
        top_line=top_line,
        err_msg=err_msg,
        break_path=break_path,
        break_line_number=break_line_number
    )
    try:
        # add a three-line source context around the breakpoint; if the file
        # cannot be read (or the context lines are missing), the whole section
        # is skipped via the except below
        with open(break_path) as f:
            for i, line in enumerate(f):
                i+=1
                if i == break_line_number_up:
                    line_above=line.replace('\n','')
                if i == break_line_number:
                    break_line=line.replace('\n','')
                if i == break_line_number_down:
                    line_below=line.replace('\n','')
        out += """
{break_line_number_up:>{num_len:}}|{line_above:}
->{break_line_number:>{num_len:}}|{break_line:}
{break_line_number_down:>{num_len:}}|{line_below:}""".format(
            break_line_number_up=break_line_number_up,
            break_line_number=break_line_number,
            break_line_number_down=break_line_number_down,
            line_above=line_above,
            line_below=line_below,
            break_line=break_line,
            num_len=len(str(break_line_number_down))+1
        )
    except Exception as ex:
        # i only want this part if the whole file read works
        pass
    out += """
To reproduce this error, run:
{f_name:}{evil_args:}
{bottom_line:}
""".format(
        bottom_line=bottom_line,
        f_name=f_name,
        evil_args=evil_args,
    )
    return out
class generators(object):
    """ small library of generator/coroutine helpers used by battle_tested """
    def started(generator_function):
        """ decorator: advances a generator to its first yield on creation,
        so the returned object can accept send() immediately """
        def wrapper(*args, **kwargs):
            g = generator_function(*args, **kwargs)
            next(g)  # prime the coroutine
            return g
        return wrapper
    @staticmethod
    @started
    def sum():
        "coroutine that holds a running sum; send(x) returns the new total"
        total = 0
        while 1:
            total += yield total
    @staticmethod
    @started
    def counter():
        """coroutine that holds a count; next()/send(None) adds 1, send(n) adds n"""
        c = 0
        while 1:
            i = yield c
            if i is None:
                c += 1
            else:
                c += i
    @staticmethod
    @started
    def avg():
        """ coroutine that holds a rolling average; send(x) returns the new mean """
        count = 0.0
        total = generators.sum()
        i=0
        while 1:
            # count is incremented after the yield, so the first send divides by 1
            i = yield (((total.send(i)*1.0)/count) if count else 0)
            count += 1
    @staticmethod
    def timer():
        """ generator that yields seconds elapsed since it was created """
        start_time = time()
        while 1:
            yield time()-start_time
    @staticmethod
    def countdown(seconds):
        """ yields True until time expires """
        start = time()
        while 1:
            yield time()-start < seconds
    @staticmethod
    def chunks(itr, size):
        """ yields consecutive non-overlapping tuples of `size` items;
        a trailing partial chunk is dropped """
        out = deque(maxlen=size)
        for i in itr:
            out.append(i)
            if len(out) == size:
                yield tuple(out)
                out.clear()
    @staticmethod
    def chain(*a):
        """itertools.chain, just better: non-iterable arguments are yielded as-is"""
        for g in a:
            if hasattr(g, '__iter__'):
                # iterate through if its iterable
                for i in g:
                    yield i
            else:
                # just yield the whole thing if its not
                yield g
    @staticmethod
    def every_possible_object(iterable):
        """ like flatten, just more desperate: recursively yields every nested
        container member (dict keys and values included), then the input itself """
        try:
            for i in iterable:
                yield i
                if isinstance(i, dict):
                    for k in i:
                        yield k
                    for v in i.values():
                        for i in generators.every_possible_object(v):
                            yield i
                elif isinstance(i, (list,tuple,set)):
                    for i in generators.every_possible_object(i):
                        yield i
        except TypeError:
            # non-iterable input: fall through and just yield the object itself
            pass
        yield iterable
class FuzzTimeoutError(BaseException):
    """Raised when a fuzzing run exceeds its time budget.

    Derives directly from BaseException (not Exception).
    """
from threading import Timer
class IntervalTimer(object):
    """ run functions on intervals in the background
    by: Cody Kochmann

    The first start() runs the function once synchronously, then schedules
    it (and a re-arming call to start()) every `seconds` via threading.Timer
    until stop() is called.
    """
    def __init__(self, seconds, function):
        assert type(seconds).__name__ in ('int','float')
        assert callable(function)
        self.seconds=seconds
        self.function=function
        self.stopped=False
        self.running=False
        self.thread=Timer(self.seconds,self.function)
    def start(self):
        # wait for a previous in-flight run before scheduling the next one
        if self.thread.is_alive():
            self.thread.join()
        if not self.stopped:
            if not self.running:
                # very first start fires the function immediately
                self.function()
                self.running=True
            self.thread=Timer(self.seconds,self.function)
            self.thread.start()
            # re-arm: schedule this start() to run again after the interval
            self.restart_thread=Timer(self.seconds, self.start)
            self.restart_thread.start()
    def stop(self):
        # prevents any further scheduling and cancels pending timers
        self.stopped = True
        self.running = False
        try:
            self.thread.cancel()
        except AttributeError: pass
        try:
            self.restart_thread.cancel()
        except AttributeError: pass
from io import StringIO
def run_silently(fn):
    """ runs a function silently with no stdout

    Returns fn's return value.  Bugfix: stdout is now restored even when fn
    raises (the original leaked the redirected StringIO on exceptions).
    """
    stdout_holder = sys.stdout
    sys.stdout = StringIO()
    try:
        return fn()
    finally:
        sys.stdout = stdout_holder
class ipython_tools(object):
    """ tools to make battle_tested work with ipython nicely """
    # detection happens once at class-creation time; `magic` only exists when
    # we are actually running inside an IPython session
    detected = 'IPython' in sys.modules
    if detected:
        from IPython import get_ipython
        detected = get_ipython() is not None
    if detected:
        magic = get_ipython().magic
    @staticmethod
    def silence_traceback():
        """ silences ipythons verbose debugging temporarily """
        if ipython_tools.detected:
            # this hijacks stdout because there is a print in ipython.magic
            run_silently(lambda:ipython_tools.magic("xmode Plain"))
    @staticmethod
    def verbose_traceback():
        """ re-enables ipythons verbose tracebacks """
        if ipython_tools.detected:
            ipython_tools.magic("xmode Verbose")
def function_arg_count(fn):
    """ finds how many args a function has

    Order of attempts:
      1. read fn.__code__.co_argcount (plain functions / lambdas / methods)
      2. for functools.partial objects, count the wrapped function's args
         minus those already bound
      3. probe: call fn with 1..63 positional args and record counts whose
         TypeError is NOT an arg-count complaint (or that raise nothing /
         something else entirely)
    Falls back to 1 when nothing conclusive is found.
    """
    assert callable(fn), 'function_arg_count needed a callable function, not {0}'.format(repr(fn))
    if hasattr(fn, '__code__') and hasattr(fn.__code__, 'co_argcount'):
        # normal functions
        return fn.__code__.co_argcount
    if hasattr(fn, 'args') and hasattr(fn, 'func') and hasattr(fn, 'keywords'):
        # functools.partial objects
        return function_arg_count(fn.func) - (len(fn.args) + len(fn.keywords))
    # matches messages like 'takes exactly 2' / 'takes one' / 'missing 1'
    arg_count_error = r'((takes (exactly )?(one|[0-9]{1,}))|(missing (one|[0-9]{1,})))'
    number_of_args_that_work = []
    for i in range(1, 64):
        try:
            fn(*range(i))
        except TypeError as ex:
            # a TypeError that is NOT an arg-count complaint means the args
            # were accepted and the body itself objected to the values
            if len(repr(findall(arg_count_error, repr(ex)))) <= 10:
                number_of_args_that_work.append(i)
        except Exception:
            # any other error: the signature accepted the args, body failed
            pass
        else:
            number_of_args_that_work.append(i)
    if number_of_args_that_work:
        return min(number_of_args_that_work)
    return 1 # not universal, but for now, enough... :/
class battle_tested(object):
"""
battle_tested - automated function fuzzing library to quickly test production
code to prove it is "battle tested" and safe to use.
Examples of Primary Uses:
from battle_tested import fuzz
def my_adder(a, b):
''' switches the variables '''
return b + a
fuzz(my_adder) # returns a report of what works/breaks
Or:
from battle_tested import battle_tested
@battle_tested(keep_testing=False, allow=(AssertionError,))
def my_strict_add(a, b):
''' adds a and b together '''
assert isinstance(a, int), 'a needs to be an int'
assert isinstance(b, int), 'b needs to be an int'
return a + b
# This runs tests and halts the program if there is an error if that error
# isn't an AssertionError. This tests if you've written enough assertions.
Parameters:
fn - the function to be fuzzed (must accept at least one argument)
seconds - maximum time battle_tested is allowed to fuzz the function
max_tests - maximum number of tests battle_tested will run before exiting
(if the time limit doesn't come first)
verbose - setting this to False makes battle_tested raise the first
exception that wasn't specifically allowed in the allow option
keep_testing - setting this to True allows battle_tested to keep testing
even after it finds the first falsifying example, the results
can be accessed with crash_map() and success_map()
quiet - setting this to True silences all of the outputs coming from
the test
allow - this can be a tuple of exception types that you want
battle_tested to skip over in its tests
"""
    def __init__(self, seconds=6, max_tests=1000000, keep_testing=True, verbose=False, quiet=False, allow=(), strategy=garbage, **kwargs):
        """ your general constructor to get things in line

        Validates every option (see the class docstring for their meanings)
        and stores them on the instance for later use.
        """
        # this is here if someone decides to use it as battle_tested(function)
        if callable(seconds):
            raise Exception('\n\n\tyou gave battle_tested() a function as the argument, did you mean battle_tested.fuzz()?')
        self.kwargs = kwargs
        self.tested = False
        # needed to determine how quiet it will be
        self.__verify_quiet__(quiet)
        self.quiet = quiet
        # needed to determine how verbosly it will work
        self.__verify_verbose__(verbose)
        self.verbose = False if self.quiet else verbose # quiet silences verbose mode
        # needed to determine the maximum time the tests can run
        self.__verify_seconds__(seconds)
        self.seconds = seconds
        # determine whether to keep testing after finding a crash
        self.__verify_keep_testing__(keep_testing)
        self.keep_testing = keep_testing
        # needed to determine maximum number of tests it can
        self.__verify_max_tests__(max_tests)
        self.max_tests = max_tests
        # determine what kind of exceptions are allowed
        self.__verify_allow__(allow)
        self.allow = allow
        # determine what kind of strategy to use
        self.__verify_strategy__(strategy)
        self.strategy = strategy
    @staticmethod
    def __verify_seconds__(seconds):
        """ asserts that seconds is a positive int """
        assert type(seconds) == int, 'battle_tested needs seconds to be an int, not {0}'.format(repr(seconds))
        assert seconds > 0, 'battle_tested needs seconds to be a positive int, not {0}'.format(repr(seconds))
    @staticmethod
    def __verify_verbose__(verbose):
        """ asserts that verbose is valid """
        assert type(verbose) == bool, 'battle_tested needs verbose to be a bool, not {0}'.format(repr(verbose))
    @staticmethod
    def __verify_max_tests__(max_tests):
        """ asserts that max_tests is valid """
        assert type(max_tests) == int, 'battle_tested needs max_tests to be an int, not {0}'.format(repr(max_tests))
        assert max_tests > 0, 'battle_tested needs max_tests to be a positive int, not {0}'.format(repr(max_tests))
    @staticmethod
    def __verify_function__(fn):
        """ asserts that the input is a function """
        assert callable(fn), 'battle_tested needs a callable function, not {0}'.format(repr(fn))
    @staticmethod
    def __verify_tested__(fn):
        """ asserts that the function exists in battle_tested's results """
        battle_tested.__verify_function__(fn)
        assert fn in storage.results.keys(), '{} was not found in battle_tested\'s results, you probably haven\'t tested it yet'.format(fn)
    @staticmethod
    def __verify_keep_testing__(keep_testing):
        """ ensures keep_testing is a valid argument """
        assert type(keep_testing) == bool, 'keep_testing needs to be a bool'
        # NOTE(review): redundant after the bool type check above
        assert keep_testing == True or keep_testing == False, 'invalid value for keep_testing'
    @staticmethod
    def __verify_quiet__(quiet):
        """ ensures quiet is a valid argument """
        assert type(quiet) == bool, 'quiet needs to be a bool'
        # NOTE(review): redundant after the bool type check above
        assert quiet == True or quiet == False, 'invalid value for quiet'
    @staticmethod
    def __verify_allow__(allow):
        """ ensures allow is a valid argument (a tuple of exception types) """
        assert type(allow) == tuple, 'allow needs to be a tuple of exceptions'
        assert all(issubclass(i, BaseException) for i in allow), 'allow only accepts exceptions as its members'
    @staticmethod
    def __verify_args_needed__(args_needed):
        """ ensures args_needed is a valid number of args for a function """
        assert type(args_needed) == int, 'args_needed needs to be a positive int'
        assert args_needed > 0, 'args_needed needs to be a positive int'
    @staticmethod
    def __verify_strategy__(strategy):
        """ ensures strategy is a hypothesis strategy or a tuple of strategies """
        def is_strategy(strategy):
            # duck-typed check: hypothesis strategies have 'strategy' in the
            # class name and expose an .example attribute
            assert 'strategy' in type(strategy).__name__.lower(), 'strategy needs to be a hypothesis strategy, not {}'.format(strategy)
            assert hasattr(strategy,'example'), 'strategy needs to be a hypothesis strategy, not {}'.format(strategy)
            return True
        if type(strategy) == tuple:
            assert len(strategy)>0, 'strategy cannot be an empty tuple, please define at least one'
            assert all(is_strategy(i) for i in strategy), 'not all members in strategy were valid hypothesis strategies'
        else:
            is_strategy(strategy)
# results are composed like this
# results[my_function]['unique_crashes']=[list_of_crashes]
# results[my_function]['successes']=[list_of_successes]
# safe container that holds crash results
Crash = stricttuple(
'Crash',
arg_types = (
lambda arg_types:type(arg_types)==tuple,
lambda arg_types:len(arg_types)>0,
),
args = (
lambda args:type(args)==tuple,
lambda args:len(args)>0,
),
message = (
lambda message:type(message).__name__ in 'str unicode NoneType' ,
),
err_type = (
lambda err_type:type(err_type)==type ,
),
trace = (
lambda trace:type(trace).__name__ in 'str unicode' ,
)
)
    class Result(object):
        ''' container that holds test results

        Each field is validated on construction; `unittest` holds a
        best-effort generated unittest source string (or None on failure).
        '''
        def __init__(self, successful_input_types, crash_input_types, iffy_input_types, output_types, exception_types, unique_crashes, successful_io, function):
            # assertions for successful_input_types: PrettyTuple of tuples of types
            assert type(successful_input_types)==PrettyTuple
            assert all(type(i)==tuple for i in successful_input_types)
            assert all(all(isinstance(x,type) for x in i) for i in successful_input_types)
            # assertions for crash_input_types: PrettyTuple of tuples of types
            assert type(crash_input_types)==PrettyTuple
            assert all(type(i)==tuple for i in crash_input_types)
            assert all(all(isinstance(x,type) for x in i) for i in crash_input_types)
            # assertions for iffy_input_types: PrettyTuple of tuples of types
            assert type(iffy_input_types)==PrettyTuple
            assert all(type(i)==tuple for i in iffy_input_types)
            assert all(all(isinstance(x,type) for x in i) for i in iffy_input_types)
            # assertions for output_types: PrettyTuple of types
            assert type(output_types)==PrettyTuple
            assert all(isinstance(i, type) for i in output_types)
            # assertions for exception_types
            assert type(exception_types)==PrettyTuple
            assert all(isinstance(i,Exception) or issubclass(i,Exception) for i in exception_types)
            # assertions for unique_crashes
            assert type(unique_crashes)==UniqueCrashContainer
            # assertions for successful_io: deque of io_example objects
            assert type(successful_io)==deque
            assert all(type(i) == io_example for i in successful_io) if len(successful_io) else 1
            self.successful_input_types = successful_input_types
            self.crash_input_types = crash_input_types
            self.iffy_input_types = iffy_input_types
            self.output_types = output_types
            self.exception_types = exception_types
            self.unique_crashes = unique_crashes
            self.successful_io = successful_io
            self.function = function
            # unit test generation is best-effort; None when it fails
            self.unittest = attempt(self._generate_unit_test)
            self._fields = 'successful_input_types', 'crash_input_types', 'iffy_input_types', 'output_types', 'exception_types', 'unique_crashes', 'successful_io'
        def __repr__(self):
            # one PrettyTable row per field; multi-line field reprs get their
            # label vertically centered with newline padding
            table = PrettyTable(None)
            for i in sorted(self._fields):
                new_lines_in_repr = repr(getattr(self,i)).count('\n')
                if new_lines_in_repr > 0:
                    ii = '{}{}'.format('\n'*int(new_lines_in_repr/2), i)
                else:
                    ii = i
                if i == 'successful_io':
                    table.add_row((ii, repr(list(getattr(self,i)))))
                else:
                    table.add_row((ii, getattr(self,i)))
            table.align='l'
            # drop PrettyTable's first two border/header lines
            return '\n'.join(table.get_string().splitlines()[2:])
        def _generate_unit_test(self):
            ''' give this a function to fuzz and it will spit out a unittest file '''
            # negative tests: one raises_* test per unique crash whose args
            # can be faithfully reproduced from their repr
            negative_tests = deque()
            for i in self.unique_crashes:
                invocation_code = '{}{}'.format(self.function.__name__, repr(i.args))
                tmp='def {}(*a,**k):pass\n'.format(self.function.__name__)+invocation_code
                if runnable(tmp) and compilable(tmp) and valid_repr(i.args):
                    test_name = 'raises_{}'.format(i.err_type.__name__)
                    negative_tests.append(unittest_builder.raises_test(test_name, invocation_code, i.err_type))
            # positive tests: one equals_* test per reproducible io_example
            positive_tests = deque()
            for c, io_object in enumerate(self.successful_io):
                # normalize hypothesis float/complex wrappers back to builtins
                # so their reprs round-trip
                io_object.input = tuple(float(i) if type(i)==builtins.float else i for i in io_object.input)
                io_object.output = attempt(
                    lambda:tuple(float(i) if type(i)==builtins.float else i for i in io_object.output) ,
                    default_output=io_object.output
                )
                io_object.input = tuple(complex(i) if type(i)==builtins.complex else i for i in io_object.input)
                io_object.output = attempt(
                    lambda:tuple(complex(i) if type(i)==builtins.complex else i for i in io_object.output) ,
                    default_output=io_object.output
                )
                if type(io_object.output) == builtins.complex:
                    io_object.output = complex(io_object.output)
                if type(io_object.output) == builtins.float:
                    io_object.output = float(io_object.output)
                invocation_code = '{}{}'.format(self.function.__name__, repr(io_object.input))
                tmp='def {}(*a,**k):pass\n'.format(self.function.__name__)+invocation_code
                if runnable(tmp) and compilable(tmp) and valid_repr(io_object.input) and valid_repr(io_object.output):
                    if all(runs_fine(repr(i)) for i in (io_object.input, io_object.output)):
                        positive_tests.append((invocation_code, io_object.output))
            positive_tests = [
                unittest_builder.equal_test('equals_{}'.format(i+1), *v)
                for i, v in enumerate(positive_tests)
            ]
            # stitch the generated test functions into a full unittest body
            positive_tests = ''.join(sorted(positive_tests))
            negative_tests = ''.join(sorted(negative_tests))
            test_functions = negative_tests + positive_tests
            return unittest_builder.test_body(self.function, test_functions)
    @staticmethod
    def results(fn):
        '''returns the collected results of the given function

        Raises AssertionError if `fn` has not been fuzzed yet.'''
        battle_tested.__verify_tested__(fn)
        return storage.results[fn]
    @staticmethod
    def stats(fn):
        ''' returns the stats found when testing a function

        Maps each Result field name to the number of entries collected. '''
        results = battle_tested.results(fn)
        return {k:len(getattr(results, k)) for k in results._fields}
    @staticmethod
    def print_stats(fn):
        ''' prints the stats on a tested function '''
        stats = battle_tested.stats(fn)
        fn_name = fn.__name__ if '__name__' in dir(fn) else fn
        s = 'fuzzing {}() found:'.format(fn_name)
        s += ' '*(79-len(s))  # pad the header out to 79 columns
        print(s)
        t=PrettyTable(None)
        for k in sorted(stats.keys()):
            t.add_row((k,stats[k]))
        # drop PrettyTable's first two border/header lines
        print('\n'.join(t.get_string().splitlines()[2:]))
    # these two subclasses exist only so the shared maps can carry doc strings
    class _crash_map(dict):
        '''a map of crashes generated by the previous test'''
    class _success_map(set):
        '''a map of data types that were able to get through the function without crashing'''
    # shared, mutable class-level state populated during fuzzing
    crash_map = _crash_map()
    success_map = _success_map()
    @staticmethod
    def generate_examples(args_needed=1, strategy=None):
        """ this is the primary argument generator that battle_tested runs on

        Yields argument collections of length `args_needed` forever.  With a
        custom strategy (or a tuple of per-argument strategies) it draws from
        hypothesis; otherwise it replays the cached inputs first and then
        streams from the multiprocess garbage generator.
        """
        battle_tested.__verify_args_needed__(args_needed)
        if strategy is not None: # logic for a custom strategy
            battle_tested.__verify_strategy__(strategy)
            if type(strategy) == tuple:
                # one strategy per argument position
                assert len(strategy) == args_needed, 'invalid number of strategies, needed {} got {}'.format(args_needed, len(strategy))
                print('using {} custom strategies - {}'.format(len(strategy),strategy))
                strategy = st.builds(lambda *x: list(x), *strategy)
                yield from strategy_stream(strategy)
            else:
                # generate lists containing output only from the given strategy
                stream = strategy_stream(strategy)
                for _ in gen.loop():
                    # build a list of args to test next
                    out = [stream_data for _, stream_data in zip(range(args_needed), stream)]
                    # yield all combinations of that list
                    yield from product(out, repeat=len(out))
        else: # logic for fuzzing approach
            # first run through the cache (forwards then backwards)
            storage.refresh_test_inputs()
            for chunk in generators.chunks(chain(storage.test_inputs, reversed(storage.test_inputs)),size=args_needed):
                for combination in product(chunk, repeat=args_needed):
                    yield combination
            try:
                garbage = multiprocess_garbage()
                while 2:
                    out = [next(garbage) for i in range(args_needed)]
                    for i in product(out, repeat=len(out)):
                        yield i
            finally:
                # closing the generator tears down its worker processes
                garbage.close()
@staticmethod
def fuzz(fn, seconds=6, max_tests=1000000000, verbose=False, keep_testing=True, quiet=False, allow=(), strategy=garbage):
    """
    fuzz - battle_tested's primary weapon for testing functions.

    Example Usage:

        def my_adder(a, b):
            ''' switches the variables '''
            return b + a

        fuzz(my_adder)  # returns a report of what works/breaks

        # or

        def my_strict_add(a, b):
            ''' adds a and b together '''
            assert isinstance(a, int), 'a needs to be an int'
            assert isinstance(b, int), 'b needs to be an int'
            return a + b

        # This runs tests and halts the program if there is an error if that error
        # isn't an AssertionError. This tests if you've written enough assertions.
        fuzz(my_strict_add, keep_testing=False, allow=(AssertionError,))

    Parameters:
        fn - the function to be fuzzed (must accept at least one argument)
        seconds - maximum time battle_tested is allowed to fuzz the function
        max_tests - maximum number of tests battle_tested will run before exiting
                    (if the time limit doesn't come first)
        verbose - setting this to False makes battle_tested raise the first
                  exception that wasn't specifically allowed in the allow option
        keep_testing - setting this to True allows battle_tested to keep testing
                       even after it finds the first falsifying example, the results
                       can be accessed with crash_map() and success_map()
        quiet - setting this to True silences all of the outputs coming from
                the test
        allow - this can be a tuple of exception types that you want
                battle_tested to skip over in its tests
    """
    # validate every argument up front so failures happen before any fuzzing starts
    battle_tested.__verify_function__(fn)
    battle_tested.__verify_seconds__(seconds)
    battle_tested.__verify_verbose__(verbose)
    battle_tested.__verify_max_tests__(max_tests)
    battle_tested.__verify_keep_testing__(keep_testing)
    battle_tested.__verify_quiet__(quiet)
    battle_tested.__verify_allow__(allow)
    battle_tested.__verify_strategy__(strategy)
    using_native_garbage = hash(strategy) == hash(garbage)
    args_needed = function_arg_count(fn)
    # code for instance methods
    if hasattr(fn, '__self__'):
        # create a partial with fn.__self__ as the first arg
        #fn = partial(fn, fn.__self__)
        _name = repr(fn)
        _type = type(fn).__name__
        #print(dir(fn))
        # wrap the method in a hashable wrapper
        fn = partial(fn)
        fn.__name__ = _name
        # if fn is not a builtin, chop off one arg needed
        if 'builtin' not in _type and args_needed > 1:
            args_needed = args_needed-1
        del _name
        del _type
    #if type(strategy) == tuple:
    #    assert len(strategy) == args_needed, 'invalid number of strategies, needed {} got {}'.format(args_needed, len(strategy))
    #    print('using {} custom strategies - {}'.format(len(strategy),strategy))
    #    strategy = st.builds(lambda *x: list(x), *strategy)
    #else:
    #    # generate a strategy that creates a list of garbage variables for each argument
    #    strategy = st.lists(elements=strategy, max_size=args_needed, min_size=args_needed)
    if not quiet:
        print('testing: {0}()'.format(getattr(fn, '__name__', repr(fn))))
    # reset the shared crash/success maps from any previous run
    battle_tested.crash_map.clear()
    battle_tested.success_map.clear()
    count = generators.counter()
    average = generators.avg()
    timer = generators.timer()
    # --- live progress-display helpers (closures over display_stats below) ---
    def calculate_window_speed():
        # smoothed tests/sec over the last 4 samples
        w = calculate_window_speed.window
        w.append(_inner_window_speed())
        return int((1.0*sum(w))/len(w))
    calculate_window_speed.window = deque(maxlen=4)
    def _inner_window_speed():
        # instantaneous tests/sec from the two most recent samples
        cw = display_stats.count_window
        tw = display_stats.time_window
        if len(cw) == 2:
            c = cw[1]-cw[0]
            t = tw[1]-tw[0]
            if c != 0 and t != 0:
                out = int(c*(1/t))
                return out if out > 0 else 1
        return 1
    def display_stats(overwrite_line=True):
        # prints a single status line; carriage-return keeps it on one line
        now = next(display_stats.timer)
        display_stats.remaining = display_stats.test_time-now
        if not display_stats.quiet:
            display_stats.count_window.append(display_stats.count)
            display_stats.time_window.append(now)
            print('tests: {:<8} speed: {:>6}/sec  avg:{:>6}/sec {} {}s '.format(
                display_stats.count,
                calculate_window_speed(),
                int(display_stats.count/(now if now > 0 else 0.001)),
                '-' if overwrite_line else 'in',
                int(display_stats.test_time-now)+1 if overwrite_line else display_stats.test_time
            ), end=('\r' if overwrite_line else '\n'))
            sys.stdout.flush()
    display_stats.test_time = seconds
    display_stats.remaining = display_stats.test_time
    display_stats.count = 0
    display_stats.time_window = deque(maxlen=2)
    display_stats.count_window = deque(maxlen=2)
    display_stats.timer = generators.timer()
    display_stats.average = generators.avg()
    display_stats.interval = IntervalTimer(0.16, display_stats)
    display_stats.quiet = quiet or verbose
    display_stats.start = lambda:(next(display_stats.timer),display_stats.interval.start())
    ipython_tools.silence_traceback()
    # raw result bookkeeping for this function; compiled into a Result below
    storage.results[fn] = {
        'successful_input_types':deque(maxlen=512),
        'crash_input_types':set(),
        'iffy_input_types':set(), # types that both succeed and crash the function
        'output_types':set(),
        'exception_types':set(),
        'unique_crashes':dict(),
        'successful_io':deque(maxlen=512)
    }
    # bare function used only as an attribute bag for run metadata
    def fn_info():
        pass
    fn_info.fuzz_time = time()
    fn_info.fuzz_id = len(storage.results.keys())
    # stores examples that succeed and return something other than None
    fn_info.successful_io = deque(maxlen=512)
    # stores examples that return None
    fn_info.none_successful_io = deque(maxlen=512)
    gc_interval = IntervalTimer(3, gc)
    #@settings(perform_health_check=False, database_file=None, deadline=None, max_examples=max_tests, verbosity=(Verbosity.verbose if verbose else Verbosity.normal))
    #@given(strategy)
    def _fuzz(given_args):
        # runs one test case against fn and records the outcome
        if _fuzz.first_run:
            _fuzz.first_run = False
            # start the display interval
            display_stats.start()
            # start the countdown for timeout
            _fuzz.timestopper.start()
        arg_list = tuple(given_args)
        #if len(arg_list) != fuzz.args_needed:
        #    exit('got {} args? {}'.format(len(arg_list),next(test_variables)))
        # unpack the arguments
        if not _fuzz.has_time:
            raise FuzzTimeoutError()
        display_stats.count += 1
        try:
            with max_execution_time(int(display_stats.remaining)):
                out = fn(*arg_list)
                # if out is a generator, empty it out.
                if hasattr(out, '__iter__') and (hasattr(out,'__next__') or hasattr(out,'next')):
                    for i in out:
                        pass
            # the rest of this block is handling logging a success
            input_types = tuple(type(i) for i in arg_list)
            # if the input types have caused a crash before, add them to iffy_types
            if input_types in storage.results[fn]['crash_input_types']:
                storage.results[fn]['iffy_input_types'].add(input_types)
            # add the input types to the successful collection
            if input_types not in storage.results[fn]['successful_input_types']:
                storage.results[fn]['successful_input_types'].append(input_types)
            # add the output type to the output collection
            storage.results[fn]['output_types'].add(type(out))
            battle_tested.success_map.add(tuple(type(i) for i in arg_list))
            try:
                (fn_info.none_successful_io if out is None else fn_info.successful_io).append(io_example(arg_list, out))
                '''
                # I want to add this, but it wrecks the fuzzer's performance :(
                io_object = io_example(arg_list, out)
                if out is None:
                    if io_object not in fn_info.none_successful_io:
                        fn_info.none_successful_io.append(io_object)
                else:
                    if io_object not in fn_info.successful_io:
                        fn_info.successful_io.append(io_object)
                '''
            except:
                pass
        except MaxExecutionTimeError:
            # a single test ran past the remaining budget; just skip it
            pass
        except _fuzz.allow as ex:
            # the caller explicitly allowed this exception type
            pass
        except Exception as ex:
            ex_message = ex.args[0] if (
                hasattr(ex, 'args') and len(ex.args) > 0
            ) else (ex.message if (
                hasattr(ex, 'message') and len(ex.message) > 0
            ) else '')
            storage.results[fn]['crash_input_types'].add(tuple(type(i) for i in arg_list))
            if keep_testing:
                # record the crash (deduped by traceback location) and keep going
                tb_text = traceback_text()
                tb = '{}{}'.format(traceback_file_lines(tb_text),repr(type(ex)))
                battle_tested.crash_map[tb]={'type':type(ex),'message':ex_message,'args':arg_list,'arg_types':tuple(type(i) for i in arg_list)}
                storage.results[fn]['unique_crashes'][tb]=battle_tested.Crash(
                    err_type=type(ex),
                    message=repr(ex_message),
                    args=arg_list,
                    arg_types=tuple(type(i) for i in arg_list),
                    trace=str(tb_text)
                )
                storage.results[fn]['exception_types'].add(type(ex))
            else:
                # get the step where the code broke
                tb_steps_full = [i for i in traceback_steps()]
                tb_steps_with_func_name = [i for i in tb_steps_full if i.splitlines()[0].endswith(fn.__name__)]
                if len(tb_steps_with_func_name)>0:
                    tb = tb_steps_with_func_name[-1]
                else:
                    tb = tb_steps_full[-1]
                error_string = format_error_message(
                    fn.__name__,
                    '{} - {}'.format(type(ex).__name__,ex_message),
                    tb,
                    (arg_list if len(arg_list)!=1 else '({})'.format(repr(arg_list[0])))
                )
                ex.message = error_string
                ex.args = error_string,
                raise ex
    _fuzz.has_time = True
    _fuzz.first_run = True
    _fuzz.timestopper = Timer(seconds, lambda:setattr(_fuzz,'has_time',False))
    _fuzz.exceptions = deque()
    _fuzz.args_needed = args_needed
    _fuzz.allow = allow
    _fuzz.using_native_garbage = using_native_garbage
    # run the test
    test_gen = battle_tested.generate_examples(args_needed, None if using_native_garbage else strategy)
    next(test_gen) # start the test generator
    try:
        gc_interval.start()
        for test_args in test_gen:
            if verbose:
                try:
                    s = '{}'.format(tuple(test_args))
                    s = s[:-2]+s[-1]
                    print('trying {}{}'.format(fn.__name__, s))
                except: pass
            _fuzz(test_args)
            max_tests -= 1
            if max_tests <= 0:
                break
    except FuzzTimeoutError:
        # time budget exhausted - normal exit path
        pass
    except KeyboardInterrupt:
        if not quiet:
            print(' stopping fuzz early...')
    finally:
        # always stop the timers/intervals so no background threads linger
        attempt(test_gen.close)
        display_stats.interval.stop()
        display_stats(False)
        gc_interval.stop()
        attempt(_fuzz.timestopper.cancel)
    if not display_stats.quiet:
        print('compiling results...')
    results_dict = storage.results[fn]
    results_dict['iffy_input_types'] = set(i for i in results_dict['crash_input_types'] if i in results_dict['successful_input_types'])
    # merge the io maps
    for i in fn_info.none_successful_io:
        #if len(fn_info.successful_io)<fn_info.successful_io.maxlen:
        fn_info.successful_io.append(i)
    # remove io map with None examples
    del fn_info.none_successful_io
    storage.results[fn] = battle_tested.Result(
        successful_input_types=PrettyTuple(set(i for i in results_dict['successful_input_types'] if i not in results_dict['iffy_input_types'] and i not in results_dict['crash_input_types'])),
        crash_input_types=PrettyTuple(results_dict['crash_input_types']),
        iffy_input_types=PrettyTuple(results_dict['iffy_input_types']),
        output_types=PrettyTuple(results_dict['output_types']),
        exception_types=PrettyTuple(results_dict['exception_types']),
        unique_crashes=UniqueCrashContainer(results_dict['unique_crashes'].values()),
        successful_io=fn_info.successful_io,
        function=fn
    )
    storage.results[fn].function = fn
    ## find the types that both crashed and succeeded
    #results_dict['iffy_input_types'] = set(i for i in results_dict['crash_input_types'] if i in results_dict['successful_input_types'])
    ## clean up the unique_crashes section
    #results_dict['unique_crashes'] = tuple(results_dict['unique_crashes'].values())
    ## remove duplicate successful input types
    #results_dict['successful_input_types'] = set(results_dict['successful_input_types'])
    if keep_testing:
        #examples_that_break = ('examples that break' if len(battle_tested.crash_map)>1 else 'example that broke')
        #print('found {} {} {}()'.format(len(battle_tested.crash_map),examples_that_break,fn.__name__))
        if not quiet:
            battle_tested.print_stats(fn)
        #print('run crash_map() or success_map() to access the test results')
    else:
        if not quiet:
            print('battle_tested: no falsifying examples found')
    # try to save the fields to the function object
    try:
        for f in storage.results[fn]._fields:
            setattr(fn, f, getattr(storage.results[fn], f))
    except: pass
    # try to store the unique crashes as readable attributes
    try:
        for crash in storage.results[fn].unique_crashes:
            try:
                setattr(fn_info.unique_crashes, '{}_{}'.format(crash.err_type.__name__, [x.strip() for x in crash.trace.split(', ') if x.startswith('line ')][-1].replace(' ','_')), crash)
            except: pass
            try:
                setattr(storage.results[fn].unique_crashes, '{}_{}'.format(crash.err_type.__name__, [x.strip() for x in crash.trace.split(', ') if x.startswith('line ')][-1].replace(' ','_')), crash)
            except: pass
    except: pass
    # copy the metadata attributes from fn_info onto the function itself
    try:
        def dummy_function(): pass
        for a in dir(fn_info):
            if a not in dir(dummy_function):
                try:
                    setattr(fn, a, getattr(fn_info, a))
                except:
                    pass
    except: pass
    return storage.results[fn]
def __call__(self, fn):
    """ runs before the decorated function is called

    Fuzzes fn once (unless skip_test was passed to the decorator) and then
    returns either fn unchanged, or - when a 'logger' or 'default_output'
    keyword was given - a wrapper that logs exceptions and optionally
    substitutes a default return value instead of raising.
    """
    self.__verify_function__(fn)
    if fn not in storage.results:
        # only test the first time this function is called
        if not ('skip_test' in self.kwargs and self.kwargs['skip_test']):
            # skip the test if it is explicitly turned off
            self.fuzz(fn, seconds=self.seconds, max_tests=self.max_tests, keep_testing=self.keep_testing, verbose=self.verbose, quiet=self.quiet, allow=self.allow, strategy=self.strategy)
            #self.tested = True
    if any(i in self.kwargs for i in ('logger','default_output')):
        # only wrap if needed
        def wrapper(*args, **kwargs):
            try:
                out = fn(*args, **kwargs)
            except Exception as e:
                # log the error
                if 'logger' in self.kwargs:
                    assert callable(self.kwargs['logger']), "battle_tested.logger needs to be a callable log function, not: {0}".format(repr(self.kwargs['logger']))
                    self.kwargs['logger'](e)
                else:
                    logging.exception(e)
                # only raise the error if there isnt a default_output
                if 'default_output' in self.kwargs:
                    out = self.kwargs['default_output']
                else:
                    raise e
            return out
        return wrapper
    else:
        return fn
# make fuzz its own independent function
# (module-level aliases so callers can use battle_tested's static helpers
# without referencing the class directly)
fuzz = battle_tested.fuzz
results = battle_tested.results
stats = battle_tested.stats
print_stats = battle_tested.print_stats
def crash_map():
    '''returns a map of crashes generated by the previous test'''
    def _by_exception_name(crash):
        # sort key: the name of the exception type that caused the crash
        return crash['type'].__name__
    recorded = battle_tested.crash_map.values()
    return tuple(sorted(recorded, key=_by_exception_name))
def success_map():
    '''returns a map of data types that were able to get through the function without crashing'''
    combos = list(battle_tested.success_map)
    # order by the name of the first argument's type for readable output
    combos.sort(key=lambda combo: combo[0].__name__)
    return tuple(combos)
def function_versions(fn):
    ''' returns all tested versions of the given function '''
    # two function objects are "the same function" when both name and module match
    target = (fn.__name__, fn.__module__)
    for candidate in storage.results.keys():
        if (candidate.__name__, candidate.__module__) == target:
            yield candidate
def time_io(fn, args, rounds=1000):
    ''' time how long it takes for a function to run through given args

    args is an iterable of argument tuples; it is materialized once so the
    same inputs can be replayed for every round. Returns elapsed seconds.
    '''
    frozen_args = tuple(args)  # solidify so the iterable can be replayed
    started = time()
    for _ in range(rounds):
        for call_args in frozen_args:
            fn(*call_args)
    return time() - started
def all_common_successful_io(*functions):
    ''' gets all io objects that works with all given '''
    # every function contributes its saved successful io examples
    candidates = generators.chain(*(fn.successful_io for fn in functions))
    for io in candidates:
        passed = 0
        # deliberately try EVERY function (no early break) so behavior,
        # including any side effects of the calls, matches expectations
        for fn in functions:
            try:
                result = fn(*io.input)
                # drain iterables so lazy generators actually execute
                if hasattr(result, '__iter__'):
                    for _ in result:
                        pass
            except:
                continue
            else:
                passed += 1
        if passed == len(functions):
            yield io
def time_all_versions_of(fn):
    ''' time how long each version of a function takes to run through the saved io '''
    divider = '-'*60
    print('\ntiming all versions of {}'.format(fn.__name__))
    # a zero-arg callable that re-yields the io examples every version can run
    common_io = partial(all_common_successful_io, *list(function_versions(fn)))
    print('found {} inputs that all versions can run'.format(len(list(common_io()))))
    for version in function_versions(fn):
        print('\n{}\n\n{}'.format(divider, getsource(version)))
        elapsed = '{:.10f}'.format(time_io(version, (io.input for io in common_io())))
        print(elapsed, 'seconds')
    print('\n{}'.format(divider))
def run_tests():
    ''' this is where all of the primary functionality of battle_tested is tested

    Exercises the full public surface: instance/builtin methods, every builtin
    type, generators, custom hypothesis strategies, the decorator syntax with
    all of its options, and the plain fuzz() function syntax.
    '''
    print('''
    WARNING | This test exercises ALL functions of the fuzzer and WILL take a while.
            | Let it churn to see if its fully operational. This includes testing
            | on all available cpu cores and the full result rendering so just let
            | it scroll.
            |
            | If this battle test succeeds, you are running on all cylinders!!!
    ''')
    sleep(20)
    print('''
    Initiating core battle test...
    ''')
    sleep(3)
    # test instance methods
    class TestClass(tuple):
        def testmethod(self,a,b,c,d,e):
            return a,b,c,d
    tc = TestClass([1,2,3])
    print(fuzz(tc.testmethod))
    l = list(range(10))
    print(fuzz(l.append))
    # test fuzzing all the types
    for i in (str, bool, bytearray, bytes, complex, dict, float, frozenset, int, list, object, set, str, tuple):
        print('testing: {}'.format(i))
        print(fuzz(i))
    def test_generator(a):
        for i in a:
            yield i
    print(fuzz(test_generator, seconds=10))
    def test_generator(a):
        for i in a:
            yield i,i
    print(fuzz(test_generator, seconds=10))
    print(time_all_versions_of(test_generator))
    # try the custom strategy syntax
    @battle_tested(strategy=st.text(),max_tests=50)
    def custom_text_strategy(a,b):
        if len(a) == 0:
            return None
        else:
            return a in b
    print(dir(custom_text_strategy))
    # every fuzz run should leave these attributes on the function object
    for i in ('successful_io','crash_input_types','exception_types','iffy_input_types','unique_crashes','output_types','successful_input_types'):
        assert hasattr(custom_text_strategy, i), 'custom_text_strategy doesnt have a {} attribute'.format(i)
    def custom_text_fuzz_strategy(a,b):
        return a in b
    fuzz(custom_text_fuzz_strategy, strategy=st.text())
    # try the multiple custom strategy syntax
    @battle_tested(strategy=(st.text(), st.integers()))
    def custom_text_int_strategy(a,b):
        assert isinstance(a, str), 'a needs to be text'
        assert isinstance(b, int), 'b needs to be an int'
        return a+b
    def custom_text_int_fuzz_strategy(a,b):
        return a in b
    r=fuzz(custom_text_fuzz_strategy, strategy=(st.integers(),st.text()))
    #======================================
    #  Examples using the wrapper syntax
    #======================================
    @battle_tested(default_output=[], seconds=1, max_tests=5)
    def sample(i):
        return []
    @battle_tested(keep_testing=False)
    def sample2(a,b,c,d=''):
        t = a, b, c, d
    # output for documentation
    def test(a):
        return int(a)
    print(repr(fuzz(test)))
    # test different speeds
    @battle_tested(seconds=1)
    def arg1_1sec(a):
        return a
    @battle_tested()
    def arg1(a):
        return a
    @battle_tested(seconds=1)
    def args2_1sec(a,b):
        return a+b
    @battle_tested()
    def args2(a,b):
        return a+b
    @battle_tested(seconds=1)
    def args3_1sec(a,b,c):
        return a+b+c
    @battle_tested()
    def args3(a,b,c):
        return a+b+c
    @battle_tested(seconds=1)
    def args4_1sec(a,b,c,d):
        return a+b+c+d
    @battle_tested()
    def args4(a,b,c,d):
        return a+b+c+d
    @battle_tested(seconds=1)
    def args5_1sec(a,b,c,d,e):
        return a+b+c+d+e
    @battle_tested()
    def args5(a,b,c,d,e):
        return a+b+c+d+e
    # test the allow option
    @battle_tested(allow=(AssertionError,))
    def allowed_to_assert(a,b):
        assert a==b, 'a needs to equal b'
    @battle_tested(allow=(AssertionError,), keep_testing=False)
    def allowed_to_assert_and_stop_on_fail(a,b):
        assert a==b, 'a needs to equal b'
    fuzz(max, allow=(ValueError,))
    fuzz(max, keep_testing=False, allow=(ValueError,TypeError))
    # test going quiet
    print('going quiet')
    def quiet_test_out():
        pass
    @battle_tested(keep_testing=False, quiet=True)
    def quiet_test(a,b,c):
        setattr(quiet_test_out, 'args', (a,b,c))
    assert len(quiet_test_out.args) == 3, 'fuzzing quiet test failed'
    quiet_lambda = lambda a,b,c:setattr(quiet_test_out, 'lambda_args', (a,b,c))
    r = fuzz(quiet_lambda, quiet=True, keep_testing=False)
    assert len(quiet_test_out.lambda_args) == 3, 'fuzzing quiet lambda failed'
    print('quiet test complete')
    # proof that they only get tested once
    print(sample(4))
    print(sample2(1,2,3,4))
    print(sample('i'))
    print(sample2('a','b',2,4))
    # prove that successes of any type are possible
    r = fuzz(lambda i:i , keep_testing=True, seconds=10)
    assert len(r.crash_input_types) == 0, 'fuzzing lambda() changed expected behavior'
    assert len(r.exception_types) == 0, 'fuzzing lambda() changed expected behavior'
    assert len(r.iffy_input_types) == 0, 'fuzzing lambda() changed expected behavior'
    assert len(r.unique_crashes) == 0, 'fuzzing lambda() changed expected behavior'
    assert len(r.output_types) > 10, 'fuzzing lambda() changed expected behavior'
    assert len(r.successful_input_types) > 10, 'fuzzing lambda() changed expected behavior'
    #======================================
    #  Examples using the function syntax
    #======================================
    def sample3(a,b):
        # this one blows up on purpose
        return a+b+1
    # this tests a long fuzz
    r=fuzz(sample3, seconds=20)
    assert len(r.successful_io)>0, 'succesful_io was empty'
    print(r.successful_io)
    crash_map()
    success_map()
    assert len(r.crash_input_types) > 10 , 'fuzzing sample3() changed expected behavior'
    assert len(r.exception_types) > 0, 'fuzzing sample3() changed expected behavior'
    assert len(r.unique_crashes) > 0, 'fuzzing sample3() changed expected behavior'
    assert len(r.output_types) > 1, 'fuzzing sample3() changed expected behavior'
    assert len(r.successful_input_types) > 10, 'fuzzing sample3() changed expected behavior'
    fuzz(lambda i:i)
    #======================================
    #  example harness
    #======================================
    # NOTE(review): this harness is illustrative only and is never invoked here;
    # type(key).name looks like it was meant to be type(key).__name__ - confirm
    # before reusing it.
    def harness(key,value):
        global mydict
        global crash_examples
        global successful_types
        try:
            mydict[key]=value
            successful_types.add((type(key).name, type(value).name))
        except Exception as e:
            print('found one')
            crash_examples[e.args[0]]=(key,value)
    # summarize every function that was fuzzed during this run
    for tested_function in storage.results.keys():
        s = '\n'
        try:
            s+=tested_function.__module__
            s+=' ---> '
            s+=tested_function.__name__
            s+=' ---> '
            s+=str([i for i in dir(storage.results[tested_function]) if not i.startswith('_')])
        except:
            pass
        finally:
            print(s)
    print('''
    SUCCESS | Core battle test complete. Go forth and fuzz the world!!!
    ''')
# running this module directly executes the full self-test suite
if __name__ == '__main__':
    run_tests()
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,093
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/FuzzResult.py
|
# -*- coding: utf-8 -*-
# @Author: CodyKochmann
# @Date: 2020-04-05 11:39:37
# @Last Modified by: CodyKochmann
# @Last Modified time: 2020-04-05 12:23:59
from typing import Set, Tuple, Dict
import sqlite3, inspect, logging, unittest
'''
FuzzResult structure
{
(int, int): {
True: [
([3, 5], 8),
([1, 1], 2),
([9, 15], 24),
],
False: {}
}
(set, str): {
True: [],
False: {
(<class 'TypeError'>, ('can only concatenate str (not "set") to str',)): [
({'yolo', {2, 4, 6}})
]
}
}
}
'''
def attempt_getsource(fn):
    ''' best-effort wrapper around inspect.getsource.

    Returns the source text of fn, or None when the source is unavailable
    (builtins, C extensions, objects with no readable source file).
    '''
    try:
        return inspect.getsource(fn)
    except (OSError, TypeError):
        # inspect.getsource raises TypeError for builtins/non-source objects
        # and OSError when the source file cannot be read; the previous bare
        # "except:" also swallowed KeyboardInterrupt/SystemExit.
        return None
class FuzzResultDB(sqlite3.Connection):
    ''' a sqlite database (in-memory by default) that persists one fuzz run.

    On construction it loads the schema and immediately ingests the given
    FuzzResult-shaped dict, so a fresh instance is already queryable.
    '''
    # one row per fuzz run in fuzz_tests, one row per individual test case
    # in test_ingest
    schema = [
        '''
        CREATE TABLE IF NOT EXISTS fuzz_tests (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            target_module TEXT,
            target_function_name TEXT,
            target_function_repr TEXT,
            target_function_source TEXT
        );
        ''',
        '''
        CREATE TABLE IF NOT EXISTS test_ingest (
            test_id INTEGER REFERENCES fuzz_tests(id),
            successful BOOLEAN NOT NULL,
            input_types TEXT NOT NULL,
            input_args TEXT NOT NULL,
            output_type TEXT,
            output TEXT,
            exception_type TEXT,
            exception_message TEXT,
            -- bugfix: this check previously compared with "!= NULL", which is
            -- always NULL in SQL, so the constraint was silently disabled.
            -- IS NOT NULL actually enforces "either output or exception".
            CHECK (
                (
                    output_type IS NOT NULL AND output IS NOT NULL
                ) OR (
                    exception_type IS NOT NULL AND exception_message IS NOT NULL
                )
            )
        );
        '''
    ]

    def __init__(self, fuzz_target, fuzz_result: Dict, db_path=":memory:"):
        ''' build the connection, load the schema and persist fuzz_result.

        fuzz_target: the callable that was fuzzed
        fuzz_result: dict mapping input-type combos to success/crash records
        db_path: sqlite path; defaults to an in-memory database
        '''
        # validate input
        assert callable(fuzz_target), f'FuzzResultDB assumes fuzz_target is some type of calable function - {fuzz_target}'
        assert isinstance(fuzz_result, dict), f'FuzzResultDB assumes fuzz_result will be a dict - {fuzz_result}'
        assert isinstance(db_path, str), f'FuzzResultDB assumes db_path will be a string - {db_path}'
        # initialize sqlite3.Connection components
        sqlite3.Connection.__init__(self, db_path)
        # save input for potential future reference
        self._db_path = db_path
        self._fuzz_target = fuzz_target
        self._fuzz_result = fuzz_result
        # load the base schema
        self.load_schema()
        # save the fuzz results
        self.save_results(fuzz_target, fuzz_result)

    def load_schema(self):
        ''' run every DDL statement in FuzzResultDB.schema on this connection '''
        cursor = self.cursor()
        for command in FuzzResultDB.schema:
            try:
                list(cursor.execute(command))
            except Exception as ex:
                # bugfix: the placeholder used to be a bare '%', which is not a
                # valid logging format specifier; '%s' interpolates the command
                logging.exception('failed to run sql command - %s', command)
                raise ex
        cursor.close()

    @property
    def test_id(self):
        ''' lazily insert (exactly once) and return this run's fuzz_tests row id '''
        if not hasattr(self, '_test_id'):
            cursor = self.cursor()
            self._test_id = cursor.execute(
                '''
                INSERT INTO fuzz_tests (
                    target_module,
                    target_function_name,
                    target_function_repr,
                    target_function_source
                ) VALUES (?, ?, ?, ?);
                ''',
                (
                    self._fuzz_target.__module__ if hasattr(self._fuzz_target, '__module__') else None,
                    self._fuzz_target.__name__ if hasattr(self._fuzz_target, '__name__') else None,
                    repr(self._fuzz_target),
                    attempt_getsource(self._fuzz_target)
                )
            ).lastrowid
            cursor.close()
        return self._test_id

    def save_results(self, fuzz_target, fuzz_result):
        ''' bulk-insert every recorded success and crash into test_ingest '''
        cursor = self.cursor()
        cursor.execute('begin')
        # iterate through the FuzzResult to store its tests to the db
        for type_combo, result in fuzz_result.items():
            # plain asserts replace the previous deprecated/awkward
            # unittest.TestCase.assertEquals(...) construction
            assert isinstance(type_combo, tuple), type_combo
            assert isinstance(result, dict), result
            assert len(result) > 0, result
            assert len(result) <= 2, result
            if True in result and len(result[True]) > 0: # successful tests need to be stored
                list(cursor.executemany(
                    '''
                    INSERT INTO test_ingest (
                        test_id,
                        successful,
                        input_types,
                        input_args,
                        output_type,
                        output
                    ) VALUES (?, ?, ?, ?, ?, ?);
                    ''',
                    (
                        (
                            self.test_id,
                            True,
                            repr(type_combo),
                            repr(input_args),
                            repr(type(output)),
                            repr(output)
                        )
                        for input_args, output in result[True]
                    )
                ))
            if False in result and len(result[False]) > 0: # failed tests need to be stored
                for exception, exception_message in result[False]:
                    list(cursor.executemany(
                        '''
                        INSERT INTO test_ingest (
                            test_id,
                            successful,
                            input_types,
                            input_args,
                            exception_type,
                            exception_message
                        ) VALUES (?, ?, ?, ?, ?, ?);
                        ''',
                        (
                            (
                                self.test_id,
                                False,
                                repr(type_combo),
                                repr(input_args), # (type(output), output.args) or (ex_type, ex_message)
                                repr(exception),
                                exception_message[0] if isinstance(exception_message, tuple) and len(exception_message) == 1 and isinstance(exception_message[0], str) and len(exception_message[0].strip()) > 0 else repr(exception_message)
                            )
                            for input_args in result[False][(exception, exception_message)]
                        )
                    ))
        cursor.execute('commit')
        cursor.close()

    def save_to_file(self, file_path):
        ''' this function saves the FuzzResultDB to a file on the filesystem '''
        assert isinstance(file_path, str), file_path
        cursor = self.cursor()
        # VACUUM INTO writes a compacted copy of the whole database to disk
        cursor.execute("vacuum main into ?", (file_path,))
        cursor.close()
class FuzzResult(dict):
    ''' acts as a user friendly data structure to explore fuzz results

    Maps input-type combos to {True: successes, False: crashes} records and
    exposes convenience views over which combos succeeded, crashed, or both.
    '''

    @property
    def crash_input_types(self) -> Set[Tuple[type]]:
        ''' every type combo that produced at least one crash '''
        return {combo for combo, outcome in self.items() if len(outcome[False]) > 0}

    @property
    def crash_input_count(self) -> int:
        ''' how many type combos produced at least one crash '''
        return sum(1 for outcome in self.values() if len(outcome[False]) > 0)

    @property
    def successful_input_types(self) -> Set[Tuple[type]]:
        ''' every type combo that produced at least one success '''
        return {combo for combo, outcome in self.items() if len(outcome[True]) > 0}

    @property
    def successful_input_count(self) -> int:
        ''' how many type combos produced at least one success '''
        return sum(1 for outcome in self.values() if len(outcome[True]) > 0)

    @property
    def iffy_input_types(self) -> Set[Tuple[type]]:
        ''' type combos that both succeeded and crashed '''
        return self.crash_input_types & self.successful_input_types

    @property
    def iffy_input_count(self) -> int:
        ''' how many type combos both succeeded and crashed '''
        return sum(
            1
            for outcome in self.values()
            if len(outcome[True]) > 0 and len(outcome[False]) > 0
        )

    def __str__(self) -> str:
        summary = f'''
FuzzResult:
    type_combos:
        successful: {self.successful_input_count}
        problematic: {self.crash_input_count}
        iffy: {self.iffy_input_count}
'''
        return summary.strip()
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,094
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/__main__.py
|
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2017-10-08 15:01:56
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2017-10-08 17:34:50
from sys import argv
import battle_tested as bt
import argparse
# command line interface for the battle_tested package
parser = argparse.ArgumentParser(prog='__main__.py')
parser.add_argument(
    '--fuzz',
    help="send the fuzzer's output to stdout",
    action='store_true'
)
parser.add_argument(
    '--test',
    # bugfix: help text previously read "on you system"
    help="run tests to see if battle_tested works correctly on your system",
    action='store_true'
)
parser.add_argument(
    '--benchmark',
    help="test how fast battle_tested can run on your system",
    action='store_true'
)
# NOTE(review): checking argv[-1] for '__main__.py' looks like it is meant to
# detect a bare "python -m battle_tested" invocation (no flags) and show the
# help text - confirm before changing. parse_args(['-h']) prints help and exits.
if '__main__.py' in argv[-1] or 'help' in argv:
    parsed = parser.parse_args(['-h'])
args, unknown = parser.parse_known_args()
def runs_in_window(time_window=1):
    ''' fuzz a trivial two-argument function for time_window seconds and
        report how many individual test calls the fuzzer managed to make '''
    def tally(a, b):
        # counts calls via a function attribute so the closure stays hashable
        tally.c += 1
    tally.c = 0
    bt.fuzz(tally, quiet=True, seconds=time_window)
    return tally.c
# --benchmark: measure fuzzer throughput over increasingly long windows
if args.benchmark:
    print('running benchmarks now')
    for i in [1,3,6,15,30,60]:
        print('{} seconds {} tests'.format(i,runs_in_window(i)))
# --fuzz: stream raw generated fuzz inputs to stdout forever
if args.fuzz:
    for i in bt.multiprocess_garbage():
        try:
            print(i)
        except:
            # some generated values cannot be printed (bad repr/encoding); skip them
            pass
# --test: run the library's full self-test suite
if args.test:
    print('running battle_tested.run_tests')
    bt.run_tests()
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,095
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/fuzz_planner.py
|
from battle_tested.beta.input_type_combos import input_type_combos
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,096
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/__init__.py
|
''' dedicated namespace for the new beta :) '''
from battle_tested.beta.api import fuzz
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,097
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/runner.py
|
from itertools import cycle, chain, product
from collections import defaultdict, deque
from functools import partial
from contextlib import contextmanager
from pprint import pprint
from time import time
import gc
from battle_tested.beta.mutators import mutate
from battle_tested.beta.ammo import infinite_gc_ammo, standard
from battle_tested.beta.easy_street import easy_street
from battle_tested.beta.FuzzResult import FuzzResult
from battle_tested.beta.function_arg_count import function_arg_count
from battle_tested.beta.input_type_combos import input_type_combos
def fuzz_generator(input_type):
    ''' this is the core function that creates test inputs to fuzz with.
    simply give it a type you want and it will use the mutation libraries
    to generate a little chaos for you to use.

    Yields an endless stream of objects mutated toward `input_type`.
    '''
    assert isinstance(input_type, type), input_type
    #for i in infinite_gc_ammo():
    #    yield from mutate(i, input_type)
    # interleave the two ammo pipelines in every pairing order so neither
    # source starves the other
    pipes = [infinite_gc_ammo(), easy_street.garbage()]
    pipes = chain.from_iterable(chain.from_iterable(cycle(product(pipes, repeat=len(pipes)))))
    # the original bound this local but then kept re-reading the
    # `standard.types` attribute inside the loop; use the hoisted local,
    # and as a frozenset so the membership test is O(1) instead of O(n)
    standard_types = frozenset(standard.types)
    for i in pipes:
        # only mutate objects whose type the mutation library understands
        if type(i) in standard_types:
            yield from mutate(i, input_type)
def runner(fn, input_types):
    ''' fuzz a single type combo against a function.

    Builds one fuzz_generator per argument slot and walks them in
    lockstep, yielding a 4-tuple per attempted call:
    (input_types, succeeded?, args, result-or-exception).  Stepping this
    generator externally lets the caller balance processing time between
    type combos.
    '''
    arg_streams = [fuzz_generator(t) for t in input_types]
    for call_args in zip(*arg_streams):
        try:
            outcome = fn(*call_args)
        except Exception as err:
            yield input_types, False, call_args, err
        else:
            yield input_types, True, call_args, outcome
def fuzz_test(fn, input_type_combinations):
    ''' this is the money shot of where the actual fuzzing is ran.

    Yields the (shared, mutated-in-place) result_map after every single
    test call, so external logic can inspect it and decide when to stop
    consuming.  Runners are cycled so every type combo gets interleaved
    processing time.
    '''
    # generate a "runner" for every input type combo
    test_runners = cycle(runner(fn, combo) for combo in input_type_combinations)
    # this data structure contains all of the information of the fuzz test
    result_map = defaultdict(
        lambda: { # each type combo gets its own dict
            True: deque(maxlen=16), # successful runs get stored as (args, output)
            False: defaultdict( # each unique exception gets its own dict
                # exceptions are uniqued by the hash of the tuple of:
                # (exception_type, exception_args)
                lambda: deque(maxlen=16) # arguments that caused each crash are stored in tuples here
            )
        }
    )
    # a simplified view of what this objects materialized form looks like is:
    #
    #{
    #    (int, int): {
    #        True: [
    #            ([3, 5], 8),
    #            ([1, 1], 2),
    #            ([9, 15], 24),
    #        ],
    #        False: {}
    #    }
    #    (set, str): {
    #        True: [],
    #        False: {
    #            (<class 'TypeError'>, ('can only concatenate str (not "set") to str',)): [
    #                ({'yolo', {2, 4, 6}})
    #            ]
    #        }
    #    }
    #}
    for tr in test_runners:
        # each step pulls exactly one attempted call from the current runner
        input_types, success, args, output = next(tr)
        if success:
            # store the input, output pair in the successful runs
            result_map[input_types][success].append((args, output))
        else:
            # store the arguments in the deque for that specific exception
            result_map[input_types][success][(type(output), output.args)].append(args)
        # return the current state of the result map to let external logic
        # decide if its necessary to continue fuzzing.
        yield result_map
def quick_show_result(result):
    ''' converts an optimized result object into something a little cleaner
    and pprints the simplified version (the inner deques are materialized
    into plain lists first).
    This will become a method of FuzzResult later.
    '''
    assert isinstance(result, dict), type(result)
    simplified = {}
    for combo, outcomes in result.items():
        simplified[combo] = {flag: list(entries) for flag, entries in outcomes.items()}
    pprint(simplified)
@contextmanager
def no_gc():
    ''' context manager that disables the garbage collector for its body.

    The finally clause guarantees the collector is re-enabled and a full
    generation-2 collection is forced even if the body raises.
    '''
    gc.disable()
    try:
        yield
    finally:
        gc.enable()
        gc.collect(2)
def run_fuzz(fn,
             *, # force settings to be kv pairs
             max_tests=100_000,
             seconds=6, # note: might be solvable with generators.timed_pipeline
             input_types=tuple(),
             exit_on_first_crash=False,
             allow=tuple(),
             verbosity=1):
    ''' drive a complete fuzz run of `fn` and return the final result map.

    GC is disabled for the duration of the run; progress is printed every
    ~1% of max_tests, and verbosity > 1 additionally prints every
    iteration index.
    '''
    with no_gc():
        start_time = time()
        input_types = tuple(
            input_type_combos(
                input_types if input_types else standard.types,
                function_arg_count(fn)
            )
        )
        result = None
        pipe = fuzz_test(fn, input_types)
        # clamp to at least 1: the original int(max_tests/100) was 0 for
        # max_tests < 100, making `i % update_interval` raise
        # ZeroDivisionError on the very first iteration
        update_interval = max(1, max_tests // 100)
        # the original duplicated this loop per verbosity level; the only
        # difference was the per-iteration print, so fold it into one loop
        for i, v in zip(range(max_tests), pipe):
            if verbosity > 1:
                print(i)
            if not i % update_interval:
                print(f'{i} / {max_tests}')
                print(FuzzResult(v))
            result = v
        duration = time() - start_time
        print(f'fuzz duration for {fn}: {duration}s or {max_tests/duration} per sec')
        return result
def main():
    ''' runs the fuzzer components through the basic movements to show how all
    the components would run in a primary fuzz() function
    '''
    add = lambda a, b: a + b
    latest = None
    combos = tuple(product(standard.types, repeat=2))
    for step, snapshot in zip(range(100_000), fuzz_test(add, combos)):
        if step % 1000 == 0:
            print(step, 100_000)
        if step % 10_000 == 0:
            print('-')
            print(FuzzResult(snapshot))
        latest = snapshot
    return latest
if __name__ == '__main__':
    main()
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,098
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/easy_street.py
|
from itertools import product, cycle, islice
from random import shuffle
from re import findall
from string import ascii_letters, digits
import generators as gen
class easy_street:
    ''' This is a namespace for high speed test generation of various types '''
    # NOTE: strings() below harvests words from module docstrings at runtime,
    # so documentation in this module is kept in plain comments to avoid
    # changing its output.
    @staticmethod
    def chars():
        # endless stream of single alphanumeric characters
        # (gen.loop() presumably yields forever - from the `generators`
        # package, TODO confirm)
        test_chars = ascii_letters + digits
        for _ in gen.loop():
            for combination in product(test_chars, repeat=4):
                for i in combination:
                    yield i
    @staticmethod
    def strings():
        # endless stream of test strings: a couple of canaries plus every
        # word scraped out of this module's docstrings
        test_strings = [
            '',
            'exit("######## WARNING this code is executing strings blindly ########")'
        ]
        # this snippet rips out every word from doc strings
        test_strings += list(set(findall(
            r'[a-zA-Z\_]{1,}',
            [v.__doc__ for v in globals().values() if hasattr(v, '__doc__')].__repr__()
        )))
        for _ in gen.loop():
            for combination in product(test_strings, repeat=4):
                for i in combination:
                    yield i
    @staticmethod
    def bools():
        # endless stream of True/False values
        booleans = (True, False)
        for _ in gen.loop():
            for combination in product(booleans, repeat=4):
                for i in combination:
                    yield i
    @staticmethod
    def ints():
        # endless stream of small ints in [-33, 64]
        numbers = tuple(range(-33,65))
        for _ in gen.loop():
            for combination in product(numbers, repeat=3):
                for i in combination:
                    yield i
    @staticmethod
    def floats():
        # endless stream of float ratios built from two offset int streams;
        # zeros are filtered out of the divisor stream
        non_zero_ints = (i for i in easy_street.ints() if i != 0)
        stream1 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 10))
        stream2 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 12))
        for i in stream1:
            yield next(stream2)/(1.0*i)
    @staticmethod
    def lists():
        # endless stream of lists (length 0..20) drawn from mixed scalar streams
        strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
        strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
        lengths = cycle(list(range(0, 21)))
        for _ in gen.loop():
            for length in lengths:
                for strat in strategies:
                    yield [st for st in islice(strat, length)]
    @staticmethod
    def tuples():
        # endless stream of tuples: lists() converted
        for i in easy_street.lists():
            yield tuple(i)
    @staticmethod
    def dicts():
        # endless stream of dicts built by pairing consecutive scalar values
        strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
        strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
        lengths = cycle(list(range(1, 21)))
        for _ in gen.loop():
            for length in lengths:
                for strat in strategies:
                    yield { k:v for k,v in gen.chunks(islice(strat,length*2), 2) }
    @staticmethod
    def sets():
        # endless stream of sets (length 0..20 before dedup)
        strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
        strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
        lengths = cycle(list(range(0, 21)))
        for _ in gen.loop():
            for length in lengths:
                for strat in strategies:
                    yield {i for i in islice(strat, length)}
    @staticmethod
    def garbage():
        # endless stream mixing every strategy above, reshuffled each pass
        strategies = [
            easy_street.strings(),
            easy_street.ints(),
            easy_street.floats(),
            easy_street.bools(),
            easy_street.dicts(),
            easy_street.sets(),
            easy_street.lists(),
            easy_street.tuples()
        ]
        while 1:
            shuffle(strategies)
            for strat in gen.chain(product(strategies, repeat=len(strategies))):
                yield next(strat)
if __name__ == '__main__':
    # smoke test: pull a million garbage values to prove the stream keeps up
    for a,b in zip(range(1000000), easy_street.garbage()):
        pass #print(a,b)
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,099
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/commit-update.py
|
from os import popen, chdir
from datetime import datetime
chdir("/Users/cody/git/battle_tested/")
def bash(command, check=True):
    ''' run `command` through a shell and return its stdout as a string.

    When `check` is truthy, the operator must confirm at a prompt before
    the command runs (raw_input: this script targets Python 2).
    '''
    if check:
        raw_input('about to run: {} [enter to continue]'.format(command))
    proc = popen(command)
    return proc.read()
def list_tags():
    ''' returns a list of the tags in the repo '''
    raw = bash('git tag -l', False)
    return raw.strip().split('\n')
def latest_tag():
    """ returns the latest (lexicographically greatest) tag, or '' if none.

    The original guarded on `tags_all_sorted is not None`, but sorted()
    never returns None - that branch was dead code, and an empty tag list
    would have raised IndexError.  Guard on emptiness instead.
    """
    tags_all_sorted = sorted(list_tags())
    return tags_all_sorted[-1] if tags_all_sorted else ''
def create_next_tag():
    """ creates a tag based on the date and previous tags.

    Tags look like YEAR.MONTH.DAY, with a fourth .N revision component
    appended/bumped when a tag was already cut today.
    """
    now = datetime.utcnow()
    date_tag = '{}.{}.{}'.format(now.year, now.month, now.day)
    previous = latest_tag()
    if date_tag in previous:  # a tag was already cut today
        parts = previous.split('.')
        if len(parts) == 4:
            # bump the existing daily revision counter
            parts[-1] = str(int(parts[-1]) + 1)
        else:
            # first revision of the day gets an explicit counter
            parts.append('1')
        date_tag = '.'.join(parts)
    return date_tag
def replace_all_in_file(file_path, old, new):
    ''' replace every occurrence of `old` with `new` inside `file_path`, in place '''
    with open(file_path, 'r') as reader:
        contents = reader.read()
    with open(file_path, 'w') as writer:
        writer.write(contents.replace(old, new))
def update_setup():
    ''' rewrite the version string in setup.py from the latest tag to the next one,
    after prompting the operator for confirmation (raw_input: Python 2). '''
    target, old_tag, new_tag = 'setup.py', latest_tag(), create_next_tag()
    raw_input("about to replace {1:} with {2:} in {0:}".format(target, old_tag, new_tag))
    replace_all_in_file(target, old_tag, new_tag)
def sync_readmes():
    """ just copies README.md into README for pypi documentation """
    print("syncing README")
    with open("README.md", 'r') as reader:
        contents = reader.read()
    with open("README", 'w') as writer:
        writer.write(contents)
# make this part automated later, Im tired...
# Release flow: bump the version in setup.py, mirror README.md -> README,
# then commit, tag, push, and upload sdists to pypitest and pypi.
# NOTE(review): commit_message is spliced into shell command strings below;
# a single quote in the message will break (or inject into) the command.
update_setup()
sync_readmes()
commit_message = raw_input('Enter your commit message: ')
bash("git status", False)
bash('git add .')
bash("git commit -m '{}'".format(commit_message))
bash("git push origin master")
bash("git tag {} -m '{}'".format(create_next_tag(), commit_message))
bash("git push --tags origin master")
bash("git status", False)
#bash("python setup.py register -r pypitest")
bash("python setup.py sdist upload -r pypitest")
#bash("python setup.py register -r pypi")
bash("python setup.py sdist upload -r pypi")
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,100
|
CodyKochmann/battle_tested
|
refs/heads/master
|
/battle_tested/beta/api.py
|
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2019-04-28 10:43:15
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2019-04-30 08:47:31
from typing import Callable
from functools import partial
from battle_tested.beta.function_arg_count import function_arg_count
from battle_tested.beta.FuzzResult import FuzzResult, FuzzResultDB
from battle_tested.beta.runner import run_fuzz
# how the previous api worked
#
# fn, seconds=6, max_tests=1000000000, verbose=False, keep_testing=True, quiet=False, allow=(), strategy=garbage
#
def _verify_function(fn):
    """ asserts that the input is a callable accepting at least one argument """
    if not callable(fn):
        raise TypeError('battle_tested needs a callable function, not {0}'.format(repr(fn)))
    arg_count = function_arg_count(fn)
    if not isinstance(arg_count, int):
        raise TypeError('expected an int, not {0}'.format(arg_count))
    if arg_count <= 0:
        raise ValueError('battle_tested cannot fuzz a function that accepts "{0}" args'.format(arg_count))
def _verify_max_tests(_max_tests):
""" asserts that max_tests is valid """
assert type(_max_tests) == int, 'battle_tested needs max_tests to be an int, not {0}'.format(repr(_max_tests))
assert _max_tests > 0, 'battle_tested needs max_tests to be a positive int, not {0}'.format(repr(_max_tests))
def _verify_seconds(_seconds):
assert type(_seconds) == int, 'battle_tested needs seconds to be an int, not {0}'.format(repr(_seconds))
assert _seconds > 0, 'battle_tested needs seconds to be a positive int, not {0}'.format(repr(_seconds))
def _is_tuple_of_types(types):
assert isinstance(types, tuple), types
return all(isinstance(i, type) for i in types)
def _is_type_or_tuple_of_types(t):
assert isinstance(t, (tuple, type))
return isinstance(t, type) or _is_tuple_of_types(t)
def _is_nested_tuple_of_types(types):
    ''' True when every member of `types` is itself a type or a tuple of types '''
    assert isinstance(types, tuple), types
    for member in types:
        if not _is_type_or_tuple_of_types(member):
            return False
    return True
def _verify_input_types(_input_types):
    """ asserts that input_types is a tuple of types, or a tuple of tuples of types """
    # the original message said "seconds" here - a copy-paste from _verify_seconds
    assert type(_input_types) == tuple, 'battle_tested needs input_types to be a tuple, not {0}'.format(repr(_input_types))
    assert _is_tuple_of_types(_input_types
        ) or _is_nested_tuple_of_types(_input_types
        ), 'battle_tested needs input_types to be a tuple of types or a tuple of tuples of types, not {0}'.format(repr(_input_types))
def _verify_exit_on_first_crash(_exit_on_first_crash):
""" ensures exit_on_first_crash is a valid argument """
assert type(_exit_on_first_crash) == bool, 'exit_on_first_crash needs to be a bool, not {0}'.format(repr(_exit_on_first_crash))
assert _exit_on_first_crash in {True, False}, 'invalid value for exit_on_first_crash: {0}'.format(repr(_exit_on_first_crash))
def _verify_allow(_allow):
""" ensures allow is a valid argument """
assert type(_allow) == tuple, 'battle_tested needs allow to be a tuple, not {0}'.format(repr(_allow))
for ex in _allow:
assert issubclass(ex, BaseException), 'allow only accepts exceptions as its members, found: {0}'.format(ex)
def _verify_verbosity(_verbosity):
""" asserts that verbosity setting is valid """
assert type(_verbosity) == int, 'battle_tested needs verbosity to be a int, not {}'.format(repr(_verbosity))
# do not reformat the following assertion
assert _verbosity in {
0: "quiet", 1: "normal", 2: "extreme"
}, 'battle_tested needs verbosity to be 0, 1, or 2. - {}'.format(_verbosity)
def _verify_input_types_fits_function(fn, input_types):
    ''' asserts that input_types supplies exactly one entry per argument of fn '''
    _verify_function(fn)
    _verify_input_types(input_types)
    expected = function_arg_count(fn)
    assert len(input_types) == expected, 'battle_tested needs input_types to be the same length as the arg count of {}, found {} not {}'.format(fn, len(input_types), expected)
def _verify_fuzz_settings(*, fn=None, max_tests=None, seconds=None, input_types=None, exit_on_first_crash=None, allow=None, verbosity=None):
    """ runs every individual argument validator for a fuzz invocation """
    _verify_function(fn)
    _verify_max_tests(max_tests)
    _verify_seconds(seconds)
    _verify_input_types(input_types)
    _verify_exit_on_first_crash(exit_on_first_crash)
    _verify_allow(allow)
    _verify_verbosity(verbosity)
    # an empty tuple means "use the default types", so only cross-check
    # the length against fn's arg count when input_types was provided
    if len(input_types):
        _verify_input_types_fits_function(fn, input_types)
# this is the primary usage and supports "fuzz(fn)" syntax
def _fuzz_decorator(
        fn: Callable,
        *, # force settings to be kv pairs
        max_tests=1000000000,
        seconds=6,
        input_types=tuple(),
        exit_on_first_crash=False,
        allow=tuple(),
        verbosity=1):
    ''' decorator-path entry point: validates the settings against fn and
    returns fn unchanged.  Eventually decorated functions will queue a fuzz
    for multiprocessing execution; for now only validation happens. '''
    _verify_fuzz_settings(
        fn=fn,
        max_tests=max_tests,
        seconds=seconds,
        input_types=input_types,
        exit_on_first_crash=exit_on_first_crash,
        allow=allow,
        verbosity=verbosity,
    )
    # placeholder for now. decorators basically queue a fuzz for multiprocessing madness
    #return QueuedFuzzResult(locals())
    return fn
def _fuzz_function(
        fn: Callable,
        *, # force settings to be kv pairs
        max_tests=1000000000,
        seconds=6,
        input_types=tuple(),
        exit_on_first_crash=False,
        allow=tuple(),
        verbosity=1):
    ''' direct-call entry point: validates the settings, runs the fuzz, and
    wraps the outcome in a FuzzResultDB. '''
    settings = dict(
        fn=fn,
        max_tests=max_tests,
        seconds=seconds,
        input_types=input_types,
        exit_on_first_crash=exit_on_first_crash,
        allow=allow,
        verbosity=verbosity,
    )
    _verify_fuzz_settings(**settings)
    return FuzzResultDB(fuzz_target=fn, fuzz_result=run_fuzz(**settings))
# this is for the "@fuzz()" decorator syntax, to allow users to input settings
def fuzz( fn=None,
          *, # force settings to be kv pairs
          max_tests=100_000,
          seconds=6,
          input_types=tuple(),
          exit_on_first_crash=False,
          allow=tuple(),
          verbosity=1):
    ''' public entry point for battle_tested fuzzing.

    Usable both as a direct call - fuzz(fn, ...) - and as a decorator
    factory - @fuzz(...) - in which case a partial of the decorator path
    is returned for the interpreter to apply to the target function.
    '''
    settings = dict(
        max_tests=max_tests,
        seconds=seconds,
        input_types=input_types,
        exit_on_first_crash=exit_on_first_crash,
        allow=allow,
        verbosity=verbosity,
    )
    if fn is None:
        # decorator-factory usage: bind the settings now, the function later
        return partial(_fuzz_decorator, **settings)
    # direct usage: run the fuzz immediately
    return _fuzz_function(fn, **settings)
if __name__ == '__main__':
    # smoke-test harness: exercises every public calling convention of fuzz()
    def my_adder(a, b):
        return a + b
    # test a short run to get most of the flow tested quickly
    result = fuzz(my_adder, max_tests=4096)
    print(result)
    assert isinstance(result, FuzzResultDB)
    # uncomment to get right to debugging
    #result = fuzz(my_adder, verbosity=2)
    # test multiple runs
    result = fuzz(my_adder)
    print(result)
    assert isinstance(result, FuzzResultDB)
    result = fuzz(my_adder)
    print(result)
    assert isinstance(result, FuzzResultDB)
    result = fuzz(my_adder)
    print(result)
    assert isinstance(result, FuzzResultDB)
    # test various settings
    # test seconds
    # not implemented yet
    #result = fuzz(my_adder, seconds=3)
    #print(result)
    #assert isinstance(result, FuzzResultDB)
    # flat tuple: one type per argument
    result = fuzz(my_adder, input_types=(int, str))
    print(result)
    assert isinstance(result, FuzzResultDB)
    # nested tuples: multiple candidate types per argument
    result = fuzz(my_adder, input_types=((int, str), (bool, bool)))
    print(result)
    assert isinstance(result, FuzzResultDB)
    result = fuzz(my_adder, input_types=(int, (list, float, bool)))
    print(result)
    assert isinstance(result, FuzzResultDB)
    # not implemented yet
    #result = fuzz(my_adder, exit_on_first_crash=True)
    #print(result)
    #assert isinstance(result, FuzzResultDB)
    #result = fuzz(my_adder, allow=(AssertionError,))
    #print(result)
    #assert isinstance(result, FuzzResultDB)
    # only uncomment if you wanna REALLY see some logs
    #result = fuzz(my_adder, verbosity=2)
    #print(result)
    #assert isinstance(result, FuzzResultDB)
    # test decorator syntax
    # (the decorator path currently only validates and returns fn unchanged,
    # so each decorated function must remain callable with its own __name__)
    @fuzz(seconds=3)
    def my_adder_1(a, b):
        return a + b
    assert callable(my_adder_1)
    assert my_adder_1.__name__ == 'my_adder_1'
    @fuzz(input_types=(int, str))
    def my_adder_2(a, b):
        return a + b
    assert callable(my_adder_2)
    assert my_adder_2.__name__ == 'my_adder_2'
    @fuzz(input_types=((int, str), (bool, bool)))
    def my_adder_3(a, b):
        return a + b
    assert callable(my_adder_3)
    assert my_adder_3.__name__ == 'my_adder_3'
    @fuzz(input_types=(int, (list, float, bool)))
    def my_adder_4(a, b):
        return a + b
    assert callable(my_adder_4)
    assert my_adder_4.__name__ == 'my_adder_4'
    @fuzz(exit_on_first_crash=True)
    def my_adder_5(a, b):
        return a + b
    assert callable(my_adder_5)
    assert my_adder_5.__name__ == 'my_adder_5'
    @fuzz(allow=(AssertionError,))
    def my_adder_6(a, b):
        return a + b
    assert callable(my_adder_6)
    assert my_adder_6.__name__ == 'my_adder_6'
    @fuzz(verbosity=2)
    def my_adder_7(a, b):
        return a + b
    assert callable(my_adder_7)
    assert my_adder_7.__name__ == 'my_adder_7'
    print('success')
|
{"/battle_tested/beta/input_type_combos.py": ["/battle_tested/beta/ammo.py"], "/generate-db-from-beta.py": ["/battle_tested/beta/__init__.py"], "/battle_tested/__main__.py": ["/battle_tested/__init__.py"], "/battle_tested/beta/fuzz_planner.py": ["/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/__init__.py": ["/battle_tested/beta/api.py"], "/battle_tested/beta/runner.py": ["/battle_tested/beta/mutators.py", "/battle_tested/beta/ammo.py", "/battle_tested/beta/easy_street.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/input_type_combos.py"], "/battle_tested/beta/api.py": ["/battle_tested/beta/function_arg_count.py", "/battle_tested/beta/FuzzResult.py", "/battle_tested/beta/runner.py"]}
|
40,101
|
TrevorDemille/Simple-Analysis-App
|
refs/heads/master
|
/HelpfulFunctions.py
|
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
from pH import *
from Parser import *
#
#Written by Trevor Demille, Summer 2016, Goldner Biophysics Group
"""
List of functions necessary/helpful in the analysis of Fluorimeter and Dynamic Light Scattering data.
Data is all taken to either be in the CSV or TXT format.
Some conditions on the plotting of data such as the concentrations, iterations, and legends must be maually
edited in the code here.
A copy of this file as well as the Parser.py an pH.py python files must be saved in the program files directory where
the master storage for local python libraries is kept. (ie C_drive-->Program Files-->Python2.7)
"""
#Function to import and parse out data from text files
def loadData(fileName, fileNum):
    '''Import whitespace-delimited numeric text files matched by a glob pattern.

    fileName must be of the format "directory/subdirectory/*.txt" - it is
    joined onto the hard-coded data directory and handed to glob, so the
    pattern must be the ending of the filenames to match.  fileNum is the
    number of files in the directory.  Returns a list of fileNum numpy
    arrays (slots stay None when fewer files match).
    '''
    path1 = 'C:/Users/tdemille/Desktop/UMA/Sublime/Python/Text Files for pH'
    inputPath = os.path.join(path1, fileName)
    #
    all_txts = glob.iglob(inputPath)
    print(all_txts)
    #
    columnFiles = [None] * fileNum
    for slot, dataFile in enumerate(all_txts):
        columnFiles[slot] = np.loadtxt(dataFile)
    return columnFiles
def solvepH(R,S):
    '''Print the pH corresponding to a single ratio R with std dev S.

    NOTE(review): the original body went on to call
    self.Rentry.delete(0,'end') and self.Sentry.delete(0,'end'); `self`
    is undefined in this module-level function, so those lines always
    raised NameError after printing.  Clearing the entry widgets belongs
    in the GUI class (PlotAnalysis.myButtons), so the stray lines were
    removed here.
    '''
    print(get_pH(R,S))
def parseFile(Dir, File):
    '''Split a full CSV export (time, intensity, background) into separate
    CSV files of the desired background and intensities for each
    individually exported group of data (D1, D2, etc), by delegating to
    the Parser helper.'''
    Parser(Dir, File)
#
def Indiv_Ratios(D1file,D2file,bkg1,bkg2,cNum,legd,titl):
    '''Background-correct two intensity datasets and plot every individual
    (D1-D2)/(D1+D2) ratio, color-coded by concentration.

    cNum is the number of concentrations; legd/titl label the plot.
    NOTE(review): uses xrange, so this function is Python 2 only.
    '''
    #Assign the lengths of columns and rows to a value
    Dcols = len(D1file[0,:])
    Drows = len(D1file[:,0])-1 #rows are always over counted by 1 due to 0-indexing
    rTotal = Drows*10
    bkgCols = len(bkg1[0,:])
    #Set up matrices to store value solved for in the for loop
    D1bkg = [None] * bkgCols
    D2bkg = [None] * bkgCols
    #Take means of background file columns, as there are half the number of background measurements
    #as there are fluorescein intensity measurements. This is just each column's mean value
    for jj in range(bkgCols):
        D1bkg[jj] = np.mean(bkg1[:,jj], dtype=np.float64)
        D2bkg[jj] = np.mean(bkg2[:,jj], dtype=np.float64)
    #Background files must be resized so that every 5 values (assigned to each concentration) can be taken for a mean
    D1bkg = np.array(D1bkg)
    D1meanBkg = np.mean(D1bkg.reshape(-1,5), axis=1)
    D2bkg = np.array(D2bkg)
    D2meanBkg = np.mean(D2bkg.reshape(-1,5), axis=1)
    #Set up counters so the loop ahead can keep all the indexes on track, and set up empty matrices
    ratTot = Dcols*Drows
    ratios = np.array([])
    D1use = np.array([])
    D2use = np.array([])
    bkgIndex = 0
    count = 1
    countr = 0
    countT = 0
    #
    #Loop to subtract the background from D1 & D2, and to solve for ratios of each concentration mean.
    #bkgIndex advances every 10th column (count % 10 == 1) so each block of
    #10 columns shares one background mean; countT+countr walks the flat
    #index of the growing D1use/D2use arrays.
    for cc in range(Dcols):
        if count % 10 == 1:
            bkgIndex = bkgIndex+1
        for rr in range(Drows):
            D1use = np.append(D1use, (D1file[rr,cc]-D1meanBkg[bkgIndex-1]))
            D2use = np.append(D2use, (D2file[rr,cc]-D2meanBkg[bkgIndex-1]))
            result = (D1use[countT+countr]-D2use[countT+countr]) / (D1use[countT+countr]+D2use[countT+countr])
            ratios = np.append(ratios, result)
            #
            countr = countr+1
        countT = cc*Drows
        countr = 0
        count = count+1
    #Split up the ratio values by concentration
    val2 = 1
    sub_RatList = [None]*cNum
    sub_RatRange = [None]*cNum
    ratList = [None]*cNum
    ratRange = [None]*cNum
    for tt in range(cNum):
        sub_RatList[tt] = ratios[(tt*rTotal+1):(val2*rTotal)]
        sub_RatRange[tt] = xrange((tt*rTotal+1),(val2*rTotal))
        ratList[tt] = sub_RatList[tt]
        ratRange[tt] = sub_RatRange[tt]
        val2=val2+1
    #plot everything up individually such that the colors can be changed.
    #The legend has to be manually altered as the concentrations change from measurement to measurement
    colorInd = ['ro','bo','ko','mo','yo','go','co','ro','bo','ko','mo','yo','go','co','ro','bo','ko']
    f, fig1 = plt.subplots()
    for hh in range(cNum):
        fig1.plot(ratRange[hh],ratList[hh],colorInd[hh])
    #fig1.plot(R7,rat7,'yo')
    fig1.set_xlabel('Index', fontsize=15)
    fig1.set_ylabel('Ratio', fontsize=15)
    fig1.set_title(titl, fontsize=18)
    #fig1.legend(['1:100','1:200','1:300','1:400'],numpoints=1,loc=3,frameon=False)
    fig1.legend(['0M','1uM','30uM','100uM','300uM','1mM','3mM','5mM','7.5mM','10mM','15mM','30mM','100mM','200mM'], numpoints=1, loc=4, frameon=False)
def Mean_Ratios(D1file,D2file,bkg1,bkg2,cNum,legd,titl1,titl2):
    #Function to find the means of all the D1 and D2 intensity data found by measuring the 514nm and 550nm
    #emission of fluorescein. This data is taken as 10 sets of 16 measurements for each concentration of surfactant.
    #The mean of each 16 measurements is taken, and the std dev found. the mean of these 10 means is then taken, and
    #its stdev is found.
    #
    #NOTE(review): the hard-coded [0:14]/[14:cNum] slicing below assumes
    #the 28-entry concList layout; confirm against the current measurement
    #before reuse.  Setting Repeat=True raises NameError because concPlot
    #and repResults only exist as commented-out lines.
    Dcols = len(D1file[0,:])
    Drows = len(D1file[:,0])-1
    bkgCols = len(bkg1[0,:])
    #Set up matrices to store data in loop. This isn't the most eficient way, but it works for now.
    D1meanList = [None] * Dcols
    D2meanList = [None] * Dcols
    D1stdList = [None] * Dcols
    D2stdList = [None] * Dcols
    D1bkg = [None] * bkgCols
    D2bkg = [None] * bkgCols
    #Loop to take means and std dev of each column of intensity data
    for i in range(Dcols):
        D1meanList[i] = np.mean(D1file[:,i], dtype=np.float64)
        D2meanList[i] = np.mean(D2file[:,i], dtype=np.float64)
        D1stdList[i] = np.std(D1file[:,i])
        D2stdList[i] = np.std(D2file[:,i])
    #Loop to take mean of background data
    for k in range(bkgCols):
        D1bkg[k] = np.mean(bkg1[:,k], dtype=np.float64)
        D2bkg[k] = np.mean(bkg2[:,k], dtype=np.float64)
    #I need to take the mean of the first 10 values, then the next 5, then the next 10 and so on, so I must reshape the array
    #by first making them arrays and then spliting them up into fives where the first 2 sets of 5 are intensity data, and
    #every third set of five values is the corresponding background
    D1bkg = np.array(D1bkg)
    D1meanbkg = np.mean(D1bkg.reshape(-1,5), axis=1)
    D2bkg = np.array(D2bkg)
    D2meanbkg = np.mean(D2bkg.reshape(-1,5), axis=1)
    D1meanList = np.array(D1meanList)
    D1mean = np.mean(D1meanList.reshape(-1,10), axis=1)
    D2meanList = np.array(D2meanList)
    D2mean = np.mean(D2meanList.reshape(-1,10), axis=1)
    D1stdList = np.array(D1stdList)
    D1std = np.mean(D1stdList.reshape(-1,10), axis=1)
    D2stdList = np.array(D2stdList)
    D2std = np.mean(D2stdList.reshape(-1,10), axis=1)
    #Correct intensity data for the background and add the std devs in quadriture
    CorD1 = D1mean-D1meanbkg
    CorD2 = D2mean-D2meanbkg
    D1sqr = np.power(D1std,2)
    D2sqr = np.power(D2std,2)
    DstdAdd = np.sqrt(D1sqr+D2sqr)
    #More matrices
    DstdRat = [None] * cNum
    ratio = [None] * cNum
    topE = [None] * cNum
    botE = [None] * cNum
    #Loop to find the ratio and its errorbars above and below based on the number of iterations or solute concentrations (Cnum)
    for j in range(cNum):
        ratio[j] = (CorD1[j]-CorD2[j]) / (CorD1[j]+CorD2[j])
        topE[j] = np.power((DstdAdd[j] / (CorD1[j]+CorD2[j])),2)
        botE[j] = np.power((DstdAdd[j] / (CorD1[j]-CorD2[j])),2)
        DstdRat[j] = np.sqrt(topE[j] + botE[j])*abs(ratio[j])
    print('\n')
    print('Ratios\n')
    print(ratio)
    print('\n')
    print('Standard Deviations\n')
    print(DstdRat)
    print('\n')
    #
    R = len(ratio)
    pHresults = [None] * R
    devR = [None] * R
    devL = [None] * R
    #Loop to use the get_pH script written by Kieran to find the probabilistic pH and save outputs as printable strings
    #Errorstate gets rid of inevitable errors which accompany values not supported by the ratio curve found in the calibration
    for kk in range(R):
        with np.errstate(divide='ignore', invalid='ignore'):
            result = get_pH(ratio[kk],DstdRat[kk],plot=False)
        pHresults[kk] = result[0]
        devL[kk] = result[1]
        devR[kk] = result[2]
    #These are to be changed each time new data is taken and used to reflect the concentrations and spot check values
    concList = [0.00001,0.001,0.01,0.03,0.1,0.3,1,3,5,7,10,30,100,200,0.00001,0.001,0.03,0.1,0.3,1,3,5,7.5,10,15,30,100,200] #29 concentrations
    #concPlot = [0.001,0.3,30]
    #repResults = [results[15],results[15],results[15]]
    print('\n')
    print('pHs\n')
    print(pHresults)
    print('\n')
    print('Lower pH std. deviations\n')
    print(devL)
    print('\n')
    print('Upper pH std. deviations\n')
    print(devR)
    print('\n')
    print('Concentrations\n')
    print(concList)
    print('\n')
    #Set up for plotting as subplots so I can add things on top if I do spot checks later.
    f, fig = plt.subplots()
    plt.xscale('log')
    f2, fig2 = plt.subplots()
    plt.xscale('log')
    #If statement for the repeating of old points to check accuracy
    Repeat=False
    if Repeat==True:
        fig.errorbar(concPlot,ratio[14:cNum],DstdRat[14:cNum], fmt='ro', linewidth=1.5)
        fig2.errorbar(concPlot,repResults,yerr=[devL[14:cNum], devR[14:cNum]], fmt='ro', linewidth=1.5)
    #xScale is changed every time new data is taken. Could add to GUI at some point?
    fig.errorbar(concList[0:14],ratio[0:14],DstdRat[0:14], fmt='bo', linewidth=1.5, label='Repeated Series')
    fig.errorbar(concList[14:cNum],ratio[14:cNum],DstdRat[14:cNum], fmt='r^', linewidth=1.5, label='Original Series')
    fig.set_xlim([0.000001,1000])
    fig.set_xlabel('Concentration (%)', fontsize=15)
    fig.set_ylabel('Ratio', fontsize=15)
    fig.set_title(titl1, fontsize=18)
    plt.grid()
    #
    fig2.errorbar(concList[0:14],pHresults[0:14],yerr=[devL[0:14], devR[0:14]], fmt='k^', linewidth=1.5, label='Repeated Series')
    fig2.errorbar(concList[14:cNum],pHresults[14:cNum],yerr=[devL[14:cNum], devR[14:cNum]], fmt='r^', linewidth=1.5, label='Original Series')
    fig2.set_xlabel('Concentration (%)', fontsize=15)
    fig2.set_ylabel('pH', fontsize=15)
    fig2.set_xlim([0.000001,1000])
    fig2.set_title(titl2, fontsize=18)
    #plt.grid()
    #
    #if legd != '': fig.legend(numpoints=1)
    fig.legend(numpoints=1, loc=2)
    #fig2.legend(numpoints=1, loc=2)
def plot_Gen(e1,e2,e3,e4,e5,e6,e7,e8,L2,eColor):
    #Function to plot general data given a text file of columns
    #e1: data file path; e2/e3/e4: x, y, std-dev column indices; e5/L2:
    #legend entries; e6/e7/e8: x label, y label, title; eColor: row index
    #where the marker color should switch ('' to disable).
    #NOTE(review): stdCol is assigned from column e4 before the `e4 != ''`
    #check - if e4 is really passed as '' this line would fail first;
    #confirm against the GUI caller.
    datalist = np.loadtxt(e1)
    rowTot = len(datalist[:,0])
    #Set up plots
    fig, ax0 = plt.subplots()
    #Assign each column assuming a certain organization to x, y, and std dev
    xCol = datalist[:,e2]
    yCol = datalist[:,e3]
    stdCol = datalist[:,e4]
    #If statements to decide if color should be changed mid way through the columns to signify some change in condition of the data
    if eColor != '':
        if e4 != '':
            ax0.errorbar(xCol[0:eColor],yCol[0:eColor],stdCol[0:eColor],fmt='k^')
            ax0.errorbar(xCol[eColor:rowTot],yCol[eColor:rowTot],stdCol[eColor:rowTot],fmt='ko')
        else:
            #NOTE(review): both calls plot the full series, only the marker differs
            ax0.plot(xCol,yCol,'k^')
            ax0.plot(xCol,yCol,'ko')
    else:
        if e4 != '':
            stdCol = datalist[:,e4]
            ax0.errorbar(xCol,yCol,stdCol,fmt='k^',linewidth=1.5)
        else:
            ax0.plot(xCol,yCol,'bo')
    #
    if e5 != '' and L2 == '':
        ax0.legend([e5], numpoints=1)
    if e5 != '' and L2 != '':
        ax0.legend([e5,L2], numpoints=1)
    ax0.set_xlabel(e6, fontsize=15)
    ax0.set_ylabel(e7, fontsize=15)
    ax0.set_title(e8, fontsize=18)
def mean_Gen(meanFile,Leg,xLab,yLab,Titl):
    #Plot the per-column mean +/- std dev of a numeric text file against a
    #hard-coded concentration axis (log x scale).
    #NOTE(review): `concent` is fixed at 4 values below, so the file must
    #have exactly 4 columns or errorbar() will raise on length mismatch.
    meanData = np.loadtxt(meanFile)
    cols = len(meanData[0,:])
    rows = len(meanData[:,0])
    xscale = range(1,cols+1)
    #Matrices!
    meanVal = [None] * cols
    stdVal = [None] * cols
    for i in range(cols):
        meanVal[i] = np.mean([meanData[:,i]], dtype=np.float64)
        stdVal[i] = np.std([meanData[:,i]])
    #Concentration values subject to manual change in the code
    concent = [2,1,0.5,0.1]
    f, ax1 = plt.subplots()
    ax1.errorbar(concent,meanVal,stdVal,fmt='ko',linewidth=1.5)
    plt.xscale('log')
    ax1.set_xlabel(xLab, fontsize=18)
    ax1.set_ylabel(yLab, fontsize=18)
    ax1.set_title(Titl, fontsize=15)
    ax1.set_xlim(0.05,3)
    #throws error if a blank legend is assigned occasionally.
    if Leg != '':
        ax1.legend([Leg], numpoints=1)
|
{"/HelpfulFunctions.py": ["/Parser.py"], "/PlotAnalysis.py": ["/Parser.py", "/HelpfulFunctions.py"]}
|
40,102
|
TrevorDemille/Simple-Analysis-App
|
refs/heads/master
|
/PlotAnalysis.py
|
import numpy as np
import matplotlib.pyplot as plt
import Parser as Parser
from pH import *
from HelpfulFunctions import *
from Tkinter import *
#
#Written by Trevor Demille, summer 2016, for Goldner Biophysics Group
"""
GUI for the purpose of expediting the data analysis process for ratio data, DLS data,
and pH to ratio calculation. This will plot results for each of the possible analyses.
Data files must be in the same directory as the PlotAnalysis.py script.
"""
class myButtons:
    """Main GUI controller for the data-analysis app.

    Builds the left-hand column of buttons in the root window; each button
    opens a task-specific Tk window (general plotting, means, intensity
    ratios, pH calculation, CSV parsing) whose entries are forwarded to the
    analysis functions imported from HelpfulFunctions/pH/Parser.
    """
    def __init__(self, master):
        """Build the button column inside the given Tk root window."""
        frame1 = Frame(master)
        frame1.pack(side=LEFT, fill=Y)
        #Labels
        impLabel = Label(frame1, text='Imports', relief=SUNKEN, fg='white', bg='black')
        labelfont = ('times', 15, 'bold')
        buttonfont = ('times', 14)
        impLabel.config(font=labelfont)
        impLabel.pack(side=TOP, fill=X, expand=YES)
        #Entry selection window
        self.selectImports = Button(frame1, text='Import and Plot General',fg='blue',bd=3, command=self.selectImp)
        self.selectImports.config(width=25, height=3, font=buttonfont)
        self.selectImports.pack(side=TOP, padx=4, pady=2, anchor=W)
        #
        self.meanPlot = Button(frame1, text='Import and Plot Means', fg='blue',bd=3,command=self.meanImp)
        self.meanPlot.config(width=25, height=3, font=buttonfont)
        self.meanPlot.pack(side=TOP, padx=4, pady=2, anchor=W)
        #
        self.ratioBut = Button(frame1, text='Import and Plot Ratio Means',fg='blue', bd=3, command=self.plotRatioM)
        self.ratioBut.config(width=25, height=3, font=buttonfont)
        self.ratioBut.pack(side=TOP, padx=4, pady=2, anchor=W)
        #
        self.phBut = Button(frame1, text='Find pH', fg='blue', bd=3, command=self.ph_GUI)
        self.phBut.config(width=25, height=3, font=buttonfont)
        self.phBut.pack(side=TOP, padx=4, pady=2, anchor=W)
        #
        self.parsBut = Button(frame1, text='Parse CSV File', fg='blue', bd=3, command=self.parseGUI)
        self.parsBut.config(width=25, height=3, font=buttonfont)
        self.parsBut.pack(side=TOP, padx=4, pady=2, anchor=W)
    def parseGUI(self):
        """Open the window that collects a CSV directory/name for parsing."""
        root5 = Tk()
        root5.title('Parse CSV File')
        frameRoot = Frame(root5)
        #
        Label1 = Label(root5, text='Input CSV File Directory:')
        Label1.grid(row=0, column=0, padx=3, sticky=W)
        Label2 = Label(root5, text='Input CSV File Name:')
        Label2.grid(row=1, column=0, padx=3, sticky=W)
        #
        self.pEntry1 = Entry(root5, bd=4)
        self.pEntry1.grid(row=0, column=1)
        self.pEntry2 = Entry(root5, bd=4)
        self.pEntry2.grid(row=1, column=1)
        #
        self.pButton = Button(root5, text='Import File', fg='blue', command=self.parseIt)
        self.pButton.config(width=15)
        self.pButton.grid(row=1, column=2, padx=3)
    def ph_GUI(self):
        """Open the window that collects a ratio and its std. dev. for pH calculation."""
        root4 = Tk()
        root4.title('pH Calculator')
        frameRoot = Frame(root4)
        #
        Label1 = Label(root4, text='Input Ratio:')
        Label1.grid(row=0, column=0, padx=3, sticky=W)
        Label2 = Label(root4, text='Input Ratio Std. Dev.:')
        Label2.grid(row=1, column=0, padx=3, pady=5, sticky=W)
        #
        self.Rentry = Entry(root4, bd=4)
        self.Rentry.grid(row=0, column=1)
        self.Sentry = Entry(root4, bd=4)
        self.Sentry.grid(row=1, column=1, pady=5)
        #
        self.impBut1 = Button(root4, text='Import Ratio Data', fg='blue', command=self.solvepH_setup)
        self.impBut1.config(width=15)
        self.impBut1.grid(row=1, column=2, padx=3)
    def parseIt(self):
        """Forward the directory/name entries to parseFile, then clear them."""
        Dir = self.pEntry1.get()
        File = self.pEntry2.get()
        parseFile(Dir,File)
        self.pEntry1.delete(0,'end')
        self.pEntry2.delete(0,'end')
    def solvepH_setup(self):
        """Forward the ratio and std.-dev. entries to solvepH (entries are strings)."""
        R = self.Rentry.get()
        S = self.Sentry.get()
        solvepH(R,S)
    def plotRatioM(self):
        #GUI setup for taking means of fluorimeter ratio emission data taken at 514 & 550 nm.
        #GUI allows the input of text file titles for intensities, backgrounds, and plot axis labels
        root3 = Tk()
        root3.title('Intensity Ratio Data')
        frameRoot = Frame(root3)
        #
        labelR = Label(root3, text='Input D1 File to Import:')
        labelR.grid(row=0, column=0, padx=3, sticky=W)
        labelD2 = Label(root3, text='Input D2 File to Import:')
        labelD2.grid(row=1, column=0, padx=3, pady=5, sticky=W)
        labelB1 = Label(root3, text='Input D1 bkg File:')
        labelB1.grid(row=2, column=0, padx=3, pady=5, sticky=W)
        labelB2 = Label(root3, text='Input D2 bkg File:')
        labelB2.grid(row=3, column=0, padx=3, pady=5, sticky=W)
        labelR2 = Label(root3, text='Legend Text:')
        labelR2.grid(row=4, column=0, padx=3, pady=5, sticky=W)
        LabelR3 = Label(root3, text='pH Plot Title:')
        LabelR3.grid(row=5, column=0, padx=3, pady=5, sticky=W)
        labelR4 = Label(root3, text='Ratio Plot Title:')
        labelR4.grid(row=6, column=0, padx=3, pady=5, sticky=W)
        labelC = Label(root3, text='Concentration Values:')
        labelC.grid(row=0, column=2, padx=3, pady=5, sticky=W)
        #
        self.entryRD1 = Entry(root3, bd=4)
        self.entryRD1.grid(row=0, column=1)
        self.entryRD2 = Entry(root3, bd=4)
        self.entryRD2.grid(row=1, column=1, pady=5)
        self.entrybkg1 = Entry(root3, bd=4)
        self.entrybkg1.grid(row=2, column=1, pady=5)
        self.entrybkg2 = Entry(root3, bd=4)
        self.entrybkg2.grid(row=3, column=1, pady=5)
        self.entryR2 = Entry(root3, bd=4)
        self.entryR2.grid(row=4, column=1, pady=5)
        self.entryR3 = Entry(root3, bd=4)
        self.entryR3.grid(row=5, column=1, pady=5)
        self.entryR4 = Entry(root3, bd=4)
        self.entryR4.grid(row=6, column=1, pady=5)
        self.concNum = Spinbox(root3, from_=0, to=10, width=5, bd=3)
        self.concNum.grid(row=1, column=2, padx=3)
        #
        self.impButR = Button(root3, text='Import for Mean', fg='blue', command=self.getRatioM)
        self.impButR.config(width=15)
        self.impButR.grid(row=5, column=2, padx=3)
        #
        #Allow for the individual ratios to be examined from each file.
        self.impButR2 = Button(root3, text='Import Individual', fg='blue', command=self.getRatioI)
        self.impButR2.config(width=15)
        self.impButR2.grid(row=4, column=2, padx=3)
        #
        self.runIt = Button(root3, text='Plot Entry', command=self.runCode)
        self.runIt.config(width=15)
        self.runIt.grid(row=6, column=2, padx=3)
    def Ratio_Entry_Delete(self):
        #Clear out inputs after each function taking entries has been successfully completed.
        self.entryRD1.delete(0,'end')
        self.entryRD2.delete(0,'end')
        self.entrybkg1.delete(0,'end')
        self.entrybkg2.delete(0,'end')
        self.entryR2.delete(0,'end')
        self.entryR3.delete(0,'end')
        self.entryR4.delete(0,'end')
    def getRatioI(self):
        """Load the ratio-window entries and plot the individual ratios."""
        #Load in entry information and open text files
        E1 = np.loadtxt(self.entryRD1.get())
        E2 = np.loadtxt(self.entryRD2.get())
        E3 = np.loadtxt(self.entrybkg1.get())
        E4 = np.loadtxt(self.entrybkg2.get())
        E5 = self.concNum.get()
        if E5 != '':
            # Spinbox values come back as strings; convert before use.
            E5 = int(str(self.concNum.get()))
        E6 = self.entryR2.get()
        E7 = self.entryR4.get()
        #
        Indiv_Ratios(E1,E2,E3,E4,E5,E6,E7)
        #
        self.Ratio_Entry_Delete()
    def getRatioM(self):
        """Load the ratio-window entries and plot the mean ratios."""
        #Load in entry information and open text files
        E1 = np.loadtxt(self.entryRD1.get())
        E2 = np.loadtxt(self.entryRD2.get())
        E3 = np.loadtxt(self.entrybkg1.get())
        E4 = np.loadtxt(self.entrybkg2.get())
        E5 = self.concNum.get()
        if E5 != '':
            # Spinbox values come back as strings; convert before use.
            E5 = int(str(self.concNum.get()))
        E6 = self.entryR2.get()
        E7 = self.entryR4.get()
        E8 = self.entryR3.get()
        #
        Mean_Ratios(E1,E2,E3,E4,E5,E6,E7,E8)
        #
        self.Ratio_Entry_Delete()
    def selectImp(self):
        """Open the general-plot window (file, column indices, labels, legends)."""
        root1 = Tk()
        root1.title('Plotting Data Imports')
        frameRoot = Frame(root1)
        #
        label1 = Label(root1, text='Input File to Import:')
        label1.grid(row=0, column=0, padx=3, sticky=W)
        label2 = Label(root1, text='Column Index for X-axis Data:')
        label2.grid(row=1, column=0, padx=3, pady=5, sticky=W)
        label3 = Label(root1, text='Column Index for Y-axis Data:')
        label3.grid(row=2, column=0, padx=3, pady=5, sticky=W)
        label4 = Label(root1, text='Column Index for Std. Dev.:')
        label4.grid(row=3, column=0, padx=3, pady=5, sticky=W)
        labelColor = Label(root1, text='First Row Number for Second Data Set:')
        labelColor.grid(row=4, column=0, padx=3, pady=5, sticky=W)
        label5 = Label(root1, text='First Legend Text:')
        label5.grid(row=5, column=0, padx=3, pady=5, sticky=W)
        label6 = Label(root1, text='Second Legend Text:')
        label6.grid(row=6, column=0, padx=3, pady=5, sticky=W)
        label7 = Label(root1, text='X-axis Label:')
        label7.grid(row=7, column=0, padx=3, pady=5, sticky=W)
        label8 = Label(root1, text='Y-axis Label:')
        label8.grid(row=8, column=0, padx=3, pady=5, sticky=W)
        label9 = Label(root1, text='Plot Title:')
        label9.grid(row=9, column=0, padx=3, pady=5, sticky=W)
        #
        self.entry1 = Entry(root1, bd=4)
        self.entry1.grid(row=0, column=1)
        self.entry2 = Entry(root1, bd=4)
        self.entry2.grid(row=1, column=1, pady=5)
        self.entry3 = Entry(root1, bd=4)
        self.entry3.grid(row=2, column=1, pady=5)
        self.entry4 = Entry(root1, bd=4)
        self.entry4.grid(row=3, column=1, pady=5)
        self.entryColor = Entry(root1, bd=4)
        self.entryColor.grid(row=4, column=1, pady=5)
        self.entry5 = Entry(root1, bd=4)
        self.entry5.grid(row=5, column=1, pady=5)
        self.entryL2 = Entry(root1, bd=4)
        self.entryL2.grid(row=6, column=1, pady=5)
        self.entry6 = Entry(root1, bd=4)
        self.entry6.grid(row=7, column=1, pady=5)
        self.entry7 = Entry(root1, bd=4)
        self.entry7.grid(row=8, column=1, pady=5)
        self.entry8 = Entry(root1, bd=4)
        self.entry8.grid(row=9, column=1, pady=5)
        #
        self.impBut1 = Button(root1, text='Import', fg='blue', command=self.getInput1)
        self.impBut1.grid(row=0, column=3, sticky=N)
        #
        self.runIt = Button(root1, text='Plot Entries', command=self.runCode)
        self.runIt.grid(row=9, column=3, padx=3, pady=5, sticky=E)
    def getInput1(self):
        """Collect the general-plot entries, call plot_Gen, and clear the fields."""
        e1 = self.entry1.get()
        e2 = self.entry2.get()
        e3 = self.entry3.get()
        e4 = self.entry4.get()
        e5 = self.entry5.get()
        e6 = self.entry6.get()
        e7 = self.entry7.get()
        e8 = self.entry8.get()
        L2 = self.entryL2.get()
        eColor = self.entryColor.get()
        if eColor != '':
            # Entry widgets return strings; the row index must be an int.
            eColor = int(str(self.entryColor.get()))
        #
        plot_Gen(e1,e2,e3,e4,e5,e6,e7,e8,L2,eColor)
        #
        self.entry1.delete(0,'end')
        self.entry2.delete(0,'end')
        self.entry3.delete(0,'end')
        self.entry4.delete(0,'end')
        self.entry5.delete(0,'end')
        self.entry6.delete(0,'end')
        self.entry7.delete(0,'end')
        self.entry8.delete(0,'end')
        self.entryL2.delete(0,'end')
        self.entryColor.delete(0,'end')
    def meanImp(self):
        """Open the window that collects a file and labels for mean plotting."""
        root2 = Tk()
        root2.title('Mean Data Imports')
        frameRootM = Frame(root2)
        #
        labelM = Label(root2, text='Input File to Import:')
        labelM.grid(row=0, column=0, padx=3, sticky=W)
        labelM2 = Label(root2, text='Legend Text:')
        labelM2.grid(row=1, column=0, padx=3, pady=5, sticky=W)
        labelM3 = Label(root2, text='X-axis Label:')
        labelM3.grid(row=2, column=0, padx=3, pady=5, sticky=W)
        labelM4 = Label(root2, text='Y-axis Label:')
        labelM4.grid(row=3, column=0, padx=3, pady=5, sticky=W)
        labelM5 = Label(root2, text='Plot Title:')
        labelM5.grid(row=4, column=0, padx=3, pady=5, sticky=W)
        #
        self.entryM = Entry(root2, bd=4)
        self.entryM.grid(row=0, column=1)
        self.entryM2 = Entry(root2, bd=4)
        self.entryM2.grid(row=1, column=1, pady=5)
        self.entryM3 = Entry(root2, bd=4)
        self.entryM3.grid(row=2, column=1, pady=5)
        self.entryM4 = Entry(root2, bd=4)
        self.entryM4.grid(row=3, column=1, pady=5)
        self.entryM5 = Entry(root2, bd=4)
        self.entryM5.grid(row=4, column=1, pady=5)
        #
        self.impButM = Button(root2, text='Import', fg='blue', command=self.getMean)
        self.impButM.grid(row=0, column=2, padx=3, sticky=W)
        #
        self.runIt = Button(root2, text='Plot Entry', command=self.runCode)
        self.runIt.grid(row=4, column=2, sticky=W)
    def getMean(self):
        """Collect the mean-plot entries, call mean_Gen, and clear the fields."""
        meanFile = self.entryM.get()
        Leg = self.entryM2.get()
        xLab = self.entryM3.get()
        yLab = self.entryM4.get()
        Titl = self.entryM5.get()
        #
        mean_Gen(meanFile,Leg,xLab,yLab,Titl)
        #
        self.entryM.delete(0,'end')
        self.entryM2.delete(0,'end')
        self.entryM3.delete(0,'end')
        self.entryM4.delete(0,'end')
        self.entryM5.delete(0,'end')
    def runCode(self):
        """Show all figures queued by the plotting functions."""
        plt.show()
# Create the main application window, attach the button controller, and
# enter the Tk event loop until the user quits the program.
root = Tk()
root.title('Data Plotter')
app = myButtons(root)
root.mainloop()
|
{"/HelpfulFunctions.py": ["/Parser.py"], "/PlotAnalysis.py": ["/Parser.py", "/HelpfulFunctions.py"]}
|
40,103
|
TrevorDemille/Simple-Analysis-App
|
refs/heads/master
|
/Parser.py
|
#
#Written by Trevor Demille, summer 2016, for Goldner Biophysics Group
"""
Parser designed to keep only the intensity data from D1 and D2 data files taken from the fluorimeter
and put into an excel sheet saved as a csv file.
"""
import numpy as np
#
def Parser(Dir, File):
    """Parse a fluorimeter CSV export, keeping only the intensity values.

    Each input line alternates label,value fields; this keeps every second
    field (the values), then regroups them into sets of five readings where
    each third group appears to be a background measurement.

    Appends results to three files in the working directory:
      JustData.csv  - all values with labels stripped (one line per input line)
      IntenData.csv - the two fluorescence groups per triplet, joined per row
      BkgData.csv   - the background groups

    Parameters
    ----------
    Dir : str -- directory containing the CSV file.
    File : str -- CSV file name.
    """
    import os
    # Bug fix: the original built the path with np.append(Dir, File), which
    # returns an ndarray that open() cannot use; join the components instead.
    fileName = os.path.join(Dir, File)
    # Bug fix: the success flags were unbound when a loop never ran.
    Success1 = False
    Success2 = False
    #
    with open(fileName) as F:
        for line in F:
            # Keep every second field: the export alternates label,value.
            A = line.strip().split(',')
            data = A[1::2]
            Str = ','.join(data)
            with open('JustData.csv', 'a+') as F2:
                # Terminate each line explicitly (the original relied on a
                # stray newline captured inside the last field).
                F2.write(Str + '\n')
    #
    with open('JustData.csv', 'r+') as F3:
        for row in F3:
            B = np.array(row.strip().split(','))
            # Readings come in groups of five per measurement.
            fiveData = B.reshape(-1, 5)
            #
            # Every third group is treated as a background reading.
            bkg = fiveData[2::3]
            bkgArr = np.concatenate(bkg[:], axis=0)
            bkgStr = ','.join(bkgArr)
            #
            # The remaining two groups per triplet are the D1/D2 intensities.
            flr1 = fiveData[0::3]
            flr2 = fiveData[1::3]
            flrArray = np.array([np.append(f1, f2) for f1, f2 in zip(flr1, flr2)])
            flrList = np.concatenate(flrArray[:], axis=0)
            flrData = ','.join(flrList)
            with open('IntenData.csv', 'a+') as F4:
                F4.write(flrData)
                F4.write('\n')
            Success1 = True
            with open('BkgData.csv', 'a+') as F5:
                F5.write(bkgStr)
            Success2 = True
    #
    # Bug fix: '&&' is not Python (use 'and') and the success message was an
    # unquoted name, both of which were syntax errors.
    if Success1 and Success2:
        print('\n')
        print('Parse Successful')
        print('\n')
|
{"/HelpfulFunctions.py": ["/Parser.py"], "/PlotAnalysis.py": ["/Parser.py", "/HelpfulFunctions.py"]}
|
40,109
|
alexflint/spline-initialization
|
refs/heads/master
|
/trifocal.py
|
import numpy as np
from lie import SO3
import geometry
def normalized(x):
    """Return x scaled to unit Euclidean length."""
    v = np.asarray(x)
    return v / np.linalg.norm(v)
def main():
    """Build a random 3-view scene and check the trifocal transfer residual.

    NOTE(review): this is Python 2 code (`print` statement; `map` returning
    a list that is indexed below).
    """
    num_landmarks = 5
    num_frames = 3
    landmarks = np.random.randn(num_landmarks, 3)
    positions = np.random.randn(num_frames, 3)
    # Py2 map() returns a list, which is mutated by index just below.
    orientations = map(SO3.exp, np.random.randn(num_frames, 3))
    # Anchor the first camera at the origin with identity orientation.
    positions[0,:] = 0
    orientations[0] = np.eye(3)
    # 3x4 pose matrices [R | -R p] for each frame.
    poses = [np.hstack((r, -np.dot(r,p)[:,np.newaxis])) for r, p in zip(orientations, positions)]
    # Unit-norm viewing rays of every landmark in every frame.
    features = [[normalized(np.dot(r, x-p)) for r, p in zip(orientations, positions)]
                for x in landmarks]
    a = poses[1].T
    b = poses[2].T
    # Trifocal tensor slices T_i = a_i b_4^T - a_4 b_i^T.
    slices = [np.outer(a[i], b[3]) - np.outer(a[3], b[i]) for i in range(3)]
    x0, xa, xb = features[0]
    middle = sum(x0i*slice for x0i, slice in zip(x0, slices))
    # Should be (numerically) zero for consistent features.
    residual = np.dot(geometry.skew(xa), np.dot(middle, geometry.skew(xb)))
    print 'Trifocal residual:', np.linalg.norm(residual)
if __name__ == '__main__':
    # Run the demonstration only when executed as a script.
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,110
|
alexflint/spline-initialization
|
refs/heads/master
|
/triangulation.py
|
import numpy as np
import utils
def householder(x):
    """Return a 2x3 matrix whose rows are orthogonal to x and to each other."""
    assert len(x) == 3, 'x=%s' % x
    assert np.linalg.norm(x) > 1e-8
    # Helper direction: the coordinate axis least aligned with x.
    axis = (np.arange(3) == np.argmin(np.abs(x))).astype(float)
    first = utils.normalized(np.cross(x, axis))
    second = utils.normalized(np.cross(x, first))
    return np.array([first, second])
def calibrated(z, k):
    """Return the unit-norm calibrated ray for image feature z under camera matrix k."""
    assert k.shape == (3, 3)
    assert len(z) == 2
    ray = np.linalg.solve(k, utils.unpr(z))
    return utils.normalized(ray)
def triangulate_midpoint(features, frame_orientations, frame_positions, imu_to_camera, camera_matrix):
    """Triangulate a landmark from two or more views using the midpoint method."""
    assert len(features) > 0
    # Accumulate the normal equations over all observations.
    normal_mat = np.zeros((3, 3))
    normal_vec = np.zeros(3)
    for feature in features:
        rot = frame_orientations[feature.frame_id]
        pos = frame_positions[feature.frame_id]
        # Two directions orthogonal to this feature's viewing ray.
        h = householder(calibrated(feature.position, camera_matrix))
        a = utils.dots(h, imu_to_camera, rot)
        b = -utils.dots(h, imu_to_camera, rot, pos)
        normal_mat += np.dot(a.T, a)
        normal_vec += np.dot(a.T, b)
    return -np.linalg.solve(normal_mat, normal_vec)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,111
|
alexflint/spline-initialization
|
refs/heads/master
|
/bezier.py
|
import numpy as np
def repeat_diag(x, k):
    """Horizontally stack a k-by-k identity matrix scaled by each entry of x."""
    blocks = [xi * np.eye(k, dtype=x.dtype) for xi in x]
    return np.hstack(blocks)
def bezier(params, t):
    """Evaluate a Bezier curve with the given control values at time t (0 to 1)."""
    coefs = bezier_coefs(t, len(params) - 1)
    return np.dot(coefs, params)
def bezier_coefs(t, order):
    """Return the Bernstein basis coefficients of the given order at time t (0 to 1)."""
    if order == 0:
        return np.array([1.])
    # Recursive de Casteljau-style blend of the lower-order coefficients.
    prev = bezier_coefs(t, order - 1)
    return (1. - t) * np.hstack((prev, 0)) + t * np.hstack((0, prev))
def bezier_deriv(params, t):
    """First time-derivative of the Bezier curve at t."""
    coefs = bezier_deriv_coefs(t, len(params) - 1)
    return np.dot(coefs, params)
def bezier_deriv_coefs(t, order):
    """Coefficients of the first derivative of the order-n Bernstein basis at t."""
    if order == 0:
        return np.array([0.])
    c = bezier_coefs(t, order - 1)
    dc = bezier_deriv_coefs(t, order - 1)
    # Product rule applied to the recursive blend in bezier_coefs.
    return (1. - t) * np.hstack((dc, 0)) + t * np.hstack((0, dc)) + np.hstack((0, c)) - np.hstack((c, 0))
def bezier_second_deriv(params, t):
    """Second time-derivative of the Bezier curve at t."""
    coefs = bezier_second_deriv_coefs(t, len(params) - 1)
    return np.dot(coefs, params)
def bezier_second_deriv_coefs(t, order):
    """Coefficients of the second derivative of the order-n Bernstein basis at t."""
    if order == 0:
        return np.array([0.])
    dc = bezier_deriv_coefs(t, order - 1)
    ddc = bezier_second_deriv_coefs(t, order - 1)
    # Second application of the product rule to the recursive blend.
    return (1. - t) * np.hstack((ddc, 0)) + t * np.hstack((0, ddc)) + 2 * (np.hstack((0, dc)) - np.hstack((dc, 0)))
def zero_offset_bezier(params, t):
    """Evaluate a Bezier curve constrained to start at zero (first control point dropped)."""
    coefs = zero_offset_bezier_coefs(t, len(params))
    return np.dot(coefs, params)
def zero_offset_bezier_coefs(t, order):
    """Bernstein coefficients with the leading (offset) term dropped."""
    full = bezier_coefs(t, order)
    return full[1:]
def zero_offset_bezier_mat(t, order, ndims):
    """Block-matrix form of the zero-offset coefficients for ndims-dimensional control points."""
    coefs = zero_offset_bezier_coefs(t, order)
    return repeat_diag(coefs, ndims)
def zero_offset_bezier_deriv(params, t):
    """First derivative of the zero-offset Bezier curve at t."""
    coefs = zero_offset_bezier_deriv_coefs(t, len(params))
    return np.dot(coefs, params)
def zero_offset_bezier_deriv_coefs(t, order):
    """First-derivative coefficients with the leading term dropped."""
    full = bezier_deriv_coefs(t, order)
    return full[1:]
def zero_offset_bezier_second_deriv(params, t):
    """Second derivative of the zero-offset Bezier curve at t."""
    coefs = zero_offset_bezier_second_deriv_coefs(t, len(params))
    return np.dot(coefs, params)
def zero_offset_bezier_second_deriv_coefs(t, order):
    """Second-derivative coefficients with the leading term dropped."""
    full = bezier_second_deriv_coefs(t, order)
    return full[1:]
def zero_offset_bezier_second_deriv_mat(t, order, ndims):
    """Block-matrix form of the zero-offset second-derivative coefficients."""
    coefs = zero_offset_bezier_second_deriv_coefs(t, order)
    return repeat_diag(coefs, ndims)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,112
|
alexflint/spline-initialization
|
refs/heads/master
|
/socp.py
|
import time
import numpy as np
import cvxopt as cx
import mosek
import lie
import geometry
# Module-level record of solver wall-clock durations, keyed by event name
# (written by solve() below).
timings = {}
class SocpConstraint(object):
    """A single second-order cone constraint: ||A x + b|| <= c.x + d.

    Any omitted term defaults to zero; when `a` is omitted, `b` and `c`
    must both be supplied so the matrix dimensions are known.
    """
    def __init__(self, a=None, b=None, c=None, d=None):
        if a is None:
            # Bug fix: np.zeros takes a shape *tuple*; the original call
            # np.zeros(len(b), len(c)) passed len(c) as the dtype argument
            # and raised whenever `a` was defaulted.
            a = np.zeros((len(b), len(c)))
        if b is None:
            b = np.zeros(len(a))
        if c is None:
            c = np.zeros(len(a[0]))
        if d is None:
            d = 0.
        assert np.ndim(a) == 2
        assert np.ndim(b) == 1
        assert np.ndim(c) == 1
        assert np.shape(a) == (len(b), len(c))
        self.a = np.asarray(a)
        self.b = np.asarray(b)
        self.c = np.asarray(c)
        self.d = float(d)
    def conditionalize(self, mask, values):
        """Given a constraint over x1...xn, return a new constraint over a subset of the variables given fixed values
        for the remaining variables."""
        assert len(values) == sum(mask)
        mask = np.asarray(mask)
        values = np.asarray(values)
        # Fold the fixed columns of A into b, and the fixed entries of c into d.
        a = self.a[:, ~mask]
        b = self.b + np.dot(self.a[:, mask], values)
        c = self.c[~mask]
        d = self.d + float(np.dot(self.c[mask], values))
        return SocpConstraint(a, b, c, d)
    def conditionalize_at_zero(self, mask):
        """Like conditionalize, with the masked variables fixed at zero
        (so b and d are unchanged)."""
        mask = np.asarray(mask)
        return SocpConstraint(self.a[:, ~mask], self.b, self.c[~mask], self.d)
    def lhs(self, x):
        """Norm side of the cone: ||A x + b||."""
        return np.linalg.norm(np.dot(self.a, x) + self.b)
    def rhs(self, x):
        """Linear side of the cone: c.x + d."""
        return np.dot(self.c, x) + self.d
    def is_satisfied(self, x):
        """True when x lies inside or on the cone."""
        return self.lhs(x) <= self.rhs(x)
class SocpProblem(object):
    """A second-order cone program: minimize objective . x subject to a
    list of SocpConstraint instances.

    NOTE(review): Python 2 code (`print` statements in evaluate()).
    """
    def __init__(self, objective, constraints=None):
        self.objective = np.asarray(objective)
        self.constraints = constraints or []
    def add_constraint(self, *args, **kwargs):
        """Append a new SocpConstraint built from the given arguments."""
        self.constraints.append(SocpConstraint(*args, **kwargs))
    def conditionalize(self, mask, values=None):
        """Fix the masked variables (at `values`, or at zero) and return the
        reduced problem over the remaining variables."""
        mask = np.asarray(mask)
        if values is None:
            return SocpProblem(self.objective[~mask], [x.conditionalize_at_zero(mask) for x in self.constraints])
        else:
            return SocpProblem(self.objective[~mask], [x.conditionalize(mask, values) for x in self.constraints])
    def conditionalize_indices(self, var_indices, values=None):
        """Like conditionalize, with the fixed variables given by index."""
        if values is not None:
            assert len(var_indices) == len(values)
        mask = np.zeros(len(self.objective), bool)
        mask[np.array(var_indices)] = True
        return self.conditionalize(mask, values)
    def evaluate(self, x, verbose=False):
        """Print the objective at x and report which constraints are violated."""
        print 'Objective:', np.dot(self.objective, x)
        lhs = np.array([constraint.lhs(x) for constraint in self.constraints])
        rhs = np.array([constraint.rhs(x) for constraint in self.constraints])
        num_violated = np.sum(lhs > rhs)
        if verbose or num_violated > 0:
            # NOTE: the loop deliberately re-binds lhs/rhs to per-constraint
            # scalars; the arrays are not needed afterwards.
            for i, (lhs, rhs) in enumerate(zip(lhs, rhs)):
                label = 'satisfied' if (lhs <= rhs) else 'not satisfied'
                print ' Constraint %d: %s (lhs=%.8f, rhs=%.8f)' % (i, label, lhs, rhs)
        if num_violated == 0:
            print ' All constraints satisfied'
        else:
            print ' Not satisfied (%d constraints violated)' % num_violated
def solve(problem, sparse=False, **kwargs):
    """Minimize w*x subject to ||Ax + b|| <= c*x + d."""
    # Translate each constraint into cvxopt's cone form (Gq, hq): the first
    # row carries the linear rhs and the remaining rows the norm argument.
    gs = []
    hs = []
    for constraint in problem.constraints:
        a = constraint.a
        b = constraint.b
        c = constraint.c
        d = constraint.d
        g = np.vstack((-c, -a))
        hs.append(cx.matrix(np.hstack((d, b))))
        if sparse:
            gs.append(cx.sparse(cx.matrix(g)))
        else:
            gs.append(cx.matrix(g))
    begin = time.clock()
    # Extra keyword arguments are forwarded straight to cvxopt's options.
    cx.solvers.options.update(kwargs)
    cx.solvers.options['MOSEK'] = {mosek.iparam.log: 100, mosek.iparam.intpnt_max_iterations: 50000}
    solution = cx.solvers.socp(cx.matrix(problem.objective), Gq=gs, hq=hs, solver='mosek')
    duration = time.clock() - begin
    # duration = solution['duration']
    # Record the wall-clock time for later inspection by callers.
    timings['last_solve'] = duration
    print 'SOCP duration: %.3f' % duration
    print 'Total duration (including python wrappers): %.3f' % duration
    print 'Solver exited with status "%s"' % solution['status']
    return solution
def run_2d_circle_problem():
    """Sanity check for solve(): a 2-D problem with two disk constraints,
    minimizing [0, -1].x (i.e. maximizing the y coordinate)."""
    constraints = [
        SocpConstraint(np.eye(2), np.zeros(2), np.zeros(2), 3.),
        SocpConstraint(np.eye(2), [2, 0], np.zeros(2), 3.)
    ]
    problem = SocpProblem([0., -1.], constraints)
    sol = solve(problem)
    print sol['x']
def run_sfm():
    """Tiny two-camera, four-point SfM experiment solved as an SOCP.

    Builds one cone constraint per (point, camera) observation, verifies
    feasibility at the ground-truth variables, conditions out the known
    camera positions, and solves for the 3D points.
    """
    num_points = 4
    num_frames = 2
    # Variable layout: [camera positions (num_frames*3) | points (num_points*3)].
    num_vars = num_points * 3 + num_frames * 3
    r0 = np.eye(3)
    p0 = np.zeros(3)
    r1 = lie.SO3.exp([.1, .2, .3])
    p1 = np.array([2., 3., 0.])
    rs = [r0, r1]
    ps = [p0, p1]
    xs = np.random.randn(num_points, 3)
    vars = np.hstack(list(ps) + list(xs))
    # Reprojection tolerance scale for the cone constraints.
    gamma = 1e-6
    problem = SocpProblem(np.ones(num_vars), [])
    for i, x in enumerate(xs):
        for j, (r, p) in enumerate(zip(rs, ps)):
            z = geometry.pr(np.dot(r, x-p))
            position_offset = j*3
            point_offset = num_frames*3 + i*3
            a = np.zeros((2, num_vars))
            a[:, position_offset:position_offset+3] = np.outer(z, r[2]) - r[:2]
            a[:, point_offset:point_offset+3] = r[:2] - np.outer(z, r[2])
            # Sign flip presumably keeps the rhs (depth term) non-negative
            # at the true solution -- TODO confirm.
            sign = 1. if np.dot(r[2], x-p) >= 0 else -1.
            c = np.zeros(num_vars)
            c[position_offset:position_offset+3] = -sign * gamma * r[2]
            c[point_offset:point_offset+3] = sign * gamma * r[2]
            b = np.zeros(2)
            d = 0.
            ax = np.dot(a, vars)
            cx = np.dot(c, vars)
            print 'Point %d, camera %d:' % (i, j)
            print ' ax=', ax
            print ' cx=', cx
            problem.constraints.append(SocpConstraint(a, b, c, d))
    # Check the ground truth satisfies every constraint before solving.
    problem.evaluate(vars)
    #return
    # Fix the first six variables (the two camera positions) at their true values.
    structure_problem = problem.conditionalize(np.arange(num_vars) < 6, np.hstack(ps))
    sol = solve(structure_problem)
    print sol['x']
    if sol['x'] is None:
        print 'Solution not found'
    else:
        estimated_xs = np.array(sol['x']).reshape((-1, 3))
        print 'True:'
        print xs
        print 'Estimated:'
        print estimated_xs
        print 'Errors:'
        print xs - estimated_xs
if __name__ == '__main__':
    # Suppress scientific notation in printed arrays for readability.
    np.set_printoptions(suppress=True)
    #run_2d_circle_problem()
    run_sfm()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,113
|
alexflint/spline-initialization
|
refs/heads/master
|
/lie.py
|
import numpy as np
def skew(m):
    """Return the 3x3 skew-symmetric (cross-product) matrix of a 3-vector."""
    m = np.asarray(m)
    assert m.shape == (3,), 'shape was was %s' % str(m.shape)
    mx, my, mz = m
    return np.array([[0., -mz, my],
                     [mz, 0., -mx],
                     [-my, mx, 0.]])
class LieGroup(object):
    """Abstract base class for matrix Lie groups.

    Concrete subclasses (e.g. SO3, SE3) supply identity(), dot(), exp(),
    unpack() and dimensions(); this base provides chart factories and
    operator sugar for group multiplication.
    """
    # Returns a function f: m -> R0 * exp(m)
    @classmethod
    def right_chart(cls, *args):
        if len(args) == 0:
            X0 = cls.identity()
        else:
            X0 = cls.unpack(*args)
        f = lambda x: cls.dot(X0, cls.exp(x))
        # Record the chart's domain dimension on the function object itself.
        f.dimensions = cls.dimensions()
        return f
    # Returns a function f: m -> exp(m) * R0
    @classmethod
    def left_chart(cls, *args):
        if len(args) == 0:
            X0 = cls.identity()
        else:
            X0 = cls.unpack(*args)
        f = lambda x: cls.dot(cls.exp(x), X0)
        f.dimensions = cls.dimensions()
        return f
    # Returns a function f: m -> exp(-m) * R0
    # (comment fixed: the original said exp(m) * R0, but the code negates m)
    @classmethod
    def left_neg_chart(cls, *args):
        if len(args) == 0:
            X0 = cls.identity()
        else:
            X0 = cls.unpack(*args)
        f = lambda x: cls.dot(cls.exp(-x), X0)
        f.dimensions = cls.dimensions()
        return f
    # Returns a function f: m -> X0 * exp(m)
    @classmethod
    def chart(cls, X0):
        return cls.right_chart(X0)
    def inv(self):
        """Group inverse of this element."""
        return self.__class__.inverse(self)
    def __mul__(self, x):
        try:
            return self.__class__.dot(self, x)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            return NotImplemented
    def __rmul__(self, x):
        try:
            return self.__class__.dot(x, self)
        except Exception:
            # Bug fix: narrowed from a bare except (see __mul__).
            return NotImplemented
    def __repr__(self):
        return repr(self.mat).replace('array', self.__class__.__name__)
    def __str__(self):
        return str(self.mat)
class SO3(LieGroup):
    """The rotation group SO(3), represented internally by a 3x3 matrix."""
    # Basis of the Lie algebra so(3): generators[i] is the skew matrix of e_i.
    generators = np.array([[[ 0., 0., 0. ],
                            [ 0., 0., -1. ],
                            [ 0., 1., 0. ]],
                           [[ 0., 0., 1. ],
                            [ 0., 0., 0. ],
                            [ -1., 0., 0. ]],
                           [[ 0., -1., 0. ],
                            [ 1., 0., 0. ],
                            [ 0., 0., 0. ]]])
    def __init__(self, *args):
        '''Construct an SO3 element from a rotation matrix.'''
        # TODO: permit quaternions as input to this function
        if len(args) == 0:
            self._R = np.eye(3)
        else:
            self._R = SO3.unpack(*args)
    @property
    def mat(self):
        # The underlying 3x3 rotation matrix.
        return self._R
    @property
    def T(self):
        # For a rotation, the transpose equals the inverse.
        return self.inv()
    # Manifold dimensionality
    @classmethod
    def dimensions(cls):
        return 3
    # Group identity
    @classmethod
    def identity(cls):
        return cls.pack(np.eye(3))
    # Group multiplication
    @classmethod
    def dot(cls, X, Y):
        return np.dot(cls.unpack(X), cls.unpack(Y))
    # Group inverse element
    @classmethod
    def inverse(cls, X):
        return cls.unpack(X).T
    # Compute the mapping from so(3) to SO(3)
    @classmethod
    def exp(cls, m):
        """Rodrigues formula, with a Taylor fallback for small angles."""
        m = np.asarray(m)
        assert np.shape(m) == (3,), 'SO3.exp(m) called with m='+str(np.shape(m))
        tsq = np.dot(m,m)
        if tsq < 1e-8:
            # Taylor expansion of sin(sqrt(x))/sqrt(x):
            # http://www.wolframalpha.com/input/?i=sin(sqrt(x))/sqrt(x)
            a = 1. - tsq/6. + tsq*tsq/120.;
            # Taylor expansion of (1 - cos(sqrt(x))/x:
            # http://www.wolframalpha.com/input/?i=(1-cos(sqrt(x)))/x
            b = .5 - tsq/24. + tsq*tsq/720.;
        else:
            t = np.sqrt(tsq)
            a = np.sin(t)/t
            b = (1. - np.cos(t)) / tsq
        M = skew(m)
        return cls.pack(np.eye(3) + a*M + b*np.dot(M,M))
    # Compute the mapping from SO(3) to so(3)
    @classmethod
    def log(cls, R):
        """Inverse of exp, branching on the trace for numerical stability."""
        R = cls.unpack(R)
        # http://math.stackexchange.com/questions/83874/
        t = R.trace()
        r = np.array(( R[2,1] - R[1,2],
                       R[0,2] - R[2,0],
                       R[1,0] - R[0,1] ))
        if t >= 3. - 1e-8:
            # Near-identity rotation: series expansion.
            return (.5 - (t-3.)/12.) * r
        elif t > -1. + 1e-8:
            # Generic case.
            th = np.arccos(t/2. - .5)
            return th / (2. * np.sin(th)) * r
        else:
            # Rotation by ~pi: recover the axis from the diagonal.
            assert t <= -1. + 1e-8
            a = np.argmax(R[ np.diag_indices_from(R) ])
            b = (a+1) % 3
            c = (a+2) % 3
            s = np.sqrt(R[a,a] - R[b,b] - R[c,c] + 1.)
            v = np.empty(3)
            v[a] = s/2.
            v[b] = (R[b,a] + R[a,b]) / (2.*s)
            v[c] = (R[c,a] + R[a,c]) / (2.*s)
            return v / np.linalg.norm(v)
    # Compute jacobian of exp(m)*x with respect to m, evaluated at
    # m=[0,0,0]. x is assumed constant with respect to m.
    @classmethod
    def J_expm_x(cls, x):
        return skew(-x)
    # Return the generators times x
    @classmethod
    def generator_field(cls, x):
        return skew(x)
    @classmethod
    def pack(cls, X):
        # NOTE: pack/unpack currently pass plain ndarrays through rather
        # than wrapping them in SO3 instances.
        assert isinstance(X, np.ndarray) and X.shape == (3,3)
        return X
    @classmethod
    def unpack(cls, X):
        """Coerce an SO3 instance or 3x3 ndarray to a 3x3 ndarray."""
        assert isinstance(X, np.ndarray) or isinstance(X, SO3)
        if isinstance(X, SO3):
            return X.mat
        else:
            assert X.shape == (3,3)
            return X
class SE3(LieGroup):
    """The rigid-transform group SE(3), stored as a rotation R and translation t."""
    def __init__(self, *args):
        if len(args) == 0:
            # Bug fix: SO3 has no eye() method; its identity is identity().
            self._R = SO3.identity()
            self._t = np.zeros(3)
        else:
            self._R, self._t = SE3.unpack(*args)
    @property
    def mat(self):
        # The 3x4 matrix [R | t].
        return np.hstack((self.R, self.t[:,np.newaxis]))
    @property
    def R(self):
        return self._R
    @property
    def t(self):
        return self._t
    @property
    def Rt(self):
        return (self.R, self.t)
    # Manifold dimensionality
    @classmethod
    def dimensions(cls):
        return 6
    # Compute the identity
    @classmethod
    def identity(cls):
        return cls.pack(SO3.identity(), np.zeros(3))
    # Compute the group multiplication operation
    @classmethod
    def dot(cls, X, Y):
        RX,tx = cls.unpack(X)
        RY,ty = cls.unpack(Y)
        return cls.pack(SO3.dot(RX,RY), np.dot(RX,ty) + tx)
    @classmethod
    def inverse(cls, X):
        R,t = cls.unpack(X)
        return cls.pack(SO3.inverse(R), -np.dot(SO3.inverse(R), t))
    # Mapping from se(3) to SE(3)
    @classmethod
    def exp(cls, x):
        x = np.asarray(x)
        assert np.shape(x) == (6,), 'shape was '+str(x.shape)
        # First three components are the rotation part, last three the translation.
        return cls.pack(SO3.exp(x[:3]), x[3:])
    # Mapping from SE(3) to se(3)
    @classmethod
    def log(cls, X):
        R,t = cls.unpack(X)
        return np.hstack((SO3.log(R), t))
    @classmethod
    def pack(cls, R, t):
        return SE3(R,t)
    @classmethod
    def unpack(cls, *args):
        """Coerce (R, t), SE3, SO3, tuple, or 3x3/3x4/4x4 matrix input to an (R, t) pair."""
        assert len(args) in (1,2)
        if len(args) == 2:
            assert np.shape(args[1]) == (3,), 'args='+str(args)
            return SO3.unpack(args[0]), np.asarray(args[1])
        elif len(args) == 1:
            arg = args[0]
            if isinstance(arg, SE3):
                return arg.Rt
            elif isinstance(arg, SO3):
                # Bug fix: SO3 exposes .mat, not .matrix.
                return arg.mat, np.zeros(3)
            if isinstance(arg, tuple):
                assert len(arg) == 2 and np.shape(arg[1]) == (3,)
                return SO3.unpack(arg[0]), np.asarray(arg[1])
            elif isinstance(arg, np.ndarray):
                # We have a matrix. Permitted shapes are 3x3, 3x4, 4x4
                assert arg.shape == (3,3) or arg.shape == (3,4) or arg.shape == (4,4)
                if arg.shape == (3,3):
                    return arg, np.zeros(3)
                if arg.shape == (3,4):
                    return arg[:,:3], arg[:,3]
                elif arg.shape == (4,4):
                    # last row must be (0 0 0 1)
                    assert np.linalg.norm(arg[3] - (0,0,0,1)) < 1e-8
                    return arg[:3,:3], arg[:3,3]
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,114
|
alexflint/spline-initialization
|
refs/heads/master
|
/plot_errors_vs_trial.py
|
import numpy as np
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported, so
# figures can be saved without a display.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
def _plot_error_kde(errs_socp, errs_linear, xlabel, output, colors, xticks, xtick_labels):
    """Draw overlaid log10-scale KDEs for the SOCP and linear estimators and save to OUTPUT."""
    c1, c2 = colors
    plt.clf()
    sns.kdeplot(np.log10(errs_socp), shade=True, color=c1, label='SOCP')
    sns.kdeplot(np.log10(errs_linear), shade=True, color=c2, label='Linear')
    plt.xlabel(xlabel)
    plt.xticks(xticks, xtick_labels)
    plt.ylabel('Frequency')
    plt.yticks([])
    plt.xlim(-4, 1)
    plt.savefig(output)


def plot_accuracy_histograms():
    """Plot log-scale error distributions for position, velocity, accel bias
    and gravity, comparing the SOCP and linear estimators.

    Reads results/accuracy_comparison.txt (rows of [socp, linear] x
    [pos, vel, bias, gravity] errors) and writes one PDF per quantity
    into figures/.
    """
    results = np.loadtxt('results/accuracy_comparison.txt').reshape((-1, 2, 4))
    pos_errs_socp, pos_errs_linear = results[:, :, 0].T
    vel_errs_socp, vel_errs_linear = results[:, :, 1].T
    bias_errs_socp, bias_errs_linear = results[:, :, 2].T
    g_errs_socp, g_errs_linear = results[:, :, 3].T
    colors = sns.color_palette("Set1", 2)
    xticks = range(-4, 1)
    xtick_labels = ['$10^{%d}$' % x for x in xticks]
    _plot_error_kde(pos_errs_socp, pos_errs_linear,
                    'Device position error (m)',
                    'figures/position_error_histogram.pdf',
                    colors, xticks, xtick_labels)
    _plot_error_kde(vel_errs_socp, vel_errs_linear,
                    'Device velocity error (m/s)',
                    'figures/velocity_error_histogram.pdf',
                    colors, xticks, xtick_labels)
    _plot_error_kde(bias_errs_socp, bias_errs_linear,
                    'Accel bias error',
                    'figures/bias_error_histogram.pdf',
                    colors, xticks, xtick_labels)
    # Gravity errors are stored in radians; convert to degrees for the axis label.
    _plot_error_kde(np.rad2deg(g_errs_socp), np.rad2deg(g_errs_linear),
                    'Gravity error (degrees)',
                    'figures/gravity_error_histogram.pdf',
                    colors, xticks, xtick_labels)
def plot_accuracy_vs_feature_noise():
    """Plot SOCP position error as a function of image feature noise.

    Reads results/accuracy_vs_feature_noise.txt (columns: noise level,
    position error) and writes figures/accuracy_vs_feature_noise.pdf.
    """
    data = np.loadtxt('results/accuracy_vs_feature_noise.txt')
    noise_levels, position_errors = data.T
    c1, c2 = sns.color_palette("Set1", 2)
    plt.clf()
    #plt.errorbar(feature_noise, socp_mean, socp_std, color=c1, label='SOCP')
    #plt.errorbar(feature_noise, linear_mean, linear_std, color=c2, label='Linear')
    plt.plot(noise_levels, position_errors)
    plt.xlabel('Feature noise (image pixels)')
    plt.ylabel('Position error (m)')
    plt.ylim(ymin=0)
    plt.savefig('figures/accuracy_vs_feature_noise.pdf')
if __name__ == '__main__':
    # NOTE(review): the histogram plot is disabled; running this script
    # only regenerates the feature-noise accuracy figure.
    #plot_accuracy_histograms()
    plot_accuracy_vs_feature_noise()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,115
|
alexflint/spline-initialization
|
refs/heads/master
|
/run_estimator.py
|
import bisect
import collections

import numpy as np
import matplotlib
matplotlib.use('Agg', warn=False)
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import seaborn as sns

import cayley
import plotting
import sensor_models
import spline
import spline_socp
import structures
import triangulation
import utils
def plot_features(features, frame_timestamps, frame_orientations, estimated, imu_to_camera, camera_matrix, output):
    """Synthesize features for each frame and compare to observations.

    For every observed feature, reprojects the estimated landmark through
    the estimated trajectory and writes one page per frame to the PDF at
    OUTPUT, drawing segments between predicted (red) and observed (green)
    feature positions.
    """
    features_by_frame = [[] for _ in frame_timestamps]
    predictions_by_frame = [[] for _ in frame_timestamps]
    predicted_frame_positions = estimated.position_curve.evaluate(frame_timestamps)
    num_behind = 0
    for feature in features:
        r = frame_orientations[feature.frame_id]
        p = predicted_frame_positions[feature.frame_id]
        x = estimated.landmarks[feature.track_id]
        # predict_feature_with_pose returns None when the landmark projects
        # behind the camera (see the counter below).
        z = sensor_models.predict_feature_with_pose(r, p, x, imu_to_camera, camera_matrix)
        if z is None:
            num_behind += 1
        else:
            predictions_by_frame[feature.frame_id].append(z)
            features_by_frame[feature.frame_id].append(feature.position)
    if num_behind > 0:
        print '%d features (of %d) were behind the camera' % (num_behind, len(features))
    # Use the min/max of the observed features as common axis limits.
    xmin, _, xmax = utils.minmedmax([f.position[0] for f in features])
    ymin, _, ymax = utils.minmedmax([f.position[1] for f in features])
    pdf = PdfPages(output)
    for i, (zs, zzs) in enumerate(zip(predictions_by_frame, features_by_frame)):
        print 'Plotting %d features for frame %d...' % (len(zs), i)
        zs = np.asarray(zs)
        zzs = np.asarray(zzs)
        plt.clf()
        if len(zs) > 0:
            plotting.plot_segments(zip(zs, zzs), '.-k', alpha=.5)
            plt.plot(zs[:, 0], zs[:, 1], '.r', alpha=.8)
            plt.plot(zzs[:, 0], zzs[:, 1], '.g', alpha=.8)
        plt.xlim(xmin, xmax)
        plt.ylim(ymin, ymax)
        pdf.savefig()
    pdf.close()
def visualize_simulation_results(true_trajectory, estimated_trajectory, frame_timestamps):
    """Print error metrics and save comparison plots for a simulation run.

    Writes out/vars.pdf (true vs estimated variables as bar charts) and
    out/trajectory.pdf (true vs estimated xy trajectories).
    """
    true_frame_positions = true_trajectory.position_curve.evaluate(frame_timestamps)
    estimated_frame_positions = estimated_trajectory.position_curve.evaluate(frame_timestamps)
    print 'Position errors:', np.linalg.norm(estimated_frame_positions - true_frame_positions, axis=1)
    print 'Gravity error:', np.linalg.norm(estimated_trajectory.gravity - true_trajectory.gravity)
    print 'Accel bias error:', np.linalg.norm(estimated_trajectory.accel_bias - true_trajectory.accel_bias)
    print 'Max error:', np.max(estimated_trajectory.flatten() - true_trajectory.flatten())
    # Plot the variables
    # True values in green, estimates in red, offset slightly so both bars show.
    plt.clf()
    plt.barh(np.arange(true_trajectory.size), true_trajectory.flatten(), height=.3, alpha=.3, color='g')
    plt.barh(np.arange(estimated_trajectory.size)+.4, estimated_trajectory.flatten(), height=.3, alpha=.3, color='r')
    plt.savefig('out/vars.pdf')
    plot_timestamps = np.linspace(frame_timestamps[0], frame_timestamps[-1], 500)
    true_ps = true_trajectory.position_curve.evaluate(plot_timestamps)
    estimated_ps = estimated_trajectory.position_curve.evaluate(plot_timestamps)
    # Plot the estimated trajectory
    plt.clf()
    plt.plot(estimated_ps[:, 0], estimated_ps[:, 1], 'r-')
    plt.plot(true_ps[:, 0], true_ps[:, 1], 'b-')
    plt.axis('equal')
    plt.savefig('out/trajectory.pdf')
def run_with_dataset():
    """End-to-end trajectory estimation on a recorded dataset.

    Loads camera calibration, frame timestamps, accelerometer readings and
    feature tracks from /tmp/dataset, seeds from a vfusion state dump in
    /tmp/out, selects and filters feature tracks, then solves for the
    device trajectory with the estimator selected by `estimator` below.
    Results are written to /tmp/solution and plots to out/.

    NOTE(review): several helpers used here (select_by_timestamp,
    interpolate_orientation, FirstOrderRotationCurve,
    compute_reprojection_errors, estimate_trajectory_*) are not defined or
    imported in this file as visible — presumably they live in spline_socp;
    confirm before running.
    """
    dataset_path = '/tmp/dataset'
    vfusion_path = '/tmp/out'
    gravity = np.array([0, 0, 9.82])
    min_track_length = 3
    max_frames = 100
    min_features_per_frame = 10
    max_iters = 100
    # Estimation window, expressed as offsets from the first vfusion timestamp.
    begin_time_offset = 5.
    end_time_offset = 7.
    knot_frequency = 10
    num_knots = int(np.ceil((end_time_offset - begin_time_offset) * knot_frequency))
    # Load vision model
    vision_model = list(open(dataset_path + '/vision_model.txt'))
    camera_matrix = np.array(map(float, vision_model[0].split())).reshape((3, 3))
    imu_to_camera = np.array(map(float, vision_model[1].split())).reshape((3, 3))
    # Load frame timestamps
    all_frame_timestamps = np.loadtxt(dataset_path + '/frame_timestamps.txt')
    # Load accel data
    all_accel = np.loadtxt(dataset_path + '/accel.txt')
    # Load features
    all_features = []
    with open(dataset_path + '/features.txt') as fd:
        for line in fd:
            frame_id, track_id, x, y = line.split()
            all_features.append(
                structures.FeatureObservation(int(frame_id), int(track_id), np.array([float(x), float(y)])))
    # Load trajectory from vfusion
    all_vfusion_states = np.loadtxt(vfusion_path + '/states.txt')
    all_vfusion_timestamps = all_vfusion_states[:, 1]
    begin_timestamp = all_vfusion_timestamps[0] + begin_time_offset
    end_timestamp = all_vfusion_timestamps[0] + end_time_offset
    vfusion_states = select_by_timestamp(all_vfusion_states,
                                         all_vfusion_timestamps,
                                         begin_timestamp,
                                         end_timestamp)
    # State row layout (columns): [?, timestamp, R (9), gyro bias (3),
    # velocity (3), accel bias (3), ..., position (last 3)].
    vfusion_timestamps = vfusion_states[:, 1]
    vfusion_orientations = vfusion_states[:, 2:11].reshape((-1, 3, 3))
    vfusion_positions = vfusion_states[:, -3:]
    vfusion_gyro_bias = vfusion_states[:, 11:14]
    vfusion_velocities = vfusion_states[:, 14:17]
    vfusion_accel_bias = vfusion_states[:, 17:20]
    vfusion_orientation_curve = FirstOrderRotationCurve(vfusion_timestamps, vfusion_orientations)
    vfusion_pos_curve = spline.fit(vfusion_timestamps, vfusion_positions, knot_frequency=1)
    print 'Max accel bias:', np.max(np.linalg.norm(vfusion_accel_bias, axis=1))
    # Set up IMU measurements
    accel = select_by_timestamp(all_accel, all_accel[:, 0], begin_timestamp, end_timestamp)
    accel_timestamps = accel[:, 0]
    accel_readings = accel[:, 1:]
    accel_orientations = [interpolate_orientation(vfusion_timestamps, vfusion_orientations, t)
                          for t in accel_timestamps]
    # Set up frames
    begin_frame_index = bisect.bisect_left(all_frame_timestamps, begin_timestamp)
    end_frame_index = bisect.bisect_left(all_frame_timestamps, end_timestamp)
    # Take every frame in the window, or subsample evenly when there are
    # more than max_frames.
    if end_frame_index - begin_frame_index <= max_frames:
        selected_frame_ids = np.arange(begin_frame_index, end_frame_index, dtype=int)
    else:
        selected_frame_ids = np.linspace(begin_frame_index, end_frame_index-1, max_frames).round().astype(int)
    print 'Selected frames:', selected_frame_ids
    frame_timestamps = all_frame_timestamps[selected_frame_ids]
    frame_orientations = [interpolate_orientation(vfusion_timestamps, vfusion_orientations, t)
                          for t in frame_timestamps]
    frame_seed_positions = vfusion_pos_curve.evaluate(frame_timestamps)
    # Set up features
    print 'Selecting frame indices %d...%d' % (begin_frame_index, end_frame_index)
    tracks_by_id = collections.defaultdict(list)
    for f in all_features:
        if f.frame_id in selected_frame_ids:
            tracks_by_id[f.track_id].append(f)
    # Filter by track length
    tracks = filter(lambda t: len(t) >= min_track_length, tracks_by_id.viewvalues())
    track_counts = {index: 0 for index in selected_frame_ids}
    for track in tracks:
        for f in track:
            track_counts[f.frame_id] += 1
    # Filter tracks by track length, max tracks, and min features per frame
    # Shortest tracks first: keep a track only while some of its frames are
    # still below min_features_per_frame; otherwise drop it and release its
    # per-frame counts.
    features = []
    num_tracks_added = 0
    sorted_tracks = sorted(tracks, key=len)
    for track in sorted_tracks:
        if any(track_counts[f.frame_id] <= min_features_per_frame for f in track):
            num_tracks_added += 1
            features.extend(track)
        else:
            for f in track:
                track_counts[f.frame_id] -= 1
    print ' selected %d of %d features' % (len(features), len(all_features))
    print ' features per frame: ', ' '.join(map(str, track_counts.viewvalues()))
    # Renumber track IDs and frame_ids consecutively
    frame_ids = sorted(selected_frame_ids)
    track_ids = sorted(set(f.track_id for f in features))
    frame_index_by_id = {frame_id: index for index, frame_id in enumerate(frame_ids)}
    track_index_by_id = {track_id: index for index, track_id in enumerate(track_ids)}
    for f in features:
        f.track_id = track_index_by_id[f.track_id]
        f.frame_id = frame_index_by_id[f.frame_id]
    # Create vfusion estimate
    tracks_by_id = collections.defaultdict(list)
    for f in features:
        tracks_by_id[f.track_id].append(f)
    # Seed landmarks by midpoint triangulation from the vfusion poses.
    vfusion_landmarks = np.array([triangulation.triangulate_midpoint(tracks_by_id[i],
                                                                     frame_orientations,
                                                                     frame_seed_positions,
                                                                     imu_to_camera,
                                                                     camera_matrix)
                                  for i in range(len(tracks_by_id))])
    print 'landmarks:'
    print vfusion_landmarks
    vfusion_estimate = structures.PositionEstimate(vfusion_pos_curve, gravity, vfusion_accel_bias, vfusion_landmarks)
    # Discard features with reprojection error >= 5 pixels under the seed.
    vfusion_reproj_errors = compute_reprojection_errors(features, frame_timestamps, frame_orientations,
                                                        vfusion_estimate, imu_to_camera, camera_matrix)
    features = [f for f, err in zip(features, vfusion_reproj_errors) if np.linalg.norm(err) < 5.]
    features, vfusion_estimate.landmarks = utils.renumber_tracks(features, vfusion_estimate.landmarks, min_track_length=2)
    # Plot the reprojected landmarks
    plot_features(features, frame_timestamps, frame_orientations, vfusion_estimate,
                  imu_to_camera, camera_matrix, 'out/vfusion_features.pdf')
    # Create the problem
    print 'Creating problem for %d frames, %d tracks, %d features, and %d accel readings...' % \
        (len(frame_timestamps), len(track_ids), len(features), len(accel_readings))
    spline_tpl = spline.SplineTemplate.linspaced(num_knots, dims=3, begin=begin_timestamp, end=end_timestamp)
    # Choose which estimator to run; only one branch executes.
    estimator = 'mixed'
    if estimator == 'infnorm':
        estimated = estimate_trajectory_inf(spline_tpl,
                                            accel_timestamps,
                                            accel_orientations,
                                            accel_readings,
                                            frame_timestamps,
                                            frame_orientations,
                                            features,
                                            camera_matrix=camera_matrix,
                                            imu_to_camera=imu_to_camera,
                                            feature_tolerance=10.,
                                            accel_tolerance=2.,
                                            max_bias_magnitude=1.,
                                            gravity_magnitude=np.linalg.norm(gravity) + .1,
                                            maxiters=max_iters)
    elif estimator == 'mixed':
        estimated = estimate_trajectory_mixed(spline_tpl,
                                              accel_timestamps,
                                              accel_orientations,
                                              accel_readings,
                                              frame_timestamps,
                                              frame_orientations,
                                              features,
                                              camera_matrix=camera_matrix,
                                              imu_to_camera=imu_to_camera,
                                              feature_tolerance=2.,
                                              max_bias_magnitude=1.,
                                              gravity_magnitude=np.linalg.norm(gravity) + .1,
                                              maxiters=max_iters)
    elif estimator == 'linear':
        estimated = estimate_trajectory_linear(spline_tpl,
                                               accel_timestamps,
                                               accel_orientations,
                                               accel_readings,
                                               frame_timestamps,
                                               frame_orientations,
                                               features,
                                               camera_matrix=camera_matrix,
                                               imu_to_camera=imu_to_camera,
                                               accel_weight=100.)
    elif estimator == 'lsqnonlin':
        estimated = estimate_trajectory_lsqnonlin(spline_tpl,
                                                  accel_timestamps,
                                                  accel_orientations,
                                                  accel_readings,
                                                  frame_timestamps,
                                                  frame_orientations,
                                                  features,
                                                  camera_matrix=camera_matrix,
                                                  imu_to_camera=imu_to_camera,
                                                  accel_weight=100.,
                                                  seed=vfusion_estimate)
    else:
        print 'Invalid solver:', estimator
        return
    if estimated is None:
        print 'No solution found'
        return
    estimated_frame_positions = estimated.position_curve.evaluate(frame_timestamps)
    vfusion_frame_positions = vfusion_pos_curve.evaluate(frame_timestamps)
    print 'Estimated gravity:', estimated.gravity
    print 'Estimated accel bias:', estimated.accel_bias
    print ' vfusion accel bias box: ', np.min(vfusion_accel_bias, axis=0), np.max(vfusion_accel_bias, axis=0)
    print 'Position errors:', np.linalg.norm(estimated_frame_positions - vfusion_frame_positions, axis=1)
    # Save results to file
    np.savetxt('/tmp/solution/estimated_frame_positions.txt', estimated_frame_positions)
    np.savetxt('/tmp/solution/estimated_pos_controls.txt', estimated.position_curve.controls)
    np.savetxt('/tmp/solution/estimated_accel_bias.txt', estimated.accel_bias)
    np.savetxt('/tmp/solution/estimated_gravity.txt', estimated.gravity)
    np.savetxt('/tmp/solution/estimated_landmarks.txt', estimated.landmarks)
    np.savetxt('/tmp/solution/knots.txt', spline_tpl.knots)
    plot_timestamps = np.linspace(begin_timestamp, end_timestamp, 500)
    estimated_ps = estimated.position_curve.evaluate(plot_timestamps)
    vfusion_ps = vfusion_pos_curve.evaluate(plot_timestamps) - vfusion_pos_curve.evaluate(begin_timestamp)
    # Plot the estimated trajectory
    plt.clf()
    plt.plot(estimated_ps[:, 0], estimated_ps[:, 1], 'r-')
    plt.plot(vfusion_ps[:, 0], vfusion_ps[:, 1], 'b-')
    plt.axis('equal')
    plt.savefig('out/trajectory.pdf')
    # Plot the estimated trajectory at its own scale
    plt.clf()
    plt.plot(estimated_ps[:, 0], estimated_ps[:, 1], 'r-')
    plt.plot(estimated.position_curve.controls[:, 0], estimated.position_curve.controls[:, 1], 'b-', alpha=.2)
    plt.axis('equal')
    plt.savefig('out/lone_trajectory.pdf')
    # Plot the estimated vars
    plt.clf()
    plt.barh(np.arange(estimated.size), estimated.flatten(), height=.75, color='r')
    plt.savefig('out/vars.pdf')
    # Synthesize accel readings and compare to measured values
    timestamps = np.linspace(begin_timestamp, end_timestamp, 100)
    predicted_accel = []
    for t in timestamps:
        predicted_accel.append(sensor_models.predict_accel(estimated.position_curve,
                                                           vfusion_orientation_curve,
                                                           estimated.accel_bias,
                                                           estimated.gravity,
                                                           t))
    # Synthesize accel readings from gravity and accel bias only
    predicted_stationary_accel = []
    for t in timestamps:
        r = cayley.cayley(vfusion_orientation_curve.evaluate(t))
        predicted_stationary_accel.append(np.dot(r, estimated.gravity) + estimated.accel_bias)
    predicted_accel = np.array(predicted_accel)
    predicted_stationary_accel = np.array(predicted_stationary_accel)
    plt.clf()
    plt.plot(timestamps, predicted_accel, '-', label='predicted')
    plt.plot(accel_timestamps, accel_readings, '-', label='observed')
    plt.legend()
    plt.savefig('out/accel.pdf')
    plt.clf()
    plt.plot(timestamps, predicted_stationary_accel, '-', label='predicted')
    plt.plot(accel_timestamps, accel_readings, '-', label='observed')
    plt.legend()
    plt.savefig('out/accel_stationary.pdf')
    # Plot features
    plot_features(features, frame_timestamps, frame_orientations, estimated,
                  imu_to_camera, camera_matrix, 'out/estimated_features.pdf')
def run_fit_spline():
    """Fit a 1-D spline to random samples and save the fit to out/fit.pdf."""
    sample_ts = np.linspace(0, 10, 10)
    sample_ys = np.random.randn(len(sample_ts))
    curve = spline.fit(sample_ts, sample_ys, num_knots=8)
    plt.clf()
    dense_ts = np.linspace(0, 10, 200)
    plt.plot(dense_ts, curve.evaluate(dense_ts), 'r-')
    plt.plot(sample_ts, sample_ys, 'xk')
    plt.savefig('out/fit.pdf')
def run_fit_spline_multidim():
    """Fit a 3-D spline to random samples and plot its xy projection to out/fit.pdf."""
    sample_ts = np.linspace(0, 10, 9)
    sample_ys = np.random.randn(len(sample_ts), 3)
    curve = spline.fit(sample_ts, sample_ys, num_knots=8)
    plt.clf()
    dense_ts = np.linspace(0, 10, 200)
    dense_ys = curve.evaluate(dense_ts)
    plt.plot(dense_ys[:, 0], dense_ys[:, 1], 'r-')
    plt.plot(sample_ys[:, 0], sample_ys[:, 1], 'xk')
    plt.savefig('out/fit.pdf')
if __name__ == '__main__':
    # NOTE(review): all entry points below are commented out, so running
    # this script currently only sets numpy print options.
    np.set_printoptions(linewidth=1000)
    #run_with_dataset()
    #run_fit_spline()
    #run_fit_spline_multidim()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,116
|
alexflint/spline-initialization
|
refs/heads/master
|
/plot_timings.py
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
def main():
    """Render solver-timing curves from results/ into figures/ as PDFs."""
    # Timing as a function of landmark count.
    landmark_counts, solve_times = np.loadtxt('results/timings_vs_num_landmarks.txt').T
    plt.clf()
    plt.plot(landmark_counts, solve_times)
    plt.xlabel('Number of landmarks')
    plt.ylabel('Solve duration (s)')
    plt.ylim(ymin=0)
    plt.savefig('figures/timings_vs_num_landmarks.pdf')
    # Timing as a function of spline knot count.
    knot_counts, solve_times = np.loadtxt('results/timings_vs_num_knots.txt').T
    plt.clf()
    plt.plot(knot_counts, solve_times)
    plt.xlabel('Number of spline knots')
    plt.ylabel('Solve duration (s)')
    plt.ylim(0, .1)
    plt.savefig('figures/timings_vs_num_knots.pdf')


if __name__ == '__main__':
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,117
|
alexflint/spline-initialization
|
refs/heads/master
|
/triangulation_test.py
|
import unittest
import numpy as np
import numpy.testing
import utils
import lie
import triangulation
import spline_socp
class TriangulationTest(unittest.TestCase):
    def test_triangulate_midpoint(self):
        """Midpoint triangulation should recover a known 3D point from
        synthetic observations, with accuracy degrading gracefully as
        feature noise increases."""
        np.random.seed(0)
        for num_frames in [2, 3, 10]:
            for noise, decimals in [(0, 12), (1e-8, 8), (1e-3, 3), (1e-2, 1)]:
                ps = np.random.randn(num_frames, 3)
                # Materialize the map: rs is iterated twice (in the zip
                # below and again in the triangulate_midpoint call), so a
                # bare map object would be exhausted under Python 3.
                rs = list(map(lie.SO3.exp, np.random.randn(num_frames, 3)*.1))
                imu_to_camera = lie.SO3.exp(np.random.randn(3)*.1)
                camera_matrix = np.array([[100, 0, 50],
                                          [0, 100, 50],
                                          [0, 0, 1]], dtype=float)
                x = np.random.randn(3) + [0, 0, 10]
                features = []
                for i, (r, p) in enumerate(zip(rs, ps)):
                    z = utils.pr(utils.dots(camera_matrix, imu_to_camera, r, x - p))
                    if noise > 0:
                        z += np.random.randn(2) * noise
                    features.append(spline_socp.FeatureObservation(i, 0, z))
                estimated = triangulation.triangulate_midpoint(features, rs, ps, imu_to_camera, camera_matrix)
                numpy.testing.assert_array_almost_equal(estimated, x, decimal=decimals)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,118
|
alexflint/spline-initialization
|
refs/heads/master
|
/estimate_position_socp.py
|
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import utils
import cayley
import bezier
import socp
import geometry
def predict_accel(pos_controls, orient_controls, accel_bias, gravity, t):
    """Predict the accelerometer reading at time T: the global acceleration
    plus gravity, rotated into the body frame, plus the accel bias."""
    body_from_world = cayley.cayley(bezier.zero_offset_bezier(orient_controls, t))
    world_accel = bezier.zero_offset_bezier_second_deriv(pos_controls, t)
    return np.dot(body_from_world, world_accel + gravity) + accel_bias
def predict_feature(pos_controls, orient_controls, landmark, t):
    """Project LANDMARK into the (normalized) camera at time T along the
    trajectory defined by the Bezier control points."""
    orientation = cayley.cayley(bezier.zero_offset_bezier(orient_controls, t))
    position = bezier.zero_offset_bezier(pos_controls, t)
    camera_frame = np.dot(orientation, landmark - position)
    assert camera_frame[2] > 0
    return geometry.pr(camera_frame)
def predict_depth(pos_controls, orient_controls, landmark, t):
    """Return the Euclidean distance from the camera at time T to LANDMARK.

    NOTE(review): rotating by r does not change the norm, so this computes
    the range to the landmark rather than the z-depth in the camera frame.
    If true depth was intended, this should return the third component of
    np.dot(r, landmark - p) instead -- confirm against callers.
    """
    r = cayley.cayley(bezier.zero_offset_bezier(orient_controls, t))
    p = bezier.zero_offset_bezier(pos_controls, t)
    return np.linalg.norm(np.dot(r, landmark - p))
def construct_problem(bezier_degree,
                      observed_accel_timestamps,
                      observed_accel_orientations,
                      observed_accel_readings,
                      observed_frame_timestamps,
                      observed_frame_orientations,
                      observed_features,
                      imu_to_camera=np.eye(3),
                      camera_matrix=np.eye(3),
                      feature_tolerance=1e-2,
                      accel_tolerance=1e-3,
                      gravity_magnitude=9.8,
                      max_bias_magnitude=.1):
    """Assemble the second-order cone feasibility problem over position
    controls, accel bias, gravity and landmark positions, constrained by
    accelerometer readings and feature observations within the given
    tolerances."""
    # Compute offsets
    # Variable layout: [position controls | accel bias | gravity | landmarks].
    position_offset = 0
    position_len = (bezier_degree-1)*3
    accel_bias_offset = position_offset + position_len
    gravity_offset = accel_bias_offset + 3
    structure_offset = gravity_offset + 3
    num_vars = structure_offset + 3 * observed_features.shape[1]
    # Initialize the problem
    # Zero objective: this is a pure feasibility problem.
    objective = np.zeros(num_vars)
    problem = socp.SocpProblem(objective, [])
    # Construct gravity constraints
    # ||gravity|| <= gravity_magnitude
    a_gravity = np.zeros((3, num_vars))
    a_gravity[:, gravity_offset:gravity_offset+3] = np.eye(3)
    d_gravity = gravity_magnitude
    problem.add_constraint(a=a_gravity, d=d_gravity)
    # Construct accel bias constraints
    # ||accel_bias|| <= max_bias_magnitude
    a_bias = np.zeros((3, num_vars))
    a_bias[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
    d_bias = max_bias_magnitude
    problem.add_constraint(a=a_bias, d=d_bias)
    # Construct accel constraints
    # ||predicted accel - observed accel|| <= accel_tolerance for each reading.
    for t, r, a in zip(observed_accel_timestamps, observed_accel_orientations, observed_accel_readings):
        amat = bezier.zero_offset_bezier_second_deriv_mat(t, bezier_degree-1, 3)
        j = np.zeros((3, num_vars))
        j[:, :position_len] = np.dot(r, amat)
        j[:, gravity_offset:gravity_offset+3] = r
        j[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
        # NOTE(review): this rebinds the loop variable r (the orientation)
        # to the constraint offset -a; a distinct name would be clearer.
        r = -a
        problem.add_constraint(a=j, b=r, d=accel_tolerance)
    # Construct structure constraints
    # Reprojection-cone constraints per (frame, landmark) pair.
    for i, (t, r, zs) in enumerate(zip(observed_frame_timestamps, observed_frame_orientations, observed_features)):
        for j, z in enumerate(zs):
            point_offset = structure_offset + j*3
            pmat = bezier.zero_offset_bezier_mat(t, bezier_degree-1, 3)
            k_rc_r = np.dot(camera_matrix, np.dot(imu_to_camera, r))
            ymat = np.zeros((3, num_vars))
            ymat[:, :position_len] = -np.dot(k_rc_r, pmat)
            ymat[:, point_offset:point_offset+3] = k_rc_r
            a_feature = ymat[:2] - np.outer(z, ymat[2])
            c_feature = ymat[2] * feature_tolerance
            problem.add_constraint(a=a_feature, c=c_feature)
    return problem
def run_bezier_position_estimation():
    """Simulation: build a ground-truth Bezier trajectory, synthesize noisy
    accelerometer and feature observations, solve the SOCP, and report and
    plot the estimation errors (out/vars.pdf)."""
    np.random.seed(0)
    #
    # Construct ground truth
    #
    num_frames = 6
    num_landmarks = 10
    num_imu_readings = 100
    bezier_degree = 4
    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Bezier curve degree:', bezier_degree
    # Both splines should start at 0,0,0
    true_frame_timestamps = np.linspace(0, .9, num_frames)
    true_accel_timestamps = np.linspace(0, 1, num_imu_readings)
    true_rot_controls = np.random.randn(bezier_degree-1, 3) * .1
    true_pos_controls = np.random.randn(bezier_degree-1, 3)
    # Landmarks scattered in front of the trajectory (around z=20).
    true_landmarks = np.random.randn(num_landmarks, 3)*5 + [0., 0., 20.]
    true_frame_orientations = np.array([cayley.cayley(bezier.zero_offset_bezier(true_rot_controls, t))
                                        for t in true_frame_timestamps])
    true_frame_positions = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in true_frame_timestamps])
    true_gravity_magnitude = 9.8
    true_gravity = utils.normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3) * .01
    print 'True gravity:', true_gravity
    true_imu_orientations = np.array([cayley.cayley(bezier.zero_offset_bezier(true_rot_controls, t))
                                      for t in true_accel_timestamps])
    true_accel_readings = np.array([predict_accel(true_pos_controls, true_rot_controls, true_accel_bias, true_gravity, t)
                                    for t in true_accel_timestamps])
    true_features = np.array([[predict_feature(true_pos_controls, true_rot_controls, x, t) for x in true_landmarks]
                              for t in true_frame_timestamps])
    # Ground-truth variable vector, in the layout used by construct_problem.
    true_vars = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity, true_landmarks.flatten()))
    #
    # Add sensor noise
    #
    accel_timestamp_noise = 1e-5
    accel_reading_noise = 1e-5
    accel_orientation_noise = 1e-5
    frame_timestamp_noise = 1e-5
    frame_orientation_noise = 1e-5
    feature_noise = 1e-5
    observed_accel_timestamps = utils.add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = utils.add_white_noise(true_accel_readings, accel_reading_noise)
    observed_accel_orientations = utils.add_orientation_noise(true_imu_orientations, accel_orientation_noise)
    observed_frame_timestamps = utils.add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = utils.add_orientation_noise(true_frame_orientations, frame_orientation_noise)
    observed_features = utils.add_white_noise(true_features, feature_noise)
    #
    # Solve
    #
    problem = construct_problem(
        bezier_degree,
        observed_accel_timestamps,
        observed_accel_orientations,
        observed_accel_readings,
        observed_frame_timestamps,
        observed_frame_orientations,
        observed_features,
        gravity_magnitude=true_gravity_magnitude+.1,
        accel_tolerance=1e-3,
        feature_tolerance=1e-3)
    #problem.evaluate(true_vars)
    result = socp.solve(problem, sparse=True)
    if result['x'] is None:
        print 'Did not find a feasible solution'
        return
    # Unpack the solution vector into its blocks (same layout as true_vars).
    estimated_vars = np.squeeze(result['x'])
    estimated_pos_controls = estimated_vars[:true_pos_controls.size].reshape((-1, 3))
    estimated_accel_bias = estimated_vars[true_pos_controls.size:true_pos_controls.size+3]
    estimated_gravity = estimated_vars[true_pos_controls.size+3:true_pos_controls.size+6]
    estimated_landmarks = estimated_vars[true_pos_controls.size+6:].reshape((-1, 3))
    estimated_frame_positions = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t)
                                          for t in true_frame_timestamps])
    print 'Position norms:', np.linalg.norm(true_frame_positions, axis=1)
    print 'Position errors:', np.linalg.norm(estimated_frame_positions - true_frame_positions, axis=1)
    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print 'Max error:', np.max(estimated_vars - true_vars)
    # True values in green, estimates in red, offset so both bars are visible.
    plt.clf()
    plt.barh(np.arange(len(true_vars)), true_vars, height=.3, alpha=.3, color='g')
    plt.barh(np.arange(len(true_vars))+.4, estimated_vars, height=.3, alpha=.3, color='r')
    plt.savefig('out/vars.pdf')
if __name__ == '__main__':
    # Wide print width keeps large arrays on a single line when debugging.
    np.set_printoptions(linewidth=1000)
    run_bezier_position_estimation()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,119
|
alexflint/spline-initialization
|
refs/heads/master
|
/spline.py
|
import numpy as np
import matplotlib
matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
import seaborn
import utils
def diagify(xs, dims):
    """Given a rank-1 vector xs, return a DIMS x n*DIMS array where each DIMS x DIMS subblock is a diagonal matrix
    with xs[i] repeated along the diagonal."""
    blocks = []
    for value in xs:
        blocks.append(value * np.eye(dims))
    return np.hstack(blocks)
def num_bases(num_knots, degree):
    """Compute the number of basis functions for a spline with the given degree and number of knots."""
    # A degree-d B-spline over k knots has k + d - 1 basis functions.
    return degree + num_knots - 1
def basis(ts, i, knots, degree):
    """Evaluate the i-th B-spline basis function of the given degree at TS.

    TS may be a scalar or an array; degree-0 results are float arrays.
    """
    n = len(knots)
    if degree == 0:
        # Degree-0 bases are indicator functions over knot intervals; the
        # first and last intervals are extended to cover everything beyond
        # the boundary knots.
        if i == 0:
            return (ts <= knots[1]).astype(float)
        elif i+2 == n:
            return (ts > knots[-2]).astype(float)
        else:
            return np.logical_and(knots[i] <= ts, ts < knots[i+1]).astype(float)
    else:
        # Cox-de Boor-style recursion; knot indices are clamped to the valid
        # range via min/max, which plays the role of repeated boundary knots.
        out = 0.
        if i > 0:
            coef = (ts - knots[max(i-degree, 0)]) / (knots[min(i, n-1)] - knots[max(i-degree, 0)])
            out += coef * basis(ts, i-1, knots, degree-1)
        if i + 1 < n + degree - 1:
            coef = (knots[min(i+1, n-1)] - ts) / (knots[min(i+1, n-1)] - knots[max(i-degree+1, 0)])
            out += coef * basis(ts, i, knots, degree-1)
        return out
def basis_d1(ts, i, knots, degree):
    """Evaluate the first derivative of the i-th B-spline basis function at TS."""
    n = len(knots)
    if degree == 0:
        # Piecewise-constant bases have zero derivative everywhere
        # (the jumps at the knots are ignored).
        return 0 if np.isscalar(ts) else np.zeros(len(ts))
    else:
        # Differentiate the basis() recursion with the product rule:
        # each term contributes coef * B' + coef' * B.
        out = 0.
        if i > 0:
            denom = 1. / (knots[min(i, n-1)] - knots[max(i-degree, 0)])
            coef = (ts - knots[max(i-degree, 0)]) * denom
            out += coef * basis_d1(ts, i-1, knots, degree-1) + denom * basis(ts, i-1, knots, degree-1)
        if i + 1 < n + degree - 1:
            denom = 1. / (knots[min(i+1, n-1)] - knots[max(i-degree+1, 0)])
            coef = (knots[min(i+1, n-1)] - ts) * denom
            out += coef * basis_d1(ts, i, knots, degree-1) - denom * basis(ts, i, knots, degree-1)
        return out
def basis_d2(ts, i, knots, degree):
    """Evaluate the second derivative of the i-th B-spline basis function at TS.

    (The previous docstring said "first derivative"; this function applies
    the product rule twice, as the factor-of-2 terms below show.)
    """
    n = len(knots)
    if degree == 0:
        return 0 if np.isscalar(ts) else np.zeros(len(ts))
    else:
        out = 0.
        if i > 0:
            denom = 1. / (knots[min(i, n-1)] - knots[max(i-degree, 0)])
            coef = (ts - knots[max(i-degree, 0)]) * denom
            out += coef*basis_d2(ts, i-1, knots, degree-1) + 2*denom*basis_d1(ts, i-1, knots, degree-1)
        if i + 1 < n + degree - 1:
            denom = 1. / (knots[min(i+1, n-1)] - knots[max(i-degree+1, 0)])
            coef = (knots[min(i+1, n-1)] - ts) * denom
            out += coef*basis_d2(ts, i, knots, degree-1) - 2*denom*basis_d1(ts, i, knots, degree-1)
        return out
def coefficients(ts, knots, degree):
    """Compute the coefficients of all bases for a spline evaluated at TS
    (one column per basis function)."""
    return np.array([basis(ts, i, knots, degree)
                     for i in range(num_bases(len(knots), degree))]).T
def coefficients_d1(ts, knots, degree):
    """Compute the first-derivative coefficients of all bases for a spline
    evaluated at TS."""
    return np.array([basis_d1(ts, i, knots, degree)
                     for i in range(num_bases(len(knots), degree))]).T
def coefficients_d2(ts, knots, degree):
    """Compute the second-derivative coefficients of all bases for a spline
    evaluated at TS."""
    return np.array([basis_d2(ts, i, knots, degree)
                     for i in range(num_bases(len(knots), degree))]).T
def multidim_coefficients(t, knots, degree, dims):
    """For splines through multidimensional space, compute a matrix A such that dot(A, controls.flatten()) is the
    spline output at T."""
    return diagify(coefficients(t, knots, degree), dims)
def multidim_coefficients_d1(t, knots, degree, dims):
    """For splines through multidimensional space, compute a matrix A such that dot(A, controls.flatten()) is the
    first derivative of the spline at T."""
    return diagify(coefficients_d1(t, knots, degree), dims)
def multidim_coefficients_d2(t, knots, degree, dims):
    """For splines through multidimensional space, compute a matrix A such that dot(A, controls.flatten()) is the
    second derivative of the spline at T."""
    return diagify(coefficients_d2(t, knots, degree), dims)
def evaluate(ts, knots, degree, controls):
    """Evaluate a spline with the given control points at TS."""
    return np.dot(coefficients(ts, knots, degree), controls)
def evaluate_d1(ts, knots, degree, controls):
    """Evaluate the first derivative of a spline with the given control points at TS."""
    return np.dot(coefficients_d1(ts, knots, degree), controls)
def evaluate_d2(ts, knots, degree, controls):
    """Evaluate the second derivative of a spline with the given control points at TS."""
    return np.dot(coefficients_d2(ts, knots, degree), controls)
class SplineTemplate(object):
    """Represents the knots and degree for a B-spline."""
    def __init__(self, knots, degree, dims=1):
        # knots: knot positions; degree: polynomial degree of each piece;
        # dims: dimensionality of the spline's codomain.
        self.knots = knots
        self.degree = degree
        self.dims = dims
    @property
    def num_bases(self):
        """Number of basis functions (and control points) for this template."""
        return num_bases(len(self.knots), self.degree)
    @property
    def control_shape(self):
        """Shape of the control-point array: 1-D for dims==1, else (num_bases, dims)."""
        return (self.num_bases,) if self.dims == 1 else (self.num_bases, self.dims)
    @property
    def control_size(self):
        """Total number of scalar control variables."""
        return self.num_bases * self.dims
    def build_random(self, scale=1., offset=0., first_control=None):
        """Build a Spline with Gaussian random controls, optionally pinning
        the first control point."""
        controls = np.random.randn(*self.control_shape)*scale + offset
        if first_control is not None:
            controls[0] = first_control
        return Spline(self, controls)
    def coefficients(self, ts):
        """Basis coefficients at TS (see module-level coefficients)."""
        return coefficients(ts, self.knots, self.degree)
    def coefficients_d1(self, ts):
        """First-derivative basis coefficients at TS."""
        return coefficients_d1(ts, self.knots, self.degree)
    def coefficients_d2(self, ts):
        """Second-derivative basis coefficients at TS."""
        return coefficients_d2(ts, self.knots, self.degree)
    def multidim_coefficients(self, ts):
        """Coefficient matrix over flattened multidimensional controls."""
        return multidim_coefficients(ts, self.knots, self.degree, self.dims)
    def multidim_coefficients_d1(self, ts):
        """First-derivative coefficient matrix over flattened controls."""
        return multidim_coefficients_d1(ts, self.knots, self.degree, self.dims)
    def multidim_coefficients_d2(self, ts):
        """Second-derivative coefficient matrix over flattened controls."""
        return multidim_coefficients_d2(ts, self.knots, self.degree, self.dims)
    def evaluate(self, ts, controls):
        """Evaluate the spline defined by CONTROLS at TS."""
        return evaluate(ts, self.knots, self.degree, controls)
    def evaluate_d1(self, ts, controls):
        """Evaluate the first derivative of the spline defined by CONTROLS at TS."""
        return evaluate_d1(ts, self.knots, self.degree, controls)
    def evaluate_d2(self, ts, controls):
        """Evaluate the second derivative of the spline defined by CONTROLS at TS."""
        return evaluate_d2(ts, self.knots, self.degree, controls)
    @classmethod
    def linspaced(cls, num_knots, dims, duration=1., degree=3, begin=0., end=None):
        """Construct a template with evenly spaced knots over [begin, end]
        (end defaults to begin + duration)."""
        if end is None:
            end = begin + duration
        return SplineTemplate(np.linspace(begin, end, num_knots), degree, dims)
class Spline(object):
    """A B-spline curve: a SplineTemplate paired with concrete control points."""

    def __init__(self, template, controls):
        controls = np.asarray(controls)
        assert isinstance(template, SplineTemplate)
        # Sanity-check that the control array matches the template.
        if template.dims == 1:
            assert len(controls) == template.num_bases
        else:
            assert controls.shape == template.control_shape, '%s vs %s' % (controls.shape, template.control_shape)
        self.template = template
        self.controls = controls

    def evaluate(self, ts):
        """Evaluate the spline at T."""
        return self.template.evaluate(ts, self.controls)

    def evaluate_d1(self, ts):
        """Evaluate the first derivative of the spline at T."""
        return self.template.evaluate_d1(ts, self.controls)

    def evaluate_d2(self, ts):
        """Evaluate the second derivative of the spline at T."""
        return self.template.evaluate_d2(ts, self.controls)

    @classmethod
    def canonical(cls, controls, duration=1., degree=3, begin=0., end=None):
        """Spline with uniformly spaced knots through the given CONTROLS."""
        num_knots = len(controls) - degree + 1
        if np.ndim(controls) > 1:
            dims = np.shape(controls)[1]
        else:
            dims = 1
        tpl = SplineTemplate.linspaced(num_knots, dims, degree=degree,
                                       duration=duration, begin=begin, end=end)
        return Spline(tpl, controls)
def fit(ts, ys, degree=3, num_knots=None, knot_frequency=5.):
    """Least-squares fit of a B-spline to samples YS taken at times TS.

    If NUM_KNOTS is omitted, knots are placed at roughly KNOT_FREQUENCY per
    unit time. Returns a Spline over uniformly spaced knots spanning TS.
    """
    ts = np.asarray(ts)
    ys = np.asarray(ys)
    t0 = ts[0]
    duration = ts[-1] - t0
    dims = 1 if np.ndim(ys) == 1 else ys.shape[1]
    if num_knots is None:
        num_knots = int(np.ceil(duration * knot_frequency)) + 1

    # Create the linear system: one coefficient row block per sample.
    tpl = SplineTemplate(np.linspace(t0, t0+duration, num_knots), degree, dims)
    a = np.vstack([tpl.multidim_coefficients(t) for t in ts])
    b = ys.flatten()

    # Solve the system. rcond=-1 pins the legacy singular-value cutoff and
    # silences the FutureWarning that numpy emits when lstsq is called
    # without an explicit rcond.
    controls, _, _, _ = np.linalg.lstsq(a, b, rcond=-1)

    # Construct the spline, reshaping flattened controls for multi-dim output.
    if dims > 1:
        controls = controls.reshape((-1, dims))
    return Spline(tpl, controls)
def main():
    """Plot every B-spline basis function over a random knot vector."""
    np.random.seed(2)
    degree = 3
    num_knots = 8
    # Endpoints clamped at 0 and 10; interior knots drawn uniformly at random.
    knots = sorted([0, 10] + list(np.random.rand(num_knots-2) * 10.))
    plt.clf()
    for i in range(num_knots + degree - 1):
        ts = np.linspace(-.1, 10.1, 200)
        ys = basis(ts, i, knots, degree)
        plt.plot(ts, ys)
    # Dotted vertical lines mark the knot positions.
    plt.vlines(knots, -1, 2, linestyles='dotted', alpha=.4)
    plt.savefig('out/bases.pdf')  # assumes an 'out' directory exists -- TODO confirm
if __name__ == '__main__':
    # Entry point when executed as a script (not on import).
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,120
|
alexflint/spline-initialization
|
refs/heads/master
|
/cayley.py
|
import numpy as np
from lie import SO3, skew
def cayley_mat(s):
    """Numerator matrix of the Cayley transform of the 3-vector S."""
    s = np.asarray(s, float)
    sq_norm = np.dot(s, s)
    return (1. - sq_norm) * np.eye(3) + 2.*skew(s) + 2.*np.outer(s, s)
def cayley_denom(s):
    """Scalar denominator of the Cayley transform: 1 + <s, s>."""
    s = np.asarray(s, float)
    sq_norm = np.dot(s, s)
    return 1. + sq_norm
def cayley(s):
    """Rotation matrix given by the Cayley transform of the 3-vector S."""
    s = np.asarray(s, float)
    numerator = cayley_mat(s)
    return numerator / cayley_denom(s)
def cayley_inv(r):
    """Inverse Cayley transform: parameters S such that cayley(S) ~ R."""
    w = SO3.log(r)
    theta = np.linalg.norm(w)
    # Guard the division for rotations very close to the identity.
    if theta >= 1e-8:
        return w * np.tan(theta / 2.) / theta
    return w
def cayley_av_mat(x):
    """Matrix mapping Cayley-parameter derivatives to angular velocity."""
    scale = 2. / (1. + np.dot(x, x))
    return (np.eye(3) - skew(x)) * scale
def angular_velocity_from_cayley_deriv(x, dx):
    """Angular velocity given Cayley parameters X and their time derivative DX."""
    mat = cayley_av_mat(x)
    return np.dot(mat, dx)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,121
|
alexflint/spline-initialization
|
refs/heads/master
|
/utils.py
|
import collections
import numpy as np
from lie import skew, SO3
def normalized(x):
    """Scale X so that its last axis has unit Euclidean norm."""
    x = np.asarray(x)
    norms = np.sqrt(np.sum(np.square(x), axis=-1))
    return x / norms[..., None]
def pr(x):
    """Perspective divide: drop the last coordinate after dividing by it."""
    x = np.asarray(x)
    head = x[..., :-1]
    tail = x[..., -1:]
    return head / tail
def unpr(x):
    """Append a homogeneous 1 along the last axis of X."""
    x = np.asarray(x)
    ones = np.ones(x.shape[:-1] + (1,))
    return np.concatenate((x, ones), axis=-1)
def spy(x, tol=1e-4):
    """ASCII sparsity plot: 'x' where |entry| > TOL, a space elsewhere."""
    x = np.atleast_2d(x)
    lines = []
    for row in x:
        cells = ''.join('x' if abs(val) > tol else ' ' for val in row)
        lines.append('[' + cells + ']')
    return '\n'.join(lines)
def unreduce(x, mask, fill=0.):
    """Scatter X into a length-len(MASK) array at MASK's True positions."""
    values = np.asarray(x)
    result = np.repeat(fill, len(mask))
    result[mask] = values
    return result
def unreduce_info(info, mask):
    """Embed the reduced information matrix INFO into a full-size matrix,
    placing entries at the row/column positions selected by MASK."""
    n = len(mask)
    full = np.zeros((n, n))
    full[np.ix_(mask, mask)] = info
    return full
def cis(theta):
    """Return (cos(theta), sin(theta)); THETA may be scalar or vector."""
    cos_part = np.cos(theta)
    sin_part = np.sin(theta)
    return np.array((cos_part, sin_part))
def dots(*m):
    """Multiply an arbitrary number of matrices with np.dot, left to right.

    Imports reduce from functools so the function also works on Python 3,
    where reduce is no longer a builtin (functools.reduce exists on both).
    """
    from functools import reduce
    return reduce(np.dot, m)
def sumsq(x, axis=None):
    """Sum of squared elements of X, optionally along AXIS."""
    squares = np.square(x)
    return np.sum(squares, axis=axis)
def unit(i, n):
    """Length-N float vector with a single 1. in position I."""
    indicator = np.arange(n) == i
    return indicator.astype(float)
def orthonormalize(r):
    """Nearest orthonormal matrix to R (Frobenius sense), via SVD."""
    u, _, vt = np.linalg.svd(r)
    return np.dot(u, vt)
def minmedmax(xs):
    """Return (min, median, max) of XS, or (0., 0., 0.) for an empty input.

    The empty-input warning print is parenthesized (a single argument), so
    its output is identical on Python 2 while remaining valid Python 3.
    """
    if len(xs) == 0:
        print('warning [utils.minmedmax]: empty list passed')
        return 0., 0., 0.
    else:
        return np.min(xs), np.median(xs), np.max(xs)
def essential_matrix(R1, p1, R2, p2):
    """Essential matrix relating two cameras with poses (R1, p1) and (R2, p2)."""
    relative_rotation = np.dot(R2, R1.T)
    relative_position = np.dot(R1, p2 - p1)
    return essential_matrix_from_relative_pose(relative_rotation, relative_position)
def essential_matrix_from_relative_pose(Rrel, prel):
    """Essential matrix from a relative rotation RREL and translation PREL."""
    translation_cross = skew(prel)
    return np.dot(Rrel, translation_cross)
def add_white_noise(x, sigma):
    """Add zero-mean Gaussian noise with standard deviation SIGMA to X."""
    noise = np.random.randn(*x.shape) * sigma
    return x + noise
def add_orientation_noise(x, sigma):
    """Right-multiply each rotation in X by a random rotation of scale SIGMA."""
    x = np.atleast_3d(x)
    perturbed = [np.dot(xi, SO3.exp(np.random.randn(3)*sigma)) for xi in x]
    return np.array(perturbed)
def renumber_tracks(features, landmarks=None, min_track_length=None):
    """Renumber feature track ids to a dense 0..K-1 range.

    Optionally drops tracks shorter than MIN_TRACK_LENGTH first. When
    LANDMARKS is given, returns (features, landmarks) with the landmark
    array reindexed to match the renumbering; otherwise returns features.
    """
    # Drop tracks that are too short. Materialize the result as a list:
    # on Python 3, filter() returns a one-shot iterator, and this sequence
    # is iterated several times below (and returned to the caller).
    if min_track_length is not None:
        track_lengths = collections.defaultdict(int)
        for f in features:
            track_lengths[f.track_id] += 1
        features = [f for f in features
                    if track_lengths[f.track_id] >= min_track_length]
    # Apply a consistent renumbering, in increasing order of original id.
    track_ids = sorted(set(f.track_id for f in features))
    track_index_by_id = {track_id: index for index, track_id in enumerate(track_ids)}
    for f in features:
        f.track_id = track_index_by_id[f.track_id]
    # Return the final features, reindexing landmarks if supplied.
    if landmarks is not None:
        assert len(track_ids) == 0 or len(landmarks) > max(track_ids)
        landmarks = np.array([landmarks[i] for i in track_ids])
        return features, landmarks
    else:
        return features
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,122
|
alexflint/spline-initialization
|
refs/heads/master
|
/sensor_models.py
|
import numpy as np
import cayley
import geometry
def predict_accel(pos_curve, orient_curve, accel_bias, gravity, t):
    """Predicted accelerometer reading at time T from spline trajectories."""
    r = cayley.cayley(orient_curve.evaluate(t))
    return predict_accel_with_orientation(pos_curve, r, accel_bias, gravity, t)
def predict_accel_with_orientation(pos_curve, orientation, accel_bias, gravity, t):
    """Accelerometer model: rotate (acceleration + gravity) into the body
    frame and add the bias."""
    global_accel = pos_curve.evaluate_d2(t)
    body_accel = np.dot(orientation, global_accel + gravity)
    return body_accel + accel_bias
def predict_feature(pos_curve, orient_curve, landmark, t, imu_to_camera, camera_matrix):
    """Predicted image observation of LANDMARK at time T."""
    position = pos_curve.evaluate(t)
    rotation = cayley.cayley(orient_curve.evaluate(t))
    return predict_feature_with_pose(rotation, position, landmark, imu_to_camera, camera_matrix)
def predict_feature_with_pose(r, p, x, imu_to_camera, camera_matrix, allow_behind=True):
    """Project landmark X into a camera at pose (R, P).

    Returns None when the landmark is behind the camera and ALLOW_BEHIND is
    false; otherwise the projected 2D point.
    """
    assert np.shape(r) == (3, 3), 'shape was '+str(np.shape(r))
    assert np.shape(p) == (3,), 'shape was '+str(np.shape(p))
    assert np.shape(x) == (3,), 'shape was '+str(np.shape(x))
    assert np.shape(imu_to_camera) == (3, 3), 'shape was '+str(np.shape(imu_to_camera))
    assert np.shape(camera_matrix) == (3, 3), 'shape was '+str(np.shape(camera_matrix))
    in_imu = np.dot(r, x - p)
    in_camera = np.dot(imu_to_camera, in_imu)
    y = np.dot(camera_matrix, in_camera)
    if not allow_behind and y[2] <= 0:
        return None
    return geometry.pr(y)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,123
|
alexflint/spline-initialization
|
refs/heads/master
|
/estimate_position.py
|
import numdifftools
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from lie import SO3
from geometry import pr
from cayley import cayley
from utils import normalized, skew_jacobian, essential_matrix, add_white_noise, add_orientation_noise
from bezier import zero_offset_bezier, zero_offset_bezier_mat, zero_offset_bezier_second_deriv, zero_offset_bezier_second_deriv_mat
from plotting import plot_tracks
def diagify(x, k):
    """Expand each scalar entry of X into that scalar times a K x K identity."""
    x = np.atleast_2d(x)
    m, n = x.shape
    out = np.zeros((m*k, n*k), x.dtype)
    for i in range(m):
        for j in range(n):
            out[i*k:(i+1)*k, j*k:(j+1)*k] = x[i, j] * np.eye(k)
    return out
def dots(*args):
    """Chain np.dot over an arbitrary number of matrices, left to right.

    Imports reduce from functools so the function also works on Python 3,
    where reduce is no longer a builtin (functools.reduce exists on both).
    """
    from functools import reduce
    return reduce(np.dot, args)
def accel_residual(pos_controls, accel_bias, gravity,
                   timestamp, accel_reading, orientation):
    """Difference between the predicted and observed accelerometer reading."""
    global_accel = zero_offset_bezier_second_deriv(pos_controls, timestamp)
    predicted = np.dot(orientation, global_accel + gravity) + accel_bias
    return predicted - accel_reading
def accel_jacobian(bezier_order, timestamp, orientation):
    """Jacobian of accel_residual w.r.t. (position controls, bias, gravity)."""
    bezier_mat = zero_offset_bezier_second_deriv_mat(timestamp, bezier_order, 3)
    blocks = (np.dot(orientation, bezier_mat), np.eye(3), orientation)
    return np.hstack(blocks)
def evaluate_accel_residuals(pos_controls, accel_bias, gravity,
                             accel_timestamps, accel_readings, accel_orientations):
    """Stack accelerometer residuals over all samples into one vector."""
    residuals = []
    for t, R, accel in zip(accel_timestamps, accel_orientations, accel_readings):
        residuals.append(accel_residual(pos_controls, accel_bias, gravity, t, accel, R))
    return np.hstack(residuals)
def evaluate_accel_jacobians(bezier_order, accel_timestamps, accel_orientations):
    """Stack accelerometer Jacobian rows over all samples into one matrix."""
    jacobians = []
    for t, R in zip(accel_timestamps, accel_orientations):
        jacobians.append(accel_jacobian(bezier_order, t, R))
    return np.vstack(jacobians)
def epipolar_residual(pos_controls, ti, tj, zi, zj, Ri, Rj):
    """Epipolar constraint value zj' * E * zi for frames at times TI and TJ."""
    pi = zero_offset_bezier(pos_controls, ti)
    pj = zero_offset_bezier(pos_controls, tj)
    E = essential_matrix(Ri, pi, Rj, pj)
    return np.dot(np.dot(zj, E), zi)
def epipolar_jacobian(bezier_order, ti, tj, zi, zj, Ri, Rj):
    """Jacobian of epipolar_residual w.r.t. the position control points."""
    Rrel = np.dot(Rj, Ri.T)
    zzt = np.outer(zj, zi).flatten()
    Ai = zero_offset_bezier_mat(ti, bezier_order, 3)
    Aj = zero_offset_bezier_mat(tj, bezier_order, 3)
    # NOTE(review): skew_jacobian is imported from utils at the top of this
    # file but is not defined in the utils module visible here -- confirm it
    # exists at the import site.
    return dots(zzt, diagify(Rrel, 3), skew_jacobian(), np.dot(Ri, Aj - Ai))
def evaluate_epipolar_residuals(pos_controls, frame_timestamps, frame_orientations,
                                features, feature_mask=None):
    """Collect epipolar residuals over every ordered frame pair and track."""
    residuals = []
    num_tracks = features.shape[1]
    for i, (ti, Ri) in enumerate(zip(frame_timestamps, frame_orientations)):
        for j, (tj, Rj) in enumerate(zip(frame_timestamps, frame_orientations)):
            if i == j:
                continue
            for k in range(num_tracks):
                # Skip tracks not observed in both frames when a mask is given.
                if feature_mask is not None and not (feature_mask[i, k] and feature_mask[j, k]):
                    continue
                residuals.append(epipolar_residual(pos_controls, ti, tj,
                                                   features[i][k], features[j][k], Ri, Rj))
    return np.array(residuals)
def evaluate_epipolar_jacobians(bezier_order, frame_timestamps, frame_orientations,
                                features, feature_mask=None):
    """Collect epipolar Jacobian rows over every ordered frame pair and track."""
    jacobians = []
    num_tracks = features.shape[1]
    for i, (ti, Ri) in enumerate(zip(frame_timestamps, frame_orientations)):
        for j, (tj, Rj) in enumerate(zip(frame_timestamps, frame_orientations)):
            if i == j:
                continue
            for k in range(num_tracks):
                # Skip tracks not observed in both frames when a mask is given.
                if feature_mask is not None and not (feature_mask[i, k] and feature_mask[j, k]):
                    continue
                jacobians.append(epipolar_jacobian(bezier_order, ti, tj,
                                                   features[i][k], features[j][k], Ri, Rj))
    return np.array(jacobians)
def reprojection_residual(pos_controls, landmark, depth, timestamp, feature, orientation):
    """Residual R*(landmark - position) - depth*feature for one observation."""
    position = zero_offset_bezier(pos_controls, timestamp)
    rotated = np.dot(orientation, landmark - position)
    return rotated - depth * feature
def reprojection_jacobian(bezier_order, timestamp, feature, orientation):
    """Jacobians of reprojection_residual w.r.t. (controls, landmark, depth)."""
    bezier_mat = zero_offset_bezier_mat(timestamp, bezier_order, 3)
    J_controls = -np.dot(orientation, bezier_mat)
    return J_controls, orientation, -feature
def evaluate_reprojection_residuals(pos_controls, landmarks, depths, frame_timestamps, frame_orientations,
                                    features, feature_mask=None):
    """Stack reprojection residuals over all (frame, landmark) observations."""
    residuals = []
    for i, (t, R) in enumerate(zip(frame_timestamps, frame_orientations)):
        for k, (z, landmark, depth) in enumerate(zip(features[i], landmarks, depths[i])):
            if feature_mask is not None and not feature_mask[i, k]:
                continue
            residuals.append(reprojection_residual(pos_controls, landmark, depth, t, z, R))
    return np.hstack(residuals)
def evaluate_reprojection_jacobians(bezier_order, frame_timestamps, frame_orientations,
                                    features, feature_mask=None):
    """Stack per-observation reprojection Jacobians into one dense matrix.

    Columns follow the layout [position controls | accel bias | gravity |
    landmarks | depths]; there are three rows per (frame, landmark)
    observation.
    """
    num_frames, num_landmarks = np.shape(features)[:2]
    # Column offsets of each parameter group within the flat parameter vector.
    position_offset = 0
    accel_bias_offset = position_offset + bezier_order*3
    gravity_offset = accel_bias_offset + 3
    landmark_offset = gravity_offset + 3
    depth_offset = landmark_offset + num_landmarks * 3
    size = depth_offset + num_frames * num_landmarks
    jacobians = []
    for i, (t, R) in enumerate(zip(frame_timestamps, frame_orientations)):
        for j, z in enumerate(features[i]):
            if feature_mask is None or feature_mask[i, j]:
                J_r_wrt_p, J_r_wrt_x, J_r_wrt_k = reprojection_jacobian(bezier_order, t, z, R)
                # Scatter the three partial Jacobians into their column blocks.
                x_offset = landmark_offset + j * 3
                k_offset = depth_offset + i * num_landmarks + j
                jcur = np.zeros((3, size))
                jcur[:, position_offset:accel_bias_offset] = J_r_wrt_p
                jcur[:, x_offset:x_offset+3] = J_r_wrt_x
                jcur[:, k_offset] = J_r_wrt_k
                jacobians.append(jcur)
    return np.vstack(jacobians)
def ba_reprojection_residual(pos_controls, landmark, timestamp, feature, orientation):
    """Projective reprojection error for one landmark observation."""
    position = zero_offset_bezier(pos_controls, timestamp)
    predicted = pr(np.dot(orientation, landmark - position))
    return predicted - pr(feature)
def evaluate_ba_reprojection_residuals(pos_controls, landmarks, frame_timestamps, frame_orientations,
                                       features, feature_mask=None):
    """Stack bundle-adjustment reprojection residuals over all observations."""
    residuals = []
    for i, (t, R) in enumerate(zip(frame_timestamps, frame_orientations)):
        for k, (z, x) in enumerate(zip(features[i], landmarks)):
            if feature_mask is not None and not feature_mask[i, k]:
                continue
            residuals.append(ba_reprojection_residual(pos_controls, x, t, z, R))
    return np.hstack(residuals)
def estimate_position(bezier_degree,
                      observed_accel_timestamps,
                      observed_accel_orientations,
                      observed_accel_readings,
                      observed_frame_timestamps,
                      observed_frame_orientations,
                      observed_features,
                      vision_weight=1.):
    """Estimate position controls, accel bias and gravity with known
    orientations, using accelerometer residuals plus epipolar constraints.

    Solves a single linear least-squares system and returns the tuple
    (pos_controls, accel_bias, gravity).
    """
    # Compute offsets for parameter vector
    position_offs = 0
    accel_bias_offset = position_offs + (bezier_degree-1)*3
    gravity_offset = accel_bias_offset + 3
    # Setup linear system; residuals are evaluated at zero parameters since
    # both models are linear in the unknowns.
    accel_res = evaluate_accel_residuals(np.zeros((bezier_degree-1, 3)), np.zeros(3), np.zeros(3),
                                         observed_accel_timestamps, observed_accel_readings, observed_accel_orientations)
    accel_jac = evaluate_accel_jacobians(bezier_degree-1, observed_accel_timestamps, observed_accel_orientations)
    epipolar_res = evaluate_epipolar_residuals(np.zeros((bezier_degree-1, 3)), observed_frame_timestamps,
                                               observed_frame_orientations, observed_features)
    epipolar_jac = evaluate_epipolar_jacobians(bezier_degree-1, observed_frame_timestamps,
                                               observed_frame_orientations, observed_features)
    # Epipolar terms do not involve bias/gravity: pad with six zero columns.
    epipolar_jac = np.hstack((epipolar_jac, np.zeros((epipolar_jac.shape[0], 6))))
    residual = np.hstack((accel_res, epipolar_res * vision_weight))
    jacobian = np.vstack((accel_jac, epipolar_jac * vision_weight))
    # Solve the normal equations J'J x = -J'r
    jtj = np.dot(jacobian.T, jacobian)
    jtr = np.dot(jacobian.T, residual)
    estimated_vars = np.squeeze(np.linalg.solve(jtj, -jtr))
    # Unpack result and compute error
    estimated_pos_controls = estimated_vars[position_offs:position_offs+(bezier_degree-1)*3].reshape((bezier_degree-1, 3))
    estimated_accel_bias = estimated_vars[accel_bias_offset:accel_bias_offset+3]
    estimated_gravity = estimated_vars[gravity_offset:gravity_offset+3]
    return estimated_pos_controls, estimated_accel_bias, estimated_gravity
def structure_and_motion_system(bezier_degree,
                                observed_accel_timestamps,
                                observed_accel_orientations,
                                observed_accel_readings,
                                observed_frame_timestamps,
                                observed_frame_orientations,
                                observed_features,
                                vision_weight=1.):
    """Build the (residual, jacobian) pair for the linearized
    structure-and-motion problem, linearized about all-zero parameters.

    Reprojection rows are scaled by VISION_WEIGHT relative to accel rows.
    """
    # Compute offsets for parameter vector
    num_frames, num_landmarks = observed_features.shape[:2]
    # Linearization point: everything zero (the system is linear in these).
    linearization_accel_bias = np.zeros(3)
    linearization_gravity = np.zeros(3)
    #linearization_gravity = np.array([3.01579968, 8.26799292, 4.31106082])
    linearization_pos_controls = np.zeros((bezier_degree-1, 3))
    linearization_landmarks = np.zeros((num_landmarks, 3))
    linearization_depths = np.zeros((num_frames, num_landmarks))
    # Setup linear system
    accel_res = evaluate_accel_residuals(linearization_pos_controls,
                                         linearization_accel_bias,
                                         linearization_gravity,
                                         observed_accel_timestamps,
                                         observed_accel_readings,
                                         observed_accel_orientations)
    accel_jac = evaluate_accel_jacobians(bezier_degree-1,
                                         observed_accel_timestamps,
                                         observed_accel_orientations)
    # Accel rows have no landmark/depth columns: pad with zeros.
    accel_jac = np.hstack((accel_jac, np.zeros((accel_jac.shape[0], num_landmarks * (3 + num_frames)))))
    reproj_res = evaluate_reprojection_residuals(linearization_pos_controls,
                                                 linearization_landmarks,
                                                 linearization_depths,
                                                 observed_frame_timestamps,
                                                 observed_frame_orientations,
                                                 observed_features)
    reproj_jac = evaluate_reprojection_jacobians(bezier_degree-1,
                                                 observed_frame_timestamps,
                                                 observed_frame_orientations,
                                                 observed_features)
    residual = np.hstack((accel_res, reproj_res * vision_weight))
    jacobian = np.vstack((accel_jac, reproj_jac * vision_weight))
    return residual, jacobian
def estimate_structure_and_motion(bezier_degree,
                                  observed_accel_timestamps,
                                  observed_accel_orientations,
                                  observed_accel_readings,
                                  observed_frame_timestamps,
                                  observed_frame_orientations,
                                  observed_features,
                                  vision_weight=1.):
    """Jointly estimate motion (controls, bias, gravity) and structure
    (landmarks, per-observation depths) by solving one linear system.

    Returns (pos_controls, accel_bias, gravity, landmarks, depths).
    """
    # Compute offsets for parameter vector
    num_frames, num_landmarks = observed_features.shape[:2]
    position_offset = 0
    accel_bias_offset = position_offset + (bezier_degree-1)*3
    gravity_offset = accel_bias_offset + 3
    landmark_offset = gravity_offset + 3
    depth_offset = landmark_offset + num_landmarks * 3
    residual, jacobian = structure_and_motion_system(bezier_degree,
                                                     observed_accel_timestamps,
                                                     observed_accel_orientations,
                                                     observed_accel_readings,
                                                     observed_frame_timestamps,
                                                     observed_frame_orientations,
                                                     observed_features,
                                                     vision_weight)
    # Solve the normal equations J'J x = -J'r
    jtj = np.dot(jacobian.T, jacobian)
    jtr = np.dot(jacobian.T, residual)
    #jtj[gravity_offset:landmark_offset, :] = 0
    #jtj[:, gravity_offset:landmark_offset] = 0
    #jtj[gravity_offset:landmark_offset, gravity_offset:landmark_offset] = np.eye(3)
    #jtr[gravity_offset:landmark_offset] = 0
    estimated_vars = np.squeeze(np.linalg.solve(jtj, -jtr))
    print 'Linear system error:', np.linalg.norm(np.dot(jtj, estimated_vars) + jtr)
    # Unpack result and compute error
    estimated_pos_controls = estimated_vars[position_offset:accel_bias_offset].reshape((-1, 3))
    estimated_accel_bias = estimated_vars[accel_bias_offset:gravity_offset]
    estimated_gravity = estimated_vars[gravity_offset:landmark_offset]
    estimated_landmarks = estimated_vars[landmark_offset:depth_offset].reshape((-1, 3))
    estimated_depths = estimated_vars[depth_offset:].reshape((num_frames, num_landmarks))
    return estimated_pos_controls, estimated_accel_bias, estimated_gravity, estimated_landmarks, estimated_depths
def optimize_ba(bezier_degree,
                init_pos_controls,
                init_accel_bias,
                init_gravity,
                init_landmarks,
                observed_accel_timestamps,
                observed_accel_orientations,
                observed_accel_readings,
                observed_frame_timestamps,
                observed_frame_orientations,
                observed_features,
                vision_weight=1.):
    """Solve the linearized structure-and-motion system and unpack the result.

    Returns (pos_controls, accel_bias, gravity, landmarks, depths).

    NOTE(review): the init_* arguments and the nested residual() closure are
    currently unused -- this performs a single linear solve around zero
    rather than iterating from the supplied initial estimates.
    """
    # Compute offsets for parameter vector
    num_frames, num_landmarks = observed_features.shape[:2]
    position_offset = 0
    accel_bias_offset = position_offset + (bezier_degree-1)*3
    gravity_offset = accel_bias_offset + 3
    landmark_offset = gravity_offset + 3
    # BUGFIX: depth_offset was referenced when unpacking the solution below
    # but never defined, raising a NameError. Define it with the same layout
    # used in estimate_structure_and_motion.
    depth_offset = landmark_offset + num_landmarks * 3

    def unpack(x):
        # Split a flat parameter vector into its semantic pieces.
        pos_controls = x[position_offset:accel_bias_offset].reshape((-1, 3))
        accel_bias = x[accel_bias_offset:gravity_offset]
        gravity = x[gravity_offset:landmark_offset]
        landmarks = x[landmark_offset:].reshape((-1, 3))
        return pos_controls, accel_bias, gravity, landmarks

    def residual(x):
        # Nonlinear residual (not yet used: the name is rebound to the
        # linearized residual below).
        pos_controls, accel_bias, gravity, landmarks = unpack(x)
        reprojection_residual = evaluate_ba_reprojection_residuals(pos_controls,
                                                                   landmarks,
                                                                   observed_frame_timestamps,
                                                                   observed_frame_orientations,
                                                                   observed_features)
        accel_residual = evaluate_accel_residuals(pos_controls,
                                                  accel_bias,
                                                  gravity,
                                                  observed_accel_timestamps,
                                                  observed_accel_readings,
                                                  observed_accel_orientations)
        return np.hstack((reprojection_residual, accel_residual))

    residual, jacobian = structure_and_motion_system(bezier_degree,
                                                     observed_accel_timestamps,
                                                     observed_accel_orientations,
                                                     observed_accel_readings,
                                                     observed_frame_timestamps,
                                                     observed_frame_orientations,
                                                     observed_features,
                                                     vision_weight)
    # Solve the normal equations J'J x = -J'r
    jtj = np.dot(jacobian.T, jacobian)
    jtr = np.dot(jacobian.T, residual)
    estimated_vars = np.squeeze(np.linalg.solve(jtj, -jtr))
    print('Linear system error: %s' % np.linalg.norm(np.dot(jtj, estimated_vars) + jtr))
    # Unpack result
    estimated_pos_controls = estimated_vars[position_offset:accel_bias_offset].reshape((-1, 3))
    estimated_accel_bias = estimated_vars[accel_bias_offset:gravity_offset]
    estimated_gravity = estimated_vars[gravity_offset:landmark_offset]
    estimated_landmarks = estimated_vars[landmark_offset:depth_offset].reshape((-1, 3))
    estimated_depths = estimated_vars[depth_offset:].reshape((num_frames, num_landmarks))
    return estimated_pos_controls, estimated_accel_bias, estimated_gravity, estimated_landmarks, estimated_depths
def predict_accel(pos_controls, orient_controls, accel_bias, gravity, t):
    """Predicted accelerometer reading at time T for Bezier trajectories."""
    global_accel = zero_offset_bezier_second_deriv(pos_controls, t)
    orientation = cayley(zero_offset_bezier(orient_controls, t))
    body_accel = np.dot(orientation, global_accel + gravity)
    return body_accel + accel_bias
def predict_feature(pos_controls, orient_controls, landmark, t):
    """Unit-norm bearing of LANDMARK in the body frame at time T."""
    p = zero_offset_bezier(pos_controls, t)
    r = cayley(zero_offset_bezier(orient_controls, t))
    bearing = np.dot(r, landmark - p)
    return normalized(bearing)
def predict_depth(pos_controls, orient_controls, landmark, t):
    """Distance from the trajectory to LANDMARK at time T."""
    p = zero_offset_bezier(pos_controls, t)
    r = cayley(zero_offset_bezier(orient_controls, t))
    offset = np.dot(r, landmark - p)
    return np.linalg.norm(offset)
def run_position_estimation():
    """End-to-end synthetic experiment: simulate a trajectory, landmarks and
    sensor readings, add noise, run the estimator, and report/plot results."""
    #
    # Construct ground truth
    #
    num_frames = 5
    num_landmarks = 150
    num_imu_readings = 80
    bezier_degree = 4
    use_epipolar_constraints = False
    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Bezier curve degree:', bezier_degree
    # Both splines should start at 0,0,0
    true_frame_timestamps = np.linspace(0, .9, num_frames)
    true_accel_timestamps = np.linspace(0, 1, num_imu_readings)
    true_rot_controls = np.random.randn(bezier_degree-1, 3)
    true_pos_controls = np.random.randn(bezier_degree-1, 3)
    true_landmarks = np.random.randn(num_landmarks, 3) * 5
    true_landmarks[:, 2] += 20
    true_frame_orientations = np.array([cayley(zero_offset_bezier(true_rot_controls, t)) for t in true_frame_timestamps])
    true_frame_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in true_frame_timestamps])
    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3)
    print 'True gravity:', true_gravity
    true_imu_orientations = np.array([cayley(zero_offset_bezier(true_rot_controls, t)) for t in true_accel_timestamps])
    true_accel_readings = np.array([predict_accel(true_pos_controls, true_rot_controls, true_accel_bias, true_gravity, t)
                                    for t in true_accel_timestamps])
    true_features = np.array([[predict_feature(true_pos_controls, true_rot_controls, x, t) for x in true_landmarks]
                              for t in true_frame_timestamps])
    true_depths = np.array([[predict_depth(true_pos_controls, true_rot_controls, x, t) for x in true_landmarks]
                            for t in true_frame_timestamps])
    #
    # Add sensor noise
    #
    accel_timestamp_noise = 0
    accel_reading_noise = 1e-3
    accel_orientation_noise = 1e-3
    frame_timestamp_noise = 0
    frame_orientation_noise = 1e-3
    feature_noise = 5e-3
    observed_accel_timestamps = add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = add_white_noise(true_accel_readings, accel_reading_noise)
    observed_accel_orientations = add_orientation_noise(true_imu_orientations, accel_orientation_noise)
    observed_frame_timestamps = add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = add_orientation_noise(true_frame_orientations, frame_orientation_noise)
    observed_features = add_white_noise(true_features, feature_noise)
    #
    # Plot
    #
    #plt.clf()
    #plot_tracks(true_features, 'g-', alpha=.2, limit=1)
    #plot_tracks(true_features, 'go', alpha=.6, limit=1)
    #plot_tracks(observed_features, 'r-', alpha=.2, limit=1)
    #plot_tracks(observed_features, 'rx', alpha=.6, limit=1)
    #plt.show()
    #
    # Solve
    #
    if use_epipolar_constraints:
        estimated_pos_controls, estimated_accel_bias, estimated_gravity = estimate_position(
            bezier_degree,
            observed_accel_timestamps,
            observed_accel_orientations,
            observed_accel_readings,
            observed_frame_timestamps,
            observed_frame_orientations,
            observed_features,
            vision_weight=1.)
    else:
        estimated_pos_controls, estimated_accel_bias, estimated_gravity, estimated_landmarks, estimated_depths = \
            estimate_structure_and_motion(
                bezier_degree,
                observed_accel_timestamps,
                observed_accel_orientations,
                observed_accel_readings,
                observed_frame_timestamps,
                observed_frame_orientations,
                observed_features,
                vision_weight=1.)
        r, j = structure_and_motion_system(
            bezier_degree,
            observed_accel_timestamps,
            observed_accel_orientations,
            observed_accel_readings,
            observed_frame_timestamps,
            observed_frame_orientations,
            observed_features,
            vision_weight=1.)
        # Spot-check the reprojection residual at the first observation,
        # comparing the estimated and true parameter values.
        t0 = observed_frame_timestamps[0]
        r0 = observed_frame_orientations[0]
        z0 = observed_features[0, 0]
        p0 = zero_offset_bezier(estimated_pos_controls, t0)
        pp0 = zero_offset_bezier(true_pos_controls, t0)
        x0 = estimated_landmarks[0]
        xx0 = true_landmarks[0]
        k0 = estimated_depths[0, 0]
        kk0 = np.linalg.norm(np.dot(r0, xx0 - pp0))
        print 'residual:'
        print reprojection_residual(estimated_pos_controls, x0, k0, t0, z0, r0)
        print reprojection_residual(true_pos_controls, xx0, kk0, t0, z0, r0)
        print np.dot(r0, x0 - p0) - k0 * z0
        print np.dot(r0, xx0 - pp0) - kk0 * z0
        #true_structure = np.hstack((true_landmarks, true_depths[:, None]))
        #true_params = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity, true_structure.flatten()))
        #jtj = np.dot(j.T, j)
        #jtr = np.dot(j.T, r)
        #print jtj.shape, true_params.shape, jtr.shape
        #print np.dot(jtj, true_params) - jtr
        #return
    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t)
                                    for t in true_frame_timestamps])
    estimated_accel_readings = np.array([predict_accel(estimated_pos_controls,
                                                       true_rot_controls,
                                                       estimated_accel_bias,
                                                       estimated_gravity,
                                                       t)
                                         for t in true_accel_timestamps])
    estimated_pfeatures = np.array([[pr(predict_feature(estimated_pos_controls, true_rot_controls, x, t))
                                     for x in true_landmarks]
                                    for t in true_frame_timestamps])
    true_pfeatures = pr(true_features)
    observed_pfeatures = pr(observed_features)
    #
    # Report
    #
    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print '  True accel bias:', true_accel_bias
    print '  Estimated accel bias:', estimated_accel_bias
    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print '  True gravity:', true_gravity
    print '  Estimated gravity:', estimated_gravity
    print '  Estimated gravity magnitude:', np.linalg.norm(estimated_gravity)
    for i in range(num_frames):
        print 'Frame %d position error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i]))
    # Four-panel figure: trajectory, accel readings, and two feature overlays.
    fig = plt.figure(1, figsize=(14, 10))
    ax = fig.add_subplot(2, 2, 1, projection='3d')
    ts = np.linspace(0, 1, 100)
    true_ps = np.array([zero_offset_bezier(true_pos_controls, t) for t in ts])
    estimated_ps = np.array([zero_offset_bezier(estimated_pos_controls, t) for t in ts])
    ax.plot(true_ps[:, 0], true_ps[:, 1], true_ps[:, 2], '-b')
    ax.plot(estimated_ps[:, 0], estimated_ps[:, 1], estimated_ps[:, 2], '-r')
    #ax.plot(true_landmarks[:,0], true_landmarks[:,1], true_landmarks[:,2], '.k')
    ax = fig.add_subplot(2, 2, 2)
    ax.plot(true_accel_timestamps, true_accel_readings, '-', label='true')
    ax.plot(observed_accel_timestamps, observed_accel_readings, 'x', label='observed')
    ax.plot(true_accel_timestamps, estimated_accel_readings, ':', label='estimated')
    ax.legend()
    ax.set_xlim(-.1, 1.5)
    ax = fig.add_subplot(2, 2, 3)
    ax.plot(true_pfeatures[1, :, 0], true_pfeatures[1, :, 1], 'x', label='true', alpha=.8)
    ax.plot(estimated_pfeatures[1, :, 0], estimated_pfeatures[1, :, 1], 'o', label='estimated', alpha=.4)
    ax = fig.add_subplot(2, 2, 4)
    ax.plot(true_pfeatures[-1, :, 0], true_pfeatures[-1, :, 1], '.', label='true', alpha=.8)
    ax.plot(observed_pfeatures[-1, :, 0], observed_pfeatures[-1, :, 1], 'x', label='observed', alpha=.8)
    ax.plot(estimated_pfeatures[-1, :, 0], estimated_pfeatures[-1, :, 1], 'o', label='estimated', alpha=.4)
    plt.show()
def run_accel_finite_differences():
    """Check accel_jacobian against a numerical Jacobian of accel_residual."""
    np.random.seed(0)
    bezier_order = 4
    pos_controls = np.random.randn(bezier_order, 3)
    accel_bias = np.random.randn(3)
    gravity = np.random.randn(3)
    a = np.random.randn(3)
    R = SO3.exp(np.random.randn(3))
    t = .5
    def r(delta):
        # delta stacks [controls (k entries) | accel bias (3) | gravity (3)].
        k = bezier_order * 3
        assert len(delta) == k + 6
        # BUGFIX: the bias perturbation was delta[k:k] (an empty slice),
        # which dropped the bias term and broke broadcasting; it must span
        # the next three entries.
        return accel_residual(pos_controls + delta[:k].reshape((bezier_order, 3)),
                              accel_bias + delta[k:k+3],
                              gravity + delta[k+3:k+6],
                              t,
                              a,
                              R)
    J_numeric = numdifftools.Jacobian(r)(np.zeros(bezier_order*3+6))
    J_analytic = accel_jacobian(bezier_order, t, R)
    # Single-argument parenthesized prints behave identically on Python 2.
    print('\nNumeric:')
    print(J_numeric)
    print('\nAnalytic:')
    print(J_analytic)
    np.testing.assert_array_almost_equal(J_numeric, J_analytic, decimal=8)
def run_epipolar_finite_differences():
    """Check epipolar_jacobian against a numerical Jacobian of
    epipolar_residual at a random linearization point."""
    np.random.seed(0)
    bezier_order = 4
    pos_controls = np.random.randn(bezier_order, 3)
    ti, tj = np.random.randn(2)
    zi, zj = np.random.randn(2, 3)
    Ri = SO3.exp(np.random.randn(3))
    Rj = SO3.exp(np.random.randn(3))
    def r(delta):
        # Perturb only the position control points.
        assert len(delta) == bezier_order * 3
        return epipolar_residual(pos_controls + delta.reshape((bezier_order, 3)),
                                 ti, tj, zi, zj, Ri, Rj)
    J_numeric = np.squeeze(numdifftools.Jacobian(r)(np.zeros(bezier_order*3)))
    J_analytic = epipolar_jacobian(bezier_order, ti, tj, zi, zj, Ri, Rj)
    print '\nNumeric:'
    print J_numeric
    print '\nAnalytic:'
    print J_analytic
    np.testing.assert_array_almost_equal(J_numeric, J_analytic, decimal=8)
def run_reprojection_finite_differences():
    """Validate the analytic reprojection-residual Jacobian against numeric finite differences.

    The state vector is laid out as
    [position controls | accel bias | gravity | landmark | depth]; the
    analytic Jacobian has zero columns for the bias/gravity entries, which
    the residual below never reads.
    """
    np.random.seed(0)
    bezier_order = 4
    pos_controls = np.random.randn(bezier_order, 3)
    landmark = np.random.randn(3)
    depth = np.random.randn()
    timestamp = .5
    feature = np.random.randn(3)
    orientation = SO3.exp(np.random.randn(3))
    # Offsets of each variable group within the stacked perturbation vector.
    position_offset = 0
    accel_bias_offset = position_offset + bezier_order * 3
    gravity_offset = accel_bias_offset + 3
    landmark_offset = gravity_offset + 3
    depth_offset = landmark_offset + 3
    size = depth_offset + 1
    def r(delta):
        # Note: delta[accel_bias_offset:landmark_offset] (bias + gravity) is
        # deliberately unused; those columns are zero in the analytic Jacobian.
        return reprojection_residual(
            pos_controls + delta[:accel_bias_offset].reshape(pos_controls.shape),
            landmark + delta[landmark_offset:depth_offset],
            depth + delta[depth_offset],
            timestamp,
            feature,
            orientation)
    J_numeric = numdifftools.Jacobian(r)(np.zeros(size))
    J_wrt_p, J_wrt_x, J_wrt_k = reprojection_jacobian(bezier_order, timestamp, feature, orientation)
    print J_wrt_p.shape, J_wrt_x.shape, J_wrt_k[:,None].shape
    # Insert the zero columns for bias and gravity between the control and
    # landmark blocks so the layout matches the numeric Jacobian.
    J_analytic = np.hstack((J_wrt_p, np.zeros((3, 6)), J_wrt_x, J_wrt_k[:, None]))
    print '\nNumeric:'
    print J_numeric
    print '\nAnalytic:'
    print J_analytic
    np.testing.assert_array_almost_equal(J_numeric, J_analytic, decimal=8)
if __name__ == '__main__':
    # Script entry point: fixed seed and compact numpy/matplotlib settings
    # for reproducible debugging output.
    np.random.seed(1)
    np.set_printoptions(linewidth=500, suppress=True)
    matplotlib.rc('font', size=9)
    matplotlib.rc('legend', fontsize=9)
    #run_reprojection_finite_differences()
    run_position_estimation()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,124
|
alexflint/spline-initialization
|
refs/heads/master
|
/matching_test.py
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
def main():
    """Scratch experiment: ORB feature matching between two images, visualized with matplotlib.

    NOTE(review): uses legacy OpenCV 2.x APIs (cv2.CV_LOAD_IMAGE_COLOR,
    cv2.ORB(), plt.hold) and Python 2 print statements. Everything after the
    exit() call below is unreachable Harris-corner scratch code that also
    references an undefined name `gray` — kept as-is for reference.
    """
    img1 = cv2.imread('city1.png', cv2.CV_LOAD_IMAGE_COLOR)
    img2 = cv2.imread('city2.png', cv2.CV_LOAD_IMAGE_COLOR)
    #img2 = img1
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    #features = cv2.goodFeaturesToTrack(gray, maxCorners=1000, qualityLevel=.01, minDistance=4).squeeze()
    #features = features.astype(int)
    #img1[features[:,1], features[:,0]] = [0,0,255]
    #sift = cv2.SIFT()
    orb = cv2.ORB()
    # Detect keypoints and compute binary descriptors in both images.
    kps1, des1 = orb.detectAndCompute(gray1, None)
    kps2, des2 = orb.detectAndCompute(gray2, None)
    #canvas = cv2.drawKeypoints(gray, kp)
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1, des2)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)
    # Draw the two images side by side in normalized coordinates:
    # img1 occupies x in [0, .9], img2 occupies x in [1, 1.9].
    plt.clf()
    plt.hold('on')
    plt.imshow(img1, extent=(0, .9, 1, 0))
    plt.imshow(img2, extent=(1, 1.9, 1, 0))
    plt.xlim(0, 1.9)
    plt.ylim(1, 0)
    for match in matches[:50]:
        kp1 = kps1[match.queryIdx]
        kp2 = kps2[match.trainIdx]
        x1, y1 = kp1.pt
        x2, y2 = kp2.pt
        # Map pixel coordinates into the normalized side-by-side layout above.
        x1 = .9 * x1 / img1.shape[1]
        y1 = y1 / img1.shape[0]
        x2 = 1. + .9 * x2 / img2.shape[1]
        y2 = y2 / img2.shape[0]
        print match.queryIdx, match.trainIdx
        print x1, y1, x2, y2
        plt.plot([x1,x2], [y1,y2], 'r-')
    plt.show()
    # Draw first 10 matches.
    #img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], flags=2)
    #cv2.imshow('Matches', img3)
    #cv2.imshow('dst', img1)
    #if cv2.waitKey(0) & 0xff == 27:
    #cv2.destroyAllWindows()
    exit()
    # ---- Unreachable scratch code below this point ----
    # find Harris corners
    gray = np.float32(gray)
    dst = cv2.cornerHarris(gray, 2, 3, 0.04)
    dst = cv2.dilate(dst, None)
    ret, dst = cv2.threshold(dst, 0.01*dst.max(), 255, 0)
    dst = np.uint8(dst)
    # Find corners
    ys, xs = np.nonzero(dst)
    corners = np.array((xs, ys)).T
    # define the criteria to stop and refine the corners
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    refined_corners = corners.copy().astype(np.float32)
    cv2.cornerSubPix(np.uint8(gray), refined_corners, (5,5), (-1,-1), criteria)
    print 'Num initial corners:', len(corners)
    print 'Num refined corners:', len(refined_corners)
    dist = np.sqrt(np.sum(np.square(corners - refined_corners), axis=1))
    print 'Min distance: ', np.min(dist)
    print 'Max distance: ', np.max(dist)
    # Now draw them
    corners = np.int0(corners)
    refined_corners = np.int0(refined_corners)
    img1[corners[:,1], corners[:,0]] = [0,0,255]
    img1[refined_corners[:,1], refined_corners[:,0]] = [0,255,0]
    cv2.imshow('dst',img1)
    if cv2.waitKey(0) & 0xff == 27:
        cv2.destroyAllWindows()
# Script entry point.
if __name__ == '__main__':
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,125
|
alexflint/spline-initialization
|
refs/heads/master
|
/plotting.py
|
import numpy as np
import matplotlib.pyplot as plt
from geometry import arctans
def plot_segments(segments, *args, **kwargs):
    """Draw a collection of 2D polylines as a single matplotlib line artist.

    A None sentinel is appended after each segment so matplotlib breaks the
    line between segments. Extra args/kwargs are forwarded to plt.plot, and
    its return value is passed through.
    """
    all_x = []
    all_y = []
    for segment in segments:
        for point in segment:
            all_x.append(point[0])
            all_y.append(point[1])
        all_x.append(None)
        all_y.append(None)
    return plt.plot(all_x, all_y, *args, **kwargs)
def plot_tracks(xs, *args, **kwargs):
    """Plot feature tracks as polylines.

    If the last axis has length 3 the points are first mapped through
    arctans(). The first two axes are swapped so each plotted segment follows
    one track over time. An optional `limit` kwarg caps the number of tracks.
    """
    tracks = np.asarray(xs)
    if np.shape(tracks)[-1] == 3:
        tracks = arctans(tracks)
    # Re-order from (time, track, coord) to (track, time, coord).
    tracks = np.transpose(tracks, (1, 0, 2))
    if 'limit' in kwargs:
        tracks = tracks[:kwargs.pop('limit')]
    plot_segments(tracks, *args, **kwargs)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,126
|
alexflint/spline-initialization
|
refs/heads/master
|
/simulation.py
|
import numpy as np
import spline
import utils
import cayley
import geometry
import sensor_models
import structures
def simulate_trajectory(calibration,
                        duration=5.,
                        num_frames=12,
                        num_landmarks=50,
                        num_imu_readings=100,
                        degree=3,
                        num_controls=8,
                        accel_timestamp_noise=0.,
                        accel_reading_noise=0.,
                        accel_orientation_noise=0.,
                        frame_timestamp_noise=0.,
                        frame_orientation_noise=0.,
                        feature_noise=0.):
    """Simulate a spline trajectory with landmarks, accel readings, and features.

    Returns (true_trajectory, measurements, spline_template) where
    measurements contains the noise-corrupted observations and
    true_trajectory the ground truth used to generate them.
    """
    num_knots = num_controls - degree + 1
    spline_template = spline.SplineTemplate(np.linspace(0, duration, num_knots), degree, 3)
    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Spline curve degree:', degree
    # Both splines should start at 0,0,0
    true_frame_timestamps = np.linspace(0, duration, num_frames)
    true_accel_timestamps = np.linspace(0, duration, num_imu_readings)
    true_rot_curve = spline_template.build_random(.1)
    true_pos_curve = spline_template.build_random(first_control=np.zeros(3))
    # Hard-coded selector between three landmark distributions; only 'normal'
    # is active here.
    landmark_generator = 'normal'
    if landmark_generator == 'normal':
        true_landmarks = np.random.randn(num_landmarks, 3)*10
    elif landmark_generator == 'near':
        true_landmarks = []
        for i in range(num_landmarks):
            p = true_pos_curve.evaluate(true_frame_timestamps[i % len(true_frame_timestamps)])
            # NOTE(review): np.random.randn() is a scalar, so the same offset
            # is added to all 3 coordinates — possibly intended randn(3)?
            true_landmarks.append(p + np.random.randn()*.1)
    elif landmark_generator == 'far':
        true_landmarks = []
        for _ in range(num_landmarks):
            true_landmarks.append(utils.normalized(np.random.randn(3)) * 100000.)
    true_landmarks = np.asarray(true_landmarks)
    # Frame orientations come from the rotation spline via the Cayley map.
    true_frame_orientations = np.array(map(cayley.cayley, true_rot_curve.evaluate(true_frame_timestamps)))
    true_gravity_magnitude = 9.8
    true_gravity = utils.normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3) * .01
    # Sample IMU readings
    true_imu_orientations = np.array(map(cayley.cayley, true_rot_curve.evaluate(true_accel_timestamps)))
    true_accel_readings = np.array([
        sensor_models.predict_accel(true_pos_curve, true_rot_curve, true_accel_bias, true_gravity, t)
        for t in true_accel_timestamps])
    # Sample features
    num_behind = 0
    true_features = []
    for frame_id, t in enumerate(true_frame_timestamps):
        r = cayley.cayley(true_rot_curve.evaluate(t))
        p = true_pos_curve.evaluate(t)
        # Project all landmarks through camera_matrix * imu_to_camera * R.
        a = np.dot(calibration.camera_matrix, np.dot(calibration.imu_to_camera, r))
        ys = np.dot(true_landmarks - p, a.T)
        for track_id, y in enumerate(ys):
            # Only keep landmarks in front of the camera (positive depth).
            if y[2] > 0:
                true_features.append(structures.FeatureObservation(frame_id, track_id, geometry.pr(y)))
            else:
                num_behind += 1
    if num_behind > 0:
        print '%d landmarks were behind the camera (and %d were in front)' % (num_behind, len(true_features))
    # Drop tracks with fewer than 2 observations and renumber the rest.
    true_features, true_landmarks = utils.renumber_tracks(true_features, true_landmarks, min_track_length=2)
    true_trajectory = structures.PositionEstimate(true_pos_curve, true_gravity, true_accel_bias, true_landmarks)
    #
    # Add sensor noise
    #
    observed_accel_timestamps = utils.add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = utils.add_white_noise(true_accel_readings, accel_reading_noise)
    observed_accel_orientations = utils.add_orientation_noise(true_imu_orientations, accel_orientation_noise)
    observed_frame_timestamps = utils.add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = utils.add_orientation_noise(true_frame_orientations, frame_orientation_noise)
    observed_features = []
    for f in true_features:
        observed_features.append(structures.FeatureObservation(f.frame_id,
                                                               f.track_id,
                                                               utils.add_white_noise(f.position, feature_noise)))
    measurements = structures.Measurements(observed_accel_timestamps,
                                           observed_accel_orientations,
                                           observed_accel_readings,
                                           observed_frame_timestamps,
                                           observed_frame_orientations,
                                           observed_features)
    return true_trajectory, measurements, spline_template
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,127
|
alexflint/spline-initialization
|
refs/heads/master
|
/solve_secular.py
|
import numpy as np
import scipy.optimize
import matplotlib
import matplotlib.pyplot as plt
from lie import SO3
from utils import normalized
def main():
    """Solve a norm-constrained linear system via the secular equation.

    Finds x minimizing an SVD-diagonalized system subject to ||x|| = norm_x
    by root-finding the secular equation in the regularization parameter k.
    """
    a = SO3.exp(np.random.rand(3))
    b = np.array((2, 2, 2))
    norm_x = 1
    # NOTE(review): true_x is computed but never used below.
    true_x = np.dot(a.T, normalized(b))
    u, s, vt = np.linalg.svd(a)
    v = vt.T
    btilde = np.dot(u.T, b)
    def secular(k):
        # ||x(k)||^2 - norm_x^2, where x(k) is the Tikhonov-style solution.
        return np.sum(np.square(s*btilde / (s*s + k))) - norm_x*norm_x
    k = scipy.optimize.fsolve(secular, 1.)
    estimated_x = np.dot(v, s*btilde / (s*s + k))
    print estimated_x
    print np.dot(a, estimated_x)
if __name__ == '__main__':
    # Script entry point with deterministic seed and compact printing.
    np.random.seed(0)
    np.set_printoptions(suppress=True, linewidth=500)
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,128
|
alexflint/spline-initialization
|
refs/heads/master
|
/estimate_orientation.py
|
import numpy as np
import scipy.optimize
import numdifftools
from bezier import bezier, zero_offset_bezier, zero_offset_bezier_deriv
from cayley import cayley, angular_velocity_from_cayley_deriv, cayley_inv
from utils import add_white_noise, add_orientation_noise
from lie import SO3
from fit_bezier import fit_zero_offset_bezier
import matplotlib
import matplotlib.pyplot as plt
def predict_orientation(bezier_params, time):
    """Rotation matrix at `time`: Cayley transform of the zero-offset Bezier value."""
    curve_value = zero_offset_bezier(bezier_params, time)
    return cayley(curve_value)
def predict_gyro(bezier_params, gyro_bias, time):
    """Predict a gyro reading at `time`: angular velocity mapped through the
    predicted orientation, plus the gyro bias."""
    cayley_value = zero_offset_bezier(bezier_params, time)
    cayley_rate = zero_offset_bezier_deriv(bezier_params, time)
    rotation = cayley(cayley_value)
    omega = angular_velocity_from_cayley_deriv(cayley_value, cayley_rate)
    return np.dot(rotation, omega) + gyro_bias
def gyro_residual(bezier_params, gyro_bias, gyro_timestamp, gyro_reading):
    """Residual between the predicted and observed gyro reading at one timestamp."""
    predicted = predict_gyro(bezier_params, gyro_bias, gyro_timestamp)
    return predicted - gyro_reading
def gyro_residuals(bezier_params, gyro_bias, gyro_timestamps, gyro_readings):
    """Stack the gyro residuals for all readings into one flat vector."""
    assert len(gyro_timestamps) == len(gyro_readings)
    per_reading = [gyro_residual(bezier_params, gyro_bias, timestamp, reading)
                   for timestamp, reading in zip(gyro_timestamps, gyro_readings)]
    return np.hstack(per_reading)
def angular_velocity_left(f, t, step=1e-8):
    """Numeric angular velocity of rotation-valued f at t, from the
    left-multiplicative increment f(t+step) * f(t)'."""
    increment = np.dot(f(t + step), f(t).T)
    return SO3.log(increment) / step
def angular_velocity_right(f, t, step=1e-8):
    """Numeric angular velocity of rotation-valued f at t, from the
    right-multiplicative increment f(t)' * f(t+step)."""
    increment = np.dot(f(t).T, f(t + step))
    return SO3.log(increment) / step
def orientation_residuals(bezier_params, observed_timestamps, observed_orientations):
    """Stack log-map errors between predicted and observed orientations."""
    errors = [SO3.log(np.dot(predict_orientation(bezier_params, t).T, observed))
              for t, observed in zip(observed_timestamps, observed_orientations)]
    return np.hstack(errors)
def run_furgale():
    """Compare numeric and analytic angular velocities for a Bezier/Cayley rotation curve.

    Prints left/right finite-difference angular velocities alongside the
    analytic values derived from the Cayley parameterization.
    """
    bezier_order = 4
    bezier_params = np.random.rand(bezier_order, 3)
    bezier_params *= 10
    bez = lambda t: bezier(bezier_params, t)
    # Numeric time-derivative of the Bezier curve itself.
    bezderiv = lambda t: np.squeeze(numdifftools.Jacobian(lambda tt: bez(tt))(t))
    t0 = 1.23
    r0 = cayley(bez(t0))
    w0 = angular_velocity_from_cayley_deriv(bez(t0), bezderiv(t0))
    print 'Params:'
    print bezier_params
    print 'Rotation'
    print r0
    print 'Numeric right:', angular_velocity_right(lambda t: cayley(bez(t)), t0)
    print 'Analytic global:', w0
    print 'Numeric left:', angular_velocity_left(lambda t: cayley(bez(t)), t0)
    print 'Analytic local:', np.dot(r0, w0)
def fit_orientation_bezier(bezier_order, timestamps, orientations):
    """Fit a zero-offset Bezier curve to the Cayley coordinates of the given orientations."""
    cayley_coords = np.array([cayley_inv(orientation) for orientation in orientations])
    return fit_zero_offset_bezier(timestamps, cayley_coords, bezier_order)
def estimate_orientation(bezier_order,
                         observed_gyro_timestamps,
                         observed_gyro_readings,
                         observed_frame_timestamps,
                         observed_frame_orientations,
                         tol=1e-4,
                         **kwargs):
    """Jointly estimate gyro bias and Bezier orientation-curve parameters.

    Minimizes the sum of squared gyro residuals plus orientation (log-map)
    residuals over [gyro_bias (3) | bezier params (bezier_order*3)].
    Extra kwargs are passed as scipy.optimize options (maxiter defaults 1500).
    Returns (estimated_gyro_bias, estimated_params).
    """
    #seed_params = np.zeros((bezier_order, 3))
    # Seed the curve from the frame orientations alone; bias seeds at zero.
    seed_params = fit_orientation_bezier(bezier_order, observed_frame_timestamps, observed_frame_orientations)
    seed_gyro_bias = np.zeros(3)
    seed = np.hstack((seed_gyro_bias, seed_params.flatten()))
    def residuals(x):
        # x layout: first 3 entries are the gyro bias, rest are curve params.
        gyro_bias = x[:3]
        bezier_params = x[3:].reshape((bezier_order, 3))
        r_gyro = gyro_residuals(bezier_params, gyro_bias, observed_gyro_timestamps, observed_gyro_readings)
        r_orient = orientation_residuals(bezier_params, observed_frame_timestamps, observed_frame_orientations)
        return np.hstack((r_gyro, r_orient))
    def cost(x):
        r = residuals(x)
        return np.dot(r, r)
    kwargs.setdefault('maxiter', 1500)
    out = scipy.optimize.minimize(cost, seed, tol=tol, options=kwargs)
    estimated_gyro_bias = out.x[:3]
    estimated_params = out.x[3:].reshape((bezier_order, 3))
    #print '\nSeed:'
    #print seed_params
    #print '\nEstimate:'
    #print estimated_params
    #print '\nOrientation estimation: nfev=%f\n\n' % out.nfev
    #exit(0)
    return estimated_gyro_bias, estimated_params
def run_optimize():
bezier_order = 3
num_gyro_readings = 50
num_frames = 5
frame_timestamp_noise = 1e-3
frame_orientation_noise = .02
gyro_timestamp_noise = 1e-3
gyro_noise = .01
#path = os.path.expanduser('~/Data/Initialization/closed_flat/gyro.txt')
#gyro_data = np.loadtxt(path)
#gyro_timestamps = gyro_data[:,0]
#gyro_readings = gyro_data[:,1:]
true_gyro_timestamps = np.linspace(0, 1, num_gyro_readings)
true_params = np.random.rand(bezier_order, 3)
true_gyro_bias = np.random.rand(3)
true_gyro_readings = np.array([predict_gyro(true_params, true_gyro_bias, t)
for t in true_gyro_timestamps])
true_frame_timestamps = np.linspace(0, 1, num_frames)
true_frame_orientations = np.array([predict_orientation(true_params, t) for t in true_frame_timestamps])
observed_gyro_timestamps = add_white_noise(true_gyro_timestamps, gyro_timestamp_noise)
observed_gyro_readings = add_white_noise(true_gyro_readings, gyro_noise)
observed_frame_timestamps = add_white_noise(true_frame_timestamps, frame_timestamp_noise)
observed_frame_orientations = add_orientation_noise(true_frame_orientations, frame_orientation_noise)
estimated_gyro_bias, estimated_params = estimate_orientation(bezier_order,
observed_gyro_timestamps,
observed_gyro_readings,
observed_frame_timestamps,
observed_frame_orientations)
print '\nTrue params:'
print true_params
print '\nEstimated params:'
print estimated_params
print '\nTrue gyro bias:'
print true_gyro_bias
print '\nEstimated gyro bias:'
print estimated_gyro_bias
plot_timestamps = np.linspace(0, 1, 50)
estimated_gyro_readings = np.array([predict_gyro(estimated_params, true_gyro_bias, t)
for t in plot_timestamps])
true_orientations = np.array([SO3.log(predict_orientation(true_params, t))
for t in plot_timestamps])
observed_orientations = np.array(map(SO3.log, observed_frame_orientations))
estimated_orientations = np.array([SO3.log(predict_orientation(estimated_params, t))
for t in plot_timestamps])
plt.figure(1)
plt.plot(true_gyro_timestamps, true_gyro_readings, '-', label='true')
plt.plot(true_gyro_timestamps, observed_gyro_readings, 'x', label='observed')
plt.plot(plot_timestamps, estimated_gyro_readings, ':', label='estimated')
plt.xlim(-.1, 1.5)
plt.legend()
plt.figure(2)
plt.plot(plot_timestamps, true_orientations, '-', label='true')
plt.plot(true_frame_timestamps, observed_orientations, 'x', label='observed')
plt.plot(plot_timestamps, estimated_orientations, ':', label='estimated')
plt.xlim(-.1, 1.5)
plt.legend()
plt.show()
if __name__ == '__main__':
    # Script entry point: fixed seed and compact plot/print settings.
    np.random.seed(1)
    np.set_printoptions(suppress=True)
    matplotlib.rc('font', size=9)
    matplotlib.rc('legend', fontsize=9)
    run_optimize()
    #run_derivative_test()
    #run_furgale()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,129
|
alexflint/spline-initialization
|
refs/heads/master
|
/spline_socp.py
|
import bisect
import numpy as np
import utils
import cayley
import spline
import socp
import sensor_models
import lie
import structures
class InsufficientObservationsError(Exception):
    """Raised when there are too few features, frames, or tracks to build the problem."""
    pass
class FeasibilityError(Exception):
    """Raised when the SOCP solver fails to return a feasible solution."""
    pass
def select_by_timestamp(data, timestamps, begin, end):
    """Return the slice of `data` whose (sorted) timestamps fall in [begin, end]."""
    lo = bisect.bisect_left(timestamps, begin)
    hi = bisect.bisect_right(timestamps, end)
    return data[lo:hi]
def interpolate(timestamps, data, t):
    """Linearly interpolate `data` at time `t`, clamping outside the sampled range."""
    assert len(timestamps) == len(data)
    pos = bisect.bisect_left(timestamps, t)
    # Clamp to the endpoints when t falls outside the sampled interval.
    if pos == 0:
        return data[0]
    if pos == len(data):
        return data[-1]
    lo_t, hi_t = timestamps[pos-1], timestamps[pos]
    lo_x, hi_x = data[pos-1], data[pos]
    w = (t - lo_t) / (hi_t - lo_t)
    return (1. - w) * lo_x + w * hi_x
def interpolate_orientation(timestamps, orientations, t):
    """Interpolate a rotation matrix at time t between discrete samples.

    Blends along the geodesic via the SO3 log/exp maps, and clamps to the
    first/last orientation when t falls outside the sampled range.
    """
    assert len(timestamps) == len(orientations)
    index = bisect.bisect_left(timestamps, t)
    if index == 0:
        return orientations[0]
    elif index == len(orientations):
        return orientations[-1]
    else:
        r0, r1 = orientations[index-1:index+1]
        t0, t1 = timestamps[index-1:index+1]
        # Relative rotation from r0 to r1 in the tangent space, scaled by the
        # fractional position of t within [t0, t1].
        diff = lie.SO3.log(np.dot(r0.T, r1))
        w = diff * (t - t0) / (t1 - t0)
        return np.dot(r0, lie.SO3.exp(w))
class FirstOrderRotationCurve(object):
    """Rotation curve backed by discrete (timestamp, orientation) samples.

    Exposes the same evaluate() interface as the spline-based rotation
    curves, returning Cayley coordinates of the orientation interpolated
    to first order between samples.
    """
    def __init__(self, timestamps, orientations):
        self.timestamps = timestamps  # sample times (assumed sorted for bisect)
        self.orientations = orientations  # rotation matrices at those times
    def evaluate(self, t):
        return cayley.cayley_inv(interpolate_orientation(self.timestamps, self.orientations, t))
def householder(x):
    """Return a 2x3 matrix whose rows are orthonormal and orthogonal to the 3-vector x."""
    assert len(x) == 3, 'x=%s' % x
    assert np.linalg.norm(x) > 1e-8
    # Seed with the standard basis vector least aligned with x so the
    # cross products below are well conditioned.
    seed = (np.arange(3) == np.argmin(np.abs(x))).astype(float)
    first = utils.normalized(np.cross(x, seed))
    second = utils.normalized(np.cross(x, first))
    return np.array([first, second])
def calibrated(z, k):
    """Back-project image point z through camera matrix k to a unit-norm ray."""
    ray = np.linalg.solve(k, utils.unpr(z))
    return utils.normalized(ray)
def soc_constraint_from_quadratic_constraint(a, b, c):
    """Convert a quadratic constraint to an equivalent second-order cone constraint.

    The quadratic constraint
        x' A' A x + b' x + c <= 0
    is rewritten as
        || Q x + r ||_2 <= s' x + t
    using the identity ||Qx+r||^2 - (s'x+t)^2 = x'A'Ax + b'x + c, where the
    first row of Q carries b/2 and the first entry of r carries (c+1)/2.

    Returns (q, r, s, t).
    """
    half_b = b / 2.
    half_c = c / 2.
    q = np.vstack((half_b, a))
    r = np.hstack((half_c + .5, np.zeros(len(a))))
    s = -half_b
    t = .5 - half_c
    return q, r, s, t
def construct_problem_inf(spline_template,
                          observed_accel_timestamps,
                          observed_accel_orientations,
                          observed_accel_readings,
                          observed_frame_timestamps,
                          observed_frame_orientations,
                          observed_features,
                          imu_to_camera=np.eye(3),
                          camera_matrix=np.eye(3),
                          feature_tolerance=1e-2,
                          accel_tolerance=1e-3,
                          gravity_magnitude=9.8,
                          max_bias_magnitude=.1):
    """Build the infinity-norm SOCP feasibility problem for trajectory estimation.

    Variables are laid out as [spline position controls | gravity (3) |
    accel bias (3) | one 3-vector per landmark track]. Every residual is
    encoded as a hard cone constraint (accel within accel_tolerance,
    reprojection within feature_tolerance); the objective is zero.
    Returns a socp.SocpProblem.
    """
    # Sanity checks
    assert isinstance(spline_template, spline.SplineTemplate)
    assert len(observed_accel_orientations) == len(observed_accel_readings)
    assert len(observed_accel_timestamps) == len(observed_accel_readings)
    assert len(observed_frame_timestamps) == len(observed_frame_orientations)
    assert all(0 <= f.frame_id < len(observed_frame_timestamps) for f in observed_features)
    assert np.ndim(observed_accel_timestamps) == 1
    assert np.ndim(observed_frame_timestamps) == 1
    # Compute offsets
    position_offset = 0
    position_len = spline_template.control_size
    gravity_offset = position_offset + position_len
    accel_bias_offset = gravity_offset + 3
    structure_offset = accel_bias_offset + 3
    track_ids = set(f.track_id for f in observed_features)
    num_frames = len(observed_frame_timestamps)
    num_tracks = max(track_ids) + 1
    num_vars = structure_offset + num_tracks * 3
    # Make sure each track has at least one observation
    counts_by_frame = np.zeros(num_frames, int)
    counts_by_track = np.zeros(num_tracks, int)
    for f in observed_features:
        counts_by_frame[f.frame_id] += 1
        counts_by_track[f.track_id] += 1
    assert np.all(counts_by_frame > 0),\
        'These frames had zero features: ' + ','.join(map(str, np.flatnonzero(counts_by_frame == 0)))
    assert np.all(counts_by_track > 0),\
        'These tracks had zero features: ' + ','.join(map(str, np.flatnonzero(counts_by_track == 0)))
    # Track IDs should be exactly 0..n-1
    assert all(track_id < num_tracks for track_id in track_ids)
    # Initialize the problem
    objective = np.zeros(num_vars)  # pure feasibility: nothing to minimize
    problem = socp.SocpProblem(objective, [])
    # Construct gravity constraints: ||gravity|| <= gravity_magnitude
    a_gravity = np.zeros((3, num_vars))
    a_gravity[:, gravity_offset:gravity_offset+3] = np.eye(3)
    d_gravity = gravity_magnitude
    problem.add_constraint(a=a_gravity, d=d_gravity)
    # Construct accel bias constraints: ||bias|| <= max_bias_magnitude
    a_bias = np.zeros((3, num_vars))
    a_bias[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
    d_bias = max_bias_magnitude
    problem.add_constraint(a=a_bias, d=d_bias)
    # Construct accel constraints
    print 'Constructing constraints for %d accel readings...' % len(observed_accel_readings)
    accel_coefficients = spline_template.coefficients_d2(observed_accel_timestamps)
    for r, a, c in zip(observed_accel_orientations, observed_accel_readings, accel_coefficients):
        amat = spline.diagify(c, 3)
        j = np.zeros((3, num_vars))
        j[:, :position_len] = np.dot(r, amat)
        j[:, gravity_offset:gravity_offset+3] = r
        j[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
        # NOTE(review): r is rebound here from the orientation matrix to the
        # constraint offset (the negated reading) — intentional but confusing.
        r = -a
        problem.add_constraint(a=j, b=r, d=accel_tolerance)
    # Construct vision constraints
    print 'Constructing constraints for %d features...' % len(observed_features)
    pos_coefficients = spline_template.coefficients(observed_frame_timestamps)
    pos_multidim_coefs = [spline.diagify(x, 3) for x in pos_coefficients]
    for feature in observed_features:
        r = observed_frame_orientations[feature.frame_id]
        pmat = pos_multidim_coefs[feature.frame_id]
        point_offset = structure_offset + feature.track_id*3
        assert point_offset + 3 <= num_vars, 'track id was %d, num vars was %d' % (feature.track_id, num_vars)
        # Landmark-minus-position mapped through K * imu_to_camera * R.
        k_rc_r = np.dot(camera_matrix, np.dot(imu_to_camera, r))
        ymat = np.zeros((3, num_vars))
        ymat[:, :position_len] = -np.dot(k_rc_r, pmat)
        ymat[:, point_offset:point_offset+3] = k_rc_r
        # Reprojection error bounded by feature_tolerance times the depth row.
        a_feature = ymat[:2] - np.outer(feature.position, ymat[2])
        c_feature = ymat[2] * feature_tolerance
        problem.add_constraint(a=a_feature, c=c_feature)
    return problem
def compute_accel_residuals(trajectory,
                            timestamps,
                            orientations,
                            readings):
    """Stacked (predicted - observed) accelerometer residuals for a trajectory."""
    assert len(orientations) == len(readings)
    assert len(timestamps) == len(readings)
    assert np.ndim(timestamps) == 1
    residuals = [
        sensor_models.predict_accel_with_orientation(
            trajectory.position_curve,
            orientation,
            trajectory.accel_bias,
            trajectory.gravity,
            timestamp) - reading
        for timestamp, orientation, reading in zip(timestamps, orientations, readings)]
    return np.hstack(residuals)
def compute_reprojection_errors(features, frame_timestamps, frame_orientations, estimated,
                                imu_to_camera, camera_matrix):
    """Image-space reprojection errors (predicted minus observed) for each feature."""
    # Evaluate the position curve once for all frames.
    frame_positions = estimated.position_curve.evaluate(frame_timestamps)
    errors = []
    for observation in features:
        orientation = frame_orientations[observation.frame_id]
        position = frame_positions[observation.frame_id]
        landmark = estimated.landmarks[observation.track_id]
        predicted = sensor_models.predict_feature_with_pose(
            orientation, position, landmark, imu_to_camera, camera_matrix)
        errors.append(predicted - observation.position)
    return np.array(errors)
def construct_problem_mixed(spline_template,
                            observed_accel_timestamps,
                            observed_accel_orientations,
                            observed_accel_readings,
                            observed_frame_timestamps,
                            observed_frame_orientations,
                            observed_features,
                            imu_to_camera=np.eye(3),
                            camera_matrix=np.eye(3),
                            feature_tolerance=1e-2,
                            gravity_magnitude=9.8,
                            max_bias_magnitude=.1):
    """Build the mixed SOCP: minimize squared accel error subject to hard vision constraints.

    Like construct_problem_inf, but accel residuals enter the objective
    (via one auxiliary epigraph variable appended at the end of the state)
    instead of hard tolerance constraints. Raises
    InsufficientObservationsError when there is too little data.
    Returns a socp.SocpProblem.
    """
    if len(observed_features) < 5:
        raise InsufficientObservationsError()
    # Sanity checks
    assert isinstance(spline_template, spline.SplineTemplate)
    assert len(observed_accel_orientations) == len(observed_accel_readings)
    assert len(observed_accel_timestamps) == len(observed_accel_readings)
    assert len(observed_frame_timestamps) == len(observed_frame_orientations)
    assert all(0 <= f.frame_id < len(observed_frame_timestamps) for f in observed_features)
    assert np.ndim(observed_accel_timestamps) == 1
    assert np.ndim(observed_frame_timestamps) == 1
    # Compute offsets
    position_offset = 0
    position_len = spline_template.control_size
    gravity_offset = position_offset + position_len
    accel_bias_offset = gravity_offset + 3
    structure_offset = accel_bias_offset + 3
    track_ids = set(f.track_id for f in observed_features)
    num_aux_vars = 1  # one extra variable representing the objective
    num_frames = len(observed_frame_timestamps)
    num_tracks = max(track_ids) + 1
    num_vars = structure_offset + num_tracks * 3 + num_aux_vars
    # Make sure each track has at least one observation
    counts_by_frame = np.zeros(num_frames, int)
    counts_by_track = np.zeros(num_tracks, int)
    for f in observed_features:
        counts_by_frame[f.frame_id] += 1
        counts_by_track[f.track_id] += 1
    if not np.all(counts_by_frame > 0):
        raise InsufficientObservationsError(
            'These frames had zero features: ' + ','.join(map(str, np.flatnonzero(counts_by_frame == 0))))
    if not np.all(counts_by_track > 0):
        raise InsufficientObservationsError(
            'These tracks had zero features: ' + ','.join(map(str, np.flatnonzero(counts_by_track == 0))))
    # Track IDs should be exactly 0..n-1
    assert all(track_id < num_tracks for track_id in track_ids)
    # Initialize the problem
    objective = utils.unit(num_vars-1, num_vars)  # the last variable is the objective we minimize
    problem = socp.SocpProblem(objective)
    # Construct accel constraints
    print 'Constructing constraints for %d accel readings...' % len(observed_accel_readings)
    accel_coefficients = spline_template.coefficients_d2(observed_accel_timestamps)
    accel_j_blocks = []
    accel_r_blocks = []
    for r, a, c in zip(observed_accel_orientations, observed_accel_readings, accel_coefficients):
        amat = spline.diagify(c, 3)
        j = np.zeros((3, num_vars))
        j[:, :position_len] = np.dot(r, amat)
        j[:, gravity_offset:gravity_offset+3] = r
        j[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
        accel_j_blocks.append(j)
        accel_r_blocks.append(a)
    # Form the least squares objective || J*x + r ||^2
    accel_j = np.vstack(accel_j_blocks)
    accel_r = np.hstack(accel_r_blocks)
    # Form the quadratic objective: x' J' J x + b' x + c <= objective ("objective" is the variable we minimize)
    accel_c = np.dot(accel_r, accel_r)
    accel_b = -2. * np.dot(accel_j.T, accel_r)
    # The last column of accel_j is zero (the aux var), so overwriting the
    # last entry of b with -1 subtracts the epigraph variable, giving
    # ||Jx - r||^2 <= objective.
    accel_b[-1] = -1.
    # Convert to an SOCP objective
    problem.add_constraint(*soc_constraint_from_quadratic_constraint(accel_j, accel_b, accel_c))
    # Construct gravity constraints: ||gravity|| <= gravity_magnitude
    a_gravity = np.zeros((3, num_vars))
    a_gravity[:, gravity_offset:gravity_offset+3] = np.eye(3)
    d_gravity = gravity_magnitude
    problem.add_constraint(a=a_gravity, d=d_gravity)
    # Construct accel bias constraints: ||bias|| <= max_bias_magnitude
    a_bias = np.zeros((3, num_vars))
    a_bias[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
    d_bias = max_bias_magnitude
    problem.add_constraint(a=a_bias, d=d_bias)
    # Construct vision constraints
    print 'Constructing constraints for %d features...' % len(observed_features)
    pos_coefficients = spline_template.coefficients(observed_frame_timestamps)
    pos_multidim_coefs = [spline.diagify(x, 3) for x in pos_coefficients]
    for feature in observed_features:
        r = observed_frame_orientations[feature.frame_id]
        pmat = pos_multidim_coefs[feature.frame_id]
        point_offset = structure_offset + feature.track_id*3
        assert point_offset + 3 <= num_vars, 'track id was %d, num vars was %d' % (feature.track_id, num_vars)
        k_rc_r = np.dot(camera_matrix, np.dot(imu_to_camera, r))
        ymat = np.zeros((3, num_vars))
        ymat[:, :position_len] = -np.dot(k_rc_r, pmat)
        ymat[:, point_offset:point_offset+3] = k_rc_r
        # Reprojection error bounded by feature_tolerance times the depth row.
        a_feature = ymat[:2] - np.outer(feature.position, ymat[2])
        c_feature = ymat[2] * feature_tolerance
        problem.add_constraint(a=a_feature, c=c_feature)
    return problem
def estimate_trajectory_inf(spline_template,
                            observed_accel_timestamps,
                            observed_accel_orientations,
                            observed_accel_readings,
                            observed_frame_timestamps,
                            observed_frame_orientations,
                            observed_features,
                            imu_to_camera=np.eye(3),
                            camera_matrix=np.eye(3),
                            feature_tolerance=1e-2,
                            accel_tolerance=1e-3,
                            gravity_magnitude=9.8,
                            max_bias_magnitude=.1,
                            ground_truth=None,
                            **kwargs):
    """Solve the infinity-norm formulation and unpack the result.

    Builds the problem with construct_problem_inf, pins the first position
    to the origin by eliminating the first three variables, solves the SOCP,
    and returns a structures.PositionEstimate. Raises FeasibilityError when
    the solver does not return a solution. Extra kwargs go to socp.solve.
    """
    problem = construct_problem_inf(
        spline_template,
        observed_accel_timestamps,
        observed_accel_orientations,
        observed_accel_readings,
        observed_frame_timestamps,
        observed_frame_orientations,
        observed_features,
        imu_to_camera=imu_to_camera,
        camera_matrix=camera_matrix,
        feature_tolerance=feature_tolerance,
        accel_tolerance=accel_tolerance,
        gravity_magnitude=gravity_magnitude,
        max_bias_magnitude=max_bias_magnitude)
    print 'Constructed a problem with %d variables and %d constraints' % \
        (len(problem.objective), len(problem.constraints))
    # Evaluate at ground truth if requested
    if ground_truth is not None:
        problem.evaluate(ground_truth.flatten())
    # Eliminate global position
    print 'Eliminating the first position...'
    problem = problem.conditionalize_indices(range(3))
    # Solve
    result = socp.solve(problem, sparse=True, **kwargs)
    if result['x'] is None:
        raise FeasibilityError('Solver returned status "%s"' % result['status'])
    # Re-insert the eliminated first position (zeros) and unpack by offset.
    estimated_vars = np.hstack((np.zeros(3), np.squeeze(result['x'])))
    spline_vars = spline_template.control_size
    pos_controls = estimated_vars[:spline_vars].reshape((-1, 3))
    gravity = estimated_vars[spline_vars:spline_vars+3]
    accel_bias = estimated_vars[spline_vars+3:spline_vars+6]
    landmarks = estimated_vars[spline_vars+6:].reshape((-1, 3))
    curve = spline.Spline(spline_template, pos_controls)
    return structures.PositionEstimate(curve, gravity, accel_bias, landmarks)
def estimate_trajectory_mixed(spline_template,
                              observed_accel_timestamps,
                              observed_accel_orientations,
                              observed_accel_readings,
                              observed_frame_timestamps,
                              observed_frame_orientations,
                              observed_features,
                              imu_to_camera=np.eye(3),
                              camera_matrix=np.eye(3),
                              feature_tolerance=1e-2,
                              gravity_magnitude=9.8,
                              max_bias_magnitude=.1,
                              ground_truth=None,
                              **kwargs):
    """Solve the mixed formulation (least-squares accel objective) and unpack.

    Same flow as estimate_trajectory_inf but for construct_problem_mixed;
    the trailing auxiliary objective variable is stripped when unpacking.
    Raises FeasibilityError when the solver does not return a solution.
    """
    problem = construct_problem_mixed(
        spline_template,
        observed_accel_timestamps,
        observed_accel_orientations,
        observed_accel_readings,
        observed_frame_timestamps,
        observed_frame_orientations,
        observed_features,
        imu_to_camera=imu_to_camera,
        camera_matrix=camera_matrix,
        feature_tolerance=feature_tolerance,
        gravity_magnitude=gravity_magnitude,
        max_bias_magnitude=max_bias_magnitude)
    print 'Constructed a problem with %d variables and %d constraints' % \
        (len(problem.objective), len(problem.constraints))
    # Evaluate at ground truth if requested
    if ground_truth is not None:
        # In the mixed formulation, the last variable is the sum of squared accel residuals
        gt_accel_residuals = compute_accel_residuals(ground_truth,
                                                     observed_accel_timestamps,
                                                     observed_accel_orientations,
                                                     observed_accel_readings)
        gt_cost = np.dot(gt_accel_residuals, gt_accel_residuals)
        # Inflate slightly so the ground truth is strictly feasible.
        ground_truth_augmented = np.hstack((ground_truth.flatten(), gt_cost * (1. + 1e-8)))
        problem.evaluate(ground_truth_augmented)
    # Eliminate global position
    print 'Eliminating the first position...'
    problem = problem.conditionalize_indices(range(3), np.zeros(3))
    # Solve
    result = socp.solve(problem, sparse=True, **kwargs)
    if result['x'] is None:
        raise FeasibilityError('Solver returned status "%s"' % result['status'])
    # Re-insert the eliminated first position (zeros) and unpack by offset;
    # the final entry is the auxiliary objective variable and is dropped.
    estimated_vars = np.hstack((np.zeros(3), np.squeeze(result['x'])))
    spline_vars = spline_template.control_size
    pos_controls = estimated_vars[:spline_vars].reshape((-1, 3))
    gravity = estimated_vars[spline_vars:spline_vars+3]
    accel_bias = estimated_vars[spline_vars+3:spline_vars+6]
    landmarks = estimated_vars[spline_vars+6:-1].reshape((-1, 3))
    curve = spline.Spline(spline_template, pos_controls)
    return structures.PositionEstimate(curve, gravity, accel_bias, landmarks)
def estimate_trajectory_linear(spline_template,
                               observed_accel_timestamps,
                               observed_accel_orientations,
                               observed_accel_readings,
                               observed_frame_timestamps,
                               observed_frame_orientations,
                               observed_features,
                               imu_to_camera=np.eye(3),
                               camera_matrix=np.eye(3),
                               accel_weight=1.):
    """Estimate the device trajectory by one ordinary linear least squares solve.

    Unknowns, packed into a single vector, are: the spline control points for
    the position curve, the accelerometer bias (3), gravity (3), and one 3-d
    landmark per feature track. Both accel readings and feature observations
    yield residuals that are linear in these unknowns, so the stacked system
    is solved in closed form with np.linalg.lstsq.

    Parameters:
      spline_template: spline.SplineTemplate describing the position curve.
      observed_accel_*: per-reading timestamps, world-to-IMU rotations, and
        3-d accelerometer readings.
      observed_frame_*: per-frame timestamps and world-to-IMU rotations.
      observed_features: iterable of objects with frame_id, track_id and a
        2-d pixel position (see structures.FeatureObservation).
      imu_to_camera, camera_matrix: extrinsic rotation and intrinsics.
      accel_weight: relative weight of accel rows versus feature rows.

    Returns:
      structures.PositionEstimate(curve, gravity, accel_bias, landmarks).
    """
    assert isinstance(spline_template, spline.SplineTemplate)
    # Track ids are assumed to be dense 0..num_tracks-1.
    num_tracks = max(f.track_id for f in observed_features) + 1

    # Variable layout: [position controls | accel bias (3) | gravity (3) | landmarks (3 per track)].
    accel_bias_offset = spline_template.control_size
    gravity_offset = spline_template.control_size + 3
    structure_offset = spline_template.control_size + 6
    num_vars = structure_offset + num_tracks * 3

    j_blocks = []
    r_blocks = []

    # Add terms for accel residuals
    # Model implied by the rows below: reading = R * (curve''(t) + gravity) + bias,
    # which is linear in the unknowns because curve''(t) is linear in the controls.
    print 'Constructing linear systems for %d accel readings...' % len(observed_accel_readings)
    accel_coefficients = spline_template.coefficients_d2(observed_accel_timestamps)
    for r, a, c in zip(observed_accel_orientations, observed_accel_readings, accel_coefficients):
        # Expand the scalar basis coefficients so they act on 3-d control points.
        amat = spline.diagify(c, 3)
        j = np.zeros((3, num_vars))
        j[:, :spline_template.control_size] = np.dot(r, amat)
        j[:, gravity_offset:gravity_offset+3] = r
        j[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
        j_blocks.append(j * accel_weight)
        r_blocks.append(a * accel_weight)

    # Add terms for features
    # For projection q = K * R_ic * R_frame, the rows c = q[:2] - z * q[2]
    # encode the constraint c . (landmark - position(t)) = 0, i.e. the
    # landmark projects onto the observed pixel z (valid up to scale).
    print 'Constructing linear systems for %d features...' % len(observed_features)
    pos_coefficients = spline_template.coefficients(observed_frame_timestamps)
    pos_multidim_coefs = [spline.diagify(x, 3) for x in pos_coefficients]
    for feature in observed_features:
        z = feature.position
        r = observed_frame_orientations[feature.frame_id]
        q = np.dot(camera_matrix, np.dot(imu_to_camera, r))
        c = q[:2] - np.outer(z, q[2])
        pmat = pos_multidim_coefs[feature.frame_id]
        point_offset = structure_offset + feature.track_id*3
        j = np.zeros((2, num_vars))
        j[:, :spline_template.control_size] = -np.dot(c, pmat)
        j[:, point_offset:point_offset+3] = c
        j_blocks.append(j)
        r_blocks.append(np.zeros(2))

    # Assemble full linear system
    j = np.vstack(j_blocks)
    r = np.hstack(r_blocks)

    # Eliminate global position
    # Dropping the first three columns pins the first control point (and hence
    # the global translation gauge) to zero; it is re-inserted below.
    j = j[:, 3:]

    # Solve
    print 'Solving linear system of size %d x %d' % j.shape
    solution, _, _, _ = np.linalg.lstsq(j, r)

    # Replace global position
    solution = np.hstack((np.zeros(3), solution))

    # Extract individual variables from solution
    position_controls = solution[:spline_template.control_size].reshape((-1, 3))
    position_curve = spline.Spline(spline_template, position_controls)
    gravity = solution[gravity_offset:gravity_offset+3]
    accel_bias = solution[accel_bias_offset:accel_bias_offset+3]
    landmarks = solution[structure_offset:].reshape((-1, 3))
    return structures.PositionEstimate(position_curve, gravity, accel_bias, landmarks)
def estimate_trajectory_householder(spline_template,
                                    observed_accel_timestamps,
                                    observed_accel_orientations,
                                    observed_accel_readings,
                                    observed_frame_timestamps,
                                    observed_frame_orientations,
                                    observed_features,
                                    imu_to_camera=np.eye(3),
                                    camera_matrix=np.eye(3),
                                    accel_weight=1.):
    """Linear least-squares trajectory estimation using Householder feature rows.

    Identical in structure to estimate_trajectory_linear, but each feature
    constraint is formed from the calibrated (unprojected) observation via the
    `householder` helper instead of from raw pixel rows. `calibrated` and
    `householder` are defined elsewhere in this module; presumably
    `householder(z)` returns a 2x3 matrix whose rows span the plane orthogonal
    to the calibrated ray z — TODO confirm against their definitions.

    Returns:
      structures.PositionEstimate(curve, gravity, accel_bias, landmarks).
    """
    assert isinstance(spline_template, spline.SplineTemplate)
    # Track ids are assumed to be dense 0..num_tracks-1.
    num_tracks = max(f.track_id for f in observed_features) + 1

    # Variable layout: [position controls | accel bias (3) | gravity (3) | landmarks (3 per track)].
    accel_bias_offset = spline_template.control_size
    gravity_offset = spline_template.control_size + 3
    structure_offset = spline_template.control_size + 6
    num_vars = structure_offset + num_tracks * 3

    j_blocks = []
    r_blocks = []

    # Add terms for accel residuals
    # Same accel model as estimate_trajectory_linear:
    # reading = R * (curve''(t) + gravity) + bias.
    print 'Constructing linear systems for %d accel readings...' % len(observed_accel_readings)
    accel_coefficients = spline_template.coefficients_d2(observed_accel_timestamps)
    for r, a, c in zip(observed_accel_orientations, observed_accel_readings, accel_coefficients):
        amat = spline.diagify(c, 3)
        j = np.zeros((3, num_vars))
        j[:, :spline_template.control_size] = np.dot(r, amat)
        j[:, gravity_offset:gravity_offset+3] = r
        j[:, accel_bias_offset:accel_bias_offset+3] = np.eye(3)
        j_blocks.append(j * accel_weight)
        r_blocks.append(a * accel_weight)

    # Add terms for features
    # Constraint encoded below: h . (R_ic * R_frame * (landmark - position(t))) = 0,
    # i.e. the landmark lies on the observed ray in the camera frame.
    print 'Constructing linear systems for %d features...' % len(observed_features)
    pos_coefficients = spline_template.coefficients(observed_frame_timestamps)
    pos_multidim_coefs = [spline.diagify(x, 3) for x in pos_coefficients]
    for feature in observed_features:
        z = calibrated(feature.position, camera_matrix)
        h = householder(z)
        r = observed_frame_orientations[feature.frame_id]
        pmat = pos_multidim_coefs[feature.frame_id]
        point_offset = structure_offset + feature.track_id*3
        j = np.zeros((2, num_vars))
        j[:, :spline_template.control_size] = -np.dot(h, np.dot(imu_to_camera, np.dot(r, pmat)))
        j[:, point_offset:point_offset+3] = np.dot(h, np.dot(imu_to_camera, r))
        j_blocks.append(j)
        r_blocks.append(np.zeros(2))

    # Assemble full linear system
    j = np.vstack(j_blocks)
    r = np.hstack(r_blocks)

    # Eliminate global position
    # Dropping the first three columns pins the first control point to zero.
    j = j[:, 3:]

    # Solve
    print 'Solving linear system of size %d x %d' % j.shape
    solution, _, _, _ = np.linalg.lstsq(j, r)

    # Replace global position
    solution = np.hstack((np.zeros(3), solution))

    # Extract individual variables from solution
    position_controls = solution[:spline_template.control_size].reshape((-1, 3))
    position_curve = spline.Spline(spline_template, position_controls)
    gravity = solution[gravity_offset:gravity_offset+3]
    accel_bias = solution[accel_bias_offset:accel_bias_offset+3]
    landmarks = solution[structure_offset:].reshape((-1, 3))
    return structures.PositionEstimate(position_curve, gravity, accel_bias, landmarks)
def estimate_trajectory(calibration,
                        measurements,
                        spline_template,
                        estimator='mixed',
                        feature_tolerance=5.,
                        accel_tolerance=.1,
                        ground_truth=None):
    """Dispatch trajectory estimation to the selected backend.

    Parameters:
      calibration: structures.Calibration providing imu_to_camera,
        camera_matrix and gravity_magnitude.
      measurements: structures.Measurements with accel and frame data.
      spline_template: spline.SplineTemplate for the position curve.
      estimator: one of 'infnorm', 'mixed', 'householder', 'linear'.
      feature_tolerance: reprojection tolerance (used by 'infnorm' and 'mixed').
      accel_tolerance: accel residual tolerance (used by 'infnorm' only).
      ground_truth: optional known solution to evaluate ('infnorm'/'mixed').

    Returns:
      structures.PositionEstimate from the chosen estimator.

    Raises:
      ValueError: for the retired 'socp' alias or any unrecognized estimator.
    """
    if estimator == 'socp':
        raise ValueError("'socp' has been renamed to 'infnorm' to avoid confusion with 'mixed'")
    elif estimator == 'infnorm':
        return estimate_trajectory_inf(spline_template,
                                       measurements.accel_timestamps,
                                       measurements.accel_orientations,
                                       measurements.accel_readings,
                                       measurements.frame_timestamps,
                                       measurements.frame_orientations,
                                       measurements.features,
                                       imu_to_camera=calibration.imu_to_camera,
                                       camera_matrix=calibration.camera_matrix,
                                       gravity_magnitude=calibration.gravity_magnitude+.1,
                                       feature_tolerance=feature_tolerance,
                                       accel_tolerance=accel_tolerance,
                                       ground_truth=ground_truth)
    elif estimator == 'mixed':
        return estimate_trajectory_mixed(spline_template,
                                         measurements.accel_timestamps,
                                         measurements.accel_orientations,
                                         measurements.accel_readings,
                                         measurements.frame_timestamps,
                                         measurements.frame_orientations,
                                         measurements.features,
                                         imu_to_camera=calibration.imu_to_camera,
                                         camera_matrix=calibration.camera_matrix,
                                         gravity_magnitude=calibration.gravity_magnitude+.1,
                                         feature_tolerance=feature_tolerance,
                                         ground_truth=ground_truth)
    elif estimator == 'householder':
        return estimate_trajectory_householder(spline_template,
                                               measurements.accel_timestamps,
                                               measurements.accel_orientations,
                                               measurements.accel_readings,
                                               measurements.frame_timestamps,
                                               measurements.frame_orientations,
                                               measurements.features,
                                               imu_to_camera=calibration.imu_to_camera,
                                               camera_matrix=calibration.camera_matrix)
    elif estimator == 'linear':
        return estimate_trajectory_linear(spline_template,
                                          measurements.accel_timestamps,
                                          measurements.accel_orientations,
                                          measurements.accel_readings,
                                          measurements.frame_timestamps,
                                          measurements.frame_orientations,
                                          measurements.features,
                                          imu_to_camera=calibration.imu_to_camera,
                                          camera_matrix=calibration.camera_matrix)
    else:
        # Consistent with the 'socp' branch above: unknown names raise ValueError
        # (previously a bare Exception; ValueError is backward compatible for
        # any caller catching Exception).
        raise ValueError('Invalid estimator: %s' % estimator)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,130
|
alexflint/spline-initialization
|
refs/heads/master
|
/geometry.py
|
import numpy as np
import scipy.optimize
from lie import SO3
def skew(m):
    """Return the 3x3 skew-symmetric (cross-product) matrix of a 3-vector."""
    mx, my, mz = np.asarray(m)
    return np.array([[0., -mz, my],
                     [mz, 0., -mx],
                     [-my, mx, 0.]])
def normalized(x):
    """Return x scaled to unit Euclidean norm."""
    v = np.asarray(x)
    return v / np.linalg.norm(v)
def essential_residual(M):
    """Compute an error vector that is zero when M is an essential matrix."""
    det_term = np.linalg.det(M)
    gram = np.dot(M, M.T)
    cubic = 2. * np.dot(gram, M) - np.trace(gram) * M
    return np.hstack((det_term, cubic.ravel()))
def essential_matrix(R, t):
    """Return the essential matrix E = R * [t]_x for relative pose (R, t)."""
    return np.dot(R, skew(t))
def pr(x):
    """Perspective-divide: drop the last coordinate after dividing by it."""
    h = np.asarray(x)
    return h[..., :-1] / h[..., -1:]
def unpr(x):
    """Append a unit homogeneous coordinate to a vector, or to each row of a matrix."""
    if np.ndim(x) == 1:
        return np.hstack((x, 1))
    num_rows = np.shape(x)[0]
    return np.hstack((x, np.ones((num_rows, 1))))
def arctans(ps):
    """Return arctan2 of each leading component over the final component."""
    ps = np.asarray(ps)
    numerators = ps[..., :-1]
    denominators = ps[..., -1:]
    return np.arctan2(numerators, denominators)
def pose_from_essential_matrix(E):
    """Decompose an essential matrix into a rotation R and translation t.

    Uses the standard SVD construction E = U diag(s) V^T with
    W = [[0,-1,0],[1,0,0],[0,0,1]]: R = U W^T V^T and [t]_x = V W diag(s) V^T.
    NOTE(review): only one of the four (R, +/-t) candidates is returned; no
    cheirality (points-in-front) check is visible here — callers should
    verify the correct candidate if needed.
    """
    W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], float)
    U, s, Vt = np.linalg.svd(E)
    V = Vt.T
    R = np.dot(U, np.dot(W.T, V.T))
    # Recover t from the skew-symmetric part of V W diag(s) V^T.
    skew_t = np.dot(V, np.dot(W, np.dot(np.diag(s), V.T)))
    t = np.array((skew_t[2, 1], skew_t[0, 2], skew_t[1, 0]))
    return R, t
def epipolar_error_from_pose(R, t, xs0, xs1):
    """Sum of squared epipolar residuals for the relative pose (R, t)."""
    E = essential_matrix(R, t)
    return epipolar_error(E, xs0, xs1)
def epipolar_error(E, xs0, xs1):
    """Sum of squared epipolar residuals over all correspondences."""
    residuals = epipolar_residuals(E, xs0, xs1)
    return sum(r * r for r in residuals)
def epipolar_residuals(E, xs0, xs1):
    """Per-correspondence absolute epipolar constraint values |x1^T E x0|."""
    homog0 = unpr(xs0)
    homog1 = unpr(xs1)
    return [np.abs(np.dot(x1, np.dot(E, x0)))
            for x0, x1 in zip(homog0, homog1)]
def solve_essential_matrix_via_fmat(xs0, xs1, inlier_threshold):
    """Estimate an essential matrix by RANSAC fundamental-matrix fitting.

    Returns (E, inlier_mask) where inlier_mask is a boolean array over the
    correspondences.

    NOTE(review): `cv2` is used here but is not imported at the top of this
    file — calling this function would raise NameError; confirm the import.
    """
    # Use opencv to solve for fundamental matrix
    F, inlier_mask = cv2.findFundamentalMat(xs0,
                                            xs1,
                                            method=cv2.FM_RANSAC,
                                            param1=inlier_threshold)
    assert F.shape == (3, 3)
    # Decompose and replace singular values
    # Forces the two-equal-nonzero-singular-values structure of an essential matrix.
    u, _, v = np.linalg.svd(F)
    E = np.dot(u, np.dot(np.diag((1., 1., 0.)), v.T))
    # Normalize scale; E is only defined up to scale anyway.
    # NOTE(review): dividing by np.sum(E) can flip or blow up the sign/scale
    # if the entries sum to a small or negative value — verify intent.
    E /= np.sum(E)
    return E, np.bool_(np.squeeze(inlier_mask))
def refine_epipolar_pose(R, t, xs0, xs1):
    """Polish a relative pose by minimizing epipolar error with Nelder-Mead.

    The 5-dof parameterization perturbs R by an SO(3) exponential and t within
    the plane spanned by (u, v), then renormalizes t to unit length.
    """
    # One-hot axis at the index of the smallest component of t, used to build
    # a basis for directions orthogonal to t.
    # NOTE(review): argmin is over signed values, not magnitudes — confirm
    # this is intended (argmin(abs(t)) would pick the most orthogonal axis).
    ax = np.array([i == np.argmin(t) for i in range(3)], int)
    u = normalized(np.cross(ax, t))
    v = normalized(np.cross(u, t))

    def perturb_normalized(R, t, delta):
        # delta = [3 rotation params, 2 in-plane translation params].
        assert len(delta) == 5
        return np.dot(SO3.exp(delta[:3]), R), normalized(t + delta[3]*u + delta[4]*v)

    def cost(delta):
        RR, tt = perturb_normalized(R, t, delta)
        return epipolar_error_from_pose(RR, tt, xs0, xs1)

    # Derivative-free simplex search around the initial pose.
    delta = scipy.optimize.fmin(cost, np.zeros(5), maxiter=500)
    RR, tt = perturb_normalized(R, t, delta)
    return RR, tt
def estimate_epipolar_pose(xs0, xs1, inlier_threshold, refine=True):
    """Estimate relative pose (R, t) between two views from correspondences.

    Runs RANSAC fundamental-matrix fitting, decomposes to a pose, and
    optionally polishes it by epipolar-error minimization over the inliers.

    Returns (R, t, inlier_mask).
    """
    # Solve for essential matrix using 8-point RANSAC
    E, inlier_mask = solve_essential_matrix_via_fmat(xs0, xs1, inlier_threshold)
    R, t = pose_from_essential_matrix(E)
    # NOTE(review): the decomposed rotation is discarded here and replaced
    # with identity — explicitly marked as a temporary hack in the original.
    R = np.eye(3)  # temp hack
    # Polish pose using gradient descent
    if refine:
        RR, tt = refine_epipolar_pose(R, t, xs0[inlier_mask], xs1[inlier_mask])
    else:
        RR, tt = R, t
    # Report
    #print 'Num Inliers: %d (of %d)' % (np.sum(inlier_mask), len(xs0))
    #print 'R:\n', R
    #print 'RR:\n', RR
    #print 'Error after RANSAC:', epipolar_error_from_pose(R, t, xs0[inlier_mask], xs1[inlier_mask])
    #print 'Error after polishing:', epipolar_error_from_pose(RR, tt, xs0[inlier_mask], xs1[inlier_mask])
    return RR, tt, inlier_mask
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,131
|
alexflint/spline-initialization
|
refs/heads/master
|
/spline_test.py
|
import numpy as np
import numpy.testing as tst
import unittest
import numdifftools
import spline
class SplineTest(unittest.TestCase):
    """Checks analytic spline derivatives against numdifftools numeric ones."""

    def test_derivative_degree2(self):
        """First derivative of a degree-2 basis function, interior knot span."""
        np.random.seed(0)
        knots = np.linspace(0, 10, 8)
        t0 = 3.5
        f = lambda t: spline.basis(t, 2, knots, degree=2)
        j_numeric = numdifftools.Derivative(f)(t0)
        j_analytic = spline.basis_d1(t0, 2, knots, degree=2)
        self.assertAlmostEqual(j_numeric, j_analytic)

    def test_derivative_degree3(self):
        """First derivative of a degree-3 basis function."""
        np.random.seed(0)
        knots = np.linspace(0, 10, 8)
        t0 = 2.
        f = lambda t: spline.basis(t, 2, knots, degree=3)
        j_numeric = numdifftools.Derivative(f)(t0)
        j_analytic = spline.basis_d1(t0, 2, knots, degree=3)
        self.assertAlmostEqual(j_numeric, j_analytic)

    def test_derivative_degree3_t0(self):
        """First derivative near the start of the knot range (boundary case)."""
        np.random.seed(0)
        knots = np.linspace(0, 10, 8)
        t0 = .5
        f = lambda t: spline.basis(t, 0, knots, degree=3)
        j_numeric = numdifftools.Derivative(f)(t0)
        j_analytic = spline.basis_d1(t0, 0, knots, degree=3)
        self.assertAlmostEqual(j_numeric, j_analytic)

    def test_derivative2_degree2(self):
        """Second derivative of a degree-2 basis function."""
        np.random.seed(0)
        knots = np.linspace(0, 10, 8)
        t0 = 3.5
        f = lambda t: spline.basis(t, 2, knots, degree=2)
        h_numeric = numdifftools.Derivative(f, 2)(t0)
        h_analytic = spline.basis_d2(t0, 2, knots, degree=2)
        self.assertAlmostEqual(h_numeric, h_analytic)

    def test_derivative2_degree3(self):
        """Second derivative of a degree-3 basis function."""
        np.random.seed(0)
        knots = np.linspace(0, 10, 8)
        t0 = 2.
        f = lambda t: spline.basis(t, 2, knots, degree=3)
        h_numeric = numdifftools.Derivative(f, 2)(t0)
        h_analytic = spline.basis_d2(t0, 2, knots, degree=3)
        self.assertAlmostEqual(h_numeric, h_analytic)

    def test_derivative2_degree3_t0(self):
        """Second derivative near the start of the knot range (boundary case)."""
        np.random.seed(0)
        knots = np.linspace(0, 10, 8)
        t0 = .5
        f = lambda t: spline.basis(t, 0, knots, degree=3)
        h_numeric = numdifftools.Derivative(f, 2)(t0)
        h_analytic = spline.basis_d2(t0, 0, knots, degree=3)
        self.assertAlmostEqual(h_numeric, h_analytic)

    def test_evaluate(self):
        """Curve evaluation derivatives for a scalar-valued spline."""
        np.random.seed(0)
        curve = spline.Spline.canonical(np.random.randn(8))
        j_analytic = curve.evaluate_d1(.5)
        j_numeric = numdifftools.Derivative(curve.evaluate)(.5)
        h_analytic = curve.evaluate_d2(.5)
        h_numeric = numdifftools.Derivative(curve.evaluate, 2)(.5)
        self.assertAlmostEqual(j_numeric, j_analytic)
        self.assertAlmostEqual(h_numeric, h_analytic)

    def test_evaluate_multidim(self):
        """Curve evaluation derivatives for a 3-d-valued spline."""
        np.random.seed(0)
        curve = spline.Spline.canonical(np.random.randn(8, 3))
        # First derivative
        j_analytic = curve.evaluate_d1(.5)
        j_numeric = np.squeeze(numdifftools.Jacobian(curve.evaluate)(.5))
        tst.assert_array_almost_equal(j_numeric, j_analytic)
        # Second derivative
        # We should be able to use numdifftools.Hessian below but I could not get it to work
        h_analytic = curve.evaluate_d2(.5)
        h_numeric = np.squeeze([numdifftools.Derivative(lambda t: curve.evaluate(t)[i], 2)(.5) for i in range(3)])
        tst.assert_array_almost_equal(h_numeric, h_analytic)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,132
|
alexflint/spline-initialization
|
refs/heads/master
|
/estimate_all.py
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from lie import SO3
from geometry import pr, arctans
from utils import normalized, add_white_noise, add_orientation_noise
from bezier import zero_offset_bezier
from cayley import cayley
from plotting import plot_tracks
from estimate_orientation import estimate_orientation, predict_gyro, predict_orientation
from estimate_position import estimate_position, predict_accel, predict_feature
def run_position_estimation():
    """End-to-end synthetic experiment: simulate IMU + feature data, then
    estimate orientation and position Bezier curves and report errors.

    NOTE(review): this function currently returns early right after plotting
    the feature tracks (see the `return` below) — everything after that point
    is unreachable and appears to be a debugging leftover; confirm before
    relying on the estimation output.
    """
    #
    # Parameters
    #
    bezier_degree = 4
    num_frames = 8
    num_landmarks = 120
    num_accel_readings = 50
    num_gyro_readings = 60

    gyro_timestamp_noise = 0
    gyro_reading_noise = 1e-3

    accel_timestamp_noise = 0
    accel_reading_noise = 1e-3

    frame_timestamp_noise = 0
    frame_orientation_noise = 1e-3
    feature_noise = 1e-4

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num accel readings:', num_accel_readings
    print 'Num gyro readings:', num_gyro_readings
    print 'Bezier curve degree:', bezier_degree

    #
    # Construct ground truth
    #
    true_frame_timestamps = np.linspace(0, 1, num_frames)
    true_accel_timestamps = np.linspace(0, 1, num_accel_readings)

    true_gyro_bias = np.random.rand(3)
    true_accel_bias = np.random.randn(3)
    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude

    true_rot_controls = np.random.randn(bezier_degree-1, 3)
    true_pos_controls = np.random.randn(bezier_degree-1, 3)

    # Landmarks are pushed out in front of the trajectory (z += 20).
    true_landmarks = np.random.randn(num_landmarks, 3) * 5
    true_landmarks[:, 2] += 20

    true_frame_orientations = np.array([cayley(zero_offset_bezier(true_rot_controls, t)) for t in true_frame_timestamps])
    true_frame_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in true_frame_timestamps])

    true_accel_readings = np.array([predict_accel(true_pos_controls, true_rot_controls, true_accel_bias, true_gravity, t)
                                    for t in true_accel_timestamps])

    true_features = np.array([[predict_feature(true_pos_controls, true_rot_controls, x, t) for x in true_landmarks]
                              for t in true_frame_timestamps])

    true_gyro_timestamps = np.linspace(0, 1, num_gyro_readings)
    true_gyro_readings = np.array([predict_gyro(true_rot_controls, true_gyro_bias, t)
                                   for t in true_gyro_timestamps])

    #
    # Add sensor noise
    #
    observed_gyro_timestamps = add_white_noise(true_gyro_timestamps, gyro_timestamp_noise)
    observed_gyro_readings = add_white_noise(true_gyro_readings, gyro_reading_noise)

    observed_accel_timestamps = add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = add_white_noise(true_accel_readings, accel_reading_noise)

    observed_frame_timestamps = add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = add_orientation_noise(true_frame_orientations, frame_orientation_noise)
    observed_frame_orientations[0] = true_frame_orientations[0]  # do not add noise to first frame

    observed_features = add_white_noise(true_features, feature_noise)

    #
    # Plot features
    #
    plt.clf()
    plot_tracks(true_features, 'x-g', limit=10, alpha=.4)
    plot_tracks(observed_features, 'o-r', limit=10, alpha=.4)
    plt.show()

    # NOTE(review): early return — all code below is unreachable (debug leftover?).
    return

    #
    # Solve for orientation and gyro bias
    #
    print 'Estimating orientation...'
    estimated_gyro_bias, estimated_rot_controls = estimate_orientation(
        bezier_degree,
        observed_gyro_timestamps,
        observed_gyro_readings,
        observed_frame_timestamps,
        observed_frame_orientations)

    estimated_accel_orientations = np.array([predict_orientation(estimated_rot_controls, t)
                                             for t in observed_accel_timestamps])

    #
    # Solve for position, accel bias, and gravity
    #
    print 'Estimating position...'
    estimated_pos_controls, estimated_accel_bias, estimated_gravity = estimate_position(
        bezier_degree,
        observed_accel_timestamps,
        estimated_accel_orientations,
        observed_accel_readings,
        observed_frame_timestamps,
        observed_frame_orientations,
        observed_features)

    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t) for t in true_frame_timestamps])

    estimated_pfeatures = np.array([[pr(predict_feature(estimated_pos_controls, true_rot_controls, x, t))
                                     for x in true_landmarks]
                                    for t in true_frame_timestamps])
    true_pfeatures = pr(true_features)
    observed_pfeatures = pr(observed_features)

    #
    # Report
    #
    print 'Gyro bias error:', np.linalg.norm(estimated_gyro_bias - true_gyro_bias)
    print '  True gyro bias:', true_gyro_bias
    print '  Estimated gyro bias:', estimated_gyro_bias

    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print '  True accel bias:', true_accel_bias
    print '  Estimated accel bias:', estimated_accel_bias

    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print '  True gravity:', true_gravity
    print '  Estimated gravity:', estimated_gravity
    print '  Estimated gravity magnitude:', np.linalg.norm(estimated_gravity)
    for i in range(num_frames):
        print 'Frame %d position error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i]))

    #
    # Plot orientation results
    #
    plot_timestamps = np.linspace(0, 1, 50)
    estimated_gyro_readings = np.array([predict_gyro(estimated_rot_controls, true_gyro_bias, t)
                                        for t in plot_timestamps])

    true_orientations = np.array([SO3.log(predict_orientation(true_rot_controls, t))
                                  for t in plot_timestamps])
    estimated_orientations = np.array([SO3.log(predict_orientation(estimated_rot_controls, t))
                                       for t in plot_timestamps])
    observed_orientations = np.array(map(SO3.log, observed_frame_orientations))

    plt.figure(figsize=(14, 6))
    plt.subplot(1, 2, 1)
    plt.title('Gyro readings')
    plt.plot(plot_timestamps, estimated_gyro_readings, ':', label='estimated', alpha=1)
    plt.plot(true_gyro_timestamps, true_gyro_readings, '-', label='true', alpha=.3)
    plt.plot(true_gyro_timestamps, observed_gyro_readings, 'x', label='observed')
    plt.xlim(-.1, 1.5)
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Orientation')
    plt.plot(plot_timestamps, estimated_orientations, ':', label='estimated', alpha=1)
    plt.plot(plot_timestamps, true_orientations, '-', label='true', alpha=.3)
    plt.plot(true_frame_timestamps, observed_orientations, 'x', label='observed')
    plt.xlim(-.1, 1.5)
    plt.legend()

    #
    # Plot position results
    #
    plot_timestamps = np.linspace(0, 1, 100)

    true_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in plot_timestamps])
    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t) for t in plot_timestamps])

    true_accels = np.array([predict_accel(true_pos_controls, true_rot_controls, true_accel_bias, true_gravity, t)
                            for t in plot_timestamps])
    estimated_accels = np.array([predict_accel(estimated_pos_controls, true_rot_controls, estimated_accel_bias, estimated_gravity, t)
                                 for t in plot_timestamps])

    fig = plt.figure(figsize=(14, 10))
    ax = fig.add_subplot(2, 2, 1, projection='3d')
    ax.plot(true_positions[:, 0], true_positions[:, 1], true_positions[:, 2], '-b')
    ax.plot(estimated_positions[:, 0], estimated_positions[:, 1], estimated_positions[:, 2], '-r')
    #ax.plot(true_landmarks[:,0], true_landmarks[:,1], true_landmarks[:,2], '.k', alpha=.2)

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(plot_timestamps, estimated_accels, ':', label='estimated', alpha=1)
    ax.plot(plot_timestamps, true_accels, '-', label='true', alpha=.3)
    ax.plot(observed_accel_timestamps, observed_accel_readings, 'x', label='observed')
    ax.legend()
    ax.set_xlim(-.1, 1.5)

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(true_pfeatures[1, :, 0], true_pfeatures[1, :, 1], 'x', label='true', alpha=.8)
    ax.plot(estimated_pfeatures[1, :, 0], estimated_pfeatures[1, :, 1], 'o', label='estimated', alpha=.4)

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(true_pfeatures[-1, :, 0], true_pfeatures[-1, :, 1], '.', label='true', alpha=.8)
    ax.plot(observed_pfeatures[-1, :, 0], observed_pfeatures[-1, :, 1], 'x', label='observed', alpha=.8)
    ax.plot(estimated_pfeatures[-1, :, 0], estimated_pfeatures[-1, :, 1], 'o', label='estimated', alpha=.4)

    plt.show()
if __name__ == '__main__':
    # Fixed seed so the synthetic experiment is reproducible.
    np.random.seed(1)
    np.set_printoptions(linewidth=500, suppress=True)
    matplotlib.rc('font', size=9)
    matplotlib.rc('legend', fontsize=9)
    run_position_estimation()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,133
|
alexflint/spline-initialization
|
refs/heads/master
|
/fit_bezier.py
|
import numpy as np
from bezier import bezier, bezier_coefs, zero_offset_bezier, zero_offset_bezier_coefs
import matplotlib
import matplotlib.pyplot as plt
def fit_bezier_1d(ts, ys, bezier_order):
    """Least-squares fit of a 1-d Bezier curve to samples ys at times ts."""
    design_rows = [bezier_coefs(t, bezier_order) for t in ts]
    offsets = [bezier(np.zeros(bezier_order), t) - y for t, y in zip(ts, ys)]
    jacobian = np.array(design_rows)
    rhs = -np.array(offsets)
    return np.linalg.lstsq(jacobian, rhs)[0]
def fit_bezier(ts, ys, bezier_order):
    """Fit a Bezier curve to 1-d or multi-dimensional samples.

    For multi-dimensional ys, each output dimension is fit independently and
    the per-dimension control values are stacked as columns.
    """
    ys = np.asarray(ys)
    if ys.ndim != 1:
        columns = [fit_bezier_1d(ts, ys[:, i], bezier_order)[:, np.newaxis]
                   for i in range(ys.shape[1])]
        return np.hstack(columns)
    return fit_bezier_1d(ts, ys, bezier_order)
def fit_zero_offset_bezier_1d(ts, ys, bezier_order):
    """Least-squares fit of a zero-offset 1-d Bezier curve (curve(0) == 0)."""
    design_rows = [zero_offset_bezier_coefs(t, bezier_order) for t in ts]
    offsets = [zero_offset_bezier(np.zeros(bezier_order), t) - y for t, y in zip(ts, ys)]
    jacobian = np.array(design_rows)
    rhs = -np.array(offsets)
    return np.linalg.lstsq(jacobian, rhs)[0]
def fit_zero_offset_bezier(ts, ys, bezier_order):
    """Fit a zero-offset Bezier curve to 1-d or multi-dimensional samples."""
    ys = np.asarray(ys)
    if ys.ndim != 1:
        columns = [fit_zero_offset_bezier_1d(ts, ys[:, i], bezier_order)[:, np.newaxis]
                   for i in range(ys.shape[1])]
        return np.hstack(columns)
    return fit_zero_offset_bezier_1d(ts, ys, bezier_order)
def main():
    """Demo: fit a zero-offset Bezier curve to noiseless samples and plot
    the true versus estimated curves."""
    bezier_order = 4
    num_samples = 10

    true_controls = np.random.rand(bezier_order, 3)
    true_ts = np.linspace(0, 1, num_samples)
    true_ys = np.array([zero_offset_bezier(true_controls, t) for t in true_ts])

    estimated_controls = fit_zero_offset_bezier(true_ts, true_ys, bezier_order)

    # Dense sampling for smooth plot curves.
    plot_ts = np.linspace(0, 1, 50)
    plot_true_ys = np.array([zero_offset_bezier(true_controls, t) for t in plot_ts])
    estimated_ys = np.array([zero_offset_bezier(estimated_controls, t) for t in plot_ts])

    plt.clf()
    plt.plot()
    plt.plot(plot_ts, estimated_ys, ':', alpha=1, label='estimated')
    plt.plot(plot_ts, plot_true_ys, '-', alpha=.3, label='true')
    plt.plot(true_ts, true_ys, 'x', alpha=1, label='observed')
    plt.legend()
    plt.show()
if __name__ == '__main__':
    # Fixed seed so the demo is reproducible.
    np.random.seed(1)
    np.set_printoptions(linewidth=500, suppress=True)
    matplotlib.rc('font', size=9)
    matplotlib.rc('legend', fontsize=9)
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,134
|
alexflint/spline-initialization
|
refs/heads/master
|
/calibration.py
|
import numpy as np
def perspective_distorted_to_calibrated(distorted, radial_params, tangential_params, num_iters=20):
assert len(distorted) == 2
k1, k2, k3 = radial_params
p1, p2 = tangential_params
undistorted = np.asarray(distorted).copy()
for i in range(num_iters):
r2 = np.dot(undistorted, undistorted)
k_radial = 1. + k1 * r2 + k2 * r2*r2 + k3 * r2 * r2 * r2
delta_x_0 = 2.*p1*undistorted[0]*undistorted[1] + p2*(r2 + 2.*undistorted[0]*undistorted[0])
delta_x_1 = p1*(r2 + 2.*undistorted[1]*undistorted[1]) + 2.*p2*undistorted[0]*undistorted[1]
undistorted[0] = (distorted[0]-delta_x_0) / k_radial
undistorted[1] = (distorted[1]-delta_x_1) / k_radial
return undistorted
def perspective_image_to_calibrated(image, camera_matrix_inv, radial_params, tangential_params, num_iters=20):
    """Map a pixel position to calibrated camera coordinates, undistorting it."""
    image = np.asarray(image)
    # Promote a bare (x, y) pixel to homogeneous coordinates.
    if len(image) == 2:
        image = np.array([image[0], image[1], 1.])
    homog = np.dot(camera_matrix_inv, image)
    distorted = homog[:2] / homog[2]
    #return distorted
    return perspective_distorted_to_calibrated(distorted, radial_params, tangential_params, num_iters)
class LensModel(object):
    """Pinhole camera with radial/tangential distortion, rescalable to a
    different image resolution via `scaling`."""

    def __init__(self, calibration, scaling=1., **kwargs):
        """Build a lens model from a calibration dict.

        Parameters:
          calibration: dict with 'image_size', 'camera_matrix',
            'radial_params' and 'tangential_params' keys.
          scaling: resolution factor relative to the calibrated image size.
        """
        self.calibration = calibration
        # Image size and the focal/principal-point entries scale linearly
        # with resolution.
        self.image_size = (np.array(calibration['image_size']) * scaling).astype(int)
        camera_matrix = np.array(calibration['camera_matrix'], dtype=float) * scaling
        # Fix the homogeneous row: scaling the whole matrix would leave
        # K[2,2] == scaling, which corrupts the perspective normalization
        # in image_to_calibrated. (Previously the whole matrix was scaled
        # and debug prints were emitted here.)
        camera_matrix[2, :] = (0., 0., 1.)
        self.camera_matrix = camera_matrix
        self.camera_matrix_inv = np.linalg.inv(self.camera_matrix)

    def image_to_calibrated(self, image):
        """Map a pixel position to undistorted calibrated coordinates."""
        return perspective_image_to_calibrated(image,
                                               self.camera_matrix_inv,
                                               self.calibration['radial_params'],
                                               self.calibration['tangential_params'])

    def image_distance_to_calibrated(self, distance):
        """Compute a distance in the calibrated domain equivalent to the specified
        distance in the image domain. This is an approximation and only applies near
        the center of the image."""
        return np.mean(np.diag(self.camera_matrix_inv)[:2] * distance)
# Taken from:
# https://github.com/FlybyMedia/Nestor/blob/master/data/camera/iPhone5640h360wparameters_Oleg.xml
IPHONE_5S_CALIBRATION = dict(
    # Calibrated image resolution (width, height) in pixels.
    image_size=(1280,720),
    # Pinhole intrinsics: fx, fy on the diagonal, principal point in the last column.
    camera_matrix=np.array([[625.39885, 0, 321.32716],
                            [0., 624.71256, 175.33386],
                            [0., 0., 1.]]),
    # Radial distortion coefficients (k1, k2, k3).
    radial_params=[0.08238, -0.03432, 0.],
    # Tangential distortion coefficients (p1, p2).
    tangential_params=[0., 0.],
    # Rotation taking camera-frame vectors to the IMU frame.
    camera_to_imu_rotation=np.array([[-0.99731099, -0.0675295, -0.02847041],
                                     [0.06712113, -0.9976311, 0.01506464],
                                     [-0.02942027, 0.01311316, 0.99948111]])
)
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,135
|
alexflint/spline-initialization
|
refs/heads/master
|
/structures.py
|
import numpy as np
import lie
class FeatureObservation(object):
    """A single 2-d observation of one landmark track in one frame."""

    def __init__(self, frame_id, track_id, position):
        # Index of the frame in which the feature was observed.
        self.frame_id = frame_id
        # Identifier of the landmark track this observation belongs to.
        self.track_id = track_id
        # 2-d image position of the observation.
        self.position = position
class Measurements(object):
    """Bundle of all sensor measurements consumed by the estimators."""

    def __init__(self,
                 accel_timestamps,
                 accel_orientations,
                 accel_readings,
                 frame_timestamps,
                 frame_orientations,
                 features):
        # Accelerometer stream: timestamps, orientations at those times, readings.
        self.accel_timestamps = accel_timestamps
        self.accel_orientations = accel_orientations
        self.accel_readings = accel_readings
        # Camera stream: per-frame timestamps, orientations, and feature observations.
        self.frame_timestamps = frame_timestamps
        self.frame_orientations = frame_orientations
        self.features = features
class Calibration(object):
    """Fixed sensor-rig parameters: extrinsics, intrinsics, and gravity norm."""

    def __init__(self,
                 imu_to_camera,
                 camera_matrix,
                 gravity_magnitude):
        # Rotation taking IMU-frame vectors to the camera frame.
        self.imu_to_camera = imu_to_camera
        # 3x3 pinhole intrinsics matrix.
        self.camera_matrix = camera_matrix
        # Magnitude of the gravity vector (e.g. 9.8).
        self.gravity_magnitude = gravity_magnitude

    @classmethod
    def random(cls, image_width=320., image_height=240., gravity_magnitude=9.8):
        """Sample a random extrinsic rotation with canonical intrinsics."""
        rotation = lie.SO3.exp(np.random.randn(3))
        intrinsics = np.array([[image_width, 0., image_width/2.],
                               [0., image_height, image_height/2.],
                               [0., 0., 1.]])
        return Calibration(rotation, intrinsics, gravity_magnitude)
class PositionEstimate(object):
    """Result of position estimation: spline, gravity, bias, and landmarks."""

    def __init__(self, position_curve, gravity, accel_bias, landmarks):
        # Spline for the device position over time (has a .controls array).
        self.position_curve = position_curve
        # Estimated gravity vector (3,).
        self.gravity = gravity
        # Estimated accelerometer bias (3,).
        self.accel_bias = accel_bias
        # Estimated landmark positions, shape (num_tracks, 3).
        self.landmarks = landmarks

    def flatten(self):
        """Concatenate every parameter into a single flat vector."""
        parts = (self.position_curve.controls.flatten(),
                 self.gravity,
                 self.accel_bias,
                 self.landmarks.flatten())
        return np.hstack(parts)

    @property
    def size(self):
        """Total number of scalar parameters in this estimate."""
        return len(self.flatten())
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,136
|
alexflint/spline-initialization
|
refs/heads/master
|
/interpolate_imu_orientation.py
|
import bisect
import numpy as np
import matplotlib.pyplot as plt
from lie import SO3
def main():
    """Interpolate per-frame orientations to accelerometer timestamps.

    Reads frame orientations and accel readings from out/, geodesically
    interpolates an orientation for each accel timestamp (SLERP on SO(3)),
    writes out/accel_orientations.txt, and shows a diagnostic plot.
    """
    orientation_data = np.loadtxt('out/frame_orientations.txt')
    accel_data = np.loadtxt('out/accelerometer.txt')

    # Each orientation row is [timestamp, 9 row-major rotation-matrix entries].
    frame_timestamps = orientation_data[:, 0]
    frame_orientations = orientation_data[:, 1:].reshape((-1, 3, 3))

    accel_timestamps = accel_data[:, 0]
    accel_readings = accel_data[:, 1:]  # currently unused beyond loading

    accel_orientations = []
    for accel_time in accel_timestamps:
        # Find the bracketing pair of frames; every accel timestamp must fall
        # strictly inside the frame time range.
        frame_index = bisect.bisect_left(frame_timestamps, accel_time)
        # Fix: the previous message built 't=' + accel_time, which itself
        # raised TypeError (str + float) whenever the assert fired.
        assert 0 < frame_index < len(frame_timestamps), 't=%s' % accel_time
        t0 = frame_timestamps[frame_index - 1]
        r0 = frame_orientations[frame_index - 1]
        t1 = frame_timestamps[frame_index]
        r1 = frame_orientations[frame_index]
        # Geodesic interpolation: rotate r0 by a fraction of the relative
        # rotation log between the bracketing frames.
        w01 = SO3.log(np.dot(r1, r0.T))
        a = (accel_time - t0) / (t1 - t0)
        assert 0 <= a <= 1
        accel_orientations.append(np.dot(SO3.exp(a * w01), r0))

    # Log-map all orientations relative to the first frame (used only for the
    # commented-out magnitude plots below).
    frame_ws = []
    accel_ws = []
    rbase = frame_orientations[0]
    for r in frame_orientations:
        frame_ws.append(SO3.log(np.dot(r, rbase.T)))
    for r in accel_orientations:
        accel_ws.append(SO3.log(np.dot(r, rbase.T)))

    np.savetxt('out/accel_orientations.txt',
               np.hstack((accel_timestamps[:, None], np.reshape(accel_orientations, (-1, 9)))))

    plt.clf()
    plt.hold('on')
    plt.plot(frame_timestamps, np.asarray(frame_orientations).reshape((-1, 9)))
    plt.plot(accel_timestamps, np.asarray(accel_orientations).reshape((-1, 9)), '.')
    #plt.plot(frame_timestamps, map(np.linalg.norm, frame_ws))
    #plt.plot(accel_timestamps, map(np.linalg.norm, accel_ws), '.')
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,137
|
alexflint/spline-initialization
|
refs/heads/master
|
/track_sequence.py
|
import os
import numpy as np
import scipy.optimize
from pathlib import Path
from matplotlib import pyplot as plt
import cv2
import calibration
import geometry
from lie import SO3
def enumerate_frame_files(path, skip=0):
    """Yield (timestamp, path) pairs for the .pgm frames under *path*.

    The filename stem (everything before '.pgm') is parsed as a float
    timestamp.  The first *skip* matching entries (in os.listdir order)
    are discarded.
    """
    remaining = skip
    for name in os.listdir(str(path)):
        if not name.endswith('.pgm'):
            continue
        if remaining > 0:
            remaining -= 1
            continue
        yield float(name[:-4]), path / name
def plot_correspondences(img1, img2, kps1, kps2, matches, style):
    """Draw two images side by side and connect matched keypoints.

    img1 occupies x in [0, .9] and img2 x in [1, 1.9]; keypoint pixel
    coordinates are rescaled into those normalized extents, with y
    normalized by image height (y axis inverted to image convention).
    """
    plt.set_cmap('gray')
    plt.imshow(img1, extent=(0, .9, 1, 0))
    plt.imshow(img2, extent=(1, 1.9, 1, 0))
    plt.xlim(0, 1.9)
    plt.ylim(1, 0)
    height1, width1 = img1.shape[0], img1.shape[1]
    height2, width2 = img2.shape[0], img2.shape[1]
    for match in matches:
        u1, v1 = kps1[match.queryIdx].pt
        u2, v2 = kps2[match.trainIdx].pt
        xs = [.9 * u1 / width1, 1. + .9 * u2 / width2]
        ys = [v1 / height1, v2 / height2]
        plt.plot(xs, ys, style)
def main():
    """Track ORB features from the first selected frame to later frames,
    estimate relative camera orientations via epipolar geometry, and dump
    tracks, orientations, and accelerometer data to out/ for downstream use.

    NOTE(review): this module uses Python 2 syntax (print statements) and the
    legacy cv2.CV_LOAD_IMAGE_COLOR constant; it will not run under Python 3
    or a modern OpenCV as-is.
    """
    np.random.seed(123)

    # Load calibration; 2px of image-space error converted to the calibrated
    # (unit-focal) coordinate frame used for epipolar RANSAC.
    lens = calibration.LensModel(calibration.IPHONE_5S_CALIBRATION, scaling=.5)
    epipolar_threshold = lens.image_distance_to_calibrated(2.)
    print 'Epipolar threshold:', epipolar_threshold

    # Load images (hard-coded local dataset path).
    skip_frames = 45
    data_dir = Path('/Users/alexflint/Data/Initialization/Painting')
    all_frame_timestamps, all_frame_paths = zip(*enumerate_frame_files(data_dir / 'imageDump',
                                                                       skip=skip_frames))

    # Select frames to track: every 5th of the first 60 frames.
    #frame_indices = np.linspace(0, len(all_frame_timestamps)-1, num_frames).round().astype(int)
    frame_indices = range(0, 60, 5)
    num_frames = len(frame_indices)
    frame_timestamps = [all_frame_timestamps[i] for i in frame_indices]
    # Orientation of the first frame is the reference (identity).
    frame_orientations = [np.eye(3)]

    begin_time = frame_timestamps[0]
    end_time = frame_timestamps[-1]
    print 'Begin time:', begin_time
    print 'End time:', end_time

    # Create descriptor
    orb = cv2.ORB()

    # Compute keypoints and descriptors for each selected frame.
    timestamps = []
    color_images = []
    gray_images = []
    keypoints = []
    descriptors = []
    for i in frame_indices:
        image = cv2.imread(str(all_frame_paths[i]), cv2.CV_LOAD_IMAGE_COLOR)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        color_images.append(image)
        gray_images.append(gray)
        timestamps.append(all_frame_timestamps[i])
        kps, descr = orb.detectAndCompute(gray, None)
        keypoints.append(kps)
        descriptors.append(descr)

    # Exhaustive Hamming matcher with cross-checking (mutual best match).
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Each track starts with the keypoint's position in frame 0; entries are
    # (frame_index, x, y) tuples.
    raw_tracks = [[(0, kp.pt[0], kp.pt[1])] for kp in keypoints[0]]
    inlier_tracks = [[(0, kp.pt[0], kp.pt[1])] for kp in keypoints[0]]

    # Match features between frame 0 and each later frame i.
    for i in range(1, num_frames):
        print 'Processing frame %d of %d' % (i, num_frames-1)
        matches = bf.match(descriptors[0], descriptors[i])
        # Sort matches in the order of their distance.
        matches = sorted(matches, key=lambda x: x.distance)
        pts1 = [keypoints[0][m.queryIdx].pt for m in matches]
        pts2 = [keypoints[i][m.trainIdx].pt for m in matches]
        # Convert pixel coordinates to calibrated coordinates before the
        # epipolar fit.
        pts1 = np.asarray(map(lens.image_to_calibrated, pts1))
        pts2 = np.asarray(map(lens.image_to_calibrated, pts2))
        #xmin, ymin = np.min(pts1, axis=0)
        #xmax, ymax = np.max(pts1, axis=0)
        #print 'X: %f ... %f' % (xmin, xmax)
        #print 'Y: %f ... %f' % (ymin, ymax)
        # Robustly estimate relative pose; inlier_mask flags matches
        # consistent with the recovered epipolar geometry.
        R, t, inlier_mask = geometry.estimate_epipolar_pose(pts1, pts2, epipolar_threshold, refine=True)
        print R
        frame_orientations.append(R)
        inlier_matches = []
        outlier_matches = []
        for match, is_inlier in zip(matches, inlier_mask):
            kp = keypoints[i][match.trainIdx]
            raw_tracks[match.queryIdx].append((i, kp.pt[0], kp.pt[1]))
            if is_inlier:
                inlier_matches.append(match)
                inlier_tracks[match.queryIdx].append((i, kp.pt[0], kp.pt[1]))
            else:
                outlier_matches.append(match)
        #plt.clf()
        #plt.hold('on')
        #plot_correspondences(gray_images[0],
        #                     gray_images[i],
        #                     keypoints[0],
        #                     keypoints[i],
        #                     outlier_matches,
        #                     'r-')
        #plot_correspondences(gray_images[0],
        #                     gray_images[i],
        #                     keypoints[0],
        #                     keypoints[i],
        #                     inlier_matches,
        #                     'b-')
        #plt.show()

    def compute_correspondence_matrix(tracks, num_frames):
        # mat[i, j] counts tracks observed in both frame i and frame j.
        mat = np.zeros((num_frames, num_frames), int)
        for track in tracks:
            for fi, xi, yi in track:
                for fj, xj, yj in track:
                    mat[fi, fj] += 1
        return mat

    raw_cmatrix = compute_correspondence_matrix(raw_tracks, num_frames)
    inlier_cmatrix = compute_correspondence_matrix(inlier_tracks, num_frames)

    print 'Raw correspondence counts:'
    print raw_cmatrix
    np.savetxt('out/raw_cmatrix.txt', raw_cmatrix, fmt='%3d')

    print 'Inlier correspondence counts:'
    print inlier_cmatrix
    np.savetxt('out/inlier_cmatrix.txt', inlier_cmatrix, fmt='%3d')

    # Timestamp followed by the row-major 3x3 orientation for each frame.
    np.savetxt('out/frame_orientations.txt',
               np.hstack((np.asarray(frame_timestamps)[:,None],
                          np.asarray(frame_orientations).reshape((-1, 9)))))

    # Re-export accelerometer readings inside the tracked time window,
    # rotated from the IMU frame into the camera frame.
    accel_data = np.loadtxt(str(data_dir / 'accelerometer.txt'))
    accel_data_out = []
    for row in accel_data:
        timestamp = row[0]
        if begin_time < timestamp < end_time:
            cam_reading = np.dot(calibration.IPHONE_5S_CALIBRATION['camera_to_imu_rotation'].T, row[1:])
            accel_data_out.append(np.hstack((row[0], cam_reading)))
    np.savetxt('out/accelerometer.txt', accel_data_out)

    # Dump inlier tracks with at least two observations:
    # track_id frame_index x y
    with open('out/features.txt', 'w') as fd:
        for i, track in enumerate(inlier_tracks):
            if len(track) >= 2:
                for feature in track:
                    fd.write('%d %d %f %f\n' % (i, feature[0], feature[1], feature[2]))
# Script entry point; suppress scientific notation in printed arrays.
if __name__ == '__main__':
    np.set_printoptions(suppress=True)
    main()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,138
|
alexflint/spline-initialization
|
refs/heads/master
|
/run_simulations.py
|
import numpy as np
import copy
import structures
import simulation
import socp
import spline_socp
import utils
# Default simulation parameters shared by every experiment below.
# NOTE(review): units are inferred from usage and should be confirmed against
# simulation.simulate_trajectory — duration presumably in seconds,
# feature_noise presumably in pixels (it is multiplied by 3 to form the
# feature tolerance passed to the estimator).
simulator_options = dict(
    duration=5.,
    num_frames=8,
    num_landmarks=50,
    num_imu_readings=100,
    degree=3,           # spline degree
    num_controls=8,     # number of spline control points
    accel_timestamp_noise=0,
    accel_reading_noise=1e-3,
    accel_orientation_noise=0,
    frame_timestamp_noise=0,
    frame_orientation_noise=0,
    feature_noise=1.
)
def mean_position_error(true_trajectory, estimated_trajectory, frame_timestamps):
    """Mean Euclidean distance between the true and estimated positions,
    sampled at the given frame timestamps."""
    true_positions = true_trajectory.position_curve.evaluate(frame_timestamps)
    estimated_positions = estimated_trajectory.position_curve.evaluate(frame_timestamps)
    return np.linalg.norm(estimated_positions - true_positions, axis=1).mean()
def mean_velocity_error(true_trajectory, estimated_trajectory, frame_timestamps):
    """Mean Euclidean distance between the true and estimated velocities
    (first derivative of the position curve) at the given timestamps."""
    true_velocities = true_trajectory.position_curve.evaluate_d1(frame_timestamps)
    estimated_velocities = estimated_trajectory.position_curve.evaluate_d1(frame_timestamps)
    return np.linalg.norm(estimated_velocities - true_velocities, axis=1).mean()
def gravity_direction_error(true_trajectory, estimated_trajectory):
    """Angle (radians) between the true and estimated gravity directions.

    Fix: the dot product of two unit vectors can land slightly outside
    [-1, 1] due to floating-point rounding, making np.arccos return NaN for
    near-identical directions; clamp before taking the arccos.
    """
    actual = utils.normalized(true_trajectory.gravity)
    estimated = utils.normalized(estimated_trajectory.gravity)
    return np.arccos(np.clip(np.dot(actual, estimated), -1.0, 1.0))
def gravity_magnitude_error(true_trajectory, estimated_trajectory):
    """Absolute difference between the true and estimated gravity magnitudes."""
    true_magnitude = np.linalg.norm(true_trajectory.gravity)
    estimated_magnitude = np.linalg.norm(estimated_trajectory.gravity)
    return np.abs(true_magnitude - estimated_magnitude)
def accel_bias_error(true_trajectory, estimated_trajectory):
    """Euclidean distance between the true and estimated accelerometer biases."""
    bias_delta = true_trajectory.accel_bias - estimated_trajectory.accel_bias
    return np.linalg.norm(bias_delta)
def evaluate(calibration, measurements, spline_template, estimator, tol, true_trajectory):
    """Estimate a trajectory from the measurements and score it.

    Returns a (position, velocity, accel-bias, gravity-direction) error tuple
    computed against the ground-truth trajectory.
    """
    estimated = spline_socp.estimate_trajectory(
        calibration,
        measurements,
        spline_template,
        estimator=estimator,
        feature_tolerance=tol)
    timestamps = measurements.frame_timestamps
    return (mean_position_error(true_trajectory, estimated, timestamps),
            mean_velocity_error(true_trajectory, estimated, timestamps),
            accel_bias_error(true_trajectory, estimated),
            gravity_direction_error(true_trajectory, estimated))
def simulate_and_evaluate(num_trials, calibration, estimators=None, **options):
    """Run the simulator until num_trials successful trials are collected.

    Each trial row is the four-error tuple from evaluate() for every
    estimator, concatenated.  Failed simulations/estimations are retried
    until enough successes accumulate.

    NOTE(review): the feature tolerance is taken from the module-level
    simulator_options, not from **options — confirm this is intended when
    callers override feature_noise.

    Returns:
        ndarray of shape (num_trials, 4 * len(estimators)).
    """
    if estimators is None:
        estimators = ['mixed']
    trials = []
    while len(trials) < num_trials:
        try:
            true_trajectory, measurements, spline_template = simulation.simulate_trajectory(
                calibration, **options)
            row = []
            for estimator in estimators:
                row.extend(evaluate(
                    calibration,
                    measurements,
                    spline_template,
                    estimator,
                    simulator_options['feature_noise']*3,
                    true_trajectory))
            trials.append(row)
        except spline_socp.FeasibilityError:
            print 'Simulator failed to generate trajectory. Retrying...'
        except spline_socp.InsufficientObservationsError:
            print 'Simulator failed to generate trajectory. Retrying...'
    return np.asarray(trials)
def run_accuracy_comparison():
    """Compare the 'mixed' and 'householder' estimators over 1000 trials
    and save the per-trial error metrics."""
    np.random.seed(0)
    calib = structures.Calibration.random()
    results = simulate_and_evaluate(1000, calib, ['mixed', 'householder'],
                                    **simulator_options)
    np.savetxt('results/accuracy_comparison.txt', results)
def run_accuracy_vs_feature_noise():
    """Sweep image-feature noise on one fixed trajectory and record the
    resulting mean position error of the 'mixed' estimator.

    A single noiseless-feature trajectory is simulated once; for each noise
    level a deep copy of its measurements is perturbed and re-estimated.
    """
    np.random.seed(1)
    calibration = structures.Calibration.random()
    options = simulator_options.copy()
    # Simulate with zero feature noise so noise can be added per sweep step.
    options['feature_noise'] = 0.
    options['accel_reading_noise'] = 1e-2
    true_trajectory, measurements, spline_template = simulation.simulate_trajectory(calibration, **options)
    results = []
    for feature_noise in np.linspace(0, 10, 25):
        print 'Trying feature noise = %f' % feature_noise
        noisy_measurements = copy.deepcopy(measurements)
        for f in noisy_measurements.features:
            f.position += np.random.randn(2) * feature_noise
        try:
            # Tolerance of 3 sigma plus a small epsilon so the zero-noise
            # case remains feasible.
            pos_err, vel_err, bias_err, g_err = evaluate(
                calibration,
                noisy_measurements,
                spline_template,
                'mixed',
                feature_noise*3+1e-3,
                true_trajectory)
            results.append((feature_noise, pos_err))
        except spline_socp.FeasibilityError:
            # Infeasible problems are simply skipped for this noise level.
            pass
    np.savetxt('results/accuracy_vs_feature_noise.txt', results)
def run_timings_vs_num_landmarks():
    """Measure solver time as a function of the number of landmarks used.

    A single 1000-landmark trajectory is simulated; each sweep step keeps
    only features whose track_id is below the cutoff and re-estimates.

    NOTE(review): simulate_trajectory is unpacked into FOUR values here but
    into THREE everywhere else in this module — one of the call sites is out
    of date with the simulator's return signature; confirm which.
    """
    np.random.seed(0)
    options = simulator_options.copy()
    options['num_landmarks'] = 1000
    calibration = structures.Calibration.random()
    trials = []
    true_trajectory, measurements, spline_template, true_frame_timestamps = simulation.simulate_trajectory(
        calibration, **options)
    all_features = measurements.features
    for n in np.linspace(10, 400, 25):
        # Keep only the first ~n landmarks' observations.
        measurements.features = filter(lambda f: f.track_id < n, all_features)
        try:
            spline_socp.estimate_trajectory(
                calibration,
                measurements,
                spline_template,
                estimator='mixed',
                feature_tolerance=options['feature_noise']*3)
            # socp.timings['last_solve'] is the wall time of the last solve.
            trials.append((n, socp.timings['last_solve']))
        except spline_socp.InsufficientObservationsError:
            print 'Simulator failed to generate trajectory. Retrying...'
    np.savetxt('results/timings_vs_num_landmarks.txt', trials)
def run_timings_vs_num_knots():
    """Measure solver time as a function of the number of spline knots.

    NOTE(review): like run_timings_vs_num_landmarks, this unpacks FOUR
    return values from simulate_trajectory while other call sites unpack
    three — confirm against the simulator's signature.
    """
    np.random.seed(0)
    calibration = structures.Calibration.random()
    trials = []
    true_trajectory, measurements, spline_template, true_frame_timestamps = simulation.simulate_trajectory(
        calibration, **simulator_options)
    for n in np.arange(2, 21):
        # Re-knot the same template with n evenly spaced knots.
        spline_template.knots = np.linspace(0, simulator_options['duration'], n)
        try:
            spline_socp.estimate_trajectory(
                calibration,
                measurements,
                spline_template,
                estimator='mixed',
                feature_tolerance=simulator_options['feature_noise']*3)
            trials.append((n, socp.timings['last_solve']))
        except spline_socp.InsufficientObservationsError:
            print 'Simulator failed to generate trajectory. Retrying...'
    np.savetxt('results/timings_vs_num_knots.txt', trials)
# Script entry point; only the feature-noise sweep is enabled by default.
if __name__ == '__main__':
    run_accuracy_vs_feature_noise()
    #run_accuracy_comparison()
    #run_timings_vs_num_knots()
|
{"/triangulation.py": ["/utils.py"], "/triangulation_test.py": ["/utils.py", "/lie.py", "/triangulation.py", "/spline_socp.py"], "/spline.py": ["/utils.py"], "/cayley.py": ["/lie.py"], "/sensor_models.py": ["/cayley.py", "/geometry.py"], "/plotting.py": ["/geometry.py"], "/geometry.py": ["/lie.py"], "/spline_test.py": ["/spline.py"], "/fit_bezier.py": ["/bezier.py"], "/structures.py": ["/lie.py"], "/interpolate_imu_orientation.py": ["/lie.py"]}
|
40,145
|
Maria173/weather_app
|
refs/heads/master
|
/widget_weather.py
|
import sys
from PyQt5 import QtCore
from PyQt5.QtGui import QPixmap, QImage
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtWidgets import QMainWindow
from weather_yandex import Request
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class MyWidget(QMainWindow):
    """Weather widget main window.

    Loads the UI from w_vidget.ui, shows a mascot image, and — after the
    user picks a city and clicks the button — chains two AccuWeather
    requests (city lookup, then current conditions) and displays the
    temperature, date/time, clothing advice, and a matching weather icon.
    """

    def __init__(self):
        super().__init__()
        uic.loadUi('w_vidget.ui', self)
        # Maps the full conditions-advice sentence (as produced by
        # Request.getConditions()) to the icon filename to display.
        intro = 'Думаю, будет не лишним захватить с собой '
        self.dict_conditions = {intro + 'солнечные очки.': 'sun.png',
                                intro + 'шарф и головной убор.': 'clouds.png',
                                intro + 'зонт и резиновые сапоги.': 'rain.png',
                                intro + 'дождевик и резиновые сапоги.': 'rain.png',
                                intro + 'высокие сапоги и тёплый головной убор.': 'snow.png',
                                intro + 'возможность взять такси (на улице гроза, и я очень волнуюсь за тебя)!':
                                    'thunder.png',
                                intro + 'хорошее настроение и наслаждаться погодой!': 'def.png'}
        # Initial mascot image and greeting text.
        self.image_profile = QImage('kevin.png')
        self.main_text.setText('Привет! Меня зовут Кевин :)\nХочешь узнать погоду? Тогда укажи город,\nв котором находишься!')
        self.main_text.setAlignment(Qt.AlignCenter)
        self.image_profile = self.image_profile.scaled(200, 200,
                                                       aspectRatioMode=QtCore.Qt.KeepAspectRatio,
                                                       transformMode=QtCore.Qt.SmoothTransformation)
        self.pict.setPixmap(QPixmap.fromImage(self.image_profile))
        self.show()
        self.btn.clicked.connect(self.run)

    def run(self):
        """Button handler: start the city-lookup request for the chosen city."""
        self.value = self.citylist.currentText()
        if self.value != 'Выберите город':
            url = self.getSelectedCityUrl()
            self.rq = Request(url)
            # When the lookup succeeds, chain the weather request.
            self.rq.onSuccess(self.getCityWeather)
        else:
            self.main_text.setText('Пожалуйста, укажи город :)')

    def getSelectedCityUrl(self):
        """URL for the AccuWeather city-search endpoint for self.value."""
        return 'http://dataservice.accuweather.com/locations/v1/cities/search?' \
               'apikey=icFNU4x13nHy5dJy0mIdXIAYi0E8TJF8&q={}'.format(self.value)

    def getWeatherUrl(self):
        """URL for current conditions of the city key found by the lookup."""
        key = self.rq.getKey()
        return "http://dataservice.accuweather.com/currentconditions/v1/{}.json?" \
               "language=ru-ru&details=true&apikey=icFNU4x13nHy5dJy0mIdXIAYi0E8TJF8".format(key)

    def getCityWeather(self):
        """Second stage: fetch conditions and register UI-update callbacks."""
        url = self.getWeatherUrl()
        self.req = Request(url)
        # generateText must run first: the show* callbacks read its results.
        self.req.onSuccess(self.req.generateText)
        self.req.onSuccess(self.showTemp)
        self.req.onSuccess(self.showDateTime)
        self.req.onSuccess(self.showWhatToWear)
        self.req.onSuccess(self.change_Image)

    def change_Image(self):
        """Swap the mascot image for the icon matching the conditions text."""
        self.image_profile = QImage(self.dict_conditions[self.req.getConditions()])
        self.image_profile = self.image_profile.scaled(200, 200,
                                                       aspectRatioMode=QtCore.Qt.KeepAspectRatio,
                                                       transformMode=QtCore.Qt.SmoothTransformation)
        self.pict.setPixmap(QPixmap.fromImage(self.image_profile))
        self.repaint()

    def showTemp(self):
        """Display the formatted temperature."""
        self.temperature.setText(str(self.req.getTemp()))

    def showDateTime(self):
        """Display the observation date and time."""
        self.date_time.setText(str(self.req.getDateTime()))

    def showWhatToWear(self):
        """Display the combined clothing advice."""
        self.main_text.setText(str(self.req.what_to_wear()))
        self.main_text.setWordWrap(True)
# Script entry point: start the Qt event loop with the weather widget.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    widget = MyWidget()
    widget.show()
    sys.exit(app.exec())
|
{"/widget_weather.py": ["/weather_yandex.py"]}
|
40,146
|
Maria173/weather_app
|
refs/heads/master
|
/weather_yandex.py
|
from PyQt5 import QtNetwork
from PyQt5.QtCore import QUrl
import json
class Request:
    """One asynchronous HTTP GET against the AccuWeather API, via Qt.

    Register callbacks with onSuccess(); they run in registration order once
    a successful JSON response has been parsed into self.resp_obj.
    """

    def __init__(self, url):
        self.url = url
        # Fix: the callback list must exist before the request is started.
        # The original assigned self.callbacks *after* doRequest(), so a
        # reply delivered first would hit AttributeError in handleResponse.
        self.callbacks = []
        self.doRequest()

    def doRequest(self):
        """Kick off the asynchronous GET; handleResponse fires on completion."""
        req = QtNetwork.QNetworkRequest(QUrl(self.url))
        self.nam = QtNetwork.QNetworkAccessManager()
        self.nam.finished.connect(self.handleResponse)
        self.nam.get(req)

    def handleResponse(self, reply):
        """Parse the reply JSON and invoke the callbacks, or report the error."""
        er = reply.error()
        if er == QtNetwork.QNetworkReply.NoError:
            bytes_string = reply.readAll()
            resp_str = str(bytes_string, 'utf-8')
            self.resp_obj = json.loads(resp_str)
            for cb in self.callbacks:
                cb()
        else:
            print("Error occurred: ", er)  # fix: message typo 'occured'
            print(reply.errorString())
            # Best-effort debug dump: the error body may not be valid JSON
            # and may lack a 'Message' field, so never let this crash.
            try:
                bytes_string = reply.readAll()
                resp_str = str(bytes_string, 'utf-8')
                self.resp_obj = json.loads(resp_str)
                print('[DEBUG] ' + self.resp_obj['Message'])
            except (ValueError, KeyError, TypeError):
                pass

    def getKey(self):
        """AccuWeather location key from a city-search response."""
        return self.resp_obj[0]['Key']

    def generateText(self):
        """Derive temperature/humidity/conditions advice from the response."""
        LocalObservationDateTime = self.resp_obj[0]['LocalObservationDateTime']
        self.weather_text = self.resp_obj[0]['WeatherText'].lower()
        self.temp = self.resp_obj[0]['Temperature']['Metric']['Value']
        self.humidity = self.resp_obj[0]['RelativeHumidity']
        Date = LocalObservationDateTime[:10]
        Time = LocalObservationDateTime[11:19]
        self.date_time = Date + '\n' + Time

        def compare_temp(self, t):
            # Clothing advice by temperature band (degrees Celsius).
            if t < -30.0:
                return 'пуховик или шубу; приветствуется многослойность.'
            elif (t >= -30.0) and (t < -20.0):
                return 'куртку на синтепоне или дублёнку.'
            elif (t >= -20.0) and (t < -10.0):
                return 'шерстяное пальто или шубу из искусственного меха.'
            elif (t >= -10.0) and (t < 0.0):
                return 'пальто или куртку.'
            elif (t >= 0.0) and (t < 10.0):
                return 'теплый плащ или лёгкое пальто.'
            elif (t >= 10.0) and (t < 20.0):
                return 'ветровку или джинсовую куртку.'
            elif (t >= 20.0) and (t < 30.0):
                return 'что-нибудь лёгкое из хлопка или смесовых тканей.'
            elif t >= 30.0:
                return 'что-нибудь лёгкое из натуральных тканей (лен, хлопок, шелк и т.д.).'

        def compare_humidity(self, humidity):
            # Advice by relative-humidity band (percent).
            if (humidity >= 0) and (humidity <= 33):
                return 'пару бутылочек с водой и влажные салфетки, потому что на улице очень сухо!\n(влажность около 10%)'
            elif (humidity > 33) and (humidity <= 66):
                return 'бутылочку с водой и пачку бумажных салфеток:)'
            elif (humidity > 66) and (humidity <= 100):
                return 'пачку бумажных салфеток, потому что на улице очень влажно!\n(влажность около 90%)'

        def compare_conditions(self, text):
            # Advice keyed on substrings of the lowercased conditions text.
            if 'солнечно' in text or 'ясно' in text:
                return 'солнечные очки.'
            elif 'облачно' in text or 'пасмурно' in text or 'ветренно' in text or 'ветер' in text:
                return 'шарф и головной убор.'
            elif 'дождь' in text:
                return 'зонт и резиновые сапоги.'
            elif 'ливень' in text:
                return 'дождевик и резиновые сапоги.'
            elif 'снег' in text or 'лёд' in text or 'лед' in text:
                return 'высокие сапоги и тёплый головной убор.'
            # Fix: the original tested `'гром' in text` twice in this branch.
            elif 'гроза' in text or 'гром' in text:
                return 'возможность взять такси (на улице гроза, и я очень волнуюсь за тебя)!'
            else:
                return 'хорошее настроение и наслаждаться погодой!'

        self.generation_by_temp = 'Из одежды рекомендую тебе выбрать ' + compare_temp(self, self.temp)
        self.generation_by_humidity = 'Советую не забыть про ' + compare_humidity(self, self.humidity)
        self.generation_by_text = 'Думаю, будет не лишним захватить с собой ' + \
            compare_conditions(self, self.weather_text)

    def getConditions(self):
        """Full conditions-advice sentence (also the widget's icon-lookup key)."""
        return self.generation_by_text

    def getTemp(self):
        """Temperature formatted for display, e.g. '12.3°C'."""
        return str(self.temp) + '°C'

    def getDateTime(self):
        """Observation date and time on two lines."""
        return self.date_time

    def what_to_wear(self):
        """Concatenated temperature, conditions, and humidity advice."""
        return self.generation_by_temp + ' ' + self.generation_by_text + ' ' + self.generation_by_humidity

    def onSuccess(self, callback):
        """Register a callback to run after a successful response."""
        self.callbacks.append(callback)
|
{"/widget_weather.py": ["/weather_yandex.py"]}
|
40,148
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py
|
from abc import ABC, abstractmethod
import io
class Codec(ABC):
    """
    Abstract base class for buffer codecs.

    Subclasses implement the streaming primitives :meth:`encode_to` and
    :meth:`decode_to`; the convenience wrappers :meth:`encode` and
    :meth:`decode` are built on top of them.

    Attributes:
        NAME(str): the name for this codec, used by :func:`get_codec` and in index entries.
    """

    def encode(self, buf):
        """
        Encode a buffer.

        Args:
            buf(bytes-like): the buffer to encode.

        Returns:
            bytes-like: the encoded data
        """
        sink = io.BytesIO()
        self.encode_to(buf, sink)
        return sink.getbuffer()

    @abstractmethod
    def encode_to(self, buf, out):
        """
        Encode a buffer to a binary output stream.

        Args:
            buf(bytes-like): the buffer to encode.
            out(file-like):
                the output stream. Must have a ``write`` method
                taking a :class:`bytes`.
        """

    def decode(self, buf):
        """
        Decode a buffer.

        Args:
            buf(bytes-like): the buffer to decode.

        Returns:
            bytes-like: the decoded data
        """
        result = bytearray()
        self.decode_to(buf, result)
        return result

    @abstractmethod
    def decode_to(self, buf, out):
        """
        Decode a buffer into a bytearray.

        Args:
            buf(bytes-like): the buffer to decode.
            out(bytearray):
                the bytearray to receive the output. This method will resize the
                bytearray as needed to accomodate the output.
        """

    @abstractmethod
    def config(self):
        """
        Get a JSON-serializable configuration for this codec. It should be able
        to be passed as ``**kwargs`` to the constructor.
        """
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,149
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py
|
"""
Codecs for encoding and decoding buffers in BinPickle.
This is similar in spirit to numcodecs_, but automatically handles some cases
such as splitting arrays into blocks.
.. _numcodecs: https://numcodecs.readthedocs.io/en/stable/
"""
from ._base import Codec # noqa: F401
import logging
from . import null
from . import gz
from . import blosc
from . import numcodecs
_log = logging.getLogger(__name__)
CODECS = {}
Null = null.Null
GZ = gz.GZ
Blosc = blosc.Blosc
NC = numcodecs.NC
def register(cls):
    """Register a codec class in the global CODECS table under its NAME."""
    CODECS[cls.NAME] = cls
def make_codec(codec, *, null_as_none=False, list_is_tuple=False):
    """
    Resolve a codec into a BinPickle-compatible codec.

    Args:
        codec(obj):
            The codec to resolve into a codec. Can be one of:

            * ``None`` (returns :class:`Null`)
            * A :class:`Codec` object (returned as-is)
            * A string (look up codec by name and return with default options)
            * A tuple ``(name, config)`` (pass to :func:`get_config`)
            * A list (wrapped in :class:`Chain`)
            * A :class:`numcodecs.abc.Codec` (wrapped in :class:`NC` and returned)

    Returns:
        Codec: the codec.
    """
    # The tuple check must come before the list check: with list_is_tuple a
    # two-element list is treated as a (name, config) pair.
    treat_as_pair = isinstance(codec, tuple) or (list_is_tuple and isinstance(codec, list))
    if codec is None and not null_as_none:
        return Null()
    if isinstance(codec, str):
        return CODECS[codec]()
    if treat_as_pair:
        name, config = codec
        return get_codec(name, config)
    if isinstance(codec, list):
        return Chain(codec)
    if numcodecs.is_numcodec(codec):
        return NC(codec)
    if isinstance(codec, Null) and null_as_none:
        return None
    return codec
def get_codec(name, config):
    """
    Get a codec by name and configuration (as stored in the BinPickle manifest).

    Args:
        name(str or None): the codec name.
        config: the codec configuration, as returned by :meth:`Codec.config`.

    Returns:
        Codec: the configured codec.
    """
    if name is None:
        return Null()
    if name not in CODECS:
        raise ValueError(f'unknown codec {name}')
    _log.debug('configuring %s: %s', name, config)
    return CODECS[name](**config)
# Imported late: chain.py imports this package, so a top-of-file import would
# be circular (hence the noqa).
from .chain import Chain  # noqa: E402

# Populate the codec registry; optional codecs register only when their
# backing library is importable (see each codec's AVAILABLE flag).
register(Null)
register(Chain)
register(GZ)
if Blosc.AVAILABLE:
    register(Blosc)
if NC.AVAILABLE:
    register(NC)
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,150
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py
|
import logging
import msgpack
from importlib.util import find_spec
from ._base import Codec
DEFAULT_BLOCKSIZE = 1024 * 1024 * 1024
_log = logging.getLogger(__name__)
def _split_blocks(buf, blocksize):
if buf.itemsize > 1:
buf = buf.cast('B')
length = buf.nbytes
chunks = []
for start in range(0, length, blocksize):
end = start + blocksize
if end > length:
end = length
chunks.append(buf[start:end])
if not chunks:
chunks.append(memoryview(b''))
return chunks
class Blosc(Codec):
    """
    Blosc codec: compresses a buffer as a MsgPack array of independently
    blosc-compressed blocks of at most ``blocksize`` bytes each.
    """
    NAME = 'blosc'
    # True iff the optional 'blosc' package is importable.
    AVAILABLE = find_spec('blosc') is not None

    def __init__(self, name='blosclz', level=9,
                 shuffle=1, blocksize=DEFAULT_BLOCKSIZE):
        if not self.AVAILABLE:
            raise ImportError('blosc is not available')
        self.name = name            # blosc compressor name, e.g. 'blosclz'
        self.level = level          # compression level
        self.shuffle = shuffle      # blosc shuffle filter flag
        self.blocksize = blocksize  # max uncompressed bytes per block

    def encode_to(self, buf, out):
        # We have to encode by chunks: blosc has a per-call size limit, so the
        # buffer is split into <= blocksize views and each is compressed
        # independently, framed as a MsgPack array of bin objects.
        import blosc
        pack = msgpack.Packer()
        mv = memoryview(buf)
        _log.debug('encoding %d bytes (itemsize=%d, format=%s)',
                   mv.nbytes, mv.itemsize, mv.format)
        _log.debug('splitting with block size %d', self.blocksize)
        blocks = _split_blocks(mv, self.blocksize)
        out.write(pack.pack_array_header(len(blocks)))
        for block in blocks:
            assert block.nbytes <= self.blocksize
            comp = blosc.compress(block, cname=self.name, clevel=self.level,
                                  shuffle=self.shuffle, typesize=mv.itemsize)
            out.write(pack.pack(comp))
            # Release each sub-view promptly so the source buffer can be freed.
            block.release()

    def decode_to(self, buf, out):
        """Decompress into *out* (a bytearray), resizing it to fit exactly."""
        import blosc
        blocks = msgpack.unpackb(buf, use_list=True)
        pos = 0
        for block in blocks:
            dec = blosc.decompress(block)
            dmv = memoryview(dec)  # to reduce copies
            n = len(dec)
            # Overwrite in place as far as the current length allows...
            e1 = min(pos + n, len(out))
            n1 = e1 - pos
            out[pos:e1] = dmv[:n1]
            # ...and extend with whatever is left over.
            if n1 < n:
                out.extend(dmv[n1:])
            pos += n
        # Trim any stale tail left from a previous, longer use of `out`.
        if len(out) > pos:
            del out[pos:]

    def config(self):
        # NOTE: blocksize is not serialized — decode_to does not need it, as
        # the block structure is recorded in the MsgPack framing.
        return {
            'name': self.name,
            'level': self.level,
            'shuffle': self.shuffle
        }
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,151
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/format.py
|
"""
Constants and functions defining the binpickle format.
"""
import struct
from typing import NamedTuple
MAGIC = b'BPCK'
VERSION = 1
HEADER_FORMAT = struct.Struct('!4sHHq')    # magic, version, reserved, length
TRAILER_FORMAT = struct.Struct('!QLL')     # offset, length, checksum


class FileHeader(NamedTuple):
    """
    File header for a BinPickle file. The header is a 16-byte sequence containing the
    magic (``BPCK``) followed by version and offset information:

    1. File version (2 bytes, big-endian). Currently only version 1 exists.
    2. Reserved (2 bytes). Set to 0.
    3. File length (8 bytes, big-endian). Length is signed; if the file length is not known,
       this field is set to -1.
    """

    version: int = VERSION  # the BinPickle format version
    length: int = -1        # total file length in bytes, or -1 if unknown

    def encode(self):
        "Pack this header into its 16-byte binary form."
        return HEADER_FORMAT.pack(MAGIC, self.version, 0, self.length)

    @classmethod
    def decode(cls, buf, *, verify=True):
        "Parse a header from bytes, optionally validating magic/version/padding."
        magic, version, reserved, length = HEADER_FORMAT.unpack(buf)
        if verify:
            if magic != MAGIC:
                raise ValueError('invalid magic {}'.format(magic))
            if version != VERSION:
                raise ValueError('invalid version {}'.format(version))
            if reserved != 0:
                raise ValueError('invalid padding')
        return cls(version, length)

    @classmethod
    def read(cls, file, **kwargs):
        "Read and decode a header from a binary file object."
        return cls.decode(file.read(HEADER_FORMAT.size), **kwargs)

    def trailer_pos(self):
        "Get the position of the start of the file trailer."
        if self.length >= HEADER_FORMAT.size + TRAILER_FORMAT.size:
            return self.length - TRAILER_FORMAT.size
        if self.length > 0:
            raise ValueError('file size {} not enough for BinPickle'.format(self.length))
        return None  # We do not know the file size
class FileTrailer(NamedTuple):
    """
    File trailer for a BinPickle file. The trailer is a 16-byte sequence that tells the
    reader where to find the rest of the binpickle data. It consists of the following
    fields:

    1. Index start (8 bytes, big-endian). Measured in bytes from the start of the file.
    2. Index length (4 bytes, big-endian). The number of bytes in the index.
    3. Index checksum (4 bytes, big-endian). The Adler32 checksum of the index data.
    """

    offset: int    # byte position of the index, from the start of the file
    length: int    # index size in bytes
    checksum: int  # Adler32 checksum of the index bytes

    def encode(self):
        "Pack this trailer into its 16-byte binary form."
        return TRAILER_FORMAT.pack(self.offset, self.length, self.checksum)

    @classmethod
    def decode(cls, buf, *, verify=True):
        "Parse a trailer from bytes; *verify* is accepted for API symmetry."
        offset, length, checksum = TRAILER_FORMAT.unpack(buf)
        return cls(offset, length, checksum)
class IndexEntry(NamedTuple):
    """
    Index entry describing one stored buffer in the BinPickle index.
    """
    # Position in the file where the buffer begins (bytes).
    offset: int
    # Encoded (on-disk) length of the buffer data in bytes.
    enc_length: int
    # Decoded (in-memory) length of the buffer in bytes.
    dec_length: int
    # Adler-32 checksum of the encoded buffer data.
    checksum: int
    # (name, config) pair for the codec used to encode the buffer, or None.
    codec: tuple = None

    def to_repr(self):
        "Convert an index entry to its MsgPack-compatible representation"
        return {name: getattr(self, name) for name in self._fields}

    @classmethod
    def from_repr(cls, repr):
        "Convert an index entry from its MsgPack-compatible representation"
        if not isinstance(repr, dict):
            raise TypeError("IndexEntry representation must be a dict")
        return cls(**repr)
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,152
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/__init__.py
|
"""
Optimized format for pickling binary data.
"""
__version__ = '0.3.2'
from .write import dump, BinPickler # noqa: F401
from .read import load, BinPickleFile # noqa: F401
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,153
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/lenskit/algorithms/tf/__init__.py
|
"""
TensorFlow-based algorithms.
"""
import logging
from .biasedmf import BiasedMF # noqa: F401
from .ibmf import IntegratedBiasMF # noqa: F401
from .bpr import BPR # noqa: F401
from lenskit.util.parallel import is_mp_worker
# TensorFlow is an optional dependency: record availability instead of
# failing at import time.
try:
    import tensorflow as _tf
    TF_AVAILABLE = True
except ImportError:
    TF_AVAILABLE = False

_log = logging.getLogger(__name__)

# BUG FIX: the original tested only is_mp_worker(), so when TensorFlow was
# missing (TF_AVAILABLE False), referencing _tf raised NameError in workers.
if TF_AVAILABLE and is_mp_worker():
    # Worker processes must not grab GPU memory away from the parent.
    _log.info('disabling GPUs in worker process')
    _tf.config.set_visible_devices([], 'GPU')
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,154
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/codecs/null.py
|
from ._base import Codec
class Null(Codec):
"""
Null codec (passthrough).
"""
NAME = 'null'
def encode(self, buf):
return buf
def encode_to(self, buf, out):
out.write(buf)
def decode(self, buf, length=None):
return buf
def decode_to(self, buf, out):
out[:] = buf
def config(self):
return {}
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,155
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/lenskit/algorithms/tf/util.py
|
from logging import getLogger
from lenskit import util
try:
import tensorflow as tf
except ImportError:
tf = None
_log = getLogger(__name__)
def init_tf_rng(spec):
    "Seed TensorFlow's global RNG from a LensKit seed spec (no-op for None)."
    if spec is None:
        return
    root = util.random.rng_seed(spec)
    tf_seed, = root.generate_state(1)
    tf.random.set_seed(tf_seed)
def make_graph(rng_spec=None):
    "Construct a TensorFlow graph (with an optional random seed)"
    generator = util.rng(rng_spec)
    g = tf.Graph()
    # Draw a 31-bit seed so it fits TensorFlow's graph-level seed.
    g.seed = generator.integers(2**31 - 1)
    _log.info('using effective random seed %s (from %s)', g.seed, rng_spec)
    return g
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,156
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/intro/views.py
|
from django.shortcuts import render
from django.shortcuts import redirect
from django.template.backends import django
from django.views.decorators.csrf import csrf_exempt
from Rec import RecommendProgram
from django.http import HttpRequest
from django.views.decorators.csrf import csrf_protect
from django.template import loader
import json
from django.http import HttpResponse
# Single shared recommender instance for the whole process.
reco = RecommendProgram("")
# Movie currently pending a rating (None when nothing is pending).
movie_to_add = None
# Most recent search results; used to resolve a rated movie by title.
# NOTE(review): later code calls .iterrows() on this — presumably a
# pandas DataFrame at that point, despite the list default; confirm.
moviesoo = []
def intro(request):
    """Render the landing page."""
    return render(request, "Intro.html")
@csrf_exempt
def external(request):
    """
    Dispatch on the POSTed ``option`` field:

    * ``"show"`` — pick a movie for the user to rate and remember it in
      the module-global ``movie_to_add``.
    * ``"result"`` — compute recommendations.
    * anything else — treat the value as a search query and cache the
      results in the module-global ``moviesoo``.

    Always renders ``Intro.html`` with the context for the chosen branch.
    """
    global movie_to_add, moviesoo
    movie_to_add = None
    print(request.POST['option'])
    passing = {}
    if request.POST['option'] == "show":
        # BUG FIX: dropped an unused loader.get_template() call and a
        # dead `pass` statement that followed this branch.
        movie = reco.show_movies_to_rate(True)
        movie_to_add = movie
        passing = {"movie": movie['title']}
    elif request.POST['option'] == "result":
        rec1 = reco.recommend_movies()
        passing = {'result': rec1}
    else:
        movies = reco.search_movie(request.POST['option'])
        print(movies)
        passing = {"movies": movies}
        moviesoo = movies
    return render(request, "Intro.html", passing)
@csrf_exempt
def add_movie(request):
    """
    Record a JSON-posted rating (``{"movie": title, "rate": value}``) for
    either the currently shown movie or a movie chosen from the cached
    search results, then redisplay the rate-a-movie view.

    BUG FIX: the original duplicated the rating/reset logic after the
    if/else, which (a) submitted the rating twice on the else path — the
    second time with ``movie_to_add`` already reset to None — and
    (b) set ``moviesoo = None``, breaking the later ``.iterrows()`` call.
    The rating is now submitted exactly once for the resolved movie.
    """
    global movie_to_add, moviesoo
    payload = json.loads(request.body)
    print(payload["movie"])
    print(payload['rate'])
    if movie_to_add is None:
        # Rating came from a search result: resolve the movie by title.
        for _, row in moviesoo.iterrows():
            if payload['movie'] == row['title']:
                movie_to_add = row
                break
    reco.add_new_movie_rating(movie_to_add, payload['rate'])
    movie_to_add = None
    follow_up = HttpRequest()
    follow_up.POST['option'] = "show"
    return external(follow_up)
@csrf_exempt
def add_movie_shown(request):
    "Store the rating for the currently shown movie and fetch the next one."
    rating = request.POST['but']
    print(rating)
    reco.add_new_movie_rating(movie_to_add, rating)
    next_req = HttpRequest()
    next_req.POST['option'] = "show"
    return external(next_req)
@csrf_exempt
def give_results(request):
    """
    Show the recommendation results by delegating to :func:`external`
    with the ``"result"`` option.

    Cleanup: removed dead locals (`passing`, `passings`), an unused
    `global` declaration, and commented-out code from the original.
    """
    req = HttpRequest()
    req.POST['option'] = "result"
    return external(req)
# cool value #1E96FC
# original yellow #ffed66;
# original blue #26547c;
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,157
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py
|
from importlib.util import find_spec
from ._base import Codec
def is_numcodec(codec):
    "Test whether a codec is a NumCodecs codec."
    if not NC.AVAILABLE:
        # numcodecs is not installed, so nothing can be one of its codecs
        return False
    import numcodecs
    return isinstance(codec, numcodecs.abc.Codec)
class NC(Codec):
    """
    Adapter exposing an arbitrary NumCodecs codec through the binpickle
    codec interface.
    """
    NAME = 'numcodec'
    # True when the optional numcodecs package is importable.
    AVAILABLE = find_spec('numcodecs') is not None

    def __init__(self, codec=None, **kwargs):
        if codec is not None:
            self.codec = codec
        else:
            # Build the codec from its config dict (numcodecs keys on 'id').
            import numcodecs
            self.codec = numcodecs.get_codec(kwargs)

    def encode(self, buf):
        return self.codec.encode(buf)

    def encode_to(self, buf, w):
        w.write(self.codec.encode(buf))

    def decode(self, buf):
        return memoryview(self.codec.decode(buf))

    def decode_to(self, buf, out):
        out[:] = memoryview(self.codec.decode(buf))

    def config(self):
        return self.codec.get_config()
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,158
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/read.py
|
import mmap
import logging
import io
from zlib import adler32
import msgpack
from .compat import pickle
from .format import FileHeader, IndexEntry, FileTrailer
from .codecs import get_codec
_log = logging.getLogger(__name__)
class BinPickleFile:
    """
    Class representing a binpickle file in memory.

    Args:
        filename(str or pathlib.Path):
            The name of the file to load.
        direct(bool):
            If ``True``, returned objects zero-copy when possible, but cannot
            outlast the :class:`BinPickleFile` instance. If ``False``, they
            are copied from the file and do not need to be freed before
            :meth:`close` is called.
    """
    def __init__(self, filename, *, direct=False):
        self.filename = filename
        self.direct = direct
        with open(filename, 'rb') as bpf:
            self.header = FileHeader.read(bpf)
            # Map the whole file; buffers are then served as slices of it.
            self._map = mmap.mmap(bpf.fileno(), self.header.length,
                                  access=mmap.ACCESS_READ)
            self._mv = memoryview(self._map)
        self._read_index()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
        return False

    def load(self):
        """
        Load the object from the binpickle file.

        The last index entry holds the pickle bytestream itself; the
        preceding entries are the out-of-band buffers it references.
        """
        if not self.entries:
            raise ValueError('empty pickle file has no objects')
        p_bytes = self._read_buffer(self.entries[-1], direct=True)
        _log.debug('unpickling %d bytes and %d buffers',
                   len(p_bytes), len(self.entries) - 1)
        buf_gen = (self._read_buffer(e) for e in self.entries[:-1])
        up = pickle.Unpickler(io.BytesIO(p_bytes), buffers=buf_gen)
        return up.load()

    def find_errors(self):
        """
        Verify binpickle data structure validity. If the file is invalid,
        returns a list of error strings (empty when no problems are found).

        Fatal index errors will result in a failure to open the file, so things
        such as invalid msgpack formats in the index won't be detected here.
        This method checks buffer checksums, offset overlaps, and such.
        """
        errors = []
        i_sum = adler32(self._index_buf)
        if i_sum != self.trailer.checksum:
            errors.append(f'invalid index checksum ({i_sum} != {self.trailer.checksum})')
        # Buffers must start after the 16-byte header.
        # NOTE(review): position is never advanced past the header, so
        # overlap between successive entries is not detected — confirm intent.
        position = 16
        for i, e in enumerate(self.entries):
            if e.offset < position:
                errors.append(f'entry {i}: offset {e.offset} before expected start {position}')
            buf = self._read_buffer(e, direct=True)
            ndec = len(buf)
            if ndec != e.dec_length:
                errors.append(f'entry {i}: decoded to {ndec} bytes, expected {e.dec_length}')
            cks = adler32(self._read_buffer(e, direct=True, decode=False))
            if cks != e.checksum:
                # BUG FIX: the original appended a plain string missing the
                # f-prefix and the closing paren, so the message showed the
                # literal '{i}'/'{cks}' placeholders instead of the values.
                errors.append(f'entry {i}: invalid checksum ({cks} != {e.checksum})')
        return errors

    def close(self):
        """
        Close the BinPickle file. If the file is in direct mode, all
        retrieved objects and associated views must first be deleted.
        """
        # Drop every memoryview referencing the map before closing it;
        # mmap.close() raises if exported views are still alive.
        self._index_buf = None
        self._mv = None
        if self._map is not None:
            self._map.close()
            self._map = None

    def _read_index(self):
        # The trailer at the end of the file locates the msgpack index.
        tpos = self.header.trailer_pos()
        if tpos is None:
            raise ValueError('no file length, corrupt binpickle file?')
        buf = self._mv[tpos:]
        assert len(buf) == 16
        self.trailer = FileTrailer.decode(buf)
        i_start = self.trailer.offset
        i_end = i_start + self.trailer.length
        self._index_buf = self._mv[i_start:i_end]
        self.entries = [IndexEntry.from_repr(e) for e in msgpack.unpackb(self._index_buf)]
        _log.debug('read %d entries from file', len(self.entries))

    def _read_buffer(self, entry: IndexEntry, *, direct=None, decode=True):
        """
        Fetch one buffer described by *entry*.

        Args:
            direct: override the instance-level direct setting (None = inherit).
            decode: if False, return the raw encoded bytes without the codec.
        """
        start = entry.offset
        length = entry.enc_length
        end = start + length
        if direct is None:
            direct = self.direct
        if decode and entry.codec:
            name, cfg = entry.codec
            _log.debug('decoding %d bytes from %d with %s', length, start, name)
            out = bytearray(entry.dec_length)
            codec = get_codec(name, cfg)
            codec.decode_to(self._mv[start:end], out)
            return out
        if direct:
            # Zero-copy view into the mapped file.
            _log.debug('mapping %d bytes from %d', length, start)
            return self._mv[start:end]
        else:
            # Slicing the mmap object copies the bytes out of the map.
            _log.debug('copying %d bytes from %d', length, start)
            return self._map[start:end]
def load(file):
    """
    Load an object from a BinPickle file.

    Args:
        file(str or pathlib.Path): The file to load.
    """
    bpf = BinPickleFile(file)
    with bpf:
        return bpf.load()
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,159
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py
|
from ._base import Codec
from . import make_codec
class Chain(Codec):
"""
Codec that chains together other codecs in sequence. The codecs are applied
in the provided order for encoding, and reverse order for decoding.
"""
NAME = 'chain'
def __init__(self, codecs=()):
self.codecs = [make_codec(c, list_is_tuple=True) for c in codecs]
def encode(self, buf):
data = buf
for codec in self.codecs:
data = codec.encode(data)
return data
def encode_to(self, buf, w):
w.write(self.encode(buf))
def decode(self, buf):
data = buf
for codec in self.codecs[::-1]:
data = codec.decode(data)
return data
def decode_to(self, buf, out):
out[:] = self.decode(buf)
def config(self):
return {
'codecs': [(c.NAME, c.config()) for c in self.codecs]
}
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,160
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/compat.py
|
"""
Compatibility support.
"""
import pickle
# Make sure we have Pickle 5
if pickle.HIGHEST_PROTOCOL < 5:
import pickle5 as pickle
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,161
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py
|
import zlib
from ._base import Codec
class GZ(Codec):
"""
Zlib (gzip-compatible) codec.
"""
NAME = 'gz'
def __init__(self, level=9):
self.level = level
def encode(self, buf):
return zlib.compress(buf, self.level)
def encode_to(self, buf, out):
# We have to encode by chunks
out.write(self.encode(buf))
def decode(self, buf):
return zlib.decompress(buf)
def decode_to(self, buf, out):
out[:] = self.decode(buf)
def config(self):
return {
'level': self.level
}
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,162
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/venv/lib/python3.7/site-packages/binpickle/write.py
|
import mmap
import warnings
import logging
import io
from zlib import adler32
import msgpack
from .compat import pickle
from .format import FileHeader, FileTrailer, IndexEntry
from . import codecs
_log = logging.getLogger(__name__)
def _align_pos(pos, size=mmap.PAGESIZE):
"Advance a position to be aligned."
rem = pos % size
if rem:
return pos + (size - rem)
else:
return pos
class CKOut:
    """
    Write-through wrapper that tracks the byte count and running Adler-32
    checksum of everything written to the underlying stream.
    """
    def __init__(self, base):
        self.bytes = 0
        self.checksum = 1  # Adler-32 initial seed value
        self.delegate = base

    def write(self, data):
        # A memoryview gives a portable byte count for any buffer type.
        view = memoryview(data)
        self.bytes += view.nbytes
        self.checksum = adler32(data, self.checksum)
        return self.delegate.write(data)

    def flush(self):
        self.delegate.flush()
class BinPickler:
    """
    Save an object into a binary pickle file. This is like :class:`pickle.Pickler`,
    except it works on file paths instead of byte streams.

    A BinPickler is also a context manager that closes itself when exited::

        with BinPickler('file.bpk') as bpk:
            bpk.dump(obj)

    Args:
        filename(str or pathlib.Path):
            The path to the file to write.
        align(bool):
            If ``True``, align buffers to the page size.
        codec:
            The codec to use for encoding buffers. This can be anything that can be
            passed to :func:`binpickle.codecs.make_codec`, or it can be a function
            that takes a buffer and returns the codec to use for that buffer (to
            use different codecs for different types or sizes of buffers).
    """
    def __init__(self, filename, *, align=False, codec=None):
        self.filename = filename
        self.align = align
        self._file = open(filename, 'wb')
        # One IndexEntry per buffer written; serialized by _write_index.
        self.entries = []
        self.codec = codec
        self._init_header()

    @classmethod
    def mappable(cls, filename):
        "Convenience method to construct a pickler for memory-mapped use."
        return cls(filename, align=True)

    @classmethod
    def compressed(cls, filename, codec=codecs.GZ()):
        "Convenience method to construct a pickler for compressed storage."
        return cls(filename, codec=codec)

    def dump(self, obj):
        "Dump an object to the file. Can only be called once."
        bio = io.BytesIO()
        # Protocol-5 out-of-band buffers are diverted to _write_buffer so
        # each large buffer is stored in the file separately from the
        # pickle bytestream.
        pk = pickle.Pickler(bio, protocol=pickle.HIGHEST_PROTOCOL,
                            buffer_callback=self._write_buffer)
        pk.dump(obj)
        buf = bio.getbuffer()
        _log.info('pickled %d bytes with %d buffers', buf.nbytes, len(self.entries))
        # The pickle stream itself is written as the final buffer.
        self._write_buffer(buf)
        self._finish_file()

    def close(self):
        "Close the bin pickler."
        self._file.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
        return False

    def _init_header(self):
        # Write a placeholder header; _finish_file rewrites it with the
        # real file length once everything else has been written.
        pos = self._file.tell()
        if pos > 0:
            warnings.warn('BinPickler not at beginning of file')
        h = FileHeader()
        _log.debug('initializing header for %s', self.filename)
        self._file.write(h.encode())
        assert self._file.tell() == pos + 16

    def _encode_buffer(self, buf, out):
        # Encode *buf* into *out*; returns the (name, config) codec spec
        # recorded in the index, or None when stored uncompressed.
        if self.codec is None:
            out.write(buf)
            return None
        elif hasattr(self.codec, '__call__'):
            # codec is callable, call it to get the codec
            codec = self.codec(buf)
            codec = codecs.make_codec(codec)
        else:
            codec = codecs.make_codec(self.codec)
        codec.encode_to(buf, out)
        return (codec.NAME, codec.config())

    def _write_buffer(self, buf):
        # Write one buffer (possibly page-aligned and encoded) and record
        # its index entry.
        mv = memoryview(buf)
        offset = self._file.tell()
        if self.align:
            # Pad with zero bytes so the buffer starts on a page boundary.
            off2 = _align_pos(offset)
            if off2 > offset:
                nzeds = off2 - offset
                zeds = b'\x00' * nzeds
                self._file.write(zeds)
                assert self._file.tell() == off2
                offset = off2
        length = mv.nbytes
        _log.debug('writing %d bytes at position %d', length, offset)
        # CKOut tracks the encoded size and Adler-32 checksum as the codec
        # writes through it.
        cko = CKOut(self._file)
        c_spec = self._encode_buffer(buf, cko)
        _log.debug('encoded %d bytes to %d (%.2f%% saved)', length, cko.bytes,
                   (length - cko.bytes) / length * 100 if length else -0.0)
        _log.debug('used codec %s', c_spec)
        assert self._file.tell() == offset + cko.bytes
        self.entries.append(IndexEntry(offset, cko.bytes, length, cko.checksum,
                                       c_spec))

    def _write_index(self):
        # Serialize the index as msgpack, then append the fixed-size trailer
        # that locates and checksums it.
        buf = msgpack.packb([e.to_repr() for e in self.entries])
        pos = self._file.tell()
        nbs = len(buf)
        _log.debug('writing %d index entries (%d bytes) at position %d',
                   len(self.entries), nbs, pos)
        self._file.write(buf)
        ft = FileTrailer(pos, nbs, adler32(buf))
        self._file.write(ft.encode())
        return ft

    def _finish_file(self):
        # Write index + trailer, then seek back and replace the placeholder
        # header with one carrying the final file length.
        self._write_index()
        pos = self._file.tell()
        _log.debug('finalizing file with length %d', pos)
        h = FileHeader(length=pos)
        self._file.seek(0)
        self._file.write(h.encode())
        self._file.flush()
def dump(obj, file, *, mappable=False, codec=codecs.GZ()):
    """
    Dump an object to a BinPickle file. This is a convenience wrapper
    around :class:`BinPickler`.

    To save with default compression for storage or transport::

        dump(obj, 'file.bpk')

    To save in a file optimized for memory-mapping::

        dump(obj, 'file.bpk', mappable=True)

    Args:
        obj: The object to dump.
        file(str or pathlib.Path): The file in which to save the object.
        mappable(bool):
            If ``True``, save for memory-mapping. ``codec`` is ignored
            in this case.
        codec(codecs.Codec):
            The codec to use to compress the data, when not saving for
            memory-mapping.
    """
    # Mappable output forces alignment and disables compression.
    options = {'align': True} if mappable else {'align': False, 'codec': codec}
    with BinPickler(file, **options) as pickler:
        pickler.dump(obj)
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,163
|
armintalaie/Movie-Recommender-Program
|
refs/heads/master
|
/Rec.py
|
import lenskit.datasets as ds
import csv
import pandas as pd
from lenskit.algorithms import Recommender
from lenskit.algorithms.user_knn import UserUser
from os import path
import re
# Directory containing the MovieLens "ml-latest-small" dataset.
MOVIE_DATA_LOC = "ml-latest-small"
# Maximum number of titles printed by a search.
SEARCH_RESULT_COUNT = 10
# Number of recommendations to produce.
MOVIES_TO_RECOMMEND = 10

pd.set_option('display.max_columns', 7)
# The dataset is loaded once at import time and shared by RecommendProgram.
movie_data = ds.MovieLens(MOVIE_DATA_LOC)
class RecommendProgram(object):
    """Interactive movie recommender backed by the MovieLens dataset.

    User ratings are persisted to a per-user CSV file; recommendations are
    produced with a user-user collaborative filter (lenskit ``UserUser``).
    """

    # Fallback ratings file used when no per-user file exists yet.
    username = "curr.csv"

    def __init__(self, username):
        # Reuse an existing "<username>.csv" ratings file; otherwise create
        # the fallback file with a header row.
        if path.exists(username + ".csv"):
            self.username = username + ".csv"
        else:
            with open(self.username, 'w') as csvfile:
                field_names = ['item', 'title', 'genres', 'ratings']
                file_writer = csv.DictWriter(csvfile, delimiter=',',
                                             quotechar='|', quoting=csv.QUOTE_MINIMAL,
                                             fieldnames=field_names)
                file_writer.writeheader()
        self.user_data = self.load_user_file()
        self.data = movie_data
        # Ratings enriched with genre and title columns.
        self.combined = self.data.ratings.join(self.data.movies['genres'], on='item')
        self.combined = self.combined.join(self.data.movies['title'], on='item')
        self.enriched_movies = self.data.movies.copy()
        # BUG FIX: DataFrame.rename() returns a new frame; the original
        # discarded the result, so the rename was a silent no-op.
        self.enriched_movies = self.enriched_movies.rename(columns={"movieId": "item"})
        # Per-item rating counts; after count(), the 'rating' column here is a
        # count of ratings, not a mean.
        counts = self.data.ratings.groupby(by="item").count()
        counts = counts.rename(columns={'user': 'count'})
        self.enriched_movies = self.enriched_movies.join(counts['rating'], on='item')
        # Ratings restricted to items rated by more than 10 users; this is the
        # training set for the recommender.
        self.removed = pd.merge(self.data.ratings, counts['count'], on='item')
        self.removed = self.removed.sort_values(by='count', ascending=False)
        self.removed = self.removed.loc[self.removed['count'] > 10]

    def search_movies(self):
        """Interactive console loop: type a title substring, pick a result,
        rate it. Type "done" to exit."""
        command = None
        while command != "done":
            command = input()
            result = self.enriched_movies[self.enriched_movies['title'].str.contains(command)]
            result = result.sort_values(by='rating', ascending=False)
            index = 1
            for i, movie in result[0:SEARCH_RESULT_COUNT].iterrows():
                print(index, movie['title'], movie['genres'])
                index += 1
            choose = int(input("type number"))
            movie = result.iloc[choose - 1:choose]
            movie = movie.reset_index()
            rate = input("How do you rate this from 0 to 5\n")
            self.add_new_movie_rating(movie, rate)

    def search_movie(self, title):
        """Print and return up to SEARCH_RESULT_COUNT case-insensitive matches
        for *title*, most-rated first."""
        result = self.enriched_movies[self.enriched_movies['title'].str.contains(title, flags=re.IGNORECASE)]
        result = result.sort_values(by='rating', ascending=False)
        result = result.reset_index()
        index = 1
        for i, movie in result[0:SEARCH_RESULT_COUNT].iterrows():
            print(index, movie['title'], movie['genres'])
            index += 1
        return result[0:SEARCH_RESULT_COUNT]

    def show_movies_to_rate(self, is_wb=None):
        """Offer popular, not-yet-rated movies to the user.

        BUG FIX: the original defined this method twice; the second definition
        silently shadowed the first, turning the interactive console flow into
        dead code. Both variants are merged here:

        * ``is_wb is None`` -- interactive console loop over movies with more
          than 1000 ratings; the user rates, skips (enter), or types "done".
        * otherwise -- non-interactive (web) mode: return the first movie the
          user has not yet rated, matching the previously-effective behavior.
        """
        confirmed_movies = self.load_user_file()
        movies = self.enriched_movies.sort_values(by='rating', ascending=False)
        movies = movies.reset_index()
        if is_wb is None:
            movies = movies.loc[movies['rating'] > 1000]
            print(movies.head(5))
            for i, movie in movies.iterrows():
                if movie['item'] in confirmed_movies:
                    continue
                print(movie['title'])
                print("if seen movie, rate out of 5. otherwise press enter for more or done to quit")
                command = input()
                if command == "done":
                    break
                if command == "":
                    continue
                self.add_new_movie_rating(movie, int(command))
        else:
            print(movies.head(5))
            for i, movie in movies.iterrows():
                if movie['item'] in confirmed_movies:
                    continue
                print(movie['title'])
                return movie

    def load_user_file(self):
        """Return the list of item ids already rated in the user's CSV file."""
        confirmed_movies = []
        with open(self.username, newline='') as file:
            rating_reader = csv.DictReader(file)
            for row in rating_reader:
                confirmed_movies.append(int(row['item']))
        return confirmed_movies

    def add_new_movie_rating(self, movie, rate):
        """Append one (item, title, genres, rating) row to the user's CSV."""
        with open(self.username, "a", newline='') as f:
            names = ['item', 'title', 'genres', 'ratings']
            wr = csv.DictWriter(f, fieldnames=names)
            wr.writerow({names[0]: movie['item'], names[1]: movie['title'],
                         names[2]: movie['genres'], names[3]: rate})

    def recommend_movies(self):
        """Train a user-user CF model on the filtered ratings and print/return
        recommendations for the current user's stored ratings."""
        first_data = {}
        with open(self.username, newline='') as csvfile:
            ratings_reader = csv.DictReader(csvfile)
            for row in ratings_reader:
                # Only keep well-formed ratings strictly between 0 and 6.
                if (row['ratings'] != "") and (float(row['ratings']) > 0) and (float(row['ratings']) < 6):
                    first_data.update({int(row['item']): float(row['ratings'])})
        user_user = UserUser(10, min_nbrs=5)
        print(self.removed)
        algo = Recommender.adapt(user_user)
        algo.fit(self.removed)
        # -1 marks an ad-hoc user whose ratings are passed explicitly.
        rec1 = algo.recommend(-1, 10, ratings=pd.Series(first_data))
        joined_data = rec1.join(self.data.movies['genres'], on='item')
        joined_data = joined_data.join(self.data.movies['title'], on='item')
        print(joined_data[joined_data.columns[2:]])
        return joined_data

    def fetch_more_movie_data(self):
        # TODO: enrich with box office, language
        pass

    def filter_movie_input_collection(self):
        # TODO: filter input by post-2000, genre, box office, language
        pass

    def remove_useless_data(self):
        # TODO: drop items with few ratings and very old movies
        pass
# NOTE(review): these genre weight constants are defined but never referenced
# anywhere in this module -- presumably intended for re-weighting ratings.
ACTION, COMEDY = 1.5, 1.5
DOCUMENTARY = 0.5
# print(combined.head(5))
""" my code -------------------------------- """
"""
dd = data.movies
rating_counts = data.ratings.groupby(['item']).count()
new_data = dd.join(rating_counts['rating'], on='item')
new_data = new_data.sort_values(by='rating', ascending=False)
print(new_data)
confirmed = []
with open("newer_data.csv", newline='') as csvfile:
ratings_reader = csv.DictReader(csvfile)
for row in ratings_reader:
confirmed.append(int(row['item']))
with open("newer_data.csv", "a", newline='') as f:
names = ['item', 'title', 'genres', 'ratings']
wr = csv.DictWriter(f, fieldnames=names)
for movie in new_data.itertuples():
if movie.Index in confirmed:
continue
if not re.search('\(2[0-9][1-2][8-9]\)', movie.title):
continue
print("please rate this out of 5, dear ho: ", movie.title)
print("if you haven't watched it press enter!")
r = input()
if r == 'done':
break
wr.writerow({names[0]: movie.Index, names[1]: movie.title,
names[2]: movie.genres, names[3]: r})
print("adfgds")
first_data = {}
with open("newer_data.csv", newline='') as csvfile:
ratings_reader = csv.DictReader(csvfile)
for row in ratings_reader:
if (row['ratings'] != "") and (float(row['ratings']) > 0) and (float(row['ratings']) < 6):
num = y = 1
first_data.update({int(row['item']): num * float(row['ratings']) * y})
num_recs = 30 # <---- This is the number of recommendations to generate.
average_ratings = data.ratings.groupby(['item']).mean()
rating_counts = data.ratings.groupby(['item']).count()
print("meow")
print(rating_counts['rating'])
bbb = []
#df.name.str.extract(r'([\d]+)',expand=False)
years = data.movies.loc[data.movies['title'].str.contains('\(2[0-9][0-9][0-9]\)', regex=True)]
years = years.loc[~years['title'].str.contains('Action')]
print(years.head(6))
years = years.reset_index()
print(years.head(6))
years = years['item']
print(years.head(6))
#bool(re.search("2[0-9][0-9][0-9]",str(data.movies['title'])))]
#and ~re.search("Action", str(data.movies['title']))
# find promising list of movies
rating_counts = rating_counts.rename(columns={'user': 'size'})
rating_counts = rating_counts.loc[(rating_counts['size'] > 1500)]
rating_counts = rating_counts.reset_index()
print(rating_counts.head(5))
rating_counts = pd.merge(rating_counts, years, on='item')
print("ss")
print(rating_counts.head(10))
# remove user rating for movies with low number of ratings
filtered = data.ratings
filtered = pd.merge(filtered, rating_counts, on='item')
print("last")
filtered = filtered.drop(columns=['size', 'rating_y', 'timestamp_y'])
filtered = filtered.rename(columns={'rating_x': 'rating', 'timestamp_x': 'timestamp'})
print(filtered.head(5))
# recommendation algorithm
user_user = UserUser(20, min_nbrs=5)
algo = Recommender.adapt(user_user)
algo.fit(filtered)
rec1 = algo.recommend(-1, num_recs, ratings=pd.Series(first_data))
# prepare result
joined_data = rec1.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data['genres'].str.contains('Animation|Comedy|Romance|Thriller', regex=True)]
print(joined_data[joined_data.columns[2:]])
"""
"""joined_data = joined_data[~joined_data['genres'].str.contains("Documentary")]
joined_data = joined_data[joined_data['genres'].str.contains("Comedy")]"""
""" my code -------------------------------- """
"""
average_ratings = data.ratings.groupby(['item']).mean()
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[1:]]
print(joined_data.head(5))
average_ratings = data.ratings.groupby('item').agg(count=('user', 'size'), rating=('rating', 'mean')).reset_index()
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[1:]]
print(joined_data.head(5))"""
"""
minimum_to_include = 70 # <-- You can try changing this minimum to include movies rated by fewer or more people
average_ratings = data.ratings.groupby(['item']).mean()
rating_counts = data.ratings.groupby(['item']).count()
# print(rating_counts)
average_ratings = average_ratings.loc[rating_counts['rating'] > minimum_to_include]
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
# joined_data = joined_data[joined_data.columns[3:]]
print(joined_data.head(5))
print(joined_data.columns)
joined_data = joined_data[joined_data.columns[3:]]
print(joined_data.head(5))
print(joined_data.columns)
first_data = {}
second_data = {}
with open("jabril-movie-ratings.csv", newline='') as csvfile:
ratings_reader = csv.DictReader(csvfile)
for row in ratings_reader:
if (row['ratings'] != "") and (float(row['ratings']) > 0) and (float(row['ratings']) < 6):
first_data.update({int(row['item']): float(row['ratings'])})
with open("jgb-movie-ratings.csv", newline='') as csvfile:
ratings_reader = csv.DictReader(csvfile)
for row in ratings_reader:
if (row['ratings'] != "") and (float(row['ratings']) > 0) and (float(row['ratings']) < 6):
second_data.update({int(row['item']): float(row['ratings'])})
print("Rating dictionaries assembled!")
print("Sanity check:")
print("\tJabril's rating for 1197 (The Princess Bride) is " + str(first_data[1197]))
print("\tJohn-Green-Bot's rating for 1197 (The Princess Bride) is " + str(second_data[1197]))
from lenskit.algorithms import Recommender
from lenskit.algorithms.user_knn import UserUser
num_recs = 10 #<---- This is the number of recommendations to generate. You can change this if you want to see more recommendations
user_user = UserUser(10, min_nbrs=3)
algo = Recommender.adapt(user_user)
algo.fit(data.ratings)
rec1 = algo.recommend(-1, num_recs, ratings=pd.Series(first_data))
joined_data = rec1.join(data.movies['genres'], on='item')
joined_data = rec1.join(data.movies['title'], on='item')
print(joined_data)
rec2 = algo.recommend(-1, num_recs, ratings=pd.Series(second_data))
joined_data = rec2.join(data.movies['genres'], on='item')
joined_data = rec2.join(data.movies['title'], on='item')
print(joined_data)
combined_rating_dict = {}
for k in first_data:
if k in second_data:
combined_rating_dict.update({k: float((first_data[k] + second_data[k]) / 2)})
else:
combined_rating_dict.update({k: first_data[k]})
for k in second_data:
if k not in combined_rating_dict:
combined_rating_dict.update({k: second_data[k]})
combined_recs = algo.recommend(-1, num_recs, ratings=pd.Series(combined_rating_dict)) #Here, -1 tells it that it's not an existing user in the set, that we're giving new ratings, while 10 is how many recommendations it should generate
joined_data = combined_recs.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[2:]]
print("\n\nRECOMMENDED FOR JABRIL / JOHN-GREEN-BOT HYBRID:")
print("\n\n\n\n")
print(joined_data)"""
|
{"/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/blosc.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/__init__.py": ["/venv/lib/python3.7/site-packages/binpickle/write.py", "/venv/lib/python3.7/site-packages/binpickle/read.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/null.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/intro/views.py": ["/Rec.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/numcodecs.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/read.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/chain.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py", "/venv/lib/python3.7/site-packages/binpickle/codecs/__init__.py"], "/venv/lib/python3.7/site-packages/binpickle/codecs/gz.py": ["/venv/lib/python3.7/site-packages/binpickle/codecs/_base.py"], "/venv/lib/python3.7/site-packages/binpickle/write.py": ["/venv/lib/python3.7/site-packages/binpickle/compat.py", "/venv/lib/python3.7/site-packages/binpickle/format.py", "/venv/lib/python3.7/site-packages/binpickle/__init__.py"]}
|
40,167
|
onkelrow/First
|
refs/heads/master
|
/sqlite geopackage.py
|
__author__ = '53_e_rv'

import arcpy

# BUG FIX: the original used Unicode "smart" quotes (A f/A ... and a oe/a EUR ) around
# both string literals, which is a SyntaxError in Python. Straight quotes and
# a raw string (the path contains backslashes) are used instead.

# Path to the output GeoPackage (SQLite database).
sqlite_database_path = r'C:\data\example.gpkg'

# Execute CreateSQLiteDatabase with the GEOPACKAGE spatial type.
arcpy.gp.CreateSQLiteDatabase(sqlite_database_path, 'GEOPACKAGE')
|
{"/osgeo.py": ["/gdal.py"]}
|
40,168
|
onkelrow/First
|
refs/heads/master
|
/arcsde connect disconnect.py
|
import arcpy

# List the users currently connected to the enterprise geodatabase.
users = arcpy.ListUsers("Database Connections/Connection to sde_sde_db_rac.sde")
# for user in users:
#     print("Username: {0}, Connected at: {1}".format(
#         user.Name, user.ConnectionTime))
# import arcpy
# Set the administrative workspace connection
# arcpy.env.workspace = "Database Connections/tenone@sde.sde"
# Create a list of users
'''
NOTE: When the arcpy.env.workspace environment is set, a workspace
does not need to be provided to the function.
'''
# users = arcpy.ListUsers()
# Create a list of SDE ID's.
# Use a list comprehension to get the ID values in a new list.
id_users = [user.ID for user in users]
print(id_users)
# import arcpy
# Disconnect every user from the geodatabase.
arcpy.DisconnectUser("Database Connections/Connection to sde_sde_db_rac.sde", "ALL")
|
{"/osgeo.py": ["/gdal.py"]}
|
40,169
|
onkelrow/First
|
refs/heads/master
|
/sde export.py
|
__author__ = '53_e_rv'
# -*- coding: utf-8 -*-

import arcpy

# Set the workspace to the enterprise geodatabase connection file.
arcpy.env.workspace = 'Database Connections/Connection sde to sdetest.sde'
# Keep a local alias for the workspace path.
workspace = arcpy.env.workspace
# ent_gdb = "C:\\gdbs\\enterprisegdb.sde"
output_file = "C:\\keyword.txt"
# arcpy.ExportGeodatabaseConfigurationKeywords_management(ent_gdb, output_file)
# Export the geodatabase configuration keywords to a text file.
arcpy.ExportGeodatabaseConfigurationKeywords_management(workspace, output_file)
|
{"/osgeo.py": ["/gdal.py"]}
|
40,170
|
onkelrow/First
|
refs/heads/master
|
/update services in context.py
|
import arcpy
import xml.dom.minidom as DOM

# Local variables:
#   wrkspc   directory containing the map document
#   mxdName  map document file name
#   con      ArcGIS Server catalog path
#   service  service name (includes the service directory)
#   summary  service summary
#   tags     service tags
wrkspc = 'C:/test/'
mxdName = 'sample.mxd'
con = 'GIS Servers/arcgis on localhost_6080 (admin)'
service = 'MyMapService'
summary = 'Population Density by County'
tags = 'county, counties, population, density, census'

mapDoc = arcpy.mapping.MapDocument(wrkspc + mxdName)
sddraft = wrkspc + service + '.sddraft'
sd = wrkspc + service + '.sd'

# Create the service definition draft.
analysis = arcpy.mapping.CreateMapSDDraft(mapDoc, sddraft, service, 'ARCGIS_SERVER',
                                          con, True, None, summary, tags)

# Set the service type to esriServiceDefinitionType_Replacement so the upload
# replaces the existing service instead of creating a new one.
newType = 'esriServiceDefinitionType_Replacement'
xml = sddraft
doc = DOM.parse(xml)
descriptions = doc.getElementsByTagName('Type')
for desc in descriptions:
    if desc.parentNode.tagName == 'SVCManifest':
        if desc.hasChildNodes():
            desc.firstChild.data = newType
outXml = xml
# FIX: use a context manager so the draft file is closed even if writexml fails.
with open(outXml, 'w') as f:
    doc.writexml(f)

# Stage and upload the service if the sddraft analysis did not contain errors.
if analysis['errors'] == {}:
    # Execute StageService
    arcpy.StageService_server(sddraft, sd)
    # Execute UploadServiceDefinition
    arcpy.UploadServiceDefinition_server(sd, con)
else:
    # BUG FIX: Python 2 `print analysis['errors']` statement replaced with the
    # function form, which is valid under both Python 2 and 3.
    print(analysis['errors'])
|
{"/osgeo.py": ["/gdal.py"]}
|
40,171
|
onkelrow/First
|
refs/heads/master
|
/securesd layer render and export.py
|
__author__ = 'Roman'

import arcpy, os

# Remove the temporary connection file if it already exists.
sdeFile = r"C:\Project\Output\TempSDEConnectionFile.sde"
if os.path.exists(sdeFile):
    os.remove(sdeFile)

# Create a temporary connection file with saved credentials.
arcpy.CreateArcSDEConnectionFile_management(r"C:\Project\Output", "TempConnection", "myServerName", "5151", "myDatabase", "DATABASE_AUTH", "myUserName", "myPassword", "SAVE_USERNAME", "myUser.DEFAULT", "SAVE_VERSION")

# Export a map document to verify that secured layers are present.
mxd = arcpy.mapping.MapDocument(r"C:\Project\SDEdata.mxd")
arcpy.mapping.ExportToPDF(mxd, r"C:\Project\output\SDEdata.pdf")
os.remove(sdeFile)
# BUG FIX: the original line fused `del mxd` with a stray
# `__author__ = 'Administrator'` assignment (a SyntaxError: `del` targets
# cannot be assigned). Only the deletion is kept.
del mxd
|
{"/osgeo.py": ["/gdal.py"]}
|
40,172
|
onkelrow/First
|
refs/heads/master
|
/layer rename.py
|
__author__ = 'Administrator'

import arcpy
import os

# mxd = arcpy.mapping.MapDocument("CURRENT")
mxd = arcpy.mapping.MapDocument(r"C:\Users\Administrator\Desktop\ripsgdi14\wrrl\automated\layerrenamed\wrrl_k13_3.mxd")

# Report each layer's data source before renaming.
for lyr in arcpy.mapping.ListLayers(mxd):
    if lyr.supports("DATASOURCE"):
        arcpy.AddMessage("Layer: " + lyr.name + " Source: " + lyr.dataSource)

# BUG FIX: the original printed `lyrname` while its assignment was commented
# out, raising NameError; the assignment is restored. Python 2 `print`
# statements are replaced with the function form (valid on 2 and 3).
for lyr in arcpy.mapping.ListLayers(mxd):
    lyrname = str(lyr.name)
    print(lyrname)

# Report data sources again (post-inspection pass, as in the original).
for lyr in arcpy.mapping.ListLayers(mxd):
    if lyr.supports("DATASOURCE"):
        arcpy.AddMessage("Layer: " + lyr.name + " Source: " + lyr.dataSource)

# Replace layer names consisting of a single space with "1".
for lyr in arcpy.mapping.ListLayers(mxd):
    lyrname = str(lyr.name)
    print(lyrname)
    if lyrname == " ":
        lyrname_replaced = lyrname.replace(" ", "1")
        lyr.name = lyrname_replaced
        arcpy.AddMessage(lyrname_replaced)

mxd.save()
arcpy.RefreshTOC()
|
{"/osgeo.py": ["/gdal.py"]}
|
40,173
|
onkelrow/First
|
refs/heads/master
|
/Mxd_sources checker.py
|
# import os
# import csv
# import arcpy
#
# def ListMXDSources(path,extension):
# list_dir = []
# CountList = []
# MapList = []
# list_dir = os.listdir(path)
# count = 0
# for paths, dirctory, files in os.walk(path):
# for file in files:
# if file.endswith(extension): # eg: '.mxd'
# MapList.append(os.path.join(paths, file))
# print MapList
# for m in MapList:
# count += 1
# mxd = arcpy.mapping.MapDocument(m)
# ## --------- For each map list layers
# for lyr in arcpy.mapping.ListLayers(mxd):
# with open("ListOfDataSources.csv", 'wb') as csvfile:
# csvwriter = csv.writer(csvfile)
# for dirpath, dirnames, filenames in arcpy.da.Walk(MapList):
# for filename in filenames:
# desc = arcpy.Describe(os.path.join(dirpath, filename))
# csvwriter.writerow([desc.catalogPath, desc.name, desc.dataType])
#import arcpy, os, csv
import os
import csv
import arcpy
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def main(folder, outputfile):
    """Crawl *folder* for .mxd documents and write one CSV row per layer."""
    header = ("Map Document", "MXD Path", "DataFrame Name", "DataFrame Description", "Layer name", "Layer Datasource")
    with open(outputfile, "wb") as handle:
        writer = csv.writer(handle)
        writer.writerow(header)
        # crawlmxds is a generator; writerows drains it lazily.
        writer.writerows(crawlmxds(folder))
def crawlmxds(folder):
    """Walk *folder* recursively and yield one tuple per layer of every .mxd:
    (mxd name, mxd path, dataframe name, dataframe description, layer name,
    layer datasource)."""
    for root, dirs, files in os.walk(folder):
        for f in files:
            if f.lower().endswith(".mxd"):
                mxdName = os.path.splitext(f)[0]
                mxdPath = os.path.join(root, f)
                mxd = arcpy.mapping.MapDocument(mxdPath)
                dataframes = arcpy.mapping.ListDataFrames(mxd)
                for df in dataframes:
                    # Substitute the literal string "None" for empty descriptions.
                    dfDesc = df.description if df.description != "" else "None"
                    layers = arcpy.mapping.ListLayers(mxd, "", df)
                    for lyr in layers:
                        lyrName = lyr.name
                        # Not every layer type exposes a data source.
                        lyrDatasource = lyr.dataSource if lyr.supports("dataSource") else "N/A"
                        seq = (mxdName, mxdPath, df.name, dfDesc, lyrName, lyrDatasource);
                        yield seq
                # Release the map document handle before moving on.
                del mxd
if __name__ == "__main__":
folderPath = r"C:\temp1\test" # or arcpy.GetParameterAsText(0)
output = r"c:\temp1\mxdcrawler.csv" # or arcpy.GetParameterAsText(1)
main(folderPath, output)
|
{"/osgeo.py": ["/gdal.py"]}
|
40,174
|
onkelrow/First
|
refs/heads/master
|
/etrs89.py
|
# D A T E N E X P O R T D O B E T R S 8 9
#
# Autor: Barbara Herwig
# Erstellt am: 15.08.2018
# Beschreibung:
# Export DOB-Daten von ITZGS2 in ETRS89
#
# letzte Aenderungen am: 03.09.2018
# letzte Aenderungen von: F.Wagner
# letzte Aenderung: Auslagern der Eingangsparamter AOI und ZIELVERZEICHNIS zum externen Aufruf ueber Batch-Skript, Kombination DOP und DOP_SW
#
# Bsiepielaufruf: Python <Path>SRRM_ETRS89_DOP_DOPSW.py E:\TEMP\aoi.shp F:\gis_data
#
#######################################################################################################################
import sys, os, arcpy, time, shutil
from arcpy import env
from arcpy.sa import *
from shutil import copyfile
arcpy.env.overwriteOutput = True
# in eine LogDatei schreiben und auf der CommandLine ausgeben
#######################################################################################################################
def logIt(myString, myLogPathFile):
    """Append *myString* as one line to the log file at *myLogPathFile*.

    Best-effort: if the file cannot be opened, a message is printed and the
    entry is dropped rather than raising.
    """
    try:
        myLogFile = open(myLogPathFile, "a")
    except IOError:  # I/O error opening the log file
        # Runtime message kept verbatim (German: "could not open file ...").
        print('Konnte Datei ' + myLogPathFile + ' nicht oeffnen')
    else:
        myLogFile.write(myString + "\n")
        # BUG FIX: original had `myLogFile.close` without parentheses, so the
        # file was never explicitly closed. Also the Python 2 print statement
        # above is replaced with the function form (valid on 2 and 3).
        myLogFile.close()
def myLogPrint(myString, myLogPathFile):
    """Echo *myString* to stdout and append it to the log file via logIt()."""
    # Fix: 'print myString' is Python-2-only syntax; print() with a single
    # argument produces identical output on Python 2 and 3.
    print(myString)
    logIt(myString, myLogPathFile)
# ---- Script input parameters ----
#######################################################################################################################
# Tool parameters
# Clip geometry (area-of-interest shapefile)
# AOI = r'D:\DATENABGABE\2018_08_31_Test_Gis_Data_Export\extent.shp'
AOI = sys.argv[1]  # e.g. E:\TEMP\aoi.shp
# Target folder for the exported data
# ZIELVERZEICHNIS = r'D:\DATENABGABE\2018_08_31_Test_Gis_Data_Export'
ZIELVERZEICHNIS = sys.argv[2]  # e.g. F:\gis_data
# Log file: timestamped, written one directory level above the export data
myLogDatei = os.path.abspath(os.path.join(os.path.dirname(ZIELVERZEICHNIS), '.')) + "\\__loggmich__" + time.strftime(
    "%Y_%m_%d__%H_%M_%S", time.localtime()) + ".txt"
# ---- Local variables: fixed input and output paths ----
#######################################################################################################################
myLogPrint("\t Basis Pfade", myLogDatei)
myLogPrint("\t\t Output-Wurzelverzeichnis ist: \t\t" + ZIELVERZEICHNIS, myLogDatei)
# Kilometre-quadrant map sheet grid (Blattschnitt) of the orthophotos;
# temporary location until the UIS update.
BS_DOP = r'E:\WIBAS_2018_11\_TOOLS\ErsatzArcView\Daten\Joachim_Nov_2018.gdb\UIS_0100000017200002'
# Map sheet grid clipped to the AOI (created later by this script)
BS_DOB_AOI = ZIELVERZEICHNIS + "\\Dateneingang_ETRS89.gdb\Blattschnitt_Orthobilder"
# Orthophoto tiles (colour and black/white) on the ITZGS2 share
ORTHO_ITZGS2 = r'\\itzgs2\gis_data_Auslieferung_ETRS89_November_2018\rips\images\dop'
ORTHO_SW = r'\\itzgs2\gis_data_Auslieferung_ETRS89_November_2018\rips\images\dop_sw'
# State-wide original image catalog
# (local copy with adjusted paths)
IMAGE_CATALOG_ORIG = r'E:\WIBAS_2018_11\_TOOLS\ErsatzArcView\Daten\gc_dobco_jpg.dbf'
# IMAGE_CATALOG_ORIG = r'\\itzgs2\gis_data_Auslieferung_ETRS89_November_2018\rips\images\catalog\gc_dobco_jpg.dbf'
# Image catalog reduced to the AOI (output of this script)
IMAGE_CATALOG_AOI = ZIELVERZEICHNIS + "\\dop\_ic_dobco_jpg.dbf"
# ---- Result folders ----
#######################################################################################################################
OUTPUT = ZIELVERZEICHNIS + "\\dop"
OUTPUT_SW = ZIELVERZEICHNIS + "\\dop_sw"
# ---- Run control flags ----
#######################################################################################################################
myLogPrint("\n\n", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint(" Skript zur Ausgabe AAA-Daten fuer SSRM startet: " + time.strftime("%Y_%m_%d__%H_%M_%S", time.localtime()),
           myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint("\n", myLogDatei)
myLogPrint("\n\t Steuerung fuer den Ablauf wird initialisiert...", myLogDatei)
# Each flag enables (1) or skips (0) one processing step below.
doDateneingang = 1  # check that the output folder exists / create it
doInputCheck = 1  # verify that all required input datasets can be found
doBS = 1  # clip the map sheet grid (Blattschnitt) to the AOI
doAuswahl = 1  # copy the colour DOP tiles intersecting the AOI
doAuswahl_SW = 1  # copy the black/white DOP tiles
doIMAGECATALOG = 1  # build the AOI image catalog
# Ensure the target folder for the results exists; create it when missing.
#######################################################################################################################
myLogPrint("\n\n", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint(" Pruefen ob Zielverzeichnis zur Ablage der Ergebnisse vorhanden ist", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
if doDateneingang == 1:
    if not arcpy.Exists(OUTPUT):
        myLogPrint("  Ordner fuer Ouput und wird angelegt", myLogDatei)
        os.makedirs(OUTPUT)
    else:
        myLogPrint("  Ordner fuer Ouput ist vorhanden", myLogDatei)
else:
    myLogPrint("\t uebersprungen", myLogDatei)
# ---- Verify that all required input datasets can be found ----
#######################################################################################################################
myLogPrint("\n\n", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint(" Input-Datenquellen werden gesucht", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
if doInputCheck == 1:
    tmp = 1  # 1 = all inputs present so far, 0 = at least one missing
    # Kilometre-quadrant map sheet grid of the orthophotos
    myLogPrint("\t\t BS_DOB...", myLogDatei)
    if arcpy.Exists(BS_DOP):
        myLogPrint("\t\t   vorhanden", myLogDatei)
    else:
        # Bugfix: the original referenced the undefined name BS_DOB here,
        # which raised a NameError whenever this dataset was missing.
        myLogPrint("\t\t   nicht vorhanden " + BS_DOP, myLogDatei)
        tmp = 0
    # Image catalog
    myLogPrint("\t\t Image Catalog...", myLogDatei)
    if arcpy.Exists(IMAGE_CATALOG_ORIG):
        myLogPrint("\t\t   vorhanden", myLogDatei)
    else:
        myLogPrint("\t\t   nicht vorhanden " + IMAGE_CATALOG_ORIG, myLogDatei)
        tmp = 0
    if tmp == 0:
        # Let the operator decide whether to continue despite missing data.
        myLogPrint("\n\n ---> Mindestens ein Datensatz wurde nicht gefunden.", myLogDatei)
        tmp = raw_input("      fortfahren? (j/n): ")
        if tmp != "j":
            sys.exit()
    else:
        myLogPrint("\n\n Inputdatensatzpruefung ok.", myLogDatei)
else:
    myLogPrint("\t uebersprungen", myLogDatei)
# Create the file geodatabase that will hold the clipped map sheet grid.
# NOTE(review): this runs even when doBS == 0 — confirm that is intended.
arcpy.CreateFileGDB_management(ZIELVERZEICHNIS, "Dateneingang_ETRS89.gdb", "CURRENT")
# ---- Clip the map sheet grid (Blattschnitt) to the AOI ----
#######################################################################################################################
myLogPrint("\n\n", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint(" BS ausschneiden", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
if doBS == 1:
    # Kilometre-quadrant sheet grid of the orthophotos: drop a stale
    # in-memory layer from a previous run before re-creating it.
    if arcpy.Exists("LAYER"):
        arcpy.gp.delete("LAYER")
    arcpy.MakeFeatureLayer_management(BS_DOP, "LAYER")
    # Keep only the grid cells intersecting the AOI and persist them.
    arcpy.SelectLayerByLocation_management("LAYER", "INTERSECT", AOI)
    arcpy.CopyFeatures_management("LAYER", BS_DOB_AOI, "", "0", "0", "0")
    arcpy.SelectLayerByAttribute_management("LAYER", "CLEAR_SELECTION", "")
else:
    myLogPrint("\t uebersprungen", myLogDatei)
# ---- Copy the colour DOP tiles that intersect the AOI ----
#######################################################################################################################
myLogPrint("\n\n", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint(" Auswahl der Dateien nach AOI", myLogDatei)
arcpy.AddMessage("Auswahl der Dateien nach AOI")
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
if doAuswahl == 1:
    fields = ['OBJECT_ID', 'KB']
    with arcpy.da.SearchCursor(BS_DOB_AOI, fields) as cursor:
        for row in cursor:
            ORDNER = row[1]  # presumably the tile sub-folder name — TODO confirm
            # Tile-number fragments sliced out of the OBJECT_ID digits.
            BS_NR = (str(row[0])[0:-2])
            BS_NR_01 = (str(row[0])[:-6])
            BS_NR_02 = (str(row[0])[5:-2])
            # Fix: '<> None' is Python-2-only syntax; 'is not None' is the
            # equivalent, portable identity test.
            if ORDNER is not None:
                if os.path.exists(OUTPUT + "\\" + ORDNER):
                    print("Gibt es bereits")
                else:
                    os.makedirs(OUTPUT + "\\" + ORDNER)
                # Copy the image and its world file; the original duplicated
                # this logic for .jpg and .jgw — report missing files either way.
                for ext in ("jpg", "jgw"):
                    src = ORTHO_ITZGS2 + "\\" + ORDNER + "\\FDOP20_" + BS_NR_01 + "_" + BS_NR_02 + "_rgbi." + ext
                    dst = OUTPUT + "\\" + ORDNER + "\\FDOP20_" + BS_NR_01 + "_" + BS_NR_02 + "_rgbi." + ext
                    if os.path.exists(src):
                        copyfile(src, dst)
                    else:
                        print("  Fehler bei " + str(BS_NR))
else:
    myLogPrint("\t uebersprungen", myLogDatei)
myLogPrint("\t ...beendet", myLogDatei)
# ---- Copy the black/white DOP tiles (same procedure as the colour DOPs) ----
if doAuswahl_SW == 1:
    fields = ['OBJECT_ID', 'KB']
    with arcpy.da.SearchCursor(BS_DOB_AOI, fields) as cursor:
        for row in cursor:
            ORDNER_SW = row[1]  # presumably the tile sub-folder name — TODO confirm
            # Tile-number fragments sliced out of the OBJECT_ID digits.
            BS_NR = (str(row[0])[0:-2])
            BS_NR_01 = (str(row[0])[:-6])
            BS_NR_02 = (str(row[0])[5:-2])
            # Fix: '<> None' is Python-2-only syntax.
            if ORDNER_SW is not None:
                if os.path.exists(OUTPUT_SW + "\\" + ORDNER_SW):
                    print("Gibt es bereits")
                else:
                    os.makedirs(OUTPUT_SW + "\\" + ORDNER_SW)
                # Copy image + world file; report each missing file.
                for ext in ("jpg", "jgw"):
                    src = ORTHO_SW + "\\" + ORDNER_SW + "\\FDOP20_" + BS_NR_01 + "_" + BS_NR_02 + "_rgbi." + ext
                    dst = OUTPUT_SW + "\\" + ORDNER_SW + "\\FDOP20_" + BS_NR_01 + "_" + BS_NR_02 + "_rgbi." + ext
                    if os.path.exists(src):
                        copyfile(src, dst)
                    else:
                        print("  Fehler bei " + str(BS_NR))
else:
    myLogPrint("\t uebersprungen", myLogDatei)
myLogPrint("\t ...beendet", myLogDatei)
#######################################################################################################################
myLogPrint("\n\n", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint(" Image Catalog erstellen", myLogDatei)
arcpy.AddMessage("Image Catalog erstellen")
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
if doIMAGECATALOG == 1:
    # Copy the state-wide image catalog, then strip it down to the AOI.
    arcpy.CopyRows_management(IMAGE_CATALOG_ORIG, IMAGE_CATALOG_AOI)
    # First collect every catalog image path covered by the AOI map sheets.
    wanted = set()
    fields01 = ['OBJECT_ID', 'KB']
    with arcpy.da.SearchCursor(BS_DOB_AOI, fields01) as cursor01:
        for row01 in cursor01:
            ORDNER = row01[1]
            BS_NR_01 = (str(row01[0])[:-6])
            BS_NR_02 = (str(row01[0])[5:-2])
            # Fix: '<> None' is Python-2-only syntax.
            if ORDNER is not None:
                wanted.add(ORDNER + "\\FDOP20_" + BS_NR_01 + "_" + BS_NR_02 + "_rgbi.jpg")
    # Then delete every row whose IMAGE is not wanted in a single pass.
    # The original re-opened an UpdateCursor over the whole catalog once per
    # map sheet and used a scratch TMP column (add / calculate / delete);
    # this is O(n + m) instead of O(n * m) and leaves the same final table.
    with arcpy.da.UpdateCursor(IMAGE_CATALOG_AOI, ['IMAGE']) as cursor02:
        for row02 in cursor02:
            if row02[0] not in wanted:
                cursor02.deleteRow()
else:
    myLogPrint("\t uebersprungen", myLogDatei)
myLogPrint("\t ...beendet", myLogDatei)
#######################################################################################################################
# Final log banner with the script end timestamp.
myLogPrint("\n\n", myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
myLogPrint(" Skript beendet: " + time.strftime("%Y_%m_%d__%H_%M_%S", time.localtime()), myLogDatei)
myLogPrint("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", myLogDatei)
|
{"/osgeo.py": ["/gdal.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.