index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
45,577 | torresdaniel11/innova6_backend | refs/heads/master | /gti/serializers.py | from rest_framework import serializers
from .models import Articles
from .models import Category
from .models import ConversationLevels
from .models import Conversations
from .models import QuestionArticles
from .models import QuestionRecords
from .models import Questions
class ArticlesSerializers(serializers.ModelSerializer):
    """Full serialization of Articles rows, including audit timestamps."""
    class Meta:
        model = Articles
        # NOTE(review): 'article_tittle' mirrors the (misspelled) model field
        # name; renaming it here alone would break serialization -- the model
        # would need a migration first.
        fields = (
            'id', 'article_tittle', 'article_content', 'article_slug', 'article_create_date', 'article_update_date')
class ConversationLevelsSerializer(serializers.ModelSerializer):
    """Serializes a conversation level (name + display colour).

    Also embedded as a nested, read-only representation by the
    conversation and question serializers below.
    """
    class Meta:
        model = ConversationLevels
        fields = ('id', 'conversation_level_name', 'conversation_color')
class ConversationsSerializers(serializers.HyperlinkedModelSerializer):
    """Serializes Conversations with the level embedded as a nested object."""
    # Read-only nested level: writes must set the FK through other means.
    conversation_conversation_level = ConversationLevelsSerializer(many=False, read_only=True, required=False)
    class Meta:
        model = Conversations
        fields = (
            'id', 'conversation_token', 'conversation_name', 'conversation_email', 'conversation_platform',
            'conversation_faculty', 'conversation_create_date', 'conversation_update_date',
            'conversation_conversation_level')
class CategorySerializers(serializers.HyperlinkedModelSerializer):
    """Minimal serialization of a question/article Category."""
    class Meta:
        model = Category
        fields = ('id', 'category_name')
class QuestionsSerializers(serializers.HyperlinkedModelSerializer):
    """Serializes Questions with nested, read-only level and category."""
    question_conversation_level = ConversationLevelsSerializer(many=False, read_only=True)
    question_category = CategorySerializers(many=False, read_only=True)
    class Meta:
        model = Questions
        fields = (
            'id', 'question_name', 'question_description', 'question_keywords', 'question_conversation_level',
            'question_category', 'question_replace', 'question_update', 'question_field_update')
class QuestionArticlesSerializers(serializers.HyperlinkedModelSerializer):
    """Serializes QuestionArticles with nested question and category."""
    # NOTE(review): these nested serializers are NOT read_only, so write
    # operations through this serializer will require nested create/update
    # support -- confirm the intended usage before relying on writes.
    question_article_question = QuestionsSerializers(many=False)
    question_article_category = CategorySerializers(many=False)
    class Meta:
        model = QuestionArticles
        fields = (
            'id', 'question_article_name', 'question_article_description', 'question_article_keywords',
            'question_article_question', 'question_article_category')
class QuestionRecordsSerializers(serializers.HyperlinkedModelSerializer):
    """Serializes QuestionRecords with nested conversation and question."""
    # NOTE(review): nested serializers without read_only -- writes need
    # nested create/update support (see QuestionArticlesSerializers).
    question_record_conversation = ConversationsSerializers(many=False)
    question_record_question = QuestionsSerializers(many=False)
    class Meta:
        model = QuestionRecords
        fields = (
            'id', 'question_record_response', 'question_record_conversation', 'question_record_question',
            'question_record_token', 'question_record_create_date')
| {"/gti/views.py": ["/gti/models.py"], "/gti/admin.py": ["/gti/models.py"], "/gti/serializers.py": ["/gti/models.py"]} |
45,578 | torresdaniel11/innova6_backend | refs/heads/master | /gti/migrations/0004_auto_20180330_0357.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-30 03:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (1.10.8); applied in sequence
    # after 0003. Avoid hand-editing -- create a new migration instead.

    dependencies = [
        ('gti', '0003_auto_20180327_0528'),
    ]

    operations = [
        # question_response -> question_record_response (naming consistency).
        migrations.RenameField(
            model_name='questionrecords',
            old_name='question_response',
            new_name='question_record_response',
        ),
        # Drop the FK from QuestionArticles to Articles.
        migrations.RemoveField(
            model_name='questionarticles',
            name='question_article_article',
        ),
        migrations.AddField(
            model_name='questionrecords',
            name='question_record_question',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='gti.Questions'),
        ),
        migrations.AddField(
            model_name='questionrecords',
            name='question_record_token',
            field=models.CharField(blank=True, editable=False, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='conversations',
            name='conversation_conversation_level',
            field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='gti.ConversationLevels'),
        ),
        # The four Conversations CharFields below become optional (blank=True).
        migrations.AlterField(
            model_name='conversations',
            name='conversation_email',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='conversations',
            name='conversation_faculty',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='conversations',
            name='conversation_name',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='conversations',
            name='conversation_platform',
            field=models.CharField(blank=True, max_length=200),
        ),
        # Retargets question_record_conversation to Conversations (0003 had
        # created it pointing at Questions, presumably by mistake).
        migrations.AlterField(
            model_name='questionrecords',
            name='question_record_conversation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gti.Conversations'),
        ),
    ]
| {"/gti/views.py": ["/gti/models.py"], "/gti/admin.py": ["/gti/models.py"], "/gti/serializers.py": ["/gti/models.py"]} |
45,579 | torresdaniel11/innova6_backend | refs/heads/master | /gti/apps.py | from __future__ import unicode_literals
from django.apps import AppConfig
class GtiConfig(AppConfig):
    """Django application configuration for the `gti` app."""
    name = 'gti'
| {"/gti/views.py": ["/gti/models.py"], "/gti/admin.py": ["/gti/models.py"], "/gti/serializers.py": ["/gti/models.py"]} |
45,580 | torresdaniel11/innova6_backend | refs/heads/master | /gti/migrations/0003_auto_20180327_0528.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-27 05:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (1.10.8). Creates the core
    # conversation/question tables. Avoid hand-editing; add new migrations.

    dependencies = [
        ('gti', '0002_auto_20180325_1056'),
    ]

    operations = [
        migrations.CreateModel(
            name='ConversationLevels',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('conversation_level_name', models.CharField(max_length=200)),
                ('conversation_color', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Conversations',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('conversation_token', models.CharField(editable=False, max_length=200)),
                ('conversation_name', models.CharField(max_length=200)),
                ('conversation_email', models.CharField(max_length=200)),
                ('conversation_platform', models.CharField(max_length=200)),
                ('conversation_faculty', models.CharField(max_length=200)),
                ('conversation_create_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('conversation_update_date', models.DateTimeField(auto_now=True, null=True)),
                ('conversation_conversation_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='gti.ConversationLevels')),
            ],
        ),
        migrations.CreateModel(
            name='QuestionArticles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_article_name', models.CharField(max_length=200)),
                ('question_article_description', models.TextField()),
                ('question_article_keywords', models.TextField()),
                # Removed again by migration 0004.
                ('question_article_article', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='gti.Articles')),
            ],
        ),
        migrations.CreateModel(
            name='QuestionRecords',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_record_create_date', models.DateTimeField(auto_now_add=True, null=True)),
                # Renamed to question_record_response by migration 0004.
                ('question_response', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Questions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_name', models.CharField(max_length=200)),
                ('question_description', models.TextField()),
                ('question_keywords', models.TextField()),
                ('question_conversation_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='gti.ConversationLevels')),
            ],
        ),
        # NOTE(review): this FK is named "..._conversation" but points at
        # gti.Questions; migration 0004 retargets it to gti.Conversations.
        migrations.AddField(
            model_name='questionrecords',
            name='question_record_conversation',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='gti.Questions'),
        ),
        migrations.AddField(
            model_name='questionarticles',
            name='question_article_question',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='gti.Questions'),
        ),
    ]
| {"/gti/views.py": ["/gti/models.py"], "/gti/admin.py": ["/gti/models.py"], "/gti/serializers.py": ["/gti/models.py"]} |
45,581 | torresdaniel11/innova6_backend | refs/heads/master | /gti/migrations/0008_questions_question_replace.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-01 19:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (1.10.8); adds the
    # Questions.question_replace boolean flag (default False).

    dependencies = [
        ('gti', '0007_auto_20180330_0622'),
    ]

    operations = [
        migrations.AddField(
            model_name='questions',
            name='question_replace',
            field=models.BooleanField(default=False),
        ),
    ]
| {"/gti/views.py": ["/gti/models.py"], "/gti/admin.py": ["/gti/models.py"], "/gti/serializers.py": ["/gti/models.py"]} |
45,582 | ActiveLoiterer/CarreRouge | refs/heads/master | /Modele_carreRouge.py | import time
import random
import math
import threading
class Jeu:
    """Game model for CarreRouge.

    Holds the red player square (Pion), the four moving blue squares
    (CarreBleu), the play-field limits, the round timer and the
    highscore persistence.
    """

    def __init__(self):
        self.nomJoueur = "Anonymous"
        self.limiteX = 500
        self.limiteY = 500
        # 40x40 red square centred in the play field.
        self.joueur = Pion(self, self.limiteX / 2 - 20, self.limiteY / 2 - 20,
                           self.limiteX / 2 + 20, self.limiteY / 2 + 20)
        self.listeCarreBleu = []
        # Keep these four constructions: the CarreBleu objects are not
        # created anywhere else (original note).  The initial coordinates
        # are immediately overwritten by initCarreBleuRandom() below.
        self.carrebleu1 = CarreBleu(self, 100, 100, 160, 160, math.pi / 4)
        self.carrebleu2 = CarreBleu(self, 300, 85, 360, 135, math.pi / 4 * 3)
        self.carrebleu3 = CarreBleu(self, 85, 350, 115, 410, math.pi / 4 * 7)
        self.carrebleu4 = CarreBleu(self, 355, 340, 455, 360, math.pi / 4 * 5)
        self.listeCarreBleu.append(self.carrebleu1)
        self.listeCarreBleu.append(self.carrebleu2)
        self.listeCarreBleu.append(self.carrebleu3)
        self.listeCarreBleu.append(self.carrebleu4)
        # Randomize the starting position of each blue square.
        self.initCarreBleuRandom(self.listeCarreBleu)
        self.listeNom = self.lireHighscore()  # cached highscore entries
        self.tempsDepart = None  # wall-clock time when the round started
        self.tempsFinal = None   # elapsed time of the player's round
        self.var = 0  # counts updateCarreBleu ticks

    def setNom(self, nom):
        """Set the player's display name."""
        self.nomJoueur = nom

    def startTimer(self):
        """Record the wall-clock time at which the round starts."""
        self.tempsDepart = time.time()

    def calculTempsTotal(self):
        """Refresh tempsFinal with the elapsed time since startTimer()."""
        tempsFin = time.time()
        self.tempsFinal = tempsFin - self.tempsDepart

    def getTemps(self):
        """Return the last computed elapsed time (None before any update)."""
        return self.tempsFinal

    def getListeNom(self):
        """Return the cached highscore entries."""
        return self.listeNom

    def lireHighscore(self):
        """Read highscore.txt and return/cache its entries.

        Each entry is the one-element list produced by str.splitlines()
        (kept as-is: the view's listbox displays these entries).
        Requires highscore.txt to exist in the working directory.
        """
        with open("highscore.txt", "r") as highscoreFile:
            listeNoms = [line.splitlines() for line in highscoreFile]
        self.listeNom = listeNoms
        return listeNoms  # used for the initial read when the game starts

    def ecrireHighscore(self):
        """Append "<name> <time>" to the highscore file.

        BUGFIX: the original wrote to "Highscore.txt" while lireHighscore()
        reads "highscore.txt" -- two different files on case-sensitive
        filesystems, so new scores never appeared.  Both now use the same
        lower-case name.
        """
        toWrite = "{:10.2f}".format(self.tempsFinal) + "\n"
        with open("highscore.txt", "a") as highscoreFile:
            highscoreFile.write(self.nomJoueur + " " + toWrite)

    def updateCarreBleu(self):
        """Advance every blue square one step and bounce off the borders."""
        for i in self.listeCarreBleu:
            i.changePos()
            i.collisionAvecMur(0, self.limiteX, 0, self.limiteY)
        self.incremVitesse()

    def updateJeu(self):
        """One game tick: timer, blue squares, collision checks."""
        self.calculTempsTotal()
        self.updateCarreBleu()
        self.checkRedSqCollision()

    def incremVitesse(self):
        """Make every blue square slightly faster each tick."""
        for i in self.listeCarreBleu:
            i.vitesse += 0.1
        self.var += 1

    def checkRedSqCollision(self):
        """Mark the player dead on contact with a blue square or the border.

        BUGFIX: the original only tested whether the player's corners fell
        inside a blue square, missing the case of a blue square entirely
        inside (or crossing) the player.  This is the standard
        axis-aligned bounding-box overlap test.
        """
        j = self.joueur
        for i in self.listeCarreBleu:
            if (j.posX1 <= i.posX2 and j.posX2 >= i.posX1
                    and j.posY1 <= i.posY2 and j.posY2 >= i.posY1):
                j.dead = True
        j.isOutOfBounds(30, self.limiteX - 30, 30, self.limiteY - 30)

    def initCarreBleuRandom(self, listeCarre):
        """Place the four blue squares at random positions, one per quadrant.

        Quadrants (w = limiteX, h = limiteY):
          area 1 = (0, 0)-(w/2, h/2)     area 2 = (w/2, 0)-(w, h/2)
          area 3 = (0, h/2)-(w/2, h)     area 4 = (w/2, h/2)-(w, h)
        Margins keep the squares clear of the border and of the red
        square's starting position.  Integer division is used because
        random.randrange() rejects non-integral bounds.
        """
        demiX = self.limiteX // 2
        demiY = self.limiteY // 2
        # First square: area 1.
        listeCarre[0].posX1 = random.randrange(30, demiX - 130)
        listeCarre[0].posX2 = random.randrange(listeCarre[0].posX1 + 10, listeCarre[0].posX1 + 100)
        listeCarre[0].posY1 = random.randrange(30, demiY - 130)
        listeCarre[0].posY2 = random.randrange(listeCarre[0].posY1 + 10, listeCarre[0].posY1 + 100)
        # Second square: area 2.
        listeCarre[1].posX1 = random.randrange(demiX + 30, self.limiteX - 110)
        listeCarre[1].posX2 = random.randrange(listeCarre[1].posX1 + 10, listeCarre[1].posX1 + 100)
        listeCarre[1].posY1 = random.randrange(30, demiY - 130)
        listeCarre[1].posY2 = random.randrange(listeCarre[1].posY1 + 10, listeCarre[1].posY1 + 100)
        # Third square: area 3.
        listeCarre[2].posX1 = random.randrange(30, demiX - 130)
        listeCarre[2].posX2 = random.randrange(listeCarre[2].posX1 + 10, listeCarre[2].posX1 + 100)
        listeCarre[2].posY1 = random.randrange(demiY + 30, self.limiteY - 130)
        listeCarre[2].posY2 = random.randrange(listeCarre[2].posY1 + 10, listeCarre[2].posY1 + 100)
        # Fourth square: area 4.
        listeCarre[3].posX1 = random.randrange(demiX + 30, self.limiteX - 130)
        listeCarre[3].posX2 = random.randrange(listeCarre[3].posX1 + 10, listeCarre[3].posX1 + 100)
        listeCarre[3].posY1 = random.randrange(demiY + 30, self.limiteY - 130)
        listeCarre[3].posY2 = random.randrange(listeCarre[3].posY1 + 10, listeCarre[3].posY1 + 100)
class Pion:
    """The red 40x40 player square, identified by two corner points."""

    def __init__(self, parent, x1, y1, x2, y2):
        """parent: owning Jeu (kept for API compatibility; unused here)."""
        self.posX1 = x1
        self.posY1 = y1
        self.posX2 = x2
        self.posY2 = y2
        self.dead = False  # set by Jeu on collision / out-of-bounds

    def changePos(self, x, y):
        """Move the square so its top-left corner is at (x, y).

        The 40-pixel edge matches the size used in Jeu.__init__.
        """
        self.posX1 = x
        self.posY1 = y
        self.posX2 = x + 40
        self.posY2 = y + 40

    def isDead(self):
        """Return True once the player has died this round.

        Simplified from the original `if self.dead == False: ...` ladder,
        which just re-derived the boolean it already had.
        """
        return self.dead

    def isOutOfBounds(self, gauche, droite, haut, bas):
        """Mark the player dead if it touches any play-field border."""
        if self.posX1 <= gauche:  # left border
            self.dead = True
        elif self.posX2 >= droite:  # right border
            self.dead = True
        elif self.posY1 <= haut:  # top border
            self.dead = True
        elif self.posY2 >= bas:  # bottom border
            self.dead = True
class CarreBleu:
    """A moving blue square that bounces off the play-field borders."""

    def __init__(self, parent, x1, y1, x2, y2, angle):
        """parent: owning Jeu (unused); corners (x1,y1)-(x2,y2); angle in radians."""
        self.posX1 = x1
        self.posY1 = y1
        self.posX2 = x2
        self.posY2 = y2
        self.vitesse = 5  # pixels per update tick; grows each tick (Jeu.incremVitesse)
        self.angleCourant = angle

    def changePos(self):
        """Advance both corners one step along the current heading."""
        # Hoisted the trig out of the four assignments (same values).
        dx = math.cos(self.angleCourant) * self.vitesse
        dy = math.sin(self.angleCourant) * self.vitesse
        self.posX1 = dx + self.posX1
        self.posY1 = dy + self.posY1
        self.posX2 = dx + self.posX2
        self.posY2 = dy + self.posY2

    def collisionAvecMur(self, gauche, droite, haut, bas):
        """On touching a border, pick a new random heading pointing away from it.

        random.uniform is used because headings are floats.  (A large
        commented-out deterministic bounce variant was removed as dead code.)
        """
        if self.posX1 <= gauche:
            # Away from the left wall: heading in (3pi/2, 5pi/2), i.e. rightwards.
            self.angleCourant = random.uniform(math.pi * 1.5, math.pi * 2.5)
        elif self.posX2 >= droite:
            # Away from the right wall: heading in (pi/2, 3pi/2).
            self.angleCourant = random.uniform(math.pi / 2, math.pi * 1.5)
        elif self.posY1 <= haut:
            # Away from the top wall: heading in (0, pi).
            self.angleCourant = random.uniform(0, math.pi)
        elif self.posY2 >= bas:
            # Away from the bottom wall: heading in (pi, 2pi).
            self.angleCourant = random.uniform(math.pi, math.pi * 2)
| {"/Controlleur_carreRouge.py": ["/Vue_carreRouge.py", "/Modele_carreRouge.py"]} |
45,583 | ActiveLoiterer/CarreRouge | refs/heads/master | /Vue_carreRouge.py | from tkinter import *
from tkinter import messagebox
import os
class Vue:
    """Tkinter view for CarreRouge: main menu, play field, highscores, dialogs."""

    def __init__(self, parent, controlleur):
        """parent: the Jeu model (supplies canvas size); controlleur: game controller."""
        self.parent = parent
        self.root = Tk()
        self.root.title('CarreRouge')
        self.controlleur = controlleur
        self.canvasWidth = self.parent.limiteX
        self.canvasHeight = self.parent.limiteY
        self.canvasPrincipal = Canvas(self.root, width=self.canvasWidth, height=self.canvasHeight, bg="black")
        self.drawMainMenu()
        self.listeNom = []
        self.pret = -1  # -1: in menu, 1: playing, 0: round finished
        self.cliqueSurPion = False  # True while the red square is being dragged
        self.nomJoueur = ""
        self.highscoreOuvert = False
        self.textEntry = Entry(self.canvasPrincipal)
        self.canvasPrincipal.bind("<Button-1>", self.click)
        self.canvasPrincipal.bind("<ButtonRelease>", self.relacheClick)
        self.canvasPrincipal.bind("<Motion>", self.mouseMotion)

    def click(self, event):
        """Start (or resume) the game loop when the red square is clicked."""
        lestags = self.canvasPrincipal.gettags("current")
        if "pion" in lestags:
            self.cliqueSurPion = True
            self.pret = 1
            if not self.controlleur.jeu.tempsDepart:
                self.controlleur.jeu.startTimer()
            self.controlleur.gameLoop()

    def mouseMotion(self, event):
        """Drag the red square while the mouse button is held on it."""
        if self.cliqueSurPion and self.controlleur.jeu:
            self.controlleur.jeu.joueur.changePos(event.x, event.y)

    def relacheClick(self, event):
        """Stop dragging when the mouse button is released."""
        self.cliqueSurPion = False

    def actionBoutonPlay(self):
        """'Jouer' button: ask for the player's name."""
        self.getNomJoueur()

    def actionBoutonQuitter(self):
        """'Quitter' button: clear the canvas and terminate the process."""
        self.canvasPrincipal.delete("all")
        os._exit(0)

    def actionBoutonFermerHighscore(self):
        """Close the highscore panel and return to the main menu."""
        self.canvasPrincipal.delete('highscore')
        self.highscoreOuvert = False
        self.drawMainMenu()

    def getNomJoueur(self):
        """Show an entry widget and a confirm button so the player picks a name."""
        self.textEntry = Entry(self.canvasPrincipal)
        b = Button(self.canvasPrincipal, text='choisir un nom', command=self.boutonGetJoueur)
        self.canvasPrincipal.create_window(self.canvasWidth / 2, self.canvasHeight - 100, window=self.textEntry, tags='choixNom')
        self.canvasPrincipal.create_window(self.canvasWidth / 2, self.canvasHeight - 150, window=b, tags='choixNom')
        self.textEntry.focus_set()

    def boutonGetJoueur(self):
        """Confirm the chosen name, reset the model and draw the play field."""
        self.drawSurfaceJeu()
        self.controlleur.refairePartie()
        # BUGFIX: the original compared with `is not ""` -- an identity test
        # against a string literal (SyntaxWarning in modern Python, and not
        # guaranteed to behave like equality). Use != instead.
        if self.textEntry.get() != "":
            self.controlleur.jeu.setNom(self.textEntry.get())
        self.drawPions()

    def drawListeNomHighscore(self):
        """Replace the menu with a scrollable listbox of highscore entries."""
        self.canvasPrincipal.delete('Menu')
        boutonFermerHS = Button(self.root, text="fermer highscore!", width=20, height=2, command=lambda: self.actionBoutonFermerHighscore())
        scrollbar = Scrollbar(self.root)
        listbox = Listbox(self.root, yscrollcommand=scrollbar.set)
        for item in self.listeNom:
            listbox.insert(END, item)
        scrollbar.config(command=listbox.yview)
        self.canvasPrincipal.create_window(self.canvasWidth / 2, 300, window=listbox, tags='highscore')
        self.canvasPrincipal.create_window(350, 300, window=scrollbar, height=160, tags='highscore')
        self.canvasPrincipal.create_window(self.canvasWidth / 2, 450, window=boutonFermerHS, tags='highscore')

    def actionBoutonHighscore(self):
        """'Highscore' button: reload the scores and show the panel (once)."""
        if not self.highscoreOuvert:
            self.controlleur.jeu.lireHighscore()
            self.listeNom = self.controlleur.jeu.getListeNom()
            self.drawListeNomHighscore()
            self.highscoreOuvert = True

    def drawMainMenu(self):
        """Clear the canvas and draw the Play / Quit / Highscore buttons."""
        self.canvasPrincipal.delete('all')
        self.boutonPlay = Button(self.root, text="Jouer", width=20, height=5, command=lambda: self.actionBoutonPlay())
        self.boutonQuit = Button(self.root, text="Quitter", width=20, height=5, command=lambda: self.actionBoutonQuitter())
        self.boutonHighscore = Button(self.root, text="Highscore", width=20, height=5, command=lambda: self.actionBoutonHighscore())
        self.canvasPrincipal.create_window(self.canvasWidth / 2, self.canvasHeight - (self.canvasHeight - 70), window=self.boutonPlay, tags='Menu')
        self.canvasPrincipal.create_window(self.canvasWidth / 2, self.canvasHeight - (self.canvasHeight - 270), window=self.boutonQuit, tags='Menu')
        self.canvasPrincipal.create_window(self.canvasWidth / 2, self.canvasHeight - (self.canvasHeight - 170), window=self.boutonHighscore, tags='Menu')
        self.canvasPrincipal.pack()

    def drawSurfaceJeu(self):
        """Clear the canvas and draw the white play field with its border."""
        self.canvasPrincipal.delete('all')
        self.canvasPrincipal.create_rectangle(30, 30, self.canvasWidth - 30, self.canvasHeight - 30, fill="white", tags='jeu')

    def drawTemps(self):
        """Draw the elapsed-time label at the top of the canvas."""
        temps = StringVar()
        temps.set(str("{:10.2f}".format(self.controlleur.jeu.getTemps())))
        labelTemps = Label(self.canvasPrincipal, textvariable=temps, fg="black")
        self.canvasPrincipal.create_window(self.canvasWidth / 2, 15, window=labelTemps, tags='jeu')

    def drawPions(self):
        """Redraw the red player square and all blue squares."""
        self.canvasPrincipal.delete('pion')
        self.canvasPrincipal.delete('carreBleu')
        pion = self.controlleur.jeu.joueur
        self.canvasPrincipal.create_rectangle(pion.posX1, pion.posY1, pion.posX2, pion.posY2, fill="red", tags=("pion"))
        for i in self.controlleur.jeu.listeCarreBleu:
            self.canvasPrincipal.create_rectangle(i.posX1, i.posY1, i.posX2, i.posY2, fill="blue", tags=("carreBleu"))

    def updateVue(self):
        """Per-tick redraw: timer label and all squares."""
        self.drawTemps()
        self.drawPions()

    def drawDialogRejouer(self):
        """Ask whether to play again; return True for yes, False for no."""
        # Simplified: askyesno already returns a boolean.
        return messagebox.askyesno("nouvelle partie", "voulez-vous rejouer une partie?", parent=self.canvasPrincipal)
| {"/Controlleur_carreRouge.py": ["/Vue_carreRouge.py", "/Modele_carreRouge.py"]} |
45,584 | ActiveLoiterer/CarreRouge | refs/heads/master | /Controlleur_carreRouge.py | import Vue_carreRouge
import Modele_carreRouge
class Controlleur:
    """Wires the Jeu model to the Vue and drives the 50 ms game loop."""

    def __init__(self):
        self.jeu = Modele_carreRouge.Jeu()
        self.vue = Vue_carreRouge.Vue(self.jeu, self)
        self.vue.root.mainloop()

    def gameLoop(self):
        """Advance the game every ~50 ms until the player dies."""
        if self.vue.pret != 1:
            # Not in the "playing" state: nothing to do.
            return
        if self.jeu.joueur.isDead():
            # Round over: back to the menu, persist the score, stop looping.
            self.vue.drawMainMenu()
            self.jeu.ecrireHighscore()
            self.vue.pret = 0
            return
        self.jeu.updateJeu()
        self.vue.updateVue()
        self.vue.root.after(50, self.gameLoop)

    def refairePartie(self):
        """Discard the current game state and start a fresh Jeu."""
        self.jeu = None
        self.jeu = Modele_carreRouge.Jeu()
if __name__ == '__main__':
    # Entry point: constructing the controller starts the Tk main loop.
    c=Controlleur()
45,585 | AloneGu/flask-web-car-detector | refs/heads/master | /config.py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Jackling Gu
@file: config.py
@time: 16-10-28 09:54
"""
# Directory where uploaded images are stored by the Flask app.
UPLOAD_FOLDER = '/tmp/uploads'
# Directory where detection results (annotated images, CSVs) are written.
RES_FOLDER = '/tmp/results'
# File extensions accepted by the upload endpoint (compared case-sensitively).
ALLOWED_EXTENSIONS = ['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif']
45,586 | AloneGu/flask-web-car-detector | refs/heads/master | /car_detection.py | #!/usr/bin/env python
# encoding: utf-8
from skimage.transform import pyramid_gaussian
from skimage.io import imread
from skimage.feature import hog
from sklearn.externals import joblib
import cv2
import os
import pandas as pd
import ConfigParser as cp
import json
from config import RES_FOLDER
class CarDetector(object):
    """HOG + linear-SVM car detector: image pyramid, sliding window, NMS.

    NOTE: this module targets Python 2 (ConfigParser import at file level,
    xrange below); `print(...)` with a single argument is used so the code
    at least parses under Python 3 as well.
    """

    def __init__(self, model_path="./data/models/car_LIN_SVM.model"):
        """Load the trained SVM and HOG/NMS settings from ./data/config.cfg."""
        config = cp.RawConfigParser()
        config.read('./data/config.cfg')
        self.model_path = model_path
        self.clf = joblib.load(model_path)
        self.image_path = None
        self.test_dir = None
        self.annotation_path = ""  # optional dir of ground-truth .txt files
        # HOG parameters.
        self.min_wdw_sz = json.loads(config.get("hog", "min_wdw_sz"))
        self.step_size = json.loads(config.get("hog", "step_size"))
        self.orientations = config.getint("hog", "orientations")
        self.pixels_per_cell = json.loads(config.get("hog", "pixels_per_cell"))
        self.cells_per_block = json.loads(config.get("hog", "cells_per_block"))
        # Pyramid / NMS parameters.
        self.downscale = config.getfloat("nms", "downscale")
        self.visualize = config.getboolean("hog", "visualize")
        self.normalize = config.getboolean("hog", "normalize")
        self.threshold = config.getfloat("nms", "threshold")
        self.output_dir = RES_FOLDER

    def run(self, params, params_path):
        """Run detection. params is 'image' (single file) or 'dir' (directory).

        For 'image', writes the annotated images into output_dir and
        returns (number_of_detections, result_filename).
        """
        self.params = params
        assert isinstance(self.params, str)
        if params == 'image':
            org_img, nms_img, nms_num = self._process_one_img(params_path, self.clf)
            cv2.imwrite(os.path.join(self.output_dir, 'org_' + os.path.split(params_path)[1]), org_img)
            cv2.imwrite(os.path.join(self.output_dir, os.path.split(params_path)[1]), nms_img)
            nms_fn = os.path.split(params_path)[1]
            return nms_num, nms_fn
        elif params == 'dir':
            self._process_dir_img(params_path, self.clf)
        else:
            raise Exception("unsupported parameters : {}".format(params))

    def _overlapping_area(self, detection_1, detection_2):
        '''
        Return the overlap ratio (intersection area / union area) of two
        detections, each in the format
        [x-top-left, y-top-left, confidence, width, height].
        0 means no overlap, 1 means complete overlap.
        '''
        # Corner coordinates of both rectangles.
        x1_tl = detection_1[0]
        x2_tl = detection_2[0]
        x1_br = detection_1[0] + detection_1[3]
        x2_br = detection_2[0] + detection_2[3]
        y1_tl = detection_1[1]
        y2_tl = detection_2[1]
        y1_br = detection_1[1] + detection_1[4]
        y2_br = detection_2[1] + detection_2[4]
        # Intersection area.
        x_overlap = max(0, min(x1_br, x2_br) - max(x1_tl, x2_tl))
        y_overlap = max(0, min(y1_br, y2_br) - max(y1_tl, y2_tl))
        overlap_area = x_overlap * y_overlap
        # BUGFIX: area_1 previously multiplied detection_1's width by
        # detection_2's height, giving a wrong ratio for unequal boxes.
        area_1 = detection_1[3] * detection_1[4]
        area_2 = detection_2[3] * detection_2[4]
        total_area = area_1 + area_2 - overlap_area
        return overlap_area / float(total_area)

    def _nms(self, detections, threshold=.5):
        '''
        Non-Maxima Suppression: keep, in descending confidence order, only
        detections whose overlap with every already-kept detection is at
        most `threshold`.  Returns the list of kept detections.
        '''
        if len(detections) == 0:
            return []
        # Highest confidence first.
        detections = sorted(detections, key=lambda d: d[2], reverse=True)
        new_detections = [detections[0]]
        # BUGFIX: the original deleted items from `detections` while
        # iterating over it with enumerate, which skips elements and can
        # raise IndexError; iterate over a stable sequence instead.
        for detection in detections[1:]:
            for kept in new_detections:
                if self._overlapping_area(detection, kept) > threshold:
                    break  # suppressed by a stronger overlapping detection
            else:
                new_detections.append(detection)
        return new_detections

    def _sliding_window(self, image, window_size, step_size):
        '''
        Yield (x, y, im_window) patches of `image` of size `window_size`,
        starting at (0, 0) and advancing by `step_size` in x and y.
        x, y are the window's top-left coordinates.
        '''
        # xrange: Python 2 (becomes range under Python 3).
        for y in xrange(0, image.shape[0], step_size[1]):
            for x in xrange(0, image.shape[1], step_size[0]):
                yield (x, y, image[y:y + window_size[1], x:x + window_size[0]])

    def _process_one_img(self, im_path, clf):
        """Detect cars in one image.

        Returns (annotated_original, annotated_after_nms, detection_count).
        """
        # Read the image as greyscale.
        im = imread(im_path, as_grey=True)
        detections = []  # all raw detections across scales
        scale = 0  # current pyramid level
        # Downscale the image and iterate over the pyramid.
        for im_scaled in pyramid_gaussian(im, downscale=self.downscale):
            cd = []  # detections at the current scale (for visualization)
            # Stop when the scaled image is smaller than the window.
            if im_scaled.shape[0] < self.min_wdw_sz[1] or im_scaled.shape[1] < self.min_wdw_sz[0]:
                break
            for (x, y, im_window) in self._sliding_window(im_scaled, self.min_wdw_sz, self.step_size):
                if im_window.shape[0] != self.min_wdw_sz[1] or im_window.shape[1] != self.min_wdw_sz[0]:
                    continue
                # HOG features for the current window.
                fd = hog(im_window, self.orientations, self.pixels_per_cell, self.cells_per_block, self.visualize,
                         self.normalize)
                fd = fd.reshape(1, -1)
                pred = clf.predict(fd)
                if pred == 1:
                    # Record the detection in original-image coordinates
                    # (width/height scaled back up by downscale**scale).
                    detections.append((x, y, clf.decision_function(fd),
                                       int(self.min_wdw_sz[0] * (self.downscale ** scale)),
                                       int(self.min_wdw_sz[1] * (self.downscale ** scale))))
                    cd.append(detections[-1])
                # If visualize is set, display the sliding window progress.
                if self.visualize:
                    clone = im_scaled.copy()
                    for x1, y1, _, _, _ in cd:
                        # Draw the detections at this scale.
                        cv2.rectangle(clone, (x1, y1), (x1 + im_window.shape[1], y1 +
                                                        im_window.shape[0]), (0, 0, 0), thickness=2)
                    cv2.rectangle(clone, (x, y), (x + im_window.shape[1], y +
                                                  im_window.shape[0]), (255, 255, 255), thickness=2)
                    cv2.imshow("Sliding Window in Progress", clone)
                    cv2.waitKey(30)
            # Move to the next pyramid scale.
            scale += 1
        # Annotate all raw detections (before NMS).
        org_res = im.copy()
        for (x_tl, y_tl, _, w, h) in detections:
            cv2.rectangle(org_res, (x_tl, y_tl), (x_tl + w, y_tl + h), (0, 0, 0), thickness=2)
        # Perform Non-Maxima Suppression and annotate the survivors.
        detections = self._nms(detections, self.threshold)
        nms_res = im.copy()
        for (x_tl, y_tl, _, w, h) in detections:
            cv2.rectangle(nms_res, (x_tl, y_tl), (x_tl + w, y_tl + h), (0, 0, 0), thickness=2)
        return org_res, nms_res, len(detections)

    def _process_dir_img(self, params_path, clf):
        """Detect cars in every image of a directory; write images + CSV summary.

        If annotation_path is set, compares detection counts against
        ground-truth .txt files (lines containing 'Original label for object').
        """
        input_dir = params_path
        fl = os.listdir(input_dir)
        test_result = pd.DataFrame(columns=['img_name', 'truth', 'predict_cnt', 'rate'])
        test_result['img_name'] = pd.Series(fl)
        image_current = 0
        for fn in fl:
            im_path = os.path.join(input_dir, fn)
            # BUGFIX: parenthesized print so the module also parses under
            # Python 3 (identical output for a single argument in Python 2).
            print(im_path)
            org_img, nms_img, nms_num = self._process_one_img(im_path, clf)
            cv2.imwrite(os.path.join(self.output_dir, 'org_' + fn), org_img)
            cv2.imwrite(os.path.join(self.output_dir, 'nms_' + fn), nms_img)
            test_result.iloc[image_current, 2] = nms_num
            if self.annotation_path != "":
                temp = os.path.split(os.path.join(self.annotation_path, fn))
                # Ground-truth file shares the image's base name, .txt extension.
                with open(temp[0] + '/' + temp[1].split(".")[0] + '.txt') as fr:
                    aonnotation = fr.read()
                # NOTE(review): raises ZeroDivisionError if real_count is 0 --
                # confirm annotations always contain at least one object.
                real_count = aonnotation.count('Original label for object')
                test_result.iloc[image_current, 1] = real_count
                test_result.iloc[image_current, 3] = max(0, 1 - float(abs(real_count - nms_num)) / real_count)
            image_current += 1
        # BUGFIX: join the path properly (the original concatenated
        # output_dir + 'test_result.csv' without a separator).
        test_result.to_csv(os.path.join(self.output_dir, 'test_result.csv'), header=True, index=False)
| {"/app.py": ["/config.py", "/car_detection.py"]} |
45,587 | AloneGu/flask-web-car-detector | refs/heads/master | /app.py | import os
from flask import Flask, request, redirect, url_for, send_from_directory, render_template
from werkzeug import secure_filename
from config import *
from car_detection import CarDetector
# Module-level state: name of the most recently uploaded file and a single
# shared detector instance.  (The original had `global` statements here at
# module level; `global` is only meaningful inside a function, so those
# no-op statements were removed.)
curr_file_name = None
car_worker = CarDetector()

# Ensure the upload/result directories exist before serving requests.
if not os.path.exists(UPLOAD_FOLDER):
    os.makedirs(UPLOAD_FOLDER)
if not os.path.exists(RES_FOLDER):
    os.makedirs(RES_FOLDER)

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['RES_FOLDER'] = RES_FOLDER
def allowed_file(filename):
    """Return True if *filename* carries an extension from ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Serve the upload form; on POST, save the image and run car detection."""
    global curr_file_name
    global car_worker
    if request.method == 'POST':
        uploaded = request.files['file']
        if uploaded and allowed_file(uploaded.filename):
            safe_name = secure_filename(uploaded.filename)
            curr_file_name = safe_name
            destination = os.path.join(app.config['UPLOAD_FOLDER'], safe_name)
            uploaded.save(destination)
            # Run detection on the freshly saved image.
            car_worker.run('image', destination)
            return redirect(url_for('upload_file'))
    return render_template('index.html', fn=curr_file_name)
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded image from the upload folder."""
    upload_dir = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_dir, filename)
@app.route('/results/<filename>')
def processed_file(filename):
    """Serve a detector output image from the results folder."""
    result_dir = app.config['RES_FOLDER']
    return send_from_directory(result_dir, filename)
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0',port=5001) | {"/app.py": ["/config.py", "/car_detection.py"]} |
45,589 | Ho-a-ki/uospy | refs/heads/master | /report.py |
def get_desc():
    """
    Return random weather, just like the pros
    """
    import random
    options = ['rain', 'snow', 'sleet', 'fog', 'sun', 'who knows?']
    return random.choice(options)
| {"/te2.py": ["/testing.py"]} |
45,590 | Ho-a-ki/uospy | refs/heads/master | /testing.py |
def func(a):
    """Echo the supplied number to stdout with a label; returns None."""
    label = "input number : "
    print(label, a)
if __name__ == '__main__':
    # Executed directly as a script.
    print("direct access")
    func(3)
    func(4)
else:
    # Imported as a module.
    print('import acesss')  # NOTE(review): message typo ('acesss') kept as-is
45,591 | Ho-a-ki/uospy | refs/heads/master | /lecture4.py |
animal = 'fruitbat'
| {"/te2.py": ["/testing.py"]} |
45,592 | Ho-a-ki/uospy | refs/heads/master | /weather.py |
# Print today's forecast and an enumerated weekly forecast using the
# local 'source' package's daily/weekly modules.
from source import daily, weekly
print('Daily Forecast : ', daily.forecast())
print('Weekly Forecast:')
# enumerate(..., 1): number the weekly entries starting from 1.
for i, o in enumerate(weekly.forecast(), 1):
    print( i, o)
45,593 | Ho-a-ki/uospy | refs/heads/master | /te2.py |
# Importing 'testing' runs its module-level else-branch print side effect.
import testing
print("te2를 실행합니다.")
45,602 | neu5ron/domaininformation | refs/heads/master | /setup.py | from distutils.core import setup
import sys
import io
NAME = 'domaininformation'
VERSION = '1.0.24'
AUTHOR = 'neu5ron'
AUTHOR_EMAIL = 'therealneu5ron AT gmail DOT com'
DESCRIPTION = "Combine information about a domain in JSON format"
URL = "https://github.com/neu5ron/domaininformation"
DOWNLOAD_URL = "https://github.com/neu5ron/domaininformation/tarball/master"
# Long description is the README followed by the changelog.
LONG_DESCRIPTION = '\n\n'.join([io.open('README.md', 'r',
                                        encoding='utf-8').read(),
                                io.open('CHANGES.md', 'r',
                                        encoding='utf-8').read()])
PACKAGES = ['domaininformation']
INSTALL_REQUIRES = []
# Python 2-only package: bail out early when installed under Python 3.
if sys.version_info >= (3,):
    print 'Requires python 2.7'
    sys.exit(1)
else:
    INSTALL_REQUIRES.append("requests[security]")
    INSTALL_REQUIRES.append("requests")
    INSTALL_REQUIRES.append("dateutils")
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
packages=PACKAGES,
install_requires=INSTALL_REQUIRES
) | {"/domaininformation/__init__.py": ["/domaininformation/domaininformation.py"]} |
45,603 | neu5ron/domaininformation | refs/heads/master | /domaininformation/__init__.py | from .domaininformation import DomainInformation | {"/domaininformation/__init__.py": ["/domaininformation/domaininformation.py"]} |
45,604 | neu5ron/domaininformation | refs/heads/master | /domaininformation/domaininformation.py | #!/usr/bin/env python2.7
import re
# Import shared DB helpers; DBConnections also configures 'logging_file'.
try:
    from . import DBConnections
    from DBConnections import logging_file
except ImportError as error:
    print 'Could not import function.'
    print error
# Regex to verify a valid domain. Uses most of the RFC, although still allows things not necessarily in the RFC like two or more -- ie: google--analytics.com (which is malicious) and allows for IDN domain names.
domain_regex = r'(([\da-zA-Z])([_\w-]{,62})\.){,127}(([\da-zA-Z])[_\w-]{,61})?([\da-zA-Z]\.((xn\-\-[a-zA-Z\d]+)|([a-zA-Z\d]{2,})))'
# Anchor at end-of-string so trailing junk is rejected.
domain_regex = '{0}$'.format(domain_regex)
valid_domain_name_regex = re.compile(domain_regex, re.IGNORECASE)
class DomainInvalidLevels(Exception):
    """Raised when a domain name fails the RFC-style level validation.

    Fix: now derives from Exception. The original was a bare (old-style)
    class; raising non-Exception classes is deprecated in Python 2 and a
    TypeError in Python 3. Existing 'except DomainInvalidLevels' handlers
    are unaffected.
    """
    pass
class DomainInformation(object):
    """Validate a domain name and expose helpers for per-level breakdown and
    Alexa/OpenDNS popularity rank lookups."""
    def __init__(self, domain_name):
        """
        :param domain_name: The Domain name to gather information for.
            Normalised to lower-case ascii and validated against
            valid_domain_name_regex; sets self.IsDomain accordingly.
        :return:
        """
        self.domain_name = domain_name
        try:
            # Normalise: lower-case, trim whitespace, require plain ascii.
            self.domain_name = self.domain_name.lower().strip().encode('ascii')
            self.valid_encoding = True
            if re.match(valid_domain_name_regex, self.domain_name):
                self.IsDomain = True
            else:
                raise DomainInvalidLevels
        except (UnicodeEncodeError, ValueError, AttributeError) as error:
            # Non-ascii input or a non-string value (e.g. None).
            self.valid_encoding = False
            self.IsDomain = False
            print r'{0} is not valid. It should be input as an ascii string.'.format(self.domain_name)
            logging_file.error( r'{0} is not valid. It should be input as an ascii string.'.format(self.domain_name))
        except DomainInvalidLevels:
            self.IsDomain = False
            print r'{0} is not valid. It does not match the RFC.'.format(self.domain_name)
            logging_file.error( r'{0} is not valid. It does not match the RFC.'.format(self.domain_name))
    @classmethod
    def AlexaDatabase(self, load_in_memory=False, number_to_load_in_memory=1000000):
        """
        Download (when stale) and open the Alexa top-sites database, caching
        it as a class attribute.
        # NOTE(review): declared @classmethod but the first parameter is named
        # 'self'; it actually receives the class, so AlexaDB is shared by all
        # instances.
        :param load_in_memory: Bool. load the database into memory, only recommend if one script is calling this..
        Not if
        script will be run over and over
        :param number_to_load_in_memory: Int. if loading into memory how many of the database to load
        """
        self.LoadAlexaDBInMemory = load_in_memory
        self.AlexaDBNumberToLoadInMemory = number_to_load_in_memory
        # Download Alexa DB
        DBConnections.DownloadAlexaDB()
        self.AlexaDB = DBConnections.GetAlexaDB(self.LoadAlexaDBInMemory, self.AlexaDBNumberToLoadInMemory)
    @classmethod
    def OpenDNSDatabase(self, load_in_memory=False, number_to_load_in_memory=1000000):
        """
        Download (when stale) and open the OpenDNS top-domains database,
        caching it as a class attribute (see AlexaDatabase note about 'self').
        :param load_in_memory: Bool. load the database into memory, only recommend if one script is calling this..
        Not if
        script will be run over and over
        :param number_to_load_in_memory: Int. if loading into memory how many of the database to load
        """
        self.LoadOpenDNSDBInMemory = load_in_memory
        self.OpenDNSDBNumberToLoadInMemory = number_to_load_in_memory
        # Download penDNS DB
        DBConnections.DownloadOpenDNSDB()
        self.OpenDNSDB = DBConnections.GetOpenDNSDB(self.LoadOpenDNSDBInMemory, self.OpenDNSDBNumberToLoadInMemory)
    def level_domain_info(self):
        """
        Get the length and level of each domain/http host split by level(ie:'.').
        Levels are numbered from the TLD upward (1 = TLD).
        >>> from domaininformation import DomainInformation
        >>> from pprint import pprint
        >>> pprint( DomainInformation(domain_name='www.google.com').level_domain_info() )
        {'level_domain': {'1': 'com',
        'length_1': 3,
        '2': 'google',
        'length_2': 6,
        '3': 'www',
        'length_3': 3,
        'any': ['com', 'google', 'www'],
        'any_length': [3, 6, 3],
        'total': 2,
        'total_length': 12}}
        >>> pprint( DomainInformation(domain_name='NotADomain').level_domain_info() )
        "notadomain" Is not a domain
        {'level_domain': None}
        """
        ld_information = {'level_domain': None}
        if self.IsDomain:
            domain_split = self.domain_name.split('.')
            total = len(domain_split)
            # Length of the name with separators removed.
            total_length = len(self.domain_name.replace('.', ''))
            ld_information = {'level_domain': {}}
            any_ld = list()
            any_ld_length = list()
            # Enumerate levels right-to-left so level 1 is the TLD.
            for ld_number, ld_value in enumerate(reversed(domain_split)):
                ld_number += 1
                ld_length = len(ld_value)
                ld_information['level_domain'].update({'%s' % ld_number: ld_value})
                ld_information['level_domain'].update({'length_%s' % ld_number: ld_length})
                any_ld.append(ld_value)
                any_ld_length.append(ld_length)
            # De-duplicated labels and lengths across all levels.
            ld_information['level_domain'].update({'any': list(set(any_ld))})
            ld_information['level_domain'].update({'any_length': list(set(any_ld_length))})
            ld_information['level_domain'].update({'total_length': total_length})
            ld_information['level_domain'].update({'total': total})
            return ld_information
        return ld_information
    def get_alexa_rank(self):
        """
        Get the alexa rank of the first and second level domain (ie: google.com).
        Rank will be based on the max of the sixth level domain and will iterate all the way down to the
        first and second level.
        ie: www.google.com would match google.com in the database.
        Returns:
            Dictionary: {'alexa_rank': Int(AlexaRank)}
        >>> from domaininformation import DomainInformation
        >>> print DomainInformation(domain_name='www.google.com').get_alexa_rank()
        {'alexa_rank': 1}
        >>> print DomainInformation(domain_name='NotADomain').get_alexa_rank()
        "notadomain" Is not a domain
        {'alexa_rank': None}
        """
        alexa_rank = {'alexa_rank': None}
        if self.IsDomain:
            # At most the last six labels are considered.
            level_domain = self.domain_name.split('.')[-6:]
            level_domain_length = len(level_domain)
            try:
                # Fast path: dict lookup when the DB was loaded into memory.
                if self.AlexaDB:
                    for n in range(level_domain_length - 1):
                        domain = '%s.%s' % ('.'.join(level_domain[:-1][n:]), level_domain[-1])
                        rank = self.AlexaDB.get(domain)
                        if rank:
                            alexa_rank['alexa_rank'] = rank
                            break
                else:
                    raise AttributeError
            except AttributeError:
                # Slow path: AlexaDB missing or a csv.reader (no .get); scan rows.
                domains = ['%s.%s' % ('.'.join(level_domain[:-1][n:]), level_domain[-1]) for n in
                           range(level_domain_length - 1)]
                # Sometimes trouble reading file / index out of range
                try:
                    self.AlexaDatabase()
                    for ranking in self.AlexaDB:
                        for domain in domains:
                            if domain == ranking[1]:
                                alexa_rank['alexa_rank'] = int(ranking[0])
                                break
                except IndexError:
                    pass
        return alexa_rank
    def get_opendns_rank(self):
        """
        Get the opendns rank of the first and second level domain (ie: google.com).
        Rank will be based on the max of the sixth level domain and will iterate all the way down to the
        first and second level.
        ie: www.google.com would match google.com in the database.
        Returns:
            Dictionary: {'opendns_rank': Int(OpenDNSRank)}
        >>> from domaininformation import DomainInformation
        >>> print DomainInformation(domain_name='www.google.com').get_opendns_rank()
        {'opendns_rank': 1}
        >>> print DomainInformation(domain_name='NotADomain').get_opendns_rank()
        "notadomain" Is not a domain
        {'opendns_rank': None}
        """
        opendns_rank = {'opendns_rank': None}
        if self.IsDomain:
            # Mirrors get_alexa_rank against the OpenDNS database.
            level_domain = self.domain_name.split('.')[-6:]
            level_domain_length = len(level_domain)
            try:
                if self.OpenDNSDB:
                    for n in range(level_domain_length - 1):
                        domain = '%s.%s' % ('.'.join(level_domain[:-1][n:]), level_domain[-1])
                        rank = self.OpenDNSDB.get(domain)
                        if rank:
                            opendns_rank['opendns_rank'] = rank
                            break
                else:
                    raise AttributeError
            except AttributeError:
                domains = ['%s.%s' % ('.'.join(level_domain[:-1][n:]), level_domain[-1]) for n in
                           range(level_domain_length - 1)]
                # Sometimes trouble reading file / index out of range
                try:
                    self.OpenDNSDatabase()
                    for ranking in self.OpenDNSDB:
                        for domain in domains:
                            if domain == ranking[1]:
                                opendns_rank['opendns_rank'] = int(ranking[0])
                                break
                except IndexError:
                    pass
        return opendns_rank
    def is_domain(self):
        # True when the constructor accepted the name as a valid domain.
        return self.IsDomain
45,605 | neu5ron/domaininformation | refs/heads/master | /domaininformation/DBConnections.py | #!/usr/bin/env python2.7
from datetime import datetime
import os, sys
import csv
from StringIO import StringIO
from zipfile import ZipFile
import requests
from dateutil import parser
import logging
from logging import handlers
######################################## # Edit If Need Be
base_directory = os.path.join( os.path.expanduser("~"), "domaininformation" ) # Directory where the databases will be stored (currently home directory)
hours_to_pull_new_geoip_db = 120#TOOD:Change back # Use this variable in hours to determine how often to download and update the local databases
########################################
# Set logging
log_file = os.path.join ( '/tmp', 'domaininformation.log' )
logging_file = logging.getLogger(__name__)
logging_file.setLevel(logging.DEBUG)
# NOTE(review): maxBytes=5 rotates after ~5 bytes, effectively truncating the
# log on nearly every record — confirm whether a larger size was intended.
logging_file_handler = handlers.RotatingFileHandler( log_file, maxBytes=5, backupCount=0 )
info_format = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s - Function: %(funcName)s - LineNumber: %(lineno)s - %(message)s')
logging_file_handler.setFormatter(info_format)
logging_file.addHandler(logging_file_handler)
def CreateDBDirectory(Directory):
    """Create *Directory* if it does not already exist; exit the process on failure."""
    if not os.path.exists(Directory):
        logging_file.info('{0} does not exist. Creating it now.'.format(Directory))
        try:
            # os.mkdir (not makedirs): the parent directory is expected to exist.
            os.mkdir(Directory)
        except OSError as error:
            print 'Failed to create {0}. Due to:\n{1}'.format(Directory, error)
            logging_file.error('Failed to create {0}. Due to:\n{1}'.format(Directory, error))
            sys.exit(1)
# Create Base Directory
CreateDBDirectory(base_directory)
# Create Alexa Directory
Alexa_directory = os.path.join(base_directory, 'alexa')
Alexa_filename = 'top-1m.csv'# Default alexa filename
CreateDBDirectory(Alexa_directory)
# Create OpenDNS Directory
OpenDNS_directory = os.path.join( base_directory, 'opendns' )
OpenDNS_filename = 'top-1m.csv'# Default OpenDNS filename (same name as the Alexa list)
CreateDBDirectory(OpenDNS_directory)
def DownloadAlexaDB(filename=Alexa_filename, download_url='https://s3.amazonaws.com/alexa-static/top-1m.csv.zip'):
"""
Update Or Download information from Alexa
:param filename:
:param download_url:
:return:
"""
current_time = datetime.utcnow()
need_to_download = False
file_last_downloaded = os.path.join(Alexa_directory, 'last_downloaded_%s.txt') % filename # File that will be used to determine the last time the DBs were downloaded
if os.path.exists(os.path.join(Alexa_directory, filename)):
try:
# Check to see if download timestamp exists and if it does see time diff since download
if os.path.exists(file_last_downloaded):
with open(file_last_downloaded, 'r+') as lastdlf:
last_downloaded = lastdlf.read().strip()
# File is blank
if not last_downloaded:
lastdlf.write(str(current_time))
else:
time_diff = (current_time - parser.parse(last_downloaded)).total_seconds()
if time_diff > hours_to_pull_new_geoip_db*3600:
need_to_download = True
else:
# Set download timestamp if it was not downloaded using this script and this is the first time the script is ran
with open(file_last_downloaded, 'w+') as lastdlf:
lastdlf.write(str(current_time))
except IOError as error:
print 'Could not download and write Alexa database due to %s.\n'%error
logging_file.error( 'Could not download and write Alexa database. Due to:\n{0}'.format( error ) )
sys.exit(1)
else:
need_to_download = True
if need_to_download:
print 'Alexa file needs to be updated or does not exist!\nTrying to download it to %s/%s\n'%(Alexa_directory, filename)
logging_file.info( 'Alexa file needs to be updated or does not exist! Trying to download it to "{0}/{1}"'.format(Alexa_directory, filename))
try:
response = requests.get( download_url, timeout=(10,5) )
with open(os.path.join(Alexa_directory, filename), 'wb') as downloaded_file, open(file_last_downloaded, 'w') as lastdownloadf, ZipFile(StringIO(response.content), 'r') as zipfile:
downloaded_file.write(zipfile.open(filename).read())
lastdownloadf.write(str(current_time))
zipfile.close()
downloaded_file.close()
lastdownloadf.close()
except IOError as error:
print 'Could not download and write Alexa database due to %s.\n'%error
logging_file.error( 'Could not download and write Alexa database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.HTTPError as error:
print 'Could not download and write Alexa database due to %s.\n'%error
logging_file.error( 'Could not download and write Alexa database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.Timeout as error:
print 'Could not download and write Alexa database due to %s.\n'%error
logging_file.error( 'Could not download and write Alexa database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.ConnectionError as error:
print 'Could not download and write Alexa database due to %s.\n'%error
logging_file.error( 'Could not download and write Alexa database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.TooManyRedirects as error:
print 'Could not download and write Alexa database due to %s.\n'%error
logging_file.error( 'Could not download and write Alexa database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.URLRequired as error:
print 'Could not download and write Alexa database due to %s.\n'%error
logging_file.error( 'Could not download and write Alexa database. Due to:\n{0}'.format( error ) )
sys.exit(1)
def DownloadOpenDNSDB(filename=OpenDNS_filename, download_url='https://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip'):
"""
Update Or Download information from OpenDNS
:param filename:
:param download_url:
:return:
"""
current_time = datetime.utcnow()
need_to_download = False
file_last_downloaded = os.path.join(OpenDNS_directory, 'last_downloaded_%s.txt') % filename # File that will be used to determine the last time the DBs were downloaded
if os.path.exists(os.path.join(OpenDNS_directory, filename)):
try:
# Check to see if download timestamp exists and if it does see time diff since download
if os.path.exists(file_last_downloaded):
with open(file_last_downloaded, 'r+') as lastdlf:
last_downloaded = lastdlf.read().strip()
# File is blank
if not last_downloaded:
lastdlf.write(str(current_time))
else:
time_diff = (current_time - parser.parse(last_downloaded)).total_seconds()
if time_diff > hours_to_pull_new_geoip_db*3600:
need_to_download = True
else:
# Set download timestamp if it was not downloaded using this script and this is the first time the script is ran
with open(file_last_downloaded, 'w+') as lastdlf:
lastdlf.write(str(current_time))
except IOError as error:
print 'Could not download and write OpenDNS database due to %s.\n'%error
logging_file.error( 'Could not download and write OpenDNS database. Due to:\n{0}'.format( error ) )
sys.exit(1)
else:
need_to_download = True
if need_to_download:
print 'OpenDNS file needs to be updated or does not exist!\nTrying to download it to %s/%s\n'%(OpenDNS_directory, filename)
logging_file.info( 'OpenDNS file needs to be updated or does not exist! Trying to download it to "{0}/{1}"'.format(OpenDNS_directory, filename))
try:
response = requests.get( download_url, timeout=(10,5) )
with open(os.path.join(OpenDNS_directory, filename), 'wb') as downloaded_file, open(file_last_downloaded, 'w') as lastdownloadf, ZipFile(StringIO(response.content), 'r') as zipfile:
downloaded_file.write(zipfile.open(filename).read())
lastdownloadf.write(str(current_time))
zipfile.close()
downloaded_file.close()
lastdownloadf.close()
except IOError as error:
print 'Could not download and write OpenDNS database due to %s.\n'%error
logging_file.error( 'Could not download and write OpenDNS database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.HTTPError as error:
print 'Could not download and write OpenDNS database due to %s.\n'%error
logging_file.error( 'Could not download and write OpenDNS database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.Timeout as error:
print 'Could not download and write OpenDNS database due to %s.\n'%error
logging_file.error( 'Could not download and write OpenDNS database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.ConnectionError as error:
print 'Could not download and write OpenDNS database due to %s.\n'%error
logging_file.error( 'Could not download and write OpenDNS database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.TooManyRedirects as error:
print 'Could not download and write OpenDNS database due to %s.\n'%error
logging_file.error( 'Could not download and write OpenDNS database. Due to:\n{0}'.format( error ) )
sys.exit(1)
except requests.URLRequired as error:
print 'Could not download and write OpenDNS database due to %s.\n'%error
logging_file.error( 'Could not download and write OpenDNS database. Due to:\n{0}'.format( error ) )
sys.exit(1)
def GetAlexaDB( load_in_memory=False, number_to_load_in_memory=1000000 ):
    """
    :param load_in_memory: whether to load the alexa DB into memory as a dictionary for fast lookups. Default=False
    :param number_to_load_in_memory: if load_in_memory=True than decide how much of the alexa DB to load. Default=1,000,000
    :return: alexa_db - a csv.reader over the still-open file when
        load_in_memory=False (the handle is deliberately left open so the
        reader stays usable), otherwise a dict mapping domain -> int rank.
    """
    if not load_in_memory:
        alexacsvfile = open(os.path.join(Alexa_directory, Alexa_filename), 'rb')
        alexa_db = csv.reader(alexacsvfile, delimiter=',')
    else:
        alexa_db = dict()
        with open(os.path.join(Alexa_directory, Alexa_filename), 'rb') as alexacsvfile:
            alexa_file = csv.reader( alexacsvfile, delimiter=',' )
            for num, row in enumerate(alexa_file):
                # Stop after the requested row count (hard cap just past 1M).
                if num != number_to_load_in_memory and num != 1000001:
                    alexa_db.setdefault( row[1], int(row[0]) )
                else:
                    break
            alexacsvfile.close()
    return alexa_db
def GetOpenDNSDB( load_in_memory=False, number_to_load_in_memory=1000000 ):
    """
    :param load_in_memory: whether to load the OpenDNS DB into memory as a dictionary for fast lookups. Default=False
    :param number_to_load_in_memory: if load_in_memory=True then decide how much of the OpenDNS DB to load. Default=1,000,000
    :return: opendns_db - a csv.reader over the still-open file when
        load_in_memory=False (the handle is deliberately left open so the
        reader stays usable), otherwise a dict mapping domain -> int rank.
    """
    if not load_in_memory:
        # Fix: open OpenDNS_filename — the original opened Alexa_filename here.
        # The two constants currently share the same value so behaviour is
        # unchanged, but the wrong constant would break if either was renamed.
        opendnscsvfile = open(os.path.join(OpenDNS_directory, OpenDNS_filename), 'rb')
        opendns_db = csv.reader(opendnscsvfile, delimiter=',')
    else:
        opendns_db = dict()
        with open(os.path.join(OpenDNS_directory, OpenDNS_filename), 'rb') as opendnscsvfile:
            opendns_file = csv.reader( opendnscsvfile, delimiter=',' )
            for num, row in enumerate(opendns_file):
                # Stop after the requested row count (hard cap just past 1M).
                if num != number_to_load_in_memory and num != 1000001:
                    opendns_db.setdefault( row[1], int(row[0]) )
                else:
                    break
            opendnscsvfile.close()
    return opendns_db
| {"/domaininformation/__init__.py": ["/domaininformation/domaininformation.py"]} |
45,606 | xizhou175/microblog | refs/heads/master | /mysite/microblog/urls.py | from django.urls import path
from . import views
# URL namespace for the app (referenced as 'microblog:<name>' in reverse()).
app_name = 'microblog'
urlpatterns = [
    path('index/', views.index, name='index'),
    path('login/', views.login, name='login'),
    path('logout/', views.logout, name='logout'),
    path('register/', views.register, name='register'),
    path('user/<str:username>', views.user, name='user'),
    path('edit_profile/', views.edit_profile, name='edit_profile'),
    path('follow/<str:username>', views.follow, name='follow'),
    path('unfollow/<str:username>', views.unfollow, name='unfollow'),
    path('explore/', views.explore, name='explore')
]
| {"/mysite/microblog/views.py": ["/mysite/microblog/forms.py", "/mysite/microblog/models.py"], "/mysite/microblog/forms.py": ["/mysite/microblog/models.py"], "/mysite/microblog/tests.py": ["/mysite/microblog/models.py"]} |
45,607 | xizhou175/microblog | refs/heads/master | /mysite/microblog/models.py | from django.db import models
from datetime import datetime
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth.models import AbstractUser
from hashlib import md5
# Create your models here.
class User(AbstractUser):
    """Custom user with follow relationships and Gravatar avatar helpers."""
    username = models.CharField(max_length=64, db_index=True, unique=True)
    email = models.EmailField(unique=True, db_index=True)
    password = models.CharField(max_length=128)
    about_me = models.CharField(max_length=140, null=True)
    # NOTE(review): self-referential M2M; the test suite reads the reverse
    # side as user_set, which suggests an asymmetric relation was intended —
    # confirm whether symmetrical=False (and a related_name) should be set.
    following = models.ManyToManyField(to='User') # users followed by this user
    def __str__(self):
        return '<User {}>'.format(self.username)
    def avatar_128(self):
        # Gravatar URL derived from the MD5 of the lower-cased email, 128px.
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, 128)
    def avatar_36(self):
        # Same as avatar_128 but 36px (used for small thumbnails).
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, 36)
    def is_following(self, user):
        # True when *user* appears in this user's 'following' relation.
        return self.following.filter(username=user.username).count() > 0
    def follow(self, user):
        # Idempotent: adding an existing link is skipped.
        if not self.is_following(user):
            self.following.add(user)
    def unfollow(self, user):
        # Idempotent: removing a non-existent link is skipped.
        if self.is_following(user):
            self.following.remove(user)
    def followed_posts(self):
        """Posts by this user plus everyone they follow.

        # NOTE(review): ordered by ascending timestamp while the views order
        # by '-timestamp'; confirm the intended ordering.
        """
        selfuser = User.objects.prefetch_related('following__post').get(username=self.username)
        all_posts = Post.objects.filter(user__username=selfuser.username).all()
        # Union own posts with each followed user's posts.
        for u in selfuser.following.all():
            all_posts = all_posts | u.post.all()
        return all_posts.order_by('timestamp')
class Post(models.Model):
    """A single 140-character microblog post belonging to one User."""
    body = models.CharField(max_length=140)
    # NOTE(review): datetime.utcnow yields naive datetimes; with USE_TZ Django
    # recommends timezone.now — confirm project settings.
    timestamp = models.DateTimeField(db_index=True, default=datetime.utcnow)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='post')
    def __repr__(self):
        return '<Post {}>'.format(self.body)
| {"/mysite/microblog/views.py": ["/mysite/microblog/forms.py", "/mysite/microblog/models.py"], "/mysite/microblog/forms.py": ["/mysite/microblog/models.py"], "/mysite/microblog/tests.py": ["/mysite/microblog/models.py"]} |
45,608 | xizhou175/microblog | refs/heads/master | /mysite/microblog/views.py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .forms import LoginForm, RegistrationForm, EditProfileForm, EmptyForm, PostForm
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth import get_user, login as Login, logout as Logout
from django.contrib.auth.decorators import login_required
from .models import User, Post
from django.core.paginator import Paginator
import json
# Create your views here.
@login_required(login_url='/microblog/login/')
def index(request):
    """Home timeline: handle new-post submission and show followed users' posts."""
    current_user = get_user(request)
    # Bound to POST data; on GET this binds empty data, fails validation, and
    # only the timeline is rendered.
    form = PostForm(request.POST)
    if form.is_valid():
        formdata = form.cleaned_data
        post = Post(body=formdata['post'], user=current_user)
        post.save()
        messages.add_message(request, messages.INFO, 'Your post is now live')
        # Post/Redirect/Get so a refresh does not resubmit the post.
        return HttpResponseRedirect(reverse('microblog:index'))
    '''
    posts = [
        {
            'author': {'username': 'John'},
            'body': 'Beautiful day in Portland!'
        },
        {
            'author': {'username': 'Susan'},
            'body': 'The Avengers movie was so cool!'
        }
    ]
    '''
    posts = current_user.followed_posts().all()
    # Two posts per page; the 'page' query parameter selects the page.
    paginator = Paginator(posts, 2)
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)
    return render(request, 'microblog/index.html', {'title': 'Home', 'current_user': current_user, 'posts': posts,
                                                    'form': form, 'page_obj': page_obj})
@login_required(login_url='/microblog/login/')
def user(request, username):
    """Profile page for *username* showing their posts, newest first."""
    current_user = get_user(request)
    # NOTE(review): no 404 handling — an unknown username leaves 'user' as
    # None and the attribute accesses below raise; confirm intended behaviour.
    user = User.objects.filter(username=username).first()
    # Whether the viewer already follows this profile (drives the button).
    following = current_user.is_following(user)
    posts = user.post.order_by('-timestamp')
    paginator = Paginator(posts, 2)
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)
    return render(request, 'microblog/user.html', {'user': user, 'posts': posts, 'current_user': current_user,
                                                   'following': following, 'page_obj': page_obj})
@login_required(login_url='/microblog/login')
def explore(request):
    """Global timeline: every post from every user, newest first."""
    current_user = get_user(request)
    posts = Post.objects.order_by('-timestamp').all()
    paginator = Paginator(posts, 2)
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)
    # Reuses the index template; no 'form' in context, so no compose box.
    return render(request, 'microblog/index.html', {'title': 'Explore', 'posts': posts, 'current_user': current_user,
                                                    'page_obj': page_obj})
def login(request):
    """Authenticate a user; redirect to ?next= (or index) on success."""
    # Already-authenticated visitors skip the login page entirely.
    if get_user(request).is_authenticated:
        return HttpResponseRedirect(reverse('microblog:index'))
    form = LoginForm(request.POST)
    if form.is_valid():
        formdata = form.cleaned_data
        user = User.objects.filter(username=formdata['username']).first()
        # Same message for unknown user and wrong password (no user enumeration).
        if user is None or not user.check_password(formdata['password']):
            messages.add_message(request, messages.INFO, 'Invalid username or password')
            return HttpResponseRedirect(reverse('microblog:login'))
        Login(request, user)
        # Honour the ?next= target that @login_required appended, if any.
        next_page = request.GET.get('next')
        if not next_page:
            next_page = reverse('microblog:index')
        messages.add_message(request, messages.INFO, 'Login requested for user {}, remember_me={}'.format(
            formdata['username'], formdata['remember_me']))
        return HttpResponseRedirect(next_page)
    return render(request, 'microblog/login.html', {'form': form})
def logout(request):
    """End the current session and send the visitor back to the index page."""
    Logout(request)
    target = reverse('microblog:index')
    return HttpResponseRedirect(target)
def register(request):
    """Create a new account; already-authenticated users are sent to index."""
    if get_user(request).is_authenticated:
        return HttpResponseRedirect(reverse('microblog:index'))
    form = RegistrationForm(request.POST)
    if form.is_valid():
        formdata = form.cleaned_data
        user = User(username=formdata['username'], email=formdata['email'])
        # set_password hashes the raw password before it is stored.
        user.set_password(formdata['password'])
        user.save()
        messages.add_message(request, messages.INFO, 'Congratulations, you are now a registered user!')
        return HttpResponseRedirect(reverse('microblog:login'))
    return render(request, 'microblog/register.html', {'title': 'Register', 'form': form})
@login_required(login_url='/microblog/login/')
def edit_profile(request):
    """Let the logged-in user change their username and 'about me' text."""
    # NOTE(review): the form is not pre-populated with current values on GET;
    # confirm whether initial data was intended.
    form = EditProfileForm(request.POST)
    current_user = get_user(request)
    if form.is_valid():
        formdata = form.cleaned_data
        current_user.username = formdata['username']
        current_user.about_me = formdata['about_me']
        current_user.save()
        return HttpResponseRedirect(reverse('microblog:user', args=(current_user.username,)))
    return render(request, 'microblog/edit_profile.html', {'form': form, 'current_user': current_user})
@login_required(login_url='/microblog/login/')
def follow(request, username):
    """Make the logged-in user follow *username*, then redirect.

    Redirects to the target's profile on success, or to the index page when
    the target does not exist or the form fails validation.
    """
    current_user = get_user(request)
    # Fix: bind the field-less form to the POST data. The original passed the
    # request object itself, which only appeared to work because EmptyForm
    # has no fields to validate.
    form = EmptyForm(request.POST)
    if form.is_valid():
        user = User.objects.filter(username=username).first()
        if user is None:
            messages.add_message(request, messages.INFO, 'User {} not found.'.format(username))
            return HttpResponseRedirect(reverse('microblog:index'))
        if user == current_user:
            # Message has no placeholder, so the original's .format(username)
            # was a no-op and has been dropped.
            messages.add_message(request, messages.INFO, 'You cannot follow yourself')
            return HttpResponseRedirect(reverse('microblog:user', args=(user.username,)))
        current_user.follow(user)
        messages.add_message(request, messages.INFO, 'You are following {}'.format(username))
        return HttpResponseRedirect(reverse('microblog:user', args=(user.username,)))
    else:
        print('invalid')  # debug trace kept from original; consider logging instead
        return HttpResponseRedirect(reverse('microblog:index'))
@login_required(login_url='/microblog/login/')
def unfollow(request, username):
    """Make the logged-in user stop following *username*, then redirect.

    Redirects to the target's profile on success, or to the index page when
    the target does not exist or the form fails validation.
    """
    current_user = get_user(request)
    # Fix: bind the field-less form to the POST data. The original passed the
    # request object itself, which only appeared to work because EmptyForm
    # has no fields to validate.
    form = EmptyForm(request.POST)
    if form.is_valid():
        user = User.objects.filter(username=username).first()
        if user is None:
            messages.add_message(request, messages.INFO, 'User {} not found.'.format(username))
            return HttpResponseRedirect(reverse('microblog:index'))
        if user == current_user:
            # Message has no placeholder, so the original's .format(username)
            # was a no-op and has been dropped.
            messages.add_message(request, messages.INFO, 'You cannot unfollow yourself')
            return HttpResponseRedirect(reverse('microblog:user', args=(user.username,)))
        current_user.unfollow(user)
        messages.add_message(request, messages.INFO, 'You are not following {}'.format(username))
        return HttpResponseRedirect(reverse('microblog:user', args=(user.username,)))
    else:
        print('invalid')  # debug trace kept from original; consider logging instead
        return HttpResponseRedirect(reverse('microblog:index'))
| {"/mysite/microblog/views.py": ["/mysite/microblog/forms.py", "/mysite/microblog/models.py"], "/mysite/microblog/forms.py": ["/mysite/microblog/models.py"], "/mysite/microblog/tests.py": ["/mysite/microblog/models.py"]} |
45,609 | xizhou175/microblog | refs/heads/master | /mysite/microblog/forms.py | from django import forms
from django.forms import fields, EmailField
from .models import User
from django.core.exceptions import ValidationError
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
class LoginForm(forms.Form):
    """Credentials form for the login view."""
    username = fields.CharField(required=True)
    password = fields.CharField(widget=forms.PasswordInput, required=True)
    remember_me = fields.BooleanField(required=False)
class EditProfileForm(forms.Form):
    """Profile-editing form: username plus an optional 140-character bio."""
    username = fields.CharField(label='Username')
    about_me = fields.CharField(label='About me', min_length=0, max_length=140, required=False,
                                widget=forms.Textarea(attrs={'cols': 50, 'rows': 20}))
class RegistrationForm(forms.Form):
    """Signup form validating unique username/email and matching passwords."""
    username = fields.CharField(required=True, error_messages={'required': 'username cannot be empty'})
    email = fields.EmailField(required=True, error_messages={'required': 'email cannot be empty'})
    password = fields.CharField(widget=forms.PasswordInput, required=True,
                                error_messages={'required': 'password cannot be empty'})
    password2 = fields.CharField(widget=forms.PasswordInput, required=True,
                                 error_messages={'required': 'password cannot be empty'})

    def clean_username(self):
        """Reject usernames that are already taken."""
        user = User.objects.filter(username=self.cleaned_data['username']).first()
        if user is not None:
            raise ValidationError(message='Please use a different username.', code='invalid')
        else:
            return self.cleaned_data['username']

    def clean_email(self):
        """Reject email addresses that are already registered."""
        user = User.objects.filter(email=self.cleaned_data['email']).first()
        if user is not None:
            raise ValidationError(message='Please use a different email address.', code='invalid')
        else:
            return self.cleaned_data['email']

    def clean_password2(self):
        """Ensure both password fields match.

        Fix: return the cleaned password2 value itself. The original returned
        the whole cleaned_data dict, which Django then stored as the value of
        cleaned_data['password2'].
        """
        if self.cleaned_data.get('password') != self.cleaned_data.get('password2'):
            raise ValidationError(message='field must be equal to password', code='invalid')
        else:
            return self.cleaned_data['password2']
class EmptyForm(forms.Form):
    """Field-less form used by the follow/unfollow views."""
    pass
class PostForm(forms.Form):
    """Form for composing a new post (1-140 characters)."""
    post = fields.CharField(required=True, max_length=140, min_length=1,
                            widget=forms.Textarea(attrs={'cols': 50, 'rows': 10}))
'''
class LoginForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'password')
widgets = {
'password': forms.PasswordInput()
}
def clean_username(self):
user = User.objects.filter(username=self.cleaned_data['username']).first()
if user is None:
raise ValidationError(message='username is invalid.', code='invalid')
else:
return self.cleaned_data['username']
'''
'''
class RegistrationForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
widgets = {
'email': forms.EmailInput(),
}
def clean_username(self):
user = User.objects.filter(username=self.cleaned_data['username']).first()
if user is not None:
raise ValidationError(message='Please use a different username.', code='invalid')
else:
return self.cleaned_data['username']
def clean_email(self):
user = User.objects.filter(email=self.cleaned_data['email']).first()
if user is not None:
raise ValidationError(message='Please use a different email address.', code='invalid')
else:
return self.cleaned_data['email']
'''
| {"/mysite/microblog/views.py": ["/mysite/microblog/forms.py", "/mysite/microblog/models.py"], "/mysite/microblog/forms.py": ["/mysite/microblog/models.py"], "/mysite/microblog/tests.py": ["/mysite/microblog/models.py"]} |
45,610 | xizhou175/microblog | refs/heads/master | /mysite/microblog/tests.py | from datetime import datetime, timedelta
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import User, Post
class UserModelTests(TestCase):
    """Tests for the follow/unfollow relation and the followed-posts feed."""
    def test_follow(self):
        """Following and unfollowing must update both sides of the relation."""
        u1 = User(username='john', email='john@example.com')
        u2 = User(username='susan', email='susan@example.com')
        u1.save()
        u2.save()
        self.assertQuerysetEqual(u1.following.all(), [])
        self.assertQuerysetEqual(u1.user_set.all(), [])
        u1.follow(u2)
        self.assertTrue(u1.is_following(u2))
        self.assertEqual(u1.following.count(), 1)
        self.assertEqual(u1.following.first().username, 'susan')
        self.assertEqual(u2.user_set.count(), 1)
        self.assertEqual(u2.user_set.first().username, 'john')
        u1.unfollow(u2)
        self.assertFalse(u1.is_following(u2))
        self.assertEqual(u1.following.count(), 0)
        self.assertEqual(u2.user_set.count(), 0)
    def test_followed_posts(self):
        """followed_posts() must return only followed users' posts, newest first."""
        u1 = User(username='john', email='john@example.com')
        u2 = User(username='susan', email='susan@example.com')
        u3 = User(username='mary', email='mary@example.com')
        u4 = User(username='david', email='david@example.com')
        u1.save()
        u2.save()
        u3.save()
        u4.save()
        now = datetime.utcnow()
        # Stagger timestamps so the expected "newest first" order is unambiguous.
        p1 = Post(body="post from john", user=u1,
                  timestamp=now + timedelta(seconds=1))
        p2 = Post(body="post from susan", user=u2,
                  timestamp=now + timedelta(seconds=4))
        p3 = Post(body="post from mary", user=u3,
                  timestamp=now + timedelta(seconds=3))
        p4 = Post(body="post from david", user=u4,
                  timestamp=now + timedelta(seconds=2))
        p1.save()
        p2.save()
        p3.save()
        p4.save()
        u1.follow(u2)  # john follows susan
        u1.follow(u4)  # john follows david
        u2.follow(u3)  # susan follows mary
        u3.follow(u4)  # mary follows david
        # Comprehensions replace the original append loops (same ids, same order).
        l1 = [q.id for q in u1.followed_posts().all()]
        l2 = [q.id for q in u2.followed_posts().all()]
        l3 = [q.id for q in u3.followed_posts().all()]
        l4 = [q.id for q in u4.followed_posts().all()]
        self.assertEqual(l1, [p1.pk, p4.pk, p2.pk])
        self.assertEqual(l2, [p3.pk, p2.pk])
        self.assertEqual(l3, [p4.pk, p3.pk])
        self.assertEqual(l4, [p4.pk])
| {"/mysite/microblog/views.py": ["/mysite/microblog/forms.py", "/mysite/microblog/models.py"], "/mysite/microblog/forms.py": ["/mysite/microblog/models.py"], "/mysite/microblog/tests.py": ["/mysite/microblog/models.py"]} |
45,611 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/views.py | from urllib.parse import quote_plus
from django.shortcuts import render
from django.shortcuts import render_to_response, get_object_or_404
from blog.models import Post
# Create your views here.
def blogs(req):
    """Render the blog landing page with the ten most recent posts."""
    entries = Post.objects.all()[:10]
    # render() replaces render_to_response(), which is deprecated since
    # Django 2.0 and removed in 3.0; render() also runs context processors.
    return render(req, 'blogs/blog_main.html', {'posts': entries})
def post_detail(req, slug):
    """Render one post, looked up by slug (404 if absent), with a URL-quoted
    copy of its body for share links."""
    postd = get_object_or_404(Post, slug=slug)
    shared_quote = quote_plus(postd.bodytext)
    context = {
        'post': postd,
        'shared_string': shared_quote,
    }
    # render() replaces the deprecated render_to_response() (removed in Django 3.0).
    return render(req, 'blogs/post_detail.html', context)
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,612 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/migrations/0005_posts_slug.py | # Generated by Django 2.0.10 on 2019-02-03 03:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds a slug field to the (then-named) 'posts' model.

    The one-off default of 1 only backfills existing rows (preserve_default=False).
    """
    dependencies = [
        ('blog', '0004_auto_20181215_2011'),
    ]
    operations = [
        migrations.AddField(
            model_name='posts',
            name='slug',
            field=models.SlugField(default=1, max_length=40),
            preserve_default=False,
        ),
    ]
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,613 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/cfree/settings.py | """
Django settings for cfree project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from decouple import config
from oscar.defaults import *
from oscar import OSCAR_MAIN_TEMPLATE_DIR
from oscar import get_core_apps
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PREPEND_WWW = True  # was the string 'True'; settings flags should be real booleans
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# (The duplicate USE_I18N that lived here was removed; it is set once in the
# internationalization section below.)

# PayPal Express checkout configuration (django-oscar-paypal).
PAYPAL_CURRENCY = 'USD'
PAYPAL_SANDBOX_MODE = config('PAYPAL_SANDBOX_MODE', cast=bool)
PAYPAL_SOLUTION_TYPE = 'Sole'
PAYPAL_BRAND_NAME = 'CFree'
PAYPAL_LANDING_PAGE = 'Billing'
PAYPAL_API_USERNAME = config('PAYPAL_API_USERNAME')
PAYPAL_API_PASSWORD = config('PAYPAL_API_PASSWORD')
PAYPAL_API_SIGNATURE = config('PAYPAL_API_SIGNATURE')

# django-oscar storefront options.
OSCAR_FROM_EMAIL = "cfree-notifications@cfreeforlife.com"
OSCAR_SEND_REGISTRATION_EMAIL = True  # was the string 'True'
OSCAR_SHOP_TAGLINE = ""
OSCAR_DEFAULT_CURRENCY = 'USD'

# Bug fix: decouple's config() returns a *string* unless cast= is given, and
# any non-empty string (including 'False') is truthy, so these security flags
# could never actually be switched off via the .env file. cast=bool makes
# decouple parse 'true'/'false' into real booleans.
SESSION_COOKIE_SECURE = config('SESSION_COOKIE_SECURE', cast=bool)
CSRF_COOKIE_SECURE = config('CSRF_COOKIE_SECURE', cast=bool)
SECURE_SSL_REDIRECT = config('SECURE_SSL_REDIRECT', cast=bool)

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OSCAR_ALLOW_ANON_CHECKOUT = True
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = ['www.cfreeforlife.com']
OSCAR_GOOGLE_ANALYTICS_ID = 'UA-134860038-1'
ADMINS = [('Aidan', 'razorprobowl@gmail.com')]
# Application definition
INSTALLED_APPS = [
    'blog',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'tinymce',
    'django.contrib.sites',
    'django.contrib.sitemaps',
    'django.contrib.flatpages',
    'widget_tweaks',
    'misc',
    'paypal',
    'compressor',
    'newsletter',
    'django_summernote'
    # get_core_apps() appends the oscar core apps, substituting the local
    # 'oscarfork.order' fork for oscar's stock order app.
] + get_core_apps(['oscarfork.order'])
SITE_ID = 7
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'oscar.apps.basket.middleware.BasketMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
]
ROOT_URLCONF = 'cfree.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): "CFree/venv/Scripts/cfree/templates" is a relative
        # path, so it resolves against the process working directory —
        # confirm it actually finds the templates in deployment.
        'DIRS': ["CFree/venv/Scripts/cfree/templates", OSCAR_MAIN_TEMPLATE_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'oscar.apps.search.context_processors.search_form',
                'oscar.apps.promotions.context_processors.promotions',
                'oscar.apps.checkout.context_processors.checkout',
                'oscar.apps.customer.notifications.context_processors.notifications',
                'oscar.core.context_processors.metadata',
            ],
        },
    },
]
WSGI_APPLICATION = 'cfree.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'SomeDay$cfree',
        'USER': 'SomeDay',
        'PASSWORD': '********',
        'HOST': '*******',
        'ATOMIC_REQUESTS': True,
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# oscar's EmailBackend lets customers log in with their email address;
# Django's ModelBackend remains as the username fallback.
AUTHENTICATION_BACKENDS = (
    'oscar.apps.customer.auth_backends.EmailBackend',
    'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
    os.path.join(BASE_DIR, "media")
]
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
]
# Mid-file import kept in place; moving it above oscar.defaults import could
# change evaluation order, so only a note is added here.
from django.utils.translation import ugettext_lazy as _
# Add a PayPal section to the oscar dashboard menu (list comes from oscar.defaults).
OSCAR_DASHBOARD_NAVIGATION.append(
    {
        'label': _('PayPal'),
        'icon': 'icon-globe',
        'children': [
            {
                'label': _('Express transactions'),
                'url_name': 'paypal-express-list',
            },
        ]
    })
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
}
# Order/line state machine for oscar.
OSCAR_INITIAL_ORDER_STATUS = 'Pending'
OSCAR_INITIAL_LINE_STATUS = 'Pending'
OSCAR_ORDER_STATUS_PIPELINE = {
    'Pending': ('Being processed', 'Cancelled',),
    'Being processed': ('Processed', 'Cancelled',),
    'Cancelled': (),
}
SUMMERNOTE_CONFIG = {
    # Using SummernoteWidget - iframe mode, default
    'iframe': True,
    # You can put custom Summernote settings
    'summernote': {
        # Change editor size
        'width': '100%',
        'height': '480',
        # Codemirror as codeview
        # If any codemirror settings are defined, it will include codemirror files automatically.
        'css': (
            '//cdnjs.cloudflare.com/ajax/libs/codemirror/5.29.0/theme/monokai.min.css',
        ),
        'codemirror': {
            'mode': 'htmlmixed',
            'lineNumbers': 'true',
            # You have to include theme file in 'css' or 'css_for_inplace' before using it.
            'theme': 'monokai',
        },
    }
}
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,614 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/templatetags/blog_tags.py | from django import template
from blog.models import Post
register = template.Library()
@register.simple_tag()
def render_bloglist():
    """Template tag returning the six most recent blog posts."""
    return Post.objects.all()[:6]
@register.simple_tag()
def render_topblog():
    """Template tag returning the single most recent post.

    Note: .get() on the one-element slice raises DoesNotExist if there are
    no posts at all (same behavior as the original implementation).
    """
    return Post.objects.all()[:1].get()
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,615 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/migrations/0002_posts_image.py | # Generated by Django 2.1.4 on 2018-12-14 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds an image field to the (then-named) 'posts' model."""
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='posts',
            name='image',
            field=models.ImageField(default=0, upload_to='media_cdn/img'),
            preserve_default=False,
        ),
    ]
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,616 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/misc/views.py | from django.shortcuts import render
from django.shortcuts import render_to_response
# Create your views here.
def mission(req):
    """Render the static Mission page."""
    # render() replaces render_to_response(), deprecated since Django 2.0
    # and removed in 3.0.
    return render(req, 'misc/Mission.html')
def spotlightcharity(req):
    """Render the static Spotlight Charity page."""
    return render(req, 'misc/SpotlightCharity.html')
def about(req):
    """Render the static About page."""
    return render(req, 'misc/about.html')
def signup(req):
return render_to_response('misc/signup.html') | {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,617 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/migrations/0008_auto_20190508_1327.py | # Generated by Django 2.0.13 on 2019-05-08 13:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Post.altimg and alters Post.bodytext to a plain TextField."""
    dependencies = [
        ('blog', '0007_auto_20190419_0237'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='altimg',
            field=models.CharField(default='t', max_length=100),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='post',
            name='bodytext',
            field=models.TextField(),
        ),
    ]
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,618 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/admin.py | from django.contrib import admin
from blog.models import Post
from django import forms
from django_summernote.admin import SummernoteModelAdmin
# Register your models here.
class PostAdmin(SummernoteModelAdmin):
    """Admin for Post that edits every text field with the Summernote WYSIWYG widget."""
    summernote_fields = '__all__'
admin.site.register(Post, PostAdmin)
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,619 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/templatetags/filters.py | #from django import template
#register = template.Library()
##def file_exists(value):
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,620 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/models.py | from django.db import models
import datetime
from tinymce import models as tinymce_models
# Create your models here.
class Post(models.Model):
    """A blog entry, listed newest-first on the blog index."""
    author = models.CharField(max_length=30)
    title = models.CharField(max_length=100)
    slug = models.SlugField(max_length=40)  # looked up by blog.views.post_detail
    bodytext = models.TextField()
    timestamp = models.DateTimeField()
    image = models.ImageField(upload_to='img')
    altimg = models.CharField(max_length=100)  # presumably the <img> alt text — confirm in templates
    class Meta:
        # Newest posts first everywhere Post querysets are used unordered.
        ordering = ['-timestamp',]
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,621 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/migrations/0003_auto_20181214_1959.py | # Generated by Django 2.1.4 on 2018-12-15 00:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: makes the 'posts' image field optional (blank/null)."""
    dependencies = [
        ('blog', '0002_posts_image'),
    ]
    operations = [
        migrations.AlterField(
            model_name='posts',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='media_cdn/img'),
        ),
    ]
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,622 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/misc/urls.py | from django.urls import path
from .views import mission
from .views import about
from .views import signup
app_name = 'misc'
# NOTE(review): views.spotlightcharity exists but has no route here — confirm
# whether it should be exposed.
urlpatterns = [
    path('mission', mission, name='mission'),
    path('about', about, name='about'),
    path('signup', signup, name='signup')
] | {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,623 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/cfree/urls.py | """cfree URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps import views
from django.urls import include, path, re_path
from django.views.static import serve

from oscar.app import application
from paypal.express.dashboard.app import application as paypal

from sitemaps import base_sitemaps
urlpatterns = [
    path('admin/', admin.site.urls),
    path('tinymce/', include('tinymce.urls')),
    path('', application.urls, name='home'),
    path('summernote/', include('django_summernote.urls')),
    path('newsletter/', include('newsletter.urls')),
    path('blog/', include('blog.urls', namespace='blogs')),
    path('', include('misc.urls', namespace='misc')),
    path('checkout/paypal/', include('paypal.express.urls')),
    # Bug fix: path() treats its route as a literal (with <converters>), never
    # as a regex, so the original regex-style routes ('sitemap\.xml$' and
    # 'sitemap-(?P<section>...)\.xml$') could never match and the literal
    # sitemap route was duplicated. Regex routes must use re_path().
    path('sitemap.xml', views.sitemap, {'sitemaps': base_sitemaps},
         name='django.contrib.sitemaps.views.sitemap'),
    re_path(r'^sitemap-(?P<section>.+)\.xml$', views.sitemap, {'sitemaps': base_sitemaps},
            name='django.contrib.sitemaps.views.sitemap'),
]
if settings.DEBUG:
    # Serve static/media straight from Django in development only.
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,624 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/oscarfork/order/config.py | from oscar.apps.order import config
class OrderConfig(config.OrderConfig):
    """App config for the local fork of oscar's order app.

    Registered via get_core_apps(['oscarfork.order']) in settings, which swaps
    this fork in for oscar.apps.order.
    """
    name = 'oscarfork.order'
45,625 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/oscarfork/order/__init__.py | default_app_config = 'oscarfork.order.config.OrderConfig'
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,626 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/urls.py | from django.urls import path
from .views import blogs, post_detail
app_name = 'blog'
urlpatterns = [
    path('', blogs, name='home'),
    # Detail pages, e.g. /blog/post/my-first-post (slug captured as a string).
    path('post/<slug>', post_detail, name='post'),
]
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,627 | deleteSomeDay/CFree | refs/heads/master | /venv/Scripts/cfree/blog/migrations/0004_auto_20181215_2011.py | # Generated by Django 2.1.4 on 2018-12-16 01:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_auto_20181214_1959'),
]
operations = [
migrations.AlterField(
model_name='posts',
name='image',
field=models.ImageField(upload_to='img'),
),
]
| {"/venv/Scripts/cfree/misc/urls.py": ["/venv/Scripts/cfree/misc/views.py"], "/venv/Scripts/cfree/blog/urls.py": ["/venv/Scripts/cfree/blog/views.py"]} |
45,639 | DarrenIce/bilibili-recording-monitor | refs/heads/master | /main.py | from display import Display
d = Display()
d.run() | {"/main.py": ["/display.py"]} |
45,640 | DarrenIce/bilibili-recording-monitor | refs/heads/master | /display.py | import datetime
import time
from rich.live import Live
from rich.table import Table
from rich.console import Console
from rich import box
from rich.console import RenderGroup
from dataclasses import dataclass
import psutil
import json
import requests
from win10toast import ToastNotifier
@dataclass
class Info:
    """Snapshot of one monitored live room as reported by the recorder API."""
    rowID: int
    roomID: str
    startTime: str
    endTime: str
    autoRecord: bool
    autoUpload: bool
    liveStatus: int
    lockStatus: int
    uname: str
    title: str
    liveStartTime: int
    recordStatus: int
    recordStartTime: int
    recordEndTime: int
    decodeStatus: int
    decodeStartTime: int
    decodeEndTime: int
    uploadStatus: int
    uploadStartTime: int
    uploadEndTime: int
    needUpload: bool
    state: int
    areaName: str

    # Display name for each numeric state code (no annotation, so the
    # dataclass machinery does not treat this as a field).
    _STATE_NAMES = {
        0: 'init',
        1: 'start',
        2: 'restart',
        3: 'running',
        4: 'waiting',
        5: 'decoding',
        6: 'decodeEnd',
        7: 'uploadWait',
        8: 'uploading',
        9: 'uploadEnd',
        10: 'stop',
    }

    @property
    def stateMap(self) -> str:
        """Human-readable name for the current state code.

        Bug fix: the original if/elif chain displayed the typo 'iinit' for
        state 0 and silently returned None for unrecognised codes; a lookup
        table fixes both and is easier to extend.
        """
        return self._STATE_NAMES.get(self.state, 'unknown')

    @property
    def recordTimeMap(self) -> str:
        """Recording duration (total, or elapsed-so-far) as H:MM:SS."""
        return cacUseTime(self.recordStartTime, self.recordEndTime)

    @property
    def decodeTimeMap(self) -> str:
        """Transcode duration (total, or elapsed-so-far) as H:MM:SS."""
        return cacUseTime(self.decodeStartTime, self.decodeEndTime)

    @property
    def uploadTimeMap(self) -> str:
        """Upload duration (total, or elapsed-so-far) as H:MM:SS."""
        return cacUseTime(self.uploadStartTime, self.uploadEndTime)

    @property
    def liveStartTimeMap(self) -> str:
        """Stream start time formatted as 'YYYY-mm-dd HH:MM:SS'."""
        return timeStamp2time(self.liveStartTime)
def timeStamp2time(ts):
    """Convert a Unix timestamp to a local-time 'YYYY-mm-dd HH:MM:SS' string."""
    moment = datetime.datetime.fromtimestamp(ts)
    return moment.strftime('%Y-%m-%d %H:%M:%S')
def cacUseTime(startTime, endTime):
    """Format the span between two Unix timestamps as H:MM:SS.

    A finished interval (startTime < endTime) yields its total length; a
    still-open interval (startTime > 0) yields the time elapsed so far;
    anything else yields 'nil'.
    """
    if startTime < endTime:
        delta = datetime.datetime.fromtimestamp(endTime) - datetime.datetime.fromtimestamp(startTime)
    elif startTime > 0:
        delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(startTime)
    else:
        return 'nil'
    # Drop the fractional-seconds part of timedelta's repr.
    return str(delta).split('.')[0]
def bytes2human(n):
    """Render a byte count with a binary-unit suffix, e.g. 10240 -> '10.0K'."""
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # 'K' -> 2**10, 'M' -> 2**20, ... 'Y' -> 2**80
    thresholds = {unit: 1 << ((idx + 1) * 10) for idx, unit in enumerate(units)}
    for unit in reversed(units):
        if n >= thresholds[unit]:
            return '%.1f%s' % (n / thresholds[unit], unit)
    return '%.1fB' % float(n)
class Display():
    """Terminal dashboard: renders room status plus host metrics with rich's Live view."""
    def __init__(self):
        self.console = Console(force_terminal=True, color_system='truecolor')
        self.console._environ['TERM'] = 'SMART'
        # Previous sample of time and network counters, kept so per-refresh
        # throughput can be computed as a delta.
        self.last_time = datetime.datetime.now()
        self.last_net_sent = 0.0
        self.last_net_recv = 0.0
    def generateInfo(self, rowID, liveInfo):
        """Build an Info record from one room's JSON dict, tagged with its row number."""
        return Info(
            rowID=rowID,
            roomID=liveInfo['RoomID'],
            startTime=liveInfo['StartTime'],
            endTime=liveInfo['EndTime'],
            autoRecord=liveInfo['AutoRecord'],
            autoUpload=liveInfo['AutoUpload'],
            liveStatus=liveInfo['LiveStatus'],
            lockStatus=liveInfo['LockStatus'],
            uname=liveInfo['Uname'],
            title=liveInfo['Title'],
            liveStartTime=liveInfo['LiveStartTime'],
            recordStatus=liveInfo['RecordStatus'],
            recordStartTime=liveInfo['RecordStartTime'],
            recordEndTime=liveInfo['RecordEndTime'],
            decodeStatus=liveInfo['DecodeStatus'],
            decodeStartTime=liveInfo['DecodeStartTime'],
            decodeEndTime=liveInfo['DecodeEndTime'],
            uploadStatus=liveInfo['UploadStatus'],
            uploadStartTime=liveInfo['UploadStartTime'],
            uploadEndTime=liveInfo['UploadEndTime'],
            needUpload=liveInfo['NeedUpload'],
            state=liveInfo['State'],
            areaName=liveInfo['AreaName']
        )
    def createInfoTable(self, liveInfos):
        """Return a RenderGroup of two tables: per-room status and host CPU/memory/network stats."""
        # Sort by state (descending) first, breaking ties by row number.
        infos = sorted(
            [self.generateInfo(rid, liveInfos[key]) for key, rid in
             zip(liveInfos.keys(), range(len(liveInfos)))],
            key=lambda i: i.state * 10 - i.rowID,
            reverse=True
        )
        table1 = Table(
            "行号", "房间ID", "主播", "分区", "直播标题", "直播状态", "开播时间", "录制时间", "转码用时", "上传用时", "当前状态",
            title="%s" % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            box=box.SIMPLE
        )
        for info in infos:
            table1.add_row(
                str(info.rowID),
                info.roomID,
                info.uname,
                info.areaName,
                info.title,
                str(info.liveStatus),
                info.liveStartTimeMap,
                info.recordTimeMap,
                info.decodeTimeMap,
                info.uploadTimeMap,
                info.stateMap
            )
        table2 = Table(
            "CPU","Memory","NetSent","NetRecv",
            box=box.SIMPLE
        )
        time_now = datetime.datetime.now()
        now_recv = psutil.net_io_counters().bytes_recv
        now_sent = psutil.net_io_counters().bytes_sent
        # Throughput = counter delta divided by wall-clock time since the last sample.
        table2.add_row(
            str(psutil.cpu_percent(None))+'%',
            str(psutil.virtual_memory().percent)+'%' + ' %s/%s' % (bytes2human(psutil.virtual_memory().used),bytes2human(psutil.virtual_memory().total)),
            bytes2human((now_sent-self.last_net_sent)/(time_now - self.last_time).total_seconds())+'/s',
            bytes2human((now_recv-self.last_net_recv)/(time_now - self.last_time).total_seconds())+'/s'
        )
        self.last_time = time_now
        self.last_net_sent = now_sent
        self.last_net_recv = now_recv
        return RenderGroup(
            table1,table2
        )
    def run(self):
        """Fetch the initial room map from the local recorder API, then refresh the dashboard once per second forever."""
        try:
            r = requests.get("http://127.0.0.1:18080/api/infos")
            self.infos = json.loads(r.text)['RoomInfos']
        except:
            # Best-effort: start with an empty room map if the recorder API is unreachable.
            self.infos = {}
        with Live(console=self.console, auto_refresh=False) as live:
            while True:
                self.notify()
                live.update(self.createInfoTable(self.infos), refresh=True)
                time.sleep(1)
    def notify(self):
        """Re-poll the recorder API and pop a Windows toast for each room that just went live."""
        try:
            r = requests.get("http://127.0.0.1:18080/api/infos")
            infos = json.loads(r.text)['RoomInfos']
            for roomID in infos:
                # Toast only on the 0/other -> 1 transition of LiveStatus.
                if infos[roomID]['LiveStatus'] == 1 and self.infos[roomID]['LiveStatus'] != 1:
                    toaster = ToastNotifier()
                    toaster.show_toast("开播通知", '%s[RoomID:%s]开播了' % (infos[roomID]['Uname'], roomID), icon_path=None, duration=3)
            self.infos = infos
        except:
pass | {"/main.py": ["/display.py"]} |
45,649 | juancalderonbustillo/bilby | refs/heads/master | /test/gw_likelihood_test.py | from __future__ import division, absolute_import
import unittest
import bilby
import numpy as np
class TestBasicGWTransient(unittest.TestCase):
    """Regression tests for bilby's BasicGravitationalWaveTransient likelihood.

    Expected log-likelihood values were precomputed with np.random.seed(500)
    fixing the simulated noise realisation; do not change the setup without
    regenerating them.
    """
    def setUp(self):
        np.random.seed(500)
        self.parameters = dict(
            mass_1=31., mass_2=29., a_1=0.4, a_2=0.3, tilt_1=0.0, tilt_2=0.0,
            phi_12=1.7, phi_jl=0.3, luminosity_distance=4000., iota=0.4,
            psi=2.659, phase=1.3, geocent_time=1126259642.413, ra=1.375,
            dec=-1.2108)
        self.interferometers = bilby.gw.detector.InterferometerList(['H1'])
        self.interferometers.set_strain_data_from_power_spectral_densities(
            sampling_frequency=2048, duration=4)
        self.waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
            duration=4, sampling_frequency=2048,
            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
        )
        self.likelihood = bilby.gw.likelihood.BasicGravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator
        )
        self.likelihood.parameters = self.parameters.copy()
    def tearDown(self):
        del self.parameters
        del self.interferometers
        del self.waveform_generator
        del self.likelihood
    def test_noise_log_likelihood(self):
        """Test noise log likelihood matches precomputed value"""
        self.likelihood.noise_log_likelihood()
        self.assertAlmostEqual(-4037.0994372143414, self.likelihood.noise_log_likelihood(), 3)
    def test_log_likelihood(self):
        """Test log likelihood matches precomputed value"""
        self.likelihood.log_likelihood()
        self.assertAlmostEqual(self.likelihood.log_likelihood(),
                               -4055.236283345252, 3)
    def test_log_likelihood_ratio(self):
        """Test log likelihood ratio returns the correct value"""
        self.assertAlmostEqual(
            self.likelihood.log_likelihood()
            - self.likelihood.noise_log_likelihood(),
            self.likelihood.log_likelihood_ratio(), 3)
    def test_likelihood_zero_when_waveform_is_none(self):
        """Test log likelihood returns np.nan_to_num(-np.inf) when the
        waveform is None"""
        # mass_2 > mass_1 makes the source model return None.
        self.likelihood.parameters['mass_2'] = 32
        self.assertEqual(self.likelihood.log_likelihood_ratio(),
                         np.nan_to_num(-np.inf))
        self.likelihood.parameters['mass_2'] = 29
    def test_repr(self):
        expected = 'BasicGravitationalWaveTransient(interferometers={},\n\twaveform_generator={})'.format(
            self.interferometers, self.waveform_generator)
        self.assertEqual(expected, repr(self.likelihood))
class TestGWTransient(unittest.TestCase):
    """Regression tests for bilby's GravitationalWaveTransient likelihood.

    Mirrors TestBasicGWTransient but supplies a prior dict (required by the
    marginalising likelihood class). Expected values are precomputed with
    np.random.seed(500).
    """
    def setUp(self):
        np.random.seed(500)
        self.duration = 4
        self.sampling_frequency = 2048
        self.parameters = dict(
            mass_1=31., mass_2=29., a_1=0.4, a_2=0.3, tilt_1=0.0, tilt_2=0.0,
            phi_12=1.7, phi_jl=0.3, luminosity_distance=4000., iota=0.4,
            psi=2.659, phase=1.3, geocent_time=1126259642.413, ra=1.375,
            dec=-1.2108)
        self.interferometers = bilby.gw.detector.InterferometerList(['H1'])
        self.interferometers.set_strain_data_from_power_spectral_densities(
            sampling_frequency=self.sampling_frequency, duration=self.duration)
        self.waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
            duration=self.duration, sampling_frequency=self.sampling_frequency,
            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
        )
        self.prior = bilby.gw.prior.BBHPriorDict()
        self.prior['geocent_time'] = bilby.prior.Uniform(
            minimum=self.parameters['geocent_time'] - self.duration / 2,
            maximum=self.parameters['geocent_time'] + self.duration / 2)
        self.likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator, priors=self.prior.copy()
        )
        self.likelihood.parameters = self.parameters.copy()
    def tearDown(self):
        del self.parameters
        del self.interferometers
        del self.waveform_generator
        del self.prior
        del self.likelihood
    def test_noise_log_likelihood(self):
        """Test noise log likelihood matches precomputed value"""
        self.likelihood.noise_log_likelihood()
        self.assertAlmostEqual(-4037.0994372143414, self.likelihood.noise_log_likelihood(), 3)
    def test_log_likelihood(self):
        """Test log likelihood matches precomputed value"""
        self.likelihood.log_likelihood()
        self.assertAlmostEqual(self.likelihood.log_likelihood(),
                               -4055.236283345252, 3)
    def test_log_likelihood_ratio(self):
        """Test log likelihood ratio returns the correct value"""
        self.assertAlmostEqual(
            self.likelihood.log_likelihood()
            - self.likelihood.noise_log_likelihood(),
            self.likelihood.log_likelihood_ratio(), 3)
    def test_likelihood_zero_when_waveform_is_none(self):
        """Test log likelihood returns np.nan_to_num(-np.inf) when the
        waveform is None"""
        # mass_2 > mass_1 makes the source model return None.
        self.likelihood.parameters['mass_2'] = 32
        self.assertEqual(self.likelihood.log_likelihood_ratio(),
                         np.nan_to_num(-np.inf))
        self.likelihood.parameters['mass_2'] = 29
    def test_repr(self):
        expected = 'GravitationalWaveTransient(interferometers={},\n\twaveform_generator={},\n\t' \
                   'time_marginalization={}, distance_marginalization={}, phase_marginalization={}, ' \
                   'priors={})'.format(self.interferometers, self.waveform_generator, False, False, False, self.prior)
        self.assertEqual(expected, repr(self.likelihood))
class TestTimeMarginalization(unittest.TestCase):
    """Validate the analytically time-marginalised likelihood against a
    brute-force numerical marginalisation over ``geocent_time``."""

    def setUp(self):
        # Seed numpy so the simulated noise realisation (and hence the
        # likelihood values) is reproducible.
        np.random.seed(500)
        self.duration = 4
        self.sampling_frequency = 2048
        # Parameter set used for every likelihood evaluation below.
        self.parameters = dict(
            mass_1=31., mass_2=29., a_1=0.4, a_2=0.3, tilt_1=0.0, tilt_2=0.0,
            phi_12=1.7, phi_jl=0.3, luminosity_distance=4000., iota=0.4,
            psi=2.659, phase=1.3, geocent_time=1126259640, ra=1.375,
            dec=-1.2108)
        self.interferometers = bilby.gw.detector.InterferometerList(['H1'])
        self.interferometers.set_strain_data_from_power_spectral_densities(
            sampling_frequency=self.sampling_frequency, duration=self.duration,
            start_time=1126259640)
        self.waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
            duration=self.duration, sampling_frequency=self.sampling_frequency,
            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
            start_time=1126259640)
        self.prior = bilby.gw.prior.BBHPriorDict()
        # Un-marginalised reference likelihood for the brute-force integrals.
        self.likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator, priors=self.prior.copy()
        )
        self.likelihood.parameters = self.parameters.copy()

    def tearDown(self):
        del self.duration
        del self.sampling_frequency
        del self.parameters
        del self.interferometers
        del self.waveform_generator
        del self.prior
        del self.likelihood

    def test_time_marginalisation_full_segment(self):
        """
        Test time marginalised likelihood matches brute force version over the
        whole segment.
        """
        likes = []
        lls = []
        # Time prior spanning the entire data segment.
        self.prior['geocent_time'] = bilby.prior.Uniform(
            minimum=self.waveform_generator.start_time,
            maximum=self.waveform_generator.start_time + self.duration)
        self.time = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator,
            time_marginalization=True, priors=self.prior.copy()
        )
        # 4096 evenly spaced times across the segment (endpoint dropped).
        times = self.waveform_generator.start_time + np.linspace(
            0, self.duration, 4097)[:-1]
        for time in times:
            self.likelihood.parameters['geocent_time'] = time
            lls.append(self.likelihood.log_likelihood_ratio())
            likes.append(np.exp(lls[-1]))
        # Brute force: integrate the prior-weighted likelihood over time,
        # then take the log.
        marg_like = np.log(np.trapz(
            likes * self.prior['geocent_time'].prob(times), times))
        self.time.parameters = self.parameters.copy()
        self.time.parameters['geocent_time'] = self.waveform_generator.start_time
        self.assertAlmostEqual(marg_like, self.time.log_likelihood_ratio(),
                               delta=0.5)

    def test_time_marginalisation_partial_segment(self):
        """
        Test time marginalised likelihood matches brute force version over a
        narrow (0.2 s) prior window inside the segment.
        """
        likes = []
        lls = []
        # Time prior covering only a small window around geocent_time + 1.
        self.prior['geocent_time'] = bilby.prior.Uniform(
            minimum=self.parameters['geocent_time'] + 1 - 0.1,
            maximum=self.parameters['geocent_time'] + 1 + 0.1)
        self.time = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator,
            time_marginalization=True, priors=self.prior.copy()
        )
        # Grid still spans the full segment; the prior weight is zero
        # outside the window.
        times = self.waveform_generator.start_time + np.linspace(
            0, self.duration, 4097)[:-1]
        for time in times:
            self.likelihood.parameters['geocent_time'] = time
            lls.append(self.likelihood.log_likelihood_ratio())
            likes.append(np.exp(lls[-1]))
        marg_like = np.log(np.trapz(
            likes * self.prior['geocent_time'].prob(times), times))
        self.time.parameters = self.parameters.copy()
        self.time.parameters['geocent_time'] = self.waveform_generator.start_time
        self.assertAlmostEqual(marg_like, self.time.log_likelihood_ratio(),
                               delta=0.5)
class TestMarginalizedLikelihood(unittest.TestCase):
    """Checks for prior handling when marginalisation is requested."""

    def setUp(self):
        # Seed numpy so the simulated noise realisation is reproducible.
        np.random.seed(500)
        self.duration = 4
        self.sampling_frequency = 2048
        self.parameters = dict(
            mass_1=31., mass_2=29., a_1=0.4, a_2=0.3, tilt_1=0.0, tilt_2=0.0,
            phi_12=1.7, phi_jl=0.3, luminosity_distance=4000., iota=0.4,
            psi=2.659, phase=1.3, geocent_time=1126259642.413, ra=1.375,
            dec=-1.2108)
        self.interferometers = bilby.gw.detector.InterferometerList(['H1'])
        self.interferometers.set_strain_data_from_power_spectral_densities(
            sampling_frequency=self.sampling_frequency, duration=self.duration,
            start_time=self.parameters['geocent_time'] - self.duration / 2)
        self.waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
            duration=self.duration, sampling_frequency=self.sampling_frequency,
            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
        )
        self.prior = bilby.gw.prior.BBHPriorDict()
        self.prior['geocent_time'] = bilby.prior.Uniform(
            minimum=self.parameters['geocent_time'] - self.duration / 2,
            maximum=self.parameters['geocent_time'] + self.duration / 2)

    def tearDown(self):
        # Added for consistency with the sibling TestCase classes in this
        # module: release the fixtures built in setUp between tests.
        del self.duration
        del self.sampling_frequency
        del self.parameters
        del self.interferometers
        del self.waveform_generator
        del self.prior

    def test_cannot_instantiate_marginalised_likelihood_without_prior(self):
        """Requesting marginalisation without priors must raise ValueError."""
        self.assertRaises(
            ValueError,
            lambda: bilby.gw.likelihood.GravitationalWaveTransient(
                interferometers=self.interferometers,
                waveform_generator=self.waveform_generator,
                phase_marginalization=True))

    def test_generating_default_time_prior(self):
        """With no geocent_time prior, a default one matching the original
        bounds is generated and the input dict entry is set to the fixed
        minimum."""
        temp = self.prior.pop('geocent_time')
        new_prior = self.prior.copy()
        like = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator, priors=new_prior,
            time_marginalization=True
        )
        same = all([temp.minimum == like.priors['geocent_time'].minimum,
                    temp.maximum == like.priors['geocent_time'].maximum,
                    new_prior['geocent_time'] == temp.minimum])
        self.assertTrue(same)
        # Restore the popped prior for any later use of self.prior.
        self.prior['geocent_time'] = temp

    def test_generating_default_phase_prior(self):
        """With no phase prior, a default one matching the original bounds
        is generated and the input dict entry is set to 0."""
        temp = self.prior.pop('phase')
        new_prior = self.prior.copy()
        like = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator, priors=new_prior,
            phase_marginalization=True
        )
        same = all([temp.minimum == like.priors['phase'].minimum,
                    temp.maximum == like.priors['phase'].maximum,
                    new_prior['phase'] == float(0)])
        self.assertTrue(same)
        # Restore the popped prior for any later use of self.prior.
        self.prior['phase'] = temp
class TestPhaseMarginalization(unittest.TestCase):
    """Check the analytic phase marginalisation against a direct integral."""

    def setUp(self):
        # Seed numpy so the simulated noise realisation is reproducible.
        np.random.seed(500)
        self.duration = 4
        self.sampling_frequency = 2048
        self.parameters = dict(
            mass_1=31., mass_2=29., a_1=0.4, a_2=0.3, tilt_1=0.0, tilt_2=0.0,
            phi_12=1.7, phi_jl=0.3, luminosity_distance=4000., iota=0.4,
            psi=2.659, phase=1.3, geocent_time=1126259642.413, ra=1.375,
            dec=-1.2108)
        self.interferometers = bilby.gw.detector.InterferometerList(['H1'])
        self.interferometers.set_strain_data_from_power_spectral_densities(
            sampling_frequency=self.sampling_frequency, duration=self.duration)
        self.waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
            duration=self.duration, sampling_frequency=self.sampling_frequency,
            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
        )
        self.prior = bilby.gw.prior.BBHPriorDict()
        self.prior['geocent_time'] = bilby.prior.Uniform(
            minimum=self.parameters['geocent_time'] - self.duration / 2,
            maximum=self.parameters['geocent_time'] + self.duration / 2)
        # Un-marginalised reference likelihood.
        self.likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator, priors=self.prior.copy()
        )
        # Phase-marginalised likelihood under test.
        self.phase = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator,
            phase_marginalization=True, priors=self.prior.copy()
        )
        for likelihood in [self.likelihood, self.phase]:
            likelihood.parameters = self.parameters.copy()

    def tearDown(self):
        del self.duration
        del self.sampling_frequency
        del self.parameters
        del self.interferometers
        del self.waveform_generator
        del self.prior
        del self.likelihood
        del self.phase

    def test_phase_marginalisation(self):
        """Test phase marginalised likelihood matches brute force version"""
        phases = np.linspace(0, 2 * np.pi, 1000)
        integrand = []
        for phase in phases:
            self.likelihood.parameters['phase'] = phase
            integrand.append(np.exp(self.likelihood.log_likelihood_ratio()))
        # Average the likelihood over phase, i.e. a flat [0, 2*pi) prior.
        brute_force = np.log(np.trapz(integrand, phases) / (2 * np.pi))
        self.phase.parameters = self.parameters.copy()
        self.assertAlmostEqual(brute_force, self.phase.log_likelihood_ratio(),
                               delta=0.5)
class TestTimePhaseMarginalization(unittest.TestCase):
    """Check the joint time-and-phase marginalised likelihood against
    brute-force marginalisation of the singly-marginalised likelihoods."""

    def setUp(self):
        # Seed numpy so the simulated noise realisation is reproducible.
        np.random.seed(500)
        self.duration = 4
        self.sampling_frequency = 2048
        self.parameters = dict(
            mass_1=31., mass_2=29., a_1=0.4, a_2=0.3, tilt_1=0.0, tilt_2=0.0,
            phi_12=1.7, phi_jl=0.3, luminosity_distance=4000., iota=0.4,
            psi=2.659, phase=1.3, geocent_time=1126259642.413, ra=1.375,
            dec=-1.2108)
        self.interferometers = bilby.gw.detector.InterferometerList(['H1'])
        self.interferometers.set_strain_data_from_power_spectral_densities(
            sampling_frequency=self.sampling_frequency, duration=self.duration,
            start_time=1126259640)
        self.waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
            duration=self.duration, sampling_frequency=self.sampling_frequency,
            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
            start_time=1126259640)
        self.prior = bilby.gw.prior.BBHPriorDict()
        self.prior['geocent_time'] = bilby.prior.Uniform(
            minimum=self.parameters['geocent_time'] - self.duration / 2,
            maximum=self.parameters['geocent_time'] + self.duration / 2)
        # Four variants of the same likelihood: none, time-only, phase-only
        # and joint time+phase marginalisation.
        self.likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator, priors=self.prior.copy()
        )
        self.time = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator,
            time_marginalization=True, priors=self.prior.copy()
        )
        self.phase = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator,
            phase_marginalization=True, priors=self.prior.copy()
        )
        self.time_phase = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=self.interferometers,
            waveform_generator=self.waveform_generator,
            time_marginalization=True, phase_marginalization=True,
            priors=self.prior.copy()
        )
        for like in [self.likelihood, self.time, self.phase, self.time_phase]:
            like.parameters = self.parameters.copy()

    def tearDown(self):
        del self.duration
        del self.sampling_frequency
        del self.parameters
        del self.interferometers
        del self.waveform_generator
        del self.prior
        del self.likelihood
        del self.time
        del self.phase
        del self.time_phase

    def test_time_phase_marginalisation(self):
        """Test time and phase marginalised likelihood matches brute force
        marginalisation of the singly-marginalised versions."""
        # First: integrate the phase-marginalised likelihood over time and
        # compare to the jointly marginalised likelihood.
        like = []
        times = np.linspace(self.prior['geocent_time'].minimum,
                            self.prior['geocent_time'].maximum, 4097)[:-1]
        for time in times:
            self.phase.parameters['geocent_time'] = time
            like.append(np.exp(self.phase.log_likelihood_ratio()))
        marg_like = np.log(np.trapz(like, times)
                           / self.waveform_generator.duration)
        self.time_phase.parameters = self.parameters.copy()
        self.assertAlmostEqual(marg_like,
                               self.time_phase.log_likelihood_ratio(),
                               delta=0.5)
        # Second: integrate the time-marginalised likelihood over phase and
        # compare to the same jointly marginalised likelihood.
        like = []
        phases = np.linspace(0, 2 * np.pi, 1000)
        for phase in phases:
            self.time.parameters['phase'] = phase
            like.append(np.exp(self.time.log_likelihood_ratio()))
        marg_like = np.log(np.trapz(like, phases) / (2 * np.pi))
        self.time_phase.parameters = self.parameters.copy()
        self.assertAlmostEqual(marg_like,
                               self.time_phase.log_likelihood_ratio(),
                               delta=0.5)
class TestROQLikelihood(unittest.TestCase):
    """Compare the reduced-order-quadrature (ROQ) likelihood with the
    standard likelihood.

    NOTE: requires a pre-computed ROQ basis under the hard-coded absolute
    path ``/roq_basis``; without those ``.npy`` files setUp fails at
    ``np.load``.
    """

    def setUp(self):
        self.duration = 4
        self.sampling_frequency = 2048
        # Hard-coded location of the ROQ basis files.
        roq_dir = '/roq_basis'
        linear_matrix_file = "{}/B_linear.npy".format(roq_dir)
        quadratic_matrix_file = "{}/B_quadratic.npy".format(roq_dir)
        fnodes_linear_file = "{}/fnodes_linear.npy".format(roq_dir)
        fnodes_linear = np.load(fnodes_linear_file).T
        fnodes_quadratic_file = "{}/fnodes_quadratic.npy".format(roq_dir)
        fnodes_quadratic = np.load(fnodes_quadratic_file).T
        self.test_parameters = dict(
            mass_1=36.0, mass_2=36.0, a_1=0.0, a_2=0.0, tilt_1=0.0,
            tilt_2=0.0, phi_12=1.7, phi_jl=0.3, luminosity_distance=5000.,
            iota=0.4, psi=0.659, phase=1.3, geocent_time=1.2, ra=1.3, dec=-1.2)
        ifos = bilby.gw.detector.InterferometerList(['H1'])
        ifos.set_strain_data_from_power_spectral_densities(
            sampling_frequency=self.sampling_frequency, duration=self.duration)
        self.priors = bilby.gw.prior.BBHPriorDict()
        self.priors['geocent_time'] = bilby.core.prior.Uniform(1.1, 1.3)
        non_roq_wfg = bilby.gw.WaveformGenerator(
            duration=self.duration, sampling_frequency=self.sampling_frequency,
            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
            waveform_arguments=dict(
                reference_frequency=20.0, minimum_frequency=20.0,
                approximant='IMRPhenomPv2'))
        ifos.inject_signal(
            parameters=self.test_parameters, waveform_generator=non_roq_wfg)
        roq_wfg = bilby.gw.waveform_generator.WaveformGenerator(
            duration=self.duration, sampling_frequency=self.sampling_frequency,
            frequency_domain_source_model=bilby.gw.source.roq,
            waveform_arguments=dict(
                frequency_nodes_linear=fnodes_linear,
                frequency_nodes_quadratic=fnodes_quadratic,
                reference_frequency=20., minimum_frequency=20.,
                approximant='IMRPhenomPv2'))
        self.non_roq_likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
            interferometers=ifos, waveform_generator=non_roq_wfg)
        self.roq_likelihood = bilby.gw.likelihood.ROQGravitationalWaveTransient(
            interferometers=ifos, waveform_generator=roq_wfg,
            linear_matrix=linear_matrix_file,
            quadratic_matrix=quadratic_matrix_file, priors=self.priors)

    def tearDown(self):
        pass

    def test_matches_non_roq(self):
        """ROQ and direct likelihoods agree to 0 decimal places."""
        self.non_roq_likelihood.parameters.update(self.test_parameters)
        self.roq_likelihood.parameters.update(self.test_parameters)
        self.assertAlmostEqual(
            self.non_roq_likelihood.log_likelihood_ratio(),
            self.roq_likelihood.log_likelihood_ratio(), 0)

    def test_time_prior_out_of_bounds_returns_zero(self):
        """A geocent_time outside the prior gives np.nan_to_num(-np.inf)."""
        self.roq_likelihood.parameters.update(self.test_parameters)
        self.roq_likelihood.parameters['geocent_time'] = -5
        self.assertEqual(
            self.roq_likelihood.log_likelihood_ratio(), np.nan_to_num(-np.inf))
class TestBBHLikelihoodSetUp(unittest.TestCase):
    """Smoke test for the ``get_binary_black_hole_likelihood`` helper."""

    def setUp(self):
        self.ifos = bilby.gw.detector.InterferometerList(['H1'])

    def tearDown(self):
        del self.ifos

    def test_instantiation(self):
        # Only checks that construction does not raise.
        self.like = bilby.gw.likelihood.get_binary_black_hole_likelihood(
            self.ifos)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,650 | juancalderonbustillo/bilby | refs/heads/master | /bilby/core/sampler/cpnest.py | from __future__ import absolute_import
import numpy as np
from pandas import DataFrame
from .base_sampler import NestedSampler
from ..utils import logger, check_directory_exists_and_if_not_mkdir
class Cpnest(NestedSampler):
    """ bilby wrapper of cpnest (https://github.com/johnveitch/cpnest)

    All positional and keyword arguments (i.e., the args and kwargs) passed to
    `run_sampler` will be propagated to `cpnest.CPNest`, see documentation
    for that class for further help. Under Other Parameters, we list commonly
    used kwargs and the bilby defaults.

    Other Parameters
    ----------------
    nlive: int
        The number of live points, note this can also equivalently be given as
        one of [npoints, nlives, n_live_points]
    seed: int (1234)
        Initialised random seed
    nthreads: int, (1)
        Number of threads to use
    maxmcmc: int (1000)
        The maximum number of MCMC steps to take
    verbose: Bool (True)
        If true, print information about the convergence during sampling
    resume: Bool (False)
        Whether or not to resume from a previous run
    output: str
        Where to write the CPNest output, by default this is
        {self.outdir}/cpnest_{self.label}/

    """
    default_kwargs = dict(verbose=1, nthreads=1, nlive=500, maxmcmc=1000,
                          seed=None, poolsize=100, nhamiltonian=0, resume=False,
                          output=None)

    def _translate_kwargs(self, kwargs):
        # Map the generic live-point aliases (npoints, n_live_points, ...)
        # onto cpnest's 'nlive' keyword.
        if 'nlive' not in kwargs:
            for equiv in self.npoints_equiv_kwargs:
                if equiv in kwargs:
                    kwargs['nlive'] = kwargs.pop(equiv)
        if 'seed' not in kwargs:
            logger.warning('No seed provided, cpnest will use 1234.')

    def run_sampler(self):
        """ Run cpnest and populate/return ``self.result`` """
        from cpnest import model as cpmodel, CPNest

        class Model(cpmodel.Model):
            """ A wrapper class to pass our log_likelihood into cpnest

            NOTE: ``log_likelihood`` and ``log_prior`` below are static
            methods; the ``self`` they reference is the *outer* ``Cpnest``
            instance captured by closure from ``run_sampler``, not a
            ``Model`` instance.
            """
            def __init__(self, names, bounds):
                self.names = names
                self.bounds = bounds
                self._check_bounds()

            @staticmethod
            def log_likelihood(x, **kwargs):
                # Convert cpnest's sample container into an ordered list of
                # values and delegate to the sampler's likelihood.
                theta = [x[n] for n in self.search_parameter_keys]
                return self.log_likelihood(theta)

            @staticmethod
            def log_prior(x, **kwargs):
                theta = [x[n] for n in self.search_parameter_keys]
                return self.log_prior(theta)

            def _check_bounds(self):
                # Reject non-finite prior bounds up front with a clear error.
                for bound in self.bounds:
                    if not all(np.isfinite(bound)):
                        raise ValueError(
                            'CPNest requires priors to have finite bounds.')

        bounds = [[self.priors[key].minimum, self.priors[key].maximum]
                  for key in self.search_parameter_keys]
        model = Model(self.search_parameter_keys, bounds)
        out = CPNest(model, **self.kwargs)
        out.run()

        if self.plot:
            out.plot()

        self.result.posterior = DataFrame(out.posterior_samples)
        # Rename cpnest's column names to the bilby conventions.
        self.result.posterior.rename(columns=dict(
            logL='log_likelihood', logPrior='log_prior'), inplace=True)
        self.result.log_evidence = out.NS.state.logZ
        self.result.log_evidence_err = np.nan
        return self.result

    def _verify_kwargs_against_default_kwargs(self):
        """
        Set the directory where the output will be written.
        """
        if not self.kwargs['output']:
            self.kwargs['output'] = \
                '{}/cpnest_{}/'.format(self.outdir, self.label)
        # Normalise to a trailing slash so the path is treated as a directory.
        if not self.kwargs['output'].endswith('/'):
            self.kwargs['output'] = '{}/'.format(self.kwargs['output'])
        check_directory_exists_and_if_not_mkdir(self.kwargs['output'])
        NestedSampler._verify_kwargs_against_default_kwargs(self)
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,651 | juancalderonbustillo/bilby | refs/heads/master | /bilby/gw/result.py | from __future__ import division
from ..core.result import Result as CoreResult
from ..core.utils import logger
class CompactBinaryCoalesenceResult(CoreResult):
    """Result subclass exposing CBC-specific entries of ``meta_data``."""

    def __init__(self, **kwargs):
        super(CompactBinaryCoalesenceResult, self).__init__(**kwargs)

    def __get_from_nested_meta_data(self, *keys):
        """Follow ``keys`` down through ``self.meta_data``.

        Raises ValueError (rather than KeyError) when any key is absent so
        callers get a readable 'no information stored' message.
        """
        node = self.meta_data
        try:
            for key in keys:
                node = node[key]
            return node
        except KeyError:
            raise ValueError(
                "No information stored for {}".format('/'.join(keys)))

    @property
    def time_marginalization(self):
        """ Boolean for if the likelihood used time marginalization """
        return self.__get_from_nested_meta_data(
            'likelihood', 'time_marginalization')

    @property
    def phase_marginalization(self):
        """ Boolean for if the likelihood used phase marginalization """
        return self.__get_from_nested_meta_data(
            'likelihood', 'phase_marginalization')

    @property
    def distance_marginalization(self):
        """ Boolean for if the likelihood used distance marginalization """
        return self.__get_from_nested_meta_data(
            'likelihood', 'distance_marginalization')

    @property
    def waveform_approximant(self):
        """ String of the waveform approximant """
        return self.__get_from_nested_meta_data(
            'likelihood', 'waveform_arguments', 'waveform_approximant')

    @property
    def reference_frequency(self):
        """ Float of the reference frequency """
        return self.__get_from_nested_meta_data(
            'likelihood', 'waveform_arguments', 'reference_frequency')

    @property
    def frequency_domain_source_model(self):
        """ The frequency domain source model (function)"""
        return self.__get_from_nested_meta_data(
            'likelihood', 'frequency_domain_source_model')

    def detector_injection_properties(self, detector):
        """ Returns a dictionary of the injection properties for each detector

        The injection properties include the parameters injected, and
        information about the signal to noise ratio (SNR) given the noise
        properties.

        Parameters
        ----------
        detector: str [H1, L1, V1]
            Detector name

        Returns
        -------
        injection_properties: dict
            A dictionary of the injection properties, or None if no
            information is stored for the requested detector.
        """
        try:
            return self.__get_from_nested_meta_data(
                'likelihood', 'interferometers', detector)
        except ValueError:
            logger.info("No injection for detector {}".format(detector))
            return None
# Short convenience alias for the class above.
CBCResult = CompactBinaryCoalesenceResult
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,652 | juancalderonbustillo/bilby | refs/heads/master | /bilby/core/result.py | from __future__ import division
import os
from distutils.version import LooseVersion
from collections import OrderedDict, namedtuple
import numpy as np
import deepdish
import pandas as pd
import corner
import scipy.stats
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import lines as mpllines
from . import utils
from .utils import (logger, infer_parameters_from_function,
check_directory_exists_and_if_not_mkdir)
from .prior import Prior, PriorDict, DeltaFunction
def result_file_name(outdir, label):
    """ Returns the standard filename used for a result file

    Parameters
    ----------
    outdir: str
        Name of the output directory
    label: str
        Naming scheme of the output file

    Returns
    -------
    str: File name of the output file
    """
    return '{outdir}/{label}_result.h5'.format(outdir=outdir, label=label)
def read_in_result(filename=None, outdir=None, label=None):
    """ Wrapper to bilby.core.result.Result.from_hdf5

    Parameters
    ----------
    filename: str, optional
        Path of the result file to load.
    outdir, label: str, optional
        Used to build the default result-file name when ``filename`` is not
        given.

    Returns
    -------
    bilby.core.result.Result
    """
    return Result.from_hdf5(filename=filename, outdir=outdir, label=label)
class Result(object):
    def __init__(self, label='no_label', outdir='.', sampler=None,
                 search_parameter_keys=None, fixed_parameter_keys=None,
                 priors=None, sampler_kwargs=None, injection_parameters=None,
                 meta_data=None, posterior=None, samples=None,
                 nested_samples=None, log_evidence=np.nan,
                 log_evidence_err=np.nan, log_noise_evidence=np.nan,
                 log_bayes_factor=np.nan, log_likelihood_evaluations=None,
                 log_prior_evaluations=None, sampling_time=None, nburn=None,
                 walkers=None, max_autocorrelation_time=None,
                 parameter_labels=None, parameter_labels_with_unit=None,
                 version=None):
        """ A class to store the results of the sampling run

        Parameters
        ----------
        label, outdir, sampler: str
            The label, output directory, and sampler used
        search_parameter_keys, fixed_parameter_keys: list
            Lists of the search and fixed parameter keys. Elements of the
            list should be of type `str` and match the keys of the `prior`
        priors: dict, bilby.core.prior.PriorDict
            A dictionary of the priors used in the run
        sampler_kwargs: dict
            Key word arguments passed to the sampler
        injection_parameters: dict
            A dictionary of the injection parameters
        meta_data: dict
            A dictionary of meta data to store about the run
        posterior: pandas.DataFrame
            A pandas data frame of the posterior
        samples, nested_samples: array_like
            An array of the output posterior samples and the unweighted samples
        log_evidence, log_evidence_err, log_noise_evidence, log_bayes_factor: float
            Natural log evidences
        log_likelihood_evaluations: array_like
            The evaluations of the likelihood for each sample point
        log_prior_evaluations: array_like
            The evaluations of the prior for each sample point
        sampling_time: float
            The time taken to complete the sampling
        nburn: int
            The number of burn-in steps discarded for MCMC samplers
        walkers: array_like
            The walkers taken by an ensemble MCMC sampler
        max_autocorrelation_time: float
            The estimated maximum autocorrelation time for MCMC samplers
        parameter_labels, parameter_labels_with_unit: list
            Lists of the latex-formatted parameter labels
        version: str,
            Version information for software used to generate the result. Note,
            this information is generated when the result object is initialized

        Note
        ---------
        All sampling output parameters, e.g. the samples themselves are
        typically not given at initialisation, but set at a later stage.

        """
        self.label = label
        self.outdir = os.path.abspath(outdir)
        self.sampler = sampler
        # NOTE: assignment order matters below -- the ``priors`` setter
        # reads ``search_parameter_keys``, ``parameter_labels`` and
        # ``parameter_labels_with_unit``, so those must come first.
        self.search_parameter_keys = search_parameter_keys
        self.fixed_parameter_keys = fixed_parameter_keys
        self.parameter_labels = parameter_labels
        self.parameter_labels_with_unit = parameter_labels_with_unit
        self.priors = priors
        self.sampler_kwargs = sampler_kwargs
        self.meta_data = meta_data
        self.injection_parameters = injection_parameters
        self.posterior = posterior
        self.samples = samples
        self.nested_samples = nested_samples
        self.walkers = walkers
        self.nburn = nburn
        self.log_evidence = log_evidence
        self.log_evidence_err = log_evidence_err
        self.log_noise_evidence = log_noise_evidence
        self.log_bayes_factor = log_bayes_factor
        self.log_likelihood_evaluations = log_likelihood_evaluations
        self.log_prior_evaluations = log_prior_evaluations
        self.sampling_time = sampling_time
        self.version = version
        self.max_autocorrelation_time = max_autocorrelation_time

        self.prior_values = None
        # Lazily-built KDE cache (populated elsewhere).
        self._kde = None
@classmethod
def from_hdf5(cls, filename=None, outdir=None, label=None):
""" Read in a saved .h5 data file
Parameters
----------
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
-------
result: bilby.core.result.Result
Raises
-------
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
if filename is None:
if (outdir is None) and (label is None):
raise ValueError("No information given to load file")
else:
filename = result_file_name(outdir, label)
if os.path.isfile(filename):
dictionary = deepdish.io.load(filename)
# Some versions of deepdish/pytables return the dictionanary as
# a dictionary with a kay 'data'
if len(dictionary) == 1 and 'data' in dictionary:
dictionary = dictionary['data']
try:
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
def __str__(self):
"""Print a summary """
if getattr(self, 'posterior', None) is not None:
if getattr(self, 'log_noise_evidence', None) is not None:
return ("nsamples: {:d}\n"
"log_noise_evidence: {:6.3f}\n"
"log_evidence: {:6.3f} +/- {:6.3f}\n"
"log_bayes_factor: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_noise_evidence, self.log_evidence,
self.log_evidence_err, self.log_bayes_factor,
self.log_evidence_err))
else:
return ("nsamples: {:d}\n"
"log_evidence: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_evidence, self.log_evidence_err))
else:
return ''
@property
def priors(self):
if self._priors is not None:
return self._priors
else:
raise ValueError('Result object has no priors')
@priors.setter
def priors(self, priors):
if isinstance(priors, dict):
self._priors = PriorDict(priors)
if self.parameter_labels is None:
self.parameter_labels = [self.priors[k].latex_label for k in
self.search_parameter_keys]
if self.parameter_labels_with_unit is None:
self.parameter_labels_with_unit = [
self.priors[k].latex_label_with_unit for k in
self.search_parameter_keys]
elif priors is None:
self._priors = priors
self.parameter_labels = self.search_parameter_keys
self.parameter_labels_with_unit = self.search_parameter_keys
else:
raise ValueError("Input priors not understood")
@property
def samples(self):
""" An array of samples """
if self._samples is not None:
return self._samples
else:
raise ValueError("Result object has no stored samples")
    @samples.setter
    def samples(self, samples):
        # Plain storage; the getter raises if this is left as None.
        self._samples = samples
@property
def nested_samples(self):
"""" An array of unweighted samples """
if self._nested_samples is not None:
return self._nested_samples
else:
raise ValueError("Result object has no stored nested samples")
    @nested_samples.setter
    def nested_samples(self, nested_samples):
        # Plain storage; the getter raises if this is left as None.
        self._nested_samples = nested_samples
@property
def walkers(self):
"""" An array of the ensemble walkers """
if self._walkers is not None:
return self._walkers
else:
raise ValueError("Result object has no stored walkers")
    @walkers.setter
    def walkers(self, walkers):
        # Plain storage; the getter raises if this is left as None.
        self._walkers = walkers
@property
def nburn(self):
"""" An array of the ensemble walkers """
if self._nburn is not None:
return self._nburn
else:
raise ValueError("Result object has no stored nburn")
    @nburn.setter
    def nburn(self, nburn):
        # Plain storage; the getter raises if this is left as None.
        self._nburn = nburn
@property
def posterior(self):
""" A pandas data frame of the posterior """
if self._posterior is not None:
return self._posterior
else:
raise ValueError("Result object has no stored posterior")
    @posterior.setter
    def posterior(self, posterior):
        # Plain storage; the getter raises if this is left as None.
        self._posterior = posterior
    @property
    def version(self):
        # Version string recorded at creation time (see the setter below).
        return self._version
@version.setter
def version(self, version):
if version is None:
self._version = 'bilby={}'.format(utils.get_version_information())
else:
self._version = version
def _get_save_data_dictionary(self):
# This list defines all the parameters saved in the result object
save_attrs = [
'label', 'outdir', 'sampler', 'log_evidence', 'log_evidence_err',
'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
'injection_parameters', 'meta_data', 'search_parameter_keys',
'fixed_parameter_keys', 'sampling_time', 'sampler_kwargs',
'log_likelihood_evaluations', 'log_prior_evaluations', 'samples',
'nested_samples', 'walkers', 'nburn', 'parameter_labels',
'parameter_labels_with_unit', 'version']
dictionary = OrderedDict()
for attr in save_attrs:
try:
dictionary[attr] = getattr(self, attr)
except ValueError as e:
logger.debug("Unable to save {}, message: {}".format(attr, e))
pass
return dictionary
def save_to_file(self, overwrite=False, outdir=None):
"""
Writes the Result to a deepdish h5 file
Parameters
----------
overwrite: bool, optional
Whether or not to overwrite an existing result file.
default=False
outdir: str, optional
Path to the outdir. Default is the one stored in the result object.
"""
outdir = self._safe_outdir_creation(outdir, self.save_to_file)
file_name = result_file_name(outdir, self.label)
if os.path.isfile(file_name):
if overwrite:
logger.debug('Removing existing file {}'.format(file_name))
os.remove(file_name)
else:
logger.debug(
'Renaming existing file {} to {}.old'.format(file_name,
file_name))
os.rename(file_name, file_name + '.old')
logger.debug("Saving result to {}".format(file_name))
# Convert the prior to a string representation for saving on disk
dictionary = self._get_save_data_dictionary()
if dictionary.get('priors', False):
dictionary['priors'] = {key: str(self.priors[key]) for key in self.priors}
# Convert callable sampler_kwargs to strings to avoid pickling issues
if dictionary.get('sampler_kwargs', None) is not None:
for key in dictionary['sampler_kwargs']:
if hasattr(dictionary['sampler_kwargs'][key], '__call__'):
dictionary['sampler_kwargs'][key] = str(dictionary['sampler_kwargs'])
try:
deepdish.io.save(file_name, dictionary)
except Exception as e:
logger.error("\n\n Saving the data has failed with the "
"following message:\n {} \n\n".format(e))
def save_posterior_samples(self, outdir=None):
"""Saves posterior samples to a file"""
outdir = self._safe_outdir_creation(outdir, self.save_posterior_samples)
filename = '{}/{}_posterior_samples.txt'.format(outdir, self.label)
self.posterior.to_csv(filename, index=False, header=True)
def get_latex_labels_from_parameter_keys(self, keys):
""" Returns a list of latex_labels corresponding to the given keys
Parameters
----------
keys: list
List of strings corresponding to the desired latex_labels
Returns
-------
list: The desired latex_labels
"""
latex_labels = []
for k in keys:
if k in self.search_parameter_keys:
idx = self.search_parameter_keys.index(k)
latex_labels.append(self.parameter_labels_with_unit[idx])
elif k in self.parameter_labels:
latex_labels.append(k)
else:
logger.debug(
'key {} not a parameter label or latex label'.format(k))
latex_labels.append(' '.join(k.split('_')))
return latex_labels
@property
def covariance_matrix(self):
""" The covariance matrix of the samples the posterior """
samples = self.posterior[self.search_parameter_keys].values
return np.cov(samples.T)
@property
def posterior_volume(self):
""" The posterior volume """
if self.covariance_matrix.ndim == 0:
return np.sqrt(self.covariance_matrix)
else:
return 1 / np.sqrt(np.abs(np.linalg.det(
1 / self.covariance_matrix)))
@staticmethod
def prior_volume(priors):
""" The prior volume, given a set of priors """
return np.prod([priors[k].maximum - priors[k].minimum for k in priors])
    def occam_factor(self, priors):
        """ The Occam factor: ratio of posterior volume to prior volume.

        See Chapter 28, `Mackay "Information Theory, Inference, and Learning
        Algorithms" <http://www.inference.org.uk/itprnn/book.html>`_ Cambridge
        University Press (2003).

        """
        return self.posterior_volume / self.prior_volume(priors)
    def get_one_dimensional_median_and_error_bar(self, key, fmt='.2f',
                                                 quantiles=(0.16, 0.84)):
        """ Calculate the median and error bar for a given key

        Parameters
        ----------
        key: str
            The parameter key for which to calculate the median and error bar
        fmt: str, ('.2f')
            A format string
        quantiles: list, tuple
            A length-2 tuple of the lower and upper-quantiles to calculate
            the errors bars for.

        Returns
        -------
        summary: namedtuple
            An object with attributes median, plus, minus and string

        """
        # NOTE: the namedtuple *class* itself is used as an attribute bag
        # here -- attributes are assigned onto the class, not an instance --
        # and the declared fields 'lower'/'upper' are never populated.
        summary = namedtuple('summary', ['median', 'lower', 'upper', 'string'])

        if len(quantiles) != 2:
            raise ValueError("quantiles must be of length 2")

        quants_to_compute = np.array([quantiles[0], 0.5, quantiles[1]])
        quants = np.percentile(self.posterior[key], quants_to_compute * 100)
        summary.median = quants[1]
        summary.plus = quants[2] - summary.median
        summary.minus = summary.median - quants[0]

        fmt = "{{0:{0}}}".format(fmt).format
        # Render as latex: $median_{-minus}^{+plus}$
        string_template = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
        summary.string = string_template.format(
            fmt(summary.median), fmt(summary.minus), fmt(summary.plus))
        return summary
    def plot_single_density(self, key, prior=None, cumulative=False,
                            title=None, truth=None, save=True,
                            file_base_name=None, bins=50, label_fontsize=16,
                            title_fontsize=16, quantiles=(0.16, 0.84), dpi=300):
        """ Plot a 1D marginal density, either probability or cumulative.

        Parameters
        ----------
        key: str
            Name of the parameter to plot
        prior: {bool (True), bilby.core.prior.Prior}
            If true, add the stored prior probability density function to the
            one-dimensional marginal distributions. If instead a Prior
            is provided, this will be plotted.
        cumulative: bool
            If true plot the CDF
        title: bool
            If true, add 1D title of the median and (by default 1-sigma)
            error bars. To change the error bars, pass in the quantiles kwarg.
            See method `get_one_dimensional_median_and_error_bar` for further
            details). If `quantiles=None` is passed in, no title is added.
        truth: {bool, float}
            If true, plot self.injection_parameters[parameter].
            If float, plot this value.
        save: bool:
            If true, save plot to disk.
        file_base_name: str, optional
            If given, the base file name to use (by default `outdir/label_` is
            used)
        bins: int
            The number of histogram bins
        label_fontsize, title_fontsize: int
            The fontsizes for the labels and titles
        quantiles: tuple
            A length-2 tuple of the lower and upper-quantiles to calculate
            the errors bars for.
        dpi: int
            Dots per inch resolution of the plot

        Returns
        -------
        figure: matplotlib.pyplot.figure
            A matplotlib figure object, or None if the histogram could not
            be generated or the figure was saved to disk.
        """
        logger.info('Plotting {} marginal distribution'.format(key))
        label = self.get_latex_labels_from_parameter_keys([key])[0]
        fig, ax = plt.subplots()
        # A histogram can fail, e.g. for constant or non-finite samples;
        # log and bail out rather than crashing the caller's plotting loop
        try:
            ax.hist(self.posterior[key].values, bins=bins, density=True,
                    histtype='step', cumulative=cumulative)
        except ValueError as e:
            logger.info(
                'Failed to generate 1d plot for {}, error message: {}'
                .format(key, e))
            return
        ax.set_xlabel(label, fontsize=label_fontsize)
        if truth is not None:
            ax.axvline(truth, ls='-', color='orange')
        # Dashed lines at the lower/upper quantile positions
        summary = self.get_one_dimensional_median_and_error_bar(
            key, quantiles=quantiles)
        ax.axvline(summary.median - summary.minus, ls='--', color='C0')
        ax.axvline(summary.median + summary.plus, ls='--', color='C0')
        if title:
            ax.set_title(summary.string, fontsize=title_fontsize)
        # Only an actual Prior instance is overplotted here; resolution of
        # prior=True to the stored prior happens in plot_marginals
        if isinstance(prior, Prior):
            theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
            ax.plot(theta, prior.prob(theta), color='C2')
        if save:
            fig.tight_layout()
            # Suffix reflects whether this is the CDF or the PDF
            if cumulative:
                file_name = file_base_name + key + '_cdf'
            else:
                file_name = file_base_name + key + '_pdf'
            fig.savefig(file_name, dpi=dpi)
            plt.close(fig)
        else:
            return fig
    def plot_marginals(self, parameters=None, priors=None, titles=True,
                       file_base_name=None, bins=50, label_fontsize=16,
                       title_fontsize=16, quantiles=(0.16, 0.84), dpi=300,
                       outdir=None):
        """ Plot 1D marginal distributions

        Saves a PDF and a CDF plot per parameter via `plot_single_density`.

        Parameters
        ----------
        parameters: (list, dict), optional
            If given, either a list of the parameter names to include, or a
            dictionary of parameter names and their "true" values to plot.
        priors: {bool (False), bilby.core.prior.PriorDict}
            If true, add the stored prior probability density functions to the
            one-dimensional marginal distributions. If instead a PriorDict
            is provided, this will be plotted.
        titles: bool
            If true, add 1D titles of the median and (by default 1-sigma)
            error bars. To change the error bars, pass in the quantiles kwarg.
            See method `get_one_dimensional_median_and_error_bar` for further
            details). If `quantiles=None` is passed in, no title is added.
        file_base_name: str, optional
            If given, the base file name to use (by default `outdir/label_` is
            used)
        bins: int
            The number of histogram bins
        label_fontsize, title_fontsize: int
            The font sizes for the labels and titles
        quantiles: tuple
            A length-2 tuple of the lower and upper-quantiles to calculate
            the errors bars for.
        dpi: int
            Dots per inch resolution of the plot
        outdir: str, optional
            Path to the outdir. Default is the one store in the result object.

        Returns
        -------
        """
        # Resolve which parameters to plot and where "truth" values come from:
        # dict -> its keys/values; None -> all posterior columns; list -> as-is
        if isinstance(parameters, dict):
            plot_parameter_keys = list(parameters.keys())
            truths = parameters
        elif parameters is None:
            plot_parameter_keys = self.posterior.keys()
            if self.injection_parameters is None:
                truths = dict()
            else:
                truths = self.injection_parameters
        else:
            plot_parameter_keys = list(parameters)
            if self.injection_parameters is None:
                truths = dict()
            else:
                truths = self.injection_parameters
        if file_base_name is None:
            outdir = self._safe_outdir_creation(outdir, self.plot_marginals)
            file_base_name = '{}/{}_1d/'.format(outdir, self.label)
            check_directory_exists_and_if_not_mkdir(file_base_name)
        # Normalise ``priors`` to a dict (possibly empty)
        if priors is True:
            priors = getattr(self, 'priors', dict())
        elif isinstance(priors, dict):
            pass
        elif priors in [False, None]:
            priors = dict()
        else:
            raise ValueError('Input priors={} not understood'.format(priors))
        for i, key in enumerate(plot_parameter_keys):
            # Skip non-numeric posterior columns (e.g. labels/strings)
            if not isinstance(self.posterior[key].values[0], float):
                continue
            prior = priors.get(key, None)
            truth = truths.get(key, None)
            # One PDF plot and one CDF plot per parameter
            for cumulative in [False, True]:
                self.plot_single_density(
                    key, prior=prior, cumulative=cumulative, title=titles,
                    truth=truth, save=True, file_base_name=file_base_name,
                    bins=bins, label_fontsize=label_fontsize, dpi=dpi,
                    title_fontsize=title_fontsize, quantiles=quantiles)
    def plot_corner(self, parameters=None, priors=None, titles=True, save=True,
                    filename=None, dpi=300, **kwargs):
        """ Plot a corner-plot

        Parameters
        ----------
        parameters: (list, dict), optional
            If given, either a list of the parameter names to include, or a
            dictionary of parameter names and their "true" values to plot.
        priors: {bool (False), bilby.core.prior.PriorDict}
            If true, add the stored prior probability density functions to the
            one-dimensional marginal distributions. If instead a PriorDict
            is provided, this will be plotted.
        titles: bool
            If true, add 1D titles of the median and (by default 1-sigma)
            error bars. To change the error bars, pass in the quantiles kwarg.
            See method `get_one_dimensional_median_and_error_bar` for further
            details). If `quantiles=None` is passed in, no title is added.
        save: bool, optional
            If true, save the image using the given label and outdir
        filename: str, optional
            If given, overwrite the default filename
        dpi: int, optional
            Dots per inch resolution of the plot
        **kwargs:
            Other keyword arguments are passed to `corner.corner`. We set some
            defaults to improve the basic look and feel, but these can all be
            overridden. Also optional an 'outdir' argument which can be used
            to override the outdir set by the absolute path of the result object.

        Notes
        -----
        The generation of the corner plot themselves is done by the corner
        python module, see https://corner.readthedocs.io for more
        information.

        Returns
        -------
        fig:
            A matplotlib figure instance
        """
        # If in testing mode, not corner plots are generated
        if utils.command_line_args.test:
            return
        # bilby default corner kwargs. Overwritten by anything passed to kwargs
        defaults_kwargs = dict(
            bins=50, smooth=0.9, label_kwargs=dict(fontsize=16),
            title_kwargs=dict(fontsize=16), color='#0072C1',
            truth_color='tab:orange', quantiles=[0.16, 0.84],
            levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),
            plot_density=False, plot_datapoints=True, fill_contours=True,
            max_n_ticks=3)
        # Older matplotlib uses 'normed' rather than 'density' for histograms
        if LooseVersion(matplotlib.__version__) < "2.1":
            defaults_kwargs['hist_kwargs'] = dict(normed=True)
        else:
            defaults_kwargs['hist_kwargs'] = dict(density=True)
        # Alternate colour scheme easter egg
        if 'lionize' in kwargs and kwargs['lionize'] is True:
            defaults_kwargs['truth_color'] = 'tab:blue'
            defaults_kwargs['color'] = '#FF8C00'
        defaults_kwargs.update(kwargs)
        kwargs = defaults_kwargs
        # Handle if truths was passed in
        if 'truth' in kwargs:
            kwargs['truths'] = kwargs.pop('truth')
        if kwargs.get('truths'):
            truths = kwargs.get('truths')
            if isinstance(parameters, list) and isinstance(truths, list):
                if len(parameters) != len(truths):
                    raise ValueError(
                        "Length of parameters and truths don't match")
            elif isinstance(truths, dict) and parameters is None:
                # A truths dict doubles as the parameter selection
                parameters = kwargs.pop('truths')
            else:
                raise ValueError(
                    "Combination of parameters and truths not understood")
        # If injection parameters where stored, use these as parameter values
        # but do not overwrite input parameters (or truths)
        cond1 = getattr(self, 'injection_parameters', None) is not None
        cond2 = parameters is None
        if cond1 and cond2:
            parameters = {key: self.injection_parameters[key] for key in
                          self.search_parameter_keys}
        # If parameters is a dictionary, use the keys to determine which
        # parameters to plot and the values as truths.
        if isinstance(parameters, dict):
            plot_parameter_keys = list(parameters.keys())
            kwargs['truths'] = list(parameters.values())
        elif parameters is None:
            plot_parameter_keys = self.search_parameter_keys
        else:
            plot_parameter_keys = list(parameters)
        # Get latex formatted strings for the plot labels
        kwargs['labels'] = kwargs.get(
            'labels', self.get_latex_labels_from_parameter_keys(
                plot_parameter_keys))
        # Unless already set, set the range to include all samples
        # This prevents ValueErrors being raised for parameters with no range
        kwargs['range'] = kwargs.get('range', [1] * len(plot_parameter_keys))
        # Create the data array to plot and pass everything to corner
        xs = self.posterior[plot_parameter_keys].values
        fig = corner.corner(xs, **kwargs)
        axes = fig.get_axes()
        # Add the titles (diagonal panels are at index i + i * ndim)
        if titles and kwargs.get('quantiles', None) is not None:
            for i, par in enumerate(plot_parameter_keys):
                ax = axes[i + i * len(plot_parameter_keys)]
                if ax.title.get_text() == '':
                    ax.set_title(self.get_one_dimensional_median_and_error_bar(
                        par, quantiles=kwargs['quantiles']).string,
                        **kwargs['title_kwargs'])
        # Add priors to the 1D plots
        if priors is True:
            priors = getattr(self, 'priors', False)
        if isinstance(priors, dict):
            for i, par in enumerate(plot_parameter_keys):
                ax = axes[i + i * len(plot_parameter_keys)]
                theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
                ax.plot(theta, priors[par].prob(theta), color='C2')
        elif priors in [False, None]:
            pass
        else:
            raise ValueError('Input priors={} not understood'.format(priors))
        if save:
            if filename is None:
                outdir = self._safe_outdir_creation(kwargs.get('outdir'), self.plot_corner)
                filename = '{}/{}_corner.png'.format(outdir, self.label)
            logger.debug('Saving corner plot to {}'.format(filename))
            fig.savefig(filename, dpi=dpi)
            plt.close(fig)
        return fig
def plot_walkers(self, **kwargs):
""" Method to plot the trace of the walkers in an ensemble MCMC plot """
if hasattr(self, 'walkers') is False:
logger.warning("Cannot plot_walkers as no walkers are saved")
return
if utils.command_line_args.test:
return
nwalkers, nsteps, ndim = self.walkers.shape
idxs = np.arange(nsteps)
fig, axes = plt.subplots(nrows=ndim, figsize=(6, 3 * ndim))
walkers = self.walkers[:, :, :]
for i, ax in enumerate(axes):
ax.plot(idxs[:self.nburn + 1], walkers[:, :self.nburn + 1, i].T,
lw=0.1, color='r')
ax.set_ylabel(self.parameter_labels[i])
for i, ax in enumerate(axes):
ax.plot(idxs[self.nburn:], walkers[:, self.nburn:, i].T, lw=0.1,
color='k')
ax.set_ylabel(self.parameter_labels[i])
fig.tight_layout()
outdir = self._safe_outdir_creation(kwargs.get('outdir'), self.plot_walkers)
filename = '{}/{}_walkers.png'.format(outdir, self.label)
logger.debug('Saving walkers plot to {}'.format('filename'))
fig.savefig(filename)
plt.close(fig)
def plot_with_data(self, model, x, y, ndraws=1000, npoints=1000,
xlabel=None, ylabel=None, data_label='data',
data_fmt='o', draws_label=None, filename=None,
maxl_label='max likelihood', dpi=300, outdir=None):
""" Generate a figure showing the data and fits to the data
Parameters
----------
model: function
A python function which when called as `model(x, **kwargs)` returns
the model prediction (here `kwargs` is a dictionary of key-value
pairs of the model parameters.
x, y: np.ndarray
The independent and dependent data to plot
ndraws: int
Number of draws from the posterior to plot
npoints: int
Number of points used to plot the smoothed fit to the data
xlabel, ylabel: str
Labels for the axes
data_label, draws_label, maxl_label: str
Label for the data, draws, and max likelihood legend
data_fmt: str
Matpltolib fmt code, defaults to `'-o'`
dpi: int
Passed to `plt.savefig`
filename: str
If given, the filename to use. Otherwise, the filename is generated
from the outdir and label attributes.
outdir: str, optional
Path to the outdir. Default is the one store in the result object.
"""
# Determine model_posterior, the subset of the full posterior which
# should be passed into the model
model_keys = infer_parameters_from_function(model)
model_posterior = self.posterior[model_keys]
xsmooth = np.linspace(np.min(x), np.max(x), npoints)
fig, ax = plt.subplots()
logger.info('Plotting {} draws'.format(ndraws))
for _ in range(ndraws):
s = model_posterior.sample().to_dict('records')[0]
ax.plot(xsmooth, model(xsmooth, **s), alpha=0.25, lw=0.1, color='r',
label=draws_label)
try:
if all(~np.isnan(self.posterior.log_likelihood)):
logger.info('Plotting maximum likelihood')
s = model_posterior.iloc[self.posterior.log_likelihood.idxmax()]
ax.plot(xsmooth, model(xsmooth, **s), lw=1, color='k',
label=maxl_label)
except (AttributeError, TypeError):
logger.debug(
"No log likelihood values stored, unable to plot max")
ax.plot(x, y, data_fmt, markersize=2, label=data_label)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
ax.legend(numpoints=3)
fig.tight_layout()
if filename is None:
outdir = self._safe_outdir_creation(outdir, self.plot_with_data)
filename = '{}/{}_plot_with_data'.format(outdir, self.label)
fig.savefig(filename, dpi=dpi)
plt.close(fig)
    def samples_to_posterior(self, likelihood=None, priors=None,
                             conversion_function=None):
        """
        Convert array of samples to posterior (a Pandas data frame)

        Also applies the conversion function to any stored posterior

        Parameters
        ----------
        likelihood: bilby.likelihood.GravitationalWaveTransient, optional
            GravitationalWaveTransient likelihood used for sampling.
        priors: dict, optional
            Dictionary of prior object, used to fill in delta function priors.
        conversion_function: function, optional
            Function which adds in extra parameters to the data frame,
            should take the data_frame, likelihood and prior as arguments.
        """
        try:
            # Reuse an already-stored posterior if one exists
            data_frame = self.posterior
        except ValueError:
            # Otherwise build the data frame from the raw sample array
            data_frame = pd.DataFrame(
                self.samples, columns=self.search_parameter_keys)
        # Fill in fixed parameters from delta-function (or bare float) priors
        for key in priors:
            if isinstance(priors[key], DeltaFunction):
                data_frame[key] = priors[key].peak
            elif isinstance(priors[key], float):
                data_frame[key] = priors[key]
        data_frame['log_likelihood'] = getattr(
            self, 'log_likelihood_evaluations', np.nan)
        if self.log_prior_evaluations is None:
            # No stored evaluations: evaluate the prior at every sample
            data_frame['log_prior'] = self.priors.ln_prob(
                data_frame[self.search_parameter_keys], axis=0)
        else:
            data_frame['log_prior'] = self.log_prior_evaluations
        if conversion_function is not None:
            data_frame = conversion_function(data_frame, likelihood, priors)
        self.posterior = data_frame
def calculate_prior_values(self, priors):
"""
Evaluate prior probability for each parameter for each sample.
Parameters
----------
priors: dict, PriorDict
Prior distributions
"""
self.prior_values = pd.DataFrame()
for key in priors:
if key in self.posterior.keys():
if isinstance(priors[key], DeltaFunction):
continue
else:
self.prior_values[key]\
= priors[key].prob(self.posterior[key].values)
def get_all_injection_credible_levels(self):
"""
Get credible levels for all parameters in self.injection_parameters
Returns
-------
credible_levels: dict
The credible levels at which the injected parameters are found.
"""
if self.injection_parameters is None:
raise(TypeError, "Result object has no 'injection_parameters'. "
"Cannot copmute credible levels.")
credible_levels = {key: self.get_injection_credible_level(key)
for key in self.search_parameter_keys
if isinstance(self.injection_parameters[key], float)}
return credible_levels
def get_injection_credible_level(self, parameter):
"""
Get the credible level of the injected parameter
Calculated as CDF(injection value)
Parameters
----------
parameter: str
Parameter to get credible level for
Returns
-------
float: credible level
"""
if self.injection_parameters is None:
raise(TypeError, "Result object has no 'injection_parameters'. "
"Cannot copmute credible levels.")
if parameter in self.posterior and\
parameter in self.injection_parameters:
credible_level =\
sum(self.posterior[parameter].values <
self.injection_parameters[parameter]) / len(self.posterior)
return credible_level
else:
return np.nan
def _check_attribute_match_to_other_object(self, name, other_object):
""" Check attribute name exists in other_object and is the same
Parameters
----------
name: str
Name of the attribute in this instance
other_object: object
Other object with attributes to compare with
Returns
-------
bool: True if attribute name matches with an attribute of other_object, False otherwise
"""
a = getattr(self, name, False)
b = getattr(other_object, name, False)
logger.debug('Checking {} value: {}=={}'.format(name, a, b))
if (a is not False) and (b is not False):
type_a = type(a)
type_b = type(b)
if type_a == type_b:
if type_a in [str, float, int, dict, list]:
try:
return a == b
except ValueError:
return False
elif type_a in [np.ndarray]:
return np.all(a == b)
return False
@property
def kde(self):
""" Kernel density estimate built from the stored posterior
Uses `scipy.stats.gaussian_kde` to generate the kernel density
"""
if self._kde:
return self._kde
else:
self._kde = scipy.stats.gaussian_kde(
self.posterior[self.search_parameter_keys].values.T)
return self._kde
def posterior_probability(self, sample):
""" Calculate the posterior probabily for a new sample
This queries a Kernel Density Estimate of the posterior to calculate
the posterior probability density for the new sample.
Parameters
----------
sample: dict, or list of dictionaries
A dictionary containing all the keys from
self.search_parameter_keys and corresponding values at which to
calculate the posterior probability
Returns
-------
p: array-like,
The posterior probability of the sample
"""
if isinstance(sample, dict):
sample = [sample]
ordered_sample = [[s[key] for key in self.search_parameter_keys]
for s in sample]
return self.kde(ordered_sample)
def _safe_outdir_creation(self, outdir=None, caller_func=None):
if outdir is None:
outdir = self.outdir
try:
utils.check_directory_exists_and_if_not_mkdir(outdir)
except PermissionError:
raise FileMovedError("Can not write in the out directory.\n"
"Did you move the here file from another system?\n"
"Try calling " + caller_func.__name__ + " with the 'outdir' "
"keyword argument, e.g. " + caller_func.__name__ + "(outdir='.')")
return outdir
def plot_multiple(results, filename=None, labels=None, colours=None,
                  save=True, evidences=False, **kwargs):
    """ Generate a corner plot overlaying two sets of results

    Parameters
    ----------
    results: list
        A list of `bilby.core.result.Result` objects containing the samples to
        plot.
    filename: str
        File name to save the figure to. If None (default), a filename is
        constructed from the outdir of the first element of results and then
        the labels for all the result files.
    labels: list
        List of strings to use when generating a legend. If None (default), the
        `label` attribute of each result in `results` is used.
    colours: list
        The colours for each result. If None, default styles are applied.
    save: bool
        If true, save the figure
    kwargs: dict
        All other keyword arguments are passed to `result.plot_corner`.
        However, `show_titles` and `truths` are ignored since they would be
        ambiguous on such a plot.
    evidences: bool, optional
        Add the log-evidence calculations to the legend. If available, the
        Bayes factor will be used instead.

    Returns
    -------
    fig:
        A matplotlib figure instance
    """
    # Titles and truths would be ambiguous when overlaying several results
    kwargs['show_titles'] = False
    kwargs['truths'] = None

    fig = results[0].plot_corner(save=False, **kwargs)
    default_filename = '{}/{}'.format(results[0].outdir, 'combined')
    lines = []
    default_labels = []
    for i, result in enumerate(results):
        if colours:
            c = colours[i]
        else:
            c = 'C{}'.format(i)
        hist_kwargs = kwargs.get('hist_kwargs', dict())
        hist_kwargs['color'] = c
        # Overlay each subsequent result on the same figure
        fig = result.plot_corner(fig=fig, save=False, color=c, **kwargs)
        default_filename += '_{}'.format(result.label)
        lines.append(mpllines.Line2D([0], [0], color=c))
        default_labels.append(result.label)
    # Rescale the axes
    for i, ax in enumerate(fig.axes):
        ax.autoscale()
    plt.draw()
    if labels is None:
        labels = default_labels
    if evidences:
        # Bug fix: raw strings — the previous plain literals contained the
        # invalid escape sequence ``\m`` (DeprecationWarning, and a
        # SyntaxError in future Python); the runtime value is unchanged.
        if np.isnan(results[0].log_bayes_factor):
            template = r' $\mathrm{{ln}}(Z)={lnz:1.3g}$'
        else:
            template = r' $\mathrm{{ln}}(B)={lnbf:1.3g}$'
        labels = [template.format(lnz=result.log_evidence,
                                  lnbf=result.log_bayes_factor)
                  for ii, result in enumerate(results)]
    axes = fig.get_axes()
    ndim = int(np.sqrt(len(axes)))
    # Place the legend on the top-right panel of the corner plot
    axes[ndim - 1].legend(lines, labels)
    if filename is None:
        filename = default_filename
    if save:
        fig.savefig(filename)
    return fig
def make_pp_plot(results, filename=None, save=True, **kwargs):
    """
    Make a P-P plot for a set of runs with injected signals.

    Parameters
    ----------
    results: list
        A list of Result objects, each of these should have injected_parameters
    filename: str, optional
        The name of the file to save, the default is "outdir/pp.png"
    save: bool, optional
        Whether to save the file, default=True
    kwargs:
        Additional kwargs to pass to matplotlib.pyplot.plot

    Returns
    -------
    fig:
        matplotlib figure
    """
    fig = plt.figure()
    credible_levels = pd.DataFrame()
    # One row per result: the credible level of each injected parameter.
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — this
    # requires an older pandas; confirm the pinned dependency.
    for result in results:
        credible_levels = credible_levels.append(
            result.get_all_injection_credible_levels(), ignore_index=True)
    n_parameters = len(credible_levels.keys())
    x_values = np.linspace(0, 1, 101)
    # Empirical CDF of the credible levels per parameter; perfectly
    # calibrated results lie on the diagonal
    for key in credible_levels:
        plt.plot(x_values, [sum(credible_levels[key].values < xx) /
                            len(credible_levels) for xx in x_values],
                 color='k', alpha=min([1, 4 / n_parameters]), **kwargs)
    # Reference diagonal
    plt.plot([0, 1], [0, 1], linestyle='--', color='r')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.tight_layout()
    if save:
        if filename is None:
            filename = 'outdir/pp.png'
        plt.savefig(filename)
    return fig
class ResultError(Exception):
    """ Base exception for all Result related errors """
class FileMovedError(ResultError):
    """ Exceptions that occur when files have been moved """
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,653 | juancalderonbustillo/bilby | refs/heads/master | /bilby/gw/likelihood.py | from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
try:
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
from scipy.special import i0e
from ..core import likelihood
from ..core.utils import logger, UnsortedInterp2d
from ..core.prior import Prior, Uniform
from .detector import InterferometerList
from .prior import BBHPriorDict
from .source import lal_binary_black_hole
from .utils import noise_weighted_inner_product, build_roq_weights, blockwise_dot_product
from .waveform_generator import WaveformGenerator
from math import ceil
class GravitationalWaveTransient(likelihood.Likelihood):
""" A gravitational-wave transient likelihood object
This is the usual likelihood object to use for transient gravitational
wave parameter estimation. It computes the log-likelihood in the frequency
domain assuming a colored Gaussian noise model described by a power
spectral density
Parameters
----------
interferometers: list, bilby.gw.detector.InterferometerList
A list of `bilby.detector.Interferometer` instances - contains the
detector data and power spectral densities
waveform_generator: `bilby.waveform_generator.WaveformGenerator`
An object which computes the frequency-domain strain of the signal,
given some set of parameters
distance_marginalization: bool, optional
If true, marginalize over distance in the likelihood.
This uses a look up table calculated at run time.
time_marginalization: bool, optional
If true, marginalize over time in the likelihood.
This uses a FFT.
phase_marginalization: bool, optional
If true, marginalize over phase in the likelihood.
This is done analytically using a Bessel function.
priors: dict, optional
If given, used in the distance and phase marginalization.
Returns
-------
Likelihood: `bilby.core.likelihood.Likelihood`
A likelihood object, able to compute the likelihood of the data given
some model parameters
"""
def __init__(self, interferometers, waveform_generator, time_marginalization=False, distance_marginalization=False,
phase_marginalization=False, priors=None):
self.waveform_generator = waveform_generator
likelihood.Likelihood.__init__(self, dict())
self.interferometers = InterferometerList(interferometers)
self.time_marginalization = time_marginalization
self.distance_marginalization = distance_marginalization
self.phase_marginalization = phase_marginalization
self.priors = priors
self._check_set_duration_and_sampling_frequency_of_waveform_generator()
self.meta_data = dict(
interferometers=self.interferometers.meta_data,
time_marginalization=self.time_marginalization,
phase_marginalization=self.phase_marginalization,
distance_marginalization=self.distance_marginalization,
waveform_arguments=waveform_generator.waveform_arguments,
frequency_domain_source_model=str(waveform_generator.frequency_domain_source_model))
if self.time_marginalization:
self._check_prior_is_set(key='geocent_time')
self._setup_time_marginalization()
priors['geocent_time'] = float(self.interferometers.start_time)
if self.phase_marginalization:
self._check_prior_is_set(key='phase')
self._bessel_function_interped = None
self._setup_phase_marginalization()
priors['phase'] = float(0)
if self.distance_marginalization:
self._check_prior_is_set(key='luminosity_distance')
self._distance_array = np.linspace(self.priors['luminosity_distance'].minimum,
self.priors['luminosity_distance'].maximum, int(1e4))
self._setup_distance_marginalization()
priors['luminosity_distance'] = float(self._ref_dist)
def __repr__(self):
return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={},\n\ttime_marginalization={}, ' \
'distance_marginalization={}, phase_marginalization={}, priors={})'\
.format(self.interferometers, self.waveform_generator, self.time_marginalization,
self.distance_marginalization, self.phase_marginalization, self.priors)
def _check_set_duration_and_sampling_frequency_of_waveform_generator(self):
""" Check the waveform_generator has the same duration and
sampling_frequency as the interferometers. If they are unset, then
set them, if they differ, raise an error
"""
attributes = ['duration', 'sampling_frequency', 'start_time']
for attr in attributes:
wfg_attr = getattr(self.waveform_generator, attr)
ifo_attr = getattr(self.interferometers, attr)
if wfg_attr is None:
logger.debug(
"The waveform_generator {} is None. Setting from the "
"provided interferometers.".format(attr))
elif wfg_attr != ifo_attr:
logger.warning(
"The waveform_generator {} is not equal to that of the "
"provided interferometers. Overwriting the "
"waveform_generator.".format(attr))
setattr(self.waveform_generator, attr, ifo_attr)
def _check_prior_is_set(self, key):
if key not in self.priors or not isinstance(
self.priors[key], Prior):
logger.warning(
'Prior not provided for {}, using the BBH default.'.format(key))
if key == 'geocent_time':
self.priors[key] = Uniform(
self.interferometers.start_time,
self.interferometers.start_time + self.interferometers.duration)
else:
self.priors[key] = BBHPriorDict()[key]
@property
def priors(self):
return self.__prior
@priors.setter
def priors(self, priors):
if priors is not None:
self.__prior = priors.copy()
elif any([self.time_marginalization, self.phase_marginalization,
self.distance_marginalization]):
raise ValueError("You can't use a marginalized likelihood without specifying a priors")
else:
self.__prior = None
def noise_log_likelihood(self):
log_l = 0
for interferometer in self.interferometers:
log_l -= noise_weighted_inner_product(
interferometer.frequency_domain_strain,
interferometer.frequency_domain_strain,
interferometer.power_spectral_density_array,
self.waveform_generator.duration) / 2
return log_l.real
def log_likelihood_ratio(self):
waveform_polarizations =\
self.waveform_generator.frequency_domain_strain(self.parameters)
if waveform_polarizations is None:
return np.nan_to_num(-np.inf)
d_inner_h = 0
optimal_snr_squared = 0
d_inner_h_squared_tc_array = np.zeros(
self.interferometers.frequency_array[0:-1].shape,
dtype=np.complex128)
for interferometer in self.interferometers:
signal_ifo = interferometer.get_detector_response(
waveform_polarizations, self.parameters)
d_inner_h += interferometer.inner_product(signal=signal_ifo)
optimal_snr_squared += interferometer.optimal_snr_squared(signal=signal_ifo)
if self.time_marginalization:
d_inner_h_squared_tc_array +=\
4 / self.waveform_generator.duration * np.fft.fft(
signal_ifo[0:-1] *
interferometer.frequency_domain_strain.conjugate()[0:-1] /
interferometer.power_spectral_density_array[0:-1])
if self.time_marginalization:
if self.distance_marginalization:
rho_mf_ref_tc_array, rho_opt_ref = self._setup_rho(
d_inner_h_squared_tc_array, optimal_snr_squared)
if self.phase_marginalization:
dist_marged_log_l_tc_array = self._interp_dist_margd_loglikelihood(
abs(rho_mf_ref_tc_array), rho_opt_ref)
else:
dist_marged_log_l_tc_array = self._interp_dist_margd_loglikelihood(
rho_mf_ref_tc_array.real, rho_opt_ref)
log_l = logsumexp(dist_marged_log_l_tc_array,
b=self.time_prior_array)
elif self.phase_marginalization:
log_l = logsumexp(self._bessel_function_interped(abs(
d_inner_h_squared_tc_array)),
b=self.time_prior_array) - optimal_snr_squared / 2
else:
log_l = logsumexp(
d_inner_h_squared_tc_array.real,
b=self.time_prior_array) - optimal_snr_squared / 2
elif self.distance_marginalization:
rho_mf_ref, rho_opt_ref = self._setup_rho(d_inner_h, optimal_snr_squared)
if self.phase_marginalization:
rho_mf_ref = abs(rho_mf_ref)
log_l = self._interp_dist_margd_loglikelihood(rho_mf_ref.real, rho_opt_ref)[0]
elif self.phase_marginalization:
d_inner_h = self._bessel_function_interped(abs(d_inner_h))
log_l = d_inner_h - optimal_snr_squared / 2
else:
log_l = d_inner_h.real - optimal_snr_squared / 2
return log_l.real
def _setup_rho(self, d_inner_h, optimal_snr_squared):
rho_opt_ref = (optimal_snr_squared.real *
self.parameters['luminosity_distance'] ** 2 /
self._ref_dist ** 2.)
rho_mf_ref = (d_inner_h * self.parameters['luminosity_distance'] /
self._ref_dist)
return rho_mf_ref, rho_opt_ref
def log_likelihood(self):
return self.log_likelihood_ratio() + self.noise_log_likelihood()
@property
def _delta_distance(self):
return self._distance_array[1] - self._distance_array[0]
@property
def _ref_dist(self):
""" Smallest distance contained in priors """
return self._distance_array[0]
@property
def _rho_opt_ref_array(self):
""" Optimal filter snr at fiducial distance of ref_dist Mpc """
return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[0])
@property
def _rho_mf_ref_array(self):
""" Matched filter snr at fiducial distance of ref_dist Mpc """
if self.phase_marginalization:
return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[1])
else:
return np.hstack((-np.logspace(3, -3, self._dist_margd_loglikelihood_array.shape[1] / 2),
np.logspace(-3, 10, self._dist_margd_loglikelihood_array.shape[1] / 2)))
def _setup_distance_marginalization(self):
self._create_lookup_table()
self._interp_dist_margd_loglikelihood = UnsortedInterp2d(
self._rho_mf_ref_array, self._rho_opt_ref_array,
self._dist_margd_loglikelihood_array)
    def _create_lookup_table(self):
        """ Make the lookup table

        Fills ``self._dist_margd_loglikelihood_array`` with the
        distance-marginalised log likelihood on a grid of reference optimal
        and matched-filter SNR values, then subtracts the zero-signal
        normalisation.
        """
        # Prior probability at each grid distance; used as logsumexp weights
        # (together with the grid spacing) in the marginalisation below.
        self.distance_prior_array = np.array([self.priors['luminosity_distance'].prob(distance)
                                              for distance in self._distance_array])
        logger.info('Building lookup table for distance marginalisation.')
        # NOTE(review): this (400, 800) shape also fixes the lengths returned
        # by _rho_opt_ref_array / _rho_mf_ref_array, which read shape[0]/[1].
        self._dist_margd_loglikelihood_array = np.zeros((400, 800))
        for ii, rho_opt_ref in enumerate(self._rho_opt_ref_array):
            for jj, rho_mf_ref in enumerate(self._rho_mf_ref_array):
                # Rescale reference SNRs to each grid distance:
                # optimal SNR^2 scales as 1/d^2, matched filter as 1/d.
                optimal_snr_squared_array = rho_opt_ref * self._ref_dist ** 2. / self._distance_array ** 2
                d_inner_h_array = rho_mf_ref * self._ref_dist / self._distance_array
                if self.phase_marginalization:
                    d_inner_h_array =\
                        self._bessel_function_interped(abs(d_inner_h_array))
                self._dist_margd_loglikelihood_array[ii][jj] = \
                    logsumexp(d_inner_h_array - optimal_snr_squared_array / 2,
                              b=self.distance_prior_array * self._delta_distance)
        # Normalisation term: the same marginalisation with zero signal
        # (both arguments below are identically-zero arrays).
        log_norm = logsumexp(0. / self._distance_array - 0. / self._distance_array ** 2.,
                             b=self.distance_prior_array * self._delta_distance)
        self._dist_margd_loglikelihood_array -= log_norm
def _setup_phase_marginalization(self):
self._bessel_function_interped = interp1d(
np.logspace(-5, 10, int(1e6)), np.logspace(-5, 10, int(1e6)) +
np.log([i0e(snr) for snr in np.logspace(-5, 10, int(1e6))]),
bounds_error=False, fill_value=(0, np.nan))
    def _setup_time_marginalization(self):
        """Pre-compute the geocent_time prior weight for each discrete time.

        Builds ``self.time_prior_array``: the prior probability of each
        candidate coalescence time multiplied by the grid spacing, used as
        logsumexp weights in the time-marginalised likelihood.
        """
        # Grid spacing of 2 / f_s, i.e. times sampled at half the strain
        # sampling rate.
        delta_tc = 2 / self.waveform_generator.sampling_frequency
        # Times spanning the full segment; the first sample is dropped.
        times =\
            self.interferometers.start_time + np.linspace(
                0, self.interferometers.duration,
                int(self.interferometers.duration / 2 *
                    self.waveform_generator.sampling_frequency + 1))[1:]
        self.time_prior_array =\
            self.priors['geocent_time'].prob(times) * delta_tc
class BasicGravitationalWaveTransient(likelihood.Likelihood):
    """Simplest frequency-domain gravitational-wave transient likelihood.

    Gaussian-noise likelihood with no analytic marginalisations over
    distance, phase or time.  See ``__init__`` for the constructor
    parameters.
    """
    def __init__(self, interferometers, waveform_generator):
        """
        A likelihood object, able to compute the likelihood of the data given
        some model parameters
        The simplest frequency-domain gravitational wave transient likelihood. Does
        not include distance/phase marginalization.
        Parameters
        ----------
        interferometers: list
            A list of `bilby.gw.detector.Interferometer` instances - contains the
            detector data and power spectral densities
        waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
            An object which computes the frequency-domain strain of the signal,
            given some set of parameters
        """
        likelihood.Likelihood.__init__(self, dict())
        self.interferometers = interferometers
        self.waveform_generator = waveform_generator
    def __repr__(self):
        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={})'\
            .format(self.interferometers, self.waveform_generator)
    def noise_log_likelihood(self):
        """ Calculates the real part of noise log-likelihood
        Returns
        -------
        float: The real part of the noise log likelihood
        """
        log_l = 0
        # Sum -2/T * |d(f)|^2 / S_n(f) over frequencies for each detector.
        for interferometer in self.interferometers:
            log_l -= 2. / self.waveform_generator.duration * np.sum(
                abs(interferometer.frequency_domain_strain) ** 2 /
                interferometer.power_spectral_density_array)
        return log_l.real
    def log_likelihood(self):
        """ Calculates the real part of log-likelihood value
        Returns
        -------
        float: The real part of the log likelihood
        """
        log_l = 0
        waveform_polarizations =\
            self.waveform_generator.frequency_domain_strain(
                self.parameters.copy())
        # A None waveform signals an invalid parameter point; return the
        # largest finite negative float instead of -inf.
        if waveform_polarizations is None:
            return np.nan_to_num(-np.inf)
        for interferometer in self.interferometers:
            log_l += self.log_likelihood_interferometer(
                waveform_polarizations, interferometer)
        return log_l.real
    def log_likelihood_interferometer(self, waveform_polarizations,
                                      interferometer):
        """
        Parameters
        ----------
        waveform_polarizations: dict
            Dictionary containing the desired waveform polarization modes and the related strain
        interferometer: bilby.gw.detector.Interferometer
            The Interferometer object we want to have the log-likelihood for
        Returns
        -------
        float: The real part of the log-likelihood for this interferometer
        """
        signal_ifo = interferometer.get_detector_response(
            waveform_polarizations, self.parameters)
        # Whitened inner product of the residual (data - signal) with itself.
        log_l = - 2. / self.waveform_generator.duration * np.vdot(
            interferometer.frequency_domain_strain - signal_ifo,
            (interferometer.frequency_domain_strain - signal_ifo) /
            interferometer.power_spectral_density_array)
        return log_l.real
class ROQGravitationalWaveTransient(GravitationalWaveTransient):
    """A reduced order quadrature likelihood object
    This uses the method described in Smith et al., (2016) Phys. Rev. D 94,
    044031. A public repository of the ROQ data is available from
    https://git.ligo.org/lscsoft/ROQ_data.
    Parameters
    ----------
    interferometers: list, bilby.gw.detector.InterferometerList
        A list of `bilby.detector.Interferometer` instances - contains the
        detector data and power spectral densities
    waveform_generator: `bilby.waveform_generator.WaveformGenerator`
        An object which computes the frequency-domain strain of the signal,
        given some set of parameters
    linear_matrix: str, array
        Either a string point to the file from which to load the linear_matrix
        array, or the array itself.
    quadratic_matrix: str, array
        Either a string point to the file from which to load the quadratic_matrix
        array, or the array itself.
    priors: dict, bilby.prior.PriorDict
        A dictionary of priors containing at least the geocent_time prior
    """
    def __init__(self, interferometers, waveform_generator,
                 linear_matrix, quadratic_matrix, priors):
        GravitationalWaveTransient.__init__(
            self, interferometers=interferometers,
            waveform_generator=waveform_generator, priors=priors)
        # Matrices may be given either as .npy file paths or as arrays;
        # files are transposed on load (basis vectors along the rows).
        if isinstance(linear_matrix, str):
            logger.info("Loading linear matrix from {}".format(linear_matrix))
            linear_matrix = np.load(linear_matrix).T
        if isinstance(quadratic_matrix, str):
            logger.info("Loading quadratic_matrix from {}".format(quadratic_matrix))
            quadratic_matrix = np.load(quadratic_matrix).T
        self.linear_matrix = linear_matrix
        self.quadratic_matrix = quadratic_matrix
        self.time_samples = None
        self.weights = dict()
        self._set_weights()
        self.frequency_nodes_linear =\
            waveform_generator.waveform_arguments['frequency_nodes_linear']
    def log_likelihood_ratio(self):
        """Compute the ROQ log likelihood ratio for the current parameters.

        Returns the largest finite negative float (``nan_to_num(-inf)``) if
        the coalescence time falls outside the pre-computed time samples or
        the waveform generator rejects the parameters.
        """
        optimal_snr_squared = 0.
        matched_filter_snr_squared = 0.
        indices, in_bounds = self._closest_time_indices(
            self.parameters['geocent_time'] - self.interferometers.start_time)
        if not in_bounds:
            return np.nan_to_num(-np.inf)
        waveform = self.waveform_generator.frequency_domain_strain(
            self.parameters)
        if waveform is None:
            return np.nan_to_num(-np.inf)
        for ifo in self.interferometers:
            f_plus = ifo.antenna_response(
                self.parameters['ra'], self.parameters['dec'],
                self.parameters['geocent_time'], self.parameters['psi'], 'plus')
            f_cross = ifo.antenna_response(
                self.parameters['ra'], self.parameters['dec'],
                self.parameters['geocent_time'], self.parameters['psi'], 'cross')
            dt = ifo.time_delay_from_geocenter(
                self.parameters['ra'], self.parameters['dec'],
                ifo.strain_data.start_time)
            # Arrival time at this detector relative to its data start.
            ifo_time = self.parameters['geocent_time'] + dt - \
                ifo.strain_data.start_time
            h_plus_linear = f_plus * waveform['linear']['plus']
            h_cross_linear = f_cross * waveform['linear']['cross']
            h_plus_quadratic = f_plus * waveform['quadratic']['plus']
            h_cross_quadratic = f_cross * waveform['quadratic']['cross']
            indices, in_bounds = self._closest_time_indices(ifo_time)
            if not in_bounds:
                return np.nan_to_num(-np.inf)
            # <d|h> at the three neighbouring time samples, then
            # quadratically interpolated to the exact arrival time.
            matched_filter_snr_squared_array = np.einsum(
                'i,ji->j', np.conjugate(h_plus_linear + h_cross_linear),
                self.weights[ifo.name + '_linear'][indices])
            matched_filter_snr_squared += interp1d(
                self.time_samples[indices],
                matched_filter_snr_squared_array, kind='quadratic')(ifo_time)
            optimal_snr_squared += \
                np.vdot(np.abs(h_plus_quadratic + h_cross_quadratic)**2,
                        self.weights[ifo.name + '_quadratic'])
        log_l = matched_filter_snr_squared - optimal_snr_squared / 2
        return log_l.real
    def _closest_time_indices(self, time):
        """
        Get the closest an two neighbouring times
        Parameters
        ----------
        time: float
            Time to check
        Returns
        -------
        indices: list
            Indices nearest to time.
        in_bounds: bool
            Whether the indices are for valid times.
        """
        closest = np.argmin(np.absolute(self.time_samples - time))
        indices = [closest + ii for ii in [-1, 0, 1]]
        # Bitwise & on two booleans acts as logical AND here.
        in_bounds = (indices[0] >= 0) & (indices[2] < self.time_samples.size)
        return indices, in_bounds
    def _set_weights(self):
        """
        Setup the time-dependent ROQ weights.
        This follows FIXME: Smith et al.
        The times are chosen to allow all the merger times allows in the time
        prior.
        """
        # NOTE(review): self.linear_matrix / self.quadratic_matrix and
        # self.time_samples are re-truncated/overwritten on every loop pass,
        # so the stored values reflect the *last* interferometer -- confirm
        # all detectors share the same frequency mask and start time.
        for ifo in self.interferometers:
            # only get frequency components up to maximum_frequency
            self.linear_matrix = \
                self.linear_matrix[:, :sum(ifo.frequency_mask)]
            self.quadratic_matrix = \
                self.quadratic_matrix[:, :sum(ifo.frequency_mask)]
            # array of relative time shifts to be applied to the data
            # 0.045s comes from time for GW to traverse the Earth
            self.time_samples = np.linspace(
                self.priors['geocent_time'].minimum - 0.045,
                self.priors['geocent_time'].maximum + 0.045,
                int(ceil((self.priors['geocent_time'].maximum -
                          self.priors['geocent_time'].minimum + 0.09) *
                         ifo.strain_data.sampling_frequency)))
            self.time_samples -= ifo.strain_data.start_time
            time_space = self.time_samples[1] - self.time_samples[0]
            # array to be filled with data, shifted by discrete time_samples
            tc_shifted_data = np.zeros([
                len(self.time_samples),
                len(ifo.frequency_array[ifo.frequency_mask])], dtype=complex)
            # shift data to beginning of the prior
            # increment by the time step
            shifted_data =\
                ifo.frequency_domain_strain[ifo.frequency_mask] * \
                np.exp(2j * np.pi * ifo.frequency_array[ifo.frequency_mask] *
                       self.time_samples[0])
            single_time_shift = np.exp(
                2j * np.pi * ifo.frequency_array[ifo.frequency_mask] *
                time_space)
            for j in range(len(self.time_samples)):
                tc_shifted_data[j] = shifted_data
                shifted_data *= single_time_shift
            # to not kill all computers this minimises the memory usage of the
            # required inner products
            max_block_gigabytes = 4
            max_elements = int((max_block_gigabytes * 2 ** 30) / 8)
            self.weights[ifo.name + '_linear'] = blockwise_dot_product(
                tc_shifted_data /
                ifo.power_spectral_density_array[ifo.frequency_mask],
                self.linear_matrix, max_elements) * 4 / ifo.strain_data.duration
            # Free the large shifted-data matrix before the next detector.
            del tc_shifted_data
            self.weights[ifo.name + '_quadratic'] = build_roq_weights(
                1 / ifo.power_spectral_density_array[ifo.frequency_mask],
                self.quadratic_matrix.real, 1 / ifo.strain_data.duration)
def get_binary_black_hole_likelihood(interferometers):
    """ A wrapper to quickly set up a likelihood for BBH parameter estimation
    Parameters
    ----------
    interferometers: {bilby.gw.detector.InterferometerList, list}
        A list of `bilby.detector.Interferometer` instances, typically the
        output of either `bilby.detector.get_interferometer_with_open_data`
        or `bilby.detector.get_interferometer_with_fake_noise_and_injection`
    Returns
    -------
    bilby.GravitationalWaveTransient: The likelihood to pass to `run_sampler`
    """
    # Defaults: IMRPhenomPv2 approximant with a 50 Hz reference frequency.
    waveform_generator = WaveformGenerator(
        duration=interferometers.duration,
        sampling_frequency=interferometers.sampling_frequency,
        frequency_domain_source_model=lal_binary_black_hole,
        waveform_arguments={'waveform_approximant': 'IMRPhenomPv2',
                            'reference_frequency': 50})
    return GravitationalWaveTransient(interferometers, waveform_generator)
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,654 | juancalderonbustillo/bilby | refs/heads/master | /bilby/gw/source.py | from __future__ import division, print_function
import numpy as np
from ..core import utils
from ..core.utils import logger
from .utils import (lalsim_SimInspiralTransformPrecessingNewInitialConditions,
lalsim_GetApproximantFromString,
lalsim_SimInspiralChooseFDWaveform,
lalsim_SimInspiralWaveformParamsInsertTidalLambda1,
lalsim_SimInspiralWaveformParamsInsertTidalLambda2,
lalsim_SimIMRPhenomPCalculateModelParametersFromSourceFrame,
lalsim_SimIMRPhenomPFrequencySequence)
try:
import lal
import lalsimulation as lalsim
except ImportError:
logger.warning("You do not have lalsuite installed currently. You will"
" not be able to use some of the prebuilt functions.")
def lal_binary_black_hole(
        frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1, phi_12, a_2, tilt_2, phi_jl,
        iota, phase, **kwargs):
    """ A Binary Black Hole waveform model using lalsimulation
    Parameters
    ----------
    frequency_array: array_like
        The frequencies at which we want to calculate the strain
    mass_1: float
        The mass of the heavier object in solar masses
    mass_2: float
        The mass of the lighter object in solar masses
    luminosity_distance: float
        The luminosity distance in megaparsec
    a_1: float
        Dimensionless primary spin magnitude
    tilt_1: float
        Primary tilt angle
    phi_12: float
        Azimuthal angle between the two component spins
    a_2: float
        Dimensionless secondary spin magnitude
    tilt_2: float
        Secondary tilt angle
    phi_jl: float
        Azimuthal angle between total and orbital angular momenta
    iota: float
        Orbital inclination
    phase: float
        The phase at coalescence
    kwargs: dict
        Optional keyword arguments
    Returns
    -------
    dict: A dictionary with the plus and cross polarisation strain modes
    """
    waveform_kwargs = dict(waveform_approximant='IMRPhenomPv2', reference_frequency=50.0,
                           minimum_frequency=20.0)
    waveform_kwargs.update(kwargs)
    waveform_approximant = waveform_kwargs['waveform_approximant']
    reference_frequency = waveform_kwargs['reference_frequency']
    minimum_frequency = waveform_kwargs['minimum_frequency']
    # Enforce the mass convention mass_1 >= mass_2; None flags an invalid
    # parameter point to the caller (the likelihood).
    if mass_2 > mass_1:
        return None
    # Convert to SI units (Mpc -> m, solar masses -> kg).
    luminosity_distance = luminosity_distance * 1e6 * utils.parsec
    mass_1 = mass_1 * utils.solar_mass
    mass_2 = mass_2 * utils.solar_mass
    if tilt_1 == 0 and tilt_2 == 0:
        # Aligned spins: skip the precessing-frame transformation.
        spin_1x = 0
        spin_1y = 0
        spin_1z = a_1
        spin_2x = 0
        spin_2y = 0
        spin_2z = a_2
    else:
        iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z = (
            lalsim_SimInspiralTransformPrecessingNewInitialConditions(
                iota, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1,
                mass_2, reference_frequency, phase))
    longitude_ascending_nodes = 0.0
    eccentricity = 0.0
    mean_per_ano = 0.0
    waveform_dictionary = None
    approximant = lalsim_GetApproximantFromString(waveform_approximant)
    maximum_frequency = frequency_array[-1]
    delta_frequency = frequency_array[1] - frequency_array[0]
    hplus, hcross = lalsim_SimInspiralChooseFDWaveform(
        mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,
        spin_2z, luminosity_distance, iota, phase,
        longitude_ascending_nodes, eccentricity, mean_per_ano, delta_frequency,
        minimum_frequency, maximum_frequency, reference_frequency,
        waveform_dictionary, approximant)
    h_plus = hplus.data.data
    h_cross = hcross.data.data
    # Truncate to the requested length; LAL may return extra frequency bins.
    h_plus = h_plus[:len(frequency_array)]
    h_cross = h_cross[:len(frequency_array)]
    return {'plus': h_plus, 'cross': h_cross}
def lal_eccentric_binary_black_hole_no_spins(
        frequency_array, mass_1, mass_2, eccentricity, luminosity_distance, iota, phase, **kwargs):
    """ Eccentric binary black hole waveform model using lalsimulation (EccentricFD)
    Parameters
    ----------
    frequency_array: array_like
        The frequencies at which we want to calculate the strain
    mass_1: float
        The mass of the heavier object in solar masses
    mass_2: float
        The mass of the lighter object in solar masses
    eccentricity: float
        The orbital eccentricity of the system
    luminosity_distance: float
        The luminosity distance in megaparsec
    iota: float
        Orbital inclination
    phase: float
        The phase at coalescence
    kwargs: dict
        Optional keyword arguments
    Returns
    -------
    dict: A dictionary with the plus and cross polarisation strain modes
    """
    waveform_kwargs = dict(waveform_approximant='EccentricFD', reference_frequency=10.0,
                           minimum_frequency=10.0)
    waveform_kwargs.update(kwargs)
    waveform_approximant = waveform_kwargs['waveform_approximant']
    reference_frequency = waveform_kwargs['reference_frequency']
    minimum_frequency = waveform_kwargs['minimum_frequency']
    # Enforce the mass convention mass_1 >= mass_2; None flags an invalid
    # parameter point to the caller.
    if mass_2 > mass_1:
        return None
    # Convert to SI units (Mpc -> m, solar masses -> kg).
    luminosity_distance = luminosity_distance * 1e6 * utils.parsec
    mass_1 = mass_1 * utils.solar_mass
    mass_2 = mass_2 * utils.solar_mass
    # This model is spin-free by construction.
    spin_1x = 0.0
    spin_1y = 0.0
    spin_1z = 0.0
    spin_2x = 0.0
    spin_2y = 0.0
    spin_2z = 0.0
    longitude_ascending_nodes = 0.0
    mean_per_ano = 0.0
    waveform_dictionary = None
    approximant = lalsim_GetApproximantFromString(waveform_approximant)
    maximum_frequency = frequency_array[-1]
    delta_frequency = frequency_array[1] - frequency_array[0]
    hplus, hcross = lalsim_SimInspiralChooseFDWaveform(
        mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,
        spin_2z, luminosity_distance, iota, phase,
        longitude_ascending_nodes, eccentricity, mean_per_ano, delta_frequency,
        minimum_frequency, maximum_frequency, reference_frequency,
        waveform_dictionary, approximant)
    h_plus = hplus.data.data
    h_cross = hcross.data.data
    # NOTE(review): unlike lal_binary_black_hole, the output is not truncated
    # to len(frequency_array) here -- confirm whether that is intentional.
    return {'plus': h_plus, 'cross': h_cross}
def sinegaussian(frequency_array, hrss, Q, frequency, **kwargs):
    """Frequency-domain sine-Gaussian burst polarisations.

    Parameters
    ----------
    frequency_array: array_like
        Frequencies at which to evaluate the strain.
    hrss: float
        Root-sum-squared strain amplitude.
    Q: float
        Quality factor of the burst.
    frequency: float
        Central frequency of the burst.
    kwargs: dict
        Ignored; accepted for interface compatibility.

    Returns
    -------
    dict: Plus and cross polarisation strain arrays.
    """
    tau = Q / (np.sqrt(2.0) * np.pi * frequency)
    temp = Q / (4.0 * np.sqrt(np.pi) * frequency)
    fm = frequency_array - frequency
    fp = frequency_array + frequency
    # Gaussian envelopes centred on +/- the central frequency.
    envelope_minus = np.exp(-fm**2 * np.pi**2 * tau**2)
    envelope_plus = np.exp(-fp**2 * np.pi**2 * tau**2)
    common = (np.sqrt(np.pi) * tau) / 2.0
    amplitude_plus = hrss / np.sqrt(temp * (1 + np.exp(-Q**2)))
    amplitude_cross = hrss / np.sqrt(temp * (1 - np.exp(-Q**2)))
    h_plus = amplitude_plus * common * (envelope_minus + envelope_plus)
    h_cross = -1j * amplitude_cross * common * (envelope_minus - envelope_plus)
    return {'plus': h_plus, 'cross': h_cross}
def supernova(
        frequency_array, realPCs, imagPCs, file_path, luminosity_distance, **kwargs):
    """ A supernova NR simulation for injections

    Loads a four-column text file (Re h+, Im h+, Re hx, Im hx) and rescales
    the stored 10 kpc waveform to the requested luminosity distance.
    """
    columns = np.loadtxt(file_path, usecols=(0, 1, 2, 3), unpack=True)
    re_plus, im_plus, re_cross, im_cross = columns
    # waveform in file at 10kpc
    scaling = 1e-3 * (10.0 / luminosity_distance)
    return {'plus': scaling * (re_plus + 1.0j * im_plus),
            'cross': scaling * (re_cross + 1.0j * im_cross)}
def supernova_pca_model(
        frequency_array, pc_coeff1, pc_coeff2, pc_coeff3, pc_coeff4, pc_coeff5,
        luminosity_distance, **kwargs):
    """ Supernova signal model

    Builds the strain as a linear combination of five complex principal
    components supplied via ``kwargs['realPCs']`` / ``kwargs['imagPCs']``
    (each with the five components as columns), scaled from the stored
    10 kpc reference to the requested luminosity distance.
    """
    realPCs = kwargs['realPCs']
    imagPCs = kwargs['imagPCs']
    coefficients = [pc_coeff1, pc_coeff2, pc_coeff3, pc_coeff4, pc_coeff5]
    # Bug fix: the fifth component previously combined realPCs[:, 4] with
    # imagPCs[:, 5], mismatching real/imaginary columns and raising
    # IndexError for the expected five-column PC matrices.
    pcs = [realPCs[:, i] + 1.0j * imagPCs[:, i] for i in range(5)]
    # file at 10kpc
    scaling = 1e-23 * (10.0 / luminosity_distance)
    combined = sum(coeff * pc for coeff, pc in zip(coefficients, pcs))
    # NOTE(review): plus and cross are identical, as in the original model --
    # confirm this is intentional.
    h_plus = scaling * combined
    h_cross = scaling * combined
    return {'plus': h_plus, 'cross': h_cross}
def lal_binary_neutron_star(
        frequency_array, mass_1, mass_2, luminosity_distance, chi_1, chi_2,
        iota, phase, lambda_1, lambda_2, **kwargs):
    """ A Binary Neutron Star waveform model using lalsimulation
    Parameters
    ----------
    frequency_array: array_like
        The frequencies at which we want to calculate the strain
    mass_1: float
        The mass of the heavier object in solar masses
    mass_2: float
        The mass of the lighter object in solar masses
    luminosity_distance: float
        The luminosity distance in megaparsec
    chi_1: float
        Dimensionless aligned spin
    chi_2: float
        Dimensionless aligned spin
    iota: float
        Orbital inclination
    phase: float
        The phase at coalescence
    lambda_1: float
        Dimensionless tidal deformability of mass_1
    lambda_2: float
        Dimensionless tidal deformability of mass_2
    kwargs: dict
        Optional keyword arguments
    Returns
    -------
    dict: A dictionary with the plus and cross polarisation strain modes
    """
    waveform_kwargs = dict(waveform_approximant='TaylorF2', reference_frequency=50.0,
                           minimum_frequency=20.0)
    waveform_kwargs.update(kwargs)
    waveform_approximant = waveform_kwargs['waveform_approximant']
    reference_frequency = waveform_kwargs['reference_frequency']
    minimum_frequency = waveform_kwargs['minimum_frequency']
    # Enforce the mass convention mass_1 >= mass_2; None flags an invalid
    # parameter point to the caller.
    if mass_2 > mass_1:
        return None
    # Convert to SI units (Mpc -> m, solar masses -> kg).
    luminosity_distance = luminosity_distance * 1e6 * utils.parsec
    mass_1 = mass_1 * utils.solar_mass
    mass_2 = mass_2 * utils.solar_mass
    # Aligned-spin model: only z-components are non-zero.
    spin_1x = 0
    spin_1y = 0
    spin_1z = chi_1
    spin_2x = 0
    spin_2y = 0
    spin_2z = chi_2
    longitude_ascending_nodes = 0.0
    eccentricity = 0.0
    mean_per_ano = 0.0
    # Tidal deformabilities are passed to LAL through the waveform params dict.
    waveform_dictionary = lal.CreateDict()
    lalsim_SimInspiralWaveformParamsInsertTidalLambda1(waveform_dictionary, lambda_1)
    lalsim_SimInspiralWaveformParamsInsertTidalLambda2(waveform_dictionary, lambda_2)
    approximant = lalsim_GetApproximantFromString(waveform_approximant)
    maximum_frequency = frequency_array[-1]
    delta_frequency = frequency_array[1] - frequency_array[0]
    hplus, hcross = lalsim_SimInspiralChooseFDWaveform(
        mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,
        spin_2z, luminosity_distance, iota, phase,
        longitude_ascending_nodes, eccentricity, mean_per_ano, delta_frequency,
        minimum_frequency, maximum_frequency, reference_frequency,
        waveform_dictionary, approximant)
    h_plus = hplus.data.data
    h_cross = hcross.data.data
    # Truncate to the requested length; LAL may return extra frequency bins.
    h_plus = h_plus[:len(frequency_array)]
    h_cross = h_cross[:len(frequency_array)]
    return {'plus': h_plus, 'cross': h_cross}
def roq(frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,
        phi_12, a_2, tilt_2, phi_jl, iota, phase, **waveform_arguments):
    """
    See https://git.ligo.org/lscsoft/lalsuite/blob/master/lalsimulation/src/LALSimInspiral.c#L1460
    Parameters
    ----------
    frequency_array: np.array
        This input is ignored for the roq source model
    mass_1: float
        The mass of the heavier object in solar masses
    mass_2: float
        The mass of the lighter object in solar masses
    luminosity_distance: float
        The luminosity distance in megaparsec
    a_1: float
        Dimensionless primary spin magnitude
    tilt_1: float
        Primary tilt angle
    phi_12: float
        Azimuthal angle between the two component spins
    a_2: float
        Dimensionless secondary spin magnitude
    tilt_2: float
        Secondary tilt angle
    phi_jl: float
        Azimuthal angle between total and orbital angular momenta
    iota: float
        Orbital inclination
    phase: float
        The phase at coalescence
    Waveform arguments
    ------------------
    Non-sampled extra data used in the source model calculation
    frequency_nodes_linear: np.array
    frequency_nodes_quadratic: np.array
    reference_frequency: float
    version: str
    Note: for the frequency_nodes_linear and frequency_nodes_quadratic arguments,
    if using data from https://git.ligo.org/lscsoft/ROQ_data, this should be
    loaded as `np.load(filename).T`.
    Returns
    -------
    waveform_polarizations: dict
        Dict containing plus and cross modes evaluated at the linear and
        quadratic frequency nodes.
    """
    # Enforce the mass convention mass_1 >= mass_2; None flags an invalid
    # parameter point to the caller.
    if mass_2 > mass_1:
        return None
    frequency_nodes_linear = waveform_arguments['frequency_nodes_linear']
    frequency_nodes_quadratic = waveform_arguments['frequency_nodes_quadratic']
    # Bug fix: waveform_arguments is a plain dict, so the previous
    # getattr(waveform_arguments, key, default) calls never found the keys
    # and always fell back to the defaults, silently ignoring user-supplied
    # 'reference_frequency' and 'version'. Use dict.get instead.
    reference_frequency = waveform_arguments.get('reference_frequency', 20.0)
    versions = dict(IMRPhenomPv2=lalsim.IMRPhenomPv2_V)
    version = versions[waveform_arguments.get('version', 'IMRPhenomPv2')]
    # Convert to SI units (Mpc -> m, solar masses -> kg).
    luminosity_distance = luminosity_distance * 1e6 * utils.parsec
    mass_1 = mass_1 * utils.solar_mass
    mass_2 = mass_2 * utils.solar_mass
    if tilt_1 == 0 and tilt_2 == 0:
        # Aligned spins: skip the precessing-frame transformation.
        spin_1x = 0
        spin_1y = 0
        spin_1z = a_1
        spin_2x = 0
        spin_2y = 0
        spin_2z = a_2
    else:
        iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z = \
            lalsim_SimInspiralTransformPrecessingNewInitialConditions(
                iota, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1, mass_2,
                reference_frequency, phase)
    chi_1_l, chi_2_l, chi_p, theta_jn, alpha, phase_aligned, zeta =\
        lalsim_SimIMRPhenomPCalculateModelParametersFromSourceFrame(
            mass_1, mass_2, reference_frequency, phase, iota, spin_1x,
            spin_1y, spin_1z, spin_2x, spin_2y, spin_2z, version)
    waveform_polarizations = dict()
    h_linear_plus, h_linear_cross = lalsim_SimIMRPhenomPFrequencySequence(
        frequency_nodes_linear, chi_1_l, chi_2_l, chi_p, theta_jn,
        mass_1, mass_2, luminosity_distance,
        alpha, phase_aligned, reference_frequency, version)
    h_quadratic_plus, h_quadratic_cross = lalsim_SimIMRPhenomPFrequencySequence(
        frequency_nodes_quadratic, chi_1_l, chi_2_l, chi_p, theta_jn,
        mass_1, mass_2, luminosity_distance,
        alpha, phase_aligned, reference_frequency, version)
    # Rotate both mode sets by the polarisation angle zeta returned above.
    waveform_polarizations['linear'] = dict(
        plus=(np.cos(2 * zeta) * h_linear_plus.data.data +
              np.sin(2 * zeta) * h_linear_cross.data.data),
        cross=(np.cos(2 * zeta) * h_linear_cross.data.data -
               np.sin(2 * zeta) * h_linear_plus.data.data))
    waveform_polarizations['quadratic'] = dict(
        plus=(np.cos(2 * zeta) * h_quadratic_plus.data.data +
              np.sin(2 * zeta) * h_quadratic_cross.data.data),
        cross=(np.cos(2 * zeta) * h_quadratic_cross.data.data -
               np.sin(2 * zeta) * h_quadratic_plus.data.data))
    return waveform_polarizations
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,655 | juancalderonbustillo/bilby | refs/heads/master | /bilby/gw/__init__.py | from . import (calibration, conversion, cosmology, detector, likelihood, prior,
result, source, utils, waveform_generator)
from .waveform_generator import WaveformGenerator
from .likelihood import GravitationalWaveTransient
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,656 | juancalderonbustillo/bilby | refs/heads/master | /bilby/core/prior.py | from __future__ import division
import os
from collections import OrderedDict
from future.utils import iteritems
import numpy as np
import scipy.stats
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d
from scipy.special import erf, erfinv
# Keep import bilby statement, it is necessary for some eval() statements
import bilby # noqa
from .utils import logger, infer_args_from_method, check_directory_exists_and_if_not_mkdir
class PriorDict(OrderedDict):
    def __init__(self, dictionary=None, filename=None):
        """ A set of priors
        Parameters
        ----------
        dictionary: dict, None
            If given, a dictionary to generate the prior set.
        filename: str, None
            If given, a file containing the prior to generate the prior set.
        """
        OrderedDict.__init__(self)
        if isinstance(dictionary, dict):
            self.from_dictionary(dictionary)
        elif type(dictionary) is str:
            # A bare string is treated as a file path for convenience.
            logger.debug('Argument "dictionary" is a string.' +
                         ' Assuming it is intended as a file name.')
            self.from_file(dictionary)
        elif type(filename) is str:
            self.from_file(filename)
        elif dictionary is not None:
            raise ValueError("PriorDict input dictionary not understood")
        self.convert_floats_to_delta_functions()
    def to_file(self, outdir, label):
        """ Write the prior distribution to file.
        Parameters
        ----------
        outdir: str
            output directory name
        label: str
            Output file naming scheme
        """
        check_directory_exists_and_if_not_mkdir(outdir)
        prior_file = os.path.join(outdir, "{}.prior".format(label))
        logger.debug("Writing priors to {}".format(prior_file))
        with open(prior_file, "w") as outfile:
            for key in self.keys():
                outfile.write(
                    "{} = {}\n".format(key, self[key]))
    def from_file(self, filename):
        """ Reads in a prior from a file specification
        Parameters
        ----------
        filename: str
            Name of the file to be read in
        Notes
        -----
        Lines beginning with '#' or empty lines will be ignored.
        """
        comments = ['#', '\n']
        prior = dict()
        with open(filename, 'r') as f:
            for line in f:
                if line[0] in comments:
                    continue
                elements = line.split('=')
                key = elements[0].replace(' ', '')
                # Re-join so '=' characters inside the value are preserved.
                val = '='.join(elements[1:])
                # SECURITY: values are eval()'d -- only load trusted files.
                prior[key] = eval(val)
        self.update(prior)
    def from_dictionary(self, dictionary):
        # String values are eval()'d into Prior instances (or plain
        # numbers/strings); anything that fails to evaluate is kept as-is.
        # SECURITY: only pass trusted dictionaries.
        for key, val in iteritems(dictionary):
            if isinstance(val, str):
                try:
                    prior = eval(val)
                    if isinstance(prior, (Prior, float, int, str)):
                        val = prior
                except (NameError, SyntaxError, TypeError):
                    logger.debug(
                        "Failed to load dictionary value {} correctlty"
                        .format(key))
                    pass
            self[key] = val
    def convert_floats_to_delta_functions(self):
        """ Convert all float parameters to delta functions """
        for key in self:
            if isinstance(self[key], Prior):
                continue
            elif isinstance(self[key], float) or isinstance(self[key], int):
                self[key] = DeltaFunction(self[key])
                logger.debug(
                    "{} converted to delta function prior.".format(key))
            else:
                logger.debug(
                    "{} cannot be converted to delta function prior."
                    .format(key))
    def fill_priors(self, likelihood, default_priors_file=None):
        """
        Fill dictionary of priors based on required parameters of likelihood
        Any floats in prior will be converted to delta function prior. Any
        required, non-specified parameters will use the default.
        Note: if `likelihood` has `non_standard_sampling_parameter_keys`, then
        this will set-up default priors for those as well.
        Parameters
        ----------
        likelihood: bilby.likelihood.GravitationalWaveTransient instance
            Used to infer the set of parameters to fill the prior with
        default_priors_file: str, optional
            If given, a file containing the default priors.
        Returns
        -------
        prior: dict
            The filled prior dictionary
        """
        self.convert_floats_to_delta_functions()
        missing_keys = set(likelihood.parameters) - set(self.keys())
        for missing_key in missing_keys:
            # Redundant parameters (derivable from others) are not filled.
            if not self.test_redundancy(missing_key):
                default_prior = create_default_prior(missing_key, default_priors_file)
                if default_prior is None:
                    set_val = likelihood.parameters[missing_key]
                    logger.warning(
                        "Parameter {} has no default prior and is set to {}, this"
                        " will not be sampled and may cause an error."
                        .format(missing_key, set_val))
                else:
                    self[missing_key] = default_prior
        for key in self:
            self.test_redundancy(key)
    def sample(self, size=None):
        """Draw samples from the prior set
        Parameters
        ----------
        size: int or tuple of ints, optional
            See numpy.random.uniform docs
        Returns
        -------
        dict: Dictionary of the samples
        """
        return self.sample_subset(keys=self.keys(), size=size)
    def sample_subset(self, keys=iter([]), size=None):
        """Draw samples from the prior set for parameters which are not a DeltaFunction
        Parameters
        ----------
        keys: list
            List of prior keys to draw samples from
        size: int or tuple of ints, optional
            See numpy.random.uniform docs
        Returns
        -------
        dict: Dictionary of the drawn samples
        """
        # NOTE(review): the default `keys=iter([])` is an iterator created
        # once at definition time; it is always empty, so the default draws
        # no samples. A tuple default would express the same intent clearly.
        self.convert_floats_to_delta_functions()
        samples = dict()
        for key in keys:
            if isinstance(self[key], Prior):
                samples[key] = self[key].sample(size=size)
            else:
                logger.debug('{} not a known prior.'.format(key))
        return samples
    def prob(self, sample, **kwargs):
        """
        Parameters
        ----------
        sample: dict
            Dictionary of the samples of which we want to have the probability of
        kwargs:
            The keyword arguments are passed directly to `np.product`
        Returns
        -------
        float: Joint probability of all individual sample probabilities
        """
        return np.product([self[key].prob(sample[key]) for key in sample], **kwargs)
    def ln_prob(self, sample, axis=None):
        """
        Parameters
        ----------
        sample: dict
            Dictionary of the samples of which to calculate the log probability
        axis: None or int
            Axis along which the summation is performed
        Returns
        -------
        float or ndarray:
            Joint log probability of all the individual sample probabilities
        """
        return np.sum([self[key].ln_prob(sample[key]) for key in sample],
                      axis=axis)
    def rescale(self, keys, theta):
        """Rescale samples from unit cube to prior
        Parameters
        ----------
        keys: list
            List of prior keys to be rescaled
        theta: list
            List of randomly drawn values on a unit cube associated with the prior keys
        Returns
        -------
        list: List of floats containing the rescaled sample
        """
        return [self[key].rescale(sample) for key, sample in zip(keys, theta)]
    def test_redundancy(self, key):
        """Empty redundancy test, should be overwritten in subclasses"""
        return False
    def copy(self):
        """
        We have to overwrite the copy method as it fails due to the presence of
        defaults.
        """
        return self.__class__(dictionary=OrderedDict(self))
class PriorSet(PriorDict):
    """Deprecated alias of PriorDict, kept so existing user code keeps working."""
    def __init__(self, dictionary=None, filename=None):
        """ DEPRECATED: USE PriorDict INSTEAD"""
        # Warn on construction but delegate everything to PriorDict.
        logger.warning("The name 'PriorSet' is deprecated use 'PriorDict' instead")
        super(PriorSet, self).__init__(dictionary, filename)
def create_default_prior(name, default_priors_file=None):
    """Make a default prior for a parameter with a known name.

    Parameters
    ----------
    name: str
        Parameter name
    default_priors_file: str, optional
        If given, a file containing the default priors.

    Return
    ------
    prior: Prior
        Default prior distribution for that parameter, if unknown None is
        returned.
    """
    # Without a prior file there is nothing to look the name up in.
    if default_priors_file is None:
        logger.debug(
            "No prior file given.")
        return None
    default_priors = PriorDict(filename=default_priors_file)
    if name in default_priors.keys():
        return default_priors[name]
    logger.debug(
        "No default prior found for variable {}.".format(name))
    return None
class Prior(object):
    """Base class for all prior distributions.

    Subclasses implement `rescale` and `prob` (and usually `ln_prob`);
    this class provides sampling, comparison, repr and label plumbing.
    """

    # Mapping from parameter name to a default latex label; subclasses may
    # override this class attribute.
    _default_latex_labels = dict()

    def __init__(self, name=None, latex_label=None, unit=None, minimum=-np.inf,
                 maximum=np.inf):
        """ Implements a Prior object

        Parameters
        ----------
        name: str, optional
            Name associated with prior.
        latex_label: str, optional
            Latex label associated with prior, used for plotting.
        unit: str, optional
            If given, a Latex string describing the units of the parameter.
        minimum: float, optional
            Minimum of the domain, default=-np.inf
        maximum: float, optional
            Maximum of the domain, default=np.inf
        """
        self.name = name
        self.latex_label = latex_label
        self.unit = unit
        self.minimum = minimum
        self.maximum = maximum

    def __call__(self):
        """Overrides the __call__ special method. Calls the sample method.

        Returns
        -------
        float: The return value of the sample method.
        """
        return self.sample()

    def __eq__(self, other):
        # Equal iff same class, same attribute names, and element-wise equal
        # values (ndarray attributes compared with np.array_equal).
        if self.__class__ != other.__class__:
            return False
        if sorted(self.__dict__.keys()) != sorted(other.__dict__.keys()):
            return False
        for key in self.__dict__:
            if type(self.__dict__[key]) is np.ndarray:
                if not np.array_equal(self.__dict__[key], other.__dict__[key]):
                    return False
            else:
                if not self.__dict__[key] == other.__dict__[key]:
                    return False
        return True

    def sample(self, size=None):
        """Draw a sample from the prior

        Parameters
        ----------
        size: int or tuple of ints, optional
            See numpy.random.uniform docs

        Returns
        -------
        float: A random number between 0 and 1, rescaled to match the distribution of this Prior
        """
        return self.rescale(np.random.uniform(0, 1, size))

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the prior.

        This should be overwritten by each subclass.

        Parameters
        ----------
        val: float
            A random number between 0 and 1

        Returns
        -------
        None
        """
        return None

    def prob(self, val):
        """Return the prior probability of val, this should be overwritten

        Parameters
        ----------
        val: float

        Returns
        -------
        np.nan: Placeholder in this base class; subclasses return a float.
        """
        return np.nan

    def ln_prob(self, val):
        """Return the prior ln probability of val, this should be overwritten

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Natural log of `self.prob(val)` (np.nan in this base class).
        """
        return np.log(self.prob(val))

    def is_in_prior_range(self, val):
        """Returns True if val is in the prior boundaries, False otherwise

        Parameters
        ----------
        val: float

        Returns
        -------
        bool: True (elementwise for arrays) if minimum <= val <= maximum
        """
        return (val >= self.minimum) & (val <= self.maximum)

    @staticmethod
    def test_valid_for_rescaling(val):
        """Test if 0 <= val <= 1 (bounds inclusive)

        Parameters
        ----------
        val: float

        Raises
        -------
        ValueError: If val is not between 0 and 1
        """
        val = np.atleast_1d(val)
        tests = (val < 0) + (val > 1)
        if np.any(tests):
            raise ValueError("Number to be rescaled should be in [0, 1]")

    def __repr__(self):
        """Overrides the special method __repr__.

        Returns a representation of this instance that resembles how it is instantiated.
        Works correctly for all child classes

        Returns
        -------
        str: A string representation of this instance
        """
        subclass_args = infer_args_from_method(self.__init__)
        prior_name = self.__class__.__name__
        # Values stored behind properties are not in __dict__; merge them in
        # so the repr reflects the public constructor arguments.
        property_names = [p for p in dir(self.__class__) if isinstance(getattr(self.__class__, p), property)]
        dict_with_properties = self.__dict__.copy()
        for key in property_names:
            dict_with_properties[key] = getattr(self, key)
        args = ', '.join(['{}={}'.format(key, repr(dict_with_properties[key])) for key in subclass_args])
        return "{}({})".format(prior_name, args)

    @property
    def is_fixed(self):
        """
        Returns True if the prior is fixed and should not be used in the sampler. Does this by checking if this instance
        is an instance of DeltaFunction.

        Returns
        -------
        bool: Whether it's fixed or not!
        """
        return isinstance(self, DeltaFunction)

    @property
    def latex_label(self):
        """Latex label that can be used for plots.

        Draws from a set of default labels if no label is given

        Returns
        -------
        str: A latex representation for this prior
        """
        return self.__latex_label

    @latex_label.setter
    def latex_label(self, latex_label=None):
        if latex_label is None:
            # Fall back to the class-level default label for this name.
            self.__latex_label = self.__default_latex_label
        else:
            self.__latex_label = latex_label

    @property
    def unit(self):
        # Latex string describing the parameter's units, or None.
        return self.__unit

    @unit.setter
    def unit(self, unit):
        self.__unit = unit

    @property
    def latex_label_with_unit(self):
        """ If a unit is specified, returns a string of the latex label and unit """
        if self.unit is not None:
            return "{} [{}]".format(self.latex_label, self.unit)
        else:
            return self.latex_label

    @property
    def minimum(self):
        # Lower bound of the prior domain.
        return self._minimum

    @minimum.setter
    def minimum(self, minimum):
        self._minimum = minimum

    @property
    def maximum(self):
        # Upper bound of the prior domain.
        return self._maximum

    @maximum.setter
    def maximum(self, maximum):
        self._maximum = maximum

    @property
    def __default_latex_label(self):
        # Look up a default label by parameter name; fall back to the raw name.
        if self.name in self._default_latex_labels.keys():
            label = self._default_latex_labels[self.name]
        else:
            label = self.name
        return label
class DeltaFunction(Prior):

    def __init__(self, peak, name=None, latex_label=None, unit=None):
        """Dirac delta function prior, this always returns peak.

        Parameters
        ----------
        peak: float
            Peak value of the delta function
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
                       minimum=peak, maximum=peak)
        self.peak = peak

    def rescale(self, val):
        """Rescale everything to the peak with the correct shape.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Rescaled probability, equivalent to peak
        """
        Prior.test_valid_for_rescaling(val)
        # val ** 0 is 1 with the same shape as val, so scalars stay scalars
        # and arrays keep their shape.
        return val ** 0 * self.peak

    def prob(self, val):
        """Return the prior probability of val

        Parameters
        ----------
        val: float

        Returns
        -------
        float: np.inf if val = peak, 0 otherwise
        """
        # bool * inf gives inf at the peak and nan (0 * inf) elsewhere;
        # nan_to_num maps those nans back to 0.
        return np.nan_to_num(np.multiply(val == self.peak, np.inf))
class PowerLaw(Prior):

    def __init__(self, alpha, minimum, maximum, name=None, latex_label=None,
                 unit=None):
        """Power law with bounds and alpha, spectral index

        Parameters
        ----------
        alpha: float
            Power law exponent parameter
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label,
                       minimum=minimum, maximum=maximum, unit=unit)
        self.alpha = alpha

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the power-law prior.

        This maps to the inverse CDF. This has been analytically solved for this case.

        Parameters
        ----------
        val: float
            Uniform probability

        Returns
        -------
        float: Rescaled probability
        """
        Prior.test_valid_for_rescaling(val)
        # alpha == -1 is the log-uniform special case where the generic
        # (1 + alpha) normalisation below would divide by zero.
        if self.alpha == -1:
            return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))
        else:
            return (self.minimum ** (1 + self.alpha) + val *
                    (self.maximum ** (1 + self.alpha) - self.minimum ** (1 + self.alpha))) ** (1. / (1 + self.alpha))

    def prob(self, val):
        """Return the prior probability of val

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val (zero outside [minimum, maximum])
        """
        # nan_to_num guards against invalid values at val <= 0; the
        # is_in_prior_range factor zeroes the density outside the bounds.
        if self.alpha == -1:
            return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * self.is_in_prior_range(val)
        else:
            return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /
                                 (self.maximum ** (1 + self.alpha) -
                                  self.minimum ** (1 + self.alpha))) * self.is_in_prior_range(val)

    def ln_prob(self, val):
        """Return the logarithmic prior probability of val

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Log prior probability of val (-inf outside the bounds)
        """
        if self.alpha == -1:
            normalising = 1. / np.log(self.maximum / self.minimum)
        else:
            normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -
                                              self.minimum ** (1 + self.alpha))
        # np.log(1. * in_range) yields 0 inside the bounds and -inf outside.
        return (self.alpha * np.nan_to_num(np.log(val)) + np.log(normalising)) + np.log(1. * self.is_in_prior_range(val))
class Uniform(Prior):

    def __init__(self, minimum, maximum, name=None, latex_label=None,
                 unit=None):
        """Uniform (flat) prior on the interval [minimum, maximum].

        Parameters
        ----------
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label,
                       minimum=minimum, maximum=maximum, unit=unit)

    def rescale(self, val):
        """Linearly map a unit-interval sample onto [minimum, maximum]."""
        Prior.test_valid_for_rescaling(val)
        width = self.maximum - self.minimum
        return self.minimum + val * width

    def prob(self, val):
        """Return the prior probability of val

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        width = self.maximum - self.minimum
        return scipy.stats.uniform.pdf(val, loc=self.minimum, scale=width)

    def ln_prob(self, val):
        """Return the log prior probability of val

        Parameters
        ----------
        val: float

        Returns
        -------
        float: log probability of val
        """
        width = self.maximum - self.minimum
        return scipy.stats.uniform.logpdf(val, loc=self.minimum, scale=width)
class LogUniform(PowerLaw):

    def __init__(self, minimum, maximum, name=None, latex_label=None,
                 unit=None):
        """Uniform-in-log prior: a PowerLaw with alpha fixed at -1.

        Parameters
        ----------
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        PowerLaw.__init__(self, name=name, latex_label=latex_label, unit=unit,
                          minimum=minimum, maximum=maximum, alpha=-1)
        # A non-positive minimum makes the log measure ill-defined; warn.
        if self.minimum <= 0:
            logger.warning('You specified a uniform-in-log prior with minimum={}'.format(self.minimum))
class Cosine(Prior):

    def __init__(self, name=None, latex_label=None, unit=None,
                 minimum=-np.pi / 2, maximum=np.pi / 2):
        """Prior with density proportional to cos(val), bounded by default to [-pi/2, pi/2].

        Parameters
        ----------
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
                       minimum=minimum, maximum=maximum)

    def rescale(self, val):
        """
        Map a unit-interval sample onto the cosine prior using the analytic
        inverse CDF, arcsin(2 * val - 1).
        """
        Prior.test_valid_for_rescaling(val)
        return np.arcsin(2 * val - 1)

    def prob(self, val):
        """Return the prior probability of val. Defined over [-pi/2, pi/2].

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        in_range = self.is_in_prior_range(val)
        return np.cos(val) / 2 * in_range
class Sine(Prior):

    def __init__(self, name=None, latex_label=None, unit=None, minimum=0,
                 maximum=np.pi):
        """Prior with density proportional to sin(val), bounded by default to [0, pi].

        Parameters
        ----------
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
                       minimum=minimum, maximum=maximum)

    def rescale(self, val):
        """
        Map a unit-interval sample onto the sine prior using the analytic
        inverse CDF, arccos(1 - 2 * val).
        """
        Prior.test_valid_for_rescaling(val)
        return np.arccos(1 - 2 * val)

    def prob(self, val):
        """Return the prior probability of val. Defined over [0, pi].

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        in_range = self.is_in_prior_range(val)
        return np.sin(val) / 2 * in_range
class Gaussian(Prior):

    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
        """Gaussian prior with mean mu and width sigma

        Parameters
        ----------
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
        self.mu = mu
        self.sigma = sigma

    def rescale(self, val):
        """
        Map a unit-interval sample onto the Gaussian via the analytic inverse
        CDF (probit function).
        """
        Prior.test_valid_for_rescaling(val)
        quantile = erfinv(2 * val - 1)
        return self.mu + quantile * 2 ** 0.5 * self.sigma

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        residual = val - self.mu
        return np.exp(-residual ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 / self.sigma

    def ln_prob(self, val):
        """Return the log prior probability of val (closed-form Gaussian)."""
        residual = val - self.mu
        return -0.5 * (residual ** 2 / self.sigma ** 2 + np.log(2 * np.pi * self.sigma ** 2))
class Normal(Gaussian):

    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
        """A synonym for the Gaussian distribution.

        Parameters
        ----------
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        super(Normal, self).__init__(mu=mu, sigma=sigma, name=name,
                                     latex_label=latex_label, unit=unit)
class TruncatedGaussian(Prior):

    def __init__(self, mu, sigma, minimum, maximum, name=None,
                 latex_label=None, unit=None):
        """Truncated Gaussian prior with mean mu and width sigma

        https://en.wikipedia.org/wiki/Truncated_normal_distribution

        Parameters
        ----------
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
                       minimum=minimum, maximum=maximum)
        self.mu = mu
        self.sigma = sigma

    @property
    def normalisation(self):
        """ Calculates the proper normalisation of the truncated Gaussian

        Returns
        -------
        float: Proper normalisation of the truncated Gaussian
        """
        # Fraction of the full Gaussian's probability mass lying inside
        # [minimum, maximum], expressed via the error function.
        return (erf((self.maximum - self.mu) / 2 ** 0.5 / self.sigma) - erf(
            (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate truncated Gaussian prior.

        This maps to the inverse CDF. This has been analytically solved for this case.
        """
        Prior.test_valid_for_rescaling(val)
        return erfinv(2 * val * self.normalisation + erf(
            (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val (zero outside [minimum, maximum])
        """
        return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (
            2 * np.pi) ** 0.5 / self.sigma / self.normalisation * self.is_in_prior_range(val)
class TruncatedNormal(TruncatedGaussian):

    def __init__(self, mu, sigma, minimum, maximum, name=None,
                 latex_label=None, unit=None):
        """A synonym for the TruncatedGaussian distribution.

        Parameters
        ----------
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        super(TruncatedNormal, self).__init__(mu=mu, sigma=sigma,
                                              minimum=minimum, maximum=maximum,
                                              name=name, latex_label=latex_label,
                                              unit=unit)
class HalfGaussian(TruncatedGaussian):

    def __init__(self, sigma, name=None, latex_label=None, unit=None):
        """A Gaussian with its mode at zero, and truncated to only be positive.

        Parameters
        ----------
        sigma: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        super(HalfGaussian, self).__init__(mu=0., sigma=sigma, minimum=0.,
                                           maximum=np.inf, name=name,
                                           latex_label=latex_label, unit=unit)
class HalfNormal(HalfGaussian):

    def __init__(self, sigma, name=None, latex_label=None, unit=None):
        """A synonym for the HalfGaussian distribution.

        Parameters
        ----------
        sigma: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        super(HalfNormal, self).__init__(sigma=sigma, name=name,
                                         latex_label=latex_label, unit=unit)
class LogNormal(Prior):

    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
        """Log-normal prior with mean mu and width sigma

        https://en.wikipedia.org/wiki/Log-normal_distribution

        Parameters
        ----------
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass

        Raises
        ------
        ValueError
            If sigma is not strictly positive.
        """
        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,
                       unit=unit)
        if sigma <= 0.:
            # Bug fix: the message previously named the 'LogGaussian' prior
            # even when this class is used directly as LogNormal.
            raise ValueError("For the LogNormal prior the standard deviation must be positive")
        self.mu = mu
        self.sigma = sigma

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate LogNormal prior.

        This maps to the inverse CDF. This has been analytically solved for this case.
        """
        Prior.test_valid_for_rescaling(val)
        return scipy.stats.lognorm.ppf(val, self.sigma, scale=np.exp(self.mu))

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        return scipy.stats.lognorm.pdf(val, self.sigma, scale=np.exp(self.mu))

    def ln_prob(self, val):
        """Return the log prior probability of val."""
        return scipy.stats.lognorm.logpdf(val, self.sigma, scale=np.exp(self.mu))
class LogGaussian(LogNormal):

    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
        """Synonym of LogNormal prior

        https://en.wikipedia.org/wiki/Log-normal_distribution

        Parameters
        ----------
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        super(LogGaussian, self).__init__(mu=mu, sigma=sigma, name=name,
                                          latex_label=latex_label, unit=unit)
class Exponential(Prior):

    def __init__(self, mu, name=None, latex_label=None, unit=None):
        """Exponential prior with mean mu

        Parameters
        ----------
        mu: float
            Mean of the Exponential prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        # The support of an exponential distribution starts at zero.
        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,
                       unit=unit)
        self.mu = mu

    def rescale(self, val):
        """
        Map a unit-interval sample onto the exponential prior via the inverse
        CDF (scipy's percent point function).
        """
        Prior.test_valid_for_rescaling(val)
        return scipy.stats.expon.ppf(val, scale=self.mu)

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        return scipy.stats.expon.pdf(val, scale=self.mu)

    def ln_prob(self, val):
        """Return the log prior probability of val."""
        return scipy.stats.expon.logpdf(val, scale=self.mu)
class StudentT(Prior):

    def __init__(self, df, mu=0., scale=1., name=None, latex_label=None,
                 unit=None):
        """Student's t-distribution prior with number of degrees of freedom df,
        mean mu and scale

        https://en.wikipedia.org/wiki/Student%27s_t-distribution#Generalized_Student's_t-distribution

        Parameters
        ----------
        df: float
            Number of degrees of freedom for distribution
        mu: float
            Mean of the Student's t-prior
        scale: float
            Width of the Student's t-prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
        # Both shape parameters must be strictly positive to be well-defined.
        if df <= 0. or scale <= 0.:
            raise ValueError("For the StudentT prior the number of degrees of freedom and scale must be positive")
        self.df = df
        self.mu = mu
        self.scale = scale

    def rescale(self, val):
        """
        Map a unit-interval sample onto the Student's t-prior via the inverse
        CDF (scipy's percent point function).
        """
        Prior.test_valid_for_rescaling(val)
        return scipy.stats.t.ppf(val, self.df, loc=self.mu, scale=self.scale)

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        return scipy.stats.t.pdf(val, self.df, loc=self.mu, scale=self.scale)

    def ln_prob(self, val):
        """Return the log prior probability of val."""
        return scipy.stats.t.logpdf(val, self.df, loc=self.mu, scale=self.scale)
class Beta(Prior):

    def __init__(self, alpha, beta, minimum=0, maximum=1, name=None,
                 latex_label=None, unit=None):
        """Beta distribution

        https://en.wikipedia.org/wiki/Beta_distribution

        This wraps around
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html

        Parameters
        ----------
        alpha: float
            first shape parameter
        beta: float
            second shape parameter
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass

        Raises
        ------
        ValueError
            If alpha or beta is not strictly positive.
        """
        Prior.__init__(self, minimum=minimum, maximum=maximum, name=name,
                       latex_label=latex_label, unit=unit)
        if alpha <= 0. or beta <= 0.:
            raise ValueError("alpha and beta must both be positive values")
        self.alpha = alpha
        self.beta = beta
        # scipy's loc/scale parametrisation maps [0, 1] onto [minimum, maximum].
        self._loc = minimum
        self._scale = maximum - minimum

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Beta prior.

        This maps to the inverse CDF. This has been analytically solved for this case.
        """
        Prior.test_valid_for_rescaling(val)
        # use scipy distribution percentage point function (ppf)
        return scipy.stats.beta.ppf(
            val, self.alpha, self.beta, loc=self._loc, scale=self._scale)

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float or ndarray: Prior probability of val, with non-finite values
            replaced by 0.
        """
        spdf = scipy.stats.beta.pdf(
            val, self.alpha, self.beta, loc=self._loc, scale=self._scale)
        if np.all(np.isfinite(spdf)):
            return spdf

        # deal with the fact that if alpha or beta are < 1 you get infinities at 0 and 1
        if isinstance(val, np.ndarray):
            # Bug fix: this previously indexed with the `np.isfinite` function
            # object itself (a TypeError at runtime) and then returned the
            # unsanitized `spdf` instead of `pdf`.
            pdf = np.zeros(len(val))
            finite = np.isfinite(spdf)
            pdf[finite] = spdf[finite]
            return pdf
        else:
            return 0.

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float or ndarray: Log prior probability of val, with non-finite
            values replaced by -inf.
        """
        spdf = scipy.stats.beta.logpdf(
            val, self.alpha, self.beta, loc=self._loc, scale=self._scale)
        if np.all(np.isfinite(spdf)):
            return spdf

        if isinstance(val, np.ndarray):
            # Same fix as in `prob`: mask with np.isfinite(spdf) and return
            # the sanitized array.
            pdf = -np.inf * np.ones(len(val))
            finite = np.isfinite(spdf)
            pdf[finite] = spdf[finite]
            return pdf
        else:
            return -np.inf
class Logistic(Prior):

    def __init__(self, mu, scale, name=None, latex_label=None, unit=None):
        """Logistic distribution

        https://en.wikipedia.org/wiki/Logistic_distribution

        Parameters
        ----------
        mu: float
            Mean of the distribution
        scale: float
            Width of the distribution
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
        # The scale parameter must be strictly positive.
        if scale <= 0.:
            raise ValueError("For the Logistic prior the scale must be positive")
        self.mu = mu
        self.scale = scale

    def rescale(self, val):
        """
        Map a unit-interval sample onto the logistic prior via the inverse
        CDF (scipy's percent point function).
        """
        Prior.test_valid_for_rescaling(val)
        return scipy.stats.logistic.ppf(val, loc=self.mu, scale=self.scale)

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        return scipy.stats.logistic.pdf(val, loc=self.mu, scale=self.scale)

    def ln_prob(self, val):
        """Return the log prior probability of val."""
        return scipy.stats.logistic.logpdf(val, loc=self.mu, scale=self.scale)
class Cauchy(Prior):

    def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):
        """Cauchy distribution

        https://en.wikipedia.org/wiki/Cauchy_distribution

        Parameters
        ----------
        alpha: float
            Location parameter
        beta: float
            Scale parameter
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
        # The scale parameter must be strictly positive.
        if beta <= 0.:
            raise ValueError("For the Cauchy prior the scale must be positive")
        self.alpha = alpha
        self.beta = beta

    def rescale(self, val):
        """
        Map a unit-interval sample onto the Cauchy prior via the inverse CDF
        (scipy's percent point function).
        """
        Prior.test_valid_for_rescaling(val)
        return scipy.stats.cauchy.ppf(val, loc=self.alpha, scale=self.beta)

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        return scipy.stats.cauchy.pdf(val, loc=self.alpha, scale=self.beta)

    def ln_prob(self, val):
        """Return the log prior probability of val."""
        return scipy.stats.cauchy.logpdf(val, loc=self.alpha, scale=self.beta)
class Lorentzian(Cauchy):

    def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):
        """Synonym for the Cauchy distribution

        https://en.wikipedia.org/wiki/Cauchy_distribution

        Parameters
        ----------
        alpha: float
            Location parameter
        beta: float
            Scale parameter
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        super(Lorentzian, self).__init__(alpha=alpha, beta=beta, name=name,
                                         latex_label=latex_label, unit=unit)
class Gamma(Prior):

    def __init__(self, k, theta=1., name=None, latex_label=None, unit=None):
        """Gamma distribution

        https://en.wikipedia.org/wiki/Gamma_distribution

        Parameters
        ----------
        k: float
            The shape parameter
        theta: float
            The scale parameter
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        # The gamma distribution is supported on [0, inf).
        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,
                       unit=unit)
        if k <= 0 or theta <= 0:
            raise ValueError("For the Gamma prior the shape and scale must be positive")
        self.k = k
        self.theta = theta

    def rescale(self, val):
        """
        Map a unit-interval sample onto the Gamma prior via the inverse CDF
        (scipy's percent point function).
        """
        Prior.test_valid_for_rescaling(val)
        return scipy.stats.gamma.ppf(val, self.k, loc=0., scale=self.theta)

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        return scipy.stats.gamma.pdf(val, self.k, loc=0., scale=self.theta)

    def ln_prob(self, val):
        """Return the log prior probability of val."""
        return scipy.stats.gamma.logpdf(val, self.k, loc=0., scale=self.theta)
class ChiSquared(Gamma):

    def __init__(self, nu, name=None, latex_label=None, unit=None):
        """Chi-squared distribution

        https://en.wikipedia.org/wiki/Chi-squared_distribution

        Parameters
        ----------
        nu: int
            Number of degrees of freedom
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        """
        if nu <= 0 or not isinstance(nu, int):
            raise ValueError("For the ChiSquared prior the number of degrees of freedom must be a positive integer")
        # A chi-squared with nu degrees of freedom is a Gamma(k=nu/2, theta=2).
        super(ChiSquared, self).__init__(name=name, k=nu / 2., theta=2.,
                                         latex_label=latex_label, unit=unit)

    @property
    def nu(self):
        """Number of degrees of freedom, recovered from the Gamma shape."""
        return int(self.k * 2)

    @nu.setter
    def nu(self, nu):
        self.k = nu / 2.
class Interped(Prior):

    def __init__(self, xx, yy, minimum=np.nan, maximum=np.nan, name=None,
                 latex_label=None, unit=None):
        """Creates an interpolated prior function from arrays of xx and yy=p(xx)

        Parameters
        ----------
        xx: array_like
            x values for the to be interpolated prior function
        yy: array_like
            p(xx) values for the to be interpolated prior function
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass

        Attributes
        -------
        probability_density: scipy.interpolate.interp1d
            Interpolated prior probability distribution
        cumulative_distribution: scipy.interpolate.interp1d
            Interpolated cumulative prior probability distribution
        inverse_cumulative_distribution: scipy.interpolate.interp1d
            Inverted cumulative prior probability distribution
        YY: array_like
            Cumulative prior probability distribution
        """
        self.xx = xx
        self.yy = yy
        # Keep the original, un-normalised interpolant so the grid can be
        # re-evaluated whenever minimum/maximum change.
        self.__all_interpolated = interp1d(x=xx, y=yy, bounds_error=False, fill_value=0)
        # Clip the requested bounds to the support of the supplied arrays;
        # nanmax/nanmin ignore the np.nan defaults.
        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
                       minimum=np.nanmax(np.array((min(xx), minimum))),
                       maximum=np.nanmin(np.array((max(xx), maximum))))
        self.__update_instance()

    def __eq__(self, other):
        # Equal iff same class and built from identical (xx, yy) arrays.
        if self.__class__ != other.__class__:
            return False
        if np.array_equal(self.xx, other.xx) and np.array_equal(self.yy, other.yy):
            return True
        return False

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ----------
        val: float

        Returns
        -------
        float: Prior probability of val
        """
        return self.probability_density(val)

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the prior.

        This maps to the inverse CDF. This is done using interpolation.
        """
        Prior.test_valid_for_rescaling(val)
        rescaled = self.inverse_cumulative_distribution(val)
        # interp1d returns a 0-d array for scalar input; convert to float.
        if rescaled.shape == ():
            rescaled = float(rescaled)
        return rescaled

    @property
    def minimum(self):
        """Return minimum of the prior distribution.

        Updates the prior distribution if minimum is set to a different value.

        Returns
        -------
        float: Minimum of the prior distribution
        """
        return self._minimum

    @minimum.setter
    def minimum(self, minimum):
        self._minimum = minimum
        # Only re-grid once both bounds exist (i.e. after __init__ has set
        # maximum too).
        if '_maximum' in self.__dict__ and self._maximum < np.inf:
            self.__update_instance()

    @property
    def maximum(self):
        """Return maximum of the prior distribution.

        Updates the prior distribution if maximum is set to a different value.

        Returns
        -------
        float: Maximum of the prior distribution
        """
        return self._maximum

    @maximum.setter
    def maximum(self, maximum):
        self._maximum = maximum
        # NOTE(review): this guard tests `self._minimum < np.inf`, presumably
        # mirroring the minimum setter; `self._minimum > -np.inf` may be the
        # intended condition here -- confirm against upstream before changing.
        if '_minimum' in self.__dict__ and self._minimum < np.inf:
            self.__update_instance()

    def __update_instance(self):
        # Re-sample the x grid over the current bounds (same number of
        # points), re-evaluate the stored interpolant, and rebuild the
        # derived distributions.
        self.xx = np.linspace(self.minimum, self.maximum, len(self.xx))
        self.yy = self.__all_interpolated(self.xx)
        self.__initialize_attributes()

    def __initialize_attributes(self):
        # Normalise the PDF if needed, build the CDF by trapezoidal
        # integration, then construct interpolants for PDF, CDF and
        # inverse CDF.
        if np.trapz(self.yy, self.xx) != 1:
            logger.debug('Supplied PDF for {} is not normalised, normalising.'.format(self.name))
            self.yy /= np.trapz(self.yy, self.xx)
        self.YY = cumtrapz(self.yy, self.xx, initial=0)
        # Need last element of cumulative distribution to be exactly one.
        self.YY[-1] = 1
        self.probability_density = interp1d(x=self.xx, y=self.yy, bounds_error=False, fill_value=0)
        self.cumulative_distribution = interp1d(x=self.xx, y=self.YY, bounds_error=False, fill_value=0)
        self.inverse_cumulative_distribution = interp1d(x=self.YY, y=self.xx, bounds_error=True)
class FromFile(Interped):

    def __init__(self, file_name, minimum=None, maximum=None, name=None,
                 latex_label=None, unit=None):
        """Interpolated prior whose (x, p(x)) samples are read from a file.

        Parameters
        ----------
        file_name: str
            Name of the file containing the xx and yy arrays
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass

        Attributes
        -------
        all_interpolated: scipy.interpolate.interp1d
            Interpolated prior function
        """
        try:
            self.id = file_name
            # The file is expected to hold two columns: x and p(x).
            x_values, p_values = np.genfromtxt(self.id).T
            Interped.__init__(self, xx=x_values, yy=p_values, minimum=minimum,
                              maximum=maximum, name=name,
                              latex_label=latex_label, unit=unit)
        except IOError:
            logger.warning("Can't load {}.".format(self.id))
            logger.warning("Format should be:")
            logger.warning(r"x\tp(x)")
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,657 | juancalderonbustillo/bilby | refs/heads/master | /bilby/core/sampler/ptemcee.py | from __future__ import absolute_import, division, print_function
import numpy as np
from ..utils import get_progress_bar
from . import Emcee
from .base_sampler import SamplerError
class Ptemcee(Emcee):
    """bilby wrapper ptemcee (https://github.com/willvousden/ptemcee)

    All positional and keyword arguments (i.e., the args and kwargs) passed to
    `run_sampler` will be propagated to `ptemcee.Sampler`, see
    documentation for that class for further help. Under Other Parameters, we
    list commonly used kwargs and the bilby defaults.

    Other Parameters
    ----------------
    nwalkers: int, (500)
        The number of walkers
    nsteps: int, (100)
        The number of steps to take
    nburn: int (50)
        The fixed number of steps to discard as burn-in
    ntemps: int (2)
        The number of temperatures used by ptemcee
    """
    default_kwargs = dict(ntemps=2, nwalkers=500,
                          Tmax=None, betas=None,
                          threads=1, pool=None, a=2.0,
                          loglargs=[], logpargs=[],
                          loglkwargs={}, logpkwargs={},
                          adaptation_lag=10000, adaptation_time=100,
                          random=None, iterations=100, thin=1,
                          storechain=True, adapt=True,
                          swap_ratios=False,
                          )

    def __init__(self, likelihood, priors, outdir='outdir', label='label', use_ratio=False, plot=False,
                 skip_import_verification=False, nburn=None, burn_in_fraction=0.25,
                 burn_in_act=3, **kwargs):
        Emcee.__init__(self, likelihood=likelihood, priors=priors, outdir=outdir, label=label,
                       use_ratio=use_ratio, plot=plot, skip_import_verification=skip_import_verification,
                       nburn=nburn, burn_in_fraction=burn_in_fraction, burn_in_act=burn_in_act, **kwargs)

    @property
    def sampler_function_kwargs(self):
        """ dict: The subset of kwargs passed to `ptemcee.Sampler.sample` """
        keys = ['iterations', 'thin', 'storechain', 'adapt', 'swap_ratios']
        return {key: self.kwargs[key] for key in keys}

    @property
    def sampler_init_kwargs(self):
        """ dict: The subset of kwargs passed to the `ptemcee.Sampler` constructor """
        return {key: value
                for key, value in self.kwargs.items()
                if key not in self.sampler_function_kwargs}

    def run_sampler(self):
        """ Run ptemcee and populate `self.result`

        Returns
        -------
        bilby.core.result.Result: The populated result object

        Raises
        ------
        SamplerError: If the chain is not burned in after `nsteps` steps
        """
        import ptemcee
        tqdm = get_progress_bar()
        sampler = ptemcee.Sampler(dim=self.ndim, logl=self.log_likelihood,
                                  logp=self.log_prior, **self.sampler_init_kwargs)
        # Initial positions: one prior draw per walker, per temperature
        self.pos0 = [[self.get_random_draw_from_prior()
                      for _ in range(self.nwalkers)]
                     for _ in range(self.kwargs['ntemps'])]
        log_likelihood_evaluations = []
        log_prior_evaluations = []
        for pos, logpost, loglike in tqdm(
                sampler.sample(self.pos0, **self.sampler_function_kwargs),
                total=self.nsteps):
            log_likelihood_evaluations.append(loglike)
            # log-posterior = log-likelihood + log-prior, so the difference
            # recovers the log-prior at each step
            log_prior_evaluations.append(logpost - loglike)
        self.calculate_autocorrelation(sampler.chain.reshape((-1, self.ndim)))
        self.result.sampler_output = np.nan
        self.print_nburn_logging_info()
        self.result.nburn = self.nburn
        if self.result.nburn > self.nsteps:
            raise SamplerError(
                "The run has finished, but the chain is not burned in: "
                "`nburn < nsteps`. Try increasing the number of steps.")
        # Index 0 selects the zero (cold) temperature chain
        self.result.samples = sampler.chain[0, :, self.nburn:, :].reshape(
            (-1, self.ndim))
        self.result.log_likelihood_evaluations = np.array(
            log_likelihood_evaluations)[self.nburn:, 0, :].reshape((-1))
        self.result.log_prior_evaluations = np.array(
            log_prior_evaluations)[self.nburn:, 0, :].reshape((-1))
        self.result.betas = sampler.betas
        self.result.log_evidence, self.result.log_evidence_err =\
            sampler.log_evidence_estimate(
                sampler.loglikelihood, self.nburn / self.nsteps)
        self.result.walkers = sampler.chain[0, :, :, :]
        return self.result
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,658 | juancalderonbustillo/bilby | refs/heads/master | /bilby/core/__init__.py | from __future__ import absolute_import
from . import likelihood, prior, result, sampler, series, utils
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,659 | juancalderonbustillo/bilby | refs/heads/master | /bilby/gw/detector.py | from __future__ import division, print_function, absolute_import
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal.windows import tukey
from scipy.interpolate import interp1d
import deepdish as dd
from . import utils as gwutils
from ..core import utils
from ..core.utils import logger
from ..core.series import CoupledTimeAndFrequencySeries
from .calibration import Recalibrate
try:
import gwpy
import gwpy.signal
except ImportError:
logger.warning("You do not have gwpy installed currently. You will "
" not be able to use some of the prebuilt functions.")
try:
import lal
except ImportError:
logger.warning("You do not have lalsuite installed currently. You will"
" not be able to use some of the prebuilt functions.")
class InterferometerList(list):
    """ A list of Interferometer objects """
    def __init__(self, interferometers):
        """ Instantiate a InterferometerList
        The InterferometerList is a list of Interferometer objects, each
        object has the data used in evaluating the likelihood
        Parameters
        ----------
        interferometers: iterable
            The list of interferometers
        """
        list.__init__(self)
        # A bare string is rejected: it is iterable, so without this guard it
        # would be silently unpacked into single-character "names" below.
        if type(interferometers) == str:
            raise TypeError("Input must not be a string")
        for ifo in interferometers:
            # A string entry is resolved to an Interferometer object by name
            if type(ifo) == str:
                ifo = get_empty_interferometer(ifo)
            # NOTE(review): exact type() comparison (not isinstance) — this
            # rejects any subclass other than the two listed; presumably
            # deliberate, but confirm before changing to isinstance.
            if type(ifo) not in [Interferometer, TriangularInterferometer]:
                raise TypeError("Input list of interferometers are not all Interferometer objects")
            else:
                # append() is overridden below and re-validates consistency
                self.append(ifo)
        self._check_interferometers()
    def _check_interferometers(self):
        """ Check certain aspects of the set are the same """
        # Each attribute must match across all members; values are compared
        # against the first element's value.
        consistent_attributes = ['duration', 'start_time', 'sampling_frequency']
        for attribute in consistent_attributes:
            x = [getattr(interferometer.strain_data, attribute)
                 for interferometer in self]
            if not all(y == x[0] for y in x):
                raise ValueError("The {} of all interferometers are not the same".format(attribute))
    def set_strain_data_from_power_spectral_densities(self, sampling_frequency, duration, start_time=0):
        """ Set the `Interferometer.strain_data` from the power spectal densities of the detectors
        This uses the `interferometer.power_spectral_density` object to set
        the `strain_data` to a noise realization. See
        `bilby.gw.detector.InterferometerStrainData` for further information.
        Parameters
        ----------
        sampling_frequency: float
            The sampling frequency (in Hz)
        duration: float
            The data duration (in s)
        start_time: float
            The GPS start-time of the data
        """
        for interferometer in self:
            interferometer.set_strain_data_from_power_spectral_density(sampling_frequency=sampling_frequency,
                                                                       duration=duration,
                                                                       start_time=start_time)
    def inject_signal(self, parameters=None, injection_polarizations=None, waveform_generator=None):
        """ Inject a signal into noise in each of the three detectors.
        Parameters
        ----------
        parameters: dict
            Parameters of the injection.
        injection_polarizations: dict
            Polarizations of waveform to inject, output of
            `waveform_generator.frequency_domain_strain()`. If
            `waveform_generator` is also given, the injection_polarizations will
            be calculated directly and this argument can be ignored.
        waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
            A WaveformGenerator instance using the source model to inject. If
            `injection_polarizations` is given, this will be ignored.
        Note
        ----------
        if your signal takes a substantial amount of time to generate, or
        you experience buggy behaviour. It is preferable to provide the
        injection_polarizations directly.
        Returns
        -------
        injection_polarizations: dict
        """
        # Generate the polarizations once and reuse them for every detector
        if injection_polarizations is None:
            if waveform_generator is not None:
                injection_polarizations = \
                    waveform_generator.frequency_domain_strain(parameters)
            else:
                raise ValueError(
                    "inject_signal needs one of waveform_generator or "
                    "injection_polarizations.")
        all_injection_polarizations = list()
        for interferometer in self:
            all_injection_polarizations.append(
                interferometer.inject_signal(parameters=parameters, injection_polarizations=injection_polarizations))
        return all_injection_polarizations
    def save_data(self, outdir, label=None):
        """ Creates a save file for the data in plain text format
        Parameters
        ----------
        outdir: str
            The output directory in which the data is supposed to be saved
        label: str
            The string labelling the data
        """
        for interferometer in self:
            interferometer.save_data(outdir=outdir, label=label)
    def plot_data(self, signal=None, outdir='.', label=None):
        """ Plot the data of each interferometer; no-op when running under test mode """
        if utils.command_line_args.test:
            return
        for interferometer in self:
            interferometer.plot_data(signal=signal, outdir=outdir, label=label)
    @property
    def number_of_interferometers(self):
        """ int: The number of interferometers in the list """
        return len(self)
    @property
    def duration(self):
        """ float: Duration of the data; taken from the first element (all are consistent) """
        return self[0].strain_data.duration
    @property
    def start_time(self):
        """ float: GPS start time of the data; taken from the first element """
        return self[0].strain_data.start_time
    @property
    def sampling_frequency(self):
        """ float: Sampling frequency of the data; taken from the first element """
        return self[0].strain_data.sampling_frequency
    @property
    def frequency_array(self):
        """ array_like: Frequency array of the first element's strain data """
        return self[0].strain_data.frequency_array
    def append(self, interferometer):
        # Appending another InterferometerList flattens it into this one;
        # every mutation re-validates cross-detector consistency.
        if isinstance(interferometer, InterferometerList):
            super(InterferometerList, self).extend(interferometer)
        else:
            super(InterferometerList, self).append(interferometer)
        self._check_interferometers()
    def extend(self, interferometers):
        super(InterferometerList, self).extend(interferometers)
        self._check_interferometers()
    def insert(self, index, interferometer):
        super(InterferometerList, self).insert(index, interferometer)
        self._check_interferometers()
    @property
    def meta_data(self):
        """ Dictionary of the per-interferometer meta_data """
        return {interferometer.name: interferometer.meta_data
                for interferometer in self}
    @staticmethod
    def _hdf5_filename_from_outdir_label(outdir, label):
        # Canonical hdf5 path used by both to_hdf5 and callers
        return os.path.join(outdir, label + '.h5')
    def to_hdf5(self, outdir='outdir', label='ifo_list'):
        """ Saves the object to a hdf5 file
        Parameters
        ----------
        outdir: str, optional
            Output directory name of the file
        label: str, optional
            Output file name, is 'ifo_list' if not given otherwise. A list of
            the included interferometers will be appended.
        """
        # NOTE(review): the concatenated message below renders without a space
        # ("...Python 2.Use Python 3..."); fix the literal separately.
        if sys.version_info[0] < 3:
            raise NotImplementedError('Pickling of InterferometerList is not supported in Python 2.'
                                      'Use Python 3 instead.')
        label = label + '_' + ''.join(ifo.name for ifo in self)
        utils.check_directory_exists_and_if_not_mkdir(outdir)
        dd.io.save(self._hdf5_filename_from_outdir_label(outdir, label), self)
    @classmethod
    def from_hdf5(cls, filename=None):
        """ Loads in an InterferometerList object from an hdf5 file
        Parameters
        ----------
        filename: str
            If given, try to load from this filename
        """
        if sys.version_info[0] < 3:
            raise NotImplementedError('Pickling of InterferometerList is not supported in Python 2.'
                                      'Use Python 3 instead.')
        res = dd.io.load(filename)
        # deepdish may return a plain list; coerce it back into this class
        if res.__class__ == list:
            res = cls(res)
        if res.__class__ != cls:
            raise TypeError('The loaded object is not a InterferometerList')
        return res
class InterferometerStrainData(object):
    """ Strain data for an interferometer """
    def __init__(self, minimum_frequency=0, maximum_frequency=np.inf,
                 roll_off=0.2):
        """ Initiate an InterferometerStrainData object
        The initialised object contains no data, this should be added using one
        of the `set_from..` methods.
        Parameters
        ----------
        minimum_frequency: float
            Minimum frequency to analyse for detector. Default is 0.
        maximum_frequency: float
            Maximum frequency to analyse for detector. Default is infinity.
        roll_off: float
            The roll-off (in seconds) used in the Tukey window, default=0.2s.
            This corresponds to alpha * duration / 2 for scipy tukey window.
        """
        self.minimum_frequency = minimum_frequency
        self.maximum_frequency = maximum_frequency
        self.roll_off = roll_off
        self.window_factor = 1
        self._times_and_frequencies = CoupledTimeAndFrequencySeries()
        self._frequency_domain_strain = None
        self._frequency_array = None
        self._time_domain_strain = None
        self._time_array = None
    def __eq__(self, other):
        """ Equal iff all frequency settings, window settings and data arrays match """
        if self.minimum_frequency == other.minimum_frequency \
                and self.maximum_frequency == other.maximum_frequency \
                and self.roll_off == other.roll_off \
                and self.window_factor == other.window_factor \
                and self.sampling_frequency == other.sampling_frequency \
                and self.duration == other.duration \
                and self.start_time == other.start_time \
                and np.array_equal(self.time_array, other.time_array) \
                and np.array_equal(self.frequency_array, other.frequency_array) \
                and np.array_equal(self.frequency_domain_strain, other.frequency_domain_strain) \
                and np.array_equal(self.time_domain_strain, other.time_domain_strain):
            return True
        return False
    def time_within_data(self, time):
        """ Check if time is within the data span
        Parameters
        ----------
        time: float
            The time to check
        Returns
        -------
        bool:
            A boolean stating whether the time is inside or outside the span
        """
        if time < self.start_time:
            logger.debug("Time is before the start_time")
            return False
        elif time > self.start_time + self.duration:
            logger.debug("Time is after the start_time + duration")
            return False
        else:
            return True
    @property
    def minimum_frequency(self):
        """ float: Minimum frequency to analyse """
        return self.__minimum_frequency
    @minimum_frequency.setter
    def minimum_frequency(self, minimum_frequency):
        self.__minimum_frequency = minimum_frequency
    @property
    def maximum_frequency(self):
        """ Force the maximum frequency be less than the Nyquist frequency """
        if self.sampling_frequency is not None:
            # Clamp to Nyquist (sampling_frequency / 2) if set above it
            if 2 * self.__maximum_frequency > self.sampling_frequency:
                self.__maximum_frequency = self.sampling_frequency / 2.
        return self.__maximum_frequency
    @maximum_frequency.setter
    def maximum_frequency(self, maximum_frequency):
        self.__maximum_frequency = maximum_frequency
    @property
    def frequency_mask(self):
        """Masking array for limiting the frequency band.
        Returns
        -------
        array_like: An array of boolean values
        """
        return ((self.frequency_array >= self.minimum_frequency) &
                (self.frequency_array <= self.maximum_frequency))
    @property
    def alpha(self):
        """ float: The scipy Tukey window shape parameter, 2 * roll_off / duration """
        return 2 * self.roll_off / self.duration
    def time_domain_window(self, roll_off=None, alpha=None):
        """
        Window function to apply to time domain data before FFTing.
        This defines self.window_factor as the power loss due to the windowing.
        See https://dcc.ligo.org/DocDB/0027/T040089/000/T040089-00.pdf
        Parameters
        ----------
        roll_off: float
            Rise time of window in seconds
        alpha: float
            Parameter to pass to tukey window, how much of segment falls
            into windowed part
        Return
        ------
        window: array
            Window function over time array
        """
        # roll_off takes precedence over alpha; either updates self.roll_off
        if roll_off is not None:
            self.roll_off = roll_off
        elif alpha is not None:
            self.roll_off = alpha * self.duration / 2
        window = tukey(len(self._time_domain_strain), alpha=self.alpha)
        self.window_factor = np.mean(window ** 2)
        return window
    @property
    def time_domain_strain(self):
        """ The time domain strain, in units of strain """
        if self._time_domain_strain is not None:
            return self._time_domain_strain
        elif self._frequency_domain_strain is not None:
            # Lazily inverse-FFT the frequency-domain data and cache the result
            self._time_domain_strain = utils.infft(
                self.frequency_domain_strain, self.sampling_frequency)
            return self._time_domain_strain
        else:
            raise ValueError("time domain strain data not yet set")
    @property
    def frequency_domain_strain(self):
        """ Returns the frequency domain strain
        This is the frequency domain strain normalised to units of
        strain / Hz, obtained by a one-sided Fourier transform of the
        time domain data, divided by the sampling frequency.
        """
        if self._frequency_domain_strain is not None:
            return self._frequency_domain_strain * self.frequency_mask
        elif self._time_domain_strain is not None:
            logger.info("Generating frequency domain strain from given time "
                        "domain strain.")
            logger.info("Applying a tukey window with alpha={}, roll off={}".format(
                self.alpha, self.roll_off))
            window = self.time_domain_window()
            self._frequency_domain_strain, self.frequency_array = utils.nfft(
                self._time_domain_strain * window, self.sampling_frequency)
            return self._frequency_domain_strain * self.frequency_mask
        else:
            raise ValueError("frequency domain strain data not yet set")
    @frequency_domain_strain.setter
    def frequency_domain_strain(self, frequency_domain_strain):
        if not len(self.frequency_array) == len(frequency_domain_strain):
            raise ValueError("The frequency_array and the set strain have different lengths")
        self._frequency_domain_strain = frequency_domain_strain
    def add_to_frequency_domain_strain(self, x):
        """Deprecated"""
        self._frequency_domain_strain += x
    def low_pass_filter(self, filter_freq=None):
        """ Low pass filter the data """
        if filter_freq is None:
            logger.debug(
                "Setting low pass filter_freq using given maximum frequency")
            filter_freq = self.maximum_frequency
        # A filter at or above Nyquist would be a no-op; skip it entirely
        if 2 * filter_freq >= self.sampling_frequency:
            logger.info(
                "Low pass filter frequency of {}Hz requested, this is equal"
                " or greater than the Nyquist frequency so no filter applied"
                .format(filter_freq))
            return
        logger.debug("Applying low pass filter with filter frequency {}".format(filter_freq))
        bp = gwpy.signal.filter_design.lowpass(
            filter_freq, self.sampling_frequency)
        strain = gwpy.timeseries.TimeSeries(
            self.time_domain_strain, sample_rate=self.sampling_frequency)
        strain = strain.filter(bp, filtfilt=True)
        self._time_domain_strain = strain.value
    def create_power_spectral_density(
            self, fft_length, overlap=0, name='unknown', outdir=None,
            analysis_segment_start_time=None):
        """ Use the time domain strain to generate a power spectral density
        This create a Tukey-windowed power spectral density and writes it to a
        PSD file.
        Parameters
        ----------
        fft_length: float
            Duration of the analysis segment.
        overlap: float
            Number of seconds of overlap between FFTs.
        name: str
            The name of the detector, used in storing the PSD. Defaults to
            "unknown".
        outdir: str
            The output directory to write the PSD file too. If not given,
            the PSD will not be written to file.
        analysis_segment_start_time: float
            The start time of the analysis segment, if given, this data will
            be removed before creating the PSD.
        Returns
        -------
        frequency_array, psd : array_like
            The frequencies and power spectral density array
        """
        if analysis_segment_start_time is not None:
            # Exclude the analysis segment so the PSD is estimated off-source
            logger.info("Removing analysis segment data from the PSD data")
            analysis_segment_end_time = analysis_segment_start_time + fft_length
            idxs = (
                (self.time_array < analysis_segment_start_time) +
                (self.time_array > analysis_segment_end_time))
            data = self.time_domain_strain[idxs]
        else:
            data = self.time_domain_strain
        strain = gwpy.timeseries.TimeSeries(data=data, sample_rate=self.sampling_frequency)
        psd_alpha = 2 * self.roll_off / fft_length
        logger.info(
            "Tukey window PSD data with alpha={}, roll off={}".format(
                psd_alpha, self.roll_off))
        psd = strain.psd(
            fftlength=fft_length, overlap=overlap, window=('tukey', psd_alpha))
        if outdir:
            psd_file = '{}/{}_PSD_{}_{}.txt'.format(outdir, name, self.start_time, self.duration)
            with open('{}'.format(psd_file), 'w+') as opened_file:
                for f, p in zip(psd.frequencies.value, psd.value):
                    opened_file.write('{} {}\n'.format(f, p))
        return psd.frequencies.value, psd.value
    def _infer_time_domain_dependence(
            self, start_time, sampling_frequency, duration, time_array):
        """ Helper function to figure out if the time_array, or
        sampling_frequency and duration where given
        """
        self._infer_dependence(domain='time', array=time_array, duration=duration,
                               sampling_frequency=sampling_frequency, start_time=start_time)
    def _infer_frequency_domain_dependence(
            self, start_time, sampling_frequency, duration, frequency_array):
        """ Helper function to figure out if the frequency_array, or
        sampling_frequency and duration where given
        """
        self._infer_dependence(domain='frequency', array=frequency_array,
                               duration=duration, sampling_frequency=sampling_frequency, start_time=start_time)
    def _infer_dependence(self, domain, array, duration, sampling_frequency, start_time):
        """ Validate that exactly one of (sampling_frequency & duration) or
        an explicit array was given, then set the series parameters.
        Raises ValueError when both or neither are supplied.
        """
        if (sampling_frequency is not None) and (duration is not None):
            if array is not None:
                raise ValueError(
                    "You have given the sampling_frequency, duration, and "
                    "an array")
        elif array is not None:
            if domain == 'time':
                self.time_array = array
            elif domain == 'frequency':
                self.frequency_array = array
            return
        elif sampling_frequency is None or duration is None:
            raise ValueError(
                "You must provide both sampling_frequency and duration")
        else:
            raise ValueError(
                "Insufficient information given to set arrays")
        self._set_time_and_frequency_array_parameters(duration=duration,
                                                      sampling_frequency=sampling_frequency,
                                                      start_time=start_time)
    def set_from_time_domain_strain(
            self, time_domain_strain, sampling_frequency=None, duration=None,
            start_time=0, time_array=None):
        """ Set the strain data from a time domain strain array
        This sets the time_domain_strain attribute, the frequency_domain_strain
        is automatically calculated after a low-pass filter and Tukey window
        is applied.
        Parameters
        ----------
        time_domain_strain: array_like
            An array of the time domain strain.
        sampling_frequency: float
            The sampling frequency (in Hz).
        duration: float
            The data duration (in s).
        start_time: float
            The GPS start-time of the data.
        time_array: array_like
            The array of times, if sampling_frequency and duration not
            given.
        """
        self._infer_time_domain_dependence(start_time=start_time,
                                           sampling_frequency=sampling_frequency,
                                           duration=duration,
                                           time_array=time_array)
        logger.debug('Setting data using provided time_domain_strain')
        if np.shape(time_domain_strain) == np.shape(self.time_array):
            self._time_domain_strain = time_domain_strain
            # Invalidate any cached frequency-domain data
            self._frequency_domain_strain = None
        else:
            raise ValueError("Data times do not match time array")
    def set_from_gwpy_timeseries(self, time_series):
        """ Set the strain data from a gwpy TimeSeries
        This sets the time_domain_strain attribute, the frequency_domain_strain
        is automatically calculated after a low-pass filter and Tukey window
        is applied.
        Parameters
        ----------
        time_series: gwpy.timeseries.timeseries.TimeSeries
        """
        logger.debug('Setting data using provided gwpy TimeSeries object')
        if type(time_series) != gwpy.timeseries.TimeSeries:
            raise ValueError("Input time_series is not a gwpy TimeSeries")
        self._set_time_and_frequency_array_parameters(duration=time_series.duration.value,
                                                      sampling_frequency=time_series.sample_rate.value,
                                                      start_time=time_series.epoch.value)
        self._time_domain_strain = time_series.value
        self._frequency_domain_strain = None
    def set_from_open_data(
            self, name, start_time, duration=4, outdir='outdir', cache=True,
            **kwargs):
        """ Set the strain data from open LOSC data
        This sets the time_domain_strain attribute, the frequency_domain_strain
        is automatically calculated after a low-pass filter and Tukey window
        is applied.
        Parameters
        ----------
        name: str
            Detector name, e.g., 'H1'.
        start_time: float
            Start GPS time of segment.
        duration: float, optional
            The total time (in seconds) to analyse. Defaults to 4s.
        outdir: str
            Directory where the psd files are saved
        cache: bool, optional
            Whether or not to store/use the acquired data.
        **kwargs:
            All keyword arguments are passed to
            `gwpy.timeseries.TimeSeries.fetch_open_data()`.
        """
        timeseries = gwutils.get_open_strain_data(
            name, start_time, start_time + duration, outdir=outdir, cache=cache,
            **kwargs)
        self.set_from_gwpy_timeseries(timeseries)
    def set_from_csv(self, filename):
        """ Set the strain data from a csv file
        Parameters
        ----------
        filename: str
            The path to the file to read in
        """
        timeseries = gwpy.timeseries.TimeSeries.read(filename, format='csv')
        self.set_from_gwpy_timeseries(timeseries)
    def set_from_frequency_domain_strain(
            self, frequency_domain_strain, sampling_frequency=None,
            duration=None, start_time=0, frequency_array=None):
        """ Set the `frequency_domain_strain` from a numpy array
        Parameters
        ----------
        frequency_domain_strain: array_like
            The data to set.
        sampling_frequency: float
            The sampling frequency (in Hz).
        duration: float
            The data duration (in s).
        start_time: float
            The GPS start-time of the data.
        frequency_array: array_like
            The array of frequencies, if sampling_frequency and duration not
            given.
        """
        self._infer_frequency_domain_dependence(start_time=start_time,
                                                sampling_frequency=sampling_frequency,
                                                duration=duration,
                                                frequency_array=frequency_array)
        logger.debug('Setting data using provided frequency_domain_strain')
        if np.shape(frequency_domain_strain) == np.shape(self.frequency_array):
            self._frequency_domain_strain = frequency_domain_strain
            # Data supplied directly in the frequency domain is unwindowed
            self.window_factor = 1
        else:
            raise ValueError("Data frequencies do not match frequency_array")
    def set_from_power_spectral_density(
            self, power_spectral_density, sampling_frequency, duration,
            start_time=0):
        """ Set the `frequency_domain_strain` by generating a noise realisation
        Parameters
        ----------
        power_spectral_density: bilby.gw.detector.PowerSpectralDensity
            A PowerSpectralDensity object used to generate the data
        sampling_frequency: float
            The sampling frequency (in Hz)
        duration: float
            The data duration (in s)
        start_time: float
            The GPS start-time of the data
        """
        self._set_time_and_frequency_array_parameters(duration=duration,
                                                      sampling_frequency=sampling_frequency,
                                                      start_time=start_time)
        logger.debug(
            'Setting data using noise realization from provided '
            'power_spectral_density')
        frequency_domain_strain, frequency_array = \
            power_spectral_density.get_noise_realisation(
                self.sampling_frequency, self.duration)
        if np.array_equal(frequency_array, self.frequency_array):
            self._frequency_domain_strain = frequency_domain_strain
        else:
            raise ValueError("Data frequencies do not match frequency_array")
    def set_from_zero_noise(self, sampling_frequency, duration, start_time=0):
        """ Set the `frequency_domain_strain` to zero noise
        Parameters
        ----------
        sampling_frequency: float
            The sampling frequency (in Hz)
        duration: float
            The data duration (in s)
        start_time: float
            The GPS start-time of the data
        """
        self._set_time_and_frequency_array_parameters(duration=duration,
                                                      sampling_frequency=sampling_frequency,
                                                      start_time=start_time)
        logger.debug('Setting zero noise data')
        # Builtin `complex` replaces the `np.complex` alias, which was
        # deprecated in NumPy 1.20 and removed in 1.24; dtype is identical.
        self._frequency_domain_strain = np.zeros_like(self.frequency_array,
                                                      dtype=complex)
    def set_from_frame_file(
            self, frame_file, sampling_frequency, duration, start_time=0,
            channel=None, buffer_time=1):
        """ Set the `frequency_domain_strain` from a frame file
        Parameters
        ----------
        frame_file: str
            File from which to load data.
        channel: str
            Channel to read from frame.
        sampling_frequency: float
            The sampling frequency (in Hz)
        duration: float
            The data duration (in s)
        start_time: float
            The GPS start-time of the data
        buffer_time: float
            Read in data with `start_time-buffer_time` and
            `start_time+duration+buffer_time`
        """
        self._set_time_and_frequency_array_parameters(duration=duration,
                                                      sampling_frequency=sampling_frequency,
                                                      start_time=start_time)
        logger.info('Reading data from frame')
        strain = gwutils.read_frame_file(
            frame_file, start_time=start_time, end_time=start_time + duration,
            buffer_time=buffer_time, channel=channel,
            resample=sampling_frequency)
        self.set_from_gwpy_timeseries(strain)
    def _set_time_and_frequency_array_parameters(self, duration, sampling_frequency, start_time):
        # All time/frequency bookkeeping is delegated to this coupled series
        self._times_and_frequencies = CoupledTimeAndFrequencySeries(duration=duration,
                                                                    sampling_frequency=sampling_frequency,
                                                                    start_time=start_time)
    @property
    def sampling_frequency(self):
        """ float: Sampling frequency of the data in Hz """
        return self._times_and_frequencies.sampling_frequency
    @sampling_frequency.setter
    def sampling_frequency(self, sampling_frequency):
        self._times_and_frequencies.sampling_frequency = sampling_frequency
    @property
    def duration(self):
        """ float: Duration of the data in seconds """
        return self._times_and_frequencies.duration
    @duration.setter
    def duration(self, duration):
        self._times_and_frequencies.duration = duration
    @property
    def start_time(self):
        """ float: GPS start time of the data in seconds """
        return self._times_and_frequencies.start_time
    @start_time.setter
    def start_time(self, start_time):
        self._times_and_frequencies.start_time = start_time
    @property
    def frequency_array(self):
        """ Frequencies of the data in Hz """
        return self._times_and_frequencies.frequency_array
    @frequency_array.setter
    def frequency_array(self, frequency_array):
        self._times_and_frequencies.frequency_array = frequency_array
    @property
    def time_array(self):
        """ Time of the data in seconds """
        return self._times_and_frequencies.time_array
    @time_array.setter
    def time_array(self, time_array):
        self._times_and_frequencies.time_array = time_array
class Interferometer(object):
"""Class for the Interferometer """
def __init__(self, name, power_spectral_density, minimum_frequency, maximum_frequency,
length, latitude, longitude, elevation, xarm_azimuth, yarm_azimuth,
xarm_tilt=0., yarm_tilt=0., calibration_model=Recalibrate()):
"""
Instantiate an Interferometer object.
Parameters
----------
name: str
Interferometer name, e.g., H1.
power_spectral_density: PowerSpectralDensity
Power spectral density determining the sensitivity of the detector.
minimum_frequency: float
Minimum frequency to analyse for detector.
maximum_frequency: float
Maximum frequency to analyse for detector.
length: float
Length of the interferometer in km.
latitude: float
Latitude North in degrees (South is negative).
longitude: float
Longitude East in degrees (West is negative).
elevation: float
Height above surface in metres.
xarm_azimuth: float
Orientation of the x arm in degrees North of East.
yarm_azimuth: float
Orientation of the y arm in degrees North of East.
xarm_tilt: float, optional
Tilt of the x arm in radians above the horizontal defined by
ellipsoid earth model in LIGO-T980044-08.
yarm_tilt: float, optional
Tilt of the y arm in radians above the horizontal.
calibration_model: Recalibration
Calibration model, this applies the calibration correction to the
template, the default model applies no correction.
"""
self.__x_updated = False
self.__y_updated = False
self.__vertex_updated = False
self.__detector_tensor_updated = False
self.name = name
self.length = length
self.latitude = latitude
self.longitude = longitude
self.elevation = elevation
self.xarm_azimuth = xarm_azimuth
self.yarm_azimuth = yarm_azimuth
self.xarm_tilt = xarm_tilt
self.yarm_tilt = yarm_tilt
self.power_spectral_density = power_spectral_density
self.calibration_model = calibration_model
self._strain_data = InterferometerStrainData(
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency)
self.meta_data = dict()
def __eq__(self, other):
if self.name == other.name and \
self.length == other.length and \
self.latitude == other.latitude and \
self.longitude == other.longitude and \
self.elevation == other.elevation and \
self.xarm_azimuth == other.xarm_azimuth and \
self.xarm_tilt == other.xarm_tilt and \
self.yarm_azimuth == other.yarm_azimuth and \
self.yarm_tilt == other.yarm_tilt and \
self.power_spectral_density.__eq__(other.power_spectral_density) and \
self.calibration_model == other.calibration_model and \
self.strain_data == other.strain_data:
return True
return False
def __repr__(self):
return self.__class__.__name__ + '(name=\'{}\', power_spectral_density={}, minimum_frequency={}, ' \
'maximum_frequency={}, length={}, latitude={}, longitude={}, elevation={}, ' \
'xarm_azimuth={}, yarm_azimuth={}, xarm_tilt={}, yarm_tilt={})' \
.format(self.name, self.power_spectral_density, float(self.minimum_frequency),
float(self.maximum_frequency), float(self.length), float(self.latitude), float(self.longitude),
float(self.elevation), float(self.xarm_azimuth), float(self.yarm_azimuth), float(self.xarm_tilt),
float(self.yarm_tilt))
@property
def minimum_frequency(self):
return self.strain_data.minimum_frequency
@minimum_frequency.setter
def minimum_frequency(self, minimum_frequency):
self._strain_data.minimum_frequency = minimum_frequency
@property
def maximum_frequency(self):
return self.strain_data.maximum_frequency
@maximum_frequency.setter
def maximum_frequency(self, maximum_frequency):
self._strain_data.maximum_frequency = maximum_frequency
    @property
    def strain_data(self):
        """ A bilby.gw.detector.InterferometerStrainData instance """
        return self._strain_data

    @strain_data.setter
    def strain_data(self, strain_data):
        """ Set the strain_data

        This sets the Interferometer.strain_data equal to the provided
        strain_data. This will override the minimum_frequency and
        maximum_frequency of the provided strain_data object with those of
        the Interferometer object.

        Parameters
        ----------
        strain_data: bilby.gw.detector.InterferometerStrainData
            The strain data to attach to this interferometer.
        """
        # Impose this interferometer's analysis band on the incoming data.
        strain_data.minimum_frequency = self.minimum_frequency
        strain_data.maximum_frequency = self.maximum_frequency
        self._strain_data = strain_data
def set_strain_data_from_frequency_domain_strain(
self, frequency_domain_strain, sampling_frequency=None,
duration=None, start_time=0, frequency_array=None):
""" Set the `Interferometer.strain_data` from a numpy array
Parameters
----------
frequency_domain_strain: array_like
The data to set.
sampling_frequency: float
The sampling frequency (in Hz).
duration: float
The data duration (in s).
start_time: float
The GPS start-time of the data.
frequency_array: array_like
The array of frequencies, if sampling_frequency and duration not
given.
"""
self.strain_data.set_from_frequency_domain_strain(
frequency_domain_strain=frequency_domain_strain,
sampling_frequency=sampling_frequency, duration=duration,
start_time=start_time, frequency_array=frequency_array)
    def set_strain_data_from_power_spectral_density(
            self, sampling_frequency, duration, start_time=0):
        """ Set the `Interferometer.strain_data` from a power spectal density

        This uses the `interferometer.power_spectral_density` object to set
        the `strain_data` to a noise realization. See
        `bilby.gw.detector.InterferometerStrainData` for further information.

        Parameters
        ----------
        sampling_frequency: float
            The sampling frequency (in Hz)
        duration: float
            The data duration (in s)
        start_time: float
            The GPS start-time of the data
        """
        # Noise is drawn from this interferometer's own PSD.
        self.strain_data.set_from_power_spectral_density(
            self.power_spectral_density, sampling_frequency=sampling_frequency,
            duration=duration, start_time=start_time)

    def set_strain_data_from_frame_file(
            self, frame_file, sampling_frequency, duration, start_time=0,
            channel=None, buffer_time=1):
        """ Set the `Interferometer.strain_data` from a frame file

        Parameters
        ----------
        frame_file: str
            File from which to load data.
        channel: str
            Channel to read from frame.
        sampling_frequency: float
            The sampling frequency (in Hz)
        duration: float
            The data duration (in s)
        start_time: float
            The GPS start-time of the data
        buffer_time: float
            Read in data with `start_time-buffer_time` and
            `start_time+duration+buffer_time`
        """
        self.strain_data.set_from_frame_file(
            frame_file=frame_file, sampling_frequency=sampling_frequency,
            duration=duration, start_time=start_time,
            channel=channel, buffer_time=buffer_time)

    def set_strain_data_from_csv(self, filename):
        """ Set the `Interferometer.strain_data` from a csv file

        Parameters
        ----------
        filename: str
            The path to the file to read in
        """
        self.strain_data.set_from_csv(filename)

    def set_strain_data_from_zero_noise(
            self, sampling_frequency, duration, start_time=0):
        """ Set the `Interferometer.strain_data` to zero noise

        Parameters
        ----------
        sampling_frequency: float
            The sampling frequency (in Hz)
        duration: float
            The data duration (in s)
        start_time: float
            The GPS start-time of the data
        """
        self.strain_data.set_from_zero_noise(
            sampling_frequency=sampling_frequency, duration=duration,
            start_time=start_time)
    @property
    def latitude(self):
        """ Saves latitude in rad internally. Updates related quantities if set to a different value.

        Returns
        -------
        float: The latitude position of the detector in degree
        """
        return self.__latitude * 180 / np.pi

    @latitude.setter
    def latitude(self, latitude):
        # Convert degrees to radians for internal storage and mark the
        # cached arm vectors and vertex as stale.
        self.__latitude = latitude * np.pi / 180
        self.__x_updated = False
        self.__y_updated = False
        self.__vertex_updated = False

    @property
    def longitude(self):
        """ Saves longitude in rad internally. Updates related quantities if set to a different value.

        Returns
        -------
        float: The longitude position of the detector in degree
        """
        return self.__longitude * 180 / np.pi

    @longitude.setter
    def longitude(self, longitude):
        # Convert degrees to radians for internal storage and mark the
        # cached arm vectors and vertex as stale.
        self.__longitude = longitude * np.pi / 180
        self.__x_updated = False
        self.__y_updated = False
        self.__vertex_updated = False

    @property
    def elevation(self):
        """ Updates related quantities if set to a different values.

        Returns
        -------
        float: The height about the surface in meters
        """
        return self.__elevation

    @elevation.setter
    def elevation(self, elevation):
        # Only the vertex depends on elevation, not the arm unit vectors.
        self.__elevation = elevation
        self.__vertex_updated = False
    @property
    def xarm_azimuth(self):
        """ Saves the x-arm azimuth in rad internally. Updates related quantities if set to a different values.

        Returns
        -------
        float: The x-arm azimuth in degrees.
        """
        return self.__xarm_azimuth * 180 / np.pi

    @xarm_azimuth.setter
    def xarm_azimuth(self, xarm_azimuth):
        # Degrees in, radians stored; invalidate the cached x-arm vector.
        self.__xarm_azimuth = xarm_azimuth * np.pi / 180
        self.__x_updated = False

    @property
    def yarm_azimuth(self):
        """ Saves the y-arm azimuth in rad internally. Updates related quantities if set to a different values.

        Returns
        -------
        float: The y-arm azimuth in degrees.
        """
        return self.__yarm_azimuth * 180 / np.pi

    @yarm_azimuth.setter
    def yarm_azimuth(self, yarm_azimuth):
        # Degrees in, radians stored; invalidate the cached y-arm vector.
        self.__yarm_azimuth = yarm_azimuth * np.pi / 180
        self.__y_updated = False

    @property
    def xarm_tilt(self):
        """ Updates related quantities if set to a different values.

        Returns
        -------
        float: The x-arm tilt in radians.
        """
        return self.__xarm_tilt

    @xarm_tilt.setter
    def xarm_tilt(self, xarm_tilt):
        # Tilts are stored in radians directly (no conversion).
        self.__xarm_tilt = xarm_tilt
        self.__x_updated = False

    @property
    def yarm_tilt(self):
        """ Updates related quantities if set to a different values.

        Returns
        -------
        float: The y-arm tilt in radians.
        """
        return self.__yarm_tilt

    @yarm_tilt.setter
    def yarm_tilt(self, yarm_tilt):
        # Tilts are stored in radians directly (no conversion).
        self.__yarm_tilt = yarm_tilt
        self.__y_updated = False
    @property
    def vertex(self):
        """ Position of the IFO vertex in geocentric coordinates in meters.

        Is automatically updated if related quantities are modified.

        Returns
        -------
        array_like: A 3D array representation of the vertex
        """
        # Lazily recompute only when latitude/longitude/elevation changed.
        if not self.__vertex_updated:
            self.__vertex = gwutils.get_vertex_position_geocentric(self.__latitude, self.__longitude,
                                                                   self.elevation)
            self.__vertex_updated = True
        return self.__vertex

    @property
    def x(self):
        """ A unit vector along the x-arm

        Is automatically updated if related quantities are modified.

        Returns
        -------
        array_like: A 3D array representation of a unit vector along the x-arm
        """
        if not self.__x_updated:
            self.__x = self.unit_vector_along_arm('x')
            self.__x_updated = True
            # The detector tensor depends on x, so it must be recomputed too.
            self.__detector_tensor_updated = False
        return self.__x

    @property
    def y(self):
        """ A unit vector along the y-arm

        Is automatically updated if related quantities are modified.

        Returns
        -------
        array_like: A 3D array representation of a unit vector along the y-arm
        """
        if not self.__y_updated:
            self.__y = self.unit_vector_along_arm('y')
            self.__y_updated = True
            # The detector tensor depends on y, so it must be recomputed too.
            self.__detector_tensor_updated = False
        return self.__y
    @property
    def detector_tensor(self):
        """
        Calculate the detector tensor from the unit vectors along each arm of the detector.

        See Eq. B6 of arXiv:gr-qc/0008066
        Is automatically updated if related quantities are modified.

        Returns
        -------
        array_like: A 3x3 array representation of the detector tensor
        """
        if not self.__x_updated or not self.__y_updated:
            # Touching the x/y properties refreshes their caches and clears
            # __detector_tensor_updated when either vector changed.
            _, _ = self.x, self.y  # noqa
        if not self.__detector_tensor_updated:
            # D_ij = (x_i x_j - y_i y_j) / 2
            self.__detector_tensor = 0.5 * (np.einsum('i,j->ij', self.x, self.x) - np.einsum('i,j->ij', self.y, self.y))
            self.__detector_tensor_updated = True
        return self.__detector_tensor
    def antenna_response(self, ra, dec, time, psi, mode):
        """
        Calculate the antenna response function for a given sky location

        See Nishizawa et al. (2009) arXiv:0903.0528 for definitions of the polarisation tensors.
        [u, v, w] represent the Earth-frame
        [m, n, omega] represent the wave-frame
        Note: there is a typo in the definition of the wave-frame in Nishizawa et al.

        Parameters
        -------
        ra: float
            right ascension in radians
        dec: float
            declination in radians
        time: float
            geocentric GPS time
        psi: float
            binary polarisation angle counter-clockwise about the direction of propagation
        mode: str
            polarisation mode (e.g. 'plus', 'cross')

        Returns
        -------
        float: The antenna response for the specified mode (full contraction
            of the detector tensor with the polarization tensor)
        """
        polarization_tensor = gwutils.get_polarization_tensor(ra, dec, time, psi, mode)
        # einsum('ij,ij->') contracts both indices, yielding a scalar.
        return np.einsum('ij,ij->', self.detector_tensor, polarization_tensor)
    def get_detector_response(self, waveform_polarizations, parameters):
        """ Get the detector response for a particular waveform

        Parameters
        -------
        waveform_polarizations: dict
            polarizations of the waveform
        parameters: dict
            parameters describing position and time of arrival of the signal

        Returns
        -------
        array_like: The frequency-domain signal as observed in the
            interferometer (antenna-weighted sum of the polarizations,
            time-shifted and calibrated)
        """
        signal = {}
        # Weight each polarization by the detector's antenna response.
        for mode in waveform_polarizations.keys():
            det_response = self.antenna_response(
                parameters['ra'],
                parameters['dec'],
                parameters['geocent_time'],
                parameters['psi'], mode)

            signal[mode] = waveform_polarizations[mode] * det_response
        signal_ifo = sum(signal.values())

        # Zero out frequencies outside the analysis band.
        signal_ifo *= self.strain_data.frequency_mask

        time_shift = self.time_delay_from_geocenter(
            parameters['ra'], parameters['dec'], parameters['geocent_time'])
        # dt is the arrival time at this detector relative to the data start.
        dt = parameters['geocent_time'] + time_shift - self.strain_data.start_time

        # Apply the time shift as a frequency-domain phase factor.
        signal_ifo = signal_ifo * np.exp(
            -1j * 2 * np.pi * dt * self.frequency_array)

        signal_ifo *= self.calibration_model.get_calibration_factor(
            self.frequency_array, prefix='recalib_{}_'.format(self.name), **parameters)

        return signal_ifo
    def inject_signal(self, parameters=None, injection_polarizations=None,
                      waveform_generator=None):
        """ Inject a signal into noise

        Parameters
        ----------
        parameters: dict
            Parameters of the injection.
        injection_polarizations: dict
            Polarizations of waveform to inject, output of
            `waveform_generator.frequency_domain_strain()`. If
            `waveform_generator` is also given, the injection_polarizations will
            be calculated directly and this argument can be ignored.
        waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
            A WaveformGenerator instance using the source model to inject. If
            `injection_polarizations` is given, this will be ignored.

        Note
        -------
        If your signal takes a substantial amount of time to generate, or
        you experience buggy behaviour, it is preferable to provide the
        injection_polarizations directly.

        Returns
        -------
        injection_polarizations: dict
            The polarizations that were injected.
        """
        if injection_polarizations is None:
            if waveform_generator is not None:
                injection_polarizations = \
                    waveform_generator.frequency_domain_strain(parameters)
            else:
                raise ValueError(
                    "inject_signal needs one of waveform_generator or "
                    "injection_polarizations.")

            if injection_polarizations is None:
                raise ValueError(
                    'Trying to inject signal which is None. The most likely cause'
                    ' is that waveform_generator.frequency_domain_strain returned'
                    ' None. This can be caused if, e.g., mass_2 > mass_1.')

        if not self.strain_data.time_within_data(parameters['geocent_time']):
            logger.warning(
                'Injecting signal outside segment, start_time={}, merger time={}.'
                .format(self.strain_data.start_time, parameters['geocent_time']))

        signal_ifo = self.get_detector_response(injection_polarizations, parameters)
        # If strain data with a matching shape already exists, add the signal
        # to it; otherwise replace the strain data with the bare signal.
        if np.shape(self.frequency_domain_strain).__eq__(np.shape(signal_ifo)):
            self.strain_data.frequency_domain_strain = \
                signal_ifo + self.strain_data.frequency_domain_strain
        else:
            logger.info('Injecting into zero noise.')
            self.set_strain_data_from_frequency_domain_strain(
                signal_ifo,
                sampling_frequency=self.strain_data.sampling_frequency,
                duration=self.strain_data.duration,
                start_time=self.strain_data.start_time)

        # Record SNRs and injection parameters for later reference.
        self.meta_data['optimal_SNR'] = (
            np.sqrt(self.optimal_snr_squared(signal=signal_ifo)).real)
        self.meta_data['matched_filter_SNR'] = (
            self.matched_filter_snr(signal=signal_ifo))
        self.meta_data['parameters'] = parameters

        logger.info("Injected signal in {}:".format(self.name))
        logger.info("  optimal SNR = {:.2f}".format(self.meta_data['optimal_SNR']))
        logger.info("  matched filter SNR = {:.2f}".format(self.meta_data['matched_filter_SNR']))
        for key in parameters:
            logger.info('  {} = {}'.format(key, parameters[key]))

        return injection_polarizations
def unit_vector_along_arm(self, arm):
"""
Calculate the unit vector pointing along the specified arm in cartesian Earth-based coordinates.
See Eqs. B14-B17 in arXiv:gr-qc/0008066
Parameters
-------
arm: str
'x' or 'y' (arm of the detector)
Returns
-------
array_like: 3D unit vector along arm in cartesian Earth-based coordinates
Raises
-------
ValueError: If arm is neither 'x' nor 'y'
"""
if arm == 'x':
return self.__calculate_arm(self.__xarm_tilt, self.__xarm_azimuth)
elif arm == 'y':
return self.__calculate_arm(self.__yarm_tilt, self.__yarm_azimuth)
else:
raise ValueError("Arm must either be 'x' or 'y'.")
def __calculate_arm(self, arm_tilt, arm_azimuth):
e_long = np.array([-np.sin(self.__longitude), np.cos(self.__longitude), 0])
e_lat = np.array([-np.sin(self.__latitude) * np.cos(self.__longitude),
-np.sin(self.__latitude) * np.sin(self.__longitude), np.cos(self.__latitude)])
e_h = np.array([np.cos(self.__latitude) * np.cos(self.__longitude),
np.cos(self.__latitude) * np.sin(self.__longitude), np.sin(self.__latitude)])
return (np.cos(arm_tilt) * np.cos(arm_azimuth) * e_long +
np.cos(arm_tilt) * np.sin(arm_azimuth) * e_lat +
np.sin(arm_tilt) * e_h)
    @property
    def amplitude_spectral_density_array(self):
        """ Returns the amplitude spectral density (ASD) given we know a power spectral denstiy (PSD)

        Returns
        -------
        array_like: An array representation of the ASD
        """
        # ASD is the square root of the PSD.
        return self.power_spectral_density_array ** 0.5

    @property
    def power_spectral_density_array(self):
        """ Returns the power spectral density (PSD)

        This accounts for whether the data in the interferometer has been windowed.

        Returns
        -------
        array_like: An array representation of the PSD
        """
        # The window factor rescales the PSD for the Tukey-windowed data.
        return (self.power_spectral_density.power_spectral_density_interpolated(self.frequency_array) *
                self.strain_data.window_factor)
    @property
    def frequency_array(self):
        """ array_like: Frequencies of the strain data (delegated to strain_data). """
        return self.strain_data.frequency_array

    @property
    def frequency_mask(self):
        """ array_like: Boolean mask of frequencies inside the analysis band. """
        return self.strain_data.frequency_mask

    @property
    def frequency_domain_strain(self):
        """ The frequency domain strain in units of strain / Hz """
        return self.strain_data.frequency_domain_strain

    @property
    def time_domain_strain(self):
        """ The time domain strain in units of s """
        return self.strain_data.time_domain_strain

    @property
    def time_array(self):
        """ array_like: Times of the strain data (delegated to strain_data). """
        return self.strain_data.time_array
    def time_delay_from_geocenter(self, ra, dec, time):
        """
        Calculate the time delay from the geocenter for the interferometer.

        Use the time delay function from utils.

        Parameters
        -------
        ra: float
            right ascension of source in radians
        dec: float
            declination of source in radians
        time: float
            GPS time

        Returns
        -------
        float: The time delay from geocenter in seconds
        """
        return gwutils.time_delay_geocentric(self.vertex, np.array([0, 0, 0]), ra, dec, time)

    def vertex_position_geocentric(self):
        """
        Calculate the position of the IFO vertex in geocentric coordinates in meters.

        Based on arXiv:gr-qc/0008066 Eqs. B11-B13 except for the typo in the definition of the local radius.
        See Section 2.1 of LIGO-T980044-10 for the correct expression

        Returns
        -------
        array_like: A 3D array representation of the vertex
        """
        return gwutils.get_vertex_position_geocentric(self.__latitude, self.__longitude, self.__elevation)

    def optimal_snr_squared(self, signal):
        """
        Parameters
        ----------
        signal: array_like
            Array containing the signal

        Returns
        -------
        float: The optimal signal to noise ratio possible squared
        """
        return gwutils.optimal_snr_squared(
            signal=signal,
            power_spectral_density=self.power_spectral_density_array,
            duration=self.strain_data.duration)

    def inner_product(self, signal):
        """
        Parameters
        ----------
        signal: array_like
            Array containing the signal

        Returns
        -------
        float: The noise-weighted inner product of the signal with the
            interferometer's frequency-domain strain data
        """
        return gwutils.noise_weighted_inner_product(
            aa=signal, bb=self.frequency_domain_strain,
            power_spectral_density=self.power_spectral_density_array,
            duration=self.strain_data.duration)

    def matched_filter_snr(self, signal):
        """
        Parameters
        ----------
        signal: array_like
            Array containing the signal

        Returns
        -------
        float: The matched filter signal to noise ratio
        """
        return gwutils.matched_filter_snr(
            signal=signal, frequency_domain_strain=self.frequency_domain_strain,
            power_spectral_density=self.power_spectral_density_array,
            duration=self.strain_data.duration)

    @property
    def whitened_frequency_domain_strain(self):
        """ Calculates the whitened data by dividing data by the amplitude spectral density

        Returns
        -------
        array_like: The whitened data
        """
        return self.strain_data.frequency_domain_strain / self.amplitude_spectral_density_array
def save_data(self, outdir, label=None):
""" Creates a save file for the data in plain text format
Parameters
----------
outdir: str
The output directory in which the data is supposed to be saved
label: str
The name of the output files
"""
np.savetxt('{}/{}_frequency_domain_data.dat'.format(outdir, self.name),
np.array(
[self.frequency_array,
self.frequency_domain_strain.real,
self.frequency_domain_strain.imag]).T,
header='f real_h(f) imag_h(f)')
if label is None:
filename = '{}/{}_psd.dat'.format(outdir, self.name)
else:
filename = '{}/{}_{}_psd.dat'.format(outdir, self.name, label)
np.savetxt(filename,
np.array(
[self.frequency_array,
self.amplitude_spectral_density_array]).T,
header='f h(f)')
    def plot_data(self, signal=None, outdir='.', label=None):
        """ Plot the interferometer strain data ASD, the detector ASD and, optionally, a signal.

        Saves a figure to '<outdir>/<name>_[<label>_]frequency_domain_data.png'.

        Parameters
        ----------
        signal: array_like, optional
            A frequency-domain signal to overplot as an ASD.
        outdir: str, optional
            Directory in which to save the figure, defaults to '.'.
        label: str, optional
            If given, inserted into the output file name.
        """
        # Skip plotting entirely when running in test mode.
        if utils.command_line_args.test:
            return

        fig, ax = plt.subplots()
        ax.loglog(self.frequency_array,
                  gwutils.asd_from_freq_series(freq_data=self.frequency_domain_strain,
                                               df=(self.frequency_array[1] - self.frequency_array[0])),
                  color='C0', label=self.name)
        ax.loglog(self.frequency_array,
                  self.amplitude_spectral_density_array,
                  color='C1', lw=0.5, label=self.name + ' ASD')
        if signal is not None:
            ax.loglog(self.frequency_array,
                      gwutils.asd_from_freq_series(freq_data=signal,
                                                   df=(self.frequency_array[1] - self.frequency_array[0])),
                      color='C2',
                      label='Signal')
        ax.grid('on')
        ax.set_ylabel(r'strain [strain/$\sqrt{\rm Hz}$]')
        ax.set_xlabel(r'frequency [Hz]')
        ax.set_xlim(20, 2000)
        ax.legend(loc='best')
        if label is None:
            fig.savefig(
                '{}/{}_frequency_domain_data.png'.format(outdir, self.name))
        else:
            fig.savefig(
                '{}/{}_{}_frequency_domain_data.png'.format(
                    outdir, self.name, label))
    def plot_time_domain_data(
            self, outdir='.', label=None, bandpass_frequencies=(50, 250),
            notches=None, start_end=None, t0=None):
        """ Plots the strain data in the time domain

        Parameters
        ----------
        outdir, label: str
            Used in setting the saved filename.
        bandpass_frequencies: tuple, optional
            A tuple of the (low, high) frequencies to use when bandpassing the
            data, if None no bandpass is applied.
        notches: list, optional
            A list of frequencies specifying any lines to notch
        start_end: tuple
            A tuple of the (start, end) range of GPS times to plot
        t0: float
            If given, the reference time to subtract from the time series before
            plotting.
        """

        # We use the gwpy timeseries to perform bandpass and notching
        if notches is None:
            notches = list()
        timeseries = gwpy.timeseries.TimeSeries(
            data=self.time_domain_strain, times=self.time_array)
        zpks = []
        if bandpass_frequencies is not None:
            zpks.append(gwpy.signal.filter_design.bandpass(
                bandpass_frequencies[0], bandpass_frequencies[1],
                self.strain_data.sampling_frequency))
        if notches is not None:
            for line in notches:
                zpks.append(gwpy.signal.filter_design.notch(
                    line, self.strain_data.sampling_frequency))
        if len(zpks) > 0:
            # Combine all filters and apply them forwards-backwards (zero phase).
            zpk = gwpy.signal.filter_design.concatenate_zpks(*zpks)
            strain = timeseries.filter(zpk, filtfilt=True)
        else:
            strain = timeseries

        fig, ax = plt.subplots()
        if t0:
            x = self.time_array - t0
            xlabel = 'GPS time [s] - {}'.format(t0)
        else:
            x = self.time_array
            xlabel = 'GPS time [s]'

        ax.plot(x, strain)
        ax.set_xlabel(xlabel)
        ax.set_ylabel('Strain')

        if start_end is not None:
            ax.set_xlim(*start_end)

        fig.tight_layout()

        if label is None:
            fig.savefig(
                '{}/{}_time_domain_data.png'.format(outdir, self.name))
        else:
            fig.savefig(
                '{}/{}_{}_time_domain_data.png'.format(outdir, self.name, label))
@staticmethod
def _hdf5_filename_from_outdir_label(outdir, label):
return os.path.join(outdir, label + '.h5')
def to_hdf5(self, outdir='outdir', label=None):
""" Save the object to a hdf5 file
Attributes
----------
outdir: str, optional
Output directory name of the file, defaults to 'outdir'.
label: str, optional
Output file name, is self.name if not given otherwise.
"""
if sys.version_info[0] < 3:
raise NotImplementedError('Pickling of Interferometer is not supported in Python 2.'
'Use Python 3 instead.')
if label is None:
label = self.name
utils.check_directory_exists_and_if_not_mkdir('outdir')
filename = self._hdf5_filename_from_outdir_label(outdir, label)
dd.io.save(filename, self)
    @classmethod
    def from_hdf5(cls, filename=None):
        """ Loads in an Interferometer object from an hdf5 file

        Parameters
        ----------
        filename: str
            If given, try to load from this filename

        Returns
        -------
        Interferometer: The loaded object

        Raises
        ------
        TypeError
            If the loaded object's class is not exactly this class.
        """
        if sys.version_info[0] < 3:
            raise NotImplementedError('Pickling of Interferometer is not supported in Python 2.'
                                      'Use Python 3 instead.')
        res = dd.io.load(filename)
        # Exact class match required (subclasses are deliberately rejected).
        if res.__class__ != cls:
            raise TypeError('The loaded object is not an Interferometer')
        return res
class TriangularInterferometer(InterferometerList):
    """ A list of three co-located interferometers arranged in a triangle.

    Each successive detector's arms are rotated by 240 degrees and its
    vertex is shifted along the previous x-arm direction, forming a
    closed triangle of side `length`.
    """

    def __init__(self, name, power_spectral_density, minimum_frequency, maximum_frequency,
                 length, latitude, longitude, elevation, xarm_azimuth, yarm_azimuth,
                 xarm_tilt=0., yarm_tilt=0.):
        InterferometerList.__init__(self, [])
        self.name = name
        # Broadcast scalar settings so each of the three detectors gets
        # its own value (idiom fix: isinstance with a tuple of types).
        if isinstance(power_spectral_density, PowerSpectralDensity):
            power_spectral_density = [power_spectral_density] * 3
        if isinstance(minimum_frequency, (int, float)):
            minimum_frequency = [minimum_frequency] * 3
        if isinstance(maximum_frequency, (int, float)):
            maximum_frequency = [maximum_frequency] * 3

        for ii in range(3):
            self.append(Interferometer(
                '{}{}'.format(name, ii + 1), power_spectral_density[ii], minimum_frequency[ii], maximum_frequency[ii],
                length, latitude, longitude, elevation, xarm_azimuth, yarm_azimuth, xarm_tilt, yarm_tilt))

            # Rotate the arms and move the vertex to the triangle's next corner.
            xarm_azimuth += 240
            yarm_azimuth += 240

            latitude += np.arctan(length * np.sin(xarm_azimuth * np.pi / 180) * 1e3 / utils.radius_of_earth)
            longitude += np.arctan(length * np.cos(xarm_azimuth * np.pi / 180) * 1e3 / utils.radius_of_earth)
class PowerSpectralDensity(object):
def __init__(self, frequency_array=None, psd_array=None, asd_array=None,
psd_file=None, asd_file=None):
"""
Instantiate a new PowerSpectralDensity object.
Example
-------
Using the `from` method directly (here `psd_file` is a string
containing the path to the file to load):
>>> power_spectral_density = PowerSpectralDensity.from_power_spectral_density_file(psd_file)
Alternatively (and equivalently) setting the psd_file directly:
>>> power_spectral_density = PowerSpectralDensity(psd_file=psd_file)
Attributes
----------
asd_array: array_like
Array representation of the ASD
asd_file: str
Name of the ASD file
frequency_array: array_like
Array containing the frequencies of the ASD/PSD values
psd_array: array_like
Array representation of the PSD
psd_file: str
Name of the PSD file
power_spectral_density_interpolated: scipy.interpolated.interp1d
Interpolated function of the PSD
"""
self.frequency_array = np.array(frequency_array)
if psd_array is not None:
self.psd_array = psd_array
if asd_array is not None:
self.asd_array = asd_array
self.psd_file = psd_file
self.asd_file = asd_file
def __eq__(self, other):
if self.psd_file == other.psd_file \
and self.asd_file == other.asd_file \
and np.array_equal(self.frequency_array, other.frequency_array) \
and np.array_equal(self.psd_array, other.psd_array) \
and np.array_equal(self.asd_array, other.asd_array):
return True
return False
def __repr__(self):
if self.asd_file is not None or self.psd_file is not None:
return self.__class__.__name__ + '(psd_file=\'{}\', asd_file=\'{}\')' \
.format(self.psd_file, self.asd_file)
else:
return self.__class__.__name__ + '(frequency_array={}, psd_array={}, asd_array={})' \
.format(self.frequency_array, self.psd_array, self.asd_array)
@staticmethod
def from_amplitude_spectral_density_file(asd_file):
""" Set the amplitude spectral density from a given file
Parameters
----------
asd_file: str
File containing amplitude spectral density, format 'f h_f'
"""
return PowerSpectralDensity(asd_file=asd_file)
@staticmethod
def from_power_spectral_density_file(psd_file):
""" Set the power spectral density from a given file
Parameters
----------
psd_file: str, optional
File containing power spectral density, format 'f h_f'
"""
return PowerSpectralDensity(psd_file=psd_file)
@staticmethod
def from_frame_file(frame_file, psd_start_time, psd_duration,
fft_length=4, sampling_frequency=4096, roll_off=0.2,
overlap=0, channel=None, name=None, outdir=None,
analysis_segment_start_time=None):
""" Generate power spectral density from a frame file
Parameters
----------
frame_file: str, optional
Frame file to read data from.
psd_start_time: float
Beginning of segment to analyse.
psd_duration: float, optional
Duration of data (in seconds) to generate PSD from.
fft_length: float, optional
Number of seconds in a single fft.
sampling_frequency: float, optional
Sampling frequency for time series.
This is twice the maximum frequency.
roll_off: float, optional
Rise time in seconds of tukey window.
overlap: float,
Number of seconds of overlap between FFTs.
channel: str, optional
Name of channel to use to generate PSD.
name, outdir: str, optional
Name (and outdir) of the detector for which a PSD is to be
generated.
analysis_segment_start_time: float, optional
The start time of the analysis segment, if given, this data will
be removed before creating the PSD.
"""
strain = InterferometerStrainData(roll_off=roll_off)
strain.set_from_frame_file(
frame_file, start_time=psd_start_time, duration=psd_duration,
channel=channel, sampling_frequency=sampling_frequency)
frequency_array, psd_array = strain.create_power_spectral_density(
fft_length=fft_length, name=name, outdir=outdir, overlap=overlap,
analysis_segment_start_time=analysis_segment_start_time)
return PowerSpectralDensity(frequency_array=frequency_array, psd_array=psd_array)
@staticmethod
def from_amplitude_spectral_density_array(frequency_array, asd_array):
return PowerSpectralDensity(frequency_array=frequency_array, asd_array=asd_array)
@staticmethod
def from_power_spectral_density_array(frequency_array, psd_array):
return PowerSpectralDensity(frequency_array=frequency_array, psd_array=psd_array)
@staticmethod
def from_aligo():
logger.info("No power spectral density provided, using aLIGO,"
"zero detuning, high power.")
return PowerSpectralDensity.from_power_spectral_density_file(psd_file='aLIGO_ZERO_DET_high_P_psd.txt')
@property
def psd_array(self):
return self.__psd_array
@psd_array.setter
def psd_array(self, psd_array):
self.__check_frequency_array_matches_density_array(psd_array)
self.__psd_array = np.array(psd_array)
self.__asd_array = psd_array ** 0.5
self.__interpolate_power_spectral_density()
@property
def asd_array(self):
return self.__asd_array
@asd_array.setter
def asd_array(self, asd_array):
self.__check_frequency_array_matches_density_array(asd_array)
self.__asd_array = np.array(asd_array)
self.__psd_array = asd_array ** 2
self.__interpolate_power_spectral_density()
def __check_frequency_array_matches_density_array(self, density_array):
if len(self.frequency_array) != len(density_array):
raise ValueError('Provided spectral density does not match frequency array. Not updating.\n'
'Length spectral density {}\n Length frequency array {}\n'
.format(density_array, self.frequency_array))
def __interpolate_power_spectral_density(self):
"""Interpolate the loaded power spectral density so it can be resampled
for arbitrary frequency arrays.
"""
self.__power_spectral_density_interpolated = interp1d(self.frequency_array,
self.psd_array,
bounds_error=False,
fill_value=np.inf)
@property
def power_spectral_density_interpolated(self):
return self.__power_spectral_density_interpolated
@property
def asd_file(self):
return self._asd_file
@asd_file.setter
def asd_file(self, asd_file):
asd_file = self.__validate_file_name(file=asd_file)
self._asd_file = asd_file
if asd_file is not None:
self.__import_amplitude_spectral_density()
self.__check_file_was_asd_file()
def __check_file_was_asd_file(self):
if min(self.asd_array) < 1e-30:
logger.warning("You specified an amplitude spectral density file.")
logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
logger.warning("The minimum of the provided curve is {:.2e}.".format(min(self.asd_array)))
logger.warning("You may have intended to provide this as a power spectral density.")
@property
def psd_file(self):
return self._psd_file
@psd_file.setter
def psd_file(self, psd_file):
psd_file = self.__validate_file_name(file=psd_file)
self._psd_file = psd_file
if psd_file is not None:
self.__import_power_spectral_density()
self.__check_file_was_psd_file()
def __check_file_was_psd_file(self):
if min(self.psd_array) > 1e-30:
logger.warning("You specified a power spectral density file.")
logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
logger.warning("The minimum of the provided curve is {:.2e}.".format(min(self.psd_array)))
logger.warning("You may have intended to provide this as an amplitude spectral density.")
@staticmethod
def __validate_file_name(file):
"""
Test if the file contains a path (i.e., contains '/').
If not assume the file is in the default directory.
"""
if file is not None and '/' not in file:
file = os.path.join(os.path.dirname(__file__), 'noise_curves', file)
return file
def __import_amplitude_spectral_density(self):
""" Automagically load an amplitude spectral density curve """
self.frequency_array, self.asd_array = np.genfromtxt(self.asd_file).T
def __import_power_spectral_density(self):
""" Automagically load a power spectral density curve """
self.frequency_array, self.psd_array = np.genfromtxt(self.psd_file).T
def get_noise_realisation(self, sampling_frequency, duration):
"""
Generate frequency Gaussian noise scaled to the power spectral density.
Parameters
-------
sampling_frequency: float
sampling frequency of noise
duration: float
duration of noise
Returns
-------
array_like: frequency domain strain of this noise realisation
array_like: frequencies related to the frequency domain strain
"""
white_noise, frequencies = utils.create_white_noise(sampling_frequency, duration)
frequency_domain_strain = self.__power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
out_of_bounds = (frequencies < min(self.frequency_array)) | (frequencies > max(self.frequency_array))
frequency_domain_strain[out_of_bounds] = 0 * (1 + 1j)
return frequency_domain_strain, frequencies
def get_empty_interferometer(name):
    """
    Get an interferometer with standard parameters for known detectors.

    These objects do not have any noise instantiated.

    The available instruments are:
        H1, L1, V1, GEO600, CE

    Detector positions taken from:
        L1/H1: LIGO-T980044-10
        V1/GEO600: arXiv:gr-qc/0008066 [45]
        CE: located at the site of H1

    Detector sensitivities:
        H1/L1/V1: https://dcc.ligo.org/LIGO-P1200087-v42/public
        GEO600: http://www.geo600.org/1032083/GEO600_Sensitivity_Curves
        CE: https://dcc.ligo.org/LIGO-P1600143/public

    Parameters
    ----------
    name: str
        Interferometer identifier.

    Returns
    -------
    interferometer: Interferometer
        Interferometer instance

    Raises
    ------
    ValueError
        If no '<name>.interferometer' definition file exists for this name.
    """
    filename = os.path.join(os.path.dirname(__file__), 'detectors', '{}.interferometer'.format(name))
    try:
        interferometer = load_interferometer(filename)
        return interferometer
    except OSError:
        # A missing definition file is reported as an unknown detector.
        raise ValueError('Interferometer {} not implemented'.format(name))
def load_interferometer(filename):
    """Load an interferometer from a file.

    The file is a sequence of 'key = value' lines; lines starting with '#'
    are skipped. The 'shape' key (optional) selects between an L-shaped
    `Interferometer` (default) and a `TriangularInterferometer`.

    Parameters
    ----------
    filename: str
        Path to the '.interferometer' parameter file.

    Returns
    -------
    Interferometer or TriangularInterferometer

    Raises
    ------
    IOError
        If the 'shape' parameter is present but not recognised.
    """
    parameters = dict()
    with open(filename, 'r') as parameter_file:
        lines = parameter_file.readlines()
        for line in lines:
            if line[0] == '#':
                continue
            split_line = line.split('=')
            key = split_line[0].strip()
            # SECURITY NOTE: eval executes arbitrary code from the file;
            # only load interferometer files from trusted sources.
            value = eval('='.join(split_line[1:]))
            parameters[key] = value
    if 'shape' not in parameters.keys():
        interferometer = Interferometer(**parameters)
        # Bug fix: previously the literal string 'name' was logged here
        # instead of the detector's actual name.
        logger.debug('Assuming L shape for {}'.format(parameters.get('name', filename)))
    elif parameters['shape'].lower() in ['l', 'ligo']:
        parameters.pop('shape')
        interferometer = Interferometer(**parameters)
    elif parameters['shape'].lower() in ['triangular', 'triangle']:
        parameters.pop('shape')
        interferometer = TriangularInterferometer(**parameters)
    else:
        raise IOError("{} could not be loaded. Invalid parameter 'shape'.".format(filename))
    return interferometer
def get_interferometer_with_open_data(
        name, trigger_time, duration=4, start_time=None, roll_off=0.2,
        psd_offset=-1024, psd_duration=100, cache=True, outdir='outdir',
        label=None, plot=True, filter_freq=None, **kwargs):
    """
    Helper function to obtain an Interferometer instance with appropriate
    PSD and data, given a trigger time.

    Parameters
    ----------
    name: str
        Detector name, e.g., 'H1'.
    trigger_time: float
        Trigger GPS time.
    duration: float, optional
        The total time (in seconds) to analyse. Defaults to 4s.
    start_time: float, optional
        Beginning of the segment, if None, the trigger is placed 2s before the end
        of the segment.
    roll_off: float
        The roll-off (in seconds) used in the Tukey window.
    psd_offset, psd_duration: float
        The power spectral density (psd) is estimated from the stretch of data
        starting at `start_time + duration + psd_offset` and lasting
        `psd_duration` seconds.
    cache: bool, optional
        Whether or not to store the acquired data
    outdir: str
        Directory where the psd files are saved
    label: str
        If given, an identifying label used in generating file names.
    plot: bool
        If true, create an ASD + strain plot
    filter_freq: float
        Low pass filter frequency
    **kwargs:
        All keyword arguments are passed to
        `gwpy.timeseries.TimeSeries.fetch_open_data()`.

    Returns
    -------
    bilby.gw.detector.Interferometer: An Interferometer instance with a PSD and frequency-domain strain data.
    """
    logger.warning(
        "Parameter estimation for real interferometer data in bilby is in "
        "alpha testing at the moment: the routines for windowing and filtering"
        " have not been reviewed.")
    utils.check_directory_exists_and_if_not_mkdir(outdir)
    # Default segment placement: trigger sits 2 s before the segment end.
    if start_time is None:
        start_time = trigger_time + 2 - duration
    # Analysis data: fetched from open data, then low-pass filtered.
    strain = InterferometerStrainData(roll_off=roll_off)
    strain.set_from_open_data(
        name=name, start_time=start_time, duration=duration,
        outdir=outdir, cache=cache, **kwargs)
    strain.low_pass_filter(filter_freq)
    # Off-source data for PSD estimation, offset from the analysis segment
    # (psd_offset is negative by default, i.e. data before the trigger).
    strain_psd = InterferometerStrainData(roll_off=roll_off)
    strain_psd.set_from_open_data(
        name=name, start_time=start_time + duration + psd_offset,
        duration=psd_duration, outdir=outdir, cache=cache, **kwargs)
    # Low pass filter
    strain_psd.low_pass_filter(filter_freq)
    # Create and save PSDs; FFT length matches the analysis segment duration.
    psd_frequencies, psd_array = strain_psd.create_power_spectral_density(
        name=name, outdir=outdir, fft_length=strain.duration)
    interferometer = get_empty_interferometer(name)
    interferometer.power_spectral_density = PowerSpectralDensity(
        psd_array=psd_array, frequency_array=psd_frequencies)
    interferometer.strain_data = strain
    if plot:
        interferometer.plot_data(outdir=outdir, label=label)
    return interferometer
def get_interferometer_with_fake_noise_and_injection(
        name, injection_parameters, injection_polarizations=None,
        waveform_generator=None, sampling_frequency=4096, duration=4,
        start_time=None, outdir='outdir', label=None, plot=True, save=True,
        zero_noise=False):
    """
    Helper function to obtain an Interferometer instance with simulated noise
    and an injected signal, given the injection parameters.

    Note: by default this generates an Interferometer with a power spectral
    density based on advanced LIGO.

    Parameters
    ----------
    name: str
        Detector name, e.g., 'H1'.
    injection_parameters: dict
        injection parameters, needed for sky position and timing
    injection_polarizations: dict
        Polarizations of waveform to inject, output of
        `waveform_generator.frequency_domain_strain()`. If
        `waveform_generator` is also given, the injection_polarizations will
        be calculated directly and this argument can be ignored.
    waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
        A WaveformGenerator instance using the source model to inject. If
        `injection_polarizations` is given, this will be ignored.
    sampling_frequency: float
        sampling frequency for data, should match injection signal
    duration: float
        length of data, should be the same as used for signal generation
    start_time: float
        Beginning of data segment, if None, injection is placed 2s before
        end of segment.
    outdir: str
        directory in which to store output
    label: str
        If given, an identifying label used in generating file names.
    plot: bool
        If true, create an ASD + strain plot
    save: bool
        If true, save frequency domain data and PSD to file
    zero_noise: bool
        If true, set noise to zero.

    Returns
    -------
    bilby.gw.detector.Interferometer: An Interferometer instance with a PSD and frequency-domain strain data.
    """
    utils.check_directory_exists_and_if_not_mkdir(outdir)
    # Default segment placement: injection sits 2 s before the segment end.
    if start_time is None:
        start_time = injection_parameters['geocent_time'] + 2 - duration
    interferometer = get_empty_interferometer(name)
    # Noise curve is always the advanced-LIGO design PSD.
    interferometer.power_spectral_density = PowerSpectralDensity.from_aligo()
    if zero_noise:
        interferometer.set_strain_data_from_zero_noise(
            sampling_frequency=sampling_frequency, duration=duration,
            start_time=start_time)
    else:
        # Gaussian noise realisation coloured by the PSD set above.
        interferometer.set_strain_data_from_power_spectral_density(
            sampling_frequency=sampling_frequency, duration=duration,
            start_time=start_time)
    injection_polarizations = interferometer.inject_signal(
        parameters=injection_parameters,
        injection_polarizations=injection_polarizations,
        waveform_generator=waveform_generator)
    # Recompute the projected signal for plotting purposes only.
    signal = interferometer.get_detector_response(
        injection_polarizations, injection_parameters)
    if plot:
        interferometer.plot_data(signal=signal, outdir=outdir, label=label)
    if save:
        interferometer.save_data(outdir, label=label)
    return interferometer
def get_event_data(
        event, interferometer_names=None, duration=4, roll_off=0.2,
        psd_offset=-1024, psd_duration=100, cache=True, outdir='outdir',
        label=None, plot=True, filter_freq=None, **kwargs):
    """
    Fetch open data for a specified catalogued event.

    Parameters
    ----------
    event: str
        Event descriptor, this can deal with some prefixes, e.g., '150914',
        'GW150914', 'LVT151012'
    interferometer_names: list, optional
        List of interferometer identifiers, e.g., 'H1'.
        If None will look for data in 'H1', 'V1', 'L1'
    duration: float
        Time duration to search for.
    roll_off: float
        The roll-off (in seconds) used in the Tukey window.
    psd_offset, psd_duration: float
        The power spectral density (psd) is estimated using data from
        `center_time+psd_offset` to `center_time+psd_offset + psd_duration`.
    cache: bool
        Whether or not to store the acquired data.
    outdir: str
        Directory where the psd files are saved
    label: str
        If given, an identifying label used in generating file names.
    plot: bool
        If true, create an ASD + strain plot
    filter_freq: float
        Low pass filter frequency
    **kwargs:
        All keyword arguments are passed to
        `gwpy.timeseries.TimeSeries.fetch_open_data()`.

    Return
    ------
    list: A list of bilby.gw.detector.Interferometer objects
    """
    if interferometer_names is None:
        interferometer_names = ['H1', 'L1', 'V1']
    event_time = gwutils.get_event_time(event)
    ifo_list = []
    for name in interferometer_names:
        try:
            ifo = get_interferometer_with_open_data(
                name, trigger_time=event_time, duration=duration,
                roll_off=roll_off, psd_offset=psd_offset,
                psd_duration=psd_duration, cache=cache, outdir=outdir,
                label=label, plot=plot, filter_freq=filter_freq, **kwargs)
        except ValueError as e:
            # Detectors without data for this event are skipped, not fatal.
            logger.debug("Error raised {}".format(e))
            logger.warning('No data found for {}.'.format(name))
        else:
            ifo_list.append(ifo)
    return InterferometerList(ifo_list)
def load_data_from_cache_file(
        cache_file, start_time, segment_duration, psd_duration, psd_start_time,
        channel_name=None, sampling_frequency=4096, roll_off=0.2,
        overlap=0, outdir=None):
    """ Helper routine to generate an interferometer from a cache file

    Parameters
    ----------
    cache_file: str
        Path to the location of the cache file
    start_time, psd_start_time: float
        GPS start time of the segment and data stretch used for the PSD
    segment_duration, psd_duration: float
        Segment duration and duration of data to use to generate the PSD (in
        seconds).
    roll_off: float, optional
        Rise time in seconds of tukey window.
    overlap: float,
        Number of seconds of overlap between FFTs.
    channel_name: str
        Channel name
    sampling_frequency: int
        Sampling frequency
    outdir: str, optional
        The output directory in which the data is saved

    Returns
    -------
    ifo: bilby.gw.detector.Interferometer
        An initialised interferometer object with strain data set to the
        appropriate data in the cache file and a PSD.

    Raises
    ------
    ValueError
        If the cache file is empty, or the analysis data / PSD data could not
        be found in any cache entry.
    """
    data_set = False
    psd_set = False
    ifo = None
    with open(cache_file, 'r') as ff:
        for line in ff:
            cache = lal.utils.cache.CacheEntry(line)
            # Does this cache entry fully contain the analysis segment?
            data_in_cache = (
                (cache.segment[0].gpsSeconds < start_time) &
                (cache.segment[1].gpsSeconds > start_time + segment_duration))
            # Does it fully contain the PSD-estimation stretch?
            psd_in_cache = (
                (cache.segment[0].gpsSeconds < psd_start_time) &
                (cache.segment[1].gpsSeconds > psd_start_time + psd_duration))
            # Bug fix: previously a fresh interferometer was created on every
            # iteration, which discarded strain data / PSD set from an
            # earlier cache entry. Create it once from the first entry.
            if ifo is None:
                ifo = get_empty_interferometer(
                    "{}1".format(cache.observatory))
            # Bug fix: the original `if not data_set & data_in_cache:` parses
            # as `not (data_set & data_in_cache)` since `&` binds tighter
            # than `not`, which loaded data from entries that do NOT cover
            # the requested segment. Intended: not-yet-set AND covering.
            if not data_set and data_in_cache:
                ifo.set_strain_data_from_frame_file(
                    frame_file=cache.path,
                    sampling_frequency=sampling_frequency,
                    duration=segment_duration,
                    start_time=start_time,
                    channel=channel_name, buffer_time=0)
                data_set = True
            if not psd_set and psd_in_cache:
                ifo.power_spectral_density = \
                    PowerSpectralDensity.from_frame_file(
                        cache.path,
                        psd_start_time=psd_start_time,
                        psd_duration=psd_duration,
                        fft_length=segment_duration,
                        sampling_frequency=sampling_frequency,
                        roll_off=roll_off,
                        overlap=overlap,
                        channel=channel_name,
                        name=cache.observatory,
                        outdir=outdir,
                        analysis_segment_start_time=start_time)
                psd_set = True
    # Robustness fix: an empty cache previously crashed with a NameError on
    # `ifo.name` below; raise a clear error instead.
    if ifo is None:
        raise ValueError('Cache file {} is empty'.format(cache_file))
    if data_set and psd_set:
        return ifo
    elif not data_set:
        raise ValueError('Data not loaded for {}'.format(ifo.name))
    elif not psd_set:
        raise ValueError('PSD not created for {}'.format(ifo.name))
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,660 | juancalderonbustillo/bilby | refs/heads/master | /test/result_test.py | from __future__ import absolute_import, division
import bilby
import unittest
import numpy as np
import pandas as pd
import shutil
import os
class TestResult(unittest.TestCase):
    """Unit tests for ``bilby.core.result.Result``: construction, file I/O,
    posterior handling, plotting, and KDE-based posterior probability."""

    def setUp(self):
        """Build a small Result with a 2-parameter Gaussian posterior."""
        np.random.seed(7)
        bilby.utils.command_line_args.test = False
        priors = bilby.prior.PriorDict(dict(
            x=bilby.prior.Uniform(0, 1, 'x', latex_label='$x$', unit='s'),
            y=bilby.prior.Uniform(0, 1, 'y', latex_label='$y$', unit='m'),
            c=1,
            d=2))
        result = bilby.core.result.Result(
            label='label', outdir='outdir', sampler='nestle',
            search_parameter_keys=['x', 'y'], fixed_parameter_keys=['c', 'd'],
            priors=priors, sampler_kwargs=dict(test='test', func=lambda x: x),
            injection_parameters=dict(x=0.5, y=0.5),
            meta_data=dict(test='test'))
        n = 100
        posterior = pd.DataFrame(dict(x=np.random.normal(0, 1, n),
                                      y=np.random.normal(0, 1, n)))
        result.posterior = posterior
        result.log_evidence = 10
        result.log_evidence_err = 11
        result.log_bayes_factor = 12
        result.log_noise_evidence = 13
        self.result = result

    def tearDown(self):
        """Restore the test flag and remove any output directory created."""
        bilby.utils.command_line_args.test = True
        try:
            shutil.rmtree(self.result.outdir)
        except OSError:
            pass
        del self.result

    def test_result_file_name(self):
        outdir = 'outdir'
        label = 'label'
        self.assertEqual(bilby.core.result.result_file_name(outdir, label),
                         '{}/{}_result.h5'.format(outdir, label))

    def test_fail_save_and_load(self):
        with self.assertRaises(ValueError):
            bilby.core.result.read_in_result()

        with self.assertRaises(IOError):
            bilby.core.result.read_in_result(filename='not/a/file')

    def test_unset_priors(self):
        result = bilby.core.result.Result(
            label='label', outdir='outdir', sampler='nestle',
            search_parameter_keys=['x', 'y'], fixed_parameter_keys=['c', 'd'],
            priors=None, sampler_kwargs=dict(test='test'),
            injection_parameters=dict(x=0.5, y=0.5),
            meta_data=dict(test='test'))
        with self.assertRaises(ValueError):
            _ = result.priors
        # Without priors, labels fall back to the raw parameter keys.
        self.assertEqual(result.parameter_labels, result.search_parameter_keys)
        self.assertEqual(result.parameter_labels_with_unit, result.search_parameter_keys)

    def test_unknown_priors_fail(self):
        with self.assertRaises(ValueError):
            bilby.core.result.Result(
                label='label', outdir='outdir', sampler='nestle',
                search_parameter_keys=['x', 'y'], fixed_parameter_keys=['c', 'd'],
                priors=['a', 'b'], sampler_kwargs=dict(test='test'),
                injection_parameters=dict(x=0.5, y=0.5),
                meta_data=dict(test='test'))

    def test_set_samples(self):
        samples = [1, 2, 3]
        self.result.samples = samples
        self.assertEqual(samples, self.result.samples)

    def test_set_nested_samples(self):
        nested_samples = [1, 2, 3]
        self.result.nested_samples = nested_samples
        self.assertEqual(nested_samples, self.result.nested_samples)

    def test_set_walkers(self):
        walkers = [1, 2, 3]
        self.result.walkers = walkers
        self.assertEqual(walkers, self.result.walkers)

    def test_set_nburn(self):
        nburn = 1
        self.result.nburn = nburn
        self.assertEqual(nburn, self.result.nburn)

    def test_unset_posterior(self):
        self.result.posterior = None
        with self.assertRaises(ValueError):
            _ = self.result.posterior

    def test_save_and_load(self):
        self.result.save_to_file()
        loaded_result = bilby.core.result.read_in_result(
            outdir=self.result.outdir, label=self.result.label)
        self.assertTrue(pd.DataFrame.equals
                        (self.result.posterior, loaded_result.posterior))
        self.assertTrue(self.result.fixed_parameter_keys == loaded_result.fixed_parameter_keys)
        self.assertTrue(self.result.search_parameter_keys == loaded_result.search_parameter_keys)
        self.assertEqual(self.result.meta_data, loaded_result.meta_data)
        self.assertEqual(self.result.injection_parameters, loaded_result.injection_parameters)
        self.assertEqual(self.result.log_evidence, loaded_result.log_evidence)
        self.assertEqual(self.result.log_noise_evidence, loaded_result.log_noise_evidence)
        self.assertEqual(self.result.log_evidence_err, loaded_result.log_evidence_err)
        self.assertEqual(self.result.log_bayes_factor, loaded_result.log_bayes_factor)
        self.assertEqual(self.result.priors['x'], loaded_result.priors['x'])
        self.assertEqual(self.result.priors['y'], loaded_result.priors['y'])
        self.assertEqual(self.result.priors['c'], loaded_result.priors['c'])
        self.assertEqual(self.result.priors['d'], loaded_result.priors['d'])

    def test_save_and_dont_overwrite(self):
        # Saving twice without overwrite should move the first file to .old
        shutil.rmtree(
            '{}/{}_result.h5.old'.format(self.result.outdir, self.result.label),
            ignore_errors=True)
        self.result.save_to_file(overwrite=False)
        self.result.save_to_file(overwrite=False)
        self.assertTrue(os.path.isfile(
            '{}/{}_result.h5.old'.format(self.result.outdir, self.result.label)))

    def test_save_and_overwrite(self):
        shutil.rmtree(
            '{}/{}_result.h5.old'.format(self.result.outdir, self.result.label),
            ignore_errors=True)
        self.result.save_to_file(overwrite=True)
        self.result.save_to_file(overwrite=True)
        self.assertFalse(os.path.isfile(
            '{}/{}_result.h5.old'.format(self.result.outdir, self.result.label)))

    def test_save_samples(self):
        self.result.save_posterior_samples()
        filename = '{}/{}_posterior_samples.txt'.format(self.result.outdir, self.result.label)
        self.assertTrue(os.path.isfile(filename))
        df = pd.read_csv(filename)
        self.assertTrue(np.allclose(self.result.posterior.values, df.values))

    def test_samples_to_posterior(self):
        self.result.posterior = None
        x = [1, 2, 3]
        y = [4, 6, 8]
        log_likelihood = np.array([6, 7, 8])
        self.result.samples = np.array([x, y]).T
        self.result.log_likelihood_evaluations = log_likelihood
        self.result.samples_to_posterior(priors=self.result.priors)
        self.assertTrue(all(self.result.posterior['x'] == x))
        self.assertTrue(all(self.result.posterior['y'] == y))
        self.assertTrue(np.array_equal(self.result.posterior.log_likelihood.values, log_likelihood))
        # Fixed (delta-function) parameters should be filled from the priors.
        self.assertTrue(all(self.result.posterior.c.values == self.result.priors['c'].peak))
        self.assertTrue(all(self.result.posterior.d.values == self.result.priors['d'].peak))

    def test_calculate_prior_values(self):
        self.result.calculate_prior_values(priors=self.result.priors)
        self.assertEqual(len(self.result.posterior), len(self.result.prior_values))

    def test_plot_multiple(self):
        # Bug fix: the original `'multiple.png'.format(self.result.outdir)`
        # was a no-op .format with an unused argument (no placeholder in the
        # string); the file is written to the current working directory.
        filename = 'multiple.png'
        bilby.core.result.plot_multiple([self.result, self.result],
                                        filename=filename)
        self.assertTrue(os.path.isfile(filename))
        os.remove(filename)

    def test_plot_walkers(self):
        self.result.walkers = np.random.uniform(0, 1, (10, 11, 2))
        self.result.nburn = 5
        self.result.plot_walkers()
        self.assertTrue(
            os.path.isfile('{}/{}_walkers.png'.format(
                self.result.outdir, self.result.label)))

    def test_plot_with_data(self):
        x = np.linspace(0, 1, 10)
        y = np.linspace(0, 1, 10)

        def model(xx):
            return xx
        self.result.plot_with_data(model, x, y, ndraws=10)
        self.assertTrue(
            os.path.isfile('{}/{}_plot_with_data.png'.format(
                self.result.outdir, self.result.label)))
        self.result.posterior['log_likelihood'] = np.random.uniform(0, 1, len(self.result.posterior))
        self.result.plot_with_data(model, x, y, ndraws=10, xlabel='a', ylabel='y')

    def test_plot_corner(self):
        self.result.injection_parameters = dict(x=0.8, y=1.1)
        self.result.plot_corner()
        self.result.plot_corner(parameters=['x', 'y'])
        self.result.plot_corner(parameters=['x', 'y'], truths=[1, 1])
        self.result.plot_corner(parameters=dict(x=1, y=1))
        self.result.plot_corner(truths=dict(x=1, y=1))
        self.result.plot_corner(truth=dict(x=1, y=1))
        # Passing truths alongside a parameters-dict is ambiguous and must fail.
        with self.assertRaises(ValueError):
            self.result.plot_corner(truths=dict(x=1, y=1),
                                    parameters=dict(x=1, y=1))
        with self.assertRaises(ValueError):
            self.result.plot_corner(truths=[1, 1],
                                    parameters=dict(x=1, y=1))
        with self.assertRaises(ValueError):
            self.result.plot_corner(parameters=['x', 'y'],
                                    truths=dict(x=1, y=1))

    def test_plot_corner_with_injection_parameters(self):
        self.result.plot_corner()
        self.result.plot_corner(parameters=['x', 'y'])
        self.result.plot_corner(parameters=['x', 'y'], truths=[1, 1])
        self.result.plot_corner(parameters=dict(x=1, y=1))

    def test_plot_corner_with_priors(self):
        priors = bilby.core.prior.PriorDict()
        priors['x'] = bilby.core.prior.Uniform(-1, 1, 'x')
        priors['y'] = bilby.core.prior.Uniform(-1, 1, 'y')
        self.result.plot_corner(priors=priors)
        self.result.priors = priors
        self.result.plot_corner(priors=True)
        with self.assertRaises(ValueError):
            self.result.plot_corner(priors='test')

    def test_get_credible_levels(self):
        # Values are deterministic thanks to the fixed seed in setUp.
        levels = self.result.get_all_injection_credible_levels()
        self.assertDictEqual(levels, dict(x=0.68, y=0.72))

    def test_get_credible_levels_raises_error_if_no_injection_parameters(self):
        self.result.injection_parameters = None
        with self.assertRaises(TypeError):
            self.result.get_all_injection_credible_levels()

    def test_kde(self):
        kde = self.result.kde
        import scipy.stats
        self.assertEqual(type(kde), scipy.stats.kde.gaussian_kde)
        self.assertEqual(kde.d, 2)

    def test_posterior_probability(self):
        sample = dict(x=0, y=0.1)
        self.assertTrue(
            isinstance(self.result.posterior_probability(sample), np.ndarray))
        self.assertTrue(
            len(self.result.posterior_probability(sample)), 1)
        self.assertEqual(
            self.result.posterior_probability(sample)[0],
            self.result.kde([0, 0.1]))

    def test_multiple_posterior_probability(self):
        sample = [dict(x=0, y=0.1), dict(x=0.8, y=0)]
        self.assertTrue(
            isinstance(self.result.posterior_probability(sample), np.ndarray))
        self.assertTrue(np.array_equal(self.result.posterior_probability(sample),
                                       self.result.kde([[0, 0.1], [0.8, 0]])))
# Allow running this test module directly, e.g. `python test/result_test.py`.
if __name__ == '__main__':
    unittest.main()
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,661 | juancalderonbustillo/bilby | refs/heads/master | /examples/injection_examples/roq_example.py | #!/usr/bin/env python
"""
Example of how to use the Reduced Order Quadrature method (see Smith et al.,
(2016) Phys. Rev. D 94, 044031) for a Binary Black hole simulated signal in
Gaussian noise.
This requires files specifying the appropriate basis weights.
These aren't shipped with Bilby, but are available on LDG clusters and
from the public repository https://git.ligo.org/lscsoft/ROQ_data.
"""
from __future__ import division, print_function
import numpy as np
import bilby
outdir = 'outdir'
label = 'roq'
# Load in the pieces for the linear part of the ROQ. Note you will need to
# adjust the filenames here to the correct paths on your machine
basis_matrix_linear = np.load("B_linear.npy").T
freq_nodes_linear = np.load("fnodes_linear.npy")
# Load in the pieces for the quadratic part of the ROQ
basic_matrix_quadratic = np.load("B_quadratic.npy").T
freq_nodes_quadratic = np.load("fnodes_quadratic.npy")
np.random.seed(170808)
duration = 4
sampling_frequency = 2048
injection_parameters = dict(
chirp_mass=36., mass_ratio=0.9, a_1=0.4, a_2=0.3, tilt_1=0.0, tilt_2=0.0,
phi_12=1.7, phi_jl=0.3, luminosity_distance=1000., iota=0.4, psi=0.659,
phase=1.3, geocent_time=1126259642.413, ra=1.375, dec=-1.2108)
waveform_arguments = dict(waveform_approximant='IMRPhenomPv2',
reference_frequency=20., minimum_frequency=20.)
waveform_generator = bilby.gw.WaveformGenerator(
duration=duration, sampling_frequency=sampling_frequency,
frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
waveform_arguments=waveform_arguments,
parameter_conversion=bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters)
ifos = bilby.gw.detector.InterferometerList(['H1', 'L1', 'V1'])
ifos.set_strain_data_from_power_spectral_densities(
sampling_frequency=sampling_frequency, duration=duration,
start_time=injection_parameters['geocent_time'] - 3)
ifos.inject_signal(waveform_generator=waveform_generator,
parameters=injection_parameters)
# make ROQ waveform generator
search_waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
duration=duration, sampling_frequency=sampling_frequency,
frequency_domain_source_model=bilby.gw.source.roq,
waveform_arguments=dict(frequency_nodes_linear=freq_nodes_linear,
frequency_nodes_quadratic=freq_nodes_quadratic,
reference_frequency=20., minimum_frequency=20.,
approximant='IMRPhenomPv2'),
parameter_conversion=bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters)
priors = bilby.gw.prior.BBHPriorDict()
for key in ['a_1', 'a_2', 'tilt_1', 'tilt_2', 'iota', 'phase', 'psi', 'ra',
'dec', 'phi_12', 'phi_jl', 'luminosity_distance']:
priors[key] = injection_parameters[key]
priors.pop('mass_1')
priors.pop('mass_2')
priors['chirp_mass'] = bilby.core.prior.Uniform(
15, 40, latex_label='$\\mathcal{M}$')
priors['mass_ratio'] = bilby.core.prior.Uniform(0.5, 1, latex_label='$q$')
priors['geocent_time'] = bilby.core.prior.Uniform(
injection_parameters['geocent_time'] - 0.1,
injection_parameters['geocent_time'] + 0.1, latex_label='$t_c$', unit='s')
likelihood = bilby.gw.likelihood.ROQGravitationalWaveTransient(
interferometers=ifos, waveform_generator=search_waveform_generator,
linear_matrix=basis_matrix_linear, quadratic_matrix=basic_matrix_quadratic,
prior=priors)
result = bilby.run_sampler(
likelihood=likelihood, priors=priors, sampler='dynesty', npoints=500,
injection_parameters=injection_parameters, outdir=outdir, label=label)
# Make a corner plot.
result.plot_corner()
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,662 | juancalderonbustillo/bilby | refs/heads/master | /bilby/core/sampler/emcee.py | from __future__ import absolute_import, print_function
import os
import numpy as np
from pandas import DataFrame
from distutils.version import LooseVersion
from ..utils import (
logger, get_progress_bar, check_directory_exists_and_if_not_mkdir)
from .base_sampler import MCMCSampler, SamplerError
class Emcee(MCMCSampler):
    """bilby wrapper emcee (https://github.com/dfm/emcee)

    All positional and keyword arguments (i.e., the args and kwargs) passed to
    `run_sampler` will be propagated to `emcee.EnsembleSampler`, see
    documentation for that class for further help. Under Other Parameters, we
    list commonly used kwargs and the bilby defaults.

    Other Parameters
    ----------------
    nwalkers: int, (100)
        The number of walkers
    nsteps: int, (100)
        The number of steps
    nburn: int (None)
        If given, the fixed number of steps to discard as burn-in. These will
        be discarded from the total number of steps set by `nsteps` and
        therefore the value must be greater than `nsteps`. Else, nburn is
        estimated from the autocorrelation time
    burn_in_fraction: float, (0.25)
        The fraction of steps to discard as burn-in in the event that the
        autocorrelation time cannot be calculated
    burn_in_act: float
        The number of autocorrelation times to discard as burn-in
    a: float (2)
        The proposal scale factor
    """

    # Defaults cover both emcee <=2.2.1 and pre-release (>2.2.1) keyword
    # names; version-specific renaming happens in the kwargs properties below.
    default_kwargs = dict(nwalkers=500, a=2, args=[], kwargs={},
                          postargs=None, pool=None, live_dangerously=False,
                          runtime_sortingfn=None, lnprob0=None, rstate0=None,
                          blobs0=None, iterations=100, thin=1, storechain=True,
                          mh_proposal=None)

    def __init__(self, likelihood, priors, outdir='outdir', label='label',
                 use_ratio=False, plot=False, skip_import_verification=False,
                 pos0=None, nburn=None, burn_in_fraction=0.25, resume=True,
                 burn_in_act=3, **kwargs):
        import emcee
        # emcee changed its EnsembleSampler API after v2.2.1; remember which
        # convention this installation uses.
        if LooseVersion(emcee.__version__) > LooseVersion('2.2.1'):
            self.prerelease = True
        else:
            self.prerelease = False
        MCMCSampler.__init__(
            self, likelihood=likelihood, priors=priors, outdir=outdir,
            label=label, use_ratio=use_ratio, plot=plot,
            skip_import_verification=skip_import_verification, **kwargs)
        self.resume = resume
        self.pos0 = pos0
        self.nburn = nburn
        self.burn_in_fraction = burn_in_fraction
        self.burn_in_act = burn_in_act
        # Populated with the previously saved chain when resuming.
        self._old_chain = None

    def _translate_kwargs(self, kwargs):
        """Normalise user-supplied kwargs in place to emcee's names."""
        if 'nwalkers' not in kwargs:
            for equiv in self.nwalkers_equiv_kwargs:
                if equiv in kwargs:
                    kwargs['nwalkers'] = kwargs.pop(equiv)
        if 'iterations' not in kwargs:
            if 'nsteps' in kwargs:
                kwargs['iterations'] = kwargs.pop('nsteps')
        # emcee's 'threads' option does not parallelise the likelihood here;
        # force it to 1 and point users at the 'pool' keyword instead.
        if 'threads' in kwargs:
            if kwargs['threads'] != 1:
                logger.warning("The 'threads' argument cannot be used for "
                               "parallelisation. This run will proceed "
                               "without parallelisation, but consider the use "
                               "of an appropriate Pool object passed to the "
                               "'pool' keyword.")
                kwargs['threads'] = 1

    @property
    def sampler_function_kwargs(self):
        """Keyword arguments for `EnsembleSampler.sample()`, renamed for the
        installed emcee version."""
        import emcee
        keys = ['lnprob0', 'rstate0', 'blobs0', 'iterations', 'thin', 'storechain', 'mh_proposal']

        # updated function keywords for emcee > v2.2.1
        updatekeys = {'p0': 'initial_state',
                      'lnprob0': 'log_prob0',
                      'storechain': 'store'}

        function_kwargs = {key: self.kwargs[key] for key in keys if key in self.kwargs}
        function_kwargs['p0'] = self.pos0

        if self.prerelease:
            # 'mh_proposal' was dropped after v2.2.1.
            if function_kwargs['mh_proposal'] is not None:
                logger.warning("The 'mh_proposal' option is no longer used "
                               "in emcee v{}, and will be ignored.".format(emcee.__version__))
            del function_kwargs['mh_proposal']

            for key in updatekeys:
                if updatekeys[key] not in function_kwargs:
                    function_kwargs[updatekeys[key]] = function_kwargs.pop(key)
                else:
                    del function_kwargs[key]

        return function_kwargs

    @property
    def sampler_init_kwargs(self):
        """Keyword arguments for the `EnsembleSampler` constructor: everything
        in self.kwargs that is not a `sample()` argument, version-renamed."""
        init_kwargs = {key: value
                       for key, value in self.kwargs.items()
                       if key not in self.sampler_function_kwargs}

        init_kwargs['lnpostfn'] = self.lnpostfn
        init_kwargs['dim'] = self.ndim

        # updated init keywords for emcee > v2.2.1
        updatekeys = {'dim': 'ndim',
                      'lnpostfn': 'log_prob_fn'}

        if self.prerelease:
            for key in updatekeys:
                if key in init_kwargs:
                    init_kwargs[updatekeys[key]] = init_kwargs.pop(key)

            oldfunckeys = ['p0', 'lnprob0', 'storechain', 'mh_proposal']
            for key in oldfunckeys:
                if key in init_kwargs:
                    del init_kwargs[key]

        return init_kwargs

    @property
    def nburn(self):
        # Priority: explicit user value > estimate from autocorrelation time
        # > fixed fraction of the total number of steps.
        if type(self.__nburn) in [float, int]:
            return int(self.__nburn)
        elif self.result.max_autocorrelation_time is None:
            return int(self.burn_in_fraction * self.nsteps)
        else:
            return int(self.burn_in_act * self.result.max_autocorrelation_time)

    @nburn.setter
    def nburn(self, nburn):
        if isinstance(nburn, (float, int)):
            if nburn > self.kwargs['iterations'] - 1:
                raise ValueError('Number of burn-in samples must be smaller '
                                 'than the total number of iterations')

        self.__nburn = nburn

    @property
    def nwalkers(self):
        return self.kwargs['nwalkers']

    @property
    def nsteps(self):
        return self.kwargs['iterations']

    @nsteps.setter
    def nsteps(self, nsteps):
        self.kwargs['iterations'] = nsteps

    def __getstate__(self):
        # In order to be picklable with dill, we need to discard the pool
        # object before trying.
        # NOTE(review): this mutates the live instance's kwargs (the pool is
        # lost after pickling), not just the pickled copy -- confirm intended.
        d = self.__dict__
        d["_Sampler__kwargs"]["pool"] = None
        return d

    def run_sampler(self):
        """Run the emcee sampler, streaming each step to a chain file so the
        run can be resumed, then populate and return `self.result`."""
        import emcee
        tqdm = get_progress_bar()
        sampler = emcee.EnsembleSampler(**self.sampler_init_kwargs)
        out_dir = os.path.join(self.outdir, 'emcee_{}'.format(self.label))
        out_file = os.path.join(out_dir, 'chain.dat')

        if self.resume:
            self.load_old_chain(out_file)
        else:
            self._set_pos0()

        # One row per walker per step: walker index, parameters, then the
        # blob values (log-likelihood and log-prior from lnpostfn).
        check_directory_exists_and_if_not_mkdir(out_dir)
        if not os.path.isfile(out_file):
            with open(out_file, "w") as ff:
                ff.write('walker\t{}\tlog_l'.format(
                    '\t'.join(self.search_parameter_keys)))
        # NOTE(review): the header lacks a trailing newline and a column name
        # for the log-prior that is also written below -- confirm.
        template =\
            '{:d}' + '\t{:.9e}' * (len(self.search_parameter_keys) + 2) + '\n'

        for sample in tqdm(sampler.sample(**self.sampler_function_kwargs),
                           total=self.nsteps):
            # Sample objects (pre-release) expose coords/blobs as attributes;
            # older emcee yields a tuple (pos, lnprob, rstate, blobs).
            if self.prerelease:
                points = np.hstack([sample.coords, sample.blobs])
            else:
                points = np.hstack([sample[0], np.array(sample[3])])
            with open(out_file, "a") as ff:
                for ii, point in enumerate(points):
                    ff.write(template.format(ii, *point))

        self.result.sampler_output = np.nan
        # Blobs carry (log_likelihood, log_prior) pairs; flatten over
        # steps and walkers.
        blobs_flat = np.array(sampler.blobs).reshape((-1, 2))
        log_likelihoods, log_priors = blobs_flat.T
        if self._old_chain is not None:
            # Prepend the chain recovered from a previous (resumed) run.
            chain = np.vstack([self._old_chain[:, :-2],
                               sampler.chain.reshape((-1, self.ndim))])
            log_ls = np.hstack([self._old_chain[:, -2], log_likelihoods])
            log_ps = np.hstack([self._old_chain[:, -1], log_priors])
            self.nsteps = chain.shape[0] // self.nwalkers
        else:
            chain = sampler.chain.reshape((-1, self.ndim))
            log_ls = log_likelihoods
            log_ps = log_priors
        self.calculate_autocorrelation(chain)
        self.print_nburn_logging_info()
        self.result.nburn = self.nburn
        n_samples = self.nwalkers * self.nburn
        if self.result.nburn > self.nsteps:
            raise SamplerError(
                "The run has finished, but the chain is not burned in: "
                "`nburn < nsteps`. Try increasing the number of steps.")
        self.result.samples = chain[n_samples:, :]
        self.result.log_likelihood_evaluations = log_ls[n_samples:]
        self.result.log_prior_evaluations = log_ps[n_samples:]
        self.result.walkers = sampler.chain
        self.result.log_evidence = np.nan
        self.result.log_evidence_err = np.nan

        return self.result

    def _set_pos0(self):
        """Set the initial walker positions: from the user-supplied pos0
        (DataFrame or array) if given, otherwise drawn from the prior."""
        if self.pos0 is not None:
            logger.debug("Using given initial positions for walkers")
            if isinstance(self.pos0, DataFrame):
                self.pos0 = self.pos0[self.search_parameter_keys].values
            elif type(self.pos0) in (list, np.ndarray):
                self.pos0 = np.squeeze(self.kwargs['pos0'])

            if self.pos0.shape != (self.nwalkers, self.ndim):
                raise ValueError(
                    'Input pos0 should be of shape ndim, nwalkers')
            logger.debug("Checking input pos0")
            for draw in self.pos0:
                self.check_draw(draw)
        else:
            logger.debug("Generating initial walker positions from prior")
            self.pos0 = [self.get_random_draw_from_prior()
                         for _ in range(self.nwalkers)]

    def load_old_chain(self, file_name=None):
        """Resume from a saved chain file: the last `nwalkers` rows become
        pos0 and the rest are kept for concatenation in `run_sampler`.
        Falls back to fresh prior draws if the file does not exist."""
        if file_name is None:
            out_dir = os.path.join(self.outdir, 'emcee_{}'.format(self.label))
            file_name = os.path.join(out_dir, 'chain.dat')
        if os.path.isfile(file_name):
            old_chain = np.genfromtxt(file_name, skip_header=1)
            # Column 0 is the walker index; the last two columns are
            # log-likelihood and log-prior.
            self.pos0 = [np.squeeze(old_chain[-(self.nwalkers - ii), 1:-2])
                         for ii in range(self.nwalkers)]
            self._old_chain = old_chain[:-self.nwalkers + 1, 1:]
            logger.info('Resuming from {}'.format(os.path.abspath(file_name)))
        else:
            logger.warning('Failed to resume. {} not found.'.format(file_name))
            self._set_pos0()

    def lnpostfn(self, theta):
        """Log-posterior for emcee: returns (log_post, [log_l, log_p]) so the
        components are stored as blobs; -inf outside the prior support."""
        log_prior = self.log_prior(theta)
        if np.isinf(log_prior):
            return -np.inf, [np.nan, np.nan]
        else:
            log_likelihood = self.log_likelihood(theta)
            return log_likelihood + log_prior, [log_likelihood, log_prior]
| {"/bilby/gw/result.py": ["/bilby/core/result.py"], "/bilby/core/result.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py"], "/bilby/gw/likelihood.py": ["/bilby/core/__init__.py", "/bilby/core/prior.py", "/bilby/gw/detector.py", "/bilby/gw/source.py"], "/bilby/gw/source.py": ["/bilby/core/__init__.py"], "/bilby/gw/__init__.py": ["/bilby/gw/likelihood.py"], "/bilby/gw/detector.py": ["/bilby/gw/__init__.py", "/bilby/core/__init__.py"]} |
45,664 | iwt-cmd/babytrack | refs/heads/master | /app/migrations/0003_auto_20201017_1927.py | # Generated by Django 3.1.2 on 2020-10-17 19:27
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated. Replaces the combined supplement fields (sup_amount,
    # sup_b, sup_f) with one decimal amount per supplement type, and
    # refreshes the `date` default (a literal datetime frozen at the time
    # this migration was generated).
    dependencies = [
        ('app', '0002_auto_20201004_1944'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='babytrack',
            name='sup_amount',
        ),
        migrations.RemoveField(
            model_name='babytrack',
            name='sup_b',
        ),
        migrations.RemoveField(
            model_name='babytrack',
            name='sup_f',
        ),
        migrations.AddField(
            model_name='babytrack',
            name='sup_b_amt',
            field=models.DecimalField(decimal_places=1, default=0, max_digits=3),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='babytrack',
            name='sup_f_amt',
            field=models.DecimalField(decimal_places=1, default=0, max_digits=3),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='babytrack',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 17, 19, 27, 0, 739481)),
        ),
    ]
| {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,665 | iwt-cmd/babytrack | refs/heads/master | /app/views.py | from django.shortcuts import render, redirect
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.contrib import messages
from .models import BabyTrack
from .forms import EntryForm
def index(request):
    """Show the BabyTrack entry form; on POST, validate and save it.

    BUG FIX: on an invalid POST the bound form (carrying its validation
    errors) is now re-rendered instead of being replaced by a fresh empty
    form, so the user can see what failed validation.
    """
    if request.method == 'POST':
        form = EntryForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            messages.success(request, "Saved!!")
            return redirect('index')
        messages.warning(request, "Not Saved!!")
    else:
        form = EntryForm()
    return render(request, 'index.html', {'form':form})
def entries(request):
    """Render every BabyTrack entry, newest first."""
    tracked = BabyTrack.objects.order_by('-date')
    return render(request, 'entries.html', {'entries': tracked})
45,666 | iwt-cmd/babytrack | refs/heads/master | /app/migrations/0008_auto_20201018_1945.py | # Generated by Django 3.1.2 on 2020-10-18 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated. Drops the generated-datetime default from `date`,
    # making the field required with no default.
    dependencies = [
        ('app', '0007_auto_20201018_1943'),
    ]
    operations = [
        migrations.AlterField(
            model_name='babytrack',
            name='date',
            field=models.DateTimeField(),
        ),
    ]
| {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,667 | iwt-cmd/babytrack | refs/heads/master | /app/migrations/0001_initial.py | # Generated by Django 3.1.2 on 2020-10-04 19:38
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for BabyTrack: diaper flags, nursing
    # counts and supplement fields, keyed to a Baby.  The `date`/`time`
    # defaults are literals frozen at generation time.
    initial = True
    dependencies = [
        ('baby', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='BabyTrack',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(default=datetime.datetime(2020, 10, 4, 19, 38, 31, 980773))),
                ('time', models.TimeField(default='19:38')),
                ('wet', models.BooleanField()),
                ('dirty', models.BooleanField()),
                ('leftside', models.IntegerField()),
                ('rightside', models.IntegerField()),
                ('sup_amount', models.DecimalField(decimal_places=1, max_digits=3)),
                ('sup_f', models.BooleanField()),
                ('sup_b', models.BooleanField()),
                ('name', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='baby.baby')),
            ],
        ),
    ]
| {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,668 | iwt-cmd/babytrack | refs/heads/master | /baby/models.py | from django.db import models
class Baby(models.Model):
    # A tracked baby: display name plus a photo stored under
    # MEDIA_ROOT/photos.
    name = models.CharField(max_length=15)
    image = models.ImageField(upload_to='photos')
    def __str__(self):
        return self.name
45,669 | iwt-cmd/babytrack | refs/heads/master | /baby/admin.py | from django.contrib import admin
from .models import Baby
admin.site.register(Baby) | {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,670 | iwt-cmd/babytrack | refs/heads/master | /app/models.py | from django.db import models
from datetime import datetime
from baby.models import Baby
class BabyTrack(models.Model):
    # One tracking entry (diaper state + feeding amounts) for a baby at a
    # point in time.
    name = models.ForeignKey(Baby, on_delete=models.DO_NOTHING)
    date = models.DateTimeField(blank=False)
    wet = models.BooleanField()
    dirty = models.BooleanField()
    # Per-side nursing counts; units not stated in the schema -- presumably
    # minutes. TODO confirm with the form/template.
    leftside = models.IntegerField(default=0)
    rightside = models.IntegerField(default=0)
    # Supplement amounts up to 99.9 (max_digits=3, one decimal place);
    # presumably sup_b_amt = breast milk and sup_f_amt = formula -- confirm.
    sup_b_amt = models.DecimalField(max_digits=3, decimal_places=1)
    sup_f_amt = models.DecimalField(max_digits=3, decimal_places=1)
| {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,671 | iwt-cmd/babytrack | refs/heads/master | /app/forms.py | from django import forms
from .models import BabyTrack, Baby
class DateInput(forms.DateInput):
    # HTML5 datetime-local widget; the format matches what that input type
    # submits and expects when re-rendering a value.
    # NOTE(review): subclasses forms.DateInput although it carries a full
    # datetime -- forms.DateTimeInput may be the more natural base; confirm.
    input_type = 'datetime-local'
    def __init__(self, **kwargs):
        # Force the value format so initial data round-trips through the
        # browser control.
        kwargs["format"] = "%Y-%m-%dT%H:%M"
        super().__init__(**kwargs)
class EntryForm(forms.ModelForm):
    # ModelForm over BabyTrack exposing every model field, with the custom
    # datetime-local widget on `date`.
    class Meta:
        model = BabyTrack
        fields = '__all__'
        widgets = {
            'date': DateInput()
        }
| {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,672 | iwt-cmd/babytrack | refs/heads/master | /app/admin.py | from django.contrib import admin
from .models import BabyTrack
class BabyTrackAdmin(admin.ModelAdmin):
    # Changelist shows all tracked fields; name and date link to the entry.
    list_display=('name', 'date', 'wet', 'dirty', 'leftside', 'rightside', 'sup_f_amt', 'sup_b_amt')
    list_display_links=('name', 'date')
admin.site.register(BabyTrack, BabyTrackAdmin) | {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,673 | iwt-cmd/babytrack | refs/heads/master | /app/urls.py | from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('entries', views.entries, name='entries'),
] | {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,674 | iwt-cmd/babytrack | refs/heads/master | /baby/views.py | from django.shortcuts import render
from .models import Baby
def babies(request):
    """Render the list of all Baby records."""
    registered = Baby.objects.all()
    return render(request, 'babies.html', {'babies': registered})
45,675 | iwt-cmd/babytrack | refs/heads/master | /app/migrations/0002_auto_20201004_1944.py | # Generated by Django 3.1.2 on 2020-10-04 19:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated. Drops the separate `time` field (folded into the
    # DateTimeField), refreshes the generated default on `date`, and gives
    # the per-side nursing counts a default of 0.
    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='babytrack',
            name='time',
        ),
        migrations.AlterField(
            model_name='babytrack',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 4, 19, 44, 5, 250402)),
        ),
        migrations.AlterField(
            model_name='babytrack',
            name='leftside',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='babytrack',
            name='rightside',
            field=models.IntegerField(default=0),
        ),
    ]
| {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,676 | iwt-cmd/babytrack | refs/heads/master | /app/migrations/0007_auto_20201018_1943.py | # Generated by Django 3.1.2 on 2020-10-18 19:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated. Refreshes the `date` default to the datetime captured
    # when this migration was generated (removed again in 0008).
    dependencies = [
        ('app', '0006_auto_20201018_1942'),
    ]
    operations = [
        migrations.AlterField(
            model_name='babytrack',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 18, 19, 43, 38, 106363)),
        ),
    ]
| {"/app/views.py": ["/app/models.py", "/app/forms.py"], "/baby/admin.py": ["/baby/models.py"], "/app/models.py": ["/baby/models.py"], "/app/forms.py": ["/app/models.py"], "/app/admin.py": ["/app/models.py"], "/baby/views.py": ["/baby/models.py"]} |
45,693 | vikas753/RailwayDBMS | refs/heads/master | /userdefs/user.py | ##################################################################
# User class : Functionalities like book and cancel a ticket
# are implemented in this class
#
##################################################################
import mysql.connector
from mysql.connector import Error
from tabulate import tabulate
class user_object:
    """Passenger services: registration, booking, listing and cancelling
    tickets.

    SECURITY FIX: every statement now uses parameterized queries.  The
    original concatenated raw ``input()`` strings straight into SQL,
    making every prompt an SQL-injection vector.
    """

    # Column offset of each weekday inside a display_trains result row.
    # Also serves as the whitelist of valid weekday column names.
    DAY_COLUMN = {'Mon': 4, 'Tue': 5, 'Wed': 6, 'Thu': 7,
                  'Fri': 8, 'Sat': 9, 'Sun': 10}

    def __init__(self, database_cursor, connection):
        self.main_function(database_cursor, connection)

    # register an user , insert the details onto passenger tables
    def register_user(self, email_id):
        passenger_name = input(" Please enter your name : ")
        passenger_secret_key = input(" Please enter the secret key for identification : ")
        self.database_cursor.execute(
            "INSERT INTO passenger(name,email_id,secret_key) VALUES (%s,%s,%s)",
            (passenger_name, email_id, passenger_secret_key))
        self.connection.commit()

    # Displays all user services provided for passenger
    def user_services(self, email_id):
        secret_key_input = input(" Please enter the secret key ! : ")
        self.database_cursor.execute(
            "SELECT 1 FROM passenger WHERE email_id = %s AND secret_key = %s",
            (email_id, secret_key_input))
        check_secret_key = self.database_cursor.fetchone()
        if check_secret_key:
            print(" Welcome ! , Please find the functions that user can perform ")
            typeof_function = input(" 1 . Book a seat \n 2 . Display Tickets \n 3 . Cancel Ticket \n Input : ")
            if typeof_function == '1':
                self.book_seat(email_id)
            elif typeof_function == '2':
                self.display_tickets(email_id)
            elif typeof_function == '3':
                self.cancel_tickets(email_id)
            else:
                # go back to main function and try the process again for incorrect input entered
                print(" Incorrect Code Entered ! ")
        else:
            print(" Invalid Secret Key Entered , please try once more ")
            self.user_services(email_id)

    # Main function to login or register and invoke the user services
    # Display user services or prompt options for a new user to register
    def main_function(self, database_cursor, connection):
        email_id_input = input(" Please enter the email id : ")
        self.database_cursor = database_cursor
        self.connection = connection
        self.database_cursor.execute(
            "SELECT 1 FROM passenger WHERE email_id = %s", (email_id_input,))
        check_email_id = self.database_cursor.fetchone()
        if check_email_id:
            print(" Found your records ! ")
            self.user_services(email_id_input)
        else:
            print(" No Match For Email-Id Provided , please follow registration process below ( your entered email-id would be used as is , in registering ) ! ")
            self.register_user(email_id_input)
        admin_continuation = input(" Do you want to continue with any other service of passenger ? - Yes or No : ")
        if admin_continuation == "Yes":
            self.main_function(database_cursor, connection)
        else:
            print(" Thanks for using the services!")

    # Book a ticket for an user:
    # list stations, resolve boarding/arrival ids, display matching trains,
    # then record the booking once the requested day/seats pass sanity_check.
    def book_seat(self, email_id):
        self.database_cursor.execute(" SELECT station_name FROM stations ")
        result_cursor = self.database_cursor.fetchall()
        print(" Please find available stations : ")
        print(tabulate(result_cursor, headers=['Station Name']))
        boarding_station = input(" Please enter boarding station : ")
        self.database_cursor.execute(
            "SELECT station_id FROM stations WHERE station_name = %s",
            (boarding_station,))
        boarding_station_id_tuple = self.database_cursor.fetchone()
        if boarding_station_id_tuple:
            boarding_station_id = boarding_station_id_tuple[0]
            arrival_station = input(" Please enter arrival station : ")
            self.database_cursor.execute(
                "SELECT station_id FROM stations WHERE station_name = %s",
                (arrival_station,))
            arrival_station_id_tuple = self.database_cursor.fetchone()
            if arrival_station_id_tuple:
                arrival_station_id = arrival_station_id_tuple[0]
                station_details_args = [boarding_station_id, arrival_station_id]
                self.database_cursor.callproc("display_trains", station_details_args)
                results_data = [result.fetchall() for result in self.database_cursor.stored_results()]
                if len(results_data[0]) > 0:
                    print(" Weekly Train Time Table is below for trains between those stations , it also has number of seats for each day ")
                    print(tabulate(results_data[0], headers=['Train Id', 'Train_Name', 'Boarding_Time', 'Arrival_Time', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'], tablefmt='psql'))
                    train_id_local_db = input(" Please enter the train id : ")
                    train_name_local_db = input(" Please enter the train name : ")
                    day_week_local_db = input(" Please enter the day in that week : ")
                    num_seats_local_db = input(" Please enter the number of seats : ")
                    if self.sanity_check(results_data[0], train_id_local_db, day_week_local_db, num_seats_local_db) > 0:
                        # sanity_check validated the day against DAY_COLUMN, so
                        # interpolating it as a column name is safe; seat count
                        # and train id are bound parameters.
                        sql_command_update_week_sched = (
                            "UPDATE weekly_availability_seats SET {day} = {day} - %s "
                            "WHERE train_id = %s".format(day=day_week_local_db))
                        self.database_cursor.execute(
                            sql_command_update_week_sched,
                            (int(num_seats_local_db), int(train_id_local_db)))
                        insert_records_args = [train_id_local_db, boarding_station_id, arrival_station_id, num_seats_local_db, day_week_local_db, email_id]
                        self.database_cursor.callproc("insert_records", insert_records_args)
                        print(str(num_seats_local_db) + " Tickets Booked on " + str(train_name_local_db) + " for " + str(day_week_local_db))
                        self.connection.commit()
                    else:
                        self.book_seat(email_id)
                else:
                    print(" No trains available for those stations yet ! ")
            else:
                print(" Incorrect Arrival Station Name ! ")
                self.book_seat(email_id)
        else:
            print(" Incorrect Boarding Station Name ! ")
            self.book_seat(email_id)

    # Display Tickets in tabular format using tabulate module
    def display_tickets(self, email_id):
        display_tickets_args = [email_id]
        self.database_cursor.callproc("display_tickets", display_tickets_args)
        results_data = [result.fetchall() for result in self.database_cursor.stored_results()]
        if len(results_data[0]) > 0:
            print(" Please find all tickets details and booking history ")
            print(tabulate(results_data[0], headers=['Ticket Id', 'Boarding Station Name', 'Destination Station Name', 'Train_Name', 'Number of seats', 'Day'], tablefmt='psql'))
        else:
            print(" No ticket details or booking history found ! ")
        return results_data

    # Cancel Tickets as provided by user:
    # restore the seats in weekly_availability_seats and delete the ticket row.
    def cancel_tickets(self, email_id):
        results_data = self.display_tickets(email_id)
        ticket_id_local = input(" Please enter the ticket id : ")
        if len(results_data[0]) > 0:
            if self.sanitise_ticket_id(results_data[0], ticket_id_local) == 1:
                self.database_cursor.execute(
                    "SELECT train_id,num_seats,Day FROM passenger_train WHERE passenger_train_id = %s",
                    (int(ticket_id_local),))
                ticket_details_tuple = self.database_cursor.fetchone()
                train_id_local = ticket_details_tuple[0]
                num_seats_local = ticket_details_tuple[1]
                day_week = ticket_details_tuple[2]
                # day_week comes from the database (a weekday column name),
                # not from user input, so it is usable as a column identifier.
                sql_command_update_week_schedule = (
                    "UPDATE weekly_availability_seats SET {day} = {day} + %s "
                    "WHERE train_id = %s".format(day=day_week))
                self.database_cursor.execute(
                    sql_command_update_week_schedule,
                    (int(num_seats_local), int(train_id_local)))
                self.database_cursor.execute(
                    "DELETE FROM passenger_train WHERE passenger_train_id = %s",
                    (int(ticket_id_local),))
                self.connection.commit()
                print(" Ticket has been successfully cancelled ! ")

    # Perform a sanity on input ticket id to see if it is present in data.
    def sanitise_ticket_id(self, results, ticket_id):
        for result_row in results:
            if int(result_row[0]) == int(ticket_id):
                return 1
        print("Invalid ticket_id please refer table above for ticket_id column details")
        return 0

    # Validate the requested train/day/seat-count against the displayed
    # timetable rows; returns 1 when the booking is possible, else 0.
    def sanity_check(self, results_data, train_id, day_week, num_seats):
        for result_row in results_data:
            if int(result_row[0]) == int(train_id):
                col = self.DAY_COLUMN.get(day_week)
                if col is None:
                    print(" Invalid day of week entered !")
                    return 0
                if int(result_row[col]) - int(num_seats) < 0:
                    print(" Invalid number of seats requested , Overflow !")
                    return 0
                return 1
        print(" Invalid Train Id entered ")
        return 0
45,694 | vikas753/RailwayDBMS | refs/heads/master | /appcode.py | ##################################################################
# Main class : Connects to the database with credentials
# Displays different roles and offloads the
# functioning to that object
##################################################################
import mysql.connector
from mysql.connector import Error
from userdefs.admin import admin_object
from userdefs.user import user_object
# Connect to database with username and password in arguments
# Run the application code to take a legal name and track the
# character using stored procedure.
def connect_to_database_and_experiment(username_arg, password_arg):
    """Connect to the 'railway dbms' MySQL database and run the role prompt.

    @param username_arg, password_arg : credentials for the MySQL server

    BUG FIX: the connection is now always closed in a ``finally`` block;
    previously it leaked whenever prompt_user raised or the session ended
    on any path other than a clean return.
    """
    connection = None
    try:
        connection = mysql.connector.connect(host='127.0.0.1',
                                             database='railway dbms',
                                             user=username_arg,
                                             password=password_arg)
        if connection.is_connected():
            database_info = connection.get_server_info()
            print("Connected to MySQL Server version ", database_info)
            db_cursor = connection.cursor()
            db_cursor.execute("select database();")
            record = db_cursor.fetchone()
            print("Connected to database Successful ! : ", record)
            prompt_user(db_cursor, connection)
    except Error as e:
        print("Error while connecting to MySQL", e)
    finally:
        if connection is not None and connection.is_connected():
            connection.close()
# Main function of the application
def main():
    """Entry point: prompt for database credentials and start the app.

    FIX: connect_to_database_and_experiment returns nothing, so the old
    code's assignment of its result to an unused variable was misleading
    and has been dropped.
    """
    username = input(" Please enter the username for your database : ")
    password = input(" Please enter the password for your database : ")
    connect_to_database_and_experiment(username, password)
# Prompt user for type of application usage and invoke them.
def prompt_user(db_cursor, connection):
    """Ask which role to assume and hand control to that role's object;
    re-prompts until a valid choice is entered."""
    role = input(" Please enter the type of user 1 - Administrator 2 - Passenger : ")
    if role == '1':
        admin_object(db_cursor, connection)
    elif role == '2':
        user_object(db_cursor, connection)
    else:
        prompt_user(db_cursor, connection)
# FIX: guard the entry point so importing this module does not immediately
# prompt for credentials and run the app.
if __name__ == "__main__":
    main()
45,695 | vikas753/RailwayDBMS | refs/heads/master | /userdefs/admin.py | ##################################################################
# Admin class : Functionalities like adding trains , stations and
# their schedules are done by this class
#
##################################################################
import csv
import mysql.connector
from mysql.connector import Error
class admin_object:
    """Administrator services: add stations and trains, and load a train's
    weekly seat availability plus its daily schedule from a CSV file.

    SECURITY FIX: all statements now use parameterized queries.  The
    original interpolated raw ``input()`` and CSV strings into SQL,
    allowing SQL injection.
    """

    def __init__(self, database_cursor, connection):
        username = input(" Please enter the admin credentials , username ? : ")
        secret_key = input(" Please enter the admin credentials , secret_key ? : ")
        self.database_cursor = database_cursor
        self.connection = connection
        # NOTE(review): credentials are hard-coded; move to configuration.
        if username == "root" and secret_key == "root":
            print(" Admin authorised successfully ")
            self.main_function()
        else:
            print(" Invalid credentials for admin , exiting the program")

    # Menu loop for the admin actions.
    def main_function(self):
        print(" Please find the functions that admin can perform ")
        typeof_function = input(" 1 . Add a station , 2 . Add a train : ")
        if typeof_function == '1':
            self.add_station()
        elif typeof_function == '2':
            self.add_train()
        else:
            # go back to main function and try the process again for incorrect input entered
            print(" Incorrect Code Entered ! ")
        admin_continuation = input(" Do you want to continue with services of admin ? - Yes or No : ")
        if admin_continuation == "Yes":
            self.main_function()
        else:
            print(" Thanks for using the services!")

    # Ask user for a station as an input , check it's existence in station table and add it .
    def add_station(self):
        station_name_input = input(" Please enter the station name to be added : ")
        self.database_cursor.execute(
            "SELECT 1 FROM stations WHERE station_name = %s", (station_name_input,))
        check_station_name = self.database_cursor.fetchone()
        if check_station_name:
            print(" Station Name : ", station_name_input, "already exists")
        else:
            self.database_cursor.execute(
                "INSERT INTO stations(station_name) VALUES (%s)", (station_name_input,))
            self.connection.commit()
            print("Station Name :", station_name_input, "added successfully")

    # Add a train after checking the name is not already used, then prompt
    # for its availability and schedule.
    def add_train(self):
        train_name_input = input(" Please enter the train name to be added : ")
        self.database_cursor.execute(
            "SELECT 1 FROM trains WHERE train_name = %s", (train_name_input,))
        check_train_name = self.database_cursor.fetchone()
        if check_train_name:
            print("Train name already exists!")
        else:
            self.database_cursor.execute(
                "INSERT INTO trains(train_name) VALUES (%s)", (train_name_input,))
            self.connection.commit()
            print("Train Name :", train_name_input, "added successfully")
            self.prompt_schedule_csv_parse_and_add(train_name_input)

    # Prompt per-day seat counts, then a CSV of the train's schedule.
    # CSV rows: station_name, time_of_arrival, time_of_departure, sequence_number
    def prompt_schedule_csv_parse_and_add(self, train_name_input):
        self.database_cursor.execute(
            "SELECT train_id FROM trains WHERE train_name = %s", (train_name_input,))
        train_id = self.database_cursor.fetchone()
        num_seats_monday = input(" Weekly Availability - Please enter the number of seats available for train on Monday : ")
        num_seats_tuesday = input(" Weekly Availability - Please enter the number of seats available for train on Tuesday : ")
        num_seats_wednesday = input(" Weekly Availability - Please enter the number of seats available for train on Wednesday : ")
        num_seats_thursday = input(" Weekly Availability - Please enter the number of seats available for train on Thursday : ")
        num_seats_friday = input(" Weekly Availability - Please enter the number of seats available for train on Friday : ")
        num_seats_saturday = input(" Weekly Availability - Please enter the number of seats available for train on saturday : ")
        num_seats_sunday = input(" Weekly Availability - Please enter the number of seats available for train on sunday : ")
        self.database_cursor.execute(
            "INSERT INTO weekly_availability_seats(train_id,Mon,Tue,Wed,Thu,Fri,Sat,Sun) "
            "VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
            (train_id[0], num_seats_monday, num_seats_tuesday, num_seats_wednesday,
             num_seats_thursday, num_seats_friday, num_seats_saturday, num_seats_sunday))
        self.connection.commit()
        daily_schedule_csvfile = input(" Please provide csv file that contains schedule for this train :")
        with open(daily_schedule_csvfile, 'r') as csvfile:
            csvreader = csv.reader(csvfile)
            commit = 1
            for row in csvreader:
                station_name = row[0]
                time_of_arrival = row[1]
                time_of_departure = row[2]
                sequence_number = row[3]
                self.database_cursor.execute(
                    "SELECT station_id FROM stations WHERE station_name = %s",
                    (station_name,))
                station_id_tuple = self.database_cursor.fetchone()
                if station_id_tuple:
                    station_id = station_id_tuple[0]
                    self.database_cursor.execute(
                        "INSERT INTO daily_station_train_schedule(station_id,train_id,time_of_arrival,time_of_departure,sequence_number) "
                        "VALUES (%s,%s,%s,%s,%s)",
                        (station_id, train_id[0], time_of_arrival, time_of_departure, sequence_number))
                else:
                    print(" Incorrect station encountered in schedule : ", station_id_tuple)
                    commit = 0
                    break
            if commit == 1:
                print(" Schedule Updated Succesfully ! ")
                self.connection.commit()
| {"/appcode.py": ["/userdefs/admin.py", "/userdefs/user.py"]} |
45,696 | unicache/unicache-env | refs/heads/master | /unicache_env/__init__.py | from gym.envs.registration import register
# Register one gym environment id per (dataset, capacity ratio, sample size,
# version code) combination, e.g. 'cache-iqiyi-0.001-1000-v0'.
for dataset in ['iqiyi', 'movielens']:
    for capDivCont in ['0.00001', '0.0001', '0.001', '0.01']: # Using string because there is precision problems of %f
        for sampleSizeStr in [100, 1000, 10000, 'full']: # Size of full set = 233045
            for version in range(16):
                # 'full' maps to sampleSize=None, meaning the whole content set.
                sampleSize = sampleSizeStr if sampleSizeStr != 'full' else None
                register(
                    id = 'cache-%s-%s-%s-v%d'%(dataset, capDivCont, sampleSizeStr, version), # '-v\d' is required by gym
                    kwargs = {'dataset': dataset, 'capDivCont': float(capDivCont), 'sampleSize': sampleSize, 'version': version},
                    entry_point = 'unicache_env.envs:Env'
                )
| {"/unicache_env/envs/input_dataset.py": ["/unicache_env/envs/request.py"], "/unicache_env/envs/env.py": ["/unicache_env/envs/request.py", "/unicache_env/envs/input_dataset.py"], "/unicache_env/envs/__init__.py": ["/unicache_env/envs/env.py"]} |
45,697 | unicache/unicache-env | refs/heads/master | /unicache_env/envs/request.py | class Request:
''' One piece of content on given network problem '''
__slots__ = ('longitude', 'latitude', 'time', 'userid', 'content')
# Turn off __dict__ to save memory
    def __init__(self, longitude, latitude, time, userid, content):
        ''' Constructor
        @param longitude, latitude : float. Where the request raised
            (None for datasets without geographic data, e.g. MovieLens)
        @param time : datetime. When the request raised
        @param userid : string. Hashed user ID who raised the request
        @param content : int. Content ID (It was once string when initializing)'''
        # Only these five attributes exist: the class declares __slots__,
        # so there is no per-instance __dict__.
        self.longitude = longitude
        self.latitude = latitude
        self.time = time
        self.userid = userid
        self.content = content
| {"/unicache_env/envs/input_dataset.py": ["/unicache_env/envs/request.py"], "/unicache_env/envs/env.py": ["/unicache_env/envs/request.py", "/unicache_env/envs/input_dataset.py"], "/unicache_env/envs/__init__.py": ["/unicache_env/envs/env.py"]} |
45,698 | unicache/unicache-env | refs/heads/master | /unicache_env/envs/input_dataset.py | import os
import datetime
from .request import Request
def inputIqiyi():
    """Load the iQiyi trace ('|'-separated, gb18030 encoded) as a list of
    Request objects, or None when the data file is absent.

    BUG FIX: `logger` was never defined in this module (only os, datetime
    and Request are imported), so the original FileNotFoundError handler
    raised NameError instead of reporting the missing file.
    """
    try:
        with open(os.path.dirname(__file__) + '/raw/iqiyi.csv', encoding = 'gb18030') as f:
            return list(map(
                lambda row: Request(float(row[0]), float(row[1]), datetime.datetime.strptime(row[2], "%Y-%m-%d %H:%M:%S"), row[4].strip(), row[3].strip()),
                map(lambda row: row.strip().split('|'), f)
            ))
    except FileNotFoundError:
        import logging
        logging.getLogger(__name__).error('Data file not found. This file may not in the git repo')
        return None
def inputMovieLens():
    """Load the MovieLens ratings CSV as a time-sorted list of Request
    objects (no geographic data), or None when the file is absent.

    BUG FIX: `logger` was never defined in this module, so the original
    FileNotFoundError handler raised NameError instead of reporting the
    missing file.
    """
    try:
        with open(os.path.dirname(__file__) + '/raw/ratings.csv') as f:
            next(f)  # skip the CSV header row
            return list(sorted(map(
                lambda row: Request(None, None, datetime.datetime.fromtimestamp(int(row[3])), row[0], row[1]),
                map(lambda row: row.strip().split(','), f)
            ), key = lambda req: req.time))
    except FileNotFoundError:
        import logging
        logging.getLogger(__name__).error('Data file not found. This file may not in the git repo')
        return None
def inputDataset(dataset):
    ''' Input a specific dataset
    @param dataset : 'iqiyi' or 'movielens'
    @return : List of requests '''
    loaders = {
        'iqiyi': inputIqiyi,
        'movielens': inputMovieLens,
    }
    loader = loaders[dataset]  # KeyError for unknown names, as before
    return loader()
| {"/unicache_env/envs/input_dataset.py": ["/unicache_env/envs/request.py"], "/unicache_env/envs/env.py": ["/unicache_env/envs/request.py", "/unicache_env/envs/input_dataset.py"], "/unicache_env/envs/__init__.py": ["/unicache_env/envs/env.py"]} |
45,699 | unicache/unicache-env | refs/heads/master | /setup.py | from setuptools import setup
setup(
name = 'unicache_env',
version = '0.0.1',
install_requires = [
'gym>=0.7.4',
'future>=0.16.0'
]
)
| {"/unicache_env/envs/input_dataset.py": ["/unicache_env/envs/request.py"], "/unicache_env/envs/env.py": ["/unicache_env/envs/request.py", "/unicache_env/envs/input_dataset.py"], "/unicache_env/envs/__init__.py": ["/unicache_env/envs/env.py"]} |
45,700 | unicache/unicache-env | refs/heads/master | /unicache_env/envs/env.py | from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import (
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip
) # Python2 support
import os
import sys
import gym
import math
import numpy
import pickle
import random
import bisect
import logging
from .request import Request
from .input_dataset import inputDataset
logger = logging.getLogger(__name__)
logger.setLevel('WARNING')
VERSION = 0.02 # Used to identify different version of .tmp file
class State:
    ''' State returned to users '''
    def __init__(self, storeSize, sampleSize):
        self.storeSize = storeSize
        # One boolean flag per content id: True when that content is cached
        self.cached = numpy.zeros(sampleSize, dtype = bool)
        self.cachedNum = 0
        self.arriving = None  # Content id waiting for admission, if any
        self.history = []     # Every request handled so far

    def newReq(self, req):
        ''' [INTERNAL METHOD] Record the next request
        @return : bool. True when the cache is full and an eviction is needed '''
        assert self.arriving is None
        self.history.append(req)
        cid = req.content
        if self.cached[cid]:
            return False            # hit: nothing to do
        if self.cachedNum >= self.storeSize:
            self.arriving = cid     # full: caller must evict before admitting
            return True
        self.cached[cid] = True     # free slot: admit immediately
        self.cachedNum += 1
        return False

    def evict(self, content):
        ''' [INTERNAL METHOD] Drop `content` from the cache and admit the
        pending arrival.  End users should call the env's step() instead. '''
        incoming = self.arriving
        assert self.cached[content]
        self.cached[content] = False
        self.cached[incoming] = True
        self.arriving = None
def dist(la1, lo1, la2, lo2):
    ''' Flat-plane distance between two (lat, lon) points, tuned for Beijing.
    Exits the process when any coordinate is missing (dataset has no
    geographic data).
    NOTE(review): 85 km/deg longitude matches latitude ~40N, but
    222 km/deg latitude looks doubled (true value is ~111) -- confirm.
    @return : Kilometer '''
    if la1 is None or lo1 is None or la2 is None or lo2 is None:
        logger.error("Current dataset doesn't have geographic data")
        exit(1)
    dx = (la1 - la2) * 222
    dy = (lo1 - lo2) * 85
    return math.sqrt(dx * dx + dy * dy)
def filterVersion(version):
    """Build a request predicate from a version code.

    The low two bits of *version* select a geographic filter; all other
    bits are ignored here.
    """
    LOCAL_MASK = 0x3
    local_filters = {
        0x0: lambda req: True,  # no geographic restriction
        # Zhongguancun area, 1 km radius
        0x1: lambda req: dist(req.latitude, req.longitude, 39.976586, 116.317694) < 1.0,
        # Beihang University area, 1 km radius
        0x2: lambda req: dist(req.latitude, req.longitude, 39.982440, 116.347954) < 1.0,
    }
    selected = local_filters[version & LOCAL_MASK]
    def f(req):
        return selected(req)
    return f
class Env(gym.Env):
    ''' Gym environment that replays a request trace against a simulated
    content cache. On every step the agent picks a cached content to evict;
    the reward is the number of cache hits served between two evictions. '''
    metadata = {
        'render.modes': []
    }
    def __init__(self, dataset, capDivCont, sampleSize, version):
        ''' Constructor. You have to determine these parameters via envoriment IDs
        @param dataset : 'iqiyi' or 'movielens'
        @param capDivCont : Storage size / total content number
        @param sampleSize : int. Randomly select this size of CONTENTS. `None` means using the whole set (iqiyi = 233045, movielens = 26744 contents)
        @param version: int. Version code. See filterVersion '''
        # NOTE(review): fakeSeed is freshly random every run and is embedded in
        # the cache file name below, so the pickle written at the end of this
        # constructor can never be found by a later run -- caching is
        # effectively disabled and .tmp files accumulate. Confirm intent.
        fakeSeed = random.randrange(sys.maxsize)
        try:
            # Try to reuse a previously pickled (requests, sampleSize) pair.
            with open(os.path.dirname(__file__) + '/.%s_%s_%s_%s_%s.tmp'%(dataset, capDivCont, sampleSize, fakeSeed, version), 'rb') as f:
                self.requests, self.sampleSize, _version = pickle.load(f)
                if _version != VERSION:
                    logger.info("Old cache found, will not use")
                    raise FileNotFoundError
                logger.info('Loading from cache')
        # NOTE(review): bare `except:` also swallows unpickling errors and
        # KeyboardInterrupt; a narrower exception tuple would be safer.
        except:
            logger.info('Input cache not found, loading from raw input')
            self.requests = inputDataset(dataset)
            # Keep only the requests accepted by the version-code filter.
            self.requests = filter(filterVersion(version), self.requests)
            self.requests = list(self.requests) # self.requests will be used twice, so can't be an iterator
            def unique(sequence):
                # Deduplicate a sorted sequence while preserving order.
                last = None
                for item in sequence:
                    if item != last:
                        last = item
                        yield item
            contents = list(unique(sorted(map(lambda r: r.content, self.requests))))
            logger.info('%d contents in total'%(len(contents)))
            if sampleSize is not None:
                if sampleSize > len(contents):
                    logger.warning('sampling size larger than total size')
                else:
                    # Keep a sorted random sample of the content ids.
                    contents = [contents[i] for i in sorted(random.sample(range(len(contents)), sampleSize))]
            # Re-map content ids to dense indexes [0, len(contents)); requests
            # whose content was sampled out are marked None and dropped below.
            for req in self.requests:
                pos = bisect.bisect_left(contents, req.content)
                req.content = pos if pos < len(contents) and contents[pos] == req.content else None
            self.requests = list(filter(lambda r: r.content is not None, self.requests))
            self.sampleSize = len(contents) # Don't use parameter `sampleSize`, which can be None
            with open(os.path.dirname(__file__) + '/.%s_%s_%s_%s_%s.tmp'%(dataset, capDivCont, sampleSize, fakeSeed, version), 'wb') as f:
                pickle.dump((self.requests, self.sampleSize, VERSION), f)
                logger.info('Cached input')
        self.requestsIter = None  # iterator over self.requests, set by _reset
        self.state = None  # current State instance, set by _reset
        self.storeSize = int(capDivCont * self.sampleSize)
        self.done = True  # episode-finished flag
        if self.storeSize == 0:
            # NOTE(review): uses the root `logging` module instead of the
            # module-level `logger` used everywhere else -- confirm intent.
            logging.warning("Storage size = 0. Please increase capacity.")
    def _nextState(self):
        ''' Get next state which needs eviction
        @return Wheter episode ended, how many request hit '''
        oldCnt = len(self.state.history)
        try:
            # Feed requests until one cannot be admitted (cache full + miss),
            # which leaves state.arriving set and requires an eviction.
            while self.state.arriving is None:
                self.state.newReq(next(self.requestsIter))
        except StopIteration:
            # Trace exhausted: the episode is over.
            # NOTE(review): the -1 below is meant to exclude the request that
            # triggered the eviction, but it is also applied here where no
            # such request exists -- possible off-by-one, confirm.
            return True, len(self.state.history) - oldCnt - 1
        return False, len(self.state.history) - oldCnt - 1
    def _reset(self):
        ''' Reset the environment
        @return : Initial state '''
        self.requestsIter = iter(self.requests)
        self.state = State(self.storeSize, self.sampleSize)
        self.done, hit = self._nextState()
        if self.done:
            logger.warning('All contents hit. Maybe storage capacity is too high. Returning None')
            return None
        return self.state
    def _step(self, action):
        ''' Perform an action
        @param action : Content ID. Which content to be evicted
        @return : next state, reward, wheter ended, extra info (not used yet) '''
        if self.state is None:
            logger.error('You should call `reset` before `step`')
            return
        if self.done:
            logger.warning('You are stepping after episode has been done')
        if not self.state.cached[action]:
            logger.error('Invalid action: evicting uncached content')
            return
        self.state.evict(action)
        self.done, hit = self._nextState()
        # Sanity check: the episode only ends when the whole trace is consumed.
        assert not self.done or len(self.state.history) == len(self.requests)
        return self.state, hit, self.done, {}
    def _render(self, mode = 'human', close = False):
        # Rendering is not supported; `close` is honoured silently.
        if close:
            return
        logger.warning('`render` is not implemented yet')
| {"/unicache_env/envs/input_dataset.py": ["/unicache_env/envs/request.py"], "/unicache_env/envs/env.py": ["/unicache_env/envs/request.py", "/unicache_env/envs/input_dataset.py"], "/unicache_env/envs/__init__.py": ["/unicache_env/envs/env.py"]} |
45,701 | unicache/unicache-env | refs/heads/master | /unicache_env/envs/__init__.py | from unicache_env.envs.env import Env
| {"/unicache_env/envs/input_dataset.py": ["/unicache_env/envs/request.py"], "/unicache_env/envs/env.py": ["/unicache_env/envs/request.py", "/unicache_env/envs/input_dataset.py"], "/unicache_env/envs/__init__.py": ["/unicache_env/envs/env.py"]} |
45,703 | pekwm/pekwm-theme-index-validator | refs/heads/main | /entrypoint.py | #!/usr/bin/python3
import os
import sys
import validate_theme
def main(schema_path, theme_dir):
    """Validate every '*.json' theme index found directly under *theme_dir*.

    @param schema_path: path of the JSON schema file.
    @param theme_dir: directory holding the theme index files.
    """
    json_indexes = []
    for entry in os.listdir(theme_dir):
        if entry.endswith('.json'):
            json_indexes.append(os.path.join(theme_dir, entry))
    validate_theme.main(schema_path, json_indexes)
    # FIXME: provide informative outputs
if __name__ == '__main__':
    # GITHUB_WORKSPACE is set by the GitHub Actions runner; bail out early
    # when running outside of an action.
    if 'GITHUB_WORKSPACE' not in os.environ:
        sys.stderr.write('GITHUB_WORKSPACE not set\n')
        sys.exit(1)
    path = os.environ['GITHUB_WORKSPACE']
    # Schema and themes are expected at fixed locations in the workspace.
    schema_path = os.path.join(path, 'theme.schema.json')
    themes_dir = os.path.join(path, 'themes')
    main(schema_path, themes_dir)
| {"/entrypoint.py": ["/validate_theme.py"]} |
45,704 | pekwm/pekwm-theme-index-validator | refs/heads/main | /validate_theme.py | #!/usr/bin/env python3
#
# Script for validating pekwm theme registry files
#
import json
import jsonschema
import os
import sys
def main(schema_path, themes):
    """Validate each theme file in *themes* against the schema at *schema_path*.

    Raises jsonschema.ValidationError on the first theme that does not
    conform to the schema.
    """
    print('loading schema...')
    with open(schema_path) as fp:
        theme_schema = json.load(fp)
    for path in themes:
        print('validating {}'.format(path))
        with open(path) as fp:
            jsonschema.validate(instance=json.load(fp), schema=theme_schema)
if __name__ == '__main__':
    # BUG FIX: main() takes (schema_path, themes); the old call passed the
    # whole argv tail as a single positional argument and crashed with a
    # TypeError. Usage: validate_theme.py <schema.json> <theme.json>...
    main(sys.argv[1], sys.argv[2:])
| {"/entrypoint.py": ["/validate_theme.py"]} |
45,707 | pfreixes/gramola | refs/heads/master | /tests/datasources/test_graphite.py | import pytest
from mock import patch, Mock
from requests.exceptions import RequestException
from gramola.utils import parse_date
from gramola.datasources.graphite import (
DATE_FORMAT,
GraphiteDataSource,
GraphiteMetricQuery
)
REQUESTS = 'gramola.datasources.graphite.requests'
@pytest.fixture
def config():
    # Minimal configuration pointing at a local Graphite endpoint.
    settings = {
        'type': 'graphite',
        'name': 'datasource name',
        'url': 'http://localhost:9000',
    }
    return GraphiteDataSource.DATA_SOURCE_CONFIGURATION_CLS(**settings)
@patch(REQUESTS)
class TestTest(object):
    """GraphiteDataSource.test() must report endpoint reachability."""
    def test_requests_exception(self, prequests, config):
        # Any transport failure is reported as an unreachable datasource.
        prequests.get.side_effect = RequestException()
        graphite = GraphiteDataSource(config)
        assert graphite.test() == False
    def test_ok(self, prequests, config):
        # An HTTP 200 answer means the datasource is reachable.
        response = Mock()
        response.status_code = 200
        prequests.get.return_value = response
        graphite = GraphiteDataSource(config)
        assert graphite.test() == True
@patch(REQUESTS)
class TestDatapoints(object):
    """GraphiteDataSource.datapoints() must translate queries to /render calls."""
    @pytest.fixture
    def query(self):
        # Query with an explicit 24h-ago .. 12h-ago window.
        return GraphiteDataSource.METRIC_QUERY_CLS(**{
            'target': 'foo.bar',
            'since': '-24h',
            'until': '-12h'
        })
    def test_query(self, prequests, config, query):
        # Datapoints come back as (value, timestamp) tuples and the request
        # must carry the translated since/until dates.
        response = Mock()
        response.status_code = 200
        response.json.return_value = [{
            'target': 'foo.bar',
            'datapoints': [[1, 1451391760]]
        }]
        prequests.get.return_value = response
        graphite = GraphiteDataSource(config)
        assert graphite.datapoints(query) == [(1, 1451391760)]
        prequests.get.assert_called_with(
            'http://localhost:9000/render',
            params={'target': 'foo.bar',
                    'from': parse_date('-24h').strftime(DATE_FORMAT),
                    'to': parse_date('-12h').strftime(DATE_FORMAT),
                    'format': 'json'}
        )
    def test_query_default_values(self, prequests, config):
        # Omitted since/until fall back to the -1h .. now defaults.
        response = Mock()
        response.status_code = 200
        response.json.return_value = [{
            'target': 'foo.bar',
            'datapoints': [[1, 1451391760]]
        }]
        prequests.get.return_value = response
        graphite = GraphiteDataSource(config)
        # build the mvp of query to be filled with the default ones
        query = GraphiteDataSource.METRIC_QUERY_CLS(**{
            'target': 'foo.bar'
        })
        assert graphite.datapoints(query) == [(1, 1451391760)]
        prequests.get.assert_called_with(
            'http://localhost:9000/render',
            params={'target': 'foo.bar',
                    'from': parse_date('-1h').strftime(DATE_FORMAT),
                    'to': parse_date('now').strftime(DATE_FORMAT),
                    'format': 'json'}
        )
    def test_query_remove_last_None(self, prequests, config):
        # A trailing None sample (not-yet-filled bucket) must be dropped.
        response = Mock()
        response.status_code = 200
        response.json.return_value = [{
            'target': 'foo.bar',
            'datapoints': [[1, 1451391760], [None, 1451391770]]
        }]
        prequests.get.return_value = response
        graphite = GraphiteDataSource(config)
        # build the mvp of query to be filled with the default ones
        query = GraphiteDataSource.METRIC_QUERY_CLS(**{
            'target': 'foo.bar'
        })
        assert graphite.datapoints(query) == [(1, 1451391760)]
    def test_requests_exception(self, prequests, config, query):
        # Transport failures degrade to an empty datapoint list.
        prequests.get.side_effect = RequestException()
        graphite = GraphiteDataSource(config)
        assert graphite.datapoints(query) == []
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,708 | pfreixes/gramola | refs/heads/master | /tests/datasources/test_base.py | import pytest
from gramola.datasources.base import (
OptionalKey,
DataSource,
DataSourceConfig,
InvalidDataSourceConfig,
MetricQuery,
InvalidMetricQuery
)
class TestOptionalKey(object):
    """OptionalKey must behave as its plain name string while carrying metadata."""
    def test_interface(self):
        option = OptionalKey("field", "description")
        # Equal (and hashes) like the bare key name...
        assert option == "field"
        assert option.name == "field"
        assert option.description == "description"
        # ...so it can be used transparently as a dict key.
        d = {option: "value"}
        assert d['field'] == "value"
class TestDataSourceConfig(object):
    """DataSourceConfig subclasses declare extra required keys via REQUIRED_KEYS."""
    def test_interface(self):
        class TestConfig(DataSourceConfig):
            REQUIRED_KEYS = ('foo',)
        # Both inherited (type, name) and own (foo) keys surface as attributes.
        conf = TestConfig(**{'type': 'test', 'name': 'datasource', 'foo': 1})
        assert conf.type == 'test'
        assert conf.name == 'datasource'
        assert conf.foo == 1
    def test_custom_raises(self):
        class TestConfig(DataSourceConfig):
            pass
        # requires the name key
        with pytest.raises(InvalidDataSourceConfig):
            TestConfig(**{})
class TestMetricQuery(object):
    """MetricQuery subclasses declare their own required query keys."""
    def test_interface(self):
        class TestQuery(MetricQuery):
            REQUIRED_KEYS = ('metric', 'from_', 'to')
        # Declared keys surface as attributes of the query instance.
        query = TestQuery(**{'metric': 'cpu', 'from_': 1, 'to': 2})
        assert query.metric == 'cpu'
        assert query.from_ == 1
        assert query.to == 2
    def test_custom_raises(self):
        class TestQuery(MetricQuery):
            REQUIRED_KEYS = ('metric',)
        # requires the metric key
        with pytest.raises(InvalidMetricQuery):
            TestQuery(**{})
class TestDataSource(object):
    """DataSource registry lookup and datapoints() dispatch."""
    def test_find(self):
        class TestDataSource(DataSource):
            TYPE = 'test_find'
        # Subclasses self-register by their TYPE attribute...
        assert DataSource.find('test_find') == TestDataSource
        # ...and unknown types raise.
        with pytest.raises(KeyError):
            DataSource.find('foo')
    def test_datapoints(self):
        class TestDataSource(DataSource):
            TYPE = 'test_find'
            def datapoints(self, query, maxdatapoints=None):
                return query, maxdatapoints
        query = 1
        maxdatapoints = 2
        # The subclass override receives both arguments untouched.
        assert TestDataSource(None).datapoints(query, maxdatapoints=maxdatapoints) ==\
            (query, maxdatapoints)
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,709 | pfreixes/gramola | refs/heads/master | /tests/datasources/test_cloudwatch.py | import pytest
from mock import patch, Mock, MagicMock, ANY
from datetime import datetime
from datetime import timedelta
from botocore.exceptions import (
PartialCredentialsError,
NoRegionError,
ClientError,
)
from gramola.datasources.cloudwatch import (
Boto3ClientError,
CWDataSource,
CWMetricQuery
)
from gramola.datasources.base import InvalidMetricQuery
BOTO3 = 'gramola.datasources.cloudwatch.boto3'
@pytest.fixture
def config():
    # Bare configuration: region and profile fall back to boto3 defaults.
    settings = {'type': 'cw', 'name': 'cw'}
    return CWDataSource.DATA_SOURCE_CONFIGURATION_CLS(**settings)
@pytest.fixture
def config_options():
    # Configuration with an explicit AWS region and credentials profile.
    settings = {
        'type': 'cw',
        'name': 'cw',
        'region': 'eu-west-1',
        'profile': 'sandbox',
    }
    return CWDataSource.DATA_SOURCE_CONFIGURATION_CLS(**settings)
@patch(BOTO3)
class TestBotoClient(object):
    # Test the method _cw_client
    """CWDataSource._cw_client() session construction and error wrapping."""
    def test_cw_client(self, boto3, config):
        cw = CWDataSource(config)
        cw._cw_client()
        # NOTE(review): `called_with` is not a Mock assertion method -- this
        # line only creates a child mock and asserts nothing. It should
        # probably be `assert_called_with(...)`; confirm the expected args.
        boto3.session.Session.called_with(None, None)
    def test_cw_client_opitons(self, boto3, config_options):
        cw = CWDataSource(config_options)
        cw._cw_client()
        # NOTE(review): same `called_with` no-op as above.
        boto3.session.Session.called_with(region_name='eu-west-1', profile_name='sandbox')
    def test_raises_boto3clienterror(self, boto3, config):
        # Every botocore failure must be wrapped into Boto3ClientError.
        boto3.session.Session.side_effect = PartialCredentialsError(provider=Mock(),
                                                                    cred_var=Mock())
        with pytest.raises(Boto3ClientError):
            CWDataSource(config)._cw_client()
        boto3.session.Session.side_effect = ClientError(MagicMock(), Mock())
        with pytest.raises(Boto3ClientError):
            CWDataSource(config)._cw_client()
        boto3.session.Session.side_effect = NoRegionError()
        with pytest.raises(Boto3ClientError):
            CWDataSource(config)._cw_client()
@patch(BOTO3)
class TestTest(object):
    """CWDataSource.test() must map botocore failures to False."""
    def test_boto3_raises_exceptions_fail(self, boto3, config):
        # Each botocore exception variant yields an unreachable datasource.
        boto3.session.Session.side_effect = PartialCredentialsError(provider=Mock(),
                                                                    cred_var=Mock())
        assert CWDataSource(config).test() == False
        boto3.session.Session.side_effect = ClientError(MagicMock(), Mock())
        assert CWDataSource(config).test() == False
        boto3.session.Session.side_effect = NoRegionError()
        assert CWDataSource(config).test() == False
    def test_ok(self, boto3, config):
        # With a working session the datasource reports reachable.
        cw = CWDataSource(config)
        assert cw.test() == True
@patch(BOTO3)
class TestDatapoints(object):
    """CWDataSource.datapoints() must translate queries to get_metric_statistics."""
    @pytest.fixture
    def query_dict(self):
        # Raw query over the last 12 hours. (The 'CPUUtillization' spelling is
        # intentional fixture data used consistently below.)
        return {
            'namespace': 'AWS/EC2',
            'metricname': 'CPUUtillization',
            'dimension_name': 'AutoScalingGroupName',
            'dimension_value': 'foo',
            'until': datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
            'since': (datetime.now() - timedelta(hours=12)).strftime('%Y-%m-%dT%H:%M:%S')
        }
    @pytest.fixture
    def response(self):
        # Canned CloudWatch answer with two samples carrying every statistic.
        return {
            'Label': 'foo',
            'Datapoints': [
                {'Timestamp': datetime.now(), 'SampleCount': 1,
                 'Average': 1, 'Sum': '11', 'Minimum': 1, 'Maximum': 1, 'Unit': 'foos'},
                {'Timestamp': datetime.now(), 'SampleCount': 2,
                 'Average': 2, 'Sum': '22', 'Minimum': 2, 'Maximum': 2, 'Unit': 'foos'}
            ]
        }
    def test_query(self, boto3, config, query_dict, response):
        # Default statistic is Average, default period 60 seconds.
        query = CWDataSource.METRIC_QUERY_CLS(**query_dict)
        boto3.session.Session.return_value.client.return_value.\
            get_metric_statistics.return_value = response
        cw = CWDataSource(config)
        datapoints = cw.datapoints(query)
        assert len(datapoints) == len(response['Datapoints'])
        for idx, i in enumerate(datapoints):
            assert i[0] == response['Datapoints'][idx]['Average']
            assert i[0] == response['Datapoints'][idx]['Average']
        boto3.session.Session.return_value.client.return_value.\
            get_metric_statistics.assert_called_with(
                Namespace='AWS/EC2', MetricName='CPUUtillization',
                StartTime=datetime.strptime(query.since, '%Y-%m-%dT%H:%M:%S'),
                EndTime=datetime.strptime(query.until, '%Y-%m-%dT%H:%M:%S'), Period=60,
                Dimensions=[{'Name': 'AutoScalingGroupName', 'Value': 'foo'}],
                Statistics=['Average']
            )
    def test_query_sum(self, boto3, config, query_dict, response):
        # An explicit 'Sum' statistic is forwarded and read back.
        query_dict.update({'statistics': 'Sum'})
        query = CWDataSource.METRIC_QUERY_CLS(**query_dict)
        boto3.session.Session.return_value.client.return_value.\
            get_metric_statistics.return_value = response
        cw = CWDataSource(config)
        datapoints = cw.datapoints(query)
        assert len(datapoints) == len(response['Datapoints'])
        for idx, i in enumerate(datapoints):
            assert i[0] == response['Datapoints'][idx]['Sum']
            assert i[0] == response['Datapoints'][idx]['Sum']
        boto3.session.Session.return_value.client.return_value.\
            get_metric_statistics.assert_called_with(
                Namespace='AWS/EC2', MetricName='CPUUtillization',
                StartTime=datetime.strptime(query.since, '%Y-%m-%dT%H:%M:%S'),
                EndTime=datetime.strptime(query.until, '%Y-%m-%dT%H:%M:%S'), Period=60,
                Dimensions=[{'Name': 'AutoScalingGroupName', 'Value': 'foo'}],
                Statistics=['Sum']
            )
    def test_query_invalid_statistics(self, boto3, config, query_dict, response):
        # Unknown statistics names must be rejected before calling AWS.
        query_dict.update({'statistics': 'foo'})
        query = CWDataSource.METRIC_QUERY_CLS(**query_dict)
        cw = CWDataSource(config)
        with pytest.raises(InvalidMetricQuery):
            datapoints = cw.datapoints(query)
    def test_query_maxdatapoints(self, boto3, config, query_dict):
        # maxdatapoints widens the period (12h / 100 points -> 480s buckets).
        query = CWDataSource.METRIC_QUERY_CLS(**query_dict)
        cw = CWDataSource(config)
        datapoints = cw.datapoints(query, maxdatapoints=100)
        boto3.session.Session.return_value.client.return_value.\
            get_metric_statistics.assert_called_with(
                Namespace='AWS/EC2', MetricName='CPUUtillization',
                StartTime=datetime.strptime(query.since, '%Y-%m-%dT%H:%M:%S'),
                EndTime=datetime.strptime(query.until, '%Y-%m-%dT%H:%M:%S'), Period=480,
                Dimensions=[{'Name': 'AutoScalingGroupName', 'Value': 'foo'}],
                Statistics=['Average']
            )
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,710 | pfreixes/gramola | refs/heads/master | /gramola/datasources/__init__.py | # -*- coding: utf-8 -*-
"""
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
from gramola.datasources.graphite import GraphiteDataSource
from gramola.datasources.cloudwatch import CWDataSource
IMPLEMENTATIONS = [
GraphiteDataSource,
CWDataSource
]
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,711 | pfreixes/gramola | refs/heads/master | /gramola/utils.py | # -*- coding: utf-8 -*-
"""
Utils used arround the Gramola project.
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
import json
from itertools import chain
from datetime import datetime
from datetime import timedelta
class InvalidGramolaDictionary(Exception):
    """ Raised when the keys handed to a GramolaDictionary instance (or a
    derived class) do not satisfy the declared required/optional key sets.
    """
    def __init__(self, errors):
        """
        :param errors: mapping from offending key to its error message.
        :type errors: dict.
        """
        super(InvalidGramolaDictionary, self).__init__()
        self.errors = errors
class GramolaDictionary(object):
    """
    Implements a dictionary store with a interface to declare the allowable
    keys, either required or optional.
    Each derivated implementation of GramolaDictionary can configure the
    required keys using the REQUIRED_KEYS attribute and the optional keys
    using OPTIONAL_KEYS attribute. These params will be concatenated with
    the params defined by the base class.
    As the required keys and the optional keys are automatically published
    as properties, for these optional keys that are not given a None value
    is returned.
    For example:
        >>> class Config(GramolaDictionary):
        >>>     REQUIRED_KEYS = (name,)
        >>>
        >>> class MyConfig(Config):
        >>>     REQUIRED_KEYS = (foo,)
        >>>     OPTIONAL_KEYS = (bar,)
        >>>
        >>> conf = MyConfig({'name':'hellow', 'foo': 'x'})
        >>> conf.name
        hellow
        >>> conf.foo
        x
        >>> conf.bar
        None
    The name of the keys then have to follow the naming convention for Python
    class attributes.
    """
    REQUIRED_KEYS = ()
    OPTIONAL_KEYS = ()
    # Use a metaclass to publish dynamic properties
    # got from the REQUIRED_KEYS and OPTIONAL_KEYS published
    # by the base classes + derivated class
    # NOTE(review): the nested-class `__metaclass__` spelling is Python 2
    # only; Python 3 ignores it, so no properties would be installed there.
    class __metaclass__(type):
        def __init__(cls, name, bases, nmspc):
            type.__init__(cls, name, bases, nmspc)
            # install the property for all keys to make them visible
            # as a properties. i.e conf.name
            for attr in chain(cls.required_keys(), cls.optional_keys()):
                setattr(
                    cls,
                    str(attr), # required for non string objects
                    # `a=attr` binds the key at definition time, avoiding the
                    # late-binding closure pitfall; missing keys read as None.
                    property(lambda self, a=attr: self._dict.get(a, None))
                )
    def __init__(self, **kwargs):
        """
        Initialize a GramolaDictionary instance.
        :param kwargs: key, value pairs used for this configuration.
        :type kwargs: dict.
        :raises : InvalidDataSourceConfig.
        """
        # check that the keys given as configuration keys are allowed either
        # because they are required or optional.
        # NOTE(review): `if filter(...)` relies on Python 2 semantics where
        # filter() returns a list; on Python 3 it returns an always-truthy
        # iterator and these checks would always raise.
        allowed_keys = self.required_keys() + self.optional_keys()
        if filter(lambda k: k not in allowed_keys, kwargs):
            raise InvalidGramolaDictionary(
                {k: 'Key not expected' for k in
                 filter(lambda k: k not in allowed_keys, kwargs)})
        # all required keys have to be given
        if filter(lambda k: k not in kwargs, self.required_keys()):
            raise InvalidGramolaDictionary(
                {k: 'Key missing' for k in
                 filter(lambda k: k not in kwargs, self.required_keys())})
        self._dict = kwargs
    def __eq__(self, b):
        # Are equals if they are implemented using the same class and
        # have the same dictionary elements
        return self.__class__ == b.__class__ and self._dict == b._dict
    def dict(self):
        """ Returns a copy dict of the internal dict"""
        return dict(**self._dict)
    def dumps(self):
        """ Return a string JSON object to be used as a serialized """
        return json.dumps(self._dict)
    @classmethod
    def loads(cls, buffer_):
        """ Return a instancia of cls using the JSON buffer_ as a kwargs
        for the constructor.
        :param buffer_: str.
        """
        return cls(**json.loads(buffer_))
    @classmethod
    def required_keys(cls):
        """ Return all required keys inherited by the whole hierarchy classes """
        return cls._pick_up_attr('REQUIRED_KEYS')
    @classmethod
    def optional_keys(cls):
        """ Return all optional keys inherited by the whole hierarchy classes """
        return cls._pick_up_attr('OPTIONAL_KEYS')
    @classmethod
    def _pick_up_attr(cls, attr):
        # Walk the class hierarchy concatenating base-class tuples with this
        # class's own, so subclasses extend rather than replace key sets.
        class_values = getattr(cls, attr)
        base_values = ()
        for base in cls.__bases__:
            if hasattr(base, '_pick_up_attr'):
                base_values += base._pick_up_attr(attr)
        return base_values + class_values
class DateTimeInvalidValue(Exception):
    """ Raised when a date string matches none of the supported formats. """
    pass
def parse_date(date_value):
    """ Parse a date_value expecting at least one of the following
    formats, otherwise it raises a DateTimeInvalidValue.
        timestamp format: 1454867898
        iso8601 format  : 2016-02-06T20:37:47
        human format    : -1h, -5min, 10d, now, ...
                          ([|-](integer)[h|min|s|d]|now)
    :param date_value: str
    :return: datetime
    :raises DateTimeInvalidValue: when no format matches or the numeric
        amount of a relative offset is not an integer.
    """
    # 1) ISO-8601 timestamp.
    try:
        return datetime.strptime(date_value, "%Y-%m-%dT%H:%M:%S")
    except ValueError:
        pass
    # 2) Unix timestamp in seconds (possibly fractional).
    try:
        return datetime.fromtimestamp(float(date_value))
    except ValueError:
        pass
    # 3) Human-relative offsets around `now`.
    if date_value == 'now':
        return datetime.now()
    # Suffixes are checked in the original order (h, min, s, d); the first
    # one found in the string wins.
    for suffix, unit in (('h', 'hours'), ('min', 'minutes'),
                         ('s', 'seconds'), ('d', 'days')):
        if date_value.find(suffix) == -1:
            continue
        try:
            amount = int(date_value.split(suffix)[0])
        except ValueError:
            # Non-integer amount (including the bare-suffix case, e.g. "h",
            # which previously crashed with an IndexError).
            raise DateTimeInvalidValue()
        # int() keeps the sign, so one expression covers both the past
        # ('-5h') and the future ('5h') cases.
        return datetime.now() + timedelta(**{unit: amount})
    # Nothing matched. (The old unreachable `return data_value` -- a typo'd
    # name -- has been removed.)
    raise DateTimeInvalidValue()
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,712 | pfreixes/gramola | refs/heads/master | /tests/test_commands.py | import pytest
import sparkline
from copy import copy
from json import loads, dumps
from mock import patch, Mock
from gramola.commands import (
InvalidParams,
GramolaCommand,
DataSourceCommand,
DataSourceRmCommand,
DataSourceTestCommand,
DataSourceListCommand,
build_datasource_add_type,
build_datasource_echo_type,
build_datasource_query_type
)
from gramola.datasources.base import (
MetricQuery,
DataSource,
DataSourceConfig
)
from .fixtures import test_data_source
from .fixtures import nonedefault_store
@pytest.fixture
def empty_options(nonedefault_store):
    # Blank options object; requesting nonedefault_store ensures the
    # temporary store exists before each test runs.
    options = Mock()
    return options
@pytest.fixture
def empty_suboptions():
    # Blank sub-command options object.
    suboptions = Mock()
    return suboptions
class TestGramolaCommand(object):
    """GramolaCommand registry: subclasses self-register under their NAME."""
    def test_interface(self):
        class TestCommand(GramolaCommand):
            NAME = 'test'
        assert GramolaCommand.find('test') == TestCommand
        # Unknown command names raise.
        with pytest.raises(KeyError):
            GramolaCommand.find('foo')
        assert TestCommand in GramolaCommand.commands()
class TestDataSource(object):
    """`datasource <name>` prints the stored configuration as JSON.

    Patches `__builtin__.print`, so this suite targets Python 2.
    """
    def test_execute(self, empty_options, empty_suboptions, test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        # Expected JSON payload for the stored "datasource one" entry.
        output = {
            'type': 'test',
            'name': 'datasource one',
            'bar': 'b',
            'foo': 'a'
        }
        with patch("__builtin__.print") as print_patched:
            DataSourceCommand.execute(empty_options, empty_suboptions, "datasource one")
            print_patched.assert_called_with(dumps(output))
    def test_execute_not_found(self, empty_options, empty_suboptions,
                               test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        with patch("__builtin__.print") as print_patched:
            DataSourceCommand.execute(empty_options, empty_suboptions, "xxxx")
            print_patched.assert_called_with("Datasource `xxxx` NOT FOUND")
    def test_invalid_params(self, empty_options, empty_suboptions,
                            test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        # DataSource takes one param
        with pytest.raises(InvalidParams):
            DataSourceCommand.execute(empty_options, empty_suboptions)
class TestDataSourceRm(object):
    """`datasource-rm <name>` removes an entry from the store."""
    def test_execute(self, empty_options, empty_suboptions, test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        with patch("__builtin__.print") as print_patched:
            DataSourceRmCommand.execute(empty_options, empty_suboptions, "datasource one")
            print_patched.assert_called_with("Datasource `datasource one` removed")
    def test_execute_not_found(self, empty_options, empty_suboptions,
                               test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        with patch("__builtin__.print") as print_patched:
            DataSourceRmCommand.execute(empty_options, empty_suboptions, "xxxx")
            print_patched.assert_called_with("Datasource `xxxx` not found, NOT REMOVED")
    def test_invalid_params(self, empty_options, empty_suboptions, test_data_source,
                            nonedefault_store):
        empty_options.store = nonedefault_store.path
        # DataSource takes one param
        with pytest.raises(InvalidParams):
            DataSourceRmCommand.execute(empty_options, empty_suboptions)
class TestDataSourceTest(object):
    """`datasource-test <name>` probes a stored datasource."""
    def test_execute(self, empty_options, empty_suboptions, test_data_source, nonedefault_store):
        test_data_source.test.return_value = True
        empty_options.store = nonedefault_store.path
        DataSourceTestCommand.execute(empty_options, empty_suboptions, "datasource one")
        # NOTE(review): missing parentheses -- `assert_called` here is a bare
        # attribute access that asserts nothing. Should be `assert_called()`.
        test_data_source.test.assert_called
    def test_execute_not_found(self, empty_options, empty_suboptions,
                               test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        with patch("__builtin__.print") as print_patched:
            DataSourceTestCommand.execute(empty_options, empty_suboptions, "xxxx")
            print_patched.assert_called_with("Datasource `xxxx` not found, NOT TESTED")
    def test_invalid_params(self, empty_options, empty_suboptions, test_data_source,
                            nonedefault_store):
        empty_options.store = nonedefault_store.path
        # DataSource takes one param
        with pytest.raises(InvalidParams):
            DataSourceTestCommand.execute(empty_options, empty_suboptions)
class TestDataSourceAdd(object):
    """`datasource-add-<type>` stores a new datasource, testing it first."""
    def test_execute(self, empty_options, empty_suboptions, test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        test_data_source.test.return_value = True
        command = build_datasource_add_type(test_data_source)
        command.execute(empty_options, empty_suboptions, "test name", 1, 2)
        assert len(nonedefault_store.datasources(name="test name")) == 1
    def test_execute_service_unavailable(self, empty_options, empty_suboptions, test_data_source,
                                         nonedefault_store):
        # A failing probe must keep the datasource out of the store.
        empty_options.store = nonedefault_store.path
        test_data_source.test.return_value = False
        empty_suboptions.no_test = False
        command = build_datasource_add_type(empty_suboptions and test_data_source)
        with patch("__builtin__.print") as print_patched:
            command.execute(empty_options, empty_suboptions, "test name 2", 1, 2)
            assert len(nonedefault_store.datasources(name="test name 2")) == 0
            print_patched.assert_called_with(
                "THIS DATA SOURCE NOT BE ADDED, use --no-test flag to add it even")
    def test_execute_test_disabled(self, empty_options, empty_suboptions, test_data_source,
                                   nonedefault_store):
        empty_options.store = nonedefault_store.path
        test_data_source.test.return_value = False
        # NOTE(review): this sets `not_test` while the test above sets
        # `no_test`; on a Mock both silently succeed, so one of them is not
        # exercising the real flag name -- confirm against the command.
        empty_suboptions.not_test = True
        command = build_datasource_add_type(test_data_source)
        command.execute(empty_options, empty_suboptions, "test name", 1, 2)
        assert len(nonedefault_store.datasources(name="test name")) == 1
    def test_invalid_params(self, empty_options, empty_suboptions, test_data_source):
        command = build_datasource_add_type(test_data_source)
        with pytest.raises(InvalidParams):
            command.execute(empty_options, empty_suboptions)
class TestDataSourceEcho(object):
    """`datasource-echo-<type>` prints a config JSON without storing it."""
    def test_execute(self, empty_options, empty_suboptions, test_data_source):
        # otupus returns a hardcoded name and the type of the
        # datasource along with the expected keys of datasource
        output = {
            'type': 'test',
            'name': 'stdout',
            'foo': 1,
            'bar': 2
        }
        command = build_datasource_echo_type(test_data_source)
        with patch("__builtin__.print") as print_patched:
            command.execute(empty_options, empty_suboptions, 1, 2)
            print_patched.assert_called_with(dumps(output))
    def test_invalid_params(self, empty_options, empty_suboptions, test_data_source):
        # test_data_source takes two params
        command = build_datasource_echo_type(test_data_source)
        with pytest.raises(InvalidParams):
            command.execute(empty_options, empty_suboptions, 1)
class TestDataSourceList(object):
    """Tests for the datasource-list command."""

    def test_execute(self, empty_options, empty_suboptions, test_data_source, nonedefault_store):
        empty_options.store = nonedefault_store.path
        with patch("__builtin__.print") as print_patched:
            DataSourceListCommand.execute(empty_options, empty_suboptions)
            # assert_called_with checks only the LAST print; the fixture store
            # apparently ends with this entry -- TODO confirm against fixtures.
            print_patched.assert_called_with("Datasource `datasource two` (test)")
class TestQueryCommand(object):
    """Tests for the query-<type> command built by build_datasource_query_type."""

    @patch("gramola.commands.sys")
    @patch("gramola.commands.Plot")
    def test_execute_stdin(self, plot_patched, sys_patched, empty_options, empty_suboptions,
                           test_data_source):
        """The command must read the datasource config from stdin when NAME is '-'."""
        empty_suboptions.refresh = False
        datapoints = [(1, 0), (2, 1), (3, 1)]
        buffer_ = dumps({'type': 'test', 'name': 'stdout', 'foo': 1, 'bar': 1})
        sys_patched.stdin.read.return_value = buffer_
        test_data_source.datapoints.return_value = datapoints
        command = build_datasource_query_type(test_data_source)
        command.execute(empty_options, empty_suboptions, "-", "foo", "-1d", "now")
        plot_patched.return_value.draw.assert_called_with(datapoints)
        # BUG FIX: `assert_call_with` is not a Mock method -- attribute access on
        # a Mock just creates a child mock, so the original line asserted
        # nothing. Use assert_called_with, and include the maxdatapoints
        # keyword the command actually passes (Plot is patched, so its
        # width() return value is a mock object).
        test_data_source.datapoints.assert_called_with(
            test_data_source.METRIC_QUERY_CLS(metric='foo', since='-1d', until='now'),
            maxdatapoints=plot_patched.return_value.width.return_value)

    @patch("gramola.commands.sys")
    def test_invalid_params(self, sys_patched, empty_options, empty_suboptions, test_data_source):
        # query test_data_source takes four required params; only NAME is given.
        buffer_ = dumps({'type': 'test', 'name': 'stdout', 'foo': 1, 'bar': 1})
        sys_patched.stdin.read.return_value = buffer_
        # BUG FIX: this suite tests the query command; the original built the
        # echo command by copy-paste mistake.
        command = build_datasource_query_type(test_data_source)
        with pytest.raises(InvalidParams):
            command.execute(empty_options, empty_suboptions, "-")
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,713 | pfreixes/gramola | refs/heads/master | /tests/test_utils.py | import pytest
import json
import time
from datetime import datetime
from datetime import timedelta
from gramola.utils import (
parse_date,
DateTimeInvalidValue,
GramolaDictionary,
InvalidGramolaDictionary
)
class TestGramolaDictionary(object):
    """Tests for gramola.utils.GramolaDictionary key handling and inheritance."""

    def test_interface(self):
        # REQUIRED_KEYS / OPTIONAL_KEYS declared by a subclass are merged with
        # the parent's keys, and every key becomes a readable attribute.
        class ParentConfig(GramolaDictionary):
            REQUIRED_KEYS = ('name',)
            OPTIONAL_KEYS = ('banana',)

        class ChildConfig(ParentConfig):
            REQUIRED_KEYS = ('foo',)
            OPTIONAL_KEYS = ('bar', 'gramola')

        conf = ChildConfig(**{'name': 'hellow', 'foo': 1, 'bar': 2})
        assert conf.name == 'hellow'
        assert conf.foo == 1
        assert conf.bar == 2
        # Optional keys that were not given resolve to a falsy value.
        assert not conf.banana
        assert not conf.gramola
        # dumps() serializes only the keys that were actually set.
        assert conf.dumps() == json.dumps({'name': 'hellow', 'foo': 1, 'bar': 2})
        assert ChildConfig.optional_keys() == ('banana', 'bar', 'gramola')
        assert ChildConfig.required_keys() == ('name', 'foo')
        # loads() is the inverse of dumps().
        ChildConfig.loads(json.dumps({'name': 'hellow', 'foo': 1, 'bar': 2}))
        # check the __eq__ method
        conf2 = ChildConfig(**{'name': 'hellow', 'foo': 1, 'bar': 2})
        assert conf == conf2

    def test_allowed_keys(self):
        class TestConfig(GramolaDictionary):
            REQUIRED_KEYS = ('foo',)
            OPTIONAL_KEYS = ('bar', 'gramola')

        # requires the foo key
        with pytest.raises(InvalidGramolaDictionary):
            TestConfig(**{})
        # invalid key given
        with pytest.raises(InvalidGramolaDictionary):
            TestConfig(**{'foo': 1, 'whatever': None})
        # optional keys alone do not satisfy the required ones
        with pytest.raises(InvalidGramolaDictionary):
            TestConfig(**{'bar': None})
class TestParseDate(object):
    """Tests for gramola.utils.parse_date relative and absolute date parsing."""

    def test_now(self):
        dt = datetime.now()
        # Compare struct_time fields dropping the last one (tm_isdst) to avoid
        # flakiness; timetuple() carries no sub-second precision anyway.
        assert parse_date('now').timetuple()[:-1] == dt.timetuple()[:-1]

    def test_1min(self):
        dt = datetime.now() - timedelta(minutes=1)
        assert parse_date('-1min').minute == dt.minute

    def test_1s(self):
        dt = datetime.now() - timedelta(seconds=1)
        assert parse_date('-1s').second == dt.second

    def test_1h(self):
        dt = datetime.now() - timedelta(hours=1)
        assert parse_date('-1h').hour == dt.hour

    def test_1d(self):
        dt = datetime.now() - timedelta(days=1)
        assert parse_date('-1d').day == dt.day

    def test_timestamp(self):
        # Unix epoch timestamps given as strings are accepted too.
        dt = datetime.now()
        assert parse_date(str(round(time.mktime(dt.timetuple()), 0))).timetuple()[:-1] ==\
            dt.timetuple()[:-1]

    def test_iso8601(self):
        # ISO-8601 formatted datetimes are accepted.
        dt = datetime.now()
        assert parse_date(dt.strftime("%Y-%m-%dT%H:%M:%S")).timetuple()[:-1] ==\
            dt.timetuple()[:-1]

    def test_invalid(self):
        # Anything unparseable must raise DateTimeInvalidValue.
        with pytest.raises(DateTimeInvalidValue):
            parse_date("asdfasdfasdf")
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,714 | pfreixes/gramola | refs/heads/master | /gramola/log.py | # -*- coding: utf-8 -*-
"""
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
import logging
_logger = logging.getLogger('gramola')

# Module-level logging shortcuts so callers can do `from gramola import log;
# log.info(...)` without touching the logger object directly.
# BUG FIX / modernization: Logger.warn is a deprecated alias of
# Logger.warning; bind the canonical method instead (same behavior).
warning = _logger.warning
info = _logger.info
debug = _logger.debug
error = _logger.error


def setup(verbose=False, quite=False):
    """Configure the gramola logger.

    :param verbose: bool, emit DEBUG-level messages instead of INFO.
    :param quite: bool, silence all output by attaching a NullHandler.
        (Parameter name kept as-is for backward compatibility; "quiet"
        is the intended meaning.)
    """
    if quite:
        _logger.addHandler(logging.NullHandler())
    else:
        _logger.setLevel(logging.DEBUG if verbose else logging.INFO)
        # Bare-message format: gramola prints user-facing text, not log lines.
        logging.basicConfig(format='%(message)s')
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,715 | pfreixes/gramola | refs/heads/master | /gramola/plot.py | # -*- coding: utf-8 -*-
"""
Implements the ploting module to render the graphic over the console using a graphic
like the following one:
| * *
| ** **
| * *** **
| *** **** ***
| * **** ***** ** ****
| ** ****** ******* **** *****
|*** ******************** ***** ******
|*******************************************
+---+---+---+---+---+---+---+---+---+--+---+
min=1, max=34, last=2
The plot is rendered using 10 rows, meaning that all datapoints to be displayed
will be scaled until they fit within the range [0, 10]. By default the Plot uses the
max value picked from the list of datapoints and finds the smallest divisor
that scales it down to at most the value 10, for example:
datapoints = 10, 22, 35, 66, 14, 8
max = 66
Smallest divisor that gets the result below 10: 66 / 7 = 9.4
datapoints displayed = 2, 4, 6, 10, 3, 2
The user can also set a maximum value that will be used to compute the division
value; for example, if the user is tracking CPU usage the maximum value it will
get is 100. Using this 100 the division number turns out to be 10, so the above
datapoints will get the following
datapoints displayed = 1, 3, 4, 7, 2, 1
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
import os
import sys
from itertools import dropwhile
DEFAULT_ROWS = 8
class Plot(object):
    """Render datapoints as an ASCII chart, optionally redrawing in place."""

    def __init__(self, max_x=None, rows=DEFAULT_ROWS):
        # rows: chart height in terminal rows.
        # max_x: optional expected maximum value; when given it fixes the
        # scaling instead of using the max of the current datapoints.
        self.rows = rows
        self.max_x = max_x
        # True once draw() has painted, so a later call can erase and reuse
        # the same console space.
        self.__drawn = False

    def width(self):
        # Usable chart width: terminal columns minus the axis column.
        width, _ = getTerminalSize()
        # the plot needs the first column
        return (width - 1)

    def draw(self, datapoints):
        """Render the chart from `datapoints`, a list of (value, ts) tuples
        supplied by Gramola.

        :raises Exception: when there are more datapoints than columns.
        """
        if len(datapoints) > self.width():
            raise Exception("Given to many datapoints {}, doesnt fit into screen of {}".format(len(datapoints), self.width()))

        if self.__drawn:
            # remove the two lines used to render the plot by the
            # the previous call to refresh the plot using the
            # same console space (rows + axis line + stats line)
            for i in range(0, self.rows+2):
                sys.stdout.write("\033[K")  # remove line
                sys.stdout.write("\033[1A")  # up the cursor

        if datapoints:
            # FIXME: nowadays Gramola supports only integer values
            values = [int(value) for value, ts in datapoints]
            if len(values) < self.width():
                # padding the queue of the values with 0 to align
                # the graphic with the right corner of the screen
                values = ([0]*(self.width() - len(values))) + values

            # find the right division value
            max_x = self.max_x or max(values)
            if max_x == 0:
                # Edge case where all values are 0
                divide_by = self.rows
            else:
                # Smallest divisor that scales max_x down to <= rows.
                # NOTE(review): relies on Python 2 integer division, and
                # raises StopIteration when max_x <= rows+1 leaves the
                # range exhausted -- confirm expected input ranges.
                divide_by = next(dropwhile(lambda i: max_x / i > self.rows, range(1, max_x)))
        else:
            values = [0]*self.width()
            divide_by = self.rows

        # Paint rows top-down; a '*' marks columns whose scaled value
        # reaches this row.
        for row in range(self.rows, 0, -1):
            sys.stdout.write("|")
            for v in values:
                if v / divide_by >= row:
                    sys.stdout.write("*")
                else:
                    sys.stdout.write(" ")
            sys.stdout.write("\n")

        # Axis line: one "---+" group per 4 columns plus a remainder.
        # NOTE(review): `self.width() / 4.0 == 0` is only true for width 0;
        # the intent looks like `self.width() % 4 == 0` -- confirm.
        if self.width() / 4.0 == 0:
            extra = ""
        else:
            extra = "-"*(self.width() % 4)
        sys.stdout.write("+"+"---+"*(self.width()/4) + extra + "\n")

        if datapoints:
            sys.stdout.write("min={}, max={}, last={}\n".format(min(values), max(values), values[-1]))
        else:
            sys.stdout.write("no datapoints found ...\n")

        sys.stdout.flush()
        self.__drawn = True
# Code taken from the console module
def getTerminalSize():
    """Return the terminal size as (columns, lines).

    Tries the TIOCGWINSZ ioctl on stdin/stdout/stderr, then on the
    controlling terminal, and finally falls back to the LINES/COLUMNS
    environment variables (defaulting to 25x80).
    """
    env = os.environ

    def ioctl_GWINSZ(fd):
        # Ask the kernel for the window size of this descriptor; returns
        # None when fd is not attached to a terminal.
        try:
            import fcntl, termios, struct, os
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                                 '1234'))
        except:
            return
        return cr

    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # stdio may be redirected: try the controlling terminal directly.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except:
            pass
    if not cr:
        # Environment fallback; note cr is stored as (lines, columns).
        cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
    return int(cr[1]), int(cr[0])
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,716 | pfreixes/gramola | refs/heads/master | /setup.py | #!/usr/bin/env python
from setuptools import setup
#from distutils.core import setup
# Package metadata for Gramola; installing this project exposes the
# `gramola` console entry point wired to gramola.commands:gramola.
setup(
    name='Gramola',
    version='0.0.1a0',
    packages=['gramola', ],
    license='BSD',
    description='Gramola is a console port of Grafana_ that uses sparklines_ to render time series data points.',
    # NOTE(review): reading README.rst at setup time fails if the file is
    # missing from the sdist -- confirm it is listed in the manifest.
    long_description=open('README.rst').read(),
    author='Pau Freixes',
    author_email='pfreixes@gmail.com',
    entry_points={
        'console_scripts': [
            'gramola = gramola.commands:gramola',
        ]
    }
)
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,717 | pfreixes/gramola | refs/heads/master | /gramola/store.py | # -*- coding: utf-8 -*-
"""
This module implements the Store interface to save dashboards and datasources
to use them after. By default Gramola uses the directory ~/.gramola, although
all Gramola commands can override this default path for another one.
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
import os
from configobj import ConfigObj
from gramola import log
from gramola.datasources.base import DataSource
class NotFound(Exception):
    """Raised when a requested entry does not exist in the store."""
    pass
class DuplicateEntry(Exception):
    """Raised when saving an entry whose name already exists in the store."""
    pass
class Store(object):
    """Filesystem store for gramola datasources and dashboards.

    By default gramola keeps its data under ~/.gramola, although every
    command can override the default path with another directory.
    """
    DEFAULT_DIRNAME = ".gramola"
    DEFAULT_DASHBOARDS_FILENAME = "dashboards"
    DEFAULT_DATASOURCES_FILENAME = "datasources"

    def __init__(self, path=None):
        """
        Initialize a Store instance looking into the default store path or an
        alternative given by the `path` keyword.

        :param path: string, an alternative path to the default one.
        :raises ValueError: if a custom `path` does not exist.
        """
        if not path:
            # Use the default one; on first use create the directory.
            path = os.path.join(os.path.expanduser("~"), Store.DEFAULT_DIRNAME)
            if not os.path.exists(path):
                os.makedirs(path)
            self.path = path
        else:
            # Custom paths are never created implicitly, only validated.
            if not os.path.exists(path):
                raise ValueError("Path {} does not exists".format(path))
            self.path = path

        self.dashboards_filepath = os.path.join(self.path, Store.DEFAULT_DASHBOARDS_FILENAME)
        self.datasources_filepath = os.path.join(self.path, Store.DEFAULT_DATASOURCES_FILENAME)

    def datasources(self, name=None, type_=None):
        """
        Return all datasources stored as a list; each element is an instance of
        :class:`gramola.datasources.base.DataSourceConfig` (or a derived class)
        representing the data source itself.

        Use the keywords `name` and `type_` to filter datasources by one or
        both fields.

        :param name: string, filter by name.
        :param type_: string, filter by type of datasource.
        :return: list
        """
        config = ConfigObj(self.datasources_filepath, create_empty=True)
        results = []
        for section in config.sections:
            # User filters
            if name and name != section:
                continue
            if type_ and type_ != config[section].get('type'):
                continue

            # The title of the section is the name of the data source, so it
            # has to be packed back by hand. Each section has at least the
            # `type` key, used to find the right DataSourceConfig subclass.
            factory = DataSource.find(config[section].get('type')).DATA_SOURCE_CONFIGURATION_CLS
            keys = {k: v for k, v in config[section].items()}
            keys.update({'name': section})
            results.append(factory(**keys))
        return results

    def add_datasource(self, datasource):
        """
        Store a datasource to the system.

        :param datasource: :class:`gramola.datasources.base.DataSourceConfig`.
        :raises gramola.store.DuplicateEntry: If the datasource name already exists.
        """
        config = ConfigObj(infile=self.datasources_filepath, create_empty=True)
        if datasource.name in config:
            raise DuplicateEntry()
        config[datasource.name] = datasource.dict()
        config.write()

    def rm_datasource(self, name):
        """
        Remove a datasource from the system.

        :param name: string, name of the data source to remove.
        :raises gramola.store.NotFound: If the datasource does not exist.
        """
        config = ConfigObj(infile=self.datasources_filepath, create_empty=True)
        if name not in config:
            raise NotFound()
        config.pop(name)
        config.write()

    def dashboards(self, name=None):
        """
        Return all dashboards stored as a list of dictionaries; each dictionary
        has the name of the dashboard and the metric queries related to it:

            [{"name": "dashboard name", "queries": [{query1}, {query2}, ...]}]

        Each query is a dictionary formed by the name of the datasource that
        the query uses plus the required and optional keys of each kind of
        metric query. For example:

            {"datasource_name": "datasource name", ...metric query fields...}

        :param name: string, filter by name.
        :return: list
        """
        # BUG FIX: `raise NotImplemented()` fails with a TypeError because
        # NotImplemented is a non-callable sentinel constant, not an
        # exception; NotImplementedError is the correct signal for an
        # unimplemented API.
        raise NotImplementedError()
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,718 | pfreixes/gramola | refs/heads/master | /gramola/datasources/cloudwatch.py | # -*- coding: utf-8 -*-
"""
Implements the CloudWatch [1] data source.
[1] https://aws.amazon.com/cloudwatch/
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
import time
import boto3
import botocore
from functools import wraps
from itertools import count
from itertools import dropwhile
from gramola import log
from gramola.datasources.base import (
OptionalKey,
DataSource,
MetricQuery,
DataSourceConfig,
InvalidMetricQuery
)
class Boto3ClientError(Exception):
    """Single exception class used to wrap all errors raised by the
    underlying boto3/botocore client."""
    # Global class used to trigger all Exceptions related
    # with the Boto3 client.
    pass
def _cw_safe_call(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except botocore.exceptions.ClientError, e:
raise Boto3ClientError("Client eroor {}".format(str(e)))
except botocore.exceptions.PartialCredentialsError, e:
raise Boto3ClientError("Partial Credentials error {}".format(str(e)))
except botocore.exceptions.NoRegionError:
raise Boto3ClientError("No default region, give one using the --region option")
return wrapper
class CWDataSourceConfig(DataSourceConfig):
    """Configuration for a CloudWatch datasource: no required keys beyond
    the base ones; region and profile are optional overrides."""
    REQUIRED_KEYS = ()
    OPTIONAL_KEYS = (
        OptionalKey('region', 'Use this region as the default one insted of the' +
                              ' region defined by the profile'),
        OptionalKey('profile', 'Use an alternative profile than the default one')
    )
class CWMetricQuery(MetricQuery):
    """Metric query for CloudWatch: identifies a metric by namespace, name
    and one dimension; region and statistics can optionally be overridden."""
    REQUIRED_KEYS = ('namespace', 'metricname', 'dimension_name', 'dimension_value')
    OPTIONAL_KEYS = (
        OptionalKey('region', 'Use this region overriding the region configured by the ' +
                              ' datasource or profile'),
        OptionalKey('statistics', 'Override the default Average by Sum, SampleCount, '
                                  'Maximum or Minimum')
    )
class CWDataSource(DataSource):
    """CloudWatch implementation of the gramola DataSource interface."""
    DATA_SOURCE_CONFIGURATION_CLS = CWDataSourceConfig
    METRIC_QUERY_CLS = CWMetricQuery

    # Type identifier used by DataSource.find() lookups.
    TYPE = 'cw'

    @_cw_safe_call
    def _cw_client(self, region=None):
        # Build a boto3 CloudWatch client; `region` (when given) overrides
        # the region coming from the datasource configuration or profile.
        return boto3.session.Session(
            region_name=region or self.configuration.region,
            profile_name=self.configuration.profile).client('cloudwatch')

    @_cw_safe_call
    def _cw_call(self, client, f, *args, **kwargs):
        # Dispatch an arbitrary client method by name, shielded by
        # _cw_safe_call so botocore failures surface as Boto3ClientError.
        return getattr(client, f)(*args, **kwargs)

    def datapoints(self, query, maxdatapoints=None):
        """Return [(value, unix_ts), ...] for the given CWMetricQuery.

        :param query: CWMetricQuery instance.
        :param maxdatapoints: int, optional cap on the number of points;
            used to pick the smallest CloudWatch period (a multiple of 60s)
            that keeps the result under the cap.
        :raises InvalidMetricQuery: if `statistics` has an invalid value.
        """
        if query.statistics and (query.statistics not in ['Average', 'Sum', 'SampleCount',
                                                          'Maximum', 'Minimum']):
            raise InvalidMetricQuery("Query statistic invalid value `{}`".format(query.statistics))
        elif query.statistics:
            statistics = query.statistics
        else:
            # CloudWatch default statistic when none is requested.
            statistics = "Average"

        if maxdatapoints:
            # Calculate the Period where the number of datapoints
            # returned are less than maxdatapoints.
            # Get the first granularity that suits for return the maxdatapoints
            seconds = (query.get_until() - query.get_since()).total_seconds()
            period = next(dropwhile(lambda g: seconds / g > maxdatapoints, count(60, 60)))
        else:
            period = 60

        # get a client using the region given by the query, or if it
        # is None using the one given by the datasource or the profile
        client = self._cw_client(region=query.region)
        kwargs = {
            'Namespace': query.namespace,
            'MetricName': query.metricname,
            'StartTime': query.get_since(),
            'EndTime': query.get_until(),
            'Period': period,
            'Dimensions': [{
                'Name': query.dimension_name,
                'Value': query.dimension_value,
            }],
            'Statistics': [statistics]
        }
        datapoints = self._cw_call(client, "get_metric_statistics", **kwargs)
        # Flatten each CloudWatch datapoint into a (value, unix ts) tuple.
        return [(point[statistics], time.mktime(point['Timestamp'].timetuple()))
                for point in datapoints['Datapoints']]

    def test(self):
        # Just test creating the boto client and trying to get the list of
        # available metrics.
        try:
            self._cw_call(self._cw_client(), "list_metrics")
        except Boto3ClientError, e:
            log.error("Boto3 client got an exception: {}".format(e.message))
            return False
        return True
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,719 | pfreixes/gramola | refs/heads/master | /gramola/commands.py | # -*- coding: utf-8 -*-
"""
Commands are called from the entry point generated by the setuptools. The set
of commands supported are:
* gramola types : List of the datasource types supported.
* gramola datasource : Show a specific datasource.
* gramola datasource-test : Test a specific datasource.
* gramola datasource-list : List all datasources.
* gramola datasource-rm : Remove one datasource.
* gramola datasource-add-<type> : Add a new datasource.
* gramola datasource-echo-<type> : Echo a datasource.
* gramola query-<type> : Run a metrics query.
* gramola dashboard : Show a specific dashboard.
* gramola dashboard-list : List all dashboards.
* gramola dashboard-rm : Remove a dashboard.
* gramola dashboard-rm-query : Remove a specific query from one dashboard.
* gramola dashboard-query : Run all dashboard metrics.
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
from __future__ import print_function
import sys
import optparse
import sparkline
from time import sleep
from json import loads
from gramola import log
from gramola.plot import Plot, DEFAULT_ROWS
from gramola.store import (
Store,
NotFound,
DuplicateEntry
)
from gramola.contrib.subcommand import (
Subcommand,
SubcommandsOptionParser
)
from gramola.datasources.base import (
DataSource,
InvalidMetricQuery,
InvalidDataSourceConfig
)
class InvalidParams(Exception):
    """Raised when a command is invoked with missing or invalid arguments."""

    def __init__(self, error_params):
        Exception.__init__(self)
        # Keep the offending parameter description so callers can report it.
        self.error_params = error_params
class GramolaCommand(object):
    """Base class for all gramola subcommands; concrete commands subclass
    it and are discovered through __subclasses__()."""

    # to be overriden by commands implementations
    NAME = None
    DESCRIPTION = None
    USAGE = None

    @staticmethod
    def execute(options, suboptions, *subargs):
        """This method is called by gramola entry point to perform
        the operations regarding a subcommand, override this method
        with all of the stuff related with the subcommand.

        :param options: Options given by the user as gramola options
        :param suboptions: Options given by the user as subcommand options
        :param subargs: Tuple args, each subcommand gives mean to each arg
          belonging to this list.
        """
        # BUG FIX: `raise NotImplemented()` raised a TypeError because
        # NotImplemented is a non-callable sentinel constant; the correct
        # abstract-method signal is NotImplementedError.
        raise NotImplementedError()

    @staticmethod
    def options():
        """List of option args and kwargs to be used as a params for the
        parser.add_option function."""
        return []

    @classmethod
    def find(cls, command_name):
        """Returns the Command implementation for a specific command name.

        :raises KeyError: when no subclass declares that NAME.
        """
        try:
            return next(c for c in cls.__subclasses__() if c.NAME == command_name)
        except StopIteration:
            raise KeyError(command_name)

    @classmethod
    def commands(cls):
        """Returns the commands implementations (all direct subclasses)."""
        return cls.__subclasses__()
class DataSourceCommand(GramolaCommand):
    """`gramola datasource NAME`: print a saved datasource configuration."""
    NAME = 'datasource'
    DESCRIPTION = 'View the details of a saved datasource'
    USAGE = '%prog NAME'

    @staticmethod
    def execute(options, suboptions, *subargs):
        """ Returns info about one specific datasource as a dictionary with all
        key values saved.
        """
        try:
            name = subargs[0]
        except IndexError:
            # NAME is mandatory; surface a usage error to the caller.
            raise InvalidParams("NAME")
        # honour --store when given, otherwise use the default store
        store = options.store and Store(path=options.store) or Store()
        try:
            print(store.datasources(name=name)[0].dumps())
        except IndexError:
            # empty result list -> no datasource saved under that name
            print("Datasource `{}` NOT FOUND".format(name))
class DataSourceTestCommand(GramolaCommand):
    """`gramola datasource-test NAME`: check a saved datasource's service."""
    NAME = 'datasource-test'
    DESCRIPTION = 'Test if the service behind a data source is available'
    USAGE = '%prog NAME'

    @staticmethod
    def execute(options, suboptions, *subargs):
        """ Test an already saved datasource."""
        try:
            name = subargs[0]
        except IndexError:
            # NAME is mandatory; surface a usage error to the caller.
            raise InvalidParams("NAME")
        # honour --store when given, otherwise use the default store
        store = options.store and Store(path=options.store) or Store()
        try:
            config = store.datasources(name=name)[0]
        except IndexError:
            print("Datasource `{}` not found, NOT TESTED".format(name))
            return

        # Instantiate the concrete DataSource for this config's type and
        # run its connectivity test.
        if DataSource.find(config.type)(config).test():
            print("Datasource `{}` seems ok".format(name))
        else:
            print("Datasource `{}` FAILED!".format(name))
class DataSourceRmCommand(GramolaCommand):
    """`gramola datasource-rm NAME`: delete a saved datasource."""
    NAME = 'datasource-rm'
    DESCRIPTION = 'Remove an already saved datasource'
    USAGE = '%prog NAME'

    @staticmethod
    def execute(options, suboptions, *subargs):
        """Remove an already saved datasource """
        try:
            name = subargs[0]
        except IndexError:
            # NAME is mandatory; surface a usage error to the caller.
            raise InvalidParams("NAME")
        # honour --store when given, otherwise use the default store
        store = options.store and Store(path=options.store) or Store()
        try:
            store.rm_datasource(name)
            print("Datasource `{}` removed".format(name))
        except NotFound:
            print("Datasource `{}` not found, NOT REMOVED".format(name))
def build_datasource_echo_type(datasource):
    """
    Build the datasource-echo command for one type of datasource; it turns out
    in a new command named datasource-echo-<type>.

    :param datasource: a DataSource subclass whose configuration class
        drives the command's positional args and options.
    :return: a GramolaCommand subclass.
    """
    class DataSourceEchoCommand(GramolaCommand):
        NAME = 'datasource-echo-{}'.format(datasource.TYPE)
        DESCRIPTION = 'Echo a datasource {} configuration'.format(datasource.TYPE)

        # All datasources inherited the `type` and the `name` fields as
        # required params; typed commands already carry the type so it is
        # removed from the command args. In the Echo case also the name.
        USAGE = '%prog {}'.format(" ".join(
            [s.upper() for s in datasource.DATA_SOURCE_CONFIGURATION_CLS.required_keys() if
             s not in ['name', 'type']]))

        @staticmethod
        def execute(options, suboptions, *subargs):
            """ Echo a datasource configuration to be used by the query command;
            the arguments depend on the kind of datasource.
            """
            datasource_params = {
                # Type is a required param that is coupled with
                # with the command.
                'type': datasource.TYPE,
                # An echo command doesnt need the name keyword even though it is
                # required; we share the same interface as the add command, so
                # we give an automatic name to avoid raising an
                # InvalidDataSourceConfig error
                'name': 'stdout'
            }
            # Positional args map, in order, onto the remaining required keys.
            datasource_params.update(
                {k: v for v, k in
                 zip(subargs,
                     filter(lambda k: k not in ['name', 'type'],
                            datasource.DATA_SOURCE_CONFIGURATION_CLS.required_keys()))}
            )
            try:
                config = datasource.DATA_SOURCE_CONFIGURATION_CLS(**datasource_params)
            except InvalidDataSourceConfig, e:
                raise InvalidParams(e.errors)

            # Echo the validated configuration as JSON on stdout.
            print(config.dumps())

        @staticmethod
        def options():
            # Command Options
            command_options = []
            # Datasource Options: one CLI option per optional config key.
            datasource_options = [
                ((option.hyphen_name,), {"dest": option.name, "help": option.description})
                for option in datasource.DATA_SOURCE_CONFIGURATION_CLS.optional_keys()]
            return command_options + datasource_options

    return DataSourceEchoCommand
def build_datasource_add_type(datasource_cls):
    """
    Build the datasource-add command for one type of datasource; it turns out
    in a new command named datasource-add-<type>.

    :param datasource_cls: a DataSource subclass whose configuration class
        drives the command's positional args and options.
    :return: a GramolaCommand subclass.
    """
    class DataSourceAddCommand(GramolaCommand):
        NAME = 'datasource-add-{}'.format(datasource_cls.TYPE)
        DESCRIPTION = 'Add a datasource {} configuration'.format(datasource_cls.TYPE)

        # All datasources inherited the `type` and the `name` fields as
        # required params; typed commands already carry the type so it is
        # removed from the command args.
        USAGE = '%prog {}'.format(" ".join(
            [s.upper() for s in datasource_cls.DATA_SOURCE_CONFIGURATION_CLS.required_keys() if
             s not in ['type']]))

        @staticmethod
        def execute(options, suboptions, *subargs):
            """ Add a datasource configuration to user store """
            datasource_params = {
                # Type is a required param that is coupled with
                # with the command.
                'type': datasource_cls.TYPE,
            }
            # Positional args map, in order, onto the remaining required keys
            # (name included, so subargs[0] is the datasource name).
            datasource_params.update(
                {k: v for v, k in
                 zip(subargs,
                     filter(lambda k: k not in ['type'],
                            datasource_cls.DATA_SOURCE_CONFIGURATION_CLS.required_keys()))}
            )

            # set also the optional keys given as suboptional params
            datasource_params.update(**{str(k): getattr(suboptions, str(k))
                                        for k in filter(lambda k: getattr(suboptions, str(k)),
                                                        datasource_cls.DATA_SOURCE_CONFIGURATION_CLS.optional_keys())})
            try:
                config = datasource_cls.DATA_SOURCE_CONFIGURATION_CLS(**datasource_params)
            except InvalidDataSourceConfig, e:
                raise InvalidParams(e.errors)

            ds = datasource_cls(config)
            if not suboptions.no_test and not ds.test():
                # only save if the test passes (or --no-test was given)
                print("Data source test failed, might the service not being unavailable ?")
                print("THIS DATA SOURCE NOT BE ADDED, use --no-test flag to add it even")
                return

            # honour --store when given, otherwise use the default store
            store = options.store and Store(path=options.store) or Store()
            try:
                store.add_datasource(config)
                print("Datasource `{}` added".format(subargs[0]))
            except DuplicateEntry:
                print("Datasource `{}` already exists, NOT SAVED".format(subargs[0]))

        @staticmethod
        def options():
            # Command Options
            command_options = [
                (("--no-test",), {"action": "store_true", "help": "Dont run the data source test"})]
            # Datasource Options: one CLI option per optional config key.
            datasource_options = [
                ((option.hyphen_name,), {"dest": option.name, "help": option.description})
                for option in datasource_cls.DATA_SOURCE_CONFIGURATION_CLS.optional_keys()]
            return command_options + datasource_options

    return DataSourceAddCommand
class DataSourceListCommand(GramolaCommand):
    """`gramola datasource-list`: print one line per saved datasource."""
    NAME = 'datasource-list'
    DESCRIPTION = 'List all saved datasources'
    USAGE = '%prog'

    @staticmethod
    def execute(options, suboptions, *subargs):
        """ List all datasouces."""
        # honour --store when given, otherwise use the default store
        store = options.store and Store(path=options.store) or Store()
        for datasource in store.datasources():
            print("Datasource `{}` ({})".format(datasource.name, datasource.type))
def build_datasource_query_type(datasource_cls):
    """
    Build the query command for one type of datasource_cls; it turns out
    in a new command named `query-<type>`.

    :param datasource_cls: a DataSource implementation.
    :returns: a GramolaCommand subclass.
    """
    class QueryCommand(GramolaCommand):
        NAME = 'query-{}'.format(datasource_cls.TYPE)
        DESCRIPTION = 'Query for a specific metric.'
        USAGE = '%prog {}'.format(" ".join(
            ['DATASOURCE_NAME'] +
            [s.upper() for s in datasource_cls.METRIC_QUERY_CLS.required_keys()]))

        @staticmethod
        def execute(options, suboptions, *subargs):
            """ Runs a query using a datasource_cls and prints it as a char graphic."""
            try:
                name = subargs[0]
            except IndexError:
                raise InvalidParams("NAME")

            if name == '-':
                # `-` reads the datasource config as JSON from stdin instead
                # of looking it up in the store.
                buffer_ = sys.stdin.read()
                config = loads(buffer_)
            else:
                store = options.store and Store(path=options.store) or Store()
                try:
                    config = store.datasources(name=name)[0]
                except IndexError:
                    print("Datasource {} not found".format(name), file=sys.stderr)
                    return

            # Positional args map, in order, onto the query's required keys.
            query_params = {
                k: v for v, k in zip(subargs[1:],
                                     datasource_cls.METRIC_QUERY_CLS.required_keys())
            }
            # set also the optional keys given as suboptional params
            query_params.update(**{str(k): getattr(suboptions, str(k))
                                   for k in filter(lambda k: getattr(suboptions, str(k)),
                                                   datasource_cls.METRIC_QUERY_CLS.optional_keys())})
            try:
                query = datasource_cls.METRIC_QUERY_CLS(**query_params)
            except InvalidMetricQuery as e:  # `as` form: valid in Python 2.6+ and 3
                raise InvalidParams(e.errors)

            try:
                datasource = datasource_cls(config)
            except InvalidDataSourceConfig as e:
                print("Datasource config invalid {}".format(e.errors), file=sys.stderr)
            else:
                plot = Plot(max_x=suboptions.plot_maxx)
                while True:
                    plot.draw(datasource.datapoints(query, maxdatapoints=plot.width()))
                    if not suboptions.refresh:
                        break
                    try:
                        sleep(int(suboptions.refresh_freq))
                    except KeyboardInterrupt:
                        break

        @staticmethod
        def options():
            # Command Options
            command_options = [
                (("--refresh",), {"action": "store_true", "default": False,
                                  "help": "Keep graphing forever, default False "}),
                (("--refresh-freq",), {"action": "store", "type": "int", "default": 10,
                                       "help": "Refresh frequency in seconds, default 10s"}),
                (("--plot-maxx",), {"action": "store", "type": "int", "default": None,
                                    "help": "Configure the maximum value X expected, otherwise the plot" +
                                            " will use the maximum value got by the time window"}),
                # BUG FIX: the --plot-rows help was a copy-paste of the
                # --plot-maxx text; it now describes the option it documents.
                (("--plot-rows",), {"action": "store", "type": "int", "default": DEFAULT_ROWS,
                                    "help": "Configure the number of rows used to draw the plot," +
                                            " default {}".format(DEFAULT_ROWS)}),
            ]
            # Datasource Options: one CLI flag per optional query key.
            datasource_options = [
                ((option.hyphen_name,), {"dest": option.name, "help": option.description})
                for option in datasource_cls.METRIC_QUERY_CLS.optional_keys()]
            return command_options + datasource_options
    return QueryCommand
def gramola():
    """ Entry point called from the binary generated by setuptools.

    Beyond the main command, Gramola implements a set of subcommands, each
    one implementing a specific operation.  The anatomy of a Gramola
    command looks like:

        $ gramola <global options> <subcommand> <subcommand options> <args ..>

    Exits with status 1 on an unknown subcommand or invalid params.
    """
    # Build as many datasource-echo commands as many types of datasources there are.
    echo_commands = [build_datasource_echo_type(datasource)
                     for datasource in DataSource.implementations()]
    # Build as many datasource-add commands as many types of datasources there are.
    add_commands = [build_datasource_add_type(datasource)
                    for datasource in DataSource.implementations()]
    # Build as many query commands as many types of datasources there are.
    query_commands = [build_datasource_query_type(datasource)
                      for datasource in DataSource.implementations()]

    # Use the gramola.contrib.subcommands implementation to wrap the
    # GramolaCommands as subcommands available from the main command.
    subcommands = []
    for gramola_subcommand in GramolaCommand.commands():
        cmd = Subcommand(gramola_subcommand.NAME,
                         optparse.OptionParser(usage=gramola_subcommand.USAGE),
                         gramola_subcommand.DESCRIPTION)
        for option_args, option_kwargs in gramola_subcommand.options():
            cmd.parser.add_option(*option_args, **option_kwargs)
        subcommands.append(cmd)

    parser = SubcommandsOptionParser(subcommands=subcommands)
    parser.add_option('-s', '--store', dest='store',
                      help='alternative store directory, default ~/.gramola')
    # NOTE(review): `quite` looks like a typo for `quiet`, but the name is
    # part of the log.setup(...) interface, so it is kept unchanged here.
    parser.add_option('-q', dest='quite', help='Be quite', action='store_true')
    parser.add_option('-v', dest='verbose', help='Be verbose', action='store_true')
    options, subcommand, suboptions, subargs = parser.parse_args()

    log.setup(verbose=options.verbose, quite=options.quite)
    try:
        cmd = GramolaCommand.find(subcommand.name)
    except KeyError:
        print("Command not found {}".format(subcommand.name))
        print("")
        parser.print_help()
        sys.exit(1)

    try:
        cmd.execute(options, suboptions, *subargs)
    except InvalidParams as e:  # `as` form: valid in Python 2.6+ and 3
        print("Invalid params for {} command, error: {}".format(subcommand.name, e.error_params))
        print("Get help with gramola {} --help".format(subcommand.name))
        sys.exit(1)
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,720 | pfreixes/gramola | refs/heads/master | /tests/test_plot.py | import pytest
import time
from StringIO import StringIO
from mock import patch, Mock
from gramola.plot import Plot
# Each fixture is a pair: [input datapoints given to Plot.draw,
# expected ascii graph written to stdout].
DEFAULT_ROWS_FIXTURE = [
    # values given to the Plot to get the graph below
    [(10, 1), (20, 1), (30, 1), (40, 1), (50, 1),
     (60, 1), (70, 1), (80, 1), (90, 1), (100, 1)],
    # graph expected
    """| *
| **
| ***
| *****
| ******
| *******
| ********
| *********
+---+---+--
min=10, max=100, last=100
"""
]

FIVE_ROWS_FIXTURE = [
    # values given to the Plot to get the graph below
    [(10, 1), (20, 1), (30, 1), (40, 1), (50, 1),
     (60, 1), (70, 1), (80, 1), (90, 1), (100, 1)],
    # graph expected
    """| **
| ****
| *****
| *******
| *********
+---+---+--
min=10, max=100, last=100
"""
]

MAXX_ROWS_FIXTURE = [
    # values given to the Plot to get the graph below
    [(10, 1), (50, 1), (10, 1), (10, 1)],
    # graph expected
    """|
|
|
|
| *
| *
| *
| *
+---+---+--
min=0, max=50, last=10
"""
]
@patch("gramola.plot.sys")
@patch.object(Plot, "width", return_value=10)
class TestPlotDrawing(object):
    """ Checks the ascii graph rendered by Plot.draw for different
    configurations, capturing stdout through the patched gramola.plot.sys. """

    def test_draw_default_rows(self, width_patched, sys_patched):
        sys_patched.stdout = StringIO()
        plot = Plot()
        plot.draw(DEFAULT_ROWS_FIXTURE[0])
        sys_patched.stdout.seek(0)
        output = sys_patched.stdout.read()
        assert output == DEFAULT_ROWS_FIXTURE[1]

    def test_draw_five_rows(self, width_patched, sys_patched):
        width_patched.return_value = 10
        sys_patched.stdout = StringIO()
        plot = Plot(rows=5)
        plot.draw(FIVE_ROWS_FIXTURE[0])
        sys_patched.stdout.seek(0)
        output = sys_patched.stdout.read()
        assert output == FIVE_ROWS_FIXTURE[1]

    def test_draw_maxx(self, width_patched, sys_patched):
        width_patched.return_value = 10
        sys_patched.stdout = StringIO()
        plot = Plot(max_x=100)
        plot.draw(MAXX_ROWS_FIXTURE[0])
        sys_patched.stdout.seek(0)
        output = sys_patched.stdout.read()
        # BUG FIX: the old assertion called read() a second time on an
        # already-exhausted stream, comparing "" against the fixture; assert
        # on the captured output instead.
        assert output == MAXX_ROWS_FIXTURE[1]
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,721 | pfreixes/gramola | refs/heads/master | /gramola/datasources/graphite.py | # -*- coding: utf-8 -*-
"""
Implements the Graphite [1] data source.
[1] https://graphite.readthedocs.org/en/latest/
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
import requests
from gramola import log
from requests.exceptions import RequestException
from gramola.datasources.base import (
OptionalKey,
DataSource,
MetricQuery,
DataSourceConfig
)
DATE_FORMAT = "%H:%M_%y%m%d"
class GraphiteDataSourceConfig(DataSourceConfig):
    """ Configuration for a Graphite datasource: only the base `url` of the
    Graphite HTTP endpoint is required. """
    REQUIRED_KEYS = ('url',)
    OPTIONAL_KEYS = ()
class GraphiteMetricQuery(MetricQuery):
    """ Query parameters for Graphite: `target` is the series expression to
    render; `since`/`until` come from the MetricQuery base class. """
    REQUIRED_KEYS = ('target',)
    OPTIONAL_KEYS = ()
class GraphiteDataSource(DataSource):
    """ Gramola DataSource backed by the Graphite HTTP API. """
    DATA_SOURCE_CONFIGURATION_CLS = GraphiteDataSourceConfig
    METRIC_QUERY_CLS = GraphiteMetricQuery
    TYPE = 'graphite'

    def _url(self, endpoint):
        """ Join the configured base url and `endpoint` with exactly one
        slash, regardless of whether the configured url ends with one. """
        if self.configuration.url.endswith('/'):
            return self.configuration.url + endpoint
        return self.configuration.url + '/' + endpoint

    def _safe_request(self, url, params):
        """ GET `url` with `params` and return the JSON-decoded body, or
        None on any network failure or non-200 response. """
        try:
            response = requests.get(url, params=params)
        except RequestException as e:  # `as` form: valid in Python 2.6+ and 3
            log.warning("Something was wrong with Graphite service")
            log.debug(e)
            return None

        if response.status_code != 200:
            log.warning("Get an invalid {} HTTP code from Grahpite".format(response.status_code))
            return None

        return response.json()

    def datapoints(self, query, maxdatapoints=None):
        """ Return [(value, ts), ...] for `query.target`, limited to
        `maxdatapoints` points; empty list when the metric is missing or
        the service failed. """
        # Graphite publishes the endpoint `/render` to retrieve datapoints
        # from one or multiple targets; Gramola only ever renders one.
        params = {
            'target': query.target,
            'from': query.get_since().strftime(DATE_FORMAT),
            'to': query.get_until().strftime(DATE_FORMAT),
            # graphite supports multiple output formats, we have
            # to configure the json output
            'format': 'json'
        }
        if maxdatapoints:
            params['maxDataPoints'] = maxdatapoints

        response = self._safe_request(self._url('render'), params)
        if response is None:
            return []
        elif len(response) == 0:
            log.warning('Metric `{}` not found'.format(query.target))
            return []
        elif len(response) > 1:
            log.warning('Multiple targets found, geting only the first one')

        points = response[0]["datapoints"]
        # Graphite allocates values automatically to each bucket of time
        # (storage schema); the last value can stay Null until a new value
        # arrives for that bucket. We prefer to drop a trailing Null and
        # wait until the real value is available or the Null is confirmed.
        # BUG FIX: guard against an empty series, which used to raise
        # IndexError on the [-1] access.
        if points and not points[-1][0]:
            points.pop()

        # Graphite returns a list of lists; turn it into a list of tuples
        # to match the datapoints return type.
        # FIXME: Gramola does not support None values, so they become 0.
        return [(col[0] or 0, col[1]) for col in points]

    def test(self):
        """ Test the configuration against the metrics find endpoint.

        :rtype: boolean, True only for a valid 200 response.
        """
        # BUG FIX: the old implementation ignored the response (unused
        # variable) and reported success even for a 404; reuse
        # _safe_request so the HTTP status is checked, and _url so a
        # trailing slash in the configured url no longer doubles up.
        response = self._safe_request(self._url('metrics/find'), {'query': '*'})
        return response is not None
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,722 | pfreixes/gramola | refs/heads/master | /tests/test_store.py | import pytest
import time
from mock import patch, Mock
from gramola.store import (
NotFound,
DuplicateEntry,
Store
)
from .fixtures import test_data_source
from .fixtures import nonedefault_store
class TestStore(object):
    """ Exercises the Store CRUD operations against a temporary store that
    the fixtures pre-populate with two datasources of type `test`. """

    def test_datasources(self, nonedefault_store, test_data_source):
        found = nonedefault_store.datasources()
        assert len(found) == 2
        # just check the second result
        second = found[1]
        assert type(second) == test_data_source.DATA_SOURCE_CONFIGURATION_CLS
        assert second.type == "test"
        assert second.name == "datasource two"
        assert second.foo == "c"
        assert second.bar == "d"
        assert second.gramola == "e"

    def test_datasources_filter_name(self, nonedefault_store, test_data_source):
        # filtering by name must match exactly one fixture entry
        assert len(nonedefault_store.datasources(name="datasource two")) == 1

    def test_datasources_filter_type(self, nonedefault_store, test_data_source):
        # an unknown type filter matches nothing
        assert len(nonedefault_store.datasources(type_="notimplemented")) == 0

    def test_add_datasource(self, nonedefault_store, test_data_source):
        config = test_data_source.DATA_SOURCE_CONFIGURATION_CLS(
            type="test", name="test name", foo="a", bar="b")
        nonedefault_store.add_datasource(config)
        assert len(nonedefault_store.datasources(name="test name")) == 1

    def test_add_duplicate_datasource(self, nonedefault_store, test_data_source):
        # "datasource one" already exists as part of the fixture
        config = test_data_source.DATA_SOURCE_CONFIGURATION_CLS(
            type="test", name="datasource one", foo="a", bar="b")
        with pytest.raises(DuplicateEntry):
            nonedefault_store.add_datasource(config)

    def test_rm_datasource(self, nonedefault_store, test_data_source):
        assert len(nonedefault_store.datasources()) == 2
        # remove one of the fixture datasources
        nonedefault_store.rm_datasource("datasource one")
        assert len(nonedefault_store.datasources()) == 1

    def test_rm_notfound_datasource(self, nonedefault_store, test_data_source):
        with pytest.raises(NotFound):
            nonedefault_store.rm_datasource("xxxx")
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,723 | pfreixes/gramola | refs/heads/master | /tests/fixtures.py | import pytest
from mock import Mock
from gramola.store import Store
from gramola.datasources.base import (
MetricQuery,
DataSource,
DataSourceConfig
)
@pytest.fixture
def test_data_source():
    """ Returns a throwaway DataSource implementation (TYPE 'test') whose
    `datapoints` and `test` methods are Mocks, for exercising the store and
    command machinery without a real backend. """
    class TestDataSourceConfig(DataSourceConfig):
        REQUIRED_KEYS = ('foo', 'bar')
        OPTIONAL_KEYS = ('gramola',)

    class TestMetricQuery(MetricQuery):
        REQUIRED_KEYS = ('metric',)
        OPTIONAL_KEYS = ('since', 'until')

    class TestDataSource(DataSource):
        TYPE = 'test'
        DATA_SOURCE_CONFIGURATION_CLS = TestDataSourceConfig
        METRIC_QUERY_CLS = TestMetricQuery
        # Mocks so tests can assert on calls instead of real behaviour.
        datapoints = Mock()
        test = Mock()

    return TestDataSource
# Raw ini-style store content used to pre-populate the temporary store:
# two datasources of type `test`; the second also sets the optional
# `gramola` key.
CONFIG = """
[datasource one]
type=test
foo=a
bar=b
[datasource two]
type=test
foo=c
bar=d
gramola=e
"""
@pytest.fixture
def nonedefault_store(tmpdir):
    """ Returns a Store rooted at a pytest tmpdir (i.e. not the default
    ~/.gramola path), pre-populated with the datasources declared in CONFIG. """
    fd = tmpdir.join(Store.DEFAULT_DATASOURCES_FILENAME)
    fd.write(CONFIG)
    return Store(path=str(tmpdir))
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
45,724 | pfreixes/gramola | refs/heads/master | /gramola/datasources/base.py | # -*- coding: utf-8 -*-
"""
This module implements the helper classes to implement new type of data sources
such as Graphite, CloudWatch, OpenTSDB, etc. The `DataSource` is used as a base
class to implement data sources and the `DataSourceConfig` is used to implement
specific configurations.
:moduleauthor: Pau Freixes, pfreixes@gmail.com
"""
from gramola.utils import (
InvalidGramolaDictionary,
GramolaDictionary,
parse_date
)
class InvalidDataSourceConfig(InvalidGramolaDictionary):
    """ Raised when a DataSourceConfig doesn't get the right keys.

    An empty class derived from InvalidGramolaDictionary, existing only to
    make the error readable.
    """
    pass
class OptionalKey(object):
    """ OptionalKey type is used by :class:DataSourceConfig and
    :class:MetricQuery to store the OPTIONAL_KEYS, and helps the gramola
    commands build the proper command-line arguments.
    """
    def __init__(self, name, description):
        """
        :param name: option name; also used for hashing and dict lookup.
        :param description: human-readable help text for the option.
        """
        self.name = name
        self.description = description

    def __hash__(self):
        """ An OptionalKey instance is hashed over the GramolaDictionary
        using the name of the option. """
        return hash(self.name)

    def __str__(self):
        """ OptionalKeys are fetched from a dictionary using the option name. """
        return self.name

    def __cmp__(self, b):
        """ Comparing an OptionalKey is just comparing the option names.
        (Python 2 only; Python 3 ignores __cmp__.)
        """
        # BUG FIX: the old fallback branch called object.__cmp__ via
        # super(), which does not exist and raised AttributeError; compare
        # by name instead.  isinstance replaces the exact type check so
        # str subclasses also work.
        if isinstance(b, str):
            return cmp(self.name, b)
        return cmp(self.name, str(b))

    @property
    def hyphen_name(self):
        """ The option name in command-line flag form, e.g. `--since`. """
        return "--{}".format(self.name)
class DataSourceConfig(GramolaDictionary):
    """ Each datasource instance is created along with one DataSourceConfig
    that stores the specific keys and values expected to configure that
    data source.

    Each data source implementation has to provide a class derived from
    DataSourceConfig, declaring the required keys with the REQUIRED_KEYS
    attribute and the optional keys with the OPTIONAL_KEYS attribute.

    All DataSourceConfig implementations have at least the following
    required keys: type, name.
    """
    REQUIRED_KEYS = ('type', 'name')
    OPTIONAL_KEYS = ()  # tuple of OptionalKey values.

    def __init__(self, *args, **kwargs):
        """
        :raises: InvalidDataSourceConfig
        """
        try:
            super(DataSourceConfig, self).__init__(*args, **kwargs)
        except InvalidGramolaDictionary as e:  # `as` form: valid in Python 2.6+ and 3
            raise InvalidDataSourceConfig(e.errors)
class InvalidMetricQuery(InvalidGramolaDictionary):
    """ Raised when a MetricQuery doesn't get the right keys.

    An empty class derived from InvalidGramolaDictionary, existing only to
    make the error readable.
    """
    pass
class MetricQuery(GramolaDictionary):
    """ Each datasource implementation uses a specific implementation of the
    MetricQuery class to get support for the specific params used to make
    queries against that datasource.

    Each data source implementation has to provide a class derived from
    MetricQuery, declaring the required keys with REQUIRED_KEYS and the
    optional keys with OPTIONAL_KEYS.

    MetricQuery itself implements the optional keys: since, until.
    """
    REQUIRED_KEYS = ()
    # All Queries use the since, and until optional parameters.
    OPTIONAL_KEYS = (
        OptionalKey('since', 'Get values from, default -1h'),
        OptionalKey('until', 'Get values until, default now')
    )

    def __init__(self, *args, **kwargs):
        """
        :raises: InvalidMetricQuery
        """
        try:
            super(MetricQuery, self).__init__(*args, **kwargs)
        except InvalidGramolaDictionary as e:  # `as` form: valid in Python 2.6+ and 3
            raise InvalidMetricQuery(e.errors)

    def get_since(self):
        """ Returns the datetime used to collect data from.

        :return: datetime
        """
        return parse_date(self.since or '-1h')

    def get_until(self):
        """ Returns the datetime used to collect data until.

        :return: datetime
        """
        return parse_date(self.until or 'now')
class DataSource(object):
    """ Used as a base class for specialized data sources such as
    Graphite, OpenTSDB, and others.

    Those methods that raise NotImplementedError have to be defined by the
    derived class.
    """
    # Override these class attributes with the specific
    # implementation by the type of data source.
    DATA_SOURCE_CONFIGURATION_CLS = DataSourceConfig
    METRIC_QUERY_CLS = MetricQuery

    # Override the TYPE attribute with the short name
    # of the DataSource.
    TYPE = None

    @classmethod
    def find(cls, type_):
        """Returns the DataSource implementation for a specific type_.

        :raises: KeyError when no subclass declares that TYPE.
        """
        try:
            return next(c for c in cls.__subclasses__() if c.TYPE == type_)
        except StopIteration:
            raise KeyError(type_)

    @classmethod
    def implementations(cls):
        """Returns all implementations (direct subclasses)."""
        return cls.__subclasses__()

    def __init__(self, configuration):
        """
        Initialize a data source using a configuration. The configuration
        is, if not overridden, an instance of the
        DataSource.DATA_SOURCE_CONFIGURATION_CLS configuration class.

        :param configuration: Data Source configuration
        :type configuration: `DataSourceConfig` or a derived one
        """
        self.configuration = configuration

    @classmethod
    def from_config(cls, **config_params):
        """ Build a datasource using the config_params given. """
        return cls(cls.DATA_SOURCE_CONFIGURATION_CLS(**config_params))

    def datapoints(self, query, maxdatapoints=None):
        """ This function is used to pick up a set of datapoints
        from the data source configured.

        The `query` object holds the query params given by the user; it is
        an instance, if not overridden, of `DataSource.METRIC_QUERY_CLS`.

        Example of the list of points returned by this method:

            [(val, ts), (val, ts) .....]

        :param query: Query
        :type query: `MetricQuery` or a derived one
        :param maxdatapoints: Restrict the result to a certain amount of datapoints, default All
        :rtype: list, or None when datapoints were not found.
        """
        # BUG FIX: `raise NotImplemented()` raised a TypeError because
        # NotImplemented is a non-callable singleton; the proper exception
        # is NotImplementedError.
        raise NotImplementedError()

    def test(self):
        """ This function is used to test a data source configuration.

        A derived class has to implement this function if it wants to
        support testing a datasource configuration, as used by the commands
        `gramola datasource-add` and `gramola datasource-test`.

        :rtype: boolean.
        """
        raise NotImplementedError()
| {"/tests/datasources/test_graphite.py": ["/gramola/utils.py", "/gramola/datasources/graphite.py"], "/tests/datasources/test_base.py": ["/gramola/datasources/base.py"], "/tests/datasources/test_cloudwatch.py": ["/gramola/datasources/cloudwatch.py", "/gramola/datasources/base.py"], "/gramola/datasources/__init__.py": ["/gramola/datasources/graphite.py", "/gramola/datasources/cloudwatch.py"], "/tests/test_commands.py": ["/gramola/commands.py", "/gramola/datasources/base.py", "/tests/fixtures.py"], "/tests/test_utils.py": ["/gramola/utils.py"], "/gramola/store.py": ["/gramola/datasources/base.py"], "/tests/test_plot.py": ["/gramola/plot.py"], "/tests/test_store.py": ["/gramola/store.py", "/tests/fixtures.py"], "/tests/fixtures.py": ["/gramola/store.py", "/gramola/datasources/base.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.