index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
50,206 | stevenlandis/SBHACKS_V | refs/heads/master | /PC_Control/Keyboard_Control.py | import keyboard as key
def _tap(vk):
    """Press and immediately release a single virtual key code."""
    key.Keyboard.keyDown(vk)
    key.Keyboard.keyUp(vk)

def Sleep_Mode():
    """Send the system sleep key."""
    _tap(key.Keyboard.VK_SLEEP)

def Tab():
    """Tap the Tab key."""
    _tap(key.Keyboard.VK_TAB)

def Shift_Tab():
    """Shift+Tab: reverse focus traversal."""
    key.Keyboard.keyDown(key.Keyboard.VK_SHIFT)
    _tap(key.Keyboard.VK_TAB)
    key.Keyboard.keyUp(key.Keyboard.VK_SHIFT)

def Enter():
    """Tap the Enter key."""
    _tap(key.Keyboard.VK_ENTER)

def Shift():
    """Tap the Shift key."""
    _tap(key.Keyboard.VK_SHIFT)

def Ctrl():
    """Tap the Ctrl key."""
    _tap(key.Keyboard.VK_CTRL)

def BackSpace():
    """Tap the Backspace key."""
    _tap(key.Keyboard.VK_BACKSPACE)

def B_Back():
    """Browser: navigate back."""
    _tap(key.Keyboard.VK_BROWSER_BACK)

def B_Favorite():
    """Browser: open the favorites view."""
    _tap(key.Keyboard.VK_BROWSER_FAVORITES)

def B_Forward():
    """Browser: navigate forward."""
    _tap(key.Keyboard.VK_BROWSER_FORWARD)

def Alt_Tab():
    """Alt+Tab: switch to the next window."""
    key.Keyboard.keyDown(key.Keyboard.VK_ALT)
    _tap(key.Keyboard.VK_TAB)
    key.Keyboard.keyUp(key.Keyboard.VK_ALT)
def Alt_Shift_Tab():
key.Keyboard.keyDown(key.Keyboard.VK_SHIFT)
key.Keyboard.keyDown(key.Keyboard.VK_ALT)
key.Keyboard.keyDown(key.Keyboard.VK_TAB)
key.Keyboard.keyUp(key.Keyboard.VK_TAB)
key.Keyboard.keyUp(key.Keyboard.VK_ALT)
key.Keyboard.keyUp(key.Keyboard.VK_SHIFT) | {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,207 | stevenlandis/SBHACKS_V | refs/heads/master | /PC_Control/Volume_Control.py | from sound import Sound
def Increase_Volume(amount):
    """Raise the system volume by `amount` steps."""
    for _ in range(amount):
        Sound.volume_up()

def Decrease_Volume(amount):
    """Lower the system volume by `amount` steps."""
    for _ in range(amount):
        Sound.volume_down()

def Mute():
    """Mute the system audio."""
    Sound.mute()

def Max_Volume():
    """Jump straight to maximum volume."""
    Sound.volume_max()

def Min_Volume():
    """Jump straight to minimum volume."""
    Sound.volume_min()

def Get_Volume():
    """Return the current system volume level."""
    return Sound.current_volume()
| {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,208 | stevenlandis/SBHACKS_V | refs/heads/master | /dragonIO/testA0/main.py | import serial
# Open the board's serial port; device path and 9600 baud are hard-coded.
ard = serial.Serial('/dev/tty96B0', 9600)

if __name__ == '__main__':
    print('Starting')
    # Echo every line the board sends, forever (readline() blocks).
    while True:
        ardOut = ard.readline()  # raw bytes, trailing newline included
        print(ardOut)
| {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,209 | stevenlandis/SBHACKS_V | refs/heads/master | /board.py | import serial
# Serial link to the board at 115200 baud; device path is hard-coded.
ard = serial.Serial('/dev/tty96B0', 115200)

if __name__ == '__main__':
    print('Starting')
    while True:
        # readline() returns bytes; [:-1] drops the final byte — presumably
        # the trailing '\n' (a '\r\n' terminator would leave the '\r' — TODO confirm).
        print(ard.readline()[:-1])
| {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,210 | stevenlandis/SBHACKS_V | refs/heads/master | /Receive.py | from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
import Keyboard_Control as keyboard
import Volume_Control as volume
import Windows_Control as windows
import Youtube_Control as youtube
import GUI
import sys
import wmi
import _thread as thread
import pythoncom
import Speech_Recognition as SR
# Last payload received over the websocket. NOTE(review): handleMessage
# below assigns a *local* of the same name, so this module-level value is
# never actually updated.
DataString = ''
# Websocket listen port. NOTE(review): 22 is the standard SSH port —
# verify it is intentionally chosen and free on the target machine.
port = 22
def Socket_Function():
    """Run the websocket echo server on the calling thread (never returns)."""
    # COM must be initialised on each thread that touches win32 COM (wmi).
    pythoncom.CoInitialize()
    ws_server = SimpleWebSocketServer('', port, SimpleEcho)
    ws_server.serveforever()
class SimpleEcho(WebSocket):
    """Websocket handler: forwards each message to the GUI and echoes it back."""

    def handleMessage(self):
        # echo message back to client
        # NOTE(review): this binds a *local* DataString; the module-level
        # DataString defined above is never updated by this handler.
        DataString = self.data
        GUI.Call_Function(DataString)
        # print(DataString)
        self.sendMessage(self.data)

    def handleConnected(self):
        print(self.address, 'Connected')

    def handleClose(self):
        print(self.address, 'Disconnected')
# Run the websocket server on a background thread and give the main thread
# to the (blocking) GUI event loop.
thread.start_new_thread(Socket_Function, ())
GUI.GUI()
| {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,211 | stevenlandis/SBHACKS_V | refs/heads/master | /dragonIO/testAll/Speech_Recognition.py | import pip
import smtplib
import speech_recognition as sr
import ssl
# Email account used as both sender and recipient of the transcription.
Sender = 'gareyflee@gmail.com'
Recipient = 'gareyflee@gmail.com'
# SECURITY(review): plaintext password committed to source — move it to an
# environment variable or secrets store and rotate this credential.
pw = "Dew61316131"
def Speech_To_Email():
    """Record one utterance from the default microphone, transcribe it with
    Google Speech Recognition, and email the text to ``Recipient``.

    Propagates ``speech_recognition`` errors when audio cannot be captured
    or understood, and ``smtplib`` errors on mail failure.
    """
    # for index, name in enumerate(sr.Microphone.list_microphone_names()):
    #     print("Microphone with name \"{1}\" found for `Microphone(device_index={0})`".format(index, name))
    mic = sr.Microphone()
    r = sr.Recognizer()
    with mic as source:
        # Calibrate against background noise for 1 s before listening.
        r.adjust_for_ambient_noise(source, duration=1)
        print("Say something!")
        audio = r.listen(source)
    voiceHold = r.recognize_google(audio)
    print(voiceHold)
    server = smtplib.SMTP('smtp.gmail.com', 587)  ## 25 465 587
    try:
        server.starttls()
        # SECURITY(review): `pw` is a hard-coded credential (see constants above).
        server.login(Sender, pw)
        msg = voiceHold + "\n\nSent from Hackathon"
        server.sendmail(Sender, Recipient, msg)
    finally:
        # Always close the SMTP session — the original leaked the socket
        # whenever starttls/login/sendmail raised.
        server.quit()
| {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,212 | stevenlandis/SBHACKS_V | refs/heads/master | /dragonIO/testAll/Keyboard_Control.py | import keyboard as key
from time import sleep
def Scroll_Windows(arg):
    """Cycle windows: 0 moves forward (Alt+Tab), anything else backward."""
    action = Alt_Tab if arg == 0 else Alt_Shift_Tab
    action()

def Scroll_Tabs(arg):
    """Cycle browser tabs: 0 moves forward (Ctrl+Tab), anything else backward."""
    action = Ctrl_Tab if arg == 0 else Ctrl_Shift_Tab
    action()
def _press(vk):
    """Tap (press then release) one virtual key code."""
    key.Keyboard.keyDown(vk)
    key.Keyboard.keyUp(vk)

def Tab():
    """Tap the Tab key."""
    _press(key.Keyboard.VK_TAB)

def Shift_Tab():
    """Shift+Tab: reverse focus traversal."""
    key.Keyboard.keyDown(key.Keyboard.VK_SHIFT)
    _press(key.Keyboard.VK_TAB)
    key.Keyboard.keyUp(key.Keyboard.VK_SHIFT)

def Enter(arg):
    """Tap Enter unless `arg` is 0 (0 is a deliberate no-op)."""
    if arg != 0:
        _press(key.Keyboard.VK_ENTER)

def Shift():
    """Tap the Shift key."""
    _press(key.Keyboard.VK_SHIFT)

def Ctrl():
    """Tap the Ctrl key."""
    _press(key.Keyboard.VK_CTRL)

def BackSpace():
    """Tap the Backspace key."""
    _press(key.Keyboard.VK_BACKSPACE)

def B_Back():
    """Browser: navigate back."""
    _press(key.Keyboard.VK_BROWSER_BACK)

def B_Favorite():
    """Browser: open the favorites view."""
    _press(key.Keyboard.VK_BROWSER_FAVORITES)

def B_Forward():
    """Browser: navigate forward."""
    _press(key.Keyboard.VK_BROWSER_FORWARD)
def Alt_Tab():
    """Alt+Tab with a 100 ms pause between key events (no trailing pause)."""
    steps = [(key.Keyboard.keyDown, key.Keyboard.VK_ALT),
             (key.Keyboard.keyDown, key.Keyboard.VK_TAB),
             (key.Keyboard.keyUp, key.Keyboard.VK_TAB),
             (key.Keyboard.keyUp, key.Keyboard.VK_ALT)]
    for i, (action, vk) in enumerate(steps):
        action(vk)
        if i < len(steps) - 1:
            sleep(0.1)

def Alt_Shift_Tab():
    """Alt+Shift+Tab (reverse window cycle) with 100 ms inter-key pauses."""
    steps = [(key.Keyboard.keyDown, key.Keyboard.VK_SHIFT),
             (key.Keyboard.keyDown, key.Keyboard.VK_ALT),
             (key.Keyboard.keyDown, key.Keyboard.VK_TAB),
             (key.Keyboard.keyUp, key.Keyboard.VK_TAB),
             (key.Keyboard.keyUp, key.Keyboard.VK_ALT),
             (key.Keyboard.keyUp, key.Keyboard.VK_SHIFT)]
    for i, (action, vk) in enumerate(steps):
        action(vk)
        if i < len(steps) - 1:
            sleep(0.1)
def _hold_and_tap(modifiers, vk):
    """Hold `modifiers` in order, tap `vk`, then release in reverse order."""
    for mod in modifiers:
        key.Keyboard.keyDown(mod)
    key.Keyboard.keyDown(vk)
    key.Keyboard.keyUp(vk)
    for mod in reversed(modifiers):
        key.Keyboard.keyUp(mod)

def Ctrl_Tab():
    """Ctrl+Tab: switch to the next browser tab."""
    _hold_and_tap((key.Keyboard.VK_CTRL,), key.Keyboard.VK_TAB)

def Ctrl_Shift_Tab():
    """Ctrl+Shift+Tab: switch to the previous browser tab."""
    _hold_and_tap((key.Keyboard.VK_SHIFT, key.Keyboard.VK_CTRL),
                  key.Keyboard.VK_TAB)
def Ctrl_W(arg):
if arg == 0:
return
key.Keyboard.keyDown(key.Keyboard.VK_CTRL)
# sleep(0.1)
key.Keyboard.keyDown(key.Keyboard.VK_W)
# sleep(0.1)
key.Keyboard.keyUp(key.Keyboard.VK_W)
# sleep(0.1)
key.Keyboard.keyUp(key.Keyboard.VK_CTRL) | {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,213 | stevenlandis/SBHACKS_V | refs/heads/master | /dragonIO/testLight/board.py | import serial
# Serial link to the board at 9600 baud; device path is hard-coded.
ard = serial.Serial('/dev/tty96B0', 9600)

if __name__ == '__main__':
    print('Starting')
    # Print each raw line (bytes, newline included) as it arrives.
    while True:
        print(ard.readline())
| {"/dragonIO/testAll/GUI.py": ["/Youtube_Control.py"], "/Receive.py": ["/Youtube_Control.py"]} |
50,215 | vsegouin/pic-sorter | refs/heads/master | /files/FileSystemManager.py | # coding=utf-8
import os
import re
import time
import platform
from Parameters import Parameters
from Reporting import Reporting
# French month names, indexed by (month number - 1); used when building
# destination folder names.
months = ["Janvier", "Fevrier", "Mars", "Avril", "Mai", "Juin", "Juillet", "Aout", "Septembre", "Octobre", "Novembre",
          "Decembre"]
# Extensions that get routed into the "unauthorized" bucket.
unauthorizedExtension = ['.ico', '.gif']
class FileSystemManager:
    """Moves pictures into date-based folder trees under ``root_path``.

    Files end up in <root>/processed/<bucket>/..., duplicates under
    <root>/duplicate. The date bucket comes from EXIF data when present,
    otherwise from the filename or the filesystem modification time.
    All moves respect ``Parameters.dry_run`` and feed ``Reporting`` counters.
    """

    # Class-level defaults; each is overwritten per instance in __init__.
    root_path = ""
    database_path = ""
    duplicate_folder = ""
    processed_folder = ""

    def __init__(self, root_path):
        self.duplicate_file_path = os.path.join(root_path, "duplicate.txt")
        self.root_path = root_path
        self.database_path = os.path.join(root_path, "database.txt")
        self.duplicate_folder = os.path.join(root_path, "duplicate")
        self.processed_folder = os.path.join(root_path, "processed")
        self.init_folders()

    def init_folders(self):
        """
        Create the base folder (processed, duplicate folder)
        """
        self.create_folder_if_not_exists(self.duplicate_folder)
        self.create_folder_if_not_exists(self.processed_folder)

    def extract_datetime(self, file_exif):
        """
        try to find the dateTime inside the exif metadata
        :param file_exif: dict of EXIF tags as returned by exifread
        :return: a string composed of the date if found, an empty string if not
        """
        new_filename = file_exif.get("EXIF DateTimeOriginal")
        new_filename = str(new_filename).replace(" ", "_")
        # If DataTimeOriginal doesn't contains data we try another exif meta data
        if new_filename == " " or new_filename is None or new_filename == "None":
            new_filename = file_exif.get("Image DateTime")
            new_filename = str(new_filename).replace(" ", "_")
        # NOTE(review): when both tags are missing this returns the literal
        # string "None" (str(None)); manage_image checks for that value.
        if new_filename == "" or new_filename is None or str(new_filename) == '':
            return ""
        else:
            Reporting.date_by_exif += 1
            return new_filename

    def create_folder_if_not_exists(self, folder):
        """
        Check if the requested folder exists and create it if not
        :param folder: absolute path of the folder to ensure
        :return: the folder path created
        """
        if not os.path.exists(folder):
            if not Parameters.dry_run:
                os.makedirs(folder)
            # Recorded even in dry-run mode so the report shows what would happen.
            Reporting.path_created.append(folder)
        return folder

    def move_file(self, file_directory, filename, dest_directory, dest_filename):
        """
        This method clean the path created, check if a file already exists at this path and rename the file if needed
        It's possible that due to an error in the filename the programs can't move it
        :param file_directory: directory of the file to move
        :param filename: the name of the file to move
        :param dest_directory: the new directory of the file
        :param dest_filename: the new filename
        """
        # Replace characters that are illegal in Windows filenames.
        dest_filename = re.sub('[<>:\"/\|\?*]', '_', dest_filename)
        basename, ext = os.path.splitext(dest_filename)
        dst_file = os.path.join(dest_directory, dest_filename)
        # rename if necessary: append -1, -2, ... until the name is free
        count = 0
        while os.path.exists(dst_file):
            count += 1
            dst_file = os.path.join(dest_directory, '%s-%d%s' % (basename, count, ext))
            # Reporting.log 'Renaming %s to %s' % (file, dst_file)
        try:
            if not Parameters.dry_run:
                os.rename(os.path.join(file_directory, filename), dst_file)
            Reporting.file_moved += 1
        except OSError:
            # BUG FIX: the original `except WindowsError or FileNotFoundError`
            # only ever caught WindowsError (and raised NameError on
            # non-Windows); OSError covers both cases portably.
            Reporting.unmovable_file.append(os.path.join(file_directory, filename))
            Reporting.log("CAN'T MOVE THIS FILE !!!!!")

    def manage_image(self, directory, filename, file_exif):
        """
        Methods which manage the image, prepare the new name, check if it's an authorized extension and then
        move it
        :param directory: directory of the file
        :param filename: name of the file
        :param file_exif: extracted exif data of the file
        """
        basename, ext = os.path.splitext(filename)
        try:
            dest_name = self.extract_datetime(file_exif)
        except AttributeError:
            dest_name = ""
        Reporting.image_found += 1
        root_folder = "regular"
        # NOTE(review): the last disjunct compares a Match object to the
        # string 'None', so it is always False; files whose EXIF date does
        # not match the pattern are handled by the AttributeError branch
        # below instead. Kept as-is to preserve routing behaviour.
        if file_exif == {} or dest_name == "" or dest_name == 'None' or re.search(r'(\d{4}):(\d{2}):(\d{2})', dest_name) == 'None':
            root_folder = "emptyExif"
            if ext in unauthorizedExtension:
                root_folder = "unauthorized"
                Reporting.increment_unauthorized_extension(ext)
            else:
                Reporting.image_without_exif += 1
            dest_name, dest_directory = self.detect_file_date(directory, filename, root_folder)
        else:
            try:
                match = re.search(r'(\d{4}):(\d{2}):(\d{2})', dest_name).groups()
                root_folder = "regular"
                if ext in unauthorizedExtension:
                    root_folder = "unauthorized"
                    Reporting.increment_unauthorized_extension(ext)
                else:
                    Reporting.image_with_exif += 1
                dest_directory = self.create_folder_if_not_exists(os.path.join(self.processed_folder, root_folder, repr(match[0]).replace("\\", "").replace("'", ""), months[int(match[1]) - 1]))
            except AttributeError:
                # No usable date in the EXIF string: file goes to the error bucket.
                Reporting.errors_files_details.append(os.path.join(directory, filename))
                Reporting.errors_files += 1
                root_folder = "error"
                dest_directory = self.create_folder_if_not_exists(os.path.join(self.processed_folder, root_folder, filename))
        self.move_file(directory, filename, dest_directory, (dest_name + ext).replace(":", "-"))

    def manage_non_image(self, directory, filename, file_type):
        """Route a non-image file (video / error / other) into its bucket."""
        if file_type == "video":
            dest_directory = self.create_folder_if_not_exists(os.path.join(self.processed_folder, "video"))
            Reporting.videos_found += 1
        elif file_type == "error":
            dest_directory = self.create_folder_if_not_exists(os.path.join(self.processed_folder, "error"))
            Reporting.errors_files += 1
            Reporting.errors_files_details.append(os.path.join(directory, filename))
        else:
            dest_directory = self.copy_directory_structure(directory, os.path.join(self.processed_folder, "nonImage"))
            Reporting.other_found += 1
        try:
            self.move_file(directory, filename, dest_directory, filename)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt still
            # propagates; move_file already absorbs the common OS errors.
            Reporting.unmovable_file.append(os.path.join(directory, filename))

    def manage_duplicate_file(self, directory, filename, file_type):
        """Move an already-seen file into the duplicate tree, by type."""
        Reporting.duplicate_found += 1
        if file_type == "image":
            dest_directory = self.copy_directory_structure(directory, self.duplicate_folder)
        elif file_type == "video":
            dest_directory = self.copy_directory_structure(directory, os.path.join(self.duplicate_folder, "video"))
        else:
            dest_directory = self.copy_directory_structure(directory, os.path.join(self.duplicate_folder, "other"))
        self.move_file(directory, filename, dest_directory, filename)

    def copy_directory_structure(self, directory_to_copy, directory_destination):
        """
        Copy the structure of a directory based on the root of the destination based on the root_path given in
        parameter
        >>> copy_directory_structure('C:/foo/bar/test/bar/foo','C:/foo/bar/process') #with C:/foo/bar as root_parameter
        'C:/foo/bar/process/test/bar/foo'
        :param directory_to_copy: the directory to copy (usually where the file is)
        :param directory_destination: root under which the structure is recreated
        :return: the mirrored directory path
        """
        new_directory = directory_to_copy.replace(self.root_path, "")
        # Strip the leading separator so os.path.join does not discard
        # directory_destination (Windows paths need the doubled escape).
        pattern = "^" + os.sep
        if Parameters.is_windows:
            pattern = "^" + os.sep + os.sep
        new_directory = re.sub(pattern, "", new_directory)
        new_directory = os.path.join(directory_destination, new_directory)
        try:
            self.create_folder_if_not_exists(new_directory)
        except FileNotFoundError:
            Reporting.unmovable_file.append(new_directory)
        return new_directory

    def detect_file_date(self, directory, filename, root_folder):
        """
        if the exif doesn't contains the date of creation, this method try to detect the date of the file based on the name
        if the filename doesn't match any pattern it use the system date of creation
        :param directory: directory of the file
        :param filename: the current name of the file
        :param root_folder: the directory where the file will end up
        :return: the final name of the file and the new directory
        """
        final_name = ""
        dest_directory = ""
        possible_pattern = [
            # Annee#mois#jour#serie
            "[January|February|March|April|May|June|July|August|September|October|November|December]*_[0-9]{2}__[0-9]*",
            # mois#Annee#jour#serie
            "([0-9]{4})([0-9]{2})([0-9]{2})[-_]([0-9]*)",
            # annee mois jour heure minutes secondes
            "([0-9]{2})([0-9]{2})([0-9]{2})[-_]([0-9]*)",
            # annee mois jour heure minutes secondes
            "([0-9]{4})[-_]([0-9]{2})[-_]([0-9]{2})[\-\s]([0-9]{2})[h\:\-\s\.]([0-9]{2})[m\:\-\s\.]([0-9]{2})",
            "([0-9]{2})[-_]([0-9]{2})[-_]([0-9]{2})[\-\s]([0-9]{2})[h\:\-\s\.]([0-9]{2})[m\:\-\s\.]([0-9]{2})",
        ]
        for pattern in possible_pattern:
            # BUG FIX: the original matched against `final_name` (always ""
            # at this point) instead of the filename, so no pattern could
            # ever hit. TODO(review): a successful match is still only
            # logged — the matched date is never used to build final_name.
            matches = re.match(pattern, filename)
            if matches is not None:
                Reporting.log(matches.groups())
        # Last chance : get filesystem creation date
        if final_name == "":
            match = time.gmtime(os.path.getmtime(os.path.join(directory, filename)))
            final_name = repr(match[0]) + ":" + repr(match[1]) + ":" + repr(match[2]) + "_" + repr(
                match[3]) + ":" + repr(match[4]) + ":" + repr(match[5])
            dest_directory = self.create_folder_if_not_exists(
                os.path.join(self.processed_folder, root_folder, repr(match[0]), months[match[1] - 1], repr(match[2])))
            Reporting.date_by_modified += 1
        else:
            Reporting.date_by_name += 1
        return final_name, dest_directory
| {"/files/FileSystemManager.py": ["/Parameters.py", "/Reporting.py"], "/__main__.py": ["/Parameters.py", "/Reporting.py", "/files/FileBrowser.py", "/files/FileSystemManager.py", "/files/MD5Encoder.py", "/image/ExifReader.py"], "/Reporting.py": ["/Parameters.py"], "/files/MD5Encoder.py": ["/Reporting.py"], "/files/FileBrowser.py": ["/Reporting.py"]} |
50,216 | vsegouin/pic-sorter | refs/heads/master | /__main__.py | import logging
import time
from Parameters import Parameters
from Reporting import *
from files.FileBrowser import FileBrowser
from files.FileSystemManager import FileSystemManager
from files.MD5Encoder import MD5Encoder
from image.ExifReader import ExifReader
import os
import sys
import logging
logging.basicConfig(level=logging.INFO)

# argv[1] is the root folder to sort; bail out when missing.
# NOTE(review): Reporting.log only prints when Parameters.is_verbose is
# True, so this usage message is normally invisible.
try:
    Parameters.root_path = sys.argv[1]
except IndexError:
    Reporting.log("PLEASE PROVIDE A PATH AS PARAMETER")
    exit(0)
# argv[2] == 'false' disables dry-run; anything else (or absent) keeps it on.
try:
    if sys.argv[2] == 'false':
        Parameters.dry_run = False
    else:
        Parameters.dry_run = True
except IndexError:
    Parameters.dry_run = True

fsManager = FileSystemManager(Parameters.root_path)
md5_encryptor = MD5Encoder(fsManager.root_path)
exif_reader = ExifReader()
browser = FileBrowser(fsManager.root_path)
debut = time.time()  # wall-clock start, for the final timing line
browser.count_processed_file()
# Crawl the processed folder to recreate database with existing files
for root, subdirs, files in browser.crawl_processed_folder():
    for file in files:
        file_path = os.path.join(root, file)
        Reporting.total_file += 1  # increment total number of files
        Reporting.showProgress(file, file_path)
        try:
            # An already-known hash at this stage means the processed tree
            # itself contains a duplicate: record it in duplicate.txt.
            if not md5_encryptor.process_md5(file_path):
                md5_encryptor.add_file_in_duplicate_list(file_path)
        except IOError:
            fsManager.manage_non_image(root, file, "error")
browser.count_total_find()
Reporting.log(repr(debut))
# Main pass: walk everything outside processed/duplicate and sort it.
for root, subdirs, files in browser.crawl_folders():
    for file in files:
        # construct full image path
        file_path = os.path.join(root, file)
        # check if it's the database
        if file_path == fsManager.database_path or file_path == fsManager.duplicate_file_path:
            continue
        Reporting.log("-----------------")
        Reporting.total_file += 1  # increment total number of files
        Reporting.showProgress(file, file_path)
        # find the mime type
        file_type = exif_reader.detect_image_file(os.path.join(root, file))
        # check if a duplicate is already found
        try:
            if not md5_encryptor.process_md5(file_path):
                Reporting.log("file is duplicate skip")
                fsManager.manage_duplicate_file(root, file, file_type)
                continue
        except IOError:
            fsManager.manage_non_image(root, file, "error")
        # if it's not a duplicated file
        # we increment the number of unique file found
        Reporting.total_file_processed += 1
        # sort it in the folder if it's not an image
        if file_type == "video" or file_type == "other":
            fsManager.manage_non_image(root, file, file_type)
        # if it's an image
        elif file_type == "image":
            try:
                exif = exif_reader.read_exif(file_path)
                fsManager.manage_image(root, file, exif)
            except IOError:
                fsManager.manage_non_image(root, file, "error")
Reporting.doReporting()
if Parameters.dry_run:
    Reporting.log("This was a dry_run test, don't forget to delete the database.txt if it exists")
print("--- %s seconds ---" % (time.time() - debut))
| {"/files/FileSystemManager.py": ["/Parameters.py", "/Reporting.py"], "/__main__.py": ["/Parameters.py", "/Reporting.py", "/files/FileBrowser.py", "/files/FileSystemManager.py", "/files/MD5Encoder.py", "/image/ExifReader.py"], "/Reporting.py": ["/Parameters.py"], "/files/MD5Encoder.py": ["/Reporting.py"], "/files/FileBrowser.py": ["/Reporting.py"]} |
50,217 | vsegouin/pic-sorter | refs/heads/master | /Reporting.py | import logging
from logging import Logger
from Parameters import Parameters
import os
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first instantiation is stored in ``_instances``; every later call
    on the same class returns that cached object.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        cached = cls._instances.get(cls)
        if cached is None:
            cached = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = cached
        return cached
# Python2
class Reporting(object):
    """Process-wide accumulator for sorter statistics plus logging helpers.

    All state is class-level, so every importer shares the same counters.
    """
    # global data
    hashed = []
    calculated_total_file = 0   # expected file count for progress display
    total_file = 0  #
    total_file_processed = 0  #
    file_moved = 0
    # Type
    image_found = 0  #
    videos_found = 0  #
    duplicate_found = 0  #
    other_found = 0  #
    # Data on images
    image_with_exif = 0
    image_without_exif = 0
    date_by_exif = 0
    date_by_name = 0
    date_by_modified = 0
    # actions
    unauthorized_extension = {}   # extension -> occurrence count
    errors_files = 0
    errors_files_details = []
    path_created = []
    unmovable_file = []
    # NOTE(review): `__metaclass__` is the Python 2 spelling and is ignored
    # by Python 3, so the Singleton metaclass is inactive there.
    __metaclass__ = Singleton

    def reporting(self):
        pass

    @classmethod
    def increment_unauthorized_extension(cls, ext):
        # Count one more file carrying the blacklisted extension `ext`.
        try:
            cls.unauthorized_extension[ext] += 1
        except KeyError:
            # First occurrence: create the bucket, then count it (net 1).
            cls.unauthorized_extension[ext] = 0
            cls.unauthorized_extension[ext] += 1
        pass

    @classmethod
    def doReporting(cls):
        # Print the final human-readable summary of the whole run.
        print("\n\r\n\r===========================\n\r========Global infos=======\n\r===========================" +
              "\n\rTotal File found " + repr(cls.total_file) +
              "\n\rTotal File processed " + repr(cls.total_file_processed) +
              "\n\rTotal File Moved " + repr(cls.file_moved) +
              "\n\r\n\r===========================\n\r=======Type Detected=======\n\r===========================" +
              "\n\rImages found : " + repr(cls.image_found) +
              "\n\rVideos found : " + repr(cls.videos_found) +
              "\n\rOther found : " + repr(cls.other_found) +
              "\n\rDuplicate found : " + repr(cls.duplicate_found) +
              "\n\r\n\r===========================\n\r==Informations about image=\n\r===========================" +
              "\n\rImage with exif : " + repr(cls.image_with_exif) +
              "\n\rImage without exif : " + repr(cls.image_without_exif) +
              "\n\rDate found with exif : " + repr(cls.date_by_exif) +
              "\n\rDate found with filename : " + repr(cls.date_by_name) +
              "\n\rDate found with modification date : " + repr(cls.date_by_modified) +
              "\n\r\n\r===========================\n\r==========Errors===========\n\r===========================" +
              "\n\rErrors file : " + repr(cls.errors_files) +
              "\n\r\n\r===========================\n\r=Path of problematics file=\n\r===========================" +
              "\n\rUnmovable file : " + repr(cls.unmovable_file.__len__()) + "\n\r" +
              "\n\r".join(cls.unmovable_file) +
              "\n\r\n\r===========================\n\r=======Path created========\n\r===========================" +
              "\n\rPath Created : " + repr(list(set(cls.path_created)).__len__()) + "\n\r" +
              "\n\r".join(list(set(cls.path_created)))
              )

    @classmethod
    def log(cls, string):
        # Verbose-gated print: silent unless Parameters.is_verbose is True.
        if Parameters.is_verbose:
            print(string)

    @classmethod
    def showProgress(cls, file, filePath):
        # "<seen> / <total> | <name> | <size>Mo" — size is bytes >> 20 (MiB).
        logging.info(repr(Reporting.total_file) + " / " + repr(Reporting.calculated_total_file) + " | " + file + " | " + repr(os.path.getsize(filePath) >> 20) + "Mo")
| {"/files/FileSystemManager.py": ["/Parameters.py", "/Reporting.py"], "/__main__.py": ["/Parameters.py", "/Reporting.py", "/files/FileBrowser.py", "/files/FileSystemManager.py", "/files/MD5Encoder.py", "/image/ExifReader.py"], "/Reporting.py": ["/Parameters.py"], "/files/MD5Encoder.py": ["/Reporting.py"], "/files/FileBrowser.py": ["/Reporting.py"]} |
50,218 | vsegouin/pic-sorter | refs/heads/master | /image/ExifReader.py | import mimetypes
import exifread
class ExifReader:
    """Reads EXIF tags and classifies files by their guessed MIME type."""

    def __init__(self):
        pass

    def read_exif(self, file):
        """Return the EXIF tag dict for `file`, or '' when exifread chokes.

        :param file: path to the image file
        """
        # `with` closes the handle deterministically (the original leaked it).
        with open(file, 'rb') as f:
            # Return Exif tags
            try:
                tags = exifread.process_file(f)
            except TypeError:
                # exifread raises TypeError on some malformed files.
                tags = ''
        return tags

    def detect_image_file(self, file):
        """Classify `file` as 'image', 'video' or 'other' by MIME type.

        :param file: filename or path (only the extension matters)
        """
        mime = mimetypes.guess_type(file)[0]
        if mime is None:
            return "other"
        if "image/" in mime:
            return "image"
        # BUG FIX: the original tested "image/" a second time here, so
        # videos were always misclassified as "other".
        if "video/" in mime:
            return "video"
        return "other"
| {"/files/FileSystemManager.py": ["/Parameters.py", "/Reporting.py"], "/__main__.py": ["/Parameters.py", "/Reporting.py", "/files/FileBrowser.py", "/files/FileSystemManager.py", "/files/MD5Encoder.py", "/image/ExifReader.py"], "/Reporting.py": ["/Parameters.py"], "/files/MD5Encoder.py": ["/Reporting.py"], "/files/FileBrowser.py": ["/Reporting.py"]} |
50,219 | vsegouin/pic-sorter | refs/heads/master | /files/MD5Encoder.py | import os, hashlib
import errno
from Reporting import Reporting
class MD5Encoder:
    """Tracks file MD5 hashes in a flat-text database to detect duplicates."""

    # Class-level defaults; overwritten per instance in __init__.
    m_database_path = ""
    m_current_file = ""
    m_hashed_value = ""
    m_duplicate_file = ""

    def __init__(self, database_location):
        self.m_database_path = os.path.join(database_location, "database.txt")
        self.m_duplicate_file = os.path.join(database_location, "duplicate.txt")
        # Touch the database so later reads never fail on a missing file.
        with open(self.m_database_path, "a", 1):
            pass

    def write_all(self, list):
        """Append every item of `list`, one per line."""
        # NOTE(review): "test.txt" looks like leftover debug output — confirm
        # the intended destination before relying on this method.
        with open("test.txt", "a", 1) as file:
            for item in list:
                file.write(item)
                file.write("\n\r")

    def init_file(self, file_path):
        """Remember `file_path` and cache its MD5 for the later checks."""
        self.m_current_file = file_path
        self.m_hashed_value = self.hash_file(file_path)

    def hash_file(self, file):
        """Return the hex MD5 of `file`, or "" when it cannot be read."""
        self.m_current_file = file
        hash_md5 = hashlib.md5()
        try:
            # Stream in 4 KiB chunks so huge files do not fill memory.
            with open(file, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5.update(chunk)
            return hash_md5.hexdigest()
        except OSError as e:
            if e.errno == errno.ENOENT:
                Reporting.log(file + " Not Found continuing")
            return ""

    #
    # Return true if Hash code is not blank and it's not present in database.txt
    def is_file_already_present(self):
        """True when the cached hash is blank or already in database.txt."""
        with open(self.m_database_path) as f:
            contents = f.read()
        return self.m_hashed_value == "" or contents.count(self.m_hashed_value) > 0

    def add_file_in_duplicate_list(self, file_path):
        """Append `file_path` to duplicate.txt."""
        with open(self.m_duplicate_file, "a", 1) as file:
            file.write(file_path)
            file.write("\n\r")

    def add_hash_in_database(self):
        """Append the cached hash to database.txt."""
        with open(self.m_database_path, "a", 1) as file:
            file.write(self.m_hashed_value)
            file.write("\n\r")

    def process_md5(self, file_path):
        """Hash `file_path`: record it and return True when unseen;
        return False for duplicates (or unreadable files)."""
        Reporting.log(file_path)
        self.init_file(file_path)
        # if it's a duplicate or the database itself then there is no reason to continue
        if self.is_file_already_present():
            Reporting.log(self.m_hashed_value + " already present")
            return False
        self.add_hash_in_database()
        Reporting.log(self.m_hashed_value + " added")
        return True
| {"/files/FileSystemManager.py": ["/Parameters.py", "/Reporting.py"], "/__main__.py": ["/Parameters.py", "/Reporting.py", "/files/FileBrowser.py", "/files/FileSystemManager.py", "/files/MD5Encoder.py", "/image/ExifReader.py"], "/Reporting.py": ["/Parameters.py"], "/files/MD5Encoder.py": ["/Reporting.py"], "/files/FileBrowser.py": ["/Reporting.py"]} |
50,220 | vsegouin/pic-sorter | refs/heads/master | /Parameters.py | import platform
class Singleton(type):
    """Metaclass giving each class at most one instance (per-class cache)."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: the cached instance exists on every call after the first.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
# Python2
class Parameters(object):
    """Global run configuration, mutated from __main__ at startup."""
    is_verbose = False   # gate for Reporting.log output
    root_path = ""       # folder tree to sort (set from argv[1])
    dry_run = False      # when True, no files or folders are actually touched
    is_windows = platform.system() == "Windows"
    # NOTE(review): `__metaclass__` is the Python 2 spelling; Python 3
    # ignores it, so the Singleton metaclass is inactive there.
    __metaclass__ = Singleton

    # NOTE(review): a regular method named after the class, not a
    # constructor — Python constructors are spelled `__init__`.
    def Parameters(self):
        pass
| {"/files/FileSystemManager.py": ["/Parameters.py", "/Reporting.py"], "/__main__.py": ["/Parameters.py", "/Reporting.py", "/files/FileBrowser.py", "/files/FileSystemManager.py", "/files/MD5Encoder.py", "/image/ExifReader.py"], "/Reporting.py": ["/Parameters.py"], "/files/MD5Encoder.py": ["/Reporting.py"], "/files/FileBrowser.py": ["/Reporting.py"]} |
50,221 | vsegouin/pic-sorter | refs/heads/master | /files/FileBrowser.py | import os
from Reporting import Reporting
class FileBrowser:
    """Walks the root tree, yielding folders to process and keeping counts."""

    root_path = None

    def __init__(self, root_path):
        self.root_path = root_path

    def count_processed_file(self):
        """Reset counters and tally files already in the processed tree."""
        Reporting.total_file = 0
        Reporting.calculated_total_file = 0
        for root, subdirs, files in self.crawl_processed_folder():
            Reporting.calculated_total_file += len(files)

    def count_total_find(self):
        """Reset counters and tally the files still waiting to be sorted."""
        Reporting.total_file = 0
        Reporting.calculated_total_file = 0
        for root, subdirs, files in self.crawl_folders():
            Reporting.calculated_total_file += len(files)

    # Will crawl and generate all the files of the root_path
    def crawl_folders(self):
        """Yield [root, subdirs, files] for every folder except Synology
        @eaDir caches and the processed/duplicate output trees.

        Side effect: a crawled folder that has become empty is removed.
        """
        for root, subdirs, files in os.walk(self.root_path):
            Reporting.log('-- current directory = ' + root + "\n")
            if "@eaDir" in root:
                Reporting.log("it's a eadir folder continue\n")
                continue
            if "processed" in root or "duplicate" in root:
                Reporting.log("it's the processed or the duplicate folder skip")
                continue
            yield [root, subdirs, files]
            # check whether the directory is now empty after deletions, and if so, remove it
            if len(os.listdir(root)) == 0:
                os.rmdir(root)

    def crawl_processed_folder(self):
        """Yield [root, subdirs, files] only for folders under "processed"."""
        for root, subdirs, files in os.walk(self.root_path):
            if "processed" not in root:
                continue
            yield [root, subdirs, files]

    def getPath(self):
        """Return the root path this browser walks."""
        return self.root_path
| {"/files/FileSystemManager.py": ["/Parameters.py", "/Reporting.py"], "/__main__.py": ["/Parameters.py", "/Reporting.py", "/files/FileBrowser.py", "/files/FileSystemManager.py", "/files/MD5Encoder.py", "/image/ExifReader.py"], "/Reporting.py": ["/Parameters.py"], "/files/MD5Encoder.py": ["/Reporting.py"], "/files/FileBrowser.py": ["/Reporting.py"]} |
50,222 | twolfson/sublime-info | refs/heads/master | /sublime_info/__init__.py | # Load in local modules for extension
from __future__ import absolute_import
from sublime_info.SublimeInfo import SublimeInfo
from sublime_info.errors import *
# Define sugar methods — module-level shorthands for the SublimeInfo classmethods.
def get_sublime_path():
    """Sugar for :meth:`SublimeInfo.get_sublime_path`."""
    return SublimeInfo.get_sublime_path()

def get_sublime_version():
    """Sugar for :meth:`SublimeInfo.get_sublime_version`."""
    return SublimeInfo.get_sublime_version()

def get_package_directory():
    """Sugar for :meth:`SublimeInfo.get_package_directory`."""
    return SublimeInfo.get_package_directory()
| {"/sublime_info/__init__.py": ["/sublime_info/SublimeInfo.py", "/sublime_info/errors.py"], "/sublime_info/SublimeInfo.py": ["/sublime_info/errors.py"], "/test/sublime-info_test.py": ["/sublime_info/__init__.py"]} |
50,223 | twolfson/sublime-info | refs/heads/master | /setup.py | from setuptools import setup, find_packages
# Packaging metadata for the sublime_info distribution.
setup(
    name='sublime_info',
    version='0.2.0',
    description='Gather information about Sublime Text',
    # PyPI long description comes straight from the README.
    long_description=open('README.rst').read(),
    keywords=[
        'sublime',
        'sublime text',
        'info',
        'plugin'
    ],
    author='Todd Wolfson',
    author_email='todd@twolfson.com',
    url='https://github.com/twolfson/sublime-info',
    download_url='https://github.com/twolfson/sublime-info/archive/master.zip',
    packages=find_packages(),
    license='UNLICENSE',
    # Runtime dependencies live in requirements.txt, one per line.
    install_requires=open('requirements.txt').readlines(),
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: Public Domain',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Text Editors'
    ]
)
| {"/sublime_info/__init__.py": ["/sublime_info/SublimeInfo.py", "/sublime_info/errors.py"], "/sublime_info/SublimeInfo.py": ["/sublime_info/errors.py"], "/test/sublime-info_test.py": ["/sublime_info/__init__.py"]} |
50,224 | twolfson/sublime-info | refs/heads/master | /sublime_info/SublimeInfo.py | # Load in core, 3rd party, and local dependencies
from __future__ import absolute_import
import os
import re
import subprocess
from sublime_info.errors import STNotResolvedError, STBadLocationError
try:
from shutil import which
except ImportError: # Python 2 fallback
from shutilwhich import which
class SublimeInfo(object):
# Load in an environment variable constant
sublime_path=os.environ.get('SUBLIME_TEXT_PATH', None)
    # Define init
    def __init__(self, sublime_path=None):
        """Optionally pin the Sublime Text binary for this instance.

        :param sublime_path: explicit path to the ``subl``/``sublime_text``
            binary; when omitted, the class-level value (seeded from the
            ``SUBLIME_TEXT_PATH`` env var) remains in effect.
        """
        # Allow for customizaton of sublime_path
        if sublime_path:
            self.sublime_path = sublime_path
# Define internal lookup which ignores
@classmethod
def _get_sublime_path(cls):
# Attempt to resolve Sublime Text
path = (which('subl') or
which('sublime_text'))
# If Sublime is not found, raise our exception
if not path:
raise STNotResolvedError(
'Sublime Text could not be found via the command "%s" or "%s"' %
('subl',
'sublime_text'))
# Otherwise, return the path
return path
    @classmethod
    def get_sublime_path(cls):
        """Resolve Sublime Text path (e.g. /usr/bin/subl)

        If ``SUBLIME_TEXT_PATH`` is provided via environment variables, it will be used.
        Otherwise, a ``which``-like resolution will be returned.

        :raises STBadLocationError: If the configured path does not exist.
        :raises STNotResolvedError: If Sublime Text cannot be resolved on the PATH.
        :returns: ``SUBLIME_TEXT_PATH`` or ``which``-like resolution
        :rtype: str
        """
        # If sublime_path is provided, verify it exists.
        # cls.sublime_path defaults to the SUBLIME_TEXT_PATH env var and may
        # have been overridden per-instance in __init__.
        sublime_path = cls.sublime_path
        if sublime_path:
            if not os.path.exists(sublime_path):
                raise STBadLocationError(
                    'Sublime Text could not be found at "%s"' % sublime_path)
        # Otherwise, use the internal lookup
        else:
            sublime_path = cls._get_sublime_path()
        # Return the found path
        return sublime_path
@classmethod
def get_sublime_version(cls):
"""Resolve Sublime Text version (e.g. 2221, 3083)
Sublime Text is resolved via ``get_sublime_path``
:raises Exception: If the Sublime Text version cannot be parsed, an error will be raised.
:returns: Version of Sublime Text returned by ``sublime_text --version``.
:rtype: int
"""
# Get the path to sublime and grab the version
sublime_path = cls.get_sublime_path()
child = subprocess.Popen([sublime_path, '--version'], stdout=subprocess.PIPE)
version_stdout = str(child.stdout.read())
# Kill the child
child.kill()
# Parse out build number from stdout
# Sublime Text 2 Build 2221
# Sublime Text Build 3083
version_match = re.search(r'\d{4}', version_stdout)
if not version_match:
raise Exception('Sublime Text version not found in "%s"' % version_stdout)
# Coerce and return the version
return int(version_match.group(0))
@classmethod
def get_package_directory(cls):
"""Resolve Sublime Text package directory (e.g. /home/todd/.config/sublime-text-2/Packages)
:raises Exception: If the Sublime Text version is not recognized, an error will be raised.
:returns: Path to Sublime Text's package directory
:rtype: str
"""
# TODO: On Windows, OSX these will not be the same
# Get the version
version = cls.get_sublime_version()
# Run Linux-only logic for pkg_dir
pkg_dir = None
if version >= 2000 and version < 3000:
pkg_dir = os.path.expanduser('~/.config/sublime-text-2/Packages')
elif version >= 3000 and version < 4000:
pkg_dir = os.path.expanduser('~/.config/sublime-text-3/Packages')
# Assert the package dir was found
if not pkg_dir:
raise Exception('Sublime Text version "%s" not recognized' % version)
# Return the package directory
return pkg_dir
| {"/sublime_info/__init__.py": ["/sublime_info/SublimeInfo.py", "/sublime_info/errors.py"], "/sublime_info/SublimeInfo.py": ["/sublime_info/errors.py"], "/test/sublime-info_test.py": ["/sublime_info/__init__.py"]} |
50,225 | twolfson/sublime-info | refs/heads/master | /test/sublime-info_test.py | # Load in dependencies
import os
from unittest import TestCase
# Load in local dependencies
import sublime_info
# Outline tests
"""
# The majority of these will be satisfied via .travis.yml
Sublime Text as subl
resolved via `get_sublime_path`
has a path of `/usr/bin/subl`
Sublime Text as sublime_text
resolved via `get_sublime_path`
has a path of `/usr/bin/sublime_text`
Sublime Text as sublime_texttt
specified via SUBLIME_TEXT_PATH
resolved via `get_sublime_path`
has a path of `/usr/bin/sublime_texttt`
sublime_info
attempting to resolve Sublime Text
when it does not exist
raises a STNotFoundError
TODO: What about sublime_text.exe
TODO: What about sublime_text.app
TODO: Create test.sh/cmd for OSX and Windows which do both EXPECT ERROR and not
TODO: Installed package dir
TODO: Platform
TODO: Arch
"""
# If we are in an error test
# EXPECT_ERROR selects between two mutually exclusive test suites: one for
# machines where Sublime Text is deliberately absent, one for normal runs.
if os.environ.get('EXPECT_ERROR', None):
    class TestGetSublimePathError(TestCase):
        def test_get_sublime_path_raises(self):
            """Sublime Text missing from disk raises an STNotFoundError."""
            self.assertRaises(sublime_info.STNotFoundError,
                              sublime_info.get_sublime_path)
else:
    class TestGetSublimePathNormal(TestCase):
        def test_get_sublime_path_finds_path(self):
            """The resolved binary path matches EXPECTED_PATH."""
            self.assertEqual(os.environ['EXPECTED_PATH'],
                             sublime_info.get_sublime_path())
        def test_get_sublime_version_resolves_version(self):
            """The resolved build number matches EXPECTED_VERSION."""
            self.assertEqual(int(os.environ['EXPECTED_VERSION']),
                             sublime_info.get_sublime_version())
        def test_get_sublime_version_locates_pkg_dir(self):
            """The resolved package directory matches EXPECTED_PKG_DIR."""
            self.assertEqual(os.environ['EXPECTED_PKG_DIR'],
                             sublime_info.get_package_directory())
| {"/sublime_info/__init__.py": ["/sublime_info/SublimeInfo.py", "/sublime_info/errors.py"], "/sublime_info/SublimeInfo.py": ["/sublime_info/errors.py"], "/test/sublime-info_test.py": ["/sublime_info/__init__.py"]} |
50,226 | twolfson/sublime-info | refs/heads/master | /sublime_info/errors.py | # Define a custom exception for when Sublime cannot be located
class STNotFoundError(Exception):
    """Base error: Sublime Text could not be located at all."""


class STNotResolvedError(STNotFoundError):
    """Sublime Text was not found via command resolution (subl/sublime_text)."""


class STBadLocationError(STNotFoundError):
    """An explicitly provided Sublime Text path does not exist on disk."""
| {"/sublime_info/__init__.py": ["/sublime_info/SublimeInfo.py", "/sublime_info/errors.py"], "/sublime_info/SublimeInfo.py": ["/sublime_info/errors.py"], "/test/sublime-info_test.py": ["/sublime_info/__init__.py"]} |
50,240 | rwsproat/writing_evolution | refs/heads/master | /flags.py | ## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Author: Richard Sproat (rws@xoba.com)
"""Defines poor man's command-line flag parsing.
"""
import getopt
import sys
_FLAGS = []
_DUMMY_STR_FUNCTION_TEMPLATE = """def __x():
global FLAGS_%s
FLAGS_%s = "%s"
"""
_DUMMY_NUM_FUNCTION_TEMPLATE = """def __x():
global FLAGS_%s
FLAGS_%s = %s
"""
def set_dummy_function_template(option, default_value):
  """Builds the runnable function template.

  Tries to interpret the default as an int, then a float; otherwise the
  value is embedded as a quoted string.

  Args:
    option: name of flag
    default_value: default value for flag
  Returns:
    Function template to be compiled that sets the variables
  """
  for caster in (int, float):
    try:
      return _DUMMY_NUM_FUNCTION_TEMPLATE % (option, option,
                                             caster(default_value))
    except ValueError:
      pass
  return _DUMMY_STR_FUNCTION_TEMPLATE % (option, option, default_value)
def define_flag(option, default_value, documentation):
  """Defines the flag and sets the default value and documentation.

  Registers the flag in the module-level _FLAGS list and creates a
  module-global variable named FLAGS_<option> holding the default value.

  Args:
    option: name of flag
    default_value: default value for flag
    documentation: documentation string
  Returns:
    None
  """
  _FLAGS.append((option, default_value, documentation))
  function_template = set_dummy_function_template(option, default_value)
  # The template defines a function __x() whose body assigns the global
  # FLAGS_<option>; exec-ing it here makes __x callable below.
  # NOTE(review): this relies on Python 2 exec-in-function semantics (the
  # file uses print statements, so it is Python 2); under Python 3 `__x`
  # would not become visible as a local — confirm before porting.
  exec(function_template)
  __x()
def usage():
  """Prints usage given the set of supplied flags.

  Emits one line per registered flag: name, documentation, and default.

  Returns:
    None
  """
  for flag_name, default, doc in _FLAGS:
    # Parenthesized single-argument print works identically on Python 2/3.
    print('\t\t--%s\t"%s" (%s)' % (flag_name, doc, default))
def parse_flags(argv):
  """Parses the flags given the argument list.

  Each recognized ``--flag value`` pair updates the module-global
  FLAGS_<flag> variable via the generated setter template.

  Args:
    argv: argument vector (program name excluded, i.e. sys.argv[1:])
  Returns:
    None
  """
  try:
    # Every registered flag becomes a value-taking long option ("name=").
    optform = [flag[0] + '=' for flag in _FLAGS]
    opts, args = getopt.getopt(argv, '', optform)
  except getopt.GetoptError as err:
    print(str(err))
    # BUG FIX: was `Usage()`, which is a NameError — the helper defined in
    # this module is `usage`.
    usage()
    sys.exit(1)
  for opt, arg in opts:
    opt = opt.replace('--', '')
    function_template = set_dummy_function_template(opt, arg)
    # Relies on Python 2 exec-in-function semantics to define __x locally
    # (consistent with define_flag above).
    exec(function_template)
    __x()
| {"/builder.py": ["/pynini_interface.py"]} |
50,241 | rwsproat/writing_evolution | refs/heads/master | /concepts.py | # -*- coding: utf-8 -*-
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Author: Richard Sproat (rws@xoba.com)
"""Defines the set of concepts, linking them to plausible graphical forms.
"""
CONCEPTS = {
'@PERSON' : '⚥',
'@MAN' : '♂',
'@WOMAN' : '♀',
'@HOUSE' : '☖',
'@BRONZE' : '▰',
'@GOLD' : 'ꃑ',
'@SILVER' : 'ꂢ',
'@SWORD' : '⚔',
'@MEAT' : '☣',
'@SHEEP' : '♈',
'@OX' : '♉',
'@GOAT' : '♑',
'@FISH' : 'ꈊ',
'@TREE' : 'ꈔ',
'@BARLEY' : 'ꆡ',
'@WHEAT' : 'ꎺ',
'@WATER' : '♒',
'@STONE' : '░',
'@CLOTHING' : 'ꆘ',
'@FIELD' : 'ꐚ',
'@TEMPLE' : '▟',
'@GOD' : '☸',
'@AXE' : '⁋',
'@SCYTHE' : '☭',
'@DOG' : 'ꀳ',
'@LION' : '♌',
'@WOLF' : 'ꉴ',
'@DEMON' : '☿',
'@SNAKE' : '⚕',
'@TURTLE' : 'ꉸ',
'@FRUIT' : '☌',
'@HILL' : '☶',
'@CAVE' : '◠',
'@TOWN' : '♖',
'@ENCLOSURE' : '☐',
'@FLOWER' : '⚜',
'@RAIN' : '☔',
'@THUNDER' : '⚡',
'@CLOUD' : '☁',
'@SUN' : '☉',
'@MOON' : '☽',
'@HEART' : '♡',
'@LUNG' : '♺',
'@LEG' : '⼅',
'@ARM' : 'ꁳ',
'@FINGER' : '☚',
'@HEAD' : '☺',
'@TONGUE' : 'ꇩ',
'@EYE' : '◎',
'@EAR' : 'ꎙ',
'@NOSE' : 'ꎔ',
'@GUTS' : '♽',
'@PENIS' : 'ꅏ',
'@VAGINA' : '⚏',
'@HAIR' : 'ꊤ',
'@SKIN' : 'ꁸ',
'@SHELL' : 'ꌋ',
'@BONE' : 'ꀷ',
'@BLOOD' : 'ꋓ',
'@LIVER' : '❦',
'@FARM' : 'ꌆ',
'@LOCUST' : 'ꋸ',
'@STICK' : '▕',
'@STAR' : '☆',
'@EARTH' : '☷',
'@ASS' : '♘',
'@DEATH' : '☠',
'@BIRTH' : '〄',
'@WOMB' : '☤',
'@MILK' : 'ꎍ',
'@COAL' : '■',
'@SEED' : 'ꄍ',
'@LEAF' : '☘',
'@CHILD' : 'ꐕ',
'@ANTELOPE' : 'ꎢ',
'@BEAR' : 'ꄕ',
'@BEE' : 'ꎁ',
'@MOUSE' : 'ꎃ',
'@DUNG' : '♨',
'@PLOUGH' : '♠',
'@SPROUT' : '♧',
'@ICE' : '⼎',
'@DAY' : '☀',
'@NIGHT' : '☾',
'@WINTER' : '☃',
'@SUMMER' : '☼',
'@AUTUMN' : 'ꏃ',
'@SPRING' : 'ꐳ',
'@KING' : '♔',
'@GOOSE' : 'ꎉ',
'@PRIEST' : '♗',
'@ROAD' : '⚌',
'@CART' : 'ꌴ',
'@GRASS' : 'ꍓ',
'@FIRE' : '☲',
'@WIND' : '☴',
'@NAIL' : '丁',
'@BREAST' : '⚆',
'@BOWL' : '◡',
'@CUP' : '☕',}
NUM_CONCEPTS = len(CONCEPTS)
| {"/builder.py": ["/pynini_interface.py"]} |
50,242 | rwsproat/writing_evolution | refs/heads/master | /lexicon.py | #!/usr/bin/env python
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Author: Richard Sproat (rws@xoba.com)
"""Defines the lexicon, morphemes, and concepts associated with morphemes.
Then runs the simulation.
"""
# NB: "semantics" and "concept" are used interchangeably
# TODO(rws): This seems to generate rather too many morphemes
# associated with a particular concept (e.g. 36 for TEMPLE).
import builder
import concepts
import flags
import log
import os
import random
import re
import sys
import time
from pynini_interface import sounds_like
# Maximum distance that a closest pronunciation can have
_MAX_DISTANCE = 0.6
# Probability of reusing an existing spelling
# TODO(rws): Make this a parameter
_PROBABILITY_TO_REUSE_SPELLING = 0.01
# Markup colors
_BLUE = '\033[34m%s\033[0m'
_RED = '\033[31m%s\033[0m'
def _clean_colors(string):
"""Helper to clean up shell color markers from string.
Args:
string: string with possible color markers
Returns:
cleaned string
"""
string = string.replace('\033[34m', '')
string = string.replace('\033[31m', '')
string = string.replace('\033[0m', '')
return string
def _phonetic_color(string):
  """Adds color for phonetic graphemes to string.

  Any existing color markers are stripped first so colors never nest.

  Args:
    string: string with symbols
  Returns:
    string with phonetic marker (_RED)
  """
  stripped = _clean_colors(string)
  return _RED % stripped
def _semantic_color(string):
  """Adds color for semantic graphemes to string.

  Any existing color markers are stripped first so colors never nest.

  Args:
    string: string with symbols
  Returns:
    string with semantic marker (_BLUE)
  """
  stripped = _clean_colors(string)
  return _BLUE % stripped
def _uniqify_symbol_list(symbols):
"""Removes duplicate symbols from list.
Args:
symbols: list of symbols
Returns:
uniquified list
"""
seen = set()
new_symbols = []
for symbol in symbols:
if symbol.name not in seen:
new_symbols.append(symbol)
seen.add(symbol.name)
return new_symbols
def _clean_name(name):
"""Cleans up symbol name of bracketings for presentation.
Args:
name: symbol name
Returns:
cleaned name
"""
name = re.sub(r'\[[A-Za-z0-9\.]+\]', '', name)
name = re.sub(r'\([A-Za-z0-9\.]+\)', '', name)
name = name.replace('{', '').replace('}', '')
return name
# BEGIN: class Lexicon
class Lexicon(object):
  """Holder for morphemes.

  Indexes morphemes both by pronunciation and by concept name, and tracks
  which written spellings are already in use — overall, as phonetic
  components, and as semantic components — so that spelling generation can
  avoid (or deliberately reuse) them.
  """
  def __init__(self):
    """Sets up tables to allow lookup of morphemes by sound or meaning.
    semantics_to_morphemes_primary indicates which morpheme is
    considered the primary exponent of a concept.
    """
    self._phonology_to_morphemes = {}   # pronunciation -> [Morpheme]
    self._semantics_to_morphemes = {}   # concept name -> [Morpheme]
    self._semantics_to_morphemes_primary = {}  # concept name -> Morpheme
    self._used_spellings = set()       # str(Symbol)s already assigned
    self._used_pron_spellings = set()  # spellings used phonetically
    self._used_sem_spellings = set()   # spellings used semantically
    self._morphemes = []               # all morphemes, in insertion order
    self._matrix = {} # Distance matrix to be used by PhonologicalDistance
    self._phonetics_frozen = False
    self._semantics_frozen = False
  def add_morpheme(self, morpheme):
    """Adds a morpheme to the lexicon.
    Args:
      morpheme: a Morpheme instance
    Returns:
      None
    """
    phonology = morpheme.phonology
    semantics = morpheme.semantics
    spelling = morpheme.symbol
    if phonology in self._phonology_to_morphemes:
      self._phonology_to_morphemes[phonology].append(morpheme)
    else:
      self._phonology_to_morphemes[phonology] = [morpheme]
    if semantics.name in self._semantics_to_morphemes:
      self._semantics_to_morphemes[semantics.name].append(morpheme)
    else:
      self._semantics_to_morphemes[semantics.name] = [morpheme]
    if morpheme.is_primary:
      self._semantics_to_morphemes_primary[semantics.name] = morpheme
    if spelling:
      str_spelling = str(spelling)
      self._used_spellings.add(str_spelling)
    self._morphemes.append(morpheme)
  def find_morphemes(self, key):
    """Finds morphemes by sound or meaning.
    The phonology table is consulted first, then the semantics table.
    Args:
      key: key to search in phonology or semantics tables
    Returns:
      morphemes related to key (empty list if no match in either table)
    """
    try:
      return self._phonology_to_morphemes[key]
    except KeyError:
      pass
    try:
      return self._semantics_to_morphemes[key]
    except KeyError:
      return []
  def apply_ablaut(self):
    """Applies an ablauting operation to all of the morphs.
    This results in multiple phonological forms being associated with the same
    morpheme.
    Returns:
      None
    """
    # NOTE(review): keys() is indexed below, which assumes Python 2's
    # list-returning dict.keys() (the file is Python 2 throughout).
    keys = self._phonology_to_morphemes.keys()
    ablauted = builder.apply_ablaut(keys)
    i = 0
    while i < len(keys):
      phonology = ablauted[i]
      for morpheme in self._phonology_to_morphemes[keys[i]]:
        # We have already ablauted this morpheme
        if morpheme.marked: continue
        morpheme.add_alternative_phonology(phonology)
        # Mark so a morpheme listed under several prons is ablauted once.
        morpheme.mark()
        if phonology in self._phonology_to_morphemes:
          if morpheme not in self._phonology_to_morphemes[phonology]:
            self._phonology_to_morphemes[phonology].append(morpheme)
        else:
          self._phonology_to_morphemes[phonology] = [morpheme]
      i += 1
    # Finally unmark all the morphemes
    for key in self._phonology_to_morphemes:
      for morpheme in self._phonology_to_morphemes[key]:
        morpheme.unmark()
  def dump_morphemes(self, outfile=None):
    """Writes out morphemes to a file, or to stdout.
    Args:
      outfile: An output file, or stdout if None
    Returns:
      None
    """
    stream = sys.stdout
    if outfile:
      stream = open(outfile, 'w')
    for key in self._phonology_to_morphemes:
      for morpheme in self._phonology_to_morphemes[key]:
        stream.write('%s\t%s\t%s\n' % (key, morpheme.symbol_name(), morpheme))
    if outfile:
      stream.close()
  def pronunciations(self):
    """Returns all pronunciations.
    """
    return self._phonology_to_morphemes.keys()
  def useful_pronunciations(self):
    """Returns useful pronunciations: those associated with written symbols.
    """
    prons = []
    for pron in self._phonology_to_morphemes:
      for morpheme in self._phonology_to_morphemes[pron]:
        if not morpheme.symbol: continue
        # One symbol-bearing morpheme suffices; record pron once and move on.
        prons.append(pron)
        break
    return prons
  def dump_morphs(self, outfile=None):
    """Writes out morphs to a file, or to stdout.
    Args:
      outfile: An output file, or stdout if None
    Returns:
      None
    """
    stream = sys.stdout
    if outfile:
      stream = open(outfile, 'w')
    for key in self._phonology_to_morphemes:
      stream.write('%s\n' % key)
    if outfile:
      stream.close()
  def used_spellings(self):
    """Returns list of spellings that are already used.
    """
    return list(self._used_spellings)
  def get_symbols_from_pron(self, pron):
    """Finds and returns all symbols associated with this pronunciation.
    Converts these to phonological components (red-colored).  When the
    phonetic inventory is frozen, only spellings already used phonetically
    are returned.
    """
    if pron not in self._phonology_to_morphemes: return []
    result = []
    for morpheme in self._phonology_to_morphemes[pron]:
      if morpheme.symbol:
        symbol = Symbol(morpheme.symbol.name, pron)
        if (self._phonetics_frozen and
            str(symbol) not in self._used_pron_spellings):
          log.log('Disallowing use of {} as phonetic'.format(str(symbol)))
          continue
        symbol._colored_name = _phonetic_color(morpheme.symbol.colored_name)
        result.append(symbol)
    return _uniqify_symbol_list(result)
  def get_symbols_from_sem(self, sem):
    """Finds and returns all symbols associated with this meaning.
    Symbols are colored blue (semantic).  When the semantics are frozen,
    only spellings already used semantically are returned.
    """
    if sem not in self._semantics_to_morphemes: return []
    result = []
    for morpheme in self._semantics_to_morphemes[sem]:
      if morpheme.symbol:
        symbol = Symbol(morpheme.symbol.name, sem)
        if (self._semantics_frozen and
            str(symbol) not in self._used_sem_spellings):
          log.log('Disallowing use of {} as semantic'.format(str(symbol)))
          continue
        symbol._colored_name = _semantic_color(morpheme.symbol.colored_name)
        # TODO(rws): This needs to be reworked since we don't necessarily "use"
        # this below, so it could be returned to be recycled.
        self._used_sem_spellings.add(str(symbol))
        result.append(symbol)
    return _uniqify_symbol_list(result)
  def generate_new_spellings(self):
    """Generates new spellings with some probability for each morpheme.
    Works on morphemes that have no spelling. UsefulPronunciations are
    updated to PhonologicalDistance once each cycle.
    Returns:
      None
    """
    useful_pronunciations = self.useful_pronunciations()
    log.log('# of useful pronunciations = %d' % len(useful_pronunciations))
    distance = PhonologicalDistance(useful_pronunciations, self._matrix)
    morphemes_without_symbols = []
    for morpheme in self._morphemes:
      if not morpheme.symbol:
        morphemes_without_symbols.append(morpheme)
    log.log('# of morphemes without symbols = %d' %
            len(morphemes_without_symbols))
    # NOTE(review): init_time (here and in the loop below) is assigned but
    # never read — apparently leftover timing instrumentation.
    init_time = time.clock()
    for morpheme in morphemes_without_symbols:
      init_time = time.clock()
      if random.random() < flags.FLAGS_probability_to_seek_spelling:
        ## TODO(rws): at some point we should add in the alternative phonology
        ## for ablauted forms, otherwise those will never participate: actually
        ## not completely true since the ablauted forms do inherit from the base
        ## form so that an ablauted form "work" might be spelled based on the
        ## phonology of the base form "werk"
        pron = morpheme.phonology
        if pron == '': continue # Shouldn't happen
        close_prons = distance.closest_prons(pron)
        phonological_spellings = []
        spelling_to_pron = {} # Stores pron associated w/ each new spelling
        for close_pron, unused_cost in close_prons:
          # Telescoped candidates come back as "pron1.pron2".
          prons = close_pron.split('.')
          if len(prons) == 1: # A single pronunciation
            spellings = self.get_symbols_from_pron(prons[0])
            for spelling in spellings:
              spelling_to_pron[spelling.name] = close_pron
            phonological_spellings += spellings
          elif len(prons) == 2: # A telescoped pronunciation
            phonological_spellings1 = self.get_symbols_from_pron(prons[0])
            phonological_spellings2 = self.get_symbols_from_pron(prons[1])
            for p1 in phonological_spellings1:
              for p2 in phonological_spellings2:
                spelling = p1 + p2
                spelling.set_denotation(close_pron)
                phonological_spellings.append(spelling)
                spelling_to_pron[spelling.name] = close_pron
        concept = morpheme.semantics
        semantic_spellings = []
        for sem in concept.name.split(','):
          semantic_spellings += self.get_symbols_from_sem(sem)
        # Also tries the whole composite concept:
        if ',' in concept.name:
          semantic_spellings += self.get_symbols_from_sem(concept.name)
        new_spellings = phonological_spellings + semantic_spellings
        log_string = '\n>>>>>>>>>>>>>>>>>>>>>>>>>\n'
        log_string += 'For morpheme: %s, %s:\n' % (concept, pron)
        log_string += 'Phonetic spellings:\n'
        for phonological_spelling in phonological_spellings:
          log_string += '%s\n' % phonological_spelling
        log_string += 'Semantic spellings:\n'
        for semantic_spelling in semantic_spellings:
          log_string += '%s\n' % semantic_spelling
        log_string += '<<<<<<<<<<<<<<<<<<<<<<<<<'
        log.log(log_string)
        # Semantic+phonetic compound candidates.
        for phonological_spelling in phonological_spellings:
          for semantic_spelling in semantic_spellings:
            combo_spelling = semantic_spelling + phonological_spelling
            combo_spelling.set_denotation(semantic_spelling.denotation + '+' +
                                          phonological_spelling.denotation)
            spelling_to_pron[
                combo_spelling.name] = spelling_to_pron[
                    phonological_spelling.name]
            new_spellings.append(combo_spelling)
        # TODO(rws): this is an experiment. Note that with this setting,
        # eliminating the ridiculously long spellings then picking randomly from
        # among these, gets a proportion of semantic/phonetic spellings of 0.32
        # for the MONOSYLLABLE setting.
        tmp = []
        for sp in new_spellings:
          if len(sp) < 5: tmp.append(sp)
        new_spellings = tmp
        random.shuffle(new_spellings)
        # Whereas with this setting, commented out for now, always favoring the
        # absolute shortest, semphon is much lower for 1000, though if you
        # increase to 5000 it gets to around 0.22. Presumably that is because
        # with the larger vocab one starts to actually need the semphon
        # spellings:
        #
        # new_spellings.sort(lambda x, y: cmp(len(x), len(y)))
        for spelling in new_spellings:
          reuse = str(spelling) in self._used_spellings
          if (not reuse or random.random() < _PROBABILITY_TO_REUSE_SPELLING):
            pron = ''
            if spelling.name in spelling_to_pron:
              pron = spelling_to_pron[spelling.name]
            morpheme.set_spelling(spelling)
            self._used_spellings.add(str(spelling))
            log_string = 'Spelling: %s\t' % spelling
            log_string += 'Morpheme: %s\t' % str(morpheme)
            if pron:
              self._used_pron_spellings.add(str(spelling))
              log_string += 'Source-pronunciation: %s\t' % pron
            if reuse:
              log_string += 'Reuse'
            log.log(log_string)
            break
  def log_pron_to_symbol_map(self):
    """Adds pron/symbol mapping for the (usually final) lexicon.
    Returns:
      None
    """
    for pron in self._phonology_to_morphemes:
      for morpheme in self._phonology_to_morphemes[pron]:
        if morpheme.symbol:
          log.log('SYMBOL:\t{}\t{}'.format(morpheme.symbol, pron))
  def freeze_phonetics(self):
    """Freezes the phonetics.
    """
    self._phonetics_frozen = True
  def freeze_semantics(self):
    """Freezes the semantics.
    """
    self._semantics_frozen = True
# END: class Lexicon
# BEGIN: class Morpheme
class Morpheme(object):
  """Container for a morpheme object consisting of sound paired with meaning.

  Bundles a base pronunciation (plus optional ablaut variants), a Concept,
  an optional written Symbol, and a flag saying whether this morpheme is
  the primary exponent of its concept.
  """

  def __init__(self, phonology, semantics, symbol, is_primary):
    self._phonology = phonology
    # Extra pronunciations introduced by e.g. ablaut.
    self._alternative_phonology = []
    self._semantics = semantics
    self._symbol = symbol
    # Set view of the comma-separated concept primitives.
    self._semantics_set = set(semantics.name.split(','))
    self._is_primary = is_primary  # Is the primary exponent of this concept
    # Book-keeping placeholder to mark whether an operation has applied:
    self._marked = False

  def __repr__(self):
    alt = ''
    if self._alternative_phonology:
      alt = '(%s)' % ','.join(self._alternative_phonology)
    written = ''
    if self._symbol:
      written = '<%s:%s:%s>' % (str(self._symbol),
                                self._symbol.symbols(),
                                self._symbol.type())
    return '{%s%s:%s:%s:%d}' % (self._phonology,
                                alt,
                                str(self._semantics).replace('@', ''),
                                written,
                                self._is_primary)

  @property
  def phonology(self):
    return self._phonology

  @property
  def semantics(self):
    return self._semantics

  @property
  def is_primary(self):
    return self._is_primary

  @property
  def marked(self):
    return self._marked

  @property
  def symbol(self):
    return self._symbol

  def symbol_name(self):
    if not self._symbol:
      return '<NO_SYMBOL>'
    return self._symbol.colored_name

  def mark(self):
    self._marked = True

  def unmark(self):
    self._marked = False

  def has_semantics(self, semantics):
    return semantics in self._semantics_set

  def add_alternative_phonology(self, phonology):
    if phonology not in self._alternative_phonology:
      self._alternative_phonology.append(phonology)

  def set_spelling(self, spelling):
    self._symbol = spelling
# END: class Morpheme
# BEGIN: class Concept
class Concept(object):
  """A representation of meaning.

  The name is a comma-separated set of primitive concept labels.
  """

  def __init__(self, name):
    self._name = name

  def __repr__(self):
    return self._name

  @property
  def name(self):
    return self._name
# END: class Concept
# BEGIN: class Symbol
class Symbol(object):
  """Representation for a symbol and what kind of thing it represents.

  name is a string of one or more symbols; the denotation is a concept
  ('@...'), a pronunciation, or a 'sem+phon' combination.
  """

  def __init__(self, name, denotation=None):
    """Denotation type defaults to semantic."""
    self._name = name
    self._denotation = denotation
    # _colored_name is the symbol's spelling marked with red or blue according
    # to whether it is being used as phonetic or semantic.
    if not denotation or denotation.startswith('@'):
      self._colored_name = _BLUE % name
    else:
      self._colored_name = _RED % name

  def __repr__(self):
    if self._denotation.startswith('@'):
      rendered = '[%s]' % self._denotation[1:]
    else:
      rendered = '(%s)' % self._denotation
    return '{%s}%s' % (self._name, rendered)

  def __add__(self, other):
    combined = Symbol(str(self) + str(other))
    combined._colored_name = self.colored_name + other.colored_name
    return combined

  def __len__(self):
    """Length is the number of basic symbols comprising this."""
    # Only non-ASCII characters count as symbols (Python 2 unicode API).
    return sum(1 for c in unicode(str(self), 'utf8') if ord(c) > 128)

  @property
  def name(self):
    return self._name

  @property
  def denotation(self):
    return self._denotation

  @property
  def colored_name(self):
    return self._colored_name

  def set_denotation(self, denotation):
    self._denotation = denotation

  def type(self):
    """Classification as S(emantic), P(honetic), or SP."""
    if '+' in self._denotation:
      return 'SP'
    if self._denotation.startswith('@'):
      return 'S'
    return 'P'

  def symbols(self):
    """Returns string of just the symbols."""
    return ''.join(
        c for c in unicode(str(self), 'utf8') if ord(c) > 128).encode('utf8')
# END: class Symbol
# BEGIN: class LexiconGenerator
class LexiconGenerator(object):
  """Generator for lexicon with specified number of morphs and base morph type.
  """

  def __init__(self, nmorphs=5000, base_morph='MONOSYLLABLE'):
    self._nmorphs = nmorphs
    self._base_morph = base_morph
    # True until the generation grammars have been built once.
    self._initial = True

  def select_morphs(self, morphs):
    """Helper function to select from 1 to 3 morphs from a sequence.

    Because the result is a set, duplicate random draws can yield fewer
    morphs than were drawn.

    Args:
      morphs: list of morphs
    Returns:
      random selection among the morphs
    """
    selections = set()
    num_selections = random.choice([1, 2, 3])
    for _ in range(num_selections):
      selections.add(random.choice(morphs))
    return selections

  def concept_combinations(self, concepts):
    """Randomly generates a set of one to three concepts.

    Args:
      concepts: list of concepts
    Returns:
      comma-joined random selection among the concepts
    """
    num_concepts = random.choice([1, 2, 3])
    return ','.join(random.sample(concepts, num_concepts))

  def generate(self, force=False):
    """Generates and returns a lexicon.

    Args:
      force: if True then force (re)building of the grammars.
    Returns:
      a Lexicon
    """
    if self._initial or force:
      builder.build_morphology_grammar()
      builder.build_soundslike_grammar()
      # BUG FIX: _initial was never cleared, so the grammars were rebuilt on
      # every call regardless of `force`; clear it so `force` is meaningful.
      self._initial = False
    morphs = builder.generate_morphs(self._base_morph, self._nmorphs)
    # Gets the concepts.  list() so random.sample works on Python 3 too.
    concepts_ = concepts.CONCEPTS
    concept_names = list(concepts_.keys())
    lexicon = Lexicon()
    seen_morphs = set()
    for concept in concept_names:
      selections = self.select_morphs(morphs)
      is_primary = True
      for morph in selections:
        seen_morphs.add(morph)
        # As this has a basic concept, assign the symbol associated with this
        # concept as the symbol.
        # TODO(rws): make it a parameter whether the non-primaries get the
        # symbol.
        my_symbol = None
        if is_primary or flags.FLAGS_initialize_non_primaries_with_symbol:
          my_symbol = Symbol(concepts_[concept], concept)
        lexicon.add_morpheme(Morpheme(morph,
                                      Concept(concept),
                                      my_symbol,
                                      is_primary))
        is_primary = False
    # After that the next sets of morphemes are assigned to random combinations
    # of concepts, one per morph.  The first morpheme seen for a combination is
    # its primary exponent.
    combinations = set()
    for morph in morphs:
      if morph in seen_morphs: continue
      concept = self.concept_combinations(concept_names)
      lexicon.add_morpheme(Morpheme(morph,
                                    Concept(concept),
                                    None,
                                    concept not in combinations))
      combinations.add(concept)
    return lexicon
# END: class LexiconGenerator
# BEGIN: class PhonologicalDistance
class PhonologicalDistance(object):
  """Computes the phonological distance for a set of terms."""

  def __init__(self, pronunciations, matrix=None):
    """
    Args:
      pronunciations: list of pronunciations; extended in place with
        telescoped combinations.
      matrix: optional persistent memo dict mapping (pron1, pron2) to a
        length-weighted cost; pass the same dict across instances to reuse
        previously computed distances.
    """
    self._pronunciations = pronunciations
    # BUG FIX: the signature used the mutable default `matrix={}`, which is a
    # single dict silently shared by every instance constructed without an
    # explicit matrix.
    self._matrix = {} if matrix is None else matrix
    self._telescopings = {}
    self.compute_cross_product()

  def __memoize__(self, pron1, pron2):
    """Memoizes the distance for a particular pair of prons for efficiency.

    Args:
      pron1: first pronunciation
      pron2: second pronunciation
    Returns:
      the sounds_like distance between the prons, weighted by length
    """
    if pron1 == pron2:
      return 0
    if (pron1, pron2) in self._matrix:
      return self._matrix[pron1, pron2]
    length, cost = sounds_like(pron1, pron2)
    try:
      weighted_cost = cost / length
    except ZeroDivisionError:
      weighted_cost = float('Infinity')
    self._matrix[pron1, pron2] = weighted_cost
    return weighted_cost

  def compute_cross_product(self):
    """Finds all pairs p1, p2, where p1 ends in a V and p2 starts with a V.

    Telescoped pronunciations (p1 + p2 minus the shared vowel) that are not
    already attested are appended to the pronunciation list.

    Returns:
      None
    """
    pairs = []
    for p1 in self._pronunciations:
      if not builder.is_vowel(p1[-1]):
        continue
      for p2 in self._pronunciations:
        if p1[-1] == p2[0]:
          new_pron = p1 + p2[1:]
          if new_pron not in self._pronunciations:
            # The only way to get this new pronunciation is via telescoping
            # so in that case only we add this to the set of new pairs
            pair = p1 + '.' + p2
            # Note that this may be ambiguous: this just gets the last pair
            # that could produce this pronunciation.
            self._telescopings[new_pron] = pair
            pairs.append(new_pron)
    self._pronunciations += pairs

  def expand(self, pron):
    """Possibly expand into a pair of telescoped elements.

    Args:
      pron: a pronunciation
    Returns:
      telescoping of pron if in _telescopings, else pron
    """
    return self._telescopings.get(pron, pron)

  def closest_prons(self, pron1):
    """Returns an ordered list of (pron, cost) closest to pron1.

    Only candidates within _MAX_DISTANCE are returned.
    """
    result = []
    for pron2 in self._pronunciations:
      result.append((self.expand(pron2), self.__memoize__(pron1, pron2)))
    # COMPAT FIX: key= sorting is equivalent to the old cmp-based sort and
    # works on Python 3, where cmp() and the cmp argument were removed.
    result.sort(key=lambda x: x[1])
    return [x for x in result if x[1] <= _MAX_DISTANCE]
# END: class PhonologicalDistance
def main(argv):
  """Runs the writing-evolution simulation.

  Generates a lexicon, optionally applies ablaut, then iterates spelling
  generation, dumping per-epoch morpheme tables and a log under --outdir.

  Args:
    argv: command-line arguments; argv[1:] go to the flags parser
  """
  global _PROBABILITY_TO_SEEK_SPELLING
  flags.define_flag('ablaut',
                    '0',
                    'Apply ablaut')
  flags.define_flag('base_morph',
                    'MONOSYLLABLE',
                    'Base morpheme shape to use')
  flags.define_flag('initialize_non_primaries_with_symbol',
                    '0',
                    'Sets whether or not non primary morphs get the '
                    'symbol initially')
  flags.define_flag('niter',
                    '5',
                    'Number of iterations')
  flags.define_flag('nmorphs',
                    '1000',
                    'Number of morphs')
  flags.define_flag('outdir',
                    '/var/tmp/simulation',
                    'Output directory')
  # Probability that one will seek a spelling for a morpheme
  # TODO(rws): make this sensitive to lexical frequency
  flags.define_flag('probability_to_seek_spelling',
                    '0.3',
                    'Probability to seek spelling for a form')
  flags.define_flag('freeze_phonetics_at_iter',
                    '0',
                    'Do not allow any new phonetic symbols after iteration N')
  flags.define_flag('freeze_semantics_at_iter',
                    '0',
                    'Do not allow any new semantic spread after iteration N')
  flags.parse_flags(argv[1:])
  # NOTE(review): flags are declared with string defaults but used as
  # numbers below (range(), == i) — presumably parse_flags coerces types;
  # confirm in the flags module.
  generator = LexiconGenerator(nmorphs=flags.FLAGS_nmorphs,
                               base_morph=flags.FLAGS_base_morph)
  lexicon = generator.generate()
  print '{} {}'.format('Probability to seek spelling is',
                       flags.FLAGS_probability_to_seek_spelling)
  print 'Base morph is', flags.FLAGS_base_morph
  print '{} {}'.format('initialize_non_primaries_with_symbol is',
                       flags.FLAGS_initialize_non_primaries_with_symbol)
  print 'Apply ablaut =', flags.FLAGS_ablaut
  print 'outdir =', flags.FLAGS_outdir
  print 'niter =', flags.FLAGS_niter
  print 'nmorphs =', flags.FLAGS_nmorphs
  if flags.FLAGS_ablaut:
    lexicon.apply_ablaut()
  outdir = flags.FLAGS_outdir
  try:
    os.makedirs(outdir)
  except OSError:
    # Output directory already exists.
    pass
  # Epoch 0: the initial lexicon before any spelling generation.
  lexicon.dump_morphemes(outdir + '/morphemes_0000.tsv')
  with open(outdir + '/log.txt', 'w') as stream:
    log.LOG_STREAM = stream
    for i in range(1, flags.FLAGS_niter):
      if flags.FLAGS_freeze_phonetics_at_iter == i:
        lexicon.freeze_phonetics()
      if flags.FLAGS_freeze_semantics_at_iter == i:
        lexicon.freeze_semantics()
      print 'Iteration %d' % i
      log.log('Iteration %d' % i)
      lexicon.generate_new_spellings()
      lexicon.dump_morphemes(outdir + '/morphemes_%04d.tsv' % i)
    # Logged inside the 'with' so the log stream is still open.
    lexicon.log_pron_to_symbol_map()
if __name__ == '__main__':
  main(sys.argv)
| {"/builder.py": ["/pynini_interface.py"]} |
50,243 | rwsproat/writing_evolution | refs/heads/master | /builder.py | ## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Author: Richard Sproat (rws@xoba.com)
"""Builds all the needed grammars and lists, placing them in Data directory.
"""
import os
import sys
import pynini_interface
from base import _BASE
# Set of vowel segment names, populated from Grm/phonemes.tsv by
# load_vowel_definitions().
_VOWELS = set()
# TODO(rws): Remove dependency on Thrax entirely by rewriting the grammars in
# Pynini.
def build_grammar(name):
  """Builds the grammars using Thrax, and extracts the relevant fsts.

  This assumes that thrax utility thraxmakedep is accessible.

  Args:
    name: name for the grammar.
  Returns:
    None
  """
  # thraxmakedep writes a Makefile for the grammar; make compiles it into
  # a .far, and the throwaway Makefile is then removed from the cwd.
  os.system('thraxmakedep %s/Grm/%s.grm' % (_BASE, name))
  os.system('make')
  os.system('rm -f Makefile')
  # Refresh the vowel set; phonemes.tsv may have changed with the grammar.
  load_vowel_definitions()
def load_vowel_definitions():
  """Loads the vowel definitions from phonemes.tsv.

  Adds every segment whose class label starts with 'V' to the module-level
  _VOWELS set.

  Returns:
    None
  """
  with open('%s/Grm/phonemes.tsv' % _BASE) as stream:
    for line in stream:
      try:
        clas, segment = line.split()
      except ValueError:
        # Skip blank or malformed lines that do not split into two fields.
        continue
      if clas.startswith('V'):
        _VOWELS.add(segment)
def is_vowel(segment):
  """Returns true if segment is a vowel.

  Args:
    segment: name of the segment
  Returns:
    Boolean
  """
  # _VOWELS is filled by load_vowel_definitions(); empty until a grammar
  # has been built.
  return segment in _VOWELS
def build_morphology_grammar():
  """Builds the morphology grammar (Grm/morphology.grm -> morphology.far).

  Returns:
    None
  """
  build_grammar('morphology')
def build_soundslike_grammar():
  """Builds the sounds-like grammar (Grm/soundslike.grm -> soundslike.far).

  Returns:
    None
  """
  build_grammar('soundslike')
def generate_morphs(base_morph='MONOSYLLABLE', n=1000,
                    far=("%s/Grm/morphology.far" % _BASE)):
  """Generates a set of morphs according to the base_morph template.

  Args:
    base_morph: name of the base morph rule, e.g. MONOSYLLABLE
    n: number of morphs to generate
    far: path to the compiled far holding the rule
  Returns:
    list of morphs
  """
  # Loading caches the rule under its name so random_paths can look it up
  # by name.
  pynini_interface.load_rule_from_far(base_morph, far)
  return pynini_interface.random_paths(base_morph, n)
def dump_morphs(morphs, outfile=None):
  """Dumps the morphs to a file, one per line.

  Args:
    morphs: list of morphs
    outfile: output file, or stdout if None
  Returns:
    None
  """
  if outfile:
    # 'with' guarantees the handle is closed even if a write raises; the
    # previous open/close pair leaked the file on error.
    with open(outfile, 'w') as stream:
      for morph in morphs:
        stream.write(morph + '\n')
  else:
    for morph in morphs:
      sys.stdout.write(morph + '\n')
def apply_ablaut(morphs):
  """Applies the ablaut rule to a set of morphs.

  Args:
    morphs: list of morphs
  Returns:
    ablauted list of morphs, parallel to the input
  """
  ablaut_rule = pynini_interface.load_rule_from_far(
      'ABLAUT',
      '%s/Grm/morphology.far' % _BASE)
  ablauted = []
  for morph in morphs:
    morph_fst = pynini_interface.to_fst(morph)
    # Compose with the rule and keep only the single best rewrite,
    # projected onto the output side.
    result = pynini_interface.shortestpath(morph_fst * ablaut_rule)
    result.project(True)
    result = pynini_interface.to_string(result)
    # We do not allow it to delete the morph entirely.
    if result:
      ablauted.append(result)
    else:
      ablauted.append(morph)
  return ablauted
| {"/builder.py": ["/pynini_interface.py"]} |
50,244 | rwsproat/writing_evolution | refs/heads/master | /stats.py | #!/usr/bin/env python
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Author: Richard Sproat (rws@xoba.com)
"""Script that computes the basic statistics on the results of a run.
Creates an R script to produce plots in plot.R
Usage: stats.py simulation_output_directory
"""
import glob
import sys
_PLOT = """pdf("plot.pdf")
plot(nsyms, xlab="Epoch", ylab=("Prop"), ylim=c(0, 1),
type="l", col=1)
par(new=TRUE)
plot(nsemphon, xlab="Epoch", ylab=("Prop"), ylim=c(0, 1),
type="l", col=2)
par(new=TRUE)
plot(nphon, xlab="Epoch", ylab=("Prop"), ylim=c(0, 1),
type="l", col=3)
par(new=TRUE)
plot(nsem, xlab="Epoch", ylab=("Prop"), ylim=c(0, 1),
type="l", col=4)
par(new=TRUE)
plot(nphon + nsemphon, xlab="Epoch", ylab=("Prop"), ylim=c(0, 1),
type="l", col=5)
legend(1, 0.9,
col=c(1, 2, 3, 4, 5),
lty=c(1, 1, 1, 1, 1),
legend=c("# spellings", "# sem-phon", "# phon", "# sem", "all phon"))
"""
def main(argv):
  """Computes per-epoch spelling statistics and writes plot.R.

  Reads each morphemes_*.tsv under argv[1]. A line containing NO_SYMBOL has
  no spelling; the ':SP>', ':P>' and ':S>' tags mark sem-phon, phon and
  sem spellings respectively.

  Args:
    argv: argv[1] is the simulation output directory
  """
  print '%10s\t%10s\t%10s\t%10s\t%10s' % ('# morphs',
                                          'prop spell',
                                          'semphon',
                                          'phon',
                                          'sem')
  nsym_list = []
  nsemphon_list = []
  nphon_list = []
  nsem_list = []
  # NOTE(review): glob() order is platform-dependent; the per-epoch vectors
  # below assume the files come back in epoch order — confirm.
  for morph_file in glob.glob(argv[1] + '/morphemes_*.tsv'):
    nsyms = 0
    nsemphon = 0
    nphon = 0
    nsem = 0
    tot = 0.0
    with open(morph_file) as strm:
      for line in strm:
        tot += 1
        if 'NO_SYMBOL' in line:
          continue
        nsyms += 1
        if ':SP>' in line:
          nsemphon += 1
        if ':P>' in line:
          nphon += 1
        if ':S>' in line:
          nsem += 1
    # NOTE(review): an epoch with no spelled morphemes (nsyms == 0) raises
    # ZeroDivisionError here.
    semphon_str = '%d\t%2.2f' % (nsemphon, nsemphon / float(nsyms))
    phon_str = '%d\t%2.2f' % (nphon, nphon / float(nsyms))
    sem_str = '%d\t%2.2f' % (nsem, nsem / float(nsyms))
    print '%10d\t%10f\t%10s\t%10s\t%10s' % (tot, nsyms/tot,
                                            semphon_str, phon_str, sem_str)
    nsym_list.append(str(nsyms/tot))
    nsemphon_list.append(str(nsemphon/tot))
    nphon_list.append(str(nphon/tot))
    nsem_list.append(str(nsem/tot))
  # Emit the R vectors followed by the fixed plotting template.
  with open('plot.R', 'w') as plot:
    plot.write('nsyms <- c(%s)\n' % ', '.join(nsym_list))
    plot.write('nsemphon <- c(%s)\n' % ', '.join(nsemphon_list))
    plot.write('nphon <- c(%s)\n' % ', '.join(nphon_list))
    plot.write('nsem <- c(%s)\n' % ', '.join(nsem_list))
    plot.write(_PLOT)
if __name__ == '__main__':
  main(sys.argv)
| {"/builder.py": ["/pynini_interface.py"]} |
50,245 | rwsproat/writing_evolution | refs/heads/master | /pynini_interface.py | ## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Author: Richard Sproat (rws@xoba.com)
import time
import sys
from pynini import *
from base import _BASE
# Module-level caches: far path -> loaded Far archive, rule name -> fst.
_LOADED_FARS = {}
_LOADED_FSTS = {}
def load_rule_from_far(rule, far, force=False):
  """Loads the rule, and caches it, reloading if force=True.

  Args:
    rule: Rule name
    far: Far name
    force: If True, forces reload
  Returns:
    loaded fst; exits the process on a missing far or rule
  """
  try:
    if force or far not in _LOADED_FARS:
      _LOADED_FARS[far] = Far(far)
    # TODO(rws): If there is the same rule name in different fars, then this
    # will just get the first one we encounter.
    if force or rule not in _LOADED_FSTS:
      _LOADED_FSTS[rule] = _LOADED_FARS[far][rule]
    return _LOADED_FSTS[rule]
  except pywrapfst.FstIOError:
    sys.stderr.write('Failed loading far from %s\n' % far)
    sys.exit(1)
  except KeyError:
    sys.stderr.write('No rule "%s" in %s\n' % (rule, far))
    sys.exit(1)
def to_fst(s, syms='byte'):
  """Constructs an acceptor fst from a string.

  Args:
    s: string
    syms: symbol table / token type for the acceptor
  Returns:
    fst representing the string
  """
  return acceptor(s, token_type=syms)
def to_string(t):
  """Constructs a string from a fst.

  Note: mutates t in place (epsilon removal) before stringifying.

  Args:
    t: fst
  Returns:
    string representing the fst
  """
  t.rmepsilon()
  return t.stringify()
def random_paths(t, n=1):
  """Computes a set of random paths from an fst.

  Args:
    t: fst, or the name of a rule previously cached by load_rule_from_far
    n: number of paths
  Returns:
    list of random path strings, or [] if a named rule is missing
  """
  # Accept either an fst or the name of a cached rule; isinstance replaces
  # the non-idiomatic type(t) == type('string') comparison.
  if isinstance(t, str):
    try:
      t = _LOADED_FSTS[t]
    except KeyError:
      sys.stderr.write('Missing transducer %s\n' % t)
      return []
  paths = []
  # for-range replaces the manual while-counter loop.
  for _ in range(n):
    # NOTE(review): the time-based seed may repeat within one microsecond;
    # kept as-is to preserve behavior.
    output = randgen(t, seed=int(time.time() * 1000000), select='uniform')
    output.rmepsilon()
    output.topsort()
    paths.append(output.stringify())
  return paths
# Cache of (phonetic string, rule) -> composed lattice so repeated queries
# with the same left-hand string skip one composition.
_CACHED_COMPOSITIONS = {}
def sounds_like(s1, s2, rule='EDIT_DISTANCE',
                far=('%s/Grm/soundslike.far' % _BASE)):
  """Computes the distance between two phonetic strings given a grammar.

  Args:
    s1: phonetic string 1
    s2: phonetic string 2
    rule: name of the similarity rule inside the far
    far: path to the compiled phonetic similarity grammar
  Returns:
    (number of arcs in shortest path, shortest distance); (0, inf) when the
    strings do not compose at all
  """
  grmfst = load_rule_from_far(rule, far)
  if (s1, rule) in _CACHED_COMPOSITIONS:
    fst1 = _CACHED_COMPOSITIONS[s1, rule]
  else:
    fst1 = s1 * grmfst
    _CACHED_COMPOSITIONS[s1, rule] = fst1
  result = shortestpath(fst1 * s2)
  result.rmepsilon()
  result.topsort()
  if result.num_states() > 1:
    dist = shortestdistance(result)
    # Total cost = shortest distance into the last state plus that state's
    # final weight.
    dist = (float(str(dist[-1])) +
            float(str(result.final(result.num_states() - 1))))
    return result.num_states() - 1, dist
  else:
    return 0, float('inf')
| {"/builder.py": ["/pynini_interface.py"]} |
50,254 | 1dayac/magic_bot | refs/heads/master | /candle_factory_5.py | #clanteam
from selenium import webdriver
import time
import sys
from datetime import datetime
import re
from mtgolibrary import MtgoLibraryParser
from card import Card, Price
import sqlite3
# Open (or create) the local price database. isolation_level=None puts
# sqlite3 in autocommit mode, so every INSERT below is committed at once.
con = sqlite3.connect('cards.db', isolation_level=None)
cursor = con.cursor()
tb_exists = "SELECT name FROM sqlite_master WHERE type='table' AND name='records'"
if not con.execute(tb_exists).fetchone():
    # First run: create the spread-records table.
    c = cursor.execute("CREATE TABLE records(Id TEXT PRIMARY KEY, CardName TEXT, SetName TEXT, BuyPrice REAL, SellPrice REAL, "
                       "BotNameSeller TEXT, BotNameBuyer TEXT, Time TEXT, Number INT, Foil INT)")
else:
    print("Table exists")
def is_basic_land(card):
    """True for the five basic land names and any card whose name starts
    with "Urza's" (the Urza lands)."""
    basics = ("Swamp", "Island", "Mountain", "Plains", "Forest")
    return card.name in basics or card.name.startswith("Urza's")
import win32api, win32con, win32process
def setaffinity():
    """Would pin this process to CPU cores 0-1; currently disabled."""
    # Early return: the affinity code below is intentionally dead.
    return
    pid = win32api.GetCurrentProcessId()
    mask = 3  # bitmask 0b11 = cores 0 and 1 (the old "core 7" comment was wrong)
    handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
    win32process.SetProcessAffinityMask(handle, mask)
setaffinity()
import platform
# Per-host chromedriver location: Windows dev VM vs. the Linux box.
if platform.system() == "Windows":
    chromedriver_path = r"C:\Users\IEUser\Desktop\magic_bot\chromedriver.exe"
else:
    chromedriver_path = "/home/dmm2017/PycharmProjects/candle_factory/chromedriver"
class HotlistProcessor(object):
    """Scrapes the mtgotraders hotlist (cards they buy at a premium),
    prices each row against mtgowikiprice sellers, and records any
    profitable spread into the sqlite 'records' table."""
    def __init__(self):
        # Lexicographic lower bound on set names to process.
        self.start_from = "1"
        self.set = "1"
        self.rows = []
        self.driver_hotlist = None
        # Wall-clock start of the current pass; the pass is aborted and the
        # browser recycled after 600 s.
        self.start = None
        # Index of the next hotlist row to process; preserved across restarts.
        self.i = 0
        self.mtgolibrary_parser = MtgoLibraryParser()
    def restart(self):
        """Tears down the Selenium session and resets per-pass state."""
        self.start_from = "1"
        self.set = "1"
        self.rows = []
        try:
            self.driver_hotlist.quit()
        except:
            # Driver may be None or already dead.
            pass
        self.driver_hotlist = None
        self.start = None
        self.i = 0
        self.mtgolibrary_parser.restart()
    def openHotlist(self):
        """Opens the hotlist page, switches to the widest page-size view,
        and returns the table rows."""
        url = "http://www.mtgotraders.com/hotlist/#/"
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        self.driver_hotlist = webdriver.Chrome(chromedriver_path, options=chrome_options)
        self.driver_hotlist.get(url)
        # The table is built client-side; give it a full minute to render.
        time.sleep(60)
        elems = self.driver_hotlist.find_elements_by_class_name('btn')
        elems[0].click()
        elems_2 = self.driver_hotlist.find_element_by_xpath(
            "//*[@id=\"mainContent\"]/div[2]/div[1]/div[2]/div[4]/div[1]/span[2]/span/ul/li[5]")
        elems_2.click()
        time.sleep(4)
        table = self.driver_hotlist.find_element_by_id('main-table')
        rows = table.find_elements_by_tag_name('tr')
        return rows
    def processHotlist(self):
        """Processes every row, restarting the browser (and resuming at the
        same row index) on any error or after a 600 s timeout."""
        self.rows = self.openHotlist()
        self.start = time.time()
        while True:
            try:
                while self.i < len(self.rows):
                    self.processRow(self.rows[self.i])
                    self.i += 1
                    end = time.time()
                    if end - self.start > 600:
                        # Force a browser recycle via the except path below.
                        raise Exception
                break
            except:
                print(sys.exc_info()[1])
                # Retry until a fresh session comes up, keeping our position.
                while True:
                    try:
                        temp_i = self.i
                        self.restart()
                        self.i = temp_i
                        self.rows = self.openHotlist()
                        print(len(self.rows))
                        time.sleep(5)
                        self.start = time.time()
                        break
                    except:
                        pass
    def processRow(self, row):
        """Prices one hotlist row and records it if the spread is profitable."""
        columns = row.find_elements_by_tag_name('td')
        if len(columns) < 3:
            # Header / separator rows.
            return
        setname = columns[0].text
        self.set = setname
        cardname = columns[1].text
        price = float(columns[3].text)
        if setname < self.start_from:
            return
        if price < 0.05:
            # Too cheap to be worth a lookup.
            return
        foil = cardname.endswith("*")
        if foil:
            # NOTE(review): drops a 7-character foil suffix from the name —
            # confirm against the live table's foil marker format.
            cardname = cardname[:-7]
        print(setname + " " + cardname + " " + str(price))
        price_struct = Price("", price, 10000, "Hotlistbot3", "", 0)
        card = Card(cardname, setname, price_struct, foil)
        if is_basic_land(card) or ((card.set == "MS2" or card.set == "MS3") and card.foil):
            return
        p = self.mtgolibrary_parser.get_price(card)
        if not p:
            return
        # Profitable when the hotlist buys for > 0.025 tix above the
        # cheapest trusted seller (10000 is the 'no seller found' sentinel).
        if price - p.sell_price > 0.025 and p.sell_price != 10000:
            print("High diff: " + p.bot_name_sell + " " + str(price - p.sell_price))
            cursor.execute("INSERT OR REPLACE INTO records VALUES(?,?,?,?,?,?,?,?,?,?)",
                           [setname + cardname, cardname, setname, price, p.sell_price, p.bot_name_sell, "HotListBot3",
                            datetime.now(), min(4, p.number), 1 if foil else 0])
# Run forever, recycling the browser stack after any failure.
processeor = None
while True:
    try:
        processeor = HotlistProcessor()
        processeor.processHotlist()
    except Exception:
        # Fix: previously a bare 'except' referenced 'processeor' which is
        # unbound (NameError) when the very first constructor call raises;
        # narrowing to Exception also lets Ctrl-C actually stop the bot.
        if processeor is not None:
            processeor.restart()
| {"/candle_factory_5.py": ["/mtgolibrary.py", "/card.py"], "/goatbots_cardhoarder.py": ["/card.py", "/mtgolibrary.py"], "/number_parser.py": ["/card.py", "/queue_with_capacity.py"], "/mtgolibrary.py": ["/card.py", "/number_parser.py"], "/mtgo_controller.py": ["/queue_with_capacity.py"]} |
50,255 | 1dayac/magic_bot | refs/heads/master | /queue_with_capacity.py | import queue
class QueueWithMaxCapacity(object):
    """FIFO queue that evicts its oldest item once *capacity* is reached."""
    def __init__(self, capacity=100):
        # Maximum number of items retained.
        self.limit = capacity
        self.queue = queue.Queue()
    def add(self, item):
        """Appends *item*, dropping the oldest entry if the queue is full.

        Bug fix: the original tested 'qsize() > limit', which let the queue
        grow to limit + 1 items before evicting; '>=' enforces the stated
        capacity exactly.
        """
        if self.queue.qsize() >= self.limit:
            self.queue.get()
        self.queue.put(item)
| {"/candle_factory_5.py": ["/mtgolibrary.py", "/card.py"], "/goatbots_cardhoarder.py": ["/card.py", "/mtgolibrary.py"], "/number_parser.py": ["/card.py", "/queue_with_capacity.py"], "/mtgolibrary.py": ["/card.py", "/number_parser.py"], "/mtgo_controller.py": ["/queue_with_capacity.py"]} |
50,256 | 1dayac/magic_bot | refs/heads/master | /goatbots_cardhoarder.py | from selenium import webdriver
import sqlite3
from datetime import datetime
from card import Card, Price
import time
from mtgolibrary import MtgoLibraryParser
chromedriver_path = r"C:\Users\dmm2017\Desktop\magic_bot\chromedriver.exe"
import win32api, win32con, win32process
def setaffinity():
    """Pins this process to CPU cores 0-1 so the Chrome sessions do not
    hog every core."""
    pid = win32api.GetCurrentProcessId()
    mask = 3  # bitmask 0b11 = cores 0 and 1 (the old "core 7" comment was wrong)
    handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
    win32process.SetProcessAffinityMask(handle, mask)
setaffinity()
class TopLevelProcessor(object):
    """Coordinates one 'main' card source against comparison price parsers
    and records profitable spreads into the local sqlite database."""
    def __init__(self):
        self.processors = []
        self.main_processor = None
        self.database_path = "cards.db"
        # isolation_level=None => autocommit; each INSERT is durable at once.
        con = sqlite3.connect('cards.db', isolation_level=None)
        self.cursor = con.cursor()
        tb_exists = "SELECT name FROM sqlite_master WHERE type='table' AND name='records'"
        if not con.execute(tb_exists).fetchone():
            c = self.cursor.execute(
                "CREATE TABLE records(Id TEXT PRIMARY KEY, CardName TEXT, SetName TEXT, BuyPrice REAL, SellPrice REAL, "
                "BotNameSeller TEXT, BotNameBuyer TEXT, Time TEXT, Number INT, Foil INT)")
        else:
            print("Table exists")
    def RestartAll(self):
        """Restarts every attached parser (fresh Selenium sessions)."""
        for processor in self.processors:
            processor.restart()
        self.main_processor.restart()
    def AddMainProcessor(self, processor):
        """Sets the parser that enumerates candidate cards."""
        self.main_processor = processor
    def AddProcessor(self, processor):
        """Adds a comparison parser queried per card for extra prices."""
        self.processors.append(processor)
    def IsHighDiff(self, card):
        """True when the best buy price beats the best sell price by at
        least 0.05 tix."""
        best_buy_price = card.BestBuyPrice()
        best_sell_price = card.BestSellPrice()
        print(best_buy_price)
        print(best_sell_price)
        return best_buy_price.buy_price - best_sell_price.sell_price >= 0.05
    def AddToDatabase(self, card):
        """Upserts the card's best spread into the records table."""
        best_buy_price = card.BestBuyPrice()
        best_sell_price = card.BestSellPrice()
        print("High diff: " + best_sell_price.bot_name_sell + "," + str(best_sell_price.sell_price) + " " + best_buy_price.bot_name_buy + "," + str(best_buy_price.buy_price)+": " +
              str(best_buy_price.buy_price - best_sell_price.sell_price))
        self.cursor.execute("INSERT OR REPLACE INTO records VALUES(?,?,?,?,?,?,?,?,?,?)",
                            [card.set + card.name, card.name, card.set, best_buy_price.buy_price, best_sell_price.sell_price,
                             best_sell_price.bot_name_sell, best_buy_price.bot_name_buy,
                             datetime.now(), min(4, best_buy_price.number), 1 if card.foil else 0])
    def ParseCards(self):
        """Walks the main source; fans each card out to every comparison
        parser, recording any card whose spread is large enough."""
        for card in self.main_processor.GetCards():
            for processor in self.processors:
                card.prices.append(processor.get_price(card))
            if self.IsHighDiff(card):
                self.AddToDatabase(card)
class GoatBotsParser(object):
    """'Main' card source: scrapes goatbots.com set price pages and yields
    Cards carrying GoatBots buy/sell prices."""
    def __init__(self):
        self.main_url = "https://www.goatbots.com/prices"
        chrome_options = webdriver.ChromeOptions()
        # Disable image loading (value 2) to speed up the scrape.
        prefs = {'profile.managed_default_content_settings.images': 2}
        chrome_options.add_experimental_option("prefs", prefs)
        chrome_options.add_argument("--headless")
        # Two drivers: one walks the set listing, the other opens card pages.
        self.driver = webdriver.Chrome(chromedriver_path, options=chrome_options)
        self.second_driver = webdriver.Chrome(chromedriver_path, options=chrome_options)
        self.current_set = None
        self.card_count = 0
    def restart(self):
        """Replaces only the per-card driver; the listing driver survives."""
        self.second_driver.quit()
        self.main_url = "https://www.goatbots.com/prices"
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        prefs = {'profile.managed_default_content_settings.images': 2}
        chrome_options.add_experimental_option("prefs", prefs)
        self.second_driver = webdriver.Chrome(chromedriver_path, options=chrome_options)
    def ProcessRow(self, line):
        """Opens one card row's page and builds a Card with GoatBots prices.

        Returns:
            Card, or None for non-card rows (search links, full sets,
            boosters).
        """
        href_cell = line.find_element_by_css_selector('a')
        if "/search" in str(href_cell.get_attribute("href")):
            return None
        card_url = href_cell.get_attribute('href')
        self.second_driver.get(card_url)
        # Page title ends in two extra words; drop them to get the card name.
        card_name = " ".join(self.second_driver.title.split(" ")[:-2])
        if "Full Set" in card_name or "Booster" in card_name:
            return None
        print(card_name)
        p = Price("", -5, 1000, "GoatBots3", "GoatBots3", 4)
        c = Card(card_name, self.current_set, p, 0)
        #return c
        price_block = self.second_driver.find_element_by_id("info_" + self.current_set.lower())
        all_h3 = price_block.find_elements_by_tag_name('h3')
        prices = []
        for h3 in all_h3:
            if "price_value" in h3.get_attribute("class"):
                prices.append(h3.text)
        try:
            buy_price = float(prices[0])
        except:
            buy_price = -1.0
        try:
            sell_price = float(prices[1])
        except:
            sell_price = 100000
        # Add the card to the cart to read the real (delivery-table) sell
        # price, then empty the cart again.
        self.second_driver.find_element_by_class_name("cart_icon").click()
        self.second_driver.find_element_by_id("header_cart_icon").click()
        real_sell_price = float(self.second_driver.find_element_by_id("delivery_table").find_elements_by_tag_name("td")[3].find_element_by_tag_name("a").text)
        self.second_driver.find_element_by_id("form_cart_empty_cart").click()
        # Shift the buy price by the same delta between listed and real sell.
        p = Price("", buy_price - (sell_price - real_sell_price), real_sell_price, "GoatBots3", "GoatBots3", 4)
        c = Card(card_name, self.current_set, p, 0)
        return c
    def GetSet(self):
        """Extracts the set code from the "Name (CODE)" page heading."""
        set_line = self.driver.find_element_by_xpath("//*[@id=\"text_left\"]/div[1]/h2/a").text
        return set_line[set_line.find("(") +1 : set_line.find(")")]
    def GetCards(self):
        """Generator over cards in a window of sets; stops a set early once
        prices drop below 0.3 tix (rows appear to be price-sorted)."""
        self.driver.get(self.main_url)
        sets = self.driver.find_elements_by_class_name('cardset')
        refs = []
        # Only a slice of the set listing (positions 40..49) is visited.
        min_iter = 40
        max_iter = 50
        current_iter = 0
        for set in sets:
            current_iter += 1
            if current_iter < min_iter:
                continue
            if current_iter == max_iter:
                break
            try:
                print(set.find_element_by_tag_name('a').get_attribute('href'))
                refs.append(set.find_element_by_tag_name('a').get_attribute('href'))
            except:
                pass
        max_ref = 20
        current_ref = 0
        for ref in refs:
            current_ref += 1
            print("Processing " + ref)
            # Cap the number of rows inspected per set.
            max_cards = 20
            current = 0
            self.driver.get(ref)
            print(ref)
            self.current_set = self.GetSet()
            for line in self.driver.find_element_by_id('pricesTable').find_elements_by_tag_name('tr'):
                current += 1
                if current == max_cards:
                    break
                if "empty_row" in line.get_attribute('class'):
                    continue
                if "card" in line.get_attribute('class'):
                    try:
                        card = self.ProcessRow(line)
                    except:
                        continue
                    if card is None:
                        continue
                    if card.MinSellPrice() < 0.3:
                        # Cheaper rows are not worth arbitraging.
                        break
                    else:
                        self.card_count += 1
                        yield card
                    # Recycle the per-card driver periodically.
                    if self.card_count % 100 == 0:
                        self.restart()
class CardHoarderParser(object):
    """Comparison parser: looks a card up on cardhoarder.com and returns
    that store's buy/sell prices for the non-foil printing."""
    def __init__(self):
        self.main_url = "https://www.cardhoarder.com/cards"
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        # Disable image loading (value 2); only text is scraped here.
        prefs = {'profile.managed_default_content_settings.images': 2}
        chrome_options.add_experimental_option("prefs", prefs)
        self.driver = webdriver.Chrome(chromedriver_path, options=chrome_options)
        self.driver.get(self.main_url)
    def restart(self):
        """Starts a fresh headless session back on the search page."""
        self.driver.quit()
        self.main_url = "https://www.cardhoarder.com/cards"
        chrome_options = webdriver.ChromeOptions()
        prefs = {'profile.managed_default_content_settings.images': 2}
        chrome_options.add_experimental_option("prefs", prefs)
        chrome_options.add_argument("--headless")
        self.driver = webdriver.Chrome(chromedriver_path, options=chrome_options)
        self.driver.get(self.main_url)
    def get_price(self, card):
        """Searches for card.name, follows the matching non-foil set link,
        and scrapes sell/buy prices.

        Returns:
            Price; sentinel values (buy -1.0 / sell 100000.0) are used when
            a price cannot be parsed.
        """
        input_element = self.driver.find_element_by_id("card-search-input")
        input_element.clear()
        input_element.send_keys(card.name)
        self.driver.find_element_by_class_name("btn-search").click()
        table = self.driver.find_element_by_id("search-results-table")
        trs = table.find_elements_by_tag_name("tr")
        final_href = ""
        for tr in trs:
            try:
                reference = tr.find_element_by_tag_name("a").get_attribute('href')
                # URL tail looks like ".../SET-cardname"; pull the set code.
                set = reference.split("/")[-1].split("-")[0].upper()
                if set != card.set or reference.endswith("-foil") or reference.endswith("#"):
                    continue
                final_href = reference
                break
            except:
                pass
        self.driver.get(final_href)
        panel_body = self.driver.find_element_by_class_name('panel-body')
        try:
            sell_price = float(panel_body.find_element_by_class_name('card-ordering-details').text.strip().split(" ")[0])
        except:
            sell_price = 100000.0
        try:
            buy_price = float(panel_body.find_element_by_tag_name('h4').text.split(" ")[-2])
        except:
            buy_price = -1.0
        return Price("", buy_price, sell_price, "CardBuyingBot3", "CardBot3", 4)
import sys, traceback
# Wire the GoatBots listing as the main source and the other two parsers
# as comparison buyers, then loop until one full parse pass succeeds.
top_level_processor = TopLevelProcessor()
top_level_processor.AddMainProcessor(GoatBotsParser())
top_level_processor.AddProcessor(CardHoarderParser())
top_level_processor.AddProcessor(MtgoLibraryParser())
while True:
    try:
        top_level_processor.ParseCards()
        top_level_processor.RestartAll()
        break
    except:
        # Selenium sessions die routinely; log, recycle drivers, retry.
        top_level_processor.RestartAll()
        print("Unexpected error:", sys.exc_info()[0])
        print("Unexpected error:", sys.exc_info()[1])
        traceback.print_exc(file=sys.stdout)
        pass
time.sleep(600)
50,257 | 1dayac/magic_bot | refs/heads/master | /number_parser.py | import pickle
import sys
import queue
from card import Card, Price
from selenium import webdriver
import time
from queue_with_capacity import QueueWithMaxCapacity
import platform
if platform.system() == "Windows":
chromedriver_path = r"C:\Users\IEUser\Desktop\magic_bot\chromedriver.exe"
else:
chromedriver_path = "/home/dmm2017/PycharmProjects/candle_factory/chromedriver"
class DigitsClassifier(object):
    """Maps mtgowikiprice digit images (img src URLs) to the characters
    they depict.

    The mapping is learned from recently seen prices whose numeric value is
    already known; as a last resort an operator is asked on stdin."""
    def ParseMtgolibraryInternal(self, driver, card, url, botname, sellprice_original):
        """Tallies digit-image/character co-occurrences for one known price.

        For the sell row matching (card.set, botname), pairs each digit
        image src with the corresponding character of the known price
        string and counts the pair in self.super_dict.
        """
        bot_name_original = botname
        setname = card.set
        elem = driver.find_elements_by_class_name("sell_row")
        if len(elem) == 0:
            return False
        for e in elem:
            try:
                table_setname = e.find_elements_by_class_name("setname")[0].text
                if table_setname != setname:
                    continue
                bot_name_sell = e.find_elements_by_class_name("bot_name")[0].text
                if bot_name_sell != bot_name_original:
                    continue
            except:
                continue
            sell_price_original = str(sellprice_original)
            images = e.find_element_by_class_name("sell_price_round")
            images_srcs = [image.get_attribute("src") for image in images.find_elements_by_tag_name('img')]
            if len(images_srcs) != len(sell_price_original):
                # Digit count mismatch: cannot align images to characters.
                return
            for i in range(len(images_srcs)):
                if images_srcs[i] not in self.super_dict.keys():
                    self.super_dict[images_srcs[i]] = {}
                if sell_price_original[i] not in self.super_dict[images_srcs[i]].keys():
                    self.super_dict[images_srcs[i]][sell_price_original[i]] = 0
                self.super_dict[images_srcs[i]][sell_price_original[i]] += 1
    def ParseMtgolibrary(self, driver, card, botname, sellprice):
        """Navigates to *card* and tallies digit evidence from its page."""
        setname, url, driver = self.MtgoLibraryGoToCard(driver, card)
        # Let the client-side price table render.
        time.sleep(7)
        return self.ParseMtgolibraryInternal(driver, card, url, botname, sellprice)
    def MtgoLibraryGoToCard(self, driver, card):
        """Searches mtgowikiprice for "<name> <SET>" and lands on the card
        page; returns (set name, landed url, driver)."""
        setname = card.set.upper()
        input_element = driver.find_element_by_id("_cardskeyword")
        input_element.clear()
        input_element.send_keys(card.name + " " + setname)
        driver.find_elements_by_css_selector("button")[1].click()
        url = driver.current_url
        return setname, url, driver
    def __init__(self):
        # Persisted img-src -> character mapping.
        # NOTE(review): pickle.loads on a local cache file; safe only while
        # obj/dict.pkl is never attacker-controlled.
        try:
            f = open("obj/dict.pkl", 'rb')
            self.prices_d = pickle.loads(f.read())
        except:
            self.prices_d = {}
        # Recent (card, botname, sell_price) samples with known values,
        # used as training data by smart_way().
        self.old_prices = QueueWithMaxCapacity(100)
        # img src -> {character: count} co-occurrence tallies.
        self.super_dict = {}
        self.index = 0
    def smart_way(self, img_src):
        """Tries to learn the full digit mapping from remembered prices.

        Revisits the queued known-price cards, tallies image/character
        pairs, and accepts the mapping only if it yields exactly 11
        distinct symbols with no collisions.

        Returns:
            True if self.prices_d was (re)learned and persisted.
        """
        # Need enough training samples before attempting to learn.
        if self.old_prices.queue.qsize() < 50:
            return False
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        # NOTE(review): driver_library is never quit in this method — it
        # leaks one Chrome process per call.
        driver_library = webdriver.Chrome(chromedriver_path, options=chrome_options)
        driver_library.get("https://www.mtgowikiprice.com/")
        index = 1
        print("Run smart algorithm for digit recognition")
        index = 0
        for (card, botname, sell_price) in list(self.old_prices.queue.queue):
            self.ParseMtgolibrary(driver_library, card, botname, sell_price)
            index += 1
            print("Parsing page number " + str(index))
        temp_solution = {}
        for img_src in self.super_dict.keys():
            # Pick the character most often co-occurring with this image.
            candidate_key = max(self.super_dict[img_src], key=self.super_dict[img_src].get)
            temp_solution[img_src] = candidate_key
        # Accept only a complete, collision-free alphabet.
        if len(temp_solution.keys()) == 11 and len(temp_solution.values()) == len(set(temp_solution.values())):
            for key in temp_solution.keys():
                self.prices_d[key] = temp_solution[key]
            self.super_dict.clear()
            pickle.dump(self.prices_d, open("obj/dict.pkl", "wb"))
            return True
        self.super_dict.clear()
        return False
    def get_symbol(self, img_src):
        """Returns the character for one digit image, learning the mapping
        or asking a human on stdin as a last resort."""
        if img_src in self.prices_d.keys():
            return self.prices_d[img_src]
        elif self.smart_way(img_src):
            return self.prices_d[img_src]
        else:
            print(img_src)
            # Manual fallback: the operator types the character (trailing
            # newline is stripped later when digits are joined).
            symbol = sys.stdin.readline()
            self.prices_d[img_src] = symbol
            pickle.dump(self.prices_d, open("obj/dict.pkl", "wb"))
            return symbol
    def get_price(self, e, botname, card):
        """Decodes the sell (preferred) or buy price rendered as digit
        images inside row *e*.

        Returns:
            float price; 100000 (sell) / -1 (buy) when decoding fails.
        """
        find_sell_price = False
        if len(e.find_elements_by_class_name("sell_price_round")):
            find_sell_price = True
        images = None
        if find_sell_price:
            images = e.find_element_by_class_name("sell_price_round")
        else:
            images = e.find_element_by_class_name("buy_price_round")
        index = 0
        res = []
        for image in images.find_elements_by_tag_name('img'):
            index += 1
            img_src = image.get_attribute("src")
            symbol = self.get_symbol(img_src)
            res.append(symbol)
        # print("".join(res).replace('\n', ''))
        if find_sell_price:
            try:
                sell_price = float("".join(res).replace('\n', ''))
            except:
                sell_price = 100000
            # Remember decoded sell prices as future training data.
            self.old_prices.add((card, botname, sell_price))
            return sell_price
        else:
            try:
                buy_price = float("".join(res).replace('\n', ''))
            except:
                buy_price = -1
            return buy_price
50,258 | 1dayac/magic_bot | refs/heads/master | /mtgolibrary.py | from selenium import webdriver
import sqlite3
from datetime import datetime
from card import Card, Price
import time
import platform
import re
from number_parser import DigitsClassifier
from trusted_bots import *
if platform.system() == "Windows":
chromedriver_path = r"C:\Users\IEUser\Desktop\magic_bot\chromedriver.exe"
else:
chromedriver_path = "/home/dmm2017/PycharmProjects/candle_factory/chromedriver"
class MtgoLibraryParser(object):
digit_clasiffier = DigitsClassifier()
    def __init__(self):
        """Starts a (non-headless) Chrome session on mtgowikiprice."""
        self.main_url = "https://www.mtgowikiprice.com/"
        chrome_options = webdriver.ChromeOptions()
        # Images enabled (value 1): the prices are rendered as digit images
        # that the DigitsClassifier must be able to read.
        prefs = {'profile.managed_default_content_settings.images': 1}
        chrome_options.add_experimental_option("prefs", prefs)
        #chrome_options.add_argument("start-maximized")
        #chrome_options.add_argument("--headless")
        self.driver = webdriver.Chrome(chromedriver_path, options=chrome_options)
        self.driver.get(self.main_url)
        self.card_count = 0
    def restart(self):
        """Replaces the browser session with a fresh one.

        NOTE(review): this profile disables images (value 2) while __init__
        enables them (value 1) — after a restart the digit images may not
        load for the classifier; confirm this is intended.
        """
        self.driver.quit()
        self.main_url = "https://www.mtgowikiprice.com/"
        chrome_options = webdriver.ChromeOptions()
        #chrome_options.add_argument("--headless")
        prefs = {'profile.managed_default_content_settings.images': 2}
        chrome_options.add_experimental_option("prefs", prefs)
        self.driver = webdriver.Chrome(chromedriver_path, options=chrome_options)
        self.driver.get(self.main_url)
    def ParseMtgolibraryFoil(self, card, parse_buyers = False):
        """Like ParseMtgolibrary but for the foil printing of *card*.

        Returns:
            Price, None (no sell rows), or False when the set is skipped or
            the page has no 'View Foil' link.
        """
        setname = card.set.upper()
        # Booster and promo sets are skipped entirely.
        if setname.startswith("BOO") or setname.startswith("PRM"):
            return False
        setname, url = self.MtgoLibraryGoToCard(card)
        try:
            link = self.driver.find_element_by_link_text('View Foil')
            time.sleep(0.5)
        except:
            return False
        link.click()
        time.sleep(4)
        return self.ParseMtgolibraryInternal(card, url, parse_buyers)
    def get_price_from_image(self, e, botname, card):
        """Decodes the image-rendered price digits in row *e* via the
        class-shared DigitsClassifier instance."""
        return MtgoLibraryParser.digit_clasiffier.get_price(e, botname, card)
    def MtgoLibraryGoToCard(self, card):
        """Searches for "<name> <SET>" and navigates to the result page.

        Returns:
            (uppercased set name, landed page url)
        """
        setname = card.set.upper()
        input_element = self.driver.find_element_by_id("_cardskeyword")
        input_element.clear()
        input_element.send_keys(card.name + " " + setname)
        self.driver.find_elements_by_css_selector("button")[1].click()
        url = self.driver.current_url
        return setname, url
def ParseMtgolibraryInternal(self, card, url, parse_buyers):
setname = card.set
elem = self.driver.find_elements_by_class_name("sell_row")
buy_price = -1
sell_price = 10000
if len(elem) == 0:
return None
first = True
number = 0
bot_name_sell = ""
for e in elem:
try:
table_setname = e.find_elements_by_class_name("setname")[0].text
if table_setname != setname:
continue
number = int(e.find_elements_by_class_name("sell_quantity")[0].text)
bot_name_sell = e.find_elements_by_class_name("bot_name")[0].text
if bot_name_sell == "":
continue
if bot_name_sell not in trusted_sell_bots:
print("Best price - " + bot_name_sell + " " + str(self.get_price_from_image(e, bot_name_sell, card)))
continue
except:
continue
sell_price = self.get_price_from_image(e, bot_name_sell, card)
break
if not parse_buyers:
return Price(url, 0, sell_price, "", bot_name_sell, number)
elem2 = self.driver.find_elements_by_class_name("buy_row")
bot_name_buy = ""
number = 0
for e in elem2:
try:
bot_name_buy = e.find_elements_by_class_name("bot_name")[0].get_attribute('textContent').strip()
number = int(e.find_elements_by_class_name("buy_quantity")[0].get_attribute('textContent').strip().replace("+", ""))
if bot_name_buy == "":
continue
table_setname = e.find_elements_by_class_name("setname")[0].get_attribute('textContent').strip()
if table_setname != setname:
continue
tickets = float(re.split("[+\-]", e.find_elements_by_class_name("tickets")[0].get_attribute('textContent').strip())[0])
except:
continue
buy_price = self.get_price_from_image(e, bot_name_buy, card)
if tickets > buy_price:
break
return Price(url, buy_price, sell_price, bot_name_buy, bot_name_sell, number)
def ParseMtgolibrary(self, card, parse_buyers = False):
setname = card.set.upper()
if setname.startswith("BOO") or setname.startswith("PRM"):
return None
setname, url = self.MtgoLibraryGoToCard(card)
time.sleep(7)
return self.ParseMtgolibraryInternal(card, url, parse_buyers)
def get_price(self, card):
self.card_count += 1
if self.card_count % 100 == 0:
self.restart()
if card.foil:
return self.ParseMtgolibraryFoil(card)
else:
return self.ParseMtgolibrary(card) | {"/candle_factory_5.py": ["/mtgolibrary.py", "/card.py"], "/goatbots_cardhoarder.py": ["/card.py", "/mtgolibrary.py"], "/number_parser.py": ["/card.py", "/queue_with_capacity.py"], "/mtgolibrary.py": ["/card.py", "/number_parser.py"], "/mtgo_controller.py": ["/queue_with_capacity.py"]} |
50,259 | 1dayac/magic_bot | refs/heads/master | /mtgo_controller.py | import sqlite3
from transitions import Machine, State
from pywinauto.application import Application
import pyautogui
import sys
import os
from enum import Enum
import math
import traceback
from trusted_bots import *
pyautogui.FAILSAFE = False
# Outcome of a single bot-to-bot trade attempt.
TradeStatus = Enum(
    "TradeStatus",
    [
        ("SUCCESS", 1),         # trade completed and confirmed
        ("BOT_OFFLINE", 2),     # trade partner could not be reached
        ("TRADE_REJECTED", 3),  # partner declined the trade
        ("BIG_FAILURE", 4),     # unrecoverable UI state; restart the client
        ("NONE", 5),            # no trade attempted yet / status cleared
    ],
)
import win32api, win32con, win32process
def setaffinity():
    """Pin the current process to a fixed set of CPU cores via the Win32 API."""
    pid = win32api.GetCurrentProcessId()
    # Affinity bitmask 0b101 -> cores 0 and 2.  (The original comment said
    # "core 7"; that would require mask 0x80.)
    mask = 5
    handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
    win32process.SetProcessAffinityMask(handle, mask)
#setaffinity()
class NoConfirmTradeException(Exception):
    """Raised when the 'Confirm Trade' button never appears during a sale."""
# Maps MTGO three-letter set codes to the full set names used by the client's
# "FilterCards-Option..." automation ids.  NOTE(review): a few keys appear
# twice with identical values (e.g. "THB", "ARB"); the later entry wins, so
# the duplicates are harmless.
set_abbr = {"ZNR" : "Zendikar Rising", "2XM" : "Double Masters", "AER" : "Aether Revolt", "C19" : "Commander (2019 Edition)", "C20" : "Ikoria Commander","AKH" : "Amonkhet", "EXP" : "Zendikar Expeditions", "PZ2" : "Treasure Chest", "MRD" : "Mirrodin", "KLD" : "Kaladesh", "EMN" : "Eldritch Moon", "ISD" : "Innistrad",
            "CMR" : "Commander Legends", "OGW" : "Oath of the Gatewatch", "DKA" : "Dark Ascension", "CMD" : "Commander (2011 Edition)", "ZEN" : "Zendikar", "XLN" : "Ixalan", "RIX" : "Rivals of Ixalan", "AVR" : "Avacyn Restored",
            "GTC" : "Gatecrash", "GRN" : "Guilds of Ravnica", "M21" : "Core 2021","BBD" : "Battlebond", "EX" : "Exodus", "MOR" : "Morningtide", "HOU" : "Hour of Devastation", "SOI" : "Shadows over Innistrad", "A25" : "Masters 25",
            "BFZ" : "Battle for Zendikar", "IKO" : "Ikoria: Lair of Behemoths", "THB" : "Theros Beyond Death", "JOU" : "Journey into Nyx", "IMA" : "Iconic Masters", "ORI" : "Magic Origins", "TPR" : "Tempest Remastered", "WL" : "Weatherlight","DTK" : "Dragons of Tarkir", "FRF" : "Fate Reforged",
            "M15" : "Magic 2015", "M20" : "Core Set 2020", "M14" : "Magic 2014", "M13" : "Magic 2013", "M12" : "Magic 2012", "M11" : "Magic 2011", "WAR" : "War of the Spark",
            "MMA" : "Modern Masters (2013 Edition)", "MM2" : "Modern Masters (2015 Edition)", "MM3" : "Modern Masters (2017 Edition)", "RTR" : "Return to Ravnica", "WWK" : "Worldwake", "ARB" : "Alara Reborn", "EVE" : "Eventide",
            "SHM" : "Shadowmoor", "10E" : "Tenth Edition", "9ED" : "Ninth Edition", "8ED" : "Eighth Edition", "7E" : "Seventh Edition", "LRW" : "Lorwyn",
            "ELD" : "Throne of Eldraine","PLC" : "Planar Chaos", "VMA" : "Vintage Masters", "TSP" : "Time Spiral", "CSP" : "Coldsnap", "DIS" : "Dissension", "AP" : "Apocalypse", "UMA" : "Ultimate Masters" ,
            "GPT" : "Guild Pact", "VI" : "Visions", "DAR": "Dominaria", "SOK" : "Saviors of Kamigawa", "BOK" : "Betrayers of Kamigawa", "CHK" : "Champions of Kamigawa",
            "ST" : "Stronghold", "SLD" : "Secret Lair", "THB" : "Theros Beyond Death" ,"TE" : "Tempest", "MI" : "Mirage", "ONS" : "Onslaught", "JUD" : "Judgment", "OD" : "Odyssey", "PS" : "Planeshift",
            "NE" : "Nemesis", "DGM" : "Dragon's Maze", "MM" : "Mercadian Masques", "THS" : "Theros", "RNA" : "Ravnica Allegiance", "ROE" : "Rise of the Eldrazi", "UZ" : "Urza's Saga", "UL" : "Urza's Legacy",
            "M10" : "Magic 2010", "SCG" : "Scourge","UD" : "Urza's Destiny", "LGN" : "Legions", "CON" : "Conflux", "M19" : "Core Set 2019", "C14" : "Commander 2014",
            "ARB" : "Alara Reborn", "ALA" : "Shards of Alara", "DST" : "Darksteel", "FUT" : "Future Sight", "EMA" : "Eternal Masters", "MS2" : "Kaladesh Inventions",
            "MS3" : "Amonkhet Invocations", "RAV" : "Ravnica: City of Guilds", "5DN" : "Fifth Dawn", "MBS" : "Mirrodin Besieged", "SOM" : "Scars of Mirrodin", "NPH" : "New Phyrexia",
            "ME4" : "Masters Edition IV", "ME2" : "Masters Edition II", "PR": "Prophecy", "ME3" : "Masters Edition III", "MED" : "Masters Edition I", "IN" : "Invasion", "BNG" : "Born of the Gods", "KTK" : "Khans of Tarkir", "TOR" : "Torment", "TSB" : "Time Spiral Timeshifted", "MH1" : "Modern Horizons"}
class Card:
    """A card identified by (name, set) with attached bot price quotes.

    ``prices`` holds objects exposing ``buy_price`` and ``sell_price``
    attributes (see the Price class in card.py).
    """

    def __init__(self, name="", set="", prices=None):
        """Create a card; *prices*, when given, is a single quote to seed the list.

        Bug fix: the original __init__ contained a stray bare ``s`` statement
        that raised NameError on every construction.  It also shadowed a dead
        zero-argument __init__, which the default values now fold back in, so
        both Card() and Card(name, set, price) work.
        """
        self.name = name
        self.set = set
        self.prices = [] if prices is None else [prices]

    def AddPrice(self, price):
        """Attach one more price quote to this card."""
        self.prices.append(price)

    def __hash__(self):
        # Cards hash by their name+set key.  NOTE(review): __eq__ is not
        # defined, so equality remains object identity — confirm before
        # relying on hashing for deduplication.
        return hash(self.name + self.set)

    def MaxBuyPrice(self):
        """Highest positive buy quote, or 0.0 when there is none."""
        return max((p.buy_price for p in self.prices if p.buy_price > 0), default=0.0)

    def MinSellPrice(self):
        """Lowest positive sell quote, or the 100000.0 sentinel when none."""
        return min((p.sell_price for p in self.prices if p.sell_price > 0), default=100000.0)

    def __str__(self):
        # Tab-separated dump: name, set, concatenated quotes, spread.
        quotes = "".join(str(price) for price in self.prices)
        return self.name + "\t" + self.set + "\t" + quotes + "\t" + str(self.MaxBuyPrice() - self.MinSellPrice())
# Arbitrage-records database produced by the scrapers; its path is the first
# command-line argument.  isolation_level=None -> autocommit mode.
con = sqlite3.connect(sys.argv[1], isolation_level=None)
cursor = con.cursor()
def go_to_rectangle(rect, sleep=0):
    """Move the mouse to the centre of *rect*, then pause *sleep* seconds."""
    centre_x = (rect.left + rect.right) / 2
    centre_y = (rect.top + rect.bottom) / 2
    pyautogui.moveTo(centre_x, centre_y)
    time.sleep(sleep)
def double_click_multiple(window, times):
    """Double-click the centre of *window* *times* times in a row."""
    target = window.rectangle()
    for _ in range(times):
        double_click_rectangle(target)
def double_click_rectangle(rect, sleep=0):
    """Double-click the centre of *rect* (0.1 s between clicks), then pause."""
    centre_x = (rect.left + rect.right) / 2
    centre_y = (rect.top + rect.bottom) / 2
    pyautogui.click(centre_x, centre_y, clicks=2, interval=0.1)
    time.sleep(sleep)
def click_rectangle(rect, sleep=0):
    """Single-click the centre of *rect*, then pause *sleep* seconds."""
    centre_x = (rect.left + rect.right) / 2
    centre_y = (rect.top + rect.bottom) / 2
    pyautogui.click(centre_x, centre_y)
    time.sleep(sleep)
def right_click_rectangle(rect, sleep=0):
    """Right-click the centre of *rect*, then pause *sleep* seconds."""
    centre_x = (rect.left + rect.right) / 2
    centre_y = (rect.top + rect.bottom) / 2
    pyautogui.rightClick(centre_x, centre_y)
    time.sleep(sleep)
def click_collection(app):
    """Open the MTGO Collection tab and give the UI five seconds to settle."""
    button = app['Magic: The Gathering Online'].window(auto_id="CollectionButton")
    click_rectangle(button.rectangle(), 5)
def click_trade(app):
    """Open the MTGO Trade tab and give the UI five seconds to settle."""
    button = app['Magic: The Gathering Online'].window(auto_id="TradeButton", found_index = 0)
    click_rectangle(button.rectangle(), 5)
def click_ok_button(app):
    """Press the OK button on the frontmost dialog, then wait five seconds."""
    button = app.top_window().window(auto_id="OkButton")
    click_rectangle(button.rectangle(), 5)
def close_chat(app):
    """Dismiss the trade-chat pane, retrying for up to 99 attempts.

    NB: "CloseButtom" presumably matches the control's actual (misspelled)
    automation id — do not "fix" the string.
    """
    for _attempt in range(1, 100):
        try:
            print("Try to close chat")
            click_rectangle(app['Magic: The Gathering Online'].window(auto_id="CloseButtom", found_index=0).rectangle())
            time.sleep(1)
            break
        except:
            pass
def get_tix_number(app, botname):
    """Scrape the bot's chat pane and return how many Event Tickets to take.

    HotList/CardBuy bots announce "Take <N> ..." with an integer N; every
    other bot writes "take <x.y> ..." with a fractional amount that is
    rounded down.
    """
    import io
    import sys
    # print_control_identifiers() writes to stdout, so capture it briefly.
    capture = io.StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    app.top_window().window(auto_id="ChatItemsControl").print_control_identifiers()
    sys.stdout = saved_stdout
    chat_text = capture.getvalue()
    if botname.startswith(("Hot", "CardBuy")):
        marker = chat_text.rfind("Take")
        start = chat_text.find(" ", marker + 1) + 1
        end = chat_text.find(" ", start)
        num_of_tix = int(chat_text[start: end])
    else:
        marker = chat_text.rfind("take")
        start = chat_text.find(" ", marker + 1) + 1
        end = chat_text.find(" ", start)
        num_of_tix = math.floor(float(chat_text[start: end]))
    print("Taking " + str(num_of_tix) + " tix")
    return num_of_tix
# State machine driving the buy -> update -> sell -> update cycle; on_enter
# callbacks name MTGO_bot methods (resolved by `transitions` on the model).
states_my = [State(name = 'initial'), State(name = 'login', on_enter = ['login']), State(name = 'buy', on_enter = ['buy_card']), State(name = 'update_binder_after_buying', on_enter = ['update_binder_after_buy']),
             State(name = 'sell', on_enter = ['sell_card']), State(name = 'update_binder_after_selling', on_enter = ['update_binder_after_sell']), State(name = 'close', on_enter = ['close_mtgo'])]
# NOTE(review): the go_to_buy transition from 'update_binder_after_selling'
# is listed twice; the duplicate is harmless but redundant.
transitions = [
    { 'trigger': 'go_to_login', 'source': 'initial', 'dest': 'login' },
    {'trigger': 'go_to_buy', 'source': 'buy', 'dest': 'buy'},
    {'trigger': 'go_to_sell', 'source': 'sell', 'dest': 'sell'},
    { 'trigger': 'go_to_buy', 'source': 'login', 'dest': 'buy' },
    {'trigger': 'go_to_buy', 'source': 'update_binder_after_selling', 'dest': 'buy'},
    { 'trigger': 'go_to_update', 'source': 'buy', 'dest': 'update_binder_after_buying'} ,
    { 'trigger': 'go_to_sell', 'source': 'update_binder_after_buying', 'dest': 'sell' },
    {'trigger': 'go_to_update', 'source': 'sell', 'dest': 'update_binder_after_selling'},
    {'trigger': 'go_to_buy', 'source': 'update_binder_after_selling', 'dest': 'buy'},
    {'trigger': 'go_to_restart', 'source': 'update_binder_after_selling', 'dest': 'close'},
    {'trigger': 'go_to_restart', 'source': 'sell', 'dest': 'close'},
    {'trigger': 'go_to_restart', 'source': 'buy', 'dest': 'close'},
    {'trigger': 'go_to_restart', 'source': 'update_binder_after_buying', 'dest': 'close'},
    {'trigger': 'go_to_login', 'source': 'close', 'dest': 'login'}
]
import subprocess, time
start = time.time()
from queue_with_capacity import QueueWithMaxCapacity
class MTGO_bot(object):
    """Drives the MTGO desktop client via pywinauto/pyautogui to execute
    buy-low/sell-high trades recorded in the sqlite `records` table.

    Used as the model for a `transitions.Machine`, which injects the
    go_to_* trigger methods and the `state` attribute at runtime.
    db_record layout (by index, as used below): 0=Id, 1=card name, 2=set
    code, 3=max buy price, 5=seller bot, 6=buyer bot, 8=quantity,
    9=foil flag.
    """

    def __init__(self):
        """Attach to a running MTGO.exe, launching it first if necessary."""
        self.last_trades = QueueWithMaxCapacity(10)
        try:
            self.app = Application(backend="uia").connect(path='MTGO.exe')
            self.db_record = ""
            self.trade_status = TradeStatus.NONE
        except:
            # Client not running: launch via its ClickOnce shortcut and retry.
            subprocess.Popen(['cmd.exe', '/c', r'C:\Users\IEUser\Desktop\mtgo.appref-ms'])
            time.sleep(5)
            self.app = Application(backend="uia").connect(path='MTGO.exe')
            self.db_record = ""
            self.trade_status = TradeStatus.NONE

    def close_mtgo(self):
        """Force-kill the MTGO client."""
        os.system("taskkill /f /im MTGO.exe")

    def login(self):
        """Log in, dismiss dialogs, clear the trade binder and load tickets.

        NOTE(review): account credentials are hard-coded in plain text here —
        consider moving them to configuration.
        """
        print("Starting...")
        self.last_trades = QueueWithMaxCapacity(10)
        try:
            click_rectangle(self.app.top_window().child_window(auto_id = "CloseButton").rectangle())
        except:
            pass
        try:
            self.app['Magic: The Gathering Online'].window(auto_id="UsernameTextBox").type_keys("VerzillaBot")
            self.app['Magic: The Gathering Online'].window(auto_id="PasswordBox").type_keys("Lastborn220")
            time.sleep(2.5)
            self.app['Magic: The Gathering Online'].window(auto_id="PasswordBox").type_keys("{ENTER}")
            pyautogui.press('enter')
            time.sleep(20)
            try:
                click_rectangle(self.app.top_window().child_window(auto_id="CloseButton").rectangle())
            except:
                pass
            click_collection(self.app)
            time.sleep(10)
            click_trade(self.app)
            time.sleep(10)
        except:
            pass
        try:
            click_rectangle(self.app.top_window().child_window(auto_id = "CloseButton").rectangle())
        except:
            pass
        try:
            click_collection(self.app)
            click_rectangle(self.app['Magic: The Gathering Online'].window(title="ABinder", found_index=0).rectangle())
            click_collection(self.app)
        except:
            pass
        # Empty the current deck pane ("Remove All" until no slots remain).
        while True:
            try:
                rect = self.app['Magic: The Gathering Online'].child_window(auto_id="DeckPane").child_window(title_re="Item: CardSlot:",
                                                                                                             found_index=0).rectangle()
                right_click_rectangle(rect)
                click_rectangle(self.app['Magic: The Gathering Online'].child_window(title_re="Remove All", found_index=0).rectangle())
            except:
                break
        # Load Event Tickets into the binder so buys can be paid for.
        try:
            click_rectangle(self.app['Magic: The Gathering Online'].window(title="Other Products", found_index=1).rectangle())
            self.app['Magic: The Gathering Online'].window(auto_id="searchTextBox").type_keys("event{SPACE}tickets{ENTER}")
            right_click_rectangle(
                self.app['Magic: The Gathering Online'].child_window(title_re="Item: CardSlot: Event", found_index=0).rectangle())
        except:
            self.close_mtgo()
            self.trade_status = TradeStatus.BIG_FAILURE
        try:
            click_rectangle(self.app['Magic: The Gathering Online'].child_window(title_re="Add All to", found_index=0).rectangle())
        except:
            try:
                click_rectangle(self.app['Magic: The Gathering Online'].child_window(title_re="Add 1 to", found_index=0).rectangle())
            except:
                # Dismiss the context menu by clicking just off it.
                pyautogui.moveRel(-10, 0)
                pyautogui.click()
                pass

    def switch_bot(self):
        """Rotate db_record[6] to a sibling bot of the same family and search for it."""
        if self.db_record[6] == "HotListBot3":
            self.db_record[6] = "HotListBot4"
        elif self.db_record[6] == "HotListBot4":
            self.db_record[6] = "HotListBot"
        elif self.db_record[6] == "HotListBot":
            self.db_record[6] = "HotListBot2"
        elif self.db_record[6] == "HotListBot2":
            self.db_record[6] = "HotListBot3"
        if self.db_record[6] == "GoatBots1":
            self.db_record[6] = "GoatBots2"
        elif self.db_record[6] == "GoatBots2":
            self.db_record[6] = "GoatBots3"
        elif self.db_record[6] == "GoatBots3":
            self.db_record[6] = "GoatBots1"
        self.app['Magic: The Gathering Online'].window(auto_id="searchTextBox").type_keys(self.db_record[6] + "{ENTER}")

    def all_bad_trades(self):
        """True when the last 10 recorded trades all failed.

        NOTE(review): iterates self.last_trades.queue directly — assumes
        QueueWithMaxCapacity exposes an iterable queue; confirm.
        """
        if self.last_trades.queue.qsize() < 10:
            return False
        for item in self.last_trades.queue:
            if item == TradeStatus.SUCCESS:
                return False
        return True

    def click_bot_trade(self, botname, binder):
        """Open a trade with *botname* using *binder*; False after 4 failed tries."""
        index = 0
        while True:
            try:
                index += 1
                if index == 5:
                    return False
                go_to_rectangle(self.app['Magic: The Gathering Online'].window(title=botname, found_index=1).rectangle())
                click_rectangle(self.app['Magic: The Gathering Online'].window(title="Trade", found_index=1).rectangle())
                time.sleep(1)
                click_rectangle(self.app.top_window().window(auto_id=binder, found_index=0).rectangle())
                click_ok_button(self.app)
                return True
            except:
                pass

    def is_trade_cancelled(self):
        """Dismiss a 'Trade Canceled' dialog if present; True when one was shown."""
        try:
            self.app.top_window().window(title="Trade Canceled", found_index=1).rectangle()
            click_rectangle(self.app.top_window().window(auto_id="OkButton", found_index=0).rectangle())
            return True
        except:
            return False

    def is_trade_stalled(self):
        """Cancel a stuck 'Trade Request' dialog if present; True when one was found."""
        try:
            click_rectangle(self.app.top_window().window(title="Trade Request", found_index=0).window(title="Cancel", found_index = 0).rectangle())
            return True
        except:
            return False

    def get_tix_number_buy(self):
        """Parse the quoted buy price from the trade chat; None on any failure.

        GoatBots print "<card name> (<price>)"; other bots print
        "YOU RECEIVE ... (<price>)".
        """
        try:
            import io
            import sys
            # print_control_identifiers() writes to stdout — capture it.
            stringio = io.StringIO()
            previous_stdout = sys.stdout
            sys.stdout = stringio
            self.app.top_window().window(auto_id="ChatItemsControl").print_control_identifiers()
            sys.stdout = previous_stdout
            string = stringio.getvalue()
            if self.db_record[5].startswith("Goat"):
                pos = string.rfind(self.db_record[1])
                pos1 = string.find("(", pos)
                pos2 = string.find(")", pos)
                price = float(string[pos1 + 1: pos2])
                print(price)
                return price
            else:
                # Card name not in chat yet: re-capture the pane once more.
                if string.rfind(self.db_record[1]) == -1:
                    stringio = io.StringIO()
                    previous_stdout = sys.stdout
                    sys.stdout = stringio
                    self.app.top_window().window(auto_id="ChatItemsControl").print_control_identifiers()
                    sys.stdout = previous_stdout
                    string = stringio.getvalue()
                pos = string.rfind("YOU RECEIVE ")
                pos1 = string.find("(", pos)
                pos2 = string.find(")", pos)
                if " " in string[pos1:pos2]:
                    pos1 = string.find(" ", pos1)
                # print(string[pos1 + 1: pos2])
                price = float(string[pos1 + 1: pos2])
                print(price)
                return price
        except:
            return None

    def check_inventory(self):
        """True when the collection already contains the card in db_record.

        The bare print(".") calls are debug progress markers.
        """
        click_collection(self.app)
        print(".")
        click_rectangle(self.app.top_window().window(title = "Cards", found_index=1).rectangle())
        click_rectangle(self.app.top_window().window(auto_id="FilterCards-ResetFilterText").rectangle())
        self.app.top_window().window(auto_id="searchTextBox").type_keys(
            self.db_record[1].replace(" ", "{SPACE}") + "{ENTER}")
        print("..")
        try:
            click_rectangle(self.app.top_window().window(auto_id="FilterCards-Option" + set_abbr[self.db_record[2]]).rectangle())
            print("..")
        except:
            # Set filter group collapsed: expand the header and retry once.
            click_rectangle(self.app.top_window().window(auto_id="FilterCards-HeaderSet-Text").rectangle())
            print(".....")
            time.sleep(0.5)
            try:
                click_rectangle(self.app.top_window().window(auto_id="FilterCards-Option" + set_abbr[self.db_record[2]]).rectangle())
            except:
                pass
        try:
            time.sleep(0.5)
            click_rectangle(self.app.top_window().child_window(auto_id="CollectionLayoutView").child_window(title_re="Item: Card", found_index = 0))
            print("....")
            return True
        except:
            print(".....")
            return False
        pass

    def buy_card(self):
        """Pick a random record from the DB and buy the card from its seller bot.

        Records whose bots are not in the trusted lists are deleted.  The
        numbered print(2)/print(3)/... calls are debug progress markers.
        """
        try:
            click_rectangle(self.app['ToastView'].child_window(auto_id = "CloseButton").rectangle())
        except:
            pass
        try:
            self.trade_status = TradeStatus.NONE
            print("Go to buy card...")
            # Local cursor shadows the module-level `cursor` on purpose.
            cursor = con.cursor()
            command = "SELECT * FROM records ORDER BY RANDOM() LIMIT 1;"
            records = cursor.execute(command).fetchall()
            while len(records) == 0:
                time.sleep(10)
                records = cursor.execute(command).fetchall()
            self.db_record = list(records[0])
            # Keep drawing until both the seller and the buyer are trusted,
            # deleting each rejected record as we go.
            while self.db_record[5] not in trusted_sell_bots or (self.db_record[6] not in trusted_buy_bots and self.db_record[6] not in mtgolibrary_buy_bots):
                command = "DELETE FROM records WHERE Id = ?;"
                cursor.execute(command, [self.db_record[0]]).fetchall()
                command = "SELECT * FROM records ORDER BY RANDOM() LIMIT 1;"
                records = cursor.execute(command).fetchall()
                while len(records) == 0:
                    time.sleep(10)
                    records = cursor.execute(command).fetchall()
                self.db_record = list(records[0])
            appendix = "(foil)" if (int(self.db_record[9]) == 1) else "(regular)"
            print("Buying " + str(self.db_record[8]) + "x"+ self.db_record[1] + "(" + self.db_record[2] + ") from " + self.db_record[5] + " " + appendix)
            # Normalise scraped bot names whose capitalisation differs from
            # their in-client account names.
            if self.db_record[5] == "Applegrove":
                self.db_record[5] = "AppleGrove"
            if self.db_record[5] == "VRTStorebot3":
                self.db_record[5] = "VRTStoreBot3"
            if self.db_record[5] == "VRTStorebot2":
                self.db_record[5] = "VRTStoreBot2"
            if self.db_record[5] == "VRTStorebot":
                self.db_record[5] = "VRTStoreBot"
            if self.db_record[5] == "VRTSToreBot":
                self.db_record[5] = "VRTStoreBot"
            if self.db_record[5] == "VRTSToreBot2":
                self.db_record[5] = "VRTStoreBot2"
            if self.db_record[5] == "VRTSToreBot3":
                self.db_record[5] = "VRTStoreBot3"
            if self.db_record[5] == "Manatraders_booster1":
                self.db_record[5] = "ManaTraders_Booster1"
            if self.db_record[5] == "Manatraders_seller1":
                self.db_record[5] = "ManaTraders_Seller1"
            if self.db_record[5] == "Manatraders_seller2":
                self.db_record[5] = "ManaTraders_Seller2"
            if self.db_record[5] == "Manatraders_seller3":
                self.db_record[5] = "ManaTraders_Seller3"
            if self.db_record[5] == "Manatraders_seller4":
                self.db_record[5] = "ManaTraders_Seller4"
            if self.db_record[5] == "Manatraders_seller5":
                self.db_record[5] = "ManaTraders_Seller5"
            if self.db_record[5] == "Vintage-Cardbot2":
                self.db_record[5] = "Vintage-cardbot2"
            #if self.check_inventory():
            #    command = "DELETE FROM records WHERE Id = ?;"
            #    cursor.execute(command, [self.db_record[0]]).fetchall()
            #    return
            time.sleep(1)
            try:
                click_trade(self.app)
                self.app['Magic: The Gathering Online'].window(auto_id="searchTextBox").type_keys(self.db_record[5] + "{ENTER}")
            except:
                return
            if not self.click_bot_trade(self.db_record[5], "ABinder"):
                print("Bot is offline")
                self.is_trade_cancelled()
                self.last_trades.add(TradeStatus.BOT_OFFLINE)
                return
            time.sleep(5)
            number_of_cancelled_trades = 0
            while self.is_trade_cancelled():
                number_of_cancelled_trades += 1
                if number_of_cancelled_trades == 5:
                    self.last_trades.add(TradeStatus.BOT_OFFLINE)
                    self.trade_status = TradeStatus.BOT_OFFLINE
                    return
                self.click_bot_trade(self.db_record[5], "ABinder")
                time.sleep(3)
            if self.is_trade_stalled():
                return
            print(2)
            try:
                time.sleep(2)
                click_rectangle(self.app.top_window().window(auto_id="FilterCards-ResetFilterText").rectangle())
                time.sleep(0.1)
                # Foil purchases are capped at 2 copies and need the
                # "Show Foils" filter enabled first.
                if int(self.db_record[9]) == 1:
                    self.db_record[8] = str(min(int(self.db_record[8]), 2))
                    click_rectangle(self.app.top_window().window(title="Versions", found_index = 0).rectangle())
                    click_rectangle(self.app.top_window().window(title="Show Foils", found_index = 0).rectangle())
                    time.sleep(0.1)
                self.app.top_window().window(auto_id="searchTextBox").type_keys(self.db_record[1].replace(" ", "{SPACE}") + "{ENTER}")
            except:
                print("Unexpected error:", sys.exc_info()[0])
                traceback.print_exc(file=sys.stdout)
                return
            print(3)
            try:
                click_rectangle(self.app.top_window().window(auto_id="FilterCards-HeaderSet-Text").rectangle())
                click_rectangle(self.app.top_window().window(auto_id="FilterCards-Option" + set_abbr[self.db_record[2]]).rectangle())
                time.sleep(1.5)
                print(4)
                double_click_multiple(self.app.top_window().child_window(title_re="Item: CardSlot: " + self.db_record[1].split(",")[0], found_index = 0), int(self.db_record[8]))
            except:
                print("Unexpected error:", sys.exc_info()[0])
                traceback.print_exc(file=sys.stdout)
                command = "DELETE FROM records WHERE Id = ?;"
                cursor.execute(command, [self.db_record[0]]).fetchall()
                click_rectangle(self.app.top_window().window(title="Cancel Trade", found_index=1).rectangle())
                close_chat(self.app)
                return
            print(5)
            time.sleep(8)
            # Abort when the bot quotes more than our recorded maximum price.
            price = self.get_tix_number_buy()
            if price is not None and price > float(self.db_record[3]):
                command = "DELETE FROM records WHERE Id = ?;"
                cursor.execute(command, [self.db_record[0]]).fetchall()
                click_rectangle(self.app.top_window().window(title="Cancel Trade", found_index=1).rectangle())
                close_chat(self.app)
                return
            time.sleep(2)
            click_rectangle(self.app.top_window().window(title="Submit", found_index=1).rectangle())
            time.sleep(5)
            try:
                click_rectangle(self.app.top_window().window(title="Submit", found_index=1).rectangle())
            except:
                pass
            print(6)
            time.sleep(1)
            index = 0
            while True:
                try:
                    index += 1
                    click_rectangle(self.app.top_window().window(title="Confirm Trade", found_index=1).rectangle())
                    break
                except:
                    time.sleep(1)
                    if index >= 10:
                        self.trade_status = TradeStatus.BIG_FAILURE
                        return
                    pass
            print(4)
            close_chat(self.app)
            print(5)
            # Dismiss the post-trade summary (or its OK-only variant).
            index = 0
            while True:
                try:
                    click_rectangle(self.app['ToastView'].child_window(auto_id="CloseButton").rectangle())
                except:
                    pass
                try:
                    index += 1
                    time.sleep(2)
                    click_rectangle(self.app.top_window().window(title="Added to your Collection:", found_index = 0).window(auto_id="TitleBarCloseButton").rectangle())
                    break
                except:
                    try:
                        click_rectangle(self.app.top_window().window(auto_id="OkButton", found_index=0).rectangle())
                        break
                    except:
                        if index >= 20:
                            self.trade_status = TradeStatus.BIG_FAILURE
                            return
                        pass
            command = "DELETE FROM records WHERE Id = ?;"
            cursor.execute(command, [self.db_record[0]]).fetchall()
            self.trade_status = TradeStatus.SUCCESS
            self.last_trades.add(TradeStatus.SUCCESS)
        except:
            print("Unexpected error:", sys.exc_info()[0])
            traceback.print_exc(file=sys.stdout)
            command = "DELETE FROM records WHERE Id = ?;"
            cursor.execute(command, [self.db_record[0]]).fetchall()
            self.trade_status = TradeStatus.BIG_FAILURE

    def sell_card(self):
        """Sell the just-bought card(s) to the buyer bot in db_record[6]."""
        try:
            click_rectangle(self.app.top_window().child_window(auto_id = "CloseButton").rectangle())
        except:
            pass
        try:
            self.trade_status = TradeStatus.NONE
            print("Go to sell card...")
            print("Selling " + self.db_record[0] + " to " + self.db_record[6])
            try:
                click_trade(self.app)
                self.app.top_window().window(auto_id="searchTextBox").type_keys(self.db_record[6] + "{ENTER}")
            except:
                return
            # Rotate through sibling bots until a trade actually opens.
            while not self.click_bot_trade(self.db_record[6], "Full Trade List") or self.is_trade_cancelled() or self.is_trade_stalled():
                self.switch_bot()
            time.sleep(6)
            window_sell_name = "Trade: " + self.db_record[6]
            # mtgolibrary bots need an explicit "sell" chat command first.
            if self.db_record[6] in mtgolibrary_buy_bots:
                try:
                    self.app.top_window().window(auto_id="ChatSendEditBox").type_keys("sell{ENTER}")
                except:
                    pass
                self.app.top_window().window(auto_id="ChatSendEditBox").type_keys("{ENTER}")
            try:
                num_of_tix = get_tix_number(self.app, self.db_record[6])
            except:
                raise Exception
            # Add the agreed number of Event Tickets to our side of the trade.
            try:
                if num_of_tix != 0:
                    click_rectangle(self.app[window_sell_name].window(title="Other Products", found_index=1).rectangle())
                    if self.db_record[6].startswith("Goat"):
                        self.app[window_sell_name].window(auto_id="searchTextBox").type_keys("event{SPACE}tickets{ENTER}")
                    double_click_multiple(self.app[window_sell_name].child_window(title_re="Item: CardSlot: Event", found_index=0), num_of_tix)
            except:
                pass
            click_rectangle(self.app[window_sell_name].window(title="Submit", found_index=1).rectangle())
            time.sleep(5)
            try:
                click_rectangle(self.app[window_sell_name].window(title="Submit", found_index=1).rectangle())
            except:
                pass
            time.sleep(3)
            index = 0
            while True:
                try:
                    index += 1
                    click_rectangle(self.app[window_sell_name].window(title="Confirm Trade", found_index=1).rectangle())
                    time.sleep(1)
                    break
                except:
                    if index == 10:
                        raise NoConfirmTradeException()
                    pass
            close_chat(self.app)
            index = 0
            while True:
                try:
                    index += 1
                    print("Trying to close window with stuff")
                    click_rectangle(self.app.top_window().window(title="Added to your Collection:", found_index = 0).window(auto_id="TitleBarCloseButton").rectangle())
                    time.sleep(1)
                    break
                except:
                    if index == 20:
                        self.trade_status = TradeStatus.BIG_FAILURE
                        return
                    try:
                        print("Trying to close window without stuff")
                        click_rectangle(self.app.top_window().window(auto_id="OkButton", found_index=0).rectangle())
                        break
                    except:
                        pass
            self.trade_status = TradeStatus.SUCCESS
        except NoConfirmTradeException:
            # Confirm button never appeared: back out of the trade cleanly.
            try:
                click_rectangle(self.app.top_window().window(title="Cancel Trade", found_index=0).rectangle())
                close_chat(self.app)
            except:
                print(sys.exc_info()[0])
                print(sys.exc_info()[1])
                traceback.print_exc(file=sys.stdout)
                pass
        except:
            print("Unexpected error:", sys.exc_info()[0])
            print("Unexpected error:", sys.exc_info()[1])
            traceback.print_exc(file=sys.stdout)
            self.trade_status = TradeStatus.BIG_FAILURE

    def update_binder_after_buy(self):
        """Reset trade status after a buy.

        NOTE(review): the early `return` below makes the rest of this method
        (re-adding the bought cards to the collection view) dead code —
        confirm whether it was disabled intentionally.
        """
        try:
            click_rectangle(self.app.top_window().child_window(auto_id = "CloseButton").rectangle())
        except:
            pass
        self.trade_status = TradeStatus.NONE
        print("Go to update values...")
        return
        while True:
            try:
                click_collection(self.app)
                break
            except:
                time.sleep(1)
        time.sleep(1)
        click_rectangle(self.app.top_window().window(title="Cards", found_index=1).rectangle())
        time.sleep(1)
        self.app.top_window().window(auto_id="searchTextBox").type_keys(self.db_record[1].replace(" ", "{SPACE}") + "{ENTER}")
        time.sleep(1)
        try:
            double_click_multiple(self.app.top_window().child_window(auto_id="CollectionLayoutView").child_window(title_re="Item: Card", found_index = 0), int(self.db_record[8]))
        except:
            pass

    def update_binder_after_sell(self):
        """Reset trade status and reload Event Tickets into the binder."""
        try:
            click_rectangle(self.app.top_window().child_window(auto_id = "CloseButton").rectangle())
        except:
            pass
        self.trade_status = TradeStatus.NONE
        print("Go to update values...")
        click_collection(self.app)
        time.sleep(1)
        click_rectangle(self.app.top_window().window(title="Other Products", found_index=1).rectangle())
        self.app.top_window().window(auto_id="searchTextBox").type_keys("event{SPACE}tickets{ENTER}")
        right_click_rectangle(self.app.top_window().child_window(auto_id="CollectionLayoutView").child_window(title_re="Item: CardSlot: Event", found_index=0).rectangle())
        try:
            click_rectangle(self.app.top_window().child_window(title_re="Add All to", found_index=0).rectangle())
        except:
            try:
                click_rectangle(self.app.top_window().child_window(title_re="Add 1 to", found_index=0).rectangle())
            except:
                # Dismiss the context menu by clicking just off it.
                pyautogui.moveRel(-10, 0)
                pyautogui.click()
                pass

    def close(self):
        """Force-kill the MTGO client.  NOTE(review): duplicates close_mtgo()."""
        os.system("taskkill /f /im MTGO.exe")
# Main driver: create the bot, wire it to the state machine (which injects
# go_to_* triggers and `state`), then loop buy -> update -> sell -> update
# forever.  On BIG_FAILURE or 10 consecutive failed trades the client is
# killed and the bot re-initialised in place via __init__().
# NOTE(review): the outer bare `except: pass` silently swallows every error
# (including KeyboardInterrupt) and restarts from scratch.
while True:
    try:
        my_bot = MTGO_bot()
        my_MTGO_bot_Machine = Machine(model=my_bot, states=states_my, transitions=transitions, initial='initial')
        print(my_bot.state)
        my_bot.go_to_login()
        while True:
            # Inner loop: retry buying until one purchase succeeds.
            while True:
                if my_bot.trade_status == TradeStatus.BIG_FAILURE or my_bot.all_bad_trades():
                    my_bot.go_to_restart()
                    my_bot.__init__()
                    my_bot.go_to_login()
                if my_bot.trade_status == TradeStatus.SUCCESS:
                    break
                if my_bot.trade_status == TradeStatus.NONE or my_bot.trade_status == TradeStatus.BOT_OFFLINE:
                    my_bot.go_to_buy()
            my_bot.go_to_update()
            if my_bot.trade_status == TradeStatus.NONE or my_bot.trade_status == TradeStatus.BOT_OFFLINE:
                my_bot.go_to_sell()
                if my_bot.trade_status == TradeStatus.BIG_FAILURE:
                    my_bot.go_to_restart()
                    my_bot.__init__()
                    my_bot.go_to_login()
                    continue
                my_bot.go_to_update()
    except:
        pass
#app = Application(backend="uia").start("notepad.exe")
| {"/candle_factory_5.py": ["/mtgolibrary.py", "/card.py"], "/goatbots_cardhoarder.py": ["/card.py", "/mtgolibrary.py"], "/number_parser.py": ["/card.py", "/queue_with_capacity.py"], "/mtgolibrary.py": ["/card.py", "/number_parser.py"], "/mtgo_controller.py": ["/queue_with_capacity.py"]} |
50,260 | 1dayac/magic_bot | refs/heads/master | /card.py | class Price:
    def __init__(self):
        # NOTE(review): dead code — this zero-argument __init__ is
        # immediately overridden by the six-argument __init__ defined just
        # below (Python keeps only the last definition of a name in a class
        # body), so Price() cannot actually be called without arguments.
        self.url = ""
        self.buy_price = 0.0
        self.sell_price = 0.0
        self.bot_name_buy = ""
        self.bot_name_sell = ""
        self.number = 0
    def __init__(self, url, buy_price, sell_price, bot_name_buy, bot_name_sell, number):
        """Record one buy/sell quote pair: source URL, prices, the two bot
        names quoting them, and the quantity available."""
        self.url = url
        self.buy_price = buy_price
        self.sell_price = sell_price
        self.bot_name_buy = bot_name_buy
        self.bot_name_sell = bot_name_sell
        self.number = number
    def __str__(self):
        """Tab-separated dump: buy price, buyer bot, sell price, seller bot
        (with a trailing tab, so Card.__str__ can concatenate quotes)."""
        return str(self.buy_price) + "\t" + self.bot_name_buy + "\t" + str(self.sell_price) + "\t" + self.bot_name_sell +"\t"
class Card:
    """A card (name, set, foil flag) plus its collected bot price quotes.

    ``prices`` holds Price objects; ``None`` entries may appear (the Best*
    helpers already tolerated them) and are now skipped everywhere.
    """

    def __init__(self, name="", set="", prices=None, foil=False):
        """Create a card; *prices*, when given, is a single quote to seed the list.

        The original class declared two __init__ methods; Python keeps only
        the last one, so the zero-argument form was dead code.  Default
        values restore it: Card() and Card(name, set, price, foil) both work.
        """
        self.name = name
        self.set = set
        self.prices = [] if prices is None else [prices]
        self.foil = foil

    def AddPrice(self, price):
        """Attach one more price quote to this card."""
        self.prices.append(price)

    def __hash__(self):
        # Identity key is name+set.  NOTE(review): foil is not part of the
        # hash and __eq__ is not defined (equality stays object identity) —
        # confirm before using cards as set/dict keys for deduplication.
        return hash(self.name + self.set)

    def MaxBuyPrice(self):
        """Highest positive buy quote, or 0.0 when there is none.

        Robustness fix: None entries in ``prices`` no longer raise
        AttributeError (consistent with BestBuyPrice, which skips them).
        """
        return max((p.buy_price for p in self.prices if p is not None and p.buy_price > 0), default=0.0)

    def MinSellPrice(self):
        """Lowest positive sell quote, or the 100000.0 sentinel when none."""
        return min((p.sell_price for p in self.prices if p is not None and p.sell_price > 0), default=100000.0)

    def __str__(self):
        # Tab-separated dump: name, set, foil flag, concatenated quotes, spread.
        quotes = "".join(str(price) for price in self.prices)
        return (self.name + "\t" + self.set + "\t" + str(self.foil) + "\t"
                + quotes + "\t" + str(self.MaxBuyPrice() - self.MinSellPrice()))

    def BestSellPrice(self):
        """The quote with the lowest sell_price, skipping None entries; None if empty."""
        candidates = [p for p in self.prices if p is not None]
        if not candidates:
            return None
        # min() returns the first minimum, matching the original scan order.
        return min(candidates, key=lambda p: p.sell_price)

    def BestBuyPrice(self):
        """The quote with the highest buy_price, skipping None entries; None if empty."""
        candidates = [p for p in self.prices if p is not None]
        if not candidates:
            return None
        # max() returns the first maximum, matching the original scan order.
        return max(candidates, key=lambda p: p.buy_price)
| {"/candle_factory_5.py": ["/mtgolibrary.py", "/card.py"], "/goatbots_cardhoarder.py": ["/card.py", "/mtgolibrary.py"], "/number_parser.py": ["/card.py", "/queue_with_capacity.py"], "/mtgolibrary.py": ["/card.py", "/number_parser.py"], "/mtgo_controller.py": ["/queue_with_capacity.py"]} |
50,266 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/main.py | from time import sleep
from .gled import clear, cock_shut, cockcrow, overnight
def morning():
    """Run the sunrise light cycle: cockcrow LEDs on for five minutes."""
    cockcrow()
    print("\n Cockcrow!! ON!")
    duration = 300  # five minutes
    sleep(duration)
    clear()
    print("\n Cockcrow!!! OFF!")
def evening():
    """Run the sunset light cycle: cockshut LEDs on for five minutes.

    Fix: removed the redundant function-local ``from time import sleep`` —
    ``sleep`` is already imported at module level.
    """
    cock_shut()
    print("\n Cockshut, ON!")
    sleep(300)
    clear()
    print("\n Cockshut, OFF!")
def light_ctrl():
    """Dispatch to the light routine selected by the global KEY.

    NOTE(review): KEY is never defined in this module, and day_ctrl()/
    night_ctrl() only exist inside the commented-out block below, so every
    branch except 'cockshut' currently raises NameError — confirm where KEY
    and the day/night controllers are meant to come from (MQTT handler?).
    """
    if KEY == 'cockcrow': # wawr LED
        morning()
    elif KEY == 'day': # day lights
        day_ctrl()
    elif KEY == 'cockshut': # cockshut LED
        cock_shut()
    elif KEY == 'overnight': # night lights
        night_ctrl()
    else:
        print('error... esp_light_controller')
# NOTE(review): the block below is commented-out draft code (a module-level
# string literal with no runtime effect) for the day/night lamp controllers
# that light_ctrl() expects to call.
'''
def day_ctrl(): # control day lamps
    if lpd_hot_day == 'cold_day': # day heat lamp
        lpd_day.on()
        return print("\n Heat lamp, ON!")
    elif lpd_hot_day == 'hot_day':
        lpd_day.off()
        return print("\n Heat lamp, OFF!")
    elif uv_on == 'rooster': # UV light
        lpd_uv.on()
        return print("\n UV, ON!")
    elif uv_off == 'rooster':
        lpd_uv.off()
        return print("\n UV, OFF!")
    elif uva_on == 'rooster': # UVA-B light
        lpd_uva.on()
        return print("\n UVA-B, ON!")
    elif uva_off == 'rooster':
        lpd_uva.off()
        return print("\n UVA-B, OFF!")
    else:
        print('error... esp_day_controller')

def night_ctrl(): # control overnight lights
    if hot_night == 'cold_night': # heat lamp night
        lpd_night.on()
        return print("\n Overnight heat lamp, ON!")
    elif hot_night == 'hot_night':
        lpd_night.off()
        print("\n Overnight heat lamp, OFF!")
        overnight() # overnight LED
        print("\n Overnight LED light, ON!")
    else:
        clear()
        print("\n Overnight LED light, OFF!")
'''
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,267 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/gjson.py | import ujson
def w_dht(area, temp, humid):
    """Persist one DHT reading (area label, temperature, humidity) to gdht.json."""
    payload = {
        "dht": {
            "area": area,
            "temperature": temp,
            "humidity": humid,
        },
    }
    with open("gdht.json", "w") as fp:
        ujson.dump(payload, fp)
def r_dht():
    """Load and return the last DHT reading stored in gdht.json."""
    with open("gdht.json", "r") as fp:
        return ujson.load(fp)
def w_lamp(uv, uva, rgb, day, night):
    """Persist the on/off state of every lamp to glamp.json."""
    keys = ("uv", "uva", "led", "h_day", "h_night")
    states = {"lamps": dict(zip(keys, (uv, uva, rgb, day, night)))}
    with open("glamp.json", "w") as fp:
        ujson.dump(states, fp)
def r_lamp():
    """Load and return the lamp states stored in glamp.json."""
    with open("glamp.json", "r") as fp:
        return ujson.load(fp)
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,268 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/gpublish.py | from time import sleep
from umqtt.simple import MQTTClient
from .gdht import d_one, d_two
from .glamp import state_lamp
def data_pub(server="192.168.0.102"):
    """Publish DHT readings and lamp states to the MQTT broker, forever.

    Connects to the broker at *server*, then every two seconds publishes the
    temperature/humidity readings and the current lamp states on the
    b"tempone" topic.  Runs until interrupted.
    """
    c = MQTTClient("umqtt_client", server)
    c.connect()
    try:
        # d_one()/d_two() return (area, temp, humid) 3-tuples; the original
        # unpacked them into two names, which raises ValueError.
        _, temp1, humid1 = d_one()  # hot side
        _, temp2, humid2 = d_two()  # cold side
        all_measurements = [temp1, humid1, temp2, humid2]
        while True:
            # The original re-published the identical message once per
            # measurement; a single publish per cycle carries the same payload.
            c.publish(b"tempone", "Temp: {}°C\nHumid: {}%\n".format(*all_measurements).encode("utf-8"))
            c.publish(b"tempone", "Light: {}".format(list(state_lamp())).encode("utf-8"))
            sleep(2)
    finally:
        # Unreachable in the original (placed after `while True`); now runs
        # on interrupt/errors so the broker connection is released.
        c.disconnect()
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,269 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/glamp.py | from machine import Pin
def state_lamp():
    """Read and return the on/off value of each lamp control pin.

    Returns:
        tuple: ``(uv, uva, rgb, day, night)`` pin values (0/1).

    NOTE(review): all five outputs are configured on GPIO 5 (D1), so every
    returned value is the same pin read five times — looks like a copy-paste
    leftover; presumably each lamp has its own pin. TODO confirm the wiring.
    """
    lpd_uv = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) UV light
    lpd_uva = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) UVA-B light
    led_rgb = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) RGB LED overnight
    lpd_day = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) day heat lamp
    lpd_night = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) night heat lamp
    uv = lpd_uv.value()
    uva = lpd_uva.value()
    rgb = led_rgb.value()
    day = lpd_day.value()
    night = lpd_night.value()
    return uv, uva, rgb, day, night
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,270 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/gled.py | from machine import Pin
from neopixel import NeoPixel
# --- rgb led --- #
# the number 12 is number of led
rgb_led = NeoPixel(Pin(5), 12) # (D1) overnight RGB LED
# NOTE(review): the NeoPixel strip and all four lamp outputs below share
# GPIO 5 (D1); presumably each device has its own pin — TODO confirm wiring.
lpd_uv = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) UV light
lpd_uva = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) UVA-B light
lpd_day = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) day heat lamp
lpd_night = Pin(5, Pin.OUT, Pin.PULL_UP) # (D1) night heat lamp
def clear():
    """Switch the whole strip off by writing black to all 12 pixels."""
    off = (0, 0, 0)
    for idx in range(12):
        rgb_led[idx] = off
    rgb_led.write()
def _fill(colour):
    # Write one colour to all 12 pixels and latch it onto the strip.
    # The originals repeated this identical fill-and-write 4*12 times with an
    # unused loop counter; the final strip state is the same after one pass.
    for j in range(12):
        rgb_led[j] = colour
    rgb_led.write()

def cockcrow():
    """Light the whole strip with the warm "sunrise" colour."""
    _fill((64, 35, 0))

def cock_shut():
    """Light the whole strip with the amber "sunset" colour."""
    _fill((128, 88, 0))

def overnight():
    """Light the whole strip with the dim blue-violet night colour."""
    _fill((15, 0, 45))
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,271 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/gdht.py | from time import sleep
import dht
from machine import Pin
def _read_dht(area):
    # Take one blocking measurement from the DHT22 and label it with *area*.
    # NOTE(review): both sensors were wired to GPIO 4 in the original, so the
    # "hot" and "cold" readings come from the same device — TODO confirm the
    # cold-side sensor's actual pin.
    sensor = dht.DHT22(Pin(4, Pin.IN, Pin.PULL_UP))
    sensor.measure()
    temp = sensor.temperature() # eg. 30.6 (°C)
    humid = sensor.humidity() # eg. 25.3 (% RH)
    return area, temp, humid

def d_one(): # ( Sensor DHT hot side)
    """Return ``(area, temp, humid)`` for the hot-side sensor."""
    return _read_dht("hot")

def d_two(): # ( Sensor DHT cold side)
    """Return ``(area, temp, humid)`` for the cold-side sensor."""
    return _read_dht("cold")
def d_main():
    """Poll both DHT sensors every two seconds, forever.

    The readings are taken but discarded; runs until interrupted.
    """
    while True:
        d_one()
        d_two()
        sleep(2)
if __name__ == '__main__':
d_main()
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,272 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/gconnect.py | import network
import ujson
def key():
    """Read the Wi-Fi credentials stored in key.json.

    Returns:
        tuple: ``(ssid, password)`` from the "ssid"/"pswd" keys.
    """
    with open("key.json", "r") as read_file:
        k = ujson.load(read_file)
    # ujson.load returns a plain dict, so the credentials must be read with
    # item access; the original's attribute access (k.ssid) raised
    # AttributeError.
    return k["ssid"], k["pswd"]
def do_connect():
    """Bring up the station interface and block until Wi-Fi is connected."""
    lan = network.WLAN(network.STA_IF)
    lan.active(True)
    if not lan.isconnected():
        print('connecting to network...')
        # NOTE(review): credentials are hard-coded here even though key()
        # reads them from key.json — presumably one of the two is the
        # intended source; confirm before shipping.
        lan.connect('AG45', 'fortyfive')
        while not lan.isconnected():
            pass  # busy-wait until the interface reports a connection
    print('network config:', lan.ifconfig())
    print('connected is ', lan.isconnected())
print('connected is ', lan.isconnected())
if __name__ == '__main__':
do_connect()
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,273 | Jcfurtado/PythonProjects | refs/heads/master | /geckoshouse/gfirebase.py | import .firebase.ufirebase as firebase
# This module was a pasted REPL transcript of the ufirebase example; the bare
# output lines (``tell`` / ``me`` / ``everything`` / ``red``) raised NameError
# on import.  The example is reshaped into a callable demo with the expected
# output kept as comments.
# NOTE(review): the module header's ``import .firebase.ufirebase as firebase``
# is not valid Python syntax; it should read
# ``from .firebase import ufirebase as firebase``.
URL = 'lucid-lychee'  # see note on URLs at the bottom of documentation


def demo():
    """Replay the ufirebase documentation example against URL."""
    print(firebase.get(URL))  # this is an empty Firebase -> None

    firebase.put(URL, 'tell me everything')  # can take a string
    print(firebase.get(URL))  # -> tell me everything

    firebase.put(URL, {'lucidity': 9001})  # or a dictionary
    print(firebase.get(URL))  # -> {u'lucidity': 9001}

    firebase.put(URL, {'color': 'red'})  # replaces old value
    print(firebase.get(URL))  # -> {u'color': u'red'}

    print(firebase.get(URL + '/color'))  # -> red
| {"/geckoshouse/main.py": ["/geckoshouse/gled.py"], "/geckoshouse/gpublish.py": ["/geckoshouse/gdht.py", "/geckoshouse/glamp.py"]} |
50,275 | andycasey/velociraptor | refs/heads/master | /scripts/npm_collect_the_battery_stars_comparison.py | import pickle
import yaml
import numpy as np
from astropy.io import fits
from astropy.table import Table
# Load the per-star indices and mixture-model results for the comparison set.
with open("data/the-battery-stars-comparison.indices.pkl", "rb") as fp:
    comp_indices = pickle.load(fp)

with open("results/the-battery-stars-comparison.astrometric_unit_weight_error.pkl", "rb") as fp:
    astrometric_results = pickle.load(fp)

with open("results/the-battery-stars-comparison.rv_single_epoch_scatter.pkl", "rb") as fp:
    rv_results = pickle.load(fp)

with open("the-battery-stars.astrometry.yaml", "r") as fp:
    # safe_load: the config is plain data; yaml.load without a Loader is
    # deprecated and can execute arbitrary tags.
    config = yaml.safe_load(fp)

# Load in the data.  Build the subset table inside the context manager so the
# FITS handle is closed afterwards (the original left it open).
with fits.open(config["data_path"]) as image:
    data = image[1].data
    subset = Table(data[comp_indices])

# Attach the mixture-model parameters from both predictors to each star.
keys = ("theta", "mu_single", "sigma_single", "mu_multi", "sigma_multi")
for i, key in enumerate(keys):
    subset["rv_{}".format(key)] = rv_results[comp_indices, i]
    subset["astrometric_{}".format(key)] = astrometric_results[comp_indices, i]

# Keep only stars with finite parameters from both models.
finite = np.isfinite(subset["astrometric_theta"] * subset["rv_theta"])
subset = subset[finite]
print(len(subset))

subset.write("results/the-battery-stars-comparison.results.fits", overwrite=True)
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,276 | andycasey/velociraptor | refs/heads/master | /article/figures/make.py |
"""
Produce figures for the Velociraptor project.
Science verification
--------------------
[X] 1. PDF of known single stars (Soubiran) and binary stars (SB9).
[X] 2. SB9 mass versus period, coloured by tau_single_rv.
[X] 3. SB9 mass versus period, coloured by tau_single_ast.
[X] 4. SB9 RV semi-amplitude vs our estimated RV semi-amplitude, w.r.t alpha, coloured by period.
[X] 5. Like #4, but coloured by other things: eccentricity, etc.
Astrophysics
------------
1. Main-sequence (bp rp vs absolute g-mag), coloured by median single star
probability for each panel: RV, astrometry, joint.
2.
"""
import os
from astropy.io import fits
from astropy.table import Table
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from mpl_utils import mpl_style
plt.style.use(mpl_style)
BASE_PATH = "../../"

# Re-use the catalog if it is already in memory (handy for interactive runs).
try:
    velociraptor
except NameError:
    velociraptor = fits.open("../../results/die-hard-subset.fits")[1].data
else:
    print("Warning: using pre-loaded velociraptor catalog")

# The original referenced `matplotlib.cm`, but only matplotlib.pyplot is
# imported, so the bare name `matplotlib` is undefined (NameError); fetch the
# colormap through pyplot instead.
cmap_binary = plt.get_cmap("coolwarm")
def cross_match(A_source_ids, B_source_ids):
    """Cross-match two lists of Gaia source identifiers.

    Parameters
    ----------
    A_source_ids, B_source_ids : array-like of int
        Source identifiers.

    Returns
    -------
    tuple of ndarray
        ``(ai, bi)`` — indices into A and B of the common identifiers, each
        ordered by ascending identifier value so that (assuming unique ids)
        ``A[ai] == B[bi]`` element-wise.
    """
    # np.long was removed in NumPy 1.24; Gaia source_ids fit in int64.
    A = np.array(A_source_ids, dtype=np.int64)
    B = np.array(B_source_ids, dtype=np.int64)

    ai = np.where(np.in1d(A, B))[0]
    bi = np.where(np.in1d(B, A))[0]

    return (ai[np.argsort(A[ai])], bi[np.argsort(B[bi])])
def _literature_multiplicity_pdfs(source_ids, pdfs,
                                  soubiran_catalog, sb9_catalog, pdf_idx):
    """Slice the PDF grid down to the known single (Soubiran) and known
    binary (SB9) stars, for the model selected by *pdf_idx*.

    Returns ``(pdf_soubiran, pdf_sb9)``.
    """
    # Cross-match our source ids against each literature catalog; only the
    # indices into our own catalog are needed here.
    vl_sb9, _ = cross_match(source_ids, sb9_catalog["source_id"])
    vl_soubiran, _ = cross_match(source_ids, soubiran_catalog["source_id"])
    return (pdfs[pdf_idx, vl_soubiran], pdfs[pdf_idx, vl_sb9])
def plot_literature_multiplicity_pdf(source_ids, pdfs,
                                     soubiran_catalog, sb9_catalog,
                                     pdf_idx=0, colors=None,
                                     **kwargs):
    """Stacked histogram of single-star PDFs for known single (Soubiran) and
    known binary (SB9) stars, for the model selected by ``pdf_idx``.

    Returns the matplotlib figure.  ``kwargs`` is accepted but unused.
    """
    pdf_soubiran, pdf_sb9 = _literature_multiplicity_pdfs(source_ids, pdfs,
                                                          soubiran_catalog,
                                                          sb9_catalog,
                                                          pdf_idx=pdf_idx)
    bins = np.linspace(0, 1, 21)
    # Defaults: the two extreme colours of the binary colormap.
    if colors is None:
        colors = [cmap_binary(255), cmap_binary(0)]
    fig, ax = plt.subplots(figsize=(4.5, 4.5))
    # Reversed so the stacking order matches the legend order.
    ax.hist([pdf_soubiran, pdf_sb9][::-1], bins=bins,
            label=[
                r"$\textrm{Single stars (Soubiran et al. 2009)}$",
                r"$\textrm{Binary stars (Pourbaix et al. 2004)}$"
            ][::-1],
            histtype="barstacked", color=colors[::-1])
    plt.legend(frameon=False)
    ax.set_xlabel(r"$p(\textrm{single}|\sigma_{vr})$")
    ax.set_yticks([])
    ax.xaxis.set_major_locator(MaxNLocator(6))
    fig.tight_layout()
    return fig
def plot_literature_multiplicity_classifications(source_ids, pdfs,
                                                 soubiran_catalog, sb9_catalog,
                                                 colors=None,
                                                 pdf_idx=0, **kwargs):
    """Horizontal stacked bars of hard single/binary classifications for the
    known single (Soubiran) and known binary (SB9) samples.

    A star is classified single when the mean of its PDF draws rounds to 1.
    Returns the matplotlib figure.  ``kwargs`` is accepted but unused.
    """
    pdf_soubiran, pdf_sb9 = _literature_multiplicity_pdfs(source_ids, pdfs,
                                                          soubiran_catalog,
                                                          sb9_catalog,
                                                          pdf_idx=pdf_idx)
    # Round the mean posterior to a hard 0/1 classification per star.
    soubiran_class = np.round(np.mean(pdf_soubiran, axis=1)).astype(int)
    sb9_class = np.round(np.mean(pdf_sb9, axis=1)).astype(int)
    fig, ax = plt.subplots(figsize=(5, 2.5))
    S_single = np.sum(soubiran_class == 1)
    S_binary = np.sum(soubiran_class == 0)
    SB9_single = np.sum(sb9_class == 1)
    SB9_binary = np.sum(sb9_class == 0)
    if colors is None:
        colors = [cmap_binary(255), cmap_binary(0)]
    ax.barh(0, S_single, facecolor=colors[0], label=r"$\textrm{Classified as single}$")
    ax.barh(0, S_binary, facecolor=colors[1], left=S_single)
    ax.barh(1, SB9_binary, facecolor=colors[1], label=r"$\textrm{Classified as binary}$")
    ax.barh(1, SB9_single, facecolor=colors[0], left=SB9_binary)
    plt.legend(frameon=False)
    ax.set_yticks([0, 1])
    ax.tick_params(axis="y", pad=60)
    ax.set_yticklabels([
        r"$\textrm{Known single stars}$" + "\n" + \
        r"$\textrm{(Soubiran et al. 2009)}$",
        r"$\textrm{Known binary stars}$" + "\n" + \
        r"$\textrm{(Pourbaix et al. 2004)}$"
    ], horizontalalignment="center")
    ax.xaxis.set_major_locator(MaxNLocator(6))
    ax.set_xlabel(r"$\textrm{Classifications}$")
    fig.tight_layout()
    return fig
def literature_binary_properties(velociraptor, sb9_catalog, x_label="Per",
                                 y_label="K1", z_label="joint_confidence",
                                 errors=True, log_axes=True, latex_labels=None):
    """Scatter one SB9 orbital property against another, coloured by a third.

    ``x_label``/``y_label`` are SB9 columns; ``z_label`` is taken from the
    velociraptor catalog when present there, otherwise from SB9.
    Returns the matplotlib figure.
    """
    vl_sb9_ids, sb9_ids = cross_match(velociraptor["source_id"], sb9_catalog["source_id"])

    latex_labels_ = dict(Per=r"$\textrm{period}$ $\textrm{/\,days}$",
                         K1=r"$\textrm{radial velocity semi-amplitude}$ $\textrm{/\,km\,s}^{-1}$",
                         e=r"$\textrm{eccentricity}$",
                         rv_confidence=r"$\textrm{Single star confidence (rv only)}$",
                         ast_confidence=r"$\textrm{Single star confidence (ast only)}$",
                         joint_confidence=r"$\textrm{Single star confidence (joint)}$")
    latex_labels_.update(latex_labels or dict())

    x = sb9_catalog[x_label][sb9_ids]
    y = sb9_catalog[y_label][sb9_ids]
    if z_label in velociraptor.dtype.names:
        z = velociraptor[z_label][vl_sb9_ids]
    else:
        # Fixed: the original referenced the undefined name `sb_9_ids` here,
        # raising NameError whenever z_label came from the SB9 catalog.
        z = sb9_catalog[z_label][sb9_ids]

    fig, ax = plt.subplots(figsize=(6.18, 5.0))
    kwds = dict(s=15, cmap=cmap_binary)
    scat = ax.scatter(x, y, c=z, **kwds)
    if errors:
        ax.errorbar(x, y,
                    xerr=sb9_catalog[f"e_{x_label}"][sb9_ids],
                    yerr=sb9_catalog[f"e_{y_label}"][sb9_ids],
                    fmt=None, zorder=-1, c="#666666", linewidth=0.5)

    # log_axes may be a bool (both axes) or a (x, y) pair of bools.
    if log_axes is True:
        ax.loglog()
    elif isinstance(log_axes, (tuple, list)):
        if log_axes[0]:
            ax.semilogx()
        if log_axes[1]:
            ax.semilogy()

    ax.set_xlabel(latex_labels_.get(x_label, x_label))
    ax.set_ylabel(latex_labels_.get(y_label, y_label))
    cbar = plt.colorbar(scat)
    cbar.set_label(latex_labels_.get(z_label, z_label))
    fig.tight_layout()
    return fig
def plot_semi_amplitude_wrt_literature(velociraptor, sb9_catalog,
                                       scale=True, loglog=True, z_log=False,
                                       z_label="joint_confidence"):
    """Compare our estimated RV excess against the SB9 primary semi-amplitude
    K1, coloured by ``z_label``.  Returns the matplotlib figure.
    """
    vl_sb9_ids, sb9_ids = cross_match(velociraptor["source_id"], sb9_catalog["source_id"])
    K_est = velociraptor["rv_excess"][vl_sb9_ids]
    K_err = velociraptor["rv_excess_var"][vl_sb9_ids]**0.5
    if scale:
        # NOTE(review): this scalar is 1/(P * sqrt(1 - e^2)) but the axis
        # labels below read K/sqrt(P(1 - e^2)) — one of the two is presumably
        # wrong; confirm the intended normalisation.
        scalar = (1.0 / (sb9_catalog["Per"] * (1 - sb9_catalog["e"]**2)**0.5))[sb9_ids]
    else:
        scalar = 1.0
    y = K_est * scalar
    yerr = K_err * scalar
    xp = np.array([
        sb9_catalog["K1"][sb9_ids] * scalar,
        sb9_catalog["K2"][sb9_ids] * scalar
    ])
    xperr = np.array([
        sb9_catalog["e_K1"][sb9_ids] * scalar,
        sb9_catalog["e_K2"][sb9_ids] * scalar
    ])
    """
    diff = np.abs(xp - y)
    diff[~np.isfinite(diff)] = np.inf
    idx = np.nanargmin(diff, axis=0)
    x = xp[idx, np.arange(y.size)]
    xerr = xperr[idx, np.arange(y.size)]
    """
    # Only the primary's semi-amplitude is compared (the disabled code above
    # would instead pick whichever component is closest to our estimate).
    x, xerr = xp[0], xperr[0]
    if z_label in velociraptor.dtype.names:
        c = velociraptor[z_label][vl_sb9_ids]
    else:
        c = sb9_catalog[z_label][sb9_ids]
    if z_log:
        c = np.log10(c)
    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, cmap=cmap_binary, s=15, rasterized=True)
    ax.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="none", ecolor="#CCCCCC",
                zorder=-1, linewidth=1, capsize=0)
    if loglog:
        ax.loglog()
    # Draw the 1:1 line over the shared axis range.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    kwds = dict(c="#666666", linestyle=":", zorder=-1, linewidth=0.5)
    ax.plot(limits, limits, **kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)
    latex_labels_ = dict(Per=r"$\textrm{period}$ $\textrm{/\,days}$",
                         K1=r"$\textrm{radial velocity semi-amplitude}$ $\textrm{/\,km\,s}^{-1}$",
                         e=r"$\textrm{eccentricity}$",
                         rv_confidence=r"$\textrm{Single star confidence (rv only)}$",
                         ast_confidence=r"$\textrm{Single star confidence (ast only)}$",
                         joint_confidence=r"$\textrm{Single star confidence (joint)}$",
                         rv_nb_transits=r"$\textrm{number of radial velocity transits}$")
    cbar = plt.colorbar(scat)
    cbar.set_label(latex_labels_.get(z_label, z_label))
    fig.tight_layout()
    if scale:
        ax.set_xlabel(r"$K_{1} / \sqrt{P(1-e^2)}$ \textrm{(Pourbaix et al. 2004)}")
        ax.set_ylabel(r"$K_{1} / \sqrt{P(1-e^2)}$ \textrm{(this work)}")
    else:
        ax.set_xlabel(r"$K_{1}\,/\,\textrm{km\,s}^{-1}$ \textrm{(Pourbaix et al. 2004)}")
        ax.set_ylabel(r"$K_{1}\,/\,\textrm{km\,s}^{-1}$ \textrm{(this work)}")
    fig.tight_layout()
    return fig
# Load the SB9 binary catalog and keep one row per Gaia source.
sb9 = Table.read(os.path.join(BASE_PATH, "data", "sb9_xm_gaia.fits"))
sb9 = sb9.group_by("source_id")
sb9 = sb9[sb9.groups.indices[:-1]]
# Quality cuts on the SB9 orbits (well-measured K1, positive grade, no
# flagged T0/omega solutions).
sb9_mask = (sb9["f_K1"] != ">") \
    * (sb9["f_T0"] == 0) \
    * (sb9["Grade"] > 0) \
    * (sb9["f_omega"] != "a") \
    * (sb9["o_K1"] > 0)
sb9 = sb9[sb9_mask]
# Known single stars (Soubiran et al.), de-duplicated per source.
soubiran = Table.read(os.path.join(
    BASE_PATH, "data", "soubiran-2013-xm-gaia.fits"))
soubiran = soubiran.group_by("source_id")
soubiran = soubiran[soubiran.groups.indices[:-1]]
# Memory-mapped single-star PDF draws, shaped (model, star, draw) with the
# three models ordered as in model_names below.
velociraptor_source_ids = np.memmap("../../results/die-hard-subset.sources.memmap",
                                    mode="r", dtype=">i8")
velociraptor_pdf = np.memmap("../../results/die-hard-subset.pdf.memmap",
                             mode="r", dtype=np.float32,
                             shape=(3, len(velociraptor_source_ids), 100))
model_names = ["ast", "rv", "joint"]
# Science-verification figures (see module docstring).
fig = plot_semi_amplitude_wrt_literature(velociraptor, sb9, z_label="Per",
                                         scale=False, loglog=False, z_log=True)
fig = plot_semi_amplitude_wrt_literature(velociraptor, sb9, z_label="e",
                                         scale=False, loglog=False)
fig = plot_semi_amplitude_wrt_literature(velociraptor, sb9, scale=False, loglog=False)
fig = plot_semi_amplitude_wrt_literature(velociraptor, sb9,
                                         z_label="rv_nb_transits",
                                         scale=False, loglog=False)
fig = literature_binary_properties(velociraptor, sb9)
# One PDF histogram and one classification bar chart per model.
for i, model_name in enumerate(model_names):
    fig = plot_literature_multiplicity_pdf(
        velociraptor_source_ids, velociraptor_pdf,
        soubiran, sb9, pdf_idx=i)
for i, model_name in enumerate(model_names):
    fig_classes = plot_literature_multiplicity_classifications(
        velociraptor_source_ids, velociraptor_pdf,
        soubiran, sb9,pdf_idx=i)
50,277 | andycasey/velociraptor | refs/heads/master | /scripts/npm_run_elastic_ball_test.py |
""" Set up and run the non-parametric model. """
import numpy as np
import os
import multiprocessing as mp
import pickle
import yaml
import tqdm
import logging
from time import sleep
from astropy.io import fits
import npm_utils as npm
import stan_utils as stan
with open(npm.CONFIG_PATH, "r") as fp:
    # safe_load: the config is plain data; yaml.load without a Loader is
    # deprecated and can execute arbitrary tags.
    config = yaml.safe_load(fp)

# Load in the data.
data = fits.open(config["data_path"])[1].data

all_label_names = list(config["kdtree_label_names"]) \
                + list(config["predictor_label_names"])

# Set up a KD-tree.
X = np.vstack([data[ln] for ln in config["kdtree_label_names"]]).T
finite = np.all([np.isfinite(data[ln]) for ln in all_label_names], axis=0)
finite_indices = np.where(finite)[0]

# N stars, D tree dimensions; L parameters per star; C neighbours that
# share each optimized result.
N, D = X.shape
F = finite_indices.size
L = 4 * len(config["predictor_label_names"]) + 1
C = config.get("share_optimised_result_with_nearest", 0)

kdt, scales, offsets = npm.build_kdtree(X[finite],
    relative_scales=config.get("kdtree_relative_scales", None))

kdt_kwds = dict(offsets=offsets, scales=scales, full_output=False)
kdt_kwds.update(
    minimum_radius=config.get("kdtree_minimum_radius", None), # DEFAULT
    minimum_points=config.get("kdtree_minimum_points", 1024), # DEFAULT
    maximum_points=config.get("kdtree_maximum_points", 8192),
    minimum_density=config.get("kdtree_minimum_density", None)
) # DEFAULT

logging.info("k-d tree keywords: {}".format(kdt_kwds))

model = stan.load_stan_model(config["model_path"], verbose=False)

default_opt_kwds = dict(
    verbose=False,
    tol_obj=7./3 - 4./3 - 1, # machine precision
    tol_grad=7./3 - 4./3 - 1, # machine precision
    tol_rel_grad=1e3,
    tol_rel_obj=1e4,
    iter=10000)
default_opt_kwds.update(config.get("optimisation_kwds", {}))

# Make sure that some entries have the right units.
for key in ("tol_obj", "tol_grad", "tol_rel_grad", "tol_rel_obj"):
    if key in default_opt_kwds:
        default_opt_kwds[key] = float(default_opt_kwds[key])

# Bookkeeping: completion/queue flags and the packed parameters per star.
done = np.zeros(N, dtype=bool)
queued = np.zeros(N, dtype=bool)
results = np.nan * np.ones((N, L), dtype=float)

# Set everything done except a box in parameter space.
do = (3 > data["bp_rp"]) \
    * (data["bp_rp"] > 2.5) \
    * (data["absolute_rp_mag"] > -7.5) \
    * (data["absolute_rp_mag"] < -2.5) \
    * finite
done[~do] = True

default_init = np.array([0.6985507, 3.0525788, 1.1474566, 1.8999833, 0.6495420])
def optimize_mixture_model(index, init=None):
    """Optimize the mixture model on the k-d tree ball around *index*.

    Returns ``(index, p_opt, indices)`` where ``p_opt`` is the parameter
    dict from Stan (None when optimization failed) and ``indices`` are the
    ball members used as data.
    """
    # Select indices and get data.
    indices = finite_indices[npm.query_around_point(kdt, X[index], **kdt_kwds)]
    y = np.array([data[ln][indices] for ln in config["predictor_label_names"]]).T
    if init is None:
        init = npm.get_initialization_point(y)
    opt_kwds = dict(init=init, data=dict(y=y, N=y.shape[0], D=y.shape[1]))
    opt_kwds.update(default_opt_kwds)
    # Do optimization.
    with stan.suppress_output():
        try:
            p_opt = npm._check_params_dict(model.optimizing(**opt_kwds))
        except Exception:
            # A bare `except:` also swallowed KeyboardInterrupt/SystemExit,
            # making the workers unkillable; Exception keeps the original
            # "treat any failure as no result" behaviour otherwise.
            p_opt = None
    return (index, p_opt, indices)
def swarm(*indices, max_random_starts=3, in_queue=None, candidate_queue=None,
          out_queue=None):
    """Worker loop: optimize points fed through *in_queue*, falling back to
    at most *max_random_starts* random indices when the queue is empty.

    Results go to *out_queue*; nearby points (seeded with the converged
    parameters) are proposed on *candidate_queue*.  A ``(None, False)``
    item on in_queue shuts the worker down.
    """
    def _random_index():
        yield from np.random.choice(indices, max_random_starts, replace=False)
    _ri = _random_index()
    random_start = lambda *_: (_ri.__next__(), default_init)
    swarm = True
    while swarm:
        # Prefer queued work (in_queue) and only then a random start;
        # StopIteration means the random starts are exhausted.
        for func in (in_queue.get_nowait, random_start):
            try:
                index, init = func()
            except mp.queues.Empty:
                logging.info("Using a random index to start")
                continue
            except StopIteration:
                logging.warning("Swarm is bored")
                sleep(5)
            except:
                logging.exception("Unexpected exception:")
                swarm = False
            else:
                # Shutdown sentinel.
                if index is None and init is False:
                    swarm = False
                    break
                try:
                    _, result, kdt_indices = optimize_mixture_model(index, init)
                except:
                    logging.exception("Exception when optimizing on {} from {}"\
                        .format(index, init))
                    break
                out_queue.put((index, result))
                if result is not None:
                    if C > 0:
                        # Assign the closest points to have the same result.
                        # (On the other end of the out_qeue we will deal with
                        # multiple results.)
                        out_queue.put((kdt_indices[:C + 1], result))
                    # Candidate next K points
                    K = 10
                    candidate_queue.put((kdt_indices[C + 1:C + 1 + K], result))
                break
    return None
# Main dispatch loop: a pool of swarm workers walks the parameter-space box,
# seeding each new point with its neighbour's converged parameters.
while not all(done):
    P = 20  # mp.cpu_count()
    # Only do stars in the box.
    do_indices = np.where(~done)[0]
    D = do_indices.size
    with mp.Pool(processes=P) as pool:
        manager = mp.Manager()
        in_queue = manager.Queue()
        candidate_queue = manager.Queue()
        out_queue = manager.Queue()
        swarm_kwds = dict(max_random_starts=10,
                          in_queue=in_queue,
                          out_queue=out_queue,
                          candidate_queue=candidate_queue)
        j = []
        for _ in range(P):
            j.append(pool.apply_async(swarm, do_indices, kwds=swarm_kwds))
        # The swarm will just run at random initial points until we communicate
        # back that the candidates are good.
        with tqdm.tqdm(total=D) as pbar:
            while True:
                has_candidates, has_results = (True, True)
                # Check for candidates.
                try:
                    r = candidate_queue.get_nowait()
                except mp.queues.Empty:
                    has_candidates = False
                else:
                    candidate_indices, init = r
                    candidate_indices = np.atleast_1d(candidate_indices)
                    for index in candidate_indices:
                        if not done[index] and not queued[index] and finite[index]:
                            in_queue.put((index, init))
                            queued[index] = True
                # Check for output.
                try:
                    r = out_queue.get(timeout=5)
                except mp.queues.Empty:
                    has_results = False
                else:
                    index, result = r
                    index = np.atleast_1d(index)
                    updated = index.size - sum(done[index])
                    done[index] = True
                    if result is not None:
                        results[index] = npm._pack_params(**result)
                    pbar.update(updated)
                if not has_candidates and not has_results:
                    break
    # The original ended each pass with `raise a` — a debug leftover that
    # raised NameError, preventing further passes and the cleanup phase
    # below from ever running.  It has been removed.
# Clean up any difficult cases: points still not done are re-seeded with the
# converged parameters of a completed neighbour (no random starts allowed).
with mp.Pool(processes=P) as pool:
    manager = mp.Manager()
    in_queue = manager.Queue()
    candidate_queue = manager.Queue()
    out_queue = manager.Queue()
    swarm_kwds = dict(max_random_starts=0,
                      in_queue=in_queue,
                      out_queue=out_queue,
                      candidate_queue=candidate_queue)
    # Do a check for entries that are not done.
    not_done = np.where((~done * finite))[0]
    ND = not_done.size
    logging.info("{} not done".format(ND))
    for index in not_done:
        # Get nearest points that are done.
        indices = finite_indices[npm.query_around_point(kdt, X[index], **kdt_kwds)]
        in_queue.put((index, results[indices[done[indices]][0]]))
    j = []
    for _ in range(P):
        j.append(pool.apply_async(swarm, finite_indices, kwds=swarm_kwds))
    with tqdm.tqdm(total=ND) as pbar:
        while True:
            has_candidates, has_results = (True, True)
            # Check for candidates.
            try:
                r = candidate_queue.get_nowait()
            except mp.queues.Empty:
                has_candidates = False
            else:
                candidate_indices, init = r
                candidate_indices = np.atleast_1d(candidate_indices)
                for index in candidate_indices:
                    if not done[index] and not queued[index] and finite[index]:
                        in_queue.put((index, init))
                        queued[index] = True
            # Check for output.
            try:
                r = out_queue.get(timeout=5)
            except mp.queues.Empty:
                has_results = False
            else:
                index, result = r
                index = np.atleast_1d(index)
                updated = index.size - sum(done[index])
                done[index] = True
                if result is not None:
                    results[index] = npm._pack_params(**result)
                pbar.update(updated)
            if not has_candidates and not has_results:
                sleep(1)
            # Do a check for entries that are not done.
            not_done = np.where((~done * finite))[0]
            ND = not_done.size
            if ND == 0:
                break
# Persist the packed parameters for every star.
results_path = config.get("results_path", "results.pkl")
with open(results_path, "wb") as fp:
    pickle.dump(results, fp, -1)
logging.info("Results written to {}".format(results_path))
50,278 | andycasey/velociraptor | refs/heads/master | /npm.py |
"""
Non-parametric model of binarity and single stars across the H-R diagram.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pickle
from astropy.table import Table
from astropy.io import fits
from scipy import (spatial, optimize as op)
from sklearn import neighbors as neighbours
from time import time
import velociraptor
import stan_utils as stan
import npm_utils
from corner import corner
np.random.seed(123)  # reproducible ball selection / sampling
DEBUG_PLOTS = False
data_path = "data/rv-all-subset-1e4.fits"
#data = fits.open(data_path)[1].data
data = Table.read(data_path)
data["rv_single_epoch_scatter"] = data["rv_single_epoch_variance"]**0.5
# [1] Construct KD-Tree
# [2] Select a random X number of stars
# [3] Construct some way to get nearest neighbour results that are not Null???
# --> select the next nearest star? or one star in the volume that does not have results?
# [3] Run the optimization etc.
# what labels are we going to build the KD-tree in?
kdt_label_names = ("bp_rp", "absolute_rp_mag", "phot_rp_mean_mag")
predictor_label_names = (
    "rv_single_epoch_scatter",
    "astrometric_unit_weight_error",
    "phot_bp_rp_excess_factor",
    "rv_abs_diff_template_teff",
)
parameter_names = ["theta", "mu_single", "sigma_single", "mu_multiple",
    "sigma_multiple"]
X_kdt = np.vstack([data[ln] for ln in kdt_label_names]).T
# TODO: Right now I am *requiring* that all predictor labels and KD-Tree
# labels are finite, but we may want to change this in the future.
all_label_names = tuple(list(kdt_label_names) + list(predictor_label_names))
subset = np.all(np.isfinite(np.vstack([data[ln] for ln in all_label_names])), axis=0)
X_kdt = X_kdt[subset]
# Min-max style normalisation helpers for the tree space.
X_scale = np.ptp(X_kdt, axis=0)
X_mean = np.mean(X_kdt, axis=0)
_scale = lambda a: (a - X_mean)/X_scale
_descale = lambda a: a * X_scale + X_mean
# Normalise the array for the KD-tree
X_norm = _scale(X_kdt)
# Construct the KD-Tree
kdt = npm_utils.build_kdt(X_norm)
data = data[subset]
model = stan.load_stan_model("npm.stan")
# Calculate the total number of parameters
M = len(data)
L = len(predictor_label_names)
K = 1 + 4 * L  # theta + (mu, sigma) x (single, multiple) per predictor
opt_params = np.empty((M, K))
#subset_points = np.random.choice(M, size=1000, replace=False)
#for i in range(M):
#for j, i in enumerate(subset_points):
# Fit the two-component mixture model in the local ball around every star.
for j, i in enumerate(range(M)):
    print("At point {}/{}: {}".format(j, M, i))
    k_indices, dist = npm_utils.get_ball_around_point(kdt, X_norm[[i]],
        full_output=True)
    y = np.array([data[ln][k_indices] for ln in predictor_label_names]).T
    N, D = y.shape
    # Heuristic starting point, then optimize with Stan.
    init_values = npm_utils.get_initialization_point(y)
    init_dict = dict(zip(parameter_names, npm_utils._unpack_params(init_values, D)))
    init_dict["mu_multiple_uv"] = 0.5 * np.ones(D)
    data_dict = dict(y=y, N=N, D=D)
    opt_kwds = dict(
        data=data_dict,
        init=init_dict,
        verbose=False,
        tol_obj=7./3 - 4./3 - 1, # machine precision
        tol_grad=7./3 - 4./3 - 1, # machine precision
        tol_rel_grad=1e3,
        tol_rel_obj=1e4,
        iter=10000)
    t_init = time()
    p_opt = model.optimizing(**opt_kwds)
    t_opt = time() - t_init
    print("Optimization took {:.2f} seconds".format(t_opt))
    # Drop the auxiliary parameter and pack the rest into the results row.
    del p_opt["mu_multiple_uv"]
    for k in p_opt.keys():
        if k == "theta": continue
        p_opt[k] = np.atleast_1d(p_opt[k])
    opt_params[i] = npm_utils._pack_params(**p_opt)
    print("Single star fraction at this point: {:.2f}".format(opt_params[i, 0]))
    if DEBUG_PLOTS:
        # Diagnostic plots + MCMC sampling; only runs when DEBUG_PLOTS is
        # True.  NOTE(review): this branch ends in `raise a` (a deliberate
        # debug halt that raises NameError after the first point).
        print("Running DEBUG plots")
        for l, predictor_label_name in enumerate(predictor_label_names):
            fig, ax = plt.subplots()
            xi = np.linspace(
                np.min(data_dict["y"].T[l].flatten()),
                np.max(data_dict["y"].T[l].flatten()),
                1000)
            ax.hist(data_dict["y"].T[l].flatten(), bins=50, facecolor="#cccccc", zorder=-1)
            show = init_dict
            ax.plot(xi, N * npm_utils.norm_pdf(xi, show["mu_single"][l], show["sigma_single"][l], show["theta"]), c='r')
            ax.plot(xi, N * npm_utils.lognorm_pdf(xi, show["mu_multiple"][l], show["sigma_multiple"][l], show["theta"]), c='b')
            show = p_opt
            ax.plot(xi, N * npm_utils.norm_pdf(xi, show["mu_single"][l], show["sigma_single"][l], show["theta"]), c='m')
            ax.plot(xi, N * npm_utils.lognorm_pdf(xi, show["mu_multiple"][l], show["sigma_multiple"][l], show["theta"]), c='y')
        samples = model.sampling(**stan.sampling_kwds(
            data=opt_kwds["data"], init=p_opt, iter=2000, chains=2))
        chains_dict = samples.extract()
        if L == 1:
            chains = np.vstack([
                chains_dict["theta"],
                chains_dict["mu_single"],
                chains_dict["sigma_single"],
                chains_dict["mu_multiple"],
                chains_dict["sigma_multiple"]
            ]).T
        else:
            chains = np.hstack([
                np.atleast_2d(chains_dict["theta"]).T,
                chains_dict["mu_single"],
                chains_dict["sigma_single"],
                chains_dict["mu_multiple"],
                chains_dict["sigma_multiple"]
            ])
        fig = corner(chains)
        # Make plots of the pdf of each distribution.
        for l, predictor_label_name in enumerate(predictor_label_names):
            xi = np.linspace(np.min(y.T[l]), np.max(y.T[l]), 1000)
            indices = np.random.choice(len(chains), 100, replace=False)
            fig, ax = plt.subplots()
            for index in indices:
                idx_theta = parameter_names.index("theta")
                idx_norm_mu = parameter_names.index("mu_single")
                idx_norm_sigma = parameter_names.index("sigma_single")
                idx_lognorm_mu = parameter_names.index("mu_multiple")
                idx_lognorm_sigma = parameter_names.index("sigma_multiple")
                theta = chains[index, 0]
                norm_mu = chains[index, 1 + l]
                norm_sigma = chains[index, 1 + L + l]
                lognorm_mu = chains[index, 1 + 2*L + l]
                lognorm_sigma = chains[index, 1 + 3*L + l]
                ax.plot(xi, npm_utils.norm_pdf(xi, norm_mu, norm_sigma, theta), c='r', alpha=0.1)
                ax.plot(xi, npm_utils.lognorm_pdf(xi, lognorm_mu, lognorm_sigma, theta), c='b', alpha=0.1)
            _ = ax.hist(y.T[l], bins=500, facecolor="#000000", zorder=-1, normed=True)
            print(y.size)
            ax.set_title(predictor_label_name.replace("_", " "))
        raise a
# Save the optimized (unconstrained) parameters for every star.
with open("rv-all-subset-1e4-optimized-unconstrained.pickle", "wb") as fp:
    pickle.dump(opt_params, fp, -1)
def normal_lpdf(y, mu, sigma):
    """Log-pdf of the normal distribution N(mu, sigma) evaluated at y."""
    inv_var = sigma**(-2)
    return 0.5 * (np.log(inv_var) - np.log(2 * np.pi) - inv_var * (y - mu) ** 2)
def lognormal_lpdf(y, mu, sigma):
    """Log-pdf of the log-normal distribution evaluated at y (requires y > 0)."""
    z = (np.log(y) - mu) / sigma
    return -0.5 * np.log(2 * np.pi) - np.log(y * sigma) - 0.5 * z * z
from scipy.special import logsumexp
# Calculate log-probabilities for all of the stars we considered.
def membership_probability(y, p_opt):
    """Probability that a star belongs to each mixture component.

    Parameters
    ----------
    y : array with one value per predictor label.
    p_opt : packed parameter vector (theta, single mu/sigma, multiple mu/sigma).

    Returns
    -------
    array of shape (2,): ``[p(single), p(multiple)]``, summing to 1.

    NOTE(review): theta is unpacked but never applied, so the component
    weights are ignored here — possibly intentional; see the TODO below.
    """
    y = np.atleast_1d(y)
    theta, s_mu, s_sigma, m_mu, m_sigma = npm_utils._unpack_params(p_opt)
    assert s_mu.size == y.size, "The size of y should match the size of mu"
    D = y.size
    # Per-dimension log-likelihood under each component: column 0 is the
    # normal (single) component, column 1 the log-normal (multiple) one.
    ln_prob = np.zeros((D, 2))
    for d in range(D):
        ln_prob[d] = [
            normal_lpdf(y[d], s_mu[d], s_sigma[d]),
            lognormal_lpdf(y[d], m_mu[d], m_sigma[d])
        ]
    # TODO: I am not certain that I am summing these log probabilities correctly
    sum_ln_prob = np.sum(ln_prob, axis=0) # per mixture
    ln_likelihood = logsumexp(sum_ln_prob)
    with np.errstate(under="ignore"):
        ln_membership = sum_ln_prob - ln_likelihood
    return np.exp(ln_membership)
# Predictor matrix for every star (columns follow predictor_label_names).
y = np.array([data[ln] for ln in predictor_label_names]).T
N, D = y.shape
# Single-star membership probability for each star, from its own local fit.
p_single = np.array([membership_probability(y[i], opt_params[i])[0] for i in range(N)])
# Now calculate the amount of excess RV variance
def rv_excess_scatter(y, p_opt, label_index):
    """
    Return the excess single-epoch RV scatter over the single-star mean for the
    indexed label, together with its significance (excess / sigma_single).
    """
    y = np.atleast_1d(y)
    _, s_mu, s_sigma, __, ___ = npm_utils._unpack_params(p_opt)
    assert s_mu.size == y.size, "The size of y should match the size of mu"

    excess = np.sqrt(y[label_index]**2 - s_mu[label_index]**2)
    return (excess, excess / s_sigma[label_index])
# Column index of the single-epoch RV scatter predictor.
li = list(predictor_label_names).index("rv_single_epoch_scatter")
rv_excess = np.array([
    rv_excess_scatter(y[i], opt_params[i], li) for i in range(N)])
# Remember that the probabilities of binarity take into account more than just
# the radial velocity excess!
# Make a corner plot of the various properties? Coloured by probabilities?
with open("rv-all-subset-1e4-results.pickle", "wb") as fp:
    pickle.dump((subset, opt_params, predictor_label_names, p_single, rv_excess), fp, -1)
50,279 | andycasey/velociraptor | refs/heads/master | /run_probabilities.py |
"""
Run this after run_analysis.py
"""
import logging
import numpy as np
import sys
import pickle
import tqdm
from scipy.special import logsumexp
from astropy.io import fits
def ps(y, theta, s_mu, s_sigma, b_mu, b_sigma):
    """
    Posterior probability that each value in y belongs to the single-star
    (normal) component of a normal/log-normal mixture with weight theta.
    """
    hl2p = 0.5 * np.log(2 * np.pi)
    s_ivar = s_sigma**-2
    b_ivar = b_sigma**-2

    # Single-star (normal) and binary (log-normal) log-densities.
    s_lpdf = 0.5 * np.log(s_ivar) - hl2p - 0.5 * s_ivar * (y - s_mu)**2
    b_lpdf = -np.log(y * b_sigma) - hl2p - 0.5 * b_ivar * (np.log(y) - b_mu)**2
    b_lpdf[~np.isfinite(b_lpdf)] = -np.inf

    joint = np.vstack([s_lpdf, b_lpdf]).T + np.log([theta, 1 - theta])
    p_single = np.exp(joint.T[0] - logsumexp(joint, axis=1))
    assert np.all(np.isfinite(p_single))
    return p_single
def lnprob_single(y, theta, s_mu, s_sigma, b_mu, b_sigma):
    """
    Return the joint log-probabilities [single, binary] (shape (N, 2)) for each
    value in y under a weighted normal/log-normal mixture.
    """
    hl2p = 0.5 * np.log(2 * np.pi)
    s_ivar = s_sigma**-2
    b_ivar = b_sigma**-2

    s_lpdf = 0.5 * np.log(s_ivar) - hl2p - 0.5 * s_ivar * (y - s_mu)**2
    b_lpdf = -np.log(y * b_sigma) - hl2p - 0.5 * b_ivar * (np.log(y) - b_mu)**2
    b_lpdf[~np.isfinite(b_lpdf)] = -np.inf

    return np.vstack([s_lpdf, b_lpdf]).T + np.log([theta, 1 - theta])
if __name__ == "__main__":

    # Result pickles produced by run_analysis.py, one path per model.
    result_paths = sys.argv[1:]
    if len(result_paths) < 1:
        raise ValueError("no result paths given")

    all_results = []
    for result_path in result_paths:
        logging.info(f"Loading results from {result_path}")
        with open(result_path, "rb") as fp:
            r = pickle.load(fp)
        all_results.append(r)

    # Check seeds are the same (results are only comparable if each analysis
    # used the same random seed).
    initial_seed = all_results[0]["seed"]
    if initial_seed is None:
        raise ValueError("seed is not known")
    for r in all_results:
        if int(r["seed"]) != int(initial_seed):
            raise ValueError(f"seeds differ: {initial_seed} in {result_paths[0]} != {r['seed']}")

    # TODO: check that the data files etc are the same between results paths
    # TODO: this is so hacky just re-factor the model format and run_analysis.py
    results = all_results[0]
    config = results["config"]
    data = fits.open(config["data_path"])[1].data

    all_label_names = list(config["kdtree_label_names"]) \
                    + list(config["require_finite_label_names"]) \
                    + list(config["predictor_label_names"])
    all_label_names = list(np.unique(all_label_names))

    # Set up a KD-tree.
    # Keep only sources with finite values in every required label.
    finite = np.all([np.isfinite(data[ln]) for ln in all_label_names], axis=0)
    y = np.array([data[ln][finite] for ln in config["predictor_label_names"]]).flatten()

    K = 100                # draws per source when propagating GP variance
    M = len(result_paths)  # number of models being combined

    # lnprobs[m, d, k] holds the [single, binary] log-probabilities for draw k
    # of source d under model m; p_single's extra leading slot is the joint.
    lnprobs = np.zeros((M, y.size, K, 2))
    p_single = np.zeros((M + 1, y.size, K))

    for m, (result_path, results) in enumerate(zip(result_paths, all_results)):

        mu_single, mu_single_var, sigma_single, sigma_single_var, \
            sigma_multiple, sigma_multiple_var = results["gp_predictions"].T

        # Calculate probabilities.
        # TODO MAGIC HACK ALL DIRECTLY BELOW
        mean_theta = np.mean(results["results"][:, 0])
        mask = np.array([1, 2, 4])
        rhos = np.corrcoef(results["results"][:, mask].T)

        logging.info(f"Calculating probabilities for {result_path}")
        for d in tqdm.tqdm(range(y.size)):

            # Build a covariance matrix from the per-source GP variances and
            # the empirical correlation structure of the sampled parameters.
            diag = np.atleast_2d(np.array([
                mu_single_var[d],
                sigma_single_var[d],
                sigma_multiple_var[d]
            ]))**0.5
            cov = diag * rhos * diag.T
            mu = np.array([mu_single[d], sigma_single[d], sigma_multiple[d]])
            ms, ss, sm = np.random.multivariate_normal(mu, cov, size=K).T

            # Calculate the mu_multiple. TODO HACK MAGIC
            mm = np.log(ms + ss) + sm**2

            lnprobs[m, d] = lnprob_single(y[[d]], mean_theta, ms, ss, mm, sm)
            p_single[m, d] = np.exp(lnprobs[m, d, :, 0] - logsumexp(lnprobs[m, d], axis=1))

    # Calculate joint probabilities.
    for d in tqdm.tqdm(range(y.size)):
        for k in range(K):
            foo = lnprobs[:, d, k]  # NOTE(review): unused — confirm before removing
            numerator = logsumexp(lnprobs[:, d, k, 0])
            denominator = logsumexp(lnprobs[:, d, k, 1])
            p_single[-1, d, k] = np.exp(numerator - logsumexp([numerator, denominator]))

    # Save percentiles of the probability distributions.
    # Save the draws from the probability distributions.
    raise a  # NOTE(review): deliberate debugging stop (raises NameError)

# Need:
# result_paths
# Check seed for each one and ensure they are the same.
# Calculate the mu_multiple (as the lower value).
# Calculate probabilities for each source, after propagating variance.
50,280 | andycasey/velociraptor | refs/heads/master | /_convert_results.py | """
Convert results files.
"""
import os
import sys
import pickle
import yaml
if __name__ == "__main__":

    # Usage: _convert_results.py OUTPUT_PATH CONFIG_PATH RESULT_PATH [RESULT_PATH ...]
    output_path = sys.argv[1]
    joint_config_path = sys.argv[2]
    result_paths = sys.argv[3:]

    # BUG FIX: yaml.load without an explicit Loader is deprecated and unsafe
    # (it can execute arbitrary Python tags); safe_load is correct for plain
    # configuration files.
    with open(joint_config_path, "r") as fp:
        config = yaml.safe_load(fp)

    output = dict(config=config)

    for i, result_path in enumerate(result_paths):
        # Model name is the file basename up to the first ".".
        model_name = os.path.basename(result_path).split(".")[0]

        with open(result_path, "rb") as fp:
            results = pickle.load(fp)

        # Keep only the pieces downstream code needs.
        output[model_name] = [
            results["results"],
            results["gp_parameters"],
            results["gp_predictions"]
        ]

    with open(output_path, "wb") as fp:
        pickle.dump(output, fp)
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,281 | andycasey/velociraptor | refs/heads/master | /npm_utils.py |
import numpy as np
import os
from scipy import (optimize as op, stats)
from sklearn import neighbors as neighbours
from scipy.special import logsumexp
from time import time
import warnings
import stan_utils as stan
# Default location of the non-parametric model configuration file.
CONFIG_PATH = "npm-config.yaml"
def get_output_path(source_id, config, check_path_exists=True):
    """
    Build the results path for a source, optionally creating the directory.

    The filename joins the predictor label names with "+", plus an optional
    results suffix from the config.
    """
    suffix = config.get("results_suffix", None)
    suffix_part = "" if suffix is None else ".{}".format(suffix)
    basename = "{0}{1}".format("+".join(config["predictor_label_names"]), suffix_part)
    path = os.path.join(
        config["results_path"],
        "{0}/{1}".format(split_source_id(source_id), basename))
    if check_path_exists:
        os.makedirs(os.path.dirname(path), exist_ok=True)
    return path
def build_kdtree(X, relative_scales=None, **kwargs):
    """
    Build a KD-tree over the (whitened) rows of X.

    Returns a 3-tuple of (kdtree, relative_scales, offset) so that query
    points can be normalised the same way.
    """
    offset = np.mean(X, axis=0)
    # Default to whitening by the peak-to-peak range of each dimension.
    if relative_scales is None:
        relative_scales = np.ptp(X, axis=0)

    X_norm = (X - offset) / relative_scales

    kwds = dict(leaf_size=40, metric="minkowski")
    kwds.update(kwargs)
    return (neighbours.KDTree(X_norm, **kwds), relative_scales, offset)
def query_around_point(kdtree, point, offsets=0, scales=1, minimum_radius=None,
    maximum_radius=None, minimum_points=1, maximum_points=None,
    minimum_density=None, dualtree=False,
    full_output=False, **kwargs):
    """
    Query around a point in the KD-Tree until certain conditions are met (e.g.,
    the number of points in the ball, and the minimum radius that the ball
    expands out to).

    :param kdtree:
        The pre-computed KD-Tree.

    :param point:
        The (unscaled) point to query around.

    :param offsets: [optional]
        The offsets to apply to the query point.

    :param scales: [optional]
        The scaling to apply to the query point, after subtracting the offsets.

    :param minimum_radius: [optional]
        The minimum radius (or radii) that the ball must extend to.

    :param minimum_points: [optional]
        The minimum number of points to return in the ball.

    :param maximum_points: [optional]
        The maximum number of points to return in the ball. If the number of
        points returned exceeds this value, then a random subset of the points
        will be returned.

    :param minimum_density: [optional]
        The minimum average density of points per dimension for the ball. This
        can be useful to ensure that points that are in the edge of the k-d tree
        parameter space will be compared against points that are representative
        of the underlying space, and not just compared against nearest outliers.

    :param dualtree: [optional]
        Use the dual tree formalism for the query: a tree is built for the query
        points, and the pair of trees is used to efficiently search this space.
        This can lead to better performance as the number of points grows large.

    :param full_output: [optional]
        If `True`, return a two length tuple of the distances to each point and
        the indicies, otherwise just return the indices.
    """
    #print("querying k-d tree")
    offsets = np.atleast_1d(offsets)
    scales = np.atleast_1d(scales)
    # Normalise the query point into the whitened k-d tree space.
    point_orig = np.atleast_1d(point).reshape(1, -1)
    point = (point_orig - offsets)/scales

    # Simple case.
    if minimum_radius is None and minimum_density is None:
        # We can just query the nearest number of points.
        # NOTE(review): d and indices keep shape (1, k) in this branch, so the
        # L = len(indices) below evaluates to 1 — confirm callers expect this.
        d, indices = kdtree.query(point, k=minimum_points,
            sort_results=True, return_distance=True, dualtree=dualtree)

    else:
        # We need to find the minimum radius that meets our constraints.
        if minimum_radius is None:
            minimum_radius = 0
        if minimum_density is None:
            minimum_density = 0

        minimum_radius = np.atleast_1d(minimum_radius)
        minimum_density = np.atleast_1d(minimum_density)

        # Need to scale the minimum radius from the label space to the normalised
        # k-d tree space.
        minimum_radius_norm = np.max(minimum_radius / np.atleast_1d(scales))

        K = kdtree.two_point_correlation(point, minimum_radius_norm)[0]

        # "density" = N/(2*R)
        # if N > 2 * R * density then our density constraint is met
        K_min = np.max(np.hstack([
            minimum_points,
            2 * minimum_density * minimum_radius
        ]))

        # Check that the minimum radius norm will also meet our minimum number
        # of points constraint. Otherwise, we need to use two point
        # auto-correlation functions to see how far to go out to.
        if K >= K_min:
            # All constraints met.
            radius_norm = minimum_radius_norm

        else:
            #print("Using k-dtree to step out in radius because we have {} points within {} but need {}".format(
            #    K, minimum_radius_norm, K_min))

            # We need to use the k-d tree to step out until our constraints are
            # met.
            maximum_radius_norm = 2 * np.max(np.ptp(kdtree.data, axis=0))

            # This is the initial coarse search.
            N, D = kdtree.data.shape
            left, right = (minimum_radius_norm, maximum_radius_norm)
            Q = kwargs.get("Q", 10) # MAGIC HACK

            # MAGIC HACK
            tolerance = maximum_points if maximum_points is not None \
                else 2 * minimum_points

            # Bisection-like search on a log-spaced grid of radii until the
            # counts inside the candidate radii stop changing by > tolerance.
            while True:
                # Shrink it.
                ri = np.logspace(np.log10(left), np.log10(right), Q)
                counts = kdtree.two_point_correlation(point, ri)
                #print("tolerance {}: {} {} {} {}".format(tolerance, left, right, ri, counts))

                minimum_counts = np.clip(2 * np.max(np.dot(ri.reshape(-1, 1),
                    (minimum_density * scales).reshape(1, -1)), axis=1),
                    minimum_points, N)

                indices = np.arange(Q)[counts >= minimum_counts]
                left, right = (ri[indices[0] - 1], ri[indices[0] + 1])
                #print("new: {} {}".format(left, right))

                if np.diff(counts[indices]).max() < tolerance:
                    radius_norm = ri[indices[0]]
                    break

        # two_point_correlation(point, minimum_radius_norm)
        # is eequivalent to
        # query_radius(point, minimum_radius_norm, count_only=True)
        # but in my tests two_point_correlation was a little faster.

        # kdtree.query_radius returns indices, d
        # kdtree.query returns d, indices
        # .... are you serious?
        indices, d = kdtree.query_radius(point, radius_norm,
            return_distance=True, sort_results=True)
        d, indices = (d[0], indices[0])

    L = len(indices)
    if maximum_points is not None and L > maximum_points:
        if maximum_points < minimum_points:
            raise ValueError("minimum_points must be smaller than maximum_points")

        if maximum_radius is not None:
            # Restrict to points within the (unscaled) maximum radius box.
            L = np.where(np.all(np.abs(point[0] - np.asarray(kdtree.data)[indices]) <= maximum_radius, axis=1))[0]
            maximum_points = min(maximum_points, L.size)

        # Sub-sample a random number.
        sub_idx = np.random.choice(L, maximum_points, replace=False)
        d, indices = (d[sub_idx], indices[sub_idx])

    elif maximum_radius is not None:
        L = np.where(np.all(np.abs(point[0] - np.asarray(kdtree.data)[indices]) <= maximum_radius, axis=1))[0]
        # NOTE(review): if maximum_points is None here, min(None, L.size)
        # raises TypeError on Python 3 — confirm this branch is reachable
        # with maximum_points=None.
        maximum_points = min(maximum_points, L.size)

        # Sub-sample a random number.
        sub_idx = np.random.choice(L, maximum_points, replace=False)
        d, indices = (d[sub_idx], indices[sub_idx])

    # Meta should include the PTP values of points in the ball.
    meta = dict()

    #assert minimum_points is None or indices.size >= minimum_points
    return (d, indices, meta) if full_output else indices
def normal_lpdf(y, mu, sigma):
    """Element-wise log-pdf of N(mu, sigma) evaluated at y."""
    ivar = 1.0 / (sigma * sigma)
    return 0.5 * (np.log(ivar) - np.log(2 * np.pi) - ivar * (y - mu)**2)
def lognormal_lpdf(y, mu, sigma):
    """Element-wise log-pdf of LogNormal(mu, sigma) evaluated at y > 0."""
    z = (np.log(y) - mu) / sigma
    return -0.5 * (np.log(2 * np.pi) + z * z) - np.log(y * sigma)
# Calculate log-probabilities for all of the stars we considered.
def membership_probability(y, p_opt):
y = np.atleast_1d(y)
theta, s_mu, s_sigma, m_mu, m_sigma = _unpack_params(_pack_params(**p_opt))
assert s_mu.size == y.size, "The size of y should match the size of mu"
D = y.size
ln_prob = np.zeros((D, 2))
for d in range(D):
ln_prob[d] = [
normal_lpdf(y[d], s_mu[d], s_sigma[d]),
lognormal_lpdf(y[d], m_mu[d], m_sigma[d])
]
# TODO: I am not certain that I am summing these log probabilities correctly
sum_ln_prob = np.sum(ln_prob, axis=0) # per mixture
ln_likelihood = logsumexp(sum_ln_prob)
with np.errstate(under="ignore"):
ln_membership = sum_ln_prob - ln_likelihood
return np.exp(ln_membership)
def label_excess(y, p_opt, label_index):
    """
    Excess in the indexed label over the single-star mean, together with its
    significance (excess / sigma_single).
    """
    y = np.atleast_1d(y)
    params = _unpack_params(_pack_params(**p_opt))
    s_mu, s_sigma = (params[1], params[2])
    assert s_mu.size == y.size, "The size of y should match the size of mu"

    excess = np.sqrt(y[label_index]**2 - s_mu[label_index]**2)
    return (excess, excess / s_sigma[label_index])
def build_kdt(X_norm, **kwargs):
    """Construct a KDTree over pre-normalised coordinates."""
    kwds = dict(leaf_size=40, metric="minkowski")
    kwds.update(kwargs)
    return neighbours.KDTree(X_norm, **kwds)
def get_ball_around_point(kdt, point, K=1000, scale=1, offset=0, full_output=False):
    """
    Return the indices of the K nearest neighbours to the (normalised) point;
    with full_output=True, also return the corresponding distances.
    """
    dist, k_indices = kdt.query((point - offset) / scale, K)
    dist, k_indices = (dist[0], k_indices[0])
    if full_output:
        return (k_indices, dist)
    return k_indices
# Stan needs a finite value to initialize correctly, so we will use a dumb (more
# robust) optimizer to get an initialization value.
def norm_pdf(x, norm_mu, norm_sigma, theta):
    """Normal pdf at x, weighted by the mixing fraction theta."""
    variance = norm_sigma**2
    coeff = theta / np.sqrt(2 * np.pi * variance)
    return coeff * np.exp(-0.5 * (x - norm_mu)**2 / variance)
def lognorm_pdf(x, lognorm_mu, lognorm_sigma, theta):
    """Log-normal pdf at x, weighted by the complementary fraction (1 - theta)."""
    z = (np.log(x) - lognorm_mu) / lognorm_sigma
    normalisation = x * lognorm_sigma * np.sqrt(2 * np.pi)
    return (1.0 - theta) * np.exp(-0.5 * z**2) / normalisation
def ln_likelihood(y, theta, s_mu, s_sigma, b_mu, b_sigma):
    """
    Total marginal log-likelihood of y under the two-component (normal single /
    log-normal multiple) mixture with single-star weight theta.
    """
    hl2p = 0.5 * np.log(2 * np.pi)
    s_ivar = s_sigma**-2
    b_ivar = b_sigma**-2

    s_lpdf = 0.5 * np.log(s_ivar) - hl2p - 0.5 * s_ivar * (y - s_mu)**2
    b_lpdf = -np.log(y * b_sigma) - hl2p - 0.5 * b_ivar * (np.log(y) - b_mu)**2

    # Weighted mixture: marginalise the component label per datum.
    mixture = np.vstack([s_lpdf, b_lpdf]).T + np.log([theta, 1 - theta])
    return np.sum(logsumexp(mixture, axis=1))
def ln_prior(theta, s_mu, s_sigma, b_mu, b_sigma):
    """
    Log-prior: hard box constraints on all parameters plus a Beta(5, 5) prior
    on the mixing fraction theta. Returns -inf outside the support.
    """
    # Ensure that the *mode* of the log-normal distribution is larger than the
    # mean of the normal distribution
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        min_mu_multiple = np.log(s_mu + s_sigma) + b_sigma**2

    inside_support = (0 <= theta <= 1) \
                 and (0.5 <= s_mu <= 15) \
                 and (0.05 <= s_sigma <= 10) \
                 and (0.20 <= b_sigma <= 1.6) \
                 and not np.any(b_mu < min_mu_multiple)
    if not inside_support:
        return -np.inf

    # Beta prior on theta.
    return stats.beta.logpdf(theta, 5, 5)
def ln_prob(y, L, *params):
    """Log-posterior: prior plus likelihood (-inf outside the prior support)."""
    theta, s_mu, s_sigma, b_mu, b_sigma = _unpack_params(params, L=L)
    lp = ln_prior(theta, s_mu, s_sigma, b_mu, b_sigma)
    if not np.isfinite(lp):
        return lp
    return lp + ln_likelihood(y, theta, s_mu, s_sigma, b_mu, b_sigma)
def _unpack_params(params, L=None):
# unpack the multdimensional values.
if L is None:
L = int((len(params) - 1)/4)
theta = params[0]
mu_single = np.array(params[1:1 + L])
sigma_single = np.array(params[1 + L:1 + 2 * L])
mu_multiple = np.array(params[1 + 2 * L:1 + 3 * L])
sigma_multiple = np.array(params[1 + 3 * L:1 + 4 * L])
return (theta, mu_single, sigma_single, mu_multiple, sigma_multiple)
def _pack_params(theta, mu_single, sigma_single, mu_multiple, sigma_multiple, mu_multiple_uv=None, **kwargs):
if mu_multiple_uv is None:
return np.hstack([theta, mu_single, sigma_single, mu_multiple, sigma_multiple])
else:
return np.hstack([theta, mu_single, sigma_single, mu_multiple, sigma_multiple, mu_multiple_uv])
def _check_params_dict(d, bounds_dict=None, fail_on_bounds=True, tolerance=0.01):
if d is None: return d
dc = {**d}
for k in ("mu_single", "sigma_single", "mu_multiple", "sigma_multiple"):
dc[k] = np.atleast_1d(dc[k]).flatten()[0]
if bounds_dict is not None and k in bounds_dict:
lower, upper = bounds_dict[k]
if (not np.all(upper >= dc[k]) or not np.all(dc[k] >= lower)):
if fail_on_bounds:
raise ValueError("bounds not met: {} = {} not within ({} {})"\
.format(k, dc[k], lower, upper))
else:
print("Clipping initial {} to be within bounds ({}, {}): {}"\
.format(k, lower, upper, dc[k]))
dc[k] = np.clip(dc[k], lower + tolerance, upper - tolerance)
return dc
def nlp(params, y, L):
    # Negative log-probability: minimisation objective for scipy optimizers.
    return -ln_prob(y, L, *params)
def _get_1d_initialisation_point(y, scalar=5, bounds=None):
    """
    Return a list of candidate initialisation dicts for the 1-D mixture model:
    a heuristic guess, an optimized refinement of it (when finite), and the
    literal string "random" as a final fallback.
    """
    N = y.size
    # Heuristic starting point; cap mu_single at 10.
    init = dict(
        theta=0.75,
        mu_single=np.min([np.median(y, axis=0), 10]),
        sigma_single=0.2,
        sigma_multiple=0.5)

    # Pull any out-of-bounds starting value to the bound midpoint.
    if bounds is not None:
        for k, (lower, upper) in bounds.items():
            if not (upper >= init[k] >= lower):
                init[k] = np.mean([upper, lower])

    # mu_multiple must exceed this lower limit (log-normal mode constraint).
    lower_mu_multiple = np.log(init["mu_single"] + scalar * init["sigma_single"]) \
                      + init["sigma_multiple"]**2
    init["mu_multiple"] = 1.1 * lower_mu_multiple

    op_kwds = dict(x0=_pack_params(**init), args=(y, 1))
    # Local lambda shadows the module-level nlp on purpose (same definition).
    nlp = lambda params, y, L: -ln_prob(y, L, *params)
    p_opt = op.minimize(nlp, **op_kwds)

    keys = ("theta", "mu_single", "sigma_single", "mu_multiple", "sigma_multiple")
    init_dict = _check_params_dict(init)
    op_dict = _check_params_dict(dict(zip(keys, _unpack_params(p_opt.x))))

    # Only return valid init values.
    valid_inits = []
    for init in (init_dict, op_dict):
        if np.isfinite(nlp(_pack_params(**init), y, 1)):
            valid_inits.append(init)

    valid_inits.append("random")
    return valid_inits
def get_initialization_point(y):
    """
    Return an optimized initialisation dict for the multi-dimensional mixture
    model, refined from data-driven heuristics by scipy minimisation.
    """
    N, D = y.shape

    # Use the below-mean subset to seed the single-star component.
    ok = y <= np.mean(y)
    init_dict = dict(
        theta=0.5,
        mu_single=np.median(y[ok], axis=0),
        sigma_single=0.1 * np.median(y[ok], axis=0),
        sigma_multiple=0.1 * np.ones(D),
    )

    # mu_multiple is *highly* constrained. Select the mid-point between what is
    # OK:
    mu_multiple_ranges = np.array([
        np.log(init_dict["mu_single"] + 1 * init_dict["sigma_single"]) + init_dict["sigma_multiple"]**2,
        np.log(init_dict["mu_single"] + 5 * init_dict["sigma_single"]) + pow(init_dict["sigma_multiple"], 2)
    ])
    init_dict["mu_multiple"] = np.mean(mu_multiple_ranges, axis=0)
    #init_dict["mu_multiple_uv"] = 0.5 * np.ones(D)

    x0 = _pack_params(**init_dict)
    op_kwds = dict(x0=x0, args=(y, D))
    p_opt = op.minimize(nlp, **op_kwds)

    init_dict = dict(zip(
        ("theta", "mu_single", "sigma_single", "mu_multiple", "sigma_multiple"),
        _unpack_params(p_opt.x)))
    init_dict["mu_multiple_uv"] = 0.5 * np.ones(D)
    init_dict = _check_params_dict(init_dict)
    return init_dict
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,282 | andycasey/velociraptor | refs/heads/master | /attic/validation.py |
"""
Validation of the radial velocity calibration model.
"""
import numpy as np
import matplotlib.pyplot as plt
import corner
import pickle
from scipy.special import logsumexp
import velociraptor
# Load the data and make some plots.
data_path = "data/rv-all.fits"
sources = velociraptor.load_gaia_sources(data_path)

# Calculate temperature from bp-rp colour and use the distance from the
# template as a predictor for the error in radial velocity (under the assumption
# that the wrong template was used, giving a higher velocity error)
# Boolean mask via elementwise multiplication of conditions.
use_in_fit = np.isfinite(sources["radial_velocity"]) \
    * (sources["phot_bp_rp_excess_factor"] < 1.5) \
    * np.isfinite(sources["bp_rp"]) \
    * np.isfinite(sources["teff_val"]) \
    * (sources["bp_rp"] < 2.5) \
    * (sources["bp_rp"] > 0.5)

# Quadratic fit of Teff against 1/(bp-rp).
x = sources["bp_rp"][use_in_fit]
y = sources["teff_val"][use_in_fit]
coeff = np.polyfit(1.0/x, y, 2)

xi = np.linspace(x.min(), x.max(), 100)
fig, ax = plt.subplots()
ax.scatter(x, y, s=1, alpha=0.05, facecolor="k")
ax.plot(xi, np.polyval(coeff, 1.0/xi), c='r', lw=2)

#sources["approx_teff"] = np.polyval(coeff, 1.0/sources["bp_rp"])
# |Teff(colour) - Teff(template)|: proxy for a wrong RV template.
sources["teff_from_rv_template"] = np.abs(
    np.polyval(coeff, 1.0/sources["bp_rp"]) - sources["rv_template_teff"])
# 2-D histogram of template-Teff mismatch vs RV excess variance.
N_bins = 100
H_all, xedges, yedges = np.histogram2d(
    sources["teff_from_rv_template"][use_in_fit],
    sources["rv_excess_variance"][use_in_fit],
    bins=(
        np.linspace(0, 3000, N_bins),
        np.linspace(0, 10000, N_bins)
    ))

kwds = dict(
    aspect=np.ptp(xedges)/np.ptp(yedges),
    extent=(xedges[0], xedges[-1], yedges[-1], yedges[0]),
)

from matplotlib.colors import LogNorm

fig, ax = plt.subplots(figsize=(7.0, 5.5))
image = ax.imshow(H_all.T, norm=LogNorm(), cmap="Blues", **kwds)
cbar = plt.colorbar(image, ax=ax)
cbar.set_label(r"\textrm{count}")
ax.set_ylim(ax.get_ylim()[::-1])

ax.set_xlabel(r"$|T_{\rm eff} - T_{\rm eff,template}|$ $({\rm K})$")
ax.set_ylabel(r"\textrm{radial velocity excess variance} $({\rm km}^2\,{\rm s}^{-2})$")

fig.tight_layout()
fig.savefig("figures/rv_excess_variance_wrt_teff_diff.pdf", dpi=300)
# Binned fraction of likely binaries (p_sb_50 > 0.5) as a function of the
# template-Teff mismatch; left panel uses equi-density bins, right panel
# equi-spaced bins.
N_bins = 30
x = sources["teff_from_rv_template"][use_in_fit]

# Hoisted out of the loop: importing inside the loop body re-ran the import
# machinery every iteration for no benefit.
from collections import Counter

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
for i, (ax, equidensity) in enumerate(zip(axes, (True, False))):
    if equidensity:
        bins = np.percentile(x, np.linspace(0, 100, N_bins))
    else:
        bins = np.linspace(np.min(x), np.max(x), N_bins)

    binary_fraction = np.zeros(N_bins - 1, dtype=float)  # NOTE: currently unused
    indices = np.digitize(sources["teff_from_rv_template"][use_in_fit], bins) - 1

    counts = Counter(indices)
    N_per_bin = np.array([counts.get(k, 0) for k in range(N_bins - 1)],
        dtype=float)

    counts_finite = Counter(indices[(sources["p_sb_50"] > 0.5)[use_in_fit]])
    N_rv_per_bin = np.array([counts_finite.get(k, 0) for k in range(N_bins - 1)],
        dtype=float)

    # Binomial-style fraction with naive Poisson error propagation.
    f_rv_per_bin = N_rv_per_bin / N_per_bin
    f_rv_per_bin_err = f_rv_per_bin * np.sqrt(
        (np.sqrt(N_rv_per_bin)/N_rv_per_bin)**2 + \
        (np.sqrt(N_per_bin)/N_per_bin)**2)

    centroids = bins[:-1] + 0.5 * np.diff(bins)

    #fig, ax = plt.subplots()
    ax.plot(centroids, f_rv_per_bin, drawstyle="steps-mid")
    # BUG FIX: fmt=None is invalid in modern matplotlib (fmt must be a format
    # string); the documented way to suppress markers/lines is fmt="none".
    ax.errorbar(centroids, f_rv_per_bin, yerr=f_rv_per_bin_err, fmt="none")
raise a  # NOTE(review): deliberate debugging stop (raises NameError)

model, data_dict, init_dict, used_in_fit = velociraptor.prepare_model(
    S=1e4, **sources)

# This *works*, but you could start from any random position.
init_dict = dict([
    ('theta', 0.20),
    ('mu_coefficients', np.array([0.3, 1e-4, 1e12, 1, 1])),
    ('sigma_coefficients', np.array([0.3, 1e-4, 4e11, 1, 1])),
])

# Maximum a-posteriori optimization from the hand-picked starting point.
p_opt = model.optimizing(data=data_dict, init=init_dict)
print(p_opt)
# Compare the optimized model (blue) with the initial guess (red) on top of
# the single-epoch RV variance data.
fig, ax = plt.subplots()
ax.scatter(sources["phot_rp_mean_flux"][used_in_fit], data_dict["rv_variance"],
    s=1, alpha=0.5, facecolor="k")

x = np.logspace(
    np.log10(np.nanmin(sources["phot_rp_mean_flux"])),
    np.log10(np.nanmax(sources["phot_rp_mean_flux"])),
    1000)
bp_rp = np.nanmean(sources["bp_rp"]) * np.ones(x.size)

mu_opt = np.dot(
    p_opt["mu_coefficients"],
    velociraptor._rvf_design_matrix(x, bp_rp=bp_rp))
sigma_opt = np.dot(
    p_opt["sigma_coefficients"],
    velociraptor._rvf_design_matrix(x, bp_rp=bp_rp))

mu_init = np.dot(
    init_dict["mu_coefficients"],
    velociraptor._rvf_design_matrix(x, bp_rp=bp_rp))
sigma_init = np.dot(
    init_dict["sigma_coefficients"],
    velociraptor._rvf_design_matrix(x, bp_rp=bp_rp))

ax.plot(x, mu_init, c='r')
ax.fill_between(x, mu_init - sigma_init, mu_init + sigma_init, facecolor="r",
    alpha=0.3, edgecolor="none")

ax.plot(x, mu_opt, c='b')
ax.fill_between(x, mu_opt - sigma_opt, mu_opt + sigma_opt, facecolor="b",
    alpha=0.3, edgecolor="none")

ax.semilogx()
ax.set_ylim(0, 400)

# Full MCMC sampling, initialised at the optimized point.
iterations = 2000
p_samples = model.sampling(**velociraptor.stan.sampling_kwds(
    data=data_dict, chains=2, iter=iterations, init=p_opt))
print(p_samples)
parameter_names = ("theta", "mu_coefficients", "sigma_coefficients")
label_names = (r"$\theta$", r"$\mu_0$", r"$\mu_1$", r"$\mu_2$",
r"$\mu_3$", r"$\mu_4$", r"$\sigma_0$", r"$\sigma_1$",
r"$\sigma_2$", r"$\sigma_3$", r"$\sigma_4$")
chains_dict = p_samples.extract(parameter_names, permuted=True)
chains = np.hstack([
chains_dict["theta"].reshape((iterations, 1)),
chains_dict["mu_coefficients"],
chains_dict["sigma_coefficients"]
])
"""
fig = corner.corner(chains, labels=label_names)
# Make many draws.
x = np.logspace(
np.log10(np.nanmin(sources["phot_rp_mean_flux"])),
np.log10(np.nanmax(sources["phot_rp_mean_flux"])),
1000)
bp_rp = np.nanmean(sources["bp_rp"]) * np.ones(x.size)
dm = velociraptor._rvf_design_matrix(x, bp_rp=bp_rp)
fig, ax = plt.subplots()
ax.scatter(sources["phot_rp_mean_flux"][used_in_fit], data_dict["rv_variance"],
s=1, alpha=0.5, facecolor="#000000")
N_mu, N_sigma = 5, 5
for index in np.random.choice(iterations, min(iterations, 250), replace=False):
y = np.dot(chains[index][1:1 + N_mu], dm) \
+ np.random.normal(0, 1) * np.dot(chains[index][1 + N_mu:], dm)
ax.plot(x, y, "r-", alpha=0.05)
ax.semilogx()
ax.set_ylim(0, ax.get_ylim()[1])
"""
# Calculate point estimates of binary probability for all stars.
has_rv = np.isfinite(sources["radial_velocity"])
J = sum(has_rv)

percentiles = [16, 50, 84]
for p in percentiles:
    sources["p_sb_{:.0f}".format(p)] = np.nan * np.ones(len(sources))
sources["rv_excess_variance"] = np.nan * np.ones(len(sources))

for j, index in enumerate(np.where(has_rv)[0]):

    dm = velociraptor._rvf_design_matrix(**sources[[index]])
    # Model mean and inverse variance for every posterior sample.
    mu = np.dot(dm.T, chains_dict["mu_coefficients"].T)
    ivar = np.dot(dm.T, chains_dict["sigma_coefficients"].T)**-2

    # Binary component: uniform over the observed variance range; single-star
    # component: normal with the sampled (mu, ivar).
    log_pb = np.log(chains_dict["theta"]) - np.log(np.max(data_dict["rv_variance"]))
    log_ps = np.log(1 - chains_dict["theta"]) \
        - 0.5 * np.log(2 * np.pi) + 0.5 * np.log(ivar) \
        - 0.5 * (sources["rv_single_epoch_variance"][index, np.newaxis] - mu)**2 * ivar

    log_p_sb = log_pb - logsumexp([log_pb * np.ones_like(log_ps), log_ps], axis=0)
    p_sb = np.exp(log_p_sb).flatten()

    # Store the 16/50/84 percentile point estimates of p(binary).
    vs = np.percentile(p_sb, percentiles)
    for p, v in zip(percentiles, vs):
        sources["p_sb_{:.0f}".format(p)][index] = v

    print(j, J, vs)

    # Calculate excess variance.
    rv_max_single_star_variance = np.percentile(
        np.random.normal(0, 1, size=mu.shape) * ivar**-0.5, 99, axis=1)
    rv_excess_variance = np.clip(
        sources["rv_single_epoch_variance"][index] - rv_max_single_star_variance,
        0, np.inf)
    sources["rv_excess_variance"][index] = rv_excess_variance

# TODO: Save the PDFs?
sources.write(data_path, overwrite=True)

#with open("data/binary-pdfs.pkl", "wb") as fp:
#    pickle.dump((sources["source_id"][has_rv], p_sb), fp, -1)
raise a
# Plot the model against various properties of interest.
# Each tuple: (source column name, human-readable axis label, use log-x scale).
shorthand_parameters = [
    ("phot_rp_mean_flux", "mean rp flux", True),
    ("phot_bp_mean_flux", "mean bp flux", True),
    ("phot_g_mean_flux", "mean g flux", True),
    ("bp_rp", "bp-rp colour", False),
    ("phot_g_mean_mag", "mean g magnitude", False),
    ("phot_rp_mean_mag", "mean rp magnitude", False),
    ("phot_bp_mean_mag", "mean bp magnitude", False),
    ("teff_val", "inferred temperature", False),
    ("ra", "right ascension", False),
    ("dec", "declination", False),
    ("radial_velocity", "radial velocity", False),
    ("rv_nb_transits", "number of radial velocity transits", False),
    ("absolute_g_mag", "absolute g magnitude", False),
    ("absolute_bp_mag", "absolute bp magnitude", False),
    ("absolute_rp_mag", "absolute rp magnitude", False),
    ("rv_template_teff", "rv template teff", False),
    ("rv_template_logg", "rv template logg", False),
    ("rv_template_fe_h", "rv template [Fe/H]", False),
]
for label_name, description, is_semilogx in shorthand_parameters:
    x = sources[label_name]
    # Residual of the observed single-epoch variance about the model mean.
    y = sources["rv_single_epoch_variance"] - model_rv_sev_mu
    idx = np.argsort(x)
    fig, axes = plt.subplots(1, 2, figsize=(12, 6))
    for ax in axes:
        ax.scatter(x, y, **model_scatter_kwds)
        if is_semilogx:
            ax.semilogx()
        ax.set_xlabel(r"\textrm{{{0}}}".format(description))
        ax.set_ylabel(r"\textrm{residual single epoch radial velocity variance} $(\textrm{km}^2\,\textrm{s}^{-2})$")
    axes[0].set_title(r"\textrm{all points}")
    axes[1].set_title(r"\textrm{5th-95th percentile in} $y$\textrm{-axis}")
    # Right-hand panel zooms in on the bulk of the residual distribution.
    axes[1].set_ylim(*np.nanpercentile(y, [5, 95]))
    fig.tight_layout()
# Calculate excess given number of transits.
# TODO: Do this somewhere else?
# Convert the single-epoch variance model into a per-measurement RV error
# model: error = sqrt(variance * 2 / (pi * n_transits)).
scalar = 2.0 / (np.pi * sources["rv_nb_transits"])
model_rv_error_mu = np.sqrt(model_rv_sev_mu * scalar)
# Asymmetric 1-sigma bounds propagated through the square root.
model_rv_error_sigma_pos = \
    np.sqrt((model_rv_sev_mu + model_rv_sev_sigma) * scalar) - model_rv_error_mu
model_rv_error_sigma_neg = \
    np.sqrt((model_rv_sev_mu - model_rv_sev_sigma) * scalar) - model_rv_error_mu
# Model vs observed single-epoch variance as a function of rp mean flux.
x = sources["phot_rp_mean_flux"]
y = sources["rv_single_epoch_variance"]
idx = np.argsort(x)
fig, ax = plt.subplots()
ax.scatter(x, y, **scatter_kwds)
ax.plot(x[idx], model_rv_sev_mu[idx], "r-")
ax.fill_between(
    x[idx],
    (model_rv_sev_mu - model_rv_sev_sigma)[idx],
    (model_rv_sev_mu + model_rv_sev_sigma)[idx],
    facecolor="r", alpha=0.3, edgecolor="none")
ax.set_xlim(np.nanmin(x), np.nanmax(x))
ax.set_ylim(-0.5, 2 * np.nanmax(model_rv_sev_mu + model_rv_sev_sigma))
ax.semilogx()
ax.set_xlabel(r"\textrm{rp mean flux}")
ax.set_ylabel(r"\textrm{single epoch radial velocity variance} $(\textrm{km}^2\,\textrm{s}^{-2})$")
fig.tight_layout()
# Same comparison against the catalogued radial velocity error.
x = sources["phot_rp_mean_flux"]
y = sources["radial_velocity_error"]
idx = np.argsort(x)
fig, ax = plt.subplots()
ax.scatter(x, y, **scatter_kwds)
ax.plot(x[idx], model_rv_error_mu[idx], "r-")
ax.fill_between(
    x[idx],
    (model_rv_error_mu + model_rv_error_sigma_pos)[idx],
    (model_rv_error_mu + model_rv_error_sigma_neg)[idx],
    facecolor="r", alpha=0.3, edgecolor="none")
ax.set_xlim(np.nanmin(x), np.nanmax(x))
ax.set_ylim(-0.5, 2 * np.nanmax(model_rv_error_mu + model_rv_error_sigma_pos))
ax.semilogx()
ax.set_xlabel(r"\textrm{rp mean flux}")
ax.set_ylabel(r"\textrm{radial velocity error} $(\textrm{km\,s}^{-1})$")
fig.tight_layout()
# Plot the model residuals against different stellar properties.
y = sources["rv_single_epoch_variance"]
mu = model_rv_sev_mu
sigma = model_rv_sev_sigma
for label_name, description, is_semilogx in shorthand_parameters:
    fig, ax = plt.subplots()
    x = sources[label_name]
    idx = np.argsort(x)
    ax.scatter(x, y - mu, **scatter_kwds)
    # Zero-residual reference line and the model's 1-sigma band around it.
    ax.axhline(0, c="r")
    ax.fill_between(x[idx], -sigma[idx], +sigma[idx], facecolor="r", alpha=0.3,
        edgecolor="none")
    ax.set_xlim(np.nanmin(x), np.nanmax(x))
    # TODO: set ylim
    abs_ylim = 2 * np.nanmax(sigma)
    ax.set_ylim(-abs_ylim, +abs_ylim)
    if is_semilogx:
        ax.semilogx()
    ax.set_xlabel(r"\textrm{{{0}}}".format(description))
    # BUG FIX: the label previously read "radial valocity" (typo); it now
    # matches the spelling used by every other figure in this script.
    ax.set_ylabel(r"\textrm{residual single epoch radial velocity variance} $(\textrm{km}^2\,\textrm{s}^{-2})$")
    fig.tight_layout()
# Plot the binary fraction with different parameters.
N_bins = 15
# Posterior mean probability of binarity per fitted source.
# NOTE(review): `p_samples` and `used_in_fit` come from earlier in the script,
# outside this view.
prob_binarity = np.mean(np.exp(p_samples["log_membership_probability"]), axis=0)
for label_name, description, is_semilogx in shorthand_parameters:
    x = sources[label_name][used_in_fit]
    y = prob_binarity
    finite = np.isfinite(x * y)
    x, y = (x[finite], y[finite])
    # Equi-spaced bins across the x range (log-spaced when plotting in log-x).
    if is_semilogx:
        es_bins = np.logspace(
            np.log10(x.min()), np.log10(x.max()), N_bins)
    else:
        es_bins = np.linspace(x.min(), x.max(), N_bins)
    es_bin_centers = es_bins[:-1] + np.diff(es_bins)/2.
    es_binarity = np.zeros(es_bins.size - 1, dtype=float)
    es_binarity_error = np.zeros_like(es_binarity)
    for i, left_edge in enumerate(es_bins[:-1]):
        right_edge = es_bins[i + 1]
        in_bin = (right_edge > x) * (x >= left_edge)
        # Fraction of stars in this bin with P(binary) > 0.5, with a Poisson
        # error from the raw count.
        # NOTE(review): an empty bin gives 0/0 -> NaN (with a numpy warning).
        es_binarity[i] += np.sum((y > 0.5) * in_bin) \
            / np.sum(in_bin)
        es_binarity_error[i] += np.sqrt(np.sum((y > 0.5) * in_bin)) \
            / np.sum(in_bin)
    # Equi-density bins: (approximately) equal numbers of stars per bin.
    if is_semilogx:
        ed_bins = 10**np.percentile(np.log10(x), np.linspace(0, 100, N_bins))
    else:
        ed_bins = np.percentile(x, np.linspace(0, 100, N_bins))
    ed_bin_centers = ed_bins[:-1] + np.diff(ed_bins)/2.
    ed_binarity = np.zeros(ed_bins.size - 1, dtype=float)
    ed_binarity_error = np.zeros_like(ed_binarity)
    for i, left_edge in enumerate(ed_bins[:-1]):
        right_edge = ed_bins[i + 1]
        in_bin = (right_edge > x) * (x >= left_edge)
        ed_binarity[i] += np.sum((y > 0.5) * in_bin) \
            / np.sum(in_bin)
        ed_binarity_error[i] += np.sqrt(np.sum((y > 0.5) * in_bin))\
            / np.sum(in_bin)
    # Show both binning schemes side by side with shared limits.
    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    xx, yy = velociraptor.mpl_utils.plot_histogram_steps(
        axes[0], es_bin_centers, es_binarity, es_binarity_error)
    axes[0].set_title(r"\textrm{equi-spaced bins}")
    xx, yy = velociraptor.mpl_utils.plot_histogram_steps(
        axes[1], ed_bin_centers, ed_binarity, ed_binarity_error)
    axes[1].set_title(r"\textrm{equi-density bins}")
    ylims = np.hstack([ax.get_ylim() for ax in axes])
    for ax in axes:
        ax.set_xlim(xx[0], xx[-1])
        ax.set_ylim(np.min(ylims), np.max(ylims))
        if is_semilogx: ax.semilogx()
        ax.set_ylabel(r"\textrm{binary fraction}")
        ax.set_xlabel(r"\textrm{{{0}}}".format(description))
    fig.tight_layout()
# OK let's do this for a sample of giants and a sample of dwarfs and then
# compare.
# Quality cut: positive parallax with parallax S/N > 5.
qc = (sources["parallax"] > 0) \
    * ((sources["parallax"]/sources["parallax_error"]) > 5)
fig, ax = plt.subplots()
ax.scatter(sources["bp_rp"], sources["absolute_g_mag"], **scatter_kwds)
# Invert the magnitude axis so brighter stars plot at the top.
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_xlabel(r"\textrm{bp - rp}")
ax.set_ylabel(r"\textrm{absolute g magnitude}")
fig.tight_layout()
# Colour-magnitude selections within 1 < bp-rp < 3:
# giants are brighter than G = 3; dwarfs fainter than G = 4.
qc_giants = qc * (3 > sources["bp_rp"]) * (sources["bp_rp"] > 1.0) \
    * (sources["absolute_g_mag"] < 3)
qc_dwarfs = qc * (3 > sources["bp_rp"]) * (sources["bp_rp"] > 1.0) \
    * (sources["absolute_g_mag"] > 4)
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
for ax in axes:
    ax.scatter(
        sources["bp_rp"][qc],
        sources["absolute_g_mag"][qc],
        **scatter_kwds)
    ax.set_ylim(ax.get_ylim()[::-1])
    ax.set_xlabel(r"\textrm{bp - rp}")
    ax.set_ylabel(r"\textrm{absolute g magnitude}")
# Highlight each selection on its own panel.
axes[0].scatter(
    sources["bp_rp"][qc_dwarfs],
    sources["absolute_g_mag"][qc_dwarfs],
    s=5, zorder=10)
axes[1].scatter(
    sources["bp_rp"][qc_giants],
    sources["absolute_g_mag"][qc_giants],
    s=5, zorder=10)
axes[0].set_title(r"\textrm{main-sequence stars}")
axes[1].set_title(r"\textrm{giant stars}")
fig.tight_layout()
print("Number of stars in dwarf model: {}".format(sum(qc_dwarfs)))
print("Number of stars in giant model: {}".format(sum(qc_giants)))
# Run the model for each subset.
# For each subset: prepare the Stan inputs, find the MAP solution, then
# sample (2 chains x 2000 iterations) starting from the MAP point.
dwarf_model, dwarf_data_dict, dwarf_init_dict, dwarf_used_in_fit \
    = velociraptor.prepare_model(**sources[qc_dwarfs])
dwarf_p_opt = dwarf_model.optimizing(data=dwarf_data_dict, init=dwarf_init_dict)
dwarf_samples = dwarf_model.sampling(**velociraptor.stan.sampling_kwds(
    data=dwarf_data_dict, chains=2, iter=2000, init=dwarf_p_opt))
giant_model, giant_data_dict, giant_init_dict, giant_used_in_fit \
    = velociraptor.prepare_model(**sources[qc_giants])
giant_p_opt = giant_model.optimizing(data=giant_data_dict, init=giant_init_dict)
giant_samples = giant_model.sampling(**velociraptor.stan.sampling_kwds(
    data=giant_data_dict, chains=2, iter=2000, init=giant_p_opt))
# Plot the performance of the two models on the same figure.
giant_model_rv_sev_mu, giant_model_rv_sev_sigma = \
    velociraptor.predict_map_rv_single_epoch_variance(giant_samples, **sources[qc_giants])
dwarf_model_rv_sev_mu, dwarf_model_rv_sev_sigma = \
    velociraptor.predict_map_rv_single_epoch_variance(dwarf_samples, **sources[qc_dwarfs])
# Each tuple: (column, axis label, log-x scale?, equi-density bins?).
shorthand_parameters = [
    ("phot_rp_mean_flux", "mean rp flux", True, True),
    ("phot_bp_mean_flux", "mean bp flux", True, True),
    ("phot_g_mean_flux", "mean g flux", True, True),
    ("bp_rp", "bp-rp colour", False, True),
    ("phot_g_mean_mag", "mean g magnitude", False, True),
    ("phot_rp_mean_mag", "mean rp magnitude", False, True),
    ("phot_bp_mean_mag", "mean bp magnitude", False, True),
    ("teff_val", "inferred temperature", False, True),
    ("rv_template_teff", "rv template teff", False, False),
    ("rv_template_logg", "rv template logg", False, False),
    ("rv_template_fe_h", "rv template [Fe/H]", False, False),
]
def _show_text_upper_right(ax, text):
return ax.text(0.95, 0.95, text, transform=ax.transAxes,
horizontalalignment="right", verticalalignment="top")
def _show_number_of_data_points(ax, N):
return _show_text_upper_right(ax, r"${0:.0f}$".format(N))
def _smooth_model(x, y, yerr, is_semilogx, average_function=np.mean,
equidensity=True, N_smooth_points=30):
N = min(N_smooth_points, len(set(x)))
if equidensity:
if is_semilogx:
xb = 10**np.percentile(np.log10(x), np.linspace(0, 100, N))
else:
xb = np.percentile(x, np.linspace(0, 100, N))
else:
if is_semilogx:
xb = np.logspace(np.log10(x.min()), np.log10(x.max()), N)
else:
xb = np.linspace(x.min(), x.max(), N)
xi = xb[:-1] + np.diff(xb)/2.
yi = np.zeros_like(xi)
yerri = np.zeros_like(xi)
for i, left_edge in enumerate(xb[:-1]):
right_edge = xb[i + 1]
in_bin = (right_edge > x) * (x >= left_edge)
yi[i] = average_function(y[in_bin])
yerri[i] = average_function(yerr[in_bin])
return (xi, yi, yerri)
# Three-panel comparison per property: dwarfs (left), giants (middle),
# and both smoothed model curves overlaid (right).
for label_name, description, is_semilogx, equidensity in shorthand_parameters:
    fig, axes = plt.subplots(1, 3, figsize=(18, 6))
    x = sources[label_name][qc_dwarfs][dwarf_used_in_fit]
    y = dwarf_model_rv_sev_mu
    idx = np.argsort(x)
    axes[0].scatter(
        x,
        sources["rv_single_epoch_variance"][qc_dwarfs][dwarf_used_in_fit],
        facecolor="r", s=1, alpha=0.05, rasterized=True)
    _show_number_of_data_points(axes[0], len(x))
    # Smooth out the effects.
    xi, yi, yerri = _smooth_model(
        x, dwarf_model_rv_sev_mu, dwarf_model_rv_sev_sigma, is_semilogx,
        equidensity=equidensity)
    # Dwarf model in red on its own panel and on the comparison panel.
    for ax in (axes[0], axes[2]):
        ax.plot(xi, yi, "r-")
        ax.fill_between(xi, yi - yerri, yi + yerri,
            facecolor="r", alpha=0.3, edgecolor="none")
    x = sources[label_name][qc_giants][giant_used_in_fit]
    y = giant_model_rv_sev_mu
    idx = np.argsort(x)
    axes[1].scatter(
        x,
        sources["rv_single_epoch_variance"][qc_giants][giant_used_in_fit],
        facecolor="b", s=1, alpha=0.05, rasterized=True)
    _show_number_of_data_points(axes[1], len(x))
    xi, yi, yerri = _smooth_model(
        x, giant_model_rv_sev_mu, giant_model_rv_sev_sigma, is_semilogx,
        equidensity=equidensity)
    # Giant model in blue on its own panel and on the comparison panel.
    for ax in (axes[1], axes[2]):
        ax.plot(xi, yi, "b-")
        ax.fill_between(
            xi, yi - yerri, yi + yerri,
            facecolor="b", alpha=0.3, edgecolor="none")
    axes[0].set_title(r"\textrm{main-sequence stars}")
    axes[1].set_title(r"\textrm{giant stars}")
    # Common axis limits taken from the full quality-cut sample.
    x = sources[label_name][qc]
    xlims = (np.nanmin(x), np.nanmax(x))
    for ax in axes:
        ax.set_xlabel(r"\textrm{{{0}}}".format(description))
        ax.set_ylabel(r"\textrm{single epoch radial velocity variance} $(\textrm{km}^2\,\textrm{s}^{-2})$")
        if is_semilogx:
            ax.semilogx()
        ax.set_xlim(xlims)
        ax.set_ylim(axes[2].get_ylim())
    _show_text_upper_right(
        axes[2],
        r"$N_\textrm{{model bins}} = {0}$ \textrm{{(equi-{1})}}".format(
            len(xi) + 1, "density" if equidensity else "spaced"))
    fig.tight_layout()
    fig.savefig(
        "figures/giant-vs-dwarf-{}.pdf".format(label_name.replace("_", "-")),
        dpi=150)
50,283 | andycasey/velociraptor | refs/heads/master | /run_analysis.py |
"""
Analysis script for the Velociraptor project.
"""
import logging
import multiprocessing as mp
import numpy as np
import os
import pickle
import sys
import tqdm
import yaml
from time import (sleep, time)
from astropy.io import fits
from scipy import optimize as op
from scipy.special import logsumexp
import george
import npm_utils as npm
import stan_utils as stan
USE_SV_MASK = True
if __name__ == "__main__":
    # Usage: python run_analysis.py <config.yaml>
    config_path = sys.argv[1]
    with open(config_path, "r") as fp:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; yaml.safe_load would be preferable.
        config = yaml.load(fp)
    random_seed = int(config["random_seed"])
    np.random.seed(random_seed)
    logging.info(f"Config path: {config_path} with seed {random_seed}")
    # Check results path now so we don't die later.
    # NOTE(review): this only reads the key; it does not verify the path is
    # writable.
    results_path = config["results_path"]
    # Load data.
    data = fits.open(config["data_path"])[1].data
    # Get a list of all relevant label names
    all_label_names = []
    for model_name, model_config in config["models"].items():
        all_label_names.append(model_config["predictor_label_name"])
        all_label_names.extend(model_config["kdtree_label_names"])
    all_label_names = list(np.unique(all_label_names))
    # Mask for finite data points.
    finite = np.all([np.isfinite(data[ln]) for ln in all_label_names], axis=0)
    USE_SV_MASK = config["sv_mask"]
    if USE_SV_MASK:
        # Mask for science verification
        with open("sv.mask", "rb") as fp:
            sv_mask = pickle.load(fp)
        sv_mask = sv_mask[finite]
    # Load the model.
    model = stan.load_stan_model(config["model_path"], verbose=False)
    # Make sure that some entries have the right type.
    default_opt_kwds = config.get("optimisation_kwds", {})
    for key in ("tol_obj", "tol_grad", "tol_rel_grad", "tol_rel_obj"):
        if key in default_opt_kwds:
            default_opt_kwds[key] = float(default_opt_kwds[key])
    logging.info("Optimization keywords: {}".format(default_opt_kwds))
    # Default parameter bounds for the mixture model; per-model config may
    # override any of them below.
    default_bounds = dict(bound_theta=[0.5, 1],
        bound_mu_single=[0.5, 15],
        bound_sigma_single=[0.05, 10],
        bound_sigma_multiple=[0.2, 1.6])
    # Randomly choose M sources (indices into the `finite` subset) to fit.
    M = config["number_of_sources"]
    indices = np.random.choice(sum(finite), M, replace=False)
    model_results = dict()
    for model_name, model_config in config["models"].items():
        logging.info(f"Running model {model_name} with config:\n{model_config}")
        bounds = default_bounds.copy()
        for k, (lower, upper) in model_config["bounds"].items():
            bounds[f"bound_{k}"] = [lower, upper]
        # Set up a KD-tree.
        X = np.vstack([data[ln][finite] for ln in model_config["kdtree_label_names"]]).T
        Y = np.array(data[model_config["predictor_label_name"]])[finite]
        N, D = X.shape
        kdt, scales, offsets = npm.build_kdtree(X,
            relative_scales=model_config["kdtree_relative_scales"])
        kdt_kwds = dict(offsets=offsets, scales=scales, full_output=True)
        kdt_kwds.update(
            minimum_radius=model_config["kdtree_minimum_radius"],
            maximum_radius=model_config.get("kdtree_maximum_radius", None),
            minimum_points=model_config["kdtree_minimum_points"],
            maximum_points=model_config["kdtree_maximum_points"],
            minimum_density=model_config.get("kdtree_minimum_density", None))
        # Optimize the non-parametric model for those sources.
        # `results` holds the 5 packed mixture parameters per source;
        # `done` tracks which of the M sources have been processed.
        results = np.zeros((M, 5))
        done = np.zeros(M, dtype=bool)
def optimize_mixture_model(index, inits=None, scalar=5):
    """
    Optimize the single/multiple mixture model for the source at `index`.

    :param index: row index into the (closure) design matrix `X` / target `Y`.
    :param inits: optional list of initialisation dictionaries for the Stan
        optimizer; computed from the data when None.
    :param scalar: scalar passed to the initialisation heuristic and the
        Stan data dictionary.

    :returns: a 3-tuple `(index, p_opt, meta)` where `p_opt` is the best
        optimisation result (or None if no initialisation converged) and
        `meta` summarises the local data ball.
    """
    # Select the nearby sources from the KD-tree ball around this point.
    d, nearby_idx, meta = npm.query_around_point(kdt, X[index], **kdt_kwds)
    y = Y[nearby_idx]
    ball = X[nearby_idx]
    if inits is None:
        inits = npm._get_1d_initialisation_point(
            y, scalar=scalar, bounds=model_config["bounds"])
    # Summary statistics about the data ball (replaces the query meta).
    meta = dict(max_log_y=np.log(np.max(y)),
                N=nearby_idx.size,
                y_percentiles=np.percentile(y, [16, 50, 84]),
                ball_ptps=np.ptp(ball, axis=0),
                ball_medians=np.median(ball, axis=0),
                init_points=inits,
                kdt_indices=nearby_idx)
    data_dict = dict(y=y,
                     N=y.size,
                     scalar=scalar)
    data_dict.update(bounds)
    p_opts = []
    ln_probs = []
    for j, init_dict in enumerate(inits):
        opt_kwds = dict(
            init=init_dict,
            data=data_dict)
        opt_kwds.update(default_opt_kwds)
        # BUG FIX: `p_opt` was previously left unbound when model.optimizing()
        # raised; the original probe caught the UnboundLocalError once, but
        # the subsequent `if p_opt is None` re-raised it uncaught. Initialise
        # explicitly before each attempt.
        p_opt = None
        # Do optimization.
        # TODO: Suppressing output is always dangerous.
        with stan.suppress_output(config.get("suppress_stan_output", True)) as sm:
            try:
                p_opt = model.optimizing(**opt_kwds)
            except Exception:
                logging.exception(f"Exception occurred when optimizing index {index}"\
                                  f" from {init_dict}:")
            else:
                if p_opt is not None:
                    p_opts.append(p_opt)
                    ln_probs.append(npm.ln_prob(y, 1, *npm._pack_params(**p_opt)))
        if p_opt is None:
            # Surface the captured Stan output to aid debugging.
            stdout, stderr = sm.outputs
            logging.warning("Stan failed. STDOUT & STDERR:")
            logging.warning(f"STDOUT:\n{stdout}\nSTDERR:\n{stderr}")
    if len(p_opts) < 1:
        logging.warning("Optimization on index {} did not converge from any "\
                        "initial point trialled. Consider relaxing the "\
                        "optimization tolerances! If this occurs regularly "\
                        "then something is very wrong!".format(index))
        return (index, None, meta)
    # Keep the attempt with the highest log-probability.
    idx = np.argmax(ln_probs)
    p_opt = p_opts[idx]
    meta["init_idx"] = idx
    return (index, p_opt, meta)
def sp_swarm(*sp_indices, **kwargs):
    """
    Optimize every requested source serially in the current process.

    Completion flags and packed parameters are written into the enclosing
    `done` and `results` arrays, keyed by position within `sp_indices`.
    """
    logging.info("Running single processor swarm")
    total = len(sp_indices)
    with tqdm.tqdm(sp_indices, total=total) as progress:
        for position, source_index in enumerate(sp_indices):
            # Skip sources already processed (e.g. on a re-entrant call).
            if done[position]:
                continue
            _, p_opt, meta = optimize_mixture_model(source_index)
            progress.update()
            done[position] = True
            if p_opt is not None:
                results[position] = npm._pack_params(**p_opt)
    return None
def mp_swarm(*mp_indices, in_queue=None, out_queue=None, seed=None):
np.random.seed(seed)
swarm = True
while swarm:
try:
j, index = in_queue.get_nowait()
except mp.queues.Empty:
logging.info("Queue is empty")
break
except StopIteration:
logging.warning("Swarm is bored")
break
except:
logging.exception("Unexpected exception:")
break
else:
if index is None and init is False:
swarm = False
break
try:
_, result, meta = optimize_mixture_model(index)
except:
logging.exception(f"Exception when optimizing on {index}")
out_queue.put((j, index, None, dict()))
else:
out_queue.put((j, index, result, meta))
return None
# Dispatch: serial swarm by default, multiprocessing pool when configured.
if not config.get("multiprocessing", False):
    sp_swarm(*indices)
else:
    P = mp.cpu_count()
    with mp.Pool(processes=P) as pool:
        manager = mp.Manager()
        in_queue = manager.Queue()
        out_queue = manager.Queue()
        swarm_kwds = dict(in_queue=in_queue,
            out_queue=out_queue)
        logging.info("Dumping everything into the queue!")
        for j, index in enumerate(indices):
            in_queue.put((j, index, ))
        # NOTE(review): `j` is reused here as the list of async handles and is
        # clobbered again by the tuple unpacking below; a distinct name would
        # be clearer.
        j = []
        for _ in range(P):
            j.append(pool.apply_async(mp_swarm, [], kwds=swarm_kwds))
        # Collect results until every source is done, or the output queue
        # stays silent for 30 seconds.
        with tqdm.tqdm(total=M) as pbar:
            while not np.all(done):
                # Check for output.
                try:
                    r = out_queue.get(timeout=30)
                except mp.queues.Empty:
                    logging.info("No results")
                    break
                else:
                    j, index, result, meta = r
                    done[j] = True
                    if result is not None:
                        results[j] = npm._pack_params(**result)
                    pbar.update(1)
# Do not use bad results.
# Bad results include:
# - Things that are so clearly discrepant in every parameter.
# - Things that are on the edge of the boundaries of parameter space.
# Robust-ish outlier score: summed |deviation from median| in units of the
# per-parameter standard deviation.
sigma = np.abs(results - np.median(results, axis=0)) \
    / np.std(results, axis=0)
sigma = np.sum(sigma, axis=1)
tol_sigma = model_config["tol_sum_sigma"]
tol_proximity = model_config["tol_proximity"]
parameter_names = (
    "theta",
    "mu_single", "sigma_single",
    "mu_multiple", "sigma_multiple")
# Per-parameter bounds from the model config (+-inf when unspecified).
lower_bounds = np.array([model_config["bounds"].get(k, [-np.inf])[0] for k in parameter_names])
upper_bounds = np.array([model_config["bounds"].get(k, [+np.inf])[-1] for k in parameter_names])
not_ok_bound = np.any(
    (np.abs(results - lower_bounds) <= tol_proximity) \
    + (np.abs(results - upper_bounds) <= tol_proximity), axis=1)
not_ok_sigma = sigma > tol_sigma
not_ok = not_ok_bound + not_ok_sigma
print(f"There were {sum(not_ok_sigma)} results discarded for being outliers")
print(f"There were {sum(not_ok_bound)} results discarded for being close to the edge")
print(f"There were {sum(not_ok)} results discarded in total")
model_indices = indices[~not_ok]
results = results[~not_ok]
# Run the gaussian process on the single star estimates.
# One GP per mixture parameter, trained on the per-source optimised values
# and evaluated for every source in `X`.
gp_block_size = 10000
G = 5 # number of kernel hyperparameters
gp_predict_indices = (0, 1, 2, 3, 4)
gp_parameters = np.zeros((len(gp_predict_indices), G))
# Interleaved (mean, variance) predictions per parameter; NaN where the GP
# was never evaluated.
gp_predictions = np.nan * np.ones((X.shape[0], 2 * len(gp_predict_indices)))
x = X[model_indices]
#randn = np.random.choice(X.shape[0], 50000, replace=False)
for i, index in enumerate(gp_predict_indices):
    y = results[:, index]
    # Matern-3/2 kernel with per-dimension metric from the data variance,
    # plus fitted mean and white-noise terms.
    metric = np.var(x, axis=0)
    kernel = george.kernels.Matern32Kernel(metric, ndim=x.shape[1])
    gp = george.GP(kernel,
        mean=np.mean(y), fit_mean=True,
        white_noise=np.log(np.std(y)), fit_white_noise=True)
    assert len(gp.parameter_names) == G
    def nll(p):
        # Negative log-likelihood; large finite value when non-finite so
        # L-BFGS-B can keep going.
        gp.set_parameter_vector(p)
        ll = gp.log_likelihood(y, quiet=True)
        return -ll if np.isfinite(ll) else 1e25
    def grad_nll(p):
        # Gradient of the negative log-likelihood.
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(y, quiet=True)
    gp.compute(x)
    logging.info("Initial \log{{L}} = {:.2f}".format(gp.log_likelihood(y)))
    logging.info("initial \grad\log{{L}} = {}".format(gp.grad_log_likelihood(y)))
    p0 = gp.get_parameter_vector()
    t_init = time()
    result = op.minimize(nll, p0, jac=grad_nll, method="L-BFGS-B")
    t_opt = time() - t_init
    gp.set_parameter_vector(result.x)
    logging.info("Result: {}".format(result))
    logging.info("Final logL = {:.2f}".format(gp.log_likelihood(y)))
    logging.info("Took {:.0f} seconds to optimize".format(t_opt))
    gp_parameters[i] = result.x
    # Predict the quantity and the variance.
    B = int(np.ceil(X.shape[0] / gp_block_size))
    logging.info(f"Predicting {model_name} {index}")
    if USE_SV_MASK:
        # Only predict for the science-verification subset.
        p, p_var = gp.predict(y, X[sv_mask], return_var=True)
        gp_predictions[sv_mask, 2*i] = p
        gp_predictions[sv_mask, 2*i+1] = p_var
    else:
        # Predict in fixed-size blocks to bound memory use.
        # NOTE(review): the slice ends at 1+e, so consecutive blocks overlap
        # by one row; harmless (the row is just re-predicted with the same
        # value) but probably meant to be X[s:e].
        with tqdm.tqdm(total=X.shape[0]) as pb:
            for b in range(B):
                s, e = (b * gp_block_size, (b + 1)*gp_block_size)
                p, p_var = gp.predict(y, X[s:1+e], return_var=True)
                gp_predictions[s:1+e, 2*i] = p
                gp_predictions[s:1+e, 2*i + 1] = p_var
                pb.update(e - s)
    """
    p, p_var = gp.predict(y, X[randn], return_var=True)
    gp_predictions[randn, 2*i] = p
    gp_predictions[randn, 2*i + 1] = p_var
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    scat = ax.scatter(X.T[0][randn], X.T[1][randn],
        c=gp_predictions[:, 2*i][randn], s=1)
    cbar = plt.colorbar(scat)
    ax.set_title(f"{index} mu")
    fig, ax = plt.subplots()
    scat = ax.scatter(X.T[0][randn], X.T[1][randn],
        c=np.sqrt(gp_predictions[:, 2*i + 1][randn]), s=1)
    cbar = plt.colorbar(scat)
    ax.set_title(f"{index} sigma")
    """
model_results[model_name] = [model_indices, results, gp_parameters, gp_predictions]
# Save predictions so far.
logging.info(f"Saved progress to {results_path}")
with open(results_path, "wb") as fp:
    pickle.dump(dict(config=config, models=model_results), fp)
# Save the predictions, and the GP hyperparameters.
# NOTE(review): this writes the same payload to the same path a second time.
save_dict = dict(config=config, models=model_results)
with open(results_path, "wb") as fp:
    pickle.dump(save_dict, fp)
logging.info(f"Saved output to {results_path}")
# HACK: deliberate NameError to halt here (presumably for interactive use).
raise a
50,284 | andycasey/velociraptor | refs/heads/master | /_convert_config.py |
"""
Convert config format from run_analysis -> run_analysis2.py
"""
import os
import sys
import pickle
import yaml
if __name__ == "__main__":
    # Usage: python _convert_config.py <output.yaml> <model1.yaml> [...]
    # Merges several per-model configs (old format) into one config file in
    # the format expected by run_analysis.py.
    output_path = sys.argv[1]
    model_paths = sys.argv[2:]
    config = dict()
    models = dict()
    for i, model_path in enumerate(model_paths):
        # Model name is the second dot-separated token of the file name,
        # e.g. "config.rv.yaml" -> "rv".
        model_name = os.path.basename(model_path).split(".")[1]
        with open(model_path, "r") as fp:
            model_config = yaml.load(fp)
        if i == 0:
            # Global settings are taken from the first model config only.
            # BUG FIX: a missing comma after "random_seed" previously fused it
            # with "number_of_sources" into the single bogus key
            # "random_seednumber_of_sources", so neither setting was copied.
            for key in ("data_path", "results_path", "model_path", "random_seed",
                        "number_of_sources", "multiprocessing", "suppress_stan_output"):
                if key in model_config:
                    config[key] = model_config[key]
        # Per-model settings.
        model_settings = dict()
        for key in ("kdtree_label_names", "kdtree_minimum_points",
                    "kdtree_maximum_points", "kdtree_relative_scales",
                    "kdtree_minimum_radius", "kdtree_maximum_radius"):
            model_settings[key] = model_config[key]
        # New format uses a single predictor label per model.
        model_settings["predictor_label_name"] = model_config["predictor_label_names"][0]
        models[model_name] = model_settings
    config["models"] = models
    with open(output_path, "w") as fp:
        fp.write(yaml.dump(config))
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,285 | andycasey/velociraptor | refs/heads/master | /run_probabilities2.py |
"""
Run this after run_analysis.py
"""
import logging
import numpy as np
import sys
import pickle
import tqdm
from collections import OrderedDict
from astropy.io import fits
from astropy.table import Table
from scipy.special import logsumexp
from scipy import stats
import npm_utils as npm
def lnprob(y, theta, s_mu, s_sigma, b_mu, b_sigma):
    """
    Per-datum log-likelihoods under a two-component mixture of a normal
    ("single") and a log-normal ("binary") distribution, including the
    mixture weights.

    :param y: observed values (writable float array; entries with a
        non-finite log-normal density, e.g. y <= 0, map to -inf).
    :param theta: mixture weight of the single-star (normal) component.
    :returns: an (N, 2) array whose columns are
        log[theta * N(y | s_mu, s_sigma)] and
        log[(1 - theta) * LogNormal(y | b_mu, b_sigma)].
    """
    half_log_two_pi = 0.5 * np.log(2*np.pi)
    single_ivar = np.abs(s_sigma)**-2
    binary_ivar = np.abs(b_sigma)**-2
    # Normal log-pdf for the single-star component.
    single_lpdf = (
        0.5 * np.log(single_ivar)
        - half_log_two_pi
        - 0.5 * single_ivar * (y - s_mu)**2
    )
    # Log-normal log-pdf for the binary component; floor non-finite entries.
    binary_lpdf = (
        -np.log(y*b_sigma)
        - half_log_two_pi
        - 0.5 * binary_ivar * (np.log(y) - b_mu)**2
    )
    binary_lpdf[~np.isfinite(binary_lpdf)] = -np.inf
    weights = np.log([theta, 1-theta])
    return np.vstack([single_lpdf, binary_lpdf]).T + weights
if __name__ == "__main__":
    # Usage: python run_probabilities2.py <results.pkl> <output_path>
    result_path = sys.argv[1]
    output_path = sys.argv[2]
    # Load the fit results produced by run_analysis.py.
    with open(result_path, "rb") as fp:
        results = pickle.load(fp)
    config = results["config"]
    data = fits.open(config["data_path"])[1].data
    # Collect every data column referenced by any model.
    all_label_names = []
    for model_name, model_config in config["models"].items():
        all_label_names.append(model_config["predictor_label_name"])
        all_label_names.extend(model_config["kdtree_label_names"])
    all_label_names = list(np.unique(all_label_names))
    finite = np.all([np.isfinite(data[ln]) for ln in all_label_names], axis=0)
    # K Monte Carlo draws of the GP-predicted parameters per source.
    K = config.get("number_of_draws", 100)
    model_names = config["models"]
    M = len(model_names)
    # Extra slot for the joint (all-models) probability when M > 1.
    MJ = M + 1 if M > 1 else 1
    N = sum(finite)
    lnprobs = np.zeros((M, N, K, 2))
    p_single = np.zeros((MJ, N, K))
    for m, model_name in enumerate(model_names):
        predictor_label_name = config["models"][model_name]["predictor_label_name"]
        y = np.array(data[predictor_label_name][finite])
        model_indices, model_results, gp_parameters, gp_predictions \
            = results["models"][model_name]
        # gp_predictions columns are interleaved (mean, variance) for the
        # five mixture parameters.
        theta, theta_var, \
            mu_single, mu_single_var, \
            sigma_single, sigma_single_var, \
            mu_multiple, mu_multiple_var, \
            sigma_multiple, sigma_multiple_var = gp_predictions.T
        # Calculate probabilities.
        # Parameter correlations estimated from the per-source fits; used to
        # build a full covariance from the per-parameter GP variances.
        rhos = np.corrcoef(model_results.T)
        print(f"Calculating probabilities for {model_name}")
        for n in tqdm.tqdm(range(N)):
            #TODO SKIPPING
            if not np.all(np.isfinite(gp_predictions[n])): continue
            mu = gp_predictions[n, ::2]
            diag = np.atleast_2d(gp_predictions[n, 1::2])**0.5
            cov = diag * rhos * diag.T
            draws = np.random.multivariate_normal(mu, cov, size=K).T
            # Clip things to bounded values.
            # NOTE(review): these constants mirror the default bounds in
            # run_analysis.py; keep the two scripts in sync.
            draws[0] = np.clip(draws[0], 0.5, 1.0)
            draws[1] = np.clip(draws[1], 0.5, 15)
            draws[2] = np.clip(draws[2], 0.05, 10)
            draws[4] = np.clip(draws[4], 0.2, 1.6)
            # Keep mu_multiple above the single-star component.
            draws_3_min = np.log(draws[1] + 5 * draws[2]) + draws[4]**2
            draws[3] = np.max([draws[3], draws_3_min], axis=0)
            # Weighted component log-likelihoods and P(single) per draw.
            lnprobs[m, n, :, 0] = np.log(draws[0]) + npm.normal_lpdf(y[n], draws[1], draws[2])
            lnprobs[m, n, :, 1] = np.log(1 - draws[0]) + npm.lognormal_lpdf(y[n], draws[3], draws[4])
            p_single[m, n] = np.exp(lnprobs[m, n, :, 0] - logsumexp(lnprobs[m, n], axis=1))
            # TODO HACK
            # Anything below the single-star mean is forced to P(single) = 1.
            is_def_single = y[n] < draws[1]
            p_single[m, n][is_def_single] = 1.0
            """
            xi = np.linspace(0, 20, 1000)
            y_s = npm.norm_pdf(xi, np.mean(draws[1]), np.mean(draws[2]), np.mean(draws[0]))
            y_m = npm.lognorm_pdf(xi, np.mean(draws[3]), np.mean(draws[4]), np.mean(draws[0]))
            p_single = np.exp(np.log(y_s) - logsumexp([np.log(y_s), np.log(y_m)], axis=0))
            fig, ax = plt.subplots()
            ax.plot(xi, y_s, c="tab:blue")
            ax.plot(xi, y_m, c="tab:red")
            ax.plot(xi, p_single, c="#000000")
            ax.set_title(n)
            ax.axvline(y[n], c="#666666")
            """
# Calculate joint probabilities.
# Combine the per-model likelihoods into a single joint P(single) per draw.
if M > 1:
    print("Calculating joint probabilities")
    for n in tqdm.tqdm(range(N)):
        # TODO: SkIPPING
        if not np.all(np.isfinite(gp_predictions[n])): continue
        for k in range(K):
            # TODO: this could be wrong,..
            # NOTE(review): this multiplies likelihoods across models,
            # i.e. it treats the models as independent.
            numerator = sum(lnprobs[:, n, k, 0])
            denominator = sum(lnprobs[:, n, k, 1])
            p_single[-1, n, k] = np.exp(numerator - logsumexp([numerator, denominator]))
    """
    numerator = np.sum(lnprobs[:, d, :, 0], axis=0)
    denominator = np.sum(lnprobs[:, d, :, 1], axis=0)
    p_single[-1, d, :] = np.exp(numerator - logsumexp([numerator, denominator], axis=1))
    """
# Percentile summaries of P(single) over the K draws per source.
print("Calculating percentiles")
percentiles = [5, 50, 95]
P = len(percentiles)
p_single_percentiles = np.zeros((P, MJ, N))
for m in tqdm.tqdm(range(MJ)):
    for n in range(N):
        if not np.all(np.isfinite(gp_predictions[n])): continue
        p_single_percentiles[:, m, n] = np.percentile(p_single[m, n], percentiles)
# Do a classification for each star.
# `confidence` is the fraction of draws with P(single) > 0.5.
print("Classifying")
confidence = np.sum(p_single > 0.5, axis=2)/K
is_single = confidence > 0.5
#confidence[~is_single] = 1 - confidence[~is_single]
print("Aggregating data")
properties = OrderedDict()
properties["source_id"] = data["source_id"][finite]
for label_name in all_label_names:
    properties[label_name] = data[label_name][finite]
# Do GP predictions.
for m, model_name in enumerate(model_names):
    print(f"Aggregating predictions for {model_name}")
    _, __, ___, gp_predictions = results["models"][model_name]
    theta, theta_var, \
        mu_single, mu_single_var, \
        sigma_single, sigma_single_var, \
        mu_multiple, mu_multiple_var, \
        sigma_multiple, sigma_multiple_var = gp_predictions.T
    predictor_label_name = config["models"][model_name]["predictor_label_name"]
    properties[predictor_label_name] = np.array(data[predictor_label_name])[finite]
    # GP posterior means of the mixture parameters for this model.
    properties[f"{model_name}_gp_theta"] = theta
    properties[f"{model_name}_gp_mu_s"] = mu_single
    properties[f"{model_name}_gp_sigma_s"] = sigma_single
    properties[f"{model_name}_gp_mu_m"] = mu_multiple
    properties[f"{model_name}_gp_sigma_m"] = sigma_multiple
    """
    properties[f"{model_name}_gp_theta_var"] = theta_var
    properties[f"{model_name}_gp_mu_s_var"] = mu_single_var
    properties[f"{model_name}_gp_sigma_s_var"] = sigma_single_var
    properties[f"{model_name}_gp_mu_m_var"] = mu_multiple_var
    properties[f"{model_name}_gp_sigma_m_var"] = sigma_multiple_var
    """
    properties[f"{model_name}_is_single"] = is_single[m].astype(int)
    properties[f"{model_name}_confidence"] = confidence[m]
    for p, percentile in enumerate(percentiles):
        properties[f"{model_name}_p{percentile:.0f}"] = p_single_percentiles[p, m]
# Estimate RV semi-amplitude.
if "rv" in model_names.keys():
    print("Calculating radial velocity excess")
    # Excess scatter above the single-star GP mean; variance adds the GP
    # prediction variances in quadrature (columns 3 and 5 of gp_predictions).
    properties[f"rv_excess"] = properties["rv_single_epoch_scatter"] \
        - properties["rv_gp_mu_s"]
    rv_gp_mu_s_var = results["models"]["rv"][-1].T[3]
    rv_gp_sigma_s_var = results["models"]["rv"][-1].T[5]
    properties[f"rv_excess_var"] = properties["rv_gp_sigma_s"]**2 \
        + rv_gp_mu_s_var \
        + rv_gp_sigma_s_var
# Joint probabilities.
if M > 1:
    properties["joint_is_single"] = is_single[-1].astype(int)
properties["joint_confidence"] = confidence[-1]
for p, percentile in enumerate(percentiles):
properties[f"joint_p{percentile:.0f}"] = p_single_percentiles[p, -1]
print("Save PDF draws")
# Only save finite predictions for now.
mask = np.all(np.isfinite(results["models"]["rv"][-1]), axis=1)
pdf_output_path = ".".join(output_path.split(".")[:-1]) + f".pdf.memmap"
pdf_sources_output_path = ".".join(output_path.split(".")[:-1]) + f".sources.memmap"
pdf_output = np.memmap(pdf_output_path,
dtype=np.float32,
shape=(MJ, sum(mask), K),
mode="w+")
pdf_output[:] = p_single[:, mask, :]
del pdf_output
pdf_sources_output = np.memmap(pdf_sources_output_path,
shape=(sum(mask), ),
dtype='>i8',
mode="w+")
pdf_sources_output[:] = data["source_id"][finite][mask]
del pdf_sources_output
print("Writing output file..")
Table(data=properties).write(output_path, overwrite=True)
print(f"Output written to {output_path}")
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,286 | andycasey/velociraptor | refs/heads/master | /velociraptor.py |
"""
Code for the velociraptor project.
"""
import logging as logger
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from matplotlib.ticker import MaxNLocator
from scipy.stats import norm
from astropy.io import fits
import stan_utils as stan
import mpl_utils
plt.style.use(mpl_utils.mpl_style)
np.random.seed(42)
def load_gaia_sources(path, **kwargs):
    """
    Read a table of Gaia sources from `path` and augment it with derived
    columns: `absolute_g_mag`, `absolute_bp_mag`, `absolute_rp_mag`,
    `rv_single_epoch_variance`, and an approximate effective temperature
    (`approx_teff_from_bp_rp`) estimated from the bp - rp colour.

    :param path:
        The local path to load the sources from.

    :returns:
        The augmented `astropy.table.Table` of sources.
    """
    sources = Table.read(path, **kwargs)

    # Absolute magnitudes from the parallax (in mas); the offset is the same
    # for every photometric band.
    distance_modulus_offset = 5 * np.log10(sources["parallax"]/100.0)
    for band in ("g", "bp", "rp"):
        sources[f"absolute_{band}_mag"] = \
            sources[f"phot_{band}_mean_mag"] + distance_modulus_offset

    # Variance of a single-epoch radial velocity measurement.
    sources["rv_single_epoch_variance"] = sources["radial_velocity_error"]**2 \
        * sources["rv_nb_transits"] * np.pi/2.0

    # Fit teff as a quadratic in 1/(bp - rp) using well-behaved sources, then
    # evaluate it (clipped to 3500--8000 K) for every source.
    use_in_fit = np.isfinite(sources["radial_velocity"]) \
        * (sources["phot_bp_rp_excess_factor"] < 1.5) \
        * np.isfinite(sources["bp_rp"]) \
        * np.isfinite(sources["teff_val"]) \
        * (sources["bp_rp"] < 2.5) \
        * (sources["bp_rp"] > 0.5)

    inverse_colour = 1.0/sources["bp_rp"][use_in_fit]
    coefficients = np.polyfit(inverse_colour, sources["teff_val"][use_in_fit], 2)

    sources["approx_teff_from_bp_rp"] = np.clip(
        np.polyval(coefficients, 1.0/sources["bp_rp"]),
        3500, 8000)

    return sources
def load_gaia_sources_from_fitsio(path, **kwargs):
    """
    Load a subset of Gaia data directly from a FITS file and calculate
    additional properties like `absolute_g_mag`, `absolute_bp_mag`,
    `absolute_rp_mag`, and `rv_single_epoch_variance`.  Unlike
    `load_gaia_sources`, the derived quantities are returned in a separate
    `metadata` dictionary instead of being added to the table.

    :param path:
        The local path to load the sources from.

    :returns:
        A two-length tuple of (sources, metadata).
    """
    # NOTE: **kwargs is accepted for interface symmetry with
    # `load_gaia_sources`, but is not used here.
    sources = Table(fits.open(path)[1].data)

    metadata = dict()
    for band in ("g", "bp", "rp"):
        # Absolute magnitudes from the parallax (in mas).
        metadata["absolute_{}_mag".format(band)] = \
            sources["phot_{}_mean_mag".format(band)] \
                + 5 * np.log10(sources["parallax"]/100.0)

    # BUG FIX: a trailing comma previously wrapped this array in a 1-tuple,
    # inconsistent with the same quantity produced by `load_gaia_sources`.
    metadata["rv_single_epoch_variance"] = sources["radial_velocity_error"]**2 \
        * sources["rv_nb_transits"] * np.pi/2.0

    # Approximate temperature from bp-rp colour: fit a quadratic in
    # 1/(bp - rp) on well-behaved sources, then evaluate it (clipped to
    # 3500--8000 K) for every source.
    use_in_fit = np.isfinite(sources["radial_velocity"]) \
        * (sources["phot_bp_rp_excess_factor"] < 1.5) \
        * np.isfinite(sources["bp_rp"]) \
        * np.isfinite(sources["teff_val"]) \
        * (sources["bp_rp"] < 2.5) \
        * (sources["bp_rp"] > 0.5)

    x = sources["bp_rp"][use_in_fit]
    y = sources["teff_val"][use_in_fit]

    coeff = np.polyfit(1.0/x, y, 2)

    metadata["approx_teff_from_bp_rp"] = np.clip(
        np.polyval(coeff, 1.0/sources["bp_rp"]),
        3500, 8000)

    return (sources, metadata)
def prepare_model(rv_single_epoch_variance, target=0.01, model_path="model.stan",
    S=None, design_matrix_function=None, mask=None,
    **source_params):
    """
    Compile the Stan model and prepare the data and initialisation
    dictionaries for optimization and sampling.

    :param rv_single_epoch_variance:
        The variance in single epoch measurements of radial velocity.

    :param target: [optional]
        The target radial velocity variance used to initialise the model
        coefficients.

    :param model_path: [optional]
        The local path of the Stan model.

    :param S: [optional]
        If not `None`, draw a random `S` valid sources from the data rather
        than using the full data set.

    :param design_matrix_function: [optional]
        Function used to construct the design matrix from `source_params`
        (defaults to `_rvf_design_matrix`).

    :param mask: [optional]
        An additional boolean mask applied when selecting sources.

    Remaining keyword arguments are passed to the design matrix function, so
    they should include all source labels the design matrix needs (e.g.
    `phot_rp_mean_flux`), each with the same length as
    `rv_single_epoch_variance`.

    :returns:
        A four-length tuple: the compiled Stan model, the data dictionary,
        the initialisation dictionary, and the indices of the sources used.
    """
    data, indices = prepare_data(rv_single_epoch_variance, S=S, mask=mask,
                                 design_matrix_function=design_matrix_function,
                                 **source_params)

    # Heuristic starting point for both coefficient vectors.
    initial_coefficients = _rvf_initial_coefficients(
        data["design_matrix"].T, target=target)
    init = dict(theta=0.1,
                mu_coefficients=initial_coefficients,
                sigma_coefficients=initial_coefficients)

    return (stan.load_stan_model(model_path), data, init, indices)
def prepare_data(rv_single_epoch_variance, S=None, mask=None,
    design_matrix_function=None, **source_params):
    """
    Build the data dictionary for the Stan model, keeping only sources with
    finite design-matrix entries and finite radial velocity variance.

    :param rv_single_epoch_variance:
        The variance in single epoch measurements of radial velocity.

    :param S: [optional]
        If not `None`, draw (at most) `S` random valid sources instead of
        using all of them.

    :param mask: [optional]
        Additional boolean mask applied on top of the finite-value cut.

    :param design_matrix_function: [optional]
        Function constructing the design matrix from `source_params`
        (defaults to `_rvf_design_matrix`).

    :returns:
        A two-length tuple of (data dictionary, selected indices).
    """
    if design_matrix_function is None:
        design_matrix_function = _rvf_design_matrix

    dm = design_matrix_function(**source_params)
    finite = np.all(np.isfinite(dm), axis=0) \
           * np.isfinite(rv_single_epoch_variance)
    if mask is not None:
        finite *= mask

    indices = np.where(finite)[0]
    N = sum(finite)
    if S is not None:
        indices = np.random.choice(indices, size=int(min(N, S)), replace=False)

    if (S is not None and S > N) or not all(finite):
        # `logging.warn` is a deprecated alias; use `warning`.
        logger.warning("Excluding non-finite entries in design matrix! "\
                       "Number of data points: {0}".format(indices.size))

    dm = dm[:, indices]

    data = dict(N=indices.size, rv_variance=rv_single_epoch_variance[indices],
        design_matrix=dm.T, M=dm.shape[0])
    return data, indices
def _rvf_design_matrix(phot_rp_mean_flux, bp_rp, **kwargs):
"""
Design matrix for the radial velocity floor variance.
# TODO: Should we check for finite-ness here?
"""
return np.array([
np.ones(len(phot_rp_mean_flux)),
phot_rp_mean_flux**-1,
phot_rp_mean_flux**-2,
bp_rp**-1,
bp_rp**-2
])
def _rvf_initial_coefficients(design_matrix, target=0.01):
"""
Initial coefficients for the model.
"""
return target / (design_matrix.shape[0] * np.nanmean(design_matrix, axis=1))
def predict_map_rv_single_epoch_variance(samples, **source_params):
    """
    Predict the maximum a-posteriori estimate of the radial velocity variance
    from a single epoch.

    :param samples:
        The Stan chains from the model.

    Remaining keyword arguments are the source labels required by
    `_rvf_design_matrix`.

    :returns:
        A two-length tuple of (mu, sigma) predictions.
    """
    dm = _rvf_design_matrix(**source_params)
    extracted = samples.extract(("mu_coefficients", "sigma_coefficients"))

    # Posterior-mean coefficients dotted with the design matrix.
    predictions = [
        np.dot(np.mean(extracted[key], axis=0), dm)
        for key in ("mu_coefficients", "sigma_coefficients")
    ]
    return tuple(predictions)
def plot_model_predictions_corner(samples, sources=None, parameter_limits=None,
    log_parameters=None, N=100, labels=None, **kwargs):
    """
    Make a corner plot showing the maximum a posteriori radial velocity
    variance for different (stellar) properties that contribute to the model.

    :param samples:
        The MCMC samples.

    :param sources: [optional]
        A table of Gaia sources.  This is used to determine the bounds on the
        parameters.  If `None` is given, then `parameter_limits` should be
        given instead.  If `sources` and `parameter_limits` are both given,
        then `parameter_limits` will supersede the limits calculated from
        `sources`.

    :param parameter_limits: [optional]
        A dictionary containing source parameters as keys and two-length
        tuples containing the lower and upper bounds of each parameter.

    :param log_parameters: [optional]
        A tuple containing the parameter names that should be shown in log
        space.

    :param N: [optional]
        The number of grid points to evaluate along each parameter axis.

    :param labels: [optional]
        A dictionary mapping parameter names to axis labels.

    :returns:
        The matplotlib figure containing the corner plot.
    """
    # The model parameters are whatever the design matrix function accepts,
    # excluding its **kwargs catch-all.  Note the set gives arbitrary order.
    parameter_names = tuple(
        set(_rvf_design_matrix.__code__.co_varnames).difference(["kwargs"]))

    limits = dict()
    if sources is not None:
        for pn in parameter_names:
            limits[pn] = (np.nanmin(sources[pn]), np.nanmax(sources[pn]))
    else:
        # NOTE(review): if `sources` and `parameter_limits` are both None,
        # set.difference(None) raises a TypeError here — confirm callers
        # always supply one of the two.
        missing = tuple(set(parameter_names).difference(parameter_limits))
        if len(missing) > 0:
            raise ValueError("missing parameter limits for {}".format(
                ", ".join(missing)))

    # Explicit limits supersede those derived from `sources`.
    if parameter_limits is not None:
        limits.update(parameter_limits)

    if log_parameters is None:
        log_parameters = []
    if labels is None:
        labels = dict()

    def mesh(parameter_name):
        # Grid of N points spanning the parameter's limits; log-spaced when
        # the parameter appears in `log_parameters`.
        v = limits[parameter_name]
        s, e = (np.min(v), np.max(v))
        if parameter_name not in log_parameters:
            return np.linspace(s, e, N)
        else:
            return np.logspace(np.log10(s), np.log10(e), N)

    # Posterior-mean coefficients of the requested kind (mu by default).
    samples_kwd = kwargs.get("samples_kwd", "mu_coefficients")
    coefficients = np.mean(samples.extract((samples_kwd, ))[samples_kwd], axis=0)

    P = len(limits)

    # Calculate the expected radial velocity variance for all combinations of
    # parameters.
    combinations = np.meshgrid(*[mesh(pn) for pn in parameter_names])
    grid_combinations = np.vstack([comb.flatten() for comb in combinations])
    expectation = np.dot(coefficients, _rvf_design_matrix(**dict(zip(
        parameter_names, grid_combinations))))

    fig, axes = plt.subplots(P, P, figsize=(6 * P, 6 * P))
    axes = np.atleast_2d(axes)

    for i, x_param in enumerate(parameter_names):
        for j, y_param in enumerate(parameter_names):
            ax = axes[j, i]
            if i > j:
                # Upper triangle is hidden (corner-plot convention).
                ax.set_visible(False)
                continue

            elif i == j:
                # Diagonal panel: marginal dependence on a single parameter.
                x = grid_combinations[i]

                # Get the mean at each unique x.
                x_uniques = np.sort(np.unique(x))
                y_percentiles = np.zeros((3, x_uniques.size), dtype=float)
                for k, x_unique in enumerate(x_uniques):
                    match = (grid_combinations[i] == x_unique)
                    # Min / median / max of the expectation at this x value.
                    y_percentiles[:, k] = np.percentile(
                        expectation[match], [0, 50, 100])

                ax.plot(x_uniques, y_percentiles[1], "r-")
                ax.fill_between(
                    x_uniques, y_percentiles[0], y_percentiles[2],
                    facecolor="r", alpha=0.3, edgecolor="none")

                if x_param in log_parameters:
                    ax.semilogx()

                ax.set_xlabel(labels.get(x_param, x_param))
                ax.set_ylabel(r"\textrm{single epoch radial velocity variance}"\
                              r" $(\textrm{km}^2\,\textrm{s}^{-2})$")

            else:
                # Off-diagonal panel: joint dependence on a pair of
                # parameters, shown as an image.
                x, y = grid_combinations[[i, j]]

                #_x = np.log10(x) if x_param in log_parameters else x
                #_y = np.log10(y) if y_param in log_parameters else y

                imshow_kwds = dict(cmap="Reds", aspect="equal",
                    extent=(np.min(x), np.max(x), np.max(y), np.min(y)))

                if x_param in log_parameters:
                    ax.semilogx()
                if y_param in log_parameters:
                    ax.semilogy()

                ax.imshow(expectation.reshape((N, N)), **imshow_kwds)

                ax.set_xlabel(labels.get(x_param, x_param))
                ax.set_ylabel(labels.get(y_param, y_param))

    fig.tight_layout()

    return fig
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,287 | andycasey/velociraptor | refs/heads/master | /data/download.py |
"""
Query used to retrieve the radial velocity calibration data from the Gaia
archive.
"""
from astroquery.gaia import Gaia
# Gaia DR2 columns to retrieve for each source.
columns = (
    "source_id",
    "ra", "dec", "l", "b",
    "parallax", "parallax_error",
    "phot_g_mean_mag", "phot_bp_mean_mag", "phot_rp_mean_mag",
    "phot_g_mean_flux", "phot_g_mean_flux_error",
    "phot_bp_mean_flux", "phot_bp_mean_flux_error", "phot_bp_n_obs",
    "phot_rp_mean_flux", "phot_rp_mean_flux_error", "phot_rp_n_obs",
    "phot_bp_rp_excess_factor",
    "bp_rp", "bp_g", "g_rp",
    "teff_val", "teff_percentile_lower", "teff_percentile_upper",
    "a_g_val", "a_g_percentile_lower", "a_g_percentile_upper",
    "radial_velocity", "radial_velocity_error", "rv_nb_transits",
    "rv_template_teff", "rv_template_logg", "rv_template_fe_h",
    "astrometric_weight_al", "astrometric_gof_al", "astrometric_chi2_al",
    "phot_variable_flag"
)

# A ~10% random subset of the well-measured radial-velocity sources
# (MOD(random_index, 10) = 0 keeps one source in ten).
job = Gaia.launch_job_async("""
SELECT {0}
FROM gaiadr2.gaia_source
WHERE radial_velocity IS NOT NULL
AND duplicated_source = 'false'
AND rv_nb_transits > 10
AND visibility_periods_used > 10
AND radial_velocity_error < 20
AND MOD(random_index, 10) = 0
""".format(", ".join(columns)))

subset = job.get_results()
subset.write("rv_calibration_floor_subset-result.fits")

# The full set of well-measured radial-velocity sources (same cuts, no
# random-index subsampling).
job = Gaia.launch_job_async("""
SELECT {0}
FROM gaiadr2.gaia_source
WHERE radial_velocity IS NOT NULL
AND duplicated_source = 'false'
AND rv_nb_transits > 10
AND visibility_periods_used > 10
AND radial_velocity_error < 20
""".format(", ".join(columns)))

sources = job.get_results()
sources.write("rv_calibration_floor-result.fits")

# NOTE(review): this async job is launched but its results are never
# retrieved or written here — confirm whether that is intentional.
all_possible_sources = Gaia.launch_job_async("""
SELECT *
FROM gaiadr2.gaia_source
WHERE phot_rp_mean_mag < 13
""")

# Reference copy of the subset query (a no-op string; not executed).
"""
SELECT *
FROM gaiadr2.gaia_source
WHERE radial_velocity IS NOT NULL
AND duplicated_source = 'false'
AND rv_nb_transits > 10
AND visibility_periods_used > 10
AND radial_velocity_error < 20
AND mod(random_index, 10) = 0
"""
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,288 | andycasey/velociraptor | refs/heads/master | /create_sv_mask.py |
"""
Create a mask that will be useful for science verification.
"""
import numpy as np
import pickle
from astropy.io import fits
from astropy.table import Table
# The Gaia subset that the science-verification mask will index into.
data = fits.open("data/gaia-sources-for-npm-colsubset.fits")[1].data

# Cross-match catalogues: any source appearing in one of these files is
# flagged for science verification.
cross_match_paths = [
    "data/soubiran-2013-xm-gaia.fits",
    "data/sb9_xm_gaia.fits",
    "data/apw-highK-unimodal-xm-gaia.fits",
    "data/apw-lnK-percentiles-xm-gaia.fits",
    "data/huang-apogee-rv-standards-xm-gaia.fits"
]
def cross_match(A_source_ids, B_source_ids):
    """
    Cross-match two arrays of identifiers.

    Returns index arrays (ai, bi) such that `A_source_ids[ai]` equals
    `B_source_ids[bi]` element-wise, with the matched identifiers in
    ascending order.  Inputs are expected to contain unique identifiers;
    duplicates would break the one-to-one pairing assertions below.
    """
    # BUG FIX: `np.long` was removed in NumPy 1.24; Gaia source_ids are
    # 64-bit integers, so use np.int64 explicitly.
    A = np.array(A_source_ids, dtype=np.int64)
    B = np.array(B_source_ids, dtype=np.int64)

    # Indices of elements present in both arrays.
    ai = np.where(np.in1d(A, B))[0]
    bi = np.where(np.in1d(B, A))[0]

    assert len(ai) == len(bi)

    # Sort both index sets by identifier so the matched rows line up.
    ai = ai[np.argsort(A[ai])]
    bi = bi[np.argsort(B[bi])]
    assert all(A[ai] == B[bi])

    return (ai, bi)
# Build a boolean mask over `data`: True where the source appears in any
# of the cross-match catalogues.
mask = np.zeros(len(data), dtype=bool)
for path in cross_match_paths:
    t = Table.read(path)
    # Deduplicate on source_id: group rows and keep the first row of each
    # group (groups.indices holds the group start offsets, plus a final end).
    t = t.group_by("source_id")
    t = t[t.groups.indices[:-1]]

    vl_ids, t_ids = cross_match(data["source_id"], t["source_id"])
    mask[vl_ids] = True

    print(f"Cross-matching with {path} revealed {len(vl_ids)} sources")

# Persist the mask for downstream science-verification runs.
with open("sv.mask", "wb") as fp:
    pickle.dump(mask, fp)

print(f"There are {sum(mask)} sources for science verification in sv.mask")
50,289 | andycasey/velociraptor | refs/heads/master | /prepare_data.py |
"""
Calculate additional properties (e.g., photometric and astrometric variability)
which we will use in our model, and create a subset file that only contains the
columns we need.
"""
import numpy as np
from astropy.io import fits
from astropy.table import Table
input_path = "data/gaia-sources-for-npm.fits"
output_path = "data/gaia-sources-for-npm-subset.fits"
def create_subset_for_npm(path, hdu=1, additional_label_names=None, **kwargs):
    """
    Build a dictionary of derived columns for the non-parametric model from
    a FITS table of Gaia sources.

    :param path:
        Local path of the FITS file containing the sources.

    :param hdu: [optional]
        Index of the HDU containing the source table.

    :param additional_label_names: [optional]
        Names of extra columns to copy through unchanged.  May be `None`.

    :returns:
        A dictionary mapping column names to arrays.
    """
    properties = dict()

    with fits.open(path) as image:
        sources = image[hdu].data

        for band in ("g", "bp", "rp"):
            # Absolute magnitudes (parallax in mas), and photometric
            # variability indicators.
            properties[f"absolute_{band}_mag"] = sources[f"phot_{band}_mean_mag"] \
                + 5 * np.log10(sources["parallax"]/100.0)

            # NOTE(review): this scales each band's flux S/N by the number
            # of *astrometric* observations (astrometric_n_good_obs_al) —
            # confirm that is intended rather than the per-band
            # phot_{band}_n_obs.
            properties[f"phot_{band}_variability"] = np.sqrt(sources["astrometric_n_good_obs_al"]) \
                * sources[f"phot_{band}_mean_flux"] \
                / sources[f"phot_{band}_mean_flux_error"]

        # Radial velocity scatter.
        properties["rv_single_epoch_variance"] = sources["radial_velocity_error"]**2 \
            * sources["rv_nb_transits"] * np.pi/2.0
        properties["rv_single_epoch_scatter"] = properties["rv_single_epoch_variance"]**0.5

        # Astrometric unit weight error (5 = number of astrometric params).
        properties["astrometric_unit_weight_error"] = np.sqrt(
            sources["astrometric_chi2_al"]/(sources["astrometric_n_good_obs_al"] - 5))

        # BUG FIX: iterating the default `None` previously raised TypeError.
        for label_name in (additional_label_names or ()):
            properties[label_name] = sources[label_name]

    return properties
# Build the subset, carrying through the raw columns needed downstream.
data = create_subset_for_npm(
    input_path,
    hdu=1,
    additional_label_names=(
        "source_id", "ra", "dec",
        "phot_rp_mean_mag",
        "phot_bp_mean_mag",
        "phot_g_mean_mag",
        "phot_rp_mean_flux",
        "phot_g_mean_flux",
        "bp_rp",
        "rv_nb_transits",
        "parallax",
        "parallax_error",
    ))

# Write the subset table for the non-parametric model runs.
t = Table(data=data)
t.write(output_path, overwrite=True)
50,290 | andycasey/velociraptor | refs/heads/master | /scripts/npm_run_soubiran_2013.py |
""" Set up and run the non-parametric model. """
import numpy as np
import os
import multiprocessing as mp
import pickle
import yaml
import tqdm
import logging
from time import sleep
from astropy.io import fits
import npm_utils as npm
import stan_utils as stan
# NOTE(review): the rv config below is loaded and then immediately
# overwritten by the astrometry config — presumably these stanzas are
# toggled by hand between runs; confirm which one should be active.
with open("the-battery-stars.rv.yaml", "r") as fp:
    # BUG FIX: PyYAML >= 6 requires an explicit Loader for yaml.load.
    config = yaml.load(fp, Loader=yaml.FullLoader)
config["results_path"] = "results/Soubiran_2013.rv_single_epoch_scatter.pkl"

with open("the-battery-stars.astrometry.yaml", "r") as fp:
    config = yaml.load(fp, Loader=yaml.FullLoader)
config["results_path"] = "results/Soubiran_2013.astrometric_unit_weight_error.pkl"

# Load in the data.
data = fits.open(config["data_path"])[1].data

all_label_names = list(config["kdtree_label_names"]) \
    + list(config["predictor_label_names"])

# Set up a KD-tree over the (finite-valued) kdtree labels.
X = np.vstack([data[ln] for ln in config["kdtree_label_names"]]).T
finite = np.all([np.isfinite(data[ln]) for ln in all_label_names], axis=0)
finite_indices = np.where(finite)[0]

N, D = X.shape
F = finite_indices.size
# L is the packed result width per source.
L = 4 * len(config["predictor_label_names"]) + 1 # + 1 if using mu_multiple_uv
# C: how many nearest neighbours share each optimised result.
C = config["share_optimised_result_with_nearest"]

kdt, scales, offsets = npm.build_kdtree(
    X[finite], relative_scales=config["kdtree_relative_scales"])

kdt_kwds = dict(offsets=offsets, scales=scales, full_output=False)
kdt_kwds.update(
    minimum_radius=config["kdtree_minimum_radius"],
    minimum_points=config["kdtree_minimum_points"],
    maximum_points=config["kdtree_maximum_points"],
    minimum_density=config.get("kdtree_minimum_density", None))

model = stan.load_stan_model(config["model_path"], verbose=False)

default_opt_kwds = config.get("optimisation_kwds", {})

# Make sure that some entries have the right units (yaml may read floats
# written in scientific notation as strings).
for key in ("tol_obj", "tol_grad", "tol_rel_grad", "tol_rel_obj"):
    if key in default_opt_kwds:
        default_opt_kwds[key] = float(default_opt_kwds[key])

logging.info("k-d tree keywords: {}".format(kdt_kwds))
logging.info("optimization keywords: {}".format(default_opt_kwds))

# Bookkeeping: which sources are finished, and their packed results.
done = np.zeros(N, dtype=bool)
queued = np.zeros(N, dtype=bool)
results = np.nan * np.ones((N, L), dtype=float)

default_init = dict(zip(
    ("theta", "mu_single", "sigma_single", "mu_multiple", "sigma_multiple"),
    np.array([0.75, 1, 0.5, 1, 0.75])))
default_init["mu_multiple_uv"] = 0.1
default_init = npm._check_params_dict(default_init)

bounds = config["parameter_bounds"]
def optimize_mixture_model(index, init=None):
    # Optimise the single/multiple mixture model for the neighbourhood of
    # source `index`, trying several initial points in turn.  Relies on
    # module-level state: data, X, kdt, kdt_kwds, config, bounds, model,
    # default_opt_kwds, default_init, finite_indices.

    # Select indices and get data.
    indices = finite_indices[npm.query_around_point(kdt, X[index], **kdt_kwds)]
    y = np.array([data[ln][indices] for ln in config["predictor_label_names"]]).T

    if init is None:
        init = npm.get_initialization_point(y)

    data_dict = dict(y=y,
                     N=y.shape[0],
                     D=y.shape[1],
                     max_log_y=np.log(np.max(y)))
    for k, v in bounds.items():
        data_dict["{}_bounds".format(k)] = v

    trial_results = []

    # Trial initialisations in order: caller-supplied, data-driven, random.
    for j, init_dict in enumerate((init, npm.get_initialization_point(y), "random")):

        # CHeck that the parameters are bounded?
        if isinstance(init_dict, dict):
            if bounds is not None:
                for k, (lower, upper) in bounds.items():
                    if not (upper > init_dict[k] > lower):
                        logging.info("Clipping initial value of {} from {} to within ({}, {})".format(
                            k, init_dict[k], lower, upper))
                        # Keep the clipped value strictly inside the bounds.
                        offset = 0.01 * (upper - lower)
                        init_dict[k] = np.clip(init_dict[k], lower + offset, upper - offset)

        opt_kwds = dict(
            init=init_dict,
            data=data_dict)
        opt_kwds.update(default_opt_kwds)

        # Do optimization.
        with stan.suppress_output() as sm:
            try:
                p_opt = model.optimizing(**opt_kwds)

            except:
                p_opt = None

            # TODO: Consider relaxing the optimization tolerances!
            if p_opt is None:
                # Capture stdout and stderr so we can read it later.
                stdout, stderr = sm.stdout, sm.stderr

        trial_results.append(p_opt)

        if p_opt is None:
            logging.warning("Exception when optimizing on index {} from "\
                            "initial point {}:".format(index, init_dict))
            logging.warning(stdout)
            logging.warning(stderr)
            # NOTE(review): the original exception was swallowed above, so
            # this bare `raise` has no active exception and will itself
            # raise a RuntimeError — confirm whether re-raising the original
            # exception was intended.
            raise

        else:
            tolerance = 1e-2
            # Reject solutions pinned at a parameter bound (theta near 1 is
            # tolerated).
            for k, v in p_opt.items():
                if k not in bounds \
                or (k == "theta" and (v >= 1-tolerance)): continue

                lower, upper = bounds[k]
                if np.abs(v - lower) <= tolerance \
                or np.abs(v - upper) <= tolerance:
                    logging.warning("Optimised {} at edge of grid ({} < {} < {})"
                                    " - ignoring".format(k, lower, v, upper))
                    break

            else:
                # No parameter was at an edge: accept this solution.
                break

    else:
        # No initialisation produced an acceptable optimum.
        # TODO: Consider relaxing optimization tolerances!
        logging.warning("Optimization did not converge from any initial point "\
                        "trialled. Consider relaxing optimization tolerances!")

        # Debugging aid: plot the data against candidate mixture models.
        # NOTE(review): `normed=True` was removed in matplotlib >= 3.1, and
        # the trailing `raise a` is a deliberate NameError — both look like
        # leftover debugging code.
        import matplotlib.pyplot as plt

        fig, ax = plt.subplots()
        ax.hist(y, bins=100, facecolor="#cccccc", normed=True)

        # Plot a model at some optimization point.
        init = npm.get_initialization_point(y)
        N, D = y.shape
        xi = np.linspace(0, max(y), 1000)
        ax.plot(xi, npm.norm_pdf(xi, init["mu_single"], init["sigma_single"],
                                 init["theta"]), c="r")
        ax.plot(xi, npm.lognorm_pdf(xi, init["mu_multiple"], init["sigma_multiple"],
                                    init["theta"]), c="r", linestyle=":")

        ax.plot(xi, npm.norm_pdf(xi, default_init["mu_single"], default_init["sigma_single"],
                                 default_init["theta"]), c="b")
        ax.plot(xi, npm.lognorm_pdf(xi, default_init["mu_multiple"], default_init["sigma_multiple"],
                                    default_init["theta"]), c="b", linestyle=":")

        raise a

    # Normalise/validate the parameter dictionary before returning.
    p_opt = npm._check_params_dict(p_opt)

    return (index, p_opt, indices)
def sp_swarm(*indices, **kwargs):
    # Single-process driver: optimise each index in turn, optionally
    # propagating each result to the C nearest neighbours.  Mutates the
    # module-level `done` and `results` arrays.
    logging.info("Running single processor swarm")

    with tqdm.tqdm(indices, total=len(indices)) as pbar:

        for index in indices:
            # Skip sources already solved (e.g. via neighbour propagation).
            if done[index]: continue

            _, result, kdt_indices = optimize_mixture_model(index, default_init)

            pbar.update()

            if result is not None:
                done[index] = True
                results[index] = npm._pack_params(**result)

                if C > 0:
                    # Share this solution with the C closest points; only
                    # count newly-finished sources in the progress bar.
                    nearby_indices = np.atleast_1d(kdt_indices[:C + 1])
                    updated = nearby_indices.size - sum(done[nearby_indices])

                    done[nearby_indices] = True
                    results[nearby_indices] = npm._pack_params(**result)

                    pbar.update(updated)

    return None
def mp_swarm(*indices, max_random_starts=3, in_queue=None, candidate_queue=None,
             out_queue=None):
    # Multiprocess worker loop: pull an index from `in_queue` (falling back
    # to at most `max_random_starts` random indices), optimise it, and push
    # (index, result) onto `out_queue`.  A (None, False) message shuts the
    # worker down.

    def _random_index():
        yield from np.random.choice(indices, max_random_starts, replace=False)

    _ri = _random_index()
    # Mimics the queue interface: returns (index, initialisation).
    random_start = lambda *_: (_ri.__next__(), default_init)

    swarm = True

    while swarm:

        # Prefer queued work; otherwise take a random starting index.
        for func in (in_queue.get_nowait, random_start):

            try:
                index, init = func()

            except mp.queues.Empty:
                #logging.info("Using a random index to start")
                continue

            except StopIteration:
                # No random starts left; idle before polling again.
                logging.warning("Swarm is bored")
                sleep(5)

            except:
                logging.exception("Unexpected exception:")
                swarm = False

            else:
                if index is None and init is False:
                    # Poison pill: stop this worker.
                    swarm = False
                    break

                try:
                    _, result, kdt_indices = optimize_mixture_model(index, init)

                except:
                    logging.exception("Exception when optimizing on {} from {}"\
                        .format(index, init))
                    break

                out_queue.put((index, result))

                if result is not None:
                    if C > 0:
                        # Assign the closest points to have the same result.
                        # (On the other end of the out_qeue we will deal with
                        # multiple results.)
                        out_queue.put((kdt_indices[:C + 1], result))

                    # Candidate next K points
                    K = 0
                    #candidate_queue.put((kdt_indices[C + 1:C + 1 + K], result))

                break

    return None
from astropy.table import Table

# Cross-match the Soubiran (2013) standards against our Gaia subset.
soubiran = Table.read("data/Soubiran_2013-xm-Gaia.fits")
# NOTE(review): this assigns -1 to *entire rows* where source_id is not
# finite — confirm that whole-row sentinel assignment is intended.
soubiran[~np.isfinite(soubiran["source_id"])] = -1
soubiran["source_id"] = soubiran["source_id"].astype(np.int64)

# Only run on Soubiran stars with finite k-d tree labels.
indices = np.in1d(data["source_id"], soubiran["source_id"]) \
        * np.all(np.isfinite(X), axis=1)
indices = np.where(indices)[0]

sp_swarm(*indices)

# Save results.
results_path = config.get("results_path", "results.pkl")
with open(results_path, "wb") as fp:
    pickle.dump(results, fp, -1)

logging.info("Saved results to {}".format(results_path))

# Combine the rv and astrometry runs into a single results table.
with open("results/Soubiran_2013.rv_single_epoch_scatter.pkl", "rb") as fp:
    rv_results = pickle.load(fp)

with open("results/Soubiran_2013.astrometric_unit_weight_error.pkl", "rb") as fp:
    astrometric_results = pickle.load(fp)

from astropy.table import Table

subset = Table(data[indices])
keys = ("theta", "mu_single", "sigma_single", "mu_multi", "sigma_multi")
for i, key in enumerate(keys):
    subset["rv_{}".format(key)] = rv_results[indices, i]
    subset["astrometric_{}".format(key)] = astrometric_results[indices, i]

# Keep only stars with finite results from both runs.
finite = np.isfinite(subset["astrometric_theta"] * subset["rv_theta"])
subset = subset[finite]
print(len(subset))

subset.write("results/Soubiran_2013.results.fits", overwrite=True)
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,291 | andycasey/velociraptor | refs/heads/master | /_convert_pkl_to_hdf5.py |
"""
Convert pickle file to HDF5 + yaml.
"""
import h5py
import sys
import pickle
import yaml
if __name__ == "__main__":

    # Usage: python _convert_pkl_to_hdf5.py <input.pkl> <output_prefix>
    input_path = sys.argv[1]
    output_prefix = sys.argv[2]

    with open(input_path, "rb") as fp:
        results = pickle.load(fp)

    # Immediately convert to h5.
    h = h5py.File(f"{output_prefix}.hdf5", "w")
    group = h.create_group("models")
    #group.attrs.update(results["config"])

    for model_name in results["models"].keys():

        sub_group = group.create_group(model_name)

        # Each model's payload is an ordered sequence; store each element
        # under a named dataset.
        dataset_names = (
            "data_indices",
            "mixture_model_results",
            "gp_parameters",
            "gp_predictions"
        )
        for i, dataset_name in enumerate(dataset_names):
            d = sub_group.create_dataset(dataset_name,
                data=results["models"][model_name][i])

    h.close()

    # The run configuration goes to a side-car YAML (".meta") file.
    with open(f"{output_prefix}.meta", "w") as fp:
        fp.write(yaml.dump(results["config"]))

    print(f"Created {output_prefix}.hdf5 and {output_prefix}.meta")

    # Free the (potentially large) results payload.
    del results
50,292 | andycasey/velociraptor | refs/heads/master | /scripts/npm_collect_the_battery_stars.py | import pickle
import yaml
import numpy as np
from astropy.io import fits
from astropy.table import Table
# Combine the astrometric and radial-velocity mixture-model results for the
# "battery stars" subset into a single FITS table.
with open("data/the-battery-stars.indices.pkl", "rb") as fp:
    battery_star_indices = pickle.load(fp)

with open("results/the-battery-stars.astrometric_unit_weight_error.pkl", "rb") as fp:
    astrometric_results = pickle.load(fp)

with open("results/the-battery-stars.rv_single_epoch_scatter.pkl", "rb") as fp:
    rv_results = pickle.load(fp)

# NOTE(review): yaml.load without an explicit Loader fails on PyYAML >= 6;
# consider yaml.load(fp, Loader=yaml.FullLoader).
with open("the-battery-stars.astrometry.yaml", "r") as fp:
    config = yaml.load(fp)

# Load in the data.
data = fits.open(config["data_path"])[1].data

subset = Table(data[battery_star_indices])
keys = ("theta", "mu_single", "sigma_single", "mu_multi", "sigma_multi")
for i, key in enumerate(keys):
    subset["rv_{}".format(key)] = rv_results[battery_star_indices, i]
    subset["astrometric_{}".format(key)] = astrometric_results[battery_star_indices, i]

# Keep only stars with finite results from both runs.
finite = np.isfinite(subset["astrometric_theta"] * subset["rv_theta"])
subset = subset[finite]
print(len(subset))

subset.write("results/the-battery-stars.results.fits", overwrite=True)
50,293 | andycasey/velociraptor | refs/heads/master | /article/figures/plot_rv_figures.py | import numpy as np
import os
import pickle
from astropy.io import fits
from astropy.table import Table
from scipy.stats import binned_statistic, binned_statistic_2d
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_utils import (mpl_style, plot_binned_statistic, plot_histogram_steps)
plt.style.use(mpl_style)

# Root of the repository, relative to this figure script.
BASE_PATH = "../../"

# Catalogue release candidate version; used to prefix output figure names.
RELEASE_CANDIDATE_VERSION = 100

# Figures to produce on this run; commented-out entries are disabled.
MAKE_FIGURES = [
    #"joint_p50_mainsequence_median",
    #"joint_p50_mainsequence_mean",
    #"joint_p50_wrt_kdt_labels",
    #"rv_mean_joint_p50_wrt_kdt_labels",
    #"ast_mean_joint_p50_wrt_kdt_labels",
    #"mean_joint_p50_wrt_kdt_labels",
    #"all_joint_p50_wrt_kdt_labels",
    #"hrd_single_star_fraction_hist",
    #"ast_hrd_single_star_fraction_hist",
    #"rv_hrd_single_star_fraction_hist",
    #"rv_hrd_single_star_fraction_scatter",
    #"rv_sb9_kalpha_comparison",
    "rv_sb9_kalpha_comparison_tau",
    "rv_sb9_k_comparison",
    "rv_sb9_kalpha_comparison_period",
    "rv_sb9_kp_corner",
    "rv_apw_unimodal_k_comparison",
    "rv_apw_unimodal_kalpha_comparison",
    "rv_apw_percentiles_k_comparison",
    "rv_gp_hrd",
    "rv_gp_wrt_params",
    "ast_gp_hrd",
    "ast_gp_wrt_params",
    "rv_soubiran_hist",
    "rv_huang_hist",
    "rv_huang_hist2"
]
#MAKE_FIGURES = [
#    "ast_gp_hrd",
#]

# Colormap used for all scalar-valued (sequential) figures.
DEFAULT_SEQUENTIAL_CMAP = "viridis"
"""
velociraptor = fits.open(os.path.join(
    BASE_PATH,
    "results",
    "velociraptor-catalog.rc.{:.0f}.fits".format(RELEASE_CANDIDATE_VERSION)))
velociraptor = velociraptor[1].data
"""

# Load the catalogue only if it is not already bound in the session, so
# repeated runs in the same interactive interpreter skip the slow FITS read.
try:
    velociraptor
except NameError:
    velociraptor = fits.open("../../results/die-hard-subset.fits")[1].data
# LaTeX axis labels keyed by catalogue column name.
latex_labels = dict(
    bp_rp=r"\textrm{bp - rp}",
    phot_rp_mean_mag=r"\textrm{apparent rp mag}",
    absolute_rp_mag=r"\textrm{absolute rp mag}",
    absolute_g_mag=r"\textrm{absolute g mag}",
    rv_mu_single=r"$\mu_s$ \textrm{/ km\,s}$^{-1}$",
    rv_sigma_single=r"$\sigma_s$ \textrm{/ km\,s}$^{-1}$",
    rv_p50=r"$\tau_\textrm{rv,single}$",
    ast_p50=r"$\tau_\textrm{ast,single}$",
    rv_single_epoch_scatter=r"\textrm{radial velocity jitter / km\,s}$^{-1}$",
    astrometric_unit_weight_error=r"\textrm{astrometric jitter}",
    ast_gp_mu_s=r"$\mu_\textrm{s,ast}$",
    ast_gp_sigma_s=r"$\sigma_\textrm{s,ast}$",
    rv_gp_mu_s=r"$\mu_{rv,s}\,\textrm{km\,s}^{-1}$",
    rv_gp_sigma_s=r"$\sigma_{rv,s}\,\textrm{km\,s}^{-1}$",
)

# Shared axis limits, keyed as "<column>.limits".  Reversed tuples give
# inverted (magnitude-style) axes.
common_kwds = dict([
    ("bp_rp.limits", (-0.25, 6.25)),
    ("phot_rp_mean_mag.limits", (13.0, 2.1)),
    ("absolute_g_mag.limits", (10, -8)),
    ("absolute_rp_mag.limits", (10.5, -16)),
])

# Styling for the one-to-one reference line in comparison plots.
one_to_one_line_kwds = dict(c="#666666", linestyle=":", lw=1, zorder=-1)
def savefig(fig, basename):
    """Save *fig* as a PNG, prefixing the release candidate version."""
    filename = f"v{RELEASE_CANDIDATE_VERSION}-{basename}.png"
    fig.savefig(filename)
    print(f"Saved figure {filename}")
def cross_match(A_source_ids, B_source_ids):
    """Cross-match two arrays of (unique) Gaia source ids.

    Returns a two-tuple of index arrays ``(ai, bi)`` such that
    ``A_source_ids[ai] == B_source_ids[bi]``, with matches ordered by
    increasing source id.  Both inputs are assumed to be duplicate-free
    (the equal-length assertion below relies on this).
    """
    # np.long was removed in NumPy 1.24; Gaia source ids need explicit
    # 64-bit integers.  np.isin replaces the deprecated np.in1d.
    A = np.array(A_source_ids, dtype=np.int64)
    B = np.array(B_source_ids, dtype=np.int64)

    ai = np.where(np.isin(A, B))[0]
    bi = np.where(np.isin(B, A))[0]

    assert len(ai) == len(bi)

    # Order both index arrays by source id so the rows line up.
    ai = ai[np.argsort(A[ai])]
    bi = bi[np.argsort(B[bi])]

    assert all(A[ai] == B[bi])
    return (ai, bi)
def estimate_K(data, **kwargs):
    """Estimate the radial velocity semi-amplitude excess.

    Returns ``(K_est, K_err)``: the single-epoch scatter in excess of the
    single-star GP mean, and ``sigma_s**2`` as its uncertainty proxy
    (NOTE(review): this is a variance, not a standard deviation — confirm
    which is intended before treating it as an error bar).
    """
    # TODO: Consult with colleagues to see if there is anything more
    # principled we could do without requiring much more work.
    excess = data["rv_single_epoch_scatter"] - data["rv_gp_mu_s"]
    uncertainty = data["rv_gp_sigma_s"]**2
    return (excess, uncertainty)
# TODO: settle on a single metric for visualising the likelihood of binarity;
# the quantities plotted below are placeholders until that decision is made.
figure_name = "joint_p50_mainsequence_median"
if figure_name in MAKE_FIGURES:
    # Three H-R diagram panels showing the binned *median* single-star
    # probability from radial velocity, astrometry, and the joint model.
    ordered, reverse = (True, False)
    subsample = None

    xlabel = "bp_rp"
    ylabels = ("absolute_g_mag", "absolute_g_mag", "absolute_g_mag")
    zlabels = ("rv_p50", "ast_p50", "joint_p50")
    titles = (
        r"\textrm{radial velocity}",
        r"\textrm{astrometry}",
        r"\textrm{joint information}"
    )

    K = len(ylabels)
    xlims = (0, 3)
    ylims = (10, 2)

    num_bins = 150

    # Use exact same bins for all panels, based on the joint information.
    x, y, z = (velociraptor[xlabel], velociraptor[ylabels[-1]], velociraptor[zlabels[-1]])
    mask = np.isfinite(x * y * z) \
        * (x >= min(xlims)) * (x <= max(xlims)) \
        * (y >= min(ylims)) * (y <= max(ylims))

    H, xedges, yedges, binnumber = binned_statistic_2d(x[mask],
                                                       y[mask],
                                                       z[mask],
                                                       statistic="count",
                                                       bins=num_bins)
    bins = (xedges, yedges)

    plot_binned_statistic_kwds = dict(function="median", vmin=0, vmax=1,
                                      cmap=DEFAULT_SEQUENTIAL_CMAP, mask=None,
                                      subsample=subsample, min_entries_per_bin=3,
                                      bins=bins)

    fig, axes = plt.subplots(1, K, figsize=(3 * K, 3))
    for i, (ax, ylabel, zlabel, title) \
            in enumerate(zip(axes, ylabels, zlabels, titles)):

        plot_binned_statistic(
            velociraptor[xlabel],
            velociraptor[ylabel],
            velociraptor[zlabel],
            ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
            **plot_binned_statistic_kwds)

        ax.set_xlabel(latex_labels.get(xlabel, xlabel))
        ax.xaxis.set_major_locator(MaxNLocator(5))
        ax.yaxis.set_major_locator(MaxNLocator(5))
        ax.set_xlim(xlims)
        ax.set_ylim(ylims)
        ax.set_title(title)

    cbar = plt.colorbar(ax.images[0], ax=list(axes))#=axes[-1])
    cbar.set_label(r"\textrm{single star probability} $\tau$")

    # Force square panels regardless of the data ranges.
    for ax in axes:
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()
    fig.subplots_adjust(right=0.90)

    # Unbelievable that we have to do all this,...
    #fig.canvas.update()
    fig.canvas.draw()

    cbar.ax.yaxis.set_tick_params(width=0)

    # Re-position the colourbar beside the last panel; presumably iterated
    # twice because the layout shifts after the first pass — TODO confirm.
    cax_width = 0.02
    for i in range(2):
        ax_left, bottom, ax_width, height = axes[-1].get_position().bounds
        cbar.ax.set_position([ax_left + ax_width + cax_width, bottom, cax_width, height])

    savefig(fig, figure_name)
figure_name = "joint_p50_mainsequence_mean"
if figure_name in MAKE_FIGURES:
    # As above, but showing the binned *mean* single-star probability with
    # coarser bins and a minimum-entries cut per bin.
    ordered, reverse = (True, False)
    subsample = None

    xlabel = "bp_rp"
    ylabels = ("absolute_g_mag", "absolute_g_mag", "absolute_g_mag")
    zlabels = ("rv_p50", "ast_p50", "joint_p50")
    titles = (
        r"\textrm{radial velocity}",
        r"\textrm{astrometry}",
        r"\textrm{joint information}"
    )

    K = len(ylabels)
    xlims = (0, 3)
    ylims = (10, 2)

    num_bins = 100

    # Use exact same bins for all panels, based on the joint information.
    x, y, z = (velociraptor[xlabel], velociraptor[ylabels[-1]], velociraptor[zlabels[-1]])
    mask = np.isfinite(x * y * z) \
        * (x >= min(xlims)) * (x <= max(xlims)) \
        * (y >= min(ylims)) * (y <= max(ylims))

    H, xedges, yedges, binnumber = binned_statistic_2d(x[mask],
                                                       y[mask],
                                                       z[mask],
                                                       statistic="count",
                                                       bins=num_bins)
    bins = (xedges, yedges)

    plot_binned_statistic_kwds = dict(function="mean", vmin=0, vmax=1,
                                      cmap=DEFAULT_SEQUENTIAL_CMAP, mask=mask,
                                      subsample=subsample, min_entries_per_bin=5,
                                      bins=bins)

    fig, axes = plt.subplots(1, K, figsize=(3 * K, 3))
    for i, (ax, ylabel, zlabel, title) \
            in enumerate(zip(axes, ylabels, zlabels, titles)):

        plot_binned_statistic(
            velociraptor[xlabel],
            velociraptor[ylabel],
            velociraptor[zlabel],
            ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
            **plot_binned_statistic_kwds)

        ax.set_xlabel(latex_labels.get(xlabel, xlabel))
        ax.xaxis.set_major_locator(MaxNLocator(5))
        ax.yaxis.set_major_locator(MaxNLocator(4))
        ax.set_xlim(xlims)
        ax.set_ylim(ylims)
        ax.set_title(title)

    cbar = plt.colorbar(ax.images[0], ax=list(axes))#=axes[-1])
    cbar.set_label(r"\textrm{single star probability} $\tau$")

    # Force square panels regardless of the data ranges.
    for ax in axes:
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()
    fig.subplots_adjust(right=0.90)

    # Unbelievable that we have to do all this,...
    #fig.canvas.update()
    fig.canvas.draw()

    cbar.ax.yaxis.set_tick_params(width=0)

    # Re-position the colourbar beside the last panel; presumably iterated
    # twice because the layout shifts after the first pass — TODO confirm.
    cax_width = 0.02
    for i in range(2):
        ax_left, bottom, ax_width, height = axes[-1].get_position().bounds
        cbar.ax.set_position([ax_left + ax_width + cax_width, bottom, cax_width, height])

    savefig(fig, figure_name)
figure_name = "joint_p50_wrt_kdt_labels"
if figure_name in MAKE_FIGURES:
    # Scatter plots of rv / astrometric jitter against each KD-tree label,
    # coloured by the corresponding single-star probability.
    ordered = True
    reverse = False
    subset = 100000

    label_names = ("bp_rp", "phot_rp_mean_mag", "absolute_rp_mag")
    ylabels = ("rv_single_epoch_scatter", "astrometric_unit_weight_error")
    zlabels = ("rv_p50", "ast_p50")

    fig, axes = plt.subplots(2, 3, figsize=(12, 9))
    for i, ax_row in enumerate(axes):
        ylabel = ylabels[i]
        zlabel = zlabels[i]
        for j, ax in enumerate(ax_row):
            xlabel = label_names[j]

            x = velociraptor[xlabel]
            y = velociraptor[ylabel]
            z = velociraptor[zlabel]

            mask = np.isfinite(x * y * z)
            x, y, z = (x[mask], y[mask], z[mask])

            # Randomly subsample the sources for plotting speed.
            if subset is not None:
                idx = np.random.choice(x.size, subset, replace=False)
                x, y, z = (x[idx], y[idx], z[idx])

            # Draw the highest-probability points last (on top).
            if ordered:
                idx = np.argsort(z)
                if reverse:
                    idx = idx[::-1]
                x, y, z = (x[idx], y[idx], z[idx])

            ax.scatter(x, y, c=z, s=1, cmap=DEFAULT_SEQUENTIAL_CMAP,
                       rasterized=True)

            ax.set_xlabel(latex_labels.get(xlabel, xlabel))
            ax.set_ylabel(latex_labels.get(ylabel, ylabel))

            """
            plot_binned_statistic_kwds = dict(function="mean", bins=100,
                                              xlabel=latex_labels.get(xlabel, xlabel),
                                              cmap=DEFAULT_SEQUENTIAL_CMAP,
                                              subsample=None, min_entries_per_bin=5)
            plot_binned_statistic(x, y, z,
                                  ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
                                  **plot_binned_statistic_kwds)
            """
            ax.semilogy()

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_mean_joint_p50_wrt_kdt_labels"
if figure_name in MAKE_FIGURES:
    # Binned mean of the rv single-star probability against each KD-tree
    # label, with standard-error-of-the-mean error bars.
    label_names = ("bp_rp", "phot_rp_mean_mag", "absolute_rp_mag")
    ylabel = r"$\langle\tau_\textrm{rv,single}\rangle$"

    mask = np.isfinite(velociraptor["rv_p50"])
    rv_p50 = velociraptor["rv_p50"]

    fig, axes = plt.subplots(1, len(label_names), figsize=(12, 4))
    bins = 20
    for i, (ax, label_name) in enumerate(zip(axes, label_names)):
        mean, edge, bin_index = binned_statistic(velociraptor[label_name][mask],
                                                 rv_p50[mask],
                                                 statistic="mean", bins=bins)
        var, _, __ = binned_statistic(velociraptor[label_name][mask],
                                      rv_p50[mask],
                                      statistic=np.var, bins=bins)
        count, _, __ = binned_statistic(velociraptor[label_name][mask],
                                        rv_p50[mask],
                                        statistic="count", bins=bins)
        # Standard error of the mean in each bin.
        yerr = np.sqrt(var/(count - 1))
        #yerr = np.sqrt(var)
        center = edge[:-1] + 0.5 * np.diff(edge)

        plot_histogram_steps(ax, center, mean, yerr)

        ax.set_xlabel(latex_labels.get(label_name, label_name))
        ax.set_ylabel(ylabel)
        ax.set_xlim(edge[0], edge[-1])
        ax.set_ylim(-0.1, 1.1)

        ax.xaxis.set_major_locator(MaxNLocator(6))
        ax.yaxis.set_major_locator(MaxNLocator(6))

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "ast_mean_joint_p50_wrt_kdt_labels"
if figure_name in MAKE_FIGURES:
    # As the rv version above, but for the astrometric single-star
    # probability (ast_p50).
    label_names = ("bp_rp", "phot_rp_mean_mag", "absolute_rp_mag")
    ylabel = r"$\langle\tau_\textrm{ast,single}\rangle$"

    fig, axes = plt.subplots(1, len(label_names), figsize=(12, 4))
    bins = 20
    for i, (ax, label_name) in enumerate(zip(axes, label_names)):
        x = velociraptor[label_name]
        y = velociraptor["ast_p50"]
        mask = np.isfinite(x * y)

        mean, edge, bin_index = binned_statistic(x[mask], y[mask],
                                                 statistic="mean", bins=bins)
        var, _, __ = binned_statistic(x[mask], y[mask],
                                      statistic=np.var, bins=bins)
        count, _, __ = binned_statistic(x[mask], y[mask],
                                        statistic="count", bins=bins)
        # Standard error of the mean in each bin.
        yerr = np.sqrt(var/(count - 1))
        #yerr = np.sqrt(var)
        center = edge[:-1] + 0.5 * np.diff(edge)

        plot_histogram_steps(ax, center, mean, yerr)

        ax.set_xlabel(latex_labels.get(label_name, label_name))
        ax.set_ylabel(ylabel)
        ax.set_xlim(edge[0], edge[-1])
        ax.set_ylim(-0.1, 1.1)

        ax.xaxis.set_major_locator(MaxNLocator(6))
        ax.yaxis.set_major_locator(MaxNLocator(6))

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "mean_joint_p50_wrt_kdt_labels"
if figure_name in MAKE_FIGURES:
    # Binned mean of the *joint* single-star probability against each
    # KD-tree label.
    label_names = ("bp_rp", "phot_rp_mean_mag", "absolute_rp_mag")
    ylabel = r"$\langle\tau_\textrm{single}\rangle$"

    fig, axes = plt.subplots(1, len(label_names), figsize=(12, 4))
    bins = 20
    for i, (ax, label_name) in enumerate(zip(axes, label_names)):
        x = velociraptor[label_name]
        y = velociraptor["joint_p50"]
        mask = np.isfinite(x * y)

        mean, edge, bin_index = binned_statistic(x[mask], y[mask],
                                                 statistic="mean", bins=bins)
        var, _, __ = binned_statistic(x[mask], y[mask],
                                      statistic=np.var, bins=bins)
        count, _, __ = binned_statistic(x[mask], y[mask],
                                        statistic="count", bins=bins)
        # Standard error of the mean in each bin.
        yerr = np.sqrt(var/(count - 1))
        #yerr = np.sqrt(var)
        center = edge[:-1] + 0.5 * np.diff(edge)

        plot_histogram_steps(ax, center, mean, yerr)

        ax.set_xlabel(latex_labels.get(label_name, label_name))
        ax.set_ylabel(ylabel)
        ax.set_xlim(edge[0], edge[-1])
        ax.set_ylim(-0.1, 1.1)

        ax.xaxis.set_major_locator(MaxNLocator(6))
        ax.yaxis.set_major_locator(MaxNLocator(6))

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "all_joint_p50_wrt_kdt_labels"
if figure_name in MAKE_FIGURES:
    # Overlay the rv, astrometric, and joint binned mean probabilities on
    # the same axes, one panel per KD-tree label.
    label_names = ("bp_rp", "phot_rp_mean_mag", "absolute_rp_mag")
    ylabel = r"$\langle\tau_\textrm{single}\rangle$"

    fig, axes = plt.subplots(1, len(label_names), figsize=(12, 4))
    bins = 20
    for prefix in (None, "rv", "ast"):
        # None -> the joint probability column.
        label = "joint_p50" if prefix is None else f"{prefix}_p50"

        for i, (ax, label_name) in enumerate(zip(axes, label_names)):
            x = velociraptor[label_name]
            y = velociraptor[label]
            mask = np.isfinite(x * y)

            mean, edge, bin_index = binned_statistic(x[mask], y[mask],
                                                     statistic="mean", bins=bins)
            var, _, __ = binned_statistic(x[mask], y[mask],
                                          statistic=np.var, bins=bins)
            count, _, __ = binned_statistic(x[mask], y[mask],
                                            statistic="count", bins=bins)
            # Standard error of the mean in each bin.
            yerr = np.sqrt(var/(count - 1))
            #yerr = np.sqrt(var)
            center = edge[:-1] + 0.5 * np.diff(edge)

            # NOTE(review): labels are set here but ax.legend() is never
            # called in this section — confirm whether a legend is intended.
            plot_histogram_steps(ax, center, mean, yerr,
                                 label="joint" if prefix is None else prefix)

            ax.set_xlabel(latex_labels.get(label_name, label_name))
            ax.set_ylabel(ylabel)
            ax.set_xlim(edge[0], edge[-1])
            ax.set_ylim(-0.1, 1.1)

            ax.xaxis.set_major_locator(MaxNLocator(6))
            ax.yaxis.set_major_locator(MaxNLocator(6))

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "hrd_single_star_fraction_hist"
if figure_name in MAKE_FIGURES:
    # H-R diagrams (apparent and absolute magnitude panels) coloured by the
    # binned mean of the joint single-star probability.
    fig, axes = plt.subplots(1, 2, figsize=(9, 4))
    xlabel = "bp_rp"
    ylabel = "phot_rp_mean_mag"
    zlabel = "joint_p50"

    # Restrict sensibly.
    mask = (velociraptor["absolute_rp_mag"] > -16) \
        * (velociraptor["bp_rp"] < 6.5)

    plot_binned_statistic_kwds = dict(function="mean", vmin=0, vmax=1, bins=100,
                                      xlabel=latex_labels.get(xlabel, xlabel),
                                      cmap=DEFAULT_SEQUENTIAL_CMAP, mask=mask,
                                      subsample=None, min_entries_per_bin=5)

    for ax, ylabel in zip(axes, ("phot_rp_mean_mag", "absolute_rp_mag")):
        plot_binned_statistic(
            velociraptor[xlabel],
            velociraptor[ylabel],
            velociraptor[zlabel],
            ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
            **plot_binned_statistic_kwds)

        ax.xaxis.set_major_locator(MaxNLocator(6))
        ax.yaxis.set_major_locator(MaxNLocator(6))
        ax.set_xlim(common_kwds.get("{}.limits".format(xlabel), None))
        ax.set_ylim(common_kwds.get("{}.limits".format(ylabel), None))

    cbar = plt.colorbar(ax.images[0], fraction=0.046, pad=0.04)
    # NOTE(review): the plotted quantity is joint_p50 but the label says
    # tau_rv,single — confirm which is intended.
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{rv,single}$")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "ast_hrd_single_star_fraction_hist"
# NOTE(review): the `or True` forces this figure to be made on every run
# regardless of MAKE_FIGURES — looks like a debugging leftover; confirm.
if figure_name in MAKE_FIGURES or True:
    # H-R diagrams coloured by the binned mean astrometric single-star
    # probability, restricted to sources with a radial velocity measurement.
    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    xlabel = "bp_rp"
    ylabel = "phot_rp_mean_mag"
    zlabel = "ast_p50"

    # Restrict sensibly.
    mask = (velociraptor["absolute_rp_mag"] > -16) \
        * (velociraptor["bp_rp"] < 6.5) \
        * np.isfinite(velociraptor["rv_single_epoch_scatter"])

    plot_binned_statistic_kwds = dict(function="mean", vmin=0, vmax=1, bins=100,
                                      xlabel=latex_labels.get(xlabel, xlabel),
                                      cmap=DEFAULT_SEQUENTIAL_CMAP, mask=mask,
                                      subsample=None, min_entries_per_bin=5)

    for ax, ylabel in zip(axes, ("phot_rp_mean_mag", "absolute_rp_mag")):
        plot_binned_statistic(
            velociraptor[xlabel],
            velociraptor[ylabel],
            velociraptor[zlabel],
            ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
            **plot_binned_statistic_kwds)

        ax.xaxis.set_major_locator(MaxNLocator(6))
        ax.yaxis.set_major_locator(MaxNLocator(6))
        ax.set_xlim(common_kwds.get("{}.limits".format(xlabel), None))
        ax.set_ylim(common_kwds.get("{}.limits".format(ylabel), None))

    cbar = plt.colorbar(ax.images[0], fraction=0.046, pad=0.04)
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{ast,single}$")

    fig.tight_layout()
    savefig(fig, figure_name)
# Plot the H-R diagram coloured by \tau_{single} as a scatter plot and a 2D hist
figure_name = "rv_hrd_single_star_fraction_hist"
if figure_name in MAKE_FIGURES:
    # H-R diagrams coloured by the binned mean rv single-star probability.
    fig, axes = plt.subplots(1, 2, figsize=(9, 4))
    xlabel = "bp_rp"
    ylabel = "phot_rp_mean_mag"
    zlabel = "rv_p50"

    # Restrict sensibly.
    mask = (velociraptor["absolute_rp_mag"] > -16) \
        * (velociraptor["bp_rp"] < 6.5)

    plot_binned_statistic_kwds = dict(function="mean", vmin=0, vmax=1, bins=100,
                                      xlabel=latex_labels.get(xlabel, xlabel),
                                      cmap=DEFAULT_SEQUENTIAL_CMAP, mask=mask,
                                      subsample=None, min_entries_per_bin=5)

    for ax, ylabel in zip(axes, ("phot_rp_mean_mag", "absolute_rp_mag")):
        plot_binned_statistic(
            velociraptor[xlabel],
            velociraptor[ylabel],
            velociraptor[zlabel],
            ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
            **plot_binned_statistic_kwds)

        ax.xaxis.set_major_locator(MaxNLocator(6))
        ax.yaxis.set_major_locator(MaxNLocator(6))
        ax.set_xlim(common_kwds.get("{}.limits".format(xlabel), None))
        ax.set_ylim(common_kwds.get("{}.limits".format(ylabel), None))

    cbar = plt.colorbar(ax.images[0], fraction=0.046, pad=0.04)
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_hrd_single_star_fraction_scatter"
if figure_name in MAKE_FIGURES:
    # Scatter-plot version of the rv single-star probability H-R diagram.
    subsample = None
    ordered, reverse = True, False

    fig, axes = plt.subplots(1, 2, figsize=(9, 4))
    xlabel = "bp_rp"
    ylabel = "phot_rp_mean_mag"
    zlabel = "rv_p50"

    if subsample is not None:
        idx = np.random.choice(len(velociraptor), subsample, replace=False)
    else:
        idx = np.arange(len(velociraptor))

    # Draw the highest-probability points last (on top).
    if ordered:
        idx = idx[np.argsort(velociraptor[zlabel][idx])]
    if reverse:
        idx = idx[::-1]

    scatter_kwds = dict(vmin=0, vmax=1, cmap=DEFAULT_SEQUENTIAL_CMAP, s=1,
                        alpha=0.1, c=velociraptor[zlabel][idx], rasterized=True)

    for ax, ylabel in zip(axes, ("phot_rp_mean_mag", "absolute_rp_mag")):
        ax.scatter(
            velociraptor[xlabel][idx],
            velociraptor[ylabel][idx],
            **scatter_kwds)

        ax.set_xlabel(latex_labels.get(xlabel, xlabel))
        ax.set_ylabel(latex_labels.get(ylabel, ylabel))
        ax.xaxis.set_major_locator(MaxNLocator(6))
        ax.yaxis.set_major_locator(MaxNLocator(6))
        ax.set_xlim(common_kwds.get("{}.limits".format(xlabel), None))
        ax.set_ylim(common_kwds.get("{}.limits".format(ylabel), None))

    if scatter_kwds.get("alpha", 1) == 1:
        collection = ax.collections[0]
    else:
        # Note: if you give alpha in scatter_kwds then you should give
        # vmin and vmax too otherwise this needs to be updated:
        # (an invisible, fully-opaque dummy scatter provides a mappable for
        # the colourbar so it is not drawn semi-transparent)
        collection = ax.scatter([np.nanmean(velociraptor[xlabel][idx])],
                                [np.nanmean(velociraptor[ylabel][idx])],
                                c=[np.nanmean(scatter_kwds["c"])],
                                vmin=scatter_kwds["vmin"],
                                vmax=scatter_kwds["vmax"],
                                cmap=scatter_kwds["cmap"],
                                alpha=1.0, s=0)

    cbar = plt.colorbar(collection, fraction=0.046, pad=0.04)
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    fig.tight_layout()
    savefig(fig, figure_name)
# Cross-match against SB9 catalog only if we have to.
if any(["sb9" in figure_name.lower() for figure_name in MAKE_FIGURES]):
    sb9 = Table.read(os.path.join(BASE_PATH, "data", "sb9_xm_gaia.fits"))

    # remove duplicates: group by source_id, then keep the first row of each
    # group (groups.indices[:-1] are the starting indices of every group).
    sb9 = sb9.group_by("source_id")
    sb9 = sb9[sb9.groups.indices[:-1]]

    # Only show SB9 stars that meet these criteria:
    sb9_mask = (sb9["f_K1"] != ">") \
        * (sb9["f_T0"] == 0) \
        * (sb9["Grade"] > 0) \
        * (sb9["f_omega"] != "a")
    sb9 = sb9[sb9_mask]
    assert len(set(sb9["source_id"])) == len(sb9)

    vl_sb9_ids, sb9_ids = cross_match(velociraptor["source_id"], sb9["source_id"])
figure_name = "rv_sb9_kalpha_comparison"
if figure_name in MAKE_FIGURES:
    # Compare the estimated K/alpha against SB9 literature values, coloured
    # by the significance of the radial velocity excess.
    sort, reverse = (True, False)

    vl_sb9_subset = velociraptor[vl_sb9_ids]
    K_est, K_est_err = estimate_K(vl_sb9_subset)

    # BUGFIX: `log_K_significance` was referenced below but never defined,
    # raising a NameError whenever this figure was enabled.  Compute it as
    # the quantity described by the colourbar label:
    # log10(observed scatter - mu_s) - log10(sigma_s).
    log_K_significance = np.log10(
        velociraptor["rv_single_epoch_scatter"] - velociraptor["rv_gp_mu_s"]) \
        - np.log10(velociraptor["rv_gp_sigma_s"])

    # alpha = P * sqrt(1 - e^2); scale K by 1/alpha for both catalogues.
    scalar = (1.0 / (sb9["Per"] * (1 - sb9["e"]**2)**0.5))[sb9_ids]
    x = sb9["K1"][sb9_ids] * scalar
    y = K_est * scalar
    xerr = sb9["e_K1"][sb9_ids] * scalar
    yerr = K_est_err * scalar
    c = log_K_significance[vl_sb9_ids]

    # Draw the most significant points last (on top).
    if sort:
        idx = np.argsort(c)
        if reverse:
            idx = idx[::-1]
    else:
        idx = np.arange(len(c))
    x, y, xerr, yerr, c = (x[idx], y[idx], xerr[idx], yerr[idx], c[idx])

    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, s=10, vmax=1.75, rasterized=True)
    ax.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="none", ecolor="#CCCCCC",
                zorder=-1, linewidth=1, capsize=0)
    ax.loglog()

    # One-to-one reference line over the union of both axis ranges.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    ax.plot(limits, limits, **one_to_one_line_kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = plt.colorbar(scat, cax=cax)
    cbar.set_label(r"$\log_{10}\left[\sigma(\textrm{V}_\textrm{R}^{t}) - \mu_s\right] - \log_{10}{\sigma_s}$")
    fig.tight_layout()

    ax.set_xlabel(r"$K_{1} / \alpha$ \textrm{(SB9)}")
    ax.set_ylabel(r"$K_{1,\textrm{est}} / \alpha$ \textrm{(this work)}")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_sb9_kalpha_comparison_tau"
if figure_name in MAKE_FIGURES:
    # K/alpha comparison against SB9, coloured by the rv single-star
    # probability (alpha = P * sqrt(1 - e^2)).
    sort, reverse = (True, False)

    vl_sb9_subset = velociraptor[vl_sb9_ids]
    K_est, K_est_err = estimate_K(vl_sb9_subset)

    scalar = (1.0 / (sb9["Per"] * (1 - sb9["e"]**2)**0.5))[sb9_ids]
    x = sb9["K1"][sb9_ids] * scalar
    y = K_est * scalar
    xerr = sb9["e_K1"][sb9_ids] * scalar
    yerr = K_est_err * scalar
    c = vl_sb9_subset["rv_p50"]

    # Draw the highest-probability points last (on top).
    if sort:
        idx = np.argsort(c)
        if reverse:
            idx = idx[::-1]
    else:
        idx = np.arange(len(c))
    x, y, xerr, yerr, c = (x[idx], y[idx], xerr[idx], yerr[idx], c[idx])

    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, s=10, vmin=0, vmax=1, rasterized=True)
    ax.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="none", ecolor="#CCCCCC", zorder=-1,
                linewidth=1, capsize=0)
    ax.loglog()

    # One-to-one reference line over the union of both axis ranges.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    ax.plot(limits, limits, **one_to_one_line_kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = plt.colorbar(scat, cax=cax)
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")
    fig.tight_layout()

    #ax.set_xlabel(r"$K/P\sqrt{1 - e^2}$ \textrm{(SB9)}")
    #ax.set_ylabel(r"$K_\textrm{est} /P\sqrt{1 - e^2}$ \textrm{(this work)}")
    ax.set_xlabel(r"$K_{1} / \alpha$ \textrm{(SB9)}")
    ax.set_ylabel(r"$K_{1,\textrm{est}} / \alpha$ \textrm{(this work)}")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_sb9_k_comparison"
if figure_name in MAKE_FIGURES:
    # Direct K comparison against SB9 (no 1/alpha scaling), coloured by the
    # SB9 orbital eccentricity.
    sort, reverse = (True, False)

    vl_sb9_subset = velociraptor[vl_sb9_ids]
    """
    K_est, K_est_err = estimate_K(vl_sb9_subset["rv_single_epoch_scatter"],
                                  vl_sb9_subset["rv_mu_single"],
                                  vl_sb9_subset["rv_sigma_single"],
                                  vl_sb9_subset["rv_mu_single_var"],
                                  vl_sb9_subset["rv_sigma_single_var"])
    """
    K_est, K_est_err = estimate_K(vl_sb9_subset)

    x = sb9["K1"][sb9_ids]
    y = K_est
    xerr = sb9["e_K1"][sb9_ids]
    yerr = K_est_err
    #c = log_K_significance[vl_sb9_ids]
    c = sb9["e"][sb9_ids]

    # Draw the highest-eccentricity points last (on top).
    if sort:
        idx = np.argsort(c)
        if reverse:
            idx = idx[::-1]
    else:
        idx = np.arange(len(c))
    x, y, xerr, yerr, c = (x[idx], y[idx], xerr[idx], yerr[idx], c[idx])

    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, s=10, vmin=0, vmax=1, rasterized=True)
    ax.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="none", ecolor="#CCCCCC", zorder=-1,
                linewidth=1, capsize=0)
    ax.loglog()

    # One-to-one reference line over the union of both axis ranges.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    ax.plot(limits, limits, **one_to_one_line_kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = plt.colorbar(scat, cax=cax)
    cbar.set_label(r"\textrm{eccentricity (SB9)}")
    #cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{rv,single}$")
    #cbar.set_label(r"$\log_{10}\left[\sigma(\textrm{V}_\textrm{R}^{t}) - \mu_s\right] - \log_{10}{\sigma_s}$")
    fig.tight_layout()

    #ax.set_xlabel(r"$K/P\sqrt{1 - e^2}$ \textrm{(SB9)}")
    #ax.set_ylabel(r"$K_\textrm{est} /P\sqrt{1 - e^2}$ \textrm{(this work)}")
    ax.set_xlabel(r"$K_{1} \textrm{/ km\,s}^{-1}$ \textrm{(SB9)}")
    ax.set_ylabel(r"$K_{1,\textrm{est}} \textrm{/ km\,s}^{-1}$ \textrm{(this work)}")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_sb9_kalpha_comparison_period"
if figure_name in MAKE_FIGURES:
    # K/alpha comparison against SB9, coloured by log10 of the SB9 orbital
    # period (alpha = P * sqrt(1 - e^2)).
    sort, reverse = (True, False)

    vl_sb9_subset = velociraptor[vl_sb9_ids]
    K_est, K_est_err = estimate_K(vl_sb9_subset)

    scalar = (1.0 / (sb9["Per"] * (1 - sb9["e"]**2)**0.5))[sb9_ids]
    x = sb9["K1"][sb9_ids] * scalar
    y = K_est * scalar
    xerr = sb9["e_K1"][sb9_ids] * scalar
    yerr = K_est_err * scalar
    c = np.log10(sb9["Per"][sb9_ids])

    # Draw the longest-period points last (on top).
    if sort:
        idx = np.argsort(c)
        if reverse:
            idx = idx[::-1]
    else:
        idx = np.arange(len(c))
    x, y, xerr, yerr, c = (x[idx], y[idx], xerr[idx], yerr[idx], c[idx])

    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, s=10, vmax=1.75, rasterized=True)
    ax.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="none", ecolor="#CCCCCC", zorder=-1,
                linewidth=1, capsize=0)
    ax.loglog()

    # One-to-one reference line over the union of both axis ranges.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    ax.plot(limits, limits, **one_to_one_line_kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = plt.colorbar(scat, cax=cax)
    #cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")
    cbar.set_label(r"$\log_{10}\left(P \textrm{/ days}\right)$")
    fig.tight_layout()

    #ax.set_xlabel(r"$K/P\sqrt{1 - e^2}$ \textrm{(SB9)}")
    #ax.set_ylabel(r"$K_\textrm{est} /P\sqrt{1 - e^2}$ \textrm{(this work)}")
    ax.set_xlabel(r"$K_{1} / \alpha$ \textrm{(SB9)}")
    ax.set_ylabel(r"$K_{1,\textrm{est}} / \alpha$ \textrm{(this work)}")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_sb9_kp_corner"
if figure_name in MAKE_FIGURES:
    # Corner-style plot of the SB9 period / semi-amplitude plane, coloured
    # by the rv single-star probability.
    sort, reverse = (True, False)

    # BUGFIX: the data were previously plotted as (K1, Per) while the axes
    # were labelled (P, K1); put the period on the x axis to match the
    # labels.
    x = sb9["Per"][sb9_ids]
    y = sb9["K1"][sb9_ids]
    c = velociraptor["rv_p50"][vl_sb9_ids]

    # Draw the highest-probability points last (on top).
    if sort:
        idx = np.argsort(c)
        idx = idx[::-1] if reverse else idx
    else:
        idx = np.arange(len(c))

    fig, axes = plt.subplots(2, 2)
    axes[0, 1].set_visible(False)
    axes[1, 0].scatter(x[idx], y[idx], c=c[idx], s=10, vmax=1.75, rasterized=True)
    axes[1, 0].loglog()
    axes[1, 0].set_xlabel(r"$P$ \textrm{/ days}")
    axes[1, 0].set_ylabel(r"$K_{1}$ \textrm{/ km\,s}$^{-1}$")

    # TODO: Draw histograms on other axes
    fig.tight_layout()
    fig.subplots_adjust(hspace=0, wspace=0)

    # Hide the ticks on the (currently empty) marginal panels.
    for ax in (axes[0, 0], axes[1, 1]):
        ax.set_xticks([])
        ax.set_yticks([])

    savefig(fig, figure_name)
# Cross-match against APW catalog only if we have to.
if any(["apw_unimodal" in figure_name.lower() for figure_name in MAKE_FIGURES]):
    apw_unimodal = Table.read(os.path.join(
        BASE_PATH, "data", "apw-highK-unimodal-xm-gaia.fits"))

    # remove duplicates: group by source_id, then keep the first row of each
    # group (groups.indices[:-1] are the starting indices of every group).
    apw_unimodal = apw_unimodal.group_by("source_id")
    apw_unimodal = apw_unimodal[apw_unimodal.groups.indices[:-1]]
    assert len(set(apw_unimodal["source_id"])) == len(apw_unimodal)

    vl_apw_um_ids, apw_um_ids = cross_match(velociraptor["source_id"],
                                            apw_unimodal["source_id"])
figure_name = "rv_apw_unimodal_k_comparison"
if figure_name in MAKE_FIGURES:
    # Direct K comparison against the Price-Whelan et al. unimodal sample,
    # coloured by the rv single-star probability.
    sort, reverse = (True, False)

    vl_apwu_subset = velociraptor[vl_apw_um_ids]
    K_est, K_est_err = estimate_K(vl_apwu_subset)

    x = apw_unimodal["K"][apw_um_ids]
    xerr = apw_unimodal["K_err"][apw_um_ids]
    y = K_est
    yerr = K_est_err
    c = vl_apwu_subset["rv_p50"]

    # Draw the highest-probability points last (on top).
    if sort:
        idx = np.argsort(c)
        idx = idx[::-1] if reverse else idx
    else:
        idx = np.arange(len(c))
    x, y, xerr, yerr, c = (x[idx], y[idx], xerr[idx], yerr[idx], c[idx])

    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, s=10, vmin=0, vmax=1, rasterized=True)
    ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt="none", ecolor="#cccccc", lw=1,
                zorder=-1, rasterized=True)
    ax.loglog()

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = plt.colorbar(scat, cax=cax)
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")
    #cbar.set_label(r"$\log_{10}\left(P \textrm{/ days}\right)$")

    # One-to-one reference line over the union of both axis ranges.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    ax.plot(limits, limits, **one_to_one_line_kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    ax.set_xlabel(r"$K$ \textrm{/ km\,s}$^{-1}$ \textrm{(Price-Whelan et al. 2017)}")
    ax.set_ylabel(r"$K$ \textrm{/ km\,s}$^{-1}$ \textrm{(this work)}")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_apw_unimodal_kalpha_comparison"
if figure_name in MAKE_FIGURES:
    # K/alpha comparison against the Price-Whelan et al. unimodal sample
    # (alpha = P * sqrt(1 - e^2)), coloured by the rv single-star probability.
    sort, reverse = (True, False)

    vl_apwu_subset = velociraptor[vl_apw_um_ids]
    K_est, K_est_err = estimate_K(vl_apwu_subset)

    scalar = 1.0/(apw_unimodal["P"] * np.sqrt(1 - apw_unimodal["e"]**2))[apw_um_ids]
    x = apw_unimodal["K"][apw_um_ids] * scalar
    xerr = apw_unimodal["K_err"][apw_um_ids] * scalar
    y = K_est * scalar
    yerr = K_est_err * scalar
    c = vl_apwu_subset["rv_p50"]

    # Draw the highest-probability points last (on top).
    if sort:
        idx = np.argsort(c)
        idx = idx[::-1] if reverse else idx
    else:
        idx = np.arange(len(c))
    x, y, xerr, yerr, c = (x[idx], y[idx], xerr[idx], yerr[idx], c[idx])

    fig, ax = plt.subplots(1, 1, figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, s=10, vmin=0, vmax=1, rasterized=True)
    ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt="none", ecolor="#cccccc", lw=1,
                zorder=-1, rasterized=True)
    ax.loglog()

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = plt.colorbar(scat, cax=cax)
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")
    #cbar.set_label(r"$\log_{10}\left(P \textrm{/ days}\right)$")

    # One-to-one reference line over the union of both axis ranges.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    ax.plot(limits, limits, **one_to_one_line_kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    ax.set_xlabel(r"$K / \alpha$ \textrm{(Price-Whelan et al. 2017)}")
    ax.set_ylabel(r"$K / \alpha$ \textrm{(this work)}")

    fig.tight_layout()
    savefig(fig, figure_name)
# Cross-match against APW catalog only if we have to.
if any(["apw_percentiles" in figure_name.lower() for figure_name in MAKE_FIGURES]):
    apw_percentiles = Table.read(os.path.join(
        BASE_PATH, "data", "apw-lnK-percentiles-xm-gaia.fits"))

    # remove duplicates: group by source_id, then keep the first row of each
    # group (groups.indices[:-1] are the starting indices of every group).
    apw_percentiles = apw_percentiles.group_by("source_id")
    apw_percentiles = apw_percentiles[apw_percentiles.groups.indices[:-1]]
    assert len(set(apw_percentiles["source_id"])) == len(apw_percentiles)

    vl_apw_lnk_ids, apw_lnk_ids = cross_match(velociraptor["source_id"],
                                              apw_percentiles["source_id"])
figure_name = "rv_apw_percentiles_k_comparison"
if figure_name in MAKE_FIGURES:
    # K comparison against the Price-Whelan et al. lnK-percentile sample.
    # NOTE(review): `x`, `y`, `yerr` and `c` are never assigned in this
    # section — if executed after the unimodal sections they silently reuse
    # those stale values, so this figure plots the WRONG data.  The intended
    # assignments from apw_percentiles (K 1st-percentile column, per the
    # x-axis label) need to be added; confirm the column names before fixing.
    sort, reverse = (True, False)

    vl_apwp_subset = velociraptor[vl_apw_lnk_ids]
    K_est, K_est_err = estimate_K(vl_apwp_subset)

    if sort:
        idx = np.argsort(c)
        idx = idx[::-1] if reverse else idx
    else:
        idx = np.arange(len(c))
    x, y, yerr, c = (x[idx], y[idx], yerr[idx], c[idx])

    fig, ax = plt.subplots(figsize=(8, 7))
    scat = ax.scatter(x, y, c=c, s=1, rasterized=True)
    ax.errorbar(x, y, yerr=yerr, fmt="none", ecolor="#cccccc", lw=1, zorder=-1,
                rasterized=True)
    ax.loglog()

    # One-to-one reference line over the union of both axis ranges.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (np.min(limits), np.max(limits))
    ax.plot(limits, limits, **one_to_one_line_kwds)
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    ax.set_xlabel(r"$K_\textrm{1\%}$ \textrm{/ km\,s}$^{-1}$ \textrm{(Price-Whelan et al. 2017)}")
    ax.set_ylabel(r"$K$ \textrm{/ km\,s}$^{-1}$ \textrm{(this work)}")

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = plt.colorbar(scat, cax=cax)
    cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    fig.tight_layout()
    savefig(fig, figure_name)
figure_name = "rv_gp_hrd"
if figure_name in MAKE_FIGURES:
    # H-R diagrams coloured by the binned median of the rv GP single-star
    # parameters (mu_s, sigma_s); one row per parameter, apparent and
    # absolute magnitude columns.
    xlabel = "bp_rp"
    ylabel = "phot_rp_mean_mag"
    zlabels = ("rv_gp_mu_s", "rv_gp_sigma_s",)
    #          "rv_mu_multiple", "rv_sigma_multiple")

    K, M = (2, len(zlabels))

    fig, axes = plt.subplots(M, K, figsize=(4 * K + 1, 4 * M))

    # Restrict to the well-populated region of the H-R diagram.
    mask = (velociraptor["absolute_rp_mag"] > -5) \
        * (velociraptor["bp_rp"] < 4)

    limits = dict(bp_rp=(-0.25, 4),
                  phot_rp_mean_mag=(13, 6),
                  absolute_rp_mag=(10, -5))

    plot_binned_statistic_kwds = dict(function="median", bins=100,
                                      xlabel=latex_labels.get(xlabel, xlabel),
                                      cmap=DEFAULT_SEQUENTIAL_CMAP, mask=mask,
                                      subsample=100000, min_entries_per_bin=5)

    for ax_row, zlabel in zip(axes, zlabels):
        for ax, ylabel in zip(ax_row, ("phot_rp_mean_mag", "absolute_rp_mag")):
            plot_binned_statistic(
                velociraptor[xlabel],
                velociraptor[ylabel],
                velociraptor[zlabel],
                ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
                **plot_binned_statistic_kwds)

            ax.xaxis.set_major_locator(MaxNLocator(6))
            ax.yaxis.set_major_locator(MaxNLocator(6))
            ax.set_xlim(limits.get(xlabel, None))
            ax.set_ylim(limits.get(ylabel, None))
            ax.set_title(latex_labels.get(zlabel, zlabel))

            #cbar = plt.colorbar(ax.images[0], fraction=0.046, pad=0.04)
            #cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    # Force square panels regardless of the data ranges.
    for ax in np.array(axes).flatten():
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()
    savefig(fig, figure_name)
# Figure: source-density maps of the RV GP single-star parameters against the
# three kd-tree labels, to check the GP predictions vary smoothly.
figure_name = "rv_gp_wrt_params"
if figure_name in MAKE_FIGURES:

    from matplotlib.colors import LogNorm

    xlabels = ("bp_rp",
               "phot_rp_mean_mag",
               "absolute_rp_mag")
    #ylabels = ("rv_mu_single", "rv_sigma_single")
    ylabels = ("rv_gp_mu_s", "rv_gp_sigma_s")

    K, M = (len(xlabels), len(ylabels))
    fig, axes = plt.subplots(M, K, figsize=(4 * K, 4 * M))

    # Only sources that actually have an RV scatter measurement.
    mask = np.isfinite(velociraptor["rv_single_epoch_scatter"])

    plot_binned_statistic_kwds = dict(function="count", bins=250,
                                      cmap="Blues", mask=mask,
                                      subsample=None, min_entries_per_bin=1,
                                      norm=LogNorm())

    for ax_row, xlabel in zip(axes.T, xlabels):
        for ax, ylabel in zip(ax_row, ylabels):
            # z value is irrelevant for a "count" statistic; x is re-used.
            plot_binned_statistic(
                velociraptor[xlabel],
                velociraptor[ylabel],
                velociraptor[xlabel],
                ax=ax,
                xlabel=latex_labels.get(xlabel, xlabel),
                ylabel=latex_labels.get(ylabel, ylabel),
                **plot_binned_statistic_kwds)

            ax.xaxis.set_major_locator(MaxNLocator(6))
            ax.yaxis.set_major_locator(MaxNLocator(6))
            # NOTE(review): assumes common_kwds holds "<label>.limits" entries;
            # np.sort(None) would raise if the key is absent — confirm upstream.
            ax.set_xlim(np.sort(common_kwds.get("{}.limits".format(xlabel), None)))
            ax.set_ylim(0, max(ax.get_ylim()))

            #cbar = plt.colorbar(ax.images[0], fraction=0.046, pad=0.04)
            #cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    for ax in np.array(axes).flatten():
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()
    savefig(fig, figure_name)
# Figure: H-R diagrams coloured by the astrometric GP model's single-star
# parameters (median per bin), mirroring the "rv_gp_hrd" figure above.
figure_name = "ast_gp_hrd"
if figure_name in MAKE_FIGURES:
    xlabel = "bp_rp"
    ylabel = "phot_rp_mean_mag"
    # FIX: the original assigned zlabels twice in a row; the first tuple
    # ("ast_mu_single", "ast_sigma_single") was a dead store and is removed.
    zlabels = ("ast_gp_mu_s", "ast_gp_sigma_s")
             # "ast_mu_multiple", "ast_sigma_multiple")
    K, M = (2, len(zlabels))
    fig, axes = plt.subplots(M, K, figsize=(4 * K + 1, 4 * M))

    # Restrict to the well-populated H-R region, and to sources with an RV
    # scatter measurement (extra constraint relative to the RV figure).
    mask = (velociraptor["absolute_rp_mag"] > -5) \
         * (velociraptor["bp_rp"] < 4) \
         * np.isfinite(velociraptor["rv_single_epoch_scatter"])

    # Magnitude limits are bright-to-faint so the y axes plot inverted.
    limits = dict(bp_rp=(-0.25, 4),
                  phot_rp_mean_mag=(13, 6),
                  absolute_rp_mag=(10, -5))

    plot_binned_statistic_kwds = dict(function="median", bins=100,
                                      xlabel=latex_labels.get(xlabel, xlabel),
                                      cmap=DEFAULT_SEQUENTIAL_CMAP, mask=mask,
                                      subsample=100000, min_entries_per_bin=5)

    for ax_row, zlabel in zip(axes, zlabels):
        for ax, ylabel in zip(ax_row, ("phot_rp_mean_mag", "absolute_rp_mag")):
            plot_binned_statistic(
                velociraptor[xlabel],
                velociraptor[ylabel],
                velociraptor[zlabel],
                ax=ax, ylabel=latex_labels.get(ylabel, ylabel),
                **plot_binned_statistic_kwds)

            ax.xaxis.set_major_locator(MaxNLocator(6))
            ax.yaxis.set_major_locator(MaxNLocator(6))
            ax.set_xlim(limits.get(xlabel, None))
            ax.set_ylim(limits.get(ylabel, None))
            ax.set_title(latex_labels.get(zlabel, zlabel))

            #cbar = plt.colorbar(ax.images[0], fraction=0.046, pad=0.04)
            #cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    # Force square panels regardless of data aspect ratio.
    for ax in np.array(axes).flatten():
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()
    savefig(fig, figure_name)
# Figure: source-density maps of the astrometric GP single-star parameters
# against the kd-tree labels, mirroring "rv_gp_wrt_params" above.
figure_name = "ast_gp_wrt_params"
if figure_name in MAKE_FIGURES:

    from matplotlib.colors import LogNorm

    xlabels = ("bp_rp",
               "phot_rp_mean_mag",
               "absolute_rp_mag")
    # FIX: the original assigned ylabels twice in a row; the first tuple
    # ("ast_mu_single", "ast_sigma_single") was a dead store and is removed.
    ylabels = ("ast_gp_mu_s", "ast_gp_sigma_s")

    K, M = (len(xlabels), len(ylabels))
    fig, axes = plt.subplots(M, K, figsize=(4 * K, 4 * M))

    # Only sources with a measured astrometric unit weight error.
    mask = np.isfinite(velociraptor["astrometric_unit_weight_error"])

    plot_binned_statistic_kwds = dict(function="count", bins=250,
                                      cmap="Blues", mask=mask,
                                      subsample=None, min_entries_per_bin=1,
                                      norm=LogNorm())

    for ax_row, xlabel in zip(axes.T, xlabels):
        for ax, ylabel in zip(ax_row, ylabels):
            # z value is irrelevant for a "count" statistic; x is re-used.
            plot_binned_statistic(
                velociraptor[xlabel],
                velociraptor[ylabel],
                velociraptor[xlabel],
                ax=ax,
                xlabel=latex_labels.get(xlabel, xlabel),
                ylabel=latex_labels.get(ylabel, ylabel),
                **plot_binned_statistic_kwds)

            ax.xaxis.set_major_locator(MaxNLocator(6))
            ax.yaxis.set_major_locator(MaxNLocator(6))
            # NOTE(review): assumes common_kwds holds "<label>.limits" entries;
            # np.sort(None) would raise if the key is absent — confirm upstream.
            ax.set_xlim(np.sort(common_kwds.get("{}.limits".format(xlabel), None)))
            ax.set_ylim(0, max(ax.get_ylim()))

            #cbar = plt.colorbar(ax.images[0], fraction=0.046, pad=0.04)
            #cbar.set_label(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    for ax in np.array(axes).flatten():
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()
    savefig(fig, figure_name)
# Cross-match against Soubiran catalog only if we have to.
if any(["soubiran" in figure_name.lower() for figure_name in MAKE_FIGURES]):

    soubiran = Table.read(os.path.join(
        BASE_PATH, "data", "soubiran-2013-xm-gaia.fits"))
    # remove duplicates: keep the first row of each source_id group.
    soubiran = soubiran.group_by("source_id")
    soubiran = soubiran[soubiran.groups.indices[:-1]]
    assert len(set(soubiran["source_id"])) == len(soubiran)

    vl_soubiran_ids, soubiran_ids = cross_match(velociraptor["source_id"],
                                                soubiran["source_id"])

# Figure: estimated K distribution for the Soubiran RV-standard overlap, and
# K versus our single-star probability.
figure_name = "rv_soubiran_hist"
if figure_name in MAKE_FIGURES:

    fig, axes = plt.subplots(1, 2, figsize=(8, 4))

    vl_soubiran_subset = velociraptor[vl_soubiran_ids]
    K_est, K_est_err = estimate_K(vl_soubiran_subset)

    axes[0].hist(K_est[np.isfinite(K_est)], bins=100)
    axes[0].set_xlabel(r"$K_{est}$ \textrm{/ km\,s}$^{-1}$")
    axes[0].set_ylabel(r"\textrm{count}")

    axes[1].semilogx()
    positive = K_est > 0
    axes[1].scatter(K_est[positive],
                    vl_soubiran_subset["rv_p50"][positive],
                    s=10, rasterized=True)
    xlim = axes[1].get_xlim()
    # Clip the lower error bar so it cannot cross zero on the log axis.
    axes[1].errorbar(K_est[positive],
                     vl_soubiran_subset["rv_p50"][positive],
                     xerr=np.min([K_est_err[positive], K_est[positive] - 1e-4], axis=0),
                     fmt="none", ecolor="#CCCCCC", zorder=-1,
                     linewidth=1, capsize=0, rasterized=True)
    axes[1].set_xlim(xlim)
    axes[1].set_xlabel(r"$K_{est} \textrm{ / km\,s}^{-1}$")
    axes[1].set_ylabel(r"\textrm{single star fraction} $\tau_\textrm{single}$")

    for ax in np.array(axes).flatten():
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()

    # NOTE(review): `notable` is computed but never used below — appears to be
    # a leftover for interactive inspection.
    notable = (vl_soubiran_subset["rv_p50"] < 0.6) \
            * (K_est > 0)

    savefig(fig, figure_name)
# Cross-match against the Huang APOGEE RV-standards catalog only if we have to.
if any(["huang" in figure_name.lower() for figure_name in MAKE_FIGURES]):

    huang = Table.read(os.path.join(
        BASE_PATH, "data", "huang-apogee-rv-standards-xm-gaia.fits"))
    # remove duplicates: keep the first row of each source_id group.
    huang = huang.group_by("source_id")
    huang = huang[huang.groups.indices[:-1]]
    assert len(set(huang["source_id"])) == len(huang)

    vl_huang_ids, huang_ids = cross_match(velociraptor["source_id"],
                                          huang["source_id"])

figure_name = "rv_huang_hist"
# Disabled variant of this figure, kept verbatim for reference.
"""
if figure_name in MAKE_FIGURES:

    vl_huang_subset = velociraptor[vl_huang_ids]
    K_est, K_est_err = estimate_K(vl_huang_subset)

    fig, axes = plt.subplots(2, 1, figsize=(4, 8))
    #v = log_K_significance[vl_huang_ids]
    #sb9_v = log_K_significance[vl_sb9_ids]
    #axes[0].hist(v[np.isfinite(v)], bins=100, normed=True, alpha=0.5)
    #axes[0].hist(sb9_v[np.isfinite(sb9_v)], bins=100, facecolor="tab:green", alpha=0.5, normed=True)
    #axes[0].set_xlabel(r"$\log_{10}\left[\sigma(\textrm{V}_\textrm{R}^{t}) - \mu_s\right] - \log_{10}{\sigma_s}$")
    #axes[0].set_yticks([])

    axes[1].scatter(vl, K_est, s=10, rasterized=True)
    axes[1].errorbar(v, K_est,
                     yerr=np.min([K_est_err, K_est - 1e-4], axis=0),
                     fmt="none", ecolor="#CCCCCC", zorder=-1,
                     linewidth=1, capsize=0, rasterized=True)
    axes[1].set_ylabel(r"$K_{est} \textrm{ / km\,s}^{-1}$")
    axes[1].set_xlabel(r"$\log_{10}\left[\sigma(\textrm{V}_\textrm{R}^{t}) - \mu_s\right] - \log_{10}{\sigma_s}$")

    for ax in np.array(axes).flatten():
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()

    notable = (vl_huang_subset["rv_p50"] < 0.6) \
            * (K_est > 0)

    savefig(fig, figure_name)
"""

# Figure: single-star probability distribution for the Huang RV standards,
# and estimated K versus that probability.
figure_name = "rv_huang_hist2"
if figure_name in MAKE_FIGURES:

    vl_huang_subset = velociraptor[vl_huang_ids]
    K_est, K_est_err = estimate_K(vl_huang_subset)

    fig, axes = plt.subplots(2, 1, figsize=(4, 8))
    v = vl_huang_subset["rv_p50"]
    # FIX: `normed` was deprecated and then removed from matplotlib's hist();
    # `density=True` is the equivalent, supported keyword.
    axes[0].hist(v[np.isfinite(v)], bins=100, density=True, alpha=0.5)
    axes[0].set_xlabel(latex_labels["rv_p50"])
    axes[0].set_yticks([])

    axes[1].scatter(v, K_est, s=10, rasterized=True)
    # Clip the lower error bar so it cannot extend below ~zero.
    axes[1].errorbar(v, K_est,
                     yerr=np.min([K_est_err, K_est - 1e-4], axis=0),
                     fmt="none", ecolor="#CCCCCC", zorder=-1,
                     linewidth=1, capsize=0, rasterized=True)
    axes[1].set_ylabel(r"$K_{est} \textrm{ / km\,s}^{-1}$")
    # FIX: the x axis shows rv_p50 (see the scatter call above); the previous
    # label was copied from the log-sigma-significance figure and was wrong.
    axes[1].set_xlabel(latex_labels["rv_p50"])

    for ax in np.array(axes).flatten():
        ax.set_aspect(np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()

    # NOTE(review): `notable` is computed but never used below — appears to be
    # a leftover for interactive inspection.
    notable = (vl_huang_subset["rv_p50"] < 0.6) \
            * (K_est > 0)

    savefig(fig, figure_name)
50,294 | andycasey/velociraptor | refs/heads/master | /scripts/xmatch_soubiran_2013_with_gaia.py | from astropy.table import Table
from astroquery.gaia import Gaia
# Cross-match the Soubiran (2013) RV-standard catalog with Gaia DR2 by
# resolving each Hipparcos identifier through the hipparcos2 best-neighbour
# table. Unmatched stars are flagged with source_id = -1 and dropped.
soubiran_2013 = Table.read("data/Soubiran_2013.fits")

N = len(soubiran_2013)

# Get gaia source IDs and we can do the cross-match from there.
source_ids = []
for i, hip in enumerate(soubiran_2013["HIP"]):

    job = Gaia.launch_job("""
        SELECT gaia.source_id
        FROM gaiadr2.gaia_source AS gaia,
             gaiadr2.hipparcos2_best_neighbour AS hip
        WHERE hip.original_ext_source_id = '{:.0f}'
        AND hip.source_id = gaia.source_id
        """.format(hip))
    results = job.get_results()
    if len(results) == 0:
        source_id = -1
    else:
        source_id = results["source_id"][0]

    print("{}/{}: HIP {} = Gaia DR2 {}".format(i, N, hip, source_id))
    source_ids.append(source_id)

import numpy as np
soubiran_2013["source_id"] = np.array(source_ids, dtype=np.int64)

# FIX: np.isfinite() is always True for integer arrays, so the original mask
# never removed the unmatched (-1) entries. Keep only real Gaia source_ids.
OK = soubiran_2013["source_id"] > 0

soubiran_2013 = soubiran_2013[OK]
soubiran_2013.write("data/Soubiran_2013-xm-Gaia.fits", overwrite=True)
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,295 | andycasey/velociraptor | refs/heads/master | /run_probabilities3.py |
"""
Calculate probability distributions.
"""
import h5py
import logging
import multiprocessing as mp
import numpy as np
import sys
import pickle
import yaml
import warnings
from tqdm import tqdm
from collections import OrderedDict
from astropy.io import fits
from scipy import (special, stats)
import npm_utils as npm
if __name__ == "__main__":

    # Usage: python run_probabilities3.py <data.hdf5> <config.meta>
    data_path = sys.argv[1]
    meta_path = sys.argv[2]

    if not data_path.endswith(".hdf5"):
        raise ValueError("data_path should be a .hdf5 file")

    if not meta_path.endswith(".meta"):
        raise ValueError("meta_path should be a .meta file")

    # Load meta (YAML describing models, kd-tree labels, bounds, etc).
    with open(meta_path, "r") as fp:
        meta = yaml.load(fp)

    # Load data. Opened read-write: per-source columns are cached below.
    data = h5py.File(data_path, "r+")
    model_names = list(data["models"].keys())
    predictor_label_names = [meta["models"][mn]["predictor_label_name"] \
        for mn in model_names]

    # Union of all labels needed across models, for the finite-row mask.
    all_label_names = [] + predictor_label_names
    for model_name, model_config in meta["models"].items():
        all_label_names.extend(model_config["kdtree_label_names"])
    all_label_names = list(np.unique(all_label_names))

    # Load the predictors that we need. Rows with any non-finite label are
    # dropped everywhere, so indices stay aligned between Y and the HDF5 file.
    with fits.open(meta["data_path"]) as image:
        finite = np.all([np.isfinite(image[1].data[ln]) for ln in all_label_names], axis=0)
        Y = np.array([image[1].data[pln] for pln in predictor_label_names]).T[finite]

        # Store relevant data (only once; skipped on re-runs).
        for label_name in all_label_names + ["source_id"]:
            if label_name not in data:
                data.create_dataset(label_name, data=np.array(image[1].data[label_name])[finite])

    # Close the handle: the chunked loops below re-open the file per chunk.
    data.close()
    del data

    # K draws from the GP posterior per source; N sources, M predictor columns.
    K = meta.get("number_of_draws", 10)
    N, M = Y.shape
    # NOTE(review): enumerate over model_names[1:] pairs Y[:, 0] with the
    # *second* model name — this looks like a resume/skip hack; confirm the
    # column/model alignment is intended before re-running from scratch.
    for m, model_name in enumerate(model_names[1:]):

        print(f"Calculating probabilities for {model_name} with K = {K}")

        def _init_worker(gp_predictions_, y_, rhos_, bounds_):
            # Pool initializer: stash shared read-only arrays as globals in
            # each worker process.
            global gp_predictions, y, rhos, bounds
            gp_predictions, y, rhos, bounds = (gp_predictions_, y_, rhos_, bounds_)
            return None

        def _get_lnprob(n):
            # Worker task for source n: draw K parameter vectors from the GP
            # posterior and evaluate the two-component (single/multiple)
            # mixture log-probabilities of the observed scatter y[n].
            # Returns (n, lnprob array of shape (K, 2), p_single of shape (K,)),
            # or sentinel values (-1e25, 0.5) when inputs are non-finite.
            gp_predictions_, y_ = gp_predictions[n], y[n]
            if not np.all(np.isfinite(gp_predictions_)) or not np.isfinite(y_):
                return (n, -1e25, 0.5)

            # gp_predictions interleaves (mean, variance) per parameter.
            mu = gp_predictions_[::2]
            diag = np.atleast_2d(gp_predictions_[1::2])**0.5
            # Covariance from per-parameter std devs and the empirical
            # correlation matrix `rhos` of the mixture-model results.
            cov = diag * rhos * diag.T

            try:
                draws = np.random.multivariate_normal(mu, cov, K).T
            except ValueError:
                print(f"ERROR ON {n}, {mu}, {cov}, {diag}, {rhos}")
                return (n, -1e25, 0.5)

            # Clip to bounded values.
            parameter_names = ("theta",
                               "mu_single", "sigma_single",
                               "mu_multiple", "sigma_multiple")
            for i, parameter_name in enumerate(parameter_names):
                try:
                    l, u = bounds[parameter_name]
                except KeyError:
                    continue
                else:
                    draws[i] = np.clip(draws[i], l, u)

            # Keep the multiple-star mean above the single-star envelope so
            # the components cannot swap.
            draws_3_min = np.log(draws[1] + 5 * draws[2]) + draws[4]**2
            draws[3] = np.max([draws[3], draws_3_min], axis=0)

            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # Column 0: single-star (normal); column 1: multiple (lognormal).
                lnprob_ = np.vstack([
                    np.log(draws[0]) + npm.normal_lpdf(y_, draws[1], draws[2]),
                    np.log(1 - draws[0]) + npm.lognormal_lpdf(y_, draws[3], draws[4])
                ]).T

            lnprob_[~np.isfinite(lnprob_)] = -1e25
            p_single_ = np.exp(lnprob_[:, 0] - special.logsumexp(lnprob_, axis=1))

            # TODO HACK: sources below the single-star mean are forced to
            # p_single = 1 regardless of the mixture evaluation.
            is_def_single = y_ < draws[1]
            p_single_[is_def_single] = 1.0

            return (n, lnprob_, p_single_)

        # Process sources in chunks to bound memory use.
        chunk_size = 1000000
        chunks = int(np.ceil(N/chunk_size))
        print(f"Using chunk size of {chunk_size} (expect {chunks} chunks)")
        with tqdm(total=N) as pbar:
            for i in range(chunks):
                start, end = (chunk_size * i, chunk_size * (i + 1))

                # Re-open the HDF5 file per chunk so results are flushed to
                # disk incrementally.
                with h5py.File(data_path, "r+") as data:
                    group = data["models"][model_name]
                    # Create output datasets on the first chunk only.
                    if i == 0:
                        if "lnprob" not in group:
                            group.create_dataset("lnprob", shape=(N, K, 2), dtype=float)
                        if "p_single" not in group:
                            group.create_dataset("p_single", shape=(N, K), dtype=float)
                        if "classification" not in group:
                            group.create_dataset("classification", shape=(N, ), dtype=int)
                        if "confidence" not in group:
                            group.create_dataset("confidence", shape=(N, ), dtype=float)

                    # Shared inputs handed to each worker via the initializer.
                    initargs = (
                        group["gp_predictions"],
                        Y[:, m],
                        np.corrcoef(np.transpose(group["mixture_model_results"])),
                        meta["models"][model_name]["bounds"],
                    )

                    # Last chunk may be short; recompute the true end index.
                    C = min(N - chunk_size * i, chunk_size)
                    end = start + C

                    # Disk-backed scratch buffers for this chunk's results.
                    lnprob_tmp = np.memmap(
                        "lnprob.tmp",
                        mode="w+", dtype=float,
                        shape=(C, K, 2))
                    p_single_tmp = np.memmap(
                        "p_single.tmp",
                        mode="w+", dtype=float,
                        shape=(C, K))

                    with mp.Pool(initializer=_init_worker, initargs=initargs) as p:
                        # Results arrive unordered; index by (n - start).
                        for n, lnp, ps in p.imap_unordered(_get_lnprob, range(start, end)):
                            lnprob_tmp[n - start] = lnp
                            p_single_tmp[n - start] = ps
                            pbar.update(1)

                    lnprob_tmp.flush()
                    p_single_tmp.flush()

                    # Fraction of posterior draws that classify as single.
                    confidence = np.sum(p_single_tmp > 0.5, axis=1)/K
                    assert confidence.size == C

                    group["lnprob"][start:end] = lnprob_tmp
                    group["p_single"][start:end] = p_single_tmp
                    group["classification"][start:end] = np.round(confidence).astype(int)
                    group["confidence"][start:end] = confidence

                    del lnprob_tmp, p_single_tmp, confidence
    # Smaller chunks here: per-chunk work is just array sums, not a Pool.
    chunk_size = 100000
    chunks = int(np.ceil(N/chunk_size))

    # Calculate joint probabilities by summing the per-model lnprobs (i.e.
    # treating the models as independent).
    print(f"Calculating joint probabilities")
    with tqdm(total=N) as pbar:
        for i in range(chunks):
            start, end = (chunk_size * i, chunk_size * (i + 1))
            C = min(N - chunk_size * i, chunk_size)
            end = start + C

            with h5py.File(data_path, "r+") as data:
                # Create joint output datasets on the first chunk only.
                if i == 0:
                    if "joint_p_single" not in data:
                        data.create_dataset("joint_p_single", shape=(N, K), dtype=float)
                    if "joint_classification" not in data:
                        data.create_dataset("joint_classification", shape=(N,), dtype=int)
                    if "joint_confidence" not in data:
                        data.create_dataset("joint_confidence", shape=(N, ), dtype=float)

                # Get lnprobs summed over every model for this chunk.
                lnprobs = np.sum(
                    [data["models"][model_name]["lnprob"][start:end] \
                        for model_name in model_names], axis=0)

                # Normalise over the two mixture components per draw.
                joint_p_single = np.exp(lnprobs[:, :, 0] - special.logsumexp(lnprobs, axis=-1))
                data["joint_p_single"][start:end] = joint_p_single

                confidence = np.sum(joint_p_single > 0.5, axis=1)/K
                data["joint_classification"][start:end] = np.round(confidence).astype(int)
                data["joint_confidence"][start:end] = confidence

                pbar.update(C)
50,296 | andycasey/velociraptor | refs/heads/master | /attic/plot_sb2.py |
"""
Selection and analysis of SB2 type binary stars.
See notebooks/sb2.ipynb for inspiration.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from astropy.io import fits
import velociraptor
from mpl_utils import mpl_style
plt.style.use(mpl_style)
# Gaia source table used throughout this script (module-level global).
data = fits.open("data/gaia-sources-for-npm.fits")[1].data

# Apply ranges: the colour/magnitude window in which Gaia's RV pipeline is
# otherwise near-complete, so a missing RV is informative.
valid_ranges = dict(
    bp_rp=(1.2, 2.3),
    phot_rp_mean_mag=(10, 12.2))
def get_rv_completeness(x, semilogx, equidensity, N_bins,
                        x_min=None, x_max=None):
    """
    Compute the fraction of sources with a measured Gaia radial velocity,
    binned along the source property ``x``. Reads the module-level ``data``
    table for the radial velocities.

    :param x:
        Array of the source property to bin along (aligned with ``data``).
    :param semilogx:
        If True, construct bins uniformly in log10(x).
    :param equidensity:
        If True, place bin edges at percentiles of x so each bin holds
        approximately the same number of sources.
    :param N_bins:
        Number of bin edges.
    :param x_min: [optional]
        Lower limit on x (defaults to the finite minimum).
    :param x_max: [optional]
        Upper limit on x (defaults to the finite maximum).

    :returns:
        A five-tuple of (bin edges, completeness fraction per bin, error on
        the fraction, numerator counts, denominator counts).
    """
    y = data["radial_velocity"]

    x_finite = np.isfinite(x)
    y_finite = np.isfinite(y)

    if x_min is None:
        x_min = np.min(x[x_finite])
    if x_max is None:
        x_max = np.max(x[x_finite])

    # Restrict both masks to the requested x range.
    mask = (x_max >= x) * (x >= x_min)
    x_finite *= mask
    y_finite *= mask

    p = np.linspace(0, 100, N_bins)
    if equidensity and semilogx:
        bins = 10**np.percentile(np.log10(x[x_finite]), p)
    elif equidensity and not semilogx:
        bins = np.percentile(x[x_finite], p)
    elif not equidensity and semilogx:
        bins = np.logspace(np.log10(x_min), np.log10(x_max), N_bins)
    elif not equidensity and not semilogx:
        bins = np.linspace(x_min, x_max, N_bins)

    numerator, _ = np.histogram(x[x_finite * y_finite], bins=bins)
    denominator, _ = np.histogram(x[x_finite], bins=bins)

    f = numerator/denominator.astype(float)

    # FIX: the previous estimate divided by the bin width and added the
    # numerator and denominator Poisson terms in quadrature (flagged as
    # "pretty sure this has to be wrong" by the original author). Since the
    # numerator is a subset of the denominator the counts are correlated;
    # the binomial standard error on the fraction is the appropriate form.
    f_err = np.sqrt(f * (1 - f) / denominator)

    return (bins, f, f_err, numerator, denominator)
def plot_rv_completeness(x, latex_label_name, semilogx, equidensity, N_bins,
                         ax=None, title=None, x_min=None, x_max=None,
                         valid_xrange=None, snap_xrange_to_nearest_bin_edge=False,
                         **kwargs):
    """
    Plot the RV completeness fraction (from :func:`get_rv_completeness`) as a
    stepped line with error bars, optionally shading a "valid" x range.

    :param x: source property to bin along.
    :param latex_label_name: x-axis label.
    :param semilogx / equidensity / N_bins / x_min / x_max:
        passed through to :func:`get_rv_completeness`.
    :param ax: [optional] existing axes to draw on.
    :param title: [optional] unused here — NOTE(review): confirm whether a
        title was intended to be set.
    :param valid_xrange: [optional] (lower, upper) range to shade.
    :param snap_xrange_to_nearest_bin_edge: [optional]
        snap the shaded range to the enclosing bin edges.

    :returns: the matplotlib figure.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    else:
        fig = ax.figure

    bins, f, f_err, num, den = get_rv_completeness(
        x, semilogx=semilogx, equidensity=equidensity,
        N_bins=N_bins, x_min=x_min, x_max=x_max)

    # Duplicate interior edges so the fraction draws as flat steps.
    x = np.hstack([bins[0], np.repeat(bins[1:-1], 2), bins[-1]])
    x_mid = bins[:-1] + 0.5 * np.diff(bins)
    y = np.array(f).repeat(2)

    kwds = dict(linestyle="-", marker="None")
    kwds.update(kwargs)

    line = ax.plot(x, y, **kwds)
    ax.errorbar(x_mid, f, yerr=f_err, fmt="none", c=line[0].get_color())

    ax.set_ylabel(r"\textrm{fraction of sources with radial velocity}")
    ax.set_xlabel(latex_label_name)

    if semilogx:
        ax.semilogx()

    if valid_xrange is not None:
        lower, upper = valid_xrange
        if snap_xrange_to_nearest_bin_edge:
            # Shrink the shaded span to whole bins inside the given range.
            mask = (bins >= lower) * (bins <= upper)
            lower = bins[mask][0]
            upper = bins[mask][-1]
            print("Updated valid range for {} is ({:.2f}, {:.2f})".format(
                latex_label_name, lower, upper))

        ax.axvspan(lower, upper, facecolor="#dddddd", edgecolor="None", zorder=-1)

    if x_min is not None:
        ax.set_xlim(x_min, ax.get_xlim()[1])
    if x_max is not None:
        ax.set_xlim(ax.get_xlim()[0], x_max)

    return fig
# Figure: RV completeness vs colour and apparent magnitude, with the chosen
# SB2 selection window shaded, then build the SB2 candidate mask.
label_names = ("bp_rp", "phot_rp_mean_mag")
latex_label_names = dict(bp_rp=r"\textrm{bp - rp}",
                         phot_rp_mean_mag=r"\textrm{apparent rp magnitude}")

K = len(label_names)
fig, axes = plt.subplots(1, K, figsize=(10, 5))

common_kwds = dict(N_bins=30, semilogx=False, equidensity=False,
                   snap_xrange_to_nearest_bin_edge=True)

plot_rv_completeness(
    data["bp_rp"],
    latex_label_name=r"\textrm{bp - rp}",
    x_min=0, x_max=4, valid_xrange=valid_ranges["bp_rp"],
    ax=axes[0], **common_kwds)

plot_rv_completeness(
    data["phot_rp_mean_mag"],
    latex_label_name=r"\textrm{apparent rp magnitude}",
    x_min=6, x_max=12.4, valid_xrange=valid_ranges["phot_rp_mean_mag"],
    ax=axes[1], **common_kwds)

for ax in axes:
    ax.xaxis.set_major_locator(MaxNLocator(7))
    ax.yaxis.set_major_locator(MaxNLocator(5))
    ax.set_ylim(-0.05, 1.05)

fig.tight_layout()
fig.savefig("figures/sb2_rvs_completeness.png", dpi=150)
fig.savefig("figures/sb2_rvs_completeness.pdf", dpi=300)

# Boolean mask: sources inside the (bp_rp, phot_rp_mean_mag) SB2 window.
in_sb2_source_parameter_range = np.ones(len(data), dtype=bool)
for label_name, (lower_value, upper_value) in valid_ranges.items():
    if lower_value is not None:
        in_sb2_source_parameter_range *= (data[label_name] >= lower_value)
    if upper_value is not None:
        in_sb2_source_parameter_range *= (upper_value >= data[label_name])

finite_rv = np.isfinite(data["radial_velocity"])
# SB2 candidates: in the window where an RV is expected, yet none reported.
is_sb2 = in_sb2_source_parameter_range * ~finite_rv

N_in_sb2_sp_range = sum(in_sb2_source_parameter_range)
N_is_sb2 = sum(is_sb2)

# NOTE(review): "Numer" typo in the user-facing string below (left as-is).
print("""
Total sources in valid SB2 range: {0:.0f}
Numer of sources in that range without an RV: {1:.0f}
""".format(N_in_sb2_sp_range, N_is_sb2))
# Plot the source property distributions of the SB2 candidate systems relative to a control sample.
# For each SB2 candidate, we need to find the closest star in (bp - rp, apparent rp mag, absolute rp mag)
absolute_rp_mag = data["phot_rp_mean_mag"] + 5 * np.log10(data["parallax"]/100.0)

X = np.vstack([
    data["bp_rp"],
    data["phot_rp_mean_mag"],
    absolute_rp_mag
]).T

# Build a k-d tree using the stars that are NOT SB2s.
# We can query this in parameter space that we care about to get the clsoest non-SB2.
import npm_utils as npm

finite = np.all(np.isfinite(X), axis=1)
kdt_indices = np.where(finite * ~is_sb2)[0]

kdt, scale, offset = npm.build_kdtree(X[kdt_indices],
                                      relative_scales=[0.1, 1.0, 1.0])

# NOTE(review): kdt_kwds is built but never used below — presumably leftover
# from an earlier query API.
kdt_kwds = dict(offset=offset, scale=scale,
                minimum_points=1, maximum_points=1)

sb2_indices = np.arange(len(data))[is_sb2]
# NaN marks candidates for which no control match was found.
control_indices = np.nan * np.ones_like(sb2_indices)

K = sb2_indices.size

import tqdm
for i, index in tqdm.tqdm(enumerate(sb2_indices), total=K):
    try:
        indices_returned = list(kdt_indices[kdt.query(X[[index]], 1, return_distance=False)][0])
    except ValueError:
        continue
    control_indices[i] = indices_returned[0]

# Keep only candidate/control pairs where a match succeeded.
comp_subset = np.isfinite(control_indices)
subset_sb2_indices = sb2_indices[comp_subset]
subset_cnt_indices = control_indices[comp_subset].astype(int)

# Gaia astrometric unit weight error: u = sqrt(chi2 / (n_obs - 5)).
astrometric_unit_weight_error = np.sqrt(data["astrometric_chi2_al"]/(data["astrometric_n_obs_al"] - 5))

from mpl_utils import plot_histogram_steps

fig, axes = plt.subplots(3, 1, figsize=(3.4, 10))

B = 50
bins = np.linspace(0.5, 3, B)

# Shared plot styling for the two-sample histograms below.
sb2_color, control_color = ("tab:red", "#666666")
sb2_label, control_label = (r"\textrm{SB2 candidates}", r"\textrm{control sample}")
fill_alpha = 0.3
def plot_twosamples(ax, bins, control_sample, sb2_sample):
    """
    Overplot stepped histograms of the control sample and the SB2 candidate
    sample on ``ax``, each with sqrt(N) Poisson error bars and a translucent
    filled underlay. Colours/labels come from the module-level
    ``control_color``/``sb2_color`` etc.
    """
    x = bins[:-1] + 0.5 * np.diff(bins)

    def _plot_sample(sample, color, label):
        # One histogram: stepped line, Poisson error bars, filled underlay.
        counts = np.histogram(sample, bins=bins)[0]
        ax.plot(x, counts, "-",
                c=color, drawstyle="steps-mid", label=label)
        # FIX: `fmt=None` is not accepted by modern matplotlib errorbar();
        # the documented way to suppress the data line/marker is fmt="none".
        ax.errorbar(x, counts, yerr=np.sqrt(counts),
                    c=color, fmt="none", ecolor=color)

        # Expand bin centres into step coordinates for the filled region.
        xx = np.array(x).repeat(2)[1:]
        xstep = np.repeat((x[1:] - x[:-1]), 2)
        xstep = np.concatenate(([xstep[0]], xstep, [xstep[-1]]))
        # Now: add one step at end of row.
        xx = np.append(xx, xx.max() + xstep[-1]) - xstep/2.0
        yy = np.array(counts).repeat(2)
        ax.fill_between(xx, 0, yy, facecolor=color, alpha=fill_alpha)

    # The original duplicated this logic verbatim per sample; factored out.
    _plot_sample(control_sample, control_color, control_label)
    _plot_sample(sb2_sample, sb2_color, sb2_label)
# Panel (a): astrometric unit weight error, candidates vs control.
plot_twosamples(axes[0], bins,
                astrometric_unit_weight_error[subset_cnt_indices],
                astrometric_unit_weight_error[subset_sb2_indices])

axes[0].set_xlabel(r"\textrm{astrometric unit weight error}")#$u = (\chi_{al}^2/\nu)^{1/2}$")
axes[0].set_ylabel(r"\textrm{count / $10^{4}$}")
axes[0].legend(frameon=False)
def do_yticks(ax, step=10000):
    """Set y ticks every `step` counts and relabel them in units of 10^4."""
    tick_values = np.arange(0, 10 * step, step)
    ax.set_yticks(tick_values)
    ax.set_ylim(0, tick_values[-1])
    # NOTE(review): labels always divide by 10000 regardless of `step`;
    # confirm before calling with a different step size.
    scaled = (tick_values / 10000).astype(int)
    labels = [r"${:.0f}$".format(value) for value in scaled]
    ax.set_yticklabels(labels)
    return tick_values
do_yticks(axes[0])
axes[0].xaxis.set_major_locator(MaxNLocator(6))

# Disabled hist() variant of panel (b), kept verbatim for reference.
"""
axes[1].hist(data["phot_bp_rp_excess_factor"][subset_cnt_indices],
    facecolor=control_color, edgecolor=control_color, label=control_label, **hist_kwds)
axes[1].hist(data["phot_bp_rp_excess_factor"][subset_sb2_indices],
    facecolor=sb2_color, edgecolor=sb2_color, label=sb2_label, **hist_kwds)
"""

# Panel (b): BP/RP flux excess factor, candidates vs control.
plot_twosamples(axes[1], np.linspace(1.2, 1.5, B),
                data["phot_bp_rp_excess_factor"][subset_cnt_indices],
                data["phot_bp_rp_excess_factor"][subset_sb2_indices])

axes[1].set_xlabel(r"\textrm{phot bp - rp excess factor}")
axes[1].set_ylabel(r"\textrm{count / $10^4$}")
#axes[1].set_ylim(0, 10000
axes[1].xaxis.set_major_locator(MaxNLocator(4))
axes[1].legend(frameon=False)
do_yticks(axes[1])

# Photometric variability proxy built from the G-band flux uncertainty.
phot_g_variability = np.log10(np.sqrt(data["astrometric_n_good_obs_al"]) \
    * data["phot_g_mean_flux_error"] / data["phot_g_mean_flux"])

# Panel (c): photometric variability, candidates vs control.
plot_twosamples(axes[2], np.linspace(-3, -1, B),
                phot_g_variability[subset_cnt_indices],
                phot_g_variability[subset_sb2_indices])
axes[2].set_xlabel(r"$\log_{10}\left(\textrm{photometric variability}\right)$")
axes[2].set_ylabel(r"\textrm{count / $10^4$}")
axes[2].xaxis.set_major_locator(MaxNLocator(6))
do_yticks(axes[2])
axes[2].legend(frameon=False)

fig.tight_layout()

# Tag each panel (a), (b), (c) in its top-left corner.
offset = 0.05
for axlabel, ax in zip("abc", axes):
    ax.text(offset, 1 - offset, r"\textrm{{({0})}}".format(axlabel), transform=ax.transAxes,
            horizontalalignment="left", verticalalignment="top")

fig.tight_layout()
fig.savefig("figures/sb2_histograms.png", dpi=150)
fig.savefig("figures/sb2_histograms.pdf", dpi=300)
# Plot the completeness fraction across the H-R diagram.
def plot_density_fraction(x, y, N_bins=150,
                          min_points_per_bin=5, x_min=None, x_max=None,
                          y_min=None, y_max=None, ax=None, xlabel=None,
                          ylabel=None, figsize=(8, 8), colorbar=True,
                          log=False,
                          mask=None, **kwargs):
    """
    Show, per 2-D bin in (x, y), the fraction of sources with *no* radial
    velocity. Uses the module-level ``finite_rv`` mask for the numerator.

    :param x, y: source properties for the two binning axes.
    :param N_bins: number of bins (int or (nx, ny) tuple).
    :param min_points_per_bin: bins with fewer total sources show as NaN.
    :param x_min/x_max/y_min/y_max: [optional] range cuts applied up front.
    :param ax: [optional] existing axes to draw on.
    :param log: if True, display log(1 + fraction) instead.
    :param mask: [optional] extra boolean selection on sources.
    :param kwargs: forwarded to ``imshow``.

    :returns: the matplotlib figure.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    else:
        fig = ax.figure

    finite = np.isfinite(x * y)
    if x_min is not None:
        finite *= (x >= x_min)
    if x_max is not None:
        finite *= (x_max >= x)
    if y_min is not None:
        finite *= (y >= y_min)
    if y_max is not None:
        finite *= (y_max >= y)

    # Denominator: all selected sources; numerator: those without an RV.
    # (den aliases finite; *= mutates the same local array, which is fine.)
    den = finite
    if mask is not None:
        den *= mask
    num = (~finite_rv) * den

    H_all, xedges, yedges = np.histogram2d(x[den], y[den],
                                           bins=N_bins)
    H_bin, _, __ = np.histogram2d(x[num], y[num],
                                  bins=(xedges, yedges))

    H = H_bin/H_all.astype(float)
    H[H_all < min_points_per_bin] = np.nan

    if log:
        H = np.log(1 + H)

    print(np.nanmin(H), np.nanmax(H))

    kwds = dict(
        aspect=np.ptp(xedges)/np.ptp(yedges),
        extent=(xedges[0], xedges[-1], yedges[-1], yedges[0]),
        cmap="inferno",
    )
    kwds.update(kwargs)
    image = ax.imshow(H.T, **kwds)

    if xlabel is not None:
        ax.set_xlabel(xlabel)

    if ylabel is not None:
        ax.set_ylabel(ylabel)

    if colorbar:
        cax = fig.add_axes([0.90, 0.125, 0.05, 0.75])
        cbar = plt.colorbar(image, cax=cax, orientation="vertical")
        cbar.set_label(r"\textrm{fraction of stars without radial velocity}")

    return fig
# Figure: on-sky (galactic l, b) maps of the no-RV fraction, for all sources
# (top) and for sources inside the SB2 selection window (bottom).
fig, axes = plt.subplots(2, 1, figsize=(12, 7.5))
B = 100
kwds = dict(x=data["l"], y=data["b"], N_bins=(2 * B, B), aspect=0.5,
            vmin=0, vmax=0.5, colorbar=False, cmap="Greys",
            xlabel=r"$l$\textrm{ / deg}", ylabel=r"$b$\textrm{ / deg}")

plot_density_fraction(ax=axes[0], mask=None, **kwds)
plot_density_fraction(ax=axes[1], mask=in_sb2_source_parameter_range, **kwds)

for ax in axes:
    ax.set_xticks(np.arange(0, 361, 45).astype(int))

fig.tight_layout()
fig.savefig("figures/sb2_sky_structure.png", dpi=150)
fig.savefig("figures/sb2_sky_structure.pdf", dpi=300)
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,297 | andycasey/velociraptor | refs/heads/master | /article/figures/mpl_utils.py |
# A matplotlib style based on the gala package by @adrn:
# github.com/adrn/gala
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic_2d
# rcParams overrides applied via plt.style.use(mpl_style) by the figure
# scripts; requires a working LaTeX installation (text.usetex).
# NOTE(review): some keys here were removed in later matplotlib releases
# (e.g. 'text.latex.preview') — confirm against the pinned matplotlib version.
mpl_style = {

    # Lines
    'lines.linewidth': 1.7,
    'lines.antialiased': True,
    'lines.marker': '.',
    'lines.markersize': 5.,

    # Patches
    'patch.linewidth': 1.0,
    'patch.facecolor': '#348ABD',
    'patch.edgecolor': '#CCCCCC',
    'patch.antialiased': True,

    # images
    'image.origin': 'upper',

    # colormap
    'image.cmap': 'viridis',

    # Font
    'font.size': 12.0,
    'text.usetex': True,
    'text.latex.preamble': r'\usepackage{amsmath}',
    'text.latex.preview': True,
    'axes.unicode_minus': False,

    # Axes
    'axes.facecolor': '#FFFFFF',
    'axes.edgecolor': '#333333',
    'axes.linewidth': 1.0,
    'axes.grid': False,
    'axes.titlesize': 'x-large',
    'axes.labelsize': 'large',
    'axes.labelcolor': 'k',
    'axes.axisbelow': True,

    # Ticks
    'xtick.major.size': 8,
    'xtick.minor.size': 4,
    'xtick.major.pad': 6,
    'xtick.minor.pad': 6,
    'xtick.color': '#333333',
    'xtick.direction': 'in',
    'ytick.major.size': 8,
    'ytick.minor.size': 4,
    'ytick.major.pad': 6,
    'ytick.minor.pad': 6,
    'ytick.color': '#333333',
    'ytick.direction': 'in',
    'xtick.labelsize': 'medium',
    'ytick.labelsize': 'medium',

    # Legend
    'legend.fancybox': True,
    'legend.loc': 'best',

    # Figure
    'figure.figsize': [6, 6],
    'figure.facecolor': '1.0',
    'figure.edgecolor': '0.50',
    'figure.subplot.hspace': 0.5,

    # Other
    'savefig.dpi': 300,
}
def plot_histogram_steps(ax, x_bins, y, y_err, **kwargs):
    """
    Draw ``y`` as a stepped histogram line on ``ax`` with error bars at the
    bin positions, and return the expanded step coordinates ``(x, y)``.
    """
    # Duplicate each bin position/value so the curve renders as flat steps.
    step_x = np.array(x_bins).repeat(2)[1:]
    widths = np.repeat(x_bins[1:] - x_bins[:-1], 2)
    widths = np.concatenate(([widths[0]], widths, [widths[-1]]))

    # Append one closing step on the right, then shift everything left by
    # half a width so the steps are centred on the bin positions.
    step_x = np.append(step_x, step_x.max() + widths[-1]) - widths / 2.0
    step_y = np.array(y).repeat(2)

    line = ax.plot(step_x, step_y, '-', **kwargs)
    ax.errorbar(x_bins, y, y_err, fmt="none", capsize=0,
                ecolor=line[0].get_color())

    return (step_x, step_y)
def plot_binned_statistic(x, y, z, bins=100, function=np.nanmedian,
                          xlabel=None, ylabel=None, zlabel=None,
                          ax=None, colorbar=False, figsize=(8, 8),
                          vmin=None, vmax=None, min_entries_per_bin=None,
                          subsample=None, mask=None, **kwargs):
    """
    Image a 2-D binned statistic of ``z`` over the (x, y) plane.

    :param x, y, z: aligned data arrays.
    :param bins: bin specification passed to scipy's binned_statistic_2d.
    :param function: statistic (callable or scipy name such as "count").
    :param ax: [optional] existing axes to draw on.
    :param colorbar: if True, attach a colorbar (labelled with ``zlabel``).
    :param vmin/vmax: [optional] colour limits; default to the 16th/84th
        percentiles of the binned statistic.
    :param min_entries_per_bin: [optional] blank bins with fewer entries.
    :param subsample: [optional] number (or, if < 1, fraction) of the finite
        points to randomly subsample before binning.
    :param mask: [optional] extra boolean selection on sources.
    :param kwargs: forwarded to ``imshow``.

    :returns: the matplotlib figure.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    else:
        fig = ax.figure

    finite = np.isfinite(x * y * z)
    if mask is not None:
        finite *= mask

    if subsample is not None:
        idx = np.where(finite)[0]
        if subsample < 1:
            subsample *= idx.size
        # After this branch `finite` is an *index* array, not a boolean mask;
        # fancy indexing below works the same either way.
        if int(subsample) > idx.size:
            finite = idx
        else:
            finite = np.random.choice(idx, int(subsample), replace=False)

    H, xedges, yedges, binnumber = binned_statistic_2d(
        x[finite], y[finite], z[finite],
        statistic=function, bins=bins)

    if min_entries_per_bin is not None:
        # For "count" the statistic already *is* the per-bin count.
        if function != "count":
            H_count, _, __, ___ = binned_statistic_2d(
                x[finite], y[finite], z[finite],
                statistic="count", bins=bins)
        else:
            H_count = H

        H[H_count < min_entries_per_bin] = np.nan

    if vmin is None or vmax is None:
        vmin_default, med, vmax_default = np.nanpercentile(H, [16, 50, 84])
        if vmin is None:
            vmin = vmin_default
        if vmax is None:
            vmax = vmax_default

    imshow_kwds = dict(
        vmin=vmin, vmax=vmax,
        aspect=np.ptp(xedges)/np.ptp(yedges),
        extent=(xedges[0], xedges[-1], yedges[-1], yedges[0]),
        cmap="inferno")
    imshow_kwds.update(kwargs)

    # Transposed because histogram2d/binned_statistic_2d index as (x, y).
    image = ax.imshow(H.T, **imshow_kwds)
    if colorbar:
        cbar = plt.colorbar(image, ax=ax)
        if zlabel is not None:
            cbar.set_label(zlabel)

    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)

    #fig.tight_layout()

    return fig
50,298 | andycasey/velociraptor | refs/heads/master | /attic/validation_hrd.py |
"""
Validation of the model predictions by making H-R diagram plots.
"""
import numpy as np
import matplotlib.pyplot as plt
import velociraptor
from matplotlib.colors import LogNorm
from matplotlib.ticker import MaxNLocator
from astropy.table import Table
sources = Table.read("data/rv-all.fits")
is_binary = sources["p_sbx"] > 0.5
finite = np.isfinite(sources["bp_rp"] * sources["absolute_g_mag"]) \
* ((sources["parallax"]/sources["parallax_error"]) > 5) \
* np.isfinite(sources["radial_velocity"])
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
N_bins = 100
H_all, xedges, yedges = np.histogram2d(
sources["bp_rp"][finite],
sources["absolute_g_mag"][finite],
bins=(N_bins, N_bins))
H_bin, _, __ = np.histogram2d(
sources["bp_rp"][is_binary * finite],
sources["absolute_g_mag"][is_binary * finite],
bins=(xedges, yedges))
H = H_bin/H_all
H[H_all < 3] = np.nan
kwds = dict(
aspect=np.ptp(xedges)/np.ptp(yedges),
extent=(xedges[0], xedges[-1], yedges[-1], yedges[0]),
)
image = axes[0].imshow(H_all.T, norm=LogNorm(), cmap="Greys", **kwds)
image = axes[1].imshow(H.T, cmap="viridis", **kwds)
cax = fig.add_axes([0.825, 0.85, 0.125, 0.05])
cbar = plt.colorbar(image, cax=cax, orientation="horizontal")
cbar.set_ticks([0, 1])
cbar.set_label(r"\textrm{binary fraction}")
fig.tight_layout()
for ax in axes:
ax.set_xlabel(r"\textrm{bp-rp}")
ax.set_ylabel(r"\textrm{absolute G magnitude}")
ax.xaxis.set_major_locator(MaxNLocator(6))
ax.yaxis.set_major_locator(MaxNLocator(6))
axes[0].text(0.95, 0.95, r"$N = {0}$".format(sum(finite)),
transform=axes[0].transAxes,
horizontalalignment="right", verticalalignment="top")
fig.tight_layout()
fig.savefig("figures/validation-hrd-sbx.pdf", dpi=150) | {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,299 | andycasey/velociraptor | refs/heads/master | /attic/validation_sb9.py |
"""
Validation of the model predictions compared to other binary catalogues.
"""
import numpy as np
import matplotlib.pyplot as plt
import velociraptor
from astropy.table import Table
sources = Table.read("data/rv-all.fits")
unimodal_catalog = Table.read("data/sb9_matched_by_position.fits")
unimodal_indices = np.nan * np.ones(len(unimodal_catalog))
for i, source_id in enumerate(unimodal_catalog["source_id"]):
try:
unimodal_indices[i] = np.where(sources["source_id"] == int(source_id))[0][0]
except:
continue
finite = np.isfinite(unimodal_indices)
unimodal_catalog = unimodal_catalog[finite]
unimodal_sources = sources[unimodal_indices[finite].astype(int)]
scalar = 1.0 / (unimodal_catalog["Per"] * (1 - unimodal_catalog["e"]**2)**0.5)
x = unimodal_catalog["K1"] * scalar
y = unimodal_sources["excess_rv_sigma"] * scalar
c = unimodal_sources["p_sbx"]
fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
scat = ax.scatter(x, y, c=c, vmin=0, vmax=1, s=5, cmap="coolwarm_r")
ax.loglog()
ax.set_xlim(10**-4.5, 10**3)
ax.set_ylim(10**-4.5, 10**3)
cbar = plt.colorbar(scat)
cbar.set_label(r"\textrm{binary probability}")
ax.set_xlabel(r"${K}/{P\sqrt{1-e^2}}$")
ax.set_ylabel(r"${\sigma_\textrm{vrad excess}}/{P\sqrt{1-e^2}}$")
fig.tight_layout()
fig.savefig("figures/sb9-comparison.pdf", dpi=150)
x = unimodal_catalog["K1"]
y = unimodal_sources["excess_rv_sigma"]
c = unimodal_sources["p_sbx"]
fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
scat = ax.scatter(x, y, c=c, vmin=0, vmax=1, s=5, cmap="coolwarm_r")
ax.loglog()
ax.set_xlim(10**-0.5, 10**2.5)
ax.set_ylim(10**-0.5, 10**2.5)
cbar = plt.colorbar(scat)
cbar.set_label(r"\textrm{binary probability}")
ax.set_xlabel(r"$K$ $(\textrm{km\,s}^{-1})$")
ax.set_ylabel(r"$\sigma_\textrm{vrad excess}$ $(\textrm{km\,s}^{-1})$")
fig.tight_layout()
fig.savefig("figures/sb9-compare-K-log.pdf", dpi=150)
fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
scat = ax.scatter(x, y, c=c, vmin=0, vmax=1, s=5, cmap="coolwarm_r")
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
cbar = plt.colorbar(scat)
cbar.set_label(r"\textrm{binary probability}")
ax.set_xlabel(r"$K$ $(\textrm{km\,s}^{-1})$")
ax.set_ylabel(r"$\sigma_\textrm{vrad excess}$ $(\textrm{km\,s}^{-1})$")
fig.tight_layout()
fig.savefig("figures/sb9-compare-K.pdf", dpi=150)
is_binary = unimodal_sources["p_sbx"] > 0.5
bins = np.linspace(0, 3000, 50)
fig, ax = plt.subplots()
ax.hist(unimodal_catalog["Per"][is_binary], facecolor="b", bins=bins, alpha=0.5)
ax.hist(unimodal_catalog["Per"][~is_binary], facecolor="r", bins=bins, alpha=0.5)
ax.axvline(682, linestyle=":", c="#666666", lw=1)
ax.axvline(682 * 2, linestyle=":", c="#666666", lw=1)
ax.set_xlabel(r"\textrm{period} $(\textrm{days})$")
ax.set_ylabel(r"\textrm{count}")
fig.tight_layout()
fig.savefig("figures/sb9-period-distribution.pdf", dpi=150)
| {"/scripts/npm_run_elastic_ball_test.py": ["/npm_utils.py"], "/npm.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation.py": ["/velociraptor.py"], "/run_analysis.py": ["/npm_utils.py"], "/run_probabilities2.py": ["/npm_utils.py"], "/scripts/npm_run_soubiran_2013.py": ["/npm_utils.py"], "/run_probabilities3.py": ["/npm_utils.py"], "/attic/plot_sb2.py": ["/velociraptor.py", "/npm_utils.py"], "/attic/validation_hrd.py": ["/velociraptor.py"], "/attic/validation_sb9.py": ["/velociraptor.py"]} |
50,302 | thelahunginjeet/rpy2ica | refs/heads/master | /tests/test_rpy2ica_pytest.py | from rpy2ica import fastica as rica
import numpy as np
class TestICA:
    """Recover two known sources from a fixed 2x2 mixture with both backends.

    NOTE(review): this is Python 2 code (`xrange`); the wrapped rpy2ica
    module also uses print statements.
    """
    def setup(self):
        # Two deterministic sources (a sine wave and a sawtooth), mixed by a
        # fixed 2x2 matrix; unmixed with the native-R and R-calling-C methods.
        self.signals = np.vstack([np.sin([x/20.0 for x in xrange(1,1001)]),(1.0 + np.mod(xrange(1000),200) - 100.0)/100.0])
        self.mixing = np.array([[0.291, 0.6557], [-0.5439, 0.5572]])
        self.X = np.dot(self.mixing,self.signals)
        self.AR,self.WR,self.SR = rica(self.X,2,method="R",maxIterations=10000)
        self.AC,self.WC,self.SC = rica(self.X,2,method="C",maxIterations=10000)
    def test_R_W_orthogonality(self):
        # The unmixing matrix from the native-R backend should be orthogonal.
        assert np.allclose(np.dot(self.WR.T,self.WR),np.eye(2),atol=1.0e-06),"native R: W^TW not within 1.0e-06 of I"
    def test_R_S_recovery(self):
        # Recovered sources should correlate (up to sign/permutation) with
        # the true sources: |det| of the cross-correlation block near 1.
        from scipy.linalg import det
        assert np.allclose(1.0,np.abs(det(np.corrcoef(self.SR,self.signals)[0:2,2:])),atol=1.0e-03),"native R: |det(rho(ShatT,S))| not within 1e-03 of unity"
    def test_C_W_orthogonality(self):
        # Same orthogonality check for the R-calling-C backend.
        assert np.allclose(np.dot(self.WC.T,self.WC),np.eye(2),atol=1.0e-06),"R calling C: W^TW not within 1.0e-06 of I"
    def test_C_S_recovery(self):
        # Same source-recovery check for the R-calling-C backend.
        from scipy.linalg import det
        assert np.allclose(1.0,np.abs(det(np.corrcoef(self.SC,self.signals)[0:2,2:])),atol=1.0e-03),"R calling C: |det(rho(ShatT,S))| not within 1e-03 of unity"
| {"/__init__.py": ["/rpy2ica.py"]} |
50,303 | thelahunginjeet/rpy2ica | refs/heads/master | /__init__.py | from rpy2ica import fastica
__all__ = ['fastica']
| {"/__init__.py": ["/rpy2ica.py"]} |
50,304 | thelahunginjeet/rpy2ica | refs/heads/master | /rpy2ica.py | '''
Wraps R's version of FastICA via Rpy2. See the README for information about possible
differences between the python and R versions of FastICA.
Created on June 14, 2013
@author: Kevin S. Brown, University of Connecticut
This source code is provided under the BSD-3 license, duplicated as follows:
Copyright (c) 2013, Kevin S. Brown
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
3. Neither the name of the University of Connecticut nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from numpy import array
from numpy.random import randn
try:
import rpy2.robjects.numpy2ri
from rpy2.robjects.packages import importr
rpy2.robjects.numpy2ri.activate()
except ImportError:
print "Connection to R via Rpy not available; fastica_rpy will not work."
def fastica(X,nSources,algorithm="parallel",nonlinearity="logcosh",method="C",maxIterations=500,tolerance=1.0e-05,Winit=None):
"""
fastica_rpy wraps the fastICA package in R, with communication to R via rpy2. If rpy2 is not installed (or R is
not installed), this will not work. For full documentation see the R package. To use the defaults, simpy call:
A, W, S = fastica_rpy(X,nSources)
Parameters:
------------
X : numpy array, required
data matrix
nSources : integer, required
desired number of sources to extract
algorithm : string, optional
"deflation" : components extracted one at a time
"parallel" : components extracted all at once
nonlinearity: string, optional
should be either "logcosh" or "exp"
method : string, optional
should be either "R" (to do the calculations in R) or
"C" (to use the C library R is wrapping)
maxIterations : integer, optional
number of fixed-point iterations
tolerance : float, optional
convergence criterion for the unmixing matrix
Winit : numpy array, optional
initial guess for the unmixing matrix
Output:
----------
A : numpy array
mixing matix of size X.shape[0] x nSources
W : numpy array
unmixing matrix of size nSources x X.shape[0]
S : numpy array
matrix of independent courses, size nSources x X.shape[1]
"""
if Winit is None:
Winit = randn(nSources,nSources)
try:
rica = importr('fastICA')
except NameError:
print 'Cannot import package \'fastICA\'; you are missing either rpy2 (python problem) or fastICA (R problem)'
outDict = rica.fastICA(X.T,nSources,fun=nonlinearity,method=method,maxit=maxIterations,tol=tolerance,w_init=Winit.T)
return array(outDict.rx2('A')).T, array(outDict.rx2('W')).T, array(outDict.rx2('S')).T
| {"/__init__.py": ["/rpy2ica.py"]} |
50,305 | iteohy/covid_model | refs/heads/master | /server.py | from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import CanvasGrid, ChartModule, TextElement
from mesa.visualization.UserParam import UserSettableParameter
from model import Covid
class HappyElement(TextElement):
    """
    Display a text count of how many happy agents there are.

    NOTE(review): despite the (template-inherited) name, this actually shows
    the live agent count and the current simulated day.
    """
    def __init__(self):
        pass
    def render(self, model):
        # One dashboard text line: number of scheduled agents plus the
        # simulated day (scheduler steps divided by steps-per-day).
        return "Number agents: " + str(len(model.schedule.agents)) + "; Day: " + str(int(model.schedule.steps/model.day_steps))
def covid_draw(agent):
    """
    Portrayal method for the mesa CanvasGrid.

    Args:
        agent: a CovidAgent, or None for an empty cell.

    Returns:
        A portrayal dict for the visualization server (fixed circle glyph;
        fill and outline colours taken from the agent's disease state), or
        None when the cell is empty.
    """
    if agent is None:
        return
    dstate = agent.d_state
    # Color is a list because mesa's CanvasGrid accepts multiple fill colours.
    return {
        "Shape": "circle",
        "r": 0.5,
        "Filled": "true",
        "Layer": 0,
        "Color": [dstate.color],
        "stroke_color": dstate.stroke_color,
    }
# Dashboard widgets: text readout and a 20x20 cell canvas drawn at 500x500 px.
happy_element = HappyElement()
canvas_element = CanvasGrid(covid_draw, 20, 20, 500, 500)
# Time series of the SEIR compartment counts plus currently-isolated agents.
seir_chart = ChartModule([
    {"Label": "infected", "Color": "Red"},
    {"Label": "exposed", "Color": "Blue"},
    {"Label": "removed", "Color": "Grey"},
    {"Label": "susceptible", "Color": "Green"},
    {"Label": "isolated", "Color": "Black"}])
# Mean contacts per agent over time.
contact_chart = ChartModule([{"Label": "contact", "Color": "Black"}])
# User-tunable model parameters. Durations are given in days; the model
# converts them to scheduler steps using `day_steps`.
model_params = {
    "width": 20,
    "density": UserSettableParameter("number", "Number of agents", value=100),
    "initial_infected": UserSettableParameter("number", "Number Infected", value=10),
    "infection_rate": UserSettableParameter("slider", "Infectiousness", 0.7, 0.1, 1.0, 0.05),
    "detection_rate": UserSettableParameter("slider", "Detection rate", 0.7, 0.1, 1.0, 0.05),
    "min_infected": UserSettableParameter('number', 'Min Infected duration (days)', value=17),
    "max_infected": UserSettableParameter('number', 'Max Infected duration (days)', value=24),
    "mean_infected": UserSettableParameter('number', 'Mean Infected duration (days)', value=20),
    "min_exposed": UserSettableParameter('number', 'Min Exposed duration (days)', value=3.68),
    "max_exposed": UserSettableParameter('number', 'Max Exposed duration (days)', value=6.68),
    "mean_exposed": UserSettableParameter('number', 'Mean Exposed duration (days)', value=5.2),
    "day_steps": UserSettableParameter('number', 'Number of steps in a day', value=5),
    "day_isolation": UserSettableParameter('number', 'Number of days to isolation', value=6)
}
server = ModularServer(
    Covid, [canvas_element, happy_element, seir_chart, contact_chart], "COVID-19", model_params
)
50,306 | iteohy/covid_model | refs/heads/master | /runbatch.py | from model import Covid
from mesa.batchrunner import BatchRunner
import time
def getInfected(model):
    """Model reporter: cumulative infections generated during the run."""
    return model.cuminfected
def getDays(model):
    """Model reporter: elapsed simulated days (scheduler steps / steps-per-day)."""
    return model.schedule.steps/model.day_steps
def getStats(model):
    """Model reporter: dict of per-step SEIR/isolated time series."""
    return model.stats
# comm params
comm_model_params = {
    #"width": 600,
    "density": 50000,
    "infection_rate": 0.7,
    "min_infected": 17,
    "max_infected": 24,
    "mean_infected": 20,
    "min_exposed": 3.78,
    "max_exposed": 6.78,
    "mean_exposed": 5.2,
    "day_steps": 3,
    "initial_infected": 2,
    "day_isolation": 5,
    "detection_rate": 0.7
}
comm_variable_params = {
    #"infection_rate": [0.3, 0.5, 0.7, 0.9],
    #"detection_rate": [0.3, 0.5, 0.7, 0.9],
    #"day_isolation": [3, 5, 7, 9, 11, 13, 15]
    "width": [300]
}
# dorm params
dorm_model_params = {
    #"width": 200,
    #"density": 5000,
    "initial_infected": 5,
    "infection_rate": 0.3,
    "min_infected": 17,
    "max_infected": 24,
    "mean_infected": 20,
    "min_exposed": 3.78,
    "max_exposed": 6.78,
    "mean_exposed": 5.2,
    "day_steps": 5,
    "day_isolation": 5,
    "detection_rate": 0.7
}
dorm_variable_params = {
    #"initial_infected": [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    #"infection_rate": [0.3, 0.5, 0.7, 0.9]
    #"day_isolation": [3, 5, 7, 9, 11, 13, 15]
    # grid size
    #"width": [100, 200, 300, 400, 500]
    # number of agents
    #"density": [1000, 2000, 3000, 4000, 5000]
    # density
    # BUGFIX: comma was missing between the two entries (SyntaxError).
    "width": [100],
    "density": [1000]
}
# assign active params
model_params = dorm_model_params
variable_params = dorm_variable_params
# Run 5 iterations per parameter combination, capped at 200 simulated days.
batch_run = BatchRunner(Covid, variable_params, model_params, iterations=5, max_steps=200*dorm_model_params['day_steps'],
                        model_reporters={"infected": getInfected, "days": getDays, "stats": getStats})
batch_run.run_all()
data = batch_run.get_model_vars_dataframe()
# Total infections include the seeded cases; express as percent of population.
data['total_infected'] = data['infected']+data['initial_infected']
data['%'] = 100*data['total_infected']/model_params['density']
data.to_csv("agents_%d.csv"%int(time.time()), index=False)
#
#sdata = data[["initial_infected", "width", "days", "total_infected", "%"]]
#sdata = sdata.groupby(['initial_infected', 'width']).mean()
#sdata.to_csv("width_grouped_%d.csv"%int(time.time()), index=True)
50,307 | iteohy/covid_model | refs/heads/master | /model.py | from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
class DiseaseState():
    """Visual and temporal state of an agent's disease stage (S/E/I/R).

    Holds the canvas portrayal attributes (shape, colours) and a step
    countdown (`lifespan`) until the next stage transition.
    """
    def __init__(self, state, color, stroke_color):
        """
        Create a disease state.

        Args:
            state: one-letter SEIR stage indicator.
            color: fill colour used in the canvas portrayal.
            stroke_color: outline colour used in the canvas portrayal.
        """
        self.state = state
        self.setColors(color, stroke_color)
        self.setShape("circle", 0.5, "true", 0)
        # -1 means "no countdown assigned yet".
        self.assigned_lifespan = -1
        self.lifespan = -1
    def setColors(self, color, stroke_color):
        """Record the portrayal fill and outline colours."""
        self.color = color
        self.stroke_color = stroke_color
    def setShape(self, shape, radius, filled, layer):
        """Record the portrayal geometry attributes."""
        self.shape = shape
        self.radius = radius
        self.filled = filled
        self.layer = layer
    def decrementLifespan(self):
        """Tick the countdown by one step; an unset (-1) countdown stays put."""
        if self.lifespan <= -1:
            return
        self.lifespan -= 1
    def setLifespan(self, lifespan):
        """Assign the stage duration, truncated to whole steps."""
        self.lifespan = self.assigned_lifespan = int(lifespan)
        #print(str(self)+': '+str(self.lifespan))
class Susceptible(DiseaseState):
    """Susceptible (S) state: green fill, black outline."""
    def __init__(self):
        # init disease state Susceptible, color Green, stroke Black
        super().__init__("S", "#00FF00", "#000000")
class Exposed(DiseaseState):
    """Exposed (E) state: blue fill, black outline."""
    def __init__(self):
        # init disease state Exposed, color Blue, stroke Black
        super().__init__("E", "#0000FF", "#000000")
class Infected(DiseaseState):
    """Infected (I) state: red fill, black outline; also tracks detection."""
    def __init__(self):
        # init disease state Infected, color Red, stroke Black
        super().__init__("I", "#FF0000", "#000000")
        # Whether this infection is known (detected cases become isolatable).
        self.detected = True
    def set_detected(self, detected):
        # Record whether the infection was picked up (per the detection rate).
        self.detected = detected
    def is_detected(self):
        # True when the infection is known to the system.
        return self.detected
class Removed(DiseaseState):
    """Removed (R) state: grey fill, black outline."""
    def __init__(self):
        # init disease state Removed, color Grey, stroke Black
        super().__init__("R", "#CCCCCC", "#000000")
class CovidAgent(Agent):
    """
    Agent simulating covid-19 spread.
    """
    def __init__(self, pos, model, d_state, move=True, isolate_duration=14):
        """
        Create a new covid agent.

        Args:
            pos: (x, y) initial grid location (also used as the unique id).
            model: owning Covid model instance.
            d_state: disease state of the agent (S, E, I, R).
            move: whether the agent is free to move (False while isolated).
            isolate_duration: isolation length in days (converted to steps).
        """
        super().__init__(pos, model)
        self.pos = pos
        # Set of unique ids of agents this one has shared a cell with.
        self.contact = set()
        self.d_state = d_state
        self.move = move
        self.isolate_duration = isolate_duration*model.day_steps
    def step(self):
        """Advance disease state, apply isolation, spread infection, move."""
        # increment lifespan
        #self.d_state.incrementLife()
        self.d_state.decrementLifespan()
        # Debug dump (disabled).
        if False:
            #if isinstance(self.d_state, Removed):
            print("assigned_ls, lifespan, day_isolation, isolate_duration, move")
            print(type(self.d_state))
            print(self.d_state.assigned_lifespan)
            print(self.d_state.lifespan)
            print(self.model.day_isolation)
            print(self.isolate_duration)
            print(self.move)
        if isinstance(self.d_state, Exposed):
            # Incubation over: become infectious with a triangular-distributed
            # duration, detected with probability `detection_rate`.
            if self.d_state.lifespan <= 0 :
                self.d_state = Infected()
                ls = self.random.triangular(self.model.min_infected, self.model.max_infected, self.model.mean_infected)
                self.d_state.setLifespan(ls)
                self.d_state.set_detected(self.random.random() < self.model.detection_rate)
        elif isinstance(self.d_state, Infected):
            if self.d_state.lifespan <= 0:
                self.d_state = Removed()
            # Once a detected infection has lasted `day_isolation` steps,
            # stop moving (isolate). NOTE(review): the short-circuit on the
            # first condition keeps this safe after transition to Removed.
            if (self.d_state.assigned_lifespan - self.d_state.lifespan) >= self.model.day_isolation and self.d_state.is_detected():
                self.move = False
                self.isolate_duration -= 1 # decrease isolation duration
        # if agent not moving, stop here.
        if not self.move:
            # if isolation complete, move again
            if self.isolate_duration<=0:
                self.move = True
            else:
                return
        # track agents in contact
        contents = self.model.grid.get_cell_list_contents(self.pos)
        if len(contents)>1:
            for c in contents:
                if c.unique_id == self.unique_id:
                    continue
                if c.move:
                    self.contact.add(c.unique_id)
                # what to do to agent(s) in cell
                if isinstance(self.d_state, Infected): # infected
                    if isinstance(c.d_state, Susceptible) and c.move:
                        if self.random.uniform(0,1)<self.model.infection_rate: # probability of infection
                            c.d_state = Exposed()
                            ls = self.random.triangular(self.model.min_exposed, self.model.max_exposed, self.model.mean_exposed)
                            c.d_state.setLifespan(ls)
                            self.model.cuminfected += 1
        # move to random adjacent cell: each axis shifts -1/0/+1 with
        # probability roughly 1/3 each (torus grid handles wrap-around).
        x, y = self.pos
        r = self.random.random()
        if r < 0.33:
            x = x+1
        elif r < 0.66:
            x = x-1
        r = self.random.random()
        if r < 0.33:
            y = y+1
        elif r<0.66:
            y = y-1
        self.model.grid.move_agent(self, (x,y))
class Covid(Model):
    """
    Model class for the Covid infection model.

    SEIR agent-based model on a toroidal MultiGrid; time is discretised in
    `day_steps` scheduler steps per simulated day.
    """
    def __init__(self, density, initial_infected, infection_rate, min_infected, max_infected, mean_infected, min_exposed,
                 max_exposed, mean_exposed, day_steps, day_isolation, detection_rate, width=20):
        """
        Args:
            density: total number of agents to create.
            initial_infected: number of agents seeded as Infected.
            infection_rate: per-contact transmission probability.
            min/max/mean_infected: infectious-period triangular params (days).
            min/max/mean_exposed: incubation-period triangular params (days).
            day_steps: scheduler steps per simulated day.
            day_isolation: days from infection onset to isolation.
            detection_rate: probability an infection is detected.
            width: grid side length (grid is square).
        """
        # square grid
        height=width
        self.height = height
        self.width = width
        self.density = density
        self.initial_infected = initial_infected
        self.infection_rate = infection_rate
        self.detection_rate = detection_rate
        self.day_steps = day_steps
        # Convert all day-based durations into scheduler steps.
        self.min_exposed = min_exposed*self.day_steps
        self.max_exposed = max_exposed*self.day_steps
        self.mean_exposed = mean_exposed*self.day_steps
        self.min_infected = min_infected*self.day_steps
        self.max_infected = max_infected*self.day_steps
        self.mean_infected = mean_infected*self.day_steps
        self.day_isolation = day_isolation*self.day_steps
        self.schedule = RandomActivation(self)
        self.grid = MultiGrid(width, height, torus=True)
        # Per-step compartment counters and run-level aggregates.
        self.infected = 0
        self.exposed = 0
        self.susceptible = 0
        self.removed = 0
        self.contact = 0
        self.cuminfected = 0
        self.isolated = 0
        self.stats = {"infected":[], "exposed":[], "susceptible":[], "removed":[], "isolated":[]}
        self.datacollector = DataCollector(
            # Model-level count
            {"contact": "contact", "infected": "infected", "cuminfected":"cuminfected",
             "exposed":"exposed", "susceptible": "susceptible", "removed": "removed", "isolated":"isolated",
             "stats":"stats"},
            # For testing purposes, agent's individual x and y
            {"x": lambda a: a.pos[0], "y": lambda a: a.pos[1]}
        )
        # Set up agents
        # We use a grid iterator that returns
        # the coordinates of a cell as well as
        # its contents. (coord_iter)
        # NOTE(review): agents are placed on random EMPTY cells; if density
        # approaches width*height this loop may spin for a long time.
        num_agents = 0
        num_infected = 0
        while num_agents < self.density:
            #for cell in self.grid.coord_iter():
            #x = cell[1]
            #y = cell[2]
            # obtaining non-empty cell
            x = int(self.random.uniform(0, width))
            y = int(self.random.uniform(0, height))
            while not self.grid.is_cell_empty((x,y)):
                x = int(self.random.uniform(0, width))
                y = int(self.random.uniform(0, height))
            # The 0.2 factor thins placement attempts (random spatial spread).
            if num_agents < self.density and self.random.random()<0.2:
                if num_infected < self.initial_infected:
                    agent_type = Infected()
                    num_infected += 1
                    # generate typical infected lifespan from normal distribution
                    ls = self.random.uniform(self.min_infected, self.max_infected)
                    agent_type.setLifespan(ls)
                    agent_type.set_detected(self.random.uniform(0,1)<self.detection_rate)
                else:
                    agent_type = Susceptible()
                agent = CovidAgent((x, y), self, agent_type)
                self.grid.place_agent(agent, (x, y))
                self.schedule.add(agent)
                num_agents += 1
        #print(str(num_agents)+" agents finally.")
        self.running = True
        #self.datacollector.collect(self)
    def step(self):
        """
        Run one step of the model. If All agents are happy, halt the model.
        """
        # Reset counters
        self.infected = 0
        self.exposed = 0
        self.susceptible = 0
        self.removed = 0
        self.isolated = 0
        self.schedule.step()
        # compute average contact per agent
        total = 0
        for cell in self.grid.coord_iter():
            content, x, y = cell
            if content:
                for c in content:
                    total+=len(c.contact)
                    # Tally the SEIR compartment of each agent.
                    if isinstance(c.d_state, Infected):
                        self.infected += 1
                    elif isinstance(c.d_state, Exposed):
                        self.exposed += 1
                    elif isinstance(c.d_state, Susceptible):
                        self.susceptible += 1
                    elif isinstance(c.d_state, Removed):
                        self.removed += 1
                    if not c.move:
                        #print("isolated")
                        self.isolated +=1
        self.contact = total/self.schedule.get_agent_count()
        self.stats["infected"].append(self.infected)
        self.stats["exposed"].append(self.exposed)
        self.stats["susceptible"].append(self.susceptible)
        self.stats["removed"].append(self.removed)
        self.stats["isolated"].append(self.isolated)
        # collect data
        self.datacollector.collect(self)
        # Stop when the epidemic has died out or after 180 simulated days.
        if self.infected+self.exposed == 0:
            self.running = False
        if self.schedule.steps/self.day_steps > 180:
            self.running = False
| {"/server.py": ["/model.py"]} |
50,308 | caocscar/opioid-web | refs/heads/master | /application.py | from flask import Flask, render_template, request
from opioid_dict import src_dict, center_dict, cities, counties, names, name_cases, name_case_ls
from create_D3_files import create_county_files
application = Flask(__name__)
application.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@application.route('/')
def homepage():
    """Render the landing page with the selectable city and county names.

    Returns:
        Rendered index.html with the counties dict plus an alphabetized
        combined list of all city and county names.
    """
    data = {
        'counties': counties,
    }
    # Idiom: build and sort the combined name list in one expression
    # (replaces the previous append-in-two-loops version).
    placenames = sorted(list(cities) + list(counties))
    return render_template("index.html", data=data, cities=cities, counties=counties, placenames=placenames)
@application.route('/dashboard', methods=['GET'])
def generate_report_given_name():
    """Build the dashboard for a city or county and data source.

    Query string: `city` or `county` (one required), `src` (EMS/ME),
    `T0` (window length in days, or a YYYYMMDD start date), `T1`
    (optional YYYYMMDD end date).
    """
    # NOTE(review): `arguments` is unused.
    arguments=['city', 'county', 'src', 'T0' , 'T1' ]
    source = request.args.get('src', default = "EMS", type = str)
    city = request.args.get('city')
    county = request.args.get('county')
    T0 = request.args.get('T0', default = 14, type = int)
    T1 = request.args.get('T1', default = None, type = int)
    # City takes precedence over county when both are supplied.
    if county:
        cityorcounty = "County"
        name = county
        src = source
    if city:
        cityorcounty = "City"
        name = city
        src = source
    # Regenerate the D3 data files for this place/source/window.
    create_county_files(name, source, cityorcounty, T0, T1)
    source = source.upper()
    if city:
        city = city.title()
        # Fix title-cased names whose canonical casing differs (e.g. "McBain").
        for each in name_cases:
            if city in each.keys():
                city = each[city]
    if county:
        county = county.title()
    if city in cities:
        # Strip a trailing " (City)" disambiguation suffix if present.
        if '(City)' in city:
            propername = city[:-7]
        else:
            propername = city
        folder = 'cities'
        county_flag = ''
        data = {
            'placename': city,
            'propername': propername,
            'cityorcounty': "city",
            'county':center_dict["City"][propername]["county"],
            'src': source,
            'county_flag': county_flag,
            'titlename': src_dict[source],
            'f_geojson': f'geojson/{folder}/{propername}.geojson',
            'center': center_dict["City"][propername]['center'],
            'zoom' : center_dict["City"][propername].get('zoom', 10)}
    if county in counties:
        folder = 'counties'
        county_flag = 'County'
        data = {
            'placename': county,
            'cityorcounty': "county",
            'src': source,
            'county_flag': county_flag,
            'titlename': src_dict[source],
            'f_geojson': f'geojson/{folder}/{county}.geojson',
            'center': center_dict["County"][county]['center'],
            'zoom' : center_dict["County"][county].get('zoom', 10)}
    # NOTE(review): if the request names neither a known city nor county,
    # `data` (and `name` above) are unbound -> UnboundLocalError/500.
    return render_template("county_src_report.html", data=data, T0=T0, T1=T1)
#%% Run Flask app
# python application.py
if __name__ == '__main__':
    # Debug server for local development only.
    application.run(debug=True)
| {"/application.py": ["/opioid_dict.py", "/create_D3_files.py"], "/create_D3_files.py": ["/opioid_dict.py"]} |
50,309 | caocscar/opioid-web | refs/heads/master | /create_D3_files.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 14:36:25 2019
@author: umhs-caoa
"""
import pandas as pd
import numpy as np
import os
from opioid_dict import aggregation_dict, name_correction_dict
from datetime import datetime
pd.options.display.max_rows = 30
pd.options.display.max_columns = 10
# Input/output directories for the D3 dashboard data files.
wdir = os.path.join('static','data')
savedir = os.path.join('static','data')
# Canonical category orderings for the demographic frequency files.
racelist = ['White','Black','Hispanic or Latino','Asian','American Indian or Alaska Native',
            'Native Hawaiian or Other Pacific Islander','Other','Unknown',
            ]
genderlist = ['Female','Male','Unknown']
headers = ['index','value']
# Load all source CSVs into one frame, tagging each row with its data
# source ("EMS" or "ME") derived from the filename.
files = ['web_EMS.csv','web_EMS_Washtenaw.csv','web_ME.csv','web_ME_Wayne.csv']
list_df = []
for file in files:
    tmp = pd.read_csv(os.path.join(wdir,file))
    tmp['date'] = pd.to_datetime(tmp['date'])
    dataset = file.split('_')[1]
    tmp['src'] = dataset.strip('.csv')
    list_df.append(tmp)
df = pd.concat(list_df,ignore_index=True,sort=True)
# Normalize known misspelled city names.
df.replace({'city': name_correction_dict}, inplace=True)
#%%
def get_firstday(T0, latest_date):
    """Resolve T0 into a 'YYYY-MM-DD' start-date string.

    T0 is either a YYYYMMDD integer (>= 20000000), used verbatim as the
    start date, or a day count for a window ending at *latest_date*.
    """
    if T0 < 20000000:
        # Relative window: T0 days, inclusive of latest_date.
        return (latest_date + pd.DateOffset(days=1 - T0)).strftime('%Y-%m-%d')
    digits = str(T0)
    return '-'.join((digits[:4], digits[4:6], digits[6:]))
def get_lastday(T1, latest_date):
    """Resolve T1 into a 'YYYY-MM-DD' end-date string.

    T1 is a YYYYMMDD integer, or falsy (None/0) to use *latest_date*.
    """
    if not T1:
        return latest_date.strftime('%Y-%m-%d')
    digits = str(T1)
    return '-'.join((digits[:4], digits[4:6], digits[6:]))
def create_county_files(name, src, cityorcounty, T0, T1=None):
    """Regenerate all dashboard data files for one place and data source.

    :param name: city or county name to match (substring match on `df`).
    :param src: data source code ("EMS" or "ME").
    :param cityorcounty: "City" or "County" (selects the match column).
    :param T0: window length in days, or a YYYYMMDD start-date integer.
    :param T1: optional YYYYMMDD end date; defaults to the latest record.
    """
    column = 'city' if cityorcounty.lower() == "city" else 'column' if False else 'county'
    cty = df[(df[column].str.contains(name)) & (df['src'] == src)]
    # daily file
    latest_date = df.loc[df['src'] == src,'date'].max()
    T_start = get_firstday(T0, latest_date)
    T_end = get_lastday(T1, latest_date)
    if cty.empty:
        # No matching records: emit an all-zero daily series over the window.
        earliest_date = pd.to_datetime(T_start)
        cty_date = cty
        daily = pd.Series()
    else:
        earliest_date = cty['date'].min().strftime('%Y-%m-%d')
        ts = cty.set_index('date')
        daily = ts[column].resample('D').count()
    # Fill missing days with zero, then add a 7-day rolling average.
    daterange = pd.date_range(earliest_date, latest_date, freq='D')
    daily = daily.reindex(daterange, fill_value=0)
    daily = daily.to_frame().reset_index()
    daily.columns = ['date','value']
    daily['avg'] = daily['value'].rolling(7, min_periods=1).mean().round(2)
    create_daily_file(T_start, T_end, daily)
    # date filtering
    cty_date = cty[cty['date'].between(T_start,T_end)]
    # rate and change table
    dayswin = (pd.to_datetime(T_end) - pd.to_datetime(T_start)).days + 1
    evtrte = len(cty_date)/dayswin
    create_rte_table_file(cty,T_start,dayswin,evtrte)
    # age, race, gender, gps file
    create_age_file(cty_date)
    create_race_file(cty_date)
    create_gender_file(cty_date)
    create_gps_file(cty_date, T0, T_end)
    create_evt_table_file(cty_date,name,src)
    create_ctyzip_freq_table(cty_date)
def create_daily_file(T_start, T_end, daily):
    """Write the daily counts restricted to [T_start, T_end] to CSV."""
    window = daily[daily['date'].between(T_start, T_end)]
    window.to_csv(os.path.join(savedir, 'county_src_daily.csv'), index=False)
def create_age_file(cty_date):
    """Write age-group frequencies (sorted by group) to county_src_age.csv."""
    counts = cty_date['age_grp'].value_counts(sort=False).sort_index()
    counts.reset_index().to_csv(
        os.path.join(savedir, 'county_src_age.csv'),
        index=False, header=headers)
def create_race_file(cty_date):
    """Write race frequencies (all categories, zero-filled) to CSV."""
    counts = cty_date['race'].value_counts().reindex(racelist, fill_value=0)
    counts.reset_index().to_csv(
        os.path.join(savedir, 'county_src_race.csv'),
        index=False, header=headers)
def create_gender_file(cty_date):
    """Write gender frequencies (all categories, zero-filled) to CSV."""
    counts = cty_date['gender'].value_counts().reindex(genderlist, fill_value=0)
    counts.reset_index().to_csv(
        os.path.join(savedir, 'county_src_gender.csv'),
        index=False, header=headers)
def create_evt_table_file(cty_date,name,src):
    """Write the per-incident event table (columns vary by data source).

    NOTE(review): if `src` is neither "EMS" nor "ME", `tmpTab` is unbound
    and this raises UnboundLocalError.
    """
    cty_date = cty_date.sort_values(by=['date', 'zipcode'])
    if src == "EMS":
        tmpTab = cty_date[['date','city','zipcode']]
        tmpTab.columns = ['Date','City','Zip Code']
    elif src == "ME" and name in ["Wayne","Detroit"]:
        # Wayne County ME data carries the extra suspected-overdose flag.
        tmpTab = cty_date[['date','city','location','suspected_indicator']]
        tmpTab.columns = ['Date','City','Location','Suspected Overdose Indicator']
    elif src == "ME":
        tmpTab = cty_date[['date','city','location']]
        tmpTab.columns = ['Date','City','Location']
    # Blank out city values that contain digits (bad geocoding artifacts).
    tmpTab = tmpTab.replace({'City':r'.*\d.*'},{'City':np.NaN}, regex=True)
    tmpTab.to_csv(os.path.join(savedir,'county_src_evttab.csv'), index=False)
def create_rte_table_file(cty,T_start,days,evtrte):
    """Write the mean-incidents-per-day table with change vs. the prior period.

    :param cty: all records for the place/source (unfiltered by date).
    :param T_start: current window start ('YYYY-MM-DD').
    :param days: window length in days.
    :param evtrte: mean incidents per day in the current window.
    """
    # Prior period: the window of equal length immediately before T_start.
    pp_end = pd.to_datetime(T_start) + pd.DateOffset(days=-1)
    pp_start = pp_end + pd.DateOffset(days=-days+1)
    cty_pp = cty[cty['date'].between(pp_start,pp_end)]
    pp_evtrte = len(cty_pp)/days
    # No comparison when the prior period predates the data (2019-01-01)
    # or had zero incidents (avoids division by zero).
    if pp_start < pd.to_datetime("20190101") or pp_evtrte == 0:
        rtetab = pd.DataFrame({'Mean Incidents Per Day':[round(evtrte,1)],'Percent Change Since Last Period':[np.NaN]})
    else:
        rtetab = pd.DataFrame({'Mean Incidents Per Day':[round(evtrte,1)],'Percent Change Since Last Period':[round((evtrte-pp_evtrte)/pp_evtrte*100,1)]})
    rtetab.to_csv(os.path.join(savedir,'county_src_ratechange.csv'), index=False)
def create_ctyzip_freq_table(cty):
    """Write city- and zip-code frequency tables (with totals) to CSV.

    NOTE(review): mutates the caller's DataFrame in place (title-cases and
    remaps the 'city' column) — confirm callers do not rely on the original.
    """
    # Normalize capitalization, then merge city aliases (township splits).
    cty['city'] = cty['city'].str.title()
    cty.replace({'city': aggregation_dict}, inplace=True)
    # Any "city" containing a digit is treated as Unknown before counting.
    cty_counts = (cty.replace({'city':r'.*\d.*'},{'city':"Unknown"},regex=True))['city'].value_counts().to_frame(name="# Incidents")
    cty_counts["City"] = cty_counts.index
    # Append a Total row, then compute each row's share of all incidents.
    cty_counts.loc[len(cty_counts)] = [len(cty),"Total"]
    cty_counts["Percent"] = round(cty_counts["# Incidents"]/len(cty)*100,1)
    cty_counts[["City","# Incidents","Percent"]].to_csv(os.path.join(savedir,'county_src_ctyfreqtab.csv'), index=False)
    # Same table keyed by zip code (no alias/Unknown cleanup applied).
    zip_counts = cty['zipcode'].value_counts().to_frame(name="# Incidents")
    zip_counts["Zip Code"] = zip_counts.index
    zip_counts.loc[len(zip_counts)] = [len(cty),"Total"]
    zip_counts["Percent"] = round(zip_counts["# Incidents"]/len(cty)*100,1)
    zip_counts[["Zip Code","# Incidents","Percent"]].to_csv(os.path.join(savedir,'county_src_zipfreqtab.csv'), index=False)
def create_gps_file(cty_date, T0, T_end):
    """Write lat/lng points with time-decayed opacity to a JS data file.

    T0 is overloaded: values >= 20000000 are read as YYYYMMDD start dates,
    smaller values as a day-count window. Windows of 14 days or fewer get
    uniform full opacity; longer windows fade older events toward 0.
    """
    if T0 <= 14:
        cty_date['opacity'] = 1
    else:
        enddate = pd.to_datetime(T_end)
        if T0 >= 20000000:
            # T0 is a date literal: window length = days from T0 to T_end.
            first_date = datetime.strptime(str(T0),'%Y%m%d')
            numdays = (enddate - first_date).days
        else:
            numdays = T0
        # Newer events (small delta) approach opacity 1; oldest approach 0.
        delta = enddate - cty_date['date']
        cty_date['opacity'] = 1 - delta.dt.days / (numdays + 1)
    # Emit as a JS assignment so the map page can load it via a <script> tag.
    with open(os.path.join(savedir,'county_src_gps.js'),'w') as fout:
        fout.write('var event_pts = ')
        cty_date[['lat','lng','opacity']].to_json(fout, orient='records')
#%%
if __name__ == '__main__':
    # Example invocation: Wayne County EMS files starting 2019-01-01.
    create_county_files('Wayne','EMS', 'county', 20190101)
| {"/application.py": ["/opioid_dict.py", "/create_D3_files.py"], "/create_D3_files.py": ["/opioid_dict.py"]} |
50,310 | caocscar/opioid-web | refs/heads/master | /opioid_dict.py | src_dict = {'EMS':'EMS', 'ED':'Emergency Departments', 'ME':'Medical Examiner'}
# Normalizes the many raw race strings seen across data sources into a small
# set of canonical categories (Black / White / Hispanic or Latino / Other /
# American Indian or Alaska Native / Unknown).
race_dict = {'B':'Black',
             'African American':'Black',
             'Black or African American':'Black',
             'African American / Black':'Black',
             'W':'White',
             'White, Not Recorded':'White',
             'Patient Refused':'Unknown',
             '1':'Unknown',
             '2':'Unknown',
             '6':'Unknown',
             '7':'Unknown',
             '8':'Unknown',
             'A':'Unknown',
             'D':'Unknown',
             'U':'Unknown',
             'Not Recorded':'Unknown',
             'Not Applicable':'Unknown',
             'H':'Hispanic or Latino',
             'White, Hispanic or Latino':'Hispanic or Latino',
             'White,Hispanic or Latino':'Hispanic or Latino',
             'Hispanic or Latino, White':'Hispanic or Latino',
             'Hispanic, White':'Hispanic or Latino',
             'Asian, White':'Other',
             'White, Asian':'Other',
             'Asian, Black or African American':'Other',
             'Black or African American, White':'Other',
             'White, Black or African American':'Other',
             'White,Black or African American':'Other',
             'Black or African American, Hispanic or Latino':'Other',
             'Hispanic or Latino, Black or African American':'Other',
             'White, Native Hawaiian or Other Pacific Islander, Asian':'Other',
             'Native Hawaiian or Other Pacific Islander, White':'Other',
             'White, Native Hawaiian or Other Pacific Islander':'Other',
             'White,Native Hawaiian or Other Pacific Islander,Hispanic or Latino,Black or African American,Asian,American Indian or Alaska Native':'Other',
             'White, Native Hawaiian or Other Pacific Islander, Hispanic or Latino, Black or African American, Asian, American Indian or Alaska Native':'Other',
             'Black or African American, American Indian or Alaska Native':'Other',
             'White, Hispanic or Latino, Asian, American Indian or Alaska Native':'Other',
             'Unspecified':'Other',
             'American Indian / Alaskan Native':'American Indian or Alaska Native',
             }
# Normalizes raw gender codes/strings to Female / Male / Unknown.
gender_dict = {'F':'Female',
               'M':'Male',
               'U':'Unknown',
               'Unknown (Unable to Determine)':'Unknown',
               'Not Applicable':'Unknown',
               }
# Merges a city with its same-named charter township into one display bucket.
aggregation_dict = {'Kalamazoo':'Kalamazoo and Charter Township of Kalamazoo',
                    'Charter Township Of Kalamazoo':'Kalamazoo and Charter Township of Kalamazoo',
                    'Port Huron':'Port Huron and Charter Township of Port Huron',
                    'Charter Township Of Port Huron':'Port Huron and Charter Township of Port Huron',
                    }
# Replaces shorthand place names with their full official names.
name_correction_dict = {'Canton':'Charter Township of Canton',
                        }
center_dict = { "City" :
{'Ann Arbor': {
'center': '{lat: 42.28, lng: -83.73}',
'county': 'Washtenaw',
'zoom' : 12,
'minwidth': 550,
'minheight':400
},
'Ypsilanti':{
'center': '{lat: 42.24, lng: -83.61}',
'county' : 'Washtenaw',
'zoom' : 13,
'minwidth' : 500,
'minheight' : 400
},
'Detroit': {
'center': '{lat: 42.35, lng: -83.10}',
'county': 'Wayne',
'zoom': 11,
'minwidth': 550,
'minheight': 400
},
'Bay City': {
'center': '{lat: 43.58, lng: -83.88}',
'county': 'Bay',
'zoom': 12, #optimal: 13
'minwidth': 150, #optimal: 450
'minheight': 200 #optimal: 700
},
'Flint': {
'center': '{lat:43.01, lng:-83.70}',
'county': 'Genesee',
'zoom': 12, #optimal: 12
'minwidth': 250, #optimal: 450
'minheight': 300 #optimal: 550
},
'Grand Rapids': {
'center': '{lat:42.96, lng:-85.66}',
'county': 'Kent',
'zoom': 12, #optimal: 12
'minwidth': 300, #optimal: 550
'minheight': 300, #optimal: 600
},
'Pontiac': {
'center': '{lat:42.65, lng:-83.29}',
'county': 'Oakland',
'zoom': 12, #optimal: 12
'minwidth': 150, #optimal: 300
'minheight': 200, #optimal: 400
},
'Kalamazoo':{
'center': '{lat:42.29, lng: -85.58}',
'county': 'Kalamazoo',
'zoom': 12,
'minwidth':150,
'minheight': 200
},
'Muskegon':{
'center': '{lat:43.23, lng: -86.25}',
'county': 'Muskegon',
'zoom': 12,
'minwidth':150,
'minheight': 200
},
'Warren':{
'center': '{lat:42.49, lng: -83.06}',
'county': 'Macomb',
'zoom': 12,
'minwidth':150,
'minheight': 200
},
'Roseville':{
'center': '{lat:42.51, lng: -82.94}',
'county': 'Macomb',
'zoom': 13,
'minwidth':150,
'minheight': 200
},
'Hazel Park':{
'center': '{lat:42.46, lng:-83.10}',
'county': 'Oakland',
'zoom': 14,
'minwidth':150,
'minheight': 200
},
'Waterford':{
'center': '{lat:42.66, lng:-83.42}',
'county': 'Oakland',
'zoom': 12,
'minwidth':150,
'minheight': 200
},
'Port Huron':{
'center': '{lat:42.98, lng:-82.44}',
'county': 'St. Clair',
'zoom': 12,
'minwidth':150,
'minheight': 200
},
'Dearborn':{
'center': '{lat:42.31, lng:-83.22}',
'county': 'Wayne',
'zoom': 12,
'minwidth':150,
'minheight': 200
},
'Livonia':{
'center': '{lat:42.40, lng:-83.37}',
'county': 'Wayne',
'zoom': 12,
'minwidth':150,
'minheight': 200
},
'Lansing':{
'center': '{lat:42.70, lng:-84.55}',
'county': 'Ingham, Clinton, Eaton',
'zoom': 11,
'minwidth':150,
'minheight': 200
},
'Eastpointe':{
'center': '{lat:42.47, lng:-82.94}',
'county': 'Macomb',
'zoom': 13,
'minwidth':150,
'minheight': 200
},
'Royal Oak':{
'center': '{lat:42.51, lng:-83.15}',
'county': 'Oakland',
'zoom': 13,
'minwidth':150,
'minheight': 200
},
'Lincoln Park':{
'center': '{lat:42.24, lng:-83.18}',
'county': 'Wayne',
'zoom': 13,
'minwidth':150,
'minheight': 200
},
'Saginaw':{
'center': '{lat: 43.42, lng: -83.95}',
'county': 'Saginaw',
'zoom': 12,
'minwidth': 150,
'minheight': 200
}},
'County':
{'Alcona': {
'center': '{lat:44.68, lng:-83.58}',
'minwidth': 450,
'minheight': 400
},
'Alger': {
'center': '{lat:46.42, lng:-86.49}',
'zoom': 9,
'minwidth': 950,
'minheight': 600
},
'Allegan': {
'center': '{lat:42.59, lng:-85.91}',
'minwidth': 550,
'minheight': 350
},
'Alpena': {
'center': '{lat:45.03, lng:-83.58}',
'minwidth': 500,
'minheight': 400
},
'Antrim': {
'center': '{lat:45.01, lng:-85.14}',
'minwidth': 450,
'minheight': 450
},
'Arenac': {
'center': '{lat:44.04, lng:-83.87}',
'minwidth': 450,
'minheight': 300
},
'Baraga': {
'center': '{lat:46.69, lng:-88.34}',
'minwidth': 550,
'minheight': 600
},
'Barry': {
'center': '{lat:42.60, lng:-85.31}',
'minwidth': 350,
'minheight': 350
},
'Bay': {
'center': '{lat:43.7, lng:-83.93}',
'minwidth': 350,
'minheight': 550
},
'Benzie': {
'center': '{lat:44.65, lng:-86.04}',
'minwidth': 350,
'minheight': 300
},
'Berrien': {
'center': '{lat:42.00, lng:-86.52}',
'minwidth': 450,
'minheight': 500
},
'Branch': {
'center': '{lat:41.92, lng:-85.06}',
'minwidth': 350,
'minheight': 350
},
'Calhoun': {
'center': '{lat:42.25, lng:-85.00}',
'minwidth': 450,
'minheight': 350
},
'Cass': {
'center': '{lat:41.92, lng:-85.99}',
'minwidth': 350,
'minheight': 350
},
'Charlevoix': {
'center': '{lat:45.48, lng:-85.22}',
'zoom': 9,
'minwidth': 400,
'minheight': 400
},
'Cheboygan': {
'center': '{lat:45.49, lng:-84.47}',
'minwidth': 400,
'minheight': 650
},
'Chippewa': {
'center': '{lat:46.34, lng:-84.36}',
'zoom': 9,
'minwidth': 650,
'minheight': 500
},
'Clare': {
'center': '{lat:44.00, lng:-84.85}',
'minwidth': 400,
'minheight': 400
},
'Clinton': {
'center': '{lat:42.94, lng:-84.60}',
'minwidth': 350,
'minheight': 350
},
'Crawford': {
'center': '{lat:44.68, lng:-84.61}',
'minwidth': 350,
'minheight': 400
},
'Delta': {
'center': '{lat:45.85, lng:-86.91}',
'minwidth': 700,
'minheight': 650
},
'Dickinson': {
'center': '{lat:45.98, lng:-87.88}',
'minwidth': 400,
'minheight': 550
},
'Eaton': {
'center': '{lat:42.60, lng:-84.84}',
'minwidth': 350,
'minheight': 350
},
'Emmet': {
'center': '{lat:45.53, lng:-84.93}',
'minwidth': 300,
'minheight': 600
},
'Genesee': {
'center': '{lat:43.00, lng:-83.69}',
'minwidth': 350,
'minheight': 450
},
'Gladwin': {
'center': '{lat:43.99, lng:-84.39}',
'minwidth': 350,
'minheight': 400
},
'Gogebic': {
'center': '{lat:46.43, lng:-89.70}',
'zoom': 9,
'minwidth': 550,
'minheight': 400
},
'Grand Traverse': {
'center': '{lat:44.75, lng:-85.57}',
'minwidth': 400,
'minheight': 500
},
'Gratiot': {
'center': '{lat:43.29, lng:-84.61}',
'minwidth': 350,
'minheight': 350
},
'Hillsdale': {
'center': '{lat:41.88, lng:-84.59}',
'minwidth': 350,
'minheight': 400
},
'Houghton': {
'center': '{lat:46.85, lng:-88.61}',
'zoom': 9,
'minwidth': 300,
'minheight': 500
},
'Huron': {
'center': '{lat:43.87, lng:-83.04}',
'minwidth': 650,
'minheight': 450
},
'Ingham': {
'center': '{lat:42.60, lng:-84.37}',
'minwidth': 350,
'minheight': 400
},
'Ionia': {
'center': '{lat:42.94, lng:-85.07}',
'minwidth': 350,
'minheight': 350
},
'Iosco': {
'center': '{lat:44.34, lng:-83.60}',
'minwidth': 450,
'minheight': 400
},
'Iron': {
'center': '{lat:46.17, lng:-88.55}',
'minwidth': 650,
'minheight': 550
},
'Isabella': {
'center': '{lat:43.64, lng:-84.85}',
'minwidth': 400,
'minheight': 400
},
'Jackson': {
'center': '{lat:42.25, lng:-84.42}',
'minwidth': 450,
'minheight': 350
},
'Kalamazoo': {
'center': '{lat:42.25, lng:-85.53}',
'minwidth': 350,
'minheight': 350
},
'Kalkaska': {
'center': '{lat:44.69, lng:-85.09}',
'minwidth': 400,
'minheight': 400
},
'Kent': {
'center': '{lat:43.03, lng:-85.55}',
'minwidth': 400,
'minheight': 550
},
'Keweenaw': {
'center': '{lat:47.70, lng:-88.49}',
'zoom': 9,
'minwidth': 600,
'minheight': 550
},
'Lake': {
'center': '{lat:43.99, lng:-85.80}',
'minwidth': 350,
'minheight': 400
},
'Lapeer': {
'center': '{lat:43.10, lng:-83.22}',
'minwidth': 350,
'minheight': 450
},
'Leelanau': {
'center': '{lat:45.11, lng:-85.84}',
'minwidth': 450,
'minheight': 700
},
'Lenawee': {
'center': '{lat:41.93, lng:-84.06}',
'minwidth': 450,
'minheight': 400
},
'Livingston': {
'center': '{lat:42.60, lng:-83.91}',
'minwidth': 400,
'minheight': 400
},
'Luce': {
'center': '{lat:46.50, lng:-85.55}',
'minwidth': 500,
'minheight': 550
},
'Mackinac': {
'center': '{lat:45.98, lng:-84.99}',
'zoom': 9,
'minwidth': 650,
'minheight': 300
},
'Macomb': {
'center': '{lat:42.67, lng:-82.91}',
'minwidth': 300,
'minheight': 450
},
'Manistee': {
'center': '{lat:44.34, lng:-86.10}',
'minwidth': 450,
'minheight': 400
},
'Marquette': {
'center': '{lat:46.45, lng:-87.62}',
'zoom': 9,
'minwidth': 400,
'minheight': 500
},
'Mason': {
'center': '{lat:44.00, lng:-86.28}',
'minwidth': 350,
'minheight': 400
},
'Mecosta': {
'center': '{lat:43.64, lng:-85.32}',
'minwidth': 350,
'minheight': 400
},
'Menominee': {
'center': '{lat:45.54, lng:-87.58}',
'zoom': 9,
'minwidth': 250,
'minheight': 500
},
'Midland': {
'center': '{lat:43.65, lng:-84.39}',
'minwidth': 350,
'minheight': 400
},
'Missaukee': {
'center': '{lat:44.34, lng:-85.09}',
'minwidth': 400,
'minheight': 400
},
'Monroe': {
'center': '{lat:41.91, lng:-83.48}',
'minwidth': 450,
'minheight': 400
},
'Montcalm': {
'center': '{lat:43.29, lng:-85.20}',
'minwidth': 550,
'minheight': 350
},
'Montmorency': {
'center': '{lat:45.03, lng:-84.13}',
'minwidth': 400,
'minheight': 400
},
'Muskegon': {
'center': '{lat:43.30, lng:-86.13}',
'minwidth': 500,
'minheight': 400
},
'Newaygo': {
'center': '{lat:43.55, lng:-85.80}',
'minwidth': 350,
'minheight': 550
},
'Oakland': {
'center': '{lat:42.66, lng:-83.39}',
'minwidth': 450,
'minheight': 500
},
'Oceana': {
'center': '{lat:43.64, lng:-86.29}',
'minwidth': 400,
'minheight': 400
},
'Ogemaw': {
'center': '{lat:44.33, lng:-84.13}',
'minwidth': 400,
'minheight': 400
},
'Ontonagon': {
'center': '{lat:46.68, lng:-89.38}',
'zoom': 9,
'minwidth': 400,
'minheight': 400
},
'Osceola': {
'center': '{lat:43.99, lng:-85.33}',
'minwidth': 350,
'minheight': 400
},
'Oscoda': {
'center': '{lat:44.68, lng:-84.13}',
'minwidth': 400,
'minheight': 400
},
'Otsego': {
'center': '{lat:45.03, lng:-84.61}',
'minwidth': 400,
'minheight': 400
},
'Ottawa': {
'center': '{lat:42.99, lng:-86.03}',
'minwidth': 400,
'minheight': 450
},
'Presque Isle': {
'center': '{lat:45.41, lng:-83.82}',
'minwidth': 650,
'minheight': 450
},
'Roscommon': {
'center': '{lat:44.34, lng:-84.61}',
'minwidth': 400,
'minheight': 400
},
'Saginaw': {
'center': '{lat:43.35, lng:-84.03}',
'minwidth': 500,
'minheight': 450
},
'St. Clair': {
'center': '{lat:42.84, lng:-82.70}',
'minwidth': 450,
'minheight': 650
},
'St. Joseph': {
'center': '{lat:41.92, lng:-85.54}',
'minwidth': 400,
'minheight': 350
},
'Sanilac': {
'center': '{lat:43.42, lng:-82.81}',
'minwidth': 450,
'minheight': 550
},
'Schoolcraft': {
'center': '{lat:46.13, lng:-86.24}',
'zoom' : 9,
'minwidth': 550,
'minheight': 800
},
'Shiawassee': {
'center': '{lat:42.95, lng:-84.15}',
'minwidth': 350,
'minheight': 400
},
'Tuscola': {
'center': '{lat:43.48, lng:-83.40}',
'minwidth': 450,
'minheight': 550
},
'Van Buren': {
'center': '{lat:42.24, lng:-86.06}',
'minwidth': 450,
'minheight': 350
},
'Washtenaw': {
'center': '{lat:42.25, lng:-83.84}',
'minwidth': 450,
'minheight': 400
},
'Wayne': {
'center': '{lat:42.24, lng:-83.21}',
'minwidth': 500,
'minheight': 450
},
'Wexford': {
'center': '{lat:44.28, lng:-85.58}',
'minwidth': 400,
'minheight': 400
}}
}
# Cities sharing a name with a county get a " (City)" suffix to disambiguate.
name_case_ls = ['Saginaw', 'Kalamazoo', 'Muskegon']
name_cases = [{'Saginaw' : 'Saginaw (City)'}, {'Kalamazoo' : 'Kalamazoo (City)'}, {'Muskegon' : 'Muskegon (City)'}]

# Flat name lists pulled straight from the map-center lookup table.
counties = list(center_dict["County"].keys())
cities = [city + " (City)" if city in name_case_ls else city
          for city in center_dict["City"].keys()]

# Group every place name (counties first, then cities) by first letter.
# Sorting each bucket once at the end replaces the original per-append
# re-sort and also guarantees letters with only counties come out sorted.
placenames = {}
for place in counties + cities:
    placenames.setdefault(place[0], []).append(place)
for bucket in placenames.values():
    bucket.sort()

# names: the same grouping with the letter keys in alphabetical order.
names = {letter: placenames[letter] for letter in sorted(placenames)}
| {"/application.py": ["/opioid_dict.py", "/create_D3_files.py"], "/create_D3_files.py": ["/opioid_dict.py"]} |
50,311 | michmaml/trans-19 | refs/heads/master | /django_project/urls.py | """django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from chpstaff import views as chpstaff_views
urlpatterns = [
    path('admin/', admin.site.urls),
    # Staff account management (chpstaff app views)
    path('signup/', chpstaff_views.signup, name='chpstaff_signup'),
    path('account/', chpstaff_views.account, name='chpstaff_account'),
    path('login/', auth_views.LoginView.as_view(template_name='staff/login.html'),
         name='chpstaff_login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='staff/logout.html'),
         name='chpstaff_logout'),
    # These urls handle password recovery
    path('password-reset/',
         auth_views.PasswordResetView.as_view(
             template_name='staff/password_reset.html'
         ),
         name='password_reset'),
    path('password-reset/done/',
         auth_views.PasswordResetDoneView.as_view(
             template_name='staff/password_reset_done.html'
         ),
         name='password_reset_done'),
    path('password-reset-confirm/<uidb64>/<token>/',
         auth_views.PasswordResetConfirmView.as_view(
             template_name='staff/password_reset_confirm.html'
         ),
         name='password_reset_confirm'),
    path('password-reset-complete/',
         auth_views.PasswordResetCompleteView.as_view(
             template_name='staff/password_reset_complete.html'
         ),
         name='password_reset_complete'),
    # Main Path
    path('', include('trans_19.urls')),
]
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,312 | michmaml/trans-19 | refs/heads/master | /chpstaff/apps.py | from django.apps import AppConfig
class ChpstaffConfig(AppConfig):
    """App configuration for the chpstaff (CHP staff accounts) app."""
    name = 'chpstaff'
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,313 | michmaml/trans-19 | refs/heads/master | /trans_19/views.py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import (
ListView, TemplateView, CreateView, UpdateView, DeleteView)
from .models import Patient, Visit, Location, chp_staff_data
from django import forms
from datetime import datetime, timedelta
# Create your views here.
def home(request):
    """Function-based landing page listing every patient record."""
    return render(request, 'patients/home.html',
                  {'patients': Patient.objects.all()})
class PatientsListView(LoginRequiredMixin, ListView):
    """Home page: newest cases first, with the viewer's epidemiologist number."""
    model = Patient
    template_name = 'patients/home.html'
    context_object_name = 'patients'
    ordering = ['-caseNum']

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Look up the CHP staff record for the logged-in user; the original
        # loop re-checked the username the queryset was already filtered on,
        # so .first() is equivalent and simpler.
        record = chp_staff_data.objects.filter(username=self.request.user).first()
        if record is not None:
            context['epidemiologist_number'] = record.epidemiologist_number
        return context
class PatientDetailView(LoginRequiredMixin, TemplateView):
    """One patient's record together with every visit attached to it."""
    template_name = 'patients/patient_trips.html'

    def get_context_data(self, **kwargs):
        pk = self.kwargs['patient']
        context = super().get_context_data(**kwargs)
        context['visit_list'] = Visit.objects.filter(patient__pk=pk)
        context['patient'] = Patient.objects.get(pk=pk)
        return context
class AddPatientRecordView(LoginRequiredMixin, CreateView):
    """Form for creating a new patient record; returns to the list on success."""
    model = Patient
    template_name = 'patients/patient_actions/add_patient_record.html'
    fields = ['name', 'idNum', 'dateBirth', 'dateConfi', 'caseNum']
    # Declarative success_url: the default get_success_url() returns this,
    # matching the original method override.
    success_url = reverse_lazy('trans_19_home')
class UpdatePatientRecordView(LoginRequiredMixin, UpdateView):
    """Edit an existing patient record; returns to the list on success."""
    model = Patient
    template_name = 'patients/patient_actions/update_patient_record.html'
    pk_url_kwarg = 'patient'  # URL kwarg carrying the patient primary key
    success_url = '/'
    fields = ['name', 'idNum', 'dateBirth', 'dateConfi', 'caseNum']
class DeletePatientRecordView(LoginRequiredMixin, DeleteView):
    """Confirmation page for deleting a patient record."""
    model = Patient
    template_name = 'patients/patient_actions/delete_patient_record.html'
    pk_url_kwarg = 'patient'  # URL kwarg carrying the patient primary key
    success_url = '/'
    # 'fields' removed: DeleteView renders no model form, so it was dead config.
class AddPatientVisitView(LoginRequiredMixin, CreateView):
    """Attach a new visit to the patient named in the URL."""
    model = Visit
    template_name = 'patients/patient_actions/add_patient_visit.html'
    fields = ['location', 'date_From', 'date_To', 'details']

    def form_valid(self, form):
        # Bind the new visit to the patient from the URL before saving.
        form.instance.patient = Patient.objects.get(pk=self.kwargs['patient'])
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Templates use patientId to build links back to the patient page.
        context['patientId'] = self.kwargs['patient']
        return context
class UpdatePatientVisitView(LoginRequiredMixin, UpdateView):
    """Edit one of a patient's visits."""
    model = Visit
    template_name = 'patients/patient_actions/update_patient_visit.html'
    pk_url_kwarg = 'visit'  # URL kwarg carrying the visit primary key
    fields = ['location', 'date_From', 'date_To', 'details']

    def get_context_data(self, **kwargs):
        patient = self.kwargs['patient']
        context = super().get_context_data(**kwargs)
        # Templates use patientId to build links back to the patient page.
        context['patientId'] = patient
        return context
class DeletePatientVisitView(LoginRequiredMixin, DeleteView):
    """Confirmation page for deleting one of a patient's visits."""
    model = Visit
    template_name = 'patients/patient_actions/delete_patient_visit.html'
    pk_url_kwarg = 'visit'  # URL kwarg carrying the visit primary key
    # 'fields' removed: DeleteView renders no model form, so it was dead config.

    def get_success_url(self, **kwargs):
        # Return to the owning patient's visit list.
        return reverse_lazy('trans_19_patient',
                            kwargs={'patient': self.kwargs['patient']})

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Templates use patientId to build links back to the patient page.
        context['patientId'] = self.kwargs['patient']
        # Removed dead local: success_url = 'patient/' + str(patient) was
        # assigned here but never used.
        return context
class AddLocationRecordView(LoginRequiredMixin, CreateView):
    """Create a location; the form page also lists every existing location."""
    model = Location
    template_name = 'patients/location_actions/add_location_record.html'
    fields = ['name', 'address', 'district', 'xCoord', 'yCoord', 'category']
    # Declarative success_url: the default get_success_url() returns this,
    # matching the original method override (redisplay the same form page).
    success_url = reverse_lazy('trans_19_addLocationRecord')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['locations'] = Location.objects.all()
        return context
class ViewLocationRecordView(LoginRequiredMixin, TemplateView):
    """Read-only listing of all locations."""
    # NOTE(review): 'model' and 'get_success_url' are not used by TemplateView —
    # looks like copy-paste from the CRUD views; confirm before removing.
    model = Location
    template_name = 'patients/location_actions/location.html'

    def get_success_url(self, **kwargs):
        return reverse_lazy('trans_19_location')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['locations'] = Location.objects.all()
        return context
class DeleteLocationView(LoginRequiredMixin, DeleteView):
    """Confirmation page for deleting a location."""
    model = Location
    pk_url_kwarg = 'location'  # URL kwarg carrying the location primary key
    template_name = 'patients/location_actions/delete_location.html'

    def get_success_url(self, **kwargs):
        # Removed dead call: super().get_context_data(**kwargs) built a
        # context object that was immediately discarded.
        return reverse_lazy('trans_19_location')
class UpdateLocationView(LoginRequiredMixin, UpdateView):
    """Edit an existing location; returns to the location list on success."""
    model = Location
    template_name = 'patients/location_actions/update_location.html'
    pk_url_kwarg = 'location'  # URL kwarg carrying the location primary key
    fields = ['name', 'address', 'district', 'xCoord', 'yCoord', 'category']

    def get_success_url(self, **kwargs):
        # Removed dead call: super().get_context_data(**kwargs) built a
        # context object that was immediately discarded.
        return reverse_lazy('trans_19_location')
class PatientConnectionsView(LoginRequiredMixin, TemplateView):
    """Find other patients whose visits overlapped this patient's visits.

    GET parameters: 'location' (location pk, -1 = all locations) and
    'time_window' (days of slack added on both sides of each visit when
    searching for overlapping visits).
    """
    template_name = 'patients/patient_connections.html'

    def get_context_data(self, **kwargs):
        patient_id = self.kwargs['patient']
        context = super().get_context_data(**kwargs)
        context['patient'] = Patient.objects.get(pk=patient_id)

        # Parse the query-string filters ('is None' instead of '== None').
        raw_location = self.request.GET.get('location')
        location_id = -1 if raw_location is None else int(raw_location)
        raw_window = self.request.GET.get('time_window')
        time_range = 0 if raw_window is None else int(raw_window)

        try:
            case = Patient.objects.get(pk=patient_id)
            visits = Visit.objects.filter(patient=patient_id)
            if location_id != -1:
                visits = visits.filter(location=location_id)
            for visit in visits:
                # A connection is any other patient's visit to the same
                # location whose dates overlap this visit +/- time_range days.
                lower = visit.date_From - timedelta(days=time_range)
                upper = visit.date_To + timedelta(days=time_range)
                visit.connections = (Visit.objects
                                     .filter(location=visit.location)
                                     .filter(date_To__gte=lower)
                                     .filter(date_From__lte=upper)
                                     .exclude(patient_id=patient_id))
        except Patient.DoesNotExist:
            case = None
            visits = None
        except Exception:
            # NOTE(review): kept the original best-effort fallback, but the
            # captured exception variable was unused so it is dropped here.
            visits = None
        context['case'] = case
        context['visits'] = visits

        # Filter choices: every location this patient visited, plus "All".
        location_choices = {-1: 'All'}
        for v in Visit.objects.filter(patient_id=patient_id):
            location_choices[v.location.id] = v.location.name

        class SearchConnectionForm(forms.Form):
            location = forms.ChoiceField(label='Location', choices=list(
                location_choices.items()), required=True, initial=location_id)
            time_window = forms.IntegerField(
                label='Search window (in days)', required=True, initial=time_range)
        context['form'] = SearchConnectionForm()

        # Viewer's epidemiologist number (same lookup as PatientsListView);
        # the original loop re-checked the already-filtered username field.
        record = chp_staff_data.objects.filter(username=self.request.user).first()
        if record is not None:
            context['epidemiologist_number'] = record.epidemiologist_number
        return context
def account(request):
    """Render the trans_19 account page."""
    context = {'title': 'Trans-19 Account'}
    return render(request, 'patients/account.html', context)
def view_404(request):
    """Custom 404 handler rendering the project's not-found page."""
    return render(request, 'patients/page404.html')
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,314 | michmaml/trans-19 | refs/heads/master | /chpstaff/forms.py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
#from django.forms import HiddenInput
# inherits from UserCreationForm
class UserSignInForm(UserCreationForm):
    """Sign-up form: Django's user-creation fields plus CHP staff extras."""
    # Staff number is required; epidemiologist number is optional.
    chp_staff_number = forms.CharField(max_length=7)
    first_name = forms.CharField()
    last_name = forms.CharField()
    email = forms.EmailField()
    epidemiologist_number = forms.CharField(max_length=7, required=False, help_text="Leave it blank if you are not an epidemiologist")
    #epidemiologist = forms.BooleanField(initial=False)

    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'epidemiologist_number','chp_staff_number',
                  'email', 'username', 'password1', 'password2'] # , 'epidemiologist'
class UserUpdateAccount(forms.ModelForm):
    """Account-settings form: only the email address is editable."""
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['email']
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,315 | michmaml/trans-19 | refs/heads/master | /trans_19/migrations/0001_initial.py | # Generated by Django 3.0.6 on 2020-05-14 13:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='chp_staff_data',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=30, verbose_name='User name')),
('chp_staff_number', models.CharField(max_length=30, verbose_name='chp_staff_number')),
('epidemiologist_number', models.CharField(max_length=30, verbose_name='epidemiologist_number')),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=70, verbose_name='Location Visited')),
('address', models.CharField(max_length=70)),
('district', models.CharField(choices=[('Central & Western', 'Central & Western'), ('Eastern', 'Eastern'), ('Islands', 'Islands'), ('Kowloon City', 'Kowloon City'), ('Kwai Tsing', 'Kwai Tsing'), ('Kwun Tong', 'Kwun Tong'), ('North', 'North'), ('Sai Kung', 'Sai Kung'), ('Sha Tin', 'Sha Tin'), ('Sham Shui Po', 'Sham Shui Po'), ('Southern', 'Southern'), ('Tai Po', 'Tai Po'), ('Tsuen Wan', 'Tsuen Wan'), ('Tuen Mun', 'Tuen Mun'), ('Wan Chai', 'Wan Chai'), ('Wong Tai Sin', 'Wong Tai Sin'), ('Yau Tsim Mong', 'Yau Tsim Mong'), ('Yuen Long', 'Yuen Long')], max_length=17)),
('xCoord', models.IntegerField(verbose_name='X Coordinate')),
('yCoord', models.IntegerField(verbose_name='Y Coordinate')),
('category', models.CharField(choices=[('residence', 'Residence'), ('workplace', 'Workplace'), ('visit', 'Visit'), ('other', 'OTHER')], max_length=9)),
],
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='Name')),
('idNum', models.CharField(max_length=10, verbose_name='ID Number')),
('dateBirth', models.DateField(verbose_name='Date of Birth(YYYY-MM-DD)')),
('dateConfi', models.DateField(verbose_name='Date of Confirmation(YYYY-MM-DD)')),
('caseNum', models.IntegerField(verbose_name='Case Number')),
],
),
migrations.CreateModel(
name='Visit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_From', models.DateField(verbose_name='Date From (YYYY-MM-DD)')),
('date_To', models.DateField(verbose_name='Date To (YYYY-MM-DD)')),
('details', models.CharField(max_length=70)),
('category', models.CharField(choices=[('residence', 'Residence'), ('workplace', 'Workplace'), ('visit', 'Visit'), ('other', 'OTHER')], max_length=9)),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='trans_19.Location')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trans_19.Patient')),
],
),
]
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,316 | michmaml/trans-19 | refs/heads/master | /trans_19/urls.py | from django.urls import path
from trans_19 import views
from .views import *
urlpatterns = [
    # Patient list / detail / create
    path('', PatientsListView.as_view(),
         name='trans_19_home'),
    path('patient/<int:patient>/', PatientDetailView.as_view(),
         name='trans_19_patient'),
    path('patient/add/', AddPatientRecordView.as_view(),
         name='trans_19_addPatientRecord'),
    # Patient update/delete and visit CRUD nested under a patient
    path('patient/<int:patient>/visit/add/', AddPatientVisitView.as_view(),
         name='trans_19_addPatientVisit'),
    path('patient/<int:patient>/update/', UpdatePatientRecordView.as_view(),
         name='trans_19_updatePatientRecord'),
    path('patient/<int:patient>/visit/<int:visit>/update/', UpdatePatientVisitView.as_view(),
         name='trans_19_updatePatientVisit'),
    path('patient/<int:patient>/delete/', DeletePatientRecordView.as_view(),
         name='trans_19_deletePatientRecord'),
    path('patient/<int:patient>/visit/<int:visit>/delete/', DeletePatientVisitView.as_view(),
         name='trans_19_deletePatientVisit'),
    path('patient/<int:patient>/connections/', PatientConnectionsView.as_view(),
         name='trans_19_patient_connections'),
    # Location CRUD
    path('location/add/', AddLocationRecordView.as_view(),
         name='trans_19_addLocationRecord'),
    path('location/', ViewLocationRecordView.as_view(),
         name='trans_19_location'),
    # NOTE(review): the doubled 'location/location/...' prefix looks
    # unintentional — confirm against templates before changing.
    path('location/location/<int:location>/delete/', DeleteLocationView.as_view(),
         name='trans_19_deleteLocation'),
    path('location/location/<int:location>/update/', UpdateLocationView.as_view(),
         name='trans_19_updateLocation'),
]
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,317 | michmaml/trans-19 | refs/heads/master | /trans_19/admin.py | from django.contrib import admin
from .models import Patient, Visit
# Register your models here.
# Expose Patient and Visit in the Django admin site.
# NOTE(review): Location and chp_staff_data are not registered — confirm
# that is intentional.
admin.site.register(Patient)
admin.site.register(Visit)
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,318 | michmaml/trans-19 | refs/heads/master | /chpstaff/views.py | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import UserSignInForm, UserUpdateAccount
from trans_19.models import chp_staff_data
# Create your views here.
def signup(request):
    """Handle CHP staff self-registration.

    GET renders an empty sign-up form; POST validates it, creates the
    auth user (via ``form.save()``) plus a matching ``chp_staff_data``
    row, and redirects to the login page.  An invalid POST falls through
    and re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        form = UserSignInForm(request.POST)
        if form.is_valid():
            form.save()
            # Mirror the extra staff identifiers into our own table;
            # the auth user itself was created by form.save() above.
            p = chp_staff_data(
                username=form.cleaned_data.get('username'),
                chp_staff_number=form.cleaned_data.get('chp_staff_number'),
                epidemiologist_number=form.cleaned_data.get('epidemiologist_number'))
            p.save()
            # (Removed an unused `username` local and a placeholder-free
            # f-string — both were dead weight.)
            messages.success(request, 'Account created, please log in.')
            return redirect('/login')
    else:
        form = UserSignInForm()
    return render(request, 'staff/signup.html', {'form': form})
@login_required
def account(request):
    """Let a logged-in user review and update their own account details."""
    if request.method != 'POST':
        bound_form = UserUpdateAccount(instance=request.user)
    else:
        bound_form = UserUpdateAccount(request.POST, instance=request.user)
        if bound_form.is_valid():
            bound_form.save()
            messages.success(request, f'Your account has been updated!')
            return redirect('/')
    return render(request, 'staff/account.html', {'form': bound_form})
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,319 | michmaml/trans-19 | refs/heads/master | /trans_19/apps.py | from django.apps import AppConfig
class Trans19Config(AppConfig):
    """Django app configuration for the trans_19 case-tracking app."""
    name = 'trans_19'
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,320 | michmaml/trans-19 | refs/heads/master | /trans_19/models.py | from django.db import models
from django.urls import reverse
# Create your models here.
# The 18 districts of Hong Kong, used as the choice set for
# Location.district (stored value == display value).
DISTRICTS_CHOICES = (
    ('Central & Western', 'Central & Western'),
    ('Eastern', 'Eastern'),
    ('Islands', 'Islands'),
    ('Kowloon City', 'Kowloon City'),
    ('Kwai Tsing', 'Kwai Tsing'),
    ('Kwun Tong', 'Kwun Tong'),
    ('North', 'North'),
    ('Sai Kung', 'Sai Kung'),
    ('Sha Tin', 'Sha Tin'),
    ('Sham Shui Po', 'Sham Shui Po'),
    ('Southern', 'Southern'),
    ('Tai Po', 'Tai Po'),
    ('Tsuen Wan', 'Tsuen Wan'),
    ('Tuen Mun', 'Tuen Mun'),
    ('Wan Chai', 'Wan Chai'),
    ('Wong Tai Sin', 'Wong Tai Sin'),
    ('Yau Tsim Mong', 'Yau Tsim Mong'),
    ('Yuen Long', 'Yuen Long')
)
# Why a patient was at a location; shared by Location.category and
# Visit.category.  NOTE(review): the 'OTHER' display label breaks the
# capitalisation of the other entries — looks unintentional, but changing
# it alters user-facing text, so it is only flagged here.
VISIT_CHOICES = (
    ('residence', 'Residence'),
    ('workplace', 'Workplace'),
    ('visit', 'Visit'),
    ('other', 'OTHER')
)
class chp_staff_data(models.Model):
    """Extra CHP staff identifiers, loosely linked to an auth user.

    NOTE(review): the class name breaks PEP 8 CapWords and `username` is
    a plain CharField rather than a ForeignKey to the auth user —
    renaming/relinking would touch migrations and callers, so both are
    left as-is and only flagged here.
    """
    # Username of the matching django.contrib.auth user (no FK constraint).
    username = models.CharField('User name', max_length=30)
    chp_staff_number = models.CharField('chp_staff_number', max_length=30)
    epidemiologist_number = models.CharField(
        'epidemiologist_number', max_length=30)
class Patient(models.Model):
    """A confirmed case being tracked by the system."""
    name = models.CharField('Name', max_length=30)
    idNum = models.CharField('ID Number', max_length=10)
    dateBirth = models.DateField('Date of Birth(YYYY-MM-DD)')
    dateConfi = models.DateField('Date of Confirmation(YYYY-MM-DD)')
    caseNum = models.IntegerField('Case Number')

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Detail page for this patient (URL name defined in trans_19/urls.py).
        return reverse('trans_19_patient', kwargs={'patient': self.pk})
class Location(models.Model):
    """A place a patient visited, with map coordinates for plotting."""
    name = models.CharField('Location Visited', max_length=70)
    address = models.CharField(max_length=70)
    district = models.CharField(max_length=17, choices=DISTRICTS_CHOICES)
    # NOTE(review): integer coordinates suggest a projected grid rather
    # than lat/long — confirm the coordinate system with the map frontend.
    xCoord = models.IntegerField('X Coordinate')
    yCoord = models.IntegerField('Y Coordinate')
    category = models.CharField(max_length=9, choices=VISIT_CHOICES)

    def __str__(self):
        return self.name
class Visit(models.Model):
    """A patient's stay at (or trip to) a location over a date range."""
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    # Nullable: a visit may be recorded before its location is known.
    location = models.ForeignKey(
        Location, on_delete=models.CASCADE, null=True, blank=True)
    date_From = models.DateField('Date From (YYYY-MM-DD)')
    date_To = models.DateField('Date To (YYYY-MM-DD)')
    details = models.CharField(max_length=70)
    category = models.CharField(max_length=9, choices=VISIT_CHOICES)

    def __str__(self):
        # BUGFIX: location is nullable (null=True above), so the old
        # unconditional self.location.name raised AttributeError for
        # visits without a location.
        where = self.location.name if self.location else 'unknown location'
        return f'{self.patient} visit - {where}'

    def get_absolute_url(self):
        # Link back to the owning patient's detail page.
        return reverse('trans_19_patient', kwargs={'patient': self.patient.id})
| {"/trans_19/views.py": ["/trans_19/models.py"], "/trans_19/urls.py": ["/trans_19/views.py"], "/trans_19/admin.py": ["/trans_19/models.py"], "/chpstaff/views.py": ["/chpstaff/forms.py", "/trans_19/models.py"]} |
50,322 | piotrek91666/scripts | refs/heads/master | /openwrt_hosts.py | #!/usr/bin/env python3
# openwrt_hosts.py - Utility to list hosts defined in your openwrt router.
# Version: 0.1
# (c) 2016 Piotr Grzeszczak
# http://www.grzeszczak.pw
# License: GPLv3
import socket
import argparse
from subprocess import Popen, PIPE, STDOUT


def parse_hosts(lines):
    """Parse 'config host' stanzas from OpenWrt /etc/config/dhcp lines.

    Returns one dict per stanza mapping option names to values
    (e.g. {'name': ..., 'ip': ..., 'mac': ...}).

    BUGFIXES vs. the old inline loop:
      * consecutive 'config host' headers no longer merge two hosts
        into one dict (blank lines are stripped, so headers ARE adjacent);
      * the final stanza is no longer dropped when the file ends;
      * options of a following non-host stanza are no longer collected
        (the old code never cleared its opt_sem flag after flushing).
    """
    hosts = []
    current = None  # dict while inside a 'config host' stanza, else None
    for line in lines:
        if line == 'config host':
            if current:
                hosts.append(current)
            current = {}
        elif current is not None and line.split()[0] == 'option':
            _, key, value = line.split()[:3]
            current[key.strip("'")] = value.strip("'")
        else:
            # Any other stanza header terminates the host section.
            if current:
                hosts.append(current)
            current = None
    if current:
        hosts.append(current)
    return hosts


def fetch_dhcp_lines(host, path):
    """SFTP *path* from *host* (as root) and return its non-empty, stripped lines."""
    import paramiko  # local import keeps parse_hosts usable without paramiko
    ssh_client = paramiko.SSHClient()
    ssh_client.load_system_host_keys()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh_client.connect(host, 22, 'root', look_for_keys=True)
    except (paramiko.SSHException, socket.gaierror) as e:
        print("Connection error: {}".format(e))
        exit(1)
    sftp = ssh_client.open_sftp()
    try:
        remote_file = sftp.open(path, 'r', -1)
    except FileNotFoundError as e:
        print("Error while opening remote file: {}\n".format(path), e, sep='')
        exit(1)
    lines = [line.strip() for line in remote_file if line.strip() != '']
    sftp.close()
    ssh_client.close()
    return lines


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("host",
                        help="OpenWrt host")
    parser.add_argument("-f", "--file",
                        help="dhcp file (default /etc/config/dhcp)",
                        default='/etc/config/dhcp')
    args = parser.parse_args()
    for host in parse_hosts(fetch_dhcp_lines(args.host, args.file)):
        # One ping to report whether the host is currently reachable.
        ping_result = Popen(["/bin/ping", "-c1", host["name"]], stderr=STDOUT,
                            stdout=PIPE)
        ping_state = ping_result.communicate()[0], ping_result.returncode
        print("Host: {}\tIP: {}\tMAC: {}\t State: {}".format(
            host["name"], host["ip"], host["mac"],
            'DOWN' if ping_state[1] else 'UP').expandtabs(25))


if __name__ == '__main__':
    main()
| {"/openwrt_addhost.py": ["/openwrt/UCI.py"]} |
50,323 | piotrek91666/scripts | refs/heads/master | /openwrt/UCI.py | #!/usr/bin/env python
# openwrt_uci.py - Wrapper to OpenWRT UCI System.
# Version: 0.1
# (c) 2016 Piotr Grzeszczak
# http://www.grzeszczak.pw
# License: GPLv3
class UCI:
    """Thin SSH wrapper around the OpenWrt UCI command-line tool.

    Either reuses an injected paramiko-like client or opens its own
    root SSH connection to *ow_host*.  Usable as a context manager.
    """

    def __init__(self, ow_host, paramiko_instance=None):
        self.paramiko_instance = paramiko_instance
        if not self.paramiko_instance:
            self.__connect(ow_host)
        else:
            self.ssh_client = paramiko_instance

    def __connect(self, ow_host):
        # Imported lazily so the class works with an injected client
        # even when paramiko is not installed.
        import paramiko
        import socket
        self.ssh_client = paramiko.SSHClient()
        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            print('Connecting to: {}'.format(ow_host))
            self.ssh_client.connect(ow_host, 22, 'root', look_for_keys=True)
        except (paramiko.SSHException, socket.gaierror) as e:
            print("Connection error: {}".format(e))
            exit(1)

    def _send_cmd(self, command):
        """Run *command* remotely; return its stdout, or None on remote error."""
        stdin, stdout, stderr = self.ssh_client.exec_command(command=command, timeout=25)
        stderr_r = stderr.read()
        if stderr_r:
            print('Error occurred while executing remote command: {}\n{}'.format(command, stderr_r.decode()))
            return None
        return stdout.read()

    # BUGFIX: every wrapper below used to discard _send_cmd's result, so
    # read operations such as get/show/changes always returned None.
    # They now return the command output (still None on remote error) —
    # backward compatible for callers that ignored the return value.
    def cfg_export(self, cmd_string):
        return self._send_cmd('uci export {}'.format(cmd_string))

    def cfg_import(self, cmd_string):
        return self._send_cmd('uci import {}'.format(cmd_string))

    def cfg_changes(self, cmd_string):
        return self._send_cmd('uci changes {}'.format(cmd_string))

    def commit(self, cmd_string):
        return self._send_cmd('uci commit {}'.format(cmd_string))

    def add(self, cmd_string):
        print('Adding new section: {}'.format(cmd_string))
        return self._send_cmd('uci add {}'.format(cmd_string))

    def add_list(self, cmd_string):
        return self._send_cmd('uci add_list {}'.format(cmd_string))

    def del_list(self, cmd_string):
        return self._send_cmd('uci del_list {}'.format(cmd_string))

    def show(self, cmd_string):
        return self._send_cmd('uci show {} -X'.format(cmd_string))

    def get(self, cmd_string):
        return self._send_cmd('uci get {}'.format(cmd_string))

    def set(self, cmd_string):
        return self._send_cmd('uci set {}'.format(cmd_string))

    def delete(self, cmd_string):
        return self._send_cmd('uci delete {}'.format(cmd_string))

    def rename(self, cmd_string):
        return self._send_cmd('uci rename {}'.format(cmd_string))

    def revert(self, cmd_string):
        return self._send_cmd('uci revert {}'.format(cmd_string))

    def reorder(self, cmd_string):
        return self._send_cmd('uci reorder {}'.format(cmd_string))

    def __enter__(self):
        # Context-manager support so callers can write `with UCI(host) as u:`.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # BUGFIX: __exit__ must accept the exception triple; the old
        # one-argument form raised TypeError inside a `with` block.
        # Only close connections we opened ourselves.
        if not self.paramiko_instance:
            self.ssh_client.close()
| {"/openwrt_addhost.py": ["/openwrt/UCI.py"]} |
50,324 | piotrek91666/scripts | refs/heads/master | /randmac.py | #!/usr/bin/env python
# randmac.py - Utility to generate random mac address for vm's or containers.
# Version: 0.1
# (c) 2016 Piotr Grzeszczak
# http://www.grzeszczak.pw
# License: GPLv3
from sys import argv
from random import randint
from hashlib import md5

# Generate a MAC whose first three octets are derived deterministically
# from an optional seed argument (argv[1]) and whose last three are random.
# BUGFIX/idiom: renamed `hash` -> `hasher`; the old name shadowed the
# built-in hash().
hasher = md5()
hasher.update((argv[1] if len(argv) > 1 else '').encode())
mac = hasher.hexdigest()[:6] + "%02x%02x%02x" % (randint(0, 255), randint(0, 255), randint(0, 255))
print(':'.join([mac[i:i+2] for i in range(0, len(mac), 2)]))
| {"/openwrt_addhost.py": ["/openwrt/UCI.py"]} |
50,325 | piotrek91666/scripts | refs/heads/master | /openwrt_addhost.py | #!/usr/bin/env python
# openwrt_hosts.py - Utility to add host and domain in your openwrt router.
# Version: 0.1
# (c) 2016 Piotr Grzeszczak
# http://www.grzeszczak.pw
# License: GPLv3
import argparse
from openwrt.UCI import UCI

# CLI: gather the router address plus the host record fields.
parser = argparse.ArgumentParser()
parser.add_argument("ow_host", help="openwrt host")
parser.add_argument('-n', "--name", help="hostname")
parser.add_argument('-m', "--mac", help="hardware address")
parser.add_argument('-i', "--ip", help="ip address")
args = parser.parse_args()
# Open the SSH/UCI session.  NOTE(review): the actual add/commit of the
# host record is not implemented yet — the parsed name/mac/ip are unused.
ow = UCI(args.ow_host)
| {"/openwrt_addhost.py": ["/openwrt/UCI.py"]} |
50,326 | piotrek91666/scripts | refs/heads/master | /wol.py | #!/usr/bin/env python
# wol.py - Utility to awake machines using the Wake on LAN.
# Version: 0.1
# (c) 2016 Piotr Grzeszczak
# http://www.grzeszczak.pw
# License: GPLv3
import socket
import struct
import argparse


def normalize_mac(mac):
    """Return *mac* as 12 bare hex digits; raise ValueError if malformed.

    Accepts either the 17-character separator-delimited form
    (aa:bb:cc:dd:ee:ff — the 3rd character is taken as the separator)
    or the bare 12-character form.
    """
    if len(mac) == 17:
        mac = mac.replace(mac[2], '')
    if len(mac) != 12:
        raise ValueError(mac)
    return mac


def build_magic_packet(macaddress):
    """Build the WoL magic packet: 6 x 0xFF followed by repeats of the MAC.

    Raises ValueError if *macaddress* contains non-hex characters.
    """
    data = b'FFFFFFFFFFFF' + (macaddress * 20).encode()
    send_data = b''
    for i in range(0, len(data), 2):
        send_data += struct.pack(b'B', int(data[i:i + 2], 16))
    return send_data


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("mac",
                        help="hardware address")
    parser.add_argument("-p", "--port",
                        help="destination port (default 9)",
                        default=9,
                        type=int)
    parser.add_argument("-i", "--interface",
                        help="interface (default eth0)",
                        default='eth0')
    parser.add_argument("-b", "--bcast",
                        help="broadcast address (default 255.255.255.255)",
                        default='255.255.255.255')
    args = parser.parse_args()

    # BUGFIX: the script's bare `except:` clauses are replaced throughout
    # with the specific exceptions each step can raise.
    try:
        macaddress = normalize_mac(args.mac)
    except ValueError:
        print('\nInvalid hardware address.')
        exit(1)

    print("Sending magic packet to {}:{} with {}... ". format(macaddress, args.port, args.bcast), end='')
    try:
        send_data = build_magic_packet(macaddress)
    except ValueError:
        print("\nError occurred while translating mac address.")
        exit(1)

    import netifaces  # local import: only needed when actually sending
    try:
        iface_addr = netifaces.ifaddresses(args.interface)[netifaces.AF_INET][0]['addr']
    except (ValueError, KeyError, IndexError):
        print("\nError occurred while specifying interface.")
        exit(1)

    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind((iface_addr, 0))
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.connect((args.bcast, args.port))
        sock.send(send_data)
        sock.close()
    except OSError:
        print("failed!")
        exit(1)
    else:
        print("success!")


if __name__ == '__main__':
    main()
| {"/openwrt_addhost.py": ["/openwrt/UCI.py"]} |
50,327 | piotrek91666/scripts | refs/heads/master | /iflist.py | #!/usr/bin/env python
# iflist.py - Utility to list all network interfaces.
# Version: 0.1
# (c) 2016 Piotr Grzeszczak
# http://www.grzeszczak.pw
# License: GPLv3
import netifaces

# For each interface, print every address family (AF_INET, AF_INET6,
# AF_LINK, ...) and the key/value pairs of each address record.
for inet_face in netifaces.interfaces():
    print("Interface: {}".format(inet_face))
    ifaddr = netifaces.ifaddresses(inet_face)
    for addr_id in ifaddr:
        print("  {}:".format(addr_id))
        for addr_num in ifaddr[addr_id]:
            for addr_val in addr_num:
                print("    {}: {}".format(addr_val, addr_num[addr_val]))
    print("")
50,328 | piotrek91666/scripts | refs/heads/master | /spotlight_fetcher.py | #!/usr/bin/env python3
# spotlight_fetcher.py - Utility to download wallpapers from Microsoft Spotlight.
# Version: 0.1
# (c) 2016 Piotr Grzeszczak
# http://www.grzeszczak.it
# Based on https://github.com/liu-yun/spotlight/blob/master/spotlight.py
# License: GPLv3
import json
import os
import random
import uuid
import requests
import hashlib
import signal
from datetime import datetime
# Where downloaded wallpapers are stored: ~/Pictures/Wallpapers/Spotlight
dest = os.path.join(os.path.expanduser('~'), "Pictures","Wallpapers", "Spotlight")
# Paths whose content duplicated an existing file (by md5) this run;
# they are skipped instead of being re-downloaded.
file_blacklist = []
# Catch Ctrl + C signal
def ctrlc_handler(signum, frame):
    """SIGINT handler: print a notice and exit cleanly with status 0.

    Parameters renamed from (signal, frame): the old first name shadowed
    the `signal` module inside the handler body.
    """
    print('\nSIGINT signal received!\nExiting now...')
    exit(0)
def random_id():
    """Return a fresh random 32-character hex id (a UUID4 without dashes)."""
    return uuid.uuid4().hex
def read_md5(file):
    """Return the hex MD5 digest of the file at path *file*."""
    with open(file, 'rb') as handle:
        contents = handle.read()
    return hashlib.md5(contents).hexdigest()
def update_hash_db(path):
    """MD5-hash every regular file directly inside *path*; return the list.

    BUGFIX: uses os.path.join instead of string '/'-concatenation and
    skips subdirectories — read_md5(open(...)) raised IsADirectoryError
    on any folder inside the wallpaper directory.
    """
    md5_list = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isfile(full):
            md5_list.append(read_md5(full))
    return md5_list
def downloader(url, session, dest_path):
    """Stream *url* into *dest_path* via *session*.

    Returns the file's md5 hex digest on success, or False when the
    HTTP status is not 200.
    """
    response = session.get(url)
    if response.status_code != 200:
        print('Download failed! ({})'.format(response.status_code))
        return False
    with open(dest_path, 'wb+') as out:
        for piece in response:
            out.write(piece)
    checksum = read_md5(dest_path)
    print('Downloading {} ({})'.format(dest_path.split('/')[-1:][0], checksum))
    return checksum
def worker():
    """One fetch cycle: refresh the md5 index, ask the Spotlight API for
    image URLs, and download any that are new."""
    print("Updating MD5 hash file list... ", end='')
    hash_db = update_hash_db(dest)
    print("items: {}".format(len(hash_db)))
    # Magical request
    print('Requesting json...')
    cache_url = 'https://arc.msn.com/v3/Delivery/Cache'
    # Query parameters mimic a real Windows lock-screen client so the
    # service serves wallpaper metadata.  NOTE(review): most values are
    # hard-coded fingerprints; presumably only pid/ctry/lc matter.
    data = {'pid': random.choice([209562, 209567, 279978]),
            'fmt': 'json',  # Output format
            'ctry': 'PL',  # Country
            'time': datetime.now().strftime('%Y%m%dT%H%M%SZ'),  # Current time
            'lc': 'pl-pl',  # https://msdn.microsoft.com/pl-pl/windows/uwp/publish/supported-languages
            'pl': 'pl-pl,en-US',
            'idtp': 'mid',
            'uid': uuid.uuid4(),
            'aid': '00000000-0000-0000-0000-000000000000',
            'ua': 'WindowsShellClient/9.0.40929.0 (Windows)',
            'asid': random_id(),
            'ctmode': 'ImpressionTriggeredRotation',
            'arch': 'x64',
            'cdmver': '10.0.14936.1000',
            'devfam': 'Windows.Desktop',
            'devform': 'Unknown',
            'devosver': '10.0.14936.1000',
            'disphorzres': 1920,
            'dispsize': 15.5,
            'dispvertres': 1080,
            'fosver': 14352,
            'isu': 0,
            'lo': 510893,
            'metered': False,
            'nettype': 'wifi',
            'npid': 'LockScreen',
            'oemid': 'VMWARE',
            'ossku': 'Professional',
            'prevosver': 14257,
            'smBiosDm': 'VMware Virtual Platform',
            'smBiosManufacturerName': 'VMware, Inc.',
            'tl': 4,
            'tsu': 6788
            }
    # Get image urls
    r = requests.get(cache_url, params=data)
    urls = []
    try:
        for item in r.json()['batchrsp']['items']:
            d = json.loads(item['item'])
            urls.append(d['ad']['image_fullscreen_001_landscape']['u'])
    except:
        # NOTE(review): bare except hides any schema change or JSON error;
        # consider narrowing to (KeyError, ValueError).
        print("Broken data...")
    print("Found {} images...".format(len(urls)))
    # Download them
    # TODO: Cleanup this part.
    with requests.Session() as s:
        for url in urls:
            local_path = os.path.join(dest, url.split('/')[3] + '.jpg')
            if os.path.exists(local_path) is False and not local_path in file_blacklist:
                md5sum = downloader(url, s, local_path)
                # A duplicate image under a new name: delete it and
                # remember the path so it is never fetched again this run.
                if md5sum in hash_db:
                    print("Exist (md5 sum), added to cache blacklist.")
                    os.remove(local_path)
                    file_blacklist.append(local_path)
                else:
                    hash_db.append(md5sum)
            else:
                print("Exist")
def main():
    """Entry point: ensure the destination folder exists, then loop forever."""
    print("Ultimate Spotlight Fetcher!")
    # Signals handlers
    signal.signal(signal.SIGINT, ctrlc_handler)
    print("Press Ctrl + C for exit")
    if not os.path.exists(dest):
        print("Destination folder {} not found, creating it...".format(dest))
        os.mkdir(dest)
    else:
        print("Destination: {}".format(dest))
    # Runs until interrupted; ctrlc_handler performs the clean exit.
    while(1):
        worker()


if __name__ == '__main__':
    main()
| {"/openwrt_addhost.py": ["/openwrt/UCI.py"]} |
50,329 | piotrek91666/scripts | refs/heads/master | /encrypt.py | #!/usr/bin/env python
import os
import argparse
import random
import struct
import hashlib
import getpass
from Crypto.Cipher import AES
def encrypt_file(key, in_filename, chunksize=64*1024):
    """AES-CBC encrypt *in_filename* into '<in_filename>.encrypted'.

    Output layout: 8-byte little-endian original file size, then the
    16-byte random IV, then the ciphertext (space-padded to 16-byte
    blocks).
    """
    target = in_filename + '.encrypted'
    iv = bytes([random.randint(0,0xFF) for i in range(16)])
    cipher = AES.new(key, AES.MODE_CBC, iv)
    original_size = os.path.getsize(in_filename)
    with open(in_filename, 'rb') as source, open(target, 'wb') as sink:
        sink.write(struct.pack('<Q', original_size))
        sink.write(iv)
        while True:
            block = source.read(chunksize)
            if not block:
                break
            remainder = len(block) % 16
            if remainder:
                # Pad the final short block up to the AES block size.
                block += b' ' * (16 - remainder)
            sink.write(cipher.encrypt(block))
def decrypted_name(in_filename):
    """Return *in_filename* minus its final extension (e.g. '.encrypted').

    BUGFIX: the old logic joined the dot-split parts with '' for files
    not starting with '.', silently deleting every interior dot
    ('file.txt.encrypted' -> 'filetxt').
    """
    return in_filename.rsplit('.', 1)[0]


def decrypt_file(key, in_filename, chunksize=24*1024):
    """Decrypt a file produced by encrypt_file, restoring its exact size.

    Reads the 8-byte original length and 16-byte IV header, streams
    AES-CBC decryption into the output file, then truncates away the
    block padding.

    BUGFIXES: the output name is now derived from the *in_filename*
    parameter rather than the global CLI `args` (the old code broke when
    called with any other path), and two stray debug print() calls were
    removed.
    """
    out_filename = decrypted_name(in_filename)
    with open(in_filename, 'rb') as infile:
        origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
        iv = infile.read(16)
        decryptor = AES.new(key, AES.MODE_CBC, iv)
        with open(out_filename, 'wb') as outfile:
            while True:
                chunk = infile.read(chunksize)
                if len(chunk) == 0:
                    break
                outfile.write(decryptor.decrypt(chunk))
            # Trim the encrypt-time padding back to the original size.
            outfile.truncate(origsize)
def passwd_prompt():
    """Prompt for a password and return its SHA-256 digest as the AES key."""
    return hashlib.sha256(getpass.getpass('Password: ').encode()).digest()
if __name__ == "__main__":
    argParser = argparse.ArgumentParser()
    argParser.add_argument("file", help="path to file")
    args = argParser.parse_args()
    # Files ending in .encrypted are decrypted; anything else is encrypted.
    # The input file is deleted afterwards in both directions.
    if args.file.split('.')[-1] == "encrypted":
        print("Decrypting file...")
        decrypt_file(passwd_prompt(), args.file)
        os.remove(args.file)
    else:
        print("Encrypting file...")
        encrypt_file(passwd_prompt(), args.file)
        os.remove(args.file)
| {"/openwrt_addhost.py": ["/openwrt/UCI.py"]} |
50,340 | arthurs/randomchatroom | refs/heads/master | /textfilter.py |
def formatLinks(text):
    """Placeholder link formatter: currently returns *text* unchanged.

    Kept so callers can rely on the interface while link markup is
    handled elsewhere.
    """
    return text
| {"/tests/unit/filter_test.py": ["/filter.py"]} |
50,341 | arthurs/randomchatroom | refs/heads/master | /filter.py | import re
import cgi
import Cookie
import logging
swearList = None
#Deletes strings that are too long.
def length(string):
    """Drop any space-separated token of 120 or more characters.

    Guards the chat against one enormous unbreakable word; leading and
    trailing whitespace is stripped, matching the original behaviour.

    BUGFIX/cleanup: the old loop mutated the list it was iterating
    (`split[split.index(s)] = []`) — a fragile no-op that also misfired
    on duplicate tokens via index().  This filter-and-join is equivalent
    for the output and side-effect free.
    """
    return " ".join(word for word in string.split(" ") if len(word) < 120).strip()
def getSwears():
    """Return the banned-word list, loading the './filter' file on first use.

    The result is cached in the module-global `swearList`; one line per
    pattern (trailing newlines are kept, which re.VERBOSE in swears()
    tolerates).
    """
    global swearList
    if swearList is not None:  # idiom fix: was `!= None`
        return swearList
    logging.info("loading swearList")
    # BUGFIX: `with` guarantees the handle is closed even if readlines()
    # raises; the old open/readlines/close leaked on error.
    with open("filter", "r") as file:
        swearList = file.readlines()
    return swearList
#Deletes swears
def swears(string):
    """Replace every banned word from the filter list with 'banana'."""
    for bad_word in getSwears():
        matcher = re.compile(bad_word, re.IGNORECASE | re.VERBOSE)
        string = re.sub(matcher, 'banana', string)
    return string
#@Name Matching, because I could and it was suggested. =3
def twitter(string, name):
    """Highlight a leading @mention Twitter-style when it matches *name*.

    If the message starts with '@<word>' and that word occurs in *name*
    (case-insensitive), every occurrence of it is wrapped in blinking
    red markup showing the full *name*, and the leading '@' is dropped.
    """
    leading = re.search("\A@(\w+)", string)
    if leading:
        mention = re.compile(leading.group(1), re.I)
        if re.search(mention, name):
            highlighted = re.sub(
                mention,
                "<blink><font color='red'>@" + name + "</font></blink>",
                string)
            string = highlighted[1:]
    return string
#Deletes swears and highlights links
def html(string, extra=""):
    """HTML-escape *string* and convert bare URLs into anchor tags.

    *extra* is accepted for call-site compatibility but is unused here.

    Cleanup: removed a large commented-out tag-whitelisting experiment
    and a dead 'img:' pattern whose search result was never used (its
    consuming `if` was entirely commented out).
    """
    string = cgi.escape(string)
    # URL matching: absolute http(s) links or bare www.-prefixed hosts.
    pattern = "((?:(?:http[s]?://)|(?:www.))(?:\S+))"
    search = re.search(pattern, string)
    if search:
        for search in re.finditer(pattern, string):
            url = search.group(0)
            if search.group(1) is not None:  # idiom fix: was `!= None`
                if re.search("https://", url):
                    url = "https://" + re.sub("https://", "", url)
                    code = "<a href='" + url + "'>" + url + "</a>"
                else:
                    # Normalise to a single http:// prefix.
                    url = re.sub("http://", "", url)
                    code = "<a href='http://" + url + "'>" + url + "</a>"
                string = re.sub("(?:http[s]?://)?" + re.escape(url), code, string)
    return string
def all(string, extra=""):
    """Run the full inbound-text pipeline: profanity, length cap, then HTML."""
    for stage in (swears, length):
        string = stage(string)
    return html(string, extra)
| {"/tests/unit/filter_test.py": ["/filter.py"]} |
50,342 | arthurs/randomchatroom | refs/heads/master | /randomchatroom.py | import cgi
import os
import re
import datetime
import logging
import Cookie
import filter
from google.appengine.api import users
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
class Message(db.Model):
    """A single chat message, stored in the App Engine datastore."""
    author = db.UserProperty()
    content = db.StringProperty(multiline=True)
    date = db.DateTimeProperty(auto_now_add=True)
    alias = db.StringProperty()
    room = db.StringProperty()
class MessageView:
    """Presentation wrapper around a Message for template rendering."""
    def __init__(self, message, name):
        self.message = message
        # Fall back to an anonymous alias when the poster gave none.
        if message.alias:
            self.author = message.alias
        else:
            self.author = "A monkey"
        # Cleanup: removed an unused `now = datetime.datetime.now()` local.
        self.date = message.date
        if name is None:
            name = ""
        # Highlight @<name> mentions Twitter-style in the rendered content.
        self.content = filter.twitter(message.content, name)
class MainPage(webapp.RequestHandler):
    """Serves the chat page; the request path doubles as the room name."""
    def get(self):
        # Re-use the alias the client stored last time they posted.
        alias = self.request.cookies.get('alias')
        if not alias: alias = ""
        room = self.request.path
        logging.info("****** room is " + room)
        template_values = {'alias' : alias, 'room' : room}
        path = os.path.join(os.path.dirname(__file__), 'index.html')
        self.response.out.write(template.render(path, template_values))
class Messages(webapp.RequestHandler):
    """Message API: POST appends a filtered message to a room; GET renders
    the room's 50 most recent messages with Last-Modified caching."""
    def post(self):
        message = Message()
        room = self.request.get('room')
        # Rooms are case-insensitive: stored uppercased.
        message.room = room.upper()
        alias = self.request.get('alias')
        if alias:
            alias = alias.lstrip('"')
            alias = alias.rstrip('"')
            # Persist the chosen alias client-side for future visits.
            cookie = Cookie.SimpleCookie()
            cookie['alias'] = alias
            cookie['alias']['path'] = '/'
            print cookie
        elif self.request.cookies.get('alias'):
            alias = self.request.cookies.get('alias')
            alias = alias.lstrip('"')
            alias = alias.rstrip('"')
        content = self.request.get('content')
        # Both alias and content pass through the profanity/length/HTML filter.
        message.alias = filter.all(alias)
        message.content = filter.all(self.request.get('content'),alias)
        if message.content:
            message.put()
            # Bump the room's freshness marker used by get()'s 304 check.
            memcache.set("last_message_posted_at_"+message.room, datetime.datetime.utcnow())
        self.redirect(room)
    def get(self):
        room = self.request.path.split("/messages")[1]
        lastModifiedTime = memcache.get("last_message_posted_at_"+room)
        # would be nice to initialize lastModifiedTime in memcache on app startup somehow so we dont need the None check
        if lastModifiedTime is None:
            lastModifiedTime = datetime.datetime.utcnow()
            memcache.set("last_message_posted_at_"+room, datetime.datetime.utcnow())
        # Short-circuit with 304 when the client already has the latest render.
        if self.request.headers.get('If-Modified-Since') == lastModifiedTime.strftime('%a, %d %b %Y %H:%M:%S GMT'):
            return self.response.set_status(304)
        messages_query = db.GqlQuery("SELECT * FROM Message WHERE room = :1 ORDER BY date DESC",room)
        messages = messages_query.fetch(50)
        message_views = []
        alias = self.request.cookies.get('alias')
        for message in messages:
            message_views.append(MessageView(message,alias)) #Adapting for Twitter style.
        template_values = {
            'messages': message_views,
        }
        path = os.path.join(os.path.dirname(__file__), '_messages.html')
        self.response.out.write(template.render(path, template_values))
        self.response.headers['Cache-Control'] = 'must-revalidate'
        self.response.headers['Expires'] = ''
        self.response.headers['Last-Modified'] = lastModifiedTime.strftime('%a, %d %b %Y %H:%M:%S GMT')
# Route /messages/* to the message API; every other path serves the chat
# page, with the path itself acting as the room name.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/messages/.*', Messages),
     ('/.*', MainPage)],
    debug=True)


def main():
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
| {"/tests/unit/filter_test.py": ["/filter.py"]} |
50,343 | arthurs/randomchatroom | refs/heads/master | /tests/unit/testhelper.py | import sys
from os.path import dirname
# import the app code by putting directory ../.. on the path
sys.path.append(dirname(dirname(dirname(__file__))))
| {"/tests/unit/filter_test.py": ["/filter.py"]} |
50,344 | arthurs/randomchatroom | refs/heads/master | /tests/unit/filter_test.py | import testhelper
import unittest
import filter
def runAgainstSamples(testCase, samples, testFunction):
    """Assert testFunction(input) == expected for every (input, expected) pair."""
    for sample_input, expected in samples:
        testCase.assertEqual(expected, testFunction(sample_input))
class HtmlFilterTest(unittest.TestCase):
    """Checks that filter.html turns bare URLs into anchor tags."""
    # (input, expected-output) pairs covering http, www-only, and https forms.
    samples = [
        ("http://filter.com is great",
         "<a href='http://filter.com'>filter.com</a> is great"),
        ("hey http://filter.com is great",
         "hey <a href='http://filter.com'>filter.com</a> is great"),
        ("http://www.filter.com",
         "<a href='http://www.filter.com'>www.filter.com</a>"),
        ("www.filter.com",
         "<a href='http://www.filter.com'>www.filter.com</a>"),
        ("https://www.filter.com",
         "<a href='https://www.filter.com'>https://www.filter.com</a>"),
    ]

    def testHtmlFilter(self):
        runAgainstSamples(self, self.samples, lambda input: filter.html(input))
class SwearFilterTest(unittest.TestCase):
    """Checks that filter.swears replaces banned words (reads the 'filter' file)."""
    samples = [("fuck you" , "banana you"), ("no swears", "no swears")]

    def testSwearFilter(self):
        runAgainstSamples(self, self.samples, lambda input: filter.swears(input))
if __name__ == "__main__":
    # Allow running this file directly as well as via a test runner.
    unittest.main()
| {"/tests/unit/filter_test.py": ["/filter.py"]} |
50,348 | GreamDesu/ReTalk-backend | refs/heads/master | /pretty_funny_tools/urls.py | from django.conf.urls import patterns, url
from django.contrib import admin
import views as pretty_views
# admin.autodiscover()
# Honeypot routes: serve fake Joomla/WordPress admin pages to CMS scanners.
urlpatterns = patterns('',
    url(r'^administrator/$', pretty_views.JoomlaAdminView.as_view(), name='joomla_admin'),
    url(r'^administrator/index.php$', pretty_views.JoomlaAdminFailView.as_view(),
        name='joomla_admin_fail'),
    url(r'^wp-login.php$', pretty_views.WPAdminView.as_view(), name='wp_admin'),
)
| {"/pretty_funny_tools/admin.py": ["/pretty_funny_tools/models.py"], "/api/migrations/0004_auto_20150721_2329.py": ["/api/models.py"]} |
50,349 | GreamDesu/ReTalk-backend | refs/heads/master | /pretty_funny_tools/forms.py | from django.contrib.auth.forms import AuthenticationForm
try:
from captcha.fields import CaptchaField
except ImportError:
pass
class PrettyAuthForm(AuthenticationForm):
    """Login form that adds a CAPTCHA field when django-simple-captcha
    is installed; degrades to a plain AuthenticationForm otherwise
    (CaptchaField is then undefined, raising NameError at class body time).
    """
    try:
        # NOTE(review): 'capcha' looks like a typo for 'captcha', but
        # renaming changes the rendered form field name — left as-is.
        capcha = CaptchaField()
    except NameError:
        pass
| {"/pretty_funny_tools/admin.py": ["/pretty_funny_tools/models.py"], "/api/migrations/0004_auto_20150721_2329.py": ["/api/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.