index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
8,283
|
smaibom/DiscordSignupBot
|
refs/heads/master
|
/gsheetsapi.py
|
import gspread
from oauth2client import file, client, tools
class Spreadsheet(object):
    """Thin wrapper around a gspread spreadsheet opened by its sheet ID."""

    def __init__(self, sheetID):
        # load_spreadsheet runs the OAuth flow and opens the sheet by key.
        self.sheet = load_spreadsheet(sheetID)

    def add_worksheet(self, sheetName, srows=40, scols=50):
        """
        Adds a new worksheet to the spreadsheet object
        Args:
            sheetName(string): Name of the worksheet to be created
            srows(int): The number of rows the worksheet is initialized with
            scols(int): The number of columns the worksheet is initialized with
        Returns:
            worksheet. A worksheet object on success, None if worksheet name already exist or failed to be created
        """
        try:
            worksheet = self.sheet.add_worksheet(title=sheetName, rows=str(srows), cols=str(scols))
        except gspread.exceptions.APIError:
            # Name collision or API failure — signalled to the caller as None.
            worksheet = None
        return worksheet

    def get_worksheets(self):
        """
        Returns the worksheets in the spreadsheet
        """
        worksheets = self.sheet.worksheets()
        return worksheets

    def del_worksheet(self, worksheet):
        """
        Deletes a worksheet from the spreadsheet
        Args:
            worksheet(Worksheet): A gspread worksheet object to be deleted
        Returns:
            True on success, False if failed to be deleted or does not exist
        """
        try:
            self.sheet.del_worksheet(worksheet)
            return True
        except Exception:
            return False

    def append_col(self, colnum, worksheet):
        """
        Append a column to a worksheet.
        Args:
            colnum(int): Index of the column to append
            worksheet(Worksheet): A gspread worksheet object
        Raises:
            NotImplementedError: always. BUG FIX: the original source left
            this method with no body at all (a syntax error); made the
            missing implementation explicit. TODO: implement.
        """
        raise NotImplementedError("append_col is not implemented yet")
def load_spreadsheet(sheetID):
    """Authorize against the Google Sheets API and open a spreadsheet by key.

    Cached OAuth credentials are read from token.json; if they are missing
    or invalid, the interactive flow is run using credentials.json and the
    result is stored back into token.json.
    """
    scope = 'https://www.googleapis.com/auth/spreadsheets'
    credential_store = file.Storage('token.json')
    credentials = credential_store.get()
    if not credentials or credentials.invalid:
        oauth_flow = client.flow_from_clientsecrets('credentials.json', scope)
        credentials = tools.run_flow(oauth_flow, credential_store)
    return gspread.authorize(credentials).open_by_key(sheetID)
|
{"/signupsystem.py": ["/gsheetsapi.py"]}
|
8,284
|
smaibom/DiscordSignupBot
|
refs/heads/master
|
/signupsystem.py
|
import numpy as np
import gspread
import gsheetsapi
class SignupSystem(object):
    """Signup bookkeeping on top of a Google spreadsheet.

    The spreadsheet is expected to contain the worksheets "Users", "Past"
    and "Upcoming"; a registered user occupies the same row index in all
    three sheets.
    """

    def __init__(self, sheetID):
        self.spreadsheet = gsheetsapi.Spreadsheet(sheetID)
        # Map worksheet title -> gspread worksheet for fast lookup.
        self.worksheets = dict()
        for ws in self.spreadsheet.get_worksheets():
            self.worksheets[ws.title] = ws

    def register(self, userID, numChars=1):
        """
        Register user to system
        Args:
            userID(string): Discord userID
            numChars(int): Number of chars to register, defaults to 1
        Returns:
            True on successfull registration, False if user exists
        TODO:
            Fix the upcoming sheet by updating the sumif and adding -values to upcoming events
        """
        usersheet = self.worksheets["Users"]
        pastsheet = self.worksheets["Past"]
        upcomingsheet = self.worksheets["Upcoming"]
        # First column holds user IDs; skip the header row.
        users = usersheet.col_values(1)[1:]
        if userID in users:
            return False
        # 1-based row index to insert at: header row + existing users + 1.
        si = 2 + len(users)
        # First column of "Upcoming" is the user ID; the rest are events.
        numevents = len(upcomingsheet.row_values(1)) - 1
        usersheet.insert_row([userID, numChars], si)
        pastsheet.insert_row([userID], si)
        upcomingsheet.insert_row([userID] + [-1] * numevents, si)
        return True

    def unregister(self, userID):
        """
        Removes user from system
        Args:
            userID(string): Discord userID
        Returns:
            True on successfull removal, False if userID does not exist
        TODO:
            Add changes to the signup sheet
        """
        usersheet = self.worksheets["Users"]
        pastsheet = self.worksheets["Past"]
        upcomingsheet = self.worksheets["Upcoming"]
        try:
            cell = usersheet.find(userID)
            # The same row index is removed from all three sheets, since a
            # user occupies the same row everywhere.
            usersheet.delete_row(cell.row)
            pastsheet.delete_row(cell.row)
            upcomingsheet.delete_row(cell.row)
            return True
        except gspread.exceptions.CellNotFound:
            return False

    def update_num_chars(self, userID, numChars):
        """
        Updates the number of chars registrated to a userID
        Args:
            userID(string): Discord userID
            numChars(int): The new number of chars
        Returns:
            True on successfull update, False if userID does not exists
        """
        try:
            cell = self.worksheets["Users"].find(userID)
            self.worksheets["Users"].update_cell(cell.row, 2, numChars)
            return True
        except gspread.exceptions.CellNotFound:
            return False

    def get_num_chars(self, userID):
        """
        Gets the number of chars of a user
        Args:
            userID(string): Discord userID
        Returns:
            The number of chars, -1 if userID is not registered
        """
        try:
            cell = self.worksheets["Users"].find(userID)
            # BUG FIX: previously returned the gspread Cell object itself;
            # return the cell's value as the docstring promises.
            return self.worksheets["Users"].cell(cell.row, 2).value
        except gspread.exceptions.CellNotFound:
            return -1

    def create_event(self, date):
        """
        Create a new event column in the "Upcoming" worksheet.
        Args:
            date(string): Event date (currently unused; a placeholder title
                is written — TODO use it for the header cell)
        Returns:
            True on success, False if no users are registered
        """
        upcoming = self.worksheets["Upcoming"]
        length = len(upcoming.col_values(1))
        # If we only have 2 values we do not have any registered users, as
        # the first and last entry of the column are reserved.
        if length <= 2:
            return False
        col = len(upcoming.row_values(1))
        # Spreadsheet letter of the new event column ('A' is chr(65)).
        letter = chr(65 + col)
        # gspread doesn't support insert column, so we fetch the range of
        # cells and update each cell instead.
        cellrange = letter + '1:' + letter + str(length)
        cells = upcoming.range(cellrange)
        # Event format: date header, one signup slot per registered user and
        # a trailing SUMIF totalling available chars based on signups,
        # e.g. =sumif(B2:B5,"=1",Users!B2:B5)
        cells[0].value = "placeholder"
        for i in range(1, len(cells) - 1):
            cells[i].value = -1
        sumif = "=sumif(" + letter + '2:' + letter + str(length - 1)
        sumif += ',"=1",'
        sumif += 'Users!B2:B' + str(length - 1) + ')'
        cells[-1].value = sumif
        upcoming.update_cells(cells[:-1])
        # update_cells has a bug where it prepends a ' to a cell starting
        # with '=', breaking the formula, so write the SUMIF separately.
        upcoming.update_acell(letter + str(length), sumif)
        # BUG FIX: previously fell through returning None on success; report
        # success explicitly to match the False failure path.
        return True
def main():
    # Manual smoke test against a hard-coded spreadsheet ID; registers a
    # test user. The commented calls exercise the other operations.
    sheet = SignupSystem("19lDNiH55dpAJNG573fwxQvM3o3YmY-8M_8k8wDGkDD0")
    #sheet.unregister('ragnors')
    sheet.register('ragn',5)
    #sheet.create_event("tommorow")
if __name__ == '__main__':
    main()
|
{"/signupsystem.py": ["/gsheetsapi.py"]}
|
8,288
|
ericbhanson/cashtag_analyzer
|
refs/heads/master
|
/cashtag_analyzer/__init__.py
|
import sqlalchemy
import sys
import yaml
def connect_to_db(db_settings):
    """Create a SQLAlchemy engine from *db_settings* and return an open connection.

    Expects the keys 'protocol', 'user', 'password', 'host' and 'dbname'.
    """
    url = (db_settings['protocol'] + '://' + db_settings['user'] + ':' +
           db_settings['password'] + '@' + db_settings['host'] + '/' +
           db_settings['dbname'] + '?charset=utf8mb4')
    # pool_recycle keeps connections fresh across MySQL's idle timeout.
    engine = sqlalchemy.create_engine(url, pool_recycle=30)
    return engine.connect()
def get_row_count(db_connection, table):
    """Return a human-readable row count message for *table*."""
    # NOTE(review): fetches every row just to count them; fine for small
    # tables, expensive for large ones.
    rows = db_connection.execute(table.select()).fetchall()
    return '{} row(s) are currently in MySQL database.'.format(len(rows))
def get_table(db_connection, table_name):
    """Reflect *table_name* from the connected database and return the Table object."""
    return sqlalchemy.Table(table_name, sqlalchemy.MetaData(),
                            autoload=True, autoload_with=db_connection)
def insert_data(db_connection, data_to_insert, table):
    """INSERT *data_to_insert* into *table* and print the resulting row count.

    Database errors (sqlalchemy DBAPIError) propagate to the caller.
    """
    insert_query = table.insert(data_to_insert)
    try:
        db_connection.execute(insert_query)
    except sqlalchemy.exc.DBAPIError:
        # Re-raise unchanged; the success message below is skipped.
        raise
    else:
        print('Post-INSERT row count: ' + get_row_count(db_connection, table))
        print('Results collected and available for analysis.')
def load_settings(file_location=None, file_name='settings.yaml'):
    """Load the YAML settings file and return the parsed settings.

    Args:
        file_location: Directory of the settings file; defaults to the first
            command-line argument.
        file_name: Name of the settings file within that directory.
    """
    # BUG FIX: the default was `file_location=sys.argv[1]`, which is
    # evaluated once at import time — crashing any importer started without
    # arguments and freezing the value for later calls. Resolve it lazily.
    if file_location is None:
        file_location = sys.argv[1]
    with open(file_location + file_name, 'rb') as settings_file:
        # safe_load: yaml.load without an explicit Loader can execute
        # arbitrary object constructors and is deprecated/removed in PyYAML.
        return yaml.safe_load(settings_file.read())
|
{"/cashtag_analyzer/tweet_collector.py": ["/cashtag_analyzer/__init__.py"], "/cashtag_analyzer/market_data_collector.py": ["/cashtag_analyzer/__init__.py"]}
|
8,289
|
ericbhanson/cashtag_analyzer
|
refs/heads/master
|
/cashtag_analyzer/tweet_collector.py
|
import cashtag_analyzer
import re
import tweepy
# Connect to the Twitter API using the authorization information provided in the settings file.
def connect_to_twitter(twitter_settings):
    """Build an authenticated tweepy API client from the settings dict."""
    auth = tweepy.OAuthHandler(twitter_settings['consumer_key'],
                               twitter_settings['consumer_secret'])
    auth.set_access_token(twitter_settings['access_token'],
                          twitter_settings['access_token_secret'])
    # Block (rather than fail) when the rate limit is hit, and log it.
    return tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# Get the timeline for each user in the screen name list and examine their Tweets for cashtags using the re module.
# Assemble information about those cashtagged Tweets in a list for storage in a database.
def get_cashtag_tweets(screen_name, twitter_api):
    """Return a list of dicts describing *screen_name*'s Tweets that contain cashtags.

    Each dict has the keys: cashtags (comma-joined), created_at,
    screen_name, tweet_id and tweet_text.
    """
    cashtag_tweets_list = []
    # BUG FIX: raw string for the pattern — '\$' in a plain literal is an
    # invalid escape sequence (SyntaxWarning on modern Python). Compiled
    # once outside the loop.
    cashtag_pattern = re.compile(r'\$([A-Z]+)')
    timeline = tweepy.Cursor(twitter_api.user_timeline, screen_name=screen_name, include_rts=False).items()
    for status in timeline:
        tweet_text = status.text
        regex_result = cashtag_pattern.findall(tweet_text)
        if regex_result:
            created_at = status.created_at
            cashtags = ', '.join(regex_result)
            name = status.user.screen_name
            tweet_id = status.id
            cashtag_tweets_dict = {'cashtags': cashtags, 'created_at': created_at,
                                   'screen_name': name, 'tweet_id': tweet_id, 'tweet_text': tweet_text}
            cashtag_tweets_list.append(cashtag_tweets_dict)
            print(cashtag_tweets_dict)
    return cashtag_tweets_list
# Load the settings from the settings file.
settings = cashtag_analyzer.load_settings()
tweets_table = settings['mysql_connection']['tweets_table']
# Connect to the database and reflect the tweets table.
db_connection = cashtag_analyzer.connect_to_db(settings['mysql_connection'])
table = cashtag_analyzer.get_table(db_connection, tweets_table)
# Connect to Twitter's API.
twitter_api = connect_to_twitter(settings['twitter_api'])
# Load the list of screen names to be examined from the settings file.
screen_names = sorted(settings['screen_names'])
for screen_name in screen_names:
    # Get the list of cashtagged Tweets and store them in a list.
    cashtag_tweets_list = get_cashtag_tweets(screen_name, twitter_api)
    if (cashtag_tweets_list):
        # As a sanity check, get the number of rows in the table before executing the INSERT statement.
        results_text = 'Pre-INSERT row count: ' + cashtag_analyzer.get_row_count(db_connection, table)
        print(results_text)
        # Insert the list of cashtagged Tweets into the database.
        cashtag_analyzer.insert_data(db_connection, cashtag_tweets_list, table)
|
{"/cashtag_analyzer/tweet_collector.py": ["/cashtag_analyzer/__init__.py"], "/cashtag_analyzer/market_data_collector.py": ["/cashtag_analyzer/__init__.py"]}
|
8,290
|
ericbhanson/cashtag_analyzer
|
refs/heads/master
|
/cashtag_analyzer/market_data_collector.py
|
import cashtag_analyzer # Import the modules from the __init__ script.
import ccxt # Import ccxt to connect to exchange APIs.
import collections # Import collections to create lists within dictionaries on the fly.
import datetime # Import datetime for the timedelta and utcfromtimestamp functions.
import numpy # Import numpy to compare the contents of lists.
import re # Import re to split up the lists of symbols into individual items.
import sqlalchemy # Import sqlalchemy to do specific data selection from the MySQL database.
# Determines what symbols in the cashtag list are traded on the selected exchange.
def create_match_list(exchange, twitter_base_list, twitter_dict):
    """Return [created_at, base, symbols] entries for cashtags traded on *exchange*.

    Args:
        exchange: ccxt exchange instance (uses .name and .load_markets()).
        twitter_base_list: Base-currency strings found in Tweets.
        twitter_dict: base -> list of created_at timestamps for that base.
    """
    print('Checking list of cashtags against supported symbols in {}...'.format(exchange.name))
    match_list = []
    # Group the exchange's trading pairs by their base currency.
    base_dict = collections.defaultdict(list)
    markets = exchange.load_markets()
    for symbol in markets:
        base_dict[markets[symbol]['base']].append(symbol)
    # CLEANUP: the original routed this membership test through numpy.isin
    # over two parallel lists and compared `match[i] == True`; with
    # assume_unique it is plain element membership, so a set is equivalent
    # and clearer.
    twitter_bases = set(twitter_base_list)
    for base, symbols in base_dict.items():
        if base in twitter_bases:
            for created_at in twitter_dict[base]:
                match_list.append([created_at, base, symbols])
    print('Supported symbols check complete.')
    return match_list
# Queries the exchange for market data for the time period around the Tweet each symbol in the match list.
def create_market_data_list(exchange, match_list, limit=2, timeframe='1d'):
    """Fetch OHLCV candles around each matched Tweet and return them as dicts.

    Only symbols for which the exchange returns exactly two candles are kept
    (presumably day-before / day-of the Tweet — confirm against the data).
    """
    print('Getting market data for each cashtag...')
    market_data_list = []
    for created_at, base, symbols in match_list:
        # Start one day before the Tweet, in exchange-epoch milliseconds.
        since = int((created_at - datetime.timedelta(days=1)).timestamp() * 1000)
        for symbol in symbols:
            candles = exchange.fetch_ohlcv(symbol, limit=limit, since=since, timeframe=timeframe)
            if not candles or len(candles) != 2:
                continue
            for candle in candles:
                print(since, candle)
                # ccxt candle layout: [timestamp_ms, open, high, low, close, volume]
                market_data_list.append({
                    'base': base,
                    'candle_ts': datetime.datetime.utcfromtimestamp(candle[0] // 1000),
                    'close': float(candle[4]),
                    'high': float(candle[2]),
                    'low': float(candle[3]),
                    'open': float(candle[1]),
                    'symbol': symbol,
                    'tweet_ts': created_at,
                    'volume': float(candle[5]),
                })
    print('Market data collection complete.')
    return market_data_list
# Get a list of cashtags for the current screen name and turn it into a list (for direct processing) and a dictionary
# (for lookup purposes during the direct processing).
def create_twitter_lists(screen_name, table):
    """Return (base_list, base -> [created_at, ...] dict) for *screen_name*'s stored cashtags.

    NOTE(review): relies on the module-level `db_connection` global rather
    than taking a connection parameter — confirm before reusing elsewhere.
    NOTE(review): the WHERE clause is built by string interpolation; the
    value comes from our own database, but parameterizing it would be safer.
    """
    print('Creating list of cashtags...')
    select_query = table.select(whereclause="`screen_name` = '{}'".format(screen_name))
    results = db_connection.execute(select_query)
    twitter_base_set = set()
    twitter_dict = collections.defaultdict(list)
    # BUG FIX: raw string for the pattern — '\w' in a plain literal is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    for result in results.fetchall():
        # Column 0 is the comma-joined cashtags string; split it back apart.
        regex_result = re.findall(r'(\w+)', result[0])
        for r in regex_result:
            twitter_base_set.add(r)
            twitter_dict[r].append(result['created_at'])
    twitter_base_list = list(twitter_base_set)
    print('Cashtag list created.')
    return twitter_base_list, twitter_dict
# Load the settings from the settings file and turn them into variables.
settings = cashtag_analyzer.load_settings()
exchange_id = settings['exchange_options']['exchange_id']
limit = settings['exchange_options']['limit']
results_table = settings['mysql_connection']['results_table']
timeframe = settings['exchange_options']['timeframe']
tweets_table = settings['mysql_connection']['tweets_table']
# Dynamically load the exchange method from the ccxt module.
exchange_method = getattr(ccxt, exchange_id)
exchange = exchange_method()
# Connect to the database and reflect the tweets table.
db_connection = cashtag_analyzer.connect_to_db(settings['mysql_connection'])
table = cashtag_analyzer.get_table(db_connection, tweets_table)
# Select a distinct list of screen names from the database.
select_query = sqlalchemy.select([table.c['screen_name']]).distinct()
results = db_connection.execute(select_query)
# Loop through the screen name list and collect market data for each cashtag.
for result in results:
    screen_name = result[0]
    print('Getting results for screen name {}...'.format(screen_name))
    twitter_base_list, twitter_dict = create_twitter_lists(screen_name, table)
    match_list = create_match_list(exchange, twitter_base_list, twitter_dict)
    market_data_list = create_market_data_list(exchange, match_list, limit=limit, timeframe=timeframe)
    # As a sanity check, get the number of rows in the table before executing the INSERT statement and print the results.
    results_text = 'Pre-INSERT row count: ' + cashtag_analyzer.get_row_count(db_connection, table)
    print(results_text)
    # Insert the market data into the database.
    # NOTE(review): this inserts into the tweets table reflected above —
    # `results_table` is loaded from the settings but never used; it looks
    # like the market data should go into the results table. Confirm.
    cashtag_analyzer.insert_data(db_connection, market_data_list, table)
|
{"/cashtag_analyzer/tweet_collector.py": ["/cashtag_analyzer/__init__.py"], "/cashtag_analyzer/market_data_collector.py": ["/cashtag_analyzer/__init__.py"]}
|
8,292
|
cocpy/Tello-Python
|
refs/heads/master
|
/tello/__init__.py
|
# Package metadata for tello-python.
__title__ = 'tello-python'
__author__ = 'C灵C'
# NOTE(review): misspelled dunder — the conventional name is __license__;
# kept as-is because external code may already read this attribute.
__liscence__ = 'MIT'
__copyright__ = 'Copyright 2021 C灵C'
__version__ = '1.1.6'
# Submodules exported by `from tello import *`.
__all__ = ['tello', 'stats']
# Re-export the main classes at package level.
from .tello import Tello
from .stats import Stats
|
{"/tello/demo.py": ["/tello/__init__.py"]}
|
8,293
|
cocpy/Tello-Python
|
refs/heads/master
|
/tello/demo.py
|
from tello import tello
drone = tello.Tello()
# Take off
drone.takeoff()
# Fly forward 100 cm
drone.forward(100)
# Rotate 90° clockwise
drone.cw(90)
# Flip left
drone.flip('l')
# Turn on the video stream
drone.streamon()
# Land
drone.land()
|
{"/tello/demo.py": ["/tello/__init__.py"]}
|
8,294
|
cocpy/Tello-Python
|
refs/heads/master
|
/setup.py
|
import setuptools
# Use the README as the PyPI long description.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()
setuptools.setup(
    name='tello-python',
    version='1.1.6',
    author='C灵C',
    author_email='c0c@cocpy.com',
    description='Control DJI Tello drone with Python3',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/cocpy/Tello-Python',
    packages=setuptools.find_packages(),
    # Runtime dependencies. NOTE(review): presumably opencv/flask serve the
    # video stream and paddle packages the vision features — confirm against
    # the package code before trimming.
    install_requires=[
        'opencv-python', 'flask', 'paddlepaddle', 'paddlehub'
    ],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
)
|
{"/tello/demo.py": ["/tello/__init__.py"]}
|
8,301
|
mkihr-ojisan/yakudobot
|
refs/heads/master
|
/scheduler.py
|
import tweepy,os,datetime,time
from apscheduler.schedulers.blocking import BlockingScheduler
from main import db
from database.models import YakudoScore
# Blocking scheduler that fires the timed_job registered below every minute.
twische = BlockingScheduler()
# Twitter API credentials come from the environment.
auth = tweepy.OAuthHandler(os.environ.get('CONSUMER_KEY'),os.environ.get('CONSUMER_SECRET'))
auth.set_access_token(os.environ.get('ACCESS_TOKEN_KEY'), os.environ.get('ACCESS_TOKEN_SECRET'))
api = tweepy.API(auth)
# Account whose timeline is scanned for score tweets.
userID = "mis1yakudo334"
def getalltweets():
    """Collect today's tweets from `userID`'s timeline that contain "Score:".

    Pages backwards through the timeline with max_id until an empty page is
    returned.

    Returns:
        list: tweepy Status objects whose full_text contains "Score:".

    NOTE(review): `since`/`until` are not documented keyword arguments of
    tweepy's user_timeline — confirm they actually restrict the range.
    """
    all_tweets = []
    Current_Date = datetime.datetime.today()
    starttime = Current_Date.strftime('%Y-%m-%d_00:00:00_JST')
    endtime = Current_Date.strftime('%Y-%m-%d_23:59:59_JST')
    tweets = api.user_timeline(screen_name=userID, since=starttime, until=endtime, count=200, include_rts=False, tweet_mode='extended')
    all_tweets.extend([t for t in tweets if "Score:" in t.full_text])
    # BUG FIX: guard against an empty first page — tweets[-1] raised
    # IndexError when the account had no tweets in range.
    if not tweets:
        return all_tweets
    oldest_id = tweets[-1].id
    while True:
        tweets = api.user_timeline(screen_name=userID, since=starttime, until=endtime, count=200, include_rts=False, max_id=oldest_id - 1, tweet_mode='extended')
        if len(tweets) == 0:
            break
        oldest_id = tweets[-1].id
        all_tweets.extend([t for t in tweets if "Score:" in t.full_text])
    return all_tweets
@twische.scheduled_job('interval',minutes=1)
def timed_job():
    # Runs every minute; acts only at specific minute marks.
    now = datetime.datetime.now()
    if now.minute == 0:
        # On the hour: post either a reminder or today's yakudo count.
        yakudos = YakudoScore.query.filter(YakudoScore.date==datetime.datetime.now().strftime('%Y-%m-%d')).all()
        if len(yakudos) == 0:
            api.update_status("おいお前ら!早くyakudoしろ!(" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + ")")
        else:
            api.update_status("本日のyakudo:" + str(len(yakudos)) + "件(" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + ")")
        print("ScheduledTask Complete")
    elif now.minute == 59 and now.hour == 23:
        # 23:59: announce the day's highest-scoring yakudo with a link.
        yakudos = YakudoScore.query.filter(YakudoScore.date == datetime.datetime.now().strftime('%Y-%m-%d')).all()
        maxscore = 0
        maxuser = ""
        maxtweetid = ""
        if len(yakudos) == 0:
            api.update_status("本日のyakudoは...何一つ...出ませんでした...")
        else:
            for yakudo in yakudos:
                if yakudo.score > maxscore:
                    maxscore = yakudo.score
                    maxtweetid = yakudo.tweetid
                    maxuser = yakudo.username
            if maxscore > 0:
                msg = "Highest Score:{:.3f}\n優勝おめでとう!\n".format(maxscore)
                url = "https://twitter.com/" + maxuser + "/status/" + maxtweetid
                api.update_status(msg + url)
            else:
                api.update_status("おい待てや...今日のyakudo...-inf点しか無いやん...")
    elif now.minute == 50:
        # xx:50: prune database entries whose source tweet was deleted.
        print("Checking Database")
        yakudos = YakudoScore.query.filter(YakudoScore.date == datetime.datetime.now().strftime('%Y-%m-%d')).all()
        count = 0
        for yakudo in yakudos:
            try:
                tweet = api.get_status(yakudo.tweetid)
            except:
                # Source tweet gone: delete our score tweet and the DB row.
                # NOTE(review): bare except also swallows rate-limit errors
                # and treats them as "tweet not found" — confirm intent.
                print("Tweet Not Found")
                time.sleep(1)
                api.destroy_status(yakudo.retweetid)
                YakudoScore.query.filter(YakudoScore.tweetid == yakudo.tweetid).delete()
                db.session.commit()
                count+=1
            # Throttle API calls; stop after 200 deletions per run.
            time.sleep(1)
            if count >= 200:
                break
if __name__ == "__main__":
    twische.start()
|
{"/scheduler.py": ["/main.py", "/database/models.py"], "/database/models.py": ["/main.py"], "/monitor.py": ["/main.py", "/database/models.py"]}
|
8,302
|
mkihr-ojisan/yakudobot
|
refs/heads/master
|
/main.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Load the database settings module and create the shared SQLAlchemy handle
# that scheduler.py / monitor.py / database/models.py import.
app.config.from_object('database.config')
db = SQLAlchemy(app)
# Route that keeps the Heroku dyno from sleeping (uptime pings hit it).
@app.route("/")
def index():
    return "This is mis1yakudo_bot!"
if __name__ == "__main__":
    app.run()
|
{"/scheduler.py": ["/main.py", "/database/models.py"], "/database/models.py": ["/main.py"], "/monitor.py": ["/main.py", "/database/models.py"]}
|
8,303
|
mkihr-ojisan/yakudobot
|
refs/heads/master
|
/database/models.py
|
from main import db
from flask_sqlalchemy import SQLAlchemy
import datetime
class YakudoScore(db.Model):
    """One scored yakudo tweet recorded for a given day."""
    id = db.Column(db.Integer, primary_key=True)
    # Twitter screen name of the poster.
    username = db.Column(db.Text)
    # ID of the scored source tweet.
    tweetid = db.Column(db.Text)
    # ID of the bot's score tweet (destroyed by scheduler.py when the
    # source tweet disappears).
    retweetid = db.Column(db.Text)
    # Computed yakudo score.
    score = db.Column(db.Float)
    # Day the tweet was recorded, formatted '%Y-%m-%d'.
    date = db.Column(db.Text, nullable=False)
    def __repr__(self):
        return "YakudoScore!"
def init():
    # One-off setup helper: create all tables defined on `db`.
    db.create_all()
|
{"/scheduler.py": ["/main.py", "/database/models.py"], "/database/models.py": ["/main.py"], "/monitor.py": ["/main.py", "/database/models.py"]}
|
8,304
|
mkihr-ojisan/yakudobot
|
refs/heads/master
|
/database/config.py
|
import os
#SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or "sqlite:///test.db"
# BUG FIX: the previous one-liner crashed with AttributeError when
# DATABASE_URL was unset (None.replace), its `or` fallback was unreachable,
# and the "://" -> "ql://" rewrite would also mangle non-postgres URLs.
# Heroku supplies "postgres://...", which SQLAlchemy 1.4+ rejects, so
# rewrite only that scheme.
_database_url = os.environ.get('DATABASE_URL')
if _database_url:
    SQLALCHEMY_DATABASE_URI = _database_url.replace("postgres://", "postgresql://", 1)
else:
    SQLALCHEMY_DATABASE_URI = "sqlite:///test.db"
SQLALCHEMY_TRACK_MODIFICATIONS = True
# NOTE(review): hard-coded secret; should come from the environment.
SECRET_KEY="secret key"
|
{"/scheduler.py": ["/main.py", "/database/models.py"], "/database/models.py": ["/main.py"], "/monitor.py": ["/main.py", "/database/models.py"]}
|
8,305
|
mkihr-ojisan/yakudobot
|
refs/heads/master
|
/monitor.py
|
import datetime
import tempfile
import requests
import os
import tweepy
from threading import Thread
import cv2
from main import db
from database.models import YakudoScore
import traceback
# Twitter API credentials come from the environment.
auth = tweepy.OAuthHandler(os.environ.get('CONSUMER_KEY'),os.environ.get('CONSUMER_SECRET'))
auth.set_access_token(os.environ.get('ACCESS_TOKEN_KEY'), os.environ.get('ACCESS_TOKEN_SECRET'))
api = tweepy.API(auth)
# Hashtag(s) the stream filters on.
keyword= ['#mis1yakudo']
# The bot's own account, excluded from processing.
botname = "nishinomiya443"
# Module-level scratch state checked by start_monitoring's recovery branch.
yakudo = None
msg = ""
url = ""
userid = None
class MyStreamListener(tweepy.StreamListener):
    """Stream listener that hands each incoming status to runtask on a thread."""

    def on_status(self, status):
        # BUG FIX: `Thread(runtask(status))` called runtask synchronously on
        # the stream thread and passed its return value (None) as the Thread
        # target; run it on the worker thread instead.
        t = Thread(target=runtask, args=(status,))
        t.start()
def checkyakudo(url):
    """Download the image at *url* and compute its yakudo (blur) score.

    The score is the inverse variance of the image Laplacian scaled by
    10000, so blurrier images score higher.
    """
    res = requests.get(url)
    img = None
    with tempfile.NamedTemporaryFile(dir='./') as fp:
        fp.write(res.content)
        fp.file.seek(0)
        # Read while the temp file still exists (deleted on close).
        img = cv2.imread(fp.name)
    result = (1/cv2.Laplacian(img, cv2.CV_64F).var())*10000 # yakudo score
    return result
    # NOTE(review): the lines below are unreachable and reference an
    # undefined name `tweet`; preserved for reference — they look like an
    # abandoned quote-tweet check that belongs in runtask. Confirm and
    # remove or relocate.
    isquotetweet = False
    if not keyword[0] in tweet and "twitter.com/" in tweet and "/status" in tweet:
        isquotetweet = True
def runtask(status):
    # Process one streamed tweet: score its images and reply with the result.
    print(status.text)
    # Ignore the bot's own tweets, retweets, and tweets without the keyword.
    if status.user.screen_name != botname and not status.text.startswith("RT @") and keyword[0] in status.text:
        url = "https://twitter.com/" + status.user.screen_name + "/status/" + status.id_str
        msg = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')+"\n"
        msg += "User:@"+status.user.screen_name + "\n"
        # yakudo_check_block
        yakudo = YakudoScore(username=status.user.screen_name,tweetid=status.id_str,date=datetime.datetime.now().strftime('%Y-%m-%d'))
        if hasattr(status, 'extended_entities'):
            finalscore = 0
            count = 0
            isphoto = True
            for image in status.extended_entities["media"]:
                if image["type"] == "video":
                    # Videos are rejected outright with a -inf score.
                    msg += "やめろ!クソ動画を投稿するんじゃない!\n"
                    msg += "Score:-inf\n"
                    yakudo.score = 0
                    isphoto = False
                    break
                score = checkyakudo(image['media_url_https'])
                finalscore += score
                count += 1
                childtext = "{:.0f}枚目:{:.3f}\n"
                msg += childtext.format(count, score)
                # NOTE(review): overwritten each iteration — only the last
                # photo's score is stored; confirm whether the average was
                # intended.
                yakudo.score = score
            if isphoto:
                # Average over all photos; 150+ counts as a good yakudo.
                finalscore /= count
                msg += "GoodYakudo!\n" if finalscore >= 150 else "もっとyakudoしろ!\n"
                finaltext = "Score:{:.3f}\n"
                msg += finaltext.format(finalscore)
        else:
            # No media attached at all.
            msg += "画像が入ってないやん!\n"
            msg += "Score:-inf\n"
            yakudo.score = 0
        userid = status.user.id
        # Reply with the score, follow the user, and record the result.
        new_tweet = api.update_status(msg + url)
        api.create_friendship(status.user.id)
        yakudo.retweetid = new_tweet.id_str
        db.session.add(yakudo)
        db.session.commit()
        # NOTE(review): these rebind function locals, not the module-level
        # globals of the same names (no `global` statement) — presumably a
        # leftover from a global-state design; confirm intent.
        yakudo = None
        msg = ""
        url = ""
        userid = None
def start_monitoring():
    # Restart the Twitter stream forever, re-posting any state left over
    # from a previous failure before reconnecting.
    print("start monitoring")
    while True:
        try:
            # NOTE(review): runtask assigns same-named locals, never these
            # module-level globals, so this recovery branch appears to be
            # dead code — confirm.
            if yakudo is not None and msg != "" and url != "":
                new_tweet = api.update_status(msg + url)
                api.create_friendship(userid)
                yakudo.retweetid = new_tweet.id_str
                db.session.add(yakudo)
                db.session.commit()
            print("start streaming")
            myStream = tweepy.Stream(auth=api.auth, listener=MyStreamListener())
            myStream.filter(track=keyword)
        except:
            # Bare except: any error (including KeyboardInterrupt) logs the
            # traceback and restarts the stream.
            traceback.print_exc()
            continue
if __name__ == "__main__":
    start_monitoring()
|
{"/scheduler.py": ["/main.py", "/database/models.py"], "/database/models.py": ["/main.py"], "/monitor.py": ["/main.py", "/database/models.py"]}
|
8,310
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/utils/plot.py
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def split_title_line(title_text, max_words=5):
    """Wrap *title_text* onto multiple lines with at most *max_words* words each.

    Words are joined with single spaces; chunks are joined with newlines.
    """
    words = title_text.split()
    chunks = []
    for start in range(0, len(words), max_words):
        chunks.append(' '.join(words[start:start + max_words]))
    return '\n'.join(chunks)
def plot_alignment(alignment, path, info=None, split_title=False):
    """Save an attention alignment heatmap to *path* as PNG.

    Args:
        alignment: 2-D array of attention weights (imshow'd as-is).
        path: Output file path.
        info: Optional title text; no title/xlabel is drawn when None.
        split_title: Wrap the title with split_title_line when True.
    """
    fig, ax = plt.subplots()
    im = ax.imshow(
        alignment,
        aspect='auto',
        origin='lower',
        interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        if split_title:
            title = split_title_line(info)
        else:
            title = info
        plt.xlabel(xlabel)
        plt.title(title)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()
    plt.savefig(path, format='png')
    # BUG FIX: close the figure so repeated calls (one per checkpoint/eval)
    # do not accumulate open matplotlib figures and leak memory.
    plt.close(fig)
def plot_spectrogram(spectrogram, path, info=None, split_title=False):
    """Save a (rotated) spectrogram image to *path* as PNG.

    Args:
        spectrogram: 2-D array, rotated 90° so frames run along x.
        path: Output file path.
        info: Optional title text; no title/xlabel is drawn when None.
        split_title: Wrap the title with split_title_line when True.
    """
    plt.figure()
    plt.imshow(np.rot90(spectrogram))
    plt.colorbar(shrink=0.65, orientation='horizontal')
    plt.ylabel('mels')
    xlabel = 'frames'
    if info is not None:
        if split_title:
            title = split_title_line(info)
        else:
            title = info
        plt.xlabel(xlabel)
        plt.title(title)
    plt.tight_layout()
    plt.savefig(path, format='png')
    # BUG FIX: close the current figure so repeated calls do not accumulate
    # open matplotlib figures and leak memory.
    plt.close()
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,311
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/train.py
|
import argparse
from tacotron.train import tacotron_train
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default='.')
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--input', default='training_data/train.txt')
parser.add_argument('--name', help='Name of logging directory.')
parser.add_argument('--model', default='Tacotron')
parser.add_argument('--restore', type=bool, default=True, help='Set this to False to do a fresh training')
parser.add_argument('--summary_interval', type=int, default=100,
help='Steps between running summary ops')
parser.add_argument('--checkpoint_interval', type=int, default=500,
help='Steps between writing checkpoints')
parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
args = parser.parse_args()
accepted_models = ['Tacotron', 'Wavenet']
if args.model not in accepted_models:
raise ValueError('please enter a valid model to train: {}'.format(accepted_models))
if args.model == 'Tacotron':
tacotron_train(args)
elif args.model == 'Wavenet':
raise NotImplementedError('Wavenet is still a work in progress, thank you for your patience!')
if __name__ == '__main__':
main()
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,312
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/hparams.py
|
import tensorflow as tf
import numpy as np
# Default hyperparameters
hparams = tf.contrib.training.HParams(
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners".
cleaners='english_cleaners',
#Audio
num_mels = 80,
num_freq = 513, #only used when adding linear spectrograms post processing network
rescale = True,
rescaling_max = 0.999,
trim_silence = True,
#Mel spectrogram
fft_size = 1024,
hop_size = 256,
sample_rate = 22050, #22050 Hz (corresponding to ljspeech dataset)
frame_shift_ms = None,
#Mel and Linear spectrograms normalization/scaling and clipping
mel_normalization = False,
signal_normalization = True,
allow_clipping_in_normalization = True, #Only relevant if mel_normalization = True
symmetric_mels = True, #Whether to scale the data to be symmetric around 0
max_abs_value = 4., #max absolute value of data. If symmetric, data will be [-max, max] else [0, max]
#Limits
min_level_db =- 100,
ref_level_db = 20,
fmin = 125,
fmax = 7600,
#Griffin Lim
power = 1.55,
griffin_lim_iters = 60,
# VAE:
use_vae=True,
vae_dim=32,
vae_warming_up=15000,
init_vae_weights=0.001,
vae_weight_multiler=0.002,
filters=[32, 32, 64, 64, 128, 128],
#Tacotron
outputs_per_step = 1, #number of frames to generate at each decoding step (speeds up computation and allows for higher batch size)
stop_at_any = True, #Determines whether the decoder should stop when predicting <stop> to any frame or to all of them
embedding_dim = 512, #dimension of embedding space
enc_conv_num_layers = 3, #number of encoder convolutional layers
enc_conv_kernel_size = (5, ), #size of encoder convolution filters for each layer
enc_conv_channels = 512, #number of encoder convolutions filters for each layer
encoder_lstm_units = 256, #number of lstm units for each direction (forward and backward)
encoder_depth=512,
smoothing = False, #Whether to smooth the attention normalization function
attention_dim = 128, #dimension of attention space
attention_filters = 32, #number of attention convolution filters
attention_kernel = (31, ), #kernel size of attention convolution
cumulative_weights = True, #Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True)
prenet_layers = [256, 256], #number of layers and number of units of prenet
decoder_layers = 2, #number of decoder lstm layers
decoder_lstm_units = 1024, #number of decoder lstm units on each layer
max_iters = 2500, #Max decoder steps during inference (Just for safety from infinite loop cases)
postnet_num_layers = 5, #number of postnet convolutional layers
postnet_kernel_size = (5, ), #size of postnet convolution filters for each layer
postnet_channels = 512, #number of postnet convolution filters for each layer
mask_encoder = False, #whether to mask encoder padding while computing attention
impute_finished = False, #Whether to use loss mask for padded sequences
mask_finished = False, #Whether to mask alignments beyond the <stop_token> (False for debug, True for style)
predict_linear = False, #Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)
#Wavenet
# Input type:
# 1. raw [-1, 1]
# 2. mulaw [-1, 1]
# 3. mulaw-quantize [0, mu]
# If input_type is raw or mulaw, network assumes scalar input and
# discretized mixture of logistic distributions output, otherwise one-hot
# input and softmax output are assumed.
# **NOTE**: if you change the one of the two parameters below, you need to
# re-run preprocessing before training.
# **NOTE**: scaler input (raw or mulaw) is experimental. Use it your own risk.
input_type="mulaw-quantize",
quantize_channels=256, # 65536 or 256
silence_threshold=2,
# Mixture of logistic distributions:
log_scale_min=float(np.log(1e-14)),
#TODO model params
#Tacotron Training
tacotron_batch_size = 32, #number of training samples on each training steps
tacotron_reg_weight = 1e-6, #regularization weight (for l2 regularization)
tacotron_scale_regularization = True, #Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model)
tacotron_decay_learning_rate = True, #boolean, determines if the learning rate will follow an exponential decay
tacotron_start_decay = 50000, #Step at which learning decay starts
tacotron_decay_steps = 50000, #starting point for learning rate decay (and determines the decay slope) (UNDER TEST)
tacotron_decay_rate = 0.4, #learning rate decay rate (UNDER TEST)
tacotron_initial_learning_rate = 1e-3, #starting learning rate
tacotron_final_learning_rate = 1e-5, #minimal learning rate
tacotron_adam_beta1 = 0.9, #AdamOptimizer beta1 parameter
tacotron_adam_beta2 = 0.999, #AdamOptimizer beta2 parameter
tacotron_adam_epsilon = 1e-6, #AdamOptimizer beta3 parameter
tacotron_zoneout_rate = 0.1, #zoneout rate for all LSTM cells in the network
tacotron_dropout_rate = 0.5, #dropout rate for all convolutional layers + prenet
tacotron_teacher_forcing_ratio = 1., #Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs
#Wavenet Training TODO
#Eval sentences
sentences = [
# From July 8, 2017 New York Times:
'Scientists at the CERN laboratory say they have discovered a new particle.',
'There\'s a way to measure the acute emotional intelligence that has never gone out of style.',
'President Trump met with other leaders at the Group of 20 conference.',
'The Senate\'s bill to repeal and replace the Affordable Care Act is now imperiled.',
# From Google's Tacotron example page:
'Generative adversarial network or variational auto-encoder.',
'Basilar membrane and otolaryngology are not auto-correlations.',
'He has read the whole thing.',
'He reads books.',
"Don't desert me here in the desert!",
'He thought it was time to present the present.',
'Thisss isrealy awhsome.',
'Punctuation sensitivity, is working.',
'Punctuation sensitivity is working.',
"The buses aren't the problem, they actually provide a solution.",
"The buses aren't the PROBLEM, they actually provide a SOLUTION.",
"The quick brown fox jumps over the lazy dog.",
"Does the quick brown fox jump over the lazy dog?",
"Peter Piper picked a peck of pickled peppers. How many pickled peppers did Peter Piper pick?",
"She sells sea-shells on the sea-shore. The shells she sells are sea-shells I'm sure.",
"The blue lagoon is a nineteen eighty American romance adventure film.",
"Tajima Airport serves Toyooka.",
'Talib Kweli confirmed to AllHipHop that he will be releasing an album in the next year.',
#From Training data:
'the rest being provided with barrack beds, and in dimensions varying from thirty feet by fifteen to fifteen feet by ten.',
'in giltspur street compter, where he was first lodged.',
'a man named burnett came with his wife and took up his residence at whitchurch, hampshire, at no great distance from laverstock,',
'it appears that oswald had only one caller in response to all of his fpcc activities,',
'he relied on the absence of the strychnia.',
'scoggins thought it was lighter.',
'''would, it is probable, have eventually overcome the reluctance of some of the prisoners at least,
and would have possessed so much moral dignity''',
'''the only purpose of this whole sentence is to evaluate the scalability of the model for very long sentences.
This is not even a long sentence anymore, it has become an entire paragraph.
Should I stop now? Let\'s add this last sentence in which we talk about nothing special.''',
'Thank you so much for your support!!'
]
)
def hparams_debug_string():
    """Return a human-readable dump of every hyperparameter, sorted by name.

    The 'sentences' entry (the long eval-sentence list) is skipped to keep
    the log readable.
    """
    values = hparams.values()
    lines = []
    for key in sorted(values):
        if key == 'sentences':
            continue
        lines.append(' %s: %s' % (key, values[key]))
    return 'Hyperparameters:\n' + '\n'.join(lines)
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,313
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/synthesizer.py
|
import os
import numpy as np
import tensorflow as tf
from hparams import hparams
from librosa import effects
from tacotron.models import create_model
from tacotron.utils.text import text_to_sequence
from tacotron.utils import plot
from datasets import audio
from datetime import datetime
class Synthesizer:
    """Wraps a trained Tacotron model for inference.

    `load` builds the inference graph once and restores a checkpoint;
    `synthesize` then maps one text string to a saved mel-spectrogram file.
    """

    def load(self, checkpoint_path, gta=False, model_name='Tacotron'):
        """Construct the inference graph and restore weights.

        Args:
            checkpoint_path: path to the trained model checkpoint.
            gta: Ground-Truth-Aligned mode — the decoder is teacher-forced on
                target mels fed through a placeholder.
            model_name: model key passed to create_model.
        """
        print('Constructing model: %s' % model_name)
        inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
        input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
        with tf.variable_scope('model') as scope:
            self.model = create_model(model_name, hparams)
            if hparams.use_vae:
                # Reference mel placeholder for the VAE style encoder.
                ref_targets = tf.placeholder(tf.float32, [1, None, hparams.num_mels], 'ref_targets')
            if gta:
                targets = tf.placeholder(tf.float32, [1, None, hparams.num_mels], 'mel_targets')
                if hparams.use_vae:
                    self.model.initialize(inputs, input_lengths, targets, gta=gta, reference_mel=ref_targets)
                else:
                    self.model.initialize(inputs, input_lengths, targets, gta=gta)
            else:
                if hparams.use_vae:
                    self.model.initialize(inputs, input_lengths, reference_mel=ref_targets)
                else:
                    self.model.initialize(inputs, input_lengths)
            self.mel_outputs = self.model.mel_outputs
            # BUGFIX: synthesize() reads self.linear_outputs when predict_linear
            # is enabled, but it was never assigned here. The model only builds
            # linear outputs outside GTA mode (post_condition).
            if hparams.predict_linear and not gta:
                self.linear_outputs = self.model.linear_outputs
            self.alignment = self.model.alignments[0]
        self.gta = gta
        print('Loading checkpoint: %s' % checkpoint_path)
        self.session = tf.Session()
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(self.session, checkpoint_path)

    def synthesize(self, text, index, out_dir, log_dir, mel_filename, reference_mel):
        """Synthesize one utterance and write the mel spectrogram to disk.

        Args:
            text: input sentence.
            index: integer used to number the output files.
            out_dir: directory receiving the .npy mel file.
            log_dir: if not None, wav renderings and plots are also saved here.
            mel_filename: (GTA mode only) path of the ground-truth mel .npy.
            reference_mel: [T, num_mels] VAE reference (non-GTA mode).

        Returns:
            Path of the saved mel-spectrogram .npy file.
        """
        cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
        seq = text_to_sequence(text, cleaner_names)
        feed_dict = {
            self.model.inputs: [np.asarray(seq, dtype=np.int32)],
            self.model.input_lengths: np.asarray([len(seq)], dtype=np.int32)
        }
        if self.gta:
            # Use hparams.num_mels instead of a hard-coded 80.
            target = np.load(mel_filename).reshape(1, -1, hparams.num_mels)
            feed_dict[self.model.mel_targets] = target
            # BUGFIX: only feed the reference placeholder when the model has one;
            # with use_vae off, model.reference_mel is None and feeding it fails.
            if hparams.use_vae:
                feed_dict[self.model.reference_mel] = target
        elif hparams.use_vae:
            reference_mel = [np.asarray(reference_mel, dtype=np.float32)]
            feed_dict[self.model.reference_mel] = reference_mel
        if self.gta or not hparams.predict_linear:
            mels, alignment = self.session.run([self.mel_outputs, self.alignment], feed_dict=feed_dict)
        else:
            linear, mels, alignment = self.session.run([self.linear_outputs, self.mel_outputs, self.alignment], feed_dict=feed_dict)
            linear = linear.reshape(-1, hparams.num_freq)
        mels = mels.reshape(-1, hparams.num_mels) #Thanks to @imdatsolak for pointing this out
        # Write the spectrogram to disk
        # Note: outputs mel-spectrogram files and target ones have same names, just different folders
        mel_filename = os.path.join(out_dir, 'speech-mel-{:05d}.npy'.format(index))
        np.save(mel_filename, mels, allow_pickle=False)
        if log_dir is not None:
            #save wav (mel -> wav)
            wav = audio.inv_mel_spectrogram(mels.T)
            audio.save_wav(wav, os.path.join(log_dir, 'wavs/speech-wav-{:05d}-mel.wav'.format(index)))
            if hparams.predict_linear:
                #save wav (linear -> wav)
                wav = audio.inv_linear_spectrogram(linear.T)
                audio.save_wav(wav, os.path.join(log_dir, 'wavs/speech-wav-{:05d}-linear.wav'.format(index)))
            #save alignments
            plot.plot_alignment(alignment, os.path.join(log_dir, 'plots/speech-alignment-{:05d}.png'.format(index)),
                info='{}'.format(text), split_title=True)
            #save mel spectrogram plot
            plot.plot_spectrogram(mels, os.path.join(log_dir, 'plots/speech-mel-{:05d}.png'.format(index)),
                info='{}'.format(text), split_title=True)
        return mel_filename
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,314
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/models/tacotron.py
|
import tensorflow as tf
from tacotron.utils.symbols import symbols
from tacotron.utils.infolog import log
from tacotron.models.helpers import TacoTrainingHelper, TacoTestHelper
from tacotron.models.modules import *
from tacotron.models.zoneout_LSTM import ZoneoutLSTMCell
from tensorflow.contrib.seq2seq import dynamic_decode
from tacotron.models.Architecture_wrappers import TacotronEncoderCell, TacotronDecoderCell
from tacotron.models.custom_decoder import CustomDecoder
from tacotron.models.attention import LocationSensitiveAttention
from tacotron.utils.util import shape_list, vae_weight
class Tacotron():
    """vae_tacotron2 Feature prediction Model.

    Tacotron-2 style text-to-mel network with an optional VAE reference
    encoder (hparams.use_vae) whose projected latent is added to the text
    encoder outputs before attention.
    """
    def __init__(self, hparams):
        # Hyperparameter container; read throughout graph construction.
        self._hparams = hparams

    def initialize(self, inputs, input_lengths, mel_targets=None, mel_lengths=None, stop_token_targets=None, linear_targets=None, gta=False, reference_mel=None):
        """
        Initializes the model for inference

        sets "mel_outputs" and "alignments" fields.

        Args:
            - inputs: int32 Tensor with shape [N, T_in] where N is batch size, T_in is number of
              steps in the input time series, and values are character IDs
            - input_lengths: int32 Tensor with shape [N] where N is batch size and values are the lengths
              of each sequence in inputs.
            - mel_targets: float32 Tensor with shape [N, T_out, M] where N is batch size, T_out is number
              of steps in the output time series, M is num_mels, and values are entries in the mel
              spectrogram. Only needed for training.
            - mel_lengths: int32 Tensor [N], lengths of the mel targets (consumed by the VAE encoder).
            - stop_token_targets: float32 Tensor [N, T_out], per-frame stop flags. Training only.
            - linear_targets: linear-spectrogram targets; required when
              hparams.predict_linear is on and gta is off.
            - gta: build the graph for Ground-Truth-Aligned synthesis (teacher-forced
              decoding, no post-processing network).
            - reference_mel: VAE reference mel at inference time; during training it
              is replaced by mel_targets below.
        """
        # Validate which target combinations are legal before building the graph.
        if mel_targets is None and stop_token_targets is not None:
            raise ValueError('no mel targets were provided but token_targets were given')
        if mel_targets is not None and stop_token_targets is None and not gta:
            raise ValueError('Mel targets are provided without corresponding token_targets')
        if gta==False and self._hparams.predict_linear==True and linear_targets is None:
            raise ValueError('Model is set to use post processing to predict linear spectrograms in training but no linear targets given!')
        if gta and linear_targets is not None:
            raise ValueError('Linear spectrogram prediction is not supported in GTA mode!')
        with tf.variable_scope('inference') as scope:
            # Training mode iff targets are provided and we are not teacher-forcing GTA.
            is_training = mel_targets is not None and not gta
            batch_size = tf.shape(inputs)[0]
            hp = self._hparams
            #GTA is only used for predicting mels to train Wavenet vocoder, so we ommit post processing when doing GTA synthesis
            post_condition = hp.predict_linear and not gta

            # Embeddings ==> [batch_size, sequence_length, embedding_dim]
            embedding_table = tf.get_variable(
                'inputs_embedding', [len(symbols), hp.embedding_dim], dtype=tf.float32)
            embedded_inputs = tf.nn.embedding_lookup(embedding_table, inputs)

            #Encoder Cell ==> [batch_size, encoder_steps, encoder_lstm_units]
            encoder_cell = TacotronEncoderCell(
                EncoderConvolutions(is_training, kernel_size=hp.enc_conv_kernel_size,
                    channels=hp.enc_conv_channels, scope='encoder_convolutions'),
                EncoderRNN(is_training, size=hp.encoder_lstm_units,
                    zoneout=hp.tacotron_zoneout_rate, scope='encoder_LSTM'))

            encoder_outputs = encoder_cell(embedded_inputs, input_lengths)

            if hp.use_vae:
                # During training the VAE reference is the ground-truth mel itself.
                if is_training:
                    reference_mel = mel_targets
                style_embeddings, mu, log_var = VAE(
                    inputs=reference_mel,
                    input_lengths=mel_lengths,
                    filters=hp.filters,
                    kernel_size=(3, 3),
                    strides=(2, 2),
                    num_units=hp.vae_dim,
                    is_training=is_training,
                    scope='vae')
                # Exposed for the KL-divergence term in add_loss.
                self.mu = mu
                self.log_var = log_var
                # Project the latent to encoder depth, then broadcast it over
                # every encoder time step and add it to the encoder outputs.
                style_embeddings = tf.layers.dense(style_embeddings, hp.encoder_depth)
                style_embeddings = tf.expand_dims(style_embeddings, axis=1)
                style_embeddings = tf.tile(style_embeddings, [1, shape_list(encoder_outputs)[1], 1]) # [N, T_in, 256]
                encoder_outputs = encoder_outputs + style_embeddings

            #For shape visualization purpose
            enc_conv_output_shape = encoder_cell.conv_output_shape

            #Decoder Parts
            #Attention Decoder Prenet
            prenet = Prenet(is_training, layer_sizes=hp.prenet_layers, scope='decoder_prenet')
            #Attention Mechanism
            attention_mechanism = LocationSensitiveAttention(hp.attention_dim, encoder_outputs,
                mask_encoder=hp.mask_encoder, memory_sequence_length=input_lengths, smoothing=hp.smoothing,
                cumulate_weights=hp.cumulative_weights)
            #Decoder LSTM Cells
            decoder_lstm = DecoderRNN(is_training, layers=hp.decoder_layers,
                size=hp.decoder_lstm_units, zoneout=hp.tacotron_zoneout_rate, scope='decoder_lstm')
            #Frames Projection layer
            frame_projection = FrameProjection(hp.num_mels * hp.outputs_per_step, scope='linear_transform')
            #<stop_token> projection layer
            stop_projection = StopProjection(is_training, scope='stop_token_projection')

            #Decoder Cell ==> [batch_size, decoder_steps, num_mels * r] (after decoding)
            decoder_cell = TacotronDecoderCell(
                prenet,
                attention_mechanism,
                decoder_lstm,
                frame_projection,
                stop_projection,
                mask_finished=hp.mask_finished)

            #Define the helper for our decoder
            if (is_training or gta) == True:
                self.helper = TacoTrainingHelper(batch_size, mel_targets, stop_token_targets,
                    hp.num_mels, hp.outputs_per_step, hp.tacotron_teacher_forcing_ratio, gta)
            else:
                self.helper = TacoTestHelper(batch_size, hp.num_mels, hp.outputs_per_step)

            #initial decoder state
            decoder_init_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32)

            #Only use max iterations at synthesis time
            max_iters = hp.max_iters if not is_training else None

            #Decode
            (frames_prediction, stop_token_prediction, _), final_decoder_state, _ = dynamic_decode(
                CustomDecoder(decoder_cell, self.helper, decoder_init_state),
                impute_finished=hp.impute_finished,
                maximum_iterations=max_iters)

            # Reshape outputs to be one output per entry
            #==> [batch_size, non_reduced_decoder_steps (decoder_steps * r), num_mels]
            decoder_output = tf.reshape(frames_prediction, [batch_size, -1, hp.num_mels])
            stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])

            #Postnet
            postnet = Postnet(is_training, kernel_size=hp.postnet_kernel_size,
                channels=hp.postnet_channels, scope='postnet_convolutions')

            #Compute residual using post-net ==> [batch_size, decoder_steps * r, postnet_channels]
            residual = postnet(decoder_output)

            #Project residual to same dimension as mel spectrogram
            #==> [batch_size, decoder_steps * r, num_mels]
            residual_projection = FrameProjection(hp.num_mels, scope='postnet_projection')
            projected_residual = residual_projection(residual)

            #Compute the mel spectrogram
            mel_outputs = decoder_output + projected_residual

            if post_condition:
                #Based on https://github.com/keithito/tacotron/blob/tacotron2-work-in-progress/models/tacotron.py
                #Post-processing Network to map mels to linear spectrograms using same architecture as the encoder
                post_processing_cell = TacotronEncoderCell(
                    EncoderConvolutions(is_training, kernel_size=hp.enc_conv_kernel_size,
                        channels=hp.enc_conv_channels, scope='post_processing_convolutions'),
                    EncoderRNN(is_training, size=hp.encoder_lstm_units,
                        zoneout=hp.tacotron_zoneout_rate, scope='post_processing_LSTM'))
                # NOTE(review): called without sequence lengths, unlike the main
                # encoder call above — confirm that is intended.
                expand_outputs = post_processing_cell(mel_outputs)
                linear_outputs = FrameProjection(hp.num_freq, scope='post_processing_projection')(expand_outputs)

            #Grab alignments from the final decoder state
            alignments = tf.transpose(final_decoder_state.alignment_history.stack(), [1, 2, 0])

            # Expose graph tensors for the training / synthesis drivers.
            self.inputs = inputs
            self.input_lengths = input_lengths
            self.decoder_output = decoder_output
            self.alignments = alignments
            self.stop_token_prediction = stop_token_prediction
            self.stop_token_targets = stop_token_targets
            self.mel_outputs = mel_outputs
            self.reference_mel = reference_mel
            if post_condition:
                self.linear_outputs = linear_outputs
                self.linear_targets = linear_targets
            self.mel_targets = mel_targets
            self.mel_lengths = mel_lengths
            log('Initialized Tacotron model. Dimensions (? = dynamic shape): ')
            log(' embedding: {}'.format(embedded_inputs.shape))
            log(' enc conv out: {}'.format(enc_conv_output_shape))
            log(' encoder out: {}'.format(encoder_outputs.shape))
            log(' decoder out: {}'.format(decoder_output.shape))
            log(' residual out: {}'.format(residual.shape))
            log(' projected residual out: {}'.format(projected_residual.shape))
            log(' mel out: {}'.format(mel_outputs.shape))
            if post_condition:
                log(' linear out: {}'.format(linear_outputs.shape))
            log(' <stop_token> out: {}'.format(stop_token_prediction.shape))

    def add_loss(self, global_step):
        '''Adds loss to the model. Sets "loss" field. initialize must have been called.'''
        with tf.variable_scope('loss') as scope:
            hp = self._hparams
            # Compute loss of predictions before postnet
            before = tf.losses.mean_squared_error(self.mel_targets, self.decoder_output)
            # Compute loss after postnet
            after = tf.losses.mean_squared_error(self.mel_targets, self.mel_outputs)
            #Compute <stop_token> loss (for learning dynamic generation stop)
            stop_token_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                labels=self.stop_token_targets,
                logits=self.stop_token_prediction))
            if hp.predict_linear:
                #Compute linear loss
                #From https://github.com/keithito/tacotron/blob/tacotron2-work-in-progress/models/tacotron.py
                #Prioritize loss for frequencies under 2000 Hz.
                l1 = tf.abs(self.linear_targets - self.linear_outputs)
                # NOTE(review): sizes the priority band of a *linear* spectrogram
                # using hp.num_mels; hp.num_freq looks intended — confirm.
                n_priority_freq = int(2000 / (hp.sample_rate * 0.5) * hp.num_mels)
                linear_loss = 0.5 * tf.reduce_mean(l1) + 0.5 * tf.reduce_mean(l1[:,:,0:n_priority_freq])
            else:
                linear_loss = 0.
            # Compute the regularization weight
            if hp.tacotron_scale_regularization:
                reg_weight_scaler = 1. / (2 * hp.max_abs_value) if hp.symmetric_mels else 1. / (hp.max_abs_value)
                reg_weight = hp.tacotron_reg_weight * reg_weight_scaler
            else:
                reg_weight = hp.tacotron_reg_weight
            # Get all trainable variables
            all_vars = tf.trainable_variables()
            # L2 regularization on every weight matrix (biases excluded).
            regularization = tf.add_n([tf.nn.l2_loss(v) for v in all_vars
                if not('bias' in v.name or 'Bias' in v.name)]) * reg_weight
            # Compute final loss term
            self.before_loss = before
            self.after_loss = after
            self.stop_token_loss = stop_token_loss
            self.regularization_loss = regularization
            self.linear_loss = linear_loss
            self.loss = self.before_loss + self.after_loss + self.stop_token_loss + self.regularization_loss + self.linear_loss
            if hp.use_vae:
                # KL divergence between the VAE posterior N(mu, exp(log_var))
                # and a unit Gaussian, annealed by vae_weight (utils.util).
                self.ki_loss = -0.5 * tf.reduce_sum(1 + self.log_var - tf.pow(self.mu, 2) - tf.exp(self.log_var))
                vae_loss_weight = vae_weight(global_step)
                self.loss += self.ki_loss * vae_loss_weight

    def add_optimizer(self, global_step):
        '''Adds optimizer. Sets "gradients" and "optimize" fields. add_loss must have been called.

        Args:
            global_step: int32 scalar Tensor representing current global step in training
        '''
        with tf.variable_scope('optimizer') as scope:
            hp = self._hparams
            if hp.tacotron_decay_learning_rate:
                self.decay_steps = hp.tacotron_decay_steps
                self.decay_rate = hp.tacotron_decay_rate
                self.learning_rate = self._learning_rate_decay(hp.tacotron_initial_learning_rate, global_step)
            else:
                self.learning_rate = tf.convert_to_tensor(hp.tacotron_initial_learning_rate)
            optimizer = tf.train.AdamOptimizer(self.learning_rate, hp.tacotron_adam_beta1,
                hp.tacotron_adam_beta2, hp.tacotron_adam_epsilon)
            gradients, variables = zip(*optimizer.compute_gradients(self.loss))
            self.gradients = gradients
            #Just for causion
            #https://github.com/Rayhane-mamah/Tacotron-2/issues/11
            clipped_gradients, _ = tf.clip_by_global_norm(gradients, 0.5)
            # Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. See:
            # https://github.com/tensorflow/tensorflow/issues/1122
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                self.optimize = optimizer.apply_gradients(zip(clipped_gradients, variables),
                    global_step=global_step)

    def _learning_rate_decay(self, init_lr, global_step):
        #################################################################
        # Narrow Exponential Decay:

        # Phase 1: lr = 1e-3
        # We only start learning rate decay after 50k steps

        # Phase 2: lr in ]1e-3, 1e-5[
        # decay reach minimal value at step 300k

        # Phase 3: lr = 1e-5
        # clip by minimal learning rate value (step > 300k)
        #################################################################
        hp = self._hparams

        #Compute natural exponential decay
        lr = tf.train.exponential_decay(init_lr,
            global_step - hp.tacotron_start_decay, #lr = 1e-3 at step 50k
            self.decay_steps,
            self.decay_rate, #lr = 1e-5 around step 300k
            name='exponential_decay')

        #clip learning rate by max and min values (initial and final values)
        return tf.minimum(tf.maximum(lr, hp.tacotron_final_learning_rate), init_lr)
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,315
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/utils/audio.py
|
import librosa
import librosa.filters
import numpy as np
from scipy import signal
from hparams import hparams
import tensorflow as tf
def load_wav(path):
    """Load an audio file as a float waveform at the configured sample rate."""
    wav, _ = librosa.core.load(path, sr=hparams.sample_rate)
    return wav
def save_wav(wav, path):
    """Rescale a float waveform to int16 range and write it to `path`.

    BUGFIX: the original scaled `wav` in place (`wav *=`), silently mutating
    the caller's array; a scaled copy is used instead. The 0.01 floor guards
    against division by ~zero for silent clips.
    """
    peak = max(0.01, np.max(np.abs(wav)))
    scaled = wav * (32767 / peak)
    librosa.output.write_wav(path, scaled.astype(np.int16), hparams.sample_rate)
def trim_silence(wav):
    """Strip leading and trailing silence from a waveform.

    Useful for the M-AILABS dataset when trimming its extra 0.5s pauses.
    """
    trimmed, _ = librosa.effects.trim(wav)
    return trimmed
def preemphasis(x):
    """Apply a first-order pre-emphasis FIR filter: y[n] = x[n] - k*x[n-1]."""
    k = hparams.preemphasis
    return signal.lfilter([1, -k], [1], x)
def inv_preemphasis(x):
    """Undo `preemphasis` with the matching IIR filter."""
    k = hparams.preemphasis
    return signal.lfilter([1], [1, -k], x)
def get_hop_size():
    """Return the STFT hop size, derived from frame_shift_ms when unset."""
    if hparams.hop_size is not None:
        return hparams.hop_size
    # Fall back to computing the hop from the frame shift in milliseconds.
    assert hparams.frame_shift_ms is not None
    return int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
def melspectrogram(wav):
    """Compute a log-mel spectrogram (optionally normalized) from a waveform."""
    spec_db = _amp_to_db(_linear_to_mel(np.abs(_stft(wav)))) - hparams.ref_level_db
    return _normalize(spec_db) if hparams.mel_normalization else spec_db
def inv_mel_spectrogram(mel_spectrogram):
    '''Invert a (possibly normalized) mel spectrogram back to a waveform
    using the Griffin-Lim phase-reconstruction algorithm.'''
    D = _denormalize(mel_spectrogram) if hparams.mel_normalization else mel_spectrogram
    # Undo the dB scaling, then project back to linear frequencies.
    S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db))
    return _griffin_lim(S ** hparams.power)
def _griffin_lim(S):
    '''librosa implementation of Griffin-Lim
    Based on https://github.com/librosa/librosa/issues/434

    Starts from random phases and iteratively re-estimates them from the
    STFT of the reconstructed signal.
    '''
    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    # BUGFIX: np.complex was removed in NumPy 1.24; np.complex128 is the
    # dtype the old alias referred to.
    S_complex = np.abs(S).astype(np.complex128)
    y = _istft(S_complex * angles)
    for i in range(hparams.griffin_lim_iters):
        angles = np.exp(1j * np.angle(_stft(y)))
        y = _istft(S_complex * angles)
    return y
def _stft(y):
    """Forward STFT with the configured FFT size and hop length."""
    hop = get_hop_size()
    return librosa.stft(y=y, n_fft=hparams.fft_size, hop_length=hop)
def _istft(y):
    """Inverse STFT matching the hop length used by `_stft`."""
    hop = get_hop_size()
    return librosa.istft(y, hop_length=hop)
# Conversions
# Mel filterbank and its pseudo-inverse, built lazily on first use and then
# cached at module level (assumes hparams do not change afterwards).
_mel_basis = None
_inv_mel_basis = None
def _linear_to_mel(spectogram):
    """Project a linear-frequency magnitude spectrogram onto the mel basis."""
    global _mel_basis
    if _mel_basis is None:
        # Build the filterbank once and cache it for all later calls.
        _mel_basis = _build_mel_basis()
    return _mel_basis @ spectogram
def _mel_to_linear(mel_spectrogram):
    """Approximately invert the mel projection via a cached pseudo-inverse."""
    global _inv_mel_basis
    if _inv_mel_basis is None:
        _inv_mel_basis = np.linalg.pinv(_build_mel_basis())
    # Floor at a tiny positive value so later log/dB steps stay finite.
    return np.maximum(1e-10, _inv_mel_basis @ mel_spectrogram)
def _build_mel_basis():
    """Construct the [num_mels, 1 + fft_size//2] mel filterbank matrix.

    Uses keyword arguments throughout: librosa >= 0.10 removed positional
    `sr`/`n_fft` from librosa.filters.mel.
    """
    # fmax above Nyquist would produce empty filters.
    assert hparams.fmax <= hparams.sample_rate // 2
    return librosa.filters.mel(sr=hparams.sample_rate, n_fft=hparams.fft_size,
                               n_mels=hparams.num_mels,
                               fmin=hparams.fmin, fmax=hparams.fmax)
def _amp_to_db(x):
    """Convert linear amplitudes to decibels, flooring at min_level_db."""
    # min_level is the amplitude corresponding to hparams.min_level_db.
    min_level = np.exp(hparams.min_level_db / 20 * np.log(10))
    floored = np.maximum(min_level, x)
    return 20 * np.log10(floored)
def _db_to_amp(x):
return np.power(10.0, (x) * 0.05)
def _normalize(S):
    """Map a dB-scale spectrogram into the configured output range.

    Symmetric mode maps to [-max_abs_value, max_abs_value], otherwise to
    [0, max_abs_value]; clipping is applied only when allowed by hparams.
    """
    scale = (S - hparams.min_level_db) / (-hparams.min_level_db)
    if hparams.symmetric_mels:
        out = (2 * hparams.max_abs_value) * scale - hparams.max_abs_value
        lo, hi = -hparams.max_abs_value, hparams.max_abs_value
    else:
        out = hparams.max_abs_value * scale
        lo, hi = 0, hparams.max_abs_value
    if hparams.allow_clipping_in_normalization:
        return np.clip(out, lo, hi)
    # Without clipping, the input must already be in the valid dB range.
    assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0
    return out
def _denormalize(D):
    """Inverse of `_normalize`: map normalized values back to the dB scale."""
    clip = hparams.allow_clipping_in_normalization
    if hparams.symmetric_mels:
        vals = np.clip(D, -hparams.max_abs_value, hparams.max_abs_value) if clip else D
        return (((vals + hparams.max_abs_value) * -hparams.min_level_db
                 / (2 * hparams.max_abs_value)) + hparams.min_level_db)
    vals = np.clip(D, 0, hparams.max_abs_value) if clip else D
    return (vals * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,316
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/synthesize.py
|
import argparse
import os
import re
from hparams import hparams, hparams_debug_string
from tacotron.synthesizer import Synthesizer
import tensorflow as tf
import time
from tqdm import tqdm
from tacotron.utils.audio import load_wav, melspectrogram
def run_eval(args, checkpoint_path, output_dir):
    """Synthesize every sentence in hparams.sentences, conditioning the VAE
    on args.reference_audio, writing mels/wavs/plots under output_dir.

    Args:
        args: parsed CLI arguments (needs reference_audio).
        checkpoint_path: restored model checkpoint path.
        output_dir: root output directory.
    """
    print(hparams_debug_string())
    synth = Synthesizer()
    synth.load(checkpoint_path)
    eval_dir = os.path.join(output_dir, 'eval')
    log_dir = os.path.join(output_dir, 'logs-eval')
    # Reference mel for the VAE: [T, num_mels] from the provided clip.
    wav = load_wav(args.reference_audio)
    reference_mel = melspectrogram(wav).transpose()
    #Create output path if it doesn't exist
    os.makedirs(eval_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(os.path.join(log_dir, 'wavs'), exist_ok=True)
    os.makedirs(os.path.join(log_dir, 'plots'), exist_ok=True)
    # 'map_file' instead of 'file' — avoid shadowing the builtin.
    with open(os.path.join(eval_dir, 'map.txt'), 'w') as map_file:
        for i, text in enumerate(tqdm(hparams.sentences)):
            mel_filename = synth.synthesize(text, i + 1, eval_dir, log_dir, None, reference_mel)
            map_file.write('{}|{}\n'.format(text, mel_filename))
    print('synthesized mel spectrograms at {}'.format(eval_dir))
def run_synthesis(args, checkpoint_path, output_dir):
    """Batch-synthesize mels for every training example listed in train.txt.

    With args.GTA the decoder is teacher-forced on ground-truth mels
    (used to build aligned targets for a vocoder).
    """
    metadata_filename = os.path.join(args.input_dir, 'train.txt')
    print(hparams_debug_string())
    synth = Synthesizer()
    synth.load(checkpoint_path, gta=args.GTA)
    with open(metadata_filename, encoding='utf-8') as f:
        metadata = [line.strip().split('|') for line in f]
    frame_shift_ms = hparams.hop_size / hparams.sample_rate
    # meta[4] holds the example's frame count in the metadata file.
    hours = sum([int(x[4]) for x in metadata]) * frame_shift_ms / (3600)
    print('Loaded metadata for {} examples ({:.2f} hours)'.format(len(metadata), hours))
    if args.GTA == True:
        synth_dir = os.path.join(output_dir, 'gta')
    else:
        synth_dir = os.path.join(output_dir, 'natural')
    #Create output path if it doesn't exist
    os.makedirs(synth_dir, exist_ok=True)
    print('starting synthesis')
    mel_dir = os.path.join(args.input_dir, 'mels')
    wav_dir = os.path.join(args.input_dir, 'audio')
    with open(os.path.join(synth_dir, 'map.txt'), 'w') as map_file:
        for i, meta in enumerate(tqdm(metadata)):
            text = meta[5]
            mel_filename = os.path.join(mel_dir, meta[1])
            wav_filename = os.path.join(wav_dir, meta[0])
            # BUGFIX: arguments were previously passed out of order — the
            # signature is synthesize(text, index, out_dir, log_dir,
            # mel_filename, reference_mel), but the old call sent the index
            # as out_dir and None as mel_filename (crashing in GTA mode).
            mel_output_filename = synth.synthesize(text, i + 1, synth_dir, None, mel_filename, None)
            map_file.write('{}|{}|{}|{}\n'.format(text, mel_filename, mel_output_filename, wav_filename))
    print('synthesized mel spectrograms at {}'.format(synth_dir))
def tacotron_synthesize(args):
    """Entry point: apply hparam overrides, locate the latest checkpoint and
    dispatch to eval or batch synthesis depending on args.mode."""
    hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    output_dir = 'tacotron_' + args.output_dir
    try:
        # get_checkpoint_state returns None when no checkpoint exists, so the
        # attribute access below raises for a missing/invalid checkpoint dir.
        checkpoint_path = tf.train.get_checkpoint_state(args.checkpoint).model_checkpoint_path
        print('loaded model at {}'.format(checkpoint_path))
    except Exception as e:
        # Was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
        # Keep raising AssertionError so existing callers' handlers still match,
        # but chain the original cause for debuggability.
        raise AssertionError('Cannot restore checkpoint: {}, did you train a model?'.format(args.checkpoint)) from e
    if args.mode == 'eval':
        run_eval(args, checkpoint_path, output_dir)
    else:
        run_synthesis(args, checkpoint_path, output_dir)
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,317
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/utils/util.py
|
import tensorflow as tf
import numpy as np
from hparams import hparams as hp
def shape_list(x):
    """Return list of dims, statically where possible.

    Each entry is a Python int when the dimension is statically known,
    otherwise the corresponding element of the dynamic tf.shape tensor.
    """
    x = tf.convert_to_tensor(x)
    # Unknown rank: nothing is static, return the dynamic shape tensor.
    if x.get_shape().dims is None:
        return tf.shape(x)
    dynamic = tf.shape(x)
    return [dim if dim is not None else dynamic[i]
            for i, dim in enumerate(x.get_shape().as_list())]
def vae_weight(global_step):
    """Anneal the weight applied to the VAE KL term as training progresses.

    Before hp.vae_warming_up steps the weight ramps with global_step/100;
    afterwards it ramps more slowly with (global_step - warm_up)/400, offset
    by the weight reached at warm-up.

    NOTE(review): each tf.cond branch returns 0 except on steps where
    global_step % 100 (resp. % 400) < 1, so the returned weight is non-zero
    only once every 100/400 steps — presumably intentional step-wise
    annealing, but confirm against the training loop's usage.
    """
    warm_up_step = hp.vae_warming_up
    # w1: ramp active only before the warm-up step.
    w1 = tf.cond(
        global_step < warm_up_step,
        lambda: tf.cond(
            global_step % 100 < 1,
            # init weight + (step / 100) * multiplier
            lambda: tf.convert_to_tensor(hp.init_vae_weights) + tf.cast(global_step / 100 * hp.vae_weight_multiler, tf.float32),
            lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)
        ),
        lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)
    )
    # w2: slower ramp after warm-up, offset by the weight reached at warm-up.
    w2 = tf.cond(
        global_step > warm_up_step,
        lambda: tf.cond(
            global_step % 400 < 1,
            lambda: tf.convert_to_tensor(hp.init_vae_weights) + tf.cast((global_step - warm_up_step) / 400 * hp.vae_weight_multiler + warm_up_step / 100 * hp.vae_weight_multiler, tf.float32),
            lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)
        ),
        lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)
    )
    # Only one of w1/w2 can be non-zero for a given step.
    return tf.maximum(w1, w2)
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,318
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/synthesize.py
|
import argparse
from tacotron.synthesize import tacotron_synthesize
def main():
    """Parse CLI arguments and dispatch synthesis for the selected model."""
    accepted_modes = ['eval', 'synthesis']
    accepted_models = ['Tacotron', 'Wavenet']

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--checkpoint', default='logs-Tacotron/pretrained/', help='Path to model checkpoint')
    arg_parser.add_argument('--hparams', default='',
        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    arg_parser.add_argument('--reference_audio', required=True)
    arg_parser.add_argument('--model', default='Tacotron')
    arg_parser.add_argument('--input_dir', default='training_data/', help='folder to contain inputs sentences/targets')
    arg_parser.add_argument('--output_dir', default='output/', help='folder to contain synthesized mel spectrograms')
    arg_parser.add_argument('--mode', default='synthesis', help='mode of run: can be one of {}'.format(accepted_modes))
    arg_parser.add_argument('--GTA', default=False, help='Ground truth aligned synthesis, defaults to True, only considered in synthesis mode')
    cli_args = arg_parser.parse_args()

    # Validate choices before dispatching.
    if cli_args.model not in accepted_models:
        raise ValueError('please enter a valid model to train: {}'.format(accepted_models))
    if cli_args.mode not in accepted_modes:
        raise ValueError('accepted modes are: {}, found {}'.format(accepted_modes, cli_args.mode))

    if cli_args.model == 'Tacotron':
        tacotron_synthesize(cli_args)
    elif cli_args.model == 'Wavenet':
        raise NotImplementedError('Wavenet is still a work in progress, thank you for your patience!')
if __name__ == '__main__':
    main()
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,319
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/train.py
|
import numpy as np
from datetime import datetime
import os
import subprocess
import time
import tensorflow as tf
import traceback
import argparse
from tacotron.feeder import Feeder
from hparams import hparams, hparams_debug_string
from tacotron.models import create_model
from tacotron.utils.text import sequence_to_text
from tacotron.utils import infolog, plot, ValueWindow
from datasets import audio
log = infolog.log
def add_stats(model):
    """Register TensorBoard summaries for the model and return the merged op."""
    with tf.variable_scope('stats'):
        tf.summary.histogram('mel_outputs', model.mel_outputs)
        tf.summary.histogram('mel_targets', model.mel_targets)
        tf.summary.scalar('before_loss', model.before_loss)
        tf.summary.scalar('after_loss', model.after_loss)
        if hparams.predict_linear:
            tf.summary.scalar('linear loss', model.linear_loss)
        tf.summary.scalar('regularization_loss', model.regularization_loss)
        tf.summary.scalar('stop_token_loss', model.stop_token_loss)
        tf.summary.scalar('loss', model.loss)
        # Track learning-rate decay over time.
        tf.summary.scalar('learning_rate', model.learning_rate)
        if hparams.use_vae:
            tf.summary.scalar('ki_loss', model.ki_loss)
        return tf.summary.merge_all()
def time_string():
    """Current local time formatted as 'YYYY-MM-DD HH:MM'."""
    return '{:%Y-%m-%d %H:%M}'.format(datetime.now())
def train(log_dir, args):
    """Main Tacotron training loop.

    Builds the data feeder and model graph, optionally restores the latest
    checkpoint, then trains until the coordinator stops, periodically writing
    summaries, checkpoints, alignment plots and debug spectrograms/waveforms.

    Args:
        log_dir: directory that receives checkpoints, plots, wavs and logs.
        args: parsed CLI namespace; uses base_dir, input, model, restore,
            summary_interval and checkpoint_interval.
    """
    save_dir = os.path.join(log_dir, 'pretrained/')
    checkpoint_path = os.path.join(save_dir, 'model.ckpt')
    input_path = os.path.join(args.base_dir, args.input)
    plot_dir = os.path.join(log_dir, 'plots')
    wav_dir = os.path.join(log_dir, 'wavs')
    mel_dir = os.path.join(log_dir, 'mel-spectrograms')
    os.makedirs(plot_dir, exist_ok=True)
    os.makedirs(wav_dir, exist_ok=True)
    os.makedirs(mel_dir, exist_ok=True)
    if hparams.predict_linear:
        linear_dir = os.path.join(log_dir, 'linear-spectrograms')
        os.makedirs(linear_dir, exist_ok=True)
    log('Checkpoint path: {}'.format(checkpoint_path))
    log('Loading training data from: {}'.format(input_path))
    log('Using model: {}'.format(args.model))
    log(hparams_debug_string())
    #Set up data feeder
    coord = tf.train.Coordinator()
    with tf.variable_scope('datafeeder') as scope:
        feeder = Feeder(coord, input_path, hparams)
    #Set up model:
    step_count = 0
    try:
        #simple text file to keep count of global step
        with open(os.path.join(log_dir, 'step_counter.txt'), 'r') as file:
            step_count = int(file.read())
    except Exception:
        #BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        #are not swallowed; a missing/corrupt counter just means step 0.
        print('no step_counter file found, assuming there is no saved checkpoint')
    global_step = tf.Variable(step_count, name='global_step', trainable=False)
    with tf.variable_scope('model') as scope:
        model = create_model(args.model, hparams)
        if hparams.predict_linear:
            model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.mel_lengths, feeder.token_targets, feeder.linear_targets)
        else:
            model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.mel_lengths, feeder.token_targets)
        model.add_loss(global_step)
        model.add_optimizer(global_step)
        stats = add_stats(model)
    #Book keeping
    step = 0
    time_window = ValueWindow(100)
    loss_window = ValueWindow(100)
    saver = tf.train.Saver(max_to_keep=5)
    #Memory allocation on the GPU as needed
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #Train
    with tf.Session(config=config) as sess:
        try:
            summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
            sess.run(tf.global_variables_initializer())
            #saved model restoring
            #BUGFIX: checkpoint_state was previously unbound (NameError) when
            #restore was skipped or get_checkpoint_state raised.
            checkpoint_state = None
            if args.restore:
                #Restore saved model if the user requested it, Default = True.
                try:
                    checkpoint_state = tf.train.get_checkpoint_state(save_dir)
                except tf.errors.OutOfRangeError as e:
                    log('Cannot restore checkpoint: {}'.format(e))
            if (checkpoint_state and checkpoint_state.model_checkpoint_path):
                log('Loading checkpoint {}'.format(checkpoint_state.model_checkpoint_path))
                saver.restore(sess, checkpoint_state.model_checkpoint_path)
            else:
                if not args.restore:
                    log('Starting new training!')
                else:
                    log('No model to load at {}'.format(save_dir))
            #initializing feeder
            feeder.start_in_session(sess)
            #Training loop
            while not coord.should_stop():
                start_time = time.time()
                step, loss, opt = sess.run([global_step, model.loss, model.optimize])
                time_window.append(time.time() - start_time)
                loss_window.append(loss)
                message = 'Step {:7d} [{:.3f} sec/step, loss={:.5f}, avg_loss={:.5f}]'.format(
                    step, time_window.average, loss, loss_window.average)
                log(message, end='\r')
                #Abort on divergence so a broken run fails loudly.
                if loss > 100 or np.isnan(loss):
                    log('Loss exploded to {:.5f} at step {}'.format(loss, step))
                    raise Exception('Loss exploded')
                if step % args.summary_interval == 0:
                    log('\nWriting summary at step: {}'.format(step))
                    summary_writer.add_summary(sess.run(stats), step)
                if step % args.checkpoint_interval == 0:
                    with open(os.path.join(log_dir,'step_counter.txt'), 'w') as file:
                        file.write(str(step))
                    log('Saving checkpoint to: {}-{}'.format(checkpoint_path, step))
                    saver.save(sess, checkpoint_path, global_step=step)
                    log('Saving alignment, Mel-Spectrograms and griffin-lim inverted waveform..')
                    if hparams.predict_linear:
                        input_seq, mel_prediction, linear_prediction, alignment, target = sess.run([
                            model.inputs[0],
                            model.mel_outputs[0],
                            model.linear_outputs[0],
                            model.alignments[0],
                            model.mel_targets[0],
                            ])
                        #save predicted linear spectrogram to disk (debug)
                        linear_filename = 'linear-prediction-step-{}.npy'.format(step)
                        np.save(os.path.join(linear_dir, linear_filename), linear_prediction.T, allow_pickle=False)
                        #save griffin lim inverted wav for debug (linear -> wav)
                        wav = audio.inv_linear_spectrogram(linear_prediction.T)
                        audio.save_wav(wav, os.path.join(wav_dir, 'step-{}-waveform-linear.wav'.format(step)))
                    else:
                        input_seq, mel_prediction, alignment, target = sess.run([model.inputs[0],
                            model.mel_outputs[0],
                            model.alignments[0],
                            model.mel_targets[0],
                            ])
                    #save predicted mel spectrogram to disk (debug)
                    mel_filename = 'mel-prediction-step-{}.npy'.format(step)
                    np.save(os.path.join(mel_dir, mel_filename), mel_prediction.T, allow_pickle=False)
                    #save griffin lim inverted wav for debug (mel -> wav)
                    wav = audio.inv_mel_spectrogram(mel_prediction.T)
                    audio.save_wav(wav, os.path.join(wav_dir, 'step-{}-waveform-mel.wav'.format(step)))
                    #save alignment plot to disk (control purposes)
                    plot.plot_alignment(alignment, os.path.join(plot_dir, 'step-{}-align.png'.format(step)),
                        info='{}, {}, step={}, loss={:.5f}'.format(args.model, time_string(), step, loss))
                    #save real mel-spectrogram plot to disk (control purposes)
                    plot.plot_spectrogram(target, os.path.join(plot_dir, 'step-{}-real-mel-spectrogram.png'.format(step)),
                        info='{}, {}, step={}, Real'.format(args.model, time_string(), step, loss))
                    #save predicted mel-spectrogram plot to disk (control purposes)
                    plot.plot_spectrogram(mel_prediction, os.path.join(plot_dir, 'step-{}-pred-mel-spectrogram.png'.format(step)),
                        info='{}, {}, step={}, loss={:.5}'.format(args.model, time_string(), step, loss))
                    log('Input at step {}: {}'.format(step, sequence_to_text(input_seq)))
        except Exception as e:
            log('Exiting due to exception: {}'.format(e), slack=True)
            traceback.print_exc()
            coord.request_stop(e)
def tacotron_train(args):
    """Entry point: apply hparam overrides, set up log dirs, then train."""
    hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
    chosen_name = args.name or args.model
    log_dir = os.path.join(args.base_dir, 'logs-{}'.format(chosen_name))
    os.makedirs(log_dir, exist_ok=True)
    infolog.init(os.path.join(log_dir, 'Terminal_train_log'), chosen_name)
    train(log_dir, args)
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,320
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/models/zoneout_LSTM.py
|
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import RNNCell
# Thanks to 'initializers_enhanced.py' of Project RNN Enhancement:
# https://github.com/nicolas-ivanov/Seq2Seq_Upgrade_TensorFlow/blob/master/rnn_enhancement/initializers_enhanced.py
def orthogonal_initializer(scale=1.0):
    """Return an initializer producing (scaled) orthogonal weight matrices."""
    def _initializer(shape, dtype=tf.float32):
        flat_shape = (shape[0], np.prod(shape[1:]))
        gaussian = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # Pick whichever SVD factor matches the flattened shape.
        ortho = u if u.shape == flat_shape else v
        ortho = ortho.reshape(shape)
        return tf.constant(scale * ortho[:shape[0], :shape[1]], dtype=tf.float32)
    return _initializer
class ZoneoutLSTMCell(RNNCell):
    """Zoneout Regularization for LSTM-RNN.

    Zoneout stochastically preserves the previous cell/hidden state instead
    of dropping activations; see the masking logic in `__call__`.
    """
    def __init__(self, num_units, is_training, input_size=None,
                 use_peepholes=False, cell_clip=None,
                 #initializer=orthogonal_initializer(),
                 initializer=tf.contrib.layers.xavier_initializer(),
                 num_proj=None, proj_clip=None, ext_proj=None,
                 forget_bias=1.0,
                 state_is_tuple=True,
                 activation=tf.tanh,
                 zoneout_factor_cell=0.0,
                 zoneout_factor_output=0.0,
                 reuse=None):
        """Initialize the parameters for an LSTM cell.
        Args:
          num_units: int, The number of units in the LSTM cell.
          is_training: bool, set True when training.
          use_peepholes: bool, set True to enable diagonal/peephole
            connections.
          cell_clip: (optional) A float value, if provided the cell state
            is clipped by this value prior to the cell output activation.
          initializer: (optional) The initializer to use for the weight
            matrices.
          num_proj: (optional) int, The output dimensionality for
            the projection matrices. If None, no projection is performed.
          forget_bias: Biases of the forget gate are initialized by default
            to 1 in order to reduce the scale of forgetting at the beginning of
            the training.
          activation: Activation function of the inner states.
          zoneout_factor_cell: probability of keeping the previous cell state.
          zoneout_factor_output: probability of keeping the previous output state.
        """
        if not state_is_tuple:
            tf.logging.warn(
                "%s: Using a concatenated state is slower and will soon be "
                "deprecated. Use state_is_tuple=True.", self)
        if input_size is not None:
            tf.logging.warn(
                "%s: The input_size parameter is deprecated.", self)
        # Zoneout factors are probabilities and must lie in [0, 1].
        if not (zoneout_factor_cell >= 0.0 and zoneout_factor_cell <= 1.0):
            raise ValueError(
                "Parameter zoneout_factor_cell must be in [0 1]")
        # NOTE(review): message says 'cell' but this checks zoneout_factor_output.
        if not (zoneout_factor_output >= 0.0 and zoneout_factor_output <= 1.0):
            raise ValueError(
                "Parameter zoneout_factor_cell must be in [0 1]")
        self.num_units = num_units
        self.is_training = is_training
        self.use_peepholes = use_peepholes
        self.cell_clip = cell_clip
        self.num_proj = num_proj
        self.proj_clip = proj_clip
        self.initializer = initializer
        self.forget_bias = forget_bias
        self.state_is_tuple = state_is_tuple
        self.activation = activation
        self.zoneout_factor_cell = zoneout_factor_cell
        self.zoneout_factor_output = zoneout_factor_output
        # State/output sizes depend on whether an output projection is used.
        if num_proj:
            self._state_size = (
                tf.nn.rnn_cell.LSTMStateTuple(num_units, num_proj)
                if state_is_tuple else num_units + num_proj)
            self._output_size = num_proj
        else:
            self._state_size = (
                tf.nn.rnn_cell.LSTMStateTuple(num_units, num_units)
                if state_is_tuple else 2 * num_units)
            self._output_size = num_units
        self._ext_proj = ext_proj
    @property
    def state_size(self):
        return self._state_size
    @property
    def output_size(self):
        if self._ext_proj is None:
            return self._output_size
        return self._ext_proj
    def __call__(self, inputs, state, scope=None):
        """Run one LSTM step with zoneout applied to cell and output states."""
        num_proj = self.num_units if self.num_proj is None else self.num_proj
        if self.state_is_tuple:
            (c_prev, h_prev) = state
        else:
            c_prev = tf.slice(state, [0, 0], [-1, self.num_units])
            h_prev = tf.slice(state, [0, self.num_units], [-1, num_proj])
        # c_prev : Tensor with the size of [batch_size, state_size]
        # h_prev : Tensor with the size of [batch_size, state_size/2]
        dtype = inputs.dtype
        input_size = inputs.get_shape().with_rank(2)[1]
        with tf.variable_scope(scope or type(self).__name__):
            if input_size.value is None:
                raise ValueError(
                    "Could not infer input size from inputs.get_shape()[-1]")
            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            lstm_matrix = _linear([inputs, h_prev], 4 * self.num_units, True)
            i, j, f, o = tf.split(lstm_matrix, 4, 1)
            # diagonal connections
            if self.use_peepholes:
                w_f_diag = tf.get_variable(
                    "W_F_diag", shape=[self.num_units], dtype=dtype)
                w_i_diag = tf.get_variable(
                    "W_I_diag", shape=[self.num_units], dtype=dtype)
                w_o_diag = tf.get_variable(
                    "W_O_diag", shape=[self.num_units], dtype=dtype)
            with tf.name_scope(None, "zoneout"):
                # make binary mask tensor for cell
                # floor(factor + U[0,1)) is 1 with probability `factor`, so the
                # mask marks positions whose previous state is preserved.
                keep_prob_cell = tf.convert_to_tensor(
                    self.zoneout_factor_cell,
                    dtype=c_prev.dtype
                )
                random_tensor_cell = keep_prob_cell
                random_tensor_cell += \
                    tf.random_uniform(tf.shape(c_prev),
                                      seed=None, dtype=c_prev.dtype)
                binary_mask_cell = tf.floor(random_tensor_cell)
                # 0 <-> 1 swap
                binary_mask_cell_complement = tf.ones(tf.shape(c_prev)) \
                    - binary_mask_cell
                # make binary mask tensor for output
                keep_prob_output = tf.convert_to_tensor(
                    self.zoneout_factor_output,
                    dtype=h_prev.dtype
                )
                random_tensor_output = keep_prob_output
                random_tensor_output += \
                    tf.random_uniform(tf.shape(h_prev),
                                      seed=None, dtype=h_prev.dtype)
                binary_mask_output = tf.floor(random_tensor_output)
                # 0 <-> 1 swap
                binary_mask_output_complement = tf.ones(tf.shape(h_prev)) \
                    - binary_mask_output
            # apply zoneout for cell: with probability zoneout_factor_cell the
            # previous cell state survives unchanged (training only).
            if self.use_peepholes:
                c_temp = c_prev * \
                    tf.sigmoid(f + self.forget_bias +
                               w_f_diag * c_prev) + \
                    tf.sigmoid(i + w_i_diag * c_prev) * \
                    self.activation(j)
                if self.is_training and self.zoneout_factor_cell > 0.0:
                    c = binary_mask_cell * c_prev + \
                        binary_mask_cell_complement * c_temp
                else:
                    c = c_temp
            else:
                c_temp = c_prev * tf.sigmoid(f + self.forget_bias) + \
                    tf.sigmoid(i) * self.activation(j)
                if self.is_training and self.zoneout_factor_cell > 0.0:
                    c = binary_mask_cell * c_prev + \
                        binary_mask_cell_complement * c_temp
                else:
                    c = c_temp
            if self.cell_clip is not None:
                c = tf.clip_by_value(c, -self.cell_clip, self.cell_clip)
            # apply zoneout for output (same scheme as the cell state)
            if self.use_peepholes:
                h_temp = tf.sigmoid(o + w_o_diag * c) * self.activation(c)
                if self.is_training and self.zoneout_factor_output > 0.0:
                    h = binary_mask_output * h_prev + \
                        binary_mask_output_complement * h_temp
                else:
                    h = h_temp
            else:
                h_temp = tf.sigmoid(o) * self.activation(c)
                if self.is_training and self.zoneout_factor_output > 0.0:
                    h = binary_mask_output * h_prev + \
                        binary_mask_output_complement * h_temp
                else:
                    h = h_temp
            # apply projection
            if self.num_proj is not None:
                w_proj = tf.get_variable(
                    "W_P", [self.num_units, num_proj], dtype=dtype)
                h = tf.matmul(h, w_proj)
                if self.proj_clip is not None:
                    h = tf.clip_by_value(h, -self.proj_clip, self.proj_clip)
            new_state = (tf.nn.rnn_cell.LSTMStateTuple(c, h)
                         if self.state_is_tuple else tf.concat(1, [c, h]))
            return h, new_state
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
    Args:
        args: a 2D Tensor or a list of 2D, batch x n, Tensors.
        output_size: int, second dimension of W[i].
        bias: boolean, whether to add a bias term or not.
        bias_start: starting value to initialize the bias; 0 by default.
        scope: VariableScope for the created subgraph; defaults to "Linear".
    Returns:
        A 2D Tensor with shape [batch x output_size] equal to
        sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
    Raises:
        ValueError: if some of the arguments has unspecified or wrong shape.
    """
    if args is None or (isinstance(args, (list, tuple)) and not args):
        raise ValueError("`args` must be specified")
    if not isinstance(args, (list, tuple)):
        args = [args]

    # Validate every argument is 2D with a known second dimension, and sum
    # those second dimensions to size the combined weight matrix.
    shapes = [t.get_shape().as_list() for t in args]
    total_arg_size = 0
    for arg_shape in shapes:
        if len(arg_shape) != 2:
            raise ValueError(
                "Linear is expecting 2D arguments: %s" % str(shapes))
        if not arg_shape[1]:
            raise ValueError(
                "Linear expects shape[1] of arguments: %s" % str(shapes))
        total_arg_size += arg_shape[1]

    with tf.variable_scope(scope or "Linear"):
        weight = tf.get_variable("Matrix", [total_arg_size, output_size])
        # A single argument skips the concat.
        if len(args) == 1:
            res = tf.matmul(args[0], weight)
        else:
            res = tf.matmul(tf.concat(args, 1), weight)
        if not bias:
            return res
        bias_term = tf.get_variable(
            "Bias", [output_size],
            initializer=tf.constant_initializer(bias_start))
        return res + bias_term
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,321
|
gilmoore/VAE_Tacotron2
|
refs/heads/master
|
/tacotron/models/modules.py
|
import tensorflow as tf
from tacotron.models.zoneout_LSTM import ZoneoutLSTMCell
from tensorflow.contrib.rnn import LSTMBlockCell
from hparams import hparams
from tensorflow.contrib.rnn import GRUCell
from tacotron.utils.util import shape_list
def VAE(inputs, input_lengths, filters, kernel_size, strides, num_units, is_training, scope):
    """Variational posterior over a reference embedding.

    Encodes the reference signal, projects it to a Gaussian (mu, log_var)
    and draws a latent sample via the reparameterization trick.

    Returns:
        (sample, mu, log_var) tensors, each of width `num_units`.
    """
    with tf.variable_scope(scope):
        ref_embedding = ReferenceEncoder(
            inputs=inputs,
            input_lengths=input_lengths,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            is_training=is_training)
        mu = tf.layers.dense(ref_embedding, num_units, name='mean')
        log_var = tf.layers.dense(ref_embedding, num_units, name='vari')
        # NOTE(review): std = exp(log_var), not exp(0.5 * log_var), so 'vari'
        # effectively learns log(std) — confirm this matches the loss term.
        std = tf.exp(log_var)
        eps = tf.random_normal(shape=[tf.shape(mu)[0], num_units], mean=0.0, stddev=1.0)
        sample = mu + eps * std
        return sample, mu, log_var
def ReferenceEncoder(inputs, input_lengths, filters, kernel_size, strides, is_training, scope='reference_encoder'):
    """Encode a reference signal into a fixed-size vector (final GRU state)."""
    with tf.variable_scope(scope):
        # Add a trailing channel axis for the 2D convolutions.
        hidden = tf.expand_dims(inputs, axis=-1)
        # Stack of strided conv2d + batch-norm + relu layers.
        for layer_idx, n_channels in enumerate(filters):
            hidden = conv2d(hidden, n_channels, kernel_size,
                strides, tf.nn.relu, is_training, 'conv2d_{}'.format(layer_idx))
        dims = shape_list(hidden)
        # Merge the last two axes before feeding the recurrent layer.
        hidden = tf.reshape(hidden, dims[:-2] + [dims[2] * dims[3]])
        # Only the final GRU state is used as the embedding.
        _, final_state = tf.nn.dynamic_rnn(
            cell=GRUCell(128),
            inputs=hidden,
            sequence_length=input_lengths,
            dtype=tf.float32
        )
        return final_state
def conv1d(inputs, kernel_size, channels, activation, is_training, scope):
    """conv1d -> batch norm -> activation -> dropout block."""
    drop_rate = hparams.tacotron_dropout_rate
    with tf.variable_scope(scope):
        convolved = tf.layers.conv1d(
            inputs,
            filters=channels,
            kernel_size=kernel_size,
            activation=None,
            padding='same')
        # Batch norm before the nonlinearity; dropout only while training.
        normalized = tf.layers.batch_normalization(convolved, training=is_training)
        activated = activation(normalized)
        return tf.layers.dropout(activated, rate=drop_rate, training=is_training,
            name='dropout_{}'.format(scope))
def conv2d(inputs, filters, kernel_size, strides, activation, is_training, scope):
    """conv2d -> batch norm -> optional activation.

    Args:
        inputs: 4-D input tensor.
        filters/kernel_size/strides: forwarded to tf.layers.conv2d ('same' padding).
        activation: callable applied after batch norm, or None for a linear block.
        is_training: controls batch-norm behavior.
        scope: variable scope name.
    """
    with tf.variable_scope(scope):
        conv2d_output = tf.layers.conv2d(
            inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding='same')
        batch_norm_output = tf.layers.batch_normalization(
            conv2d_output, training=is_training, name='batch_norm')
        # BUGFIX: previously, when activation was None, the batch-normalized
        # tensor was computed but discarded and the raw conv output returned.
        if activation is not None:
            return activation(batch_norm_output)
        return batch_norm_output
class EncoderConvolutions:
    """Stack of 1-D convolutions modelling local context in input characters."""

    def __init__(self, is_training, kernel_size=(5, ), channels=512, activation=tf.nn.relu, scope=None):
        """
        Args:
            is_training: Boolean, controls dropout inside each conv block.
            kernel_size: tuple or integer, size of the convolution kernels.
            channels: integer, number of convolutional kernels per layer.
            activation: callable applied after each batch-normed convolution.
            scope: variable scope name (defaults to 'enc_conv_layers').
        """
        super(EncoderConvolutions, self).__init__()
        self.is_training = is_training
        self.kernel_size = kernel_size
        self.channels = channels
        self.activation = activation
        self.scope = 'enc_conv_layers' if scope is None else scope

    def __call__(self, inputs):
        with tf.variable_scope(self.scope):
            hidden = inputs
            # hparams.enc_conv_num_layers identical conv blocks in sequence.
            for layer_idx in range(hparams.enc_conv_num_layers):
                hidden = conv1d(hidden, self.kernel_size, self.channels, self.activation,
                    self.is_training, 'conv_layer_{}_'.format(layer_idx + 1) + self.scope)
            return hidden
class EncoderRNN:
    """Single bidirectional zoneout-LSTM layer for the encoder."""

    def __init__(self, is_training, size=256, zoneout=0.1, scope=None):
        """
        Args:
            is_training: Boolean, controls zoneout.
            size: integer, LSTM units per direction.
            zoneout: zoneout probability for cell and output states.
            scope: variable scope name (defaults to 'encoder_LSTM').
        """
        super(EncoderRNN, self).__init__()
        self.is_training = is_training
        self.size = size
        self.zoneout = zoneout
        self.scope = 'encoder_LSTM' if scope is None else scope
        # One cell instance, passed as both forward and backward cell below.
        self._cell = ZoneoutLSTMCell(size, is_training,
            zoneout_factor_cell=zoneout,
            zoneout_factor_output=zoneout)

    def __call__(self, inputs, input_lengths):
        with tf.variable_scope(self.scope):
            outputs, (fw_state, bw_state) = tf.nn.bidirectional_dynamic_rnn(
                self._cell,
                self._cell,
                inputs,
                sequence_length=input_lengths,
                dtype=tf.float32)
            # Concatenate forward + backward outputs along the feature axis.
            return tf.concat(outputs, axis=2)
class Prenet:
    """Fully connected bottleneck layers feeding the attention mechanism."""

    def __init__(self, is_training, layer_sizes=[256, 256], activation=tf.nn.relu, scope=None):
        """
        Args:
            is_training: Boolean (dropout here is deliberately always on).
            layer_sizes: list of integers, one entry per prenet layer giving
                that layer's number of units.
            activation: callable activation for every layer.
            scope: variable scope name (defaults to 'prenet').
        """
        super(Prenet, self).__init__()
        self.drop_rate = hparams.tacotron_dropout_rate
        self.layer_sizes = layer_sizes
        self.is_training = is_training
        self.activation = activation
        self.scope = 'prenet' if scope is None else scope

    def __call__(self, inputs):
        hidden = inputs
        with tf.variable_scope(self.scope):
            for layer_idx, width in enumerate(self.layer_sizes):
                dense = tf.layers.dense(hidden, units=width, activation=self.activation,
                    name='dense_{}'.format(layer_idx + 1))
                # Dropout stays enabled at inference too (training=True): the
                # paper uses it to introduce diversity during generation.
                hidden = tf.layers.dropout(dense, rate=self.drop_rate, training=True,
                    name='dropout_{}'.format(layer_idx + 1) + self.scope)
        return hidden
class DecoderRNN:
    """Stack of unidirectional zoneout-LSTM cells used by the decoder."""

    def __init__(self, is_training, layers=2, size=1024, zoneout=0.1, scope=None):
        """
        Args:
            is_training: Boolean, controls zoneout.
            layers: integer, number of stacked LSTM layers.
            size: integer, LSTM units per layer.
            zoneout: zoneout probability for cell and output states.
        """
        super(DecoderRNN, self).__init__()
        self.is_training = is_training
        self.layers = layers
        self.size = size
        self.zoneout = zoneout
        self.scope = 'decoder_rnn' if scope is None else scope
        # Build the stacked cell once; it is reused on every decoder step.
        self.rnn_layers = [ZoneoutLSTMCell(size, is_training,
            zoneout_factor_cell=zoneout,
            zoneout_factor_output=zoneout) for _ in range(layers)]
        self._cell = tf.contrib.rnn.MultiRNNCell(self.rnn_layers, state_is_tuple=True)

    def __call__(self, inputs, states):
        with tf.variable_scope(self.scope):
            return self._cell(inputs, states)
class FrameProjection:
    """Projection layer to r * num_mels dimensions or num_mels dimensions."""

    def __init__(self, shape=80, activation=None, scope=None):
        """
        Args:
            shape: integer, dimensionality of the output space.
            activation: optional callable applied to the projection.
            scope: variable scope name (defaults to 'Linear_projection').
        """
        super(FrameProjection, self).__init__()
        self.shape = shape
        self.activation = activation
        self.scope = 'Linear_projection' if scope is None else scope

    def __call__(self, inputs):
        with tf.variable_scope(self.scope):
            # With activation=None this is a plain linear projection;
            # otherwise the dense output passes through the activation.
            return tf.layers.dense(inputs, units=self.shape, activation=self.activation,
                name='projection_{}'.format(self.scope))
class StopProjection:
    """Stop-token projection, sigmoid-activated only at inference time."""

    def __init__(self, is_training, shape=hparams.outputs_per_step, activation=tf.nn.sigmoid, scope=None):
        """
        Args:
            is_training: Boolean; when True, raw logits are returned because
                the sigmoid is integrated inside the sigmoid_cross_entropy loss.
            shape: integer, dimensionality of the output space.
            activation: callable applied only during inference.
            scope: variable scope name (defaults to 'stop_token_projection').
        """
        super(StopProjection, self).__init__()
        self.is_training = is_training
        self.shape = shape
        self.activation = activation
        self.scope = 'stop_token_projection' if scope is None else scope

    def __call__(self, inputs):
        with tf.variable_scope(self.scope):
            logits = tf.layers.dense(inputs, units=self.shape,
                activation=None, name='projection_{}'.format(self.scope))
            # Training: raw logits (loss applies the sigmoid itself).
            if self.is_training:
                return logits
            return self.activation(logits)
class Postnet:
    """Convolutional refinement of the decoder output (sees past and future frames)."""

    def __init__(self, is_training, kernel_size=(5, ), channels=512, activation=tf.nn.tanh, scope=None):
        """
        Args:
            is_training: Boolean, controls dropout inside each conv block.
            kernel_size: tuple or integer, size of the convolution kernels.
            channels: integer, number of convolutional kernels per layer.
            activation: callable used on all layers except the final linear one.
            scope: variable scope name (defaults to 'postnet_convolutions').
        """
        super(Postnet, self).__init__()
        self.is_training = is_training
        self.kernel_size = kernel_size
        self.channels = channels
        self.activation = activation
        self.scope = 'postnet_convolutions' if scope is None else scope

    def __call__(self, inputs):
        with tf.variable_scope(self.scope):
            hidden = inputs
            for layer_idx in range(hparams.postnet_num_layers - 1):
                hidden = conv1d(hidden, self.kernel_size, self.channels, self.activation,
                    self.is_training, 'conv_layer_{}_'.format(layer_idx + 1) + self.scope)
            # Final layer has no nonlinearity (identity activation).
            hidden = conv1d(hidden, self.kernel_size, self.channels, lambda _: _, self.is_training, 'conv_layer_{}_'.format(5) + self.scope)
            return hidden
|
{"/train.py": ["/tacotron/train.py"], "/tacotron/synthesizer.py": ["/hparams.py"], "/tacotron/models/tacotron.py": ["/tacotron/models/modules.py", "/tacotron/models/zoneout_LSTM.py", "/tacotron/utils/util.py"], "/tacotron/utils/audio.py": ["/hparams.py"], "/tacotron/synthesize.py": ["/hparams.py", "/tacotron/synthesizer.py", "/tacotron/utils/audio.py"], "/tacotron/utils/util.py": ["/hparams.py"], "/synthesize.py": ["/tacotron/synthesize.py"], "/tacotron/train.py": ["/hparams.py"], "/tacotron/models/modules.py": ["/tacotron/models/zoneout_LSTM.py", "/hparams.py", "/tacotron/utils/util.py"]}
|
8,322
|
gminator/weather
|
refs/heads/master
|
/openweather/models.py
|
from django.db import models
import requests
from datetime import datetime, timedelta
import math
class Day(object):
    """Aggregates one day of hourly weather data (temperature, humidity, wind).

    Temperatures arrive in Kelvin (OpenWeather convention) and are converted
    to the requested unit ('c', 'k' or 'f'); wind speed is converted from
    km/h to mph.
    """

    def unit(self, temp):
        """Convert a Kelvin temperature to self.units, rounded to 2 decimals.

        Raises:
            KeyError: if self.units is not one of 'c', 'k', 'f'.
        """
        return round({
            "c": temp - 273.15,
            "k": temp,
            # BUGFIX: was 274.15; K -> F is (K - 273.15) * 9/5 + 32.
            "f": (((temp - 273.15) / 5) * 9) + 32,
        }[self.units], 2)

    def __init__(self, **kwargs):
        self.lat = kwargs["lat"] if "lat" in kwargs else None
        self.lng = kwargs["lon"] if "lon" in kwargs else None
        self.tz = kwargs["timezone"] if "timezone" in kwargs else None
        self.hourly = kwargs["hourly"] if "hourly" in kwargs else []
        self.units = kwargs["units"] if "units" in kwargs else "c"
        # BUGFIX: derive the series from self.hourly so a missing 'hourly'
        # key yields empty series instead of a KeyError.
        self.tmps = [self.unit(hour["temp"]) if "temp" in hour else None for hour in self.hourly]
        self.humids = [hour["humidity"] if "humidity" in hour else None for hour in self.hourly]
        # wind_speed km/h -> mph
        self.winds = [round(hour["wind_speed"] / 1.60934, 2) if "wind_speed" in hour else None for hour in self.hourly]

    @property
    def median_humidity(self):
        return self.median(self.humids)

    @property
    def min_humidity(self):
        return self.min(self.humids)

    @property
    def max_humidity(self):
        return self.max(self.humids)

    @property
    def avg_humidity(self):
        return self.average(self.humids)

    @property
    def avg_tmp(self):
        return self.average(self.tmps)

    @property
    def median_tmp(self):
        return self.median(self.tmps)

    @property
    def min_tmp(self):
        return self.min(self.tmps)

    @property
    def max_tmp(self):
        return self.max(self.tmps)

    def min(self, data):
        return min(data)

    def max(self, data):
        return max(data)

    def median(self, data):
        """Return the median of *data* (rounded to 2 decimals for even lengths).

        BUGFIX: works on a sorted copy; previously it sorted the caller's
        list in place, silently mutating self.tmps / self.humids.
        """
        ordered = sorted(data)
        count = len(ordered)
        # Even number of samples: mean of the two middle values.
        if count % 2 == 0:
            lower = (count // 2) - 1
            return round((ordered[lower] + ordered[lower + 1]) / 2, 2)
        return ordered[count // 2]

    def average(self, data):
        # BUGFIX: parameter was misspelled 'sefl' (stood in for self).
        return round(sum(data) / len(data), 2)

    def serialize(self):
        """JSON-serializable summary of the day's temperature and humidity."""
        return {
            "temp": {
                "min": self.min_tmp,
                "max": self.max_tmp,
                "median": self.median_tmp,
                "avg": self.avg_tmp,
            },
            "humidity": {
                "min": self.min_humidity,
                "max": self.max_humidity,
                "median": self.median_humidity,
                "avg": self.avg_humidity,
            },
            "graph": self.graph()
        }

    def graph(self):
        """Chart rows: a header then one [local-time, temp, humidity] per hour."""
        data = {}
        for row in self.hourly:
            key = datetime.fromtimestamp(row["dt"]).strftime("%Y-%m-%d %H:%M")
            # First reading wins for a given minute key.
            if key not in data:
                data[key] = [self.unit(row["temp"]), row["humidity"]]
        graph = [["Date", "Temp", "Humidity"]]
        for date, values in data.items():
            graph.append([date] + values)
        return graph
# Create your models here.
class OpenWeather(object):
    """Thin client for the RapidAPI OpenWeather 'onecall/timemachine' endpoint."""

    def __init__(self):
        self.uri = "https://community-open-weather-map.p.rapidapi.com/"
        # NOTE(review): API credentials belong in configuration/env vars,
        # not in source control.
        self.key = "06eafc15dbmsh348f712812a3bf8p136ec0jsn8b48402aa070"

    def headers(self):
        """Request headers carrying the RapidAPI key."""
        return {
            "x-rapidapi-key": self.key,
        }

    def weather_on(self, **kwargs):
        """Fetch past weather for a point in time (up to 5 days back).

        Keyword Args:
            dt: int, unix timestamp of the requested day (required).
            lat, lon: coordinates (required).
            units: temperature unit forwarded to Day.
            stub: optional datetime overriding 'now' (used for testing).

        Returns:
            Day built from the API response.

        Raises:
            BadRequest: if dt, lat or lon is missing or None.
            BadDateException: if dt is older than the 5 day limit.
        """
        # BUGFIX: presence/None checks must run BEFORE the date-limit
        # comparison; previously a missing or None dt raised TypeError
        # instead of BadRequest.
        if "dt" not in kwargs or kwargs["dt"] == None: raise BadRequest("Please set a time")
        if "lat" not in kwargs or kwargs["lat"] == None: raise BadRequest("Please set a lattitude")
        if "lon" not in kwargs or kwargs["lon"] == None: raise BadRequest("Please set a longtitude")

        current_date = datetime.now()
        if "stub" in kwargs:
            current_date = kwargs["stub"]
        limit = current_date - timedelta(days=5)
        if kwargs["dt"] < limit.timestamp():
            raise BadDateException("Your date exceeds the the 5 day limit")

        response = requests.request("GET", self.uri + "onecall/timemachine",
            headers=self.headers(),
            params=kwargs)
        return Day(units=kwargs["units"], **response.json())
class BadDateException(Exception): pass
class BadRequest(Exception): pass
|
{"/openweather/api.py": ["/openweather/models.py"], "/openweather/tests.py": ["/openweather/models.py"]}
|
8,323
|
gminator/weather
|
refs/heads/master
|
/openweather/api.py
|
from django.shortcuts import render
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from openweather.models import OpenWeather, BadRequest, BadDateException
from datetime import datetime
from rest_framework import status
class WeatherViewSet(viewsets.ViewSet):
    """Read-only endpoint returning a serialized weather summary."""

    def list(self, request):
        """Handle GET: query params are date (YYYY/MM/DD), lat, lng, unit."""
        params = request.GET
        client = OpenWeather()
        try:
            raw_date = params.get("date")
            day = client.weather_on(
                dt=int(datetime.strptime(raw_date, "%Y/%m/%d").timestamp()) if raw_date is not None else None,
                lat=params.get("lat"),
                lon=params.get("lng"),
                units=params.get("unit", "c"),
            )
        except (BadRequest, BadDateException) as e:
            # Both validation failures map to a 400 carrying the message.
            return Response({"error" : str(e)}, status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # Anything unexpected (network, parsing, bad date format) -> 500.
            return Response({"error" : str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response(day.serialize())
|
{"/openweather/api.py": ["/openweather/models.py"], "/openweather/tests.py": ["/openweather/models.py"]}
|
8,324
|
gminator/weather
|
refs/heads/master
|
/openweather/views.py
|
from django.shortcuts import render
from django.views.generic import TemplateView, ListView,ListView, DetailView,View
class OpenWeatherView(TemplateView):
    """Serve the weather dashboard page (template: weather.html)."""
    template_name = "weather.html"
|
{"/openweather/api.py": ["/openweather/models.py"], "/openweather/tests.py": ["/openweather/models.py"]}
|
8,325
|
gminator/weather
|
refs/heads/master
|
/openweather/tests.py
|
from django.test import TestCase
from openweather.models import OpenWeather,BadDateException, Day
from unittest.mock import patch
from datetime import datetime
# Create your tests here.
class OpenWeatherTests(TestCase):
    """Unit tests for Day statistics and OpenWeather request handling.

    All assertions use assertEqual: the assertEquals alias has been
    deprecated since Python 3.2 and was removed in Python 3.12.
    """
    def test_test_median_humidity(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.median_humidity, 50)
    def test_test_max_humidity(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.max_humidity, 100)
    def test_test_min_humidity(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.min_humidity, 10)
    def test_day_average(self,):
        # NOTE(review): misnamed — this duplicates
        # test_test_median_tmp_no_middle (it asserts the median, not the mean).
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802},
            {"temp": 277.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.median_tmp, 2.5)
    def test_test_median_tmp_no_middle(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802},
            {"temp": 277.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.median_tmp, 2.5)
    def test_test_median_tmp(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.median_tmp, 2.0)
    def test_test_max_tmp(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.max_tmp, 3.0)
    def test_test_min_tmp(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.min_tmp, 1.0)
    def test_celius_constructor(self,):
        day = Day(hourly=[
            {"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934},
            {"temp": 275.15,"humidity": 50, "wind_speed" : 3.21868},
            {"temp": 276.15, "humidity": 100, "wind_speed": 4.82802}
        ])
        self.assertEqual(day.tmps, [1.0,2.0,3.0])
        self.assertEqual(day.humids, [10,50,100])
        self.assertEqual(day.winds, [1,2,3])
    def test_past_days_invalid_date(self,):
        weather = OpenWeather()
        with self.assertRaises(BadDateException) as context:
            weather.weather_on(units="c",lat=0, lon=0,dt=123453)
    def test_past_days(self,):
        now = datetime.now()
        weather = OpenWeather()
        def api_response(*args, **kwargs):
            # Stub for requests.request so no network call is made.
            return FakeResponse({"hourly" : [{"temp": 274.15,"humidity": 10, "wind_speed" : 1.60934}]})
        with patch('requests.request', api_response):
            day = weather.weather_on(units="c",lat=0, lon=0,dt=now.timestamp(), stub=now)
        self.assertIsInstance(day, Day)
class FakeResponse(object):
    """Stand-in for requests.Response exposing only the .json() accessor."""

    def __init__(self, json):
        self.data = json

    def json(self):
        return self.data
|
{"/openweather/api.py": ["/openweather/models.py"], "/openweather/tests.py": ["/openweather/models.py"]}
|
8,422
|
aeberspaecher/transparent_pyfftw
|
refs/heads/master
|
/transparent_pyfftw/generate_wrappers.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Generate pyfftw wrapper functions by name.
Add the threads keyword to the call, do nothing else.
"""
# if the wrapper codes are removed from transparent_pyfftw_wrapper.py,
# we can regenerate and re-add with
# ./generate_wrappers.py >> transparent_pyfftw_wrapper.py
# NOTE(review): this list appears unused here — the importing modules iterate
# over the pyfftw interface's __all__ instead; kept for reference/regeneration.
names = ["fft", "ifft", "fft2", "ifft2", "fftn", "ifftn", "rfft", "irfft",
         "rfft2", "irfft2", "rfftn", "irfftn", "hfft", "ihfft", "hfft2",
         "ihfft2", "hfftn", "ihfftn"]
def generate_wrapper(name, module, original_docstring, num_threads):
    """Generate a wrapper function.
    Parameters
    ----------
    name : string
        Name of the function wrapped.
    module : string
        Name of the module the wrapped function is part of.
    original_docstring : string
        Docstring of the wrapped function.
    num_threads : int
        Number of threads to use.
    Returns
    -------
    wrapper_code : string
        A string that contains the code to the wrapper function.
    """
    # create a string that informs the user about the 'threads' parameter added
    # to the call if appropriate:
    # check two versions of the string that triggers addition of the threads
    # keyword - this is necessary due to pyfftw documentation inconsistencies
    add_keyword_atoms = ('additional arguments docs', 'additional argument docs')
    if(any( [ keyword in original_docstring for keyword in add_keyword_atoms ] )):
        additional_arg_string = \
            'Arguments automatically added on call are "threads=%s".\n'%num_threads
        additional_arg_code = 'kwargs["threads"] = %s'%num_threads
    else:
        additional_arg_string = \
            'This wrapper does nothing besides calling the pyfftw function.\n'
        additional_arg_code = ''
    # NOTE(review): the template below is filled from %locals() — renaming any
    # local variable referenced in the template silently breaks generation.
    wrapper_string = '''
def %(name)s(*args, **kwargs):
    """A thin wrapper around pyfftw.interfaces.%(module)s.%(name)s.
    %(additional_arg_string)s
    Docstring of original pyfftw function:
    --------------------------------------
    %(original_docstring)s
    """
    %(additional_arg_code)s
    return _%(name)s(*args, **kwargs)
'''%locals()
    return wrapper_string
|
{"/transparent_pyfftw/__init__.py": ["/transparent_pyfftw/transparent_pyfftw.py"]}
|
8,423
|
aeberspaecher/transparent_pyfftw
|
refs/heads/master
|
/transparent_pyfftw/transparent_pyfftw.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Common functions for transparent_pyfftw.
"""
import os
import numpy as np
import pyfftw
from .options import wisdom_file
def read_wisdom():
    """Read wisdom and get it in the data structures expected by pyfftw.

    Returns a list of wisdom entry strings, or None when the wisdom file is
    missing or empty.
    """
    try:
        # `with open(...)` replaces the old `file(...)` call, which never
        # closed its handle (and the `file` builtin no longer exists on
        # Python 3).
        with open(wisdom_file, mode="r") as fh:
            wisdom = fh.readlines()
    except IOError:
        print("Wisdom file not loadable. If you haven't saved any wisdom yet, try calling save_wisdom().")
        wisdom = None
    else:
        if(len(wisdom) == 0):
            print("Wisdom file is empty. Try calling save_wisdom().")
            wisdom = None
        else:
            wisdom_tuple = []
            for line in wisdom:
                # if a line starts with a space or a right paren, it belongs to
                # last list member ("current" element). otherwise, it starts a
                # new member.
                if(line.startswith(" ") or line.startswith(")")):
                    wisdom_tuple[-1] += line  # append to string
                else:
                    wisdom_tuple.append(line)
            wisdom = wisdom_tuple  # override
    return wisdom
# if configured to use centuries of fftw wisdom, read the fftw oracle of
# delphi (i.e. the wisdom file) - do this on import:
if(wisdom_file is not None):
    wisdom = read_wisdom()
    if(wisdom is not None):
        pyfftw.import_wisdom(wisdom)
# Cache the SIMD alignment once; the array helpers below reuse it.
pyfftw_simd_alignment = pyfftw.simd_alignment
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(300) # keep cache alive for 300 sec
# TODO: make this a configurable parameter?
def get_num_threads():
    """Get number of threads from environment variable.

    Returns
    -------
    num_threads : int
        $TFFTW_NUM_THREADS if set, 1 otherwise.
    """
    raw = os.environ.get("TFFTW_NUM_THREADS")
    return int(raw) if raw is not None else 1
def save_wisdom():
    """Save generated wisdom to file specified when configuring the project.

    Raises an Exception when the project was configured without a wisdom file.
    """
    # Guard clause first; `open` replaces the Python-2-only `file` builtin.
    if(wisdom_file is None):
        raise Exception("Configured not to use any FFTW wisdom!")
    wisdom = pyfftw.export_wisdom()
    with open(wisdom_file, mode="w") as f:
        for wisdom_bit in wisdom:
            f.write(wisdom_bit)
def get_empty_fftw_array(shape, dtype=np.float64, **kwargs):
    """Create memory aligned empty array.
    Parameters
    ----------
    shape : tuple-like
    dtype : object
    Returns
    -------
    aligned : array
        Empty, byte-aligned array.
    Notes
    -----
    Keyword arguments are passed on to pyfftw.n_byte_align_empty().
    Alignment comes from the module-level pyfftw_simd_alignment cached above.
    """
    return pyfftw.n_byte_align_empty(shape, pyfftw_simd_alignment, dtype, **kwargs)
def align_array(arr):
    """Return memory aligned copy of arr. This may be speed up pyfftw calls.
    Parameters
    ----------
    arr : array
    Returns
    -------
    arr_aligned : array
        Copy of arr aligned to the platform's SIMD boundary.
    """
    return pyfftw.n_byte_align(arr, pyfftw_simd_alignment)
|
{"/transparent_pyfftw/__init__.py": ["/transparent_pyfftw/transparent_pyfftw.py"]}
|
8,424
|
aeberspaecher/transparent_pyfftw
|
refs/heads/master
|
/transparent_pyfftw/scipy_fftpack.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Wrappers for pyfftw's SciPy fftpack interfaces.
"""
import pyfftw.interfaces.scipy_fftpack as sfft
from .generate_wrappers import generate_wrapper
from .transparent_pyfftw import *
# the wrappers are generated on import:
func_names = sfft.__all__
for func_name in func_names:
    num_threads = get_num_threads()
    original_docstring = sfft.__dict__[func_name].__doc__
    wrapper_func_string = generate_wrapper(func_name, "scipy_fftpack",
                                           sfft.__dict__[func_name].__doc__,
                                           num_threads)
    # import pyfftw functions and add a '_' to the name:
    # NOTE(review): statement-form `exec` — this module is Python 2 only.
    exec "from pyfftw.interfaces.scipy_fftpack import %s as _%s"%(2*(func_name,))
    # define the wrapper:
    exec wrapper_func_string
|
{"/transparent_pyfftw/__init__.py": ["/transparent_pyfftw/transparent_pyfftw.py"]}
|
8,425
|
aeberspaecher/transparent_pyfftw
|
refs/heads/master
|
/transparent_pyfftw/__init__.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Transparent wrappers for pyfftw's NumPy FFT interfaces.
Does nothing more but inserting a number of threads into your FFT calls and
handing that parameter to pyfftw.
Wrappers are available for pyfftw.numpy_fft and pyfftw.scipy_fftpack.
To save acquired wisdom, call transparent_pyfftw.save_wisdom(). Wisdom is automatically loaded on import.
Additional helper functions: save_wisdom(), get_empty_fftw_array(),
align_array().
"""
from .transparent_pyfftw import *
|
{"/transparent_pyfftw/__init__.py": ["/transparent_pyfftw/transparent_pyfftw.py"]}
|
8,426
|
aeberspaecher/transparent_pyfftw
|
refs/heads/master
|
/transparent_pyfftw/numpy_fft.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Wrappers for pyfftw's NumPy fft interfaces.
"""
import pyfftw.interfaces.numpy_fft as nfft
from .generate_wrappers import generate_wrapper
from .transparent_pyfftw import *
# the wrappers are generated on import:
func_names = nfft.__all__
for func_name in func_names:
    num_threads = get_num_threads()
    original_docstring = nfft.__dict__[func_name].__doc__
    wrapper_func_string = generate_wrapper(func_name, "numpy_fft",
                                           nfft.__dict__[func_name].__doc__,
                                           num_threads)
    # import pyfftw functions and add a '_' to the name:
    # NOTE(review): statement-form `exec` — this module is Python 2 only.
    exec "from pyfftw.interfaces.numpy_fft import %s as _%s"%(2*(func_name,))
    # define the wrapper:
    exec wrapper_func_string
|
{"/transparent_pyfftw/__init__.py": ["/transparent_pyfftw/transparent_pyfftw.py"]}
|
8,430
|
OsamaAlOlabi/black-jack-card-game
|
refs/heads/master
|
/main.py
|
import art
import game_logic
print(art.logo)
# Opening deal: two cards for the player, one visible card for the dealer.
game_logic.random_card_for_me()
game_logic.random_card_for_me()
game_logic.random_card_for_bot()
# Enter the interactive hit/stand loop.
game_logic.black_jack()
|
{"/main.py": ["/game_logic.py"]}
|
8,431
|
OsamaAlOlabi/black-jack-card-game
|
refs/heads/master
|
/game_logic.py
|
import art
import random
# Card values: 11 is the ace; the four 10s stand for 10/J/Q/K.
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
# Module-level game state: hands and running totals for player and dealer.
my_cards = []
bot_cards = []
my_score = 0
bot_score = 0
# Choose a random card for the player
def random_card_for_me():
    """Deal one random card to the player and recompute the player's score.

    Aces enter as 11; while the hand would bust, demote one ace at a time to
    1, re-checking against the *updated* total after each demotion.
    """
    global my_score
    my_cards.append(random.choice(cards))
    my_score = sum(my_cards)
    # Recompute the score on every pass: the previous version compared the
    # stale pre-demotion total, so a hand like [11, 11] was scored 2
    # instead of 12 (both aces were demoted even though one sufficed).
    while 11 in my_cards and my_score > 21:
        my_cards[my_cards.index(11)] = 1
        my_score = sum(my_cards)
# Choose a random card for the dealer
def random_card_for_bot():
    """Deal one random card to the dealer and recompute the dealer's score.

    Mirrors random_card_for_me: aces enter as 11 and are demoted to 1 one at
    a time while the hand would bust, against the *updated* total.
    """
    global bot_score
    bot_cards.append(random.choice(cards))
    bot_score = sum(bot_cards)
    # Recompute per pass — the old stale-score check could demote more aces
    # than necessary (e.g. [11, 11] became 2 instead of 12).
    while 11 in bot_cards and bot_score > 21:
        bot_cards[bot_cards.index(11)] = 1
        bot_score = sum(bot_cards)
# How the game functions
def black_jack():
    """Show the current hands and drive one round of hit/stand decisions."""
    print(f"Your cards are {my_cards}, your current score: {my_score}")
    print(f"Dealer starting card is: {bot_cards}")
    # How the dealer functions
    def bot_logic():
        # Dealer keeps drawing until its score exceeds the player's.
        while bot_score <= my_score:
            random_card_for_bot()
            if bot_score == 21 and my_score == 21:
                print(f"Your final hand is: {my_cards}, final score: {my_score}")
                print(f"Dealer final hand is: {bot_cards}, final score: {bot_score}")
                print("It's a draw")
                play_again()
                break
            elif bot_score > 21:
                print(f"Your final hand is: {my_cards}, final score: {my_score}")
                print(f"Dealer final hand is: {bot_cards}, final score: {bot_score}")
                print(f"You win, the dealer {bot_score} is above 21")
                play_again()
                break
        # NOTE(review): this block also runs after the draw/win branches above
        # `break` out of the loop (when play_again() returns), so a "You lose"
        # message can follow a win/draw — confirm and guard if unintended.
        print(f"Your final hand is: {my_cards}, final score: {my_score}")
        print(f"Dealer final hand is: {bot_cards}, final score: {bot_score}")
        print(f"You lose, the dealer has {bot_score} and you have {my_score}\n")
        play_again()
    if my_score == 21:
        bot_logic()
    elif my_score > 21:
        print(f"\nYou're score is {my_score} which is above 21.")
        print("You lose\n")
        play_again()
    elif my_score < 21:
        # Hit/stand prompt loop; hitting recurses into black_jack().
        while True:
            another_card = input("Would you like another card? 'Y' or 'N' ").lower()
            print("")
            if another_card == "y":
                random_card_for_me()
                black_jack()
                break
            elif another_card == "n":
                bot_logic()
                break
            else:
                print("Please type 'Y' or 'N'")
# Play again function
def play_again():
    """Ask whether to restart; reset all game state and redeal on 'y'."""
    global my_score
    global bot_score
    global my_cards
    global bot_cards
    while True:
        # NOTE(review): unlike the hit/stand prompt, this input is not
        # lower()-ed, so an uppercase 'Y'/'N' falls through to the error
        # message — confirm and normalize if unintended.
        repeat = input("Would you like to play again? 'Y' or 'N' ")
        if repeat == "y":
            my_cards = []
            bot_cards = []
            my_score = 0
            bot_score = 0
            print("*************************************************************")
            print(art.logo)
            random_card_for_me()
            random_card_for_me()
            random_card_for_bot()
            black_jack()
            break
        elif repeat == "n":
            exit()
        else:
            print("Please type 'Y' or 'N'")
|
{"/main.py": ["/game_logic.py"]}
|
8,435
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/AAPL.py
|
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import Analysis.trade as trade
global dates
def main():
    """Load SPY+AAPL adjusted-close data for Jan 2015 and plot AAPL."""
    global dates
    # NOTE(review): start_date is assigned but unused — the literal is
    # repeated in the date_range call below.
    start_date = "2015-01-02"
    symbols = ['SPY']
    dates = pd.date_range("2015-01-02", "2015-02-02")
    symbols.append("AAPL")
    df = trade.get_data(symbols,dates)
    print(str(df))
    print(df)
    graph_close(df, "AAPL")
#
def graph_close(df,symbol):
    """Exploratory close-price inspection; most of the body is disabled.

    NOTE(review): everything below the print is commented-out scratch work
    (histograms, mean/std guide lines) — consider deleting or reviving it.
    """
    #past_year = df[symbol][252:]
    past_year = df[symbol][5:]
    past_year
    print(type(past_year[0]))
    #need to change it to float
    #df[symbol].hist(bins=2,label = symbol)
    #past_year.hist(bins=2,label = "Past Year")
    #plt.legend(loc="upper right")
    #past_year_mean = past_year.mean()
    #past_year_std = past_year.std()
    #mean = df[symbol].mean()
    #print("mean = " + str(mean))
    #std = df[symbol].std()
    #print("std = " + str(std))
    #simplify with func
    #plt.axvline(mean,color='w',linestyle="dashed",linewidth=2)
    #plt.axvline(std + mean,color='r',linestyle="dashed",linewidth=2)
    #plt.axvline(-std + mean,color='r',linestyle="dashed",linewidth=2)
    #plt.axvline(past_year_mean,color='w',linewidth=2)
    #plt.axvline(past_year_std + past_year_mean,color='r',linewidth=2)
    #plt.axvline(-past_year_std + past_year_mean,color='r',linewidth=2)
    #plt.axvline(df[symbol][-1],color='black',linewidth=2)
    #print(df.kurtosis())
def buy_sell(df,symbol):
    """Plot Bollinger-band buy/sell markers for *symbol* on top of the price.

    A point is a sell when the price touches the upper band (minus a small
    tolerance `add`), a buy when it touches the lower band (plus `add`).
    """
    rm,upper_band,lower_band = trade.get_bollinger_bands(symbol,df[symbol],20,False)
    rm = pd.DataFrame(rm)
    rm = rm.rename(columns={symbol:"rm"})
    df = df.join(rm)
    upper_band = pd.DataFrame(upper_band)
    upper_band = upper_band.rename(columns={symbol:"upper_band"})
    df = df.join(upper_band)
    lower_band = pd.DataFrame(lower_band)
    lower_band = lower_band.rename(columns={symbol:"lower_band"})
    df = df.join(lower_band)
    df = df.dropna(subset=["rm"])
    # Tolerance proportional to the first price in the window.
    add = float(df[symbol][0]/1000)
    df['sell_points'] = (df[symbol] >= df["upper_band"]-add).astype(float)
    df['buy_points'] = (df[symbol] <= df["lower_band"]+add).astype(float)
    df['sell'] = 0
    df['buy'] = 0
    # NOTE(review): chained assignment (df['col'][mask] = ...) and writing the
    # string "NaN" into numeric columns — both warn/fail on modern pandas;
    # should use df.loc[mask, 'col'] = np.nan. Left as-is pending verification.
    df['sell'][df['sell_points'] == 1] = df[symbol]
    df['sell'][df['sell_points'] == 0] = "NaN"
    df['buy'][df['buy_points'] == 1] = df[symbol]
    df['buy'][df['buy_points'] == 0] = "NaN"
    plt.plot(df['sell'],'go')
    plt.plot(df["buy"],'ro')
    plt.plot(df[symbol])
    plt.plot(df['lower_band'],'r')
    plt.plot(df['upper_band'],'g')
def volume(df,symbol):
    """Plot *symbol*'s price alongside its trading volume (in millions)."""
    global dates
    symbols = [symbol]
    df_vol = trade.get_data(symbols,dates,"Volume")
    df_vol = df_vol.rename(columns={symbol:"Volume"})
    # Scale shares to millions so price and volume share one axis.
    df_vol = (df_vol/1000000)
    plt.plot(df[symbol])
    plt.plot(df_vol["Volume"])
    print(df_vol)
def scatter_plot(symbol,symbols):
    """Scatter *symbol*'s daily returns against SPY with a fitted beta line."""
    global dates
    df= trade.get_data(symbols,dates)
    daily_returns = trade.compute_daily_returns(df)
    daily_returns.plot(kind='scatter',x='SPY',y=symbol)
    # Degree-1 polyfit: slope is beta (market sensitivity), intercept alpha.
    beta_symbol,alpha_symbol = np.polyfit(daily_returns['SPY'],daily_returns[symbol],1)
    plt.plot(daily_returns['SPY'], beta_symbol*daily_returns['SPY'] + alpha_symbol, '-',color='r')
    print("alpha of symbol: " , alpha_symbol)
    print("beta of symbol: " , beta_symbol)
    print(daily_returns.corr(method='pearson'))
if __name__ == "__main__":
main()
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,436
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/Data/data.py
|
#!/usr/bin/env python3
"""
data.py is an object to look at stock data
Brian Ishii 2017
"""
import os
import pandas as pd
import json
class Data:
    """Loader/analysis helper for per-symbol CSV price data.

    SPY is always tracked as the market baseline; its rows define the
    trading calendar.
    """
    def __init__(self, start_date, end_date, data_type="Adj Close"):
        self.symbols = ["SPY"]
        self.dates = self.get_dates(start_date, end_date)
        self.data_type = data_type
        self.df = self.get_data()
    def __repr__(self):
        return "Data({!r}, {!r}, {!r})".format(
            self.symbols, self.dates, self.data_type)
    def __eq__(self, other):
        # BUG FIX: the previous signature was __eq__(self) — no `other` — so
        # every equality comparison raised TypeError. Compare defining state.
        if not isinstance(other, Data):
            return NotImplemented
        return (self.symbols == other.symbols
                and list(self.dates) == list(other.dates)
                and self.data_type == other.data_type)
    def get_data(self):
        """Returns a pd.DataFrame with desired data
        Keyword arguments:
        symbols: (List) list of symbols i.e. ["SPY", "AAPL"]
        dates: (DatetimeIndex) range of dates desired
        col: (String) column name of data requested (default "Adj Close")
        """
        df = pd.DataFrame(index=self.dates)
        for symbol in self.symbols:
            temp = pd.read_csv(self.path_to_symbol(symbol),
                index_col="Date", usecols=["Date", self.data_type],
                parse_dates=True, na_values = ["NaN"])
            temp = temp.rename(columns={self.data_type:symbol})
            df = df.join(temp)
            if symbol == "SPY":
                # Drop non-trading days (rows where SPY has no data).
                df = df.dropna(subset=["SPY"])
        return df
    def path_to_symbol(self, symbol, base_dir="Data"):
        """returns the CSV file path for the given symbol
        Keyword arguments:
        symbol: (String) stock name i.e. "AAPL"
        base_dir: (String) base directory for the file (default "Data")
        """
        # NOTE(review): base_dir is accepted but ignored — the path is built
        # from os.getcwd(), and the unit tests pin that behavior; left as-is.
        path = os.getcwd()
        return os.path.join(path + "/{!s}.csv".format(symbol))
    def get_dates(self, start_date, end_date):
        """returns a pandas date range indexed for each day
        Keyword arguments:
        start_date: (String) YYYY-MM-DD
        end_date: (String) YYYY-MM-DD
        """
        return pd.date_range(start_date, end_date)
    def get_bollinger_bands(self, symbol, window=20):
        """returns a tuple with (rolling mean, upper_band, and
        lower_band)
        Keyword arguments:
        symbol: (String) stock symbol i.e. "AAPL"
        window: (int) number of days to include in the mean (default 20)
        """
        if symbol not in self.symbols:
            raise IndexError()
        values = self.df[symbol]
        rolling_mean = values.rolling(window=window).mean()
        rolling_std = values.rolling(window=window).std()
        # Bands sit two standard deviations around the rolling mean.
        upper_band = rolling_mean + rolling_std * 2
        lower_band = rolling_mean - rolling_std * 2
        return rolling_mean, upper_band, lower_band
    def add_stock(self, symbol):
        """appends the stock symbol to the symbols list
        Keyword arguments:
        symbol: (String) stock symbol i.e. "AAPL"
        """
        self.symbols.append(symbol)
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,437
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/Stock/stock_tests.py
|
#!/usr/bin/env python3
"""
stock_tests.py has test for stock.py
Brian Ishii 2017
"""
import unittest
from stock import *
class StockTestCases(unittest.TestCase):
    """Placeholder suite for stock.py — no real assertions implemented yet."""
    @classmethod
    def setUpClass(cls):
        pass
    def test_1(self):
        pass
if __name__ == '__main__':
unittest.main()
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,438
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/Stock/Stock.py
|
#/usr/bin/env python3
"""
data.py is an object to look at stock data
Brian Ishii 2017
"""
import json
class Stock:
    """Lightweight handle on a stock symbol with JSON-backed metadata."""
    def __init__(self, symbol):
        self.symbol = symbol
    def __repr__(self):
        # BUG FIX: __repr__ and __str__ previously returned None (bare
        # `pass`), so repr()/str() raised TypeError.
        return "Stock({!r})".format(self.symbol)
    def __str__(self):
        return self.symbol
    def get_json_data(self, symbol):
        """Return the entry for *symbol* under Stocks/Tech in stock.json."""
        # Context manager replaces the manual open/close pair (no leak on
        # a parse error).
        with open("stock.json", 'r') as f:
            temp = json.load(f)
        return temp["Stocks"]["Tech"][symbol]
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,439
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/Analysis/practice_numpy.py
|
import numpy as np
def OneD_array():
    # 1-D array literal (Python 2 print statement).
    print np.array([2,3,4])
def TwoD_array():
    # 2-D array from a tuple of row tuples (Python 2 print statement).
    print np.array([(2,3,4),(5,6,7)])
def empty_array():
    # Uninitialized 3-D array; values are whatever memory held.
    #print np.empty(5)
    #print np.empty((4,5))
    print np.empty((5,4,3))
def ones_array():
    # 3-D array filled with 1.0.
    print np.ones((5,4,3))
def random_array():
    # Uniform [0, 1) samples in a 5x4 array.
    print np.random.random((5,4))
def randInt_array():
    # Random integers in [0, 10) shaped 2x3.
    #print np.random.randint(10)
    #print np.random.randint(0,10)
    #print np.random.randint(0,10,size=5)
    print np.random.randint(0, 10, size=(2,3))
if __name__ == "__main__":
    # All exercises are currently disabled; `pass` keeps the suite valid —
    # a comment-only block is an IndentationError at import time.
    #OneD_array()
    #TwoD_array()
    #empty_array()
    #ones_array()
    #random_array()
    #randInt_array()
    pass
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,440
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/practice.py
|
import pandas as pd
import matplotlib.pyplot as plt
import trade
def first_graph():
    # Plot adjusted close straight from the CSV (Python 2 print statement).
    df = pd.read_csv("StockData/AAPL_stock_history.csv")
    plt.plot(df["Adj Close"])
    print df
    plt.show()
def new_dataFrame():
    # Join AAPL data onto a date-indexed frame and plot it (Python 2).
    #define date range
    start_date = "2016-01-22"
    end_date = "2016-12-26"
    dates = pd.date_range(start_date,end_date)
    #create empty data frame
    df1 = pd.DataFrame(index = dates)
    #Read AAPL data
    df_AAPL = pd.read_csv("StockData/AAPL_stock_history.csv",index_col = "Date",parse_dates=True,na_values = ['nan'])
    new_df = df1.join(df_AAPL)
    new_df = new_df.dropna()
    plt.plot(new_df["Adj Close"])
    plt.show()
    print new_df
def join_dataFrame():
    # Join AAPL and SPY adjusted closes into one frame and plot both (Python 2).
    #set range for df1
    start_date = "1999-01-22"
    end_date = "2016-12-26"
    dates = pd.date_range(start_date,end_date)
    #create empty data frame
    df1 = pd.DataFrame(index = dates)
    #Read AAPL data
    df_AAPL = pd.read_csv("StockData/AAPL_stock_history.csv",index_col = "Date",usecols=["Date","Adj Close"],parse_dates=True,na_values = ['nan'])
    #Rename 'Adj Close' to "AAPL"
    df_AAPL = df_AAPL.rename(columns={'Adj Close':'AAPL'})
    df1 = df1.join(df_AAPL, how = 'inner')
    df1 = df1.dropna()
    #read SPY data
    df_SPY = pd.read_csv("StockData/SPY_stock_history.csv",index_col = "Date",usecols=["Date","Adj Close"],parse_dates=True,na_values = ['nan'])
    df_SPY = df_SPY.rename(columns={'Adj Close':'SPY'})
    #join df_SPY with df1
    df1 = df1.join(df_SPY)
    print df1
    plt.plot(df1[["AAPL","SPY"]])
    plt.show()
def first_time_using_lib():
    # Exercise the trade helper library over a one-year window (Python 2).
    # Define a date range
    dates = pd.date_range('2009-01-22', '2010-01-26')
    # Choose stock symbols to read
    symbols = ['AAPL','GOOGL','AMZN']
    # Get stock data
    df = trade.get_data(symbols, dates)
    # NOTE(review): .ix was removed in pandas 1.0 — works only with the old
    # pandas this script was written against.
    df2 = df.ix['2009-01-22':'2010-01-28',["AAPL"]]
    #plt.plot(df2["AAPL"])
    #plt.show()
    trade.plot_data(df2)
    print df2
def multiple_stocks_on_a_graph():
    # Normalize several symbols to 1.0 at the first day and plot together.
    dates = pd.date_range('2016-01-01','2017-01-01')
    symbols = ['AAPL','GOOGL','AMZN']
    df = trade.get_data(symbols, dates)
    print df
    trade.plot_selected(trade.normalize_data(df),symbols,'2016-01-01','2017-01-01')
if __name__ == "__main__":
    #first_graph()
    #new_dataFrame()
    #join_dataFrame()
    #first_time_using_lib()
    multiple_stocks_on_a_graph()
    # NOTE(review): trade.py defines path_to_symbol, not symbol_to_path —
    # this line raises AttributeError as written.
    print trade.symbol_to_path("AAPL")
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,441
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/Analysis/trade.py
|
'''
Trade.py is a library to look at stock data
Written by Brian Ishii 2017
'''
import os
import pandas as pd
import matplotlib.pyplot as plt
import math
#import updateData
def plot_selected(df, columns, start_index, end_index):
    """
    plots selected data
    Arguments:
    df -- (pd.DataFrame) pandas dataframe
    columns -- (List) list of stock names i.e. "AAPL"
    start_index -- (date) starting date of data graphed year-month-day formmat i.e. '2017-01-01'
    end_index -- (date) ending date of data graphed year-month-day formmat i.e. '2017-01-01'
    """
    # NOTE(review): .ix was removed in pandas 1.0; this needs df.loc on a
    # modern install.
    plot_data(df.ix[start_index:end_index,columns],title="Selected Data ({})-({})".format(start_index,end_index))
def path_to_symbol(symbol, base_dir="Data"):
    """
    returns the CSV file path given the ticker symbol
    Arguments:
    symbol -- (String) stock name i.e. "AAPL"
    base_dir -- (String) directory holding the CSV files (default "Data")
    """
    filename = "{}.csv".format(str(symbol))
    return os.path.join(base_dir, filename)
def get_data(symbols, dates, col="Adj Close"):
    """
    Read stock data (adjusted close) for given symbols from CSV files.
    Arguments:
    symbols -- (List) list of symbols i.e. ["AAPL","GOOGL"]
    dates -- (pd.date_range) range of dates called
    col -- (String) column name of data requested i.e. 'Volume'
    """
    df = pd.DataFrame(index=dates)
    if 'SPY' not in symbols: # add SPY for reference, if absent
        # NOTE(review): insert() mutates the CALLER's list; callers appear to
        # rely on SPY being added for later plotting — confirm before changing.
        symbols.insert(0, 'SPY')
    for symbol in symbols:
        df_temp = pd.read_csv(path_to_symbol(symbol),index_col="Date",usecols= ["Date",col],parse_dates=True,na_values = ['nan'])
        df_temp = df_temp.rename(columns={col:symbol})
        df = df.join(df_temp)
        if symbol == 'SPY':
            # SPY rows define the trading calendar: drop non-trading days.
            df = df.dropna(subset=["SPY"])
    return df
def plot_data(df,title="Stock Prices",ylabel="Prices"):
    """
    plots stock prices with labels
    Arguments:
    df -- (pd.DataFrame) dataframe with price and date
    title -- (String) chart title
    ylabel -- (String) y-axis label
    """
    ax = df.plot(title=title,fontsize=12)
    ax.set_xlabel("Date")
    ax.set_ylabel(ylabel)
    plt.show()
def normalize_data(df):
    """
    Normalize Data so every column starts at 1.0 on the first row.
    Arguments:
    df -- (pd.DataFrame) pandas dataframe
    """
    # .iloc replaces .ix, which was deprecated in pandas 0.20 and removed
    # in 1.0.
    return df / df.iloc[0, :]
def get_bollinger_bands(symbol,values,window,plot):
    """
    Get Upper and lower bands (rolling mean +/- two rolling std deviations).
    Arguments:
    symbol -- (String) stock name i.e. "AAPL"
    values --(pd.Dataframe) i.e. df['AAPL']
    window -- (int) how many days i.e. 20
    plot -- (Bool) plot True or False
    """
    # Series.rolling(...) replaces pd.rolling_mean / pd.rolling_std, which
    # were removed in pandas 0.23 (matches Data.get_bollinger_bands in
    # Data/data.py).
    rm = values.rolling(window=window).mean()
    rstd = values.rolling(window=window).std()
    upper_band = rm + rstd * 2
    lower_band = rm - rstd * 2
    if plot is True:
        ax = values.plot(title="Bollinger Bands", label=symbol)
        rm.plot(label="Rolling Mean", ax=ax)
        upper_band.plot(label="Upper-Band", ax=ax)
        lower_band.plot(label="Lower-Band", ax=ax)
        ax.set_xlabel("Date")
        ax.set_ylabel("Price")
        ax.legend(loc='upper left')
        plt.show()
    return rm,upper_band,lower_band
def compute_daily_returns(df):
    """
    Compute the Daily returns of a stock
    Arguments:
    df -- (pd.DataFrame) i.e. df['AAPL']
    """
    daily_returns = ((df / df.shift(1))-1)
    # .iloc replaces the removed .ix indexer; the first row has no previous
    # day, so pin it to 0 instead of NaN.
    daily_returns.iloc[0,:] = 0
    return daily_returns
def daily_returns_hist(symbols,dates):
    """
    Read stock data (adjusted close) for given symbols from CSV files.
    Plots a histogram of daily returns per symbol; with a single symbol,
    overlays SPY mean and +/- one std guide lines.
    Arguments:
    symbols -- (List) list of symbols i.e. ["AAPL","GOOGL"]
    dates -- (pd.date_range) range of dates called
    """
    df = get_data(symbols,dates)
    daily_returns = compute_daily_returns(df)
    for symbol in symbols:
        daily_returns[symbol].hist(bins=20,label = symbol)
    plt.legend(loc="upper right")
    if len(symbols) == 1:
        mean = daily_returns['SPY'].mean()
        print("mean = " + str(mean))
        std = daily_returns['SPY'].std()
        plt.axvline(mean,color='w',linestyle="dashed",linewidth=2)
        plt.axvline(std,color='r',linestyle="dashed",linewidth=2)
        plt.axvline(-std,color='r',linestyle="dashed",linewidth=2)
        # Kurtosis indicates fat tails relative to a normal distribution.
        print(daily_returns.kurtosis())
    plt.show()
def compute_cumulative_returns(df, start_index, end_index):
    """
    compute the cumulative returns of a stock in a time period
    Arguments:
    df -- (pd.DataFrame) i.e. df['AAPL']
    start_index -- (date) starting date of data graphed year-month-day formmat i.e. '2017-01-01'
    end_index -- (date) ending date of data graphed year-month-day formmat i.e. '2017-01-01'
    """
    start_value = df.loc[start_index]
    end_value = df.loc[end_index]
    return (end_value / start_value) - 1
def sharpe_ratio(df,symbol,start_index, end_index):
    """
    computes the sharpe ratio returns sharpe, c, mean, and std
    Arguments:
    df -- (pd.DataFrame) i.e. df['AAPL']
    symbol -- (String) stock name i.e. "AAPL"
    start_index -- (date) starting date of data graphed year-month-day formmat i.e. '2017-01-01'
    end_index -- (date) ending date of data graphed year-month-day formmat i.e. '2017-01-01'
    """
    d = compute_daily_returns(df)
    c = compute_cumulative_returns(df,start_index,end_index)
    mean = d[symbol].mean()
    std = d[symbol].std()
    # Annualized: sqrt(252) scales daily mean/std by trading days per year.
    sharpe = math.sqrt(252)*(mean/std)
    return sharpe,c,mean,std
def check_data(today,now):
    """
    Checks Date and updates the CSV data files if necessary
    Arguments:
    today -- (String) string in date form i.e. '2017-01-01'
    now -- (datetime) current time; its %w weekday is consulted below
    """
    df_check = pd.read_csv(path_to_symbol("SPY"))
    date = df_check["Date"][0]
    if date != today:
        # NOTE(review): '%w' == 1 is Monday — presumably "no new data after
        # the weekend yet"; confirm the intent.
        if int(now.strftime('%w')) == 1:
            print("Data up to date")
            return
        else:
            # NOTE(review): the `import updateData` at the top of this file is
            # commented out, so reaching this branch raises NameError.
            updateData.update_data("StockData")
            return
    else:
        print("Data up to date")
        return
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,442
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/Analysis/test_trade.py
|
import unittest
from trade import *
class TimerTests (unittest.TestCase):
    """Path-construction tests for trade.py.

    Fixed to match the current trade.py API: the function is named
    path_to_symbol (not symbol_to_path, which raised NameError here) and its
    default base_dir is "Data", not "StockData".
    """
    def test_symbol_to_path_1(self):
        self.assertEqual(path_to_symbol("AAPL"),"Data/AAPL.csv")
    def test_symbol_to_path_2(self):
        self.assertEqual(path_to_symbol("GOOGL"),"Data/GOOGL.csv")
if __name__ == '__main__':
unittest.main()
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,443
|
BrianIshii/StockAnalyzer
|
refs/heads/master
|
/Data/data_tests.py
|
#!/usr/bin/env python3
"""
data_tests.py has tests for data.py
Brian Ishii 2017
"""
import unittest
import os
from data import *
class DataTests(unittest.TestCase):
    """Unit tests for the Data class: path resolution, loading, and
    Bollinger-band computation.

    NOTE(review): the expected strings pin exact pandas rendering of real
    price data, so these tests are sensitive to both the CSV fixtures on
    disk and the pandas version.
    """

    @classmethod
    def setUpClass(cls):
        # One shared fixture for the whole class; tests must not mutate it.
        test = Data("2015-01-02", "2015-01-06")
        cls.test = test
        cls.cwd = os.getcwd()

    def test_path_to_symbol_aapl(self):
        # Paths are resolved relative to the current working directory.
        self.assertEqual(self.test.path_to_symbol("AAPL"),
                         self.cwd + "/AAPL.csv")

    def test_path_to_symbol_spy(self):
        self.assertEqual(self.test.path_to_symbol("SPY"),
                         self.cwd + "/SPY.csv")

    def test_get_data(self):
        # Expected frame rendering for SPY data in the requested range.
        output = (""
                  + " SPY\n"
                  + "2015-01-02 197.045185\n"
                  + "2015-01-05 193.486620\n"
                  + "2015-01-06 191.664176\n"
                  + "2015-01-07 194.052535\n"
                  + "2015-01-08 197.496002\n"
                  + "2015-01-09 195.913355\n"
                  + "2015-01-12 194.378654\n"
                  + "2015-01-13 193.831927")
        temp = Data("2015-01-02", "2015-01-13")
        self.assertEqual(str(temp.get_data()), output)

    def test_get_dates(self):
        # get_dates returns a daily DatetimeIndex spanning the range.
        dates = ("DatetimeIndex(['2015-01-02',"
                 + " '2015-01-03', '2015-01-04', '2015-01-05',\n"
                 + " '2015-01-06'],\n"
                 + " dtype='datetime64[ns]', freq='D')")
        self.assertEqual(
            str(self.test.get_dates("2015-01-02", "2015-01-06")),
            dates)

    def test_get_bollinger_bands_error(self):
        # AAPL is not loaded into the fixture, so lookup must fail.
        self.assertRaises(IndexError, self.test.get_bollinger_bands, "AAPL")

    def test_get_bollinger_bands(self):
        # With window=5 the first 4 rows of every band are NaN.
        temp = Data("2015-01-01", "2015-01-15")
        rm = ("2015-01-02 NaN\n"
              + "2015-01-05 NaN\n"
              + "2015-01-06 NaN\n"
              + "2015-01-07 NaN\n"
              + "2015-01-08 194.748904\n"
              + "2015-01-09 194.522538\n"
              + "2015-01-12 194.700944\n"
              + "2015-01-13 195.134495\n"
              + "2015-01-14 194.856332\n"
              + "2015-01-15 193.536497\n"
              + "Name: SPY, dtype: float64")
        ub = ("2015-01-02 NaN\n"
              + "2015-01-05 NaN\n"
              + "2015-01-06 NaN\n"
              + "2015-01-07 NaN\n"
              + "2015-01-08 199.689884\n"
              + "2015-01-09 199.021440\n"
              + "2015-01-12 199.063118\n"
              + "2015-01-13 198.236422\n"
              + "2015-01-14 198.621840\n"
              + "2015-01-15 197.302006\n"
              + "Name: SPY, dtype: float64")
        lb = ("2015-01-02 NaN\n"
              + "2015-01-05 NaN\n"
              + "2015-01-06 NaN\n"
              + "2015-01-07 NaN\n"
              + "2015-01-08 189.807923\n"
              + "2015-01-09 190.023635\n"
              + "2015-01-12 190.338771\n"
              + "2015-01-13 192.032567\n"
              + "2015-01-14 191.090823\n"
              + "2015-01-15 189.770988\n"
              + "Name: SPY, dtype: float64")
        test_rm, test_ub, test_lb = temp.get_bollinger_bands("SPY", window=5)
        self.assertEqual(str(test_rm), rm)
        self.assertEqual(str(test_ub), ub)
        self.assertEqual(str(test_lb), lb)
if __name__ == '__main__':
    # Allow running this test module directly: python data_tests.py
    unittest.main()
|
{"/AAPL.py": ["/Analysis/trade.py"]}
|
8,472
|
dvska/django-admin-ip-whitelist
|
refs/heads/master
|
/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-17 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the django_admin_access_ip_whitelist table.

    NOTE: auto-generated by Django 1.9 (`makemigrations`); the b'...' byte
    strings in help_text are a Python-2-era artifact of the generator.
    """

    initial = True

    # No prior migrations; this app starts from scratch.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DjangoAdminAccessIPWhitelist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('whitelist_reason', models.CharField(help_text=b'Reason for the whitelist?', max_length=255)),
                ('ip', models.CharField(help_text=b'Enter an IP to whitelist', max_length=255)),
            ],
            options={
                'db_table': 'django_admin_access_ip_whitelist',
                'verbose_name': 'Django /admin access IP whitelist',
                'verbose_name_plural': 'Django /admin access allowed IPs',
                'permissions': (('can_whitelist_user', 'Can Whitelist User'),),
            },
        ),
    ]
|
{"/admin_ip_whitelist/tests.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/middleware.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/admin.py": ["/admin_ip_whitelist/models.py"]}
|
8,473
|
dvska/django-admin-ip-whitelist
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
# Prefer setuptools; fall back to distutils on minimal Python installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Package version, exposed to pip/PyPI.
Version = '0.1.1'

setup(name='django-admin-ip-whitelist',
      version=Version,
      # install_requires='redis',
      description="Django middleware to allow access to /admin only for users, whose IPs are in the white list",
      long_description="django-admin-ip-whitelist is a django middleware app to allow access to /admin by IP addresses",
      author="dvska",
      url="http://github.com/dvska/django-admin-ip-whitelist",
      packages=['admin_ip_whitelist'],
      license='Apache',
      platforms='Posix; MacOS X;',
      classifiers=[
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries :: Application Frameworks',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      )
|
{"/admin_ip_whitelist/tests.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/middleware.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/admin.py": ["/admin_ip_whitelist/models.py"]}
|
8,474
|
dvska/django-admin-ip-whitelist
|
refs/heads/master
|
/admin_ip_whitelist/tests.py
|
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from testfixtures import LogCapture, log_capture
from .models import ADMIN_ACCESS_WHITELIST_PREFIX, DjangoAdminAccessIPWhitelist
class MiddlewareTests(TestCase):
    """End-to-end request tests for AdminAccessIPWhiteListMiddleware.

    The log assertions (l.check) pin exact logger output order, so any
    change to the middleware's debug messages will break these tests.
    """

    def tearDown(self):
        # The middleware mirrors DB rows into the cache; reset between tests.
        cache.clear()

    def test_other_view(self):
        # Non-/admin URLs are never filtered, whatever the client IP.
        other_url = reverse('test')
        response = self.client.get(other_url, REMOTE_ADDR="5.5.5.5")
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.content, 'Hello, World!')

    def test_denied(self):
        # Unknown IP hitting /admin gets the default 403 banner.
        admin_url = reverse('admin:index')
        with LogCapture() as l:
            response = self.client.get(admin_url, REMOTE_ADDR="5.5.5.5")
            expected_response = "You are banned.\n<!-- 5.5.5.5 -->"
            self.assertEquals(response.status_code, 403)  # forbidden
            self.assertEquals(response.content, expected_response)
            self.assertEquals(response['content-type'], 'text/html')
            module_name = 'admin_ip_whitelist.middleware'
            l.check(
                (module_name, "DEBUG", "[django-admin-ip-whitelist] status = enabled"),
                (module_name, "DEBUG", "GOT IP FROM Request: 5.5.5.5 and User Agent None"),
            )

    @override_settings(ADMIN_ACCESS_WHITELIST_MESSAGE='Leave, now.')
    def test_denied_custom_message(self):
        # ADMIN_ACCESS_WHITELIST_MESSAGE customizes the 403 body text.
        admin_url = reverse('admin:index')
        with LogCapture() as l:
            response = self.client.get(admin_url, REMOTE_ADDR="5.5.5.5")
            expected_response = "Leave, now.\n<!-- 5.5.5.5 -->"
            self.assertEquals(response.status_code, 403)  # forbidden
            self.assertEquals(response.content, expected_response)
            self.assertEquals(response['content-type'], 'text/html')
            module_name = 'admin_ip_whitelist.middleware'
            l.check(
                (module_name, "DEBUG", "[django-admin-ip-whitelist] status = enabled"),
                (module_name, "DEBUG", "GOT IP FROM Request: 5.5.5.5 and User Agent None"),
            )

    @override_settings(ADMIN_ACCESS_WHITELIST_USE_HTTP_X_FORWARDED_FOR=True)
    @log_capture()
    def test_http_x_forward_for(self, l):
        # When the X-Forwarded-For setting is on, the FIRST address of the
        # header is the effective client IP.
        DjangoAdminAccessIPWhitelist.objects.create(
            whitelist_reason='You are special',
            ip='1.2.3.4',
        )
        admin_url = reverse('admin:index')
        # Allowed, the FORWARDED address is being considered.
        response = self.client.get(
            admin_url, REMOTE_ADDR="5.5.5.5",
            HTTP_X_FORWARDED_FOR="1.2.3.4, 4.4.4.4, 3.3.3.3")
        self.assertEquals(response.status_code, 302)  # redirect
        expected_url = "{}?next={}".format(reverse('admin:login'), admin_url)
        self.assertEquals(response.url, expected_url)
        # Allowed, If no forwarded address is given, it falls back
        # to REMOTE_ADDR.
        response = self.client.get(
            admin_url, REMOTE_ADDR="1.2.3.4")
        self.assertEquals(response.status_code, 302)  # redirect
        expected_url = "{}?next={}".format(reverse('admin:login'), admin_url)
        self.assertEquals(response.url, expected_url)
        module_name = 'admin_ip_whitelist.middleware'
        l.check(
            (module_name, "DEBUG", "[django-admin-ip-whitelist] status = enabled"),
            (module_name, "DEBUG", "GOT IP FROM Request: 1.2.3.4 and User Agent None"),
            (module_name, "DEBUG", "/Admin access IP: DJANGO_ADMIN_ACCESS_WHITELIST:1.2.3.4"),
            (module_name, "DEBUG", "GOT IP FROM Request: 1.2.3.4 and User Agent None"),
            (module_name, "DEBUG", "/Admin access IP: DJANGO_ADMIN_ACCESS_WHITELIST:1.2.3.4"),
        )

    @log_capture()
    def test_allowed(self, l):
        # A whitelisted REMOTE_ADDR reaches /admin (redirect to login);
        # everyone else gets the 403 banner.
        DjangoAdminAccessIPWhitelist.objects.create(
            whitelist_reason='You are special',
            ip='1.2.3.4',
        )
        admin_url = reverse('admin:index')
        # This user is not allowed.
        response = self.client.get(admin_url, REMOTE_ADDR="5.5.5.5")
        expected_response = "You are banned.\n<!-- 5.5.5.5 -->"
        self.assertEquals(response.status_code, 403)  # forbidden
        self.assertEquals(response.content, expected_response)
        self.assertEquals(response['content-type'], 'text/html')
        # This user is special.
        response = self.client.get(admin_url, REMOTE_ADDR="1.2.3.4")
        self.assertEquals(response.status_code, 302)  # redirect
        expected_url = "{}?next={}".format(reverse('admin:login'), admin_url)
        self.assertEquals(response.url, expected_url)
        module_name = 'admin_ip_whitelist.middleware'
        l.check(
            (module_name, "DEBUG", "[django-admin-ip-whitelist] status = enabled"),
            (module_name, "DEBUG", "GOT IP FROM Request: 5.5.5.5 and User Agent None"),
            (module_name, "DEBUG", "GOT IP FROM Request: 1.2.3.4 and User Agent None"),
            (module_name, "DEBUG", "/Admin access IP: DJANGO_ADMIN_ACCESS_WHITELIST:1.2.3.4"),
        )
class ModelTests(TestCase):
    """Tests for the model plus its pre_save/post_delete cache-sync signals.

    NOTE(review): cache._cache is an implementation detail of the locmem
    cache backend; these tests only run against that backend.
    """

    def tearDown(self):
        cache.clear()

    def test_instance_create_and_update(self):
        # Creating a row must write one cache key; changing its IP must
        # evict the old key and write the new one.
        self.assertEquals(len(cache._cache.keys()), 0)
        cache_key = ADMIN_ACCESS_WHITELIST_PREFIX + '1.2.3.4'
        self.assertEquals(cache.get(cache_key), None)
        obj = DjangoAdminAccessIPWhitelist.objects.create(
            whitelist_reason='You are special',
            ip='1.2.3.4',
        )
        self.assertEquals(len(cache._cache.keys()), 1)
        self.assertEquals(cache.get(cache_key), '1')
        obj.ip = '5.5.5.5'
        obj.save()
        self.assertEquals(cache.get(cache_key), None)
        new_cache_key = ADMIN_ACCESS_WHITELIST_PREFIX + '5.5.5.5'
        self.assertEquals(cache.get(new_cache_key), '1')
        self.assertEquals(len(cache._cache.keys()), 1)

    def test_instance_delete(self):
        # Deleting the row must evict its cache key (post_delete signal).
        self.assertEquals(len(cache._cache.keys()), 0)
        obj = DjangoAdminAccessIPWhitelist.objects.create(
            whitelist_reason='You are special',
            ip='1.2.3.4',
        )
        self.assertEquals(len(cache._cache.keys()), 1)
        cache_key = ADMIN_ACCESS_WHITELIST_PREFIX + '1.2.3.4'
        self.assertEquals(cache.get(cache_key), '1')
        obj.delete()
        self.assertEquals(cache.get(cache_key), None)

    def test_unicode(self):
        # __unicode__ keeps non-ASCII intact (Python 2 era API).
        obj = DjangoAdminAccessIPWhitelist.objects.create(
            whitelist_reason=u"This is what a cat looks like: \U0001F408",
            ip='1.2.3.4',
        )
        self.assertEquals(
            unicode(obj),
            u"Whitelisted 1.2.3.4 (This is what a cat looks like: \U0001F408)"
        )

    def test_str(self):
        # On Python 2, str(obj) yields the UTF-8 encoded byte string.
        obj = DjangoAdminAccessIPWhitelist.objects.create(
            whitelist_reason=u"This is what a cat looks like: \U0001F408",
            ip='1.2.3.4',
        )
        self.assertEquals(
            str(obj),
            "Whitelisted 1.2.3.4 (This is what a cat looks like: \xF0\x9F\x90\x88)"
        )
|
{"/admin_ip_whitelist/tests.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/middleware.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/admin.py": ["/admin_ip_whitelist/models.py"]}
|
8,475
|
dvska/django-admin-ip-whitelist
|
refs/heads/master
|
/admin_ip_whitelist/middleware.py
|
import logging
import django
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import MiddlewareNotUsed
from django.utils.deprecation import MiddlewareMixin
from django.http import HttpResponseForbidden
from .models import DjangoAdminAccessIPWhitelist, ADMIN_ACCESS_WHITELIST_PREFIX
log = logging.getLogger(__name__)
class AdminAccessIPWhiteListMiddleware(MiddlewareMixin):
    """Allow requests to /admin only from whitelisted IP addresses.

    Whitelist rows (DjangoAdminAccessIPWhitelist) are mirrored into the
    cache once at startup, so each request costs a single cache lookup.
    """

    def __init__(self, get_response=None):
        """
        Middleware init is called once per server on startup - do the heavy
        lifting here.

        FIX: Django >= 1.10 (new-style MIDDLEWARE) instantiates middleware
        as ``Middleware(get_response)``; the previous zero-argument
        signature raised TypeError there. ``get_response`` defaults to None
        so legacy instantiation keeps working.
        """
        # Let MiddlewareMixin store the handler chain reference.
        super(AdminAccessIPWhiteListMiddleware, self).__init__(get_response)
        # If disabled or not enabled raise MiddlewareNotUsed so django
        # processes the next middleware.
        self.ENABLED = getattr(settings, 'ADMIN_ACCESS_WHITELIST_ENABLED', False)
        self.USE_HTTP_X_FORWARDED_FOR = getattr(settings, 'ADMIN_ACCESS_WHITELIST_USE_HTTP_X_FORWARDED_FOR', False)
        self.ADMIN_ACCESS_WHITELIST_MESSAGE = getattr(settings, 'ADMIN_ACCESS_WHITELIST_MESSAGE', 'You are banned.')
        if not self.ENABLED:
            raise MiddlewareNotUsed("django-admin-ip-whitelist is not enabled via settings.py")
        log.debug("[django-admin-ip-whitelist] status = enabled")
        # Prefix all keys in cache to avoid key collisions
        self.ABUSE_PREFIX = 'DJANGO_ADMIN_ACCESS_WHITELIST_ABUSE:'
        self.WHITELIST_PREFIX = ADMIN_ACCESS_WHITELIST_PREFIX
        # Warm the cache with every currently-whitelisted IP.
        for whitelist in DjangoAdminAccessIPWhitelist.objects.all():
            cache_key = self.WHITELIST_PREFIX + whitelist.ip
            cache.set(cache_key, "1")

    def _get_ip(self, request):
        """Return the effective client IP for *request*.

        Uses the first entry of X-Forwarded-For when configured to, or when
        REMOTE_ADDR is missing/loopback (i.e. behind a local proxy).
        """
        ip = request.META['REMOTE_ADDR']
        if self.USE_HTTP_X_FORWARDED_FOR or not ip or ip == '127.0.0.1':
            ip = request.META.get('HTTP_X_FORWARDED_FOR', ip).split(',')[0].strip()
        return ip

    def process_request(self, request):
        """Reject /admin requests from non-whitelisted IPs with a 403."""
        if not request.path.startswith('/admin'):
            return None
        ip = self._get_ip(request)
        user_agent = request.META.get('HTTP_USER_AGENT', None)
        log.debug("GOT IP FROM Request: %s and User Agent %s" % (ip, user_agent))
        if self.is_whitelisted(ip):
            return None
        else:
            # The offending IP is embedded as an HTML comment for debugging.
            return self.http_response_forbidden(self.ADMIN_ACCESS_WHITELIST_MESSAGE + '\n<!-- {} -->'.format(ip), content_type="text/html")

    @staticmethod
    def http_response_forbidden(message, content_type):
        """Build a 403 response; Django <= 1.3 used 'mimetype' instead of
        'content_type'."""
        if django.VERSION[:2] > (1, 3):
            kwargs = {'content_type': content_type}
        else:
            kwargs = {'mimetype': content_type}
        return HttpResponseForbidden(message, **kwargs)

    def is_whitelisted(self, ip):
        """Return a truthy cache value when *ip* is whitelisted, else None."""
        # If a whitelist key exists, return True to allow the request through
        is_whitelisted = cache.get(self.WHITELIST_PREFIX + ip)
        if is_whitelisted:
            log.debug("/Admin access IP: " + self.WHITELIST_PREFIX + ip)
        return is_whitelisted
|
{"/admin_ip_whitelist/tests.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/middleware.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/admin.py": ["/admin_ip_whitelist/models.py"]}
|
8,476
|
dvska/django-admin-ip-whitelist
|
refs/heads/master
|
/admin_ip_whitelist/admin.py
|
from django.contrib import admin
from .models import DjangoAdminAccessIPWhitelist
# Expose the whitelist table in the Django admin with default ModelAdmin options.
admin.site.register(DjangoAdminAccessIPWhitelist)
|
{"/admin_ip_whitelist/tests.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/middleware.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/admin.py": ["/admin_ip_whitelist/models.py"]}
|
8,477
|
dvska/django-admin-ip-whitelist
|
refs/heads/master
|
/admin_ip_whitelist/test_urls.py
|
from django.conf.urls import url
from django.contrib import admin
from .test_views import TestView
# Minimal URLconf used by the middleware test suite: the real admin site
# plus one unfiltered view (named 'test') for the pass-through case.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^test/', TestView.as_view(), name='test'),
]
|
{"/admin_ip_whitelist/tests.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/middleware.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/admin.py": ["/admin_ip_whitelist/models.py"]}
|
8,478
|
dvska/django-admin-ip-whitelist
|
refs/heads/master
|
/admin_ip_whitelist/models.py
|
# dvska made
# Licensed under the Apache License, Version 2.0 (the "License");
from django.core.cache import cache
from django.db import models
from django.db.models.signals import post_delete, pre_save
ADMIN_ACCESS_WHITELIST_PREFIX = 'DJANGO_ADMIN_ACCESS_WHITELIST:'
WHITELIST_PREFIX = 'DJANGO_ADMIN_ACCESS_WHITELIST:'
class DjangoAdminAccessIPWhitelist(models.Model):
    """A single IP address allowed to reach /admin."""

    # Free-text audit note explaining why this IP was whitelisted.
    whitelist_reason = models.CharField(max_length=255, help_text="Reason for the whitelist?")
    # Stored as a plain string; no IP-format validation is applied here.
    ip = models.CharField(max_length=255, help_text='Enter an IP to whitelist')

    def __unicode__(self):
        return "Whitelisted %s (%s)" % (self.ip, self.whitelist_reason)

    def __str__(self):
        # NOTE(review): encode() returns bytes, which is only a valid
        # __str__ result on Python 2 -- on Python 3 this raises TypeError.
        # Confirm the supported interpreter before changing.
        return self.__unicode__().encode('utf-8')

    class Meta:
        permissions = (("can_whitelist_user", "Can Whitelist User"),)
        verbose_name = "Django /admin access IP whitelist"
        verbose_name_plural = "Django /admin access allowed IPs"
        db_table = 'django_admin_access_ip_whitelist'
def _generate_cache_key(instance):
    """Cache key under which *instance*'s IP is marked as whitelisted."""
    return '{}{}'.format(ADMIN_ACCESS_WHITELIST_PREFIX, instance.ip)
def _update_cache(sender, **kwargs):
    """pre_save handler: mirror the saved whitelist entry into the cache.

    On update, if the IP changed, the stale cache key for the old IP is
    removed before the new one is written; on create only the new key is
    written.
    """
    # add a whitelist entry
    new_instance = kwargs.get('instance')
    # If the entry has changed, remove the old cache entry and
    # add the new one. (pre_save: the DB still holds the old row.)
    if new_instance.pk:
        old_instance = DjangoAdminAccessIPWhitelist.objects.get(
            pk=new_instance.pk)
        if _generate_cache_key(old_instance) != \
                _generate_cache_key(new_instance):
            old_cache_key = _generate_cache_key(old_instance)
            cache.delete(old_cache_key)
    cache_key = _generate_cache_key(new_instance)
    cache.set(cache_key, "1")
def _delete_cache(sender, **kwargs):
    """post_delete handler: evict the deleted row's IP from the cache."""
    doomed = kwargs.get('instance')
    cache.delete(_generate_cache_key(doomed))
# Keep the cache mirror in sync with the database whitelist table.
pre_save.connect(_update_cache, sender=DjangoAdminAccessIPWhitelist)
post_delete.connect(_delete_cache, sender=DjangoAdminAccessIPWhitelist)
|
{"/admin_ip_whitelist/tests.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/middleware.py": ["/admin_ip_whitelist/models.py"], "/admin_ip_whitelist/admin.py": ["/admin_ip_whitelist/models.py"]}
|
8,479
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/main.py
|
import discord
from discord.ext import commands
from discord import file
from discord_slash import SlashCommand
#Setting Values
from config import *
client = commands.Bot(command_prefix=config["Prefix"])
slash = SlashCommand(client, sync_commands=True, sync_on_cog_reload=True)
TOKEN = config["TOKEN"]
events_extensions = ['cogs.events.tiktok',
'cogs.events.instagram',
'cogs.commands.sendtodm',
'cogs.commands.slash-sendtodm',
'cogs.commands.help']
@client.event
async def on_ready():
    """Gateway-ready hook: advertise the help command in the bot's presence
    and print a green console banner."""
    await client.change_presence(status=discord.Status.online, activity=discord.Game(f'{config["Prefix"]}help'))
    print("\u001b[32mMediabot is Ready to go. \u001b[0m")
if __name__ == "__main__":
    # Loads Extentions (Cogs)
    for extension in events_extensions:
        # NOTE(review): "Loaded" is printed before load_extension runs, so a
        # failing cog is still reported as loaded.
        print(f"Loaded \u001b[32m{extension}\u001b[0m")
        client.load_extension(extension)
    # Start the local Flask API the downloader cogs call, then the bot.
    from api.flaskapi import run_api
    run_api()
    client.run(TOKEN)
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,480
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/cogs/commands/help.py
|
import discord
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
import json
#Setting Values
import sys
sys.path.append("./")
from config import *
prefix = config["Prefix"]
def gembed(ctx):
    """Build the help embed (available commands + usage) for ctx's author."""
    embed = discord.Embed(title='help', description='available commands.', color=discord.Colour.random(),)
    # Main send-to-DM command, in prefix, alias and slash forms.
    embed.add_field(
        name=f"{prefix}send - {prefix}s - /send",
        value=f"```{prefix}s <mention||userid> <link> <number-optional>```",
        inline=False
    )
    # DM-only cleanup command.
    embed.add_field(
        name=f"{prefix}clear - ``only works in dm``"
        , value=f"```{prefix}clear <amount>```",
        inline=True
    )
    embed.set_footer(
        text=f"Requested by {ctx.author}",
        icon_url=ctx.author.avatar_url
    )
    return embed
class HelpCog(commands.Cog):
    """Replaces discord.py's built-in help with an embed-based command,
    available both as a prefix command and a slash command."""

    def __init__(self, bot):
        self.bot = bot
        # Drop the default help command so ours can take the name.
        self.bot.remove_command('help')

    @commands.command(name='help')
    async def Help_cmd(self, ctx):
        # Prefix variant: <prefix>help
        await ctx.send(embed=gembed(ctx))

    @cog_ext.cog_slash(name="help", description="view available commands.")
    async def slashHelp_cmd(self, ctx: SlashContext):
        # Slash variant: /help
        await ctx.send(embed=gembed(ctx))
def setup(bot):
    # discord.py extension entry point, invoked by client.load_extension().
    bot.add_cog(HelpCog(bot))
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,481
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/cogs/functions/tik_fn.py
|
import requests
import json
import math
import re
from decimal import Decimal
import os
from urllib.parse import parse_qsl, urlparse
import random
import time
from pystreamable import StreamableApi
import sys
sys.path.append("./")
from config import *
stream_email = config["stream_email"]
stream_pass = config["stream_pass"]
import discord
#For Below Function
def remove_exponent(d):
    """Return Decimal *d* without a spurious exponent.

    Integral values are quantized to plain integer form
    (Decimal('2.00') -> Decimal('2')); non-integral values are normalized
    (trailing zeros stripped).
    """
    return d.quantize(Decimal(1)) if d == d.to_integral() else d.normalize()

#To Make Numbers Readable , 1k 1m...
def millify(n, precision=0, drop_nulls=True, prefixes=()):
    """Humanize number.

    n -- numeric value to abbreviate
    precision -- decimal places kept before trailing-zero stripping
    drop_nulls -- when True, strip trailing zeros ('1.0k' -> '1k')
    prefixes -- optional custom suffixes replacing the default k/M/B/...
        (FIX: default is now an immutable tuple; the old mutable-list
        default was a shared-state hazard. Truthiness is unchanged, so
        callers behave identically.)
    """
    millnames = ['', 'k', 'M', 'B', 'T', 'P', 'E', 'Z', 'Y']
    if prefixes:
        millnames = ['']
        millnames.extend(prefixes)
    n = float(n)
    # Largest power-of-1000 bucket that fits, clamped to the suffix table.
    millidx = max(0, min(len(millnames) - 1,
                         int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))))
    result = '{:.{precision}f}'.format(n / 10**(3 * millidx), precision=precision)
    if drop_nulls:
        result = remove_exponent(Decimal(result))
    return '{0}{dx}'.format(result, dx=millnames[millidx])
class Tiktok_fn:
    """Wrap a TikTok post: resolve short/mobile URLs to the canonical www
    form, fetch post metadata from the local API, and expose stats/author
    accessors plus Streamable upload and Discord-embed helpers."""

    def __init__(self, url):
        self.url = url
        # Convert https://vm.tiktok.com to https://www.tiktok.com
        header = {
            'Host': 't.tiktok.com',
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0',
            'Referer': 'https://www.tiktok.com/',
        }
        tikfull_url = url
        if url.startswith('https://vm.tiktok.com'):
            # Short links redirect; requests follows them, final URL in res.url.
            res = requests.get(url)
            url = (res.url)
        if url.startswith('https://m.tiktok.com'):
            # Mobile links: the oEmbed payload's "html" field carries the
            # canonical www URL inside its cite="..." attribute.
            req = requests.get(f"https://www.tiktok.com/oembed?url={url}")
            extr_url = json.loads(req.text)
            extr_url = extr_url["html"]
            m = re.search(" +cite=\"(.*?)\"", extr_url)
            url = m.group(1)
        tikfull_url = url
        self.url = tikfull_url
        # .../@user/video/<id>?...  -> last path segment is the video id,
        # third-from-last is the @username.
        video_id = (('{}'.format(*tikfull_url.split('/')[-1:]))).split("?")[0]
        video_username = (('{}'.format(*tikfull_url.split('/')[-3:]))).split("?")[0]
        # Send to api
        api_url = f"http://localhost:8080/api/tiktok/{video_username}/{video_id}"
        # Read json array via api
        response = requests.request("get", api_url)
        self.datameta = json.loads(response.text)
        # NOTE(review): this attribute shadows the video_url() method below.
        self.video_url = self.datameta["video"]["playAddr"]
        self.header = header
        self.fll_url = tikfull_url

    def likes_number(self):
        """Humanized like (digg) count, e.g. '1.5k'."""
        likes_number = (millify(self.datameta["stats"]["diggCount"], precision=1))
        return likes_number

    def comments_number(self):
        """Humanized comment count."""
        comments_number = (millify(self.datameta["stats"]["commentCount"], precision=1))
        return comments_number

    def share_number(self):
        """Humanized share count."""
        share_number = (millify(self.datameta["stats"]["shareCount"], precision=1))
        return share_number

    def play_number(self):
        """Humanized play/view count."""
        play_number = (millify(self.datameta["stats"]["playCount"], precision=1))
        return play_number

    def user_name(self):
        """Author's unique TikTok handle."""
        user_name = self.datameta["author"]["uniqueId"]
        return user_name

    def author_avatar(self):
        """URL of the author's large avatar image."""
        author_avatar = self.datameta["author"]["avatarLarger"]
        return author_avatar

    def sound_des(self):
        """Title of the sound/music used in the post."""
        sound_des = self.datameta["music"]["title"]
        return sound_des

    def caption(self):
        """Post caption text."""
        caption = self.datameta["desc"]
        return caption

    def video_id(self):
        """Numeric id of the video (from metadata, not the URL)."""
        video_id = self.datameta["video"]["id"]
        return video_id

    def video_url(self):
        # NOTE(review): shadowed by the self.video_url attribute assigned in
        # __init__, so this method is unreachable on instances.
        video_url = self.datameta["video"]["playAddr"]
        return video_url

    def default_url(self):
        """Canonical www.tiktok.com URL resolved in __init__."""
        return self.fll_url

    # Upload To streamable
    def upload_to_streamable(self, path, title):
        """Upload the file at *path* to Streamable and return its share URL.

        Polls every 10s until processing reaches 100%.
        NOTE(review): after 6 polls this calls exit(), killing the whole
        process, and streamable_link is needlessly global -- confirm intent.
        """
        streamable_username = stream_email
        streamable_password = stream_pass
        api = StreamableApi(streamable_username, streamable_password)
        deets = api.upload_video(path, title)
        count = 0
        while True:
            count += 1
            test = api.get_info(deets['shortcode'])
            if test['percent'] == 100:
                break
            elif count == 6:
                exit()
            else:
                time.sleep(10)
        global streamable_link
        streamable_link = ("https://streamable.com/" + deets['shortcode'])
        return streamable_link

    def embedgen(self, url, author, author_avatar):
        """Build the Discord embed summarizing this TikTok post's stats.

        author/author_avatar describe the Discord user who shared the link.
        """
        e = discord.Embed(title="Tiktok", description=self.caption())
        e.set_author(name=(f'@{self.user_name()}'), url=url, icon_url=self.author_avatar())
        e.set_thumbnail(url="https://i.imgur.com/rMollzc.png")
        e.add_field(name="Likes", value=self.likes_number(), inline=True)
        e.add_field(name="Comments", value=self.comments_number(), inline=True)
        e.add_field(name="Sound", value=self.sound_des(), inline=False)
        e.add_field(name="Shares", value=self.share_number(), inline=True)
        e.add_field(name="Views", value=self.play_number(), inline=True)
        e.set_footer(text=(f'Shared by {author}'), icon_url=author_avatar)
        return e
class TikTokDownloader:
    """Download the raw .mp4 of a TikTok post by scraping its playAddr."""

    # Browser-like headers; TikTok rejects requests without them.
    HEADERS = {
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'DNT': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',
        'Accept': '*/*',
        'Sec-Fetch-Site': 'same-site',
        'Sec-Fetch-Mode': 'no-cors',
        'Sec-Fetch-Dest': 'video',
        'Referer': 'https://www.tiktok.com/',
        'Accept-Language': 'en-US,en;q=0.9,bs;q=0.8,sr;q=0.7,hr;q=0.6',
        'sec-gpc': '1',
        'Range': 'bytes=0-',
    }

    def __init__(self, url: str):
        # TikTok expects tt_webid cookies; any random numeric id suffices.
        web_id = str(random.randint(10000, 999999999))
        self.__url = url
        self.__cookies = {
            'tt_webid': web_id,
            'tt_webid_v2': web_id
        }

    def __get_video_url(self) -> str:
        """Scrape the post's HTML for the 'playAddr' media URL."""
        response = requests.get(self.__url, cookies=self.__cookies, headers=TikTokDownloader.HEADERS)
        # Unescape \u0026 back to '&' so the query string is valid.
        return response.text.split('"playAddr":"')[1].split('"')[0].replace(r'\u0026', '&')

    def download(self, file_path: str):
        """Fetch the video bytes and write them to *file_path*.

        Raises requests.HTTPError on a non-2xx media response.
        """
        video_url = self.__get_video_url()
        url = urlparse(video_url)
        # Rebuild the request so the query string is passed as params.
        params = tuple(parse_qsl(url.query))
        request = requests.Request(method='GET', url='{}://{}{}'.format(url.scheme, url.netloc, url.path), cookies=self.__cookies, headers=TikTokDownloader.HEADERS, params=params)
        prepared_request = request.prepare()
        session = requests.Session()
        response = session.send(request=prepared_request)
        response.raise_for_status()
        with open(os.path.abspath(file_path), 'wb') as output_file:
            output_file.write(response.content)
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,482
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/cogs/functions/insta_fn.py
|
import requests
import json
import time
from pystreamable import StreamableApi
#Setting Values
import sys
sys.path.append("./")
from config import *
stream_email = config["stream_email"]
stream_pass = config["stream_pass"]
import discord
class Insta_fn:
    """Wrap an Instagram post: fetch metadata for its shortcode from the
    local API and expose stats/media accessors, Streamable upload, and a
    Discord-embed builder. multipost_num selects one item of a multi-post
    ('GraphSidecar') carousel, 0-based."""

    def __init__(self, url, multipost_num=0):
        # Shortcode is the second-to-last path segment of the post URL.
        shortcode = url.split('/')[-2].replace('/', '')
        api_url = f"http://localhost:8080/api/instagram/{shortcode}"
        # Read json array via api
        response = requests.request("get", api_url)
        self.datameta = json.loads(response.text)
        # One of GraphVideo / GraphImage / GraphSidecar (carousel).
        self.typeofmedia = self.datameta["__typename"]
        self.multipost_num = multipost_num
        self.multipost_num_chosen = False
        if multipost_num > 0:
            self.multipost_num_chosen = True
        elif self.typeofmedia == "GraphSidecar":
            # Probe the requested carousel index; fall back to the first
            # item when out of range.
            try:
                self.datameta["edge_sidecar_to_children"]["edges"][self.multipost_num]["node"]["__typename"]
            except:
                print(f"Number {multipost_num} is out of index, ``Setting number back to 1.``")
                self.multipost_num = 0

    def type_media(self):
        """Effective media type of the selected item ('GraphVideo'/'GraphImage')."""
        if self.typeofmedia != 'GraphSidecar':
            return self.typeofmedia
        elif self.typeofmedia == 'GraphSidecar':
            if self.datameta["edge_sidecar_to_children"]["edges"][self.multipost_num]["node"]["__typename"] == 'GraphVideo':
                return 'GraphVideo'
            else:
                return 'GraphImage'
        elif self.datameta["statusCode"] == 404:
            # NOTE(review): unreachable -- the two branches above are
            # exhaustive, so this 404 handling never runs.
            return '**Error StatusCode 404 \n Account Maybe Private.**'

    def play_number(self):
        """Comma-formatted view count (videos only)."""
        if self.type_media() == "GraphVideo":
            try:
                play_number = self.datameta["video_view_count"]
                play_number = ("" + "{:,}".format(play_number))
            except:
                # Carousel videos keep the count on the child node.
                play_number = self.datameta["edge_sidecar_to_children"]["edges"][self.multipost_num]["node"]["video_view_count"] or self.datameta["edge_sidecar_to_children"]["edges"][0]["node"]["video_view_count"]
                play_number = ("" + "{:,}".format(play_number))
            return play_number

    def likes_number(self):
        """Comma-formatted like count."""
        likes_number = self.datameta["edge_media_preview_like"]["count"]
        likes_number = ("" + "{:,}".format(likes_number))
        return likes_number

    def comments_number(self):
        """Comma-formatted comment count."""
        comments_number = self.datameta["edge_media_to_comment"]["count"]
        comments_number = ("" + "{:,}".format(comments_number))
        return comments_number

    def caption(self):
        """Post caption text, or a single space when there is none."""
        try:
            caption = self.datameta["edge_media_to_caption"]["edges"][0]["node"]["text"]
        except:
            caption = ' '
        return caption

    def user_name(self):
        """Owner's Instagram username."""
        user_name = self.datameta["owner"]["username"]
        return user_name

    def user_pfp(self):
        """URL of the owner's profile picture."""
        user_pfp = self.datameta["owner"]["profile_pic_url"]
        return user_pfp

    def display_url(self):
        """Direct media URL of the selected item (video_url or display_url)."""
        if self.typeofmedia == "GraphVideo":
            display_url = self.datameta["video_url"]
            return display_url
        elif self.typeofmedia == "GraphImage":
            display_url = self.datameta["display_url"]
            return display_url
        elif self.typeofmedia == "GraphSidecar":
            if self.type_media() == "GraphVideo":
                display_url = self.datameta["edge_sidecar_to_children"]["edges"][self.multipost_num]["node"]["video_url"]
                return display_url
            elif self.type_media() == "GraphImage":
                display_url = self.datameta["edge_sidecar_to_children"]["edges"][self.multipost_num]["node"]["display_url"]
                return display_url

    def video_duration(self):
        """Video duration from metadata (seconds, presumably)."""
        video_duration = self.datameta["video_duration"]
        # NOTE(review): limit_duration is computed but never used.
        limit_duration = float('600')
        return video_duration

    def video_id(self):
        """Instagram's id for the post."""
        video_id = self.datameta["id"]
        return video_id

    def video_download(self, path):
        """Download the selected media to <path>/<id>.mp4."""
        data = requests.get(self.display_url())
        idd = self.datameta["id"]
        with open(path + '/{}.mp4'.format(idd), 'wb') as fb:
            fb.write(data.content)

    # Upload To streamable
    def upload_to_streamable(self, path, title):
        """Upload the file at *path* to Streamable and return its share URL.

        Polls every 10s until processing reaches 100%.
        NOTE(review): after 6 polls this calls exit(), killing the whole
        process, and streamable_link is needlessly global -- confirm intent.
        """
        streamable_username = stream_email
        streamable_password = stream_pass
        api = StreamableApi(streamable_username, streamable_password)
        deets = api.upload_video(path, title)
        count = 0
        while True:
            count += 1
            test = api.get_info(deets['shortcode'])
            if test['percent'] == 100:
                break
            elif count == 6:
                exit()
            else:
                time.sleep(10)
        global streamable_link
        streamable_link = ("https://streamable.com/" + deets['shortcode'])
        return streamable_link

    def embedgen(self, url, author, author_avatar):
        """Build the Discord embed summarizing this Instagram post.

        author/author_avatar describe the Discord user who shared the link.
        """
        embed = discord.Embed(title="Instagram", description=self.caption())
        embed.set_author(name=(f'@{self.user_name()}'), url=url, icon_url=self.user_pfp())
        embed.set_thumbnail(url="https://i.imgur.com/9S6AZz8.png")
        embed.add_field(name="Likes", value=self.likes_number(), inline=True)
        embed.add_field(name="Comments", value=self.comments_number(), inline=True)
        embed.set_footer(text=(f'Shared by • @{author}'), icon_url=author_avatar)
        # Embed for video
        if self.type_media() == "GraphVideo":
            embed.add_field(name="Views", value=self.play_number(), inline=True)
        #Embed for pic
        if self.type_media() == "GraphImage":
            embed.set_image(url=self.display_url())
        return embed
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,483
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/cogs/commands/sendtodm.py
|
import discord
from discord.ext import commands
from discord import file
import os
import random
import sys
sys.path.append("./cogs/functions")
import tik_fn
import insta_fn
# Setting Values
sys.path.append("./")
from config import *
path_down = config["path"]
limitsize = config["limitsize"] # <-- 8 mb for file size limit set by discord
sEmoji = config["sEmoji"]
class SendtodmCog(commands.Cog):
    def __init__(self, bot):
        # Keep a handle on the bot so commands can inspect self.bot.user etc.
        self.bot = bot
# Delete dms
@commands.dm_only()
@commands.command(name='clear',pass_context=True)
async def clear(self, ctx, limit: int=None):
passed = 0
failed = 0
async for msg in ctx.message.channel.history(limit=limit+1):
if msg.author.id == self.bot.user.id:
try:
await msg.delete()
passed += 1
except:
failed += 1
else:
pass
#ctx.send(f"[Complete] Removed {passed} messages with {failed} fails", delete_after=10)
# send video to dm
@commands.command(name='send', aliases=['s'],pass_context=True)
async def sendtodm_cmd(self, ctx, member: discord.Member, link_url, multipost_num=1):
channel = await member.create_dm()
# Tiktok send to dm
if link_url.startswith('https://www.tiktok.com') or link_url.startswith('https://vm.tiktok.com'):
try:
# Sends url to tik_fn
t = tik_fn.Tiktok_fn(link_url)
# Download video
downloader = tik_fn.TikTokDownloader(t.default_url())
downloader.download(path_down+'/{}.mp4'.format(t.video_id()))
mp4_file = (f"{path_down}/{t.video_id()}.mp4")
file_size = os.path.getsize(mp4_file)
# Embed
e = t.embedgen(link_url, ctx.author, ctx.author.avatar_url)
# Upload to discord
if file_size <= limitsize:
await channel.send(embed=e)
await channel.send(file=discord.File(mp4_file))
# Upload to Streamable
else:
await channel.send(embed=e)
mssg = await channel.send(f'Wait Uploading...🔃 {ctx.author}')
streamable_link=t.upload_to_streamable(mp4_file, t.video_id())
await mssg.edit(content=streamable_link)
#Delete the file
os.remove(mp4_file)
await ctx.message.add_reaction(sEmoji)
except:
embed=discord.Embed(title="Error", description='The video is private, or the api is broken \n make sure to use a proxy.', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://i.imgur.com/j3wGKKr.png")
await ctx.channel.send(embed=embed, delete_after=10)
# Instagram send to dm
elif link_url.startswith('https://www.instagram.com/'):
url = link_url
multipost_num = (int(multipost_num) - 1)
try:
i = insta_fn.Insta_fn(url , multipost_num)
embed = i.embedgen(link_url, ctx.author, ctx.author.avatar_url)
# For Videos
if i.type_media() == "GraphVideo":
i.video_download(path_down)
file_tosend = (f"{path_down}/{i.video_id()}.mp4")
file_size = os.path.getsize(file_tosend)
if file_size <= limitsize:
await channel.send(embed=embed)
await channel.send(file=discord.File(file_tosend))
os.remove(file_tosend) # Deletes downloaded file
await ctx.message.add_reaction(sEmoji)
else: # Upload to streamable if file over size limit
await channel.send(embed=embed)
msg = await channel.send(f'{message.author} 🔃 Wait Uploading...')
streamable_link=i.upload_to_streamable(file_tosend, i.user_name())
await msg.edit(content=streamable_link)
os.remove(file_tosend) # Deletes downloaded file
await ctx.message.add_reaction(sEmoji)
# For Pictures
elif i.type_media() == "GraphImage":
await channel.send(embed=embed)
await ctx.message.add_reaction(sEmoji)
except:
embed=discord.Embed(title="Error", description='Account Maybe Private.', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://i.imgur.com/j3wGKKr.png")
await ctx.channel.send(embed=embed, delete_after=10)
else:
pass
def setup(bot):
    # discord.py extension entry point: registers the cog when the extension is loaded.
    bot.add_cog(SendtodmCog(bot))
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,484
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/cogs/events/instagram.py
|
import discord
from discord.ext import commands
from discord import file
import os
import sys
sys.path.append("./cogs/functions")
import insta_fn
#Setting Values
sys.path.append("./")
from config import *
instapath = config["path"]
limitsize = config["limitsize"] # <-- 8 mb for file size limit set by discord
class InstagramCog(commands.Cog):
    """Listens for Instagram post links in chat and re-posts them as rich embeds.

    Videos are downloaded to `instapath`; files above `limitsize` bytes are
    mirrored to Streamable instead of uploaded to Discord directly.
    """
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message, multipost_num=1):
        # Only react to messages that begin with an Instagram post URL.
        if message.content.startswith('https://www.instagram.com/'):
            split_url = message.content.split(' ') #Seperates url and num if there is
            url = split_url[0]
            # Check if the user entered a number after the url
            try:
                split_url[1]
            except IndexError:
                # No selector given - index 0 of the (possible) multi-post.
                multipost_num = 0
            else:
                multipost_num = (int(split_url[1]) - 1) # -1 from number
            try:
                i = insta_fn.Insta_fn(url , multipost_num)
                embed = i.embedgen(message.content, message.author, message.author.avatar_url)
                # For Videos
                if i.type_media() == "GraphVideo":
                    i.video_download(instapath)
                    file_tosend = (f"{instapath}/{i.video_id()}.mp4")
                    file_size = os.path.getsize(file_tosend)
                    if file_size <= limitsize:
                        await message.channel.send(embed=embed)
                        await message.channel.send(file=discord.File(file_tosend))
                        os.remove(file_tosend) # Delete the file
                    else: # Upload to streamable if file over size limit
                        await message.channel.send(embed=embed)
                        msg = await message.channel.send(f'{message.author} 🔃 Wait Uploading...')
                        streamable_link=i.upload_to_streamable(file_tosend, i.user_name())
                        await msg.edit(content=streamable_link)
                        os.remove(file_tosend) # Delete the file
                # For Pictures
                elif i.type_media() == "GraphImage":
                    await message.channel.send(embed=embed)
            except:
                # NOTE(review): bare except swallows all failures (including bugs) -
                # every error surfaces to the user as "Account Maybe Private".
                embed=discord.Embed(title="Error", description='Account Maybe Private.', icon_url=message.author.avatar_url)
                embed.set_thumbnail(url="https://i.imgur.com/j3wGKKr.png")
                await message.channel.send(embed=embed, delete_after=10)
def setup(bot):
    # discord.py extension entry point: registers the cog when the extension is loaded.
    bot.add_cog(InstagramCog(bot))
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,485
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/config.py
|
import os
from dotenv import load_dotenv
load_dotenv()
# Central runtime configuration, read (via star-import) by the cogs and the flask api.
# Each `'' or os.getenv(...)` entry resolves to the .env value unless a literal
# is hard-coded in place of the empty string ('' is falsy, so os.getenv wins).
config = {
    "TOKEN": '' or os.getenv("TOKEN"), #Discord Bot Token.
    'Prefix': '.',
    "INSTA_USER": '' or os.getenv("IG_USERNAME"), # Instagram Username
    "SESSION-Path": './api', # get session with instaloader -l USERNAME, then copy the session file to this directory.
    "stream_email": '' or os.getenv("stream_email"), # Streamable email https://streamable.com/.
    "stream_pass": '' or os.getenv("stream_pass"), # Streamable pass.
    "proxyip": '' or os.getenv("proxyip"), # Required for tiktok to work, use http proxy, example 0.0.0.0:80 or user:pass@0.0.0.0:80.
    "path": './api/downloads-cache', # download path.
    "limitsize": 8388608, # 8 mb for file size limit set by discord.
    "sEmoji": '☑',
}
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,486
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/cogs/commands/slash-sendtodm.py
|
import discord
from discord.ext import commands
from discord import file
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_choice, create_option
import os
import random
import sys
sys.path.append("./cogs/functions")
import tik_fn
import insta_fn
# Setting Values
sys.path.append("./")
from config import *
path_down = config["path"]
limitsize = config["limitsize"] # <-- 8 mb for file size limit set by discord
sEmoji = config["sEmoji"]
class SlashSendtodmCog(commands.Cog):
    """Slash-command variant of the `send` command: relays a TikTok or Instagram post to a user's DM."""

    def __init__(self, bot):
        self.bot = bot

    # send video to dm
    @cog_ext.cog_slash(
        name="send",
        description="Send post to dm.",
        options=[
            create_option(
                name="user",
                description="Select a user to send to.",
                required=True,
                option_type=6,
            ),
            create_option(
                name="url",
                description="Link of Post.",
                required=True,
                option_type=3,
            ),
            create_option(
                name="number",
                description="If there is multiple posts choose the number of post (instagram).",
                required=False,
                option_type=4,
            ),
        ]
    )
    async def sendtodm_cmd(self, ctx: SlashContext, user: discord.Member, url, number=1):
        """Download the post at `url` and deliver it to `user`'s DM.

        Files above the discord upload limit are mirrored to Streamable.
        `number` selects which entry of an instagram multi-post to use (1-based).
        """
        member = user
        link_url = url
        multipost_num = number
        await ctx.defer()
        channel = await member.create_dm()
        # Tiktok send to dm
        if link_url.startswith('https://www.tiktok.com') or link_url.startswith('https://vm.tiktok.com'):
            try:
                # Sends url to tik_fn
                t = tik_fn.Tiktok_fn(link_url)
                # Download video
                downloader = tik_fn.TikTokDownloader(t.default_url())
                downloader.download(path_down+'/{}.mp4'.format(t.video_id()))
                mp4_file = (f"{path_down}/{t.video_id()}.mp4")
                file_size = os.path.getsize(mp4_file)
                # Embed
                e = t.embedgen(link_url, ctx.author, ctx.author.avatar_url)
                # Upload to discord
                if file_size <= limitsize:
                    await channel.send(embed=e)
                    await channel.send(file=discord.File(mp4_file))
                # Upload to Streamable
                else:
                    await channel.send(embed=e)
                    mssg = await channel.send(f'Wait Uploading...🔃 {ctx.author}')
                    streamable_link = t.upload_to_streamable(mp4_file, t.video_id())
                    await mssg.edit(content=streamable_link)
                #Delete the file
                os.remove(mp4_file)
                await ctx.send(sEmoji, delete_after=15)
            except:
                # NOTE(review): bare except also masks programming errors; kept for parity with the prefix command.
                embed = discord.Embed(title="Error", description='The video is private, or the api is broken \n make sure to use a proxy.', icon_url=ctx.author.avatar_url)
                embed.set_thumbnail(url="https://i.imgur.com/j3wGKKr.png")
                await ctx.send(embed=embed, delete_after=10)
        # Instagram send to dm
        elif link_url.startswith('https://www.instagram.com/'):
            url = link_url
            multipost_num = (int(multipost_num) - 1)
            try:
                i = insta_fn.Insta_fn(url, multipost_num)
                embed = i.embedgen(link_url, ctx.author, ctx.author.avatar_url)
                # For Videos
                if i.type_media() == "GraphVideo":
                    i.video_download(path_down)
                    file_tosend = (f"{path_down}/{i.video_id()}.mp4")
                    file_size = os.path.getsize(file_tosend)
                    if file_size <= limitsize:
                        await channel.send(embed=embed)
                        await channel.send(file=discord.File(file_tosend))
                        os.remove(file_tosend) # Deletes downloaded file
                        await ctx.send(sEmoji, delete_after=15)
                    else: # Upload to streamable if file over size limit
                        await channel.send(embed=embed)
                        # Bugfix: `message` is undefined in a slash command; use the interaction's author.
                        msg = await channel.send(f'{ctx.author} 🔃 Wait Uploading...')
                        streamable_link = i.upload_to_streamable(file_tosend, i.user_name())
                        await msg.edit(content=streamable_link)
                        os.remove(file_tosend) # Deletes downloaded file
                        await ctx.send(sEmoji, delete_after=15)
                # For Pictures
                elif i.type_media() == "GraphImage":
                    await channel.send(embed=embed)
                    await ctx.send(sEmoji, delete_after=15)
            except:
                embed = discord.Embed(title="Error", description='Account Maybe Private.', icon_url=ctx.author.avatar_url)
                embed.set_thumbnail(url="https://i.imgur.com/j3wGKKr.png")
                await ctx.send(embed=embed, delete_after=10)
        else:
            pass
def setup(bot):
    # discord.py extension entry point: registers the slash-command cog on load.
    bot.add_cog(SlashSendtodmCog(bot))
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,487
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/api/flaskapi.py
|
from flask import Flask, json, request
import json
import requests
import random
import instaloader
import sys
sys.path.append("./")
#Setting Values
from config import *
USER = config["INSTA_USER"]
session_path = config["SESSION-Path"]
proxyip = config["proxyip"]
app = Flask(__name__)
#app.config.from_mapping(config)
@app.route('/', methods=['GET'])
def home():
    """Simple liveness probe for the media API."""
    payload = {'status': 'Online'}
    return payload
# TikTok
def _fetch_tiktok_item(api_url, headers, cookie, proxies=None):
    """GET the TikTok share endpoint and return the decoded JSON payload.

    `proxies` is passed straight through to requests; None means a direct request.
    """
    response = requests.request("get", api_url, headers=headers, proxies=proxies, cookies=cookie)
    return json.loads(response.text)

# Get metadata
@app.route('/api/tiktok/<video_username>/<video_id>', methods=['GET'])
def data(video_username, video_id):
    """Return TikTok video metadata, retrying once through the configured proxy.

    Responds with the `itemStruct` dict on success, or {"statusCode": 404}.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36 Edg/91.0.864.37'}
    # Random web ids make each request look like a fresh browser session.
    web_id = str(random.randint(10000, 999999999))
    cookie = { 'tt_webid': web_id, 'tt_webid_v2': web_id }
    api_url = ('https://www.tiktok.com/node/share/video/@' + video_username + '/' + video_id)
    payload = _fetch_tiktok_item(api_url, headers, cookie)
    if payload["statusCode"] == 0:
        return payload['itemInfo']['itemStruct']
    # Direct request rejected - retry once through the configured proxy.
    proxies = dict(https=f'http://{proxyip}')
    payload = _fetch_tiktok_item(api_url, headers, cookie, proxies)
    if payload["statusCode"] == 0:
        return payload['itemInfo']['itemStruct']
    return {"statusCode": 404}
# Instagram
# Module-level instaloader session shared by the /api/instagram route.
L = instaloader.Instaloader()
# login credentials
try:
    try:
        # Try the default session-file location first...
        L.load_session_from_file(USER)
    except:
        # ...then fall back to the session file shipped alongside the api.
        L.load_session_from_file(USER, f'{session_path}/session-{USER}')
except:
    print('Instagram Session File Not Found Please Add it, otherwise youll get blocked by instagram.')
# Get metadata
@app.route('/api/instagram/<shortcode>', methods=['GET'])
def gp(shortcode):
    """Return the raw Instagram post metadata for `shortcode` as a JSON string."""
    try:
        post = instaloader.Post.from_shortcode(L.context, shortcode)
        # NOTE(review): _full_metadata_dict is instaloader-private; presumably
        # stable for the pinned version - confirm on instaloader upgrades.
        return json.dumps(post._full_metadata_dict, ensure_ascii=False)
    except instaloader.exceptions.BadResponseException:
        return {"statusCode": 404}
from threading import Thread
def run():
    # Bind on all interfaces so the bot process can reach the api locally.
    app.run(host='0.0.0.0',port=8080)
def run_api():
    # Launch flask in a background thread so the caller's event loop keeps the main thread.
    t = Thread(target=run)
    t.start()
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,488
|
1mSAD/Discord-Mediabot
|
refs/heads/main
|
/cogs/events/tiktok.py
|
import discord
from discord.ext import commands
from discord import file
import os
import sys
sys.path.append("./cogs/functions")
import tik_fn
# Setting Values
sys.path.append("./")
from config import *
tik_down = config["path"]
limitsize = config["limitsize"] # <-- 8 mb for file size limit set by discord
class TiktokCog(commands.Cog):
    """Listens for TikTok links in chat and re-posts them as an embed plus video file.

    Videos are downloaded to `tik_down`; files above `limitsize` bytes are
    mirrored to Streamable instead of uploaded to Discord directly.
    """
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message):
        # Listen for Tiktok links
        if message.content.startswith('https://www.tiktok.com') or message.content.startswith('https://vm.tiktok.com'):
            try:
                # Sends url to tik_fn
                t = tik_fn.Tiktok_fn(message.content)
                # Download video
                downloader = tik_fn.TikTokDownloader(t.default_url())
                downloader.download(tik_down+'/{}.mp4'.format(t.video_id()))
                mp4_file = (f"{tik_down}/{t.video_id()}.mp4")
                file_size = os.path.getsize(mp4_file)
                # Embed
                e = t.embedgen(message.content, message.author, message.author.avatar_url)
                # Upload to discord
                if file_size <= limitsize:
                    await message.channel.send(embed=e)
                    await message.channel.send(file=discord.File(mp4_file))
                # Upload to Streamable
                else:
                    await message.channel.send(embed=e)
                    mssg = await message.channel.send(f'Wait Uploading...🔃 {message.author}')
                    streamable_link=t.upload_to_streamable(mp4_file, t.video_id())
                    await mssg.edit(content=streamable_link)
                #Delete the file
                os.remove(mp4_file)
            except:
                # NOTE(review): bare except hides all failures - every error surfaces
                # to the user as the generic "private or broken api" embed.
                embed=discord.Embed(title="Error", description='The video is private, or the api is broken \n make sure to use a proxy.', icon_url=message.author.avatar_url)
                embed.set_thumbnail(url="https://i.imgur.com/j3wGKKr.png")
                await message.channel.send(embed=embed, delete_after=10)
def setup(bot):
    # discord.py extension entry point: registers the cog when the extension is loaded.
    bot.add_cog(TiktokCog(bot))
|
{"/main.py": ["/config.py", "/api/flaskapi.py"], "/cogs/commands/help.py": ["/config.py"], "/cogs/functions/tik_fn.py": ["/config.py"], "/cogs/functions/insta_fn.py": ["/config.py"], "/cogs/commands/sendtodm.py": ["/config.py"], "/cogs/events/instagram.py": ["/config.py"], "/cogs/commands/slash-sendtodm.py": ["/config.py"], "/api/flaskapi.py": ["/config.py"], "/cogs/events/tiktok.py": ["/config.py"]}
|
8,490
|
LightCC/OpenPegs
|
refs/heads/master
|
/src/PegBoard.py
|
try:
from .PegNode import PegNode
except ImportError:
print("\n{}: Try running `pegs` from the command line!!\nor run with `python run_pegs.py` from root directory\n".format(__file__))
class PegException(Exception):
    """Base exception type for peg-board specific errors."""
    pass
class PegBoard:
    '''PegBoard is a linked list of nodes on a Peg Board

    PegBoard includes information on the board geometry of each
    individual node where a peg can be, in addition to information
    about how moves can happen (i.e. which nodes are adjacent,
    and where a jump over that adjacent node will land)

    Arguments:
        node_ids as list:
            list of ids to use for each node that is created
        [node_ids_str] as list:
            list of id strings that matchs the node_ids. These will be returned when attempting to print a node. If left off or empty, the built-in string function for each id will be used instead.
    '''
    def __init__(self, node_ids, node_ids_str=None):
        # Ensure node_ids is a list
        if not isinstance(node_ids, list):
            raise ValueError('node_ids (arg 1) was type "{}", expected "list"'.format(type(node_ids)))
        # If not provided, set node_ids_str to a list of the default string output of the node_ids list items
        if node_ids_str is None:
            node_ids_str = [str(x) for x in node_ids]
        else:
            # if node_ids_str was given, check that it is a list
            if not isinstance(node_ids_str, list):
                # Bugfix: this message previously reported type(node_ids) instead of type(node_ids_str).
                raise ValueError('node_ids_str (arg 2) was type "{}", expected "list"'.format(type(node_ids_str)))
            # if it is a list, check if all items are strings
            if not all(isinstance(x, str) for x in node_ids_str):
                raise ValueError('if provided, all items in Arg 3, node_ids_str, "{}" list must be strings'.format(node_ids_str))
        # Ensure input args are the same length as lists
        if len(node_ids) != len(node_ids_str):
            raise ValueError('Length of node_ids (arg 1) [{}] does not equal length of node_ids_str (arg 2) [{}]'.format(len(node_ids), len(node_ids_str)))
        ## create the nodes list
        nodes = {}
        newnodes = {node_id: PegNode(nodes, node_id, node_ids_str[index]) for index, node_id in enumerate(node_ids)}
        nodes.update(newnodes)
        ## Assign all object properties
        self._node_ids = node_ids
        self._node_ids_str = node_ids_str
        self._nodes = nodes
        # Setup _format_str to None so it is initialized,
        # need child class to set this up!!
        self._format_str = None

    def node(self, node_id):
        """Return the PegNode registered under `node_id`."""
        return self._nodes[node_id]

    def nodes(self):
        """Return the {node_id: PegNode} dict for the whole board."""
        return self._nodes

    ## Format Strings and functions for printing Board status and other info strings.
    # Note: Format string is set by the user/child class, the PegBoard class just fills in the information from the class object (i.e. filling in node ids, peg positions, etc.)
    def format_str(self):
        """Return the board layout format string; raises ValueError if the child class never set it."""
        if self._format_str is None:
            raise ValueError('Child Class must create _format_str variable!!')
        return self._format_str

    def nodes_str(self, indent=0):
        """Render the board with node-id labels in each position."""
        outstr = self.format_str().format(x=self._node_ids_str)
        return self._indent_string(outstr, indent)

    def pegs_str(self, indent=0):
        """Render the board with peg markers ('x'/'o') in each position."""
        pegs = [ self.node(node_id).peg_str() for node_id in self._node_ids ]
        outstr = self.format_str().format(x=pegs)
        return self._indent_string(outstr, indent)

    def full_str(self, indent=0):
        """Render the board with 'id:peg' in each position (spacing widened 3x)."""
        fullstr = [ '{}:{}'.format(self._nodes[node_id].node_id_str(), self._nodes[node_id].peg_str()) for node_id in self._node_ids ]
        outstr = self.format_str().format(x=fullstr)
        spaces = ' ' * 3
        outstr = outstr.replace(' ', spaces)
        return self._indent_string(outstr, indent)

    def node_and_pegs_str(self, indent=0, space_between=3):
        """Render the id board and peg board side by side, `space_between` columns apart."""
        node = self.nodes_str()
        pegs = self.pegs_str()
        nodelines = node.splitlines()
        peglines = pegs.splitlines()
        outstr = '\n'.join([ '{}{}{}'.format(nodelines[index], ' ' * space_between, peglines[index]) for index, _ in enumerate(nodelines) ])
        return self._indent_string(outstr, indent)

    def _indent_string(self, text, indent):
        # Prefix every line with `indent` spaces, preserving internal newlines.
        spaces = ' ' * indent
        outstr = ''.join([spaces + line + '\n' for line in text.splitlines()])
        return outstr[:-1]
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,491
|
LightCC/OpenPegs
|
refs/heads/master
|
/src/pegs.py
|
try:
from .PegPyramid import PegPyramid
except ImportError:
print("\n{}: Try running `pegs` from the command line!!\nor run with `python run_pegs.py` from root directory\n".format(__file__))
def main():
    """Run one interactive console game of pegs on a pyramid board.

    Flow: pick the empty start node, repeatedly choose from the valid jump
    moves until none remain, then score by the number of pegs left.
    """
    indent = 3
    print("Running Pegs Game...")
    pyramid = PegPyramid()
    print('\n'
          'Game board node names, but no pegs!!')
    print(pyramid.node_and_pegs_str(indent))
    ## Setup the game board
    valid_start_node = False
    while valid_start_node == False:
        start_node = input('\nStart: Which node on left should be empty? ')
        valid_start_node = pyramid.setup_game_board(start_node)
    print('\n'
          'All Nodes but Start Node filled')
    print(pyramid.node_and_pegs_str(indent))
    ## Begin play
    remaining_moves = True
    ## Evaluate available moves
    while remaining_moves:
        remaining_moves = pyramid.valid_moves()
        if not remaining_moves:
            # No moves left - fall through to scoring below.
            break
        ## Print the available moves and have user select one
        print('\nValid Remaining Moves:')
        for index, available_move in enumerate(remaining_moves):
            print(' Move #{}: {}'.format(index, available_move))
        print('')
        selected_move = None
        while selected_move == None:
            move_str = input('Which move will you make? ')
            try:
                move_index = int(move_str)
                # Explicit length check (previously relied on the leaked `index` loop variable).
                if move_index < 0 or move_index >= len(remaining_moves):
                    raise ValueError
                selected_move = remaining_moves[move_index]
            except:
                if len(remaining_moves) == 1:
                    valid_range = '0'
                else:
                    valid_range = '0 to {}'.format(len(remaining_moves) - 1)
                print('ERROR!! Invalid selection... must be {}!'.format(valid_range))
                continue
        # A valid move was picked, execute it
        pyramid.execute_jump_move(selected_move)
        print('\n Peg in {} jumped to {}, removing {}'.format(selected_move.start_node().node_id(), selected_move.end_node().node_id(), selected_move.adjacent_node().node_id()))
        print('')
        print(pyramid.node_and_pegs_str(3))
    ## No more available moves, game is done!
    pegs = sum(node.peg() for node in pyramid.nodes().values())
    print('\n'
          'No moves available:')
    print('\n'
          ' You finished the game with {} remaining pegs'.format(pegs))
    if pegs >= 4:
        print(' It takes someone special to leave that many pegs on the board!!')
    elif pegs == 3:
        print(' I can do that well with random moves!!')
    elif pegs == 2:
        print('\n'
              ' You might be getting the hang of this!!\n'
              ' But you can still do better...')
    elif pegs == 1:
        print('\n'
              ' What? You solved it?!\n'
              ' We worship the ground you walk on!!\n'
              ' But can you do it again...')
    else:
        # Bugfix: the Exception was previously constructed but never raised.
        raise Exception('Not a possible outcome - someone cheated! (or someone didn\'t program right...)')
    ## Pause for user to press enter, so that window will not disappear if run directly from *.exe
    input('\n=== PRESS ENTER TO END GAME ===')

if __name__ == '__main__':
    main()
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,492
|
LightCC/OpenPegs
|
refs/heads/master
|
/src/PegNode.py
|
try:
from .PegNodeLink import PegNodeLink
except ImportError:
print("\n{}: Try running `pegs` from the command line!!\nor run with `python run_pegs.py` from root directory\n".format(__file__))
class PegNode:
    '''A single board position that may hold a peg and jump links to other nodes.

    Arguments:
        parent: the owning collection, a dict with {node_id: node} entries
        node_id: a unique key that identifies this PegNode
        node_id_str: printable label for the node; defaults to str(node_id)
        peg: truthy if a peg starts out in this node
    '''
    def __init__(self, parent, node_id, node_id_str='', peg=False):
        self._node_id = node_id
        # Fall back to the id's default string form when no label is given.
        self._node_id_str = node_id_str if node_id_str else str(node_id)
        if not isinstance(self._node_id_str, str):
            raise ValueError('"node_id_str" (arg 3) must be a string, it was {}'.format(type(self._node_id_str)))
        self._parent = parent
        self._links = []
        # Normalize any truthy value to a plain bool.
        self._peg = bool(peg)

    def peg(self):
        '''Return True when a peg occupies this node.'''
        return self._peg

    def peg_str(self):
        '''Single-character marker: "x" occupied, "o" empty.'''
        return 'x' if self._peg else 'o'

    def set_peg(self):
        '''Place a peg here; raises ValueError if one is already present.'''
        if not self._peg:
            self._peg = True
        else:
            raise ValueError('Peg already present at Node {}, cannot add'.format(self.node_id()))

    def clear_peg(self):
        '''Remove the peg here; raises ValueError if none is present.'''
        if not self._peg:
            raise ValueError('No peg was present at Node {} to remove'.format(self.node_id()))
        self._peg = False

    def node_id(self):
        '''Return this node's unique id.'''
        return self._node_id

    def node_id_str(self):
        '''Return this node's printable label.'''
        return self._node_id_str

    def links(self):
        '''Return the list of jump links starting at this node.'''
        return self._links

    def add_link(self, adjacent_node, end_node):
        '''Record a jump link: over adjacent_node, landing on end_node.'''
        self._links.append(PegNodeLink(self, adjacent_node, end_node))

    def __str__(self):
        lines = ['Node ID: {} (Type: {})'.format(self._node_id, type(self._node_id)),
                 'Node ID String: "{}"'.format(self._node_id_str),
                 'Links:']
        if self._links:
            lines.extend(' #{}: {}'.format(idx, link) for idx, link in enumerate(self._links))
        else:
            lines.append(' None')
        return '\n'.join(lines)
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,493
|
LightCC/OpenPegs
|
refs/heads/master
|
/tests/test_PegBoard.py
|
import pytest
from src.PegBoard import PegBoard
from src.PegNode import PegNode
from src.PegNodeLink import PegNodeLink
def fake_function():
    # Dummy callable used only as a "wrong type" fixture parameter below.
    pass

@pytest.fixture(params=[1, 1.1, {1, 2, 3}, 'string', {1: 1, 2: '2'}, (1, '1'), fake_function])
def not_a_list(request):
    '''A test fixture "not_a_list" that can be used as an argument to supply parameterized test cases that have separate objects of different types that are not of type <list>
    '''
    return request.param

@pytest.fixture(params=[1, 1.1, {'1', '2', '3'}, ['1', '2', '3'], {'1': '1', '2': '2'}, ('1', '1'), fake_function])
def not_a_string(request):
    '''A test fixture "not_a_string" that provides multiple parameterized test cases with separate objects that are not of type <str>
    '''
    return request.param
class TestPegBoard:
    # Unit tests for PegBoard construction, format-string rendering, and argument validation.

    def test_basic_PegBoard_object_creation(self):
        '''Ensure that basic PegBoard object creation is working correctly. i.e. Node objects are created that return the correct node ids, and different methods of accessing them through the .nodes() and .node(node_id) functions work.
        '''
        node_ids = [1, 2]
        board = PegBoard(node_ids)
        nodes = board.nodes()
        assert len(nodes) == 2
        assert board.node(1).node_id() == 1
        assert board.node(2).node_id() == 2
        # node() lookups and the nodes() dict must expose the very same objects.
        assert nodes[1] is board.node(1)
        assert nodes[2] is board.node(2)

    def test_format_string_outputs(self):
        '''test the creation of a format_str
        Whether a format string is working is tested by the outputs of .node_str(), .pegs_str(), .full_str(), and .node_and_pegs_str().
        '''
        node_ids = [1, 2, 3]
        board = PegBoard(node_ids)
        # Setup a format string in a pyramid
        test_str = (' {x[0]} \n'
                    '{x[1]} {x[2]}')
        board._format_str = test_str
        assert board.format_str() == test_str
        assert board.nodes_str() == (' 1 \n'
                                     '2 3')
        assert board.nodes_str(indent=2) == (' 1 \n'
                                             ' 2 3')
        assert board.pegs_str() == (' o \n'
                                    'o o')
        ## Set a peg in every position (switch from o's to x's)
        for node in board.nodes().values():
            node.set_peg()
        assert board.pegs_str() == (' x \n'
                                    'x x')
        assert board.pegs_str(1) == (' x \n'
                                     ' x x')
        assert board.full_str() == (' 1:x \n'
                                    '2:x 3:x')
        assert board.full_str(indent=5) == (' 1:x \n'
                                            ' 2:x 3:x')
        assert board.node_and_pegs_str() == (' 1 x \n'
                                             '2 3 x x')
        assert board.node_and_pegs_str(indent=3, space_between=0) == (' 1 x \n'
                                                                      ' 2 3x x')

    def test_raises_ValueError_if_format_string_is_not_set(self):
        '''Ensure a ValueError is raised if the .format_str() function is called before the ._format_str property is set by the parent
        '''
        node_ids = [1, 2]
        board = PegBoard(node_ids)
        with pytest.raises(ValueError):
            board.format_str()

    def test_raises_ValueError_if_node_ids_or_node_ids_str_are_not_a_list(self, not_a_list):
        '''Ensure a ValueError is raised when creating a PegBoard with either a node_ids or node_ids_str argument that are not a list
        '''
        ## Test an node_ids that is not a list raises ValueError)
        with pytest.raises(ValueError):
            PegBoard(not_a_list)
        # convert the invalid node_ids into a list that is valid, either by adding each item in the object to a list, or adding the object directly as the only item in a list
        try:
            valid_node_ids = [ x for x in not_a_list ]
        except TypeError: # TypeError is thrown if node_ids is not iterable
            valid_node_ids = [ not_a_list ]
        # Use the valid node_ids value with a node_ids_str that is not a list, and ensure a ValueError is raised
        with pytest.raises(ValueError):
            PegBoard(valid_node_ids, node_ids_str=not_a_list)
        # Now test with both invalid
        with pytest.raises(ValueError):
            PegBoard(not_a_list, not_a_list)

    def test_raises_ValueError_if_arg_lengths_are_not_equal(self):
        '''Ensure a ValueError is raised if the node_ids and node_ids_str are both provided, and are both lists, but are not the same length
        '''
        node_ids = [1, 2, 3]
        node_ids_str = ['1', '2']
        with pytest.raises(ValueError):
            PegBoard(node_ids, node_ids_str=node_ids_str)

    def test_raises_ValueError_if_node_ids_str_arg_items_are_not_strings(self, not_a_string):
        '''Ensure a ValueError is raised if any item in the node_ids_str argument are not a string, when node_ids_str is provided and is a list (i.e. is not the empty string, which is default and will auto-create a list of strings from the node_ids).
        '''
        node_ids = [1, 2, 3]
        node_ids_str = ['1', '2', '3']
        # No Exception should be raised
        PegBoard(node_ids, node_ids_str=node_ids_str)
        # Set up three test lists with the current non-string in each of the 3 positions in the list then ensure it generates a ValueError
        node_ids_str_test1 = [not_a_string, '2', '3']
        with pytest.raises(ValueError):
            PegBoard(node_ids, node_ids_str=node_ids_str_test1)
        node_ids_str_test2 = ['1', not_a_string, '3']
        with pytest.raises(ValueError):
            PegBoard(node_ids, node_ids_str=node_ids_str_test2)
        node_ids_str_test3 = ['1', '2', not_a_string]
        with pytest.raises(ValueError):
            PegBoard(node_ids, node_ids_str=node_ids_str_test3)
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,494
|
LightCC/OpenPegs
|
refs/heads/master
|
/tests/test_PegNode.py
|
import pytest
from src.PegNode import PegNode
def fake_function():
    # Dummy callable used only as a non-string fixture parameter below.
    pass

## supplies valid (node_id, node_id_str)
@pytest.fixture(params=[(1, '1'), ('string', 'string'), (1.1, '1.1')])
def valid_node_id_type(request):
    # Each param is a (node_id, expected node_id_str) pair.
    return request.param

## supplies values that are not strings
@pytest.fixture(params=[1, 1.1, ['1.1'], {2, 1}, fake_function, ('abc', 'def')])
def not_a_string(request):
    # Parameterized non-<str> values for negative node_id_str tests.
    return request.param
class TestPegNode:
    """Unit tests for PegNode: construction, peg state transitions, and link wiring."""
    def test_init_sets_node_id_and_string(self, valid_node_id_type):
        """node_id and node_id_str passed at init are returned unchanged."""
        (node_id, node_id_str) = valid_node_id_type
        node = PegNode(None, node_id, node_id_str)
        assert node.node_id() == node_id
        assert node.node_id_str() == node_id_str
    def test_generates_correct_node_id_string(self, valid_node_id_type):
        """When node_id_str is omitted, it is derived from node_id (str form)."""
        node_id, node_id_str = valid_node_id_type
        node = PegNode(None, node_id)
        assert node.node_id_str() == node_id_str
    def test_init_raises_valueerror_if_node_id_str_arg_is_not_str(self, not_a_string):
        """An explicit node_id_str that is not a str must raise ValueError."""
        with pytest.raises(ValueError):
            PegNode(None, 1, node_id_str=not_a_string)
    def test_init_peg_at_initialization(self):
        """peg argument is treated as truthy/falsy; peg_str() renders 'x'/'o'."""
        node_peg_false = PegNode(None, 1, peg=False)
        assert node_peg_false.peg() == False
        assert node_peg_false.peg_str() == 'o'
        # Any falsy value (0) behaves the same as False.
        node_peg_false2 = PegNode(None, 1, peg=0)
        assert node_peg_false2.peg() == False
        assert node_peg_false2.peg_str() == 'o'
        node_peg_true = PegNode(None, 2, peg=True)
        assert node_peg_true.peg() == True
        assert node_peg_true.peg_str() == 'x'
        # Any truthy value (9999) behaves the same as True.
        node_peg_true2 = PegNode(None, 2, peg=9999)
        assert node_peg_true2.peg() == True
        assert node_peg_true2.peg_str() == 'x'
    def test_setting_and_removing_pegs(self):
        """set_peg/clear_peg toggle state and reject redundant transitions."""
        node = PegNode(None, 1)
        assert node.peg() == False
        # With peg not present, should have error clearing it
        with pytest.raises(ValueError):
            node.clear_peg()
        node.set_peg()
        assert node.peg() == True
        # With peg already present, should have error setting it
        with pytest.raises(ValueError):
            node.set_peg()
        node.clear_peg()
        assert node.peg() == False
    def test_add_links_to_node(self):
        """add_link appends directed PegNodeLink entries and shares the parent dict."""
        nodes = {}
        ## Make 4 nodes that can be linked
        # will be in diamond pattern
        # 1 -> 2 -> 4
        # 1 -> 3 -> 4
        # and reverse from 4 to 1 on each path
        node1 = PegNode(nodes, 1)
        node2 = PegNode(nodes, 2)
        node3 = PegNode(nodes, 3)
        node4 = PegNode(nodes, 4)
        nodes.update({1: node1, 2: node2, 3: node3, 4: node4})
        node1.add_link(node2, node4)
        assert len(node1._links) == 1
        node1.add_link(node3, node4)
        assert len(node1._links) == 2
        node4.add_link(node2, node1)
        assert len(node4._links) == 1
        node4.add_link(node3, node1)
        assert len(node1._links) == 2
        assert len(node2._links) == 0
        assert len(node3._links) == 0
        assert len(node4._links) == 2
        # Each link stores start, adjacent (jumped-over), and end nodes.
        assert node1._links[0]._start_node is node1
        assert node1._links[0]._adjacent_node is node2
        assert node1._links[0]._end_node is node4
        assert node1._links[1]._start_node is node1
        assert node1._links[1]._adjacent_node is node3
        assert node1._links[1]._end_node is node4
        assert node4._links[1]._start_node is node4
        assert node4._links[1]._adjacent_node is node3
        assert node4._links[1]._end_node is node1
        assert str(node1._links[0]) == '1->2->4'
        assert str(node1._links[1]) == '1->3->4'
        assert str(node4._links[0]) == '4->2->1'
        assert str(node4._links[1]) == '4->3->1'
        # All nodes share the parent dict passed at construction time.
        assert node1._links[0]._end_node._parent[1] is node1
        assert node1._links[0]._end_node._parent[4] is node4
        assert len(node4._parent) == 4
        assert len(node1._parent) == 4
        ## Test the .links() method
        links = node1.links()
        assert links is node1._links
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,495
|
LightCC/OpenPegs
|
refs/heads/master
|
/src/PegNodeLink.py
|
class PegNodeLink:
    '''Directed description of one legal jump on the board.

    A link names three nodes: the start_node a peg jumps from, the
    adjacent_node it jumps over (which must hold a peg at jump time), and
    the end_node it lands on (which must be empty at jump time).

    Arguments:
        start_node(PegNode): Beginning Node Position
        adjacent_node(PegNode): Adjacent Node that will be jumped over
        end_node(PegNode): Ending Node that will be jumped to
    '''
    def __init__(self, start_node, adjacent_node, end_node):
        from .PegNode import PegNode
        # Validate each argument in order and store it under its private name.
        named_nodes = (('start_node', start_node),
                       ('adjacent_node', adjacent_node),
                       ('end_node', end_node))
        for name, node in named_nodes:
            if not isinstance(node, PegNode):
                raise ValueError('{} must be a PegNode instance'.format(name))
            setattr(self, '_' + name, node)
    def start_node(self):
        '''Node the peg jumps from.'''
        return self._start_node
    def adjacent_node(self):
        '''Node the peg jumps over.'''
        return self._adjacent_node
    def end_node(self):
        '''Node the peg lands on.'''
        return self._end_node
    def __str__(self):
        labels = (self._start_node.node_id_str(),
                  self._adjacent_node.node_id_str(),
                  self._end_node.node_id_str())
        return '->'.join(labels)
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,496
|
LightCC/OpenPegs
|
refs/heads/master
|
/tests/test_PegNodeLink.py
|
import pytest
from src.PegNodeLink import PegNodeLink
from src.PegNode import PegNode
def fake_function():
    """Placeholder callable used only as a non-PegNode fixture value."""
## supply a node_arg for instantiating PegNodeLink objects
# that is not a PegNode object to trigger a ValueError exception
@pytest.fixture(params=[None, 1, 'string', 1.1, ['list', 1], {1: 'dict'},
                        {'set', 1, 2}, fake_function])
def node_arg(request):
    """Supply assorted non-PegNode values that must trigger ValueError."""
    bad_value = request.param
    return bad_value
class TestPegNodeLink:
    """Unit tests for PegNodeLink construction, accessors, and str()."""
    def test_init_raises_valueerror_if_arg_is_not_PegNode(self, node_arg):
        """Every positional slot must reject values that are not PegNode."""
        good = PegNode(None, 1)
        # A link built from three valid nodes must not raise.
        PegNodeLink(good, good, good)
        # Substitute the bad value into each slot in turn.
        for args in ((node_arg, good, good),
                     (good, node_arg, good),
                     (good, good, node_arg)):
            with pytest.raises(ValueError):
                PegNodeLink(*args)
    def test_returning_all_nodes(self):
        """Accessors hand back the exact node objects given at init."""
        start, over, land = PegNode(None, 1), PegNode(None, 2), PegNode(None, 3)
        jump = PegNodeLink(start, over, land)
        assert jump.start_node() is start
        assert jump.adjacent_node() is over
        assert jump.end_node() is land
    def test_PegNodeLink_string_output(self):
        """str() joins the three node id strings with '->'."""
        first = PegNode(None, 1)
        second = PegNode(None, '2')
        third = PegNode(None, 3.14159)
        assert str(PegNodeLink(first, second, third)) == '1->2->3.14159'
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,497
|
LightCC/OpenPegs
|
refs/heads/master
|
/src/PegPyramid.py
|
try:
from .PegNode import PegNode
from .PegBoard import PegBoard
except ImportError:
print("\n{}: Try running `pegs` from the command line!!\nor run with `python run_pegs.py` from root directory\n".format(__file__))
class PegPyramid(PegBoard):
    """15-hole triangular peg board with the classic jump-move rules.

    Node ids 1..15 are laid out top-to-bottom; ids 10..15 display as 'a'..'f'
    so every position renders as a single character.
    """
    def __init__(self):
        node_ids = list(range(1, 16))
        node_ids_str = [ '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' ]
        nodes = {}
        node_dict = { node_id: PegNode(nodes, node_id, node_ids_str[index]) for index, node_id in enumerate(node_ids) }
        nodes.update(node_dict)
        super().__init__(node_ids, node_ids_str)
        # Rows of the pyramid, top (1 node) to bottom (5 nodes).
        self._rows = [
            [ nodes[1] ],
            [ nodes[2], nodes[3] ],
            [ nodes[4], nodes[5], nodes[6] ],
            [ nodes[7], nodes[8], nodes[9], nodes[10] ],
            [ nodes[11], nodes[12], nodes[13], nodes[14], nodes[15] ]
        ]
        self._setup_links()
        self._format_str = self._create_format_str()
    def _create_format_str(self):
        """Build a centered, row-per-line format template for the board.

        Each position becomes a '{x[i]}' placeholder, where i is the node's
        index into self._node_ids, so the template can be filled with the
        current peg characters.
        """
        ## Create a dict of rows with their lengths
        row_lengths = [ len(row) for row in self._rows ]
        max_nodes_in_row = max(row_lengths)
        ## Now create a string for each row and combine them
        rows = []
        for row in self._rows:
            # Center each row by adding spaces
            row_center_spacing = ' ' * (max_nodes_in_row - len(row))
            rowstr = row_center_spacing
            for node in row:
                node_index = self._node_ids.index(node.node_id())
                rowstr += '{{x[{node_index}]}} '.format(node_index=node_index)
            rowstr += row_center_spacing
            # rowstr will have one extra space at the end from the loop, strip one off
            rows.append(rowstr[:-1])
        # Remove the final '\n' from outstr
        return '\n'.join(rows)
    def _setup_links(self):
        """Register every legal (start, adjacent, end) jump on the pyramid."""
        self._create_link_by_id(1, 2, 4)
        self._create_link_by_id(1, 3, 6)
        self._create_link_by_id(2, 4, 7)
        self._create_link_by_id(2, 5, 9)
        self._create_link_by_id(3, 5, 8)
        self._create_link_by_id(3, 6, 10)
        self._create_link_by_id(4, 2, 1)
        self._create_link_by_id(4, 5, 6)
        self._create_link_by_id(4, 7, 11)
        self._create_link_by_id(4, 8, 13)
        self._create_link_by_id(5, 8, 12)
        self._create_link_by_id(5, 9, 14)
        self._create_link_by_id(6, 3, 1)
        self._create_link_by_id(6, 5, 4)
        self._create_link_by_id(6, 9, 13)
        self._create_link_by_id(6, 10, 15)
        self._create_link_by_id(7, 4, 2)
        self._create_link_by_id(7, 8, 9)
        self._create_link_by_id(8, 5, 3)
        self._create_link_by_id(8, 9, 10)
        self._create_link_by_id(9, 5, 2)
        self._create_link_by_id(9, 8, 7)
        self._create_link_by_id(10, 6, 3)
        self._create_link_by_id(10, 9, 8)
        self._create_link_by_id(11, 7, 4)
        self._create_link_by_id(11, 12, 13)
        self._create_link_by_id(12, 8, 5)
        self._create_link_by_id(12, 13, 14)
        self._create_link_by_id(13, 8, 4)
        self._create_link_by_id(13, 9, 6)
        self._create_link_by_id(13, 12, 11)
        self._create_link_by_id(13, 14, 15)
        self._create_link_by_id(14, 9, 5)
        self._create_link_by_id(14, 13, 12)
        self._create_link_by_id(15, 10, 6)
        self._create_link_by_id(15, 14, 13)
    def _create_link_by_id(self, start_node_id, adjacent_node_id, end_node_id):
        """Attach one jump link to the start node, resolving ids to nodes."""
        self._nodes[start_node_id].add_link(self.node(adjacent_node_id), self.node(end_node_id))
    def setup_game_board(self, start_node_id_str):
        """Fill every hole except start_node_id_str; return False if id unknown."""
        if start_node_id_str in self._node_ids_str:
            for node in self._nodes.values():
                if start_node_id_str != node.node_id_str():
                    node.set_peg()
            return True
        else: # the node_id_str passed in was not found
            return False
    def valid_moves(self):
        """Return all links that describe a currently-legal jump."""
        moves = []
        for node in self._nodes.values():
            for link in node.links():
                if self.link_has_valid_jump(link):
                    moves.append(link)
        return moves
    def link_has_valid_jump(self, link):
        """True when start and adjacent hold pegs and the end hole is empty."""
        # If start node has a peg, and adjacent node has a peg to jump, and end node is empty to land, then link is valid for a jump
        return all( [link.start_node().peg(), link.adjacent_node().peg(), not link.end_node().peg()] )
    def execute_jump_move(self, link):
        """Perform the jump described by link, or raise ValueError explaining why not."""
        if self.link_has_valid_jump(link):
            link.adjacent_node().clear_peg() # Jump over here and remove peg from board
            link.start_node().clear_peg() # Jump from here, peg moves
            link.end_node().set_peg() # peg lands here and fills the spot
        else:
            # BUG FIX: node_id_str is a method — it was previously interpolated
            # without calling it, so messages showed a bound-method repr
            # instead of the node's label.
            if not link.start_node().peg():
                raise ValueError('Link {} is not valid - No peg to jump with in start node {}'.format(link, link.start_node().node_id_str()))
            elif not link.adjacent_node().peg():
                raise ValueError('Link {} is not valid - No peg to jump over in adjacent node {}'.format(link, link.adjacent_node().node_id_str()))
            if link.end_node().peg():
                raise ValueError('Link {} is not valid - Peg already present in end node {}'.format(link, link.end_node().node_id_str()))
|
{"/src/PegBoard.py": ["/src/PegNode.py"], "/src/pegs.py": ["/src/PegPyramid.py"], "/src/PegNode.py": ["/src/PegNodeLink.py"], "/tests/test_PegBoard.py": ["/src/PegBoard.py", "/src/PegNode.py", "/src/PegNodeLink.py"], "/tests/test_PegNode.py": ["/src/PegNode.py"], "/src/PegNodeLink.py": ["/src/PegNode.py"], "/tests/test_PegNodeLink.py": ["/src/PegNodeLink.py", "/src/PegNode.py"], "/src/PegPyramid.py": ["/src/PegNode.py", "/src/PegBoard.py"]}
|
8,530
|
turtlecode/RadioButtonDesktopApplication-Python-Pyqt5
|
refs/heads/main
|
/radiobutton.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'radiobutton.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI: country/job radio groups, a Get button, a result label.

    NOTE: this class is regenerated from radiobutton.ui; manual edits will be
    overwritten the next time pyuic5 runs.
    """
    def setupUi(self, MainWindow):
        """Instantiate and lay out all widgets on MainWindow (generated code)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(424, 800)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # --- Country radio-button group ---
        self.country_box = QtWidgets.QGroupBox(self.centralwidget)
        self.country_box.setGeometry(QtCore.QRect(70, 80, 141, 231))
        self.country_box.setObjectName("country_box")
        self.gridLayoutWidget = QtWidgets.QWidget(self.country_box)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 20, 101, 191))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.england = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.england.setObjectName("england")
        self.gridLayout.addWidget(self.england, 1, 0, 1, 1)
        self.turkey = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.turkey.setObjectName("turkey")
        self.gridLayout.addWidget(self.turkey, 0, 0, 1, 1)
        self.france = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.france.setObjectName("france")
        self.gridLayout.addWidget(self.france, 3, 0, 1, 1)
        self.germany = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.germany.setObjectName("germany")
        self.gridLayout.addWidget(self.germany, 2, 0, 1, 1)
        # --- Job radio-button group ---
        self.job_group = QtWidgets.QGroupBox(self.centralwidget)
        self.job_group.setGeometry(QtCore.QRect(230, 80, 141, 231))
        self.job_group.setObjectName("job_group")
        self.gridLayoutWidget_2 = QtWidgets.QWidget(self.job_group)
        self.gridLayoutWidget_2.setGeometry(QtCore.QRect(20, 20, 101, 191))
        self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
        self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.police = QtWidgets.QRadioButton(self.gridLayoutWidget_2)
        self.police.setObjectName("police")
        self.gridLayout_2.addWidget(self.police, 3, 0, 1, 1)
        self.carpenter = QtWidgets.QRadioButton(self.gridLayoutWidget_2)
        self.carpenter.setObjectName("carpenter")
        self.gridLayout_2.addWidget(self.carpenter, 0, 0, 1, 1)
        self.doctor = QtWidgets.QRadioButton(self.gridLayoutWidget_2)
        self.doctor.setObjectName("doctor")
        self.gridLayout_2.addWidget(self.doctor, 1, 0, 1, 1)
        self.teacher = QtWidgets.QRadioButton(self.gridLayoutWidget_2)
        self.teacher.setObjectName("teacher")
        self.gridLayout_2.addWidget(self.teacher, 2, 0, 1, 1)
        # --- Action button and result label ---
        self.get_selected = QtWidgets.QPushButton(self.centralwidget)
        self.get_selected.setGeometry(QtCore.QRect(150, 330, 121, 51))
        self.get_selected.setObjectName("get_selected")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(20, 390, 381, 111))
        font = QtGui.QFont()
        font.setPointSize(28)
        self.label.setFont(font)
        self.label.setObjectName("label")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 424, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings through Qt's translation layer (generated code)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.country_box.setTitle(_translate("MainWindow", "Country"))
        self.england.setText(_translate("MainWindow", "England"))
        self.turkey.setText(_translate("MainWindow", "Turkey"))
        self.france.setText(_translate("MainWindow", "France"))
        self.germany.setText(_translate("MainWindow", "Germany"))
        self.job_group.setTitle(_translate("MainWindow", "Job"))
        self.police.setText(_translate("MainWindow", "Police"))
        self.carpenter.setText(_translate("MainWindow", "Carpenter"))
        self.doctor.setText(_translate("MainWindow", "Doctor"))
        self.teacher.setText(_translate("MainWindow", "Teacher"))
        self.get_selected.setText(_translate("MainWindow", "Get"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
|
{"/radiobutton_lesson8.py": ["/radiobutton.py"]}
|
8,531
|
turtlecode/RadioButtonDesktopApplication-Python-Pyqt5
|
refs/heads/main
|
/radiobutton_lesson8.py
|
import sys
from PyQt5 import QtWidgets
from radiobutton import Ui_MainWindow
class my_app(QtWidgets.QMainWindow):
    """Main window wiring the generated radio-button UI to its handlers."""
    def __init__(self):
        super(my_app, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # All country buttons share one toggle handler (connected in the
        # same order as the original: turkey, england, germany, france).
        for country_button in (self.ui.turkey, self.ui.england,
                               self.ui.germany, self.ui.france):
            country_button.toggled.connect(self.country_onclick)
        # All job buttons share one toggle handler.
        for job_button in (self.ui.carpenter, self.ui.doctor,
                           self.ui.teacher, self.ui.police):
            job_button.toggled.connect(self.job_onclick)
        self.ui.get_selected.clicked.connect(self.get_selected)
    def job_onclick(self):
        """Print the job button's label when it becomes checked."""
        button = self.sender()
        if button.isChecked():
            print(button.text())
    def country_onclick(self):
        """Print the country button's label when it becomes checked."""
        button = self.sender()
        if button.isChecked():
            print(button.text())
    def get_selected(self):
        """Show 'You are from X' / 'Your job is Y' for the checked buttons."""
        text_country = ''
        text_job = ''
        for button in self.ui.country_box.findChildren(QtWidgets.QRadioButton):
            if button.isChecked():
                text_country = "You are from " + button.text() + '\n'
        for button in self.ui.job_group.findChildren(QtWidgets.QRadioButton):
            if button.isChecked():
                text_job = "Your job is " + button.text()
        self.ui.label.setText(text_country + text_job)
def create_app():
    """Spin up the Qt application and block until the window is closed."""
    qt_app = QtWidgets.QApplication(sys.argv)
    window = my_app()
    window.show()
    sys.exit(qt_app.exec_())
create_app()
|
{"/radiobutton_lesson8.py": ["/radiobutton.py"]}
|
8,534
|
brunolorente/ogc-route-client
|
refs/heads/master
|
/helpers.py
|
import requests
from pprint import pprint
'''
HELPERS
'''
def get_api_name(landing_page):
    """Return the API title from an OGC API landing page, or False on failure.

    Queries *landing_page* with ``f=json`` and reads the ``title`` field.

    :param landing_page: base URL of the API landing page
    :return: title string, or False when the request fails or the body is not JSON
    """
    try:
        api_response = requests.get(url = landing_page, params = {'f':'json'})
        json_api_response = api_response.json()
    except (requests.RequestException, ValueError):
        # RequestException covers ConnectionError, Timeout, etc. (the original
        # only caught ConnectionError, so timeouts crashed the caller);
        # ValueError is raised by .json() on a non-JSON body.
        return False
    return json_api_response["title"]
def get_routes(landing_page):
    """Return a list of {'href', 'title'} dicts for every route item, or False.

    Fetches ``<landing_page>/routes`` and collects the links whose ``rel``
    is ``item``.

    :param landing_page: base URL of the API landing page
    :return: list of dicts, or False when the request fails or the body is not JSON
    """
    routes = []
    url = landing_page+'/routes'
    try:
        api_response = requests.get(url = url)
        json_api_response = api_response.json()
    except (requests.RequestException, ValueError):
        # Broadened from ConnectionError for consistency with get_api_name:
        # timeouts and malformed JSON now also report failure instead of raising.
        return False
    for route in json_api_response["links"]:
        if route["rel"] == "item":
            element = dict(href=route["href"], title=route["title"])
            routes.append(element)
    return routes
|
{"/app.py": ["/helpers.py"]}
|
8,535
|
brunolorente/ogc-route-client
|
refs/heads/master
|
/app.py
|
import json
import requests
from pprint import pp, pprint
from helpers import get_routes, get_api_name
from flask import Flask, render_template, request, url_for
app = Flask(__name__)
# Base URL of the OGC API - Routes server this client talks to.
API_BASE_URL = 'https://dp21.skymantics.com/rimac'
# Human-readable API title fetched once at startup (False if unreachable).
API_NAME = get_api_name(API_BASE_URL)
# Initial map view settings passed to the front-end.
DEFAULT_ZOOM = 12
DEFAULT_CENTER = [0,0]
# OSM tile URL template used by the front-end map.
TILESERVER_URL = 'https://tile.openstreetmap.org/{z}/{x}/{y}.png'
@app.route('/')
def index():
    """Render the landing page with map settings and the available routes."""
    ROUTES = get_routes(API_BASE_URL)
    if not request.root_url:
        # this assumes that the 'index' view function handles the path '/'
        # NOTE(review): this assigns to request.root_url, a werkzeug request
        # property — confirm the deployed werkzeug version allows the write.
        request.root_url = url_for('index', _external=True)
    return render_template(
        'index.html',
        tileserver=TILESERVER_URL,
        routes=ROUTES,
        name=API_NAME,
        zoom=DEFAULT_ZOOM,
        center=DEFAULT_CENTER
    )
@app.route('/route', defaults={})
def get_route():
    """Request a new route from the API and return its features as JSON text.

    Reads ``waypoints`` (JSON-encoded) and ``route_name`` from the query
    string, plus optional ``max_height``/``max_width``/``preference``, and
    POSTs them to the API's /routes resource.
    """
    # BUG FIX: the original guards used `x != '' or x != None`, which is
    # always True; empty strings are now treated the same as missing values.
    waypoints = request.args.get('waypoints') or None
    route_name = request.args.get('route_name') or None
    # Set the API resource url
    URL = API_BASE_URL+"/routes"
    params = {
        'waypoints':json.loads(waypoints),
        'name': route_name,
    }
    # Optional params: only forwarded when the client actually sent a value
    # (previously they were always added, possibly as None).
    max_height = request.args.get('max_height')
    if max_height:
        params['maxHeight'] = max_height
    max_width = request.args.get('max_width')
    if max_width:
        # NOTE(review): the request arg is 'max_width' but the API key is
        # 'maxWeight' (OGC Routes vehicle weight) — confirm which is intended.
        params['maxWeight'] = max_width
    preference = request.args.get('preference')
    if preference:
        params['preference'] = preference
    # sending get request and saving the response as response object
    api_response = requests.post(url = URL, json = params)
    # extracting data in json format
    json_api_response = api_response.json()
    # Get features
    json_fearures_list = json_api_response["features"]
    # Parsing to string
    features_list = json.dumps(json_fearures_list)
    # Returning string
    return features_list
@app.route('/all')
def all_routes():
    """Return every known route from the API as a JSON string."""
    known_routes = get_routes(API_BASE_URL)
    return json.dumps(known_routes)
@app.route('/route/named')
def named_route():
    """Fetch a stored route by its link id and return its features as JSON text."""
    route_id = request.args.get('route_link')
    # GET the stored route resource from the API.
    api_response = requests.get(url = API_BASE_URL+'/routes/'+route_id)
    body = api_response.json()
    # Relay only the GeoJSON feature list, serialized to a string.
    return json.dumps(body["features"])
|
{"/app.py": ["/helpers.py"]}
|
8,544
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/assets/exceptions.py
|
"""
Custom exceptions for managing different servers
"""
class MissingConfigFile(Exception):
    """Raised when the configuration file cannot be found."""
class MissingForecastIOKey(Exception):
    """Raised when no Forecast.IO API key is configured."""
class PlexAPIKeyNotFound(Exception):
    """Raised when no Plex API key is configured."""
class MissingConfigValue(Exception):
    """Catch-all raised when a required config value is missing."""
class PlexConnectionError(Exception):
    """Raised on failure to connect to the configured Plex server."""
class PlexAPIDataError(Exception):
    """Raised when Plex returns malformed or unrecognized data
    (possibly due to an API change)."""
class PlexImageError(Exception):
    """Raised on failure to retrieve a cover image from the Plex server."""
class SubsonicConnectionError(Exception):
    """Raised on failure to connect to the configured Subsonic server."""
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,545
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/assets/sysinfo.py
|
import datetime
import subprocess
import os
import time
import urllib2
from collections import OrderedDict
from math import floor, log
import logging
import psutil
logger = logging.getLogger(__name__)
def convert_bytes(value, unit, output_str=False, decimals=2, auto_determine=False):
    """Convert a raw byte count into the requested unit (B through YB).

    :param value: byte count (int/float/long — the ``long`` check makes this Python 2 only)
    :param unit: target unit key, one of B/KB/MB/GB/TB/PB/EB/ZB/YB
    :param output_str: when True, return a formatted string like '1,234.56 GB'
    :param decimals: decimal places used when output_str is True (negatives clamp to 0)
    :param auto_determine: when True, pick the best-fit unit from the magnitude of value
    :return: str or int or float
    """
    assert any([type(value) == int, type(value) == float, type(value) is long])
    assert all([type(decimals) is int, type(output_str) is bool, type(auto_determine) is bool, value >= 0])
    # Exponents of 1024 for each supported unit.
    conversions = dict(B=0, KB=1, MB=2, GB=3, TB=4, PB=5, EB=6, ZB=7, YB=8)
    assert unit in conversions
    base = 1024.0
    converted_value = float(value) / base ** conversions[unit]
    if auto_determine and value > 0:
        # Generate automatic prefix by bytes
        base_power = floor(log(float(value)) / log(base))
        swap_conversion_values = {conversions[x]: x for x in conversions}
        while base_power not in swap_conversion_values:
            # future proofing. Not really necessary.
            # NOTE(review): `base_power -= base_power` zeroes base_power
            # (falling back to 'B'); `-= 1` was likely intended — confirm.
            base_power -= base_power
        unit = swap_conversion_values[base_power]
        converted_value = value / base ** conversions[unit]
    if output_str:
        if decimals < 0:
            decimals = 0
        return '{:,.{decimal}f} {unit}'.format(converted_value, decimal=decimals, unit=unit)
    else:
        return converted_value
def get_wan_ip(site='http://myip.dnsdynamic.org/'):
    """Return this host's public IP as reported by *site* (Python 2 urllib2).

    NOTE(review): no timeout is set, so an unresponsive service blocks the caller.
    """
    return urllib2.urlopen(site).read()
def get_partitions(partitions=None):
    """Map mount point -> psutil disk_usage for each partition.

    :param partitions: optional iterable of psutil partition tuples;
        defaults to every partition on the system.
    :return: dict of mount point to sdiskusage namedtuple
    """
    if partitions is None:
        partitions = psutil.disk_partitions(all=True)
    # p[1] is the mount point, p[0] the device string.
    # NOTE(review): `p[0] != 0` is true for any string (even ''); confirm
    # whether this was meant to filter out empty device names.
    return {p[1]: psutil.disk_usage(p[1]) for p in partitions if p[0] != 0}
def get_ping(host="8.8.8.8", kind='avg', num=4):
    # solution from http://stackoverflow.com/questions/316866/ping-a-site-in-python
    """
    Return one rtt statistic from pinging *host*.

    :param host: site or ip address to ping
    :param kind: which statistic to return: 'min', 'avg', 'max', or 'mdev'
    :param num: number of pings sent to host
    :return: float milliseconds; 0.0 when no summary line was found
        (host offline / unreachable)
    """
    assert kind in ['max', 'avg', 'mdev', 'min']
    assert type(int(num)) is int
    ping = subprocess.Popen(["ping", "-c", str(num), host], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, error = ping.communicate()
    # NOTE(review): splitting process output on '\n' implies str output —
    # Python 2; under Python 3 communicate() returns bytes.
    out = out.split('\n')
    try:
        # Parse the 'rtt min/avg/max/mdev = a/b/c/d ms' summary line into
        # a {stat_name: value} mapping and pick the requested one.
        out = [x for x in out if x.startswith('rtt')][0]
        out_mapped = zip(out.split()[1].split('/'), out.split()[3].split('/'))
        out_mapped = {x[0]: x[1] for x in out_mapped}
        out = out_mapped[kind]
    except IndexError:
        # most likely no ping returned, system offline
        out = 0
    return float(out)
def get_system_uptime():
    """Return boot time, uptime timedelta, and a formatted days/hours/minutes dict.

    :return: dict with keys 'boottime' (datetime), 'uptime' (timedelta),
        and 'uptime_formatted' (dict of days/hours/min display values)
    """
    def append_type(x, kind):
        """
        Return 0 if days/hours/minutes equals 0 otherwise append correct plural "s" to type
        ex. if systems up for 2 hours, returns "2 hours" likewise return "1 hour" if system has been up for 1 hour
        """
        assert type(x) is int and type(kind) is str
        if x == 0:
            return x
        else:
            return '{} {}'.format(str(x), kind + 's' if x != 1 else kind)
    boot_time = datetime.datetime.fromtimestamp(psutil.boot_time()).replace(microsecond=0)
    time_now = datetime.datetime.now().replace(microsecond=0)
    delta = time_now - boot_time
    # str(delta) looks like '6 days, 2:26:05', or '2:26:05' when under a day.
    formatted_time = str(delta).split(',')
    try:
        # System's been up a day or more
        hours = formatted_time[1].strip().split(':')
    except IndexError:
        # System's been up for less than day
        hours = formatted_time[0].strip().split(':')
        formatted_time[0] = 0
    # Drop the seconds field; keep hours and minutes only.
    hours.pop(2)
    hours, mins = [int(hour) for hour in hours]
    formatted_time = dict(days=formatted_time[0], hours=append_type(hours, 'hour'), min=append_type(mins, 'minute'))
    output = dict(
        boottime=boot_time,
        uptime=delta,
        uptime_formatted=formatted_time)
    return output
def return_network_io():
    """
    Return [bytes_sent, bytes_received] from the system-wide NIC counters.

    :rtype : list
    """
    counters = psutil.net_io_counters()
    return [counters.bytes_sent, counters.bytes_recv]
def get_network_speed(sleep=1):
    """Sample network I/O over *sleep* seconds and return converted up/down rates.

    :param sleep: whole seconds between the two counter samples
    :return: dict with 'up' (from bytes_sent) and 'down' (from bytes_recv)
    """
    assert type(sleep) is int
    start_time = datetime.datetime.now()
    start_data = return_network_io()
    time.sleep(sleep)
    time_delta = datetime.datetime.now() - start_time
    end_data = return_network_io()
    bits = 8
    # NOTE(review): byte deltas are multiplied by 8 (bits) but converted with
    # the byte-based 'MB' divisor; under Python 2 '/' here is integer
    # division — confirm the intended unit is megabits.
    return dict(up=convert_bytes((end_data[0] - start_data[0]) / time_delta.seconds * bits, 'MB'),
                down=convert_bytes((end_data[1] - start_data[1]) / time_delta.seconds * bits, 'MB'))
def get_total_system_space(digits=1):
    """
    returns total system disk space formatted, ex.
    {'total': '8,781.9 GB', 'used': '3,023.0 GB', 'pct': 34.4, 'free': '5,313.4 GB'}
    :rtype : dict
    :param digits: int, rounding precision for values and percentage
    :return: dict
    """
    assert type(digits) is int
    all_partitions = psutil.disk_partitions(all=True)
    # limit total disk space to those partitions mounted in "/dev/"
    partitions_to_keep = [partition for partition in all_partitions if partition.device.startswith('/dev/')]
    partitions = get_partitions(partitions_to_keep)
    disk_space = dict(total=sum([partitions[partition].total for partition in partitions]),
                      used=sum([partitions[partition].used for partition in partitions]),
                      free=sum([partitions[partition].free for partition in partitions]))
    # Byte totals are formatted with automatic unit selection; 'pct' is
    # used/total expressed as a rounded percentage.
    disk_space_formatted = {k: convert_bytes(disk_space[k], 'GB', True, digits, True) for k in disk_space}
    disk_space_formatted['pct'] = round(float(disk_space['used']) / float(disk_space['total']) * 100.0, digits)
    return disk_space_formatted
def get_partitions_space(partitions, digits=1, sort='alpha'):
    """
    Return formatted disk usage per configured partition, ex.
    {'Home': {'total': '168.8 GB', 'pct': 44.4, 'free': '85.3 GB', 'used': '74.9 GB'},
    'Incoming': {'total': '293.3 GB', 'pct': 48.2, 'free': '137.0 GB', 'used': '141.4 GB'}}
    :param partitions: dict of display name -> mount point
    :param digits: rounding precision for values and percentage
    :param sort: 'alpha' returns an OrderedDict sorted by display name
    :return: dict (OrderedDict when sort == 'alpha')
    """
    assert type(partitions) is dict
    system_partitions = get_partitions()
    # return disk space for each partition listed in config
    # test if listed partition actually exists in system first to avoid throwing an error
    disk_space = {p: system_partitions[partitions[p]] for p in partitions if partitions[p] in system_partitions}
    disk_space_formatted = {p: dict(total=convert_bytes(disk_space[p].total, 'GB', True, digits, True),
                                    used=convert_bytes(disk_space[p].used, 'GB', True, digits, True),
                                    free=convert_bytes(disk_space[p].free, 'GB', True, digits, True)) for p in
                            disk_space}
    for p in disk_space:
        disk_space_formatted[p]['pct'] = round(float(disk_space[p].used) / float(disk_space[p].total) * 100.0,
                                               digits)
    if sort.lower() == 'alpha':
        # place in ordered dictionary so paths always display in alphabetical order on page
        disk_space_formatted = OrderedDict(sorted(disk_space_formatted.items(), key=lambda x: x[0]))
    return disk_space_formatted
def get_load_average():
    """Return the 1/5/15-minute load averages, each normalized by CPU count."""
    cpu_count = psutil.cpu_count()
    return [load / cpu_count for load in os.getloadavg()]
class GetSystemInfo(object):
    """Aggregate memory, load, and uptime statistics for display."""
    def __init__(self):
        pass
    def get_info(self):
        """
        Returns system information in a dictionary
        mem_total: Total RAM in the system in megabytes as float, ex. "7876.88671875"
        mem_available: Unused RAM in the system in megabytes as float, ex. "4623.8671875"
        mem_used_pct: mem_available / mem_total as float, ex. "41.3"
        mem_bars: memory percentage split into three display-bar segments
        load_avg: list of per-CPU-normalized loads at 1, 5, and 15 min
        uptime_formatted: dictionary of uptime split in days, hours, min, ex.
        {'hours': '2 hours', 'days': '6 days', 'min': '26 minutes'}
        :return: dict
        """
        mem_info = psutil.virtual_memory()
        system_uptime = get_system_uptime()
        load_avg = get_load_average()
        # mem_info indices: [0] total bytes, [1] available bytes, [2] used percent.
        return dict(mem_total=convert_bytes(mem_info[0], 'MB'),
                    mem_available=convert_bytes(mem_info[1], 'MB'),
                    mem_used_pct=mem_info[2],
                    mem_bars=self._memory_bars(mem_info[2]),
                    load_avg=load_avg,
                    uptime_formatted=system_uptime['uptime_formatted'])
    @staticmethod
    def _memory_bars(val_pct):
        """Split a 0-100 used-memory percentage into three bar segments
        (0-50, 50-80, 80-100), clamping each segment at zero."""
        mid = 50
        upper = 80
        ret = dict(xmin=min(val_pct, mid),
                   xmid=min(val_pct - mid, upper - mid),
                   xmax=min(val_pct - upper, 100 - upper))
        # Segments below their threshold come out negative; clamp to zero.
        return {k: max(ret[k], 0) for k in ret}
if __name__ == '__main__':
pass
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,546
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/views.py
|
"""
Routing file for flask app
Handles routing for requests
"""
import json
import datetime
from flask import render_template, Response, request
from serverstatus import app
from assets import apifunctions
@app.route('/')
@app.route('/index')
def index():
    """
    Base index view at "http://www.example.com/"

    Renders index.html; ``time`` is the elapsed time since view entry,
    measured just before the template call.
    """
    start_time = datetime.datetime.now()
    return render_template('index.html',
                           title=app.config['WEBSITE_TITLE'],
                           time=datetime.datetime.now() - start_time,
                           testing=app.config['TESTING'])
@app.route('/api/<data>', methods=['GET'])
def get_json(data):
    """
    Return API data for "http://www.example.com/api/<data>", where <data>
    names a function in the APIFunctions class of the apifunctions module.
    The result is serialized to JSON.
    """
    # BACKENDCALLS is a module-level BackEndCalls instance (presumably
    # created later in this file — not visible here).
    values, status = BACKENDCALLS.get_data(data)
    json_data = json.dumps(values)
    # set mimetype to prevent client side manipulation since we're not using
    # jsonify
    return Response(json_data, status=status, mimetype='application/json')
@app.route('/html/<data>')
def html_generator(data):
    """Serve a rendered Jinja template for "http://www.example.com/html/<data>".

    <data> names a template in the "templates" directory. The rendered
    markup is returned as text/plain so the browser does not interpret
    it; the client loads it into divs via jQuery.
    """
    values, status_code = BACKENDCALLS.get_data(data)
    render_started = datetime.datetime.now()
    markup = render_template(data + '.html', values=values)
    app.logger.debug(
        'Render time for {}: {}'.format(data,
                                        datetime.datetime.now() - render_started))
    return Response(markup, status=status_code, mimetype='text/plain')
@app.route('/img/<data>')
def get_img_data(data):
    """Serve image data for "http://www.example.com/img/<data>" requests.

    e.g. "http://www.example.com/img/subsonic?cover=28102"; the request
    is routed to the matching media server for cover-art data.
    """
    fetch_started = datetime.datetime.now()
    response = BACKENDCALLS.get_image_data(request)
    app.logger.debug('Image request time for {}: {}'
                     .format(data, datetime.datetime.now() - fetch_started))
    return response
class BackEndCalls(object):
    """
    Provides access points into the API Functions of the backend.

    Also loads API configs lazily to remedy issues where the config
    hasn't been loaded for a particular server, and routes image
    requests to Plex and Subsonic.
    """

    def __init__(self):
        self.api_functions = None
        self.api_functions = self.get_api_functions()

    def get_api_functions(self):
        """
        Provide access to the apifunctions module through this class,
        creating the APIFunctions instance on first use.

        :return: APIFunctions
        """
        self._load_apis()
        return self.api_functions

    def get_data(self, data):
        """
        From a flask request at http://servername.com/api/{api_call},
        fetch {api_call} from the apifunctions module and return it.

        Disallows public access to any function in apifunctions starting
        with "_" (underscore): the prefix is stripped, so "_foo"
        resolves to "foo" and private helpers stay unreachable.

        :type data: unicode or LocalProxy
        :return: tuple of (values or None, int HTTP status)
        """
        values = None
        status = 404
        try:
            values = getattr(self.api_functions, str(data).lstrip('_'))()
            status = 200
        except (AttributeError, TypeError) as err:
            # no api function for this call; fall through to a 404
            app.logger.error(err)
        return values, status

    def get_image_data(self, flask_request):
        """
        Parse a flask request from
        http://servername.com/img/{plex | subsonic}?14569852 where
        {plex|subsonic} is the server requested, and route the request
        to that server for thumbnail image data.

        :type flask_request: werkzeug.local.Request
        :return: flask Response (404 text/plain when no server matches)
        """

        def parse_request(request_args):
            # flatten the query args into kwargs for Plex cover lookup;
            # a key with an empty value is treated as the bare cover id
            parsed_values = dict()
            for arg in request_args:
                if request_args[arg] == '':
                    parsed_values['plex_id'] = arg
                    continue
                try:
                    # NOTE(review): bool() of a non-empty string is always
                    # True and never raises ValueError, so the except
                    # branch looks unreachable -- confirm intended parsing
                    parsed_values[arg] = bool(request_args[arg])
                except ValueError:
                    parsed_values[arg] = request_args[arg]
            return parsed_values

        resp = Response('null', status=404, mimetype='text/plain')
        # convert to string since flask requests return unicode
        data_low = str(flask_request.view_args.get('data', None).lower())
        if data_low == 'plex':
            args = parse_request(flask_request.args)
            resp = Response(
                self.api_functions._get_plex_cover_art(args), status=200,
                mimetype='image/jpeg')
        elif data_low == 'subsonic':
            resp = Response(self._check_subsonic_request(flask_request),
                            status=200,
                            mimetype='image/jpeg')
        return resp

    def _load_apis(self):
        """Set api_functions on first use."""
        if self.api_functions is None:
            self.api_functions = apifunctions.APIFunctions(app.config)

    def _check_subsonic_request(self, flask_request):
        """
        Determine the cover id and size from a flask request and fetch
        the cover art from the Subsonic server.

        Accepted forms:
            /img/subsonic?28102
            /img/subsonic?cover=28102
            /img/subsonic?cover=28102&size=145

        :param flask_request: flask Request object
        :raises KeyError: when no cover id is present in the request
        :return: image
        """
        query_string = flask_request.query_string
        args = flask_request.args
        try:
            # bare cover id, e.g. /img/subsonic?28102
            cover_id = int(query_string)
            cover_size = None
        except ValueError:
            try:
                # cover id as a named arg, e.g. /img/subsonic?cover=28102
                cover_id = args['cover']
            except KeyError:
                # we need a cover to look up
                raise
            try:
                # optional size, e.g. /img/subsonic?cover=28102&size=145
                cover_size = args['size']
                try:
                    cover_size = int(cover_size)
                except ValueError:
                    # incorrect cover size requested
                    cover_size = None
            except KeyError:
                # cover size not included in the request
                cover_size = None
        return self.api_functions._get_subsonic_cover_art(cover_id, cover_size)
BACKENDCALLS = BackEndCalls()
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,547
|
beallio/media-server-status
|
refs/heads/master
|
/__init__.py
|
#!/usr/bin/env python
"""
Main initializing file. If called from command line starts WSGI debug testing
server
"""
if __name__ == '__main__':
from serverstatus import app
app.config.update(DEBUG=True, TESTING=True)
app.run(host='0.0.0.0')
print 'Test server running...'
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,548
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/__init__.py
|
"""
Initialize and setup flask app
"""
import imp
import os
import logging
import logging.handlers as handlers
from flask import Flask
app = Flask(__name__)
# update config for flask app
app.config.update(
APPNAME='server_status',
LOGGINGMODE=logging.DEBUG,
APPLOCATION=os.path.join(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))),
LOG_LOCATION='/tmp',
TEMP_LOCATION='/tmp',
CONFIG_LOCATION='/var/config.py')
app.config['TEMP_IMAGES'] = os.path.join(app.config['TEMP_LOCATION'],
'flask-images')
app.config['APP_MODULESLOCATION'] = os.path.join(app.config['APPLOCATION'],
'serverstatus')
import views
import assets
from assets.exceptions import MissingConfigFile
from assets.services import SubSonic
def _setup_logger():
    """
    Set up the application logging object with a rotating file handler.

    Creates the log directory if needed; when it cannot be created, no
    handler is attached and None is returned.

    :return: logging.Logger or None
    """
    mod_logger = None
    log_directory = app.config.get('LOG_LOCATION', None)
    # APPNAME + '.log'; the old '_'.join([...]) form produced
    # 'server_status_.log' with a stray underscore
    log_location = os.path.join(log_directory,
                                app.config['APPNAME'] + '.log')
    if not os.path.isdir(log_directory):
        try:
            os.mkdir(log_directory)
        except OSError:
            # os.mkdir raises OSError (not IOError) on failure; leave the
            # directory missing and skip file logging below
            pass
    if os.path.isdir(log_directory):
        file_handler = handlers.RotatingFileHandler(filename=log_location,
                                                    maxBytes=3145728)
        formatter = logging.Formatter(
            '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
            "%Y-%m-%d %H:%M:%S")
        file_handler.setFormatter(formatter)
        logging.getLogger('').addHandler(file_handler)
        mod_logger = logging.getLogger(__name__)
        mod_logger.setLevel(app.config['LOGGINGMODE'])
        mod_logger.debug('LOGGER initialized at {}'.format(log_location))
    return mod_logger
def _load_config_file(mod_logger=None):
    """
    Import the config module from CONFIG_LOCATION and copy its public
    attributes into the flask app config.

    :param mod_logger: optional logging.Logger; when None, logging calls
        are skipped instead of raising AttributeError (the old code
        crashed on the warning/critical paths with the default None)
    :raises MissingConfigFile: when the config file cannot be read
    """

    def gen_contents(config_data):
        """
        Generator yielding {name: value} dicts for each public module
        attribute, since flask app config only accepts dicts on update.

        :type config_data: __builtin__.module
        """
        for config_attrib in dir(config_data):
            # exclude objects that aren't our data
            if config_attrib.startswith('__'):
                continue
            config_value = getattr(config_data, config_attrib)
            if config_attrib in app.config and mod_logger:
                mod_logger.warning(
                    'Overwriting existing config value {} with {}'.format(
                        config_attrib, config_value))
            yield {config_attrib: config_value}

    # import config file
    config_location = app.config.get('CONFIG_LOCATION', None)
    try:
        config_data_file = imp.load_source('config', config_location)
        for data in gen_contents(config_data_file):
            app.config.update(data)
        if mod_logger:
            mod_logger.info(
                'Config file loaded from {}'.format(config_location))
    except IOError as e:
        errs = dict(err=e.strerror, dir_location=config_location)
        logger_msg = ('{err}: Configuration file could not be found at '
                      '"{dir_location}"').format(**errs)
        if mod_logger:
            mod_logger.critical(logger_msg)
        raise MissingConfigFile(logger_msg)
logger = _setup_logger() # initialize LOGGER
# import config data from config file into flask app object
_load_config_file(logger)
# remove initialization functions from namespace
del _load_config_file
del _setup_logger
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,549
|
beallio/media-server-status
|
refs/heads/master
|
/config.py
|
"""
Internal configuration file for Server Status app
Change the values according to your own server setup.
By default, the app is set to initialize the config file from "/var/config.py"
If you wish to change the location you'll need to change the location of the
file in serverstatus/__init__.py
For ForecastIO you'll need to go to https://developer.forecast.io/ and sign up
for an API key (at the time of writing the first 1,000 calls/day to the API are
free.
"""
SUBSONIC_INFO = dict(
url='http://192.168.0.1',
serverpath='/rest',
port=4040,
user='user',
password='password',
api=1.8,
appname='py-sonic',
external_url='http://www.example.com/subsonic'
)
PLEX_INFO = dict(
external_url='http://www.example.com/plex',
internal_url='http://192.168.0.1',
internal_port=32400,
user='user',
password='password',
auth_token='AUTH_TOKEN',
local_network_auth=False
)
SERVERSYNC_INFO = dict(
lockfile_path='/tmp/server_sync.lockfile')
CRASHPLAN_INFO = dict(
logfile_path='/usr/local/crashplan/log/app.log')
PARTITIONS = dict(Partition_Name_1='/mnt/partition1',
Partition_Name_2='/mnt/partition2',
Partition_Name_3='/mnt/partition3',
Root='/',
Home='/home')
INTERNAL_IP = 'http://192.168.0.1'
WEATHER = dict(
Forecast_io_API_key='FORECASTIOKEY',
Latitude=37.8030,
Longitude=-122.4360,
units='us')
SERVER_URL = 'http://www.example.com'
DEBUG = False
SECRET_KEY = 'my secret'
WEBSITE_TITLE = 'Your title here'
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,550
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/assets/apifunctions.py
|
"""
Serves as backend for returning information about server to jQuery and Jinja
templates. Data is returned in the form of dicts to mimic JSON formatting.
"""
from collections import OrderedDict
import logging
from serverstatus.assets.weather import Forecast
from serverstatus.assets.services import CheckCrashPlan, ServerSync, Plex, \
SubSonic
from serverstatus.assets.sysinfo import GetSystemInfo, get_network_speed, \
get_ping, get_wan_ip, get_partitions_space, get_total_system_space
import serverstatus.assets.wrappers as wrappers
class APIFunctions(object):
    """
    Backend for returning server information to jQuery and Jinja
    templates. Data is returned as dicts to mimic JSON formatting.

    Any function within this class may be called externally as long as
    its name does not start with "_". For example, a website may return
    data from http://foobar.com/api/system_info but not
    http://foobar.com/api/_get_plex_cover_art

    Examples to return data:
        To return system info:
            http://foobar.com/api/system_info
        To return network speed:
            http://foobar.com/api/network_speed
    """

    def __init__(self, config):
        """:param config: flask app config dict holding the *_INFO sections"""
        self.logger = LOGGER
        LOGGER.debug('{} initialized'.format(__name__))
        self.config = config
        # service wrappers are created lazily in _load_configs()
        self.subsonic = None
        self.plex = None
        self.server_sync = None
        self.crashplan = None
        self.weather = None

    @staticmethod
    @wrappers.logger('debug')
    def system_info():
        """
        Return data for the system info section (memory, load, uptime).

        :return: dict
        """
        get_system_info = GetSystemInfo()
        output = get_system_info.get_info()
        return output

    @staticmethod
    @wrappers.logger('debug')
    def network_speed():
        """
        Return server network speed. ``sleep`` is the window in seconds
        between network IO polls used to compute the speed delta.

        :return: dict
        """
        return get_network_speed(sleep=5)

    @staticmethod
    @wrappers.logger('debug')
    def ping():
        """
        Return ping to Google DNS (default target).

        :return: dict
        """
        return dict(ping='{:.0f}'.format(get_ping()))

    @wrappers.logger('debug')
    def storage(self):
        """
        Return formatted storage data for the partitions selected in the
        config file.

        :return: dict
        """
        paths = get_partitions_space(self.config['PARTITIONS'])
        return dict(total=get_total_system_space(), paths=paths)

    @wrappers.logger('debug')
    def ip_address(self):
        """
        Return the server's internal and external IP addresses.

        :return: dict
        """
        return dict(wan_ip=get_wan_ip(), internal_ip=self.config['INTERNAL_IP'])

    @wrappers.logger('debug')
    def services(self):
        """
        Return sorted status mappings for the servers in the config file.

        :return: OrderedDict
        """
        self._load_configs()
        servers = [self.plex, self.subsonic, self.server_sync, self.crashplan]
        servers_mapped = [getattr(server, 'status_mapping') for server in
                          servers]
        servers_dict = OrderedDict()
        for server in servers_mapped:
            # merge in place; the old items()-concatenation rebuild was
            # quadratic and Python-2-only
            servers_dict.update(server)
        return servers_dict

    @wrappers.logger('debug')
    def media(self):
        """
        Return now-playing data for Plex and Subsonic (if any) plus
        recently added items for both.

        :return: dict
        """
        self._load_configs()
        subsonic = self.subsonic
        plex = self.plex
        return dict(
            subsonic_nowplaying=subsonic.now_playing(),
            plex_nowplaying=plex.now_playing(),
            subsonic_recentlyadded=subsonic.recently_added(num_results=6),
            plex_recentlyadded=plex.recently_added(num_results=6))

    @wrappers.logger('debug')
    def forecast(self):
        """
        Return forecast data from forecast.io.

        :return: dict
        """
        self._load_configs()
        self.weather.reload_data()
        return self.weather.get_data()

    @wrappers.logger('debug')
    def plex_transcodes(self):
        """
        Return the number of active transcodes from Plex.

        :return: dict
        """
        self._load_configs()
        return dict(plex_transcodes=self.plex.transcodes)

    def _get_plex_cover_art(self, args):
        """
        Return Plex cover art, passing parsed flask args to the Plex class.

        :return: image
        """
        self._load_configs()
        return self.plex.get_cover_image(**args)

    def _get_subsonic_cover_art(self, cover_id, size):
        """
        Return Subsonic cover art for the given id and size.

        :return: image
        """
        self._load_configs()
        cover_id = int(cover_id)
        return self.subsonic.get_cover_art(cover_id, size)

    def _load_configs(self):
        """
        Lazily build the Service subclasses that haven't been created
        yet; a missing config section just logs and leaves that service
        as None.
        """
        if self.subsonic is None:
            try:
                self.subsonic = SubSonic(self.config['SUBSONIC_INFO'])
            except KeyError:
                LOGGER.debug('Subsonic not loaded yet')
        if self.plex is None:
            try:
                self.plex = Plex(self.config['PLEX_INFO'])
            except KeyError:
                LOGGER.debug('Plex not loaded yet')
        if self.server_sync is None:
            try:
                self.server_sync = ServerSync(self.config['SERVERSYNC_INFO'])
            except KeyError:
                LOGGER.debug('Server Sync not loaded yet')
        if self.crashplan is None:
            try:
                self.crashplan = CheckCrashPlan(self.config['CRASHPLAN_INFO'])
            except KeyError:
                LOGGER.debug('CrashPlan not loaded yet')
        if self.weather is None:
            try:
                self.weather = Forecast(self.config['WEATHER'])
            except KeyError:
                LOGGER.debug('weather not loaded yet')
LOGGER = logging.getLogger(__name__)
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,551
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/tests/test_serverstatus.py
|
import urllib2
import unittest
from collections import OrderedDict
from copy import deepcopy
from flask import Flask
from flask.ext.testing import LiveServerTestCase
from serverstatus import app
from serverstatus.assets.apifunctions import APIFunctions
from serverstatus.assets.services import ServerSync, SubSonic
from serverstatus.assets.weather import Forecast
class TestApiFunctions(unittest.TestCase):
    """Exercises every public APIFunctions endpoint with the live config."""

    def setUp(self):
        self.app = app.test_client()
        self.apifunctions = APIFunctions(app.config)

    def test_ping(self):
        self.assertTrue(isinstance(self.apifunctions.ping(), dict))

    def test_system_info(self):
        self.assertTrue(isinstance(self.apifunctions.system_info(), dict))

    def test_storage(self):
        self.assertTrue(isinstance(self.apifunctions.storage(), dict))

    def test_network_speed(self):
        self.assertTrue(isinstance(self.apifunctions.network_speed(), dict))

    def test_services(self):
        self.assertTrue(isinstance(self.apifunctions.services(), OrderedDict))

    def test_weather(self):
        self.assertTrue(isinstance(self.apifunctions.forecast(), dict))

    def test_media(self):
        results = self.apifunctions.media()
        self.assertIsInstance(results, dict)
        for key in results:
            if 'plex_nowplaying' in key:
                self.plex_nowplaying(results[key])
            if 'plex_recentlyadded' in key:
                self.plex_recentlyadded(results[key])
            if 'subsonic_nowplaying' in key:
                self.subsonic_nowplaying(results[key])
            # previously missing: media() returns this key but it was
            # never validated
            if 'subsonic_recentlyadded' in key:
                self.subsonic_recentlyadded(results[key])

    def test_plex_transcodes(self):
        self.assertTrue(isinstance(self.apifunctions.plex_transcodes(), dict))

    def plex_recentlyadded(self, result):
        # helper, not collected by unittest (no "test_" prefix)
        self.assertIsInstance(result, dict)
        for vid_type in result:
            self.assertIsInstance(result[vid_type], list)
            for video in result[vid_type]:
                self.assertIsInstance(video, dict)

    def subsonic_recentlyadded(self, result):
        # fixed: iterate the list's elements; the old result[album]
        # indexed the list with its own dict elements (TypeError)
        self.assertIsInstance(result, list)
        for album in result:
            self.assertIsInstance(album, dict)

    def plex_nowplaying(self, result):
        if not result:
            self.assertIs(result, None)
        if result:
            self.assertIsInstance(result, list)
            for video in result:
                self.assertIsInstance(video, dict)

    def subsonic_nowplaying(self, result):
        if not result:
            self.assertIs(result, None)
        if result:
            # SubSonic.now_playing() returns a list of entry dicts
            for entry in result:
                self.assertIsInstance(entry, dict)
class TestSubSonicServer(unittest.TestCase):
    """Checks that SubSonic reports offline for each bad config value."""

    def setUp(self):
        self.app = app.test_client()
        self.config = app.config['SUBSONIC_INFO']
        # known-bad value for each config key, used by the disabled
        # combined test below
        self.config_test_values = dict(
            url='http://192.168.1.100',
            port=40,
            user='guestuser',
            password='password',
            serverpath='/subbybad/'
        )

    # NOTE(review): name is missing the leading "t" ("est_" vs "test_"),
    # so unittest never collects this -- presumably disabled on purpose.
    # Its assertion (assertTrue) also disagrees with the per-key tests
    # below (assertFalse); confirm intent before re-enabling.
    def est_bad_config_values(self):
        for key in self.config_test_values:
            config = deepcopy(self.config)
            config[key] = self.config_test_values[key]
            self.subsonic = SubSonic(config)
            print key, self.config_test_values[key]
            self.assertTrue(self.subsonic.connection_status)

    def test_bad_server_url(self):
        # unreachable host: connection_status must be falsy
        bad_url = 'http://192.168.1.100'
        config = deepcopy(self.config)
        config['url'] = bad_url
        self.subsonic = SubSonic(config)
        self.assertFalse(self.subsonic.connection_status)

    def test_bad_port(self):
        config = deepcopy(self.config)
        config['port'] = 40
        self.subsonic = SubSonic(config)
        self.assertFalse(self.subsonic.connection_status)

    def test_bad_username(self):
        config = deepcopy(self.config)
        config['user'] = 'guestuser'
        self.subsonic = SubSonic(config)
        self.assertFalse(self.subsonic.connection_status)

    def test_bad_password(self):
        config = deepcopy(self.config)
        config['password'] = 'password'
        self.subsonic = SubSonic(config)
        self.assertFalse(self.subsonic.connection_status)

    def test_bad_serverpath(self):
        config = deepcopy(self.config)
        config['serverpath'] = '/subbybad/'
        self.subsonic = SubSonic(config)
        self.assertFalse(self.subsonic.connection_status)
class TestForecastIO(unittest.TestCase):
    """Verifies Forecast rejects an invalid forecast.io API key."""

    def setUp(self):
        self.app = app.test_client()
        self.config = app.config['WEATHER']

    def test_bad_apikey(self):
        broken = deepcopy(self.config)
        broken['Forecast_io_API_key'] = 'thisisabadkey'
        with self.assertRaises(ValueError):
            Forecast(broken)
class TestServerSync(unittest.TestCase):
    """Verifies ServerSync reports offline for bad lockfile configs."""

    def setUp(self):
        self.app = app.test_client()
        self.config = app.config['SERVERSYNC_INFO']

    def test_no_lockfile_path(self):
        broken = deepcopy(self.config)
        del broken['lockfile_path']
        sync = ServerSync(broken)
        self.assertFalse(sync.connection_status)

    def test_bad_lockfile_path(self):
        broken = deepcopy(self.config)
        broken['lockfile_path'] = '/tmp/badfile.lock'
        sync = ServerSync(broken)
        self.assertFalse(sync.connection_status)
class TestLiveServer(LiveServerTestCase):
    # NOTE(review): hard-coded LAN address -- this test only passes on
    # the author's network; confirm before running elsewhere
    server_address = 'http://192.168.1.101/status/'

    def create_app(self):
        """Build the app instance flask-testing boots for the live server."""
        self.app = Flask(__name__)
        self.app.config.update(TESTING=True, LIVESERVER_PORT=8943)
        return self.app

    def test_server_is_up_and_running(self):
        # expects HTTP 200 from the externally served status page
        response = urllib2.urlopen(TestLiveServer.server_address)
        self.assertEqual(response.code, 200)
"""
class TestDebugServer(flaskTestCase):
def create_app(self):
app = Flask(__name__)
app.config['TESTING'] = True
return app
def test_some_json(self):
functions_to_test = (func for func in dir(APIFunctions) if not
func.startswith('_'))
for func in functions_to_test:
test_api = '/api/' + func
response = self.client.get(test_api)
print 'hi'
self.assertEquals(response.json, dict())
"""
if __name__ == '__main__':
unittest.main()
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,552
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/assets/services.py
|
import os
import logging
import urllib2
import urlparse
from collections import OrderedDict
from operator import itemgetter
from time import localtime, strftime
import datetime
from cStringIO import StringIO
from PIL import Image, ImageOps
import libsonic
import xmltodict
from serverstatus import app
import serverstatus.assets.exceptions as exceptions
LOGGER = logging.getLogger(__name__)
class Service(object):
    """
    Base class for external service wrappers (Subsonic, Plex, CrashPlan,
    ServerSync). Holds the service config, the connection-status mapping
    used by the frontend status buttons, and shared helpers.
    """

    def __init__(self, service_config):
        assert isinstance(service_config, dict)
        self.logger = LOGGER
        self.logger.debug(
            '{} class initialized'.format(self.__class__.__name__))
        self.service_config = service_config
        self._services_status_mapping = self._status_mappings_dict()
        # filled in by subclasses / later calls
        self._service_name = None
        self._connect_status = None
        self._server_full_url = None
        self._resolved_status_mapping = dict()
        self._temp_img_dir = app.config.get('TEMP_IMAGES', '/tmp')

    @property
    def service_name(self):
        """Short lowercase name of the service (set by subclasses)."""
        return self._service_name

    @property
    def status_mapping(self):
        """Frontend status-button mapping, refreshed on each access."""
        self._resolved_status_mapping = self._map_connection_status()
        return self._resolved_status_mapping

    @property
    def connection_status(self):
        """Connection status, re-tested on each access."""
        self._connect_status = self._test_server_connection()
        return self._connect_status

    @property
    def server_full_url(self):
        return self._server_full_url

    @property
    def external_url(self):
        """Configured external URL, or None when absent."""
        return self._get_config_attrib('external_url')

    @staticmethod
    def convert_date_fmt(date_str, fmt_str_in, fmt_str_out):
        """Convert date_str from fmt_str_in to fmt_str_out.

        :raises ValueError: when date_str does not match fmt_str_in
        """
        dt_value = datetime.datetime.strptime(date_str, fmt_str_in)
        return dt_value.strftime(fmt_str_out)

    def _test_server_connection(self):
        # overridden by subclasses; base returns None (status unknown)
        return

    def _map_connection_status(self):
        """Resolve the connection status into a frontend mapping dict.

        Keys into _status_mappings_dict() via str(connection_status), so
        bool results hit the 'True'/'False' entries and the special
        string statuses map to themselves.
        """
        service_name = self._service_name
        output = {service_name: dict()}
        try:
            output = {service_name: self._services_status_mapping[
                str(self.connection_status)]}
            output[service_name][
                'title'] = self._add_service_name_to_status_mapping()
            if self.external_url:
                output[service_name]['external_url'] = self.external_url
        except KeyError:
            # unknown status (e.g. None from the base class): leave empty
            pass
        return output

    def _add_service_name_to_status_mapping(self):
        """Title-case the service name ('server-sync' -> 'Server Sync')."""
        delim = '-'
        service_name = self._service_name
        if delim in service_name:
            title = service_name.split(delim)
            title = ' '.join([w.title() for w in title])
        else:
            title = service_name.title()
        return title

    def _get_config_attrib(self, attrib):
        """Return a config value, or None when the key is absent."""
        try:
            return self.service_config[attrib]
        except KeyError:
            # Config attribute not found
            return None

    @staticmethod
    def _status_mappings_dict():
        """Status key -> frontend button text/icon/css mapping.

        String keys match str() of the values returned by the
        _test_server_connection implementations (True/False or special
        status strings). A dict literal is used instead of the old
        dict(True=..., False=...) form, which is a SyntaxError on
        Python 3 (True/False are keywords there).
        """
        return {
            'False': dict(
                text='Offline',
                icon='icon-off icon-white',
                css_class='btn_mod btn btn-xs btn-danger',
            ),
            'True': dict(
                text='Online',
                icon='icon-ok icon-white',
                css_class='btn_mod btn btn-xs btn-success',
            ),
            'ServerSyncActive': dict(
                text='Online',
                icon='icon-download icon-white',
                css_class='btn_mod btn btn-xs btn-success',
            ),
            'BackupServerActive': dict(
                text='Active',
                icon='icon-upload icon-white',
                css_class='btn_mod btn btn-xs btn-success',
            ),
            'Waiting': dict(
                text='Pending',
                icon='icon-pause icon-white',
                css_class='btn_mod btn btn-xs btn-warning',
            ),
        }

    def _log_warning_for_missing_config_value(self, cls_name, config_val,
                                              default):
        # Log warning that a config value is missing from the config
        # file and a default is used instead
        self.logger.warning(
            ('{config_val} missing from config value for {cls_name}. '
             'Using {default} instead').
            format(cls_name=cls_name, default=default, config_val=config_val))

    @staticmethod
    def _convert_xml_to_json(resp_output):
        """Parse an XML string into a dict via xmltodict."""
        return xmltodict.parse(resp_output)

    @staticmethod
    def _build_external_img_path(service_name):
        """Base external image URL for a service, e.g. 'img/plex?'."""
        base_path = 'img/'
        return ''.join([base_path, service_name, '?'])

    def _test_file_path(self, file_path_key):
        """Return the configured path when it exists on disk, else None."""
        output = None
        try:
            file_path = self.service_config[file_path_key]
            if os.path.exists(file_path):
                output = file_path
        except KeyError as err:
            self.logger.error(err)
        finally:
            return output
class SubSonic(Service):
    """Wrapper around the Subsonic music server API (via libsonic)."""

    def __init__(self, server_info):
        Service.__init__(self, server_info)
        self._service_name = 'subsonic'
        self.conn = libsonic.Connection(baseUrl=self.service_config['url'],
                                        username=self.service_config['user'],
                                        password=self.service_config[
                                            'password'],
                                        port=self.service_config['port'],
                                        appName=self.service_config['appname'],
                                        apiVersion=self.service_config['api'],
                                        serverPath=self.service_config[
                                            'serverpath'])
        self._connect_status = self._test_server_connection()
        self._server_full_url = self._get_server_full_url()
        self._resolved_status_mapping = self._map_connection_status()
        # external cover-art URL prefix, completed with '<id>&size=<px>'
        self._img_base_url = self._build_external_img_path(
            self._service_name) + 'cover='

    def recently_added(self, num_results=None):
        """
        Return recently added albums.

        :param num_results: number of results to return (default 10)
        :type num_results: int
        :return: list of [dict]
        """

        def recently_added_generator(num):
            recently_added = self.conn.getAlbumList("newest", num)['albumList'][
                'album']
            for album in recently_added:
                yield album

        if num_results is None:
            num_results = 10
        return [self._get_entry_info(entry, min_size=145, max_size=500) for
                entry in recently_added_generator(num_results)]

    def get_cover_art(self, cover_art_id, size=None):
        """
        Return cover-art image data for the given id.

        :param size: optional square size in pixels (capped at 2000);
            None, non-int or non-positive sizes return the original image
        """
        assert isinstance(cover_art_id, int)
        # short-circuit order matters: check the type before comparing
        if size is None or type(size) is not int or size <= 0:
            return self.conn.getCoverArt(aid=cover_art_id)
        # set max limit on size of photo returned
        size = min(size, 2000)
        return self.conn.getCoverArt(aid=cover_art_id, size=size)

    def now_playing(self):
        """
        Return now-playing entries from the Subsonic server as a list of
        dicts (one per currently playing song), or None when nothing has
        been touched in the last 10 minutes.

        :returns: list of [dict] or None
        """
        entries = []
        now_playing = self.conn.getNowPlaying()
        try:
            many_songs_playing = isinstance(now_playing['nowPlaying']['entry'],
                                            list)
        except (KeyError, TypeError):
            # no songs playing
            return None
        if many_songs_playing:
            # multiple songs playing
            entries = [self._get_entry_info(entry) for entry in
                       now_playing['nowPlaying']['entry']]
        elif not many_songs_playing:
            # single song playing
            entries.append(
                self._get_entry_info(now_playing['nowPlaying']['entry']))
        # drop entries the user hasn't touched (or that auto-advanced)
        # in the last 10 minutes
        results = [self._get_entry_info(entry, max_size=800) for entry in
                   entries if entry['minutesAgo'] <= 10]
        if results:
            return results
        return None

    def set_output_directory(self, directory):
        # //TODO remove extraneous code
        self._temp_img_dir = directory
        return self._temp_img_dir == directory

    def _test_server_connection(self):
        """
        Test whether the Subsonic server is reachable.

        :return: bool
        """
        connection_status = False
        try:
            connection_status = self.conn.ping()
            assert connection_status
        except AssertionError:
            # best-effort: log and report offline rather than raising
            err = 'Unable to reach Subsonic server'
            self.logger.error(err)
        finally:
            return connection_status

    def _create_cover_art_file(self, cover_art_id, size=None):
        """
        Fetch cover art and cache it as a jpg under the temp image dir.

        Subsonic returns a square image with pixel dimensions equal to
        ``size``.

        :param size: square dimension in pixels (default 600)
        :return: full file path, or None when the cache dir can't be made
        """
        if size is None:
            size = 600
        img_data = self.conn.getCoverArt(aid=cover_art_id, size=size)
        cover_dir = self._temp_img_dir  # temp storage for image files
        filename = 'cover'
        ext = '.jpg'
        short_filepath = filename + str(cover_art_id) + '_' + str(size) + ext
        full_filepath = os.path.join(cover_dir, short_filepath)
        if not os.path.exists(cover_dir):
            # attempt to create the cache directory
            try:
                os.mkdir(cover_dir)
            except OSError:
                # os.mkdir raises OSError (not IOError) on failure
                self.logger.error(
                    'Failed to create cover art directory: {}'.format(
                        full_filepath))
                return
        if not os.path.isfile(full_filepath):
            self.logger.info('Write cover art file: {}'.format(full_filepath))
            with open(full_filepath, 'wb') as img_file:
                img_file.write(img_data.read())
        return full_filepath

    def _get_entry_info(self, entry, min_size=None, max_size=None):
        """
        Append cover-art URLs, a formatted created date, and playback
        progress to a Subsonic entry dict (mutates and returns it).

        :param entry: subsonic entry
        :type entry: dict
        :param min_size: thumbnail size in px (default 145)
        :param max_size: full-size image size in px (default 1200)
        :return: dict
        """
        assert type(entry) == dict
        # default sizes only when not supplied; the old code overwrote
        # caller-supplied values and left 'size=None' in URLs when unset
        if min_size is None:
            min_size = 145
        if max_size is None:
            max_size = 1200
        # url links to thumbnail and full-size coverart
        cover_art_link = [''.join([self._img_base_url,
                                   str(entry.get('coverArt', entry['id'])),
                                   '&size=',
                                   str(size)]) for size in (min_size, max_size)]
        entry.update(coverArtExternalLink_sm=cover_art_link[0],
                     coverArtExternalLink_xl=cover_art_link[1])
        try:
            created_date = self.convert_date_fmt(entry[u'created'],
                                                 '%Y-%m-%dT%H:%M:%S.%fZ',
                                                 '%m/%d/%Y %I:%M%p')
        except ValueError as dt_conv_err:
            self.logger.error('Error converting date: {}'.format(dt_conv_err))
        else:
            entry[u'created'] = created_date
        try:
            # Subsonic's API exposes no playback position; approximate
            # progress as minutes-since-last-touch / song duration
            entry['progress'] = min(
                float(entry['minutesAgo']) / float(entry['duration'] / 60), 1)
        except KeyError:
            entry['progress'] = 1
        finally:
            entry.update(progress_pct='{:.2%}'.format(entry['progress']),
                         progress=entry['progress'] * 100)
        return entry

    def _get_server_full_url(self):
        """Internal server URL: 'http://host:port/<first path segment>'."""
        serverpath = self.service_config['serverpath'].strip('/')
        try:
            serverpath, _ = serverpath.split('/')
        except ValueError as err:
            # serverpath is a single segment (or 3+): keep it as-is
            self.logger.warning(
                'Issue parsing Subsonic server path: {}'.format(err))
        return '{url}:{port:d}/{path}'.format(url=self.service_config['url'],
                                              port=self.service_config['port'],
                                              path=serverpath)
class CheckCrashPlan(Service):
    """Reports CrashPlan backup state by scanning its app log file."""

    def __init__(self, server_info):
        Service.__init__(self, server_info)
        self._service_name = 'backups'
        self.file_path = self._test_file_path('logfile_path')
        self._connect_status = self._test_server_connection()
        self._resolved_status_mapping = self._map_connection_status()

    def _test_server_connection(self):
        """Derive status from the 'scanning'/'backupenabled' log flags.

        Returns 'BackupServerActive' when every matched flag is true,
        'Waiting' when only some are, and False when none are.
        """
        keywords = ['scanning', 'backupenabled']
        matched = []
        with open(self.file_path, 'r') as log_file:
            for raw_line in log_file.readlines():
                lowered = raw_line.lower()
                for keyword in keywords:
                    if keyword in lowered:
                        matched.append(lowered.split())
        flag_values = []
        for tokens in matched:
            tokens.remove('=')  # drop the "key = value" separator token
            flag_values.append(tokens[1] == 'true')
        if all(flag_values):
            return 'BackupServerActive'
        if any(flag_values):
            return 'Waiting'
        return False
class ServerSync(Service):
    """Reports file-sync activity via the presence of a lockfile."""

    def __init__(self, sync_config):
        Service.__init__(self, sync_config)
        self.server_info = sync_config
        self.lockfile_path = self.server_info.get('lockfile_path', None)
        self._service_name = 'server-sync'
        self._connect_status = self._test_server_connection()
        self._resolved_status_mapping = self._map_connection_status()

    def _test_server_connection(self):
        """True when the sync lockfile exists on disk, else False."""
        try:
            return os.path.exists(self.lockfile_path)
        except TypeError:
            # lockfile_path is None (missing from the config)
            self.logger.debug('Server Sync Lockfile does not exist at {}'.
                              format(self.lockfile_path))
            return False
class Plex(Service):
"""
Note: Plex requires a PlexPass for access to the server API. Plex won't
allow you to connect to API otherwise
Provides media metadata information from Plex
"""
url_scheme = 'http://'
    def __init__(self, server_config):
        """
        :param server_config: Plex section of the app config; must contain
            'external_url', may contain 'internal_url', 'internal_port',
            'local_network_auth'
        :type server_config: dict
        :raises: exceptions.MissingConfigValue when 'external_url' is missing
        """
        Service.__init__(self, server_config)
        assert type(server_config) is dict
        self.service_config = server_config
        self._service_name = 'plex'
        # property access: resolves to e.g. 'http://localhost:32400'
        self.server_internal_url_and_port = self._get_full_url_and_port
        try:
            self._server_full_url = server_config['external_url']
        except KeyError as err:
            self.logger.error(
                'Missing config value {config_value} from {cls}'.format(
                    config_value='external_url',
                    cls=self.__class__.__name__))
            raise exceptions.MissingConfigValue(err)
        self._connect_status = self._test_server_connection()
        self._resolved_status_mapping = self._map_connection_status()
        self._transcodes = 0
        # maps obfuscated cover-art ids -> internal Plex thumb paths
        self._cover_mapping = dict()
        self._img_base_url = self._build_external_img_path(self._service_name)
    def recently_added(self, num_results=None):
        """
        Return recently added media from Plex, newest first.

        :param num_results: max items per category; defaults to 6 when
            missing or not an int
        :type num_results: int or unknown
        :return: dict with 'Movies' and 'TVShows' lists, or None when the
            server is unreachable
        """
        def process_video_data(videos):
            # sort the recently added list by date in descending order
            videos = sorted(videos, key=itemgetter('@addedAt'), reverse=True)
            # trim the list to the number of results we want
            videos_trimmed = videos[:num_results]
            return [self._get_video_data(video) for video in videos_trimmed]

        if not self._connect_status:
            return None
        if any([num_results is None, type(num_results) is not int]):
            # fall back to the default result count when the caller did not
            # supply a usable int
            num_results = 6
        api_call = 'recentlyadded'
        json_data = self._get_xml_convert_to_json(api_call)
        # the media values we want are contained in lists, so loop through the
        # MediaContainer, find the lists of data, and return each value in
        # the lists. Movies and Shows arrive in separate lists; seasons mark
        # TV shows.
        movies = [media for value in json_data['MediaContainer'] if
                  type(json_data['MediaContainer'][value]) == list for
                  media in json_data['MediaContainer'][value] if
                  media['@type'] != 'season']
        tv_shows = [media for value in json_data['MediaContainer'] if
                    type(json_data['MediaContainer'][value]) == list for
                    media in json_data['MediaContainer'][value] if
                    media['@type'] == 'season']
        # remove extra data
        del json_data
        return dict(Movies=process_video_data(movies),
                    TVShows=process_video_data(tv_shows))
    def now_playing(self):
        """
        Returns now playing data from the Plex server.

        :return: list of per-video dicts, or None when nothing is playing
        """
        def generate_video_data(vid_data, api_call=None):
            """
            Generator yielding normalized video dicts from Plex JSON data.
            Handles both a single video (OrderedDict) and multiple videos
            (list of OrderedDicts).
            """
            # In JSON form Plex returns multiple videos as a list of
            # OrderedDicts, and a single video as an OrderedDict.
            # Convert the single video to a list for processing.
            if isinstance(vid_data, OrderedDict):
                video_list = list()
                video_list.append(vid_data)
            elif isinstance(vid_data, list):
                video_list = vid_data
            else:
                # Plex returned data that we haven't seen before.
                # Raise exception to warn user.
                msg = (
                    'Plex returned API data that does not match to known '
                    'standards.Plex return data as {} when it should return a '
                    'list or OrderedDict').format(type(vid_data))
                self.logger.error(msg)
                raise exceptions.PlexAPIDataError(msg)
            for video in video_list:
                # Grab relevant data about Video from JSON data; passing the
                # API call enables progress calculation for playing items
                yield self._get_video_data(video, api_call)
            return

        self._transcodes = 0  # reset serverinfo count
        api_call = 'nowplaying'
        now_playing_relevant_data = list()
        json_data = self._get_xml_convert_to_json(api_call)
        if not int(json_data['MediaContainer']['@size']):
            # Nothing is currently playing in plex
            return None
        for vid in generate_video_data(json_data['MediaContainer']['Video'],
                                       api_call):
            now_playing_relevant_data.append(vid)
        return now_playing_relevant_data
def get_cover_image(self, plex_id, thumbnail=None, local=None):
"""
Returns binary jpeg object for Plex item found local temp directory as
set in config file. Checks request argument against mapped value from
Plex item ID
:param plex_id: metadata coverart ID that corresponds to mapping
dictionary
:type plex_id: str
:param thumbnail: boolean values that tells us to return thumbnail
image if True. Returns full scale image if False
:type thumbnail: bool or NoneType
:param local: boolean value that tells us to pull image from Plex
server or return local copy
:type local: bool or NoneType
:return: binary
:raises: exceptions.PlexImageError
"""
def open_image(ext):
try:
return open(os.path.join(self._temp_img_dir, plex_id + ext),
'rb')
except IOError as img_err:
raise exceptions.PlexImageError(img_err)
thumbnail = thumbnail is not None
local = local is not None
if self._cover_mapping is None:
# if _cover_mapping is empty we need to initialize Now Playing
self.now_playing()
if thumbnail:
resp = open_image('.thumbnail')
elif local:
resp = open_image('.jpg')
else:
try:
resp = urllib2.urlopen(
urlparse.urljoin(self.server_internal_url_and_port,
self._cover_mapping[plex_id]))
except (TypeError, urllib2.HTTPError) as err:
raise exceptions.PlexImageError(err)
return resp
@property
def transcodes(self):
"""
Returns number of current number of Plex transcode sessions
>>> 0
>>> 1
:return: int
"""
server_info = self.plex_server_info()
self._transcodes = server_info.get('transcoderActiveVideoSessions', 0)
return self._transcodes
    def plex_server_info(self):
        """
        Return top-level Plex server attributes as a dict.

        Pulls '@'-prefixed string attributes off the MediaContainer, strips
        the '@', and coerces values to int (or a list of ints for
        comma-separated values) where possible; other strings pass through.
        NOTE: relies on the Python 2 'unicode' built-in.
        :return: dict
        """
        json_show_data = self._get_xml_convert_to_json('serverinfo')
        server_data = json_show_data.get('MediaContainer', None)
        data_dict = {str(key.strip('@')): server_data[key] for key in
                     server_data if type(server_data[key]) is unicode or
                     type(server_data[key]) is str}
        for key in data_dict:
            try:
                data_dict[key] = int(data_dict[key])
            except ValueError:
                # comma-separated numeric values become a list of ints;
                # anything else stays a string
                if ',' in data_dict[key]:
                    split_values = data_dict[key].split(',')
                    data_dict[key] = [int(val) for val in split_values]
        return data_dict
def _test_server_connection(self):
"""
Test if connection to Plex is active or not
>>> True
>>> False
:return: bool
"""
resp = None
try:
if self.service_config['local_network_auth']:
# local network authentication required
# // TODO Need to complete code for authorization if necessary
pass
except KeyError:
pass
resp = self._get_plex_api_data('serverinfo')
is_connectable = resp is not None
if not is_connectable:
self.logger.error('Could not connect to Plex server')
return is_connectable
def _get_api_url_suffix(self, url_suffix):
"""
https://code.google.com/p/plex-api/wiki/PlexWebAPIOverview
contains information required Plex HTTP APIs
serverinfo: Transcode bitrateinfo, myPlexauthentication info
nowplaying: This will retrieve the "Now Playing" Information of the PMS.
librarysections: Contains all of the sections on the PMS. This acts as
a directory and you are able to "walk" through it.
prefs: Gets the server preferences
servers: get the local List of servers
ondeck: Show ondeck list
channels_all: Returns all channels installed in Plex Server
channels_recentlyviewed: Get listing of recently viewed channels
recentlyadded: Gets listing of recently added media, in descending
order by date added
metadata: Returns metadata from media, e.g. /library/metadata/<val>
when <val> is an integer tied to a specific episode or movie
>>> '/library/recentlyAdded'
:param the_data_were_looking_for:
:return:
"""
url_api_mapping = dict(
serverinfo='/',
nowplaying='/status/sessions',
librarysections='/library/sections',
prefs='/:/prefs',
servers='/servers',
ondeck='/library/onDeck',
channels_all='/channels/all',
recentlyadded='/library/recentlyAdded',
metadata='/library/metadata/'
)
try:
results = url_api_mapping[url_suffix]
except KeyError as err:
self.logger.error(err)
raise exceptions.PlexAPIKeyNotFound(err)
return results
@property
def _get_full_url_and_port(self):
"""
builds out internal url with port
>>> 'http://localhost:32400'
>>> 'http://192.168.0.1:32400'
:return: str
"""
port = str(self.service_config.get('internal_port', '32400'))
if port != self.service_config.get('internal_port') or str(port) != \
self.service_config.get('internal_port'):
self._log_warning_for_missing_config_value(
cls_name=self.__class__.__name__, default=port,
config_val='port')
try:
internal_url = self.service_config['internal_url'].replace(
Plex.url_scheme, '').lstrip('/')
except KeyError:
internal_url = 'localhost'
self._log_warning_for_missing_config_value(
cls_name=self.__class__.__name__,
default=internal_url, config_val='internal_url')
return ''.join([Plex.url_scheme, internal_url, ':', port])
    def _get_plex_api_data(self, api_call, api_suffix=None):
        """
        Call plex api, and return XML data
        For /status/sessions:
        >>> '<MediaContainer size="0"></MediaContainer>'
        :param api_call: short endpoint name understood by
            _get_api_url_suffix
        :param api_suffix: optional extra path appended to the endpoint
        :return: str
        :raises: exceptions.PlexConnectionError
        """
        # NOTE: urllib2/urlparse are the Python 2 modules
        if api_suffix is None:
            # no extra api call for this
            api_suffix = ''
        try:
            full_api_call = ''.join(
                [self._get_api_url_suffix(api_call), api_suffix])
            resp = urllib2.urlopen(
                urlparse.urljoin(self.server_internal_url_and_port,
                                 full_api_call))
            output = resp.read()
        except urllib2.URLError as err:
            self.logger.error('Error connecting to Plex')
            raise exceptions.PlexConnectionError(err)
        else:
            # only close (and return) on success; failures raise above
            resp.close()
            return output
def _get_xml_convert_to_json(self, api_key, api_suffix=None):
"""
Gets Plex data based on api key and converts Plex XML response to JSON
format
:type api_key: str
:type api_suffix: unknown or str
:return:
"""
xml_data = self._get_plex_api_data(api_key, api_suffix)
return self._convert_xml_to_json(xml_data)
    def _get_video_data(self, video, get_type=None):
        """
        Normalize one Plex video node (XML-as-JSON '@'-attribute dict) into
        the dict consumed by templates, caching its cover art locally.
        :param video: dict of one video/show entry from Plex
        :param get_type: originating API call; 'nowplaying' adds progress
        :return: dict
        :raises: exceptions.PlexAPIDataError for unknown media types
        """
        is_now_playing = get_type == 'nowplaying'
        # need a separate dict for section mapping since Plex returns different
        # data for Now Playing and Recently Added
        library_section_mapping = {'1': 'Movies', '2': 'TV Shows'}
        # all the video.gets below are to handle the different mappings
        # Plex sends for Now Playing/Recently Added.
        vidtype = video.get('@librarySectionTitle',
                            library_section_mapping.get(
                                video.get('@librarySectionID', 0)))
        if vidtype == 'TV Shows':
            video_data = self._get_tv_show_data(video, get_type)
        elif vidtype == 'Movies':
            release_date = video['@originallyAvailableAt']
            video_data = dict(showtitle=video['@title'],
                              summary=video['@summary'],
                              releasedate=self.convert_date_fmt(release_date,
                                                                '%Y-%m-%d',
                                                                '%m/%d/%Y'))
        else:
            # encountered an unexpected video type
            msg = 'Unexpected media type {} encountered'.format(vidtype)
            self.logger.error(msg)
            raise exceptions.PlexAPIDataError(msg)
        if is_now_playing:
            # only applicable if we want to retrieve now playing data from Plex
            plex_path_to_art = video.get('@grandparentThumb', video['@thumb'])
            try:
                # this is only relevant for videos that are currently playing
                video_data['progress'] = (float(video['@viewOffset']) / float(
                    video['@duration'])) * 100.0
            except KeyError:
                # video's not playing - not an issue
                video_data['progress'] = 0
        else:
            plex_path_to_art = video['@thumb']
        # cache the art locally, then add common elements to the video dict
        self._save_cover_art(self.server_internal_url_and_port +
                             plex_path_to_art)
        arturlmapped_value = os.path.basename(plex_path_to_art)
        video_data.update(type=vidtype,
                          art_external_url=''.join([self._img_base_url,
                                                    arturlmapped_value]),
                          added_at=strftime('%m/%d/%Y %I:%M %p',
                                            localtime(int(video['@addedAt']))))
        # converts direct plex http link to thumbnail to internal mapping
        # security through obfuscation /s
        self._cover_mapping[arturlmapped_value] = plex_path_to_art
        video_data['rating'] = float(video.get('@rating', 0))
        return video_data
    def _save_cover_art(self, cover_loc):
        """
        Download Plex cover art and cache a full-size .jpg plus a .thumbnail
        in the temp image directory; existing files are left untouched.
        :param cover_loc: Plex art path/URL for the item
        :return: str - path to the cached full-size .jpg
        :raises: OSError when the temp directory cannot be created
        """
        # retrieve image data from Plex server metadata
        img_data = StringIO(urllib2.urlopen(
            urlparse.urljoin(self.server_internal_url_and_port,
                             cover_loc)).read())
        # check if temp directory exists, if not attempt to create directory
        if not os.path.exists(self._temp_img_dir):
            try:
                os.mkdir(self._temp_img_dir)
                self.logger.info('Creating temporary image directory {}'.
                                 format(self._temp_img_dir))
            except OSError as err:
                self.logger.error(('Failure creating temporary image directory'
                                   ' {}.\nError message {}').format(
                    self._temp_img_dir, err))
                raise
        img = Image.open(img_data)
        exts = ('.jpg', '.thumbnail')
        sizes = [(568, 852), (144, 214)]
        # create filepaths to temp images in temp directory, keyed by the
        # last path segment of the art location
        img_filepaths = [os.path.join(self._temp_img_dir, ''.join(
            [str(cover_loc.split('/')[-1]), ext])) for ext in exts]
        # index 0 = size tuple
        # index 1 = path to file
        size_and_fps = zip(sizes, img_filepaths)
        for img_file in size_and_fps:
            # preserve original file for multiple manipulations
            temp_img = img.copy()
            size = img_file[0]
            filepath = img_file[1]
            if not os.path.exists(filepath):
                # create plex cover art file if file does not exist
                try:
                    temp_img = ImageOps.fit(image=temp_img, size=size,
                                            method=Image.ANTIALIAS)
                    temp_img.save(filepath, "JPEG")
                    self.logger.info(
                        'Write image file: {}'.format(filepath))
                except IOError as pil_err:
                    self.logger.error(
                        'Image file write failure at {}. Reason: {}'.
                        format(filepath, pil_err))
            else:
                self.logger.debug('Image file already exists at: {}'.
                                  format(filepath))
        return img_filepaths[0]
    def _get_tv_show_data(self, video, get_type=None):
        """
        Build the TV-show portion of a video dict (title, season, episode,
        aired date, summary) from a Plex JSON node.
        :param video: '@'-attribute dict for a show/season/episode
        :param get_type: 'nowplaying' changes which keys are consulted
        :return: dict
        """
        is_now_playing = get_type == 'nowplaying'
        video_data = dict(showtitle=
                          video.get('@parentTitle',
                                    video.get('@grandparentTitle')),
                          episode_number=int(video.get('@leafCount',
                                                       video.get('@index'))),
                          summary=video.get('@parentSummary',
                                            video.get('@summary'))
                          if video['@summary'] != '' else 'Not available',
                          season=video['@title'] if
                          video['@title'].lower() == 'specials'
                          else int(video['@title'].lstrip('Season ')) if not
                          is_now_playing else int(video['@parentIndex']))
        if isinstance(video_data['season'], int):
            # zero-pad numeric seasons for display, e.g. 3 -> '03'
            video_data['season'] = '{0:02d}'.format(video_data['season'])
        if not is_now_playing:
            # fetch the episode-level metadata for recently-added seasons
            json_show_data = self._get_xml_convert_to_json('serverinfo',
                                                           video['@key'].
                                                           lstrip('/'))
            video = json_show_data['MediaContainer']
            video_data.update(rating=video.get('@grandparentContentRating', ''),
                              studio=video['@grandparentStudio'])
            try:
                # if there's more than one episode in the season
                video = video['Video'][
                    int(video_data['episode_number']) - 1]
            except KeyError:
                # first show in season
                video = video['Video']
        # get originally date playing on TV
        try:
            aired_date = video['@originallyAvailableAt']
            aired_date = self.convert_date_fmt(aired_date, "%Y-%m-%d",
                                               "%m/%d/%Y")
        except KeyError:
            aired_date = 'Not available'
        video_data.update(title=video['@title'], aired_date=aired_date)
        # Set individual show summary to parent summary if show summary does
        # not exist
        if video['@summary'] != '':
            video_data['summary'] = video['@summary']
        return video_data
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,553
|
beallio/media-server-status
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
"""Packaging script for server-status."""
import os

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# read the README next to this file for the long description
README_NAME = 'README.md'
_readme_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            README_NAME)
with open(_readme_path, 'r') as readme_fh:
    readme_contents = readme_fh.read() or ''

setup(name='server-status',
      version='0.0.1',
      author='David Beall',
      author_email='david@beallio.com',
      url='http://ww.beallio.com',
      description='Server Status',
      long_description='{}'.format(readme_contents),
      packages=['serverstatus'],
      package_dir={'serverstatus': 'serverstatus'},
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: System Administrators',
          'Intended Audience :: Information Technology',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Natural Language :: English',
          'Operating System :: POSIX',
          'Programming Language :: Python',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Software Development :: Libraries',
          'Topic :: System',
      ])
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,554
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/assets/wrappers.py
|
"""
function wrappers module
"""
import logging
from inspect import stack, getmodule
def logger(log_type):
    """
    Decorator factory that logs the wrapped function's return value.

    :param log_type: logger level method name as string ('debug', 'info',
        'warning', ...); an unknown level is reported via logger.error
    :type log_type: str
    """
    from functools import wraps

    def log_decorator(func):
        """
        wrapped function
        """
        @wraps(func)  # preserve func's name/docstring on the wrapper
        def wrapped(*args, **kwargs):
            # preserve calling module name for LOGGER
            frm = stack()[1]
            mod = getmodule(frm[0])
            # getmodule can return None (e.g. exec'd/interactive frames);
            # fall back to this module's logger instead of crashing
            logger_name = mod.__name__ if mod is not None else __name__
            wrapped_logger = logging.getLogger(logger_name)
            result = func(*args, **kwargs)
            try:
                getattr(wrapped_logger, log_type)(result)
            except AttributeError as err:
                wrapped_logger.error(err)
            return result
        return wrapped
    return log_decorator
def log_args(function):
    """
    Logs arguments passed to function
    NOTE: uses the Python 2 print statement; this module is Python 2 only.
    """
    def wrapper(*args, **kwargs):
        # print positional and keyword args before delegating
        print 'Arguments:', args, kwargs
        return function(*args, **kwargs)
    return wrapper
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,555
|
beallio/media-server-status
|
refs/heads/master
|
/serverstatus/assets/weather.py
|
# coding=utf-8
from collections import namedtuple
from time import localtime, strftime
import logging
import forecastio
from serverstatus.assets.exceptions import MissingForecastIOKey
LOGGER = logging.getLogger(__name__)
class Forecast(object):
    """Fetches and formats weather data from the Forecast.io API."""

    def __init__(self, weather_config):
        """
        :param weather_config: weather section of the app config; must
            contain 'Forecast_io_API_key', may contain 'Latitude',
            'Longitude', 'units'
        :type weather_config: dict
        :raises: MissingForecastIOKey when no API key is configured
        """
        assert type(weather_config) is dict
        self.logger = LOGGER
        self.logger.debug(
            '{} class initialized'.format(self.__class__.__name__))
        self.forcastio_link_url = 'http://forecast.io/#/f/'
        try:
            self.api_key = weather_config['Forecast_io_API_key']
        except KeyError:
            raise MissingForecastIOKey('No ForecastIO API key found. API key required for weather data')
        # default weather to Stanford, CA and US units
        # BUG FIX: Stanford is in the western hemisphere, so the default
        # longitude must be negative; +122.1653 pointed to eastern China.
        self.lat = weather_config.get('Latitude', 37.4225)
        self.lng = weather_config.get('Longitude', -122.1653)
        # NOTE(review): other keys are capitalized ('Latitude'); confirm the
        # config really uses lowercase 'units'
        self.units = weather_config.get('units', 'us')
        self.forecast = self._get_forecast_io()

    def get_data(self):
        """Return a dict of formatted current/hourly/daily weather values."""
        json = self.forecast.json
        current = json['currently']
        hourly = json['hourly']
        minutely = json['minutely']
        daily = json['daily']['data'][0]
        output = dict(current_summary=current['summary'],
                      current_summary_icon=self._get_weather_icons(current['icon']),
                      current_temp=u'{:0.0f}°'.format(round(current['temperature'], 0)),
                      feels_like_temp=u'{:0.0f}°'.format(round(current['apparentTemperature'], 0)),
                      current_windspeed='{:0.0f}'.format(round(current['windSpeed'], 0)),
                      minutely_summary=minutely['summary'],
                      hourly_summary=hourly['summary'],
                      sunset=self._convert_time_to_text(daily['sunsetTime']),
                      sunrise=self._convert_time_to_text(daily['sunriseTime']),
                      url_link='{url}{lat},{lng}'.format(
                          url=self.forcastio_link_url,
                          lat=self.lat, lng=self.lng))
        # BUG FIX: 'current_windspeed' is a formatted string, so the
        # original comparison against the integer 0 was always true;
        # compare against the formatted zero string instead.
        if output['current_windspeed'] != '0':
            output['current_windbearing'] = self._get_wind_bearing_text(current['windBearing'])
        return output

    def reload_data(self):
        """Refresh the cached forecast from the API."""
        self.forecast.update()

    def _get_forecast_io(self):
        # fetch the forecast for the configured location and units
        return forecastio.load_forecast(self.api_key, self.lat, self.lng,
                                        units=self.units)

    @staticmethod
    def _get_weather_icons(weather_icon):
        """Map a Forecast.io icon name to the weather-font glyph."""
        # NOTE: relies on the Python 2 'unicode' built-in
        assert type(weather_icon) is unicode
        weather_icon = weather_icon.replace("-", "_")
        weather_mappings = dict(clear_day='B',
                                clear_night='C',
                                rain='R',
                                snow='W',
                                sleet='X',
                                wind='F',
                                fog='L',
                                cloudy='N',
                                partly_cloudy_day='H',
                                partly_cloudy_night='I')
        assert weather_icon in weather_mappings
        return weather_mappings[weather_icon]

    @staticmethod
    def _get_wind_bearing_text(degrees):
        """
        Convert a wind bearing in degrees to (cardinal, abbrev) text.

        :param degrees: direction the wind is coming from, 0-360
        :return: namedtuple with 'cardinal' and 'abbrev' fields
        """
        # normalize windbearing so N starts at 0 degrees
        deg_norm = (float(degrees) + 11.25) / 22.5
        # convert range of windbearing degrees to lookup patterns
        deg_norm_lookup = int(deg_norm) + int((deg_norm // 1) > 0)
        direction_mappings = {1: ('North', 'N'),
                              2: ('North-northeast', 'NNE'),
                              3: ('Northeast', 'NE'),
                              4: ('East-northeast', 'ENE'),
                              5: ('East', 'E'),
                              6: ('East-southeast', 'ESE'),
                              7: ('Southeast', 'SE'),
                              8: ('South-southeast', 'SSE'),
                              9: ('South', 'S'),
                              10: ('South-southwest', 'SSW'),
                              11: ('Southwest', 'SW'),
                              12: ('West-southwest', 'WSW'),
                              13: ('West', 'W'),
                              14: ('West-northwest', 'WNW'),
                              15: ('Northwest', 'NW'),
                              16: ('North-northwest', 'NNW')}
        try:
            bearing_text = direction_mappings[int(deg_norm_lookup)]
        except KeyError:
            # Key values exceeds max in dictionary, which means it's blowing North
            bearing_text = direction_mappings[1]
        # output namedtuple for Cardinal direction, and abbrevation text
        return namedtuple(typename='bearing_text', field_names=['cardinal', 'abbrev'])._make(bearing_text)

    @staticmethod
    def _convert_time_to_text(time_var):
        """Format a unix timestamp as local 'H:MM AM/PM' text."""
        assert type(time_var) is int
        time_var = strftime('%I:%M %p', localtime(time_var))
        # Remove '0' values from time if less than 10hrs or 10mins
        if time_var.startswith('0'):
            time_var = time_var[1:]
        return time_var
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,556
|
beallio/media-server-status
|
refs/heads/master
|
/wsgi.py
|
"""
APACHE MOD_WSGI Load script
Some of the variables in this file may need to be adjusted depending on
server setup and/or location of virtual environment and application
"""
import sys
import os
PROJECT_DIR = '/var/www/status' # change to the root of your app
# 'venv/bin' is the location of the project's virtual environment
VIRTUAL_ENV_DIR = 'venv/bin'
PACKAGES = 'lib/python2.7/site-packages'
activate_this = os.path.join(PROJECT_DIR, VIRTUAL_ENV_DIR, 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
sys.path.append(PROJECT_DIR)
sys.path.append(os.path.join(PROJECT_DIR, VIRTUAL_ENV_DIR, PACKAGES))
|
{"/serverstatus/views.py": ["/serverstatus/__init__.py"], "/serverstatus/assets/apifunctions.py": ["/serverstatus/assets/weather.py", "/serverstatus/assets/services.py", "/serverstatus/assets/sysinfo.py", "/serverstatus/assets/wrappers.py"], "/serverstatus/assets/weather.py": ["/serverstatus/assets/exceptions.py"]}
|
8,557
|
mbijou/car-repair-shop-backend
|
refs/heads/master
|
/company/serializers/__init__.py
|
from rest_framework import serializers
from company.models import Company
from django.db.transaction import atomic
class CompanySerializer(serializers.ModelSerializer):
    """Serializer for the singleton Company record (always pk=1)."""

    class Meta:
        model = Company
        fields = ("name",)

    def save(self, **kwargs):
        # ensure the singleton row exists before the normal save flow runs
        Company.save_company()
        return super().save(**kwargs)

    @atomic
    def create(self, validated_data):
        # overwrite the singleton's fields in place so a second Company row
        # can never be created
        company = Company.get_company()
        company.__dict__.update(validated_data)
        company.save()
        return company
|
{"/company/serializers/__init__.py": ["/company/models.py"], "/company/viewsets/__init__.py": ["/company/models.py", "/company/serializers/__init__.py"], "/company/urls.py": ["/company/viewsets/__init__.py"], "/company/admin.py": ["/company/models.py"]}
|
8,558
|
mbijou/car-repair-shop-backend
|
refs/heads/master
|
/company/viewsets/__init__.py
|
from rest_framework import views
from rest_framework.mixins import ListModelMixin
from rest_framework.viewsets import ModelViewSet
from rest_framework.response import Response
from company.models import Company
from company.serializers import CompanySerializer
from django.db.transaction import atomic
class CompanyViewSet(ModelViewSet):
    """ViewSet exposing the singleton Company record."""
    serializer_class = CompanySerializer
    queryset = Company.objects.all()

    # TODO Allow only one company to be created
    def list(self, request, *args, **kwargs):
        # serve the singleton directly when it exists; otherwise defer to
        # the default (empty) list behavior
        company = Company.get_company()
        if company is not None:
            return Response(self.serializer_class(company).data)
        return super().list(request, *args, **kwargs)
|
{"/company/serializers/__init__.py": ["/company/models.py"], "/company/viewsets/__init__.py": ["/company/models.py", "/company/serializers/__init__.py"], "/company/urls.py": ["/company/viewsets/__init__.py"], "/company/admin.py": ["/company/models.py"]}
|
8,559
|
mbijou/car-repair-shop-backend
|
refs/heads/master
|
/company/urls.py
|
from django.urls import path
from company.viewsets import CompanyViewSet
from rest_framework import routers

# NOTE(review): 'path' import appears unused
# expose CompanyViewSet under /company/ via DRF's default router URLs
router = routers.SimpleRouter()
router.register(r'company', CompanyViewSet)
urlpatterns = router.urls
|
{"/company/serializers/__init__.py": ["/company/models.py"], "/company/viewsets/__init__.py": ["/company/models.py", "/company/serializers/__init__.py"], "/company/urls.py": ["/company/viewsets/__init__.py"], "/company/admin.py": ["/company/models.py"]}
|
8,560
|
mbijou/car-repair-shop-backend
|
refs/heads/master
|
/company/admin.py
|
from django.contrib import admin

# Register your models here.
from company.models import Company

# BUG FIX: admin.register(Company) only returns a decorator meant to wrap a
# ModelAdmin class and never registers anything by itself;
# admin.site.register() performs the actual registration.
admin.site.register(Company)
|
{"/company/serializers/__init__.py": ["/company/models.py"], "/company/viewsets/__init__.py": ["/company/models.py", "/company/serializers/__init__.py"], "/company/urls.py": ["/company/viewsets/__init__.py"], "/company/admin.py": ["/company/models.py"]}
|
8,561
|
mbijou/car-repair-shop-backend
|
refs/heads/master
|
/company/models.py
|
from django.db import models
from django.core.exceptions import ValidationError
# Create your models here.
class Company(models.Model):
    """Singleton company model: every save is forced onto pk=1."""
    name = models.CharField(max_length=200)

    def save(self, *args, **kwargs):
        # pin the primary key so there can only ever be one Company row
        self.pk = 1
        return super().save(*args, **kwargs)

    @classmethod
    def save_company(cls):
        """Create the singleton row if it does not exist and return it."""
        obj, created = cls.objects.get_or_create(pk=1)
        return obj

    @classmethod
    def get_company(cls):
        """Return the singleton Company, or None when not yet created."""
        try:
            obj = cls.objects.get(pk=1)
            return obj
        except cls.DoesNotExist:
            return
|
{"/company/serializers/__init__.py": ["/company/models.py"], "/company/viewsets/__init__.py": ["/company/models.py", "/company/serializers/__init__.py"], "/company/urls.py": ["/company/viewsets/__init__.py"], "/company/admin.py": ["/company/models.py"]}
|
8,564
|
rikenshah/Well-thy
|
refs/heads/master
|
/pyScripts/analysis.py
|
import pandas as pd

# load the merged dataset (produced by the gov.py merge script) for analysis
datapath = "../datasets/merged.csv"
df = pd.read_csv(datapath)
|
{"/health/views.py": ["/health/models.py"]}
|
8,565
|
rikenshah/Well-thy
|
refs/heads/master
|
/pyScripts/gov.py
|
# This is a magic script that transforms two datasets into one smartly by comparison of resonse parameters
import pandas as pd

datapath = "../datasets/healthcaregov/data.csv"
datapath2 = "../datasets/prudentialLifeInsurance/train.csv"
savepath1 = "../datasets/merged1.csv"

df = pd.read_csv(datapath)
# keep only rows where the tobacco rate is known
df2 = df[df["Individual Tobacco Rate"].notnull()]
individual_rate = df2["Individual Rate"]
# normalized_individual_rate = ((individual_rate-individual_rate.mean())/individual_rate.std())*4+4
# normalized_individual_rate = (individual_rate-individual_rate.min())/(individual_rate.max()-individual_rate.min())
individual_tobacco_rate = df2["Individual Tobacco Rate"]
# normalized_individual_tobacco_rate = ((individual_tobacco_rate-individual_tobacco_rate.mean())/individual_tobacco_rate.std())*4+4
# here multiplying by 8 does not give a good range
rate_diff = (individual_tobacco_rate - individual_rate) * 16 / individual_rate
df2["rate_diff"] = pd.Series(rate_diff)
response = 1  # NOTE(review): appears unused
mapping_dict = {}


def init_map(df2, i):
    """(Re)build the row iterator for response bucket *i* in mapping_dict."""
    if i == 0:
        mapping_dict[0] = df2.loc[(df2.rate_diff < 0.5)].iterrows()
    elif i == 1:
        mapping_dict[1] = df2.loc[(df2.rate_diff < 1) & (df2.rate_diff > 0.5)].iterrows()
    elif i in range(2, 8):
        mapping_dict[i] = df2.loc[(df2.rate_diff < i) & (df2.rate_diff > (i - 1))].iterrows()
    elif i == 8:
        mapping_dict[8] = df2.loc[(df2.rate_diff > 7)].iterrows()
    else:
        return


for i in range(9):
    init_map(df2, i)

## Loading second dataset
df3 = pd.read_csv(datapath2)
df3 = df3[df3.Response.notnull()]
# assumes Response values fall in 0..8 so a bucket always exists
for i, row in df3.iterrows():
    try:
        new_tuple = next(mapping_dict[row.Response])
    except StopIteration:
        # BUG FIX: the original bare 'except' rebuilt the iterator but then
        # reused the previous (stale or undefined) new_tuple; re-fetch after
        # rebuilding the bucket.
        init_map(df2, row.Response)
        new_tuple = next(mapping_dict[row.Response])
    for key, value in new_tuple[1].iteritems():
        df3.set_value(i, key, value)
df3.to_csv(savepath1)
|
{"/health/views.py": ["/health/models.py"]}
|
8,566
|
rikenshah/Well-thy
|
refs/heads/master
|
/health/models.py
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.contrib.auth.models import User
class HealthProfile(models.Model):
    """Per-user health questionnaire data used for cost estimation."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    age = models.IntegerField(validators=[MaxValueValidator(100), MinValueValidator(0)], null=True, blank=True, help_text="Enter age :")
    height = models.FloatField(validators=[MaxValueValidator(300), MinValueValidator(20)], null=True, blank=True, help_text="Enter height (In Inches) :")
    weight = models.FloatField(validators=[MaxValueValidator(300), MinValueValidator(20)], null=True, blank=True, help_text="Enter weight (In Lbs) :")
    ailments = models.TextField(max_length=1000, null=True, blank=True, help_text='Enter comma separated list of pre-existing ailments :')
    tobacco = models.BooleanField(help_text="Do you consume tobacco?", default=False)
    smoke = models.BooleanField(help_text="Do you consume smoke?", default=False)
    drink = models.BooleanField(help_text="Do you consume drink?", default=False)
    healthcare_costs = models.FloatField(validators=[MaxValueValidator(50000), MinValueValidator(0)], null=True, blank=True, help_text="Enter your total healthcare costs (per year)")
    POSS_EXERCISE = (
        (2, '>15 hours/week'),
        (1, '6-15 hours/week'),
        (0, '<6 hours/week'),
    )
    exercise = models.IntegerField(choices=POSS_EXERCISE, default=1, help_text='Select how much do you exercise?')
    POSS_TRAVEL = (
        (2, '>10 hours/week'),
        (1, '5-10 hours/week'),
        (0, '<5 hours/week'),
    )
    travel_time = models.IntegerField(choices=POSS_TRAVEL, default=1, help_text='Select how much do you travel?')
    POSS_SLEEP = (
        (2, '>8 hours/day'),
        (1, '6-8 hours/day'),
        (0, '<6 hours/day'),
    )
    # BUG FIX: the default was POSS_SLEEP[1], a (value, label) tuple; an
    # IntegerField default must be the stored value itself (matching the
    # exercise/travel_time fields above).
    sleep_time = models.IntegerField(choices=POSS_SLEEP, default=1, help_text='Select how much do you sleep?')
    job_type = models.TextField(max_length=1000, null=True, blank=True, help_text='Enter your job description :')

    def __str__(self):
        """
        String for representing the Model object (in Admin site etc.)
        """
        return self.user.first_name
|
{"/health/views.py": ["/health/models.py"]}
|
8,567
|
rikenshah/Well-thy
|
refs/heads/master
|
/health/migrations/0005_auto_20180426_1550.py
|
# Generated by Django 2.0.1 on 2018-04-26 15:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering HealthProfile.sleep_time."""

    dependencies = [
        ('health', '0004_auto_20180426_1547'),
    ]
    operations = [
        migrations.AlterField(
            model_name='healthprofile',
            name='sleep_time',
            # NOTE(review): default=(1, '6-8 hours/day') is a tuple captured
            # from the model's 'default=POSS_SLEEP[1]' bug; an IntegerField
            # default should be the plain int 1 — confirm and regenerate.
            field=models.IntegerField(choices=[(2, '>8 hours/day'), (1, '6-8 hours/day'), (0, '<6 hours/day')], default=(1, '6-8 hours/day'), help_text='Select how much do you sleep?'),
        ),
    ]
|
{"/health/views.py": ["/health/models.py"]}
|
8,568
|
rikenshah/Well-thy
|
refs/heads/master
|
/health/migrations/0001_initial.py
|
# Generated by Django 2.0.1 on 2018-04-10 16:31
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration (2018-04-10 16:31): creates the
# HealthProfile table — demographics, lifestyle flags, choice-based activity
# levels, and a foreign key to the project's auth user model.
class Migration(migrations.Migration):

    # First migration of the 'health' app; depends only on the user model.
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='HealthProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('age', models.IntegerField(blank=True, help_text='Enter age :', null=True, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)])),
                ('height', models.FloatField(blank=True, help_text='Enter height (In Centimeter) :', null=True, validators=[django.core.validators.MaxValueValidator(300), django.core.validators.MinValueValidator(20)])),
                ('weight', models.FloatField(blank=True, help_text='Enter weight (In Lbs) :', null=True, validators=[django.core.validators.MaxValueValidator(300), django.core.validators.MinValueValidator(20)])),
                ('ailments', models.TextField(blank=True, help_text='Enter comma separated list of pre-existing ailments :', max_length=1000, null=True)),
                ('tobacco', models.BooleanField(default=False, help_text='Do you consume tobacco?')),
                ('smoke', models.BooleanField(default=False, help_text='Do you consume smoke?')),
                ('drink', models.BooleanField(default=False, help_text='Do you consume drink?')),
                ('exercise', models.IntegerField(blank=True, choices=[(2, '>15 hours/week'), (1, '6-15 hours/week'), (0, '<6 hours/week')], default=1, help_text='Select how much do you exercise?')),
                ('travel_time', models.IntegerField(blank=True, choices=[(2, '>10 hours/week'), (1, '5-10 hours/week'), (0, '<5 hours/week')], default=1, help_text='Select how much do you travel?')),
                ('sleep_time', models.IntegerField(blank=True, choices=[(2, '>8 hours/day'), (1, '6-8 hours/day'), (0, '<6 hours/day')], default=1, help_text='Select how much do you sleep?')),
                ('job_type', models.TextField(blank=True, help_text='Enter your job description :', max_length=1000, null=True)),
                ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
{"/health/views.py": ["/health/models.py"]}
|
8,569
|
rikenshah/Well-thy
|
refs/heads/master
|
/health/migrations/0006_auto_20180426_1551.py
|
# Generated by Django 2.0.1 on 2018-04-26 15:51
from django.db import migrations, models
# Auto-generated Django migration (2018-04-26 15:51): re-declares the
# exercise and travel_time choice fields without blank=True (they carried
# blank=True in 0001), making them required in forms.
class Migration(migrations.Migration):

    dependencies = [
        ('health', '0005_auto_20180426_1550'),
    ]

    operations = [
        migrations.AlterField(
            model_name='healthprofile',
            name='exercise',
            field=models.IntegerField(choices=[(2, '>15 hours/week'), (1, '6-15 hours/week'), (0, '<6 hours/week')], default=1, help_text='Select how much do you exercise?'),
        ),
        migrations.AlterField(
            model_name='healthprofile',
            name='travel_time',
            field=models.IntegerField(choices=[(2, '>10 hours/week'), (1, '5-10 hours/week'), (0, '<5 hours/week')], default=1, help_text='Select how much do you travel?'),
        ),
    ]
|
{"/health/views.py": ["/health/models.py"]}
|
8,570
|
rikenshah/Well-thy
|
refs/heads/master
|
/health/migrations/0003_auto_20180426_1527.py
|
# Generated by Django 2.0.1 on 2018-04-26 15:27
import django.core.validators
from django.db import migrations, models
# Auto-generated Django migration (2018-04-26 15:27): alters
# healthcare_costs to an optional float bounded to [0, 50000].
class Migration(migrations.Migration):

    dependencies = [
        ('health', '0002_auto_20180414_0114'),
    ]

    operations = [
        migrations.AlterField(
            model_name='healthprofile',
            name='healthcare_costs',
            field=models.FloatField(blank=True, help_text='Enter your total healthcare costs (per year)', null=True, validators=[django.core.validators.MaxValueValidator(50000), django.core.validators.MinValueValidator(0)]),
        ),
    ]
|
{"/health/views.py": ["/health/models.py"]}
|
8,571
|
rikenshah/Well-thy
|
refs/heads/master
|
/pyScripts/get_recommendations.py
|
'''
Generates recommendation for the user based on
bmi, smoking, tobacco usage, alcohol consumption, exercise
travel time, sleep time, job type.
'''
import csv, re
# Module-level cache populated by initialize_feature_weights().
featureWeights_dict = {}
# Encoded level values shared by the recommendation helpers below.
# Labels for each code come from the HealthProfile model's choice tuples.
healthy_bmi = 0        # BMI flag: 0 means "in the healthy range"
moderate_travel = 1    # 5-10 hours/week
excess_travel = 2      # >10 hours/week
low_sleep = 0          # <6 hours/day
moderate_sleep = 1     # 6-8 hours/day
no_exercise = 0        # <6 hours/week
moderate_exercise = 1  # 6-15 hours/week
optimal_exercise = 2   # >15 hours/week
def preprocessData(data):
    """Normalize a raw health-profile dict, in place, for scoring.

    Each lifestyle key is rewritten as a two-element list
    ``[current_level, number_of_levels]``:
      * exercise / travel_time / sleep_time keep their 0-2 ordinal code (3 levels)
      * drink / tobacco / smoke become 0/1 binary flags (2 levels)
      * ailments / job_type become 1 if any word matches a risk keyword
      * age becomes 1 outside the (18, 45) band, else 0
      * bmi is derived from weight and height, then flagged 1 outside (18.5, 24.9)

    Args:
        data: dict with raw profile values; ``ailments``/``job_type`` may be
            None (the model allows NULL) and are treated as empty text.

    Returns:
        The same (mutated) dict.
    """
    data["exercise"] = [data["exercise"], 3]
    data["travel_time"] = [data["travel_time"], 3]
    data["sleep_time"] = [data["sleep_time"], 3]
    data["drink"] = [1 if data["drink"] else 0, 2]
    data["tobacco"] = [1 if data["tobacco"] else 0, 2]
    data["smoke"] = [1 if data["smoke"] else 0, 2]
    # Bag of words flagging risky pre-existing ailments and job types.
    # The original misspellings are kept for backward compatibility and the
    # correct spellings added so real words ("breathing", "asthma") match too.
    ailments = set(['heart', 'brain', 'kidney', 'liver', 'breating',
                    'breathing', 'asthema', 'asthma'])
    job_type = set(['army', 'defence', 'factory'])
    pattern = re.compile(r"\s+,*\s*")
    # Guard against NULL text fields instead of crashing in re.split.
    current_ailments = set(x for x in pattern.split(data["ailments"] or "") if x)
    current_jobtype = set(x for x in pattern.split(data["job_type"] or "") if x)
    data["ailments"] = [1 if current_ailments & ailments else 0, 2]
    data["job_type"] = [1 if current_jobtype & job_type else 0, 2]
    # Healthy age band is (18, 45) exclusive.
    data["age"] = [0 if 18 < data["age"] < 45 else 1, 2]
    # BMI from weight (lbs); 703 is the lbs/inches conversion factor.
    # NOTE(review): the model's help text says height is entered in cm, which
    # would make this formula wrong — confirm the expected unit with callers.
    data["bmi"] = data["weight"] / (data["height"] * data["height"]) * 703
    data["bmi"] = [0 if 18.5 < data["bmi"] < 24.9 else 1, 2]
    return data
def initialize_feature_weights():
    """Load per-feature weights from pyScripts/feature_weights.csv.

    Each row holds a single tab-separated cell: the feature name followed by
    its values (weight and relationship, per the dict used in __main__).
    Populates the module-level featureWeights_dict keyed by feature name.

    Returns:
        The populated featureWeights_dict.
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open('pyScripts/feature_weights.csv') as fh:
        for row in csv.reader(fh):
            split_row = row[0].split('\t')
            featureWeights_dict[split_row[0]] = split_row[1:]
    return featureWeights_dict
# Number of healthscore points gained for an improvement, rounded to 2 decimals.
def getPointsForImprovement(current, levels, weight, maxHealthScore):
    """Scale maxHealthScore by the feature weight and the current/levels ratio."""
    level_fraction = current / levels
    gain = float(weight) * maxHealthScore * level_fraction
    return round(gain, 2)
def getBmiRec(bmi_data):
    """Return a BMI recommendation string, or None when BMI is healthy.

    Args:
        bmi_data: BMI flag code; healthy_bmi (0) means no recommendation.
    """
    # BUG FIX: the original compared the undefined name 'bmi_number'
    # (NameError on every call); the parameter is 'bmi_data'.
    if bmi_data != healthy_bmi:
        return ("If you get your bmi (body-mass-index) in the healthly range "
                "(18.5 - 24. 9) your healthscore will improve 100 points.")
    return None
def getDrinkRec(drinks):
    """Return a stop-drinking recommendation, or None for non-drinkers."""
    if not drinks:
        return None
    return ("If you stop drinking alcohol your healthscore will improve by "
            " 50 points.")
def getExerciseRec(exercise):
    """Return an exercise recommendation for the given level, or None at optimum."""
    messages = {
        no_exercise: ("If start exercising 6 hours a week your healthscore will improve "
                      " 17 points."),
        moderate_exercise: ("If you exercise more than 15 hours a week "
                            " your healthscore will improve 17 points."),
    }
    # Unknown/optimal levels fall through to None via dict lookup.
    return messages.get(exercise)
def getSmokeRec(smokes):
    """Return a quit-smoking recommendation, or None for non-smokers."""
    message = "If you quit smoking your healthscore will improve 50 points."
    return message if smokes else None
def getTobaccoRec(uses_tobacco):
    """Return a stop-tobacco recommendation, or None for non-users."""
    if not uses_tobacco:
        return None
    return "If you stop using tobacco your healthscore will improve 50 points."
def getTravelRec(travel_time):
    """Return a travel-reduction recommendation, or None if travel is low."""
    messages = {
        excess_travel: ("If you reduce your travel_time to under 10 hours "
                        "your healthscore will improve 17 points."),
        moderate_travel: ("If you reduce your travel_time to under 5 hours "
                          "your healthscore will improve 17 points."),
    }
    return messages.get(travel_time)
def getSleepRec(sleep):
    """Return a sleep-increase recommendation, or None if sleep is sufficient."""
    messages = {
        low_sleep: ("If you increase sleep to more than 6 hours a day "
                    "your healthscore will improve 17 points."),
        moderate_sleep: ("If you increase your sleep to more than 8 hours a day "
                         "your healthscore will improve 17 points."),
    }
    return messages.get(sleep)
# Improvement points for one feature, dispatched on its relationship type.
def getRecommendationPointsForKey(data, featureWeight, maxHealthScore):
    """featureWeight is [weight, relationship]; relationship picks the formula."""
    weight, relationship = featureWeight[0], featureWeight[1]
    if relationship == 'negative':
        return getNegativeRecommendation(data, weight, maxHealthScore)
    return getPositiveRecommendation(data, weight, maxHealthScore)
# Improvement points for a feature with a negative relationship to health.
def getNegativeRecommendation(data, weight, maxHealthScore):
    """data is [current_level, number_of_levels]; level 0 is already best."""
    current, levels = data[0], data[1]
    if current == 0:
        return None  # nothing to improve
    return getPointsForImprovement(current, levels, weight, maxHealthScore)
# Improvement points for a feature with a positive relationship to health.
def getPositiveRecommendation(data, weight, maxHealthScore):
    """Return the attainable healthscore gain for a positive feature.

    Args:
        data: [current_level, number_of_levels]; level 2 is already optimal.
        weight: feature weight, possibly a string from the CSV.
        maxHealthScore: maximum attainable healthscore.

    Returns:
        float gain, or None when the feature is already optimal.
    """
    # Debug prints and commented-out code removed from the original body.
    current, levels = data[0], data[1]
    if current == 2:
        return None
    return float(weight) * ((levels - current - 1) / levels) * maxHealthScore
def initializeStrDic():
    """Map each feature key to its per-level recommendation phrase fragments."""
    return {
        "smoke": ["", "stop smoking"],
        "exercise": ["increase your exercise to atleast 6 hours a week",
                     "increase your exercise to more than 15 hours a week"],
        "sleep_time": ["increase the amount you sleep to atleast 6 hours a day",
                       "increase the amount you sleep to above 8 hours a day"],
        "bmi": ["", "get your bmi in the healthy range (18.5 - 24 .9)"],
        "drink": ["", "stop drinking"],
        "tobacco": ["", "stop using tobacco"],
        "travel_time": ["", "reduce the travel time to less than 5 hours",
                        "reduce the travel to less than 10 hours"],
    }
def processRecommendations(data, maxHealthScore):
    """Build recommendation strings and an estimated healthcare saving.

    Args:
        data: raw health-profile dict (keys consumed by preprocessData,
            plus 'healthcare_costs').
        maxHealthScore: maximum attainable healthscore.

    Returns:
        (all_recommendations, savings): all_recommendations[0] is the combined
        recommendation covering every improvable feature; the rest are the
        per-feature recommendations. savings is the projected cost reduction,
        rounded to 2 decimals.
    """
    # Debug prints and a large commented-out draft were removed; logic is
    # otherwise unchanged from the original.
    data = preprocessData(data)
    featureWeights = initialize_feature_weights()
    recStrDic = initializeStrDic()
    all_recommendations = []
    resultStrings = []
    points = 0.0
    for key in ["exercise", "sleep_time", "drink", "tobacco", "smoke", "bmi", "travel_time"]:
        result = getRecommendationPointsForKey(data[key], featureWeights[key], maxHealthScore)
        if result is None:
            continue  # feature already at its best level
        points += result
        phrase = recStrDic[key][data[key][0]]
        resultStrings.append(phrase)
        all_recommendations.append(getRecommendationString([phrase], result))
    # Put the combined recommendation first, followed by per-feature ones.
    combined = getRecommendationString(resultStrings, points)
    all_recommendations = [combined] + all_recommendations
    savings = round((points / maxHealthScore) * data["healthcare_costs"], 2)
    return all_recommendations, savings
def getRecommendationString(resultStrings, points):
    """Join phrase fragments into one 'If you ...' recommendation sentence.

    NOTE: an empty fragment list returns the list ["You are in good shape."]
    rather than a plain string — behavior preserved as callers expect it.
    """
    if not resultStrings:
        return ["You are in good shape."]
    suffix = (" your healthscore will improve by "
              + str(round(points, 2)) + " points.")
    if len(resultStrings) == 1:
        return "If you " + resultStrings[0] + suffix
    leading = ", ".join(resultStrings[:-1])
    return "If you " + leading + ", and " + resultStrings[-1] + suffix
if __name__ == "__main__":
    # Demo run. processRecommendations expects RAW profile values — it calls
    # preprocessData itself — so the original demo (which passed pre-processed
    # [level, levels] pairs and omitted ailments/job_type/age/weight/height/
    # healthcare_costs) crashed inside preprocessData. The unused local
    # featureWeights dict was also removed: weights are loaded from the CSV
    # by initialize_feature_weights.
    data = {
        "exercise": 0,        # <6 hours/week
        "travel_time": 2,     # >10 hours/week
        "sleep_time": 0,      # <6 hours/day
        "drink": True,
        "tobacco": True,
        "smoke": True,
        "ailments": "heart",
        "job_type": "factory",
        "age": 50,
        "weight": 220,
        "height": 70,
        "healthcare_costs": 5000.0,
    }
    print(processRecommendations(data, 1000))
|
{"/health/views.py": ["/health/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.