index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
12,828
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/AccessModifiers/protected.py
|
# Prefix a name with a single underscore to mark it "protected" by convention.
# PROTECTED: in C++ and Java a protected member is accessible only from within
# the class and its subclasses. Python does NOT enforce this — the leading
# underscore is purely a naming convention signalling "internal use".
class Jar:
    def __init__(self):
        # protected variable prefixed with _
        self._content = None

    def fill(self, content):
        self._content = content

    def empty(self):
        print('empty the jar...')
        self._content = None


myJar = Jar()
myJar.fill('sugar')
# NOTE: unlike C++/Java, accessing _content from outside does NOT raise an
# error; Python relies on convention only, so the print below works fine.
print(myJar._content)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,829
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/modules_and_packages/carsPackage/bmw.py
|
class Bmw:
    """Simple container for BMW model names."""

    def __init__(self):
        # Known models, in display order.
        self.models = ['320d', '330d', 'bikes']

    def out_models(self):
        """Print a header followed by each known model on its own line."""
        print("Existing models are: ")
        for name in self.models:
            print(name)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,830
|
sanneabhilash/python_learning
|
refs/heads/master
|
/PracticePrograms/numberGuessingGame.py
|
import random
def guessing_game():
    """Interactive game: the player has up to 3 tries to guess a number 1-10."""
    num = random.randint(1, 10)
    guess = int(input('Guess a number between 1 and 10'))
    times = 1  # the first guess above counts as attempt 1
    while guess != num:
        guess = int(input('Guess again'))
        times += 1
        if times == 3:
            # stop after the third attempt even if still wrong
            break
    if guess == num:
        print('You win!')
    else:
        print('You lose! The number was', num)


def lotto_numbers():
    """Return a list of 5 random lottery numbers in the range 1-53."""
    lotto_nums = []
    for i in range(5):
        lotto_nums.append(random.randint(1, 53))
    return lotto_nums


def main():
    """Menu dispatcher: '1' prints lottery numbers, '2' plays the game,
    anything else exits."""
    answer = input(
        'Do you want to get lottery numbers (1) or play the game (2) or quit (Q)?')
    if (answer == '1'):
        numbers = lotto_numbers()
        print(numbers)
    elif (answer == '2'):
        guessing_game()
    else:
        print('Toodles!')


# NOTE(review): main() runs at import time (no __main__ guard), so importing
# this module will prompt for input.
main()
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,831
|
sanneabhilash/python_learning
|
refs/heads/master
|
/PracticePrograms/modules_import_weather_data/weather.py
|
import requests
def current_weather():
    """Fetch London weather from the OpenWeatherMap sample API and print
    the forecast low and high temperatures.

    NOTE(review): hits a hard-coded sample URL with an embedded API key;
    requires network access and the third-party `requests` package.
    """
    url = "https://samples.openweathermap.org/data/2.5/weather?q=London,uk&appid=f2e9b3d28adf99c7d56b98d9044e6173"
    r = requests.get(url)
    print(r)
    weather_json = r.json()
    print(weather_json)
    # Renamed from min/max so the built-in functions are not shadowed.
    temp_min = weather_json['main']['temp_min']
    temp_max = weather_json['main']['temp_max']
    print("The circus' forecast is", temp_min, "as the low and", temp_max, "as the high")
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,832
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/datatype_conversions.py
|
# Dynamic Type Casting: a name can be rebound to values of different types;
# type() always reports the type of the current value.
print('--------Dynamic type casting----------')
i = 100
j = 'Hello World'
print(j)
print(type(j))  # str
j = 99.5
print(j)
print(type(j))  # float
j = i
print(j)
print(type(j))  # int — j now refers to the same int as i
# Static type casting: explicit conversion via the int/float/str constructors.
print('-----------Static Type Conversions--------')
num = 100
dec = 5.6
word = 'Hello'
print(num, dec, word)
print(type(num), type(dec), type(word))
dec = int(333.33)      # truncates toward zero -> 333
word = float(22)       # int -> 22.0
num = str('example')   # str() of a str is a no-op copy of the reference
print(type(num), type(dec), type(word))
print(num, dec, word)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,833
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/yieldReturn.py
|
# def colors():
# yield "red"
# yield "blue"
# yield "yellow"
#
# next_color = colors()
# print(type(next_color)) # <class 'generator'>
#
# print(next(next_color))
# print(next(next_color))
# print(next(next_color))
def something():
    """Yield the integers 1 through 9 one at a time (a simple generator)."""
    current = 1
    while current < 10:
        yield current
        current += 1


# Calling the generator function gives an iterator; next() pulls one value.
next_number = something()
print(next(next_number))
print(next(next_number))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,834
|
sanneabhilash/python_learning
|
refs/heads/master
|
/PracticePrograms/sendEmail.py
|
import smtplib
# Send a one-line message through Gmail's SMTP server over STARTTLS.
# NOTE(review): credentials are hard-coded in source — move them to an
# environment variable or secret store before real use.
try:
    s = smtplib.SMTP('smtp.gmail.com', 587)  # port 587 = submission with STARTTLS
    s.starttls()
    s.login("user@gmail.com", "password")
    message = "This message is from python"
    s.sendmail("user@gmail.com", "user@yahoo.com", message)
    s.quit()
except Exception as e:
    # Best-effort: any failure (network, auth, ...) is printed, not re-raised.
    print(e)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,835
|
sanneabhilash/python_learning
|
refs/heads/master
|
/File_Actions_Automation/ReadWriteIntoFiles.py
|
import os
def readcfg(config):
    """Read config file `config` and return a list of parsed option dicts.

    Returns an empty list when the file does not exist. Each line is
    parsed by parsecfgline().
    """
    items = []
    if os.path.isfile(config):
        # `with` guarantees the handle is closed even if parsing raises —
        # the original explicit close() leaked the file on error.
        with open(config, 'r') as cfile:
            for line in cfile:
                items.append(parsecfgline(line))
    return items
def parsecfgline(line):
    """Parse one pipe-delimited config line into an option dict.

    Expected format: "<origin[;exclude]>|<dest>|<type>\n". Lines not in
    that 3-field format yield an empty dict.
    """
    option = {}
    if '|' in line:
        fields = line.split('|')
        if len(fields) == 3:
            source, dest, kind = fields
            option['origin'] = extcheck(source, 0)
            option['exclude'] = extcheck(source, 1)
            option['dest'] = dest
            option['type'] = kind.replace('\n', '')
    return option
def extcheck(opt, idx):
    """Return part `idx` of a ';'-separated pair.

    For "a;b": idx 0 -> "a", any other idx -> "b".
    Without a ';' (or with more than one): idx 0 -> the whole string,
    otherwise "".
    """
    if ';' in opt:
        parts = opt.split(';')
        if len(parts) != 2:
            return ''
        return parts[0] if idx == 0 else parts[1]
    return opt if idx == 0 else ''
# Read "<this-script-name>.ini" from the current directory and dump each
# parsed option dict. An absent file simply yields no output.
opts = readcfg(os.path.splitext(os.path.basename(__file__))[0] + '.ini')
for opt in opts:
    print(opt)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,836
|
sanneabhilash/python_learning
|
refs/heads/master
|
/pandas_practice/readExcelToDataFrame.py
|
import pandas as pd
# NOTE(review): despite the filename mentioning Excel, this reads a CSV.
# Use a raw string so the Windows backslashes are not treated as escape
# sequences ("\p", "\D", "\e" are invalid escapes and warn on modern Python).
data = pd.read_csv(r"D:\pythonTraining\Day6\emp_data.csv")
print(data)
print(type(data))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,837
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/modules_and_packages/import_custom_module.py
|
# importing module: (.py file code)
# Bind the project-local CalcModule under the short alias `Calc`.
import Concepts_with_examples.modules_and_packages.CalcModule as Calc
# Presumably Calc.add sums its arguments, printing 4 — verify in CalcModule.
print(Calc.add(1, 3))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,838
|
sanneabhilash/python_learning
|
refs/heads/master
|
/unit_testing_examples/unitTestExample.py
|
import unittest
def add(x, y):
    """Return the sum of x and y."""
    return x + y


def div(x, y):
    """Return x divided by y; raises ZeroDivisionError when y == 0."""
    return x / y


def sub(x, y):
    """Return x minus y."""
    return x - y


def fact(n):
    """Return n! for a non-negative integer n.

    Raises ValueError for negative n — the original recursed forever
    (RecursionError) because n == 0 was never reached.
    """
    if n < 0:
        raise ValueError("fact() is undefined for negative numbers")
    if n == 0:
        return 1
    return n * fact(n - 1)
# MyTest inherits TestCase class from unittest
# MyTest inherits TestCase from unittest; each test_* method is discovered
# and run automatically, with setUp/tearDown around every test.
class MyTest(unittest.TestCase):
    def setUp(self):
        # Runs before EACH test method.
        print("IN SET UP")

    def tearDown(self) -> None:
        # Runs after EACH test method, even if it failed.
        print("IN TEAR DOWN")

    def test_add(self):
        self.assertEqual(add(3, 4), 7)

    def test_sub(self):
        self.assertEqual(sub(10, 5), 5)

    def test_factorial(self):
        res = fact(5)
        self.assertEqual(res, 120)

    def test_zerodivisionerror(self):
        # Context-manager form: the expression inside must raise.
        with self.assertRaises(ZeroDivisionError): 6 / 0

    def test_zerodivisionerrorB(self):
        # Callable form: div(8, 0) must raise ZeroDivisionError.
        self.assertRaises(ZeroDivisionError, div, 8, 0)

    def test_split(self):
        s = 'hello$$sorld'
        # check that s.split fails when the separator is not a string
        with self.assertRaises(TypeError):
            s.split(4)
# Executing the script as standalone, the __name__ will equal to __main__
# unittest.main() will execute all tests methods that you wrote
# When executed as a standalone script, __name__ equals '__main__';
# unittest.main() then discovers and runs every test_* method above.
if __name__ == '__main__':
    unittest.main()
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,839
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/lambdas/lambda.py
|
# NOTE(review): despite its name, add_l divides: add_l(2, 4) == 4 / 2 == 2.0.
add_l = lambda x, y: y / x
result = add_l(2, 4)
print(result)
print(add_l)
print(type(add_l))
# lambda function to determine max of two numbers
maximum = lambda x, y: x if x > y else y
# lambda function to determine min of two numbers
minimum = lambda x, y: x if x < y else y
# max of three: compare z against the max of x and y
max3 = lambda x, y, z: z if z > (x if x > y else y) else (x if x > y else y)
print(minimum(1, 2), maximum(1, 2), max3(1, 2, 3))
square = lambda x: x ** 2
print(square(3))
# Swap using a lambda: return the pair reversed and rebind at the call site.
# (The original `lambda x, y: y is x and x is y` only produced a bool and
# never swapped anything — the To-do was unimplemented.)
swap = lambda x, y: (y, x)
l = 10
m = 11
l, m = swap(l, m)
print(l, m)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,840
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/FileHandling.py
|
# Walk-through of Python file modes: w, r, w+, a, a+, wb, rb.
# NOTE(review): every path is hard-coded to the Windows D: drive — the
# script fails unless D:/TestDigitalAssets exists.
import os
# os.makedirs('D:/pythonProgramCreatedDirectory') # In case you want to create directory
f = open('D:/TestDigitalAssets/sample.txt', 'w') # File opened in write mode
# if specified file does not exist, then it creates a new file
print("Write using 'w' mode:")
f.write('This is a test file')
f.write('\nThis is a new line')
f.close() # Opened in write mode 'w', you cannot read
# READ file contents - Entire file is fetched
f = open('D:/TestDigitalAssets/sample.txt', 'r') # File opened in read mode
print('--------------READ ENTIRE FILE AND PRINT-------------')
print(f.read())
f.close()
# READLINE - read one line at a time
f = open('D:/TestDigitalAssets/sample.txt', 'r') # File opened in read mode
print('--------------READ LINE BY LINE-------------')
for line in f.readlines():
    # read lines one by one as a list instead of whole file
    print('Line: ', line, end="")
f.close()
print()
# you can read + write when file opened in w+ mode
# w+ -> creates a file if it does not exist
print("--------------WRITE + READ in 'w+' MODE----------------")
f = open('D:/TestDigitalAssets/sample1.txt', 'w+')
print("Write using 'w' mode:")
f.write('This is READ and WRITE mode')
# NOTE(review): after the write the file position is at EOF, so this read
# returns '' — an f.seek(0) would be needed to see the written text.
print(f.read())
f.close()
# APPEND mode, to add lines to existing file contents at end
print("--------------APPEND FILE in 'a' MODE----------")
f = open('D:/TestDigitalAssets/sample.txt', 'a')
f.write('\nAPPEND')
f.close()
f = open('D:/TestDigitalAssets/sample.txt', 'r')
print(f.read())
f.close()
print("--------------APPEND FILE in 'a+' MODE----------")
# APPEND+ mode, to add lines to existing file contents at end and read
f = open('D:/TestDigitalAssets/sample.txt', 'a+')
f.write('\nAPPEND+')
f.seek(0) # go to beginning of file, the current pointer will be at EOF
print('\nAPPEND using A+:', f.read())
f.close()
# Write and read binary mode
print("-----------WRITE BINARY 'wb' MODE---------")
f = open('D:/TestDigitalAssets/binary.txt', 'wb')
num = [5, 10]
arr = bytearray(num)  # each int (0-255) becomes one byte
print('Writing binary text: ', arr)
f.write(arr)
f.close()
print("-----------READ BINARY 'rb' MODE---------")
f = open('D:/TestDigitalAssets/binary.txt', 'rb')
num = list(f.read())  # bytes back to a list of ints
print(num)
f.close()
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,841
|
sanneabhilash/python_learning
|
refs/heads/master
|
/File_Actions_Automation/walkdir.py
|
import os
# Recursively walk C:\Personal, printing each folder, its subfolders and files.
# NOTE(review): Windows-specific path; for a missing root os.walk simply
# yields nothing (no error is raised).
for fn, sflds, fnames in os.walk('C:\\Personal'):
    print('Current folder is ' + fn)
    for sf in sflds:
        print(sf + ' is a subfolder of ' + fn)
    for fname in fnames:
        print(fname + ' is a file of ' + fn)
    print('')  # blank line between directories
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,842
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/lambdas/map.py
|
# map() applies a function to every element of an iterable; list()
# materialises the mapped (here: squared) values as a new list that
# replaces the original binding.
my_list = [1, 2, 3, 4, 5, 6]
my_list = list(map(lambda value: value ** 2, my_list))
print(my_list)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,843
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/Operators/identity.py
|
# Identity operator, compares if the address location is same
# is - Evaluates to true if the variables on either side of the operator point to the same object and false otherwise.
# is not - Evaluates to false if the variables on either side of the operator point to the same object
# and true otherwise.
# NOTE(review): `i is j` holding for equal small ints is a CPython caching
# detail — use `is` for identity/None checks, `==` for value equality.
i = 11
k = 10
j = i  # j now refers to the same object as i
if type(i) is int:
    print('i is an integer')
if type(i) is float:
    print('i is a float type')
if i is j:
    print('i and j point to same address')
    print(id(i), id(j))
if i is not k:
    print('i and k do not hold same address')
    print(id(i), id(k))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,844
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/functions.py
|
# Basic python methods Example
def add(i, j):
    """Return the sum of i and j."""
    return i + j


def empty_method(i, j):
    """Do nothing; calling it returns None."""
    pass


def return_none():
    """Explicitly return None."""
    return None


def add_return_none2(i, j):
    """Print the sum of i and j; the bare return yields None."""
    print('Sum=', i + j)
    return


print(add(10, 1))
print(empty_method(10, 1))
print(return_none())
print(add_return_none2(11, 2))


# parameter j is optional; if not assigned a value at the call site it
# defaults to 10
def add_two_numbers(i, j=10):
    return i + j


print(add_two_numbers(1, 23))
print(add_two_numbers(1))


# FUNCTION which accepts any number of positional arguments.
# NOTE(review): this redefines (shadows) the earlier two-argument add().
def add(*arr):
    total = 0
    for i in arr:
        total = total + i
    return total


print(add(1, 3, 4))


# RECURSIVE FUNCTION CALL Example
def factorial(i):
    """Return i! (1 for any i <= 1)."""
    # The original also had `elif i == 0: return 1`, which was unreachable
    # because `i <= 1` already covers 0.
    if i <= 1:
        return 1
    return i * factorial(i - 1)


print(factorial(7))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,845
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/Operators/logical.py
|
# and / or / not operator demo
print('----------Logical-------')
print(True and False)
print(True or False)
i = 10
j = 11
# The original `if i and j > 0:` actually tested the truthiness of i, not
# i > 0 (it parsed as `i and (j > 0)`); spell out both comparisons so the
# condition matches the printed claim.
if i > 0 and j > 0:
    print('i and j are greater than 0')
k = False
# not operator
if not k:
    print('K is false')
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,846
|
sanneabhilash/python_learning
|
refs/heads/master
|
/DataBaseInteractions/SQLiteDemo.py
|
import sqlite3 as lite
import sys
# We are using the SQLite package to connect to DB and run commands
# Data files are created under folder: ..\Day5\DataBaseInteractions
# Demo code for connecting to a db and doing CRUD operations
# Install SQL community edition to view files created - commondb, database.db
con = None
try:
    # create sqlite connection; the file 'commondb' is created on demand
    con = lite.connect('commondb') # Any db name you wanna use
    cur = con.cursor() # Cursor object executes SQL over the connection
    # fetch sqlite version
    print("--------------------------------------")
    cur.execute('SELECT SQLITE_Version()') # get SQLite version installed
    data = cur.fetchone()
    print("SQLite version: %s " % data)
except Exception as e:
    print("Error:", e.args[0])
    sys.exit(1)
finally:
    # always release the connection, even on failure
    if con:
        con.close()
# CRUD Operations demo — `with con:` commits on success, rolls back on error
con = lite.connect('commondb')
with con:
    cur = con.cursor()
    cur.execute('DROP table IF Exists employee') # Delete table if exists
    print("=------------------------------=")
    print("CREATE TABLE employee(Id INT, Name TEXT)")
    cur.execute("CREATE TABLE employee(Id INT, Name TEXT)") # Create table with schema
    cur.execute("INSERT INTO employee VALUES(1, 'Suni')") # Insert into table
    cur.execute("INSERT INTO employee VALUES(2, 'Saini')")
    cur.execute("INSERT INTO employee VALUES(3, 'Geloth')")
    cur.execute("INSERT INTO employee VALUES(4, 'Abhi')")
    cur.execute("SELECT * FROM employee") # Fetch data from table
    row = cur.fetchall()
    for r, n in row:
        print(str(r), " : ", str(n))
# delete data from employee where id = 3
with con:
    cur.execute('Delete FROM employee where id=3')
    #fetch data from employee
    cur.execute('Select * from employee')
    rows = cur.fetchall()
    for row in rows:
        print(row)
# update data for employee where emp_id=4
with con:
    cur.execute("update employee set name='Robert' where id=4")
    # fetch data from employee
    cur.execute('Select * from employee')
    rows = cur.fetchall()
    for row in rows:
        print(row)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,847
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/static_variables.py
|
class Jar:
    """A jar holding one content value, plus a class-level (static) string."""

    # Class attribute shared by every instance until shadowed on one.
    my_static_variable = 'Hello World'

    def __init__(self):
        # Nothing inside until fill() is called.
        self.content = None

    def fill(self, content):
        """Store `content` in the jar."""
        self.content = content

    def empty(self):
        """Announce and clear the jar's content."""
        print('Empty the jar...')
        self.content = None
myJar = Jar()
myJar.content = 'sugar'
print(Jar.my_static_variable) # Accessed without creating object
print(myJar.my_static_variable) # Instance lookup falls back to the class attribute
myJar2 = Jar()
# Assigning through an instance creates an instance attribute that shadows
# the class attribute for myJar2 only; Jar and myJar are unaffected.
myJar2.my_static_variable = "changed the static value"
print(myJar2.my_static_variable) # shadowed instance value
print(Jar.my_static_variable) # class attribute unchanged
print(myJar.my_static_variable) # still sees the class attribute
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,848
|
sanneabhilash/python_learning
|
refs/heads/master
|
/pandas_practice/sqlDataToDataFrame.py
|
import pandas as pd
import mysql.connector
# load data from mysql database into a pandas DataFrame
# NOTE(review): credentials are hard-coded; requires a running local MySQL
# with a 'univdb' database and a 'department' table — confirm before running.
con = mysql.connector.connect(host="localhost", user='root', passwd='root', auth_plugin='mysql_native_password', database = 'univdb')
emp_data=pd.read_sql('select d.dept_id, d.dept_code from department d', con)
print(emp_data)
print(type(emp_data))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,849
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/conditional_statements.py
|
# IF / elif / else condition: with i=10, j=15 only the else branch fires.
i, j = 10, 15
if i > j:
    print('i is greater than j')
elif j == i:
    print('i is equal to j')
else:
    print('i is not greater than j')
# Python does not support switch statements, instead we are provided with switcher
# The Pythonic way to implement switch statement is to use the powerful dictionary mappings,
# also known as associative arrays, that provide simple one-to-one key-value mappings.
# Create a dictionary named switcher to store all the switch-like cases.
def switch_demo(argument):
    """Map a month number (1-12) to its English name.

    A dict lookup plays the role of a switch statement; .get()'s fallback
    "Invalid month" is the default case for any other value.
    """
    month_names = {
        1: "January", 2: "February", 3: "March",
        4: "April", 5: "May", 6: "June",
        7: "July", 8: "August", 9: "September",
        10: "October", 11: "November", 12: "December",
    }
    return month_names.get(argument, "Invalid month")
# when you pass an argument to the switch_demo function, it is looked up against the switcher dictionary mapping.
# If a match is found, the associated value is printed, else a default string (‘Invalid Month’) is printed.
# The default string helps implement the ‘default case’ of a switch statement.
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,850
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/Generators.py
|
from typing import Any, Generator  # used only by the commented annotation below
# A parenthesised comprehension builds a lazy generator, not a list.
new_list = (x ** 2 for x in [1, 2, 3, 4, 5, 6])
# new_list: Generator[Any, Any, None] = (x ** 2 for x in [1, 2, 3, 4, 5, 6])
print(type(new_list)) # <class 'generator'>
# Iterating consumes the generator; values are produced one at a time.
for item in new_list:
    print(item)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,851
|
sanneabhilash/python_learning
|
refs/heads/master
|
/File_Actions_Automation/basics.py
|
import os
# File path handling demo.
# NOTE(review): paths are Windows-specific; os.chdir raises if C:/Users
# does not exist on the machine running this.
os.chdir("C:/Users") # Changes the current folder
print(os.path.dirname("C:/Users")) # returns the path's directory name
print(os.path.split("C:/Users/asanne")) # returns (head, tail) tuple
print(os.path.join("foo", "panda")) # returns concatenated path
calc = "C:\\Windows\\System32\\calc.exe" # path to windows calculator
print(os.path.sep) # Returns the separating slash symbol for current OS
print(calc.split(os.path.sep))
# Read and write
# Read and write
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,852
|
sanneabhilash/python_learning
|
refs/heads/master
|
/pandas_practice/pandasDemo.py
|
import pandas as pd
# A Series can hold mixed object types (ints, floats, strings, even lists).
data = pd.Series([1, 2.5, "Hello", [1, 2, 4]])
print(data)
print(type(data))
# Build a two-column DataFrame from a column-name -> values mapping.
records = {'name': ['anil', 'sunil', 'ramesh', 'suresh'],
           'score': [56, 45, 87, 89]}
df = pd.DataFrame(records)
print(df)
print("_______________________________")
print(df["name"])
print("_______________________________")
print(df["score"])
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,853
|
sanneabhilash/python_learning
|
refs/heads/master
|
/PracticePrograms/factorial.py
|
def factorial(number: int):
    """Iteratively compute number! (returns 1 for number <= 1)."""
    product: int = 1
    for term in range(2, number + 1):
        product *= term
    return product
def recursive_factorial(number: int):
    """Recursively compute number! (returns 1 for number <= 1).

    The original pre-assigned `fact = number` (immediately overwritten or
    unused) and carried a stray semicolon; both removed.
    """
    if number <= 1:
        return 1
    return number * recursive_factorial(number - 1)
# Both implementations must agree: 5! == 120.
argument = 5
print("Non Recursive Method: ", factorial(argument))
print("Recursive Method: ", recursive_factorial(argument))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,854
|
sanneabhilash/python_learning
|
refs/heads/master
|
/PracticePrograms/palindromic_triangle.py
|
"""
You are given a positive integer .
Your task is to print a palindromic triangle of size .
For example, a palindromic triangle of size is:
1
121
12321
1234321
123454321
You can't take more than two lines. The first line (a for-statement) is already written for you.
You have to complete the code using exactly one print statement.
Note:
Using anything related to strings will give a score of .
Using more than one for-statement will give a score of .
Input Format
A single line of input containing the integer .
Constraints
# O < N < 10
"""
for i in range(1,int(input())+1):
print (((10 ** i - 1) // 9) ** 2)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,855
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/inbuild_methods_on_lists.py
|
# Demonstrate in-place list methods: append, remove, sort.
my_list = [2, 1, 3, 6, 5, 4]
print(my_list)
for extra in (7, 8, "HelloWorld"):
    my_list.append(extra)
print(my_list)
# sort() on a mixed int/str list raises TypeError, so drop the string first
my_list.remove("HelloWorld")
my_list.sort()               # in-place, ascending by default
print(my_list)
my_list.sort(reverse=True)   # in-place, descending
print(my_list)
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,856
|
sanneabhilash/python_learning
|
refs/heads/master
|
/Concepts_with_examples/listOperations.py
|
# List is an ordered sequence of items. List is mutable:
# a list, once created, can be modified in place.
my_list = ["apples", "bananas", "oranges", "kiwis"]
print("--------------")
print(my_list)
print("--------------")
# accessing list using index
print(my_list[0])
print(my_list[3])
# slicing list (end index is exclusive; negatives count from the end)
print(my_list[1:4])
print(my_list[-2:])
print(my_list[:-2])
print("--------------")
# iterating over list
for item in my_list:
    print(item)
print("--------------")
# check if item exists
if "apples" in my_list:
    print("Yes")
print("--------------")
# modify list element
my_list[2] = "guava"
print(my_list)
print("---------------")
# list is mutable. try delete an element from list
del my_list[2]
print("list destructor")
# delete the whole name binding; my_list is undefined until reassigned below
del my_list
print("--------------")
my_list = ["apples", "bananas", "oranges", "kiwis"]
print("list is mutable. try append an element to a list")
my_list.append("pomegranate")
print(my_list)
print("--------------")
# reverse list (slice with step -1 returns a reversed copy)
print(my_list[::-1])
print("--------------")
# sort list (sorted() returns a new list; the original is untouched)
print(sorted(my_list))
print("--------------")
# concatenate lists
my_list1 = ["apples", "bananas"]
my_list2 = ["oranges", "kiwis"]
print(my_list1 + my_list2)
print("--------------")
# list index method
my_list = ["apple", "banana", "orange", "kiwi"]
print(my_list.index("orange"))
print("--------------")
# convert a list into set (unordered, duplicates removed)
my_list = ['apples', 'bananas', 'kiwis', 'oranges']
my_list = set(my_list)
print(type(my_list))
print(my_list)
print("--------------")
# convert a list of [key, value] pairs to a dictionary
my_list = [['a', "apple"], ['b', "banana"], ['c', "cat"], ['d', "dog"]]
dict1 = dict(i for i in my_list)
print(dict1)
print("--------------")
# convert a list to a string
my_list = ["apple", "banana", "orange", "kiwi"]
strtest = ','.join(my_list)
print(strtest)
# list copy : shallow copy and deep copy methods
import copy
my_list = ["apple", "banana", "orange", "kiwi"]
print("--------------")
# shallow copy: new outer list, same ids differ from the original
new_list = copy.copy(my_list)
print(my_list, id(my_list))
print(new_list, id(new_list))
print("--------------")
# deep copy: also recursively copies nested elements
new_list = copy.deepcopy(my_list)
print(my_list, id(my_list))
print(new_list, id(new_list))
|
{"/PracticePrograms/modules_import_weather_data/circus.py": ["/PracticePrograms/modules_import_weather_data/weather.py"], "/Concepts_with_examples/modules_and_packages/importSinglePackage.py": ["/Concepts_with_examples/modules_and_packages/carsPackage/Audi.py"]}
|
12,859
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/MaxValue.py
|
import numpy as np
import math
def MaxValue(f, X, Y):
    """Return (x, y, f(x, y)) at the grid point of X x Y where f is largest.

    f is evaluated on the full meshgrid of X and Y. The original version
    hard-coded a grid width of 50 (np.linspace's default) and swapped the
    row/column indices, returning transposed coordinates for asymmetric
    peaks or non-default grid sizes.
    """
    Xl, Yl = np.meshgrid(X, Y)            # shapes: (len(Y), len(X))
    Z = np.vectorize(f)(Xl, Yl)
    # unravel_index turns the flat argmax into (row, col) for ANY grid
    # shape; rows index Y and columns index X under meshgrid's default 'xy'.
    row, col = np.unravel_index(np.argmax(Z), Z.shape)
    return (X[col], Y[row], Z[row][col])
if __name__ == '__main__':
    # Cone peaking at (0.84, 0.12); the grid maximum over [0,1]^2 should
    # land near that point.
    x_mean = .84
    y_mean = .12
    f = lambda x,y: 1 - math.sqrt((x-x_mean)**2 + (y-y_mean)**2)
    print(MaxValue(f, np.linspace(0,1), np.linspace(0,1)))
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,860
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/gradient.py
|
from CrashMap import CrashMap
import numpy as np
import scipy
from Plot import plot
import random
m_x = (-10,10)
m_y = (-10,10)
stdx = 1
stdy = 1
def fscore(f1, f2):
    """Return a function giving the pointwise squared difference of f1 and f2."""
    def squared_diff(x, y):
        return (f1(x, y) - f2(x, y)) ** 2
    return squared_diff
def calcInt(f):
    """Integrate f over the rectangle m_x x m_y and return the value.

    NOTE(review): the file only does `import scipy`, which does not load
    the integrate subpackage, and scipy.integrate.quadpack is a private
    module — import dblquad explicitly instead.
    """
    from scipy.integrate import dblquad
    score, error = dblquad(f, m_x[0], m_x[1], lambda x: m_y[0], lambda x: m_y[1])
    return score
def calcGradient(ambulances, crashmap):
    """Numerically estimate d(score)/d(position) for each ambulance.

    Uses central differences with step `delx` in both x and y. Returns a
    list of (dx, dy) tuples, one per ambulance; `ambulances` is restored
    to its original positions before returning.

    NOTE(review): the original called fscore(crashmap(x, y), ...) with
    undefined names x and y (a NameError) — fscore takes the two map
    *functions* themselves, not their values.
    """
    delx = 0.1
    deltas = []
    for i in range(0, len(ambulances)):
        x0, y0 = ambulances[i]
        # Perturb ambulance i in each of the four directions and rebuild
        # the density map for each perturbed configuration.
        ambulances[i] = (x0 - delx, y0)
        mapx0 = CrashMap(ambulances, stdx, stdy)
        ambulances[i] = (x0 + delx, y0)
        mapx1 = CrashMap(ambulances, stdx, stdy)
        ambulances[i] = (x0, y0 - delx)
        mapy0 = CrashMap(ambulances, stdx, stdy)
        ambulances[i] = (x0, y0 + delx)
        mapy1 = CrashMap(ambulances, stdx, stdy)
        ambulances[i] = (x0, y0)  # restore original position
        scoreX0 = calcInt(fscore(crashmap, mapx0))
        scoreX1 = calcInt(fscore(crashmap, mapx1))
        scoreY0 = calcInt(fscore(crashmap, mapy0))
        scoreY1 = calcInt(fscore(crashmap, mapy1))
        dx = (scoreX1 - scoreX0) / (2 * delx)
        dy = (scoreY1 - scoreY0) / (2 * delx)
        deltas.append((dx, dy))
    return deltas
def update(ambulances, crashmap, rate):
    """Take one gradient-descent step, moving each ambulance in place.

    Returns the gradient list so the caller can measure convergence.
    """
    grad = calcGradient(ambulances, crashmap)
    for i, (x0, y0) in enumerate(ambulances):
        dx, dy = grad[i]
        ambulances[i] = (x0 - rate * dx, y0 - rate * dy)
    return grad
# Driver: random ambulances and crashes, then gradient descent with an
# adaptive learning rate until the gradient magnitude is tiny.
# NOTE(review): this block uses Python 2 print statements and will not run
# under Python 3 as written.
num_amb = 3
num_crashes = 4
ambulances = []
crashes = []
for i in range(0, num_amb):
    ambulances.append((random.uniform(-5,5), random.uniform(-5,5)))
for i in range(0, num_crashes):
    crashes.append((random.uniform(-5,5), random.uniform(-5,5)))
crashmap = CrashMap(crashes, stdx, stdy)
plot(crashmap, -6, 6, -6, 6)
amb_map = CrashMap(ambulances, stdx, stdy)
#gradient = calcGradient(ambulances, crashmap)
score = calcInt(fscore(amb_map, crashmap))
lastscore = 0
#print abs(lastscore - score)/score
count = 0
gradsum = 1
rate = 10
# Descend until the summed |gradient| components drop below 1e-8.
while(gradsum > 10**-8):
    #amb_map = CrashMap(ambulances, stdx, stdy)
    #plot(lambda x,y: (amb_map(x,y) - crashmap(x,y))**2, -10, 10, -10, 10)
    grads = update(ambulances, crashmap, rate)
    gradsum = sum(abs(x)+ abs(y) for x,y in grads)
    lastscore = score
    score = calcInt(fscore(CrashMap(ambulances, stdx, stdy), crashmap))
    if score < lastscore:
        # improving: grow the rate proportionally to the relative gain
        print abs(score-lastscore)/score
        rate *= 1 + 10*abs(score - lastscore)/score
    else:
        # overshot: halve the rate and keep going
        print "reset"
        rate *= 0.5
    count += 1;
    if count % 10 == 0:
        # periodic progress report
        print ambulances
        print gradsum
        print rate
    #print lastscore, score
    #print (lastscore-score)/score
plot(crashmap, -10, 10, -10, 10)
plot(CrashMap(ambulances, stdx, stdy), -10, 10, -10, 10)
plot(fscore(CrashMap(ambulances, stdx, stdy),crashmap), -10, 10, -10, 10)
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,861
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/readin.py
|
import numpy
#Finds number of lines in file
#Finds number of lines in file
def file_len(fname):
    """Return the number of lines in *fname* (0 for an empty file).

    Bug fixes vs. the original: the loop variable was unbound on an empty
    file (UnboundLocalError), and the handle relied on GC to be closed.
    """
    count = 0
    with open(fname) as f:
        for count, _line in enumerate(f, 1):
            pass
    return count
# Reads the data in "filein" into a matrix. "num" is the number of data points per line.
# For raw output (files of form "out__.txt") num = 4. The coordinates are (latitude, longitude, #casualties, hour)
# For nodelist, num = 3. Coordinates: (nodeID, longitude, latitude)
# For node-accident assignment files (files of form "node__.txt") num = 2. Coordinates: (nodeID, #accidents)
# Data is returned as a matrix with each column as a different node/accident, and each row n as all the
# nth coordinate values of the data set.
def readData(filein, num):
    """Parse whitespace-separated numeric rows of *filein* into a matrix.

    Returns a (num, n_lines) numpy array: column j holds the first *num*
    fields of line j, converted to float.
    Bug fix: the file handle is now closed via a context manager.
    """
    points = file_len(filein)
    data = numpy.zeros((num, points))
    with open(filein) as f:
        for ind, line in enumerate(f):
            fields = line.split()
            for x in range(num):
                data[x, ind] = float(fields[x])
    return data
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,862
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/PointPicker.py
|
from CrashMap import CrashMap
from MaxValue import MaxValue
import numpy as np
from Plot import plot
amb_std = 2
crash_std = 1
def MapDifference(f, h):
    """Return a function computing the pointwise difference f(x, y) - h(x, y)."""
    return lambda x, y: f(x, y) - h(x, y)
def NaiveContinuousSolution(crashes, totalPicked):
    """Greedily place *totalPicked* ambulances on the crash-density surface.

    Each pick maximizes the residual surface (crash density minus the
    coverage of the ambulances chosen so far) over a [-5, 5] grid.
    Returns a list of (x, y) locations.
    """
    crash_surface = CrashMap(crashes, crash_std, crash_std)
    xs = np.linspace(-5, 5)
    ys = np.linspace(-5, 5)
    chosen = []
    for _ in range(totalPicked):
        if chosen:
            coverage = CrashMap(chosen, amb_std, amb_std)
            residual = MapDifference(crash_surface, coverage)
        else:
            residual = crash_surface
        chosen.append(MaxValue(residual, xs, ys)[:2])
    return chosen
if __name__ == '__main__':
    # Demo: place ambulances for a hand-built crash pattern and plot the
    # crash surface, the coverage surface, and their difference.
    test_crashes = [(1.25,1.25),(1.75,1.75), (1.25,1.75), (1.75,1.25), (-2,-2)]
    # Bug fix: NaiveContinuousSolution requires a totalPicked argument;
    # the original one-argument call raised TypeError. Place 3 ambulances.
    picks = NaiveContinuousSolution(test_crashes, 3)
    ambMap = CrashMap(picks, amb_std, amb_std)
    crashMap = CrashMap(test_crashes, crash_std, crash_std)
    plot(crashMap, -5, 5, -5, 5)
    plot(ambMap, -5, 5, -5, 5)
    plot(MapDifference(crashMap, ambMap), -5, 5, -5, 5)
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,863
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/Constants.py
|
# Shared model constants (imported by the optimization scripts).
stdy = 1.0  # Gaussian kernel standard deviation in y
stdx = 1.0  # Gaussian kernel standard deviation in x
safetyWeight = 500.0  # weight of the safety term in the objective
ambCost = 5000.0      # cost per ambulance
priceWeight = 100.0   # weight of the price term in the objective
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,864
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/Plot.py
|
from CrashMap import CrashMap
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def plot(f, leftX, rightX, leftY, rightY):
    """Render a 3-D surface plot of f(x, y) over the given rectangle.

    f is any scalar function of two floats; the rectangle is sampled on a
    100x100 grid. Blocks until the matplotlib window is closed.
    """
    fig = plt.figure()
    # Bug fix: Figure.gca(projection='3d') was deprecated in matplotlib 3.4
    # and removed in 3.6; add_subplot is the supported replacement.
    ax = fig.add_subplot(projection='3d')
    vf = np.vectorize(f)
    X = np.linspace(leftX, rightX, 100)
    Y = np.linspace(leftY, rightY, 100)
    X, Y = np.meshgrid(X, Y)
    Z = vf(X, Y)
    #surf = ax.contourf(X, Y, Z)
    #plt.contourf(X, Y, Z)
    surf = ax.plot_surface(X,Y,Z, rstride=2, cstride=2, cmap=cm.coolwarm,
        linewidth=0, antialiased=False)
    plt.show()
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,865
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/NaiveContinuousComplete.py
|
from PointPicker import *
from CrashMap import CrashMap
from readin import *
from scipy.integrate import dblquad
import matplotlib.pyplot as plt
import random
# Integration bounds for the score integral.
x1 = -5
x2 = 5
y1 = -5
y2 = 5
ambCost = 2   # per-ambulance cost (the cost term in Score is commented out)
max_n = 20    # exclusive upper bound on the ambulance counts tried
def Score(crashes, ambulances):
    """Safety score of an ambulance placement.

    Reciprocal of the integrated squared gap between the crash density and
    the ambulance coverage density over the [x1, x2] x [y1, y2] box.
    Returns None when either list is empty.
    """
    if not ambulances or not crashes:
        return
    crash_surface = CrashMap(crashes, crash_std, crash_std)
    coverage = CrashMap(ambulances, amb_std, amb_std)
    gap = MapDifference(crash_surface, coverage)
    squared_gap = lambda x, y: gap(x, y) ** 2
    total_gap = dblquad(squared_gap, x1, x2, lambda x: y1, lambda x: y2)[0]
    safety = 1. / total_gap
    #return safety - len(ambulances)*ambCost
    return safety
def FindOptimumN(crashes):
    """Score the greedy placement for every ambulance count in 1..max_n-1.

    Returns a list of (n, score) pairs for plotting.
    """
    return [(n, Score(crashes, NaiveContinuousSolution(crashes, n)))
            for n in range(1, max_n)]
if __name__ == '__main__':
    # Build a random crash pattern and plot score versus ambulance count.
    crashes = []
    #crashes = [(1.25,1.25),(1.75,1.75), (1.25,1.75), (1.75,1.25), (-2,-2)]
    # 50 integer-gridded crash locations in [-2, 2] x [-2, 2].
    for i in range(50):
        crashes.append((random.randint(-2, 2), random.randint(-2, 2)))
    points = FindOptimumN(crashes)
    plt.scatter(list(map(lambda x: x[0], points)), list(map(lambda x: x[1], points)))
    plt.show()
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,866
|
lukejuusola/AoCMM2016-Traffic
|
refs/heads/master
|
/CrashMap.py
|
import math
from scipy.integrate import dblquad, IntegrationWarning
import numpy as np
import warnings
import copy
warnings.simplefilter("ignore", IntegrationWarning)
warnings.simplefilter("ignore", UserWarning)
# Bounding box over which the density is normalized.
manhattan_x = (-10, 10)
manhattan_y = (-10, 10)
#Assume dataset is in form [(x_0, y_0), ..., (x_n, y_n)] where x, y is gps coordinates
def CrashMap(dataset, stdx, stdy):
    """Build a normalized sum-of-Gaussians density surface over *dataset*.

    Each point contributes an axis-aligned Gaussian with standard
    deviations (stdx, stdy). The returned function density(x, y)
    integrates to 1 over the manhattan bounding box.
    """
    points = copy.deepcopy(dataset)  # defensive copy: caller may mutate its list
    two_var_x = 2 * stdx ** 2
    two_var_y = 2 * stdy ** 2
    def raw_density(x, y):
        total = 0.0
        for px, py in points:
            total += math.exp(-((x - px) ** 2 / two_var_x +
                                (y - py) ** 2 / two_var_y))
        return total
    # Normalization constant: integral of the raw density over the box.
    norm_const, _err = dblquad(raw_density, manhattan_x[0], manhattan_x[1],
                               lambda x: manhattan_y[0],
                               lambda x: manhattan_y[1])
    def density(x, y):
        return raw_density(x, y) / norm_const
    return density
|
{"/PointPicker.py": ["/CrashMap.py", "/MaxValue.py", "/Plot.py"], "/Plot.py": ["/CrashMap.py"], "/NaiveContinuousComplete.py": ["/PointPicker.py", "/CrashMap.py", "/readin.py"]}
|
12,937
|
modulexcite/tabularize.py
|
refs/heads/master
|
/tests.py
|
import unittest
import tabularize
class TabularizeTestCase(unittest.TestCase):
    """Unit tests for the tabularize module's table parsing."""
    def test_ignore_headers(self):
        # A header-only table yields no data rows.
        self.assertEqual(tabularize.loads('| name | surname |'), [])
    def test_whitespace(self):
        # Leading/trailing blank lines around the table are ignored.
        self.assertEqual(tabularize.loads("""
| name | surname |
| edi | budu |
"""), [{ "name": "edi", "surname": "budu"}])
    def test_dashes(self):
        # Separator rules (dashes, underscores, dots) are not table rows.
        self.assertEqual(tabularize.loads("""
------------------
| name | surname |
------------------
| edi | budu |
------------------
"""), [{ "name": "edi", "surname": "budu"}])
        self.assertEqual(tabularize.loads("""
__________________
| name | surname |
..................
| edi | budu |
__________________
"""), [{ "name": "edi", "surname": "budu"}])
    def test_multiple_lines(self):
        # Every data row after the header becomes one dict.
        self.assertEqual(tabularize.loads("""
__________________
| name | surname |
| edi | budu |
| budu | edi |
__________________
"""), [{ "name": "edi", "surname": "budu"},
{ "name": "budu", "surname": "edi"}])
    def test_comments(self):
        # Free text around the table is ignored (no pipe characters).
        self.assertEqual(tabularize.loads("""
Here is the our customer table:
| name | surname |
| edi | budu |
| budu | edi |
Thanks
"""), [{ "name": "edi", "surname": "budu"},
{ "name": "budu", "surname": "edi"}])
    def test_different_types(self):
        # return_type=list / tuple yields sequences instead of dicts.
        self.assertEqual(tabularize.loads("""
__________________
| name | surname |
| edi | budu |
| budu | edi |
__________________
""", return_type=list), [["edi", "budu"], ["budu", "edi"]])
        self.assertEqual(tabularize.loads("""
__________________
| name | surname |
| edi | budu |
| budu | edi |
__________________
""", return_type=tuple), [("edi", "budu"), ("budu", "edi")])
    def test_docstrings(self):
        # from_docstring parses the table embedded in an object's docstring.
        class _docstring:
            """
This is a docstring
Here is my test case:
------------------------------
| name | surname | full_name |
------------------------------
| edi | budu | edi budu |
| budu | edi | budu edi |
------------------------------
"""
        tabular = tabularize.from_docstring(_docstring, return_type=list)
        self.assertIsInstance(tabular, list)
        for name, surname, full_name in tabular: # testception
            self.assertEqual("%s %s" % (name, surname), full_name)
if __name__ == "__main__":
    unittest.main()  # run the test case above when executed directly
|
{"/tests.py": ["/tabularize.py"]}
|
12,938
|
modulexcite/tabularize.py
|
refs/heads/master
|
/tabularize.py
|
"""
Tabularize module
Contains the `load` and `loads` methods like json, yaml modules.
"""
def normalize_line(line):
    """Split a pipe-delimited table row into stripped cell values."""
    return [piece.strip() for piece in line.split("|")[1:-1]]
def is_valid_line(line):
    """A line belongs to the table iff it contains a pipe character."""
    return "|" in line
def loads(text, return_type=dict):
    """Loads tabular data from provided string.

    The first table row is the header; each following row becomes one
    *return_type* value (dict of header->cell, or a plain sequence).

    Bug fix: on Python 3, map() returns an iterator, so the original
    `lines.pop(0)` raised AttributeError; rows are now materialized as a
    list. The non-dict branch also returns a list (it previously returned
    a lazy map object on Python 3). Text with no table rows returns [].
    """
    rows = [normalize_line(line)
            for line in text.strip().splitlines()
            if is_valid_line(line)]
    if not rows:
        return []
    keys = rows.pop(0)
    if not issubclass(return_type, dict):
        return [return_type(row) for row in rows]
    return [return_type(zip(keys, row)) for row in rows]
def load(source):
    """Read the tabular data of a file-like object and parse it."""
    text = source.read()
    return loads(text)
def from_docstring(_object, *args, **kwargs):
    """Parse *_object*'s docstring as tabular data (extra args go to loads)."""
    docstring = _object.__doc__
    return loads(docstring, *args, **kwargs)
|
{"/tests.py": ["/tabularize.py"]}
|
12,974
|
cFireworks/kmeans
|
refs/heads/master
|
/k_means.py
|
import numpy as np
def centroids_init(X, n_clusters, mode='random'):
    """Initialize cluster centers.

    mode='random' picks n_clusters distinct rows of X uniformly;
    mode='kmeans++' picks the first center uniformly and each subsequent
    center with probability proportional to its minimum squared distance
    to the centers chosen so far.

    Bug fix: the original ended with ``return centroids-+-``, a
    SyntaxError; it now returns the centroids array.
    """
    n_samples, n_features = X.shape
    centroids = np.empty((n_clusters, n_features), dtype=X.dtype)
    if mode == 'random':
        random_state = np.random.mtrand._rand
        seeds = random_state.permutation(n_samples)[:n_clusters]
        centroids = X[seeds]
    elif mode == 'kmeans++':
        # n_local_trials = 2 + int(np.log(n_clusters))
        random_state = np.random.mtrand._rand
        # select the first center randomly
        index_0 = np.random.randint(0, n_samples)
        centroids[0] = X[index_0]
        for i in range(1, n_clusters):
            # compute the squared distances to the already-chosen centers
            dist = compute_dist(X, centroids[:i])
            min_dist = dist.min(axis=1)
            prob = min_dist / min_dist.sum()
            # sample the next center with probability proportional to distance
            index = np.random.choice(np.arange(len(prob)), p=prob.ravel())
            centroids[i] = np.copy(X[index])
    return centroids
def compute_dist(X, Y):
    """Pairwise squared Euclidean distances via matrix multiplication.

    Uses ||x||^2 + ||y||^2 - 2 x.y, clipped at 0 to absorb floating-point
    negatives. Returns an (len(X), len(Y)) array.
    """
    XX = np.sum(X*X, axis=1)[:, np.newaxis]
    YY = np.sum(Y*Y, axis=1)
    XY = np.dot(X, Y.T)
    return np.maximum(XX + YY - 2 * XY, 0)
def update_centers(X, n_clusters, labels, distances):
    """Recompute cluster centers as member means.

    Empty clusters are re-seeded with the samples farthest from their
    assigned centers (one per empty cluster) so no center is lost.
    """
    n_features = X.shape[1]
    counts = np.zeros((n_clusters,))
    new_centers = np.zeros((n_clusters, n_features))
    # Count the members of every cluster to detect empty ones.
    for k in range(n_clusters):
        counts[k] = np.count_nonzero(labels == k)
    empty_clusters = np.where(counts == 0)[0]
    if len(empty_clusters):
        # Samples sorted from farthest to nearest to their current center.
        farthest_first = distances.argsort()[::-1]
        for rank, cluster_id in enumerate(empty_clusters):
            new_centers[cluster_id] = X[farthest_first[rank]]
            counts[cluster_id] = 1
    for k in range(n_clusters):
        new_centers[k] += X[labels == k].sum(axis=0)
    new_centers /= counts[:, np.newaxis]
    return new_centers
def k_init(X, ):
    # Unfinished stub; currently unused by k_means. Returns None.
    return
def k_means(X, n_clusters, max_iter, init_mode='kmeans++', verbose=False, tol=1e-4):
    """Lloyd's k-means iteration.

    Parameters: X (n_samples, n_features) data; n_clusters; max_iter cap on
    iterations; init_mode forwarded to centroids_init; verbose prints a
    convergence message; tol threshold on the squared total center shift.
    Returns (best_labels, best_inertia, best_centers, n_iterations).
    """
    best_labels, best_inertia, best_centers = None, None, None
    # init
    n_samples = X.shape[0]
    centers = centroids_init(X, n_clusters, init_mode)
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(n_samples,), dtype=X.dtype)
    # iterations
    for i in range(max_iter):
        Y = centers.copy()
        # Squared Euclidean distance from every sample to every center.
        dist = compute_dist(X, Y)
        # E-step: index of the closest center per sample.
        labels = dist.argmin(axis=1)
        distances = dist[np.arange(dist.shape[0]), labels]
        inertia = distances.sum()
        # M-step: recompute centers (empty clusters handled inside).
        centers = update_centers(X, n_clusters, labels, distances)
        # Track the best (lowest-inertia) assignment seen so far.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Squared total shift of all centers this iteration.
        d_center = np.ravel(Y - centers, order='K')
        center_shift_total = np.dot(d_center, d_center)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        dist = compute_dist(X, best_centers)
        best_labels = dist.argmin(axis=1)
        distances = dist[np.arange(dist.shape[0]), best_labels]
        best_inertia = distances.sum()
    return best_labels, best_inertia, best_centers, i + 1
|
{"/main.py": ["/k_means.py", "/eval.py"]}
|
12,975
|
cFireworks/kmeans
|
refs/heads/master
|
/main.py
|
from sklearn.cluster import KMeans
from k_means import k_means
import numpy as np
from keras.datasets import mnist
import time
from eval import ClusterEval
# load data
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# data dimension
raw_dim = 28 * 28  # raw dimension (flattened MNIST image)
low_dim = 200  # random projection target dimension
# random_projection matrix: entries are +/-1 (sign of a fair coin),
# scaled by 1/sqrt(low_dim).
rj_matrix = 1.0 - 2.0 * (np.random.rand(raw_dim, low_dim) > 0.5)
rj_matrix = rj_matrix / np.sqrt(low_dim)
print(np.sum(rj_matrix), np.max(rj_matrix), np.min(rj_matrix))
# choose data: first train_num samples, flattened and scaled to [0, 1]
train_num = 20000
data = train_images[0: train_num].reshape(
    (train_num, raw_dim)) / 255.  # X
labels = train_labels[0: train_num]  # y
def cluster_sklearn_kmeans(data, n_cluster=10):
    """Cluster *data* with sklearn's KMeans and return the label array."""
    print("Begin sklearn clustering on raw data...")
    print("Data shape = ", data.shape)
    t0 = time.time()
    model = KMeans(n_clusters=n_cluster)
    model.fit(data)
    t1 = time.time()
    print("Clustering on raw data, using time = ", t1 - t0)
    return model.labels_
def my_cluster_my_kmeans(data, n_cluster=10):
    """Cluster *data* with the local k_means implementation; return labels."""
    print("Begin my clustering on raw data...")
    print("Data shape = ", data.shape)
    t0 = time.time()
    labels, _, _, _ = k_means(data, n_clusters=n_cluster, max_iter=300)
    t1 = time.time()
    print("Clustering on raw data, using time = ", t1 - t0)
    return labels
# Registry of the two clustering front-ends (sklearn vs. local k-means).
cluster_fn = [cluster_sklearn_kmeans, my_cluster_my_kmeans]
def cluster_on_rj_data(data, dim=100, function_name=my_cluster_my_kmeans):
    """Randomly project *data* to a lower dimension, then cluster it.

    Uses the module-level rj_matrix; function_name is the clustering
    front-end to run on the projected data. Returns cluster labels.
    """
    print("Begin clustering on low-dimension data...")
    print("Data shape = ", data.shape)
    print("First random projection...")
    t0 = time.time()
    projected = np.dot(data, rj_matrix)
    t1 = time.time()
    print("Random projection time = ", t1 - t0)
    print("Second kmeans...")
    return function_name(projected, n_cluster=10)
def cluster_on_rs_data(data, p=0.01, function_name=my_cluster_my_kmeans):
    # using random sparsification to sparse raw data, then cluster
    # @return cluster labels
    # NOTE(review): despite the name, this first applies the same random
    # projection as cluster_on_rj_data, then sparsifies the projected data.
    print("Begin clustering on sparsed data...")
    print("Data shape = ", data.shape)
    print("First random projection...")
    start = time.time()
    rj_data = np.dot(data, rj_matrix)
    end = time.time()
    print("Random projection time = ", end - start)
    print("Second random sparsification...")
    start = time.time()
    # construct random sparsification matrix
    n = rj_data.shape[0]  # the number of data points
    max_v = np.max(np.abs(rj_data))  # max absolute value
    tau = p * ((rj_data / max_v) ** 2)  # tau_ij: element importance weight
    # per-element keep probability: tau clipped below at a log-factor floor
    prob = np.zeros_like(tau, dtype=np.float32)
    sqrt_tau = 64. * np.sqrt(tau / n) * np.log(n) * np.log(n)
    prob[tau > sqrt_tau] = tau[tau > sqrt_tau]
    prob[tau <= sqrt_tau] = sqrt_tau[tau <= sqrt_tau]
    sparse_map = np.random.rand(rj_data.shape[0], rj_data.shape[1]) <= prob
    # sparsification: kept entries are rescaled by 1/prob — presumably to
    # keep the expectation unchanged; confirm against the paper/source.
    rs_data = rj_data.copy()
    index = (prob != 0.0) & (sparse_map == 1.0)
    rs_data[index] = rs_data[index] / \
        prob[index]  # data[i][j]/prob[i][j]
    rs_data[sparse_map == 0.0] = 0.0  # data[i][j] = 0.0
    end = time.time()
    print("Random projection time = ", end - start)
    # Report the fraction of zero elements before/after sparsification.
    print("Before sparsification, the number of zero-elements is:",
          np.sum(rj_data == 0.0)/(rj_data.shape[0] * rj_data.shape[1]))
    print("After sparsification, the number of zero-elements is:",
          np.sum(rs_data == 0.0)/(rs_data.shape[0] * rs_data.shape[1]))
    print("Second kmeans...")
    labels = function_name(rs_data, n_cluster=10)
    return labels
def analysis_and_plot(data, clu_labels, labels=None):
    # analyse the cluster result, CP, SP, RI, ARI, FusionMatrix
    # @params data : numpy.array
    # @params clu_labels : clustered labels
    # @params labels : real labels (optional; RI/ARI printed only when given)
    evaler = ClusterEval(data, clu_labels, labels)
    print("CP = ", evaler.CP)
    print("SP = ", evaler.SP)
    if isinstance(labels, np.ndarray):
        print("RI = ", evaler.RI)
        print("ARI = ", evaler.ARI)
    # Confusion-matrix plotting kept disabled below.
    '''
    print("Confusion matrix:")
    for row in evaler.norm_labels_grid:
        print(list(row))
    plt.figure()
    plt.imshow(evaler.norm_labels_grid)
    plt.show()
    '''
# Earlier experiment entry points, kept disabled for reference:
# print("###################################")
# print("Cluster on raw data and evaluate...")
# clu_labels = cluster_on_raw_data(data)
# analysis_and_plot(data, clu_labels, labels)
# print("###################################")
# print("###################################")
# print("my Cluster on raw data and evaluate...")
# clu_labels = my_cluster_on_raw_data(data)
# analysis_and_plot(data, clu_labels, labels)
# print("###################################")
# Active experiment: cluster the randomly sparsified data and evaluate.
print("###################################")
print("Cluster on random sparsification data and evaluate...")
clu_labels = cluster_on_rs_data(data)
analysis_and_plot(data, clu_labels, labels)
print("###################################")
|
{"/main.py": ["/k_means.py", "/eval.py"]}
|
12,976
|
cFireworks/kmeans
|
refs/heads/master
|
/eval.py
|
# -*- coding : utf-8 -*-
### some methods to evaluate kmeans clustering results
from matplotlib import pyplot as plt
import numpy as np
class ClusterEval():
    """Evaluate a clustering result.

    Computes internal metrics (CP compactness, SP separation) always, and
    external metrics (label fusion matrix, RI, ARI) when ground-truth
    labels are supplied.

    Bug fixes vs. the original:
    - calc_centers used np.mean without axis=0, collapsing each cluster
      center to a scalar instead of a per-feature mean vector;
    - the fusion grid was allocated (n_clusters, n_clusters) instead of
      (n_clusters, n_classes);
    - norm_labels_grid was computed even when labels is None, which would
      divide None by a sum.
    """
    def __init__(self, data, clu_labels, labels = None):
        ### @params data : numpy.array source data (n_samples x n_features)
        ### @params clu_labels : numpy.array cluster label per sample
        ### @params labels : numpy.array ground-truth labels, or None
        self.data = data
        self.clu_labels = clu_labels
        self.labels = labels
        self.n_data = data.shape[0]  # number of samples
        self.n_clusters = len(np.unique(clu_labels))  # number of clusters
        if isinstance(labels, np.ndarray):
            self.n_classes = len(np.unique(labels))  # real class number
        self.centers = self.calc_centers()
        self.CP = self.compactness()
        self.SP = self.separation()
        self.labels_grid = self.calc_labels_grid()  # labels fusion matrix
        if isinstance(labels, np.ndarray):
            # row-normalize so each cluster's row sums to 1
            self.norm_labels_grid = self.labels_grid / np.sum(self.labels_grid, axis = 1).reshape(-1, 1)
        else:
            self.norm_labels_grid = None
        self.RI = self.rand_index()
        self.ARI = self.adjust_rand_index()
    def calc_centers(self):
        ### per-cluster mean of member samples
        ### @return numpy.array of shape (n_clusters, n_features)
        centers = []
        for k in range(self.n_clusters):
            # axis=0: mean per feature (bug fix — original collapsed to a scalar)
            centers.append(np.mean(self.data[self.clu_labels == k, :], axis = 0))
        centers = np.array(centers)
        print(centers.shape)
        return centers
    def compactness(self):
        ### mean squared distance of members to their center, summed over clusters
        ### the lower the better
        CP = 0.0
        for k in range(self.n_clusters):
            indexes = np.array(range(self.n_data))[self.clu_labels == k]
            clu_data = self.data[indexes, :]
            center = self.centers[k]
            CP += np.mean(np.sum(np.square(clu_data - center.reshape(1, -1)), axis = 1))
        return CP
    def separation(self):
        ### between-cluster center distance (each ordered pair counted once)
        ### the higher the better
        SP = 0.0
        for k in range(self.n_clusters):
            dis2 = np.sum(np.square(self.centers - self.centers[k].reshape(1, -1)), axis = 1)
            SP += np.sum(np.sqrt(dis2))
        SP = 2 * SP / (self.n_clusters * (self.n_clusters - 1))
        return SP
    def calc_labels_grid(self):
        ### fusion matrix: rows are cluster labels, columns are real labels
        ### grid[k][j] = number of samples in cluster k with true class j
        if not isinstance(self.labels, np.ndarray):
            return None
        # bug fix: columns indexed by class, so allocate n_classes columns
        grid = np.zeros((self.n_clusters, self.n_classes))
        for k in range(self.n_clusters):
            indexes = np.array(range(self.n_data))[self.clu_labels == k]
            real_labels = self.labels[indexes]
            for j in range(self.n_classes):
                grid[k][j] = np.sum(real_labels == j)
        return grid
    def rand_index(self):
        ### agreement index from the fusion matrix (pairs in same cluster
        ### and same class, over all sample pairs); the higher the better
        if not isinstance(self.labels, np.ndarray):
            return None
        RI = 0.0
        for i in range(self.n_clusters):
            for j in range(self.n_classes):
                a = self.labels_grid[i][j]
                RI += a * (a - 1) / 2
        RI = RI / (self.n_data * (self.n_data - 1))
        return RI
    def adjust_rand_index(self):
        ### ARI = (Index - E[Index]) / (MaxIndex - E[Index])
        ### chance-corrected agreement; the higher the better
        if not isinstance(self.labels, np.ndarray):
            return None
        sum_labels = np.sum(self.labels_grid, axis = 0)      # column sums
        sum_clu_labels = np.sum(self.labels_grid, axis = 1)  # row sums
        Index = 0
        # pairs co-assigned by both the clustering and the ground truth
        for i in range(self.n_clusters):
            for j in range(self.n_classes):
                a = self.labels_grid[i][j]
                Index += a * (a - 1)/2
        # expected and maximal values of Index
        sum_a = sum([x * (x - 1) / 2 for x in sum_labels])
        sum_b = sum([x * (x - 1) / 2 for x in sum_clu_labels])
        ExpectedIndex = 2 * sum_a * sum_b / (self.n_data * (self.n_data - 1))
        MaxIndex = (sum_a + sum_b) / 2
        ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex)
        return ARI
|
{"/main.py": ["/k_means.py", "/eval.py"]}
|
12,980
|
naoya0082/-B-class_review
|
refs/heads/master
|
/customer.py
|
class Customer:
    """A customer with a name, an age, and an age-based entry fee."""
    def __init__(self, first_name, family_name, age):
        self.first_name = first_name
        self.family_name = family_name
        self.age = age
    def full_name(self):
        """Return the customer's full name, e.g. "Ken Tanaka"."""
        return f"{self.first_name} {self.family_name}"
    def entry_fee(self):
        """Return the entry fee: 1000 under 20, 1500 for 20-64, 1200 for 65+.

        Bug fix: the original assigned the fee to ``self.entry_fee``,
        shadowing this method so any second call raised
        ``TypeError: 'int' object is not callable``. The fee is now a
        local variable.
        """
        if self.age < 20:
            fee = 1000
        elif self.age < 65:
            fee = 1500
        else:
            fee = 1200
        return fee
    def info_csv(self):
        """Print the customer's record as a comma-separated line."""
        print(f"{self.full_name()}, {self.age}, {self.entry_fee()}")
if __name__ == "__main__":
    # Smoke test: build two customers and exercise full_name().
    ken = Customer(first_name="Ken", family_name="Tanaka", age=15)
    ken.full_name()  # returns "Ken Tanaka"
    tom = Customer(first_name="Tom", family_name="Ford", age=57)
    tom.full_name()  # returns "Tom Ford"
|
{"/customer2.py": ["/customer.py"]}
|
12,981
|
naoya0082/-B-class_review
|
refs/heads/master
|
/customer2.py
|
from customer import Customer
if __name__ == "__main__":
ken = Customer(first_name="Ken", family_name="Tanaka", age=15)
ken.age # 15 という値を返す
print(ken.age)
tom = Customer(first_name="Tom", family_name="Ford", age=57)
tom.age # 57 という値を返す
print(tom.age)
ieyasu = Customer(first_name="Ieyasu", family_name="Tokugawa", age=73)
ieyasu.age # 73 という値を返す
print(ieyasu.age)
|
{"/customer2.py": ["/customer.py"]}
|
12,993
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_get_config.py
|
import copy
from typing import Any, Dict, List, Optional
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
from lxml import etree
import xmltodict
import json
def pyez_get_config(
    task: Task,
    database: str = None,
    filter_xml: str = None
) -> Result:
    """Fetch a Junos device configuration and return it as a plain dict.

    database selects the config store (e.g. committed/candidate);
    filter_xml restricts the returned subtree. Both are optional.
    """
    device = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    # Build the RPC keyword arguments from whichever options were given,
    # replacing the original four-way if/else ladder.
    rpc_kwargs = {}
    if database is not None:
        rpc_kwargs['options'] = {'database': database}
    if filter_xml is not None:
        rpc_kwargs['filter_xml'] = filter_xml
    data = device.rpc.get_config(**rpc_kwargs)
    xml_text = etree.tostring(data, encoding='unicode', pretty_print=True)
    as_dict = xmltodict.parse(xml_text)
    # Round-trip through JSON to strip xmltodict's OrderedDict types.
    clean_parse = json.loads(json.dumps(as_dict))
    return Result(host=task.host, result=clean_parse)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
12,994
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/__init__.py
|
# Re-export every PyEZ task function at the package level so callers can
# write `from nornir_pyez.plugins.tasks import pyez_facts`, etc.
from .pyez_facts import pyez_facts
from .pyez_config import pyez_config
from .pyez_get_config import pyez_get_config
from .pyez_commit import pyez_commit
from .pyez_diff import pyez_diff
from .pyez_int_terse import pyez_int_terse
from .pyez_route_info import pyez_route_info
from .pyez_rpc import pyez_rpc
from .pyez_sec_nat import pyez_sec_nat_dest, pyez_sec_nat_src
from .pyez_sec_policy import pyez_sec_policy
from .pyez_sec_vpn import pyez_sec_ike, pyez_sec_ipsec
from .pyez_sec_zones import pyez_sec_zones
# Public API of nornir_pyez.plugins.tasks.
__all__ = (
    "pyez_facts",
    "pyez_config",
    "pyez_get_config",
    "pyez_diff",
    "pyez_commit",
    "pyez_int_terse",
    "pyez_route_info",
    "pyez_rpc",
    "pyez_sec_ike",
    "pyez_sec_ipsec",
    "pyez_sec_nat_dest",
    "pyez_sec_nat_src",
    "pyez_sec_policy",
    "pyez_sec_zones",
)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
12,995
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_rpc.py
|
from typing import Dict
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
def pyez_rpc(
    task: Task,
    func: str,
    extras: Dict = None,
) -> Result:
    """Invoke an arbitrary PyEZ RPC by name.

    func is the attribute name on device.rpc; extras, when truthy, is
    expanded as keyword arguments to the RPC call.
    """
    device = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    rpc_callable = getattr(device.rpc, func)
    data = rpc_callable(**extras) if extras else rpc_callable()
    return Result(host=task.host, result=data)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
12,996
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_config.py
|
import copy
from typing import Any, Dict, List, Optional
from jnpr.junos.utils.config import Config
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
def pyez_config(
    task: Task,
    payload: str = None,
    update: bool = False,
    data_format: str = 'text',
    template_path: str = None,
    template_vars: str = None,
    commit_now: bool = False
) -> Result:
    """Load (and optionally commit) configuration on a Junos device.

    Either a template_path/template_vars pair or a raw payload is loaded
    in data_format; update selects load-update vs. load-merge semantics.
    When commit_now is set, the change is committed if commit_check
    passes, otherwise rolled back.
    """
    device = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    device.timeout = 300  # allow slow loads/commits
    config = Config(device)
    config.lock()
    # Bug fix: the original never unlocked on failure — if load/commit
    # raised, the candidate configuration stayed locked on the device.
    try:
        if template_path:
            config.load(template_path=template_path,
                        template_vars=template_vars, format=data_format)
        else:
            # Collapses the original's duplicated text/other-format
            # branches: all four did config.load(payload,
            # format=data_format, update=update).
            config.load(payload, format=data_format, update=update)
        if commit_now:
            if config.commit_check():
                config.commit()
            else:
                config.rollback()
    finally:
        config.unlock()
    return Result(host=task.host, result=f"Successfully deployed config \n {payload}")
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
12,997
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_route_info.py
|
import copy
from typing import Any, Dict, List, Optional
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
from lxml import etree
import xmltodict
import json
def pyez_route_info(
    task: Task,
) -> Result:
    """Fetch the device routing table and return it as a plain dict."""
    dev = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    # RPC reply comes back as an lxml element tree.
    rpc_reply = dev.rpc.get_route_information()
    xml_text = etree.tostring(rpc_reply, encoding='unicode', pretty_print=True)
    # Round-trip through json to turn xmltodict's OrderedDicts into plain
    # dicts/lists.
    route_table = json.loads(json.dumps(xmltodict.parse(xml_text)))
    return Result(host=task.host, result=route_table)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
12,998
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/Tests/replace_config.py
|
from nornir_pyez.plugins.tasks import pyez_config
import os
from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from rich import print
from nornir.core.plugins.connections import ConnectionPluginRegister
from nornir_pyez.plugins.connections import Pyez
# Make the PyEZ connection plugin available to Nornir under the name "pyez".
ConnectionPluginRegister.register("pyez", Pyez)
# Resolve config.yml relative to this script so it works from any CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))
nr = InitNornir(config_file=f"{script_dir}/config.yml")
xml_payload = """
<configuration>
<interfaces>
<interface>
<name>lo0</name>
<unit>
<name>0</name>
<family operation="replace">
<inet>
<address>
<name>3.3.3.3/32</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
"""
# Push the XML payload to every host in the inventory.
response = nr.run(
    task=pyez_config, payload=xml_payload, data_format='xml'
)
# response is an AggregatedResult, which behaves like a dict keyed by host
# name; there is a Result object for each device in inventory.
# (Dropped the unused `devices = []` local from the original.)
for dev in response:
    print(response[dev].result)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
12,999
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/Tests/config_tester.py
|
from nornir_pyez.plugins.tasks import pyez_config
import os
from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from rich import print
from nornir.core.plugins.connections import ConnectionPluginRegister
from nornir_pyez.plugins.connections import Pyez
# Make the PyEZ connection plugin available to Nornir under the name "pyez".
ConnectionPluginRegister.register("pyez", Pyez)
# Resolve config.yml relative to this script so it works from any CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))
nr = InitNornir(config_file=f"{script_dir}/config.yml")
payload = """interfaces {
lo0 {
unit 0 {
family inet {
address 3.3.3.3/32;
}
}
}
}
"""
# Load the curly-brace text payload on every host (default format is text).
response = nr.run(
    task=pyez_config, payload=payload
)
# response is an AggregatedResult, which behaves like a dict keyed by host
# name; there is a Result object for each device in inventory.
# (Dropped the unused `devices = []` local from the original.)
for dev in response:
    print(response[dev].result)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,000
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/Tests/rpc_test.py
|
from nornir_pyez.plugins.tasks import pyez_rpc
import os
from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from rich import print
from nornir.core.plugins.connections import ConnectionPluginRegister
from nornir_pyez.plugins.connections import Pyez
# Make the PyEZ connection plugin available to Nornir under the name "pyez".
ConnectionPluginRegister.register("pyez", Pyez)
# Resolve config.yml relative to this script so it works from any CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))
nr = InitNornir(config_file=f"{script_dir}/config.yml")
# xpath = 'interfaces/interface'
# xml = '<interfaces></interfaces>'
# database = 'committed'
# Extra arguments forwarded to the RPC call by the pyez_rpc task.
extras = {
    "level-extra": "detail",
    "interface-name": "ge-0/0/0"
}
response = nr.run(
    task=pyez_rpc, func='get-interface-information', extras=extras)
# response is an AggregatedResult, which behaves like a dict keyed by host
# name; there is a Result object for each device in inventory.
# (Dropped the unused `devices = []` local from the original.)
for dev in response:
    print(response[dev].result)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,001
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/Tests/template_config.py
|
from nornir_pyez.plugins.tasks import pyez_config, pyez_diff, pyez_commit
import os
from nornir import InitNornir
from nornir.core.task import Task, Result
from nornir_utils.plugins.functions import print_result
from nornir_utils.plugins.tasks.data import load_yaml
from rich import print
from nornir.core.plugins.connections import ConnectionPluginRegister
from nornir_pyez.plugins.connections import Pyez
# Make the PyEZ connection plugin available to Nornir under the name "pyez".
ConnectionPluginRegister.register("pyez", Pyez)
# Resolve config.yml relative to this script so it works from any CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))
nr = InitNornir(config_file=f"{script_dir}/config.yml")


def template_config(task):
    """Render junos.j2 with per-host inventory data, diff, then commit."""
    # Per-host values defined in the inventory (groups.yml).
    data = {
        'dns_server': task.host['dns_server'],
        'ntp_server': task.host['ntp_server'],
    }
    print(data)
    loaded = task.run(
        task=pyez_config, template_path='junos.j2',
        template_vars=data, data_format='set')
    if loaded:
        changes = task.run(pyez_diff)
        if changes:
            task.run(task=pyez_commit)


response = nr.run(task=template_config)
print_result(response)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,002
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_commit.py
|
from jnpr.junos.utils.config import Config
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
def pyez_commit(
    task: Task,
) -> Result:
    """Commit the candidate configuration if commit-check passes,
    otherwise roll it back.

    Returns:
        Result whose message reflects what actually happened (the original
        reported "Successfully committed" even on the rollback path).
    """
    device = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    device.timeout = 300  # commits can be slow on big configs
    config = Config(device)
    if config.commit_check():
        config.commit()
        outcome = "Successfully committed"
    else:
        # Candidate failed validation: discard it and say so.
        config.rollback()
        outcome = "Commit check failed; configuration rolled back"
    # NOTE(review): unlock() without a matching lock() in this task --
    # presumably the lock is taken by an earlier pyez_config run; confirm.
    config.unlock()
    return Result(host=task.host, result=outcome)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,003
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/setup.py
|
import setuptools
# Package metadata for nornir_pyez; long description and install
# requirements are read from the repo's README and requirements files.
with open('README.md', 'r') as readme:
    long_description = readme.read()

with open("requirements.txt", "r") as reqs:
    INSTALL_REQUIRES = reqs.read().splitlines()

setuptools.setup(
    name='nornir_pyez',
    version='0.0.10',
    description='PyEZs library and plugins for Nornir',
    url='https://github.com/DataKnox/nornir_pyez',
    packages=setuptools.find_packages(),
    author='Knox Hutchinson',
    author_email='knox@knoxsdata.com',
    license='MIT',
    keywords=['ping', 'icmp', 'network'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
    ],
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=INSTALL_REQUIRES,
    # Lets Nornir auto-discover the connection plugin by entry point.
    entry_points={
        'nornir.plugins.connections': "pyez = nornir_pyez.plugins.connections:Pyez"
    },
    zip_safe=False,
)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,004
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/Tests/fulltest.py
|
from nornir_pyez.plugins.tasks import pyez_config, pyez_diff, pyez_commit
import os
from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from rich import print
from nornir.core.plugins.connections import ConnectionPluginRegister
from nornir_pyez.plugins.connections import Pyez
# Make the PyEZ connection plugin available to Nornir under the name "pyez".
ConnectionPluginRegister.register("pyez", Pyez)
# Resolve config.yml relative to this script so it works from any CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))
nr = InitNornir(config_file=f"{script_dir}/config.yml")
xml_payload = """
<configuration>
<interfaces>
<interface>
<name>lo0</name>
<unit>
<name>0</name>
<family operation="replace">
<inet>
<address>
<name>3.3.3.4/32</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
"""
def mega_runner(task):
    """Load the module-level XML payload, then commit only if it diffed."""
    pushed = task.run(
        task=pyez_config, payload=xml_payload, data_format='xml')
    if not pushed:
        return
    # Only bother committing when the load actually produced a diff.
    if task.run(task=pyez_diff):
        task.run(task=pyez_commit)


response = nr.run(task=mega_runner)
print_result(response)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,005
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/Tests/getconfig.py
|
from nornir_pyez.plugins.tasks import pyez_get_config
import os
from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from rich import print
from nornir.core.plugins.connections import ConnectionPluginRegister
from nornir_pyez.plugins.connections import Pyez
# Make the PyEZ connection plugin available to Nornir under the name "pyez".
ConnectionPluginRegister.register("pyez", Pyez)
# Resolve config.yml relative to this script so it works from any CWD.
script_dir = os.path.dirname(os.path.realpath(__file__))
nr = InitNornir(config_file=f"{script_dir}/config.yml")
# xpath = 'interfaces/interface'
# xml = '<interfaces></interfaces>'
# database = 'committed'
response = nr.run(
    task=pyez_get_config
)
# response is an AggregatedResult, which behaves like a dict keyed by host
# name; there is a Result object for each device in inventory.
# (Dropped the unused `devices = []` local from the original.)
for dev in response:
    print(response[dev].result)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,006
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/connections/__init__.py
|
from typing import Any, Dict, Optional
from jnpr.junos import Device
from nornir.core.configuration import Config
CONNECTION_NAME = "pyez"
class Pyez:
    """Nornir connection plugin that wraps a Junos PyEZ Device session."""

    def open(
        self,
        hostname: Optional[str],
        username: Optional[str],
        password: Optional[str],
        port: Optional[int],
        platform: Optional[str],
        extras: Optional[Dict[str, Any]] = None,
        configuration: Optional[Config] = None,
    ) -> None:
        """Open the PyEZ Device connection and store it on the plugin.

        `extras` may carry `ssh_config` and/or `ssh_private_key_file`,
        which are forwarded to the Device constructor.
        """
        extras = extras or {}
        if not port:
            port = 830  # NETCONF-over-SSH default
        parameters: Dict[str, Any] = {
            "host": hostname,
            "user": username,
            "password": password,
            "port": port,
            "optional_args": {},
            # dict.get() already yields None for missing keys -- replaces
            # the verbose `x["k"] if "k" in x.keys() else None` pattern.
            "ssh_config": extras.get("ssh_config"),
            "ssh_private_key_file": extras.get("ssh_private_key_file"),
        }
        connection = Device(**parameters)
        connection.open()
        self.connection = connection

    def close(self) -> None:
        """Tear down the PyEZ Device session."""
        self.connection.close()
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,007
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_facts.py
|
import copy
from typing import Any, Dict, List, Optional
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
def pyez_facts(
    task: Task,
) -> Result:
    """Collect the PyEZ device facts for the task's host.

    Args:
        task: Nornir task object carrying the target host.

    Returns:
        Result: Nornir result whose payload is the device facts.
    """
    conn = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    return Result(host=task.host, result=conn.facts)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,008
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_sec_nat.py
|
import copy
from typing import Any, Dict, List, Optional
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
from lxml import etree
import xmltodict
import json
def pyez_sec_nat_dest(
    task: Task,
    rule: str = None
) -> Result:
    """Fetch destination-NAT rule-set information from the device.

    Args:
        task: Nornir task object carrying the target host.
        rule: Optional rule-set name; when omitted, all rule sets are fetched.

    Returns:
        Result: Nornir result whose payload is the RPC reply as a plain dict.
    """
    device = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    # Default to all rule sets unless the caller named a specific one.
    if rule is None:
        reply = device.rpc.get_destination_nat_rule_sets_information(all=True)
    else:
        reply = device.rpc.get_destination_nat_rule_sets_information(rule_name=rule)
    # XML element -> text -> OrderedDict -> plain JSON-compatible dict.
    xml_text = etree.tostring(reply, encoding='unicode', pretty_print=True)
    clean_parse = json.loads(json.dumps(xmltodict.parse(xml_text)))
    return Result(host=task.host, result=clean_parse)
def pyez_sec_nat_src(
    task: Task,
    rule: str = None
) -> Result:
    """Fetch source-NAT rule-set information from the device.

    Args:
        task: Nornir task object carrying the target host.
        rule: Optional rule-set name; when omitted, all rule sets are fetched.

    Returns:
        Result: Nornir result whose payload is the RPC reply as a plain dict.
    """
    device = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    # Default to all rule sets unless the caller named a specific one.
    if rule is None:
        reply = device.rpc.get_source_nat_rule_sets_information(all=True)
    else:
        reply = device.rpc.get_source_nat_rule_sets_information(rule_name=rule)
    # XML element -> text -> OrderedDict -> plain JSON-compatible dict.
    xml_text = etree.tostring(reply, encoding='unicode', pretty_print=True)
    clean_parse = json.loads(json.dumps(xmltodict.parse(xml_text)))
    return Result(host=task.host, result=clean_parse)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,009
|
johnrdowson/nornir_pyez
|
refs/heads/main
|
/nornir_pyez/plugins/tasks/pyez_diff.py
|
import copy
from typing import Any, Dict, List, Optional
from jnpr.junos.utils.config import Config
from nornir.core.task import Result, Task
from nornir_pyez.plugins.connections import CONNECTION_NAME
def pyez_diff(
    task: Task
) -> Result:
    """Return the candidate-vs-running configuration diff for the host.

    Args:
        task: Nornir task object carrying the target host.

    Returns:
        Result: Nornir result whose payload is the diff text (or None when
        the candidate matches the running configuration).
    """
    dev = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    # Diffs on large configurations can be slow; allow up to 5 minutes.
    dev.timeout = 300
    diff_text = Config(dev).diff()
    return Result(host=task.host, result=diff_text)
|
{"/nornir_pyez/plugins/tasks/pyez_get_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/__init__.py": ["/nornir_pyez/plugins/tasks/pyez_facts.py", "/nornir_pyez/plugins/tasks/pyez_config.py", "/nornir_pyez/plugins/tasks/pyez_get_config.py", "/nornir_pyez/plugins/tasks/pyez_commit.py", "/nornir_pyez/plugins/tasks/pyez_diff.py", "/nornir_pyez/plugins/tasks/pyez_route_info.py", "/nornir_pyez/plugins/tasks/pyez_rpc.py", "/nornir_pyez/plugins/tasks/pyez_sec_nat.py"], "/nornir_pyez/plugins/tasks/pyez_rpc.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_config.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_route_info.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/replace_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/config_tester.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/rpc_test.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/template_config.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_commit.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/Tests/fulltest.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/Tests/getconfig.py": ["/nornir_pyez/plugins/tasks/__init__.py", "/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_facts.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_sec_nat.py": ["/nornir_pyez/plugins/connections/__init__.py"], "/nornir_pyez/plugins/tasks/pyez_diff.py": ["/nornir_pyez/plugins/connections/__init__.py"]}
|
13,017
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/assignment9.py
|
import pandas as pd
from flask import render_template
import numpy as np
import plotly.express as px
import plotly
import json
from sklearn.model_selection import train_test_split
from customutils import CustomUtils
class Assignment9:
    """Telco customer-churn modelling.

    Merges the churn/customer/internet CSVs, cleans and one-hot encodes the
    features, fits logistic-regression models (statsmodels GLM and sklearn),
    and renders the results template with a Plotly coefficient chart and the
    train/test accuracy figures.
    """

    @staticmethod
    def binary_map(x):
        """Map a Yes/No Series to 1/0 (anything else becomes NaN)."""
        return x.map({'Yes': 1, "No": 0})

    @staticmethod
    def _append_dummies(dataset, column, drop_label):
        """One-hot encode ``column`` (prefixed with the column name), drop the
        redundant ``drop_label`` dummy level, and append the remaining dummy
        columns to ``dataset``. Returns the widened DataFrame."""
        dummies = pd.get_dummies(dataset[column], prefix=column)
        dummies = dummies.drop([drop_label], axis=1)
        return pd.concat([dataset, dummies], axis=1)

    @staticmethod
    def process():
        """Build the churn model end-to-end and render ``assignment9.html.j2``.

        Returns:
            The rendered template containing the coefficient bar chart (as
            Plotly JSON) and the accuracy values dict.
        """
        churn_data = CustomUtils.read_file_and_return_df('9_churn_data.csv')
        customer_data = CustomUtils.read_file_and_return_df('9_customer_data.csv')
        internet_data = CustomUtils.read_file_and_return_df('9_internet_data.csv')
        # merge churn data with customer data, then with internet usage data
        df_1 = pd.merge(churn_data, customer_data, how='inner', on='customerID')
        dataset = pd.merge(df_1, internet_data, how='inner', on='customerID')
        # clean the data: blank TotalCharges become NaN, then coerce to numeric
        dataset['TotalCharges'] = dataset['TotalCharges'].replace(' ', np.nan)
        dataset['TotalCharges'] = pd.to_numeric(dataset['TotalCharges'])
        # Impute missing TotalCharges from the median charges/monthly ratio.
        # BUGFIX: the original tested `== np.nan`, which is always False (NaN
        # never equals itself), so the imputation silently never happened.
        value = (dataset['TotalCharges'] / dataset['MonthlyCharges']).median() * dataset['MonthlyCharges']
        dataset['TotalCharges'] = value.where(dataset['TotalCharges'].isna(), other=dataset['TotalCharges'])
        # encode the binary Yes/No columns as 1/0
        varlist = ['PhoneService', 'PaperlessBilling', 'Churn', 'Partner', 'Dependents']
        dataset[varlist] = dataset[varlist].apply(Assignment9.binary_map)
        # one-hot encoding for the multi-valued categorical columns
        dummy1 = pd.get_dummies(dataset[['Contract', 'PaymentMethod', 'gender', 'InternetService']], drop_first=True)
        dataset = pd.concat([dataset, dummy1], axis=1)
        # The service columns all follow the same pattern: one-hot encode and
        # drop the redundant "No ... service" level (implied by the others).
        dataset = Assignment9._append_dummies(dataset, 'MultipleLines', 'MultipleLines_No phone service')
        for col in ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
                    'TechSupport', 'StreamingTV', 'StreamingMovies']:
            dataset = Assignment9._append_dummies(dataset, col, f'{col}_No internet service')
        # drop the original columns for which dummies have been created
        dataset = dataset.drop(
            ['Contract', 'PaymentMethod', 'gender', 'MultipleLines', 'InternetService', 'OnlineSecurity',
             'OnlineBackup', 'DeviceProtection',
             'TechSupport', 'StreamingTV', 'StreamingMovies'], axis=1)
        # drop any rows whose TotalCharges is still missing after imputation
        dataset = dataset[~np.isnan(dataset['TotalCharges'])]
        # define feature and target
        X = dataset.drop(['Churn', 'customerID'], axis=1)
        y = dataset['Churn']
        # Splitting the data into train and test
        X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=100)
        # Feature Scaling: fit on the training split only
        from sklearn.preprocessing import StandardScaler
        scaler = StandardScaler()
        X_train[['tenure', 'MonthlyCharges', 'TotalCharges']] = scaler.fit_transform(
            X_train[['tenure', 'MonthlyCharges', 'TotalCharges']])
        # Logistic regression via statsmodels GLM (kept for the summary output)
        import statsmodels.api as sm
        logm1 = sm.GLM(y_train, (sm.add_constant(X_train)), family=sm.families.Binomial())
        logm1.fit().summary()
        # sklearn logistic regression for the coefficient chart
        from sklearn.linear_model import LogisticRegression
        logreg = LogisticRegression(max_iter=1000)
        logreg.fit(X_train, y_train)
        # display the coefficients as a bar chart (Plotly JSON for the template)
        feature_cols = X.columns
        coeffs = pd.DataFrame(list(zip(feature_cols, logreg.coef_[0])), columns=['feature', 'coef'])
        coeffs.set_index('feature', inplace=True)
        fig = px.bar(coeffs.sort_values('coef', ascending=False), height=600)
        graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
        # refit the GLM with a constant and predict on the train set
        X_train_sm = sm.add_constant(X_train[feature_cols])
        logm2 = sm.GLM(y_train, X_train_sm, family=sm.families.Binomial())
        res = logm2.fit()
        res.summary()
        # Getting the predicted probabilities on the train set
        y_train_pred = res.predict(X_train_sm)
        y_train_pred_final = pd.DataFrame({'Churn': y_train.values, 'Churn_Prob': y_train_pred})
        y_train_pred_final['CustID'] = y_train.index
        # Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
        y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
        from sklearn import metrics
        accuracy_value = metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted)
        # Making predictions on the test set.
        # BUGFIX: use transform() with the train-fitted scaler; the original
        # called fit_transform(), refitting the scaler on test data (leakage).
        X_test[['tenure', 'MonthlyCharges', 'TotalCharges']] = scaler.transform(
            X_test[['tenure', 'MonthlyCharges', 'TotalCharges']])
        X_test = X_test[feature_cols]
        X_test_sm = sm.add_constant(X_test)
        y_test_pred = res.predict(X_test_sm)
        # Converting y_pred (a Series) to a dataframe.
        # BUGFIX: name the probability column 'Churn_Prob' here; the original
        # left it named 0, so the reindex below produced an all-NaN column and
        # every test-set prediction collapsed to 0.
        y_pred_1 = pd.DataFrame({'Churn_Prob': y_test_pred})
        # Converting y_test to dataframe, keeping CustID from the index
        y_test_df = pd.DataFrame(y_test)
        y_test_df['CustID'] = y_test_df.index
        y_pred_1.reset_index(drop=True, inplace=True)
        y_test_df.reset_index(drop=True, inplace=True)
        y_pred_final = pd.concat([y_test_df, y_pred_1], axis=1)
        y_pred_final = y_pred_final.reindex(['CustID', 'Churn', 'Churn_Prob'], axis=1)
        # 0.42 is the classification threshold chosen for the test set
        y_pred_final['final_predicted'] = y_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.42 else 0)
        baseline_accuracy = metrics.accuracy_score(y_pred_final.Churn, y_pred_final.final_predicted)
        accuracy_improvement = accuracy_value - baseline_accuracy
        values = {
            'accuracy_value': accuracy_value,
            'baseline_accuracy': baseline_accuracy,
            'accuracy_improvement': accuracy_improvement
        }
        return render_template("assignment9.html.j2", graphJSON=graphJSON, values=values)
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,018
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/customutils.py
|
from sklearn import tree
import pydotplus
import base64
from IPython.display import Image
import os
from pathlib import Path
import pandas as pd
class CustomUtils:
    """Shared helpers used by the assignment modules: decision-tree image
    rendering and CSV loading from the package's ``data`` directory."""

    @staticmethod
    def get_base64_encoded_image(decision_tree, columns):
        """Render a fitted decision tree to a PNG and return it base64-encoded.

        Args:
            decision_tree: A fitted sklearn decision-tree estimator.
            columns: Feature names to label the tree nodes with.

        Returns:
            str: The PNG image encoded as a base64 UTF-8 string (suitable for
            embedding in an ``<img src="data:...">`` tag).
        """
        dot_data = tree.export_graphviz(decision_tree, out_file=None, feature_names=columns, impurity=False,
                                        filled=True,
                                        proportion=True,
                                        rounded=True)
        graph = pydotplus.graph_from_dot_data(dot_data)
        image = Image(graph.create_png())
        return base64.b64encode(image.data).decode("utf-8")

    @staticmethod
    def read_file_and_return_df(filename):
        """Load a CSV from the ``data`` directory next to this module.

        BUGFIX: the original built the path with a literal placeholder and
        ignored the ``filename`` argument entirely; join the requested file
        name into the path instead.

        Args:
            filename: CSV file name relative to the ``data`` directory.

        Returns:
            pandas.DataFrame: The parsed CSV contents.
        """
        filepath = os.path.join(Path(__file__).parent, 'data', filename)
        return pd.read_csv(filepath)
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,019
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/assignment10.py
|
from flask import render_template
from flask import request
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from customutils import CustomUtils
class Assignment10:
    """Titanic survival prediction with a K-nearest-neighbours classifier.

    ``process`` trains the model and reports its test accuracy; ``predict``
    answers a single what-if query posted from the web form.
    """

    @staticmethod
    def gender_map(x):
        # Encode the Sex column numerically: male -> 1, female -> 0.
        return x.map({'male': 1, "female": 0})

    @staticmethod
    def process():
        """Train a KNN (k=21) on Pclass/Sex/Age and render its accuracy."""
        passengers = CustomUtils.read_file_and_return_df('10_titanic.csv');
        feature_cols = ['Pclass', 'Sex', 'Age']
        # passengers.head()
        passengers[['Sex']] = passengers[['Sex']].apply(Assignment10.gender_map)
        # passengers.head()
        # there are some NaN values in age, we use the mean age there
        mean_age = passengers['Age'].mean()
        passengers['Age'].fillna(value=mean_age, inplace=True)
        passengers.head()
        # mean_age
        X = passengers[feature_cols]
        y = passengers['Survived']
        # fixed random_state keeps the reported accuracy reproducible
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=90)
        knn = KNeighborsClassifier(n_neighbors=21)
        knn.fit(X_train, y_train)
        # predict
        y_pred = knn.predict(X_test)
        model_accuracy = metrics.accuracy_score(y_test, y_pred)
        # model_accuracy
        return render_template("assignment10.html.j2", model_accuracy=model_accuracy)

    # @TODO figure out a better way to handle the duplicate code
    @staticmethod
    def predict():
        """Predict survival for the age/gender/pclass posted in the form and
        return an HTML snippet describing the prediction.

        NOTE(review): form values arrive as strings; the retraining below
        duplicates ``process`` (see the @TODO above).
        """
        data = request.form
        age = data.get("age")
        gender = data.get("gender")
        pclass = data.get("pclass")
        # put some default values in case user didnt pass anything
        # NOTE(review): defaults are set as ints (1, 2) while submitted values
        # stay strings; the string comparisons further down ("1", "2") will not
        # match these int defaults — confirm intended behaviour.
        if gender == "":
            gender = 1
        if pclass == "":
            pclass = 2
        import os
        from pathlib import Path
        filepath = os.path.join(Path(__file__).parent, 'data', '.')
        passengers = pd.read_csv(f'{filepath}/10_titanic.csv')
        feature_cols = ['Pclass', 'Sex', 'Age']
        # passengers.head()
        passengers[['Sex']] = passengers[['Sex']].apply(Assignment10.gender_map)
        # passengers.head()
        # there are some NaN values in age, we use the mean age there
        mean_age = passengers['Age'].mean()
        passengers['Age'].fillna(value=mean_age, inplace=True)
        if age == "":
            age = str(round(mean_age, 2))
        # passengers.head()
        # mean_age
        X = passengers[feature_cols]
        y = passengers['Survived']
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=90)
        knn = KNeighborsClassifier(n_neighbors=21)
        knn.fit(X_train, y_train)
        # predict
        # NOTE(review): y_pred is computed but unused in this method.
        y_pred = knn.predict(X_test)
        survived = knn.predict([[pclass, gender, age]])[0]
        survivedString = "Died"
        if survived == 1:
            survivedString = "Survived"
        genderString = "female"
        if gender == "1":
            genderString = "male"
        pclassString = "Third"
        if pclass == "1":
            pclassString = "First"
        elif pclass == "2":
            pclassString = "Second"
        return f'a person with <b>{genderString}</b> gender of <b>{age}</b> age in <b>{pclassString}</b> class would ' \
               f'have <b>{survivedString}</b> according to knn '
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,020
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/assignment16.py
|
from flask import render_template
import pandas as pd
import numpy as np
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from customutils import CustomUtils
import warnings; warnings.simplefilter('ignore')
class Assignment16:
    """Movie recommender demo: IMDB-style weighted-rating charts plus a
    TF-IDF content-based recommender over the MovieLens "links_small" subset.
    """

    @staticmethod
    def process():
        """Build the rating charts and content-based recommendations, then
        render ``assignment16.html.j2``."""
        md = CustomUtils.read_file_and_return_df('16_movies_metadata.csv')
        # md.head()
        # fill the null values with []; 'genres' is a stringified list of
        # dicts, which literal_eval turns back into Python objects
        md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(
            lambda x: [i['name'] for i in x] if isinstance(x, list) else []
        )
        # get the vote counts and averages for all movies
        vote_counts = md[md['vote_count'].notnull()]['vote_count'].astype('int')
        vote_averages = md[md['vote_average'].notnull()]['vote_average'].astype('int')
        vote_mean = vote_averages.mean()
        # vote_mean
        # only movies in the top 5% by vote count qualify for the chart
        top_vote_counts = vote_counts.quantile(0.95)
        # top_vote_counts
        # get release year for all movies in a new column
        # NOTE(review): `x != np.nan` is always True (NaN never equals itself),
        # so the else branch is dead; NaT dates stringify to 'NaT' here.
        md['year'] = pd.to_datetime(md['release_date'], errors='coerce').apply(
            lambda x: str(x).split('-')[0] if x != np.nan else np.nan
        )
        # get the above average movies list
        qualified = md[(md['vote_count'] >= top_vote_counts) & (md['vote_count'].notnull()) & (md['vote_average'].notnull())][
            ['title', 'year', 'vote_count', 'vote_average', 'popularity', 'genres']]
        qualified['vote_count'] = qualified['vote_count'].astype('int')
        qualified['vote_average'] = qualified['vote_average'].astype('int')
        # qualified.shape
        # get the top 250 movies by vote average
        qualified = qualified.sort_values('vote_average', ascending=False).head(250)
        # qualified.head(15)
        # explode the genres list: one (movie, genre) row per genre
        s = md.apply(lambda x: pd.Series(x['genres']), axis=1).stack().reset_index(level=1, drop=True)
        s.name = 'genre'
        gen_md = md.drop('genres', axis=1).join(s)
        best_romantic_movies = Assignment16.build_chart(gen_md, 'Romance').head(15)
        links_small = CustomUtils.read_file_and_return_df('16_links_small.csv')
        links_small = links_small[links_small['tmdbId'].notnull()]['tmdbId'].astype('int')
        # NOTE(review): hard-coded row labels dropped here — presumably rows
        # with malformed ids in this particular dataset; verify against data.
        md = md.drop([19730, 29503, 35587])
        md['id'] = md['id'].astype('int')
        # restrict to the "small" subset of movies for the TF-IDF step
        smd = md[md['id'].isin(links_small)]
        # smd.shape
        smd['tagline'] = smd['tagline'].fillna('')
        smd['description'] = smd['overview'] + smd['tagline']
        smd['description'] = smd['description'].fillna('')
        # vectorise the descriptions and compute pairwise cosine similarity
        tf = TfidfVectorizer(analyzer='word')
        tfidf_matrix = tf.fit_transform(smd['description'])
        # tfidf_matrix.shape
        cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
        # cosine_sim[0]
        smd = smd.reset_index()
        titles = smd['title']
        # title -> row-index lookup used by get_recommendations
        indices = pd.Series(smd.index, index=smd['title'])
        movie_to_search = 'Batman Begins'
        recommendations = Assignment16.get_recommendations(indices, cosine_sim, titles, movie_to_search).head(10)
        return render_template("assignment16.html.j2", vote_counts=vote_counts, vote_averages=vote_averages,
                               vote_mean=vote_mean, best_romantic_movies=best_romantic_movies.to_html(classes='table table-striped', index=False, justify='center'),
                               movie_to_search=movie_to_search, recommendations=recommendations.to_html(classes='table table-striped', index=False, justify='center'),
                               sample_dataset=md.head(5).to_html(classes='table table-striped', index=False, justify='center')
                               )

    @staticmethod
    def build_chart(gen_md, genre, percentile=0.85):
        """Return the top-250 movies of ``genre`` ranked by the IMDB weighted
        rating wr = v/(v+m)*R + m/(m+v)*C, where v is the movie's vote count,
        R its average, m the qualifying-vote-count quantile and C the mean
        rating across the genre.

        Args:
            gen_md: DataFrame with one row per (movie, genre) pair.
            genre: Genre name to filter on.
            percentile: Vote-count quantile a movie must reach to qualify.
        """
        df = gen_md[gen_md['genre'] == genre]
        vote_counts = df[df['vote_count'].notnull()]['vote_count'].astype('int')
        vote_averages = df[df['vote_average'].notnull()]['vote_average'].astype('int')
        C = vote_averages.mean()
        m = vote_counts.quantile(percentile)
        qualified = df[(df['vote_count'] >= m) & (df['vote_count'].notnull()) & (df['vote_average'].notnull())][
            ['title', 'year', 'vote_count', 'vote_average', 'popularity']]
        qualified['vote_count'] = qualified['vote_count'].astype('int')
        qualified['vote_average'] = qualified['vote_average'].astype('int')
        qualified['wr'] = qualified.apply(
            lambda x: (x['vote_count'] / (x['vote_count'] + m) * x['vote_average']) + (m / (m + x['vote_count']) * C),
            axis=1)
        qualified = qualified.sort_values('wr', ascending=False).head(250)
        return qualified

    @staticmethod
    def get_recommendations(indices, cosine_sim, titles, title):
        """Return the 30 titles most similar to ``title`` (by cosine
        similarity of TF-IDF description vectors) as a one-column DataFrame.

        Args:
            indices: Series mapping title -> row index in ``cosine_sim``.
            cosine_sim: Pairwise similarity matrix.
            titles: Series of titles aligned with ``cosine_sim`` rows.
            title: The title to find neighbours for.
        """
        idx = indices[title]
        sim_scores = list(enumerate(cosine_sim[idx]))
        sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
        # skip position 0: a movie is always most similar to itself
        sim_scores = sim_scores[1:31]
        movie_indices = [i[0] for i in sim_scores]
        return titles.iloc[movie_indices].to_frame()
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,021
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/assignment17.py
|
from flask import render_template
import numpy as np
import pandas as pd
import statsmodels.api as sm
import plotly.express as px
import plotly
import json
from customutils import CustomUtils
import warnings; warnings.simplefilter('ignore')
class Assignment17:
    """Time-series forecasting of monthly bus ridership with a seasonal
    ARIMA (SARIMAX) model, rendered as Plotly charts."""

    @staticmethod
    def process():
        """Clean the ridership CSV, plot the series and monthly variation,
        fit a SARIMAX forecast and render ``assignment17.html.j2``."""
        df = CustomUtils.read_file_and_return_df('17_monthly_ridership.csv')
        # df.head()
        # rename the column names
        df.columns = ["month", "average_monthly_ridership"]
        # df.head()
        # data cleanup: the source file carries a trailing ' n=114' footer row
        df['average_monthly_ridership'].unique()
        df = df.drop(df.index[df['average_monthly_ridership'] == ' n=114'])
        # correct the column dtypes
        df['average_monthly_ridership'] = df['average_monthly_ridership'].astype(np.int32)
        df['month'] = pd.to_datetime(df['month'], format='%Y-%m')
        # df.dtypes
        average_rider_line_chart = px.line(df, x="month", y="average_monthly_ridership", title='Average monthly bus riders in Oergon', height=600)
        # change the month to numeric format so we have monthly data rather than yearly
        to_plot_monthly_variation = df
        mon = df['month']
        temp = pd.DatetimeIndex(mon)
        month = pd.Series(temp.month)
        to_plot_monthly_variation = to_plot_monthly_variation.drop(['month'], axis=1)
        to_plot_monthly_variation = to_plot_monthly_variation.join(month)
        to_plot_monthly_variation.head()
        average_rider_bar_chart = px.bar(to_plot_monthly_variation, x='month', y='average_monthly_ridership', height=600)
        # observations = ridership declines in july and august
        # Applying Seasonal ARIMA model to forcast the data
        # (0,1,0) non-seasonal differencing with a yearly (period-12) seasonal term
        mod = sm.tsa.SARIMAX(df['average_monthly_ridership'], trend='n', order=(0, 1, 0), seasonal_order=(1, 1, 1, 12))
        results = mod.fit()
        # print(results.summary())
        # dynamic in-sample/out-of-sample forecast overlaid on the observed data
        df['forecast'] = results.predict(start=102, end=120, dynamic=True)
        rider_forecast = px.line(df, x='month', y=['average_monthly_ridership', 'forecast'], height=600)
        return render_template("assignment17.html.j2",
                               sample_dataset=df.head(5).to_html(classes='table table-striped', index=False, justify='center'),
                               average_rider_line_json=json.dumps(average_rider_line_chart, cls=plotly.utils.PlotlyJSONEncoder),
                               average_rider_bar_json=json.dumps(average_rider_bar_chart, cls=plotly.utils.PlotlyJSONEncoder),
                               rider_forecast_json=json.dumps(rider_forecast, cls=plotly.utils.PlotlyJSONEncoder)
                               )

    @staticmethod
    def get_recommendations(indices, cosine_sim, titles, title):
        """Return the 30 most similar titles as a one-column DataFrame.

        NOTE(review): appears copied verbatim from Assignment16 and is not
        referenced anywhere in this class — candidate for removal.
        """
        idx = indices[title]
        sim_scores = list(enumerate(cosine_sim[idx]))
        sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
        # skip position 0: an item is always most similar to itself
        sim_scores = sim_scores[1:31]
        movie_indices = [i[0] for i in sim_scores]
        return titles.iloc[movie_indices].to_frame()
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,022
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/assignment12.py
|
from flask import render_template
import plotly.express as px
import plotly
import json
from sklearn.model_selection import train_test_split
from sklearn import tree
from customutils import CustomUtils
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from IPython.display import Image
from six import StringIO
from sklearn.tree import export_graphviz
import pydot
import pandas as pd
class Assignment12:
    """Employee-attrition analysis with a decision tree: per-feature bar
    charts, a GridSearchCV-tuned classifier, and a rendered tree image."""

    @staticmethod
    def process():
        """Train and tune a decision tree on the employee data and render
        ``assignment12.html.j2`` with charts, scores and the tree image."""
        df = CustomUtils.read_file_and_return_df('11b_employee.csv')
        # df.head()
        # pd.set_option("display.float_format", "{:.2f}".format)
        # df.describe()
        # columns that are constant for every employee carry no signal
        df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis="columns", inplace=True)
        # df.head()
        label = LabelEncoder()
        df['Attrition'] = label.fit_transform(df['Attrition'])
        # df.head()
        # create a list of categorical columns, any "object" (str) columns with less than 10 unique values should be fit
        categorical_cols = []
        unique_vals = []
        for column in df.columns:
            if df[column].dtype == object and len(df[column].unique()) <= 10:
                categorical_cols.append(column)
                unique_vals.append(", ".join(df[column].unique()))
        categories = pd.DataFrame.from_dict({
            'Category': categorical_cols,
            'Unique Values': unique_vals
        })
        # categories
        # df.hist(edgecolor='black', linewidth=1.2, figsize=(20, 20));
        # keep only the categorical columns plus the target from here on
        categorical_cols.append('Attrition')
        df = df[categorical_cols]
        df.head()
        categorical_cols.remove('Attrition')
        barChartJsons = []
        # plot how every feature correlates with the "target"
        for i, column in enumerate(categorical_cols, 1):
            # print(df[column].value_counts())
            fig = px.bar(df, x=f'{column}', y='Attrition', height=600, color=f'{column}')
            chartJson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
            barChartJsons.append(chartJson)
            # fig.show()
        # hand-written takeaways displayed alongside the charts
        conclusions = pd.DataFrame.from_dict({
            'Category': [
                'BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus', 'OverTime'
            ],
            'Inference': [
                'The workers who travel rarely are more likely to quit.',
                'The worker in Research & Development are more likely to quit then the workers on other departement.',
                'The workers with Life Sciences and Medical degrees are more likely to quit then employees from other fields of educations.',
                'Male employees are more likely to quit.',
                'The workers in Laboratory Technician, Sales Executive, and Research scientist are more likely to quit the workers in other positions.',
                'Single employees are more likely to quit.',
                'The workers who work more hours are more likely to quit.'
            ],
        })
        # encode all the categorical columns
        label = LabelEncoder()
        for column in categorical_cols:
            df[column] = label.fit_transform(df[column])
        # df.head()
        X = df.drop('Attrition', axis=1)
        y = df.Attrition
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
        # baseline tree with default hyper-parameters
        tree_clf = DecisionTreeClassifier(random_state=42)
        tree_clf.fit(X_train, y_train)
        random_train_scores = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=True)
        # random_test_scores = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=False)
        # hyper-parameter grid for the tuned tree
        params = {
            "criterion": ("gini", "entropy"),
            "splitter": ("best", "random"),
            "max_depth": (list(range(1, 20))),
            "min_samples_split": [2, 3, 4],
            "min_samples_leaf": list(range(1, 20)),
        }
        tree_clf = DecisionTreeClassifier(random_state=42)
        tree_cv = GridSearchCV(tree_clf, params, scoring="accuracy", n_jobs=-1, verbose=1, cv=3)
        tree_cv.fit(X_train, y_train)
        best_params = tree_cv.best_params_
        # print(f"Best paramters: {best_params}")
        # refit a fresh tree with the best parameters found
        tree_clf = DecisionTreeClassifier(**best_params)
        tree_clf.fit(X_train, y_train)
        # bestparams_train_score = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=True)
        # bestparams_test_score = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=False)
        features = list(df.columns)
        features.remove("Attrition")
        # render the tuned tree to Graphviz dot, then PNG
        dot_data = StringIO()
        export_graphviz(tree_clf, out_file=dot_data, feature_names=features, filled=True)
        graph = pydot.graph_from_dot_data(dot_data.getvalue())
        # NOTE(review): this Image(...) result is discarded; the displayed
        # image comes from CustomUtils.get_base64_encoded_image below.
        Image(graph[0].create_png())
        tree2 = CustomUtils.get_base64_encoded_image(tree_clf, X_train.columns)
        return render_template("assignment12.html.j2", barChartJsons=barChartJsons,
                               categories=categories.to_html(classes='table table-striped', index=False, justify='center'),
                               conclusions=conclusions.to_html(classes='table table-striped', index=False, justify='center'),
                               random_train_scores=pd.DataFrame.from_dict(random_train_scores).to_html(classes='table table-striped', index=False, justify='center'),
                               tree2=tree2
                               # random_test_scores=random_test_scores,
                               # best_params=pd.DataFrame.from_dict(best_params).to_html(classes='table table-striped', index=False, justify='center'),
                               # bestparams_train_score = bestparams_train_score, bestparams_test_score=bestparams_test_score
                               )

    @staticmethod
    def get_score(clf, X_train, y_train, X_test, y_test, train=True):
        """Score ``clf`` on the train split (``train=True``) or test split.

        Returns a dict with 'accuracy_score', 'confusion_matrix' and
        'classification_report'.

        NOTE(review): the report type differs by branch — a plain dict for
        the train branch, a DataFrame for the test branch — callers that
        tabulate the result should be aware of this inconsistency.
        """
        if train:
            pred = clf.predict(X_train)
            clf_report = classification_report(y_train, pred, output_dict=True)
            accuracy = f'{accuracy_score(y_train, pred) * 100:.2f}%'
            confusion = f'{confusion_matrix(y_train, pred)}'
            print("Train Result:\n================================================")
            print(f"Accuracy Score: {accuracy_score(y_train, pred) * 100:.2f}%")
            print("_______________________________________________")
            print(f"CLASSIFICATION REPORT:\n{clf_report}")
            print("_______________________________________________")
            print(f"Confusion Matrix: \n {confusion_matrix(y_train, pred)}\n")
        elif not train:
            pred = clf.predict(X_test)
            clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
            accuracy = f'{accuracy_score(y_test, pred) * 100:.2f}%'
            confusion = f'{confusion_matrix(y_test, pred)}'
            # print("Test Result:\n================================================")
            # print(f"Accuracy Score: {accuracy_score(y_test, pred) * 100:.2f}%")
            # print("_______________________________________________")
            # print(f"CLASSIFICATION REPORT:\n{clf_report}")
            # print("_______________________________________________")
            # print(f"Confusion Matrix: \n {confusion_matrix(y_test, pred)}\n")
        return {
            'accuracy_score': accuracy,
            'confusion_matrix': confusion,
            'classification_report': clf_report
        }
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,023
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/assignment11.py
|
from flask import render_template
import plotly.express as px
import plotly
import pandas as pd
import json
from sklearn.model_selection import train_test_split
from sklearn import tree
from customutils import CustomUtils
class Assignment11:
    """Decision-tree analysis of the bank term-deposit dataset.

    Cleans the raw bank data, one-hot encodes the categorical columns,
    trains decision trees of several depths, and renders the score table
    plus two tree visualisations into the assignment11 template.
    """

    @staticmethod
    def process():
        """Build the page: job bar chart, depth/score table and tree images."""
        bank = CustomUtils.read_file_and_return_df('11a_bank.csv')
        bank_data = bank.copy()

        # Collapse similar jobs into broader categories.
        bank_data['job'] = bank_data['job'].replace(['admin.'], 'management')
        bank_data['job'] = bank_data['job'].replace(['housemaid'], 'services')
        bank_data['job'] = bank_data['job'].replace(['self-employed'], 'entrepreneur')
        bank_data['job'] = bank_data['job'].replace(['retired', 'student', 'unemployed', 'unknown'], 'others')

        # 'other' carries no more information than 'unknown' for the
        # previous-campaign outcome, so merge them.
        bank_data['poutcome'] = bank_data['poutcome'].replace(['other'], 'unknown')

        # Drop unused columns and binarise the yes/no flag columns. The four
        # flag conversions were copy-pasted in the original; a loop keeps
        # them identical by construction.
        bank_data.drop('contact', axis=1, inplace=True)
        for source_col, target_col in (('default', 'default_cat'),
                                       ('housing', 'housing_cat'),
                                       ('loan', 'loan_cat'),
                                       ('deposit', 'deposit_cat')):
            bank_data[target_col] = bank_data[source_col].map({'yes': 1, 'no': 0})
            bank_data.drop(source_col, axis=1, inplace=True)
        bank_data.drop('month', axis=1, inplace=True)
        bank_data.drop('day', axis=1, inplace=True)

        bank_with_dummies = pd.get_dummies(data=bank_data,
                                           columns=['job', 'marital', 'education', 'poutcome'],
                                           prefix=['job', 'marital', 'education', 'poutcome'])

        fig = px.bar(bank_data, x='job', y='deposit_cat', height=600, color='job')
        barchartJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)

        # 80/20 train-test split on the one-hot encoded frame.
        # (Original used the deprecated positional axis: drop('deposit_cat', 1).)
        data_drop_deposite = bank_with_dummies.drop('deposit_cat', axis=1)
        label = bank_with_dummies.deposit_cat
        data_train, data_test, label_train, label_test = train_test_split(
            data_drop_deposite, label, test_size=0.2, random_state=50)

        # Train one tree per depth; None means "grow to full depth" (the
        # original dt1, trained without a fixed random_state).
        depths = [2, 3, 4, 6, None]
        classifiers = {}
        train_scores = []
        test_scores = []
        for depth in depths:
            if depth is None:
                clf = tree.DecisionTreeClassifier()
            else:
                clf = tree.DecisionTreeClassifier(random_state=1, max_depth=depth)
            clf.fit(data_train, label_train)
            classifiers[depth] = clf
            train_scores.append(clf.score(data_train, label_train))
            test_scores.append(clf.score(data_test, label_test))

        scores = {
            "Tree Depth": ["2", "3", "4", "6", "max"],
            "Training score": train_scores,
            "Testing score": test_scores,
        }
        scoresDfHTML = pd.DataFrame.from_dict(scores).to_html(
            classes='table table-striped', index=False, justify='center')

        # Visualise only the two shallowest (readable) trees.
        tree2 = CustomUtils.get_base64_encoded_image(classifiers[2], data_train.columns)
        tree3 = CustomUtils.get_base64_encoded_image(classifiers[3], data_train.columns)
        return render_template("assignment11.html.j2", barchartJSON=barchartJSON,
                               scoresDfHTML=scoresDfHTML,
                               tree2=tree2, tree3=tree3)
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,024
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/assignment5.py
|
import os
from pathlib import Path
import pandas as pd
from flask import render_template
import plotly.express as px
import plotly
import json
class Assignment5:
    """IMDB top-1000 analysis: average rating per release year."""

    movies = None  # DataFrame loaded from data/5_imdb_top_1000.csv

    def __init__(self):
        filename = os.path.join(Path(__file__).parent, 'data', '5_imdb_top_1000.csv')
        self.movies = pd.read_csv(filename)

    def process(self):
        """Render a line chart of average IMDB rating (and earnings) by year."""
        # Earnings: strip the thousands separators from Gross, cast to float.
        self.movies['Earnings'] = self.movies['Gross'].str.replace(',', '')
        movies = self.movies.astype({'Earnings': float})

        movies['Year'] = movies['Released_Year']
        # A stray 'PG' value sneaks into Released_Year; blank it out ...
        movies['Year'] = movies[movies['Year'] != 'PG']['Year']
        # ... and drop the affected rows. The original called
        # Series.dropna(inplace=True) on a column slice, which does NOT
        # remove rows from the frame; dropna(subset=...) does.
        movies = movies.dropna(subset=['Year'])

        # Group by year, keeping Year as a column (not the index), and
        # average the numeric columns per year.
        groupedMoviesList = movies.groupby('Year', as_index=False)
        averageRatingByYear = groupedMoviesList.mean()

        fig = px.line(
            averageRatingByYear,
            x="Year",
            y="IMDB_Rating",
            title='Average movie rating by year (hover to see average earnings)',
            hover_data=["Earnings"])
        graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
        datasource = "https://www.kaggle.com/harshitshankhdhar/imdb-dataset-of-top-1000-movies-and-tv-shows"
        return render_template("assignment5.html.j2", graphJSON=graphJSON, datasource=datasource)
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,025
|
pinaki-das-sage/assignments
|
refs/heads/main
|
/app.py
|
import os
from flask import Flask, render_template
import pandas as pd
from assignment5 import Assignment5
from assignment9 import Assignment9
from assignment10 import Assignment10
from assignment11 import Assignment11
from assignment12 import Assignment12
from assignment16 import Assignment16
from assignment17 import Assignment17
import plotly.express as px
import plotly
import json
app = Flask(__name__)


# home page: landing page that links to the individual assignments
@app.route("/")
def home():
    """Render the assignments index."""
    return render_template("index.html.j2")


# 404 handler: any unknown URL renders the custom not-found page
@app.errorhandler(404)
def not_found(e):
    """Render the custom 404 page for unmatched routes."""
    return render_template("404.html.j2")
# first method - kept it simple here, it is defined right here within the file
@app.route("/assignment4")
def assignment4():
    """Bar chart of tax rate vs GDP, built inline from 4_tax2gdp.csv."""
    filename = os.path.join(app.root_path, 'data', '4_tax2gdp.csv')
    tax2gdp = pd.read_csv(filename)
    # filter some outliers: very large GDPs would squash the rest of the chart
    tax2gdp2 = tax2gdp[tax2gdp['GDP (In billions)'] < 10000]
    fig = px.bar(x=tax2gdp2["Tax Percentage"],
                 y=tax2gdp2["GDP (In billions)"]
                 )
    fig.update_layout(
        title='Tax rate by GDP for countries. Still WIP. Need to figure out how to add the country name on hover.',
        showlegend=True)
    # serialise the figure for the plotly.js front end
    graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
    return render_template("assignment4.html.j2", graphJSON=graphJSON)
# second method - this is defined in its own file and we just call the method
@app.route("/assignment5")
def assignment5():
    # Assignment5 reads its CSV in __init__, so a fresh instance per request.
    obj = Assignment5()
    return obj.process()


# ninth assignment - static function used; the remaining assignments follow
# the same pattern: each route delegates to AssignmentN.process().
@app.route("/assignment9")
def assignment9():
    return Assignment9.process()


@app.route("/assignment10")
def assignment10():
    return Assignment10.process()


@app.route("/assignment11")
def assignment11():
    return Assignment11.process()


@app.route("/assignment12")
def assignment12():
    return Assignment12.process()


@app.route("/assignment16")
def assignment16():
    return Assignment16.process()


@app.route("/assignment17")
def assignment17():
    return Assignment17.process()


# background process happening without any refreshing (AJAX endpoint used by
# the assignment10 page)
@app.route('/assignment10_predict', methods=['POST'])
def assignment10_predict():
    return Assignment10.predict()


if __name__ == "__main__":
    # Dev server only; debug=True enables the reloader and debugger.
    app.run(debug=True)
|
{"/assignment9.py": ["/customutils.py"], "/assignment10.py": ["/customutils.py"], "/assignment16.py": ["/customutils.py"], "/assignment17.py": ["/customutils.py"], "/assignment12.py": ["/customutils.py"], "/assignment11.py": ["/customutils.py"], "/app.py": ["/assignment5.py", "/assignment9.py", "/assignment10.py", "/assignment11.py", "/assignment12.py", "/assignment16.py", "/assignment17.py"]}
|
13,033
|
gouemoolaf28/growth_agency_articles
|
refs/heads/master
|
/maddyness/spiders/articles.py
|
import scrapy
from scrapy.loader import ItemLoader
from ..items import MaddynessItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class ArticlesSpider(CrawlSpider):
    """Crawl maddyness.com MaddyMoney search results for funding articles."""

    name = 'articles'
    allowed_domains = ['maddyness.com']
    start_urls = ['http://www.maddyness.com/?s=MaddyMoney/']

    # Follow every article-card link on listing pages; each followed page is
    # handed to parse_item, and links found there are crawled further.
    rules = (
        Rule(LinkExtractor(restrict_xpaths=(
            "(//div[@class='home-article-card-wrapper']//a)")), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Yield one MaddynessItem per funding link found on an article page."""
        # article_item = MaddynessItem()
        for article in response.xpath("//a[@class='financement-link']"):
            # ItemLoader scopes the XPaths below to the current <a> element.
            loader = ItemLoader(item=MaddynessItem(),
                                selector=article, response=response)
            loader.add_xpath(
                "company_name", ".//div[@class='finance-card-company']/text()")
            loader.add_xpath("site_url", ".//@href")
            yield loader.load_item()
            # article_item['company_name'] = article.xpath(
            #     ".//div[@class='finance-card-company']/text()").get()
            # article_item['site_url'] = article.xpath("./@href").get()
            # yield article_item
|
{"/maddyness/spiders/articles.py": ["/maddyness/items.py"]}
|
13,034
|
gouemoolaf28/growth_agency_articles
|
refs/heads/master
|
/maddyness/items.py
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import TakeFirst
class MaddynessItem(scrapy.Item):
    """Scraped funding record: company name and its article/site URL.

    Both fields use TakeFirst so the ItemLoader keeps only the first
    extracted value for each XPath.
    """
    company_name = scrapy.Field(
        output_processor=TakeFirst()
    )
    site_url = scrapy.Field(
        output_processor=TakeFirst()
    )
|
{"/maddyness/spiders/articles.py": ["/maddyness/items.py"]}
|
13,035
|
gouemoolaf28/growth_agency_articles
|
refs/heads/master
|
/maddyness/pipelines.py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
# from itemadapter import ItemAdapter
# import logging
# import gspread
# import sqlite3
# class MaddynessPipeline:
# collection_name = "articles"
# gc = gspread.service_account(filename='./creds.json')
#     def open_spider(self, spider):
#         logging.warning("SPIDER OPENED FROM PIPELINE")
#         # store the worksheet on the instance so process_item can reach it
#         # (a bare local `sh` here would be out of scope in process_item)
#         self.sh = self.gc.open('scrapetosheets').sheet1
#     def close_spider(self, spider):
#         logging.warning("SPIDER CLOSED FROM PIPELINE")
#     def process_item(self, item, spider):
#         self.sh.append_rows(item)
#         return item
# class SQLitePipeline(object):
# def open_spider(self, spider):
# self.connection = sqlite3.connect("growthagency.db")
# self.c = self.connection.cursor()
# try:
# self.c.execute('''
# CREATE TABLE article(
# company_name TEXT,
# site_url TEXT
# )
# ''')
# self.connection.commit()
# except sqlite3.OperationalError:
# pass
# def close_spider(self, spider):
# self.connection.close()
# def process_item(self, item, spider):
# self.c.execute('''
# INSERT INTO article (company_name, site_url) VALUES (?,?)
# ''', (
# item.get('company_name'),
# item.get('site_url')
# ))
# self.connection.commit()
# return item
|
{"/maddyness/spiders/articles.py": ["/maddyness/items.py"]}
|
13,036
|
gyan42/pyspark-learning-ground
|
refs/heads/master
|
/test_driven_developement/src/solved.py
|
class MovingAverage:
    """Moving average over stock prices (TDD skeleton).

    The original declared class-level attributes (spark/stockPriceInputDir/
    size) that were immediately shadowed by instance attributes; they have
    been removed as redundant.
    """

    def __init__(self, spark, stockPriceInputDir, size):
        self.spark = spark                            # active SparkSession
        self.stockPriceInputDir = stockPriceInputDir  # directory with price data
        self.size = size                              # moving-average window size

    def calculate(self):
        # Intentionally unimplemented: to be filled in by the TDD exercise.
        pass
class MovingAverageWithStockInfo:
    """Moving average joined with stock metadata (TDD skeleton).

    Redundant class-level attribute declarations removed; all state lives
    on the instance.
    """

    def __init__(self, spark, stockPriceInputDir, stockInfoInputDir, size):
        self.spark = spark                            # active SparkSession
        self.stockPriceInputDir = stockPriceInputDir  # price data directory
        self.stockInfoInputDir = stockInfoInputDir    # stock metadata directory
        self.size = size                              # moving-average window size

    def calculate(self):
        # Intentionally unimplemented: to be filled in by the TDD exercise.
        pass

    def calculate_for_a_stock(self, stockId):
        # Intentionally unimplemented: to be filled in by the TDD exercise.
        pass
|
{"/test_driven_developement/src/__init__.py": ["/test_driven_developement/src/solved.py"]}
|
13,037
|
gyan42/pyspark-learning-ground
|
refs/heads/master
|
/test_driven_developement/src/__init__.py
|
from .solved import MovingAverage,MovingAverageWithStockInfo
|
{"/test_driven_developement/src/__init__.py": ["/test_driven_developement/src/solved.py"]}
|
13,069
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/graphs/glaciers_oil_areas.py
|
from plotly.subplots import make_subplots
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from data.source import clean_greenhouse, clean_surface_area, clean_agriculture_area, \
clean_oil_production, clean_glaciers, clean_forest_area, temperature_glaciers
def glacier_graph(country: str, start_year: int, end_year: int):
    """Plot cumulative glacier mass loss against a country's temperature rise."""
    glaciers = clean_glaciers()
    glaciers = glaciers[(glaciers["Year"] >= start_year) & (glaciers["Year"] < end_year)]

    temps = temperature_glaciers()
    temps = temps.loc[temps["Country"] == country]
    # NOTE(review): glacier years use >= start_year while temperatures use
    # > start_year — preserved as-is; confirm whether that is intentional.
    temps = temps[(temps["dt"] > start_year) & (temps["dt"] < end_year)]

    figure = make_subplots()
    # Mass balance is negated so that loss plots as a rising curve.
    figure.add_trace(go.Scatter(
        x=glaciers["Year"],
        y=-glaciers["Mean cumulative mass balance"],
        line=dict(color='firebrick', width=4),
        name="Glacier Mass Balance Rise",
    ))
    figure.add_trace(go.Scatter(
        x=temps["dt"],
        y=temps["avg"],
        line=dict(color='royalblue', width=4),
        name="Temperature Increase",
    ))
    figure.update_layout(title='Glacier vs Temperature Rise',
                         xaxis_title='Years',
                         yaxis_title='Glacier Mass Balance vs Temperature Mean')
    return figure
def area_graph(type: str, start_year: int, end_year: int):
    """Choropleth of forest/surface/agricultural area change per country.

    ``type`` selects the colour column: "forest", "surface", anything else
    falls back to agricultural area — matching the original if/elif/else.
    (Parameter name ``type`` shadows the builtin but is kept for caller
    compatibility.)
    """
    forest = clean_forest_area()
    agriculture = clean_agriculture_area()
    surface = clean_surface_area()

    df = pd.merge(forest, agriculture, on=['country', 'year'])
    df = pd.merge(df, surface, on=['country', 'year'])
    df = df[(df["year"] >= start_year) & (df["year"] < end_year)]

    # merge suffixes: value_x = forest, value_y = agriculture, value = surface
    df.rename(columns={'value_x': 'Forest Area Reduction',
                       'value': 'Surface Area Reduction',
                       'value_y': 'Agricultural Area Reduction'}, inplace=True)

    # The original had three copy-pasted px.choropleth calls differing only
    # in the colour column; dispatch on the column name instead.
    color_column = {
        "forest": "Forest Area Reduction",
        "surface": "Surface Area Reduction",
    }.get(type, "Agricultural Area Reduction")

    fig = px.choropleth(df, locations="country",
                        color=color_column,
                        locationmode="country names",
                        hover_name="country",
                        animation_frame="year",
                        color_continuous_scale=px.colors.sequential.Plasma)
    return fig
def oil_graph(start_year, end_year):
    """Animated scatter of mean oil production per country over time."""
    production = clean_oil_production()
    in_range = (production["year"] >= start_year) & (production["year"] < end_year)
    production = production[in_range]

    figure = px.scatter(production, x="country", y="value",
                        animation_frame="year", size="value",
                        color="country", hover_name="country")
    # Make room for the year slider and drop the play/pause controls.
    figure['layout']['sliders'][0]['pad'] = dict(r=10, t=150, )
    figure["layout"].pop("updatemenus")
    figure.update_layout(title='Increase in Oil Production',
                         xaxis_title='Country',
                         yaxis_title='Mean Oil Production')
    return figure
if __name__ == "__main__":
    # Smoke-run the three figures with representative arguments.
    glacier_graph("Canada", 2005, 2020)
    area_graph("surface", 2000, 2020)
    oil_graph(2000, 2020)
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,070
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/dashboard_components/glaciers_oil_areas_dash.py
|
from pathlib import Path
from typing import Tuple
import dash
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from dash.dependencies import Output, Input, State
from matplotlib.widgets import Button, Slider
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from graphs.population_vs_electricity_graphs import renewable_vs_non_renewable_electricity, \
non_renewable_electricity_vs_poverty, non_renewable_electricity_vs_population
from graphs.glaciers_oil_areas import glacier_graph, area_graph, oil_graph
def glaciers_tab(app):
    """Tab: glacier mass loss vs temperature, with country and year inputs."""
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Country Name:"),
                    dbc.Input(value="Canada", id="glacier-input-1", type="text"),
                    dbc.Label("Enter Start Year:"),
                    dbc.Input(value=1990, id="glacier-input-2", type="number"),
                    dbc.Label("Enter End Year:"),
                    dbc.Input(value=2016, id="glacier-input-3", type="number"),
                ]),
                    md=12),
                dbc.Col(dbc.FormGroup([
                    dbc.Button('Display the Graph', id='glacier-button',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='glacier-graph'))
            ])
        ]),
        className="glacier-1",
    )

    # Redraw only on button click; the three inputs are read as State so
    # typing alone does not trigger a refresh.
    @app.callback(
        Output('glacier-graph', 'figure'),
        [Input('glacier-button', 'n_clicks')],
        [State('glacier-input-1', 'value'),
         State('glacier-input-2', 'value'),
         State('glacier-input-3', 'value')
         ])
    def update_figure(n_clicks, country_name, start_year, end_year):
        return glacier_graph(country_name, start_year, end_year)

    return tab1
def area_tab(app):
    """Tab: choropleth of forest/surface/agriculture area change for a year range."""
    tab2 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(
                    dbc.FormGroup([
                        dbc.Label("Enter Start Year:"),
                        dbc.Input(value=1990, id="area-input-1", type="number"),
                        dbc.Label("Enter End Year:"),
                        dbc.Input(value=2013, id="area-input-2", type="number"),
                    ]),
                    md=6),
                dbc.Col(
                    dbc.FormGroup([
                        dbc.Label("Choose Area Type"),
                        dcc.Dropdown(id="area-dropdown", value="forest",
                                     style={'backgroundColor': 'white', 'color': 'black'},
                                     options=[{"label": "Forest Area", "value": "forest"},
                                              {"label": "Surface Area", "value": "surface"},
                                              {"label": "Agriculture Area", "value": "agriculture"}]),
                        # "." label is a spacer so the button aligns with the inputs
                        dbc.Label("."),
                        dbc.Button('Display the Graph', id='area-button',
                                   color='info',
                                   style={'margin-bottom': '1em'}, block=True)
                    ]),
                    md=6),
            ]),
            html.Hr(),
            dbc.Row([
                html.Br(), html.Br(),
                dbc.Col(dcc.Graph(id='area-graph')),
            ]),
        ]),
        className="mt-3",
    )

    # Button click triggers the redraw; dropdown and years are read as State.
    @app.callback(
        Output('area-graph', 'figure'),
        [Input('area-button', 'n_clicks')],
        [State('area-dropdown', 'value'),
         State('area-input-1', 'value'),
         State('area-input-2', 'value'), ])
    def update_figure(n_clicks, type, start_year, end_year):
        return area_graph(type, start_year, end_year)

    return tab2
def oil_tab(app):
    """Tab: animated oil-production scatter for a start/end year range."""
    tab3 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Enter Start Year:"),
                    dbc.Input(value=2000, id="oil-input-1", type="number"),
                    dbc.Label("Enter End Year:"),
                    dbc.Input(value=2020, id="oil-input-2", type="number"),
                ]),
                    md=12),
                dbc.Col(dbc.FormGroup([
                    dbc.Button('Display the Graph', id='oil-button',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='oil-graph'))
            ])
        ]),
        className="mt-3",
    )

    # Redraw on button click with the two year bounds read as State.
    @app.callback(
        Output('oil-graph', 'figure'),
        [Input('oil-button', 'n_clicks')],
        [State('oil-input-1', 'value'),
         State('oil-input-2', 'value')
         ])
    def update_figure(n_clicks, start_year, end_year):
        return oil_graph(start_year, end_year)

    return tab3
def glacier_and_oil_impacts(app):
    """Assemble the oil / glaciers / area tabs into one dbc.Tabs widget."""
    sections = [
        dbc.Tab(oil_tab(app), label="Impact of Oil Production"),
        dbc.Tab(glaciers_tab(app), label="Impact of Glaciers"),
        dbc.Tab(area_tab(app), label="Area Changes"),
    ]
    return dbc.Tabs(sections)
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,071
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/dashboard_components/emissions.py
|
from pathlib import Path
from typing import Tuple
import dash
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from dash.dependencies import Output, Input, State
from matplotlib.widgets import Button, Slider
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from graphs.emissions import *
def tab_1_content(app):
    """Tab: per-country emissions line chart (carbon and greenhouse)."""
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Country Name:"),
                    dbc.Input(value="Canada", id="emissions-country-input-1", type="text"),
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    # "." label is a spacer so the button lines up with the input
                    dbc.Label("."),
                    dbc.Button('Display the Graph', id='emissions-display-graph-button-1',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=6)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='emissions-graph-1'))
            ])
        ]),
        className="mt-3",
    )

    @app.callback(
        Output('emissions-graph-1', 'figure'),
        [Input('emissions-display-graph-button-1', 'n_clicks')],
        [State('emissions-country-input-1', 'value')])
    def update_figure(n_clicks, country_name):
        # NOTE(review): when country_name is empty this implicitly returns
        # None, which dash reports as a callback error — consider
        # returning dash.no_update in that case.
        if country_name:
            return emissions_chart(country_name)

    return tab1
def tab_2_content(app):
    """Tab: world map of a chosen emission type for a chosen year."""
    tab2 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Enter Year: "),
                    dbc.Input(value=1990, id="emissions-year-input-2", type="number"),
                ]),
                    md=6),
                dbc.Col(
                    dbc.FormGroup([
                        dbc.Label("Choose Type"),
                        dcc.Dropdown(id="emissions-column-input-2", value='carbon',
                                     style={'backgroundColor': 'white', 'color': 'black'},
                                     options=[{"label": "Carbon", "value": "carbon"},
                                              {"label": "Carbon Per Person", "value": "carbon_person"},
                                              {"label": "Coal", "value": "coal"},
                                              {"label": "Sulfur", "value": "sulfur"},
                                              {"label": "Greenhouse", "value": "greenhouse"}]),
                    ]),
                    md=6)
            ]),
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Button('Display the Graph', id='emissions-display-graph-button-2',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                html.Br(), html.Br(),
                dbc.Col(dcc.Graph(id='emissions-graph-2'))
            ])
        ]),
        className="mt-3",
    )

    @app.callback(
        Output('emissions-graph-2', 'figure'),
        [Input('emissions-display-graph-button-2', 'n_clicks')],
        [State('emissions-year-input-2', 'value'),
         State('emissions-column-input-2', 'value')])
    def update_figure(n_clicks, year, column):
        # The second State is the emission-type dropdown, not a country;
        # the original misleadingly named this parameter ``country``.
        if year and column:
            return map_analysis(column, year)
        # Keep the current figure instead of implicitly returning None,
        # which dash reports as a callback error.
        return dash.no_update

    return tab2
def tab_3_content(app):
    """Tab: bar chart of a chosen emission column for a chosen year."""
    tab3 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Enter Year: "),
                    dbc.Input(value=1990, id="emissions-year-input-3", type="number"),
                ]),
                    md=6),
                dbc.Col(
                    dbc.FormGroup([
                        dbc.Label("Choose Type"),
                        dcc.Dropdown(id="emissions-column-input-3", value='coal',
                                     style={'backgroundColor': 'white', 'color': 'black'},
                                     options=[{"label": "Carbon", "value": 'carbon_total'},
                                              {"label": "Carbon Per Person", "value": 'carbon_per_person'},
                                              {"label": "Coal", "value": 'coal'},
                                              {"label": "Sulfur", "value": 'sulfur'},
                                              {"label": "Greenhouse", "value": 'greenhouse'}]),
                    ]),
                    md=6)
            ]),
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Button('Display the Graph', id='emissions-display-graph-button-3',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='emissions-graph-3'))
            ])
        ]),
        className="mt-3",
    )

    @app.callback(
        Output('emissions-graph-3', 'figure'),
        [Input('emissions-display-graph-button-3', 'n_clicks')],
        [State('emissions-year-input-3', 'value'),
         State('emissions-column-input-3', 'value')])
    def update_figure(n_clicks, year, column):
        # NOTE(review): implicit None return when inputs are empty — dash
        # reports that as a callback error.
        if year and column:
            return bar_analysis(column, year)

    return tab3
def tab_4_content(app):
    """Tab: chart of one emission column across countries (via pie_analysis2)."""
    tab4 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(
                    dbc.FormGroup([
                        dbc.Label("Choose Type"),
                        dcc.Dropdown(id="emissions-column-input-4", value='coal',
                                     style={'backgroundColor': 'white', 'color': 'black'},
                                     options=[{"label": "Carbon", "value": 'carbon_total'},
                                              {"label": "Carbon Per Person", "value": 'carbon_per_person'},
                                              {"label": "Coal", "value": 'coal'},
                                              {"label": "Sulfur", "value": 'sulfur'},
                                              {"label": "Greenhouse", "value": 'greenhouse'}]),
                    ]),
                    md=12)
            ]),
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Button('Display the Graph', id='emissions-display-graph-button-4',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='emissions-graph-4'))
            ])
        ]),
        className="mt-3",
    )

    @app.callback(
        Output('emissions-graph-4', 'figure'),
        [Input('emissions-display-graph-button-4', 'n_clicks')],
        [State('emissions-column-input-4', 'value')])
    def update_figure(n_clicks, column):
        # NOTE(review): implicit None return when no column is selected.
        # Also, pie_analysis2 feeds the tab labelled "Stacked Bar Chart"
        # (see emission_section) — presumably the name is stale; confirm.
        if column:
            return pie_analysis2(column)

    return tab4
def emission_section(app):
    """Bundle the four emissions tabs into a single dbc.Tabs component."""
    emission_tabs = [
        dbc.Tab(tab_4_content(app), label="Stacked Bar Chart"),
        dbc.Tab(tab_1_content(app), label="Line Chart (Carbon and Greenhouse)"),
        dbc.Tab(tab_2_content(app), label="Map"),
        dbc.Tab(tab_3_content(app), label="Bar Chart"),
    ]
    return dbc.Tabs(emission_tabs)
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,072
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/ml_models/prediction.py
|
from ml_models.glacier_model import Glacier_Models
from ml_models.sea_level_model import Sea_Level_Models
from ml_models.temperature_model import Temperature_Models
def sea_level_prediction(temperature):
    """Predict global mean sea level from temperature.

    `temperature` is a 2-D array-like of samples (e.g. [[19.7]]).
    Returns the model's predicted sea-level values.
    """
    model = Sea_Level_Models.get_sea_level_model()
    transformer = Sea_Level_Models.get_sea_level_poly_regressor()
    return model.predict(transformer.fit_transform(temperature))
def glacier_prediction(temperature):
    """Predict glacier mass balance from temperature.

    `temperature` is a 2-D array-like of samples (e.g. [[20.3]]).
    """
    model = Glacier_Models.get_glaciers_model()
    transformer = Glacier_Models.get_glaciers_poly_regressor()
    return model.predict(transformer.fit_transform(temperature))
def temperature_prediction(data):
    """Predict temperature from emission feature rows via the cached linear model."""
    regressor = Temperature_Models.get_temperature_model()
    return regressor.predict(data)
if __name__ == '__main__':
    # Manual smoke test of the prediction helpers with sample inputs.
    sample_a = [[200000, 125000, 205000]]
    sample_b = [[205000, 120500, 200500]]
    print(sea_level_prediction([[19.7]]))
    print(glacier_prediction([[20.3]]))
    print(temperature_prediction(sample_a))
    print(temperature_prediction(sample_b))
    print(sea_level_prediction(temperature_prediction(sample_a)))
    print(glacier_prediction(temperature_prediction(sample_b)))
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,073
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/renewable.py
|
from dash import html
import dash_bootstrap_components as dbc
def renewables_tab(app):
    """Card for the 'Renewables' tab: an embedded YouTube video plus two
    introductory paragraphs. `app` is accepted for interface symmetry with
    the other tab builders but is not used here."""
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row(
                dbc.Card(
                    [
                        html.Iframe(src="https://www.youtube.com/embed/lNQmwWFwiiQ",title="YouTube video player",height="315"),
                        dbc.CardBody(
                            [
                                html.H2("Renewables"),
                                html.P("Renewable power is booming, as innovation brings down costs and starts to deliver on the promise of a clean energy future. American solar and wind generation are breaking records and being integrated into the national electricity grid without compromising reliability.",
                                       className="card-text"),
                                html.P("This means that renewables are increasingly displacing “dirty” fossil fuels in the power sector, offering the benefit of lower emissions of carbon and other types of pollution. But not all sources of energy marketed as “renewable” are beneficial to the environment. Biomass and large hydroelectric dams create difficult tradeoffs when considering the impact on wildlife, climate change, and other issues. Here’s what you should know about the different types of renewable energy sources—and how you can use these emerging technologies at your own home. ",
                                       className="card-text"),
                            ]
                        ),
                    ],
                )
            ), html.Hr(),
        ]),
        className="mt-6 mt-auto",
    )
    return tab1
def nuclear_tab(app):
    """Card for the 'Nuclear' tab: an embedded YouTube video plus prose on
    nuclear waste and plant costs. `app` is unused, kept for symmetry."""
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row(
                dbc.Card(
                    [
                        html.Iframe(src="https://www.youtube.com/embed/vt179qMm_1o",title="YouTube video player",height="315"),
                        dbc.CardBody(
                            [
                                html.H2("Nuclear"),
                                html.P("""
One side effect of nuclear power is the amount of nuclear waste it produces. It has been estimated that the world produces some 34,000m3 of nuclear waste each year, waste that takes years to degrade.
Anti-nuclear environmental group Greenpeace released a report in January 2019 that detailed what it called a nuclear waste ‘crisis’ for which there is ‘no solution on the horizon’. One such solution was a concrete nuclear waste ‘coffin’ on Runit Island, which has begun to crack open and potentially release radioactive material.""", className="card-text"),
                                html.P("""
The initial costs for building a nuclear power plant are steep. A recent virtual test reactor in the US estimate rose from $3.5bn to $6bn alongside huge extra costs to maintain the facility. South Africa scrapped plans to add 9.6GW of nuclear power to its energy mix due to the cost, which was estimated anywhere between $34-84bn. So whilst nuclear plants are cheap to run and produce inexpensive fuel, the initial costs are off-putting. """,
                                       className="card-text"),
                            ]
                        ),
                    ],
                )
            ), html.Hr(),
        ]),
        className="mt-6 mt-auto",
    )
    return tab1
def carb_price_tab(app):
    """Card for the 'Carbon Price' tab: an embedded YouTube video plus prose
    on carbon-pricing mechanisms. `app` is unused, kept for symmetry."""
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row(
                dbc.Card(
                    [
                        html.Iframe(src="https://www.youtube.com/embed/_4gbACmsBTw",title="YouTube video player",height="315"),
                        dbc.CardBody(
                            [
                                html.H2("Carbon Price"),
                                html.P("""
Following the 2015 Paris Climate Agreement, there has been a growing understanding of the structural changes required across the global economy to shift to a low-carbon economy. The increasing regulation of carbon emissions through taxes, emissions trading schemes, and fossil fuel extraction fees is expected to play a vital role in global efforts to address climate change. Central to these efforts to reduce carbon dioxide (CO2) emission is a market mechanism known as carbon pricing.
""",
                                       className="card-text"),
                                html.P("""
Set by governments or markets, carbon prices cover a part of a country’s total emissions, charging C02 emitters for each ton released through a tax or a fee. Those fees may also apply to methane, nitrous oxide, and other gases that contribute to rising global temperatures. In a cap-and-trade system of carbon pricing, the government sets a cap on the total amount of emissions allowed, and C02 emitters are either given permits or allowances or must buy the right to emit C02; companies whose total emissions fall under the cap may choose to sell their unused emissions credits to those who surpass its carbon allotment. Either way, carbon pricing takes advantage of market mechanisms to create financial incentives to lower emissions by switching to more efficient processes or cleaner fuels. """,
                                       className="card-text"),
                            ]
                        ),
                    ],
                )
            ), html.Hr(),
        ]),
        className="mt-6 mt-auto",
    )
    return tab1
def renewable_info(app):
    """Tabbed container grouping the three 'Solutions' cards."""
    return dbc.Tabs(
        [
            dbc.Tab(renewables_tab(app), label="Renewables"),
            dbc.Tab(nuclear_tab(app), label="Nuclear"),
            dbc.Tab(carb_price_tab(app), label="Carbon Price"),
        ]
    )
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,074
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/data/source.py
|
from pathlib import Path
import pandas as pd
def read_dataset(path: Path) -> pd.DataFrame:
    """Read a CSV file into a DataFrame.

    Args:
        path: location of the CSV file.

    Returns:
        The parsed DataFrame.

    Raises:
        FileNotFoundError: if `path` does not exist. (The original silently
        returned None here, which later surfaced as a confusing
        AttributeError in every caller.)
    """
    if path.exists():
        return pd.read_csv(path)
    raise FileNotFoundError(f"Dataset not found: {path}")
def get_electricity_and_population_info():
    """Electricity-generation and population dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'electricity_and_population_info.csv'))
def get_drought():
    """Drought records, 1970-2008."""
    return read_dataset(Path('.', 'data', 'csv_files', 'final_drought_data(1970 -2008).csv'))
def get_flood():
    """Flood records, 1970-2008."""
    return read_dataset(Path('.', 'data', 'csv_files', 'final_flood_data(1970 -2008).csv'))
def get_storm():
    """Storm records, 1970-2008."""
    return read_dataset(Path('.', 'data', 'csv_files', 'final_storm_data(1970 -2008).csv'))
def get_deforestation():
    """Forest-area dataset (same file as clean_forest_area)."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Forest_Area.csv'))
def get_all_emissions_info():
    """Combined per-country emissions dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Combine_All.csv'))
def get_iso_countries():
    """Country dataset keyed by ISO code (used for choropleth maps)."""
    return read_dataset(Path('.', 'data', 'csv_files', 'countries_iso.csv'))
def get_green_house():
    """Greenhouse-gas emissions dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Greenhouse_Emissions.csv'))
def get_sea_level():
    """Global mean sea-level records, 1993-2015."""
    return read_dataset(Path('.', 'data', 'csv_files', 'final_sea_level_data(1993-2015).csv'))
def get_glaciers():
    """Glacier mass-balance dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Glaciers.csv'))
def get_temperature():
    """Temperature dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'temperature_new.csv'))
def clean_glaciers():
    """Glacier dataset (same file as get_glaciers)."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Glaciers.csv'))
def clean_surface_area():
    """Surface-area dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Surface_Area.csv'))
def clean_forest_area():
    """Forest-area dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Forest_Area.csv'))
def clean_agriculture_area():
    """Agriculture-area dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Agriculture_Area.csv'))
def clean_oil_production():
    """Oil-production dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Oil_Production.csv'))
def clean_greenhouse():
    """Greenhouse-emissions dataset (same file as get_green_house)."""
    return read_dataset(Path('.', 'data', 'csv_files', 'Clean_Greenhouse_Emissions.csv'))
def temperature_glaciers():
    """Temperature dataset (same file as get_temperature)."""
    return read_dataset(Path('.', 'data', 'csv_files', 'temperature_new.csv'))
def glaciers_vs_temperature():
    """Joined glacier-vs-temperature dataset used for model training."""
    return read_dataset(Path('.', 'data', 'csv_files', 'glaciers_temperature_df.csv'))
def sea_level_vs_temperature():
    """Joined sea-level-vs-temperature dataset used for model training."""
    return read_dataset(Path('.', 'data', 'csv_files', 'sea_level_temperature_df.csv'))
def get_temp_greenhouse_carbon_forest():
    """Joined temperature / greenhouse / carbon / forest dataset."""
    return read_dataset(Path('.', 'data', 'csv_files', 'temp_greenhouse_carbon_forest.csv'))
if __name__ == '__main__':
    # Ad-hoc smoke test: print one dataset when this module is run directly.
    print(get_electricity_and_population_info())
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,075
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/ml_models/sea_level_model.py
|
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from data.source import sea_level_vs_temperature
class Sea_Level_Models:
    """Lazily-trained singleton: degree-2 polynomial regression of global
    mean sea level on temperature, fitted once on first use."""

    # Cached fitted LinearRegression (trained on polynomial features).
    __sea_level_model = None
    # Cached PolynomialFeatures transformer used at fit and predict time.
    __sea_level_poly_regressor = None

    @staticmethod
    def get_sea_level_model():
        """Train the model on first call; return the cached regressor after.

        Fix: identity comparison `is None` replaces `== None` (PEP 8; `==`
        can invoke arbitrary __eq__ on the cached object).
        """
        if Sea_Level_Models.__sea_level_model is None:
            dataset = sea_level_vs_temperature()
            # All columns but the last are features; the last is the target.
            X = dataset.iloc[:, :-1].values
            y = dataset.iloc[:, -1].values
            poly_regressor = PolynomialFeatures(degree=2)
            X_poly = poly_regressor.fit_transform(X)
            poly_linear_regressor = LinearRegression()
            poly_linear_regressor.fit(X_poly, y)
            Sea_Level_Models.__sea_level_model = poly_linear_regressor
            Sea_Level_Models.__sea_level_poly_regressor = poly_regressor
        return Sea_Level_Models.__sea_level_model

    @staticmethod
    def get_sea_level_poly_regressor():
        """Return the fitted transformer, training the model first if needed."""
        if Sea_Level_Models.__sea_level_poly_regressor is None:
            Sea_Level_Models.get_sea_level_model()
        return Sea_Level_Models.__sea_level_poly_regressor
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,076
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/ml_models/glacier_model.py
|
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from data.source import glaciers_vs_temperature
class Glacier_Models:
    """Lazily-trained singleton: degree-2 polynomial regression of glacier
    mass balance on temperature, fitted once on first use."""

    # Cached fitted LinearRegression (trained on polynomial features).
    __glaciers_model = None
    # Cached PolynomialFeatures transformer used at fit and predict time.
    __glaciers_poly_regressor = None

    @staticmethod
    def get_glaciers_model():
        """Train the model on first call; return the cached regressor after.

        Fix: identity comparison `is None` replaces `== None` (PEP 8; `==`
        can invoke arbitrary __eq__ on the cached object).
        """
        if Glacier_Models.__glaciers_model is None:
            dataset = glaciers_vs_temperature()
            # All columns but the last are features; the last is the target.
            X = dataset.iloc[:, :-1].values
            y = dataset.iloc[:, -1].values
            poly_regressor = PolynomialFeatures(degree=2)
            X_poly = poly_regressor.fit_transform(X)
            poly_linear_regressor = LinearRegression()
            poly_linear_regressor.fit(X_poly, y)
            Glacier_Models.__glaciers_model = poly_linear_regressor
            Glacier_Models.__glaciers_poly_regressor = poly_regressor
        return Glacier_Models.__glaciers_model

    @staticmethod
    def get_glaciers_poly_regressor():
        """Return the fitted transformer, training the model first if needed."""
        if Glacier_Models.__glaciers_poly_regressor is None:
            Glacier_Models.get_glaciers_model()
        return Glacier_Models.__glaciers_poly_regressor
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,077
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/graphs/sea_level_model.py
|
import plotly.graph_objects as go
from data.source import sea_level_vs_temperature
from ml_models.prediction import sea_level_prediction
def sea_level_vs_temperature_model_info():
    """Scatter of observed sea level vs temperature with the regression curve."""
    data = sea_level_vs_temperature()
    temps = data.iloc[:, :-1].values
    figure = go.Figure()
    figure.add_trace(go.Scatter(x=data['temperature'], y=data['sea_level'],
                                mode='markers', name='Complete Dataset',
                                line=dict(color='firebrick', width=4)))
    figure.add_trace(go.Scatter(x=data['temperature'], y=sea_level_prediction(temps),
                                name='Regression Model',
                                line=dict(color='royalblue', width=4)))
    figure.update_layout(title='<b> Global Mean Sea Level vs Temperature (Polynomial Regression)</b>',
                         xaxis_title='Temperature',
                         yaxis_title='Global Mean Sea Level')
    return figure
def sea_level_vs_temperature_model_prediction(temperature: int, sea_level: int):
    """Regression curve with one highlighted (temperature, sea_level) point."""
    data = sea_level_vs_temperature()
    temps = data.iloc[:, :-1].values
    figure = go.Figure()
    figure.add_trace(go.Scatter(x=[temperature], y=[sea_level],
                                mode='markers', name='Predicted Value',
                                marker=dict(color='firebrick', size=10)))
    figure.add_trace(go.Scatter(x=data['temperature'], y=sea_level_prediction(temps),
                                name='Regression Model',
                                line=dict(color='royalblue', width=4)))
    figure.update_layout(title='<b>Global Mean Sea Level vs Temperature (Polynomial Regression)</b>',
                         xaxis_title='Temperature',
                         yaxis_title='Global Mean Sea Level')
    return figure
if __name__ == "__main__":
sea_level_vs_temperature_model_info()
sea_level_vs_temperature_model_prediction(20, 79)
print("ok")
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,078
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/graphs/emissions.py
|
from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from matplotlib.widgets import Button, Slider
from data.source import *
def emissions_chart(country_name):
    """Line chart of carbon vs. other greenhouse emissions for one country."""
    data = get_all_emissions_info()
    data = data[data['country'] == country_name]
    figure = go.Figure()
    figure.add_trace(go.Scatter(x=data['year'], y=data['carbon_total'],
                                name='Carbon Emissions',
                                line=dict(color='firebrick', width=4)))
    figure.add_trace(go.Scatter(x=data['year'], y=data['greenhouse'],
                                name='Other Greenhouse Emissions',
                                line=dict(color='royalblue', width=4)))
    figure.update_layout(title='<b>Emissions for </b> ' + country_name,
                         xaxis_title='Years',
                         yaxis_title='Metric tonnes of fuel')
    return figure
def bar_analysis(column, year):
    """Bar chart of `column` per country for a single year."""
    data = get_all_emissions_info()
    subset = data.loc[data['year'] == year]
    figure = go.Figure()
    figure.add_trace(go.Bar(x=subset['country'], y=subset[column]))
    return figure
def map_analysis(column, year):
    """Choropleth world map of `column` for a single year."""
    data = get_iso_countries()
    subset = data.loc[data['year'] == year]
    return px.choropleth(subset, locations=subset['geo'],
                         color=subset[column],
                         hover_name="geo",
                         color_continuous_scale=px.colors.sequential.Plasma)
def pie_analysis2(column):
    """Stacked bar chart of `column` over the years for five large emitters."""
    data = get_all_emissions_info()
    chosen = ['USA', 'Canada', 'India', 'China', 'Brazil']
    subset = data.loc[data['country'].isin(chosen)]
    return px.bar(subset, x='year', y=column, color='country')
if __name__ == "__main__":
country_name = 'Canada'
year = 1990
emissions_chart(country_name)
bar_analysis('coal', 1981)
map_analysis('greenhouse', 2000)
pie_analysis('coal', 1990)
print("ok")
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,079
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/main.py
|
import dash
from dash.dependencies import Output, Input
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dashboard_components.population_vs_electricity_section import population_vs_electricity_section
from dashboard_components.glaciers_oil_areas_dash import glacier_and_oil_impacts
from dashboard_components.emissions import emission_section
from dashboard_components.catastrophe_section import catastrophe_section
from dashboard_components.machine_learning_section import machine_learning_results
from non_renewable import non_renewable_info
from renewable import renewable_info
# First awareness card on the Analysis page: text about human impact plus an image.
fstcard = dbc.Card(
    dbc.CardBody([
        dbc.Row(
            dbc.Card(
                [
                    dbc.CardBody(
                        [
                            html.H3("Impact on Earth", className="card-title"),
                            html.H4("Humans are the only beings who are contributing to this huge temperature rise.", className="card-title"),
                            html.P("Livings on this planet is a blessing to us but we don't realise the importance of the resources being provided by Mother Earth to us. The quote says it rightly 'Human needs can't be fulfilled, craving for more is our non-removable nature. But do we realise on what cost we are fulfilling our needs and what is the adverse side effect of this huge craving, the answer would be a big' NO", className="card-text"),
                            html.P("Global warming is the increase of average world temperature as a result of what is known as the greenhouse effect. ", className="card-text"),
                        ]
                    ),
                    dbc.CardImg(src="https://media.newyorker.com/photos/5d7baf31a5350d0008a14576/master/pass/McKibben-ClimateFinance2.jpg", bottom=True),
                ],
            )
        ),html.Hr(),
    ]),
    className="mt-6 mt-auto",
)
# Second awareness card on the Analysis page: an image plus text on fossil-fuel costs.
sndcrd = dbc.Card(
    dbc.CardBody([
        dbc.Row(
            dbc.Card(
                [
                    dbc.CardImg(
                        src="https://images.unsplash.com/photo-1571896851392-055658ba3c9f?ixid=MnwxMjA3fDB8MHxzZWFyY2h8MTJ8fGdsb2JhbCUyMHdhcm1pbmd8ZW58MHx8MHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=500&q=60",
                        bottom=True),
                    dbc.CardBody(
                        [
                            html.P("We use coal and oil but what do we produce, we produce Carbon Dioxide(CO2). We produce nuclear power but on what cost. ",className="card-text"),
                            html.P("The price paid is the death of people and the hazardous side effect of the test which is conducted is the extinction of those Oxygen producing blessings that are TREES. We cut of the trees to set up industrial amuzement parks and the stocks go up to give us a huge profit and a enourmous anual turnover but on what on cost and are we benefitted by the loss of pure air we breathe. We use fuel run automobiles and what do we do produce CO2, SO2, NO2 and the adverse effect goes on to be global warming, noise pollution, acid rain and hugely affecting problems that is melting of glaciers.",className="card-text"),
                        ]
                    ),
                ],
            )
        ), html.Hr(),
    ]),
    className="mt-6 mt-auto",
)
# Fixed left sidebar: dark background, full viewport height.
SIDEBAR_STYLE = {
    "position": "fixed",
    "top": 0,
    "left": 0,
    "bottom": 0,
    "width": "16rem",
    "padding": "2rem 1rem",
    "background-color": "#0D1321",
    "color": "#F0EBD8",
}
# Main content area: offset right of the sidebar.
CONTENT_STYLE = {
    "margin-left": "18rem",
    "margin-right": "2rem",
    "padding": "2rem 1rem",
}
# Navigation sidebar with links to the three pages routed in dashboard().
sidebar = html.Div(
    [
        html.H4("My Sustainable Earth"),
        html.Hr(),
        dbc.Nav(
            [
                dbc.NavLink("Home", href="/", active="exact"),
                dbc.NavLink("Analysis", href="/page-1", active="exact"),
                dbc.NavLink("Solutions", href="/page-2", active="exact"),
            ],
            vertical=True,
            pills=True,
        ),
    ],
    style=SIDEBAR_STYLE,
)
# Placeholder filled by the render_page_content callback in dashboard().
content = html.Div(id="page-content", children=[], style=CONTENT_STYLE)
def dashboard():
    """Build the Dash application: sidebar navigation plus three routed pages
    (home dashboards, ML/awareness page, solutions page).

    Returns the configured dash.Dash instance (caller runs the server).
    """
    app = dash.Dash(external_stylesheets=[dbc.themes.DARKLY])

    @app.callback(
        Output("page-content", "children"),
        [Input("url", "pathname")]
    )
    def render_page_content(pathname):
        # Route the current URL to the matching page layout.
        if pathname == "/":
            return [
                html.Hr(),
                html.H2(children="Electricity Generation Information:"),
                population_vs_electricity_section(app),
                html.Hr(),
                html.H2(children="Glaciers and Oil"),
                glacier_and_oil_impacts(app),
                html.Hr(),
                html.H2(children="Emissions:"),
                emission_section(app),
                html.Hr(),
                html.H2(children="Catastrophe Information:"),
                catastrophe_section(app),
                html.Hr(),
            ]
        elif pathname == "/page-1":
            return [
                html.H2(children="Machine Learning Results:"),
                machine_learning_results(app),
                html.Hr(),
                html.H2(children="Awareness"),
                dbc.Row([
                    dbc.Col(fstcard, width=6),
                    dbc.Col(sndcrd, width=6),
                ]),
                html.Hr(),
            ]
        elif pathname == "/page-2":
            return [
                html.H3(children="Non renewable"),
                non_renewable_info(app),
                html.Hr(),
                html.H3(children="Renewable"),
                renewable_info(app),
                html.Hr(),
            ]
        # Fix: the original returned None for unknown paths, leaving a blank
        # page; show an explicit 404 message instead (standard Dash pattern).
        return [
            html.H2("404: Page not found"),
            html.P("The pathname " + str(pathname) + " was not recognised."),
        ]

    app.layout = html.Div([
        dcc.Location(id="url"),
        sidebar,
        content
    ])
    return app
if __name__ == "__main__":
app = dashboard()
app.run_server(debug=True)
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,080
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/graphs/sea_level_vs_glacier_melt.py
|
import plotly.graph_objects as go
from data.source import *
# Sea level vs Glacier melt ( 1. Options button, 2. year_range )
def plot_sea_level_vs_glacier_temp(option, start_year, end_year):
    """Plot global mean sea level against a second climate series.

    Parameters
    ----------
    option : str
        'Glacier Melt' or 'Temperature'; selects the series drawn
        alongside the sea-level curve.
    start_year, end_year : int
        Inclusive year range shown on the x-axis.

    Returns
    -------
    plotly.graph_objects.Figure, or None when *option* is not one of the
    two recognised values (preserves the original fall-through).
    """
    df_sea = get_sea_level()
    # range() replaces the original manual while-loop, which never
    # terminated when end_year < start_year.
    years = list(range(start_year, end_year + 1))
    fig = go.Figure()
    # The sea-level curve appears in both chart variants, so add it once.
    # NOTE(review): df_sea is not filtered to *years* (same as original) --
    # presumably its rows already align with the selectable range; verify.
    fig.add_trace(go.Scatter(x=years, y=df_sea['GMSL_mean'],
                             mode='lines',
                             line=dict(color='firebrick', width=4),
                             name='Sea Level increase'))
    if option == 'Glacier Melt':
        df_glacier = get_glaciers()
        df_glacier = df_glacier[df_glacier['Year'].isin(years)]
        fig.add_trace(go.Scatter(x=years, y=df_glacier['Mean cumulative mass balance'],
                                 mode='lines+markers',
                                 line=dict(color='royalblue', width=4),
                                 name='Glacier level decrease'))
        fig.update_layout(barmode='group', xaxis_tickangle=-45, xaxis_title=" Years ",
                          yaxis_title="Glacier Melt Level")
        return fig
    elif option == 'Temperature':
        df_temp = get_temperature()
        df_temp = df_temp[df_temp['dt'].isin(years)]
        fig.add_trace(go.Scatter(x=years, y=df_temp['avg'],
                                 mode='lines+markers',
                                 line=dict(color='royalblue', width=4),
                                 name='Temperature'))
        fig.update_layout(barmode='group', xaxis_tickangle=-45, xaxis_title=" Years ",
                          yaxis_title="Temperature Level Increase ")
        return fig
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,081
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/graphs/glaciers_model.py
|
import plotly.graph_objects as go
from data.source import glaciers_vs_temperature
from ml_models.prediction import glacier_prediction
def glacier_vs_temperature_model_info():
    """Figure comparing the full glacier-vs-temperature dataset with the fitted regression curve."""
    data = glaciers_vs_temperature()
    # All columns except the last form the model's feature matrix.
    feature_matrix = data.iloc[:, :-1].values
    dataset_trace = go.Scatter(x=data['temperature'], y=data['glacier'], mode='markers', name='Complete Dataset',
                               line=dict(color='firebrick', width=4))
    model_trace = go.Scatter(x=data['temperature'], y=glacier_prediction(feature_matrix), name='Regression Model',
                             line=dict(color='royalblue', width=4))
    fig = go.Figure()
    fig.add_trace(dataset_trace)
    fig.add_trace(model_trace)
    fig.update_layout(title='<b>Glaciers Mass Balance vs Temperature (Polynomial Regression)</b>',
                      xaxis_title='Temperature',
                      yaxis_title='Glaciers Mass Balance')
    return fig
def glacier_vs_temperature_model_prediction(temperature: int, glacier: int):
    """Overlay a single predicted (temperature, glacier) point on the regression curve."""
    data = glaciers_vs_temperature()
    feature_matrix = data.iloc[:, :-1].values
    predicted_point = go.Scatter(x=[temperature], y=[glacier], mode='markers', name='Predicted Value',
                                 marker=dict(color='firebrick', size=10))
    regression_curve = go.Scatter(x=data['temperature'], y=glacier_prediction(feature_matrix), name='Regression Model',
                                  line=dict(color='royalblue', width=4))
    fig = go.Figure()
    fig.add_trace(predicted_point)
    fig.add_trace(regression_curve)
    fig.update_layout(title='<b>Glacier Mass Balance vs Temperature (Polynomial Regression)</b>',
                      xaxis_title='Temperature',
                      yaxis_title='Glacier Level')
    return fig
# Manual smoke test: build both figures with a sample prediction point.
if __name__ == "__main__":
    glacier_vs_temperature_model_info()
    glacier_vs_temperature_model_prediction(20, -34.04636935)
    print("ok")
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,082
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/non_renewable.py
|
from dash import html
import dash_bootstrap_components as dbc
def coal_tab(app):
    """Card for the Coal section: an embedded explainer video plus two paragraphs.

    The *app* argument is unused but kept for signature consistency with
    the other tab builders.
    """
    video = html.Iframe(src="https://www.youtube.com/embed/JONcq3KPsQo", title="YouTube video player", height="315")
    text_body = dbc.CardBody(
        [
            html.H2("Coal"),
            html.P(
                "COAL Highly Taxed Several principal emissions result from coal combustion:1.Sulfur dioxide (SO2), which contributes to acid rain and respiratory illnesses.2.Nitrogen oxides (NOx), which contribute to smog and respiratory illnesses. ",
                className="card-text"),
            html.P(
                "Coal phase-out has a positive synergy between the global climate challenge and local environmental pollution. In international climate negotiations, governments need to factor-in that exiting coal is a cheap way to substantially reduce global greenhouse gas emissions and has huge co-benefits at home. Our study shows that national and global interests are not necessarily trading-off, but can go hand in hand. ",
                className="card-text"),
        ]
    )
    inner_card = dbc.Card([video, text_body])
    return dbc.Card(
        dbc.CardBody([dbc.Row(inner_card), html.Hr()]),
        className="mt-6 mt-auto",
    )
def oil_tab(app):
    """Card for the Oil section: an embedded video, a list of impacts and a summary paragraph.

    The *app* argument is unused but kept for signature consistency with
    the other tab builders.
    """
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row(
                dbc.Card(
                    [
                        html.Iframe(src="https://www.youtube.com/embed/yn2oV1WSEfA",title="YouTube video player",height="315"),
                        dbc.CardBody(
                            [
                                html.H2("Oil"),
                                # Ordered list of the main environmental impacts.
                                html.Ol([
                                    html.Li("Pollution impacts communities.") ,
                                    html.Li("Dangerous emissions fuel climate change.") ,
                                    html.Li("Oil and gas development can ruin wildlands.") ,
                                    html.Li("Drilling disrupts wildlife habitat.") ,
                                    html.Li("Oil spills can be deadly to animals.") ,
                                ]),
                                html.P("""
        Oil and gas drilling has a serious impact on our wildlands and communities. Drilling projects operate around the clock generating pollution, fueling climate change, disrupting wildlife and damaging public lands that were set aside to benefit all people.""",
                                       className="card-text"),
                            ]
                        ),
                    ],
                )
            ), html.Hr(),
        ]),
        className="mt-6 mt-auto",
    )
    return tab1
def natgas_tab(app):
    """Card for the Natural Gas section: an embedded video plus two paragraphs.

    NOTE(review): both paragraphs below are copied verbatim from coal_tab
    and describe coal, not natural gas -- the copy needs to be replaced
    with natural-gas content.

    The *app* argument is unused but kept for signature consistency with
    the other tab builders.
    """
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row(
                dbc.Card(
                    [
                        html.Iframe(src="https://www.youtube.com/embed/vyEt4rckt7E",title="YouTube video player",height="315"),
                        dbc.CardBody(
                            [
                                html.H2("Natural Gas"),
                                html.P(
                                    "COAL Highly Taxed Several principal emissions result from coal combustion:1.Sulfur dioxide (SO2), which contributes to acid rain and respiratory illnesses.2.Nitrogen oxides (NOx), which contribute to smog and respiratory illnesses. ",
                                    className="card-text"),
                                html.P(
                                    "Coal phase-out has a positive synergy between the global climate challenge and local environmental pollution. In international climate negotiations, governments need to factor-in that exiting coal is a cheap way to substantially reduce global greenhouse gas emissions and has huge co-benefits at home. Our study shows that national and global interests are not necessarily trading-off, but can go hand in hand. ",
                                    className="card-text"),
                            ]
                        ),
                    ],
                )
            ), html.Hr(),
        ]),
        className="mt-6 mt-auto",
    )
    return tab1
def non_renewable_info(app):
    """Assemble the non-renewable energy section as tabs: Oil, Coal, Natural Gas."""
    oil = dbc.Tab(oil_tab(app), label="Oil")
    coal = dbc.Tab(coal_tab(app), label="Coal")
    natural_gas = dbc.Tab(natgas_tab(app), label="Natural Gas")
    return dbc.Tabs([oil, coal, natural_gas])
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,083
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/dashboard_components/catastrophe_section.py
|
from dash.dependencies import Output, Input, State
from matplotlib.widgets import Button, Slider
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import numpy as np
from data.source import get_temperature, get_glaciers, get_drought, get_deforestation, get_flood, get_storm, \
get_green_house
from graphs.flood_drought_storm_vs_temp_deforest_greenhouse import plot_map_for_drought_storm_flood, \
plot_combined_bar_vs_options
from graphs.population_vs_electricity_graphs import renewable_vs_non_renewable_electricity, \
non_renewable_electricity_vs_poverty, non_renewable_electricity_vs_population
from graphs.sea_level_vs_glacier_melt import plot_sea_level_vs_glacier_temp
def sea_level_vs_others_tab_1(app):
    """Build the "Sea Level Rise" tab and register its callbacks on *app*.

    The tab compares global mean sea level with either temperature or
    glacier melt over a user-selected year range.

    Parameters:
        app: the Dash application callbacks are attached to.

    Returns:
        dbc.Card: the tab layout.
    """
    # Display label -> short code; only the keys feed the dropdown options,
    # the values are currently unused.
    all_options = {
        'Temperature': 'Temp',
        'Glacier Melt': 'Glacier'
    }
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select Options:"),
                    dbc.Col(dcc.Dropdown(id='sea_level_option_dropdown',
                                         options=[{'label': k, 'value': k} for k in all_options.keys()],
                                         value='Temperature'), style={'backgroundColor':'white','color':'black'})
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select Start Year:"),
                    dbc.Col(dcc.Dropdown(id='sea_level_start_year_dropdown', value=2000), style={'backgroundColor':'white','color':'black'})
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select End Year:"),
                    dbc.Col(dcc.Dropdown(id='sea_level_end_year_dropdown', value=2010), style={'backgroundColor':'white','color':'black'})
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    dbc.Label("."),
                    dbc.Button('Display the Graph', id='sea_level_button',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=6)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='sea_level_graph'))
            ])
        ]),
        className="mt-3",
    )
    # Fill both year dropdowns from whichever dataset matches the selected
    # comparison option.
    @app.callback(
        Output('sea_level_start_year_dropdown', 'options'),
        Output('sea_level_end_year_dropdown', 'options'),
        [Input('sea_level_option_dropdown', 'value')],
    )
    def get_start_end_year_range(selected_option):
        df_temp = get_temperature()
        df_glacier = get_glaciers()
        temp_year = df_temp['dt'].unique()
        glacier_year = df_glacier['Year'].unique()
        year_range = {
            'Temperature': temp_year,
            'Glacier Melt': glacier_year
        }
        # Both branches return (start options, end options) from the same
        # year list; returns None implicitly for any other value.
        if selected_option == 'Temperature':
            return [{'label': i, 'value': i} for i in year_range[selected_option]], [{'label': i, 'value': i} for i in
                                                                                    year_range[selected_option]]
        if selected_option == 'Glacier Melt':
            return [{'label': i, 'value': i} for i in year_range[selected_option]], [{'label': i, 'value': i} for i in
                                                                                    year_range[selected_option]]
    # Draw the comparison graph when the button is clicked.
    @app.callback(
        Output('sea_level_graph', 'figure'),
        [Input('sea_level_button', 'n_clicks')],
        [State('sea_level_option_dropdown', 'value'),
         State('sea_level_start_year_dropdown', 'value'),
         State('sea_level_end_year_dropdown', 'value')]
    )
    def get_figure(n_clicks, options, start_year, end_year):
        # Both branches call the same helper; the option string selects the
        # second series inside plot_sea_level_vs_glacier_temp.
        if options == 'Temperature':
            fig = plot_sea_level_vs_glacier_temp(options, start_year, end_year)
            return fig
        elif options == 'Glacier Melt':
            fig = plot_sea_level_vs_glacier_temp(options, start_year, end_year)
            return fig
    return tab1
def catastrophe_vs_options_tab_2(app):
    """Build the "Catastrophe Over the Years" tab and register its callbacks.

    Shows a map of drought/flood/storm events, optionally restricted to a
    single country ('All' shows every country).

    Parameters:
        app: the Dash application callbacks are attached to.

    Returns:
        dbc.Card: the tab layout.
    """
    # Display label -> internal key; only the keys feed the dropdown options,
    # the values are currently unused.
    catastrophe_types = {
        'Drought': 'drought',
        'Flood': 'flood',
        'Storm': 'storm'
    }
    tab2 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select catastrophe type :"),
                    dbc.Col(dcc.Dropdown(id='catastrophe_type_dropdown',
                                         options=[{'label': k, 'value': k} for k in catastrophe_types.keys()],
                                         value='Drought', style={'backgroundColor':'white','color':'black'}))
                ]),
                    md=4),
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select a country to view:"),
                    dbc.Col(dcc.Dropdown(id='country_view_dropdown', value='All', style={'backgroundColor':'white','color':'black'}))
                ]),
                    md=4),
                dbc.Col(dbc.FormGroup([
                    dbc.Label(" "),
                    dbc.Button('Display the Graph', id='catastrophe_map_button',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=4)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='catastrophe_map_graph'))
            ])
        ]),
        className="mt-3",
    )
    # Repopulate the country dropdown from the dataset that matches the
    # chosen catastrophe type, with an extra 'All' entry at the front.
    @app.callback(
        Output('country_view_dropdown', 'options'),
        [Input('catastrophe_type_dropdown', 'value')],
    )
    def set_country_names(selected_option):
        if selected_option == 'Drought':
            df_drought = get_drought()
            country_names = df_drought['country'].unique()
            country_names = np.insert(country_names, 0, 'All', axis=0)
            return [{'label': i, 'value': i} for i in country_names]
        elif selected_option == 'Flood':
            df_flood = get_flood()
            country_names = df_flood['country'].unique()
            country_names = np.insert(country_names, 0, 'All', axis=0)
            return [{'label': i, 'value': i} for i in country_names]
        elif selected_option == 'Storm':
            df_storm = get_storm()
            country_names = df_storm['country'].unique()
            country_names = np.insert(country_names, 0, 'All', axis=0)
            return [{'label': i, 'value': i} for i in country_names]
        else:
            # Unreachable via the UI (dropdown only offers the three keys);
            # leaves the options unchanged and logs to stdout.
            print("error")
    # Render the event map when the button is clicked.
    @app.callback(
        Output('catastrophe_map_graph', 'figure'),
        [Input('catastrophe_map_button', 'n_clicks')],
        [State('catastrophe_type_dropdown', 'value'),
         State('country_view_dropdown', 'value')]
    )
    def get_the_map(n_clicks, cat_type, country_name):
        fig = plot_map_for_drought_storm_flood(cat_type, country_name)
        return fig
    return tab2
def catastrophe_combined_graph_vs_options_tab_3(app):
    """Build the "Trends" tab and register its callbacks on *app*.

    Plots catastrophes against a chosen driving factor (temperature,
    deforestation or greenhouse-gas emissions) for one country over a
    year range.

    Parameters:
        app: the Dash application callbacks are attached to.

    Returns:
        dbc.Card: the tab layout.
    """
    factor_types = ['Temperature', 'Deforestation', 'Green House Gas Emissions']
    tab3 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select factor type:"),
                    dbc.Col(dcc.Dropdown(id='factor_type_dropdown',
                                         options=[{'label': k, 'value': k} for k in factor_types],
                                         value='Deforestation', style={'backgroundColor':'white','color':'black'}))
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select Start Year:"),
                    dbc.Col(dcc.Dropdown(id='catastrophe_start_year', value=1990 ,style={'backgroundColor':'white','color':'black'}))
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select End Year:"),
                    dbc.Col(dcc.Dropdown(id='catastrophe_end_year', value=2008, style={'backgroundColor':'white','color':'black'}))
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Select a country :"),
                    dbc.Col(dcc.Dropdown(id='catastrophe_country_name', value='Indonesia', style={'backgroundColor':'white','color':'black'}))
                ]),
                    md=6),
                dbc.Col(dbc.FormGroup([
                    dbc.Label(" "),
                    dbc.Button('Display the Graph', id='catastrophe_combined_graph_button',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='catastrophe_combined_graph'))
            ])
        ]),
        className="mt-3",
    )
    # Fill the year and country dropdowns from the dataset matching the
    # selected factor, limited to the fixed window 1970-2008.
    @app.callback(
        Output('catastrophe_start_year', 'options'),
        Output('catastrophe_end_year', 'options'),
        Output('catastrophe_country_name', 'options'),
        [Input('factor_type_dropdown', 'value')],
    )
    def set_start_end_year_and_country(selected_option):
        # Manually builds the inclusive list [1970, ..., 2008].
        years = []
        f_year = 1970
        years.append(f_year)
        while f_year != 2008:
            f_year = f_year + 1
            years.append(f_year)
        if selected_option == 'Temperature':
            df_temp = get_temperature()
            df_temp = df_temp[df_temp['dt'].isin(years)]
            years_range = df_temp['dt'].unique()
            countries = df_temp['Country'].unique()
            return [{'label': i, 'value': i} for i in years_range], [{'label': i, 'value': i} for i in years_range], [
                {'label': i, 'value': i} for i in countries]
        elif selected_option == 'Deforestation':
            df_deforest = get_deforestation()
            df_deforest = df_deforest[df_deforest['year'].isin(years)]
            years_range = df_deforest['year'].unique()
            countries = df_deforest['country'].unique()
            return [{'label': i, 'value': i} for i in years_range], [{'label': i, 'value': i} for i in years_range], [
                {'label': i, 'value': i} for i in countries]
        elif selected_option == 'Green House Gas Emissions':
            df_green = get_green_house()
            df_green = df_green[df_green['year'].isin(years)]
            years_range = df_green['year'].unique()
            countries = df_green['country'].unique()
            return [{'label': i, 'value': i} for i in years_range], [{'label': i, 'value': i} for i in years_range], [
                {'label': i, 'value': i} for i in countries]
        else:
            # Unreachable via the UI (dropdown only offers factor_types);
            # leaves the options unchanged and logs to stdout.
            print("error")
    # Render the combined bar chart when the button is clicked.
    @app.callback(
        Output('catastrophe_combined_graph', 'figure'),
        [Input('catastrophe_combined_graph_button', 'n_clicks')],
        [State('factor_type_dropdown', 'value'),
         State('catastrophe_start_year', 'value'),
         State('catastrophe_end_year', 'value'),
         State('catastrophe_country_name', 'value')]
    )
    def get_combined_graph(n_clicks, factor_type, start_date, end_date, country_name):
        fig = plot_combined_bar_vs_options(factor_type, start_date, end_date, country_name)
        return fig
    return tab3
def catastrophe_section(app):
    """Assemble the catastrophe dashboard section from its three tabs."""
    map_tab = dbc.Tab(catastrophe_vs_options_tab_2(app), label="Catastrophe Over the Years")
    sea_level_tab = dbc.Tab(sea_level_vs_others_tab_1(app), label="Sea Level Rise")
    trends_tab = dbc.Tab(catastrophe_combined_graph_vs_options_tab_3(app), label="Trends in affects of other factors")
    return dbc.Tabs([map_tab, sea_level_tab, trends_tab])
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,084
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/dashboard_components/machine_learning_section.py
|
from pathlib import Path
from typing import Tuple
import dash
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from dash.dependencies import Output, Input, State
from matplotlib.widgets import Button, Slider
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from graphs.glaciers_model import glacier_vs_temperature_model_info, glacier_vs_temperature_model_prediction
from graphs.sea_level_model import sea_level_vs_temperature_model_info, sea_level_vs_temperature_model_prediction
from ml_models.prediction import temperature_prediction,glacier_prediction, sea_level_prediction
def glacier_model_tab(app):
    """Tab embedding the full-dataset glacier regression figure.

    The *app* argument is unused but kept for signature consistency with
    the other tab builders.
    """
    heading_row = dbc.Row([
        html.Br(), html.Br(),
        dbc.Col(html.H3(children="Machine Learning Models used for Datasets"))
    ])
    graph_row = dbc.Row([
        html.Br(), html.Br(),
        dbc.Col(dcc.Graph(id='glacier-model-1-graph', figure=glacier_vs_temperature_model_info()))
    ])
    return dbc.Card(
        dbc.CardBody([html.Hr(), heading_row, html.Hr(), graph_row]),
        className="ml-1",
    )
def sea_level_model_tab(app):
    """Tab embedding the full-dataset sea-level regression figure.

    The *app* argument is unused but kept for signature consistency with
    the other tab builders.
    """
    heading_row = dbc.Row([
        html.Br(), html.Br(),
        dbc.Col(html.H3(children="Machine Learning Models used for Datasets"))
    ])
    graph_row = dbc.Row([
        html.Br(), html.Br(),
        dbc.Col(dcc.Graph(id='glacier-model-2-graph', figure=sea_level_vs_temperature_model_info()))
    ])
    return dbc.Card(
        dbc.CardBody([html.Hr(), heading_row, html.Hr(), graph_row]),
        className="ml-2",
    )
def predictor_tab(app):
    """Build the "Temperature Predictor" tab and register its chained callbacks.

    Flow: the user enters greenhouse-gas, forest-loss and CO2 values; one
    callback predicts the temperature, and two downstream callbacks use
    that predicted temperature to plot sea-level and glacier predictions.

    Parameters:
        app: the Dash application callbacks are attached to.

    Returns:
        dbc.Card: the tab layout.
    """
    tab2 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Enter a value for Greenhouse Gas Emission (Kilotonne of CO2 equivalent) between 150000 and 350000: "),
                    dbc.Input(value=200000, id="temp-input-1", type="number", min=150000, max=350000),
                    dbc.Label("Enter a value for Forest Area Loss (sq km) between 100000 and 250000: "),
                    dbc.Input(value=125000, id="temp-input-2", type="number", min=100000, max=250000),
                    dbc.Label("Enter a value for Carbon Dioxide Emission (Kilotonne) between 95000 and 250000: "),
                    dbc.Input(value=205000, id="temp-input-3", type="number", min=95000, max=250000),
                    dbc.Label("."),
                    dbc.Button('Predict Temperature', id='temp-button',
                               color='info',
                               style={'margin-bottom': '1em'}, block=True)
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                html.Br(), html.Br(),
                dbc.Col(html.H4(id='temp-heading', children="Predicted temperature value: ")),
                dbc.Col(html.Div(id='temp-value'))
            ]),
            html.Hr(),
            dbc.Row([
                html.Br(), html.Br(),
                dbc.Col(dcc.Graph(id='model-1-graph'))
            ]),
            dbc.Row([
                html.Br(), html.Br(),
                dbc.Col(dcc.Graph(id='model-2-graph'))
            ]),
        ]),
        className="ml-3",
    )
    # Step 1: predict temperature from the three inputs on button click.
    @app.callback(
        Output('temp-value', 'children'),
        [Input('temp-button', 'n_clicks')],
        [State('temp-input-1', 'value'),
         State('temp-input-2', 'value'),
         State('temp-input-3', 'value'), ])
    def update_temp(n_clicks,greenhouse_gas,forest,carbon_dioxide):
        temp = temperature_prediction([[greenhouse_gas,forest,carbon_dioxide]])
        # Unwraps the nested prediction result to a scalar for display.
        return temp[0][0]
    # Step 2a: the displayed temperature drives the sea-level figure.
    @app.callback(
        Output('model-1-graph', 'figure'),
        [Input('temp-value', 'children')])
    def update_sea_level(temperature):
        sea_level = sea_level_prediction([[temperature]])
        return sea_level_vs_temperature_model_prediction(temperature, sea_level[0])
    # Step 2b: the same temperature drives the glacier figure.
    @app.callback(
        Output('model-2-graph', 'figure'),
        [Input('temp-value', 'children')])
    def update_glacier(temperature):
        glacier_mass_balance = glacier_prediction([[temperature]])
        return glacier_vs_temperature_model_prediction(temperature, glacier_mass_balance[0])
    return tab2
def machine_learning_results(app):
    """Assemble the machine-learning section: predictor plus the two full-dataset tabs."""
    predictor = dbc.Tab(predictor_tab(app), label="Temperature Predictor")
    glacier_dataset = dbc.Tab(glacier_model_tab(app), label="Complete Dataset - Glaciers")
    sea_level_dataset = dbc.Tab(sea_level_model_tab(app), label="Complete Dataset - Sea Level")
    return dbc.Tabs([predictor, glacier_dataset, sea_level_dataset])
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,085
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/ml_models/temperature_model.py
|
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from data.source import get_temp_greenhouse_carbon_forest
class Temperature_Models:
    """Lazy singleton holder for the temperature regression model.

    The model is fitted once on first access and cached in a private
    class attribute; later calls return the cached fitted estimator.
    """

    # Cached fitted LinearRegression; None until first build.
    __temperature_model = None

    @staticmethod
    def get_temperature_model():
        """Return the fitted temperature model, training it on first use.

        Features are columns 2-4 of the combined dataset and the target
        is column 1 (presumably temperature, given the source helper's
        name -- confirm against data/source.py).
        """
        # `is None` replaces the original `== None`: identity is the
        # correct comparison against the None singleton (PEP 8).
        if Temperature_Models.__temperature_model is None:
            df = get_temp_greenhouse_carbon_forest()
            # Drop the CSV's leftover index column before slicing.
            df.drop(labels='Unnamed: 0', axis=1, inplace=True)
            X = df.iloc[:, [2, 3, 4]].values
            y = df.iloc[:, [1]].values
            linear_regressor = LinearRegression()
            Temperature_Models.__temperature_model = linear_regressor.fit(X, y)
        return Temperature_Models.__temperature_model
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,086
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/dashboard_components/population_vs_electricity_section.py
|
from pathlib import Path
from typing import Tuple
import dash
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from dash.dependencies import Output, Input, State
from matplotlib.widgets import Button, Slider
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from graphs.population_vs_electricity_graphs import renewable_vs_non_renewable_electricity, \
non_renewable_electricity_vs_poverty, non_renewable_electricity_vs_population
def tab_1_content(app):
    """Tab: renewable vs non-renewable production for a typed-in country.

    Registers one callback on *app* that redraws the graph on button click.
    """
    country_input = dbc.Col(dbc.FormGroup([
        dbc.Label("Country Name:"),
        dbc.Input(value="Canada", id="population_vs_electricity-country-input-1", type="text"),
    ]), md=6)
    draw_button = dbc.Col(dbc.FormGroup([
        dbc.Label("."),
        dbc.Button('Display the Graph', id='population_vs_electricity_country-display-graph-button-1',
                   color='info',
                   style={'margin-bottom': '1em'}, block=True)
    ]), md=6)
    graph_row = dbc.Row([
        dbc.Col(dcc.Graph(id='population_vs_electricity_country-graph-1'))
    ])
    tab1 = dbc.Card(
        dbc.CardBody([
            dbc.Row([country_input, draw_button]),
            html.Hr(),
            graph_row
        ]),
        className="mt-3",
    )

    @app.callback(
        Output('population_vs_electricity_country-graph-1', 'figure'),
        [Input('population_vs_electricity_country-display-graph-button-1', 'n_clicks')],
        [State('population_vs_electricity-country-input-1', 'value')])
    def render_country_graph(n_clicks, country_name):
        # Only draw once a non-empty country name is available.
        if country_name:
            return renewable_vs_non_renewable_electricity(country_name)

    return tab1
def tab_2_content(app):
    """Build the 'Impact of Poverty' tab.

    A year range-slider drives the non-renewable-electricity vs poverty
    graph. Registers the slider callback on `app` and returns the tab's card.
    """
    tab2 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Choose The Year:"),
                    dcc.RangeSlider(
                        id='population_vs_electricity-country-input-2',
                        min=1985,
                        max=2015,
                        value=[2000],
                        dots=True,
                        marks={i: str(i) for i in range(1985, 2016)},
                    ),
                ]),
                    md=12)
            ]),
            html.Hr(),
            dbc.Row([
                html.Br(), html.Br(),
                dbc.Col(dcc.Graph(id='population_vs_electricity_country-graph-2'))
            ])
        ]),
        className="mt-3",
    )

    # Fix: the original declared the same slider value as both Input and
    # State, so the Input value arrived in an argument misleadingly named
    # `n_clicks`. One Input and one parameter is equivalent and honest.
    @app.callback(
        Output('population_vs_electricity_country-graph-2', 'figure'),
        [Input('population_vs_electricity-country-input-2', 'value')])
    def update_figure(year):
        # `year` is the RangeSlider value, a list with one element.
        if year:
            return non_renewable_electricity_vs_poverty(year[0])

    return tab2
def tab_3_content(app):
    """Build the 'Impact of Population' tab.

    A year range-slider drives the non-renewable-electricity vs population
    graph. Registers the slider callback on `app` and returns the tab's card.
    """
    tab3 = dbc.Card(
        dbc.CardBody([
            dbc.Row([
                dbc.Col(dbc.FormGroup([
                    dbc.Label("Choose The Year:"),
                    dcc.RangeSlider(
                        id='population_vs_electricity-country-input-3',
                        min=1985,
                        max=2015,
                        value=[2000],
                        dots=True,
                        marks={i: str(i) for i in range(1985, 2016)},
                    ),
                ]),
                    md=12),
            ]),
            html.Hr(),
            dbc.Row([
                dbc.Col(dcc.Graph(id='population_vs_electricity_country-graph-3'))
            ])
        ]),
        className="mt-3",
    )

    # Fix: the original declared the same slider value as both Input and
    # State, shadowing it behind an unused `n_clicks` argument.
    @app.callback(
        Output('population_vs_electricity_country-graph-3', 'figure'),
        [Input('population_vs_electricity-country-input-3', 'value')])
    def update_figure(year):
        # `year` is the RangeSlider value, a list with one element.
        if year:
            return non_renewable_electricity_vs_population(year[0])

    return tab3
def population_vs_electricity_section(app):
    """Assemble the three population-vs-electricity tabs into one dbc.Tabs."""
    builders = (
        ("Production Sources", tab_1_content),
        ("Impact of Poverty", tab_2_content),
        ("Impact of Population", tab_3_content),
    )
    return dbc.Tabs(
        [dbc.Tab(build(app), label=label) for label, build in builders]
    )
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,087
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py
|
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from data.source import *
# Option:1 Map Structure
def plot_map_for_drought_storm_flood(type_of_catastrophe, country):
    """Animated choropleth of a catastrophe's impact over the years.

    Parameters
    ----------
    type_of_catastrophe : str
        One of 'Drought', 'Storm' or 'Flood'; selects the data source.
    country : str
        A country name, or 'All' to show every country.

    Returns
    -------
    plotly figure, or None (after printing a message) for an unknown
    catastrophe type — same fallback behavior as before.
    """
    # One loader per catastrophe type replaces three duplicated branches.
    loaders = {'Drought': get_drought, 'Storm': get_storm, 'Flood': get_flood}
    loader = loaders.get(type_of_catastrophe)
    if loader is None:
        print("Issues loading graph")
        return None
    df = loader()
    country_name = list(country.split(" "))
    if country != 'All':
        df = df[df['country'].isin(country_name)]
    fig = px.choropleth(df,
                        locations='country',
                        color="value",
                        animation_frame="years",
                        color_continuous_scale="Plasma",
                        locationmode='country names',
                        range_color=(0, 20),
                        title=type_of_catastrophe + ' over the years for ' + country_name[0],
                        height=600
                        )
    return fig
# Option 2: Bar Structure
def plot_combined_bar_vs_options(type_of_factor, start_date, end_date, country):
    """Grouped bars of drought/flood/storm victims with one factor overlaid.

    Parameters
    ----------
    type_of_factor : str
        'Deforestation', 'Green House Gas Emissions' or 'Temperature';
        selects the line trace drawn over the catastrophe bars.
    start_date, end_date : int
        Inclusive year range.
    country : str
        Country name to restrict all datasets to.

    Returns
    -------
    plotly figure, or None for an unknown factor (as before, where no
    branch matched and the function fell off the end).
    """
    # range() replaces the original hand-rolled while loop, which never
    # terminated when end_date < start_date.
    years = list(range(start_date, end_date + 1))
    country_name = list(country.split(" "))

    def _restrict(df, year_col):
        # Keep only the requested country and year range.
        df = df[df['country'].isin(country_name)]
        return df[df[year_col].isin(years)]

    df_drought = _restrict(get_drought(), 'years')
    df_flood = _restrict(get_flood(), 'years')
    df_storm = _restrict(get_storm(), 'years')

    # Per-factor: (loader, country column, year column, value column, name).
    # Note the temperature dataset uses 'Country'/'dt'/'avg'.
    overlays = {
        'Deforestation': (get_deforestation, 'country', 'year', 'value',
                          'Reduction in Forest Area'),
        'Green House Gas Emissions': (get_green_house, 'country', 'year', 'value',
                                      'Green House Gas Emissions'),
        'Temperature': (get_temperature, 'Country', 'dt', 'avg', 'Temperature'),
    }
    if type_of_factor not in overlays:
        return None
    loader, c_col, y_col, v_col, trace_name = overlays[type_of_factor]
    df_factor = loader()
    df_factor = df_factor[df_factor[c_col].isin(country_name)]
    df_factor = df_factor[df_factor[y_col].isin(years)]

    fig = go.Figure()
    fig.add_trace(go.Bar(
        x=years,
        y=df_drought['value'],
        name='drought',
        marker_color='indianred'
    ))
    fig.add_trace(go.Bar(
        x=years,
        y=df_flood['value'],
        name='flood',
        marker_color='lightsalmon'
    ))
    fig.add_trace(go.Bar(
        x=years,
        y=df_storm['value'],
        name='storm',
        marker_color='pink'
    ))
    fig.add_trace(go.Scatter(
        x=years,
        y=df_factor[v_col],
        mode='lines+markers',
        name=trace_name)
    )
    fig.update_layout(barmode='group', xaxis_tickangle=-45, xaxis_title=" Years ",
                      yaxis_title=" People affected ")
    return fig
# Example: plot_combined_bar_vs_options('Temperature', 1990, 2010, 'Ireland')
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,088
|
KirtishS/MySustainableEarth
|
refs/heads/main
|
/graphs/population_vs_electricity_graphs.py
|
from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from matplotlib.widgets import Button, Slider
from data.source import get_electricity_and_population_info
def renewable_vs_non_renewable_electricity(country_name: str):
    """Line chart comparing fossil-fuel electricity against total
    electricity production for one country across all years."""
    data = get_electricity_and_population_info()
    data = data.loc[data['country'] == country_name]
    figure = go.Figure()
    figure.add_trace(go.Scatter(
        x=data['year'], y=data['electricity_from_Fossil_fuel'],
        name='From oil, gas and coal', mode='lines+markers',
        line=dict(color='firebrick', width=4)))
    figure.add_trace(go.Scatter(
        x=data['year'], y=data['total_electricity'],
        name='Total Electricity',
        line=dict(color='royalblue', width=4)))
    figure.update_layout(
        title='<b>Electricity Production - Renewable vs Non-Renewable Sources</b> for ' + country_name,
        xaxis_title='Years',
        yaxis_title='Electricity (kWh)')
    return figure
def non_renewable_electricity_vs_poverty(year: int):
    """Dual-axis chart per country for one year: total and fossil-fuel
    electricity (left axis) against adjusted income per person (right)."""
    data = get_electricity_and_population_info()
    data = data.loc[data['year'] == year]
    figure = make_subplots(specs=[[{"secondary_y": True}]])
    figure.add_trace(
        go.Scatter(x=data['country'], y=data['total_electricity'],
                   name='Total Electricity', mode='lines+markers',
                   line=dict(color='darkgreen', width=4)),
        secondary_y=False)
    figure.add_trace(
        go.Scatter(x=data['country'], y=data['electricity_from_Fossil_fuel'],
                   name='Electricity From oil, gas and coal', mode='lines+markers',
                   line=dict(color='firebrick', width=4)),
        secondary_y=False)
    figure.add_trace(
        go.Scatter(x=data['country'], y=data['AdjustedIncomePerPerson'],
                   name='Adjusted Income Per Person', mode='lines+markers',
                   line=dict(color='royalblue', width=4)),
        secondary_y=True)
    figure.update_yaxes(title_text="<b>Electricity (kWh)</b>", secondary_y=False)
    figure.update_yaxes(title_text="<b>Adjusted Income Per Person</b>", secondary_y=True)
    figure.update_layout(
        title='<b>Electricity From Non-Renewable Sources vs Poverty Rate</b> for the year ' + str(year),
        xaxis_title='Countries')
    return figure
def non_renewable_electricity_vs_population(year: int):
    """Dual-axis chart per country for one year: total and fossil-fuel
    electricity (left axis) against total population (right)."""
    data = get_electricity_and_population_info()
    data = data.loc[data['year'] == year]
    figure = make_subplots(specs=[[{"secondary_y": True}]])
    figure.add_trace(
        go.Scatter(x=data['country'], y=data['total_electricity'],
                   name='Total Electricity', mode='lines+markers',
                   line=dict(color='darkgreen', width=4)),
        secondary_y=False)
    figure.add_trace(
        go.Scatter(x=data['country'], y=data['electricity_from_Fossil_fuel'],
                   name='Electricity From oil, gas and coal', mode='lines+markers',
                   line=dict(color='firebrick', width=4)),
        secondary_y=False)
    figure.add_trace(
        go.Scatter(x=data['country'], y=data['total_population'],
                   name='Total Population', mode='lines+markers',
                   line=dict(color='royalblue', width=4)),
        secondary_y=True)
    figure.update_yaxes(title_text="<b>Electricity (kWh)</b>", secondary_y=False)
    figure.update_yaxes(title_text="<b>Total Population</b>", secondary_y=True)
    figure.update_layout(
        title='<b>Electricity From Non-Renewable Sources vs Total Population </b>for the year ' + str(year),
        xaxis_title='Countries')
    return figure
if __name__ == "__main__":
    # Smoke-test the three plot builders with sample inputs.
    renewable_vs_non_renewable_electricity('India')
    non_renewable_electricity_vs_poverty(1990)
    non_renewable_electricity_vs_population(1990)
    print("ok")
|
{"/graphs/glaciers_oil_areas.py": ["/data/source.py"], "/dashboard_components/glaciers_oil_areas_dash.py": ["/graphs/population_vs_electricity_graphs.py", "/graphs/glaciers_oil_areas.py"], "/dashboard_components/emissions.py": ["/graphs/emissions.py"], "/ml_models/prediction.py": ["/ml_models/glacier_model.py", "/ml_models/sea_level_model.py", "/ml_models/temperature_model.py"], "/ml_models/sea_level_model.py": ["/data/source.py"], "/ml_models/glacier_model.py": ["/data/source.py"], "/graphs/sea_level_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/graphs/emissions.py": ["/data/source.py"], "/main.py": ["/dashboard_components/population_vs_electricity_section.py", "/dashboard_components/glaciers_oil_areas_dash.py", "/dashboard_components/emissions.py", "/dashboard_components/catastrophe_section.py", "/dashboard_components/machine_learning_section.py", "/non_renewable.py", "/renewable.py"], "/graphs/sea_level_vs_glacier_melt.py": ["/data/source.py"], "/graphs/glaciers_model.py": ["/data/source.py", "/ml_models/prediction.py"], "/dashboard_components/catastrophe_section.py": ["/data/source.py", "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py", "/graphs/population_vs_electricity_graphs.py", "/graphs/sea_level_vs_glacier_melt.py"], "/dashboard_components/machine_learning_section.py": ["/graphs/glaciers_model.py", "/graphs/sea_level_model.py", "/ml_models/prediction.py"], "/ml_models/temperature_model.py": ["/data/source.py"], "/dashboard_components/population_vs_electricity_section.py": ["/graphs/population_vs_electricity_graphs.py"], "/graphs/flood_drought_storm_vs_temp_deforest_greenhouse.py": ["/data/source.py"], "/graphs/population_vs_electricity_graphs.py": ["/data/source.py"]}
|
13,090
|
rti/poodle-backend-django
|
refs/heads/main
|
/app/models.py
|
from django.db import models
class Query(models.Model):
    """A poll asking attendees which of several date Options suit them."""
    name = models.CharField(max_length=512)

    def __str__(self):
        return self.name

    def choices(self):
        """Return every Choice made on any of this query's options."""
        collected = []
        for option in self.options.all():
            collected.extend(option.choices.all())
        return collected

    class Meta:
        verbose_name_plural = "Queries"
class Option(models.Model):
    """A candidate time slot (begin date/time, optional end) for a Query."""
    begin_date = models.DateField()
    begin_time = models.TimeField(blank=True, null=True)
    end_date = models.DateField(blank=True, null=True)
    end_time = models.TimeField(blank=True, null=True)
    query = models.ForeignKey(
        Query, related_name='options', on_delete=models.CASCADE)

    # Fix: this helper takes no `self`; it was a plain function that only
    # worked because it was always called through the class. Marking it a
    # staticmethod makes that explicit (calls via Option.time_str unchanged).
    @staticmethod
    def time_str(time):
        """Format a time as HH:MM, or None when no time is set."""
        if time:
            return time.strftime('%H:%M')
        return None

    def begin_time_short(self):
        return Option.time_str(self.begin_time)

    def end_time_short(self):
        return Option.time_str(self.end_time)

    # TODO: appending the Query name here is only required to identify
    # Options in the admin form. Find a way to only append the Query name there
    def __str__(self):
        result = str(self.begin_date)
        if self.begin_time:
            result += ' ' + str(self.begin_time_short())
        if self.end_date or self.end_time:
            result += ' -'
        if self.end_date:
            result += ' ' + str(self.end_date)
        if self.end_time:
            result += ' ' + str(self.end_time_short())
        return '%s (%s)' % (result, str(self.query))
class Attendee(models.Model):
    """A person who answers a Query."""
    name = models.CharField(max_length=64)

    def __str__(self):
        return f"{self.name}"
class Status(models.TextChoices):
    # Attendance answer, stored as a single character (value, human label).
    YES = 'Y', 'Yes'
    NO = 'N', 'No'
    MAYBE = 'M', 'Maybe'
class Choice(models.Model):
    """One attendee's yes/no/maybe answer to one Option of a Query."""
    attendee = models.ForeignKey(
        Attendee, related_name='choices', on_delete=models.CASCADE)
    option = models.ForeignKey(
        Option, related_name='choices', on_delete=models.CASCADE)
    status = models.CharField(max_length=1, choices=Status.choices)

    def __str__(self):
        return (f'{self.attendee.name}\'s choice for '
                f'"{self.option.query.name}": '
                f'{self.option.begin_date} {self.status}')

    class Meta:
        # An attendee may answer a given option at most once.
        constraints = [
            models.UniqueConstraint(
                fields=['attendee', 'option'], name='unique_choice')]
|
{"/app/tests.py": ["/app/models.py"], "/app/views.py": ["/app/serializers.py", "/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
13,091
|
rti/poodle-backend-django
|
refs/heads/main
|
/app/tests.py
|
from datetime import date, time
from django.contrib.auth.models import User
from django.db import utils
from django.test import TestCase
from re import match
from rest_framework import status
from rest_framework.test import APITestCase
from app.models import Query, Option, Attendee, Choice
class ModelRelationsTest(TestCase):
    """Relational integrity between Query, Option, Attendee and Choice."""

    def setUp(self):
        self.query = Query.objects.create(name='When can we meet?')
        self.options = [
            Option.objects.create(begin_date='2021-01-01', query=self.query),
            Option.objects.create(begin_date='2021-01-02', query=self.query),
            Option.objects.create(begin_date='2021-01-03', query=self.query), ]
        self.attendees = [
            Attendee.objects.create(name='Alisa'),
            Attendee.objects.create(name='Asisa'), ]
        self.choices = [
            Choice.objects.create(option=self.options[0], attendee=self.attendees[0], status='Y'),
            Choice.objects.create(option=self.options[1], attendee=self.attendees[0], status='N'),
            Choice.objects.create(option=self.options[2], attendee=self.attendees[0], status='Y'),
            Choice.objects.create(option=self.options[0], attendee=self.attendees[1], status='M'),
            Choice.objects.create(option=self.options[1], attendee=self.attendees[1], status='Y'),
            Choice.objects.create(option=self.options[2], attendee=self.attendees[1], status='Y'), ]

    def test_prerequisites(self):
        """Fixture sanity: counts and reverse relations line up."""
        self.assertIsNotNone(self.query)
        self.assertEqual(len(self.options), 3)
        self.assertEqual(len(self.query.options.all()), 3)
        self.assertEqual(len(self.query.choices()), 6)
        self.assertEqual(len(self.attendees), 2)
        self.assertEqual(len(self.attendees[0].choices.all()), 3)
        # Fix: this line previously checked attendees[0] a second time
        # instead of attendees[1].
        self.assertEqual(len(self.attendees[1].choices.all()), 3)
        self.assertEqual(len(self.choices), 6)
        self.assertEqual(len(self.options[0].choices.all()), 2)
        self.assertEqual(len(self.options[1].choices.all()), 2)
        self.assertEqual(len(self.options[2].choices.all()), 2)

    def test_unique_choice(self):
        """A duplicate (attendee, option) pair must violate the constraint."""
        # Fix: the original referenced `self.fail` without calling it, so
        # the test silently passed even when no IntegrityError was raised.
        with self.assertRaises(utils.IntegrityError):
            Choice.objects.create(option=self.options[0], attendee=self.attendees[0], status='M')

    def test_delete_attendee_deletes_choices(self):
        """Deleting an attendee cascades to their choices."""
        self.assertEqual(len(self.query.choices()), 6)
        self.attendees[0].delete()
        self.assertEqual(len(self.query.choices()), 3)
        self.attendees[1].delete()
        self.assertEqual(len(self.query.choices()), 0)

    def test_delete_option_deletes_choices(self):
        """Deleting an option cascades to its choices."""
        self.assertEqual(len(self.query.choices()), 6)
        self.options[0].delete()
        self.assertEqual(len(self.query.choices()), 4)
        self.options[1].delete()
        self.assertEqual(len(self.query.choices()), 2)
        self.options[2].delete()
        self.assertEqual(len(self.query.choices()), 0)

    def test_delete_query_deletes_options_and_choices(self):
        """Deleting the query cascades to all options and choices."""
        self.assertEqual(len(self.query.options.all()), 3)
        self.assertEqual(len(self.query.choices()), 6)
        self.query.delete()
        self.assertEqual(len(Option.objects.all()), 0)
        self.assertEqual(len(Choice.objects.all()), 0)
class OptionModelTest(TestCase):
    """String formatting of Option instances."""

    def setUp(self):
        self.query = Query.objects.create(name='When can we meet?')
        self.option = Option.objects.create(begin_date='2021-01-01', query=self.query)

    def test_option_string(self):
        """__str__ grows as begin time, end time and end date get filled in."""
        opt = self.option
        self.assertEqual(str(opt), '2021-01-01 (When can we meet?)')
        opt.begin_time = time(18, 0)
        self.assertEqual(str(opt), '2021-01-01 18:00 (When can we meet?)')
        opt.end_time = time(19, 0)
        self.assertEqual(str(opt), '2021-01-01 18:00 - 19:00 (When can we meet?)')
        opt.end_date = date(2021, 1, 2)
        opt.end_time = time(3, 0)
        self.assertEqual(str(opt), '2021-01-01 18:00 - 2021-01-02 03:00 (When can we meet?)')
class QueryApiAnonTest(APITestCase):
# TODO: add some fail tests, e.g. invalid ids
@classmethod
def setUpTestData(cls):
cls.query = Query.objects.create(name='When can we meet?')
cls.options = [
Option.objects.create(begin_date='2021-01-01', begin_time='18:00:00', end_date='2021-01-02', end_time='03:00:00', query=cls.query),
Option.objects.create(begin_date='2021-01-02', begin_time='18:00:00', end_date='2021-01-03', end_time='03:00:00', query=cls.query),
Option.objects.create(begin_date='2021-01-03', begin_time='18:00:00', end_date='2021-01-04', end_time='03:00:00', query=cls.query), ]
cls.attendees = [
Attendee.objects.create(name='Alisa'),
Attendee.objects.create(name='Asisa'),
Attendee.objects.create(name='Takatuka'), ]
cls.choices = [
Choice.objects.create(option=cls.options[0], attendee=cls.attendees[0], status='Y'),
Choice.objects.create(option=cls.options[1], attendee=cls.attendees[0], status='N'),
Choice.objects.create(option=cls.options[2], attendee=cls.attendees[0], status='Y'),
Choice.objects.create(option=cls.options[0], attendee=cls.attendees[1], status='M'),
Choice.objects.create(option=cls.options[1], attendee=cls.attendees[1], status='Y'),
Choice.objects.create(option=cls.options[2], attendee=cls.attendees[1], status='Y'), ]
# root --------------------------------------------------------------------
def test_get_root(self):
response = self.client.get('/app/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
json = response.json()
self.assertIsNotNone(json)
self.assertIsNotNone(json['queries'])
self.assertIsNotNone(json['options'])
self.assertIsNotNone(json['choices'])
self.assertIsNotNone(json['attendees'])
self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/queries/\?format=json$', json['queries']))
self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/options/\?format=json$', json['options']))
self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/choices/\?format=json$', json['choices']))
self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/attendees/\?format=json$', json['attendees']))
def test_post_root(self):
response = self.client.post('/app/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put_root(self):
response = self.client.put('/app/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_patch_root(self):
response = self.client.patch('/app/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_root(self):
response = self.client.delete('/app/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_options_root(self):
response = self.client.options('/app/', {'format': 'json'})
self.assertEqual(response.status_code, 200)
# TODO: implement me
# query list --------------------------------------------------------------
def test_get_query_list(self):
response = self.client.get('/app/queries/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_post_query_list(self):
response = self.client.post('/app/queries/', {'name': 'New Query'}, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
json = response.json()
self.assertIsNotNone(json)
self.assertGreaterEqual(int(json['id']), 1)
self.assertEqual(json['name'], 'New Query')
self.assertEqual(json['options'], [])
def test_put_query_list(self):
response = self.client.put('/app/queries/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_patch_query_list(self):
response = self.client.patch('/app/queries/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_query_list(self):
response = self.client.delete('/app/queries/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_options_query_list(self):
response = self.client.options('/app/queries/', {'format': 'json'})
self.assertEqual(response.status_code, 200)
# TODO: implement me
# query item --------------------------------------------------------------
def test_get_query_item(self):
response = self.client.get('/app/queries/' + str(self.query.id) + '/', {'format': 'json'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
json = response.json()
self.assertIsNotNone(json)
self.assertEqual(json['id'], self.query.id)
self.assertEqual(json['name'], 'When can we meet?')
self.assertEqual(len(json['options']), 3)
self.assertEqual(json['options'][0]['id'], self.options[0].id)
self.assertEqual(json['options'][1]['id'], self.options[1].id)
self.assertEqual(json['options'][2]['id'], self.options[2].id)
self.assertEqual(json['options'][0]['begin_date'], self.options[0].begin_date)
self.assertEqual(json['options'][1]['begin_date'], self.options[1].begin_date)
self.assertEqual(json['options'][2]['begin_date'], self.options[2].begin_date)
self.assertEqual(json['options'][0]['begin_time'], self.options[0].begin_time)
self.assertEqual(json['options'][1]['begin_time'], self.options[1].begin_time)
self.assertEqual(json['options'][2]['begin_time'], self.options[2].begin_time)
self.assertEqual(json['options'][0]['end_date'], self.options[0].end_date)
self.assertEqual(json['options'][1]['end_date'], self.options[1].end_date)
self.assertEqual(json['options'][2]['end_date'], self.options[2].end_date)
self.assertEqual(json['options'][0]['end_time'], self.options[0].end_time)
self.assertEqual(json['options'][1]['end_time'], self.options[1].end_time)
self.assertEqual(json['options'][2]['end_time'], self.options[2].end_time)
self.assertEqual(len(json['options'][0]['choices']), 2)
self.assertEqual(len(json['options'][1]['choices']), 2)
self.assertEqual(len(json['options'][2]['choices']), 2)
self.assertEqual(json['options'][0]['choices'][0]['id'], self.choices[0].id)
self.assertEqual(json['options'][0]['choices'][0]['attendee'], self.choices[0].attendee.name)
self.assertEqual(json['options'][0]['choices'][0]['attendee_id'], self.choices[0].attendee.id)
self.assertEqual(json['options'][0]['choices'][0]['status'], self.choices[0].status)
self.assertEqual(json['options'][1]['choices'][0]['id'], self.choices[1].id)
self.assertEqual(json['options'][1]['choices'][0]['attendee'], self.choices[1].attendee.name)
self.assertEqual(json['options'][1]['choices'][0]['attendee_id'], self.choices[1].attendee.id)
self.assertEqual(json['options'][1]['choices'][0]['status'], self.choices[1].status)
self.assertEqual(json['options'][2]['choices'][0]['id'], self.choices[2].id)
self.assertEqual(json['options'][2]['choices'][0]['attendee'], self.choices[2].attendee.name)
self.assertEqual(json['options'][2]['choices'][0]['attendee_id'], self.choices[2].attendee.id)
self.assertEqual(json['options'][2]['choices'][0]['status'], self.choices[2].status)
self.assertEqual(json['options'][0]['choices'][1]['id'], self.choices[3].id)
self.assertEqual(json['options'][0]['choices'][1]['attendee'], self.choices[3].attendee.name)
self.assertEqual(json['options'][0]['choices'][1]['attendee_id'], self.choices[3].attendee.id)
self.assertEqual(json['options'][0]['choices'][1]['status'], self.choices[3].status)
self.assertEqual(json['options'][1]['choices'][1]['id'], self.choices[4].id)
self.assertEqual(json['options'][1]['choices'][1]['attendee'], self.choices[4].attendee.name)
self.assertEqual(json['options'][1]['choices'][1]['attendee_id'], self.choices[4].attendee.id)
self.assertEqual(json['options'][1]['choices'][1]['status'], self.choices[4].status)
self.assertEqual(json['options'][2]['choices'][1]['id'], self.choices[5].id)
self.assertEqual(json['options'][2]['choices'][1]['attendee'], self.choices[5].attendee.name)
self.assertEqual(json['options'][2]['choices'][1]['attendee_id'], self.choices[5].attendee.id)
self.assertEqual(json['options'][2]['choices'][1]['status'], self.choices[5].status)
def test_post_query_item(self):
response = self.client.post('/app/queries/' + str(self.query.id) + '/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put_query_item(self):
response = self.client.put('/app/queries/' + str(self.query.id) + '/', {'name': 'New Query'}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
json = response.json()
self.assertIsNotNone(json)
self.assertEqual(json['id'], self.query.id)
self.assertEqual(json['name'], 'New Query')
self.assertEqual(len(json['options']), 3)
self.assertEqual(json['options'][0]['id'], self.options[0].id)
self.assertEqual(json['options'][1]['id'], self.options[1].id)
self.assertEqual(json['options'][2]['id'], self.options[2].id)
self.assertEqual(json['options'][0]['begin_date'], self.options[0].begin_date)
self.assertEqual(json['options'][1]['begin_date'], self.options[1].begin_date)
self.assertEqual(json['options'][2]['begin_date'], self.options[2].begin_date)
self.assertEqual(json['options'][0]['begin_time'], self.options[0].begin_time)
self.assertEqual(json['options'][1]['begin_time'], self.options[1].begin_time)
self.assertEqual(json['options'][2]['begin_time'], self.options[2].begin_time)
self.assertEqual(json['options'][0]['end_date'], self.options[0].end_date)
self.assertEqual(json['options'][1]['end_date'], self.options[1].end_date)
self.assertEqual(json['options'][2]['end_date'], self.options[2].end_date)
self.assertEqual(json['options'][0]['end_time'], self.options[0].end_time)
self.assertEqual(json['options'][1]['end_time'], self.options[1].end_time)
self.assertEqual(json['options'][2]['end_time'], self.options[2].end_time)
self.assertEqual(len(json['options'][0]['choices']), 2)
self.assertEqual(len(json['options'][1]['choices']), 2)
self.assertEqual(len(json['options'][2]['choices']), 2)
self.assertEqual(json['options'][0]['choices'][0]['id'], self.choices[0].id)
self.assertEqual(json['options'][0]['choices'][0]['attendee'], self.choices[0].attendee.name)
self.assertEqual(json['options'][0]['choices'][0]['attendee_id'], self.choices[0].attendee.id)
self.assertEqual(json['options'][0]['choices'][0]['status'], self.choices[0].status)
self.assertEqual(json['options'][1]['choices'][0]['id'], self.choices[1].id)
self.assertEqual(json['options'][1]['choices'][0]['attendee'], self.choices[1].attendee.name)
self.assertEqual(json['options'][1]['choices'][0]['attendee_id'], self.choices[1].attendee.id)
self.assertEqual(json['options'][1]['choices'][0]['status'], self.choices[1].status)
self.assertEqual(json['options'][2]['choices'][0]['id'], self.choices[2].id)
self.assertEqual(json['options'][2]['choices'][0]['attendee'], self.choices[2].attendee.name)
self.assertEqual(json['options'][2]['choices'][0]['attendee_id'], self.choices[2].attendee.id)
self.assertEqual(json['options'][2]['choices'][0]['status'], self.choices[2].status)
self.assertEqual(json['options'][0]['choices'][1]['id'], self.choices[3].id)
self.assertEqual(json['options'][0]['choices'][1]['attendee'], self.choices[3].attendee.name)
self.assertEqual(json['options'][0]['choices'][1]['attendee_id'], self.choices[3].attendee.id)
self.assertEqual(json['options'][0]['choices'][1]['status'], self.choices[3].status)
self.assertEqual(json['options'][1]['choices'][1]['id'], self.choices[4].id)
self.assertEqual(json['options'][1]['choices'][1]['attendee'], self.choices[4].attendee.name)
self.assertEqual(json['options'][1]['choices'][1]['attendee_id'], self.choices[4].attendee.id)
self.assertEqual(json['options'][1]['choices'][1]['status'], self.choices[4].status)
self.assertEqual(json['options'][2]['choices'][1]['id'], self.choices[5].id)
self.assertEqual(json['options'][2]['choices'][1]['attendee'], self.choices[5].attendee.name)
self.assertEqual(json['options'][2]['choices'][1]['attendee_id'], self.choices[5].attendee.id)
self.assertEqual(json['options'][2]['choices'][1]['status'], self.choices[5].status)
def test_patch_query_item(self):
    """PATCH renames the query and returns the full nested options/choices payload."""
    resp = self.client.patch('/app/queries/' + str(self.query.id) + '/', {'name': 'Updated Query'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(body['id'], self.query.id)
    self.assertEqual(body['name'], 'Updated Query')
    self.assertEqual(len(body['options']), 3)
    # Every option is echoed with all scheduling fields and two choices each.
    for idx, opt in enumerate(self.options):
        got = body['options'][idx]
        self.assertEqual(got['id'], opt.id)
        self.assertEqual(got['begin_date'], opt.begin_date)
        self.assertEqual(got['begin_time'], opt.begin_time)
        self.assertEqual(got['end_date'], opt.end_date)
        self.assertEqual(got['end_time'], opt.end_time)
        self.assertEqual(len(got['choices']), 2)
    # self.choices is laid out round-robin: choice n belongs to option n % 3
    # and sits at slot n // 3 inside that option's choice list.
    for n, choice in enumerate(self.choices):
        got = body['options'][n % 3]['choices'][n // 3]
        self.assertEqual(got['id'], choice.id)
        self.assertEqual(got['attendee'], choice.attendee.name)
        self.assertEqual(got['attendee_id'], choice.attendee.id)
        self.assertEqual(got['status'], choice.status)
def test_delete_query_item(self):
    """DELETE removes the query; a follow-up GET must 404."""
    url = '/app/queries/' + str(self.query.id) + '/'
    self.assertEqual(self.client.delete(url, {'format': 'json'}).status_code, status.HTTP_204_NO_CONTENT)
    self.assertEqual(self.client.get(url, {'format': 'json'}).status_code, status.HTTP_404_NOT_FOUND)
def test_options_query_item(self):
    """OPTIONS on a single query is allowed."""
    resp = self.client.options('/app/queries/' + str(self.query.id) + '/', {'format': 'json'})
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    # TODO: implement me
# option list -------------------------------------------------------------
def test_get_option_list(self):
    """Options have no list action; GET on the collection must 405."""
    resp = self.client.get('/app/options/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_post_option_list(self):
    """POST creates an option for the query and echoes every submitted field."""
    payload = {
        'query_id': self.query.id,
        'begin_date': '2021-01-01', 'begin_time': '18:00:00',
        'end_date': '2021-01-02', 'end_time': '03:00:00'}
    resp = self.client.post('/app/options/', payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertGreaterEqual(int(body['id']), 1)
    for field, expected in payload.items():
        self.assertEqual(body[field], expected)
def test_put_option_list(self):
    """PUT on the option collection is not allowed (405)."""
    resp = self.client.put('/app/options/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_patch_option_list(self):
    """PATCH on the option collection is not allowed (405)."""
    resp = self.client.patch('/app/options/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_delete_option_list(self):
    """DELETE on the option collection is not allowed (405)."""
    resp = self.client.delete('/app/options/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_options_option_list(self):
    """OPTIONS on the option collection must be allowed (200)."""
    response = self.client.options('/app/options/', {'format': 'json'})
    # Consistency fix: use the DRF status constant like the rest of the
    # suite instead of a bare 200 literal.
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # TODO: implement me
# option item -------------------------------------------------------------
def test_get_option_item(self):
    """GET returns a single option with all of its scheduling fields."""
    opt = self.options[0]
    resp = self.client.get('/app/options/' + str(opt.id) + '/', {'format': 'json'})
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(body['id'], opt.id)
    self.assertEqual(body['query_id'], opt.query.id)
    for field in ('begin_date', 'begin_time', 'end_date', 'end_time'):
        self.assertEqual(body[field], getattr(opt, field))
def test_post_option_item(self):
    """POST on a single option is not allowed (405)."""
    resp = self.client.post('/app/options/' + str(self.options[0].id) + '/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_put_option_item(self):
    """PUT replaces every field of an existing option."""
    payload = {
        'query_id': self.query.id,
        'begin_date': '2021-01-11', 'begin_time': '20:30:00',
        'end_date': '2021-01-11', 'end_time': '21:00:00'}
    resp = self.client.put('/app/options/' + str(self.options[0].id) + '/', payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(int(body['id']), self.options[0].id)
    for field, expected in payload.items():
        self.assertEqual(body[field], expected)
def test_patch_option_item(self):
    """PATCH changes only begin_time; every other field keeps its fixture value."""
    resp = self.client.patch('/app/options/' + str(self.options[0].id) + '/', {
        'begin_time': '18:30:00'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(int(body['id']), self.options[0].id)
    self.assertEqual(body['query_id'], self.query.id)
    expected = {'begin_date': '2021-01-01', 'begin_time': '18:30:00',
                'end_date': '2021-01-02', 'end_time': '03:00:00'}
    for field, value in expected.items():
        self.assertEqual(body[field], value)
def test_delete_option_item(self):
    """DELETE removes the option; a follow-up GET must 404."""
    url = '/app/options/' + str(self.options[0].id) + '/'
    self.assertEqual(self.client.delete(url, {'format': 'json'}).status_code, status.HTTP_204_NO_CONTENT)
    self.assertEqual(self.client.get(url, {'format': 'json'}).status_code, status.HTTP_404_NOT_FOUND)
def test_options_option_item(self):
    """OPTIONS on a single option is allowed."""
    resp = self.client.options('/app/options/' + str(self.options[0].id) + '/', {'format': 'json'})
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    # TODO: implement me
# choice list -------------------------------------------------------------
def test_get_choice_list(self):
    """Choices have no list action; GET on the collection must 405."""
    resp = self.client.get('/app/choices/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_post_choice_list(self):
    """POST creates a choice linking an attendee to an option."""
    resp = self.client.post('/app/choices/', {
        'option_id': self.options[0].id,
        'attendee_id': self.attendees[2].id,
        'status': 'Y'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertGreaterEqual(int(body['id']), 1)
    self.assertEqual(int(body['option_id']), self.options[0].id)
    self.assertEqual(int(body['attendee_id']), self.attendees[2].id)
    # The serializer also denormalizes the attendee's display name.
    self.assertEqual(body['attendee'], self.attendees[2].name)
    self.assertEqual(body['status'], 'Y')
def test_put_choice_list(self):
    """PUT on the choice collection is not allowed (405)."""
    resp = self.client.put('/app/choices/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_patch_choice_list(self):
    """PATCH on the choice collection is not allowed (405)."""
    resp = self.client.patch('/app/choices/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_delete_choice_list(self):
    """DELETE on the choice collection is not allowed (405)."""
    resp = self.client.delete('/app/choices/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_choices_option_list(self):
    """OPTIONS on the choice collection must be allowed (200)."""
    # Bug fix: this test previously queried /app/options/, so the choices
    # endpoint was never exercised. Also replaced the bare 200 literal with
    # the DRF status constant used everywhere else in the suite.
    response = self.client.options('/app/choices/', {'format': 'json'})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # TODO: implement me
# choice item -------------------------------------------------------------
def test_get_choice_item(self):
    """GET returns one choice with its option, attendee, and status."""
    choice = self.choices[0]
    resp = self.client.get('/app/choices/' + str(choice.id) + '/', {'format': 'json'})
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(int(body['id']), choice.id)
    self.assertEqual(int(body['option_id']), choice.option_id)
    self.assertEqual(int(body['attendee_id']), choice.attendee_id)
    self.assertEqual(body['attendee'], choice.attendee.name)
    self.assertEqual(body['status'], choice.status)
def test_post_choice_item(self):
    """POST on a single choice is not allowed (405)."""
    resp = self.client.post('/app/choices/' + str(self.choices[0].id) + '/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_put_choice_item(self):
    """PUT reassigns the choice to another option/attendee and flips its status."""
    resp = self.client.put('/app/choices/' + str(self.choices[0].id) + '/', {
        'option_id': self.options[1].id,
        'attendee_id': self.attendees[2].id,
        'status': 'N'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(int(body['id']), self.choices[0].id)
    self.assertEqual(int(body['option_id']), self.options[1].id)
    self.assertEqual(int(body['attendee_id']), self.attendees[2].id)
    self.assertEqual(body['attendee'], self.attendees[2].name)
    self.assertEqual(body['status'], 'N')
def test_patch_choice_item(self):
    """PATCH changes only the status; option and attendee keep fixture values."""
    resp = self.client.patch('/app/choices/' + str(self.choices[0].id) + '/', {
        'status': 'N'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(int(body['id']), self.choices[0].id)
    self.assertEqual(int(body['option_id']), self.options[0].id)
    self.assertEqual(int(body['attendee_id']), self.attendees[0].id)
    self.assertEqual(body['attendee'], self.attendees[0].name)
    self.assertEqual(body['status'], 'N')
def test_delete_choice_item(self):
    """DELETE removes the choice; a follow-up GET must 404."""
    url = '/app/choices/' + str(self.choices[0].id) + '/'
    self.assertEqual(self.client.delete(url, {'format': 'json'}).status_code, status.HTTP_204_NO_CONTENT)
    self.assertEqual(self.client.get(url, {'format': 'json'}).status_code, status.HTTP_404_NOT_FOUND)
def test_choices_option_item(self):
    """OPTIONS on a single choice is allowed."""
    resp = self.client.options('/app/choices/' + str(self.choices[0].id) + '/', {'format': 'json'})
    self.assertEqual(status.HTTP_200_OK, resp.status_code)
    # TODO: implement me
# # attendee list -------------------------------------------------------------
def test_get_attendee_list(self):
    """Attendees have no list action; GET on the collection must 405."""
    resp = self.client.get('/app/attendees/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_post_attendee_list(self):
    """POST creates an attendee and echoes the generated id and name."""
    resp = self.client.post('/app/attendees/', {
        'name': 'new attendee'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertGreaterEqual(int(body['id']), 1)
    self.assertEqual(body['name'], 'new attendee')
def test_put_attendee_list(self):
    """PUT on the attendee collection is not allowed (405)."""
    resp = self.client.put('/app/attendees/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_patch_attendee_list(self):
    """PATCH on the attendee collection is not allowed (405)."""
    resp = self.client.patch('/app/attendees/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_delete_attendee_list(self):
    """DELETE on the attendee collection is not allowed (405)."""
    resp = self.client.delete('/app/attendees/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_attendees_option_list(self):
    """OPTIONS on the attendee collection must be allowed (200)."""
    # Bug fix: this test previously queried /app/options/, so the attendees
    # endpoint was never exercised. Also replaced the bare 200 literal with
    # the DRF status constant used everywhere else in the suite.
    response = self.client.options('/app/attendees/', {'format': 'json'})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # TODO: implement me
# attendee item -------------------------------------------------------------
def test_get_attendee_item(self):
    """GET returns one attendee by id."""
    attendee = self.attendees[0]
    resp = self.client.get('/app/attendees/' + str(attendee.id) + '/', {'format': 'json'})
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(body['id'], attendee.id)
    self.assertEqual(body['name'], attendee.name)
def test_post_attendee_item(self):
    """POST on a single attendee is not allowed (405)."""
    resp = self.client.post('/app/attendees/' + str(self.attendees[0].id) + '/', {'format': 'json'})
    self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
def test_put_attendee_item(self):
    """PUT renames an existing attendee."""
    resp = self.client.put('/app/attendees/' + str(self.attendees[0].id) + '/', {
        'name': 'new attendee'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(int(body['id']), self.attendees[0].id)
    self.assertEqual(body['name'], 'new attendee')
def test_patch_attendee_item(self):
    """PATCH renames an existing attendee."""
    resp = self.client.patch('/app/attendees/' + str(self.attendees[0].id) + '/', {
        'name': 'new attendee'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    body = resp.json()
    self.assertIsNotNone(body)
    self.assertEqual(int(body['id']), self.attendees[0].id)
    self.assertEqual(body['name'], 'new attendee')
def test_delete_attendee_item(self):
    """DELETE removes the attendee; a follow-up GET must 404."""
    url = '/app/attendees/' + str(self.attendees[0].id) + '/'
    self.assertEqual(self.client.delete(url, {'format': 'json'}).status_code, status.HTTP_204_NO_CONTENT)
    self.assertEqual(self.client.get(url, {'format': 'json'}).status_code, status.HTTP_404_NOT_FOUND)
def test_attendees_option_item(self):
    """OPTIONS on a single attendee must be allowed (200)."""
    # Bug fix: the URL was previously built from self.options[0].id, so the
    # request targeted whichever attendee happened to share that option's pk
    # (or a nonexistent one) instead of the attendee fixture.
    response = self.client.options('/app/attendees/' + str(self.attendees[0].id) + '/', {'format': 'json'})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # TODO: implement me
class APIAuthTest(APITestCase):
    """Exercises token-based authentication against /app/auth-token/."""

    @classmethod
    def setUpTestData(cls):
        # One known user whose credentials the tests exchange for a token.
        User.objects.create_user(
            'testuser', 'test@email.com', 'testpassword')

    def setUp(self):
        # Obtain an auth token and attach it to every subsequent request.
        resp = self.client.post('/app/auth-token/', {
            'username': 'testuser', 'password': 'testpassword', })
        self.assertEqual(resp.status_code, 200)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + resp.json()['token'])

    def test_something(self):
        pass
|
{"/app/tests.py": ["/app/models.py"], "/app/views.py": ["/app/serializers.py", "/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
13,092
|
rti/poodle-backend-django
|
refs/heads/main
|
/app/views.py
|
from rest_framework import viewsets, mixins # , permissions
from .serializers import QuerySerializer, OptionSerializer, ChoiceSerializer, AttendeeSerializer
from .models import Query, Option, Choice, Attendee
class QueryViewSet(mixins.CreateModelMixin,
                   mixins.RetrieveModelMixin,
                   mixins.UpdateModelMixin,
                   mixins.DestroyModelMixin,
                   viewsets.GenericViewSet):
    """Create/retrieve/update/destroy endpoint for Query objects.

    ListModelMixin is deliberately not mixed in, so there is no list action
    on the collection URL.
    """

    queryset = Query.objects.all()
    serializer_class = QuerySerializer

    # NOTE(review): per-action permission handling, currently disabled —
    # presumably retrieve/list were meant to be public and writes
    # authenticated; confirm before re-enabling.
    # def get_permissions(self):
    #     if self.action == 'retrieve':
    #         permission_classes = [permissions.AllowAny]
    #     elif self.action == 'list':
    #         permission_classes = [permissions.AllowAny]
    #     else:
    #         permission_classes = [permissions.IsAuthenticated]
    #
    #     return [permission() for permission in permission_classes]
class OptionViewSet(mixins.CreateModelMixin,
                    mixins.RetrieveModelMixin,
                    mixins.UpdateModelMixin,
                    mixins.DestroyModelMixin,
                    viewsets.GenericViewSet):
    """Create/retrieve/update/destroy endpoint for Option objects (no list action)."""

    queryset = Option.objects.all()
    serializer_class = OptionSerializer
class ChoiceViewSet(mixins.CreateModelMixin,
                    mixins.RetrieveModelMixin,
                    mixins.UpdateModelMixin,
                    mixins.DestroyModelMixin,
                    viewsets.GenericViewSet):
    """Create/retrieve/update/destroy endpoint for Choice objects (no list action)."""

    queryset = Choice.objects.all()
    serializer_class = ChoiceSerializer
class AttendeeViewSet(mixins.CreateModelMixin,
                      mixins.RetrieveModelMixin,
                      mixins.UpdateModelMixin,
                      mixins.DestroyModelMixin,
                      viewsets.GenericViewSet):
    """Create/retrieve/update/destroy endpoint for Attendee objects (no list action)."""

    queryset = Attendee.objects.all()
    serializer_class = AttendeeSerializer
|
{"/app/tests.py": ["/app/models.py"], "/app/views.py": ["/app/serializers.py", "/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
13,093
|
rti/poodle-backend-django
|
refs/heads/main
|
/app/admin.py
|
from django.contrib import admin
from .models import Query, Option, Attendee, Choice
class OptionInline(admin.StackedInline):
    """Inline form set so options can be edited directly on the Query admin page."""
    model = Option
    extra = 1  # show one blank option form by default
@admin.register(Query)
class QueryAdmin(admin.ModelAdmin):
    """Admin page for queries, searchable by title; options are edited inline."""
    search_fields = ['title']
    inlines = [OptionInline]
@admin.register(Attendee)
class AttendeeAdmin(admin.ModelAdmin):
    """Admin page for attendees, searchable by name."""
    search_fields = ['name']
@admin.register(Choice)
class ChoiceAdmin(admin.ModelAdmin):
    """Admin list of choices; the owning query is shown as a computed column."""
    list_display = ('attendee', 'query', 'option', 'status')
    list_display_links = ('attendee', 'query', 'option', 'status')
    list_filter = ('attendee', 'option__query')

    def query(self, obj):
        # Computed list_display column: resolve the query through the option.
        return obj.option.query
|
{"/app/tests.py": ["/app/models.py"], "/app/views.py": ["/app/serializers.py", "/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
13,094
|
rti/poodle-backend-django
|
refs/heads/main
|
/app/urls.py
|
from django.urls import include, path
from rest_framework import routers
from rest_framework.authtoken import views as authtoken_views
from . import views as app_views
# Router maps each viewset onto its collection prefix
# (/queries/, /options/, /choices/, /attendees/).
router = routers.DefaultRouter()
router.register('queries', app_views.QueryViewSet)
router.register('options', app_views.OptionViewSet)
router.register('choices', app_views.ChoiceViewSet)
router.register('attendees', app_views.AttendeeViewSet)

urlpatterns = [
    path('', include(router.urls)),
    # Exchanges username/password for a DRF auth token.
    path('auth-token/', authtoken_views.obtain_auth_token),
]
|
{"/app/tests.py": ["/app/models.py"], "/app/views.py": ["/app/serializers.py", "/app/models.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.