repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
aymane081/python_algo | refs/heads/master | class Solution:
def get_minimum(self, nums):
left, right = 0, len(nums) - 1
while left < right:
if nums[left] <= nums[right]: # sorted array, return nums[left]
break
mid = (left + right) // 2
if nums[mid] < nums[left]: # min is either mid the left side of mid
right = mid
else: # nums[mid] >= nums[left] > num[right] => mid is not min
left = mid + 1
return nums[left]
# Quick manual check on a rotated array (alternate fixtures left commented out).
solution = Solution()
# nums = [3, 4, 5, 6, 7, 1, 2]
# nums = [1, 2, 3, 4, 5, 6]
nums = [7, 8, 1, 2, 3, 4, 5, 6]
print(solution.get_minimum(nums)) | Python | 18 | 34.666668 | 79 | /arrays/find_minimum_rotated_sorted_arary.py | 0.49766 | 0.458658 |
aymane081/python_algo | refs/heads/master | # 234
from utils.listNode import ListNode
class Solution:
    """Check whether a singly linked list is a palindrome (LeetCode 234).

    Reverses the first half in place while a fast pointer races ahead, then
    compares the reversed half against the second half.  O(n) time, O(1)
    extra space.  NOTE: an empty list (head is None) returns False, matching
    the original implementation.
    """

    def is_palindrome(self, head):
        if not head:
            return False
        rev = None
        slow = fast = head
        # Reverse nodes behind *slow* while *fast* advances two per step.
        while fast and fast.next:
            fast = fast.next.next
            following = slow.next
            slow.next = rev
            rev = slow
            slow = following
        if fast:
            # odd number of nodes: skip the middle one
            slow = slow.next
        # Walk both halves in lockstep; a mismatch leaves *rev* non-None.
        while rev and rev.value == slow.value:
            rev = rev.next
            slow = slow.next
        return not rev
# Build the odd-length palindrome 1->2->3->4->3->2->1 and run the check.
one = ListNode(1)
two = ListNode(2)
three = ListNode(3)
four = ListNode(4)
five = ListNode(3)
six = ListNode(2)
seven = ListNode(1)
one.next = two
two.next = three
three.next = four
four.next = five
five.next = six
six.next = seven
print(one)
solution = Solution()
print(solution.is_palindrome(one))  # expected: True
| Python | 43 | 18.209303 | 53 | /linkedList/palindrome_linked_list.py | 0.568319 | 0.556227 |
aymane081/python_algo | refs/heads/master | class Solution(object):
# O(N) time and space
def rotate_array(self, numbers, k):
"""
:type numbers: List[int]
:type k: int
:rtype: List[int]
"""
n = len(numbers)
if k > n:
raise ValueError('The array is not long enough to be rotated')
return numbers[n - k:] + numbers[:n - k]
# O(N) time, O(1) time
def rotate_array2(self, numbers, k):
"""
:type numbers: List[int]
:type k: int
:rtype: List[int]
"""
n = len(numbers)
if k > n:
raise ValueError('The array is not long enough')
self.reverse(numbers, 0, n - 1)
self.reverse(numbers, 0, k - 1)
self.reverse(numbers, k, n - 1)
def reverse(self, numbers, left, right):
while left < right:
numbers[left], numbers[right] = numbers[right], numbers[left]
left += 1
right -= 1
# Manual check of the in-place variant; expected output: [5, 6, 7, 1, 2, 3, 4].
solution = Solution()
# print(solution.rotate_array([1,2,3,4,5,6,7], 1))
numbers = [1,2,3,4,5,6,7]
solution.rotate_array2(numbers, 3)
print(numbers)
| Python | 39 | 27.461538 | 74 | /arrays/rotate_array.py | 0.523423 | 0.5 |
aymane081/python_algo | refs/heads/master | from utils.treeNode import TreeNode
class Solution:
    """Sum of all left leaves of a binary tree (LeetCode 404)."""

    # O(N) time, O(h) recursion depth
    def get_left_leaves_sum(self, node):
        """Return the sum of values of every left child that is a leaf."""
        if not node:
            return 0
        left = node.left
        if left and not left.left and not left.right:
            # left child is a leaf: take its value directly
            total = left.value
        else:
            total = self.get_left_leaves_sum(left)
        return total + self.get_left_leaves_sum(node.right)
# Fixture tree: left leaves are node4 (left of node2) and node7 (left of node6),
# so the expected sum is 4 + 7 = 11.
node1 = TreeNode(1)
node2 = TreeNode(2)
node3 = TreeNode(3)
node4 = TreeNode(4)
node5 = TreeNode(5)
node6 = TreeNode(6)
node7 = TreeNode(7)
node1.left = node2
node1.right = node3
node2.left = node4
node2.right = node5
node3.left = node6
node6.left = node7
print(node1)
solution = Solution()
print(solution.get_left_leaves_sum(node1))
def left_leaves_sum(root):
if not root:
return 0
result = 0
if root.left and not root.left.left and not root.left.right:
result += root.left.value
result += left_leaves_sum(root.left) + left_leaves_sum(root.right) | Python | 51 | 19.490196 | 70 | /trees/left_leaves_sum.py | 0.636364 | 0.605742 |
aymane081/python_algo | refs/heads/master | class Solution:
def get_min_length(self, nums, target):
"""
type nums: List[int]
type target: int
:rtype : int
"""
min_length, sum_so_far, start = len(nums), 0, 0
for i, num in enumerate(nums):
sum_so_far += num
while sum_so_far - nums[start] >= target:
sum_so_far -= nums[start]
start += 1
min_length = min(min_length, i - start + 1)
return min_length if min_length < len(nums) else 0
# Sample from LeetCode 209; the shortest subarray summing to >= 7 is [4, 3].
solution = Solution()
nums = [2, 3, 1, 2, 4, 3]
print(solution.get_min_length(nums, 7)) | Python | 20 | 30.200001 | 59 | /arrays/minimum_size_subarray_sum.py | 0.505618 | 0.486356 |
aymane081/python_algo | refs/heads/master | class Solution(object):
def missing_element(self, numbers):
"""
:type numbers: List[int]
:rtype: int
"""
n = len(numbers)
return (n * (n + 1) // 2) - sum(numbers)
# Manual check: 4 is absent from the permutation of 0..5 below.
solution = Solution()
numbers = [0, 2, 5, 3, 1]
print(solution.missing_element(numbers)) | Python | 12 | 24.416666 | 48 | /arrays/missing_element.py | 0.546053 | 0.523026 |
alan-valenzuela93/port-scanner | refs/heads/main | import socket
import argparse
from grabber import banner_grabbing
# Command-line interface: a single required -t/--target host.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--target', help='Enter your target address', required=True)
# NOTE: rebinds `parser` from the ArgumentParser to the parsed Namespace;
# the rest of the module reads `parser.target`.
parser = parser.parse_args()
ports = [21, 22, 25, 53, 66, 80, 88, 110, 139, 443, 445, 8080, 9050] # These are some of the most interesting ports to scan
def get_ip(target):
    """Resolve *target* (hostname or dotted quad) to an IPv4 address string."""
    resolved = socket.gethostbyname(target)
    return str(resolved)
def scan(host, port):
    """Return True if a TCP connection to (host, port) succeeds, else False.

    :param host: IP address or hostname to probe
    :param port: TCP port number
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # BUG FIX: the timeout must be set *before* connect() for it to bound the
    # connection attempt; the original set it afterwards, so a filtered port
    # could block for the OS default (often tens of seconds).
    s.settimeout(0.2)
    try:
        s.connect((host, port))
        return True
    except OSError:
        # narrowed from a bare except: connection errors are all OSError
        return False
    finally:
        # BUG FIX: always release the socket (the original leaked one per port)
        s.close()
def main():
    """Scan each port of interest on the CLI target and print open-port banners."""
    open_ports = (p for p in ports if scan(parser.target, p))
    for p in open_ports:
        print(banner_grabbing(parser.target, p))
# Script entry point: resolve the target once, then run the scan.
if __name__ == '__main__':
    print('TCP/IP scan started at IP ' + get_ip(parser.target))
    main()
| Python | 34 | 24.735294 | 124 | /port-scanner.py | 0.619362 | 0.579758 |
alan-valenzuela93/port-scanner | refs/heads/main | import socket
def banner_grabbing(addr, port):
    """Connect to (addr, port), read the service banner and send an HTTP probe.

    Returns the decoded banner text followed by a newline; returns just '\n'
    when nothing could be read (timeout / connection error).

    :param addr: target IP address or hostname
    :param port: open TCP port (int)
    """
    # BUG FIX: the original did `port + "..."` with an int port, raising
    # TypeError before any work happened.
    print(f"Getting service information for open TCP/IP port: {port}...")
    socket.setdefaulttimeout(10)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    data = ''
    headers = (
        "GET / HTTP/1.1\r\n"
        f"Host: {addr}\r\n"
        "User-Agent: python-custom-script/2.22.0\r\n"
        "Accept-Encoding: gzip, deflate\r\nAccept: */*\r\n"
        "Connection: keep-alive\r\n\r\n"
    )
    print("\n\n" + headers)
    try:
        s.connect((addr, port))
        raw = s.recv(4096)
        # BUG FIX: str(bytes) produced "b'...'" repr junk (and made the old
        # empty-check dead); decode the payload instead.
        data = raw.decode('utf-8', errors='replace')
        if raw:
            s.send(headers.encode())  # send the probe once a banner arrived
    except OSError:
        # narrowed from a bare except; covers timeouts and refused connections
        print("Connection refused... banner unreachable")
    finally:
        # BUG FIX: close the socket on every path (the original leaked it on error)
        s.close()
    return data + '\n'
| Python | 29 | 30.931034 | 77 | /grabber.py | 0.540314 | 0.527749 |
thevaccinetracker/data_engine | refs/heads/master | from settings import GOOGLE_DRIVER, DATA_PATH
import time
def WebScrap():
    """Scrape the RAPS COVID-19 vaccine tracker table and dump it to CSV.

    Writes a pipe-delimited file under DATA_PATH; the first (index) column of
    every table row is dropped.
    """
    print("Raps webscrap: Started...")
    driver = GOOGLE_DRIVER
    driver.get('https://www.raps.org/news-and-articles/news-articles/2020/3/covid-19-vaccine-tracker')
    table = driver.find_element_by_id("vax_wrapper")
    # Ask for 100 entries per page so the whole table is present in the DOM.
    table.find_element_by_name("vax_length").send_keys("100")
    data_table = table.find_element_by_class_name("dataTable")
    rows = data_table.find_elements_by_tag_name("tr")
    scraped = []
    for row_index, row in enumerate(rows):
        # First row holds <th> header cells, all others hold <td> data cells.
        cell_tag = "th" if row_index == 0 else "td"
        cells = row.find_elements_by_tag_name(cell_tag)
        # Skip the first column, keep the visible text of the rest.
        scraped.append([cell.text.encode('utf-8').decode('utf-8') for cell in cells[1:]])
    import csv
    with open(DATA_PATH + r'/raps.org.tabledata.csv', 'w') as out_file:
        csv.writer(out_file, delimiter='|', lineterminator='\n').writerows(scraped)
    time.sleep(60 * 1)
    print("Raps webscrap: Completed...")
| Python | 39 | 28.641026 | 102 | /web_scrap/raps_org.py | 0.603806 | 0.59083 |
thevaccinetracker/data_engine | refs/heads/master | import time
from settings import GOOGLE_DRIVER
def WebScrap():
    """Drive the public Airtable COVID-19 tracker UI with Selenium.

    Clicks through the view switcher and view menu; the final click
    presumably triggers the CSV export -- TODO confirm against the live page.
    """
    print("Airtable webscrap: Started...")
    driver = GOOGLE_DRIVER
    driver.get('https://airtable.com/shrSAi6t5WFwqo3GM/tblEzPQS5fnc0FHYR/viweyymxOAtNvo7yH?blocks=bipZFzhJ7wHPv7x9z')
    table = driver.find_element_by_id("table")
    view_config = table.find_element_by_class_name('viewConfigContainer')
    view_config.find_element_by_class_name('link-quiet').click()
    time.sleep(5)
    switcher_items = table.find_element_by_class_name('viewSwitcherContainer').find_elements_by_tag_name('li')
    switcher_items[2].click()
    time.sleep(5)
    menu_popover = table.find_elements_by_class_name("viewMenuPopover")[0]
    menu_popover.click()
    time.sleep(3)
    # Click the first entry of the popover menu.
    menu_popover.find_element_by_class_name("menu").find_element_by_tag_name("li").click()
    time.sleep(60 * 1)
    print("Airtable webscrap: Completed...")
# References
# https://medium.com/@moungpeter/how-to-automate-downloading-files-using-python-selenium-and-headless-chrome-9014f0cdd196
# https://www.programcreek.com/python/example/100025/selenium.webdriver.ChromeOptions
| Python | 26 | 39.807693 | 121 | /web_scrap/airtable_com.py | 0.742696 | 0.713478 |
thevaccinetracker/data_engine | refs/heads/master | from settings import GOOGLE_DRIVER, DATA_PATH
import time
def WebScrap():
    """Download the WHO COVID-19 candidate-vaccine landscape document.

    Clicks the blue download button on the publication page and waits a
    minute for the browser download to finish.
    """
    print("WHO webscrap: Started...")
    driver = GOOGLE_DRIVER
    driver.get('https://www.who.int/publications/m/item/draft-landscape-of-covid-19-candidate-vaccines')
    page_body = driver.find_element_by_tag_name("body")
    download_button = page_body.find_element_by_class_name('button-blue-background')
    download_button.click()
    time.sleep(60 * 1)  # allow the download to complete
    print("WHO webscrap: Completed...")
| Python | 17 | 24.588236 | 104 | /web_scrap/who_int.py | 0.694253 | 0.682759 |
thevaccinetracker/data_engine | refs/heads/master | from web_scrap import airtable_com, raps_org, who_int
import time
# Stage 1: scrape all three upstream sources (Selenium downloads / CSV dumps).
airtable_com.WebScrap()
raps_org.WebScrap()
who_int.WebScrap()
print("Sleep for 1 min")
time.sleep(60 * 1)  # let pending browser downloads settle before reading them
# Stage 2: normalise the raw downloads.  Imported late on purpose: these
# modules read the downloaded files at import time.
from preprocess_data import pdf_read_table,airtable
pdf_read_table.TransformPDFData()
airtable.PreProcessAirtableData()
print("Sleep for 1 min")
time.sleep(60 * 1)
# Stage 3: push the merged data to Google Sheets; importing googleDb runs its
# module-level data loading and per-source sheet updates, then the final pass.
import googleDb
googleDb.MainGSheetUpdate()
| Python | 21 | 17.476191 | 53 | /main_exce.py | 0.768041 | 0.747423 |
thevaccinetracker/data_engine | refs/heads/master | import gspread
from oauth2client.service_account import ServiceAccountCredentials
import time
from settings import GSHEET_CRED_FILE, GSHEET_SCOPE, GSHEET_FILE, GSHEET_WORKSHEET
from settings import WHO_INPUT_DATA, RAPS_INPUT_DATA, AIRTABLE_INPUT_DATA
from settings import VT_CORPS
import get_cosine.get_cosine
# use creds to create a client to interact with the Google Drive API
creds = ServiceAccountCredentials.from_json_keyfile_name(GSHEET_CRED_FILE, GSHEET_SCOPE)
client = gspread.authorize(creds)
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open(GSHEET_FILE).get_worksheet(GSHEET_WORKSHEET)
# Extract and print all of the values
# NOTE(review): loaded at import time but apparently unused below -- verify.
list_of_hashes = sheet.get_all_records()
def GetDataFromFile(file, separator):
    """Read *file* and split every line on *separator*.

    Returns a list of rows (lists of column strings).  Each line's trailing
    newline stays attached to its final column, exactly as readlines() left it.
    """
    with open(file) as handle:
        return [line.split(separator) for line in handle]
def GetRow(data, matchString, col):
    """Find the row whose column *col* best matches *matchString*.

    Cosine similarity via get_cosine; only scores strictly above 0.70 count.
    Returns (best_row_or_None, best_similarity).
    """
    best_row = None
    best_score = 0
    for candidate in data:
        similarity = get_cosine.get_cosine.GetCosineSim([candidate[col], matchString])
        if similarity > 0.70 and similarity > best_score:
            best_row = candidate
            best_score = similarity
    return best_row, best_score
def UpdateGoogleSheet(settings, data, gSheet):
    """Copy best-matching columns from *data* into the worksheet *gSheet*.

    For every sheet row, find the closest row in *data* (via GetRow) and write
    the chosen column plus the match score into adjacent cells.

    :param settings: dict with sheetCol, dataCol, currentSheetRow,
        updateSheetCol and dataColForUpdate indices
    :param data: list of rows (lists of strings) to match against
    :param gSheet: gspread worksheet-like object
    """
    sheetCol = settings["sheetCol"]
    dataCol = settings["dataCol"]
    currentSheetRow = settings["currentSheetRow"]
    updateSheetCol = settings["updateSheetCol"]
    dataColForUpdate = settings["dataColForUpdate"]
    currentIndex = 0
    for sheetRow in gSheet.get_all_values():
        # BUG FIX: foundRow was referenced in the except branch before ever
        # being assigned when GetRow itself raised, turning the error report
        # into a NameError.
        foundRow = None
        try:
            foundRow, foundRowMatchPer = GetRow(data, sheetRow[sheetCol], dataCol)
            if foundRow:
                gSheet.update_cell(currentSheetRow, updateSheetCol, foundRow[dataColForUpdate])
                gSheet.update_cell(currentSheetRow, updateSheetCol + 1, foundRowMatchPer)
                time.sleep(3)  # stay under the Sheets API write quota
        except Exception:
            # narrowed from a bare except so Ctrl-C / SystemExit still work
            print(currentSheetRow, updateSheetCol, dataColForUpdate, foundRow)
        currentSheetRow += 1
        currentIndex += 1
# Load the three pre-processed pipe-delimited source files produced by the
# web_scrap / preprocess_data stages.
print("WHO data loading start...")
whoData = GetDataFromFile(WHO_INPUT_DATA, "|")
print("WHO data loading complete...")
print("RAPS data loading start...")
rapsData = GetDataFromFile(RAPS_INPUT_DATA, "|")
print("RAPS data loading complete...")
print("AirTable data loading start...")
airTableData = GetDataFromFile(AIRTABLE_INPUT_DATA, "|")
print("AirTable data loading complete...")
time.sleep(10)
# Per-source column mappings for UpdateGoogleSheet: which sheet column to
# match on, which data column to compare, where to start writing, and which
# data column feeds the update.
whoSettings = {
    'sheetCol': 2,
    'dataCol': 2,
    'currentSheetRow': 1,
    'updateSheetCol': 8,
    'dataColForUpdate': 4
}
rapsSettings = {
    'sheetCol': 3,
    'dataCol': 1,
    'currentSheetRow': 1,
    'updateSheetCol': 10,
    'dataColForUpdate': 2
}
airTableSettings = {
    'sheetCol': 1,
    'dataCol': 0,
    'currentSheetRow': 1,
    'updateSheetCol': 6,
    'dataColForUpdate': 3
}
# Push each source into the sheet, pausing between passes to respect quotas.
print("Updating GSheet for WHO...")
UpdateGoogleSheet(whoSettings, whoData, sheet)
print("Updating GSheet for WHO Completed...")
time.sleep(10)
print("Updating GSheet for RAPS...")
UpdateGoogleSheet(rapsSettings, rapsData, sheet)
print("Updating GSheet for RAPS Completed...")
time.sleep(10)
print("Updating GSheet for AirTable...")
UpdateGoogleSheet(airTableSettings, airTableData, sheet)
print("Updating GSheet for AirTable Completed...")
time.sleep(10)
def GetPhaseCorp():
    """Load the phase -> keywords map from the VT_CORPS corpus file.

    Each line has the form '<phase>:<kw1>,<kw2>,...'; returns a dict mapping
    the phase key to its keyword list (trailing newline stays on the last
    keyword, as in the original).
    """
    phase_map = {}
    with open(VT_CORPS, 'r') as corpus:
        for line in corpus:
            cols = line.split(':')
            phase_map[cols[0]] = cols[1].split(',')
    return phase_map
def GetStagePhase(stage):
    """Map a free-text stage description to a phase key by keyword search.

    Normalises (lower-case, no spaces) both sides, collects every phase whose
    keywords occur in the text, and returns the highest key; '0' when nothing
    matches.  Relies on the module-level *phase* mapping.
    """
    normalized = stage.lower().replace(' ', '')
    hits = []
    for key, keywords in phase.items():
        for keyword in keywords:
            if keyword.lower().replace(' ', '') in normalized:
                hits.append(key)
    ranked = sorted(set(hits), reverse=True)
    return ranked[0] if ranked else '0'
def GetFinalPhase(all_stage):
    """Pick the consensus phase from a list of per-source phase keys.

    Returns the most frequent stage.  Returns "Not Sure" when (a) more than
    one source reported and they all disagree, (b) nothing was reported, or
    (c) the winner is the '0' (unknown) placeholder.
    """
    counts = {}
    for stage in all_stage:
        counts[stage] = counts.get(stage, 0) + 1
    # BUG FIX: the original computed this "every source disagrees" verdict but
    # then unconditionally overwrote it with the sorted winner, leaving the
    # branch as dead code; make it take effect (only when >1 source reported,
    # so a lone source still wins outright).
    if len(all_stage) > 1 and len(counts) == len(all_stage):
        return "Not Sure"
    ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    if not ranked:
        return "Not Sure"
    winner = ranked[0][0]
    return "Not Sure" if winner == '0' else winner
def UpdateGoogleSheetFinalStage(gSheet):
    """Write the consensus phase into column 15 for every data row of *gSheet*.

    Row 1 is the header and is skipped; data rows map to sheet rows 2, 3, ...
    """
    row_number = 2  # gspread rows are 1-based; data starts under the header
    FINAL_STAGE_COL = 15
    for index, sheet_row in enumerate(gSheet.get_all_values()):
        if index == 0:
            continue  # header row
        stages = [GetStagePhase(sheet_row[7]),   # WHO stage text
                  GetStagePhase(sheet_row[9]),   # RAPS stage text
                  GetStagePhase(sheet_row[5])]   # Airtable stage text
        gSheet.update_cell(row_number, FINAL_STAGE_COL, GetFinalPhase(stages))
        row_number += 1
        time.sleep(3)  # stay under the Sheets API write quota
# Keyword corpus used by GetStagePhase, loaded once at import time.
phase = dict(GetPhaseCorp())
def MainGSheetUpdate():
    """Entry point: compute and write the final consensus-stage column."""
    print("Updating GSheet for Final Stage...")
    UpdateGoogleSheetFinalStage(sheet)
    print("Updating GSheet for Final Stage Completed...")
| Python | 194 | 28.572165 | 95 | /googleDb.py | 0.652432 | 0.64267 |
thevaccinetracker/data_engine | refs/heads/master | import sys
# Make the project sub-packages importable when running scripts directly.
# NOTE(review): absolute Windows paths -- only works on this one machine.
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine')
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine\web_scrap_data')
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine\get_cosine')
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine\preprocess_data')
ROOT_PATH = "../"
DATA_PATH = "data"
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Headless Chrome configured for unattended scraping and silent downloads.
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-notifications")
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--verbose')
chrome_options.add_experimental_option("prefs", {
        "download.default_directory": r"C:\Users\v-shvi\Desktop\Personal\VT\data_engine\data",
        "download.prompt_for_download": False,
        "download.directory_upgrade": True,
        "safebrowsing_for_trusted_sources_enabled": False,
        "safebrowsing.enabled": False
})
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--disable-software-rasterizer')
# chrome_options = chrome_options
# GOOGLE_DRIVER = webdriver.Chrome(executable_path='driver/chromedriver.exe')
# Shared driver instance used by every web_scrap module.
GOOGLE_DRIVER = webdriver.Chrome(executable_path='driver/chromedriver.exe', chrome_options=chrome_options)
# SETTINGS DATA
GSHEET_CRED_FILE = "credentials.json"
GSHEET_SCOPE = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
GSHEET_FILE = "Data Engine Database"
GSHEET_WORKSHEET = 6
# Locations of the normalised per-source input files.
WHO_INPUT_DATA = "data/who.int.transformed_data.csv"
RAPS_INPUT_DATA = "data/raps.org.tabledata.csv"
# AIRTABLE_INPUT_DATA = "data/COVID-19 Tracker-Vaccines.csv"
AIRTABLE_INPUT_DATA = "data/airtable.transformed_data.csv"
VT_CORPS = 'vt_corp/phase.txt'
STOPWORDS = 'english'
| Python | 49 | 36.693878 | 106 | /settings.py | 0.757576 | 0.751623 |
thevaccinetracker/data_engine | refs/heads/master | from settings import DATA_PATH
import csv
def parseRowToCell(row):
    """Split one CSV line into cells, honouring double-quoted fields.

    Commas inside double quotes do not split; the quote characters themselves
    are dropped.  Returns the list of cell strings ([] for an empty line).
    """
    inside_quotes = False
    word = ""
    rowArray = []
    for letter in row:
        if letter == "\"":
            # entering or leaving a quoted field
            inside_quotes = not inside_quotes
        elif letter == "," and not inside_quotes:
            rowArray.append(word)
            word = ""
        else:
            word += letter
    # BUG FIX: the original never flushed the final field after the loop,
    # silently dropping the last column of every quoted row.
    if row:
        rowArray.append(word)
    return rowArray
def PreProcessAirtableData():
    """Normalise the raw Airtable CSV export into a pipe-delimited file.

    Quoted rows go through the quote-aware parser; plain rows are split on
    commas directly.
    """
    print("Airtable csv pre-processing: Started...")
    # with open(r"../data/COVID-19 Tracker-Vaccines.csv") as file:
    with open(DATA_PATH + r"/COVID-19 Tracker-Vaccines.csv") as raw_file:
        raw_lines = raw_file.readlines()
    matrix = []
    for line in raw_lines:
        if "\n" in line:
            line = line.replace('\n', '')
        cells = parseRowToCell(line) if "\"" in line else line.split(",")
        matrix.append(cells)
    with open(DATA_PATH + r'/airtable.transformed_data.csv', 'w') as out_file:
        csv.writer(out_file, delimiter='|', lineterminator='\n').writerows(matrix)
    print("Airtable csv pre-processing: Completed...")
# PreProcessAirtableData()
| Python | 43 | 28.60465 | 74 | /preprocess_data/airtable.py | 0.565593 | 0.562451 |
thevaccinetracker/data_engine | refs/heads/master | import tabula
from settings import DATA_PATH
# Convert every page of the downloaded WHO landscape PDF into one raw CSV.
file = DATA_PATH + "/novel-coronavirus-landscape-covid-19-(1).pdf"
tabula.convert_into(file, DATA_PATH + "/who_covid_data.csv", output_format="csv", pages='all')
import csv
# Load the raw rows into memory for TransformPDFData below.
# NOTE(review): the file handle is deliberately (?) never closed.
file_CSV = open(DATA_PATH + '/who_covid_data.csv')
data_CSV = csv.reader(file_CSV)
list_CSV = list(data_CSV)
def transformData(data):
    """Collapse a multi-row PDF record into one list of merged columns.

    The first row seeds the columns; each following row's cells are stripped
    of stray CR/LF and surrounding whitespace and appended with a space.
    Consumes the first element of *data* (the argument is mutated).
    Returns [] for empty input.
    """
    if len(data) <= 0:
        return []
    merged = data.pop(0)
    for extra_row in data:
        for c in range(len(merged)):
            cell = extra_row[c].lstrip('\r\n').rstrip('\r\n').strip()
            merged[c] = (merged[c] + " " + cell).strip()
    # Flatten any embedded newlines left in the merged cells.
    return [col.replace("\n", " ") for col in merged]
def TransformPDFData():
    """Group the raw WHO CSV rows into records and write the merged output.

    A non-empty 4th column marks the start of a new record; each record's rows
    are merged by transformData().  Output is pipe-delimited.
    """
    print("WHO pdf pre-processing: Started...")
    indexStartFrom = 3  # skip the report's leading title/header rows
    row = []
    transformedData = []
    for data in range(indexStartFrom, len(list_CSV)):
        if list_CSV[data][3] != '':
            if len(row) > 0:
                transformedData.append(transformData(row))
            row = []
        row.append(list_CSV[data])
    # BUG FIX: the original never flushed the final record after the loop,
    # dropping the last vaccine candidate from the output file.
    if row:
        transformedData.append(transformData(row))
    with open(DATA_PATH + r'/who.int.transformed_data.csv', 'w') as file:
        writer = csv.writer(file, delimiter='|', lineterminator='\n')
        writer.writerows(transformedData)
    print("WHO pdf pre-processing: Completed...")
| Python | 48 | 27.375 | 94 | /preprocess_data/pdf_read_table.py | 0.598385 | 0.591777 |
thevaccinetracker/data_engine | refs/heads/master | statement = """"Institute of Medical Biology, Chinese Academy of Medical Sciences",Vaccine,Inactivated virus,Phase II,Phase II began June 2020,Inactivated,NCT04412538,Unknown,,,N/A,https://docs.google.com/document/d/1Y4nCJJ4njzD1wiHbufCY6gqfRmj49Qn_qNgOJD62Wik/edit,6/23/2020"""
def parseRowToCell(row):
    """Split a CSV line into cells (quote-aware); prints and returns them.

    Commas inside double quotes do not split; the quotes are dropped.
    """
    inside_quotes = False
    word = ""
    rowArray = []
    for letter in row:
        if letter == "\"":
            # entering or leaving a quoted field
            inside_quotes = not inside_quotes
        elif letter == "," and not inside_quotes:
            rowArray.append(word)
            word = ""
        else:
            word += letter
    # BUG FIX: flush the last field; the original dropped the final column.
    if row:
        rowArray.append(word)
    print(rowArray)
    return rowArray
parseRowToCell(statement) | Python | 21 | 35.142857 | 278 | /test.py | 0.643799 | 0.60686 |
thevaccinetracker/data_engine | refs/heads/master | import string
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from settings import STOPWORDS
# Resolve the stop-word list once at import time; note this rebinds the name
# `stopwords` from the nltk corpus module to a plain list of words.
stopwords = stopwords.words(STOPWORDS)
def cosine_sim_vectors(vec1, vec2):
    """Cosine similarity between two 1-D count vectors (scalar)."""
    a = vec1.reshape(1, -1)
    b = vec2.reshape(1, -1)
    return cosine_similarity(a, b)[0][0]
def clean_string(text):
    """Lower-case *text*, dropping punctuation and stop words."""
    no_punct = ''.join(ch for ch in text if ch not in string.punctuation)
    tokens = no_punct.lower().split()
    return ' '.join(tok for tok in tokens if tok not in stopwords)
def GetCosineSim(sentanceList):
    """Cosine similarity of the first two sentences in *sentanceList*.

    Deliberately best-effort: any failure (empty vocabulary, too few
    sentences, ...) yields 0 instead of raising.
    """
    try:
        cleaned = [clean_string(sentence) for sentence in sentanceList]
        vectors = CountVectorizer().fit_transform(cleaned).toarray()
        # csim = cosine_similarity(vectors)
        return cosine_sim_vectors(vectors[0], vectors[1])
    except:
        return 0
| Python | 33 | 27.363636 | 77 | /get_cosine/get_cosine.py | 0.695513 | 0.67735 |
marioxe301/ParserDR | refs/heads/master | from pyparsing import (Regex, White, Literal ,
ZeroOrMore, OneOrMore, Group , Combine ,
Word , alphanums, Suppress)
import sys
class Tokens(object):
    """A single lexeme: the grammar tag it matched plus its literal text."""

    def __init__(self, tag, token):
        self.tag = tag      # grammar rule name, e.g. "ID-TOK"
        self.token = token  # matched source text
#this class lets us store (tag, token) pairs in the token list
class Lexer(object):
    """pyparsing-based tokenizer for the toy grammar.

    Builds the terminal definitions once in __init__, then tokenize() runs
    the grammar over the file at *path* and fills self.tokenList with Tokens
    objects (tag = the Group's result name, token = the matched text).
    """
    def __init__(self,path):
        #TERMINALS
        WHITESPACE = White(ws=" \t\r")
        LETTER = Regex('[a-zA-Z]')
        DIGIT = Regex('[0-9]')
        # type keywords
        DATE_TYPE = Literal('date')
        STRING_TYPE = Literal('String')
        REAL_TYPE = Literal('real')
        VOID_TYPE = Literal('void')
        BOOLEAN_TYPE = Literal('boolean')
        ANYTYPE_TYPE = Literal('anytype')
        INT_TYPE = Literal('int')
        STATIC_TKN = Literal('static')
        RETURN = Literal('EXIT')
        IF = Literal('if')
        WHILE = Literal('while')
        #LITERAL TERMINALS
        DATE_LITERAL = DIGIT + DIGIT + Literal('/') + DIGIT + DIGIT + Literal('/') + DIGIT + DIGIT + DIGIT + DIGIT
        STRING_LITERAL = Combine(Literal('"')+ ZeroOrMore(LETTER | DIGIT | Literal(' ') | Literal('%')|Literal('@')| Literal(',')| Literal('-')|Literal('=')|Literal('(')|Literal(')')|Literal('_')) +Literal('"'))
        REAL_LITERAL = Combine(OneOrMore(DIGIT) + Literal('.') + OneOrMore(DIGIT))
        INT_LITERAL = Combine(OneOrMore(DIGIT))
        TRUE_LITERAL = Literal('true')
        FALSE_LITERAL = Literal('false')
        BOOLEAN_LITERAL = TRUE_LITERAL | FALSE_LITERAL
        # operators and punctuation
        INCR = Literal('++')
        DDPERIOD = Literal('::')
        PAR_OP = Literal('(')
        PAR_CL = Literal(')')
        SEMICOLON = Literal(';')
        COMA = Literal(',')
        BRACK_OP = Literal('{')
        BRACK_CL = Literal('}')
        PERIOD = Literal('.')
        ASIG = Literal(':=')
        REL_OP = Literal('>') | Literal('<') | Literal('==') | Literal('<=') | Literal('>=')
        LOG_OP = Literal('||') | Literal('&&')
        MULT_OP = Literal('/') | Literal('*')
        ADD_OP = Literal('+') | Literal('-')
        ID = Combine((LETTER | Literal('_')) + ZeroOrMore( LETTER | DIGIT ) )
        TEXT = ZeroOrMore(Word(alphanums)| WHITESPACE )
        COMMENT = Combine((Literal('//')+ TEXT + Literal('\n') ) | (Literal('//')+ TEXT) )
        # The full token alternation; each Group's result name becomes the
        # token tag, and comments are suppressed entirely.
        program = ZeroOrMore( Suppress(COMMENT) | Group(DATE_TYPE)("DATE-TY") | Group(STRING_TYPE)("STRING-TY") | Group(REAL_TYPE)("REAL-TY") | Group( VOID_TYPE)("VOID-TY") | Group(BOOLEAN_TYPE)("BOOLEAN-TY") | Group(ANYTYPE_TYPE)("ANY-TY")
                | Group(INT_TYPE)("INT-TY") | Group(STATIC_TKN)("STATIC-TY")| Group(RETURN)("RETURN-TOK") | Group(IF)("IF-TOK") | Group(WHILE)("WHILE-TOK") | Group(DATE_LITERAL)("DATE-TOK") | Group(STRING_LITERAL)("STRING-TOK")
                | Group(COMA)("COMA-TOK") | Group(REAL_LITERAL)("REAL-TOK") | Group(INT_LITERAL)("INT-TOK") | Group(BOOLEAN_LITERAL)("BOOLEAN-TOK") | Group(INCR)("INCR-TOK") |Group( DDPERIOD)("DDPERIOD-TOK") | Group(PAR_OP)("PAR-OP-TOK") | Group(PAR_CL)("PAR-CL-TOK") | Group(SEMICOLON)("SEMICOLON-TOK")
                | Group(BRACK_OP)("BRACK-OP-TOK") | Group(BRACK_CL)("BRACK-CL-TOK") | Group(PERIOD)("PERIOD-TOK") | Group(ASIG)("ASIG-TOK") | Group( REL_OP)("REL-OP-TOK") | Group(LOG_OP)("LOG-OP-TOK") | Group(MULT_OP)("MULT-OP-TOK") | Group( ADD_OP)("ADD-OP-TOK") | Group(ID)("ID-TOK")
                )
        #holds the reserved words / terminals accepted by the grammar
        self.lexer = program
        self.tokenList = []
        self.path = path
    #Splits the given file into raw pyparsing matches
    def tokenFile(self):
        """Parse self.path with the grammar; exit the process on any error."""
        try:
            return self.lexer.parseFile(self.path)
        except Exception:
            print("Invalid token found")
            sys.exit()
    def tokenize(self):
        """Convert the raw matches into Tokens objects in self.tokenList."""
        tokenItems = self.tokenFile()
        for items in tokenItems:
            tok = Tokens(items.getName(),items[0])
            self.tokenList.append(tok)
    def printAllTokens(self):
        """Debug helper: dump every (tag, token) pair to stdout."""
        for tok in self.tokenList:
            print("TAG:",tok.tag," ","TOKEN:",tok.token,"\n")
#lex = Lexer('Program.g4')
#lex.tokenize()
#lex.printAllTokens() | Python | 92 | 43.836956 | 299 | /Lexer.py | 0.558923 | 0.557953 |
marioxe301/ParserDR | refs/heads/master | from Lexer import Lexer
from treelib import Node, Tree
#verifies only the declaration of an int: int <ID> := <NUMBER>
class ParserExample(object):
    """Recursive-descent checker for the single production:

        variable -> int <ID> := <INT_LITERAL>

    Tokens come from Lexer; parseCheck() reports (in Spanish, as originally)
    whether the token stream matches the production.
    """

    def __init__(self, path):
        lex = Lexer(path)
        lex.tokenize()
        self.TOKENS = lex.tokenList
        self.INDEX = 0
        self.TREE = Tree()

    def nextToken(self):
        """Return [True, token] and advance, or [False] at end of stream."""
        if self.INDEX < len(self.TOKENS):
            current = self.INDEX
            self.INDEX += 1
            return [True, self.TOKENS[current]]
        return [False]

    def parseCheck(self):
        """Parse the whole stream and print the verdict."""
        if self.variable():
            print("Gramatica Correcta")
        else:
            print("Gramatica Incorrecta")

    def variable(self):
        """variable -> TYPE ID ASSIG NUMBER.  Returns a strict bool.

        (The original returned None on failure; callers only test truthiness,
        so returning False is backward compatible.)
        """
        return self.TYPE() and self.ID() and self.ASSIG() and self.NUMBER()

    def _expect(self, tag):
        """Consume one token and check its tag (shared by the terminals below).

        Replaces four copy-pasted method bodies from the original.
        """
        token = self.nextToken()
        return bool(token[0] and token[1].tag == tag)

    def TYPE(self):
        """Terminal: the 'int' type keyword."""
        return self._expect("INT-TY")

    def ID(self):
        """Terminal: an identifier."""
        return self._expect("ID-TOK")

    def ASSIG(self):
        """Terminal: the ':=' assignment operator."""
        return self._expect("ASIG-TOK")

    def NUMBER(self):
        """Terminal: an integer literal."""
        return self._expect("INT-TOK")

    def ImprimirArbol(self):
        """Print the (currently always empty) parse tree."""
        self.TREE.show()
# Smoke test: lex and parse the sample grammar file.
Par = ParserExample('Program.g4')
Par.parseCheck()
#Par.ImprimirArbol() | Python | 77 | 23.688313 | 66 | /ParserSimpleExample.py | 0.460526 | 0.454211 |
marioxe301/ParserDR | refs/heads/master | from Lexer import Lexer
from treelib import Node, Tree
import re
from termcolor import colored,cprint
class Parser(object):
    def __init__(self,path):
        """Tokenize *path* with Lexer; TYPES matches any '*-TY' type tag."""
        lex = Lexer(path)
        lex.tokenize()
        self.TOKENS = lex.tokenList
        self.TYPES= re.compile(".*-TY")
def nextToken(self):
if len(self.TOKENS) != 0:
x = self.TOKENS[0]
self.TOKENS.pop(0)
return [True,x]
else:
return [False]
def seekNextToken(self):
if len(self.TOKENS) > 2:
return [True,self.TOKENS[1]]
else:
return [False]
def seekActualToken(self):
if len(self.TOKENS)!= 0:
return [True,self.TOKENS[0]]
else:
return [False]
def Parse(self):
if self.Program():
cprint("Grammar Correct\n","green",attrs=['bold'])
else:
cprint("Grammar Incorrect\n","red",attrs=['bold'])
def Program(self):
print("Program\n")
if self.Opt_funct_decl():
return True
else:
return False
    def Opt_funct_decl(self):
        """<opt_funct_decl> -> <funct_head> <body>."""
        print("Opt_funct_decl\n")
        if self.Funct_head():
            if self.Body():
                return True
            else:
                return False
        else:
            return False
    def Funct_head(self):
        """<funct_head> -> <funct_name> '(' <param_list_opt>."""
        print("Funct_head\n")
        if self.Funct_name():
            token = self.nextToken()
            if token[0] and token[1].tag == 'PAR-OP-TOK':
                print("PAREN_OP_TOKEN")
                print("Token: ",token[1].token,"\n")
                # NOTE(review): if Param_list_opt() fails this falls through
                # and implicitly returns None (falsy to callers).
                if self.Param_list_opt():
                    return True
            else:
                cprint("Expected a ( TOKEN\n","red",attrs=['bold'])
                return False
        else:
            return False
    def Funct_name(self):
        """<funct_name> -> <funct_type> ID."""
        print("Funct_name\n")
        if self.Funct_type():
            token = self.nextToken()
            if token[0] and token[1].tag == 'ID-TOK':
                print("ID_TOKEN")
                print("Token: ",token[1].token,"\n")
                return True
            else:
                cprint("Expected a ID TOKEN\n","red",attrs=['bold'])
                return False
        else:
            return False
    def Funct_type(self):
        """<funct_type> -> 'static' <decl_type>."""
        print("Funct_type\n")
        token = self.nextToken()
        if token[0] and token[1].tag == 'STATIC-TY':
            print("STATIC_TOKEN")
            print("Token: ",token[1].token,"\n")
            if self.Decl_type():
                return True
            else:
                # NOTE(review): message says STATIC but this branch actually
                # means the *type* after 'static' was missing.
                cprint("Expected a STATIC TOKEN\n","red",attrs=['bold'])
                return False
        else:
            return False
    def Decl_type(self):
        """Terminal: any type keyword (tag matching the '.*-TY' regex)."""
        print("Decl_type\n")
        token = self.nextToken()
        if token[0] and self.TYPES.match(token[1].tag) is not None:
            print("TYPE_TOKEN")
            print("Token: ",token[1].token,"\n")
            return True
        else:
            cprint("Expected a TYPE TOKEN\n","red",attrs=['bold'])
            return False
    def Param_list_opt(self):
        """<param_list_opt> -> ')' | <decl_param> (',' <decl_param>)* ')'."""
        print("Param_list_opt\n")
        Token = self.seekActualToken()
        if Token[0] and Token[1].tag == 'PAR-CL-TOK':
            print("PAREN_CL_TOKEN")
            print("Token: ",Token[1].token,"\n")
            self.nextToken() # consume the ')'
            return True
        elif Token[0] and self.TYPES.match(Token[1].tag) is not None:
            # NOTE(review): if Decl_param() fails, this loop has no exit and
            # spins forever once the token stream is exhausted.
            while True:
                if self.Decl_param():
                    Token = self.seekActualToken()
                    if Token[0] and Token[1].tag == 'COMA-TOK':
                        print("COMA_TOKEN")
                        print("Token: ",Token[1].token,"\n")
                        self.nextToken() # just discard the comma
                        continue
                    elif Token[0] and Token[1].tag == 'PAR-CL-TOK':
                        print("PAREN_CL_TOKEN")
                        print("Token: ",Token[1].token,"\n")
                        self.nextToken() # discard the ')'
                        return True
                    else:
                        cprint("Expected a COMA or ) TOKEN\n","red",attrs=['bold'])
                        return False
        else:
            cprint("Expected a ) TOKEN\n","red",attrs=['bold'])
            return False
    def Decl_param(self):
        """<decl_param> -> <decl_type> ID."""
        print("Decl_param\n")
        if self.Decl_type():
            token = self.nextToken()
            if token[0] and token[1].tag == 'ID-TOK':
                print("ID_TOKEN")
                print("Token: ",token[1].token,"\n")
                return True
            else:
                cprint("Expected a ID TOKEN\n","red",attrs=['bold'])
                return False
        else:
            return False
    def Body(self):
        """<body> -> '{' <stmt_list> (the '}' is consumed inside Stmts)."""
        print("Body\n")
        token = self.nextToken()
        if token[0] and token[1].tag == 'BRACK-OP-TOK':
            print("BRACK_OP_TOKEN")
            print("Token: ",token[1].token,"\n")
            if self.Stmt_list():
                return True
            else:
                return False
        else:
            cprint("Expected a { TOKEN\n","red",attrs=['bold'])
            return False
    def Stmt_list(self):
        """<stmt_list> -> <stmts>."""
        print("Stmt_list\n")
        if self.Stmts():
            return True
        else:
            return False
    def Stmts(self):
        """Zero or more statements terminated by the closing '}'."""
        print("Stmts\n")
        BrackToken = self.seekActualToken()
        if BrackToken[0] and BrackToken[1].tag == 'BRACK-CL-TOK':
            print("BRACK_CL_TOKEN")
            print("Token: ",BrackToken[1].token,"\n")
            self.nextToken() # consume the '}'
            return True
        else:
            while True:
                if self.Stmt():
                    BrackToken = self.seekActualToken()
                    if BrackToken[0] and BrackToken[1].tag == 'BRACK-CL-TOK':
                        print("BRACK_CL_TOKEN")
                        print("Token: ",BrackToken[1].token,"\n")
                        self.nextToken() # discard the '}'
                        return True
                    else:
                        continue
                else:
                    cprint("Unexpected TOKEN found\n","red",attrs=['bold'])
                    return False
    def Stmt(self):
        """Dispatch on the lookahead token: if / while / return / assignment."""
        print("Stmt\n")
        Token = self.seekActualToken()
        if Token[0] and Token[1].tag == 'IF-TOK':
            if self.If_stmt():
                return True
            else:
                return False
        elif Token[0] and Token[1].tag == 'WHILE-TOK' :
            if self.While_stmt():
                return True
            else:
                return False
        elif Token[0] and Token[1].tag == 'RETURN-TOK':
            if self.Return_stmt():
                return True
            else:
                return False
        elif Token[0] and self.TYPES.match(Token[1].tag) is not None:
            # Assign_stmt is defined further down in this file (not shown here).
            if self.Assign_stmt():
                return True
            else:
                return False
        else:
            return False
    def If_stmt(self):
        """<if_stmt> -> 'if' '(' <bool_expr> ')' <body>."""
        print("If_stmt\n")
        IfToken = self.nextToken()
        ParToken = self.nextToken()
        if IfToken[0] and IfToken[1].tag == 'IF-TOK' and ParToken[0] and ParToken[1].tag == 'PAR-OP-TOK':
            print("IF_TOKEN")
            print("Token: ",IfToken[1].token,"\n")
            print("PAR_OP_TOKEN")
            print("Token: ",ParToken[1].token,"\n")
            if self.Bool_expr():
                ParToken = self.nextToken()
                if ParToken[0] and ParToken[1].tag == 'PAR-CL-TOK':
                    print("PAR_CL_TOKEN")
                    print("Token: ",ParToken[1].token,"\n")
                    if self.Body():
                        return True
                    else:
                        return False
                else:
                    cprint("Expected a ) TOKEN\n","red",attrs=['bold'])
                    return False
            else:
                return False
        else:
            cprint("Expected a IF or ( or TOKEN\n","red",attrs=['bold'])
            return False
    def Bool_expr(self):
        """<bool_expr> -> BOOLEAN | <constant> (REL_OP | LOG_OP) <constant>."""
        print("Bool_expr\n")
        Token = self.seekActualToken()
        if Token[0] and Token[1].tag == 'BOOLEAN-TOK':
            print("BOOLEAN_TOKEN")
            print("Token: ",Token[1].token,"\n")
            self.nextToken() # discard the boolean literal
            return True
        else:
            if self.Constant():
                Token = self.nextToken()
                if Token[0] and (Token[1].tag == 'REL-OP-TOK' or Token[1].tag == 'LOG-OP-TOK'):
                    print("LOGICAL_TOKEN")
                    print("Token: ",Token[1].token,"\n")
                    if self.Constant():
                        return True
                    else:
                        return False
                else:
                    cprint("Expected a RELATIONAL or LOGICAL TOKEN\n","red",attrs=['bold'])
                    return False
            else:
                return False
    def Constant(self):
        """Terminal: any literal (int, string, real, date or boolean)."""
        print("Constant\n")
        Token = self.nextToken()
        if Token[0] and Token[1].tag == 'INT-TOK':
            print("INT_TOKEN")
            print("Token: ",Token[1].token,"\n")
            return True
        elif Token[0] and Token[1].tag == 'STRING-TOK':
            print("STRING_TOKEN")
            print("Token: ",Token[1].token,"\n")
            return True
        elif Token[0] and Token[1].tag == 'REAL-TOK':
            print("REAL_TOKEN")
            print("Token: ",Token[1].token,"\n")
            return True
        elif Token[0] and Token[1].tag == 'DATE-TOK':
            print("DATE_TOKEN")
            print("Token: ",Token[1].token,"\n")
            return True
        elif Token[0] and Token[1].tag == 'BOOLEAN-TOK':
            print("BOOLEAN_TOKEN")
            print("Token: ",Token[1].token,"\n")
            return True
        else:
            cprint("Expected a CONSTANT TOKEN\n","red",attrs=['bold'])
            return False
def While_stmt(self):
print("While_stmt\n")
WhileToken = self.nextToken()
ParToken = self.nextToken()
if WhileToken[0] and ParToken[0] and WhileToken[1].tag == 'WHILE-TOK' and ParToken[1].tag == 'PAR-OP-TOK':
print("WHILE_TOKEN")
print("Token: ",WhileToken[1].token,"\n")
print("PAR_OP_TOKEN")
print("Token: ",ParToken[1].token,"\n")
if self.Bool_expr():
ParToken = self.nextToken()
if ParToken[0] and ParToken[1].tag == 'PAR-CL-TOK':
print("PAR_CL_TOKEN")
print("Token: ",ParToken[1].token,"\n")
if self.Body():
return True
else:
return False
else:
return False
else:
return False
else:
cprint("Expected a WHILE or ( TOKEN\n","red",attrs=['bold'])
return False
def Return_stmt(self):
print("Return_stmt\n")
Token = self.nextToken()
if Token[0] and Token[1].tag == 'RETURN-TOK':
print("RETURN_TOKEN")
print("Token: ",Token[1].token,"\n")
Semicolon = self.seekActualToken()
if Semicolon[0] and Semicolon[1].tag == 'SEMICOLON-TOK':
print("SEMICOLON_TOKEN")
print("Token: ",Semicolon[1].token,"\n")
self.nextToken()
return True
else:
if self.Constant():
Semicolon = self.seekActualToken()
if Semicolon[0] and Semicolon[1].tag == 'SEMICOLON-TOK':
print("SEMICOLON_TOKEN")
print("Token: ",Semicolon[1].token,"\n")
self.nextToken()
return True
else:
return False
else:
return False
else:
cprint("Expected a RETURN TOKEN\n","red",attrs=['bold'])
return False
def Assign_stmt(self):
print("Assign_stmt\n")
if self.Decl_type():
Token = self.nextToken()
if Token[0] and Token[1].tag == 'ID-TOK':
print("ID_TOKEN")
print("Token: ",Token[1].token,"\n")
Token = self.nextToken()
if Token[0] and Token[1].tag == 'ASIG-TOK':
print("ASSIGN_TOKEN")
print("Token: ",Token[1].token,"\n")
if self.Constant():
Token = self.nextToken()
if Token[0] and Token[1].tag == 'SEMICOLON-TOK':
print("SEMICOLON_TOKEN")
print("Token: ",Token[1].token,"\n")
return True
else:
cprint("Expected a SEMICOLON TOKEN\n","red",attrs=['bold'])
return False
else:
return False
else:
cprint("Expected a ASSIGN TOKEN\n","red",attrs=['bold'])
return False
else:
cprint("Expected a ID TOKEN\n","red",attrs=['bold'])
return False
Pars = Parser('Program.g4')
Pars.Parse() | Python | 390 | 34.005127 | 114 | /Parser.py | 0.450443 | 0.442458 |
p4squ4lle/PI-Controller-Communication | refs/heads/main | # -*- coding: utf-8 -*-
import serial
import subprocess
import logging
from datetime import datetime
from pipython import GCSDevice, pitools
from time import sleep
# Set-Up logging
dt = datetime.now()
dt_string = dt.strftime("%H-%M_%d%m%Y")
logging.basicConfig(level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.FileHandler(f"log/{dt_string}.log"),
logging.StreamHandler()
]
)
position_file = open(f'log/motor_positions_{dt_string}.csv', 'a')
position_file.write("#pos_m1, pos_m2, pos_m3 [mm]\n")
# Laser Desk COM port
LASER_DESK_COM = 'COM6'
# Start Laser Desk
# laser_desk_path = r'C:\Program Files\SCANLAB\laserDesk\SLLaserDesk.exe'
# print(f'Starting Laser Desk application at {laser_desk_path}')
# subprocess.Popen([laser_desk_path])
# print('Succesfully started Laser Desk application')
# Initialize PI Motor Controller
SN = '120060504'
STAGES = ['M-521.DG1', 'M-405.DG(FW000.000)', 'M-405.DG(FW000.000)',]
REFMODE = 'FRF'
PI = GCSDevice('C-844')
PI.ConnectUSB(serialnum=SN)
logging.info('connected: {}'.format(PI.qIDN().strip()))
print('-----------------------------------------------')
if PI.HasqVER():
logging.info('version info: {}'.format(PI.qVER().strip()))
print('-----------------------------------------------')
logging.info(f'initialize connected stages: {STAGES}')
pitools.startup(PI, stages=STAGES, refmodes=REFMODE)
logging.info(f'Connected Stages: {PI.qCST()}')
print('-----------------------------------------------')
servo_dict = PI.qSVO()
reference_dict = PI.qFRF()
if all(v for v in servo_dict.values()):
logging.info('Servo-control is set ON for all axes')
else:
logging.warning('Servo-control is not set ON for axes',
f'{[k for k in servo_dict.keys() if servo_dict[k]==False]}')
if all(v for v in reference_dict.values()):
logging.info('All axes have been succesfully referenced.')
position_file.write(f"{PI.qPOS()['1']}, {PI.qPOS()['2']}, {PI.qPOS()['3']}\n")
else:
logging.warning('The following axes have not been referenced properly',
f'{[k for k in reference_dict.keys() if reference_dict[k]==False]}')
rangemin = list(PI.qTMN().values())
rangemax = list(PI.qTMX().values())
ranges = zip(rangemin, rangemax)
# error_dict = {i: PI.TranslateError(i) for i in range(10000)
# if PI.TranslateError(i) != str(i)}
pi_error = PI.qERR()
if pi_error > 0:
logging.warning(f'WARNING: an error occurred (error code: {pi_error})',
PI.TranslateError(pi_error))
LaserDesk = serial.Serial(LASER_DESK_COM)
if LaserDesk.is_open:
logging.info('Serial connection was successfully established.')
else:
logging.warning('Serial port could not be opened.')
print('===============================================')
listen = True
while listen:
bytes_waiting = LaserDesk.in_waiting
if bytes_waiting==0:
continue
input_bytes = LaserDesk.read_until(b'\x03')
input_string = input_bytes.decode()[1:-1]
if input_string=='End':
logging.info("Recieved 'End' command. Stop listening")
listen = False
continue
controller_ready_flag = PI.IsControllerReady()
#while any(v for v in PI.IsMoving().values()):
# sleep(0.5)
try:
PI.send(input_string)
logging.info(f'string sent to pi controller: {input_string}')
if any(v for v in PI.IsMoving().values()):
print('axes are moving', end='')
while any(v for v in PI.IsMoving().values()):
print('.', end='')
sleep(1)
if all(v for v in PI.qONT().values()):
logging.info('axes stopped moving and are on target')
logging.info('absolute motor positions now are:')
logging.info(f'{PI.qPOS()}')
position_file.write(f"{PI.qPOS()['1']}, {PI.qPOS()['2']}, {PI.qPOS()['3']}\n")
print('===============================================')
else:
logging.warning(f'some axes are not on target: {PI.qONT()}')
print('===============================================')
LaserDesk.write(b'\x02 1 \x03')
except Exception as e:
logging.error('An exception occured while sending the command to the PI controller:')
logging.error(e)
LaserDesk.write(b'\x02 0 \x03')
position_file.close()
LaserDesk.close()
logging.info("Serial connection was closed. End of script.") | Python | 131 | 33.793892 | 93 | /PIController.py | 0.565607 | 0.550885 |
p4squ4lle/PI-Controller-Communication | refs/heads/main | # -*- coding: utf-8 -*-
import serial
import subprocess
import logging
from pipython import GCSDevice, pitools
# Initialize PI Motor Controller
SN = '120060504'
STAGES = ['M-521.DG1', 'M-405.DG(FW000.000)', 'M-405.DG(FW000.000)',]
REFMODE = 'FRF'
PI = GCSDevice('C-844')
PI.ConnectUSB(serialnum=SN)
print('connected: {}'.format(PI.qIDN().strip()))
print('===============================================')
if PI.HasqVER():
print('version info: {}'.format(PI.qVER().strip()))
print('===============================================')
print('initialize connected stages...')
pitools.startup(PI, stages=STAGES, refmodes=REFMODE)
print('===============================================')
| Python | 27 | 25.555555 | 69 | /PIControllerConnection.py | 0.5 | 0.453333 |
strawberryblackhole/hippopotamus | refs/heads/master | from htmlParser import getFormatedArticle
from chunkGenerator import *
from ZIMply.zimply import ZIMFile
from os import path
import math
import time
import argparse
from amulet.world_interface import load_world
def generateChunkList(totalArticleCount, chunkBookCapacity, target_pos, outputForceload = False):
#generate a square, that could fit (more than) all articles
sideLength = math.ceil(math.sqrt(totalArticleCount/chunkBookCapacity))
if outputForceload:
command = "/chunkgenerator:generatechunks %d %d %d %d"%(target_pos[0] - 1, target_pos[1] - 1, target_pos[0] + sideLength + 1, target_pos[1] + sideLength + 1)#+- 1 to include the outer border of the library
print(command)
return
chunkList = []
for x in range(sideLength):
for z in range(sideLength):
if len(chunkList) >= math.ceil(totalArticleCount/chunkBookCapacity): #stop if we have enough chunks
break
chunkList.append([x + target_pos[0] // 16, z + target_pos[1] // 16])
return chunkList
def generateWallList(chunkList):
wallChunkWithSlice = []
for chunk in chunkList:
#create chunk slices for the 4 chunks that would have walls to the center chunk
potentialWalls = []
potentialWalls.append([[1,0], [0, slice(0,16)]])
potentialWalls.append([[0,1], [slice(0,16), 0]])
potentialWalls.append([[-1,0], [15, slice(0,16)]])
potentialWalls.append([[0,-1], [slice(0,16), 15]])
#turn its local coordinates into world coordinates
for potWall in potentialWalls:
potWall[0][0] += chunk[0]
potWall[0][1] += chunk[1]
#only keep the wallchunk if its not in use
for potWall in potentialWalls:
if potWall[0] in chunkList:
continue
wallChunkWithSlice.append(potWall)
return wallChunkWithSlice
def getLastArticleId(zimfile):
article = None
for article in zimfile:
pass
return article[2]
def fill( booksPerBarrel,
position,
world = False,
dimension = "overworld",
skipChunk = 0,
skipArticles = 0,
filePath = "",
totalArticleCount = -1):
zimfile = ZIMFile(filePath,"utf-8")
if totalArticleCount == -1:
totalArticleCount = getLastArticleId(zimfile)
print("Article count: ", totalArticleCount)
barrelPositionList = generateBarrelPositionList()
barrelsPerChunk = len(barrelPositionList)
chunkBookCapacity = barrelsPerChunk * booksPerBarrel
chunkList = generateChunkList(totalArticleCount, chunkBookCapacity, position, world == False)
if world:
wallChunkList = generateWallList(chunkList)
totalChunkCount = len(chunkList) + len(wallChunkList)
completedChunks = 0
currentArticle = skipArticles
for chunkCoords in chunkList:
if skipChunk > 0:
skipChunk -= 1
completedChunks += 1
currentArticle += booksPerBarrel * barrelsPerChunk
continue
start = time.perf_counter()
worldObj = load_world(path.expandvars(world))
chunk = worldObj.get_chunk(chunkCoords[0], chunkCoords[1], dimension)
fillChunk(chunk, barrelPositionList, worldObj, dimension, currentArticle, booksPerBarrel, filePath, chunkList, position)
currentArticle += booksPerBarrel * barrelsPerChunk
worldObj.create_undo_point()#workaround suggested by amulet team so that saving works (can possibly be removed in the future)
worldObj.save()
worldObj.close()
completedChunks += 1
print("chunk time (m): ", (time.perf_counter() - start)/60)
print("completed chunk: ", completedChunks)
yield 100 * completedChunks / totalChunkCount
for wallChunkCoords, orientation in wallChunkList:
chunk = worldObj.get_chunk(wallChunkCoords[0], wallChunkCoords[1], dimension)
placeWall(chunk, orientation, worldObj)
completedChunks += 1
yield 100 * completedChunks / totalChunkCount
worldObj.create_undo_point()#workaround suggested by amulet team so that saving works (can possibly be removed in the future)
worldObj.save()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Puts a wiki into a Minecraft world')
parser.add_argument('-wiki', type=str, help='Location of the wiki file')
parser.add_argument('-world', type=str, help='Location of the world file. You may use %%APPDATA%%')
parser.add_argument('-booksPerBarrel', type=int, help='Number of books to put in a barrel', default=27)
parser.add_argument('-chunkSkip', type=int, help='Number of chunks to skip', default=0)
parser.add_argument('-articleCount', type=int, help='If the number of articles was counted before, specifying this can save startup time', default=-1)
parser.add_argument('-pos', metavar=("X","Z"),type=int, help='X Z coordinates of the starting chunk (block coordinates)', default=[0,0], nargs=2)
args = parser.parse_args()
#debug vars
bookSkip = 0
args.world = '%APPDATA%\\.minecraft\\saves\\loadedWorld\\'
args.chunkSkip = 5
args.booksPerBarrel = 50
args.pos = [0,0]
#args.wiki = path.dirname(path.realpath(__file__)) + "\\wikipedia_de_chemistry_nopic_2020-04.zim"
#args.articleCount = ????
args.wiki = path.dirname(path.realpath(__file__)) + "\\wikipedia_de_all_nopic_2020-04.zim"
#args.articleCount = 3979758
if args.world is not None:
for progress in fill(args.booksPerBarrel,
args.pos,
world = args.world,
skipArticles = bookSkip,
skipChunk = args.chunkSkip,
filePath = args.wiki,
totalArticleCount = args.articleCount):
print(progress)
else:
for progress in fill(args.booksPerBarrel,
args.pos,
world = False,
skipArticles = bookSkip,
skipChunk = args.chunkSkip,
filePath = args.wiki):
pass
| Python | 156 | 40.224358 | 213 | /fillWithWiki.py | 0.615827 | 0.599503 |
strawberryblackhole/hippopotamus | refs/heads/master | from amulet.api.block import Block
import amulet_nbt
from amulet.api.block_entity import BlockEntity
from htmlParser import getFormatedArticle
from functools import partial
import multiprocessing
from multiprocessing.pool import Pool
from multiprocessing.pool import ThreadPool
from ZIMply.zimply import ZIMFile
import time
import re
import json
def getBlock(world, block):
"""turns a block object into a usable block object, no idea what this actually does"""
tmp = world.world_wrapper.translation_manager.get_version(
"java",
(1, 15, 2)
).block.to_universal(
block
)[0]
return world.palette.get_add_block(tmp)
def createBlocks(world):
"""generates all needed Block objects"""
barrel = getBlock(world, Block("minecraft", "barrel", {"facing" : amulet_nbt.TAG_String("up"), "open" : amulet_nbt.TAG_String("false")}))
wool = getBlock(world, Block("minecraft", "red_wool"))
air = getBlock(world, Block("minecraft", "air"))
stone = getBlock(world, Block("minecraft", "stone"))
glowstone = getBlock(world, Block("minecraft", "glowstone"))
lantern = getBlock(world, Block("minecraft", "lantern", {"hanging" : amulet_nbt.TAG_String("false")}))
sign_north = getBlock(world, Block("minecraft", "acacia_wall_sign", {"facing" : amulet_nbt.TAG_String("north")}))
sign_south = getBlock(world, Block("minecraft", "acacia_wall_sign", {"facing" : amulet_nbt.TAG_String("south")}))
return [barrel, wool, glowstone, sign_north, sign_south, air, stone, lantern]
def generateBarrelPositionList():
"""Generates a list of coordinates in a chunk (16x16) where barrels should be"""
barrels = []
for row in [0,8]:
for y in range(5,7):
for x in range(1,8,2):
subList = [(x, y, z) for z in range(1 + row, 7 + row)]
barrels.extend(subList)
for x in range(8,15,2):
subList = [(x, y, z) for z in range(1 + row, 7 + row)]
barrels.extend(subList)
return barrels
def generateSignEntity(x, y, z, direction):
"""Generates the entity to make the sign display its position"""
return BlockEntity("java", "acacia_wall_sign", x, y, z,\
amulet_nbt.NBTFile(\
value = amulet_nbt.TAG_Compound(\
{\
"utags": amulet_nbt.TAG_Compound(\
{\
"keepPacked": amulet_nbt.TAG_Byte(0),\
"Text4": amulet_nbt.TAG_String("{\"text\":\"\"}"),\
"Text3": amulet_nbt.TAG_String("{\"text\":\"\"}"),\
"Text2": amulet_nbt.TAG_String("{\"text\":\"%d - %d\"}"%(z + direction, z + direction * 6)), \
"Text1": amulet_nbt.TAG_String("{\"text\":\"%d\"}"%x)\
}),\
"Color": amulet_nbt.TAG_String("black")\
})))
def fillSigns(chunk, world, dimension, sign_north, sign_south):
"""Generates all signs in the chunk and fills them with text"""
for z in [0, 8]:
for x in list(range(1,8,2)) + list(range(8,15,2)):
chunk.blocks[x,6,z] = sign_north
chunk.block_entities.insert(generateSignEntity(x + chunk.cx * 16, 6, z + chunk.cz * 16, 1))
for z in [7, 15]:
for x in list(range(1,8,2)) + list(range(8,15,2)):
chunk.blocks[x,6,z] = sign_south
chunk.block_entities.insert(generateSignEntity(x + chunk.cx * 16, 6, z + chunk.cz * 16, -1))
def fillbarrels(chunk, barrelPositionList, barrelBlock, currentArticle, booksPerBarrel, zimFilePath, chunkList, target_pos):
"""Generates all barrels in the chunk and fills them with books/articles"""
for barrelPos in barrelPositionList:
books = []
titles = []
start = time.perf_counter()
if booksPerBarrel > 30:
pool = Pool(processes=4) #on my laptop ~4 processes was faster than any amount of threads (4 = logic core count)
else:
pool = ThreadPool(processes=3)#the article reading is mostly cpu limited, so going high on process count doesnt help
outputs = pool.map(partial(tryGetArticle, zimFilePath = zimFilePath, barrelPositionList = barrelPositionList, booksPerBarrel = booksPerBarrel, chunkList = chunkList, target_pos = target_pos), range(currentArticle,currentArticle + booksPerBarrel))
pool.close()
#outputs = []
#for id in range(currentArticle, currentArticle + booksPerBarrel):
# outputs.append(tryGetArticle(id, zimFilePath))
currentArticle += booksPerBarrel
for output in outputs:
if output[0] == None:
continue
titles.append(output[1])
books.append(output[0])
stop = time.perf_counter()
#print("generating a book", (stop-start)/booksPerBarrel)
chunk.blocks[barrelPos] = barrelBlock
barrelEntity = BlockEntity("java", "barrel", barrelPos[0] + chunk.cx * 16, barrelPos[1], barrelPos[2] + chunk.cz * 16,\
amulet_nbt.NBTFile(\
value = amulet_nbt.TAG_Compound(\
{\
"utags": amulet_nbt.TAG_Compound(\
{\
"keepPacked": amulet_nbt.TAG_Byte(0),\
"isMovable": amulet_nbt.TAG_Byte(1),\
"Findable": amulet_nbt.TAG_Byte(0),\
"CustomName": amulet_nbt.TAG_String("{\"text\":\"x:%d y:%d z:%d\"}"%(barrelPos[0] + chunk.cx * 16, barrelPos[1], barrelPos[2] + chunk.cz * 16)),\
"Items": amulet_nbt.TAG_List(\
value = [
amulet_nbt.TAG_Compound(\
{\
"Slot": amulet_nbt.TAG_Byte(iBook),\
"Count": amulet_nbt.TAG_Byte(1),\
"id": amulet_nbt.TAG_String("minecraft:written_book"),\
"tag": amulet_nbt.TAG_Compound(\
{
"pages": amulet_nbt.TAG_List(\
value=[amulet_nbt.TAG_String(page) for page in books[iBook]],\
list_data_type = 8\
),\
"title": amulet_nbt.TAG_String(titles[iBook]),\
"author": amulet_nbt.TAG_String("Pos: x:%d y:%d z:%d, ID: %d"%(barrelPos[0] + chunk.cx * 16, barrelPos[1], barrelPos[2] + chunk.cz * 16, currentArticle + iBook)),
})
})
for iBook in range(len(books))
], list_data_type = 9\
)
})\
})))
chunk.block_entities.insert(barrelEntity)
def tryGetArticle(id, zimFilePath, barrelPositionList, booksPerBarrel, chunkList, target_pos):
"""Tries to find the article with the given id, returns [False, False] if no article was found, else article and its title are returned"""
start = time.perf_counter()
zimFile = ZIMFile(zimFilePath,"utf-8")
stop = time.perf_counter()
#print("some overhead ", stop - start)
start = time.perf_counter()
article = zimFile._get_article_by_index(id, follow_redirect=False)
if article != None:
if article.mimetype == "text/html":
articleTitle, articleContent = getFormatedArticle(article.data.decode("utf-8"), zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos)
re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
articleContent = [re_pattern.sub(u'\uFFFD', page).replace(u'\xa0', u' ') for page in articleContent] # seems like mc cant handle 💲. (found in the article about the $ sign), this lead me to the assumption, that mc cant handle any surrogate unicode pair. https://stackoverflow.com/questions/3220031/how-to-filter-or-replace-unicode-characters-that-would-take-more-than-3-bytes/3220210#3220210
stop = time.perf_counter()
#print("parsing ", stop - start)
return articleContent, json.dumps(article.url.replace(u'\xa0', u' '), ensure_ascii=False)[1:-1]
if article.is_redirect == True:
coordinates = getArticleLocationById(article.mimetype, barrelPositionList, booksPerBarrel, chunkList, target_pos)
return ["{\"text\":\"Redirect to article with ID %d at x:%d y:%d z:%d\"}"%tuple([id] + coordinates)], json.dumps(article.url.replace(u'\xa0', u' '), ensure_ascii=False)[1:-1]
return None, None
def getArticleLocationById(id, barrelPositionList, booksPerBarrel, chunkList, target_pos):
booksPerChunk = len(barrelPositionList) * booksPerBarrel
chunk = int(id) // booksPerChunk
bookNumberInChunk = (int(id) - chunk * booksPerChunk)
barrel = (bookNumberInChunk - 1)// booksPerBarrel #-1 because if booksNumberInChunk == booksPerBarrel, it should be 0
return [chunkList[chunk][0] * 16 + barrelPositionList[barrel][0] + target_pos[0], barrelPositionList[barrel][1], chunkList[chunk][1] * 16 + barrelPositionList[barrel][2] + target_pos[1]]
def fillChunk(chunk, barrelPositionList, world, dimension, currentArticle, booksPerBarrel, zimfilePath, chunkList, target_pos):
"""Fills the chunk with all blocks and content"""
barrel, wool, glowstone, sign_north, sign_south, air, stone, lantern = createBlocks(world)
chunk.blocks[:,5:9:,:] = air
chunk.blocks[:,3,:] = stone
chunk.blocks[:,9,:] = stone
for innerRow in [1,5,14,10]:
for positionInRow in [6,9]:
chunk.blocks[innerRow,7,positionInRow] = lantern
for outerRow in [3,7,8,12]:
for positionInRow in [1,14]:
chunk.blocks[outerRow,7,positionInRow] = lantern
fillSigns(chunk, world, dimension, sign_north, sign_south)
chunk.blocks[:,4,:] = wool
chunk.blocks[0,4,7:9] = glowstone
chunk.blocks[0,4,0] = glowstone
chunk.blocks[0,4,15] = glowstone
chunk.blocks[15,4,7:9] = glowstone
chunk.blocks[15,4,0] = glowstone
chunk.blocks[15,4,15] = glowstone
fillbarrels(chunk, barrelPositionList, barrel, currentArticle, booksPerBarrel, zimfilePath, chunkList, target_pos)
chunk.changed = True
def placeWall(chunk, orientation, world):
"""Places a wall on the wanted side of the chunk"""
barrel, wool, glowstone, sign_north, sign_south, air, stone, lantern = createBlocks(world)
chunk.blocks[orientation[0],3:9,orientation[1]] = stone
chunk.changed = True | Python | 227 | 47.123348 | 402 | /chunkGenerator.py | 0.577589 | 0.558912 |
strawberryblackhole/hippopotamus | refs/heads/master | from html.parser import HTMLParser
from bs4 import BeautifulSoup
import json
class MyHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def feed(self, in_html, zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos):
self._data = [""]
self._formats = [[[],[]]]
self._attrs = []
self._title = ""
self._zimFile = zimFile
self._barrelPositionList = barrelPositionList
self._booksPerBarrel = booksPerBarrel
self._chunkList = chunkList
self._target_pos = target_pos
super(MyHTMLParser, self).feed(in_html)
articleContent = self._data[0]
articleFormating = self._formats[0]
pages = ['{"extra":[{"text":"']
charsOnPage = 0
for iChar in range(len(articleContent)):
#if page not too long
if charsOnPage < 200:
#if the formating has to be defined
if charsOnPage == 0 or articleFormating[0][iChar] != articleFormating[0][iChar -1] or articleFormating[1][iChar] != articleFormating[1][iChar -1]:
pages[-1] += '"},{'
if articleFormating[0][iChar] > 0:
pages[-1] += '"bold":true,'
if articleFormating[1][iChar] > 0:
pages[-1] += '"italic":true,'
pages[-1] += '"text":"'
pages[-1] += json.dumps(articleContent[iChar], ensure_ascii=False)[1:-1]
charsOnPage += 1
if articleContent[iChar] == "\n":
charsOnPage += 12
else:
pages[-1] += '"}],"text":""}'
pages.append('{"extra":[{')
if articleFormating[0][iChar] > 0:
pages[-1] += '"bold":true,'
if articleFormating[1][iChar] > 0:
pages[-1] += '"italic":true,'
pages[-1] +='"text":"' + json.dumps(articleContent[iChar], ensure_ascii=False)[1:-1]
charsOnPage = 0
pages[-1] += ' The original work has been modified."}],"text":""}'
return json.dumps(self._title, ensure_ascii=False), pages
def handle_data(self, data):
self._data[-1] += data
for formating in self._formats[-1]:
formating.extend([0]*len(data))
def handle_starttag(self, tag, attrs):
self._data.append("")
self._formats.append([[],[]])
self._attrs.append(attrs)
def remove_data(self, replacement = "", replacementFormatings = [0,0]):
self._data[-1] = replacement
self._formats[-1] = [[0] * len(replacement), [0] * len(replacement)]
self.collaps_last_block_and_format(formatings=replacementFormatings)
def collaps_last_block_and_format(self, prefix = "", postfix = "", formatings = [0,0]):
self._data[-1] = prefix + self._data[-1] + postfix
#extend format by pre/postfix length
for iFormat in range(len(self._formats[-1])):
#turn on formating, but dont turn it off (because allready collapsed formats should keep their formating and should not be overwritten)
for iElement in range(len(self._formats[-1][iFormat])):
self._formats[-1][iFormat][iElement] += formatings[iFormat]
self._formats[-1][iFormat][:0] = [formatings[iFormat]] * len(prefix)
self._formats[-1][iFormat].extend([formatings[iFormat]] * len(postfix))
#collaps the last array entry
self._data[-2] += self._data[-1]
for iFormat in range(len(self._formats[-2])):
self._formats[-2][iFormat].extend(self._formats[-1][iFormat])
#delete last array entry
self._data.pop()
self._formats.pop()
self._attrs.pop()
def handle_endtag(self, tag):
if tag == 'a' :
foundiAtt = -1
for iAtt in range(len(self._attrs[-1])):
try:
self._attrs[-1][iAtt].index("href")
foundiAtt = iAtt
break
except ValueError:
continue
if foundiAtt != -1:
url = self._attrs[-1][iAtt][1].split("#")[0]
entry, idx = self._zimFile._get_entry_by_url("A", url)
if(idx != None):
location = getArticleLocationById(idx,self._barrelPositionList, self._booksPerBarrel, self._chunkList, self._target_pos)
self.collaps_last_block_and_format("", "[ID %d at x:%d y:%d z:%d]"%tuple([idx] + location))
else:
self.collaps_last_block_and_format("", "[%s]"%url)
else:
self.collaps_last_block_and_format()
elif tag == 'br' :
self.collaps_last_block_and_format("\n", "")
elif tag == 'div' :
if self._data[-1] != "" and self._data[-1][-1] != "\n":
self.collaps_last_block_and_format("\n ", "\n")
else:
self.collaps_last_block_and_format()
elif tag == 'h1' :
if ('class', 'section-heading') in self._attrs[-1]: #if its the title of the article
self._title = self._data[-1]
self.collaps_last_block_and_format("", "\n", [1,0])
else:
self.collaps_last_block_and_format("\n\n", "\n", [1,0])
elif tag == 'h2' :
self.collaps_last_block_and_format("\n\n", "\n", [1,0])
elif tag == 'h3' :
self.collaps_last_block_and_format("\n\n", "\n")
elif tag == 'li' :
self.collaps_last_block_and_format("\n -", "")
elif tag == 'p' :
if self._data[-1] != "":
self.collaps_last_block_and_format("\n ", "\n")
else:
self.collaps_last_block_and_format()
elif tag == 'ol' :
self.collaps_last_block_and_format("\n")
elif tag == 'ul' :
self.collaps_last_block_and_format("\n")
elif tag == 'script' :
self.remove_data()
elif tag == 'style' :
self.remove_data()
elif tag == 'table' :
self.remove_data("\nCan't display table\n", [0,1])
elif tag == 'title' :
self.remove_data()
else:
self.collaps_last_block_and_format()
def getArticleLocationById(id, barrelPositionList, booksPerBarrel, chunkList, target_pos):
booksPerChunk = len(barrelPositionList) * booksPerBarrel
chunk = int(id) // booksPerChunk
bookNumberInChunk = (int(id) - chunk * booksPerChunk)
barrel = (bookNumberInChunk - 1)// booksPerBarrel #-1 because if booksNumberInChunk == booksPerBarrel, it should be 0
return [chunkList[chunk][0] * 16 + barrelPositionList[barrel][0] + target_pos[0], barrelPositionList[barrel][1], chunkList[chunk][1] * 16 + barrelPositionList[barrel][2] + target_pos[1]]
def getFormatedArticle(html, zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos):
parser = MyHTMLParser()
soup = BeautifulSoup(html, features ="html.parser")
title, text = parser.feed(str(soup).replace("\n", "").replace("\t", ""), zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos)
#text = parser.feed(html.replace("\n", "").replace("\t", "")) # some things break when not using bfs
parser.close()
return title, text
| Python | 177 | 41.305084 | 190 | /htmlParser.py | 0.534589 | 0.520967 |
strawberryblackhole/hippopotamus | refs/heads/master | from amulet.api.selection import SelectionGroup
from amulet.api.block import Block
from amulet.api.data_types import Dimension
from amulet import log
import amulet_nbt
from amulet.api.block_entity import BlockEntity
from ZIMply.zimply import ZIMFile
import os
import math
import time
from fillWithWiki import getFormatedArticle
zimfile = ZIMFile(os.path.dirname(os.path.realpath(__file__)) + "\\wikipedia_de_basketball_nopic_2020-04.zim","utf-8")
articleCount = list(zimfile)[-1][2]
count = 0
articles = list(zimfile)
for article in range(articleCount):
print(article)
start = time.perf_counter()
article = [x for x in articles if x[2] == article]
print("article search", time.perf_counter() - start)
if len(article) > 1:
raise Exception()
foundArticle = len(article) == 1
articleStop = 0
if foundArticle:
article = article[0]
articleTitle = article[1]
articleId = article[2]
start = time.perf_counter()
a = zimfile._get_article_by_index(articleId).data.decode("utf-8")
print("article read", time.perf_counter() - start)
start = time.perf_counter()
formatedArticle = getFormatedArticle(a)
print("article parse", time.perf_counter() - start)
print(formatedArticle)
if count > 4:
break
count += 1 | Python | 50 | 26.219999 | 118 | /parserTester.py | 0.667647 | 0.652941 |
ViktorMihalik/Save-the-world | refs/heads/main | import random
# ///Welcoming screen///
print("""
Ooooh welcome unknown player. I'm going to destroy all humans! But of course I have to give you a change to defend yourself.
So you suppose to be the savior of the earth? Let ma laugh- HA-HA-HA.
If you would like to save the earth you have to beat me in 4 games. Okay than let's play a game.
""")
player_name = input("But at first 'savior' tell my your name: ")
# /// Game over///
game_over = ("""World will be destroyed and it's your fault {}
▄▀▀█▄▄ ▄▀▀▀▀▄ ▄▀▀▀▀▄ ▄▀▀▄ ▄▀▄ ▄▀▀█▄▄▄▄ ▄▀▀█▄▄
█ ▄▀ █ █ █ █ █ █ █ ▀ █ ▐ ▄▀ ▐ █ ▄▀ █
▐ █ █ █ █ █ █ ▐ █ █ █▄▄▄▄▄ ▐ █ █
█ █ ▀▄ ▄▀ ▀▄ ▄▀ █ █ █ ▌ █ █
▄▀▄▄▄▄▀ ▀▀▀▀ ▀▀▀▀ ▄▀ ▄▀ ▄▀▄▄▄▄ ▄▀▄▄▄▄▀
█ ▐ █ █ █ ▐ █ ▐
▐ ▐ ▐ ▐ ▐
_.-^^---....,,--
_-- --_
< >)
| |
\._ _./
```--. . , ; .--'''
| | |
.-=|| | |=-.
`-=#$%&%$#=-'
| ; :|
_____.,-#%&$@%#&#~,._____
""".format(player_name))
good= ("""
..--+++--..
.-' | `-.
+' | `+
' | `
' | `
: | :
: +'|`+ :
. +' | `+ ;
+ +' | `+ +
`. +' | `+ .'
`._ | _.'
`--.._|_..--'
I decided not to destroy the world...
""")
# ///rock, paper, scissors///
# header
print("""
###### ####### ##### # # ###### # ###### ####### ###### ##### ##### ### ##### ##### ####### ###### #####
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # #
###### # # # ### ###### # # ###### ##### ###### ##### # # ##### ##### # # ###### #####
# # # # # # # # ####### # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # ####### ##### # # # # # # ####### # # ##### ##### ### ##### ##### ####### # # #####
""")
maxpoints_rsp = 3 # We can set higher points for win
print("{}, in this first game called rock, paper, scissor the one who scores {} points first wins ".format(player_name,maxpoints_rsp))
# Game mechanics
def player_wins(i,j):
if (i == "rock" and j == "scissors") or (i == "scissors" and j == "paper") or (i == "paper" and j == "rock"):
return True
elif (i == "rock" and j == "paper") or (i == "scissors" and j == "rock") or (i == "paper" and j == "scissors"):
return False
#game body
# Rock-paper-scissors: first to maxpoints_rsp points wins; the computer
# picks uniformly at random each round.
player_point_RPS = 0
computer_points_RSP = 0
# BUG FIX: the original condition `computer_points_RSP or player_point_RPS != maxpoints_rsp`
# parsed as `computer_points_RSP or (...)`, which is always true once the
# computer has a point; only the inner breaks ended the loop. State the
# intended "loop until someone reaches maxpoints_rsp" directly.
while computer_points_RSP != maxpoints_rsp and player_point_RPS != maxpoints_rsp:
    player_choice= input("Comon choose rock, paper or scissors: ")
    rsp= "rock","paper","scissors"
    computer_choice = random.choice(rsp)
    # BUG FIX: the original re-prompted only once, so a second invalid entry
    # silently scored nothing. Keep asking until the choice is valid.
    while player_choice not in rsp:
        player_choice= input("You can choose only choose rock, paper or scissors: ")
    if player_wins(player_choice,computer_choice) is True:
        player_point_RPS += 1
        print("I choose {} so point for you.".format(computer_choice))
    if player_wins (player_choice,computer_choice) is False:
        computer_points_RSP += 1
        print("{} beat {} so point for me.".format(computer_choice,player_choice))
    if player_choice == computer_choice:
        # Tie: no points awarded.
        print("Hey {}, you've red my mind I choose {} to.".format(player_name,computer_choice))
    print("Player points: {} ".format(player_point_RPS))
    print("Computer point: {}".format(computer_points_RSP))
    print()
    if player_point_RPS == maxpoints_rsp:
        print("GRHHHH How it’s possible that you beat me?!")
        break
    if computer_points_RSP == maxpoints_rsp:
        # Losing any game ends the program.
        print(game_over)
        exit()
# ///Card game///
# header
print("""
 .------..------..------..------..------..------..------..------.
 |C.--. ||A.--. ||R.--. ||D.--. ||W.--. ||A.--. ||R.--. ||S.--. |
 | :/\: || (\/) || :(): || :/\: || :/\: || (\/) || :(): || :/\: |
 | :\/: || :\/: || ()() || (__) || :\/: || :\/: || ()() || :\/: |
 | '--'C|| '--'A|| '--'R|| '--'D|| '--'W|| '--'A|| '--'R|| '--'S|
 `------'`------'`------'`------'`------'`------'`------'`------'
""")
win_points = 3 # Setting points to win
print("Okay beginner's luck. {} let’s play another game called CARD WARS.\n"
      "The rules are easy- who have higher card get a point('A' is the highest card). The one who get the first {} points is winner. Of course, it will be me.".format(player_name,win_points))
# Game mechanics
# Card ranks in ascending order; comparisons below use list indexes.
cards = ["6","7","8","9","10","J","Q","K","A"] # Wee can change type of cards
def flip_coin(): # In case of equal points
    """Sudden-death tie-breaker: the player calls a coin flip.

    A correct call means the player wins the card game; a wrong call prints
    the game-over banner and exits the program.
    """
    player_coin = input("It's a draw, let's flip a coin. What's your choice tail or head: ").lower()
    coin = "tail","head"
    coin_side = random.choice(coin)
    # BUG FIX: the original re-prompted only once; a second invalid entry fell
    # through without announcing any result. Keep asking until input is valid.
    while player_coin not in coin:
        player_coin= input("please choose betwen tail or head: ").lower()
    if player_coin==coin_side:
        print("Yeah good guees It's {}, player wins.".format(coin_side))
    else:
        print("It's {}, I'm a winner.".format(coin_side))
        print(game_over)
        exit()
def shuffling_cards(r, deck=None):
    """Deal a hand of ``r`` cards drawn independently (with replacement).

    Args:
        r: number of cards to deal.
        deck: sequence to draw from; defaults to the module-level ``cards``
            (backward compatible with the original one-argument call).

    Returns:
        list[str]: the dealt hand.
    """
    if deck is None:
        deck = cards
    # One random.choice per card, exactly as the original loop did, but
    # without the quadratic list re-concatenation.
    return [str(random.choice(deck)) for _ in range(r)]
#game body
# CARD WARS: deal five random cards each; the higher card (by rank index in
# `cards`) scores a point each round, until someone reaches win_points or
# the hands run out.
player_point_CRD = 0
computer_points_CRD = 0
players_cards= shuffling_cards(5)
computer_cards= shuffling_cards(5)
# BUG FIX: the original `while computer_points_CRD or player_point_CRD != win_points`
# parsed as `computer_points_CRD or (...)`; express the intended loop guard.
while computer_points_CRD != win_points and player_point_CRD != win_points:
    computer_choice_CRD = str(random.choice(computer_cards))
    player_choice=input("Please choose one card from your hand {}:".format(players_cards)).upper()
    # BUG FIX: the original re-prompted only once; a second invalid entry
    # crashed in cards.index() below. Keep asking until the card is in hand.
    while player_choice not in players_cards:
        player_choice=input("Again check your card and choose one {}:".format(players_cards)).upper()
    if cards.index(computer_choice_CRD) > cards.index(player_choice):
        print("{} is higher than {} that means point for me".format(computer_choice_CRD,player_choice))
        computer_points_CRD+=1
    if cards.index(computer_choice_CRD) < cards.index(player_choice):
        print("Okay I have {}, point for you".format(computer_choice_CRD))
        player_point_CRD+=1
    if cards.index(computer_choice_CRD) == cards.index(player_choice):
        print("My is also {}, boring no one get a point".format(computer_choice_CRD))
    # Both played cards leave the hands.
    players_cards.remove(player_choice)
    computer_cards.remove(computer_choice_CRD)
    print("{}'s points:{} and computer:{}".format(player_name,player_point_CRD,computer_points_CRD))
    print()
    if not players_cards:
        # Hands exhausted: decide on points, or flip a coin on a draw.
        if player_point_CRD > computer_points_CRD:
            print("Hmm you won another game. Maybe I have to take it seriously")
            break
        if computer_points_CRD > player_point_CRD:
            print(game_over)
            exit()
        if computer_points_CRD == player_point_CRD:
            flip_coin()
            break
    if player_point_CRD == win_points:
        print("Hmm you won another game. Maybe I have to take it seriously")
        break
    if computer_points_CRD == win_points:
        print(game_over)
        exit()
# ///Guess the number///
# header
print("""
 _   _ _    _ __  __ ____  ______ _____   ____   _____
| \ | | |  | |  \/  |  _ \|  ____|  __ \ / __ \ / ____|
|  \| | |  | | \  / | |_) | |__  | |__) | |  | | (___
| . ` | |  | | |\/| |  _ <|  __| |  _  /| |  | |\___ \
| |\  | |__| | |  | | |_) | |____| | \ \| |__| |____) |
|_| \_|\____/|_|  |_|____/|______|_|  \_\\____/|_____/
""")
print()
print ( "My systems do not report any armageddon, What a pitty. But it's time to change that"
        "Guess the number I am thinking of betwen 1 and 20. But be careful {} you only have 4 attempts". format(player_name))
print()
# Game mechanics
guesses = 3  # re-prompts after the free first guess: 1 + 3 = 4 attempts total
# BUG FIX: randrange(1, 20) never produces 20, contradicting the prompt
# "betwen 1 and 20"; randint's upper bound is inclusive.
number = random.randint(1, 20)
player_guess= int(input("Guess the number I am thinking of: "))
#game body
if player_guess == number:
    print("You are a wizard {}, you guess it for the first time. ".format(player_name))
while player_guess != number:
    if player_guess < number:
        player_guess= int(input("Don't be humble it's higher. C'mon guess again: "))
        guesses -= 1
    if player_guess > number:
        # NOTE(review): a too-low then too-high pair can consume two guesses
        # in one loop pass -- preserved from the original; confirm intent.
        player_guess= int(input("No no no, go lower. C'mon guess again: "))
        guesses -= 1
    if guesses == 0 and player_guess != number:
        print("My number was {}".format(number))
        print(game_over)
        quit()
    if player_guess == number and guesses<=1:
        print("IT was closed but you save the world for now {}.".format(player_name))
        break
    if player_guess == number and guesses>1:
        # Dropped the original's no-op .format(guesses): the string has no
        # placeholder.
        print("That is correct. I think I chose very easy game for you.")
        break
# ///Player can decides if he can end the game or continue///
print("Okay that wasn't bad. Look I have a little proposition for you before our last game. You can quit now and I will not destroy the world (maybe)\n"
      "On the other hand you can take a risk and earn unknow reaward")
print()
player_choose = input("So what's you choice? Would you like to continue? Choose yes or no : ").lower()
choice= "yes","no"
# BUG FIX: the original tested for "no" before validating and never
# re-checked the re-entered answer, so any invalid reply just continued.
while player_choose not in choice:
    player_choose = input("I know that it can be hard for you bud just write me yes or no: ").lower()
if player_choose == "no":
    # Quitting is a gamble: the computer randomly spares or destroys the world.
    consequences = good,game_over
    computer_chose = random.choice(consequences)
    print(computer_chose)
    quit()
# ///Player decided to continue///
# ///Dices///
# header
print("""
   ▄████████    ▄████████    ▄████████  ▄███████▄  ▄██   ▄      ████████▄   ▄█   ▄████████    ▄████████    ▄████████
  ███    ███   ███    ███   ███    ███ ██▀     ▄██ ███   ██▄    ███   ▀███ ███  ███    ███   ███    ███   ███    ███
  ███    █▀    ███    ███   ███    ███       ▄███▀ ███▄▄▄███    ███    ███ ███▌ ███    █▀    ███    █▀    ███    █▀
  ███         ▄███▄▄▄▄██▀   ███    ███  ▀█▀▄███▀▄▄ ▀▀▀▀▀▀███    ███    ███ ███▌ ███          ▄███▄▄▄      ███
  ███        ▀▀███▀▀▀▀▀   ▀███████████  ▄███▀   ▀  ▄██   ███    ███    ███ ███▌ ███         ▀▀███▀▀▀    ▀███████████
  ███    █▄  ▀███████████   ███    ███ ▄███▀       ███   ███    ███    ███ ███  ███    █▄    ███    █▄           ███
  ███    ███   ███    ███   ███    ███ ███▄     ▄█ ███   ███    ███   ▄███ ███  ███    ███   ███    ███    ▄█    ███
  ████████▀    ███    ███   ███    █▀   ▀████████▀  ▀█████▀     ████████▀  █▀   ████████▀    ██████████  ▄████████▀
               ███    ███
""")
max_score = 5 # Score to winn the game
print("I have to admit you are brave. The last {} points wil decides the fate of humanity\n Let's get this over.".format(max_score))
# Game mechanics
# (Removed: unused `rolling_dice` and pre-rolled totals -- both totals are
# recomputed from fresh rolls every round below.)
players_point = 0
cumpoters_point = 0
#game body
# BUG FIX: precedence again -- loop while NEITHER side has reached max_score.
while players_point != max_score and cumpoters_point != max_score:
    print()
    input("press enter to roll the dice")
    player_roll1 = random.randint(1,6)
    player_roll2 = random.randint(1,6)
    player_roll3 = random.randint(1,6)
    player_total = player_roll1+player_roll2+player_roll3
    computer_roll1 = random.randint(1,6)
    computer_roll2 = random.randint(1,6)
    computer_roll3 = random.randint(1,6)
    computer_total = computer_roll1+computer_roll2+computer_roll3
    print()
    print("{0} you roll({1}, {2} and {3} = {7} total ) and computer has ({4}, {5} and {6} = {8} total )".format(player_name,player_roll1,player_roll2, player_roll3, computer_roll1, computer_roll2,computer_roll3,player_total,computer_total))
    if player_total > computer_total:
        players_point += 1
    elif player_total == computer_total:
        # BUG FIX: the original `players_point+0` was a no-op expression;
        # a tie simply awards no points.
        pass
    else:
        cumpoters_point+=1
    print("{}'s points: {} and computer: {}".format(player_name,players_point,cumpoters_point))
    if players_point == max_score:
        print(good)
        print("Not becouse you won. I'm just not in the mood to destry the world. however promis is promis here is your reward:")
        currency = "€","£","৳","ƒ","₹","₡","Kč","₣","₪","¥",".ރ","₮","₲","₽","฿","Ft"
        # Random seven-digit reward with a random currency symbol.
        print("{0},{1}{2}{3},{4}{5}{6} {7}".format(random.randint(1,5),random.randint(0,9),random.randint(0,9),random.randint(0,9),random.randint(0,9),
                                                   random.randint(0,9),random.randint(0,9),random.choice(currency)))
        quit()
    if cumpoters_point == max_score:
        print(game_over)
        quit()
flinteller/unit_eleven | refs/heads/master | import pygame
import random
class Paddle(pygame.sprite.Sprite):
    """The player-controlled paddle at the bottom of the breakout window."""

    def __init__(self, main_surface, color, height, width):
        """
        Build the paddle's drawable surface.

        :param main_surface: the game's display surface (used for movement bounds)
        :param color: RGB fill colour of the paddle
        :param height: paddle height in pixels
        :param width: paddle width in pixels
        """
        # initialize sprite super class
        super().__init__()
        self.main_surface = main_surface
        self.color = color
        self.height = height
        self.width = width
        # The sprite's surface and its position rectangle.
        self.image = pygame.Surface((width, height))
        self.rect = self.image.get_rect()
        self.image.fill(color)

    def move_left(self):
        """Move the paddle 7px left, clamping it to the window's left edge."""
        self.rect.x = self.rect.x - 7
        # BUG FIX: the original snapped x to 1; clamp flush to the edge.
        if self.rect.left < 0:
            self.rect.left = 0

    def move_right(self):
        """Move the paddle 7px right, clamping it to the window's right edge."""
        self.rect.x = self.rect.x + 7
        # BUG FIX: the original snapped x to the hard-coded 335, which is only
        # correct for a 60px paddle in a 400px window; after resize() the
        # paddle could stop short or hang off-screen. Clamp by the real
        # surface width instead.
        right_edge = self.main_surface.get_width()
        if self.rect.right > right_edge:
            self.rect.right = right_edge

    def resize(self):
        """Give the paddle a new random width (20-100px), keeping its colour."""
        self.width = random.randint(20, 100)
        self.image = pygame.Surface((self.width, self.height))
        self.image.fill(self.color)
| Python | 59 | 26.966103 | 87 | /paddle.py | 0.574713 | 0.565638 |
flinteller/unit_eleven | refs/heads/master | import pygame
class Ball(pygame.sprite.Sprite):
    """The bouncing ball sprite, rendered from 'chrome copy.png'."""

    def __init__(self, color, window_width, window_height, radius):
        """
        :param color: nominal ball colour (unused -- the sprite uses an
            image instead of a drawn circle)
        :param window_width: playfield width, used for wall bounces
        :param window_height: playfield height
        :param radius: nominal radius (unused, see above)
        """
        # initialize sprite super class
        super().__init__()
        self.color = color
        self.radius = radius
        self.window_width = window_width
        self.window_height = window_height
        self.speedx = 6   # horizontal velocity, px per frame
        self.speedy = 8   # vertical velocity, px per frame
        # The ball is rendered from an image file rather than a drawn circle.
        self.image = pygame.image.load("chrome copy.png")
        self.rect = self.image.get_rect()

    def move(self):
        """Advance one frame and bounce off the top, left and right walls."""
        self.rect.top += self.speedy
        self.rect.left += self.speedx
        if self.rect.top < 0:
            self.speedy = -self.speedy
        elif self.rect.left < 0 or self.rect.right > self.window_width:
            self.speedx = -self.speedx

    def _bounce_with_sound(self, sound_file):
        """Reverse vertical direction and play the given bounce sound.

        NOTE(review): re-initialising pygame/mixer on every collision is
        wasteful but preserved from the original code; init is idempotent.
        """
        # Dropped the original's no-op `self.speedx = self.speedx`.
        self.speedy = -self.speedy
        pygame.mixer.init()
        pygame.init()
        pygame.mixer.Sound(sound_file).play()

    def collide(self, paddle_group, brick_group):
        """
        Resolve collisions: destroy any brick the ball touches and bounce
        off the paddle, playing the matching sound effect for each.

        :param paddle_group: sprite group holding the paddle
        :param brick_group: sprite group holding the remaining bricks
        """
        # spritecollide(..., True) removes hit bricks from the group.
        if pygame.sprite.spritecollide(self, brick_group, True):
            self._bounce_with_sound("Bleep-sound.wav")
        if pygame.sprite.spritecollide(self, paddle_group, False):
            self._bounce_with_sound("Paddle_bounce_sound.wav")
| Python | 58 | 32.517242 | 112 | /ball.py | 0.581921 | 0.579866 |
flinteller/unit_eleven | refs/heads/master | import pygame
import sys
from pygame.locals import *
import brick
import ball
import paddle
def main():
    """Run the 'AD Blocker' breakout game.

    Builds the brick wall, paddle and ball, then loops: handle input, move
    the ball, resolve collisions, track lives, and detect win/loss.
    """
    # Constants that will be used in the program
    APPLICATION_WIDTH = 400
    APPLICATION_HEIGHT = 600
    PADDLE_Y_OFFSET = 30
    BRICKS_PER_ROW = 10
    BRICK_SEP = 4  # The space between each brick
    BRICK_Y_OFFSET = 70
    BRICK_WIDTH = (APPLICATION_WIDTH - (BRICKS_PER_ROW -1) * BRICK_SEP) / BRICKS_PER_ROW
    BRICK_HEIGHT = 8
    PADDLE_HEIGHT = 10
    PADDLE_WIDTH = 60
    RADIUS_OF_BALL = 10
    NUM_TURNS = 3
    # Sets up the colors
    BLUE = (30, 144, 255)
    RED = (255, 48, 48)
    YELLOW = (255, 215, 0)
    GREEN =(0, 201, 87)
    WHITE = (255, 255, 255)
    pygame.init()
    # NOTE(review): set_mode's 2nd/3rd args are (flags, depth); 32 here is
    # passed as a flags value, not a bit depth -- confirm intent.
    main_window = pygame.display.set_mode((APPLICATION_WIDTH, APPLICATION_HEIGHT), 32, 0)
    pygame.display.set_caption("AD Blocker")
    pygame.display.update()
    # Step 1: Use loops to draw the rows of bricks. The top row of bricks should be 70 pixels away from the top of
    # the screen (BRICK_Y_OFFSET)
    brick_group = pygame.sprite.Group()
    paddle_group = pygame.sprite.Group()
    x_pos = 0
    y_pos = BRICK_Y_OFFSET
    # Places bricks with correct colors: two rows per colour, ten bricks per row.
    colors = [BLUE, RED, YELLOW, BLUE, GREEN]
    for color in colors:
        for y in range(2):
            for z in range(10):
                my_brick = brick.Brick(BRICK_WIDTH, BRICK_HEIGHT, color)
                brick_group.add(my_brick)
                my_brick.rect.y = y_pos
                my_brick.rect.x = x_pos
                main_window.blit(my_brick.image, my_brick.rect)
                x_pos += (BRICK_SEP + BRICK_WIDTH)
            x_pos = 0
            y_pos += BRICK_HEIGHT + BRICK_SEP
    # Places ball and passes it parameters
    my_ball = ball.Ball(RED, APPLICATION_WIDTH, APPLICATION_HEIGHT, RADIUS_OF_BALL)
    my_ball.rect.x = 200
    my_ball.rect.y = 200
    # Places paddle and passes it parameters
    my_paddle = paddle.Paddle(main_window, GREEN, PADDLE_HEIGHT, PADDLE_WIDTH)
    paddle_group.add(my_paddle)
    my_paddle.rect.x = APPLICATION_WIDTH / 2
    my_paddle.rect.y = APPLICATION_HEIGHT - PADDLE_Y_OFFSET
    pygame.display.update()
    # Event detection loop
    while True:
        for event in pygame.event.get():
            # BUG FIX: the original compared the event object itself to QUIT
            # (`event == QUIT`), which is never true, so the window could not
            # be closed. Compare the event's type instead.
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        if pygame.key.get_pressed()[K_LEFT]:
            my_paddle.move_left()
        if pygame.key.get_pressed()[K_RIGHT]:
            my_paddle.move_right()
        if pygame.key.get_pressed()[K_SPACE]:
            my_paddle.resize()
        # Ball fell past the paddle: lose a life, play a sound, reset the ball.
        if my_ball.rect.bottom > 590:
            NUM_TURNS -= 1
            pygame.mixer.init()
            pygame.init()
            sound = pygame.mixer.Sound("Error_sound.wav")
            sound.play()
            my_ball.rect.x = 200
            my_ball.rect.y = 20
        main_window.fill(WHITE)
        # Prints number of lives
        mouse_font = pygame.font.SysFont("Verdana", 32)
        mouse_label = mouse_font.render("Lives: " + str(NUM_TURNS), 1, BLUE)
        main_window.blit(mouse_label, (30, 30))
        pygame.display.update()
        # Prints message if you win
        if len(brick_group) == 0:
            mouse_font = pygame.font.SysFont("Verdana", 32)
            mouse_label = mouse_font.render("You Win!!!", 1, BLUE)
            main_window.blit(mouse_label, (135, 200))
            pygame.mixer.init()
            pygame.init()
            sound = pygame.mixer.Sound("Win_sound.wav")
            sound.play()
            pygame.display.update()
        if len(brick_group) == 0:
            pygame.time.wait(2000)
            break
        # Prints message if you loose
        if NUM_TURNS == 1 and my_ball.rect.bottom > 585:
            mouse_font = pygame.font.SysFont("Verdana", 32)
            mouse_label = mouse_font.render("Game Over", 1, RED)
            main_window.blit(mouse_label, (135, 200))
            pygame.mixer.init()
            pygame.init()
            sound = pygame.mixer.Sound("Game_over_sound.wav")
            sound.play()
            pygame.display.update()
        if NUM_TURNS == 0:
            pygame.time.wait(2000)
            break
        # Moves and blits ball
        my_ball.move()
        main_window.blit(my_ball.image, my_ball.rect)
        # NOTE(review): this extra life deduction looks unreachable in
        # practice (the >590 check above resets the ball first) -- confirm.
        if my_ball.rect.bottom > my_ball.window_height:
            NUM_TURNS -= 1
        # Blits each brick
        for a_brick in brick_group:
            main_window.blit(a_brick.image, a_brick.rect)
        # Calls collision function
        my_ball.collide(paddle_group, brick_group)
        # Blits paddle
        main_window.blit(my_paddle.image, my_paddle.rect)
        pygame.display.update()
# Idiom fix: guard the entry point so importing this module (e.g. from tests)
# does not launch the game; running it as a script behaves exactly as before.
if __name__ == "__main__":
    main()
| Python | 149 | 30.523489 | 114 | /breakout.py | 0.572493 | 0.545455 |
rafatmyo/Definition-Creator | refs/heads/master | import re
def main():
    """Interactively turn pasted articles into one-line definitions.

    Reads articles from stdin in a loop, strips parenthesised/bracketed
    asides (pronunciation, dates, etc.), splits the text around whichever
    of ' is ' / ' was ' occurs first, and hands "term: definition." to
    handle_phrase(). Exit with Ctrl-C.
    """
    try:
        print('Exit with Ctrl-C\n')
        while True:
            print('Please enter your article:')
            input_text = input()
            # Remove pronounciation, language origin, date of birth, etc.
            simplified_text = re.sub(r'[\(\[].*?[\)\]]', '', input_text)
            # Partition around both keywords and keep whichever occurs first.
            # An absent keyword leaves the whole text before the separator,
            # so it always loses the "shorter prefix" comparison; the two
            # prefixes are equal only when neither keyword was found.
            # (Deduplicates the original's two near-identical branches.)
            is_split = simplified_text.partition(' is ')
            was_split = simplified_text.partition(' was ')
            split = is_split if len(is_split[0]) < len(was_split[0]) else was_split
            if split[1]:
                # Strip surrounding whitespace from the term and its definition
                term = split[0].strip()
                definition = split[2].partition('.')[0].strip()
                # Process the completed term and definition
                handle_phrase(term + ': ' + definition + '.')
            else:
                # Handle incomplete entry
                print("\nA definition was not created because an 'is' or 'was' could not be found.\n")
    # Allow loop to end gracefully
    except KeyboardInterrupt:
        pass
    # For debugging
    except Exception as e:
        print(e)
# Process the final phrase
def handle_phrase(full_phrase):
print('\nDefinition created:')
print(full_phrase + '\n')
with open('definitions.txt', 'a') as text_file:
text_file.write(full_phrase + '\n\n')
# Entry point: run the interactive loop only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
thejakeboyd/SEproject | refs/heads/main | from tkinter import *
import random
totalcustomers = 0   # running customer counter (currently unused below)
# Seat numbers 1..120: 20 rows (A-T) of 6 seats each.
seats = list(range(1, 121))
seatstaken = []      # seat numbers already assigned on this flight
# Business-class-only seats: the last two rows, seats 109-120.
# BUG FIX: the original literal read [..., 117, 119, 119, 120] -- seat 118
# was missing and 119 was duplicated, so non-business passengers could be
# seated in 118.
bonly = list(range(109, 121))
# Row letters A..T for the 20 rows (index = seat_index // 6).
alph = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T']
customers = []       # one entry appended per ticket issued
satisfaction = []    # per-ticket satisfaction scores, averaged in the report
def family():
    """Open the booking window for a family group (up to five passengers)."""
    familywin = Tk()
    familywin.configure(bg='grey')
    familywin.title('Capital Flights')
    Label(familywin, text='Family', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
    prompts = (
        'How many people are in your group?: ',
        'Enter First Passenger Name: ',
        'Enter Second Passenger Name: ',
        'Enter Third Passenger Name: ',
        'Enter Fourth Passenger Name: ',
        'Enter Fifth Passenger Name: ',
    )
    for grid_row, prompt in enumerate(prompts, start=1):
        Label(familywin, text=prompt, bg='grey', font=('Arial', 35)).grid(row=grid_row, column=0)
    # One entry per prompt: group size first, then the five passenger names.
    fields = []
    for grid_row in range(1, 7):
        field = Entry(familywin)
        field.grid(row=grid_row, column=1)
        fields.append(field)
    numgroup, familyname1, familyname2, familyname3, familyname4, familyname5 = fields
    Label(familywin, text="Leave Name blank if N/A", bg='white', font=('Arial', 20)).grid(row=7, column=0)
    Button(familywin, text='SUBMIT',
           command=lambda: ticket3(numgroup, familyname1, familyname2, familyname3,
                                   familyname4, familyname5, familywin)).grid(row=8, column=1)
def tourist():
    """Open the booking window for a pair of tourists."""
    touristwin = Tk()
    touristwin.configure(bg='grey')
    touristwin.title('Capital Flights')
    Label(touristwin, text='Tourists', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
    # A label/entry pair per passenger on grid rows 2 and 3.
    name_fields = []
    for grid_row, prompt in ((2, 'Enter First Passenger Name: '),
                             (3, 'Enter Second Passenger Name: ')):
        Label(touristwin, text=prompt, bg='grey', font=('Arial', 35)).grid(row=grid_row, column=0)
        field = Entry(touristwin)
        field.grid(row=grid_row, column=1)
        name_fields.append(field)
    touristname1, touristname2 = name_fields
    Button(touristwin, text='SUBMIT',
           command=lambda: ticket2(touristname1, touristname2, touristwin)).grid(row=4, column=1)
def business():
    """Open the booking window for a single business-class customer."""
    businesswin = Tk()
    businesswin.configure(bg='grey')
    businesswin.title('Capital Flights')
    header_font = ('Arial', 65)
    prompt_font = ('Arial', 35)
    Label(businesswin, text='Business Customer', bg='grey', font=header_font).grid(row=0, column=0)
    Label(businesswin, text='Enter your Name: ', bg='grey', font=prompt_font).grid(row=2, column=0)
    name_field = Entry(businesswin)
    name_field.grid(row=2, column=1)
    # SUBMIT issues the ticket and closes this window.
    Button(businesswin, text='SUBMIT',
           command=lambda: ticket(name_field, businesswin)).grid(row=3, column=1)
def manager3(manwin2):
    """Close the manager dashboard and show the end-of-flight report.

    Displays the total number of tickets issued and the average of all
    recorded satisfaction scores.

    :param manwin2: the dashboard window to destroy
    """
    manwin2.destroy()
    manwin3 = Tk()
    manwin3.title("FLIGHT REPORT")
    Label(manwin3, text='TOTAL CUSTOMERS: ', bg='grey').grid(row=0, column=0)
    Label(manwin3, text=len(seatstaken)).grid(row=1, column=0)
    Label(manwin3, text='AVG SATISFACTION: ', bg='grey').grid(row=2, column=0)
    # BUG FIX: the original divided unconditionally and raised
    # ZeroDivisionError when the flight had no customers.
    final = sum(satisfaction) / len(satisfaction) if satisfaction else 0
    Label(manwin3, text=final).grid(row=3, column=0)
def manager2(manwin):
    """Replace the login window with the manager dashboard.

    Shows the seat map (row letters A-T with their six seat numbers),
    the list of taken seats, the customer count, and a button that ends
    the flight and generates the report.

    :param manwin: the login window to destroy
    """
    manwin.destroy()
    manwin2 = Tk()
    manwin2.configure(bg='grey')
    manwin2.title('MANAGER SCREEN')
    Label(manwin2, text='ROW', bg='grey').grid(row=0, column=0)
    # Row letters A..T down column 0 (idiomatic enumerate replaces the
    # original manual index-counting while loops).
    for grid_row, letter in enumerate(alph, start=1):
        Label(manwin2, text=letter).grid(row=grid_row, column=0)
    # Six seat numbers per row down column 1.
    for grid_row in range(1, 21):
        start = (grid_row - 1) * 6
        Label(manwin2, text=seats[start:start + 6]).grid(row=grid_row, column=1)
    Label(manwin2, text='SEATS TAKEN: ', bg='grey').grid(row=0, column=3)
    # BUG FIX: the original created a second 'ROW' header label in the same
    # cell (row=0, column=0) here; one label is enough.
    Label(manwin2, text=seatstaken).grid(row=2, column=3)
    Label(manwin2, text='TOTAL CUSTOMERS: ', bg='grey').grid(row=4, column=3)
    Label(manwin2, text=len(seatstaken)).grid(row=6, column=3)
    Button(manwin2, text='FINISH FLIGHT // GENERATE REPORT',
           command=lambda: manager3(manwin2)).grid(row=8, column=3)
def manager():
    """Open the manager login window.

    NOTE(review): the LOGIN button opens the dashboard without checking
    the entered credentials -- confirm this placeholder is intended.
    """
    manwin = Tk()
    manwin.title("MANAGER LOGIN")
    Label(manwin, text='User Name').grid(row=0, column=0)
    username = StringVar()
    # Idiom fix: .grid() returns None, so the original
    # `usernameEntry = Entry(...).grid(...)` bindings stored nothing
    # useful; the dead assignments are dropped.
    Entry(manwin, textvariable=username).grid(row=0, column=1)
    Label(manwin, text='Password').grid(row=1, column=0)
    password = StringVar()
    Entry(manwin, textvariable=password, show='*').grid(row=1, column=1)
    Button(manwin, text='LOGIN', command=lambda: manager2(manwin)).grid(row=2, column=1)
def customer():
    """Open the customer menu: choose business, tourist, or family booking."""
    customerwin = Tk()
    customerwin.configure(bg='grey')
    customerwin.title("Capital Flights")
    Label(customerwin, text='Customer Login', bg='grey', font=('Arial', 65)).grid(row=2, column=0)
    # One button per booking flow; passing the handler directly is
    # equivalent to the original zero-argument lambdas.
    for grid_row, (caption, handler) in enumerate(
            (('Business', business), ('Tourist', tourist), ('Family', family)), start=3):
        Button(customerwin, text=caption, command=handler).grid(row=grid_row, column=0)
def ticket(businessname, businesswin):
    # Issue a business-class ticket: close the booking window, assign the
    # first free business seat (109-120), and display the ticket window.
    # Mutates the module-level seatstaken/customers/satisfaction lists.
    ticketwin = Tk()
    ticketwin.configure(bg='grey')
    ticketwin.title("TICKET")
    Label(ticketwin, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
    Label(ticketwin, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
    Label(ticketwin, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
    Label(ticketwin, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
    customers.append(1)  # one entry per issued ticket
    name = businessname.get()
    businesswin.destroy()
    Label(ticketwin, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
    try:
        # Linear scan from index 108 (seat 109) for the first free
        # business-row seat; SI is a found-flag (0 = still searching).
        var = 108
        SI = 0
        while SI == 0:
            if seats[var] not in seatstaken:
                seatstaken.append(seats[var])
                SI = 1
            else:
                var += 1
        # Row letter: 6 seats per row, rows labelled A..T.
        alphI = (var) // 6
        row = alph[alphI]
        print(row, seats[var])  # console trace of the assignment
        Label(ticketwin, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
        Label(ticketwin, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
        satisfaction.append(0)  # neutral score: got a business seat
    except IndexError:
        # Business rows full (scan ran off the end of `seats`):
        # fall back to scanning the whole plane from seat 1.
        var = 0
        SI = 0
        while SI == 0:
            if seats[var] not in seatstaken:
                seatstaken.append(seats[var])
                SI = 1
            else:
                var += 1
        alphI = (var) // 6
        row = alph[alphI]
        print(row, seats[var])
        Label(ticketwin, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
        Label(ticketwin, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
        satisfaction.append(-5)  # unhappy: no business seat available
def ticket2(touristname1, touristname2, touristwin):
    # Issue two tourist tickets: each passenger gets a ticket window and a
    # seat found by a column-wise scan (step 6) that skips the
    # business-only seats. Mutates seatstaken/customers/satisfaction.
    ticketwin2 = Tk()
    ticketwin2.configure(bg='grey')
    ticketwin2.title("TICKET")
    Label(ticketwin2, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
    Label(ticketwin2, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
    Label(ticketwin2, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
    Label(ticketwin2, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
    customers.append(1)
    name = touristname1.get()
    Label(ticketwin2, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
    try:
        # First tourist: scan seat column 0 (indexes 0, 6, 12, ...),
        # skipping seats already taken and the business-only rows.
        var = 0
        SI = 0
        while SI == 0:
            if seats[var] not in seatstaken and seats[var] not in bonly:
                seatstaken.append(seats[var])
                SI = 1
            else:
                var += 6
        # Row letter from the seat index: 6 seats per row, rows A..T.
        alphI = (var) // 6
        row = alph[alphI]
        print(row, seats[var])  # console trace of the assignment
        Label(ticketwin2, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
        Label(ticketwin2, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
        satisfaction.append(5)
    except IndexError:
        # Column full: fall back to a plain forward scan from index 5.
        var = 5
        SI = 0
        while SI == 0:
            if seats[var] not in seatstaken:
                seatstaken.append(seats[var])
                SI = 1
            else:
                var += 1
        alphI = (var) // 6
        row = alph[alphI]
        print(row, seats[var])
        Label(ticketwin2, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
        Label(ticketwin2, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
        satisfaction.append(5)
    ticketwin3 = Tk()
    ticketwin3.configure(bg='grey')
    ticketwin3.title("TICKET")
    Label(ticketwin3, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
    Label(ticketwin3, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
    Label(ticketwin3, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
    Label(ticketwin3, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
    customers.append(1)
    name = touristname2.get()
    touristwin.destroy()
    Label(ticketwin3, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
    try:
        # Second tourist: same scan but down seat column 1
        # (indexes 1, 7, 13, ...), so the pair sit in adjacent columns.
        var = 1
        SI = 0
        while SI == 0:
            if seats[var] not in seatstaken and seats[var] not in bonly:
                seatstaken.append(seats[var])
                SI = 1
            else:
                var += 6
        alphI = (var) // 6
        row = alph[alphI]
        print(row, seats[var])
        Label(ticketwin3, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
        Label(ticketwin3, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
        # NOTE(review): the second tourist scores 10 where the first scores
        # 5 -- looks asymmetric; confirm this is intended.
        satisfaction.append(10)
    except IndexError:
        # Column full: fall back to a plain forward scan from index 4.
        var = 4
        SI = 0
        while SI == 0:
            if seats[var] not in seatstaken:
                seatstaken.append(seats[var])
                SI = 1
            else:
                var += 1
        alphI = (var) // 6
        row = alph[alphI]
        print(row, seats[var])
        Label(ticketwin3, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
        Label(ticketwin3, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
        satisfaction.append(10)
def ticket3(numgroup, familyname1, familyname2, familyname3, familyname4, familyname5, familywin):
numgroup = numgroup.get()
if numgroup == '3':
ticketwin4 = Tk()
ticketwin4.configure(bg='grey')
ticketwin4.title("TICKET")
Label(ticketwin4, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
Label(ticketwin4, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
Label(ticketwin4, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
Label(ticketwin4, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
customers.append(1)
name = familyname1.get()
Label(ticketwin4, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
try:
var = 2
SI = 0
while SI == 0:
if seats[var] not in seatstaken and seats[var] not in bonly:
seatstaken.append(seats[var])
SI = 1
else:
var += 6
alphI = (var) // 6
row = alph[alphI]
print(row, seats[var])
Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
satisfaction.append(5)
except IndexError:
var = 3
SI = 0
while SI == 0:
if seats[var] not in seatstaken:
seatstaken.append(seats[var])
SI = 1
else:
var += 1
alphI = (var) // 6
row = alph[alphI]
print(row, seats[var])
Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
satisfaction.append(5)
ticketwin5 = Tk()
ticketwin5.configure(bg='grey')
ticketwin5.title("TICKET")
Label(ticketwin5, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
Label(ticketwin5, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
Label(ticketwin5, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
Label(ticketwin5, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
customers.append(1)
name2 = familyname2.get()
Label(ticketwin5, text=name2, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
try:
var = 3
SI = 0
while SI == 0:
if seats[var] not in seatstaken and seats[var] not in bonly:
seatstaken.append(seats[var])
SI = 1
else:
var += 6
alphI = (var) // 6
row = alph[alphI]
print(row, seats[var])
Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
satisfaction.append(5)
except IndexError:
var = 8
SI = 0
while SI == 0:
if seats[var] not in seatstaken:
seatstaken.append(seats[var])
SI = 1
else:
var += 1
alphI = (var) // 6
row = alph[alphI]
print(row, seats[var])
Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
satisfaction.append(5)
ticketwin6 = Tk()
ticketwin6.configure(bg='grey')
ticketwin6.title("TICKET")
Label(ticketwin6, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
Label(ticketwin6, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
Label(ticketwin6, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
Label(ticketwin6, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
customers.append(1)
name3 = familyname3.get()
familywin.destroy()
Label(ticketwin6, text=name3, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
try:
var = 8
SI = 0
while SI == 0:
if seats[var] not in seatstaken and seats[var] not in bonly:
seatstaken.append(seats[var])
SI = 1
else:
var += 6
alphI = (var) // 6
row = alph[alphI]
print(row, seats[var])
Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
except IndexError:
var = 9
SI = 0
while SI == 0:
if seats[var] not in seatstaken:
seatstaken.append(seats[var])
SI = 1
else:
var += 1
alphI = (var) // 6
row = alph[alphI]
print(row, seats[var])
Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
satisfaction.append(10)
        # Party of four: one ticket window per passenger (ticketwin4..7).
        # Each seat search walks `seats` from a hard-coded start index in
        # steps of 6 (presumably one row per step), skipping taken seats and
        # business-only seats in `bonly`; on IndexError it retries with a
        # linear scan that drops the `bonly` restriction.
        if numgroup == '4':
            # Passenger 1 (familyname1) -> ticketwin4, preferred start index 2.
            ticketwin4 = Tk()
            ticketwin4.configure(bg='grey')
            ticketwin4.title("TICKET")
            Label(ticketwin4, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin4, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin4, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin4, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name = familyname1.get()
            Label(ticketwin4, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 2
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 3
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Passenger 2 (familyname2) -> ticketwin5, preferred start index 3.
            ticketwin5 = Tk()
            ticketwin5.configure(bg='grey')
            ticketwin5.title("TICKET")
            Label(ticketwin5, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin5, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin5, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin5, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name2 = familyname2.get()
            Label(ticketwin5, text=name2, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 3
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 8
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Passenger 3 (familyname3) -> ticketwin6, preferred start index 8.
            ticketwin6 = Tk()
            ticketwin6.configure(bg='grey')
            ticketwin6.title("TICKET")
            Label(ticketwin6, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin6, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin6, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin6, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name3 = familyname3.get()
            Label(ticketwin6, text=name3, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 8
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 9
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Passenger 4 (familyname4) -> ticketwin7; the data-entry window
            # is destroyed here because this is the last passenger.
            ticketwin7 = Tk()
            ticketwin7.configure(bg='grey')
            ticketwin7.title("TICKET")
            Label(ticketwin7, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin7, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin7, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin7, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name4 = familyname4.get()
            familywin.destroy()
            Label(ticketwin7, text=name4, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 9
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 14
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Extra group-level satisfaction score on top of the per-seat ones.
            satisfaction.append(10)
        # Party of five: one ticket window per passenger (ticketwin4..8).
        # Same seat-search pattern as the other group sizes: step-6 scan from
        # a hard-coded index avoiding `seatstaken` and `bonly`, with a linear
        # fallback on IndexError that ignores `bonly`.
        if numgroup == '5':
            # Passenger 1 (familyname1) -> ticketwin4, preferred start index 2.
            ticketwin4 = Tk()
            ticketwin4.configure(bg='grey')
            ticketwin4.title("TICKET")
            Label(ticketwin4, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin4, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin4, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin4, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name = familyname1.get()
            Label(ticketwin4, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 2
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 3
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Passenger 2 (familyname2) -> ticketwin5, preferred start index 3.
            ticketwin5 = Tk()
            ticketwin5.configure(bg='grey')
            ticketwin5.title("TICKET")
            Label(ticketwin5, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin5, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin5, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin5, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name2 = familyname2.get()
            Label(ticketwin5, text=name2, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 3
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 8
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Passenger 3 (familyname3) -> ticketwin6, preferred start index 8.
            ticketwin6 = Tk()
            ticketwin6.configure(bg='grey')
            ticketwin6.title("TICKET")
            Label(ticketwin6, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin6, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin6, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin6, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name3 = familyname3.get()
            Label(ticketwin6, text=name3, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 8
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 9
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Passenger 4 (familyname4) -> ticketwin7, preferred start index 9.
            ticketwin7 = Tk()
            ticketwin7.configure(bg='grey')
            ticketwin7.title("TICKET")
            Label(ticketwin7, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin7, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin7, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin7, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name4 = familyname4.get()
            Label(ticketwin7, text=name4, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 9
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            except IndexError:
                var = 14
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
                satisfaction.append(5)
            # Passenger 5 (familyname5) -> ticketwin8; the data-entry window is
            # destroyed on this last passenger.  NOTE(review): unlike passengers
            # 1-4 no per-seat satisfaction score is appended here - confirm
            # whether that is intentional.
            ticketwin8 = Tk()
            ticketwin8.configure(bg='grey')
            ticketwin8.title("TICKET")
            Label(ticketwin8, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)
            Label(ticketwin8, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)
            Label(ticketwin8, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)
            Label(ticketwin8, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)
            customers.append(1)
            name5 = familyname5.get()
            familywin.destroy()
            Label(ticketwin8, text=name5, bg='grey', font=('Arial', 35)).grid(row=1, column=1)
            try:
                var = 13
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken and seats[var] not in bonly:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 6
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin8, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin8, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
            except IndexError:
                var = 14
                SI = 0
                while SI == 0:
                    if seats[var] not in seatstaken:
                        seatstaken.append(seats[var])
                        SI = 1
                    else:
                        var += 1
                alphI = (var) // 6
                row = alph[alphI]
                print(row, seats[var])
                Label(ticketwin8, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)
                Label(ticketwin8, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)
            # Extra group-level satisfaction score on top of the per-seat ones.
            satisfaction.append(10)
# Top-level main menu: entry point offering the customer and manager flows.
mainroot = Tk()
mainroot.title("Capital Flights")
mainroot.configure(bg='grey')
Label(mainroot, text='Capital Flights', bg='grey', font=('Arial', 80)).grid(row=2, column=0)
# Pass the callbacks directly; the original wrapped them in no-op lambdas.
Button(mainroot, text='Customer', command=customer).grid(row=3, column=0)
Button(mainroot, text='Manager', command=manager).grid(row=6, column=0)
mainroot.mainloop()  # blocks until the window is closed
sudheermouni/NeckTie | refs/heads/main | from django.db import models
from .doctors import Doctors
from .patients import Patient
class PatentDoctorTb(models.Model):
    '''
    Through table for the Patient.doctor many-to-many relation
    (Patient declares ``through="PatentDoctorTb"``).  Extra per-relation
    fields (e.g. appointment data) can be added here.
    '''
    doctor = models.ForeignKey(Doctors, blank=False, null=False, on_delete=models.CASCADE)
    patient = models.ForeignKey(Patient, blank=False, null=False, on_delete=models.CASCADE)
| Python | 13 | 26.692308 | 91 | /Necktie/necktieapp/models/patent_doctorTb.py | 0.736111 | 0.736111 |
sudheermouni/NeckTie | refs/heads/main | # Generated by Django 3.2.8 on 2021-10-28 06:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.8; do not hand-edit once applied.
    # NOTE(review): null=True on a ManyToManyField has no effect (Django
    # warning fields.W340) - harmless but misleading.
    dependencies = [
        ('necktieapp', '0005_auto_20211028_1129'),
    ]
    operations = [
        migrations.AlterField(
            model_name='patient',
            name='doctor',
            field=models.ManyToManyField(blank=True, null=True, through='necktieapp.PatentDoctorTb', to='necktieapp.Doctors'),
        ),
    ]
| Python | 18 | 24.888889 | 126 | /Necktie/necktieapp/migrations/0006_alter_patient_doctor.py | 0.626609 | 0.560086 |
sudheermouni/NeckTie | refs/heads/main | # Generated by Django 3.2.8 on 2021-10-27 16:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: introduces the initial Patient model with a plain FK to
    # Doctors (converted to a M2M in 0004).  Do not hand-edit once applied.
    dependencies = [
        ('necktieapp', '0002_alter_doctors_d_phone'),
    ]
    operations = [
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('p_surname', models.CharField(blank=True, max_length=20, null=True)),
                ('p_fullname', models.CharField(blank=True, max_length=20, null=True)),
                ('p_username', models.CharField(max_length=40)),
                ('p_phone', models.CharField(blank=True, max_length=10, null=True)),
                ('p_country', models.CharField(blank=True, max_length=50, null=True)),
                ('p_state', models.CharField(blank=True, max_length=50, null=True)),
                ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.doctors')),
            ],
        ),
    ]
| Python | 27 | 40.148148 | 117 | /Necktie/necktieapp/migrations/0003_patient.py | 0.59586 | 0.567957 |
sudheermouni/NeckTie | refs/heads/main | from django.db import models
from model_utils import Choices
# Closed choice set for Doctors.d_specialization: 2-letter DB code -> label.
SPECIALIZATIONS = Choices(
    ("CD", "Cardiology"),
    ("GS", "General Surgery"),
    ("EC", "Endocrinology"),
    ("NT", "Neonatology"),
)
class Doctors(models.Model):
    """A doctor profile; ``d_username`` is the unique natural key."""
    d_surname = models.CharField(max_length=20, blank=True, null=True)
    d_firstname = models.CharField(max_length=20, blank=True, null=True)
    # Unique handle; also used as the string representation below.
    d_username = models.CharField(max_length=40, blank=False, null=False, unique=True)
    d_phone = models.CharField(max_length=10, blank=True, null=True)
    d_address = models.TextField(blank=True, null=True)
    d_country = models.CharField(max_length=30)
    # Restricted to the two-letter codes declared in SPECIALIZATIONS.
    d_specialization = models.CharField(
        choices=SPECIALIZATIONS,
        max_length=4,
        blank=False,
        null=False,
    )
    d_pincode = models.IntegerField()
    def __str__(self):
        return self.d_username
| Python | 28 | 30.25 | 86 | /Necktie/necktieapp/models/doctors.py | 0.661714 | 0.649143 |
sudheermouni/NeckTie | refs/heads/main | from .doctor_view import DoctorViewset # noqa: F401
from .patient_view import PatientViewset # noqa: F401 | Python | 2 | 53 | 54 | /Necktie/necktieapp/views/__init__.py | 0.785047 | 0.728972 |
sudheermouni/NeckTie | refs/heads/main | # Generated by Django 3.2.8 on 2021-10-27 16:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: converts Patient.doctor from a ForeignKey to a
    # ManyToMany through PatentDoctorTb.  Do not hand-edit once applied.
    dependencies = [
        ('necktieapp', '0003_patient'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='patient',
            name='doctor',
        ),
        migrations.CreateModel(
            name='PatentDoctorTb',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.doctors')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.patient')),
            ],
        ),
        migrations.AddField(
            model_name='patient',
            name='doctor',
            field=models.ManyToManyField(through='necktieapp.PatentDoctorTb', to='necktieapp.Doctors'),
        ),
    ]
| Python | 31 | 32.354839 | 117 | /Necktie/necktieapp/migrations/0004_auto_20211027_2226.py | 0.596712 | 0.578337 |
sudheermouni/NeckTie | refs/heads/main | # Generated by Django 3.2.8 on 2021-10-28 05:59
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames d_state -> d_specialization and fixes the
    # d_surnam typo from 0001.  Do not hand-edit once applied.
    dependencies = [
        ('necktieapp', '0004_auto_20211027_2226'),
    ]
    operations = [
        migrations.RenameField(
            model_name='doctors',
            old_name='d_state',
            new_name='d_specialization',
        ),
        migrations.RenameField(
            model_name='doctors',
            old_name='d_surnam',
            new_name='d_surname',
        ),
    ]
| Python | 23 | 21.695652 | 50 | /Necktie/necktieapp/migrations/0005_auto_20211028_1129.py | 0.545977 | 0.48659 |
sudheermouni/NeckTie | refs/heads/main | from .doctor_serializer import DoctorSerializer
from .patient_serializer import PatientSerializer | Python | 2 | 48 | 49 | /Necktie/necktieapp/serializers/__init__.py | 0.886598 | 0.886598 |
sudheermouni/NeckTie | refs/heads/main | # Generated by Django 3.2.8 on 2021-10-27 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: changes Doctors.d_phone from IntegerField (0001) to a
    # 10-char CharField.  Do not hand-edit once applied.
    dependencies = [
        ('necktieapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='doctors',
            name='d_phone',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
    ]
| Python | 18 | 21.388889 | 73 | /Necktie/necktieapp/migrations/0002_alter_doctors_d_phone.py | 0.590571 | 0.538462 |
sudheermouni/NeckTie | refs/heads/main | from django.db import models
from .doctors import Doctors
class Patient(models.Model):
    """A patient profile, linked to doctors through PatentDoctorTb.

    ``doctor`` uses an explicit through table so extra per-relation
    fields can live on PatentDoctorTb.
    NOTE(review): unlike Doctors.d_username, p_username is not unique -
    confirm whether duplicates are acceptable.
    """
    p_surname = models.CharField(max_length=20, blank=True, null=True)
    # null=True was dropped: it has no effect on a ManyToManyField (Django
    # warning fields.W340); blank=True alone makes the relation optional.
    doctor = models.ManyToManyField(Doctors, through="PatentDoctorTb", blank=True)
    p_fullname = models.CharField(max_length=20, blank=True, null=True)
    p_username = models.CharField(max_length=40, blank=False, null=False)
    p_phone = models.CharField(max_length=10, blank=True, null=True)
    p_country = models.CharField(max_length=50, blank=True, null=True)
    p_state = models.CharField(max_length=50, blank=True, null=True)
    def __str__(self):
        return self.p_username
sudheermouni/NeckTie | refs/heads/main | from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, filters
from rest_framework.permissions import IsAuthenticated
from necktieapp.models import Patient
from necktieapp.serializers import PatientSerializer
class PatientViewset(viewsets.ModelViewSet):
    """Authenticated CRUD API for Patient.

    Supports exact filtering, free-text search and ordering on
    id / p_surname / p_username via the standard DRF query parameters.
    """
    permission_classes = (IsAuthenticated,)
    queryset = Patient.objects.all()
    serializer_class = PatientSerializer
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    filterset_fields = ['id', 'p_surname', 'p_username']
    search_fields = ['id', 'p_surname', 'p_username']
    ordering_fields = ['id', 'p_surname', 'p_username']
| Python | 16 | 41.5 | 89 | /Necktie/necktieapp/views/patient_view.py | 0.767647 | 0.767647 |
sudheermouni/NeckTie | refs/heads/main | from django.contrib import admin
from .models import Doctors, Patient, PatentDoctorTb
# Expose the three models in the Django admin with default ModelAdmin options.
admin.site.register(Doctors)
admin.site.register(Patient)
admin.site.register(PatentDoctorTb)
| Python | 7 | 25 | 52 | /Necktie/necktieapp/admin.py | 0.82967 | 0.82967 |
sudheermouni/NeckTie | refs/heads/main | from rest_framework.routers import DefaultRouter
# django.conf.urls.url is deprecated since Django 2.0 and removed in 4.0;
# re_path from django.urls is the drop-in replacement.
from django.urls import include, re_path
from necktieapp import views
# The router auto-generates the CRUD routes for both viewsets.
router = DefaultRouter(trailing_slash=False)
router.register(r'doctors', views.DoctorViewset)
router.register(r'patients', views.PatientViewset)
# All API routes are versioned under /v1/.
urlpatterns = [
    re_path(r'^v1/', include(router.urls)),
]
sudheermouni/NeckTie | refs/heads/main | from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, filters
from rest_framework.permissions import IsAuthenticated
from necktieapp.models import Doctors
from necktieapp.serializers import DoctorSerializer
class DoctorViewset(viewsets.ModelViewSet):
    """Authenticated CRUD API for Doctors.

    Supports exact filtering, free-text search and ordering on
    id / d_specialization / d_username via the standard DRF query parameters.
    """
    permission_classes = (IsAuthenticated,)
    queryset = Doctors.objects.all()
    serializer_class = DoctorSerializer
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    filterset_fields = ['id', 'd_specialization', 'd_username']
    search_fields = ['id', 'd_specialization', 'd_username']
    ordering_fields = ['id', 'd_specialization', 'd_username']
| Python | 16 | 42.625 | 89 | /Necktie/necktieapp/views/doctor_view.py | 0.773639 | 0.773639 |
sudheermouni/NeckTie | refs/heads/main | import random
import string
from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string
from necktieapp.models import Doctors
# Template payload for one generated doctor.  The get_random_string() calls
# run once at import time, so these values are process-wide constants;
# Command.handle() overrides them per row.
sample_data = {
    'd_surname': get_random_string(),
    'd_firstname': get_random_string(),
    'd_username': "",
    'd_phone': get_random_string(),
    'd_address': get_random_string(),
    'd_country': get_random_string(),
    'd_specialization': "CD",
    'd_pincode': 524101,
}
class Command(BaseCommand):
    """``manage.py bulk_create <total>``: reseed the Doctors table."""
    help = 'Create random doctors'
    def add_arguments(self, parser):
        parser.add_argument('total', type=int, help='Indicates the number of users to be created')
    def handle(self, *args, **kwargs):
        """Delete all doctors, then bulk-insert ``total`` randomized rows."""
        total = kwargs['total']
        # WARNING: destructive - wipes the whole table before seeding.
        Doctors.objects.all().delete()
        list_instances = []
        for _ in range(total):
            # Fresh random values per row.  The original reused the module
            # template verbatim (identical surname/phone/etc. for every
            # doctor, since it was randomized only once at import) and
            # mutated the shared dict in place.
            data = dict(
                sample_data,
                d_surname=get_random_string(),
                d_firstname=get_random_string(),
                d_phone=get_random_string(),
                d_address=get_random_string(),
                d_country=get_random_string(),
                d_username=''.join(random.choices(string.ascii_uppercase + string.digits, k=8)),
            )
            list_instances.append(Doctors(**data))
        Doctors.objects.bulk_create(list_instances)
| Python | 35 | 28.942858 | 108 | /Necktie/necktieapp/management/commands/bulk_create.py | 0.651718 | 0.645038 |
sudheermouni/NeckTie | refs/heads/main | from django.test import TestCase, TransactionTestCase
from necktieapp.models import Doctors
# A valid Doctors payload used by the tests in this module.
sample_data = {
    'd_surname': "sudheer",
    'd_firstname': "mandi",
    'd_username': "smre",
    'd_phone': "7702231789",
    'd_address': "Ramalingapuram",
    'd_country': "India",
    'd_specialization': "CD",
    'd_pincode': 524101,
}
class TestDoctor(TransactionTestCase):
    """CRUD behaviour of the Doctors model (rows loaded from doctors.json)."""
    fixtures = ["doctors.json"]
    def test_create_new_record(self):
        """A doctor built from sample_data is persisted with its username."""
        model_instance = Doctors.objects.create(**sample_data)
        self.assertIsInstance(model_instance, Doctors)
        self.assertEqual(model_instance.d_username, "smre")
    def test_update_record(self):
        """Updating a field on a fixture row sticks after save()."""
        instance = Doctors.objects.get(id=1)
        instance.d_phone = "9177935906"
        instance.save()
        self.assertEqual(instance.d_phone, "9177935906")
    def test_should_not_save_duplicate_username(self):
        """d_username is unique: re-using an existing username must fail.

        NOTE(review): assumes doctors.json contains a row with
        d_username == "smreddy" - confirm against the fixture file.
        """
        before_count = Doctors.objects.count()
        # Copy instead of mutating the shared module-level sample_data.
        duplicate = dict(sample_data, d_username="smreddy")
        # The original swallowed the error and asserted nothing when the
        # create unexpectedly succeeded; assertRaises turns that silent
        # pass into a failure.  (Broad Exception to avoid importing
        # django.db.utils here; the DB raises IntegrityError.)
        with self.assertRaises(Exception):
            Doctors.objects.create(**duplicate)
        self.assertEqual(Doctors.objects.count(), before_count)
| Python | 37 | 30.837837 | 62 | /Necktie/necktieapp/tests/test_doctors.py | 0.64207 | 0.610687 |
sudheermouni/NeckTie | refs/heads/main | from rest_framework import serializers
from necktieapp.models import Doctors
class DoctorSerializer(serializers.ModelSerializer):
    """Serializes every field of the Doctors model for the REST API."""
    class Meta:
        model = Doctors
        fields = "__all__"
sudheermouni/NeckTie | refs/heads/main | # Generated by Django 3.2.8 on 2021-10-27 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema for Doctors.  Do not hand-edit once
    # applied (the d_surnam typo and d_state name are fixed later in 0005).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Doctors',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('d_surnam', models.CharField(blank=True, max_length=20, null=True)),
                ('d_firstname', models.CharField(blank=True, max_length=20, null=True)),
                ('d_username', models.CharField(max_length=40, unique=True)),
                ('d_phone', models.IntegerField(blank=True, null=True)),
                ('d_address', models.TextField(blank=True, null=True)),
                ('d_country', models.CharField(max_length=30)),
                ('d_state', models.CharField(choices=[('CD', 'Cardiology'), ('GS', 'General Surgery'), ('EC', 'Endocrinology'), ('NT', 'Neonatology')], max_length=4)),
                ('d_pincode', models.IntegerField()),
            ],
        ),
    ]
| Python | 28 | 38.785713 | 167 | /Necktie/necktieapp/migrations/0001_initial.py | 0.566427 | 0.544883 |
sudheermouni/NeckTie | refs/heads/main | from django.apps import AppConfig
class NecktieappConfig(AppConfig):
    """AppConfig for the ``necktieapp`` Django application."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'necktieapp'
| Python | 6 | 24.333334 | 56 | /Necktie/necktieapp/apps.py | 0.756579 | 0.756579 |
sudheermouni/NeckTie | refs/heads/main | from .doctors import Doctors
from .patients import Patient
from .patent_doctorTb import PatentDoctorTb
| Python | 3 | 33.333332 | 43 | /Necktie/necktieapp/models/__init__.py | 0.84466 | 0.84466 |
AlexsandroMO/Bitcoin | refs/heads/master | import pandas as pd
import pandasql as pdsql
import sqlite3
from datetime import date
from datetime import datetime
import CreateTable_SQL
#--------------------------------------------
#Adicionar dados no banco - VarBitcoin
def add_var_bitcoin(btc_last,btc_buy,btc_sell,date_btc):
    """Insert one bitcoin price sample into the VARBTC table.

    Args:
        btc_last: last traded price.
        btc_buy: current buy price.
        btc_sell: current sell price.
        date_btc: sample date/timestamp (stored as text in the DATE column).
    """
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        # Parameterized query: the original interpolated values into the SQL
        # string with an f-string, which breaks on quotes and is
        # injection-prone.
        conn.execute(
            "INSERT INTO VARBTC(VAR_BTC_LAST, VAR_BTC_BUY, VAR_BTC_SELL, VAR_BTC_DATE)"
            " VALUES (?, ?, ?, ?);",
            (btc_last, btc_buy, btc_sell, date_btc),
        )
        conn.commit()
    finally:
        conn.close()  # close even if the insert fails
def add_var_wallet(my_wallet_control,profit,date_today):
    """Insert one wallet snapshot into the WALLET table.

    NOTE(review): CreateTable_SQL.create_Wallet() creates a table named
    COINCOIN, not WALLET - confirm which table name is intended.

    Args:
        my_wallet_control: current wallet value.
        profit: win/lose amount for the negotiation.
        date_today: negotiation date (stored as text in the DATE column).
    """
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        # Parameterized query instead of f-string interpolation (quote-safe,
        # injection-safe).
        conn.execute(
            "INSERT INTO WALLET(VAR_WALLET, WIN_LOSE, DATE_NEGOCIATION)"
            " VALUES (?, ?, ?);",
            (my_wallet_control, profit, date_today),
        )
        conn.commit()
    finally:
        conn.close()  # close even if the insert fails
def add_var_wallet_start(wallet,win_lose,date_today):
    """Insert one starting wallet snapshot into the COINCOIN table.

    Args:
        wallet: starting wallet value.
        win_lose: win/lose amount.
        date_today: negotiation date (stored as text in the DATE column).
    """
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        # Parameterized query instead of f-string interpolation (quote-safe,
        # injection-safe).
        conn.execute(
            "INSERT INTO COINCOIN(VAR_WALLET, WIN_LOSE, DATE_NEGOCIATION)"
            " VALUES (?, ?, ?);",
            (wallet, win_lose, date_today),
        )
        conn.commit()
    finally:
        conn.close()  # close even if the insert fails
| Python | 61 | 22.622952 | 88 | /Write_SQL.py | 0.622503 | 0.61984 |
AlexsandroMO/Bitcoin | refs/heads/master |
from django.contrib import admin
from .models import TypeWallet, MYWallet
class ListaMYWallet(admin.ModelAdmin):
    # Columns shown on the MYWallet admin changelist page.
    list_display = ('name_wallet','var_wallet','type_wallet','log_create')
# Register the models; MYWallet uses the customised changelist above.
admin.site.register(TypeWallet)
admin.site.register(MYWallet, ListaMYWallet)
| Python | 9 | 28.777779 | 74 | /coin/admin.py | 0.749104 | 0.749104 |
AlexsandroMO/Bitcoin | refs/heads/master | import pandas as pd
import pandasql as pdsql
import sqlite3
from datetime import date
from datetime import datetime
import os
def verify():
    """Ensure the DB/ directory that holds the sqlite file exists."""
    # exist_ok makes this idempotent and race-free; the original used a
    # check-then-create pair and shadowed the builtin `dir`.
    os.makedirs('DB', exist_ok=True)
def _execute_ddl(ddl):
    """Run one DDL statement against DB/DB_COINS.db and commit it."""
    verify()
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        conn.execute(ddl)
        conn.commit()
    finally:
        conn.close()  # close even if the statement fails
#-------------------------------------------------------
# Create table VARBTC (bitcoin price history)
def create_VarBTC():
    """Create the VARBTC table if it does not already exist."""
    _execute_ddl(
        """
        CREATE TABLE IF NOT EXISTS VARBTC (
            ID INTEGER PRIMARY KEY,
            VAR_BTC_LAST DOUBLE NOT NULL,
            VAR_BTC_BUY DOUBLE NOT NULL,
            VAR_BTC_SELL DOUBLE NOT NULL,
            VAR_BTC_DATE DATE NOT NULL
        )
        """
    )
#-------------------------------------------------------
# Create table COINCOIN (wallet history)
def create_Wallet():
    """Create the COINCOIN table if it does not already exist.

    NOTE(review): despite the function name the table is COINCOIN, while
    Write_SQL.add_var_wallet inserts into a table named WALLET - confirm
    which name is intended.
    """
    _execute_ddl(
        """
        CREATE TABLE IF NOT EXISTS COINCOIN (
            ID INTEGER PRIMARY KEY,
            VAR_WALLET DOUBLE NOT NULL,
            WIN_LOSE DOUBLE NOT NULL,
            DATE_NEGOCIATION DATE NOT NULL
        )
        """
    )
#-------------------------
#db = TinyDB('db.json')
#Ft = Query() | Python | 70 | 16.914286 | 56 | /CreateTable_SQL.py | 0.543873 | 0.541604 |
AlexsandroMO/Bitcoin | refs/heads/master | import pandas as pd
import pandasql as pdsql
import sqlite3
from datetime import date
from datetime import datetime
def read_sql_btc():
    """Load the whole VARBTC table as a pandas DataFrame."""
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        # Static query - no parameters, so no f-string needed.
        return pd.read_sql_query("SELECT * FROM VARBTC;", conn)
    finally:
        conn.close()  # the original leaked the connection on a failed read
def read_sql_wallet():
    """Load the whole COINCOIN (wallet history) table as a pandas DataFrame."""
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        # Static query - no parameters, so no f-string needed.
        return pd.read_sql_query("SELECT * FROM COINCOIN;", conn)
    finally:
        conn.close()  # the original leaked the connection on a failed read
AlexsandroMO/Bitcoin | refs/heads/master | from django.test import TestCase
# pip install django-crispy-forms
'''Upload documents on Github
git clone <nome>
<entra na pasta criada>
git add .
git commit -m "texto"
git push
''' | Python | 17 | 10.235294 | 33 | /coin/tests.py | 0.660194 | 0.660194 |
kawa-kokosowa/urlink | refs/heads/master | # builtin
import datetime
# 3rd party
import flask_sqlalchemy
import flask_user
db = flask_sqlalchemy.SQLAlchemy()
class User(db.Model, flask_user.UserMixin):
    """Generic User data model for flask_user as seen
    in their documentation.
    http://pythonhosted.org/Flask-User/basic_app.html
    """
    id = db.Column(db.Integer, primary_key=True)
    # User authentication information (hashed password + reset token)
    password = db.Column(db.String(255), nullable=False, server_default='')
    reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
    # User email information; email is the unique login identifier
    email = db.Column(db.String(255), nullable=False, unique=True)
    confirmed_at = db.Column(db.DateTime())
    # User profile information; `active` maps to the 'is_active' column
    active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
    first_name = db.Column(db.String(100), nullable=False, server_default='')
    last_name = db.Column(db.String(100), nullable=False, server_default='')
class Url(db.Model):
    """A URL belonging to a user, accompanied by a description
    of 140 characters or less.
    Belongs to /urls/x
    """
    __tablename__ = 'urls'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey(User.id))  # NOTE(review): should never be null; consider nullable=False
    created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    url = db.Column(db.String())  # NOTE(review): should never be null; consider nullable=False
    description = db.Column(db.String(140))
    title = db.Column(db.String())
    content_type = db.Column(db.String())  # NOTE(review): consider a bounded length and making it required
    user = db.relationship('User', foreign_keys='Url.user_id', lazy='subquery')
    def __init__(self, user_id, url, description, content_type=None, title=None):
        """Store the owning user id, the link itself and its metadata.

        `title` and `content_type` are normally derived by the caller via
        util.fetch_searchable_data().
        """
        self.user_id = user_id
        self.url = url
        self.description = description
        # these are derived from util.fetch_searchable_data()
        self.title = title
        self.content_type = content_type
    def __repr__(self):
        # e.g. <URL #3 Some Title (http://example.com)>
        return '<URL #%s %s (%s)>' % (self.id, self.title, self.url)
    def to_dict(self):
        """Create a dictionary representing this URL.
        Returns:
            dict: contains the id, url, description, title and
            content_type of this URL (`created` is not serialized yet).
        """
        data_to_return = {
            'id': self.id,
            # TODO:
            # 'created': self.created,
            'url': self.url,
            'description': self.description,
            'title': self.title,
            'content_type': self.content_type,
        }
        return data_to_return
| Python | 83 | 30.084337 | 104 | /models.py | 0.626744 | 0.617829 |
kawa-kokosowa/urlink | refs/heads/master | """urlink Flask App
"""
# builtin
import os
# local
import models
import config
import urlhelper
# 3rd party/pip
import flask
import flask_mail
import flask_user
import flask_login
import flask_script
import flask_migrate
import sqlalchemy
import wtforms
# flask app setup
# Flask application object, configured from config.py.
app = flask.Flask(__name__)
app.config.from_object(config)
# Database migrations (flask_migrate), exposed as the `db` manager command.
migrate = flask_migrate.Migrate(app, models.db)
manager = flask_migrate.Manager(app)
manager.add_command('db', flask_migrate.MigrateCommand)
models.db.init_app(app)  # bind the shared SQLAlchemy instance to this app
# flask user: outgoing mail plus user/session management over models.User
mail = flask_mail.Mail(app)
db_adapter = flask_user.SQLAlchemyAdapter(models.db, models.User)
user_manager = flask_user.UserManager(db_adapter, app)
class AddUrlForm(wtforms.Form):
    """Validation and fields for the form/page which allows a user
    to save/add a URL/link.
    """
    # The link itself; must be a valid URL with a top-level domain.
    url = wtforms.StringField(
        'url',
        [wtforms.validators.URL(require_tld=True),],
        render_kw={
            "placeholder": "URL/Link",
            "class": "form-control input-lg",
            "id": "url",
            "autofocus": True
        },
    )
    # Free-text note about the link, capped at 140 characters.
    description = wtforms.TextAreaField(
        'description',
        [wtforms.validators.Length(max=140),],
        render_kw={
            "placeholder": "Description/about URL",
            "class": "form-control input-lg",
            "id": "description",
            "maxlength": 140,
        },
    )
class SearchForm(wtforms.Form):
    """For live searching/filtering the bookmarks.
    Uses the /autocomplete endpoint (see: autocomplete()).
    """
    # The element id "autocomplete" is what the page's jQuery hooks onto.
    autocomp = wtforms.TextField('autocomp', id='autocomplete')
# TODO: newest first.
@app.route('/')
def home_page():
    """Rendered Jinja/HTML page for live-searching bookmarks.
    Form on this page can use normal form submission, however,
    this page includes jQuery which implements the live-searching
    feature, it updates the page with values from `/autocomplete`,
    i.e., autocomplete().
    If the user isn't logged in, they are shown the landing page instead.
    """
    if flask_login.current_user.is_authenticated:
        # this form doesn't need validating
        search_form = SearchForm(flask.request.form)
        # if we have at least search term, the user has GET'd search form
        search_term = flask.request.args.get('term')
        search_type = flask.request.args.get('type')
        if search_term:
            urls = url_search(search_term, search_type=search_type)
        else:
            # No search: list every bookmark owned by the current user.
            urls = models.Url.query.filter_by(
                user=flask_login.current_user
            ).all()
        # Distinct non-empty content types feed the type-filter options.
        content_types = set([url.content_type for url in urls if url.content_type])
        return flask.render_template(
            "ur_links.html",
            search_form=search_form,
            urls=urls,
            options=content_types,
        )
    else:
        return flask.render_template("landing.html")
def url_search(search_term, search_type=None):
    """Query the current user's URLs matching *search_term*.

    Matches case-insensitively against either the url or the
    description.  When *search_type* is given, results are further
    restricted to that exact content type.

    Arguments:
        search_term (str): substring to look for.
        search_type (str|None): optional content-type filter.

    Returns:
        The (lazy) SQLAlchemy query of matching URLs.
    """
    criteria = [
        models.Url.user_id == flask_login.current_user.id,
        sqlalchemy.or_(
            models.Url.url.ilike("%" + search_term + "%"),
            models.Url.description.ilike("%" + search_term + "%"),
        ),
    ]
    # Only constrain content type when one was actually requested;
    # building a criteria list avoids duplicating the whole query
    # for the two cases (the original repeated it verbatim).
    if search_type:
        criteria.append(models.Url.content_type == search_type)
    return models.Url.query.filter(*criteria)
@app.route('/autocomplete', methods=['GET'])
@flask_user.login_required
def autocomplete():
    """JSON endpoint backing the live search.

    Looks up the current user's URLs whose url/description contain
    the `term` query argument, optionally narrowed by `type`.

    Returns:
        json: a list of dictionaries, one per matching URL.
    """
    term = flask.request.args.get('term')
    content_type = flask.request.args.get('type')
    matches = url_search(term, search_type=content_type)
    return flask.jsonify([match.to_dict() for match in matches])
@app.route('/urls/add', methods=['POST', 'GET'])
@flask_user.login_required
def add_url():
    """Show and process the form for saving a new URL to the
    current user's urls.
    """
    form = AddUrlForm(flask.request.form)

    # Show the form unless this is a valid POST submission.
    if flask.request.method != 'POST' or not form.validate():
        return flask.render_template("add_url.html", form=form)

    # Valid submission: create the URL record, enriched with any
    # searchable metadata we can scrape, then go back home.
    url = flask.request.form['url']
    scraped = urlhelper.fetch_searchable_data(url)
    record = models.Url(
        user_id=flask_login.current_user.id,
        url=url,
        description=flask.request.form['description'],
        **scraped,
    )
    models.db.session.add(record)
    models.db.session.commit()
    return flask.redirect(flask.url_for('home_page'))
# Entry point: hand control to the Flask-Script manager, which
# dispatches CLI commands (e.g. `python app.py db upgrade`).
if __name__=='__main__':
    manager.run()
| Python | 193 | 26.699482 | 83 | /app.py | 0.618032 | 0.616723 |
kawa-kokosowa/urlink | refs/heads/master | """empty message
Revision ID: a77719286100
Revises: ae0cb4fef303
Create Date: 2016-10-03 13:03:02.448316
"""
# revision identifiers, used by Alembic.
revision = 'a77719286100'
down_revision = 'ae0cb4fef303'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: add the nullable `title` column to `urls`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('urls', sa.Column('title', sa.String(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the `title` column from `urls`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('urls', 'title')
    ### end Alembic commands ###
| Python | 26 | 21.461538 | 73 | /migrations/versions/a77719286100_.py | 0.669521 | 0.597603 |
kawa-kokosowa/urlink | refs/heads/master | """Get as much info as possible about a URL.
"""
import mimetypes
import requests
import bs4
MAXIMUM_REDIRECTS = 4
FIELDS = [
{
'name': 'title',
'soup_find': ('title', {}),
},
]
_session = requests.Session()
class MaxRedirectError(Exception):
    """Raised when a HEAD-request redirect chain exceeds the limit."""

    def __init__(self):
        self.message = (
            "Head request redirected %d times (max is %d)"
            % (MAXIMUM_REDIRECTS + 1, MAXIMUM_REDIRECTS)
        )
        # Pass the message on to Exception so str(e) and e.args are
        # populated (the original left them empty).
        super().__init__(self.message)
class HttpError(Exception):
    """Raised for a non-redirect, non-200 HTTP response.

    Attributes:
        status_code (int): the offending HTTP status code.
    """

    def __init__(self, status_code):
        self.status_code = status_code
        self.message = "Encountered HTTP error %d" % status_code
        # Pass the message on to Exception so str(e) and e.args are
        # populated (the original left them empty).
        super().__init__(self.message)
def head_until_no_redirect(url, maximum_redirects=MAXIMUM_REDIRECTS):
    """Follow HEAD-request redirects until a 200 response (or fail).

    Arguments:
        url (str): the URL to resolve.
        maximum_redirects (int): how many redirects may be followed
            before giving up.

    Returns:
        tuple: (final url, its requests.Response).

    Raises:
        MaxRedirectError: if the redirect chain is longer than
            maximum_redirects.
        HttpError: for any non-redirect, non-200 status.
    """
    if not maximum_redirects:
        # Recursion bottomed out without ever reaching a 200.
        raise MaxRedirectError()

    response = _session.head(url)
    # Follow every common redirect status, not just 301 (302/303 are
    # temporary redirects, 307/308 preserve the request method); the
    # original treated anything but 301 as a hard HTTP error.
    if response.status_code in (301, 302, 303, 307, 308):
        return head_until_no_redirect(
            response.headers['Location'],
            maximum_redirects - 1
        )
    elif response.status_code == 200:
        return url, response
    else:
        raise HttpError(response.status_code)
def searchable_data_from_soup(soup):
    """Extract the FIELDS-configured tags (e.g. <title>) from *soup*.

    Returns:
        dict: field name -> tag text, for every configured field
        that was actually found in the document.
    """
    extracted = {}
    for field in FIELDS:
        tag_name, find_kwargs = field['soup_find']
        match = soup.find(tag_name, **find_kwargs)
        if match:
            extracted[field['name']] = match.text
    return extracted
# TODO: this docstring sucks, also clean all of this up
def fetch_searchable_data(url):
    """Collect searchable metadata (content type, title, ...) for *url*.

    Resolves redirects with a HEAD request, then -- for HTML pages --
    fetches (part of) the body and extracts the FIELDS-configured tags.

    Note:
        does not check the file extension for a mimetype first,
        because more searchable data is hoped for than simply
        content_type.

    Arguments:
        url (str): the URL to inspect.

    Returns:
        dict: searchable data; always contains 'content_type' when one
        could be determined.  May be None when even the file-extension
        mimetype guess fails.
    """
    searchable_data = {}

    # Try to get the HTTP header for this resource. This may fail
    # so as a last-ditch effort try to get a type from the URL's
    # file extension.

    # first try file extension, if can't tell type then determine with head...
    # once you can get first x bytes for <head> info (meta, title, etc).
    try:
        # note that the new url is the final url we were directed to
        url, head_response = head_until_no_redirect(url)
    except (HttpError, MaxRedirectError) as e:
        # we can at least try to guess the mimetype from file extension
        mimetype = mimetypes.guess_type(url)
        return {"content_type": mimetype[0]} if mimetype else None

    # Determine resource's type from the 'Content-Type' HTTP header,
    # discarding any parameters (e.g. "; charset=utf-8").
    headers_from_url = head_response.headers
    content_type = headers_from_url['Content-Type'].split(';', 1)[0]

    # TODO: should be able to handle extrapolating meta
    # from images, PDFs, music, etc.
    #
    # Bail if we can't extrapolate any further information
    # about this Content-Type (because beyond here we are just
    # extrapolating HTML information).
    if content_type != "text/html":
        return {"content_type": content_type}

    # ...now we know the content_type is text/html!
    searchable_data['content_type'] = "text/html"

    # First try to only request the first 400 bytes to get all of the
    # desired tags (which will be used to create searchable data).
    #
    # If this fails we request bytes 401 onward and combine,
    # extrapolating what we can
    response = _session.get(url, headers={'Range': 'bytes=0-400'})
    soup = bs4.BeautifulSoup(response.text, 'html.parser')
    more_searchable_data = searchable_data_from_soup(soup)

    # we couldn't find all of the tags we wanted in
    # the first 400 bytes of the response
    if not len(more_searchable_data) == len(FIELDS):
        # Store the old response text so we can skip getting it again
        old_response_text = response.text

        # Get the full page, but skip the part we already have (skip the
        # first 400 bytes), combining this new part with
        # the old_response_text!
        # FIXME: could be stream of data! Set an upper limit on bytes range!
        new_response = _session.get(url, headers={'Range': 'bytes=401-'})
        soup = bs4.BeautifulSoup(old_response_text + new_response.text, 'html.parser')

    searchable_data.update(searchable_data_from_soup(soup))
    return searchable_data
| Python | 149 | 29.852348 | 86 | /urlhelper.py | 0.63759 | 0.628671 |
kawa-kokosowa/urlink | refs/heads/master | """Really sloppy configuration that will be overhauled
to include environment-specific configs (develop, test, production).
Mostly due to a Heroku headache.
"""
import os
DEBUG = False
TESTING = False
SECRET_KEY = os.getenv('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.getenv(
'SQLALCHEMY_DATABASE_URI', # if not this... then below
os.getenv('DATABASE_URL', 'sqlite:////tmp/debug.db'), # heroku
)
# flask-user
USER_ENABLE_USERNAME = False
USER_ENABLE_CHANGE_USERNAME = False
# flask-mail settings for flask-user
# (email confirmation, password reset)
# setup for gmail by default
# NOTE, FIXME, TODO: the only reason this is false
# is because gmail isn't allowing me to use their smtp
# anymore!
USER_ENABLE_CONFIRM_EMAIL = False
# this email stuff is all moot because of above note
# will re-enable once we have an smtp service
"""
MAIL_USERNAME = os.getenv('MAIL_USERNAME') # example@gmail.com
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = '"urlink" <noreply@urlink.link>'
MAIL_SERVER = os.getenv('MAIL_SERVER')
MAIL_PORT = int(os.getenv('MAIL_PORT'))
MAIL_USE_SSL = True
MAIL_USE_TLS = True
"""
# this is used by email:
USER_APP_NAME = 'urlink'
| Python | 42 | 26.404762 | 68 | /config.py | 0.721112 | 0.721112 |
kawa-kokosowa/urlink | refs/heads/master | import unittest
import os
import tempfile
import app
class UrlinkTestCase(unittest.TestCase):
    """App-level tests running against a throwaway sqlite database."""

    def setUp(self):
        """Deploy the test DB (sqlite).
        """
        # Keep the temp file's descriptor under the same attribute
        # name tearDown reads; the original stored `db_handle` but
        # closed `db_fd`, so every test crashed in tearDown.
        self.db_fd, app.app.config['DATABASE'] = tempfile.mkstemp()
        self.app = app.app.test_client()

        with app.app.app_context():
            # app has no init_db(); create the schema directly from
            # the models bound to the app.
            app.models.db.create_all()

    def tearDown(self):
        """Delete the test DB (sqlite).
        """
        os.close(self.db_fd)
        os.unlink(app.app.config['DATABASE'])
if __name__ == '__main__':
unittest.main()
| Python | 31 | 17.032259 | 71 | /tests.py | 0.567084 | 0.567084 |
nakulrathore/Machine-Learning-Projects-ud120 | refs/heads/master | #!/usr/bin/python
def outlierCleaner(predictions, ages, net_worths):
    """
        clean away the 10% of points that have the largest
        residual errors (difference between the prediction
        and the actual net worth)

        return a list of tuples named cleaned_data where
        each tuple is of the form (age, net_worth, error)
    """
    # residual error for every training point
    errors = abs(predictions - net_worths)

    # Pair each point with its error and order by error, smallest
    # first.  sorted() accepts any iterable, so this also runs on
    # Python 3, where zip() returns an iterator with no .sort()
    # (the original zip(...).sort() only worked on Python 2).
    cleaned_data = sorted(zip(ages, net_worths, errors),
                          key=lambda point: point[2])

    # keep only the 90% of points with the smallest errors
    return cleaned_data[:int(len(net_worths) * 0.9)]
nakulrathore/Machine-Learning-Projects-ud120 | refs/heads/master | #!/usr/bin/python
import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
### read in data dictionary, convert to numpy array
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
features = ["salary", "bonus"]
data_dict.pop('TOTAL',0)
data = featureFormat(data_dict, features)
#print data[0]
### your code below
for point in data:
salary = point[0]
bonus = point[1]
matplotlib.pyplot.scatter( salary, bonus )
matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.show()
len_data = len(data_dict)
#print data_dict.items()[0][1]['salary']
print "__________________________________________________\n\n"
print "biggest enron outlier, , comment line:13 for this"
temp = 0
name = ""
for i in range(0, len_data):
if data_dict.items()[i][1]['bonus'] > temp and data_dict.items()[i][1]['bonus'] != 'NaN':
temp = data_dict.items()[i][1]['bonus']
name = data_dict.items()[i][0]
print temp
print "biggest enron outlier is :", name,"\n"
print "__________________________________________________\n\n"
print "more enron outliers, , un-comment line:13 for this\n"
print "serching for some other outliers in data....\n"
for i in range(0, len_data):
if data_dict.items()[i][1]['bonus'] > 5000000 and data_dict.items()[i][1]['salary'] > 1000000 and data_dict.items()[i][1]['bonus'] != 'NaN':
temp = data_dict.items()[i][1]['bonus']
name = data_dict.items()[i][0]
print temp
print name,"\n" | Python | 51 | 30.294117 | 144 | /outliers/enron_outliers.py | 0.594529 | 0.572644 |
nakulrathore/Machine-Learning-Projects-ud120 | refs/heads/master | #!/usr/bin/python
"""
Starter code for the regression mini-project.
Loads up/formats a modified version of the dataset
(why modified? we've removed some trouble points
that you'll find yourself in the outliers mini-project).
Draws a little scatterplot of the training/testing data
You fill in the regression code where indicated:
"""
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
dictionary = pickle.load( open("../final_project/final_project_dataset_modified.pkl", "r") )
### list the features you want to look at--first item in the
### list will be the "target" feature
features_list = ["bonus", "salary"]
#features_list = ["bonus", "long_term_incentive"]
data = featureFormat( dictionary, features_list, remove_any_zeroes=True)
target, features = targetFeatureSplit( data )
### training-testing split needed in regression, just like classification
from sklearn.cross_validation import train_test_split
feature_train, feature_test, target_train, target_test = train_test_split(features, target, test_size=0.5, random_state=42)
train_color = "b"
test_color = "r"
### Your regression goes here!
### Please name it reg, so that the plotting code below picks it up and
### plots it correctly. Don't forget to change the test_color above from "b" to
### "r" to differentiate training points from test points.
print ""
print "__________________________________________________\n\n"
print "Etracting Slope And Intercept"
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(feature_train, target_train)
slope = reg.coef_
print "Slope is : ",slope[0]
icept = reg.intercept_
print "Intercept is : ",icept
print "__________________________________________________\n\n"
print "Regression Score : Training Data"
training_score = reg.score(feature_train,target_train)
print "Regression Score on Training Data : ",training_score
print "__________________________________________________\n\n"
print "Regression Score : Test Data"
test_score = reg.score(feature_test,target_test)
print "Regression Score on Test Data : ",test_score
print "__________________________________________________\n\n"
print "Regressing Bonus against LTI(long_term_incentive)"
print "using __ features_list = ['bonus', 'long_term_incentive'] __"
print "Regression Score on Test Data : -0.59271289995"
### draw the scatterplot, with color-coded training and testing points
import matplotlib.pyplot as plt
for feature, target in zip(feature_test, target_test):
plt.scatter( feature, target, color=test_color )
for feature, target in zip(feature_train, target_train):
plt.scatter( feature, target, color=train_color )
### labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_test[0], target_test[0], color=train_color, label="train")
### draw the regression line, once it's coded
try:
plt.plot( feature_test, reg.predict(feature_test) )
except NameError:
pass
#reg.fit(feature_test, target_test)
#plt.plot(feature_train, reg.predict(feature_train), color="b")
plt.xlabel(features_list[1])
plt.ylabel(features_list[0])
plt.legend()
plt.show()
print "__________________________________________________\n"
print "##for get this correct , uncomment last two comments\n##(right before plt.xlabel(features_list[1]):)"
print "sneak peek"
slope = reg.coef_
print "Sneak peek, Slope is : ",slope[0]
| Python | 103 | 32.980583 | 123 | /regression/finance_regression.py | 0.657698 | 0.650763 |
Shally1130/CS7641-assignment3 | refs/heads/master | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import datetime as datetime
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.random_projection import GaussianRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics import silhouette_samples, silhouette_score
#################################################
#Data set 1: wine quality data set
data = pd.read_csv('abalone.csv')
X = data.iloc[:,:8]
y = data.iloc[:,8]
features = list(X.columns.values)
scaler = MinMaxScaler(feature_range=[0,100])
scaler.fit(X)
X_norm = pd.DataFrame(scaler.transform(X))
print(X_norm)
#################################################
#K means clustering
range_n_clusters = [5,10,15,20,25]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.2, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X_norm) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_norm)
cluster_labels = clusterer.labels_
print("NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X_norm, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X_norm, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
cmap = cm.get_cmap("Spectral")
color = cmap(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.2, -0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
ax2.scatter( X_norm.iloc[:, 7], X_norm.iloc[:, 4], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 7], centers[:, 4], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter( c[7], c[4], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
#################################################
#Expectation Maximization clustering
for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_norm)
cluster_labels = clusterer.predict(X_norm)
print("NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# 2nd Plot showing the actual clusters formed
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_norm.iloc[:, 7], X_norm.iloc[:, 4], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.means_
# Draw white circles at cluster centers
plt.scatter(centers[:, 7], centers[:, 4], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[7], c[4], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
#################################################
#PCA feature transformation
pca = PCA(n_components=8, random_state=10)
X_r = pca.fit(X).transform(X)
X_pca = X_r
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 5
for color, i in zip(colors, [5,10,15,20,25]):
plt.scatter(X_r[y == i, 1], X_r[y == i, 2], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of Abalone dataset')
#################################################
#ICA feature transformation
ica = FastICA(n_components=8, random_state=10)
X_r = ica.fit(X).transform(X)
X_ica = X_r
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 5
for color, i in zip(colors, [5,10,15,20,25]):
plt.scatter(X_r[y == i, 1], X_r[y == i, 2], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('ICA of Abalone dataset')
#################################################
#Random Projection feature transformation
rca = GaussianRandomProjection(n_components=8, random_state=10)
X_r = rca.fit_transform(X)
X_rca = X_r
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 5
for color, i in zip(colors, [5,10,15,20,25]):
plt.scatter(X_r[y == i, 1], X_r[y == i, 2], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('Random Projection of Abalone dataset')
#################################################
#Univariate feature selection (K best)
from sklearn.feature_selection import chi2
from sklearn.feature_selection import mutual_info_classif
X_new = SelectKBest(chi2, k=5).fit_transform(X, y)
X_fs = X_new
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 5
for color, i in zip(colors, [5,10,15,20,25]):
plt.scatter(X_new[y == i, 1], X_new[y == i, 2], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('Chi square feature selection of Abalone dataset')
plt.show()
#################################################
#Rerun clustering on transformed features
# range_n_clusters = [5,10,15,20,25]
# # X_test=pd.DataFrame(X_pca)
# X_test=pd.DataFrame(X_ica)
# # X_test=pd.DataFrame(X_rca)
# # X_test=pd.DataFrame(X_fs)
# for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.labels_
# silhouette_avg = silhouette_score(X_test, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# print("The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# ax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.cluster_centers_
# ax.scatter(centers[:, 1], centers[:, 2], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("KMeans clustering using ICA feature transformation "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
# plt.show()
# for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.predict(X_test)
# print("NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# plt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.means_
# plt.scatter(centers[:, 1], centers[:, 2], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("Clusters plot for EM clustering on PCA data "
# "with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
# plt.show()
#################################################
# Rerun clustering on transformed features
range_n_clusters = [2,4,6,8,10]
X_test=pd.DataFrame(X_pca)
n_clusters = 20
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.labels_
silhouette_avg = silhouette_score(X_test, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
print("kmeans pca The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
ax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.cluster_centers_
ax.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("KMeans clustering using PCA feature transformation "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
################################################################
n_clusters = 20
X_test=pd.DataFrame(X_ica)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.labels_
silhouette_avg = silhouette_score(X_test, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
print("kmeans ica The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
ax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.cluster_centers_
ax.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("KMeans clustering using ICA feature transformation "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
# ###################################################################
n_clusters = 20
X_test=pd.DataFrame(X_fs)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.labels_
silhouette_avg = silhouette_score(X_test, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
print("kmeans fs The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
ax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.cluster_centers_
ax.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("KMeans clustering using feature selection transformation "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
# ###################################################################
n_clusters = 20
X_test=pd.DataFrame(X_rca)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.labels_
silhouette_avg = silhouette_score(X_test, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
print("kmeans rca The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
ax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.cluster_centers_
ax.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("KMeans clustering using RCA transformation "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
###################################################################
n_clusters = 20
X_test=pd.DataFrame(X_rca)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("RCA NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on RCA data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
##################################################################
n_clusters = 20
X_test=pd.DataFrame(X_ica)
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("ICA NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on ICA data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
##################################################################
n_clusters = 20
X_test=pd.DataFrame(X_fs)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("FS NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on feature selection data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
#####################################################
n_clusters = 20
X_test=pd.DataFrame(X_pca)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("PCA NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 1], centers[:, 2], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on PCA data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
#################################################
#Rerun ANN on transformed features
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot training and cross-validation learning curves for *estimator*.

    Also prints how long the sklearn learning_curve computation took,
    prefixed with *title*, then shows the figure.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    started = datetime.datetime.now()
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    print("{}: {}".format(title, datetime.datetime.now() - started))
    # Mean +/- one standard deviation across CV folds, per training size.
    mean_train, std_train = np.mean(train_scores, axis=1), np.std(train_scores, axis=1)
    mean_test, std_test = np.mean(test_scores, axis=1), np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, mean_train - std_train, mean_train + std_train,
                     alpha=0.1, color="r")
    plt.fill_between(train_sizes, mean_test - std_test, mean_test + std_test,
                     alpha=0.1, color="g")
    plt.plot(train_sizes, mean_train, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, mean_test, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    plt.show()
# Train the same 3x8 MLP on each dimensionality-reduced representation and
# compare their learning curves.
clf = MLPClassifier(hidden_layer_sizes=(8, 8, 8), random_state=0, solver="lbfgs")
# time = datetime.datetime.now()
plot_learning_curve(clf, "MLP using PCA transformed features", X_pca, y, ylim=[0,1])
# print("MLP using PCA transformed features Performence: {}".format(datetime.datetime.now() - time))
# time = datetime.datetime.now()
plot_learning_curve(clf, "MLP using ICA transformed features", X_ica, y, ylim=[0,1])
# print("MLP using ICA transformed features Performence: {}".format(datetime.datetime.now() - time))
# time = datetime.datetime.now()
plot_learning_curve(clf, "MLP using RCA transformed features", X_rca, y, ylim=[0,1])
# print("MLP using RCA transformed features Performence: {}".format(datetime.datetime.now() - time))
# time = datetime.datetime.now()
plot_learning_curve(clf, "MLP using Selected 5 features", X_fs, y, ylim=[0,1])
# print("MLP using Selected 5 features Performence: {}".format(datetime.datetime.now() - time))
#################################################
#Rerun ANN on transformed features with clusters new feature
# For each reduced representation, append the k-means cluster id as one extra
# column (the key 11 merely names the new DataFrame column) and retrain.
clf = MLPClassifier(hidden_layer_sizes=(8, 8, 8), random_state=0, solver="lbfgs")
clusterer = KMeans(n_clusters=20, random_state=10).fit(X_pca)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_pca)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using PCA transformed features", X_df, y, ylim=[0,1])
clusterer = KMeans(n_clusters=20, random_state=10).fit(X_ica)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_ica)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using ICA transformed features", X_df, y, ylim=[0,1])
clusterer = KMeans(n_clusters=20, random_state=10).fit(X_rca)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_rca)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using RCA transformed features", X_df, y, ylim=[0,1])
clusterer = KMeans(n_clusters=20, random_state=10).fit(X_fs)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_fs)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using selected 5 features", X_df, y, ylim=[0,1])
| Python | 685 | 35.80584 | 100 | /abalone.py | 0.626899 | 0.602665 |
kamranrafiq/Automate-Boring-Work | refs/heads/master | import socket
for x in range(0, 65):
try:
ports = socket.getservbyport(x)
print("Port Number ",x, " runs service ", ports.upper())
except:
continue | Python | 7 | 24.714285 | 64 | /Get_Service_By_Ports.py | 0.586592 | 0.569832 |
kamranrafiq/Automate-Boring-Work | refs/heads/master | import random
# Number-guessing game: the player has six attempts to find a random 1-20 number.
player = input("What's your name? ")
print("Well! " + player + " I am thinking of a number between 1 and 20.\nYou have 6 tries to guess that number.")
secret_number = random.randint(1, 20)
for guesstaken in range(1, 7):
    guess = int(input("Take a guess: "))
    if guess == secret_number:
        # correct on attempt number `guesstaken`
        print("Your guess is correct! You guessed the correct number in " + str(guesstaken) + " guesses")
        break
    # nudge the player in the right direction
    print("Your guess is low. Try again" if guess < secret_number else "Your guess is high. Try again")
else:
    # loop exhausted without a break: all six guesses were wrong
    print("Sorry! You were not able to guess the correct number. The number i was thinking of was " + str(secret_number) + ".")
| Python | 24 | 27.458334 | 124 | /Guess_game.py | 0.688141 | 0.674963 |
kamranrafiq/Automate-Boring-Work | refs/heads/master | import socket
port_numbers = input("Enter the port number: ")
service = socket.getservbyport(int(port_numbers))
print ("Port number",port_numbers,"runs service:", service.upper()) | Python | 5 | 35.200001 | 67 | /Input_port_to_get_service.py | 0.75 | 0.75 |
rafaelawon/pwnable | refs/heads/master | from pwn import *
context.log_level = 'debug'
# Build the 100-entry argv the target binary checks (appears to be the argv
# stage of pwnable.kr's "input" challenge — confirm against the binary):
# argv[65] and argv[66] must carry these exact byte values.
argvs = ["" for i in range(100)]
argvs[0] = "./input"
argvs[65] = "\x00"
argvs[66] = "\x20\x0a\x0d"
# BUG FIX: the keyword was misspelled `arvs`, so the crafted argument vector
# was never handed to the process; pwntools' parameter is `argv`.
target = process(executable='/home/input2/input', argv=argvs)
target.interactive()
| Python | 12 | 18.416666 | 61 | /won/input.py | 0.651064 | 0.587234 |
Garlinsk/Password-Locker | refs/heads/main | import unittest #Importing the unittest module
from passlock import User #Importing the user class
from passlock import Credentials
import pyperclip
class TestCredentials(unittest.TestCase):
    """
    Test cases for the User and Credentials classes.

    BUG FIXES relative to the original:
      * the base class was spelled ``unittest.Testcase`` (AttributeError);
      * test_init's assertions sat inside a nested function that was never
        called, so the test passed vacuously;
      * two tests called User with four positional arguments although the
        constructor takes (username, password, email);
      * test_del_user deleted a user that was never saved;
      * save_credential_test was not discovered by unittest (renamed);
      * the shared class-level lists were never reset between tests.
    """

    def setUp(self):
        """
        Method that runs before each individual test method.
        """
        self.new_user = User("FrankGarlinsk", "1234zx", "garlinsk@email.com")
        self.new_credential = Credentials('email', 'FrankGarlinsk', '1234zx')

    def tearDown(self):
        """
        Reset the shared registries so each test starts from a clean slate.
        """
        User.user_list = []
        Credentials.credentials_list = []

    def test_init(self):
        """
        Check that a User object is initialized with the expected attributes.
        """
        self.assertEqual(self.new_user.username, "FrankGarlinsk")
        self.assertEqual(self.new_user.password, "1234zx")
        self.assertEqual(self.new_user.email, "garlinsk@email.com")

    def test_save_user(self):
        """
        Check that a new user instance is saved into the user list.
        """
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)

    def test_save_multiple_user(self):
        """
        Check that several user objects can be saved to user_list.
        """
        self.new_user.save_user()
        test_user = User("Testuser", "1235zx", "test@email.com")
        test_user.save_user()
        self.assertEqual(len(User.user_list), 2)

    def test_del_user(self):
        """
        Check that delete_user removes a previously saved user.
        """
        self.new_user.save_user()
        self.new_user.delete_user()
        self.assertEqual(len(User.user_list), 0)

    def test_find_user_by_username(self):
        '''
        Check that a saved user can be looked up by username.
        '''
        self.new_user.save_user()
        test_user = User("Testuser", "1235zx", "test@user.com")
        test_user.save_user()
        found_user = User.find_by_username("FrankGarlinsk")
        self.assertEqual(found_user.email, self.new_user.email)

    def test_save_credential(self):
        """
        Check that a credential object is saved into the credentials list.
        """
        self.new_credential.save_details()
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_save_many_accounts(self):
        '''
        Check that multiple credentials objects can be saved.
        '''
        self.new_credential.save_details()
        test_credential = Credentials("Twitter", "mikeycharles", "Mfh45hfk")
        test_credential.save_details()
        self.assertEqual(len(Credentials.credentials_list), 2)
# Allow running this test module directly: `python passlock_test.py`.
if __name__ == "__main__":
    unittest.main()
| Python | 90 | 32.111111 | 89 | /passlock_test.py | 0.621649 | 0.616622 |
Garlinsk/Password-Locker | refs/heads/main | #!/usr/bin/env python3.9
from os import name
from passlock import User
def create_contact(fname,lname,phone,email):
    '''
    Build and return a new User from the given details.

    NOTE(review): the User class visible in passlock.py takes
    (username, password, email) — confirm this four-argument call matches
    the intended User signature.
    '''
    return User(fname, lname, phone, email)
def save_users(user):
    '''
    Persist the given user by delegating to its save_user() method,
    which appends it to the shared user list.
    '''
    user.save_user()
def del_user(user):
    '''
    Remove a previously saved user by delegating to its delete_user()
    method, which drops it from the shared user list.
    '''
    user.delete_user()
def find_user(name):
    '''
    Look up a saved user by username and return it.

    BUG FIX: the original did ``name.find_by_username(name)``, invoking the
    lookup on the *string* argument (AttributeError at runtime); the lookup
    lives on the User class.
    '''
    return User.find_by_username(name)
def check_existing_users(name):
    '''
    Return a boolean telling whether a user with this name exists.

    NOTE(review): delegates to User.user_exist, which is not defined on the
    User class visible in passlock.py — confirm it exists elsewhere.
    '''
    exists = User.user_exist(name)
    return exists
def display_users():
'''
Function that returns all the saved users
'''
return User.display_users() | Python | 40 | 19.65 | 76 | /run.py | 0.637576 | 0.635152 |
Garlinsk/Password-Locker | refs/heads/main | from os import name
import pyperclip
class User:
    """
    A password-locker account holder.

    Class attributes:
        user_list: shared registry of every saved User instance.
    """
    user_list = []  # shared registry of saved users

    def __init__(self, username, password, email):
        """
        Create a user.

        Args:
            username: login/display name of the user.
            password: the user's password.
            email: the user's email address.
        """
        self.username = username
        self.password = password
        self.email = email

    def save_user(self):
        """
        Save this user instance into the shared user list.
        """
        User.user_list.append(self)

    def save_multiple_user(self):
        """
        Save this user into the shared user list (again).

        BUG FIX: the original referenced ``self.new_user`` — an attribute
        User never defines — so every call raised AttributeError. It now
        delegates to save_user().
        """
        self.save_user()

    def delete_user(self):
        '''
        Remove this user from the shared user list.
        '''
        User.user_list.remove(self)
# NOTE(review): this class is structurally broken in several places; the
# defects are flagged inline but the code is left byte-identical.
class Credentials():
    """
    Create credentials class to help create new objects of credentials
    """
    credentials_list = []
    @classmethod
    def find_by_username(cls,user):
        '''
        Method that takes in a name and returns a username that matches that name.
        Args:
            user: Username to search for
        Returns :
            Name of person that matches the user.
        '''@classmethod
        # BUG: the docstring closer above is fused with ``@classmethod`` on one
        # line, so the function body is the single expression
        # ``'...' @ classmethod`` — calling find_by_username raises TypeError
        # (matmul on a string) and the method never returns anything. The
        # decorator also never applies to the def below; split it onto its
        # own line before the next method.
    def copy_email(cls,user):
        # BUG: shadowed by the second copy_email definition at the bottom of
        # the class; `cls.user_list` does not exist on Credentials, and the
        # loop compares a User object (`user.user_name`) against the loop
        # variable `user`, which shadows the parameter.
        user_found = user.find_by_username(user)
        pyperclip.copy(user_found.email)
        for user in cls.user_list:
            if user.user_name == user:
                return user
    def __init__(self,account,userName, password):
        """
        method that defines user credentials to be stored
        """
        # BUG: none of the constructor arguments are stored on the instance.
    def save_details(self):
        """
        method to store a new credential to the credentials list
        """
        Credentials.credentials_list.append(self)
    @classmethod
    def copy_email(cls,user):
        # BUG: overrides the earlier copy_email; `name` here is the module-level
        # ``from os import name`` import, not the requested username.
        user_found = User.find_by_username(name)
pyperclip.copy(user_found.email) | Python | 82 | 23.390244 | 82 | /passlock.py | 0.57314 | 0.57314 |
sarobe/VGDLEntityCreator | refs/heads/master | '''
Video game description language -- parser, framework and core game classes.
@author: Tom Schaul
'''
from random import choice
from collections import defaultdict
import pygame
from tools import Node, indentTreeParser
from vgdl.tools import roundedPoints
class VGDLParser(object):
    """ Parses a string into a Game object.

    A VGDL description has four top-level sections (SpriteSet,
    InteractionSet, LevelMapping, TerminationSet); parseGame dispatches
    each to a dedicated parse method below.
    """
    verbose = False
    @staticmethod
    def playGame(game_str, map_str):
        """ Parses the game and level map strings, and starts the game. """
        g = VGDLParser().parseGame(game_str)
        g.buildLevel(map_str)
        g.startGame()
    @staticmethod
    def playSubjectiveGame(game_str, map_str):
        # First-person variant: drives the parsed game through an interactive
        # pybrain episodic experiment (imports kept local so pybrain stays
        # an optional dependency).
        from pybrain.rl.experiments.episodic import EpisodicExperiment
        from vgdl.interfaces import GameTask
        from vgdl.subjective import SubjectiveGame
        from vgdl.agents import InteractiveAgent, UserTiredException
        g = VGDLParser().parseGame(game_str)
        g.buildLevel(map_str)
        senv = SubjectiveGame(g, actionDelay=100, recordingEnabled=True)
        task = GameTask(senv)
        iagent = InteractiveAgent()
        exper = EpisodicExperiment(task, iagent)
        try:
            exper.doEpisodes(1)
        except UserTiredException:
            # the user aborted the interactive episode; exit quietly
            pass
    def parseGame(self, tree):
        """ Accepts either a string, or a tree. """
        if not isinstance(tree, Node):
            # raw VGDL text: indentation defines the tree structure
            tree = indentTreeParser(tree).children[0]
        sclass, args = self._parseArgs(tree.content)
        self.game = sclass(**args)
        # each top-level section is handled by its own parse method
        for c in tree.children:
            if c.content == "SpriteSet":
                self.parseSprites(c.children)
            if c.content == "InteractionSet":
                self.parseInteractions(c.children)
            if c.content == "LevelMapping":
                self.parseMappings(c.children)
            if c.content == "TerminationSet":
                self.parseTerminations(c.children)
        return self.game
    def _eval(self, estr):
        """ Whatever is visible in the global namespace (after importing the ontologies)
        can be used in the VGDL, and is evaluated.
        """
        # NOTE(review): eval of game-description text — only safe for
        # trusted VGDL files.
        from vgdl.ontology import *
        return eval(estr)
    def parseInteractions(self, inodes):
        """ Each line has the form "type1 type2 > EffectClass arg=val ...". """
        for inode in inodes:
            if ">" in inode.content:
                pair, edef = [x.strip() for x in inode.content.split(">")]
                eclass, args = self._parseArgs(edef)
                self.game.collision_eff.append(tuple([x.strip() for x in pair.split(" ") if len(x)>0]
                                                     +[eclass, args]))
                if self.verbose:
                    print "Collision", pair, "has effect:", edef
    def parseTerminations(self, tnodes):
        """ Each line instantiates one termination-criterion class. """
        for tn in tnodes:
            sclass, args = self._parseArgs(tn.content)
            if self.verbose:
                print "Adding:", sclass, args
            self.game.terminations.append(sclass(**args))
    def parseSprites(self, snodes, parentclass=None, parentargs={}, parenttypes=[]):
        """ Recursive: child sprite definitions inherit class, args and type
        list from their parent node. (The mutable default arguments are
        never mutated — parentargs is always copied, parenttypes is only
        concatenated.) """
        for sn in snodes:
            assert ">" in sn.content
            key, sdef = [x.strip() for x in sn.content.split(">")]
            sclass, args = self._parseArgs(sdef, parentclass, parentargs.copy())
            stypes = parenttypes+[key]
            if 'singleton' in args:
                if args['singleton']==True:
                    self.game.singletons.append(key)
                args = args.copy()
                del args['singleton']
            if len(sn.children) == 0:
                if self.verbose:
                    print "Defining:", key, sclass, args, stypes
                self.game.sprite_constr[key] = (sclass, args, stypes)
                if key in self.game.sprite_order:
                    # last one counts
                    self.game.sprite_order.remove(key)
                self.game.sprite_order.append(key)
            else:
                self.parseSprites(sn.children, sclass, args, stypes)
    def parseMappings(self, mnodes):
        """ "c > type1 type2" maps one level-file character to sprite types. """
        for mn in mnodes:
            c, val = [x.strip() for x in mn.content.split(">")]
            assert len(c) == 1, "Only single character mappings allowed."
            # a char can map to multiple sprites
            keys = [x.strip() for x in val.split(" ") if len(x)>0]
            if self.verbose:
                print "Mapping", c, keys
            self.game.char_mapping[c] = keys
    def _parseArgs(self, s, sclass=None, args=None):
        """ Split "ClassName k1=v1 k2=v2" into the evaluated class and a
        kwargs dict; values that fail to eval are kept as raw strings. """
        if not args:
            args = {}
        sparts = [x.strip() for x in s.split(" ") if len(x) > 0]
        if len(sparts) == 0:
            return sclass, args
        if not '=' in sparts[0]:
            sclass = self._eval(sparts[0])
            sparts = sparts[1:]
        for sp in sparts:
            k, val = sp.split("=")
            try:
                args[k] = self._eval(val)
            except:
                args[k] = val
        return sclass, args
class BasicGame(object):
    """ This regroups all the components of a game's dynamics, after parsing.

    Holds sprite constructors and instances, collision effects, level-map
    character mappings and termination criteria; startGame() runs the
    pygame main loop.
    """
    MAX_SPRITES = 10000
    # char mappings available to every game (can be extended per game)
    default_mapping = {'w': ['wall'],
                       'A': ['avatar'],
                       }
    block_size = 10
    frame_rate = 20
    def __init__(self, **kwargs):
        """ Accepts keyword overrides for existing class attributes only;
        unknown names are reported and ignored. """
        from vgdl.ontology import Immovable, DARKGRAY, MovingAvatar
        for name, value in kwargs.items():
            if hasattr(self, name):
                self.__dict__[name] = value
            else:
                print "WARNING: undefined parameter '%s' for game! "%(name)
        # contains mappings to constructor (just a few defaults are known)
        self.sprite_constr = {'wall': (Immovable, {'color': DARKGRAY}, ['wall']),
                              'avatar': (MovingAvatar, {}, ['avatar']),
                              }
        # z-level of sprite types (in case of overlap)
        self.sprite_order = ['wall',
                             'avatar',
                             ]
        # contains instance lists
        self.sprite_groups = defaultdict(list)
        # which sprite types (abstract or not) are singletons?
        self.singletons = []
        # collision effects (ordered by execution order)
        self.collision_eff = []
        # for reading levels
        self.char_mapping = {}
        # termination criteria
        self.terminations = [Termination()]
        self.num_sprites = 0
        self.kill_list=[]
        self.is_stochastic = False
    def buildLevel(self, lstr):
        """ Instantiate sprites from an ASCII level-map string: one char per
        grid cell, rows separated by newlines. Also sizes the screen. """
        from vgdl.ontology import stochastic_effects
        lines = [l for l in lstr.split("\n") if len(l)>0]
        lengths = map(len, lines)
        assert min(lengths)==max(lengths), "Inconsistent line lengths."
        self.width = lengths[0]
        self.height = len(lines)
        assert self.width > 1 and self.height > 1, "Level too small."
        # rescale pixels per block to adapt to the level
        self.block_size = max(2,int(800./max(self.width, self.height)))
        self.screensize = (self.width*self.block_size, self.height*self.block_size)
        # create sprites
        for row, l in enumerate(lines):
            for col, c in enumerate(l):
                if c in self.char_mapping:
                    pos = (col*self.block_size, row*self.block_size)
                    self._createSprite(self.char_mapping[c], pos)
                elif c in self.default_mapping:
                    pos = (col*self.block_size, row*self.block_size)
                    self._createSprite(self.default_mapping[c], pos)
        self.kill_list=[]
        # the game counts as stochastic if any collision effect is
        for _, _, effect, _ in self.collision_eff:
            if effect in stochastic_effects:
                self.is_stochastic = True
        # guarantee that avatar is always visible
        self.sprite_order.remove('avatar')
        self.sprite_order.append('avatar')
    def emptyBlocks(self):
        """ Return the top-left positions of all grid cells that no sprite
        rectangle overlaps. """
        alls = [s for s in self]
        res = []
        for col in range(self.width):
            for row in range(self.height):
                r = pygame.Rect((col*self.block_size, row*self.block_size), (self.block_size, self.block_size))
                free = True
                for s in alls:
                    if r.colliderect(s.rect):
                        free = False
                        break
                if free:
                    res.append((col*self.block_size, row*self.block_size))
        return res
    def randomizeAvatar(self):
        """ If no avatar is alive, spawn one in a random empty cell. """
        if len(self.getAvatars()) == 0:
            self._createSprite(['avatar'], choice(self.emptyBlocks()))
    def _createSprite(self, keys, pos):
        """ Instantiate one sprite per key at pixel position *pos*,
        honouring MAX_SPRITES and singleton declarations. """
        res = []
        for key in keys:
            if self.num_sprites > self.MAX_SPRITES:
                print "Sprite limit reached."
                return
            sclass, args, stypes = self.sprite_constr[key]
            # verify the singleton condition
            anyother = False
            for pk in stypes[::-1]:
                if pk in self.singletons:
                    if self.numSprites(pk) > 0:
                        anyother = True
                        break
            if anyother:
                continue
            s = sclass(pos=pos, size=(self.block_size, self.block_size), **args)
            s.stypes = stypes
            s.name = key
            self.sprite_groups[key].append(s)
            self.num_sprites += 1
            if s.is_stochastic:
                self.is_stochastic = True
            res.append(s)
        return res
    def _initScreen(self, size):
        """ Open the pygame window and paint the background. """
        from vgdl.ontology import LIGHTGRAY
        pygame.init()
        self.screen = pygame.display.set_mode(size)
        self.background = pygame.Surface(size)
        self.background.fill(LIGHTGRAY)
        self.screen.blit(self.background, (0,0))
    def __iter__(self):
        """ Iterator over all sprites """
        for key in self.sprite_order:
            if key not in self.sprite_groups:
                # abstract type
                continue
            for s in self.sprite_groups[key]:
                yield s
    def numSprites(self, key):
        """ Abstract sprite groups are computed on demand only """
        deleted = len([s for s in self.kill_list if key in s.stypes])
        if key in self.sprite_groups:
            return len(self.sprite_groups[key])-deleted
        else:
            return len([s for s in self if key in s.stypes])-deleted
    def getSprites(self, key):
        """ All live sprites of (possibly abstract) type *key*. """
        if key in self.sprite_groups:
            return [s for s in self.sprite_groups[key] if s not in self.kill_list]
        else:
            return [s for s in self if key in s.stypes and s not in self.kill_list]
    def getAvatars(self):
        """ The currently alive avatar(s) """
        return [s for s in self if isinstance(s, Avatar) and s not in self.kill_list]
    def _clearAll(self, onscreen=True):
        """ Erase dead sprites (and optionally all sprites) from the screen
        and flush the kill list. """
        for s in set(self.kill_list):
            if onscreen:
                s._clear(self.screen, self.background, double=True)
            self.sprite_groups[s.name].remove(s)
        if onscreen:
            for s in self:
                s._clear(self.screen, self.background)
        self.kill_list = []
    def _drawAll(self):
        """ Redraw every live sprite in z-order. """
        for s in self:
            s._draw(self.screen)
    def _updateCollisionDict(self):
        # create a dictionary that maps type pairs to a list of sprite pairs
        self.lastcollisions = defaultdict(list)
        nonstatics = [s for s in self if not s.is_static]
        statics = [s for s in self if s.is_static]
        for i, s1 in enumerate(nonstatics):
            for s2 in (nonstatics+statics)[i+1:]:
                assert s1 != s2
                if s1.rect.colliderect(s2.rect):
                    for key1 in s1.stypes:
                        for key2 in s2.stypes:
                            self.lastcollisions[(key1, key2)].append((s1, s2))
                            self.lastcollisions[(key2, key1)].append((s2, s1))
            # detect end-of-screen
            if not pygame.Rect((0,0), self.screensize).contains(s1.rect):
                for key1 in s1.stypes:
                    self.lastcollisions[(key1, 'EOS')].append((s1, None))
    def _eventHandling(self):
        """ Apply every collision effect, in declaration order, to the pairs
        recorded by _updateCollisionDict. """
        for g1, g2, effect, args in self.collision_eff:
            for s1, s2 in set(self.lastcollisions[(g1, g2)]):
                # TODO: this is not a bullet-proof way, but seems to work
                if s1 not in self.kill_list:
                    effect(s1, s2, self, **args)
    def startGame(self):
        """ Run the pygame main loop until a termination criterion fires. """
        self._initScreen(self.screensize)
        clock = pygame.time.Clock()
        self.time = 0
        self.kill_list=[]
        pygame.display.flip()
        ended = False
        win = False
        while not ended:
            clock.tick(self.frame_rate)
            self.time += 1
            self._clearAll()
            # gather events
            pygame.event.pump()
            self.keystate = pygame.key.get_pressed()
            # termination criteria
            for t in self.terminations:
                ended, win = t.isDone(self)
                if ended:
                    break
            # update sprites
            for s in self:
                s.update(self)
            # handle collision effects
            self._updateCollisionDict()
            self._eventHandling()
            self._drawAll()
            pygame.display.update(VGDLSprite.dirtyrects)
            VGDLSprite.dirtyrects = []
        if win:
            print "Dude, you're a born winner!"
        else:
            print "Dang. Try again..."
        pygame.time.wait(50)
class VGDLSprite(object):
    """ Base class for all sprite types.

    Class attribute `dirtyrects` collects the screen rectangles touched
    since the last display update (shared by draw/clear across sprites).
    """
    COLOR_DISC = [20,80,140,200]  # palette values used for random colours
    dirtyrects = []
    is_static= False
    is_avatar= False
    is_stochastic = False
    color = None
    cooldown = 0 # pause ticks in-between two moves
    speed = None
    mass = 1
    physicstype=None
    shrinkfactor=0
    def __init__(self, pos, size=(10,10), color=None, speed=None, cooldown=None, physicstype=None, **kwargs):
        """ Extra keyword arguments may only override existing attributes;
        unknown names are reported and ignored. """
        self.rect = pygame.Rect(pos, size)
        self.lastrect = self.rect
        if physicstype is not None:
            self.physicstype = physicstype
        elif self.physicstype is None:
            # default to grid-cell movement
            from vgdl.ontology import GridPhysics
            self.physicstype = GridPhysics
        self.physics = self.physicstype(size)
        if speed is not None:
            self.speed = speed
        if cooldown is not None:
            self.cooldown = cooldown
        if color:
            self.color = color
        elif self.color is None:
            # no colour declared anywhere: pick a random one from the palette
            self.color = (choice(self.COLOR_DISC), choice(self.COLOR_DISC), choice(self.COLOR_DISC))
        for name, value in kwargs.items():
            if hasattr(self, name):
                self.__dict__[name] = value
            else:
                print "WARNING: undefined parameter '%s' for sprite '%s'! "%(name, self.__class__.__name__)
        # how many timesteps ago was the last move?
        self.lastmove = 0
    def update(self, game):
        """ The main place where subclasses differ. """
        self.lastrect = self.rect
        # no need to redraw if nothing was updated
        self.lastmove += 1
        if not self.is_static:
            self.physics.passiveMovement(self)
    def _updatePos(self, orientation, speed=None):
        """ Move by orientation*speed, unless still cooling down or the
        orientation is the null vector. """
        if speed is None:
            speed = self.speed
        if not(self.cooldown > self.lastmove or abs(orientation[0])+abs(orientation[1])==0):
            self.rect = self.rect.move((orientation[0]*speed, orientation[1]*speed))
            self.lastmove = 0
    def _velocity(self):
        """ Current velocity vector. """
        if self.speed is None or self.speed==0 or not hasattr(self, 'orientation'):
            return (0,0)
        else:
            return (self.orientation[0]*self.speed, self.orientation[1]*self.speed)
    @property
    def lastdirection(self):
        # displacement since the previous frame
        return (self.rect[0]-self.lastrect[0], self.rect[1]-self.lastrect[1])
    def _draw(self, screen):
        """ Draw this sprite: avatars as outlined polygons, movers as
        polygons, static sprites as filled rectangles. """
        from vgdl.ontology import LIGHTGREEN
        if self.shrinkfactor != 0:
            shrunk = self.rect.inflate(-self.rect.width*self.shrinkfactor,
                                       -self.rect.height*self.shrinkfactor)
        else:
            shrunk = self.rect
        if self.is_avatar:
            rounded = roundedPoints(shrunk)
            pygame.draw.polygon(screen, self.color, rounded)
            pygame.draw.lines(screen, LIGHTGREEN, True, rounded, 2)
            r = self.rect.copy()
        elif not self.is_static:
            rounded = roundedPoints(shrunk)
            pygame.draw.polygon(screen, self.color, rounded)
            r = self.rect.copy()
        else:
            r = screen.fill(self.color, shrunk)
        VGDLSprite.dirtyrects.append(r)
    def _clear(self, screen, background, double=False):
        """ Blit the background over this sprite's rect (and, if *double*,
        also over its previous rect). """
        r = screen.blit(background, self.rect, self.rect)
        VGDLSprite.dirtyrects.append(r)
        if double:
            r = screen.blit(background, self.lastrect, self.lastrect)
            VGDLSprite.dirtyrects.append(r)
    def __repr__(self):
        return self.name+" at (%s,%s)"%(self.rect.left, self.rect.top)
class Avatar(object):
    """ Marker base class for all player-controlled sprite types. """
    # avatars are drawn slightly shrunk relative to their grid cell
    shrinkfactor = 0.15
class Termination(object):
""" Base class for all termination criteria. """
def isDone(self, game):
""" returns whether the game is over, with a win/lose flag """
from pygame.locals import K_ESCAPE, QUIT
if game.keystate[K_ESCAPE] or pygame.event.peek(QUIT):
return True, False
else:
return False, None | Python | 474 | 37.829113 | 111 | /vgdl/core.py | 0.526136 | 0.519833 |
sarobe/VGDLEntityCreator | refs/heads/master | from vgdl.examples.gridphysics.mazes.mazegames import polarmaze_game, maze_game
from vgdl.examples.gridphysics.mazes.simple import maze_level_1, maze_level_2 | Python | 2 | 78 | 79 | /vgdl/examples/gridphysics/mazes/__init__.py | 0.840764 | 0.828025 |
sarobe/VGDLEntityCreator | refs/heads/master | __author__ = 'Samuel Roberts'
| Python | 1 | 21 | 21 | /entitycreator/__init__.py | 0.454545 | 0.454545 |
sarobe/VGDLEntityCreator | refs/heads/master | import random
from vgdl.core import VGDLParser
# Grid direction vectors as (dx, dy); screen y grows downwards.
UP = (0, -1)
DOWN = (0, 1)
LEFT = (-1, 0)
RIGHT = (1, 0)
BASEDIRS = [UP, LEFT, DOWN, RIGHT]
gravity = 0.5   # per-repeat vertical acceleration used by predictOutcome
                # (assumed to match the lander game's physics — TODO confirm)
REPEATS = 1     # how many times each chosen action is applied
ACTIONS = 5     # length of the random action sequence
# Module-level termination flags, read by runLunarLander() after each tick.
ended = False
win = False
def runLunarLander():
    """ Experiment: run a random action sequence on the VGDL lunar lander,
    then change one action and compare the analytically predicted final
    position (predictOutcome) against the re-simulated one. """
    # import lunar lander
    from vgdl.examples.continuousphysics.lander import lander_game, lander_level
    # build the game
    g = VGDLParser().parseGame(lander_game)
    g.buildLevel(lander_level)
    # TODO: Determine how to not need to bring up the pygame display in order to run the game.
    g._initScreen([1, 1])
    ship = g.getAvatars()[0]
    # store initial ship state
    initState = [ship.rect.x, ship.rect.y, ship.speed, ship.orientation]
    print "starting position: " + str(ship)
    print "starting state: " + str(initState)
    # get random actions
    actions = generateInput(ACTIONS)
    states = [initState]
    # move ship based on random actions
    print actions
    for a in actions:
        for i in range(REPEATS):
            ship.action = a
            updateGame(g, a)
            if ended:
                print a, i
                break
        # record the state reached after each action
        states.append(makeState(ship))
    endState = states[len(states)-1]
    # confirm final position
    print "first final position after actions: " + str(ship)
    print "final state: " + str(endState)
    # reroll ship back to initial state
    setState(ship, initState)
    # vary action sequence
    # first pick a point to vary
    random.seed(10466)
    varyIndex = random.randint(0, len(actions) - 1)
    # then change that action
    oldAction = actions[varyIndex]
    actions[varyIndex] = BASEDIRS[random.randint(0, len(BASEDIRS) - 1)]
    # print out the change and the full list of actions
    print "changed action " + str(varyIndex) + " to " + str(actions[varyIndex])
    print "new actions: " + str(actions)
    # predict through simple calculation how the final position should be
    predictState = predictOutcome(states, actions, oldAction, varyIndex)
    print "predicted state " + str(predictState)
    # find out where the actual final position is
    for a in actions:
        for i in range(REPEATS):
            updateGame(g, a)
            if ended:
                print a, i
                break
        # refreshed every iteration; the value after the loop is the final one
        endState = makeState(ship)
    print "actual ending position: " + str(ship)
    print "ending state: " + str(endState)
    # get error
    error = [endState[0] - predictState[0], endState[1] - predictState[1]]
    print "prediction error: " + str(error)
def predictOutcome(states, actions, oldAction, newActionIndex):
    """Predict the ship's final [x, y] after one action in the sequence changed.

    states[i] is the [x, y, speed, orientation] snapshot recorded around
    action i; the last entry is the final state reached under the *old*
    action sequence. The prediction shifts that final position by the
    velocity delta the replacement action introduces, plus gravity over
    the action repeats.
    """
    replacement = actions[newActionIndex]
    delta = (replacement[0] - oldAction[0], replacement[1] - oldAction[1])
    final_state = states[-1]
    state_at_change = states[newActionIndex]
    speed, heading = state_at_change[2], state_at_change[3]
    # velocity just before the changed action, plus the change's contribution
    vel_x = speed * heading[0] + delta[0] * REPEATS
    vel_y = speed * heading[1] + delta[1] * REPEATS
    # gravity acts over every repeat of the changed action
    vel_y += gravity * REPEATS
    return [final_state[0] + vel_x, final_state[1] + vel_y]
def makeState(ship):
    """Snapshot the ship as [x, y, speed, orientation]."""
    r = ship.rect
    return [r.x, r.y, ship.speed, ship.orientation]
def setState(ship, state):
    """Restore ship fields from a [x, y, speed, orientation] snapshot
    (the inverse of makeState)."""
    ship.rect.x, ship.rect.y = state[0], state[1]
    ship.speed, ship.orientation = state[2], state[3]
def generateInput(totalActions):
    """Return a reproducible list of *totalActions* random directions.

    The RNG is re-seeded on every call, so the sequence is deterministic.
    """
    random.seed(1234)
    # comprehension draws randint exactly once per action, same as the
    # original loop, so the generated sequence is unchanged
    return [BASEDIRS[random.randint(0, len(BASEDIRS) - 1)] for _ in range(totalActions)]
def setKeystate(game, action):
    """Translate a direction tuple into a pygame-style pressed-keys list.

    The returned list is indexed by pygame key constant (350 slots covers
    the K_* codes used here); `game` is unused but kept for call-site
    symmetry with the other game helpers.
    """
    from pygame.locals import K_LEFT, K_RIGHT, K_UP, K_DOWN
    keystate = [False] * 350
    if action == LEFT:
        keystate[K_LEFT] = True
    elif action == RIGHT:
        keystate[K_RIGHT] = True
    if action == DOWN:
        keystate[K_DOWN] = True
    elif action == UP:
        keystate[K_UP] = True
    return keystate
def updateGame(game, action):
    """Advance the game one tick under *action*: set the fake keystate,
    evaluate termination criteria, update every sprite, then resolve
    collisions.

    BUG FIXES:
      * `ended`/`win` were plain local assignments shadowing the
        module-level flags that runLunarLander() reads after each call,
        so termination was never visible to the caller; they are now
        declared global.
      * setKeystate() was invoked a second time with its result discarded;
        the redundant call is removed.
    """
    global ended, win
    game.keystate = setKeystate(game, action)
    # termination criteria
    for t in game.terminations:
        ended, win = t.isDone(game)
        if ended:
            break
    # update sprites
    for s in game:
        s.update(game)
    # handle collision effects
    game._updateCollisionDict()
    game._eventHandling()
if __name__ == '__main__':
runLunarLander() | Python | 184 | 26.63587 | 107 | /entitycreator/lunarlandertest.py | 0.644965 | 0.631393 |
yz3007/bigdata | refs/heads/master | with open('result.csv', 'r') as result, open("testdata.csv", 'r') as testdata:
k = 0.0
b = 0.0
result.readline()
testdata.readline()
while 1:
b = b+1
r1 = result.readline()
if r1 == '':
break
else:
r = r1.strip().split(" ")
t = testdata.readline().strip().split(",")
print t[23],r[1:],t[23] in r[1:]
if t[23] in r[1:]:
k = k+1
print "Accuracy:",k/b
| Python | 17 | 27.411764 | 78 | /accuracy.py | 0.434783 | 0.395445 |
yz3007/bigdata | refs/heads/master | #usr/bin/python2.7
from pyspark import SparkContext
from pyspark import SparkConf
from operator import add
# Spark job: memory-based recommendation of Expedia hotel clusters.
conf = SparkConf().setAppName("expedia_hotel")
sc = SparkContext(conf=conf)
arr = sc.textFile("./traindata.csv")
print arr.take(2)
# split each CSV line into columns; downstream mappers index up to arr[23]
arr = arr.map(lambda x:x.split(","))
def get_best_hotels_od_ulc(arr):
    """Map a train row to ((user_location_city, orig_destination_distance,
    hotel_cluster), weight).

    Weight is 1 only when both the city and the distance fields are
    present, so rows with empty keys never accumulate counts.
    """
    key = (arr[5], arr[6], arr[23])
    weight = 1 if arr[5] != '' and arr[6] != '' else 0
    return (key, weight)
def get_best_hotels_search_dest(arr):
    """Map a row to ((srch_destination_id, hotel_country, hotel_market,
    hotel_cluster), weight).

    Only 2014 rows with all three key fields present get a positive
    weight of 17*arr[18] + 3 (arr[18] is presumably the is_booking flag —
    confirm against the data schema); all other rows weigh 0.
    """
    usable = (arr[16] != '' and arr[21] != '' and arr[22] != ''
              and int(arr[0][:4]) == 2014)
    key = (arr[16], arr[21], arr[22], arr[23])
    if usable:
        return (key, int(arr[18]) * 17 + 3)
    return (key, 0)
def get_best_hotels_search_dest1(arr):
    """Map a row to ((srch_destination_id, hotel_cluster), weight) — the
    destination-only fallback of get_best_hotels_search_dest, using the
    same 17*arr[18] + 3 weighting."""
    key = (arr[16], arr[23])
    if arr[16] == '':
        return (key, 0)
    return (key, int(arr[18]) * 17 + 3)
def get_best_hotel_country(arr):
    """Map a row to ((hotel_country, hotel_cluster), weight), weighting
    each occurrence 1 + 5*arr[18]; rows with an empty country weigh 0."""
    key = (arr[21], arr[23])
    if arr[21] == '':
        return (key, 0)
    return (key, 1 + 5 * int(arr[18]))
def get_popular_hotel_cluster(arr):
    """Map a row to (hotel_cluster, 1) for global popularity counting."""
    return arr[23], 1
# Aggregate each keyed weight with an additive fold; every RDD is cached
# because the per-row prediction loop below filters them repeatedly.
best_hotels_od_ulc = arr.map(lambda x:get_best_hotels_od_ulc(x))
best_hotels_od_ulc = best_hotels_od_ulc.foldByKey(0, add).cache()
best_hotels_search_dest = arr.map(lambda x:get_best_hotels_search_dest(x))
best_hotels_search_dest = best_hotels_search_dest.foldByKey(0, add).cache()
best_hotels_search_dest1 = arr.map(lambda x:get_best_hotels_search_dest1(x))
best_hotels_search_dest1 = best_hotels_search_dest1.foldByKey(0, add).cache()
best_hotel_country = arr.map(lambda x:get_best_hotel_country(x))
best_hotel_country = best_hotel_country.foldByKey(0, add).cache()
popular_hotel_cluster = arr.map(lambda x:get_popular_hotel_cluster(x))
popular_hotel_cluster = popular_hotel_cluster.foldByKey(0, add).cache()
path = 'result.csv'
out = open(path, "w")
f = open("./testdata.csv", "r")
schema = f.readline()  # skip the CSV header line
total = 0
out.write("id,hotel_cluster\n")
# The five globally most frequent clusters, used as a last-resort filler.
topclasters = popular_hotel_cluster.sortBy(lambda x: -x[1]).map(lambda x:x[0]).take(5)
idnumber = 0
while 1:
line = f.readline().strip()
total += 1
if total % 10 == 0:
print('Write {} lines...'.format(total))
if total % 999 == 0:
break
arr = line.split(",")
id = idnumber
idnumber = idnumber + 1
print arr
user_location_city = arr[5]
orig_destination_distance = arr[6]
srch_destination_id = arr[16]
hotel_country = arr[21]
hotel_market = arr[22]
out.write(str(id) + ',')
filled = []
Topitems = best_hotels_od_ulc.filter(lambda x:(x[0][0] == user_location_city)&(x[0][1] == orig_destination_distance))
Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x:x[0][2])
topitems = Topitems.take(5)
for i in range(len(topitems)):
if topitems[i] in filled:
continue
if len(filled) == 5:
break
out.write(' ' + topitems[i])
filled.append(topitems[i])
if len(filled) < 5:
Topitems = best_hotels_search_dest.filter(lambda x: (x[0][0] == srch_destination_id) & (x[0][1] == hotel_country) & (x[0][2] == hotel_market))
Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x: x[0][3])
topitems = Topitems.take(5)
for i in range(len(topitems)):
if topitems[i] in filled:
continue
if len(filled) == 5:
break
out.write(' ' + topitems[i])
filled.append(topitems[i])
if len(filled) < 5:
if len(topitems) != 0:
Topitems = best_hotels_search_dest1.filter(lambda x: (x[0][0] == srch_destination_id))
Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x: x[0][1])
topitems = Topitems.take(5)
for i in range(len(topitems)):
if topitems[i] in filled:
continue
if len(filled) == 5:
break
out.write(' ' + topitems[i])
filled.append(topitems[i])
if len(filled) < 5:
Topitems = best_hotel_country.filter(lambda x: (x[0][0] == hotel_country))
Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x: x[0][1])
for i in range(len(topitems)):
if topitems[i] in filled:
continue
if len(filled) == 5:
break
out.write(' ' + topitems[i])
filled.append(topitems[i])
if len(filled) < 5:
for i in range(5):
if topclasters[i] in filled:
continue
if len(filled) == 5:
break
out.write(' ' + topclasters[i])
filled.append(topclasters[i])
out.write("\n")
out.close()
print('Completed!') | Python | 149 | 30.95302 | 150 | /recommend.py | 0.569118 | 0.536134 |
TrackingBird/pfm2png | refs/heads/master | import os
import sys
import re
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import cv2
'''
Load a PFM file into a Numpy array. Note that it will have
a shape of H x W, not W x H. Returns a tuple containing the
loaded image and the scale factor from the file.
'''
def load_pfm(file):
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip().decode('UTF-8')
#print(header)
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('UTF-8'))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().decode('UTF-8').rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
return np.reshape(data, shape), scale
if len(sys.argv)==1:
print('Usage: {} scale [files]'.format('pfmToPng'))
sys.exit()
scale_factor = int(sys.argv[1])
temp = sys.argv[2:]
files = []
for f in temp:
if os.path.exists(f):
files.append(f)
else:
print('Skipping {}, file not found'.format(f))
for i,f in enumerate(files):
with open(f,'rb') as f_in:
disp, scale = load_pfm(f_in)
disp[np.where(disp>0)]=0
disp = scale_factor * np.flipud(disp)
disp = disp.astype(np.uint16)
cv2.imwrite(f.replace('.pfm','.png'),disp)
print('{}/{}'.format(i,len(files)),end='\r')
# pippo = Image.open(path)
# plt.imshow(pippo)
# plt.show()
os.remove(f)
print('DONE!')
| Python | 74 | 24.648649 | 78 | /pfm2png.py | 0.581138 | 0.573762 |
briis/unifiprotect | refs/heads/master | """UniFi Protect Platform."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from aiohttp import CookieJar
from aiohttp.client_exceptions import ServerDisconnectedError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from pyunifiprotect import NotAuthorized, NvrError, ProtectApiClient
from pyunifiprotect.data import ModelType
from .const import (
CONF_ALL_UPDATES,
CONF_DOORBELL_TEXT,
CONF_OVERRIDE_CHOST,
CONFIG_OPTIONS,
DEFAULT_SCAN_INTERVAL,
DEVICES_FOR_SUBSCRIBE,
DEVICES_THAT_ADOPT,
DOMAIN,
MIN_REQUIRED_PROTECT_V,
OUTDATED_LOG_MESSAGE,
PLATFORMS,
)
from .data import ProtectData
from .services import async_cleanup_services, async_setup_services
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=DEFAULT_SCAN_INTERVAL)
@callback
async def _async_migrate_data(
hass: HomeAssistant, entry: ConfigEntry, protect: ProtectApiClient
) -> None:
# already up to date, skip
if CONF_ALL_UPDATES in entry.options:
return
_LOGGER.info("Starting entity migration...")
# migrate entry
options = dict(entry.options)
data = dict(entry.data)
options[CONF_ALL_UPDATES] = False
if CONF_DOORBELL_TEXT in options:
del options[CONF_DOORBELL_TEXT]
hass.config_entries.async_update_entry(entry, data=data, options=options)
# migrate entities
registry = er.async_get(hass)
mac_to_id: dict[str, str] = {}
mac_to_channel_id: dict[str, str] = {}
bootstrap = await protect.get_bootstrap()
for model in DEVICES_THAT_ADOPT:
attr = model.value + "s"
for device in getattr(bootstrap, attr).values():
mac_to_id[device.mac] = device.id
if model != ModelType.CAMERA:
continue
for channel in device.channels:
channel_id = str(channel.id)
if channel.is_rtsp_enabled:
break
mac_to_channel_id[device.mac] = channel_id
count = 0
entities = er.async_entries_for_config_entry(registry, entry.entry_id)
for entity in entities:
new_unique_id: str | None = None
if entity.domain != Platform.CAMERA.value:
parts = entity.unique_id.split("_")
if len(parts) >= 2:
device_or_key = "_".join(parts[:-1])
mac = parts[-1]
device_id = mac_to_id[mac]
if device_or_key == device_id:
new_unique_id = device_id
else:
new_unique_id = f"{device_id}_{device_or_key}"
else:
parts = entity.unique_id.split("_")
if len(parts) == 2:
mac = parts[1]
device_id = mac_to_id[mac]
channel_id = mac_to_channel_id[mac]
new_unique_id = f"{device_id}_{channel_id}"
else:
device_id = parts[0]
channel_id = parts[2]
extra = "" if len(parts) == 3 else "_insecure"
new_unique_id = f"{device_id}_{channel_id}{extra}"
if new_unique_id is None:
continue
_LOGGER.debug(
"Migrating entity %s (old unique_id: %s, new unique_id: %s)",
entity.entity_id,
entity.unique_id,
new_unique_id,
)
try:
registry.async_update_entity(entity.entity_id, new_unique_id=new_unique_id)
except ValueError:
_LOGGER.warning(
"Could not migrate entity %s (old unique_id: %s, new unique_id: %s)",
entity.entity_id,
entity.unique_id,
new_unique_id,
)
else:
count += 1
_LOGGER.info("Migrated %s entities", count)
if count != len(entities):
_LOGGER.warning("%s entities not migrated", len(entities) - count)
@callback
def _async_import_options_from_data_if_missing(
hass: HomeAssistant, entry: ConfigEntry
) -> None:
options = dict(entry.options)
data = dict(entry.data)
modified = False
for importable_option in CONFIG_OPTIONS:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
del data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, data=data, options=options)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the UniFi Protect config entries."""
_async_import_options_from_data_if_missing(hass, entry)
session = async_create_clientsession(hass, cookie_jar=CookieJar(unsafe=True))
protect = ProtectApiClient(
host=entry.data[CONF_HOST],
port=entry.data[CONF_PORT],
username=entry.data[CONF_USERNAME],
password=entry.data[CONF_PASSWORD],
verify_ssl=entry.data[CONF_VERIFY_SSL],
session=session,
subscribed_models=DEVICES_FOR_SUBSCRIBE,
override_connection_host=entry.options.get(CONF_OVERRIDE_CHOST, False),
ignore_stats=not entry.options.get(CONF_ALL_UPDATES, False),
)
_LOGGER.debug("Connect to UniFi Protect")
data_service = ProtectData(hass, protect, SCAN_INTERVAL, entry)
try:
nvr_info = await protect.get_nvr()
except NotAuthorized as err:
raise ConfigEntryAuthFailed(err) from err
except (asyncio.TimeoutError, NvrError, ServerDisconnectedError) as err:
raise ConfigEntryNotReady from err
if nvr_info.version < MIN_REQUIRED_PROTECT_V:
_LOGGER.error(
OUTDATED_LOG_MESSAGE,
nvr_info.version,
MIN_REQUIRED_PROTECT_V,
)
return False
await _async_migrate_data(hass, entry, protect)
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=nvr_info.mac)
await data_service.async_setup()
if not data_service.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = data_service
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
async_setup_services(hass)
entry.async_on_unload(entry.add_update_listener(_async_options_updated))
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, data_service.async_stop)
)
return True
async def _async_options_updated(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Update options."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload UniFi Protect config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
await data.async_stop()
hass.data[DOMAIN].pop(entry.entry_id)
async_cleanup_services(hass)
return bool(unload_ok)
async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Migrate old entry."""
_LOGGER.debug("Migrating from version %s", config_entry.version)
if config_entry.version == 1:
new = {**config_entry.data}
# keep verify SSL false for anyone migrating to maintain backwards compatibility
new[CONF_VERIFY_SSL] = False
if CONF_DOORBELL_TEXT in new:
del new[CONF_DOORBELL_TEXT]
config_entry.version = 2
hass.config_entries.async_update_entry(config_entry, data=new)
_LOGGER.info("Migration to version %s successful", config_entry.version)
return True
| Python | 241 | 32.875519 | 88 | /custom_components/unifiprotect/__init__.py | 0.640495 | 0.639025 |
briis/unifiprotect | refs/heads/master | """This component provides binary sensors for UniFi Protect."""
from __future__ import annotations
from copy import copy
from dataclasses import dataclass
import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_LAST_TRIP_TIME, ATTR_MODEL
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from pyunifiprotect.data import NVR, Camera, Event, Light, MountType, Sensor
from .const import DOMAIN
from .data import ProtectData
from .entity import (
EventThumbnailMixin,
ProtectDeviceEntity,
ProtectNVREntity,
async_all_device_entities,
)
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_DOOR = "door"
@dataclass
class ProtectBinaryEntityDescription(
ProtectRequiredKeysMixin, BinarySensorEntityDescription
):
"""Describes UniFi Protect Binary Sensor entity."""
ufp_last_trip_value: str | None = None
MOUNT_DEVICE_CLASS_MAP = {
MountType.GARAGE: BinarySensorDeviceClass.GARAGE_DOOR,
MountType.WINDOW: BinarySensorDeviceClass.WINDOW,
MountType.DOOR: BinarySensorDeviceClass.DOOR,
}
CAMERA_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
ProtectBinaryEntityDescription(
key="doorbell",
name="Doorbell",
device_class=BinarySensorDeviceClass.OCCUPANCY,
icon="mdi:doorbell-video",
ufp_required_field="feature_flags.has_chime",
ufp_value="is_ringing",
ufp_last_trip_value="last_ring",
),
ProtectBinaryEntityDescription(
key="dark",
name="Is Dark",
icon="mdi:brightness-6",
ufp_value="is_dark",
),
)
LIGHT_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
ProtectBinaryEntityDescription(
key="dark",
name="Is Dark",
icon="mdi:brightness-6",
ufp_value="is_dark",
),
ProtectBinaryEntityDescription(
key="motion",
name="Motion Detected",
device_class=BinarySensorDeviceClass.MOTION,
ufp_value="is_pir_motion_detected",
ufp_last_trip_value="last_motion",
),
)
SENSE_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
ProtectBinaryEntityDescription(
key=_KEY_DOOR,
name="Contact",
device_class=BinarySensorDeviceClass.DOOR,
ufp_value="is_opened",
ufp_last_trip_value="open_status_changed_at",
ufp_enabled="is_contact_sensor_enabled",
),
ProtectBinaryEntityDescription(
key="battery_low",
name="Battery low",
device_class=BinarySensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
ufp_value="battery_status.is_low",
),
ProtectBinaryEntityDescription(
key="motion",
name="Motion Detected",
device_class=BinarySensorDeviceClass.MOTION,
ufp_value="is_motion_detected",
ufp_last_trip_value="motion_detected_at",
ufp_enabled="is_motion_sensor_enabled",
),
ProtectBinaryEntityDescription(
key="tampering",
name="Tampering Detected",
device_class=BinarySensorDeviceClass.TAMPER,
ufp_value="is_tampering_detected",
ufp_last_trip_value="tampering_detected_at",
),
)
MOTION_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
ProtectBinaryEntityDescription(
key="motion",
name="Motion",
device_class=BinarySensorDeviceClass.MOTION,
ufp_value="is_motion_detected",
ufp_last_trip_value="last_motion",
),
)
DISK_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
ProtectBinaryEntityDescription(
key="disk_health",
name="Disk {index} Health",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up binary sensors for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectDeviceBinarySensor,
camera_descs=CAMERA_SENSORS,
light_descs=LIGHT_SENSORS,
sense_descs=SENSE_SENSORS,
)
entities += _async_motion_entities(data)
entities += _async_nvr_entities(data)
async_add_entities(entities)
@callback
def _async_motion_entities(
data: ProtectData,
) -> list[ProtectDeviceEntity]:
entities: list[ProtectDeviceEntity] = []
for device in data.api.bootstrap.cameras.values():
for description in MOTION_SENSORS:
entities.append(ProtectEventBinarySensor(data, device, description))
_LOGGER.debug(
"Adding binary sensor entity %s for %s",
description.name,
device.name,
)
return entities
@callback
def _async_nvr_entities(
data: ProtectData,
) -> list[ProtectDeviceEntity]:
entities: list[ProtectDeviceEntity] = []
device = data.api.bootstrap.nvr
for index, _ in enumerate(device.system_info.storage.devices):
for description in DISK_SENSORS:
entities.append(
ProtectDiskBinarySensor(data, device, description, index=index)
)
_LOGGER.debug(
"Adding binary sensor entity %s",
(description.name or "{index}").format(index=index),
)
return entities
class ProtectDeviceBinarySensor(ProtectDeviceEntity, BinarySensorEntity):
"""A UniFi Protect Device Binary Sensor."""
device: Camera | Light | Sensor
entity_description: ProtectBinaryEntityDescription
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
if self.entity_description.key == "doorbell":
new_value = self.entity_description.get_ufp_value(self.device)
if new_value != self.is_on:
_LOGGER.debug(
"Changing doorbell sensor from %s to %s", self.is_on, new_value
)
self._attr_is_on = self.entity_description.get_ufp_value(self.device)
if self.entity_description.ufp_last_trip_value is not None:
last_trip = get_nested_attr(
self.device, self.entity_description.ufp_last_trip_value
)
attrs = self.extra_state_attributes or {}
self._attr_extra_state_attributes = {
**attrs,
ATTR_LAST_TRIP_TIME: last_trip,
}
# UP Sense can be any of the 3 contact sensor device classes
if self.entity_description.key == _KEY_DOOR and isinstance(self.device, Sensor):
self.entity_description.device_class = MOUNT_DEVICE_CLASS_MAP.get(
self.device.mount_type, BinarySensorDeviceClass.DOOR
)
class ProtectDiskBinarySensor(ProtectNVREntity, BinarySensorEntity):
"""A UniFi Protect NVR Disk Binary Sensor."""
entity_description: ProtectBinaryEntityDescription
def __init__(
self,
data: ProtectData,
device: NVR,
description: ProtectBinaryEntityDescription,
index: int,
) -> None:
"""Initialize the Binary Sensor."""
description = copy(description)
description.key = f"{description.key}_{index}"
description.name = (description.name or "{index}").format(index=index)
self._index = index
super().__init__(data, device, description)
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
disks = self.device.system_info.storage.devices
disk_available = len(disks) > self._index
self._attr_available = self._attr_available and disk_available
if disk_available:
disk = disks[self._index]
self._attr_is_on = not disk.healthy
self._attr_extra_state_attributes = {ATTR_MODEL: disk.model}
class ProtectEventBinarySensor(EventThumbnailMixin, ProtectDeviceBinarySensor):
"""A UniFi Protect Device Binary Sensor with access tokens."""
device: Camera
@callback
def _async_get_event(self) -> Event | None:
"""Get event from Protect device."""
event: Event | None = None
if self.device.is_motion_detected and self.device.last_motion_event is not None:
event = self.device.last_motion_event
return event
| Python | 275 | 31.072727 | 88 | /custom_components/unifiprotect/binary_sensor.py | 0.658844 | 0.658503 |
briis/unifiprotect | refs/heads/master | """UniFi Protect Integration services."""
from __future__ import annotations
import asyncio
import functools
from typing import Any
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import ATTR_DEVICE_ID
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.service import async_extract_referenced_entity_ids
from pydantic import ValidationError
from pyunifiprotect.api import ProtectApiClient
from pyunifiprotect.exceptions import BadRequest
import voluptuous as vol
from .const import ATTR_MESSAGE, DOMAIN
from .data import ProtectData
SERVICE_ADD_DOORBELL_TEXT = "add_doorbell_text"
SERVICE_REMOVE_DOORBELL_TEXT = "remove_doorbell_text"
SERVICE_SET_DEFAULT_DOORBELL_TEXT = "set_default_doorbell_text"
ALL_GLOBAL_SERIVCES = [
SERVICE_ADD_DOORBELL_TEXT,
SERVICE_REMOVE_DOORBELL_TEXT,
SERVICE_SET_DEFAULT_DOORBELL_TEXT,
]
DOORBELL_TEXT_SCHEMA = vol.All(
vol.Schema(
{
**cv.ENTITY_SERVICE_FIELDS,
vol.Required(ATTR_MESSAGE): cv.string,
},
),
cv.has_at_least_one_key(ATTR_DEVICE_ID),
)
def _async_all_ufp_instances(hass: HomeAssistant) -> list[ProtectApiClient]:
"""All active UFP instances."""
return [
data.api for data in hass.data[DOMAIN].values() if isinstance(data, ProtectData)
]
@callback
def _async_unifi_mac_from_hass(mac: str) -> str:
# MAC addresses in UFP are always caps
return mac.replace(":", "").upper()
@callback
def _async_get_macs_for_device(device_entry: dr.DeviceEntry) -> list[str]:
return [
_async_unifi_mac_from_hass(cval)
for ctype, cval in device_entry.connections
if ctype == dr.CONNECTION_NETWORK_MAC
]
@callback
def _async_get_ufp_instances(
hass: HomeAssistant, device_id: str
) -> tuple[dr.DeviceEntry, ProtectApiClient]:
device_registry = dr.async_get(hass)
if not (device_entry := device_registry.async_get(device_id)):
raise HomeAssistantError(f"No device found for device id: {device_id}")
if device_entry.via_device_id is not None:
return _async_get_ufp_instances(hass, device_entry.via_device_id)
macs = _async_get_macs_for_device(device_entry)
ufp_instances = [
i for i in _async_all_ufp_instances(hass) if i.bootstrap.nvr.mac in macs
]
if not ufp_instances:
# should not be possible unless user manually enters a bad device ID
raise HomeAssistantError( # pragma: no cover
f"No UniFi Protect NVR found for device ID: {device_id}"
)
return device_entry, ufp_instances[0]
@callback
def _async_get_protect_from_call(
hass: HomeAssistant, call: ServiceCall
) -> list[tuple[dr.DeviceEntry, ProtectApiClient]]:
referenced = async_extract_referenced_entity_ids(hass, call)
instances: list[tuple[dr.DeviceEntry, ProtectApiClient]] = []
for device_id in referenced.referenced_devices:
instances.append(_async_get_ufp_instances(hass, device_id))
return instances
async def _async_call_nvr(
instances: list[tuple[dr.DeviceEntry, ProtectApiClient]],
method: str,
*args: Any,
**kwargs: Any,
) -> None:
try:
await asyncio.gather(
*(getattr(i.bootstrap.nvr, method)(*args, **kwargs) for _, i in instances)
)
except (BadRequest, ValidationError) as err:
raise HomeAssistantError(str(err)) from err
async def add_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:
"""Add a custom doorbell text message."""
message: str = call.data[ATTR_MESSAGE]
instances = _async_get_protect_from_call(hass, call)
await _async_call_nvr(instances, "add_custom_doorbell_message", message)
async def remove_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:
"""Remove a custom doorbell text message."""
message: str = call.data[ATTR_MESSAGE]
instances = _async_get_protect_from_call(hass, call)
await _async_call_nvr(instances, "remove_custom_doorbell_message", message)
async def set_default_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:
"""Set the default doorbell text message."""
message: str = call.data[ATTR_MESSAGE]
instances = _async_get_protect_from_call(hass, call)
await _async_call_nvr(instances, "set_default_doorbell_message", message)
def async_setup_services(hass: HomeAssistant) -> None:
"""Set up the global UniFi Protect services."""
services = [
(
SERVICE_ADD_DOORBELL_TEXT,
functools.partial(add_doorbell_text, hass),
DOORBELL_TEXT_SCHEMA,
),
(
SERVICE_REMOVE_DOORBELL_TEXT,
functools.partial(remove_doorbell_text, hass),
DOORBELL_TEXT_SCHEMA,
),
(
SERVICE_SET_DEFAULT_DOORBELL_TEXT,
functools.partial(set_default_doorbell_text, hass),
DOORBELL_TEXT_SCHEMA,
),
]
for name, method, schema in services:
if hass.services.has_service(DOMAIN, name):
continue
hass.services.async_register(DOMAIN, name, method, schema=schema)
def async_cleanup_services(hass: HomeAssistant) -> None:
"""Cleanup global UniFi Protect services (if all config entries unloaded)."""
loaded_entries = [
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.state == ConfigEntryState.LOADED
]
if len(loaded_entries) == 1:
for name in ALL_GLOBAL_SERIVCES:
hass.services.async_remove(DOMAIN, name)
| Python | 172 | 32.180233 | 88 | /custom_components/unifiprotect/services.py | 0.684423 | 0.684072 |
briis/unifiprotect | refs/heads/master | """Support for Ubiquiti's UniFi Protect NVR."""
from __future__ import annotations
import logging
from homeassistant.components.button import ButtonDeviceClass, ButtonEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from pyunifiprotect.data.base import ProtectAdoptableDeviceModel
from .const import DEVICES_THAT_ADOPT, DOMAIN
from .data import ProtectData
from .entity import ProtectDeviceEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Discover devices on a UniFi Protect NVR."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
ProtectButton(
data,
device,
)
for device in data.get_by_types(DEVICES_THAT_ADOPT)
]
)
class ProtectButton(ProtectDeviceEntity, ButtonEntity):
"""A Ubiquiti UniFi Protect Reboot button."""
_attr_entity_registry_enabled_default = False
_attr_device_class = ButtonDeviceClass.RESTART
def __init__(
self,
data: ProtectData,
device: ProtectAdoptableDeviceModel,
) -> None:
"""Initialize an UniFi camera."""
super().__init__(data, device)
self._attr_name = f"{self.device.name} Reboot Device"
async def async_press(self) -> None:
"""Press the button."""
_LOGGER.debug("Rebooting %s with id %s", self.device.model, self.device.id)
await self.device.reboot()
| Python | 57 | 28.456141 | 83 | /custom_components/unifiprotect/button.py | 0.679571 | 0.679571 |
briis/unifiprotect | refs/heads/master | """UniFi Protect Integration utils."""
from __future__ import annotations
from enum import Enum
from typing import Any
def get_nested_attr(obj: Any, attr: str) -> Any:
"""Fetch a nested attribute."""
attrs = attr.split(".")
value = obj
for key in attrs:
if not hasattr(value, key):
return None
value = getattr(value, key)
if isinstance(value, Enum):
value = value.value
return value
| Python | 21 | 20.380953 | 48 | /custom_components/unifiprotect/utils.py | 0.616926 | 0.616926 |
briis/unifiprotect | refs/heads/master | """The unifiprotect integration models."""
from __future__ import annotations
from collections.abc import Callable, Coroutine
from dataclasses import dataclass
import logging
from typing import Any
from homeassistant.helpers.entity import EntityDescription
from pyunifiprotect.data import NVR, ProtectAdoptableDeviceModel
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
@dataclass
class ProtectRequiredKeysMixin:
"""Mixin for required keys."""
ufp_required_field: str | None = None
ufp_value: str | None = None
ufp_value_fn: Callable[[ProtectAdoptableDeviceModel | NVR], Any] | None = None
ufp_enabled: str | None = None
def get_ufp_value(self, obj: ProtectAdoptableDeviceModel | NVR) -> Any:
"""Return value from UniFi Protect device."""
if self.ufp_value is not None:
return get_nested_attr(obj, self.ufp_value)
if self.ufp_value_fn is not None:
return self.ufp_value_fn(obj)
# reminder for future that one is required
raise RuntimeError( # pragma: no cover
"`ufp_value` or `ufp_value_fn` is required"
)
def get_ufp_enabled(self, obj: ProtectAdoptableDeviceModel | NVR) -> bool:
"""Return value from UniFi Protect device."""
if self.ufp_enabled is not None:
return bool(get_nested_attr(obj, self.ufp_enabled))
return True
@dataclass
class ProtectSetableKeysMixin(ProtectRequiredKeysMixin):
"""Mixin to for settable values."""
ufp_set_method: str | None = None
ufp_set_method_fn: Callable[
[ProtectAdoptableDeviceModel, Any], Coroutine[Any, Any, None]
] | None = None
async def ufp_set(self, obj: ProtectAdoptableDeviceModel, value: Any) -> None:
"""Set value for UniFi Protect device."""
assert isinstance(self, EntityDescription)
_LOGGER.debug("Setting %s to %s for %s", self.name, value, obj.name)
if self.ufp_set_method is not None:
await getattr(obj, self.ufp_set_method)(value)
elif self.ufp_set_method_fn is not None:
await self.ufp_set_method_fn(obj, value)
| Python | 61 | 34.19672 | 82 | /custom_components/unifiprotect/models.py | 0.673498 | 0.673498 |
marina-kantar/Python-for-Everybody | refs/heads/master | import re
name = input ('Enter file name: ')
if len(name) <= 1 : name = 'mbox-short.txt'
y = list()
handle = open(name)
for line in handle :
line = line.rstrip()
y= y+ re.findall('^From: (\S+@\S+)', line)
if len(y) < 1 : continue
print(y)
| Python | 10 | 24.1 | 46 | /mail_regex.py | 0.581673 | 0.573705 |
marina-kantar/Python-for-Everybody | refs/heads/master | import re
name = input('Enter file name: ')
if len(name) <= 1 : name = 'mbox-short.txt'
handle = open(name)
for line in handle:
line = line.rstrip()
if re.search ('From: ', line):
print(line)
| Python | 8 | 25 | 43 | /first_regex.py | 0.605769 | 0.600962 |
marina-kantar/Python-for-Everybody | refs/heads/master | # Write a Python program to display the content of robot.txt for en.wikipedia.org.
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
html= urllib.request.urlopen('http://en.wikipedia.org/robots.txt', context=ctx).read()
soup =BeautifulSoup(html, 'html.parser')
print(soup)
| Python | 14 | 28.428572 | 86 | /open_text_from_page.py | 0.767554 | 0.765133 |
marina-kantar/Python-for-Everybody | refs/heads/master | import sqlite3
conn = sqlite3.connect('music.sqlite')
cur = conn.cursor ()
cur.execute('DROP TABLE IF EXISTS Tracks')
cur.execute('CREATE TABLE Tracks (title TEXT, plays INTEGER)')
cur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)', ('My Way', 15))
cur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)', ('Thunderstone', 20))
conn.commit()
print('Tracks:')
cur.execute('SELECT title, plays FROM Tracks')
for row in cur :
print(row)
cur.execute('DELETE FROM Tracks WHERE plays < 19')
conn.commit()
print('New Tracks:')
cur.execute('SELECT title, plays FROM Tracks')
for row in cur :
print(row)
conn.close() | Python | 24 | 25.583334 | 84 | /first_db.py | 0.695447 | 0.682889 |
marina-kantar/Python-for-Everybody | refs/heads/master | import re
name = input ('Enter file name: ')
if len(name) <=1 : name = 'mbox-short.txt'
handle = open(name)
y = list()
for line in handle :
line = line.rstrip()
y = y+ re.findall ('^From .+ ([0-9]+:[0-9]+:[0-9]+)', line)
if len(y) < 1 : continue
print(y)
| Python | 10 | 25.700001 | 63 | /time_regex.py | 0.561798 | 0.531835 |
marina-kantar/Python-for-Everybody | refs/heads/master | import re
name = input('Enter file name: ')
if len(name) <= 1 : name = 'exp.txt'
handle = open(name)
for line in handle :
line = line.rstrip()
y = re.findall('[0-9][0-9][0-9][0-9]* [0-9][0-9][0-9][0-9]* [0-9][0-9][0-9][0-9]*', line)
if len(y) < 1 : continue
print(y) | Python | 9 | 30.444445 | 93 | /phone_regex.py | 0.539007 | 0.446809 |
marina-kantar/Python-for-Everybody | refs/heads/master | import sqlite3
import re
conn = sqlite3.connect('domsql.sqlite')
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS Counts')
cur.execute('CREATE TABLE Counts (org TEXT, count INTEGER)')
fname = input('Enter file name: ')
if len(fname)< 1 : fname = 'mbox.txt'
handle = open(fname)
for line in handle :
line = line.rstrip()
dom = re.findall('^From:.+@([a-z.]+)', line)
if len(dom) < 1 : continue
org = dom[0]
cur.execute('SELECT count FROM Counts WHERE org= ?', (org,))
row = cur.fetchone()
if row is None :
cur.execute('INSERT INTO Counts (org, count) VALUES (?, 1)', (org,))
else :
cur.execute('UPDATE Counts SET count = count + 1 WHERE org = ?', (org,))
conn.commit()
# ogranici na 10
sqlstr = 'SELECT org, count FROM Counts ORDER BY count'
for row in cur.execute(sqlstr):
print(str(row[0]), row[1])
conn.commit()
cur.close() | Python | 33 | 26 | 80 | /sql_assignment.py | 0.632584 | 0.620225 |
marina-kantar/Python-for-Everybody | refs/heads/master | #Write a Python program to get the number of datasets currently listed on data.gov.
from bs4 import BeautifulSoup
import requests
source = requests.get('https://www.data.gov/').text
soup = BeautifulSoup(source, 'html.parser')
#print(soup.prettify)
x = soup.small.a.text
#print(x)
l =x.split()
print('Number of datasets currently listed on data.gov is: ', l[0])
| Python | 14 | 25.071428 | 83 | /number_of_datasets.py | 0.714286 | 0.708995 |
marina-kantar/Python-for-Everybody | refs/heads/master | import xml.etree.ElementTree as ET
data = '''
<person>
<name> Chuck </name>
<phone type="init">
+1 73 4465 789
</phone>
<email hide="yes"/>
</person>'''
tree = ET.fromstring(data)
print('Name: ', tree.find('name').text)
print('Atrr: ', tree.find('email').get('hide')) | Python | 14 | 22 | 47 | /xml1.py | 0.535826 | 0.504673 |
marina-kantar/Python-for-Everybody | refs/heads/master | import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input ('Enter - ')
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
suma = 0
count =0
# sve linkove
tags = soup ('span')
for tag in tags :
#print(tag.contents[0])
suma = suma + int(tag.contents[0])
count = count + 1
print('Count ', count)
print('Sum', suma)
| Python | 21 | 23.333334 | 54 | /assignment_beaut_soup.py | 0.694716 | 0.682975 |
marina-kantar/Python-for-Everybody | refs/heads/master | # To run this, download the BeautifulSoup zip file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter - ')
count = input ('Enter count: ')
if int(count) < 1 :
print('Error')
count = 7
count = int(count)
position = input('Enter position: ')
if int(position) < 1 :
print('Error')
position = 18
position = int(position)
# Retrieve all of the anchor tags
for i in range(count):
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
tags = soup('a')
l = list()
for tag in tags:
x= tag.get('href', None)
l.append(x)
url = l[position-1]
print(tags[position-1].contents[0])
| Python | 39 | 23.179487 | 58 | /assignment2_beautiful_soup.py | 0.669141 | 0.656416 |
marina-kantar/Python-for-Everybody | refs/heads/master | import re
y = list()
zbir = 0
name = input('Enter file name: ')
if len(name) <=1 : name = 'regex_sum_468299.txt'
handle = open(name)
for line in handle :
line = line.rstrip()
y=y+ re.findall('[0-9]+', line)
#print(y)
for i in y :
zbir = zbir + int(i)
print(zbir)
| Python | 13 | 20.153847 | 48 | /assigment-regulare.py | 0.6 | 0.563636 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.