blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
952b3d75732ae11da3e647c86bc4e5258444150e | 242b8f14ed221979a1211c9dfea9750d1dc847b3 | /app/__ini__.py | 6f184298c8bb73efc0785b548027da9dea09d546 | [
"MIT"
] | permissive | Jonathpc/company-flaskapp | f7a221f1409a0596fffa36aab0e99336190b1615 | 1e0e5631e8131f4f4a58d1810fbe5a0e44e2e148 | refs/heads/master | 2022-11-13T05:18:47.195236 | 2020-07-15T02:42:08 | 2020-07-15T02:42:08 | 279,735,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | from flask import Flask
app = Flask(__name__)
if app.config["ENV"] == "production":
app.config.from_object("config.ProductionConfig")
elif app.config["ENV"] == "development":
app.config.from_object("config.DevelopmentConfig")
else:
app.config.from_object("config.ProductionConfig")
from app import main
from app import forms
| [
"noreply@github.com"
] | Jonathpc.noreply@github.com |
a673490634f6d5ccbf0aab99ff19ac353dd7567a | c21faf85627b1cfd96494aac73cc40e5f11ebb46 | /results/test_188.py | 0b34ac896ae50c37329c5d2aad8aae65d061d40a | [] | no_license | ekkya/Cyclomatic-Complexity | d02c61e009087e7d51738e60605875741532b878 | 172db2efdd974f5abad964e335552aec974b47cb | refs/heads/master | 2021-08-28T17:13:14.718314 | 2017-12-12T22:04:13 | 2017-12-12T22:04:13 | 112,042,202 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102,256 | py | """
Author: Ankit Agarwal (ankit167)
Usage: python google.py <keyword>
Description: Script googles the keyword and opens
top 5 (max) search results in separate
tabs in the browser
Version: 1.0
"""
import webbrowser, sys, pyperclip, requests, bs4
def main():
    """Google the given keyword and open the top (max 5) results in browser tabs."""
    # Keyword comes from the command line; fall back to the clipboard.
    if len(sys.argv) > 1:
        keyword = ' '.join(sys.argv[1:])
    else:
        # if no keyword is entered, the script would search for the keyword
        # copied in the clipboard
        keyword = pyperclip.paste()

    res = requests.get('http://google.com/search?q=' + keyword)
    res.raise_for_status()

    # Scrape the result links and open at most five of them.
    soup = bs4.BeautifulSoup(res.text)
    links = soup.select('.r a')
    for link in links[:5]:
        webbrowser.open('http://google.com' + link.get('href'))
if __name__ == '__main__':
main()"""Get the number of each character in any given text.
Inputs:
A txt file -- You will be asked for an input file. Simply input the name
of the txt file in which you have the desired text.
"""
import pprint
import collections
def main():
    """Count each character (upper-cased) in a user-named text file and print the tally."""
    filename = input('File Name: ')
    with open(filename, 'r') as handle:
        # Counter maps every upper-cased character to its frequency.
        counts = collections.Counter(handle.read().upper())
    # pformat gives a readable representation of the Counter.
    print(pprint.pformat(counts))
if __name__ == "__main__":
main()# Script Name : pscheck.py
# Author : Craig Richards
# Created : 19th December 2011
# Last Modified : 17th June 2013
# Version : 1.1
# Modifications : 1.1 - 17/06/13 - CR - Changed to functions, and check os before running the program
# Description : Process check on Nix boxes, diplsay formatted output from ps command
import commands, os, string
def ps():
    # Prompt for a program name and display formatted details of its process
    # as reported by `ps -f | grep <name>`. Unix-only; uses the Python 2
    # `commands` and `string` modules (both removed in Python 3).
    program = raw_input("Enter the name of the program to check: ")
    try:
        #perform a ps command and assign results to a list
        output = commands.getoutput("ps -f|grep " + program)
        proginfo = string.split(output)
        #display results (fields follow `ps -f` column order)
        print "\n\
Full path:\t\t", proginfo[5], "\n\
Owner:\t\t\t", proginfo[0], "\n\
Process ID:\t\t", proginfo[1], "\n\
Parent process ID:\t", proginfo[2], "\n\
Time started:\t\t", proginfo[4]
    except:
        # NOTE(review): bare except hides the real error (e.g. IndexError when
        # the program is not running) -- consider narrowing.
        print "There was a problem with the program."
def main():
    # Only POSIX systems are supported (the `ps` helper shells out to Unix
    # `ps`); Windows users get an explanatory message instead.
    if os.name == "posix": # Unix/Linux/MacOS/BSD/etc
        ps() # Call the function
    elif os.name in ("nt", "dos", "ce"): # if the OS is windows
        print "You need to be on Linux or Unix to run this"
if __name__ == '__main__':
main()from bs4 import BeautifulSoup
import datetime
import mechanize
import urllib2
# Create a Browser
b = mechanize.Browser()
# Disable loading robots.txt
b.set_handle_robots(False)
b.addheaders = [('User-agent',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98;)')]
# Navigate
b.open('http://cbseresults.nic.in/jee/jee_2015.htm')
# Choose a form
b.select_form(nr=0)
# Fill it out
b['regno'] = '37000304'
currentdate = datetime.date(1997,3,10)
enddate = datetime.date(1998,4,1)
while currentdate <= enddate:
ct=0
#print currentdate
yyyymmdd = currentdate.strftime("%Y/%m/%d")
ddmmyyyy = yyyymmdd[8:] + "/" + yyyymmdd[5:7] + "/" +yyyymmdd[:4]
print(ddmmyyyy)
b.open('http://cbseresults.nic.in/jee/jee_2015.htm')
b.select_form(nr=0)
b['regno'] = '37000304'
b['dob'] = ddmmyyyy
fd = b.submit()
#print(fd.read())
soup = BeautifulSoup(fd.read(),'html.parser')
for writ in soup.find_all('table'):
ct = ct + 1;
#print (ct)
if ct == 6:
print("---fail---")
else:
print("--true--")
break;
currentdate += datetime.timedelta(days=1)
#print fd.read()# Script Name : new_script.py
# Author : Craig Richards
# Created : 20th November 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will create a new basic template for a new script
import os # Load the library module
import sys # Load the library module
import datetime # Load the library module
text = '''You need to pass an argument for the new script you want to create, followed by the script name. You can use
-python : Python Script
-bash : Bash Script
-ksh : Korn Shell Script
-sql : SQL Script'''
if len(sys.argv) < 3:
print text
sys.exit()
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv:
print text
sys.exit()
else:
if '-python' in sys.argv[1]:
config_file = "python.cfg"
extension = ".py"
elif '-bash' in sys.argv[1]:
config_file = "bash.cfg"
extension = ".bash"
elif '-ksh' in sys.argv[1]:
config_file = "ksh.cfg"
extension = ".ksh"
elif '-sql' in sys.argv[1]:
config_file = "sql.cfg"
extension = ".sql"
else:
print 'Unknown option - ' + text
sys.exit()
confdir = os.getenv("my_config")
scripts = os.getenv("scripts")
dev_dir = "Development"
newfile = sys.argv[2]
output_file = (newfile + extension)
outputdir = os.path.join(scripts,dev_dir)
script = os.path.join(outputdir, output_file)
input_file = os.path.join(confdir,config_file)
old_text = " Script Name : "
new_text = (" Script Name : " + output_file)
if not(os.path.exists(outputdir)):
os.mkdir(outputdir)
newscript = open(script, 'w')
input = open(input_file, 'r')
today = datetime.date.today()
old_date = " Created :"
new_date = (" Created : " + today.strftime("%d %B %Y"))
for line in input:
line = line.replace(old_text, new_text)
line = line.replace(old_date, new_date)
newscript.write(line)
# Script Name : osinfo.py
# Authors : {'geekcomputers': 'Craig Richards', 'dmahugh': 'Doug Mahugh','rutvik1010':'Rutvik Narayana Nadimpally','y12uc231': 'Satyapriya Krishna', 'minto4644':'Mohit Kumar'}
# Created : 5th April 2012
# Last Modified : July 19 2016
# Version : 1.0
# Modification 1 : Changed the profile to list again. Order is important. Everytime we run script we don't want to see different ordering.
# Modification 2 : Fixed the AttributeError checking for all properties. Using hasttr().
# Modification 3 : Removed ': ' from properties inside profile.
# Description : Displays some information about the OS you are running this script on
import platform as pl
profile = [
'architecture',
'linux_distribution',
'mac_ver',
'machine',
'node',
'platform',
'processor',
'python_build',
'python_compiler',
'python_version',
'release',
'system',
'uname',
'version',
]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
for key in profile:
if hasattr(pl, key):
print(key + bcolors.BOLD + ": " + str(getattr(pl, key)()) + bcolors.ENDC)
# author:zhangshuyx@gmail.com
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import os
# define the result filename
resultfile = 'result.csv'
# the merge func
def merge():
    """Merge every .csv file in the current directory into the result file.

    Appends each input file's contents (plus a trailing newline) to the
    module-level ``resultfile``, in os.listdir() order, skipping the result
    file itself.
    """
    # str.endswith() is safe for names with no dot (the original
    # f.split('.')[1] raised IndexError for extensionless names) and also
    # matches multi-dot names such as "a.b.csv".
    csvfiles = [f for f in os.listdir('.')
                if f != resultfile and f.endswith('.csv')]
    # open file to write
    with open(resultfile, 'w') as writefile:
        for csvfile in csvfiles:
            with open(csvfile) as readfile:
                print('File {} readed.'.format(csvfile))
                # do the read and write
                writefile.write(readfile.read() + '\n')
    print('\nFile {} wrote.'.format(resultfile))
# the main program
if __name__ == '__main__':
merge()import mechanize
import re
import urllib2
from random import *
br=mechanize.Browser()
br.addheaders = [('User-Agent','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36')]
br.set_handle_robots(False)
#For page exploration
page=raw_input('Enter Page No:')
#print type(page)
p=urllib2.Request('https://www.google.co.in/search?q=gate+psu+2017+ext:pdf&start='+page)
ht=br.open(p)
text='<cite\sclass="_Rm">(.+?)</cite>'
patt=re.compile(text)
h=ht.read()
urls=re.findall(patt,h)
int=0
while int<len(urls):
urls[int]=urls[int].replace("<b>","")
urls[int]=urls[int].replace("</b>","")
int=int+1
print urls
for url in urls:
try:
temp=url.split("/")
q=temp[len(temp)-1]
if "http" in url:
r=urllib2.urlopen(url)
else:
r=urllib2.urlopen("http://"+url)
file=open('psu2'+q+'.pdf','wb')
file.write(r.read())
file.close()
print "Done"
except urllib2.URLError as e:
print "Sorry there exists a problem with this URL Please Download this Manually "+str(url)
# Script Name : logs.py
# Author : Craig Richards
# Created : 13th October 2011
# Last Modified : 14 February 2016
# Version : 1.2
#
# Modifications : 1.1 - Added the variable zip_program so you can set it for the zip program on whichever OS, so to run on a different OS just change the locations of these two variables.
# : 1.2 - Tidy up comments and syntax
#
# Description : This script will search for all *.log files in the given directory, zip them using the program you specify and then date stamp them
import os # Load the Library Module
from time import strftime # Load just the strftime Module from Time
logsdir = "c:\puttylogs" # Set the Variable logsdir
zip_program = "zip.exe" # Set the Variable zip_program - 1.1
for files in os.listdir(logsdir): # Find all the files in the directory
if files.endswith(".log"): # Check to ensure the files in the directory end in .log
files1 = files + "." + strftime("%Y-%m-%d") + ".zip" # Create the Variable files1, this is the files in the directory, then we add a suffix with the date and the zip extension
os.chdir(logsdir) # Change directory to the logsdir
os.system(zip_program + " " + files1 +" "+ files) # Zip the logs into dated zip files for each server. - 1.1
os.remove(files) # Remove the original log files"""
Author: Shreyas Daniel (shreydan)
Install: tweepy - "pip install tweepy"
API: Create a twitter app "apps.twitter.com" to get your OAuth requirements.
Version: 1.0
Tweet text and pics directly from the terminal.
"""
import tweepy, os
def getStatus():
    """Read lines from stdin until an empty line; return them joined by newlines."""
    collected = []
    while True:
        entry = raw_input()
        if not entry:
            # A blank line terminates input.
            break
        collected.append(entry)
    return '\n'.join(collected)
def tweetthis(type):
    # Post either a plain text tweet or a picture with a caption, depending on
    # `type` ("text" or "pic"). Relies on the module globals `api` and `user`
    # created by initialize().
    # NOTE(review): the parameter shadows the builtin `type`.
    if type == "text":
        print "Enter your tweet "+user.name
        tweet = getStatus()
        try:
            api.update_status(tweet)
        except Exception as e:
            print e
            return
    elif type == "pic":
        print "Enter pic path "+user.name
        pic = os.path.abspath(raw_input())
        print "Enter status "+user.name
        title = getStatus()
        try:
            api.update_with_media(pic, status=title)
        except Exception as e:
            print e
            return
    print "\n\nDONE!!"
def initialize():
    # Authenticate with Twitter via OAuth and publish the handles as module
    # globals used by tweetthis(). The four credential strings must be filled
    # in from apps.twitter.com before running.
    global api, auth, user
    ck = "here" # consumer key
    cks = "here" # consumer key SECRET
    at = "here" # access token
    ats = "here" # access token SECRET
    auth = tweepy.OAuthHandler(ck,cks)
    auth.set_access_token(at,ats)
    api = tweepy.API(auth)
    user = api.me()  # the authenticated account
def main():
    # Ask whether to tweet text (1) or a picture (2); any other choice
    # restarts the prompt recursively.
    doit = int(raw_input("\n1. text\n2. picture\n"))
    initialize()
    if doit == 1:
        tweetthis("text")
    elif doit == 2:
        tweetthis("pic")
    else:
        print "OK, Let's try again!"
        main()
main()# Script Name : dice.py
# Author : Craig Richards
# Created : 05th February 2017
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will randomly select two numbers, like throwing dice, you can change the sides of the dice if you wish
import random
class Die(object):
    """A die with a configurable number of sides (default 6)."""

    def __init__(self):
        # A standard die has 6 sides.
        self.sides = 6

    def set_sides(self, sides_change):
        """Change the number of sides; values below 4 are rejected.

        Fixes the original bug: the method read ``self.sides_change``, an
        attribute that never exists, instead of the ``sides_change``
        parameter, so every call raised AttributeError.
        """
        if sides_change >= 4:
            self.sides = sides_change
            print("change sides!")
        else:
            print("wrong sides!")

    def roll(self):
        """Return a uniformly random integer between 1 and self.sides inclusive."""
        return random.randint(1, self.sides)
d = Die()
d1 = Die()
d.set_sides(4)
d1.set_sides(4)
print (d.roll(), d1.roll())from sys import argv # import argment variable
script, rows, columns = argv #define rows and columns for the table and assign them to the argument variable
def table(rows, columns):
    # Print a multiplication table: a tab-separated header row 1..rows, then
    # one line per value 1..columns with the products. Python 2 print
    # statements; the trailing commas suppress newlines within a row.
    for i in range(1, int(rows) + 1 ): #it's safe to assume that the user would mean 12 rows when they provide 12 as an argument, b'coz 12 will produce 11 rows
        print "\t", i,
    print "\n\n"
    for i in range(1, int(columns) + 1 ):
        print i,
        for j in range(1, int(rows) + 1 ):
            print "\t",i*j,
        print "\n\n"
table(rows, columns)import os
import sys
import shutil
Music = ['MP3', 'WAV', 'WMA', 'MKA', 'AAC', 'MID', 'RA', 'RAM', 'RM', 'OGG']
Codes = ['CPP', 'RB', 'PY', 'HTML', 'CSS', 'JS']
Compressed = ['RAR', 'JAR', 'ZIP', 'TAR', 'MAR', 'ISO', 'LZ', '7ZIP', 'TGZ', 'GZ', 'BZ2']
Documents = ['DOC', 'DOCX', 'PPT', 'PPTX', 'PAGES', 'PDF', 'ODT', 'ODP', 'XLSX', 'XLS', 'ODS', 'TXT', 'IN', 'OUT', 'MD']
Images = ['JPG', 'JPEG', 'GIF', 'PNG', 'SVG']
Executables = ['LNK','DEB', 'EXE', 'SH', 'BUNDLE']
Video = ['FLV', 'WMV', 'MOV', 'MP4', 'MPEG', '3GP', 'MKV','AVI']
def getVideo():
    """Return the recognised video file extensions."""
    return Video
def getMusic():
    """Return the recognised music/audio file extensions."""
    return Music
def getCodes():
    """Return the recognised source-code file extensions."""
    return Codes
def getCompressed():
    """Return the recognised archive file extensions."""
    return Compressed
def getImages():
    """Return the recognised image file extensions."""
    return Images
def getExe():
    """Return the recognised executable file extensions."""
    return Executables
def getDoc():
    """Return the recognised document file extensions."""
    return Documents
# taking the location of the Folder to Arrange
try:
arrange_dir = str(sys.argv[1])
except IndexError:
arrange_dir = str(raw_input("Enter the Path of directory: "))
# when we make a folder that already exist then WindowsError happen
# changing directory may give WindowsError
def change(direc):
    # Try to chdir into `direc`; on failure keep prompting (recursively)
    # until a valid path is given. WindowsError makes this script
    # Windows/Python 2 specific.
    try:
        os.chdir(direc)
        #print "path changed"
    except WindowsError:
        print "Error! Cannot change the Directory"
        print "Enter a valid directory!"
        direc = str(raw_input("Enter the Path of directory: "))
        change(direc)
change(arrange_dir)
# now we will get the list of all the directories in the folder
list_dir = os.listdir(os.getcwd())
#print list_dir
#check_Folder = False # for organising Folders
check_Music = False
check_Video = False
check_Exe = False
check_Code = False
check_Compressed = False
check_Img = False
check_Docs = False
main_names = ['Video','Folders','Images','Documents','Music','Codes','Executables','Compressed']
for name in list_dir:
#print name.split('.')
if len(name.split('.')) == 2:
if name.split('.')[1].upper() in getVideo():
try:
os.mkdir("Video")
print "Video Folder Created"
except WindowsError:
print "Images Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Video"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
#print "It is a folder"
elif name.split('.')[1].upper() in getImages():
try:
os.mkdir("Images")
print "Images Folder Created"
except WindowsError:
print "Images Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Images"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
#print "It is a folder"
elif name.split('.')[1].upper() in getMusic():
try:
os.mkdir("Music")
print "Music Folder Created"
except WindowsError:
print "Music Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Music"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
#print "It is a folder"
elif name.split('.')[1].upper() in getDoc():
try:
os.mkdir("Documents")
print "Documents Folder Created"
except WindowsError:
print "Documents Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Documents"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
#print "It is a folder"
elif name.split('.')[1].upper() in getCodes():
try:
os.mkdir("Codes")
print "Codes Folder Created"
except WindowsError:
print "Codes Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Codes"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
#print "It is a folder"
elif name.split('.')[1].upper() in getCompressed():
try:
os.mkdir("Compressed")
print "Compressed Folder Created"
except WindowsError:
print "Compressed Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Compressed"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
#print "It is a folder"
elif name.split('.')[1].upper() in getExe():
try:
os.mkdir("Executables")
print "Executables Folder Created"
except WindowsError:
print "Executables Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Executables"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
#print "It is a folder"
else:
if name not in main_names:
try:
os.mkdir("Folders")
print "Folders Folder Created"
except WindowsError:
print "Folders Folder Exists"
old_dir = arrange_dir + "\\" + name
new_dir = arrange_dir + "\Folders"
os.chdir(new_dir)
shutil.move(old_dir, new_dir + "\\" + name)
print os.getcwd()
os.chdir(arrange_dir)
print "Done Arranging Files and Folder in your specified directory""""
Written by: Shreyas Daniel - github.com/shreydan
Written on: 26 April 2017
Description: Download latest XKCD Comic with this program.
NOTE:
if this script is launched from the cloned repo, a new folder is created.
Please move the file to another directory to avoid messing with the folder structure.
"""
import requests
from lxml import html
import urllib.request
import os
def main():
    """Download the latest XKCD comic into a ./comics/ directory."""
    # Fetch the XKCD front page; bail out on any network problem.
    try:
        page = requests.get("https://www.xkcd.com")
    except requests.exceptions.RequestException as e:
        print (e)
        exit()

    # Parse the page and resolve the comic image's absolute URL.
    tree = html.fromstring(page.content)
    image_src = "https:" + str(tree.xpath(".//*[@id='comic']/img/@src")[0])

    # Comic name is the last path component of the URL, minus its extension.
    comic_name = image_src.split('/')[-1][:-4]

    # Make sure the save directory exists before downloading.
    comic_location = os.getcwd() + '/comics/'
    if not os.path.exists(comic_location):
        os.makedirs(comic_location)

    # Download the image to its final location (directory + comic name).
    urllib.request.urlretrieve(image_src, comic_location + comic_name)
if __name__ == "__main__":
main()# Script Name : check_for_sqlite_files.py
# Author : Craig Richards
# Created : 07 June 2013
# Last Modified : 14 February 2016
# Version : 1.0.1
# Modifications : 1.0.1 - Remove unecessary line and variable on Line 21
# Description : Scans directories to check if there are any sqlite files in there
from __future__ import print_function
import os
def isSQLite3(filename):
    """Return True if *filename* is an SQLite 3 database file.

    A file qualifies when it exists, is at least 100 bytes long (the size of
    the SQLite header), and starts with the 16-byte magic string
    ``SQLite format 3\\x00``.
    """
    from os.path import isfile, getsize

    if not isfile(filename):
        return False
    if getsize(filename) < 100:  # SQLite database file header is 100 bytes
        return False
    # Context manager guarantees the handle is closed even on error.
    with open(filename, 'rb') as fd:
        header = fd.read(100)
    # Compare as bytes: the original compared against a str literal, which is
    # always False on Python 3 (the file is opened in binary mode).
    return header[:16] == b'SQLite format 3\x00'
log=open('sqlite_audit.txt','w')
for r,d,f in os.walk(r'.'):
for files in f:
if isSQLite3(files):
print(files)
print("[+] '%s' **** is a SQLITE database file **** " % os.path.join(r,files))
log.write("[+] '%s' **** is a SQLITE database file **** " % files+'\n')
else:
log.write("[-] '%s' is NOT a sqlite database file" % os.path.join(r,files)+'\n')
log.write("[-] '%s' is NOT a sqlite database file" % files+'\n')# Script Name : create_dir_if_not_there.py
# Author : Craig Richards
# Created : 09th January 2012
# Last Modified : 22nd October 2015
# Version : 1.0.1
# Modifications : Added exceptions
# : 1.0.1 Tidy up comments and syntax
#
# Description : Checks to see if a directory exists in the users home directory, if not then create it
import os # Import the OS module
MESSAGE = 'The directory already exists.'
TESTDIR = 'testdir'
try:
home = os.path.expanduser("~") # Set the variable home by expanding the user's set home directory
print(home) # Print the location
if not os.path.exists(os.path.join(home, TESTDIR)): # os.path.join() for making a full path safely
os.makedirs(os.path.join(home, TESTDIR)) # If not create the directory, inside their home directory
else:
print(MESSAGE)
except Exception as e:
print(e)
# Script Name : move_files_over_x_days.py
# Author : Craig Richards
# Created : 8th December 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will move all the files from the src directory that are over 240 days old to the destination directory.
import shutil
import sys
import time
import os
src = 'u:\\test' # Set the source directory
dst = 'c:\\test' # Set the destination directory
now = time.time() # Get the current time
for f in os.listdir(src): # Loop through all the files in the source directory
if os.stat(f).st_mtime < now - 240 * 86400: # Work out how old they are, if they are older than 240 days old
if os.path.isfile(f): # Check it's a file
shutil.move(f, dst) # Move the files
# Script Name : sqlite_table_check.py
# Author : Craig Richards
# Created : 07 June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Checks the main SQLITE database to ensure all the tables should exist
import sqlite3
import sys
import os
dropbox = os.getenv("dropbox")
config = os.getenv("my_config")
dbfile = ("Databases\jarvis.db")
listfile = ("sqlite_master_table.lst")
master_db = os.path.join(dropbox, dbfile)
config_file = os.path.join(config, listfile)
tablelist = open(config_file,'r');
conn = sqlite3.connect(master_db)
cursor = conn.cursor()
cursor.execute('SELECT SQLITE_VERSION()')
data = cursor.fetchone()
if str(data) == "(u'3.6.21',)":
print ("\nCurrently " + master_db + " is on SQLite version: %s" % data + " - OK -\n")
else:
print ("\nDB On different version than master version - !!!!! \n")
conn.close()
print ("\nCheckling " + master_db + " against " + config_file + "\n")
for table in tablelist.readlines():
conn = sqlite3.connect(master_db)
cursor = conn.cursor()
cursor.execute("select count(*) from sqlite_master where name = ?",(table.strip(), ))
res = cursor.fetchone()
if (res[0]):
print ('[+] Table : ' + table.strip() + ' exists [+]')
else:
print ('[-] Table : ' + table.strip() + ' does not exist [-]')
# Script Name : puttylogs.py
# Author : Craig Richards
# Created : 13th October 2011
# Last Modified : 29th February 2012
# Version : 1.2
# Modifications : 1.1 - Added the variable zip_program so you can set it for the zip program on whichever OS, so to run on a different OS just change the locations of these two variables.
# : 1.2 - 29-02-12 - CR - Added shutil module and added one line to move the zipped up logs to the zipped_logs directory
# Description : Zip up all the logs in the given directory
import os # Load the Library Module
import shutil # Load the Library Module - 1.2
from time import strftime # Load just the strftime Module from Time
logsdir="c:\logs\puttylogs" # Set the Variable logsdir
zipdir="c:\logs\puttylogs\zipped_logs" # Set the Variable zipdir - 1.2
zip_program="zip.exe" # Set the Variable zip_program - 1.1
for files in os.listdir(logsdir): # Find all the files in the directory
if files.endswith(".log"): # Check to ensure the files in the directory end in .log
files1=files+"."+strftime("%Y-%m-%d")+".zip" # Create the Variable files1, this is the files in the directory, then we add a suffix with the date and the zip extension
os.chdir(logsdir) # Change directory to the logsdir
os.system(zip_program + " " + files1 +" "+ files) # Zip the logs into dated zip files for each server. - 1.1
shutil.move(files1, zipdir) # Move the zipped log files to the zipped_logs directory - 1.2
os.remove(files) # Remove the original log files
# Script Name : daily_checks.py
# Author : Craig Richards
# Created : 07th December 2011
# Last Modified : 01st May 2013
# Version : 1.5
#
# Modifications : 1.1 Removed the static lines for the putty sessions, it now reads a file, loops through and makes the connections.
# : 1.2 Added a variable filename=sys.argv[0] , as when you use __file__ it errors when creating an exe with py2exe.
# : 1.3 Changed the server_list.txt file name and moved the file to the config directory.
# : 1.4 Changed some settings due to getting a new pc
# : 1.5 Tidy comments and syntax
#
# Description : This simple script loads everything I need to carry out the daily checks for our systems.
import platform # Load Modules
import os
import subprocess
import sys
from time import strftime # Load just the strftime Module from Time
def clear_screen():
    """Clear the terminal window using the platform's native command."""
    # Map each supported OS family to its clear-screen shell command.
    commands = {"posix": 'clear', "nt": 'CLS', "dos": 'CLS', "ce": 'CLS'}
    cmd = commands.get(os.name)
    if cmd is not None:
        os.system(cmd)
def print_docs(): # Function to print the daily checks automatically
    print ("Printing Daily Check Sheets:")
    # The command below passes the command line string to open word, open the
    # document, print it with the default printer (/mFilePrintDefault), then
    # close word down (/mFileExit); communicate() waits for it to finish.
    subprocess.Popen(["C:\\Program Files (x86)\Microsoft Office\Office14\winword.exe", "P:\\\\Documentation\\Daily Docs\\Back office Daily Checks.doc", "/mFilePrintDefault", "/mFileExit"]).communicate()
def putty_sessions(conffilename=None): # Function to load the putty sessions I need
    """Open a PuTTY session for every server listed in the config file.

    conffilename defaults to ``daily_checks_servers.conf`` inside the
    directory named by the ``my_config`` environment variable. The original
    read ``conffilename`` as a global, but main() only defined it locally,
    so every call raised NameError; the optional parameter keeps the
    zero-argument call working while fixing that.
    """
    if conffilename is None:
        conffilename = os.path.join(os.getenv("my_config"), 'daily_checks_servers.conf')
    for server in open(conffilename): # Loop through the server list, one name per line - 1.1 / 1.3
        subprocess.Popen(('putty -load ' + server)) # Open the PuTTY sessions - 1.1
def rdp_sessions():
    print ("Loading RDP Sessions:")
    # Launch the saved RDP connection file via the Windows terminal services client.
    subprocess.Popen("mstsc eclr.rdp") # Open up a terminal session connection and load the euroclear session
def euroclear_docs():
    # The command below opens IE and loads the Euroclear password document
    # from the file share (both paths are quoted inside one argument string).
    subprocess.Popen('"C:\\Program Files\\Internet Explorer\\iexplore.exe"' '"file://fs1\pub_b\Pub_Admin\Documentation\Settlements_Files\PWD\Eclr.doc"')
# End of the functions
# Start of the Main Program
def main():
    """Run the morning checks: greeting, check sheets, PuTTY/RDP sessions, docs."""
    filename = sys.argv[0] # Create the variable filename
    confdir = os.getenv("my_config") # Config directory from the OS environment variable - 1.3
    conffile = ('daily_checks_servers.conf') # Set the variable conffile - 1.3
    # NOTE(review): conffilename is local to main() and is never passed to
    # putty_sessions() -- verify that function can resolve the config path
    # on its own.
    conffilename = os.path.join(confdir, conffile) # Full path to the server list - 1.3
    clear_screen() # Call the clear screen function
    # The command below prints a little welcome message, as well as the script name, the date and time and where it was run from.
    print ("Good Morning " + os.getenv('USERNAME') + ", "+
    filename, "ran at", strftime("%Y-%m-%d %H:%M:%S"), "on",platform.node(), "run from",os.getcwd())
    print_docs() # Call the print_docs function
    putty_sessions() # Call the putty_session function
    rdp_sessions() # Call the rdp_sessions function
    euroclear_docs() # Call the euroclear_docs function
if __name__ == "__main__":
main()
import serial
import sys
#A serial port-scanner for linux and windows platforms
#Author: Julio César Echeverri Marulanda
#e-mail: julio.em7@gmail.com
#blog: blogdelingeniero1.wordpress.com
#You should have installed the PySerial module to use this method.
#You can install pyserial with the following line: pip install pyserial
def ListAvailablePorts():
    #This function return a list containing the string names for Virtual Serial Ports
    #availables in the computer (this function works only for Windows & Linux Platforms but you can extend it)
    #if there isn't available ports, returns an empty List
    AvailablePorts = []
    platform = sys.platform
    if platform == 'win32':
        # Probe COM ports 0..254 by index; opening a missing port raises
        # SerialException, which we treat as "not available".
        for i in range(255):
            try:
                ser = serial.Serial(i,9600)
            except serial.serialutil.SerialException:
                pass
            else:
                AvailablePorts.append(ser.portstr)
                ser.close()
    elif platform == 'linux':
        # Probe /dev/ttyUSB0..254 the same way.
        # NOTE(review): sys.platform is 'linux2' on Python 2 -- confirm the
        # expected value for the target interpreter.
        for i in range(0,255):
            try:
                ser = serial.Serial('/dev/ttyUSB'+str(i))
            except serial.serialutil.SerialException:
                pass
            else:
                AvailablePorts.append('/dev/ttyUSB'+str(i))
                ser.close()
    else:
        print '''This method was developed only for linux and windows
the current platform isn't recognised'''
    return AvailablePorts
# EXAMPLE OF HOW IT WORKS
# if an Arduino is connected to the computer, the port will be show in the terminal
# print ListAvailablePorts()# Script Name : nslookup_check.py
# Author : Craig Richards
# Created : 5th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This very simple script opens the file server_list.txt and the does an nslookup for each one to check the DNS entry
import subprocess # Import the subprocess module
for server in open('server_list.txt'): # Open the file and read each line
subprocess.Popen(('nslookup ' + server)) # Run the nslookup command for each server in the listimport urllib
import json
import sys
import os
accessToken = 'TOKENVALUE' # YOUR ACCESS TOKEN GETS INSERTED HERE
userId = sys.argv[1] #USERID
limit=100
url='https://graph.facebook.com/'+userId+'/posts?access_token='+accessToken +'&limit='+str(limit) #FB Link
data = json.load(urllib.urlopen(url))
id=0
print str(id)
for item in data['data']:
time=item['created_time'][11:19]
date=item['created_time'][5:10]
year=item['created_time'][0:4]
if 'shares' in item:
num_share=item['shares']['count']
else:
num_share=0
if 'likes' in item:
num_like=item['likes']['count']
else:
num_like=0
id+=1
print str(id)+'\t'+ time.encode('utf-8')+'\t'+date.encode('utf-8')+'\t'+year.encode('utf-8')+'\t'+ str(num_share)+'\t'+str(num_like)"""
Written by: Shreyas Daniel - github.com/shreydan
Description: an overview of 'timy' module - pip install timy
A great alternative to Pythons 'timeit' module and easier to use.
"""
import timy # begin by importing timy
@timy.timer(ident = 'listcomp', loops = 1) # timy decorator: prints the run time, labelled 'listcomp', after 1 loop
def listcomprehension(): # the function whose execution time is calculated.
    # Build the list of even numbers below 100000 with a comprehension.
    li = [x for x in range(0,100000,2)]
listcomprehension()
"""
this is how the above works:
timy decorator is created.
any function underneath the timy decorator is the function whose execution time
need to be calculated.
after the function is called. The execution time is printed.
in the timy decorator:
ident: an identity for each timy decorator, handy when using a lot of them
loops: no. of times this function has to be executed
"""
# this can also be accomplished by 'with' statement:
# tracking points in between code can be added
# to track specific instances in the program
def listcreator():
    """Append the evens below 100000 to a list inside a timy.Timer context.

    Demonstrates timer.track(): an intermediate timing mark is emitted when
    the loop reaches 50000, in addition to the total elapsed time.
    """
    with timy.Timer() as timer:
        li = []
        for i in range(0,100000,2):
            li.append(i)
            if i == 50000:
                timer.track('reached 50000')  # emit an intermediate timing point
listcreator()
"""
there are many more aspects to 'timy' module.
check it out here: https://github.com/ramonsaraiva/timy
"""'''Simple million word count program.
main idea is Python pairs words
with the number of times
that number appears in the triple quoted string.
Credit to William J. Turkel and Adam Crymble for the word
frequency code used below. I just merged the two ideas.
'''
wordstring = '''SCENE I. Yorkshire. Gaultree Forest.
Enter the ARCHBISHOP OF YORK, MOWBRAY, LORD HASTINGS, and others
ARCHBISHOP OF YORK
What is this forest call'd?
HASTINGS
'Tis Gaultree Forest, an't shall please your grace.
ARCHBISHOP OF YORK
Here stand, my lords; and send discoverers forth
To know the numbers of our enemies.
HASTINGS
We have sent forth already.
ARCHBISHOP OF YORK
'Tis well done.
My friends and brethren in these great affairs,
I must acquaint you that I have received
New-dated letters from Northumberland;
Their cold intent, tenor and substance, thus:
Here doth he wish his person, with such powers
As might hold sortance with his quality,
The which he could not levy; whereupon
He is retired, to ripe his growing fortunes,
To Scotland: and concludes in hearty prayers
That your attempts may overlive the hazard
And fearful melting of their opposite.
MOWBRAY
Thus do the hopes we have in him touch ground
And dash themselves to pieces.
Enter a Messenger
HASTINGS
Now, what news?
Messenger
West of this forest, scarcely off a mile,
In goodly form comes on the enemy;
And, by the ground they hide, I judge their number
Upon or near the rate of thirty thousand.
MOWBRAY
The just proportion that we gave them out
Let us sway on and face them in the field.
ARCHBISHOP OF YORK
What well-appointed leader fronts us here?
Enter WESTMORELAND
MOWBRAY
I think it is my Lord of Westmoreland.
WESTMORELAND
Health and fair greeting from our general,
The prince, Lord John and Duke of Lancaster.
ARCHBISHOP OF YORK
Say on, my Lord of Westmoreland, in peace:
What doth concern your coming?
WESTMORELAND
Then, my lord,
Unto your grace do I in chief address
The substance of my speech. If that rebellion
Came like itself, in base and abject routs,
Led on by bloody youth, guarded with rags,
And countenanced by boys and beggary,
I say, if damn'd commotion so appear'd,
In his true, native and most proper shape,
You, reverend father, and these noble lords
Had not been here, to dress the ugly form
Of base and bloody insurrection
With your fair honours. You, lord archbishop,
Whose see is by a civil peace maintained,
Whose beard the silver hand of peace hath touch'd,
Whose learning and good letters peace hath tutor'd,
Whose white investments figure innocence,
The dove and very blessed spirit of peace,
Wherefore do you so ill translate ourself
Out of the speech of peace that bears such grace,
Into the harsh and boisterous tongue of war;
Turning your books to graves, your ink to blood,
Your pens to lances and your tongue divine
To a trumpet and a point of war?
ARCHBISHOP OF YORK
Wherefore do I this? so the question stands.
Briefly to this end: we are all diseased,
And with our surfeiting and wanton hours
Have brought ourselves into a burning fever,
And we must bleed for it; of which disease
Our late king, Richard, being infected, died.
But, my most noble Lord of Westmoreland,
I take not on me here as a physician,
Nor do I as an enemy to peace
Troop in the throngs of military men;
But rather show awhile like fearful war,
To diet rank minds sick of happiness
And purge the obstructions which begin to stop
Our very veins of life. Hear me more plainly.
I have in equal balance justly weigh'd
What wrongs our arms may do, what wrongs we suffer,
And find our griefs heavier than our offences.
We see which way the stream of time doth run,
And are enforced from our most quiet there
By the rough torrent of occasion;
And have the summary of all our griefs,
When time shall serve, to show in articles;
Which long ere this we offer'd to the king,
And might by no suit gain our audience:
When we are wrong'd and would unfold our griefs,
We are denied access unto his person
Even by those men that most have done us wrong.
The dangers of the days but newly gone,
Whose memory is written on the earth
With yet appearing blood, and the examples
Of every minute's instance, present now,
Hath put us in these ill-beseeming arms,
Not to break peace or any branch of it,
But to establish here a peace indeed,
Concurring both in name and quality.
WESTMORELAND
When ever yet was your appeal denied?
Wherein have you been galled by the king?
What peer hath been suborn'd to grate on you,
That you should seal this lawless bloody book
Of forged rebellion with a seal divine
And consecrate commotion's bitter edge?
ARCHBISHOP OF YORK
My brother general, the commonwealth,
To brother born an household cruelty,
I make my quarrel in particular.
WESTMORELAND
There is no need of any such redress;
Or if there were, it not belongs to you.
MOWBRAY
Why not to him in part, and to us all
That feel the bruises of the days before,
And suffer the condition of these times
To lay a heavy and unequal hand
Upon our honours?
WESTMORELAND
O, my good Lord Mowbray,
Construe the times to their necessities,
And you shall say indeed, it is the time,
And not the king, that doth you injuries.
Yet for your part, it not appears to me
Either from the king or in the present time
That you should have an inch of any ground
To build a grief on: were you not restored
To all the Duke of Norfolk's signories,
Your noble and right well remember'd father's?
MOWBRAY
What thing, in honour, had my father lost,
That need to be revived and breathed in me?
The king that loved him, as the state stood then,
Was force perforce compell'd to banish him:
And then that Harry Bolingbroke and he,
Being mounted and both roused in their seats,
Their neighing coursers daring of the spur,
Their armed staves in charge, their beavers down,
Their eyes of fire sparking through sights of steel
And the loud trumpet blowing them together,
Then, then, when there was nothing could have stay'd
My father from the breast of Bolingbroke,
O when the king did throw his warder down,
His own life hung upon the staff he threw;
Then threw he down himself and all their lives
That by indictment and by dint of sword
Have since miscarried under Bolingbroke.
WESTMORELAND
You speak, Lord Mowbray, now you know not what.
The Earl of Hereford was reputed then
In England the most valiant gentlemen:
Who knows on whom fortune would then have smiled?
But if your father had been victor there,
He ne'er had borne it out of Coventry:
For all the country in a general voice
Cried hate upon him; and all their prayers and love
Were set on Hereford, whom they doted on
And bless'd and graced indeed, more than the king.
But this is mere digression from my purpose.
Here come I from our princely general
To know your griefs; to tell you from his grace
That he will give you audience; and wherein
It shall appear that your demands are just,
You shall enjoy them, every thing set off
That might so much as think you enemies.
MOWBRAY
But he hath forced us to compel this offer;
And it proceeds from policy, not love.
WESTMORELAND
Mowbray, you overween to take it so;
This offer comes from mercy, not from fear:
For, lo! within a ken our army lies,
Upon mine honour, all too confident
To give admittance to a thought of fear.
Our battle is more full of names than yours,
Our men more perfect in the use of arms,
Our armour all as strong, our cause the best;
Then reason will our heart should be as good
Say you not then our offer is compell'd.
MOWBRAY
Well, by my will we shall admit no parley.
WESTMORELAND
That argues but the shame of your offence:
A rotten case abides no handling.
HASTINGS
Hath the Prince John a full commission,
In very ample virtue of his father,
To hear and absolutely to determine
Of what conditions we shall stand upon?
WESTMORELAND
That is intended in the general's name:
I muse you make so slight a question.
ARCHBISHOP OF YORK
Then take, my Lord of Westmoreland, this schedule,
For this contains our general grievances:
Each several article herein redress'd,
All members of our cause, both here and hence,
That are insinew'd to this action,
Acquitted by a true substantial form
And present execution of our wills
To us and to our purposes confined,
We come within our awful banks again
And knit our powers to the arm of peace.
WESTMORELAND
This will I show the general. Please you, lords,
In sight of both our battles we may meet;
And either end in peace, which God so frame!
Or to the place of difference call the swords
Which must decide it.
ARCHBISHOP OF YORK
My lord, we will do so.
Exit WESTMORELAND
MOWBRAY
There is a thing within my bosom tells me
That no conditions of our peace can stand.
HASTINGS
Fear you not that: if we can make our peace
Upon such large terms and so absolute
As our conditions shall consist upon,
Our peace shall stand as firm as rocky mountains.
MOWBRAY
Yea, but our valuation shall be such
That every slight and false-derived cause,
Yea, every idle, nice and wanton reason
Shall to the king taste of this action;
That, were our royal faiths martyrs in love,
We shall be winnow'd with so rough a wind
That even our corn shall seem as light as chaff
And good from bad find no partition.
ARCHBISHOP OF YORK
No, no, my lord. Note this; the king is weary
Of dainty and such picking grievances:
For he hath found to end one doubt by death
Revives two greater in the heirs of life,
And therefore will he wipe his tables clean
And keep no tell-tale to his memory
That may repeat and history his loss
To new remembrance; for full well he knows
He cannot so precisely weed this land
As his misdoubts present occasion:
His foes are so enrooted with his friends
That, plucking to unfix an enemy,
He doth unfasten so and shake a friend:
So that this land, like an offensive wife
That hath enraged him on to offer strokes,
As he is striking, holds his infant up
And hangs resolved correction in the arm
That was uprear'd to execution.
HASTINGS
Besides, the king hath wasted all his rods
On late offenders, that he now doth lack
The very instruments of chastisement:
So that his power, like to a fangless lion,
May offer, but not hold.
ARCHBISHOP OF YORK
'Tis very true:
And therefore be assured, my good lord marshal,
If we do now make our atonement well,
Our peace will, like a broken limb united,
Grow stronger for the breaking.
MOWBRAY
Be it so.
Here is return'd my Lord of Westmoreland.
Re-enter WESTMORELAND
WESTMORELAND
The prince is here at hand: pleaseth your lordship
To meet his grace just distance 'tween our armies.
MOWBRAY
Your grace of York, in God's name then, set forward.
ARCHBISHOP OF YORK
Before, and greet his grace: my lord, we come.
Exeunt'''
# Pair each word of the text with its frequency and print all intermediates.
from collections import Counter  # local import: this script has no shared import block

wordlist = wordstring.split()
# Tally every word in one O(n) pass; the original called wordlist.count(w)
# once per word, an O(n^2) scan over the ~1000-word text. The resulting
# per-position frequency list is identical.
_counts = Counter(wordlist)
wordfreq = [_counts[w] for w in wordlist]
print("String\n {} \n".format(wordstring))
print("List\n {} \n".format(str(wordlist)))
print("Frequencies\n {} \n".format(str(wordfreq)))
print("Pairs\n {}".format(str(list(zip(wordlist, wordfreq)))))
print("Pairs\n {}".format(str(list(zip(wordlist, wordfreq)))))#!/usr/bin/python
import urllib2
import cookielib
from getpass import getpass
import sys
# --- way2sms.com bulk SMS sender (Python 2; screen-scrapes a specific site) ---
username = raw_input('Enter mobile number:')
passwd = getpass()  # prompt without echoing the password
message = raw_input('Enter Message:')
#Fill the list with Recipients
x=raw_input('Enter Mobile numbers seperated with comma:')
num=x.split(',')
message = "+".join(message.split(' '))  # encode spaces as '+' for the form post
#Logging into the SMS Site
url = 'http://site24.way2sms.com/Login1.action?'
data = 'username='+username+'&password='+passwd+'&Submit=Sign+in'
#For Cookies: keep the session cookie between the login and send requests
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# Spoof a desktop browser User-Agent so the site accepts the request
opener.addheaders = [('User-Agent','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36')]
try:
    usock = opener.open(url, data)
except IOError:
    print "Error while logging in."
    sys.exit(1)
# Scrape the session token out of the CookieJar's repr() -- fragile; breaks
# if the cookie format changes. TODO confirm against the live site.
jession_id = str(cj).split('~')[1].split(' ')[0]
send_sms_url = 'http://site24.way2sms.com/smstoss.action?'
opener.addheaders = [('Referer', 'http://site25.way2sms.com/sendSMS?Token='+jession_id)]
try:
    for number in num:
        send_sms_data = 'ssaction=ss&Token='+jession_id+'&mobile='+number+'&message='+message+'&msgLen=136'
        sms_sent_page = opener.open(send_sms_url,send_sms_data)
except IOError:
    print "Error while sending message"
    sys.exit(1)
print "SMS has been sent."# Script Name : get_info_remoute_srv.py
# Author : Pavel Sirotkin
# Created : 3th April 2016
# Last Modified : -
# Version : 1.0.0
# Modifications :
# Description : this will get info about remoute server on linux through ssh connection. Connect these servers must be through keys
import subprocess
HOSTS = ('proxy1', 'proxy')  # remote machines; assumed reachable via key-based ssh (no password prompt) -- TODO confirm
COMMANDS = ('uname -a', 'uptime')  # commands executed on every host
for host in HOSTS:
    result = []
    for command in COMMANDS:
        # shell=False with an argv list: the command string is interpreted by
        # the remote ssh server, not by a local shell.
        ssh = subprocess.Popen(["ssh", "%s" % host, command],
                               shell=False,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        result.append(ssh.stdout.readlines())  # blocks until the remote command's stdout closes
    print('--------------- ' + host + ' --------------- ')
    for res in result:
        if not res:  # empty stdout: show stderr of the LAST command run and stop for this host
            print(ssh.stderr.readlines())
            break
        else:
            print(res)# Script Name : portscanner.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Port Scanner, you just pass the host and the ports
import optparse # Import the module
from socket import * # Import the module
from threading import * # Import the module
screenLock = Semaphore(value=1) # Binary semaphore serialising console output across scanner threads
def connScan(tgtHost, tgtPort): # Attempt a full TCP connect to tgtHost:tgtPort and report the result
    try:
        connSkt = socket(AF_INET, SOCK_STREAM) # Open a TCP socket
        connSkt.connect((tgtHost, tgtPort))    # raises on closed/filtered ports
        connSkt.send('')
        results=connSkt.recv(100)              # grab up to 100 bytes of banner, if any
        screenLock.acquire() # Hold the lock so output lines do not interleave
        print '[+] %d/tcp open'% tgtPort
        print '[+] ' + str(results)
    except:
        # NOTE(review): bare except also swallows unrelated errors; if socket()
        # itself failed, connSkt is unbound and the finally clause will raise.
        screenLock.acquire()
        print '[-] %d/tcp closed '% tgtPort
    finally:
        screenLock.release()
        connSkt.close()
def portScan(tgtHost, tgtPorts): # Resolve the target, then scan each port in its own thread
    try:
        tgtIP = gethostbyname(tgtHost) # Get the IP from the hostname
    except:
        print "[-] Cannot resolve '%s': Unknown host"%tgtHost
        return
    try:
        tgtName = gethostbyaddr(tgtIP) # Reverse-resolve for a friendlier banner
        print '\n[+] Scan Results for: ' +tgtName[0]
    except:
        print '\n[+] Scan Results for: ' + tgtIP
    setdefaulttimeout(1)  # cap every socket operation in the workers at 1 second
    for tgtPort in tgtPorts: # One thread per port; threads are never joined
        t = Thread(target=connScan, args=(tgtHost, int(tgtPort)))
        t.start()
def main():
    """Parse -H/-p options and launch the port scan (Python 2 / optparse)."""
    parser = optparse.OptionParser('usage%prog ' + '-H <host> -p <port>') # Display options/help if required
    parser.add_option('-H', dest='tgtHost', type='string', help='specify host')
    parser.add_option('-p', dest='tgtPort', type='string', help='port')
    (options, args) = parser.parse_args()
    tgtHost = options.tgtHost
    tgtPorts = str(options.tgtPort).split(',')
    # NOTE(review): str(None).split(',') yields ['None'], so the second half of
    # this check never fires; '|' also works here only because both sides are
    # already booleans -- 'or' is what is intended.
    if (tgtHost == None) | (tgtPorts[0] == None):
        print parser.usage
        exit(0)
    portScan(tgtHost, tgtPorts)
if __name__ == '__main__':
    main()# Script Name : work_connect.py
# Author : Craig Richards
# Created : 11th May 2012
# Last Modified : 31st October 2012
# Version : 1.1
# Modifications : 1.1 - CR - Added some extra code, to check an argument is passed to the script first of all, then check it's a valid input
# Description : This simple script loads everything I need to connect to work etc
import subprocess # Load the Library Module
import sys # Load the Library Module
import os # Load the Library Module
import time # Load the Library Module
# --- Work VPN/RDP launcher (Python 2, Windows-specific paths) ---
dropbox = os.getenv("dropbox") # Dropbox root taken from the 'dropbox' environment variable
rdpfile = ("remote\\workpc.rdp") # RDP profile, relative to the dropbox root
conffilename=os.path.join(dropbox, rdpfile) # Full path to the .rdp profile
remote = (r"c:\windows\system32\mstsc.exe ") # Windows Remote Desktop client
text = '''You need to pass an argument
-c Followed by login password to connect
-d to disconnect''' # Help text shown when no/invalid arguments are passed - 1.2
if len(sys.argv) < 2: # Check there is at least one option passed to the script - 1.2
    print text # If not print the text above - 1.2
    sys.exit() # Exit the program - 1.2
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv: # Help Menu if called
    print text # Print the text, stored in the text variable - 1.2
    sys.exit(0) # Exit the program
else:
    if sys.argv[1].lower().startswith('-c'): # -c <password>: connect
        passwd = sys.argv[2] # Second argument is the VPN login password
        subprocess.Popen((r"c:\Program Files\Checkpoint\Endpoint Connect\trac.exe connect -u username -p "+passwd))
        subprocess.Popen((r"c:\geektools\puttycm.exe"))
        time.sleep(15) # Give the Checkpoint client time to connect before opening mstsc
        subprocess.Popen([remote, conffilename])
    elif sys.argv[1].lower().startswith('-d'): # -d: disconnect the checkpoint session
        subprocess.Popen((r"c:\Program Files\Checkpoint\Endpoint Connect\trac.exe disconnect "))
    else:
        print 'Unknown option - ' + text # Any other option: print Unknown option and the help text - 1.2# Script Name : testlines.py
# Author : Craig Richards
# Created : 08th December 2011
# Last Modified :
# Version : 1.0
# Modifications : beven nyamande
# Description : This is a very simple script that opens up a file and writes whatever is set "
def write_to_file(filename, txt):
    """Create/overwrite *filename* with the exact string *txt*."""
    # 'with' guarantees the handle is closed even if the write raises; the
    # original also bound write()'s return value (a character count) to an
    # unused local, which is dropped here.
    with open(filename, 'w') as file_object:
        file_object.write(txt)

if __name__ == '__main__':
    write_to_file('test.txt', 'I am beven')
# Script Name : ping_subnet.py
# Author : Craig Richards
# Created : 12th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : After supplying the first 3 octets it will scan the final range for available addresses
import os # Load the Library Module
import subprocess # Load the Library Module
import sys # Load the Library Module
filename = sys.argv[0] # Script name, used in the usage messages
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv: # Help Menu if called
    print '''
You need to supply the first octets of the address Usage : ''' + filename + ''' 111.111.111 '''
    sys.exit(0)
else:
    if (len(sys.argv) < 2): # If no arguments are passed then display the help and instructions on how to run the script
        sys.exit (' You need to supply the first octets of the address Usage : ' + filename + ' 111.111.111')
    subnet = sys.argv[1] # First three octets of the subnet, e.g. "192.168.1"
if os.name == "posix": # Linux/macOS ping flag for a fixed count
    myping = "ping -c 2 " # This is the ping command
elif os.name in ("nt", "dos", "ce"): # Windows ping flag for a fixed count
    myping = "ping -n 2 " # This is the ping command
f = open('ping_' + subnet + '.log', 'w') # Open a logfile in the current directory
for ip in range(2,255): # Hosts .2 through .254 (.1 and .255 are deliberately skipped)
    ret = subprocess.call(myping + str(subnet) + "." + str(ip) ,
                        shell=True, stdout=f, stderr=subprocess.STDOUT) # Ping each address; output goes to the log
    if ret == 0: # Exit status 0 means at least one reply was received
        f.write (subnet + "." + str(ip) + " is alive" + "\n") # Write out that you can receive a reponse
    else:
        f.write (subnet + "." + str(ip) + " did not respond" + "\n") # Write out you can't reach the box# Script Name : ping_servers.py
# Author : Craig Richards
# Created : 9th May 2012
# Last Modified : 14th May 2012
# Version : 1.1
# Modifications : 1.1 - 14th May 2012 - CR Changed it to use the config directory to store the server files
# Description : This script will, depending on the arguments supplied will ping the servers associated with that application group.
import os # Load the Library Module
import subprocess # Load the Library Module
import sys # Load the Library Module
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv: # Help Menu if called
print '''
You need to supply the application group for the servers you want to ping, i.e.
dms
swaps
Followed by the site i.e.
155
bromley'''
sys.exit(0)
else:
if (len(sys.argv) < 3): # If no arguments are passed,display the help/instructions on how to run the script
sys.exit ('\nYou need to supply the app group. Usage : ' + filename + ' followed by the application group i.e. \n \t dms or \n \t swaps \n then the site i.e. \n \t 155 or \n \t bromley')
appgroup = sys.argv[1] # Set the variable appgroup as the first argument you supply
site = sys.argv[2] # Set the variable site as the second argument you supply
if os.name == "posix": # Check the os, if it's linux then
myping = "ping -c 2 " # This is the ping command
elif os.name in ("nt", "dos", "ce"): # Check the os, if it's windows then
myping = "ping -n 2 " # This is the ping command
if 'dms' in sys.argv: # If the argument passed is dms then
appgroup = 'dms' # Set the variable appgroup to dms
elif 'swaps' in sys.argv: # Else if the argment passed is swaps then
appgroup = 'swaps' # Set the variable appgroup to swaps
if '155' in sys.argv: # If the argument passed is 155 then
site = '155' # Set the variable site to 155
elif 'bromley' in sys.argv: # Else if the argument passed is bromley
site = 'bromley' # Set the variable site to bromley
filename = sys.argv[0] # Sets a variable for the script name
logdir = os.getenv("logs") # Set the variable logdir by getting the OS environment logs
logfile = 'ping_' + appgroup + '_' + site + '.log' # Set the variable logfile, using the arguments passed to create the logfile
logfilename = os.path.join(logdir, logfile) # Set the variable logfilename by joining logdir and logfile together
confdir = os.getenv("my_config") # Set the variable confdir from the OS environment variable - 1.2
conffile = (appgroup + '_servers_' + site + '.txt') # Set the variable conffile - 1.2
conffilename = os.path.join(confdir, conffile) # Set the variable conffilename by joining confdir and conffile together - 1.2
f = open(logfilename, "w") # Open a logfile to write out the output
for server in open(conffilename): # Open the config file and read each line - 1.2
ret = subprocess.call(myping + server, shell=True, stdout=f, stderr=subprocess.STDOUT) # Run the ping command for each server in the list.
if ret == 0: # Depending on the response
f.write (server.strip() + " is alive" + "\n") # Write out that you can receive a reponse
else:
f.write (server.strip() + " did not respond" + "\n") # Write out you can't reach the box
print ("\n\tYou can see the results in the logfile : " + logfilename); # Show the location of the logfile# Script Name : backup_automater_services.py
# Author : Craig Richards
# Created : 24th October 2012
# Last Modified : 13th February 2016
# Version : 1.0.1
# Modifications : 1.0.1 - Tidy up the comments and syntax
# Description : This will go through and backup all my automator services workflows
import datetime # Load the library module
import os # Load the library module
import shutil # Load the library module
today = datetime.date.today() # Get Today's date
todaystr = today.isoformat() # YYYY-MM-DD, used to name the dated backup directory
confdir = os.getenv("my_config") # Config directory from the 'my_config' environment variable
dropbox = os.getenv("dropbox") # Dropbox root from the 'dropbox' environment variable
conffile = ('services.conf') # Configuration file listing the workflows to back up
conffilename = os.path.join(confdir, conffile) # Full path to the configuration file
sourcedir = os.path.expanduser('~/Library/Services/') # macOS Automator services location
destdir = os.path.join(dropbox, "My_backups" + "/" +
                       "Automater_services" + todaystr + "/") # Dated destination
# directory inside Dropbox
for file_name in open(conffilename): # Walk through the configuration file
    fname = file_name.strip() # Strip the newline (and skip blank lines below)
    if fname: # For the lines that are not blank
        sourcefile = os.path.join(sourcedir, fname) # Source workflow directory
        destfile = os.path.join(destdir, fname) # Destination directory
        shutil.copytree(sourcefile, destfile) # Copy the whole tree; raises if destfile already exists# Script Name : powerup_checks.py
# Author : Craig Richards
# Created : 25th June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Creates an output file by pulling all the servers for the given site from SQLITE database, then goes through the list pinging the servers to see if they are up on the network
import sys # Load the Library Module
import sqlite3 # Load the Library Module
import os # Load the Library Module
import subprocess # Load the Library Module
from time import strftime # Load just the strftime Module from Time
dropbox=os.getenv("dropbox") # Set the variable, by getting the value of the variable from the OS
config=os.getenv("my_config") # Set the variable, by getting the value of the variable from the OS
dbfile=("Databases/jarvis.db") # Set the variable to the database
master_db=os.path.join(dropbox, dbfile) # Create the variable by linking the path and the file
listfile=("startup_list.txt") # File that will hold the servers
serverfile=os.path.join(config,listfile) # Create the variable by linking the path and the file
outputfile=('server_startup_'+strftime("%Y-%m-%d-%H-%M")+'.log')
# Below is the help text
text = '''
You need to pass an argument, the options the script expects is
-site1 For the Servers relating to site1
-site2 For the Servers located in site2'''
def windows(): # Ping each listed server with Windows 'ping -n' and append results to the log
    f = open(outputfile, 'a') # Open the logfile
    for server in open(serverfile,'r'): # Read the list of servers from the list
        #ret = subprocess.call("ping -n 3 %s" % server.strip(), shell=True,stdout=open('NUL', 'w'),stderr=subprocess.STDOUT) # Ping the servers in turn
        ret = subprocess.call("ping -n 3 %s" % server.strip(),stdout=open('NUL', 'w'),stderr=subprocess.STDOUT) # Ping the servers in turn, discarding ping's own output
        if ret == 0: # Exit status 0: at least one reply received
            f.write ("%s: is alive" % server.strip().ljust(15) + "\n") # Write out to the logfile is the server is up
        else:
            f.write ("%s: did not respond" % server.strip().ljust(15) + "\n") # Write to the logfile if the server is down
def linux(): # Ping each listed server with Unix 'ping -c' and append results to a dated log
    # NOTE(review): this opens a date-only log name, while windows() uses the
    # shared 'outputfile' (minute resolution) -- the final "check the log"
    # message always prints 'outputfile', so on Linux it can name the wrong file.
    f = open('server_startup_'+strftime("%Y-%m-%d")+'.log', 'a') # Open the logfile
    for server in open(serverfile,'r'): # Read the list of servers from the list
        ret = subprocess.call("ping -c 3 %s" % server, shell=True,stdout=open('/dev/null', 'w'),stderr=subprocess.STDOUT) # Ping the servers in turn, discarding ping's own output
        if ret == 0: # Exit status 0: at least one reply received
            f.write ("%s: is alive" % server.strip().ljust(15) + "\n") # Write out to the logfile is the server is up
        else:
            f.write ("%s: did not respond" % server.strip().ljust(15) + "\n") # Write to the logfile if the server is down
def get_servers(query): # Fetch hostnames for the given location from SQLite into serverfile
    conn = sqlite3.connect(master_db) # Connect to the database
    cursor = conn.cursor() # Create the cursor
    cursor.execute('select hostname from tp_servers where location =?',(query,)) # Parameterised query - safe from injection
    print ('\nDisplaying Servers for : ' + query + '\n')
    while True: # While there are results
        row = cursor.fetchone() # Fetch the next row, None when exhausted
        if row == None:
            break
        # NOTE(review): the file is reopened in append mode for every row and
        # only the last handle is closed after the loop.
        f = open(serverfile, 'a') # Open the serverfile
        f.write("%s\n" % str(row[0])) # Write the server out to the file
        print row[0] # Display the server to the screen
    f.close() # Close the (last) file handle
def main(): # Parse -site1/-site2, rebuild the server list, then ping every server
    if os.path.exists(serverfile): # Remove any stale server list from a previous run
        os.remove(serverfile) # If so remove it
    if len(sys.argv) < 2: # Check there is an argument being passed
        print text # Display the help text if there isn't one passed
        sys.exit() # Exit the script
    if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv: # If the ask for help
        print text # Display the help text if there isn't one passed
        sys.exit(0) # Exit the script after displaying help
    else:
        if sys.argv[1].lower().startswith('-site1'): # If the argument is site1
            query = 'site1' # Set the variable to have the value site1
        elif sys.argv[1].lower().startswith('-site2'): # Else if the argument is site2
            query = 'site2' # Set the variable to have the value site2
        else:
            print '\n[-] Unknown option [-] ' + text # If an unknown option is passed, let the user know
            sys.exit(0)
    get_servers(query) # Populate serverfile from the database for the chosen site
    if os.name == "posix": # If the OS is linux.
        linux() # Call the linux function
    elif os.name in ("nt", "dos", "ce"): # If the OS is Windows...
        windows() # Call the windows function
    print ('\n[+] Check the log file ' + outputfile + ' [+]\n') # Display the name of the log
if __name__ == '__main__':
    main() # Call the main function# Script Name : password_cracker.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Old school password cracker using python
from sys import platform as _platform
# Check the current operating system to import the correct version of crypt
if _platform in ["linux", "linux2", "darwin"]: # darwin is _platform name for Mac OS X
import crypt # Import the module
elif _platform == "win32":
# Windows
try:
import fcrypt # Try importing the fcrypt module
except ImportError:
print 'Please install fcrypt if you are on Windows'
def testPass(cryptPass): # Try every word in dictionary.txt against one crypt(3) hash
    salt = cryptPass[0:2]  # classic DES crypt: the first two characters of the hash are the salt
    dictFile = open('dictionary.txt','r') # Open the dictionary file (NOTE(review): never closed)
    for word in dictFile.readlines(): # Scan through the file
        word = word.strip('\n')
        cryptWord = crypt.crypt(word, salt) # Hash the candidate with the same salt
        if (cryptWord == cryptPass): # Match means 'word' is the plaintext password
            print "[+] Found Password: "+word+"\n"
            return
    print "[-] Password Not Found.\n"
    return
def main():
    """Read passwords.txt ('user:hash' per line) and try to crack each hash."""
    passFile = open('passwords.txt') # Open the password file
    for line in passFile.readlines(): # Read through the file
        if ":" in line: # Skip any line that is not user:hash
            user = line.split(':')[0]
            # Hash portion; strip(' ') removes spaces only, so a trailing
            # newline survives -- presumably harmless since the salt is the
            # first two chars, but TODO confirm.
            cryptPass = line.split(':')[1].strip(' ')
            print "[*] Cracking Password For: " + user
            testPass(cryptPass) # Call it to crack the users password
if __name__ == "__main__":
    main()# Script Name : check_file.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications : with statement added to ensure correct file closure
# Description : Check a file exists and that we can read the file
from __future__ import print_function
import sys # Import the Modules
import os # Import the Modules
# Prints usage if not appropriate length of arguments are provided
def usage():
    """Print the command-line usage string and terminate with status 0."""
    print('[-] Usage: python check_file.py <filename1> [filename2] ... [filenameN]')
    exit(0)
# Readfile Functions which open the file that is passed to the script
def readfile(filename):
    """Dump the full contents of *filename* to stdout."""
    # The context manager closes the handle on every exit path.
    with open(filename, 'r') as handle:
        contents = handle.read()
    print(contents)
def main():
    """Validate each filename argument, then print each readable file's contents.

    Arguments that do not exist or are not readable are skipped with a
    message; with no arguments, the usage text is printed and the script exits.
    """
    if len(sys.argv) >= 2: # Check the arguments passed to the script
        # FIX: the original removed entries from the list it was iterating,
        # which makes the loop skip the element after every removal. Build a
        # new list of the files that pass both checks instead.
        filenames = []
        for filename in sys.argv[1:]:
            if not os.path.isfile(filename): # The file must exist
                print('[-] ' + filename + ' does not exist.')
                continue
            if not os.access(filename, os.R_OK): # and be readable
                print('[-] ' + filename + ' access denied')
                continue
            filenames.append(filename)
    else:
        usage() # Print usage if no filenames were passed (exits)
    # Read the content of each surviving file
    for filename in filenames:
        print('[+] Reading from : ' + filename) # Display Message and read the file contents
        readfile(filename)
if __name__ == '__main__':
    main()
# Script Name : factorial_perm_comp.py
# Author : Ebiwari Williams
# Created : 20th May 2017
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Find Factorial, Permutation and Combination of a Number
def factorial(n):
    """Return n! computed iteratively; any n <= 0 yields 1 (as before)."""
    result = 1
    for i in range(2, n + 1):   # empty range for n <= 1, so result stays 1
        result *= i
    return result
def permutation(n, r):
    """Return nPr, the count of ordered arrangements of r items out of n.

    Fix: the original computed factorial(n) / factorial(n - r) with true
    division, which returns a float in Python 3 (and loses precision for
    large n). This computes the exact integer product n*(n-1)*...*(n-r+1);
    the numeric value is unchanged for all previous inputs.
    """
    result = 1
    for i in range(n - r + 1, n + 1):
        result *= i
    return result
def combination(n, r):
    """Return nCr as an exact integer.

    Fix: the original divided permutation(n, r) by factorial(r) with true
    division, producing a float in Python 3. This uses the multiplicative
    formula with exact intermediate division (each partial product is
    divisible by i), so the result is an exact int of the same value.
    """
    result = 1
    for i in range(1, r + 1):
        result = result * (n - r + i) // i
    return result
def main():
    """Interactive menu: ask for an operation (1/2/3) and its operands,
    then print the factorial, permutation or combination result.

    Each branch loops until the user supplies valid integers; any other
    menu choice falls through and the function simply returns.
    """
    print('choose between operator 1,2,3')
    print('1) Factorial')
    print('2) Permutation')
    print('3) Combination')
    operation = input('\n')
    if(operation == '1'):
        print('Factorial Computation\n')
        # Re-prompt until int() succeeds, then compute and leave the loop.
        while(True):
            try:
                n = int(input('\n Enter Value for n '))
                print('Factorial of {} = {}'.format(n,factorial(n)))
                break
            except(ValueError):
                print('Invalid Value')
                continue
    elif(operation == '2'):
        print('Permutation Computation\n')
        while(True):
            try:
                n = int(input('\n Enter Value for n '))
                r = int(input('\n Enter Value for r '))
                print('Permutation of {}P{} = {}'.format(n,r,permutation(n,r)))
                break
            except(ValueError):
                print('Invalid Value')
                continue
    elif(operation == '3'):
        print('Combination Computation\n')
        while(True):
            try:
                n = int(input('\n Enter Value for n '))
                r = int(input('\n Enter Value for r '))
                print('Combination of {}C{} = {}'.format(n,r,combination(n,r)))
                break
            except(ValueError):
                print('Invalid Value')
                continue
if __name__ == '__main__':
main()# Script Name : nmap_scan.py
# Author : Craig Richards
# Created : 24th May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This scans my scripts directory and gives a count of the different types of scripts, you need nmap installed to run this
import nmap # Import the module
import optparse # Import the module
def nmapScan(tgtHost, tgtPort): # Scan a single TCP port on one host and print its state
    # NOTE: Python 2 syntax (print statement); requires the python-nmap package.
    nmScan = nmap.PortScanner()
    nmScan.scan(tgtHost, tgtPort)
    # State is e.g. 'open', 'closed' or 'filtered' for the scanned port.
    state = nmScan[tgtHost]['tcp'][int(tgtPort)]['state']
    print "[*] " + tgtHost + " tcp/" + tgtPort + " " + state
def main(): # Parse -H/-p options and scan every requested port on the host
    parser = optparse.OptionParser('usage%prog ' + '-H <host> -p <port>') # Display options/help if required
    parser.add_option('-H', dest='tgtHost', type='string', help='specify host')
    parser.add_option('-p', dest='tgtPort', type='string', help='port')
    (options, args) = parser.parse_args()
    tgtHost = options.tgtHost
    # -p may hold a comma-separated list of ports; str() turns None into 'None'.
    tgtPorts = str(options.tgtPort).split(',')
    # NOTE(review): `|` is bitwise-or on the two booleans (works, but `or` is
    # conventional), and tgtPorts[0] can never be None after str().split —
    # a missing -p slips through as the literal string 'None'.
    if (tgtHost == None) | (tgtPorts[0] == None):
        print parser.usage
        exit(0)
    for tgtPort in tgtPorts: # Scan the hosts with the ports etc
        nmapScan(tgtHost, tgtPort)
if __name__ == '__main__':
main()
# Script Created by Yash Ladha
# Requirements:
# youtube-dl
# aria2c
# 10 Feb 2017
import subprocess
import sys
video_link, threads = sys.argv[1], sys.argv[2]
subprocess.call([
"youtube-dl",
video_link,
"--external-downloader",
"aria2c",
"--external-downloader-args",
"-x"+threads
])import urllib2
try:
urllib2.urlopen("http://google.com", timeout=2)
print ("working connection")
except urllib2.URLError:
print ("No internet connection")# Script Name : sqlite_check.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Runs checks to check my SQLITE database
import sqlite3 as lite
import sys
import os
dropbox= os.getenv("dropbox")
dbfile=("Databases\jarvis.db")
master_db=os.path.join(dropbox, dbfile)
con = None
try:
con = lite.connect(master_db)
cur = con.cursor()
cur.execute('SELECT SQLITE_VERSION()')
data = cur.fetchone()
print "SQLite version: %s" % data
except lite.Error, e:
print "Error %s:" % e.args[0]
sys.exit(1)
finally:
if con:
con.close()
con = lite.connect(master_db)
cur=con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
rows = cur.fetchall()
for row in rows:
print row
con = lite.connect(master_db)
cur=con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
while True:
row = cur.fetchone()
if row == None:
break
print row[0]print list(x for x in range(2,100,2))import pygame, sys, time
from pygame.locals import *
pygame.init()
window = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption("Shape")
WHITE = (255, 255, 255)
GREEN = ( 0, 255, 0)
window.fill(WHITE)
pygame.draw.polygon(window, GREEN, ((146, 0), (236, 277), (56, 277)))
# Game logic
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()# Script Name : fileinfo.py
# Author : Not sure where I got this from
# Created : 28th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Show file information for a given file
# get file information using os.stat()
# tested with Python24 vegsaeat 25sep2006
from __future__ import print_function
import os
import sys
import stat # index constants for os.stat()
import time
try_count = 16
while try_count:
file_name = raw_input("Enter a file name: ") # pick a file you have
try_count >>= 1
try:
file_stats = os.stat(file_name)
break
except OSError:
print ("\nNameError : [%s] No such file or directory\n", file_name)
if try_count == 0:
print ("Trial limit exceded \nExiting program")
sys.exit()
# create a dictionary to hold file info
file_info = {
'fname': file_name,
'fsize': file_stats[stat.ST_SIZE],
'f_lm' : time.strftime("%d/%m/%Y %I:%M:%S %p",
time.localtime(file_stats[stat.ST_MTIME])),
'f_la' : time.strftime("%d/%m/%Y %I:%M:%S %p",
time.localtime(file_stats[stat.ST_ATIME])),
'f_ct' : time.strftime("%d/%m/%Y %I:%M:%S %p",
time.localtime(file_stats[stat.ST_CTIME]))
}
print ("\nfile name = %(fname)s", file_info)
print ("file size = %(fsize)s bytes", file_info)
print ("last modified = %(f_lm)s", file_info)
print ("last accessed = %(f_la)s", file_info)
print ("creation time = %(f_ct)s\n", file_info)
if stat.S_ISDIR(file_stats[stat.ST_MODE]):
print ("This a directory")
else:
print ("This is not a directory\n")
print ("A closer look at the os.stat(%s) tuple:" % file_name)
print (file_stats)
print ("\nThe above tuple has the following sequence:")
print ("""st_mode (protection bits), st_ino (inode number),
st_dev (device), st_nlink (number of hard links),
st_uid (user ID of owner), st_gid (group ID of owner),
st_size (file size, bytes), st_atime (last access time, seconds since epoch),
st_mtime (last modification time), st_ctime (time of creation, Windows)"""
)# Script Name : dir_test.py
# Author : Craig Richards
# Created : 29th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Tests to see if the directory testdir exists, if not it will create the directory for you
from __future__ import print_function
import os # Import the OS Module
import sys
def main():
    """Prompt for a directory name and create it when it does not exist."""
    # raw_input only exists on Python 2; pick the right prompt function.
    if sys.version_info.major >= 3:
        input_func = input
    else:
        input_func = raw_input
    CheckDir = input_func("Enter the name of the directory to check : ")
    print()
    # Fix: use os.path.isdir instead of os.path.exists. With exists(), an
    # ordinary *file* of the same name was reported as "The directory
    # exists"; now such a name falls through to os.makedirs (which raises
    # a clear FileExistsError instead of silently lying).
    if os.path.isdir(CheckDir):
        print("The directory exists")
    else:
        print("No directory found for " + CheckDir)
        print()
        os.makedirs(CheckDir)
        print("Directory created for " + CheckDir)
if __name__ == '__main__':
main()import sys
from PIL import ImageDraw, ImageFont, Image
def input_par():
    """Interactively collect the watermark text, font size and RGB colour.

    Returns:
        tuple: (text, size, [r, g, b]) as entered by the user; the colour is
        read as three space-separated integers.
    """
    print('Enter the text to insert in image: ')
    text = str(input())
    print('Enter the desired size: ')
    size = int(input())
    print('Enter the color for the text(r, g, b): ')
    color_value = [int(i) for i in input().split(' ')]
    return text, size, color_value
    # (removed an unreachable `pass` that followed the return statement)
def main():
    """Open <argv[1]>.jpg, blank a corner region, draw user-supplied text on
    it and save the result under a user-chosen file name.

    Requires Pillow (PIL); the hard-coded font path and the (0, 2300) text
    anchor assume a Windows host and a large image — TODO confirm.
    """
    path_to_image = sys.argv[1]
    image_file = Image.open(path_to_image + '.jpg')
    image_file = image_file.convert("RGBA")
    pixdata = image_file.load()             # pixel access object for in-place edits
    print(image_file.size)
    text, size, color_value = input_par()   # interactive prompts
    font = ImageFont.truetype("C:\\Windows\\Fonts\\Arial.ttf", size=size)
    # Clean the background noise, if color != white, then set to black.
    # change with your color
    # Paints the top-left 100x100 pixels opaque white.
    for y in range(100):
        for x in range(100):
            pixdata[x, y] = (255, 255, 255, 255)
    image_file.show()
    # Drawing text on the picture
    draw = ImageDraw.Draw(image_file)
    draw.text((0, 2300), text, (color_value[0], color_value[1], color_value[2]), font=font)
    draw = ImageDraw.Draw(image_file)
    print('Enter the file name: ')
    file_name = str(input())
    image_file.save(file_name + ".jpg")
    pass
if __name__ == '__main__':
main()
def get_user_input(start, end):
    """Prompt until the user enters an integer in [start, end]; return it.

    Fix: the bounds were hard-coded to 1 and 6, silently ignoring the
    start/end parameters. They are now honoured — backward compatible with
    the existing call get_user_input(1, 6).
    """
    while True:
        try:
            userInput = int(input("Enter Your choice: "))
        except ValueError:
            # Non-numeric input: report and re-prompt.
            print("Please try again.")
            continue
        if start <= userInput <= end:
            return userInput
        print("Please try again.")
x = get_user_input(1,6)
print(x)
###Asks user to enter something, ie. a number option from a menu.
###While type != interger, and not in the given range,
###Program gives error message and asks for new input."""
Created on Thu Apr 27 16:28:36 2017
@author: barnabysandeford
"""
# Currently works for Safari, but just change to whichever
# browser you're using.
import time
#Changed the method of opening the browser.
#Selenium allows for the page to be refreshed.
from selenium import webdriver
#adding ability to change number of repeats
count = int(raw_input("Number of times to be repeated: "))
#Same as before
x = raw_input("Enter the URL (no https): ")
print( "Length of video:")
minutes = int(raw_input("Minutes "))
seconds = int(raw_input("Seconds "))
#Calculating the refreshrate from the user input
refreshrate = minutes * 60 + seconds
#Selecting Safari as the browser
driver = webdriver.Safari()
driver.get("http://"+x)
for i in range(count):
#Sets the page to refresh at the refreshrate.
time.sleep(refreshrate)
driver.refresh()# batch_file_rename.py
# Created: 6th August 2012
'''
This will batch rename a group of files in a given directory,
once you pass the current and new extensions
'''
__author__ = 'Craig Richards'
__version__ = '1.0'
import os
import sys
import argparse
def batch_rename(work_dir, old_ext, new_ext):
    '''
    Rename every file in *work_dir* whose extension equals *old_ext* so
    that it ends in *new_ext* instead (non-recursive).

    Args:
        work_dir: directory whose direct entries are examined.
        old_ext:  extension to match, including the dot (e.g. '.txt').
        new_ext:  replacement extension, including the dot.
    '''
    for filename in os.listdir(work_dir):
        # os.path.splitext yields the trailing extension with its dot.
        file_ext = os.path.splitext(filename)[1]
        if old_ext == file_ext:
            # Drop the old extension and append the new one. A length-based
            # slice replaces the original list()/splice/join round trip and
            # still behaves correctly when old_ext is '' (appends new_ext).
            newfile = filename[: len(filename) - len(old_ext)] + new_ext
            os.rename(
                os.path.join(work_dir, filename),
                os.path.join(work_dir, newfile)
            )
def get_parser():
    '''
    Build and return the argparse parser for the batch-rename command
    line: positional WORK_DIR, OLD_EXT and NEW_EXT arguments.
    '''
    parser = argparse.ArgumentParser(
        description='change extension of files in a working directory')
    positionals = (
        ('work_dir', 'WORK_DIR', 'the directory where to change extension'),
        ('old_ext', 'OLD_EXT', 'old extension'),
        ('new_ext', 'NEW_EXT', 'new extension'),
    )
    for name, metavar, helptext in positionals:
        parser.add_argument(name, metavar=metavar, type=str, nargs=1,
                            help=helptext)
    return parser
def main():
    '''
    Entry point for direct invocation: parse the command line and hand
    the three values over to batch_rename().
    '''
    # argparse stores each positional as a one-element list (nargs=1).
    args = vars(get_parser().parse_args())
    work_dir = args['work_dir'][0]
    old_ext = args['old_ext'][0]
    new_ext = args['new_ext'][0]
    batch_rename(work_dir, old_ext, new_ext)
# Script Name : python_sms.py
# Author : Craig Richards
# Created : 16th February 2017
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will text all the students Karate Club
import urllib # URL functions
import urllib2 # URL functions
import os
from time import strftime
import sqlite3
import sys
dropbox= os.getenv("dropbox")
scripts=os.getenv("scripts")
dbfile=("database/maindatabase.db")
master_db=os.path.join(dropbox, dbfile)
f=open(scripts+'/output/student.txt','a')
tdate=strftime("%d-%m")
conn = sqlite3.connect(master_db)
cursor = conn.cursor()
loc_stmt='SELECT name, number from table'
cursor.execute(loc_stmt)
while True:
row = cursor.fetchone()
if row == None:
break
sname=row[0]
snumber=row[1]
message = (sname + ' There will be NO training tonight on the ' + tdate + ' Sorry for the late notice, I have sent a mail as well, just trying to reach everyone, please do not reply to this message as this is automated')
username = 'YOUR_USERNAME'
sender = 'WHO_IS_SENDING_THE_MAIL'
hash = 'YOUR HASH YOU GET FROM YOUR ACCOUNT'
numbers = (snumber)
# Set flag to 1 to simulate sending, this saves your credits while you are testing your code. # To send real message set this flag to 0
test_flag = 0
#-----------------------------------
# No need to edit anything below this line
#-----------------------------------
values = {'test' : test_flag,
'uname' : username,
'hash' : hash,
'message' : message,
'from' : sender,
'selectednums' : numbers }
url = 'http://www.txtlocal.com/sendsmspost.php'
postdata = urllib.urlencode(values)
req = urllib2.Request(url, postdata)
print ('Attempting to send SMS to '+ sname + ' at ' + snumber + ' on ' + tdate)
f.write ('Attempting to send SMS to '+ sname + ' at ' + snumber + ' on ' + tdate + '\n')
try:
response = urllib2.urlopen(req)
response_url = response.geturl()
if response_url==url:
print 'SMS sent!'
except urllib2.URLError, e:
print 'Send failed!'
print e.reasonfrom sys import argv
script, input_file = argv
def print_all(f):
    # Print the entire remaining contents of open file object *f* (Python 2).
    print f.read()
# seek(n) to read a file's content from byte-n
def rewind(f):
    """Reset file object *f* back to the start, like rewinding a tape."""
    f.seek(0)
def print_a_line(line_count, f):
    # Print *line_count* followed by the next line read from *f* (Python 2).
    print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind(current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_file.close()# Script Name : recyclebin.py
# Author : Craig Richards
# Created : 07th June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Scans the recyclebin and displays the files in there, originally got this script from the Violent Python book
import os # Load the Module
import optparse # Load the Module
from _winreg import * # Load the Module
def sid2user(sid): # Map a Windows SID to a user name via the ProfileList registry key
    # Windows-only: relies on _winreg (OpenKey/QueryValueEx imported above).
    try:
        key = OpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList" + '\\' + sid)
        (value, type) = QueryValueEx(key, 'ProfileImagePath')
        # ProfileImagePath ends in the profile folder, e.g. ...\Users\alice.
        user = value.split('\\')[-1]
        return user
    # NOTE(review): bare except hides all errors (missing key, access denied)
    # and falls back to returning the raw SID — consider narrowing it.
    except:
        return sid
def returnDir():
    """Return the first known recycle-bin directory that exists, else None."""
    candidates = ('c:\\Recycler\\', 'C:\\Recycled\\', 'C:\\$RECYCLE.BIN\\')
    for candidate in candidates:
        if os.path.isdir(candidate):
            return candidate
    # No recycle-bin directory found (e.g. not running on Windows).
    return None
def findRecycled(recycleDir): # List recycle-bin contents grouped by owning user
    # Each entry directly under the recycle bin is named after a user SID.
    dirList = os.listdir(recycleDir)
    for sid in dirList:
        files = os.listdir(recycleDir + sid)
        user = sid2user(sid)  # resolve SID to a readable user name
        print '\n[*] Listing Files for User: ' + str(user)
        for file in files:
            print '[+] Found File: ' + str(file)
def main():
    # Locate the recycle-bin directory, then list its files per user.
    # NOTE(review): returnDir() may return None when no recycle bin exists;
    # findRecycled would then fail on os.listdir(None) — confirm/guard.
    recycleDir = returnDir()
    findRecycled(recycleDir)
if __name__ == '__main__':
main()# Script Name : powerdown_startup.py
# Author : Craig Richards
# Created : 05th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This goes through the server list and pings the machine, if it's up it will load the putty session, if its not it will notify you.
import os # Load the Library Module
import subprocess # Load the Library Module
from time import strftime # Load just the strftime Module from Time
def windows(): # This is the function to run if it detects the OS is windows.
    # Pings every host in startup_list.txt; reachable hosts get a PuTTY
    # session opened, unreachable ones are only logged.
    # NOTE(review): the log handle and the 'NUL' handles are never closed,
    # and shell=True interpolates the server name into a shell command.
    f = open('server_startup_'+strftime("%Y-%m-%d")+'.log', 'a') # Open the logfile
    for server in open('startup_list.txt','r'): # Read the list of servers from the list
        ret = subprocess.call("ping -n 3 %s" % server, shell=True,stdout=open('NUL', 'w'),stderr=subprocess.STDOUT) # Ping the servers in turn
        if ret == 0: # If you get a response.
            f.write ("%s: is alive, loading PuTTY session" % server.strip() + "\n") # Write out to the logfile
            subprocess.Popen(('putty -load '+server)) # Load the putty session
        else:
            f.write ("%s : did not respond" % server.strip() + "\n") # Write to the logfile if the server is down
def linux():
    # Same as windows(), but uses `ping -c` and opens an ssh session for
    # every host that responds; results are appended to a dated log file.
    # NOTE(review): the log and /dev/null handles are never closed.
    f = open('server_startup_'+strftime("%Y-%m-%d")+'.log', 'a') # Open the logfile
    for server in open('startup_list.txt'): # Read the list of servers from the list
        ret = subprocess.call("ping -c 3 %s" % server, shell=True,stdout=open('/dev/null', 'w'),stderr=subprocess.STDOUT) # Ping the servers in turn
        if ret == 0: # If you get a response.
            f.write ("%s: is alive" % server.strip() + "\n") # Print a message
            subprocess.Popen(['ssh', server.strip()])
        else:
            f.write ("%s: did not respond" % server.strip() + "\n")
# End of the functions
# Start of the Main Program
if os.name == "posix": # If the OS is linux...
linux() # Call the linux function
elif os.name in ("nt", "dos", "ce"): # If the OS is Windows...
windows() # Call the windows functionfrom __future__ import print_function
import SimpleHTTPServer
import SocketServer
PORT = 8000 #This will serve at port 8080
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()#Author: OMKAR PATHAK
#This script helps to build a simple stopwatch application using Python's time module.
import time
print('Press ENTER to begin, Press Ctrl + C to stop')
while True:
try:
input() #For ENTER
starttime = time.time()
print('Started')
except KeyboardInterrupt:
print('Stopped')
endtime = time.time()
print('Total Time:', round(endtime - starttime, 2),'secs')
break# Script Name : folder_size.py
# Author : Craig Richards
# Created : 19th July 2012
# Last Modified : 22 February 2016
# Version : 1.0.1
# Modifications : Modified the Printing method and added a few comments
# Description : This will scan the current directory and all subdirectories and display the size.
import os
import sys # Load the library module and the sys module for the argument vector'''
try:
directory = sys.argv[1] # Set the variable directory to be the argument supplied by user.
except IndexError:
sys.exit("Must provide an argument.")
dir_size = 0 # Set the size to 0
fsizedicr = {'Bytes': 1,
'Kilobytes': float(1) / 1024,
'Megabytes': float(1) / (1024 * 1024),
'Gigabytes': float(1) / (1024 * 1024 * 1024)}
for (path, dirs, files) in os.walk(directory): # Walk through all the directories. For each iteration, os.walk returns the folders, subfolders and files in the dir.
for file in files: # Get all the files
filename = os.path.join(path, file)
dir_size += os.path.getsize(filename) # Add the size of each file in the root dir to get the total size.
fsizeList = [str(round(fsizedicr[key] * dir_size, 2)) + " " + key for key in fsizedicr] # List of units
if dir_size == 0: print ("File Empty") # Sanity check to eliminate corner-case of empty file.
else:
for units in sorted(fsizeList)[::-1]: # Reverse sort list of units so smallest magnitude units print first.
print ("Folder Size: " + units)"""
Written by: Shreyas Daniel - github.com/shreydan
Description: Uses Pythons eval() function
as a way to implement calculator
Functions available:
+ : addition
- : subtraction
* : multiplication
/ : division
% : percentage
sine: sin(rad)
cosine: cos(rad)
tangent: tan(rad)
square root: sqrt(n)
pi: 3.141......
"""
import math
def main():
    """Prompt for an arithmetic expression and print its evaluated result.

    Python 2 script (raw_input). Recognised function names (sin, cos, tan,
    sqrt, pi) are rewritten to their math-module equivalents before eval.
    """
    def calc(k):
        # Prefix every recognised function/constant with 'math.' so that
        # eval can resolve it.
        functions = ['sin', 'cos', 'tan', 'sqrt', 'pi']
        for i in functions:
            if i in k.lower():
                withmath = 'math.' + i
                k = k.replace(i, withmath)
        # NOTE(review): eval on raw user input executes arbitrary Python —
        # acceptable for a local toy calculator only.
        try:
            k = eval(k)
        except ZeroDivisionError:
            print ("Can't divide by 0")
            exit()
        except NameError:
            print ("Invalid input")
            exit()
        return k
    print ("\nScientific Calculator\nEg: pi * sin(90) - sqrt(81)")
    k = raw_input("\nWhat is ") # Using input() function is causing NameError. Changing it to raw_input() fixes this.
    # Normalise the expression: strip spaces, map '^' to '**', drop '='/'?',
    # and treat '%' as "divide by 100".
    k = k.replace(' ', '')
    k = k.replace('^', '**')
    k = k.replace('=', '')
    k = k.replace('?', '')
    k = k.replace('%', '/100')
    print ("\n" + str(calc(k)))
main()# Script Name : env_check.py
# Author : Craig Richards
# Created : 14th May 2012
# Last Modified : 14 February 2016
# Version : 1.0.1
# Modifications : 1.0.1 - Tidy up comments and syntax
# Description : This script will check to see if all of the environment variables I require are set
import os
confdir = os.getenv("my_config") # Set the variable confdir from the OS environment variable
conffile = 'env_check.conf' # Set the variable conffile
conffilename = os.path.join(confdir, conffile) # Set the variable conffilename by joining confdir and conffile together
for env_check in open(conffilename): # Open the config file and read all the settings
env_check = env_check.strip() # Set the variable as itsself, but strip the extra text out
print '[{}]'.format(env_check) # Format the Output to be in Square Brackets
newenv = os.getenv(env_check) # Set the variable newenv to get the settings from the OS what is currently set for the settings out the configfile
if newenv is None: # If it doesn't exist
print env_check, 'is not set' # Print it is not set
else: # Else if it does exist
print 'Current Setting for {}={}\n'.format(env_check, newenv) # Print out the details# Script Name : script_count.py
# Author : Craig Richards
# Created : 27th February 2012
# Last Modified : 20th July 2012
# Version : 1.3
# Modifications : 1.1 - 28-02-2012 - CR - Changed inside github and development functions, so instead of if os.name = "posix" do this else do this etc
# : I used os.path.join, so it condensed 4 lines down to 1
# : 1.2 - 10-05-2012 - CR - Added a line to include PHP scripts.
# : 1.3 - 20-07-2012 - CR - Added the line to include Batch scripts
# Description : This scans my scripts directory and gives a count of the different types of scripts
import os # Load the library module
path = os.getenv("scripts") # Set the variable path by getting the value from the OS environment variable scripts
dropbox = os.getenv("dropbox") # Set the variable dropbox by getting the value from the OS environment variable dropbox
def clear_screen():
    """Clear the terminal window using the platform's shell command."""
    # 'posix' covers Unix/Linux/MacOS/BSD; 'nt'/'dos'/'ce' cover Windows.
    commands = {"posix": "clear", "nt": "CLS", "dos": "CLS", "ce": "CLS"}
    command = commands.get(os.name)
    if command is not None:
        os.system(command)
def count_files(path, extensions):
    """Count files anywhere under *path* whose names end in *extensions*.

    *extensions* is a single suffix string or a tuple of suffixes, exactly
    as accepted by str.endswith.
    """
    return sum(
        name.endswith(extensions)           # True counts as 1
        for _, _, files in os.walk(path)    # walk the whole tree
        for name in files
    )
def github(): # Start of the function just to count the files in the github directory
    # Reports how many files are waiting in <dropbox>/github to be pushed;
    # nags above 5, congratulates at 0. Python 2 print statements.
    github_dir = os.path.join(dropbox, 'github') # Joins the paths to get the github directory - 1.1
    github_count = sum((len(f) for _, _, f in os.walk(github_dir))) # Get a count for all the files in the directory
    if github_count > 5: # If the number of files is greater then 5, then print the following messages
        print '\nYou have too many in here, start uploading !!!!!'
        print 'You have: ' + str(github_count) + ' waiting to be uploaded to github!!'
    elif github_count == 0: # Unless the count is 0, then print the following messages
        print '\nGithub directory is all Clear'
    else: # If it is any other number then print the following message, showing the number outstanding.
        print '\nYou have: ' + str(github_count) + ' waiting to be uploaded to github!!'
def development(): # Start of the function just to count the files in the development directory
    # Same idea as github(): counts files under <scripts>/development and
    # nags above 10. Python 2 print statements.
    dev_dir = os.path.join(path, 'development') # Joins the paths to get the development directory - 1.1
    dev_count = sum((len(f) for _, _, f in os.walk(dev_dir))) # Get a count for all the files in the directory
    if dev_count > 10: # If the number of files is greater then 10, then print the following messages
        print '\nYou have too many in here, finish them or delete them !!!!!'
        print 'You have: ' + str(dev_count) + ' waiting to be finished!!'
    elif dev_count ==0: # Unless the count is 0, then print the following messages
        print '\nDevelopment directory is all clear'
    else:
        print '\nYou have: ' + str(dev_count) + ' waiting to be finished!!' # If it is any other number then print the following message, showing the number outstanding.
clear_screen() # Call the function to clear the screen
print '\nYou have the following :\n'
print 'AutoIT:\t' + str(count_files(path, '.au3')) # Run the count_files function to count the files with the extension we pass
print 'Batch:\t' + str(count_files(path, ('.bat', ',cmd'))) # 1.3
print 'Perl:\t' + str(count_files(path, '.pl'))
print 'PHP:\t' + str(count_files(path, '.php')) # 1.2
print 'Python:\t' + str(count_files(path, '.py'))
print 'Shell:\t' + str(count_files(path, ('.ksh', '.sh', '.bash')))
print 'SQL:\t' + str(count_files(path, '.sql'))
github() # Call the github function
development() # Call the development function#Made on May 27th, 2017
#Made by SlimxShadyx
#Dice Rolling Simulator
import random
#These variables are used for user input and while loop checking.
correct_word = False
dice_checker = False
dicer = False
roller_loop = False
#Checking the user input to start the program.
while correct_word == False:
user_input_raw = raw_input("\r\nWelcome to the Dice Rolling Simulator! We currently support 6, 8, and 12 sided die! Type [start] to begin!\r\n?>")
#Converting the user input to lower case.
user_input = (user_input_raw.lower())
if user_input == 'start':
correct_word = True
else:
print "Please type [start] to begin!\r\n"
#Main program loop. Exiting this, exits the program.
while roller_loop == False:
#Second While loop to ask the user for the certain die they want.
while dice_checker == False:
user_dice_chooser = raw_input("\r\nGreat! Begin by choosing a die! [6] [8] [10]\r\n?>")
user_dice_chooser = int(user_dice_chooser)
if user_dice_chooser == 6:
dice_checker = True
elif user_dice_chooser == 8:
dice_checker = True
elif user_dice_chooser == 12:
dice_checker = True
else:
print "\r\nPlease choose one of the applicable options!\r\n"
#Another inner while loop. This one does the actual rolling, as well as allowing the user to re-roll without restarting the program.
while dicer == False:
if user_dice_chooser == 6:
dice_6 = random.randint(1,6)
print "\r\nYou rolled a " + str(dice_6) + "!\r\n"
dicer = True
user_exit_checker_raw = raw_input("\r\nIf you want to roll another die, type [roll]. To exit, type [exit].\r\n?>")
user_exit_checker = (user_exit_checker_raw.lower())
if user_exit_checker == 'roll':
dicer = False
elif user_exit_checker == 'exit':
roller_loop = True
elif user_dice_chooser == 8:
dice_8 = random.randint(1,8)
print "\r\nYou rolled a " + str(dice_8) + "!"
dicer = True
user_exit_checker_raw = raw_input("\r\nIf you want to roll another die, type [roll]. To exit, type [exit].\r\n?>")
user_exit_checker = (user_exit_checker_raw.lower())
if user_exit_checker == 'roll':
dicer = False
elif user_exit_checker == 'exit':
roller_loop = True
elif user_dice_chooser == 12:
dice_12 = random.randint(1,12)
print "\r\nYou rolled a " + str(dice_12) + "!"
dicer = True
user_exit_checker_raw = raw_input("\r\nIf you want to roll another die, type [roll]. To exit, type [exit].\r\n?>")
user_exit_checker = (user_exit_checker_raw.lower())
if user_exit_checker == 'roll':
dicer = False
elif user_exit_checker == 'exit':
roller_loop = True
print "Thanks for using the Dice Rolling Simulator! Have a great day! =)"
# Script Name : script_listing.py
# Author : Craig Richards
# Created : 15th February 2012
# Last Modified : 29th May 2012
# Version : 1.2
# Modifications : 1.1 - 28-02-2012 - CR - Added the variable to get the logs directory, I then joined the output so the file goes to the logs directory
# : 1.2 - 29-05/2012 - CR - Changed the line so it doesn't ask for a directory, it now uses the environment varaible scripts
# Description : This will list all the files in the given directory, it will also go through all the subdirectories as well
import os # Load the library module
logdir = os.getenv("logs") # Set the variable logdir by getting the value from the OS environment variable logs
logfile = 'script_list.log' # Set the variable logfile
path = os.getenv("scripts") # Set the varable path by getting the value from the OS environment variable scripts - 1.2
#path = (raw_input("Enter dir: ")) # Ask the user for the directory to scan
logfilename = os.path.join(logdir, logfile) # Set the variable logfilename by joining logdir and logfile together
log = open(logfilename, 'w') # Set the variable log and open the logfile for writing
for dirpath, dirname, filenames in os.walk(path): # Go through the directories and the subdirectories
for filename in filenames: # Get all the filenames
log.write(os.path.join(dirpath, filename)+'\n') # Write the full path out to the logfile
print ("\nYour logfile " , logfilename, "has been created") # Small message informing the user the file has been created# Requirements:
# pip install numpy
# sudo apt-get install python-openCV
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame',gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | [
"ekkya@tcd.ie"
] | ekkya@tcd.ie |
6c907dbb07bf1ef1ce4fdced214be391d28b2ca8 | eda9187adfd53c03f55207ad05d09d2d118baa4f | /python3_base/python_class_method.py | 4bb8dc5866ce970db1d955879a443c4426f31c41 | [] | no_license | HuiZhaozh/python_tutorials | 168761c9d21ad127a604512d7c6c6b38b4faa3c7 | bde4245741081656875bcba2e4e4fcb6b711a3d9 | refs/heads/master | 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | # -*- coding:utf-8 -*-
# /usr/bin/python
'''
@Author: Yan Errol
@Email:2681506@gmail.com
@Date: 2019-05-20 17:22
@File:class_method.py
@Describe:静态方法
'''
from math import sqrt
class Triangle(object):
    """A triangle described by its three side lengths."""

    def __init__(self, a, b, c):
        """Remember the three side lengths as private attributes."""
        self._a = a
        self._b = b
        self._c = c

    @staticmethod
    def is_valid(a, b, c):
        """Return True when a, b, c satisfy the triangle inequality."""
        # Checking the two shortest sides against the longest is equivalent
        # to checking all three pairwise sums.
        shortest, middle, longest = sorted((a, b, c))
        return shortest + middle > longest

    def perimeter(self):
        """Return the sum of the three side lengths."""
        return self._a + self._b + self._c

    def area(self):
        """Return the area computed with Heron's formula."""
        s = self.perimeter() / 2
        return sqrt(s * (s - self._a) * (s - self._b) * (s - self._c))
def main():
    """Demonstrate Triangle: validate sides 3-4-5, then print perimeter and area."""
    a, b, c = 3, 4, 5
    # Static methods and class methods are both invoked by sending a message
    # to the class itself.
    if Triangle.is_valid(a, b, c):
        t = Triangle(a, b, c)
        print(t.perimeter())
        # Instance methods can also be called through the class, but then the
        # receiving object must be passed in explicitly as the first argument.
        # print(Triangle.perimeter(t))
        print(t.area())
        # print(Triangle.area(t))
    else:
        print('无法构成三角形.')
main() | [
"2681506@gmail.com"
] | 2681506@gmail.com |
b6db4caaa1b3f409974642244d2e45e58bea2b74 | d94d22ab20a11ab6c473d8aba4038c97f75806c4 | /python小栗子/t57.py | c34766c279355c2457734c45293ae01587fccbaf | [] | no_license | songszw/python | a1d0419b995df13aee5997d24c09dccab91ac9e0 | 5135a3efcdcc2a37f7321ae19271c9315f48bcaf | refs/heads/master | 2020-02-26T16:24:28.411919 | 2017-04-26T09:08:41 | 2017-04-26T09:08:41 | 71,195,225 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | print('|--- 欢迎进入宋氏通讯录 ---|')
print('|--- 1:查询联系人资料 ---|')
print('|--- 2:插入新的联系人 ---|')
print('|--- 3:删除已有联系人 ---|')
print('|--- 4:退出通讯录程序 ---|')
contacts = dict()
while 1:
num = int(input('please enter the number you want to do: '))
if num==1:
name = input('please enter the name you waht to check: ')
if name in contacts:
print(name+':'+contacts[name])
else:
print('sorry,the man who wasn\'t here')
if num==2:
name = input('please enter your name:')
if name in contacts:
print('sorry, the man is already in the contacts -->>',end=' ')
print(name+":"+contacts[name])
if input('do you want to change the name ?[YES/NO]:')=='YES':
contacts[name]=input('please enter the phone number:')
else:
contacts[name] =input('please enter the phone number:')
else:
contacts[name]=input('please enter the phone number:')
if num==3:
name = input('please enter the name who you want to delete:')
if name in contacts:
contacts.pop(name)
else:
print('sorry, the man who wasn\'t here')
if num==4:
break
print('|--- 感谢使用通讯录程序 ---|')
| [
"noreply@github.com"
] | songszw.noreply@github.com |
3b1ef4589a9160918ff64aa8241df8fa9664909d | cd2efd6a8f7a8c7eec88e03c18932171c78ee69b | /app/api/crud.py | c4d36257fac50aaa8778262b4f86fcead832ff7d | [] | no_license | Milchdealer/ragna0-vendings | db3d895387c28482c84650d8c1d40628d1f8b63a | f8350ff489cefb3105c5ff89fc93ecf52811877d | refs/heads/main | 2023-02-08T01:35:39.021253 | 2020-12-15T15:50:18 | 2020-12-15T15:50:18 | 321,670,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | """
CRUD endpoints for the API.
"""
from datetime import datetime
from sqlalchemy import func
from sqlalchemy.orm import Session
from scraper.src import db as model
def get_vendings_latest(db: Session):
latest = db.query(func.max(model.Vending.run_id)).one()[0]
return db.query(model.Vending).filter(model.Vending.run_id == latest).all()
def get_vendings_timerange(db: Session, dt_start: datetime, dt_end: datetime):
return (db.query(model.Vending)
.filter(model.Vending.created_at >= dt_start)
.filter(model.Vending.created_at <= dt_end)
.all())
def get_item(db: Session, item_id: int):
return db.query(model.VendingEntry).filter(model.VendingEntry.item_id == item_id).all()
| [
"teraku@teraku.de"
] | teraku@teraku.de |
198dda57318ae5a0180b8547867dd3d1b3034380 | 4df33fdbf9d00919ba99e3dff90dc08c65269ff8 | /app/request.py | 13ff6cf1510522b73c45757fa4c1428087eff6d1 | [
"MIT"
] | permissive | theonilahtash/News-Highlights | 89a0911d9884788fde5b56a4c582e556883dd84d | d50d81ba63dc3de3941c2a65d3d7c08c802ef604 | refs/heads/master | 2020-04-05T10:49:11.961749 | 2018-11-14T08:59:34 | 2018-11-14T08:59:34 | 156,811,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,353 | py | import urllib.request,json
from .models import Source
from .models import Articles
from . import main
# News = news.News
# Articles = articles.Articles
#Getting api key
api_key = None
#Getting the news base url
# base_url = None
base_url='https://newsapi.org/v2/sources?&category={}&apiKey={}'
#Getting the articles base url
base2_url ='https://newsapi.org/v2/top-headlines?sources={}&apiKey={}'
def configure_request(app):
global api_key
api_key = app.config['SOURCE_API_KEY']
print(api_key)
def get_source(category):
get_source_url = base_url.format(category, api_key)
print(get_source_url)
with urllib.request.urlopen(get_source_url)as url:
get_source_data = url.read()
print(get_source_data)
get_source_response =json.loads(get_source_data)
source_result = None
if get_source_response["sources"]:
news_source_list = get_source_response["sources"]
source_result = process_source(news_source_list)
return source_result
def process_source(source_list):
source_results=[]
for source_item in source_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
language = source_item.get('language')
country = source_item.get('country')
if url:
source_object = Source(id,name,description,url,category,language,country)
source_results.append(source_object)
return source_results
def get_articles(category_news):
get_articles_url = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey={}'.format(
category_news, api_key)
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
get_articles_data = url.read()
print(get_articles_data)
get_articles_response =json.loads(get_articles_data)
articles_results = None
if get_articles_response["articles"]:
articles_results_list = get_articles_response["articles"]
articles_results = process_articles(articles_results_list)
return articles_results
def process_articles(articles_list):
articles_results=[]
for article in articles_list:
author = article.get('author')
title = article.get('title')
description = article.get('description')
url = article.get('url')
image = article.get('urlToImage')
time = article.get('publishedAt')
if title:
articles_object = Articles(author,title,description,url,image,time)
articles_results.append(articles_object)
return articles_results
def search_source(source_name):
search_source_url = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey={}'
with urllib.request.urlopen(search_source_url) as url:
search_source_data = url.read()
print(search_source_data)
search_source_response = json.loads(search_source_data)
search_source_results = None
if search_source_response['results']:
search_source_list = search_source_response['results']
# search_source_results = process_results(search_source_list)
return search_source_results
| [
"theonilahtash@gmail.com"
] | theonilahtash@gmail.com |
1b59163e267a1c4ca5354b123286b31c6efaf4ad | 1105e5a86af5fba61bac894a485d42f5e075e029 | /Client_machine/Test/04_pyshark/live_inf.py | 96a6b77f61cc3cc7f335a3329eba32386d564fd2 | [] | no_license | baoquocnguyen/HAS | 7d0de1a5a94f5aadb249df9d7c517e94cc510070 | 7a9df72ded8c32145e0b338b6761ee8acd857482 | refs/heads/master | 2021-07-05T08:21:25.806871 | 2017-09-29T10:12:07 | 2017-09-29T10:12:07 | 105,249,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | import pyshark
capture = pyshark.LiveCapture(interface='ens160')
capture.sniff(timeout=10)
capture
for packet in capture.sniff_continuously(packet_count=5):
print 'Just arrived:', packet
print '############:', packet['ip'].dst
#def print_callback(pkt):
# print 'Just arrived:', pkt
#capture.apply_on_packets(print_callback, timeout=5)
#packet['ip'].dst
#packet.ip.src # By protocol attribute
#packet[2].src # By layer index
#cap = pyshark.FileCapture(self._pcap, keep_packets=False, display_filter='http')
#for pkt in capture:
# print pkt
| [
"baoquoc.nguyen@orange.com"
] | baoquoc.nguyen@orange.com |
daa50c983df30fee70ede9c50eeba583cdcbcea9 | 1bd27697b31a0f9916500b6ffe48102ddb63f0d5 | /compute_total_intronic_composition_dm_melanogaster.py | 672a8280e99b259aac5b33f0000e2bc164be6c0c | [] | no_license | rhondene/Misc_bioinformatics | 60eb77bde02957d47ae758a4da3e3f92c14e556d | 48ef12183d33a4393cb5b63853fe24fc5603ca1e | refs/heads/master | 2022-06-22T19:51:51.454325 | 2022-06-20T19:01:58 | 2022-06-20T19:01:58 | 197,657,023 | 2 | 1 | null | 2019-10-31T22:38:56 | 2019-07-18T21:06:07 | Python | UTF-8 | Python | false | false | 4,428 | py | """Author: Rhondene Wint
Purpose: Using intron-only bed file and the genome gtf file to extract the transcript witht the longest
intron for each gene in D.melanogaster in order to compute total intronic composition in D.melanogaster
You can also use this output to download these intronic sequences from flybase or ensembl"""
from interval import interval, inf, imath ## interval requires running python on Linux or Mac
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
## to get total introns, I will merge all overlapping introns in the bed file of a transcript
## then map the transcript ID to the gene IDs
all_introns = pd.read_table('dmel_introns_ensembl.bed', sep='\t',header=None, names=['Chr','Start','End','ID', 'Phase', 'Strand'])
##split columns to get transcript IDs
IDs = []
for name in all_introns['ID'].values:
n = name.split('_')[0]
IDs.append(n)
all_introns['Transcript_ID']=IDs
# compute intron lengths
all_introns['Length']= all_introns['End']- all_introns['Start']
all_introns.head()
transc_ids = all_introns['Transcript_ID'].unique()
transcripts = all_introns.groupby('Transcript_ID')
"""merge overlapping introons for each gene and compute length"""
merged_regions = dict()
## iterate over each gene
for ID in transc_ids:
df = transcripts.get_group(ID).sort_values(by='Start', ascending=True) ##sort by ascending order
#set the initial value to start of the earliest intron
consol_region = interval[df['Start'].values[0]]
##iterate over exons of the gene
for i in range(df.shape[0]):
#create an interval of an individual exon region
intron_size = interval[df['Start'].values[i],df['End'].values[i]]
##consolidate overlapping the intron region
consol_region= consol_region | intron_size
##finally store a list of non-overlapping intronic intervals of a gene
merged_regions[ID]=consol_region
##store total_intron_size for each transcript
transc_total_introns=dict()
for ID in transc_ids:
consol_region = merged_regions[ID]
total=0
for region in consol_region:
total+= region[1]-region[0]
transc_total_introns[ID] = total ##for my own use when I want to look intron sise distbtion
##store the total intron lenghths for transcript for each gene in a table
total_introns_transc = pd.DataFrame.from_dict(transc_total_introns,orient='index').reset_index()
total_introns_transc.columns=['Transcript ID', 'Total Intron Size']
##select transcript entries for mRNA and ncRNA fromt the gtf file
all_transc= fb_gtf.query('Feature!="gene" and Feature!="exon" and Feature!= "5UTR" and Feature!="3UTR" and Feature!="stop_codon" and Feature!="start_codon" and Feature!="CDS"')
##parse Attributes to obtain transcript IDs
trans_id = []
for attr in all_transc['Attributes'].values:
ID = attr.split(";")[2].split(" ")[2].replace('"',"")
trans_id.append(ID)
all_transc['Transcript ID'] = trans_id
#### Okay so 83 transcript ID in the ensembl intron annotation based on
##Flybase release 6.22 is missing in the flybase release 6.27 gtf, so those 83 gonna get dropped
""" identify missing transcript entries """
present = []
for i in range(total_introns_transc.shape[0]):
transc_id = total_introns_transc['Transcript ID'].values[i]
if transc_id not in all_transc['Transcript ID'].values:
present.append('Missing')
else:
present.append('Yes')
total_introns_transc['Present']=present
""" select entries that are present in both annotation, i.e. filter out the 83 transcitps"""
total_introns2 = total_introns_transc.query('Present=="Yes"')
"""update the code for mapping transcript ID to gen ID"""
parent_gene = []
for ID in total_introns2['Transcript ID'].values:
for gene in list(gene_dict.keys()):
if ID in gene_dict[gene]:
parent_gene.append(gene)
break
else:
continue
""" compute total intron size"""
parent_genes = total_introns2['Gene ID'].unique()
total_intron_size = 0
genes = total_introns2.groupby('Gene ID')
for gene in parent_genes:
##get all transcripts for each gene
df = genes.get_group(gene)
##update the total genomic intron with the max intron length
total_intron_size+=df['Total Intron Size'].max()
total_intron_size
| [
"noreply@github.com"
] | rhondene.noreply@github.com |
706cb3178cf5d5edfbbc2dabb13877b5af9ec5fb | 2a4d311b3949ac6921745de038b456059b60c061 | /fab_utils/fabfile/upstart.py | 1b831bb147ea43bb637c3d2b38a984cb5d4d098b | [] | no_license | jonathanmarmor/fab-utils | ec2684a55c47062f3c26fb02121013ca30b593e2 | 73808fefba877be0dcb6760aba099eb7b45a68bc | refs/heads/master | 2021-01-10T09:55:26.547383 | 2013-05-09T18:46:15 | 2013-05-09T18:46:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | from fabric.api import task, run, sudo, execute
from fabric.contrib.files import upload_template
@task
def start(service_name):
sudo('initctl start {}'.format(service_name))
@task
def stop(service_name):
sudo('initctl stop {}'.format(service_name))
@task
def restart(service_name):
result = sudo('initctl restart {}'.format(service_name))
if 'initctl: Unknown instance' in result:
execute(start, service_name)
@task
def list():
run('initctl list')
@task
def status(service_name):
run('initctl status {}'.format(service_name))
@task
def upload_config(service_name, node_env):
print '* Compiling and uploading upstart config file.'
ctx = {
'service_name': service_name,
'node_env': node_env
}
dest = '/etc/init/{}.conf'.format(service_name)
upload_template('upstart.tpl', dest, ctx, use_jinja=True,
mirror_local_mode=True, use_sudo=True)
sudo('chown -R root:root {}'.format(dest))
| [
"jm@ex.fm"
] | jm@ex.fm |
c40402e723acadf3f972ad05fd151038b1f037eb | 73c3d705d2849e300af44f1f423527d55cb5dfaf | /tsim/core/network/endpoint.py | 8bb21085431781c33d1c3d0a8502057dd8247a23 | [
"MIT"
] | permissive | eduardomezencio/tsim | 366d0ffa1baf14a447ce626cc47b0734ffbb8e06 | 60ac63152a98fd7dabb59c66367bca216e6a7370 | refs/heads/master | 2023-03-19T01:38:28.027648 | 2021-03-13T03:55:51 | 2021-03-13T03:55:51 | 282,103,131 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | """Endpoint class."""
from __future__ import annotations
from enum import Enum
class Endpoint(Enum):
"""Endpoints of a Way."""
START = 0
END = 1
@property
def other(self) -> Endpoint:
"""Get the opposite endpoint."""
return Endpoint.END if self is Endpoint.START else Endpoint.START
| [
"eduardomezencio@protonmail.com"
] | eduardomezencio@protonmail.com |
d89d6629586383af05bc3f8d4f0ee681a7e0143c | e6ff0823bf0bd21214582e74c6a277153372e41e | /website1/website1/wsgi.py | 69fd7c443fbd3c111c40db6ac67e0d499e924a4b | [] | no_license | DarkerPWQ/Amazon | 573295b60f024ee345a898fb8fb7ec6116055c09 | c5e8e46d7664854a26b6ef84b1d33cd02abb440c | refs/heads/master | 2021-01-11T00:20:09.142432 | 2016-10-11T07:31:03 | 2016-10-11T07:31:04 | 70,566,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for website1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import sys
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website1.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"584371093@qq.com"
] | 584371093@qq.com |
b4f910b51a755afc1ed0f93bd5283b676fd72f78 | 8f2308656cb33d245e66ae0647dbc12e9323cd57 | /Week 1/Grocery_App/todo.py | ae08b848501eb73a7a3a483a6fdc0f71a3f15f72 | [] | no_license | Garton67/Digitalcrafts | c906d81fdf8b3f8a1aec197d115c4f47b1438d09 | 84fef9bcc8defcbe38a4b2b7b824cd82e6bbb881 | refs/heads/master | 2023-08-02T13:39:27.214321 | 2021-09-17T05:05:37 | 2021-09-17T05:05:37 | 384,536,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | tasks = []
def add_task(user_input, tasks):
task = input("Enter the task: ")
priority = input("Enter the priority (high/medium/low): ")
todo = {"task": task, "Priority": priority}
tasks.append(todo)
return tasks
def view_task(user_input, tasks):
if len(tasks) == 0:
print("You have no tasks.")
for i in range(len(tasks)):
print(f"{i + 1} - {tasks[i]['task']} - {tasks[i]['Priority']}")
while True:
user_input = input(
"Press 1 to add task, 2 to view all tasks, q to quit: ")
if user_input == '1':
add_task(user_input, tasks)
elif user_input == '2':
view_task(user_input, tasks)
elif user_input == 'q':
print("Goodbye.")
break
else:
print("Try again.")
| [
"50034619+Garton67@users.noreply.github.com"
] | 50034619+Garton67@users.noreply.github.com |
36641dbb4d592eb32dae7fdb53a0d1aaec0d5277 | 72a9976d9e6c11459134521cc1e9d3ee0becf765 | /knowledge/urls.py | 851d34685226756f84e602d6a9f1cb923e3863c1 | [] | no_license | vinaykumar1908/082021i | a0f69745b726baa3a4981208b040956073a57597 | 85950350f17781e791b87718eeecc1f65d39a056 | refs/heads/master | 2023-08-28T00:36:05.818798 | 2021-09-16T18:24:27 | 2021-09-16T18:24:27 | 400,771,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from django.urls import path, include
from .views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, UserPostListView
from . import views
urlpatterns = [
path('', PostListView.as_view(), name='kblogHome'),
path('kuser/<str:username>', UserPostListView.as_view(), name='kuser-posts'),
path('kpost/<int:pk>/', PostDetailView.as_view(), name='kpost-detail'),
path('kpost/new/', PostCreateView.as_view(), name='kpost-create'),
path('kpost/<int:pk>/update/', PostUpdateView.as_view(), name='kpost-update'),
path('kpost/<int:pk>/delete/', PostDeleteView.as_view(), name='kpost-delete'),
#path('success/', views.homeView, name='TestLink'),
path('kpost/<int:pk>/comment/', views.add_comment_to_post,name='kadd_comment_to_post'),
#path('comment/<int:pk>/approve/', views.comment_approve, name='comment_approve'),
path('kcomment/<int:pk>/remove/', views.comment_remove, name='kcomment_remove'),
]
| [
"vinaaykumar1908@gmail.com"
] | vinaaykumar1908@gmail.com |
100efc94fe97678e4d050167f49f1a7ead921301 | 4dd87e032760f77855727a36c02ab670d9ca8ff8 | /file-uploader/appengine_config.py | 21778b35475168809f3e6b5f3adbd15fdeca3de6 | [] | no_license | ramuta/gaedevs-examples | 09f14ae4fd444d898483f303aead08fd93839935 | f8a077bad98a2f40739d59321aad67dad6407c54 | refs/heads/master | 2020-04-08T09:56:13.315017 | 2018-11-27T17:01:33 | 2018-11-27T17:01:33 | 159,246,286 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | from google.appengine.ext import vendor
vendor.add('libs')
| [
"matej.ramuta@gmail.com"
] | matej.ramuta@gmail.com |
52544a68196b2e0b07ee8c06ab9dee21967ae929 | 7f0eb1b364d7762a95146126cd8d9dcc470f0b10 | /main.py | 36d2dce80a99645bc7eeeeeee59bc9078f64f03a | [] | no_license | dudulangjiao/NLP_Speech | 46395b640092d3a98d159fda5afd560d1e3abf9c | f54ba70072d176f3259eab87b9da407f707764e8 | refs/heads/master | 2020-04-28T22:50:56.649472 | 2019-03-31T08:20:38 | 2019-03-31T08:20:38 | 175,234,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,230 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from models.base import LtpProcess
from models.other import process_page
from pyltp import SentenceSplitter
import mysql.connector
import time
def main():
"""从讲稿数据库取出文本内容,交给ltp处理。"""
print('程序开始运行......')
time_start_main = time.time()
cnx = mysql.connector.connect(user='root', password='314159',
host='localhost',
database='SpeechCollection')
cursor = cnx.cursor(dictionary=True)
# 计算讲稿数量
count_row_speech = 'SELECT COUNT(speech_id) FROM speech_sheet'
cursor.execute(count_row_speech)
row_no_dict = cursor.fetchone()
row_no = row_no_dict['COUNT(speech_id)']
# 循环读取稿子
for sp_id in range(row_no):
time_start = time.time() # 开始处理一篇文稿的时间
sp_id = sp_id + 1 # 讲稿id
query_speech = ('SELECT speech_content FROM speech_sheet WHERE speech_id = {0}').format(sp_id)
cursor.execute(query_speech)
outcome = cursor.fetchone()
#print(type(outcome))
data_speech_str = str(outcome['speech_content'])
#调用函数删除“\n”“■”和空格
data_speech = process_page(data_speech_str)
#print(data_speech)
# 创建LtpProcess()实例,进行分句、分词、词性分析等一系列处理
sentence_list = SentenceSplitter.split(data_speech)
#print("*************总结果*************")
# 循环进行分句处理,存入sentence_sheet表
index_sen_in_sp = 0 # 设置句子表speech_sheet的句子在讲稿的位置索引
for sentence_str in sentence_list:
index_sen_in_sp = index_sen_in_sp + 1
insert_sentence = ("INSERT INTO sentence_sheet (speech_id_of_sentence, index_sentence_in_speech, sentence_content)"
"VALUE ({0}, {1}, '{2}')").format(sp_id, index_sen_in_sp, sentence_str)
cursor.execute(insert_sentence)
cnx.commit()
# 用类LtpProcess进行分词处理
ltp_instance = LtpProcess(sentence_str)
word_list = ltp_instance.ltp_word() # word_list类型是二维列表
# 循环进行分词、词性标记处理,存入word_sheet表
index_w_in_sen = 0 # 设置词组表word_sheet的词组在句子的位置索引
for word_str in word_list:
index_w_in_sen = index_w_in_sen + 1
insert_word = ("INSERT INTO word_sheet (speech_id_of_word, index_sentence_of_word_in_speech,"
"index_word_in_sentence, word_content, part_speech_tag_word,"
"named_entity_tag_word, depend_syntax_head_word, depend_syntax_tag_word)"
"VALUE ({0}, {1}, {2}, '{3}', '{4}', '{5}', {6}, '{7}')").format(sp_id, index_sen_in_sp,
index_w_in_sen,
word_str[0],
word_str[1],
word_str[2],
word_str[3],
word_str[4])
cursor.execute(insert_word)
cnx.commit()
time_end = time.time() # 结束一篇文稿处理的时间
time_process = round((time_end - time_start)/60, 1) # 处理一篇文稿花费的时间
time_main = round((time_end - time_start_main)/3600, 2) # 目前程序运行的时间
print('稿子总篇数:共' + str(row_no) + '篇。 完成进度:已完成第' + str(sp_id) + '篇。 处理该篇稿子用时' + str(time_process) +
'分钟。 程序已运行' + str(time_main) + '小时。')
cursor.close()
cnx.close() | [
"lin_bo_lin@126.com"
] | lin_bo_lin@126.com |
21b5a3e7546bab907c383a15e03edce15bad86b0 | 7af6aac4433d1a7da6213ae3b3310d9157587eab | /common/do_mysql.py | 1fc1b5fd5dd5739a351f104b30f33c8ddf45d824 | [] | no_license | Leihj/newest-code-api | ec9c074f381b64d793ce965000319ac566ee7fb7 | 67c3d5f2118623f2934b038e2f18a1a9828f85be | refs/heads/master | 2022-12-21T04:06:51.887419 | 2019-08-01T08:01:48 | 2019-08-01T08:01:48 | 199,588,319 | 1 | 0 | null | 2022-12-08T05:20:06 | 2019-07-30T06:23:21 | Python | UTF-8 | Python | false | false | 2,461 | py | # -*- coding: utf-8 -*-
# @File : do_mysql.PY
# @Date : 2019/7/9-11:58
# @Author : leihuijuan
# @Emali : huijuan_lei@163.com
import pymysql
from common.logger import get_log
from common.config import config
logger=get_log(__name__)
class Do_Mysql:
def __init__(self):
self.host=config.get_str("DB","host")
self.user=config.get_str("DB","user")
self.password=config.get_str("DB","password")
self.db=config.get_str("DB","db")
self.port = config.get_int("DB","port")
self.charset=config.get_str("DB","charset")
self.conn=None
self.cur=None
#连接数据库
def Connect_Database(self):
try:
self.conn=pymysql.connect(host=self.host,user=self.user,password=self.password,db=self.db,port=self.port,charset=self.charset)
except:
logger.error("connectDatabase failed")
return False
# self.cur=self.conn.cursor() #使用cursor()方法获取操作游标
self.cur=self.conn.cursor(pymysql.cursors.DictCursor) #创建游标,以字典格式返回
return True
#获取一条数据--用来查询表数据 返回元组
def fetch_one(self,sql):
self.execute_sql(sql,params=None)
self.conn.commit()
return self.cur.fetchone()
#获取全部数据--用来查询表数据 返回嵌套元组
def fetch_all(self,sql):
self.execute_sql(sql, params=None)
self.conn.commit()
return self.cur.fetchall()
#执行数据库的sql语句,主要用来做插入操作
def execute_sql(self,sql,params=None):
#连接数据库
self.Connect_Database()
try:
if self.conn and self.cur:
#正常逻辑。执行sql,提交操作
self.cur.execute(sql,params)
self.conn.commit() #提交sql数据
except:
logger.error("execute failed:"+sql)
logger.error("params:"+params)
self.close()
return False
return True
#关闭数据库
def close(self):
#如果数据打开,则先关闭游标后关闭数据库。否则没有操作
if self.conn and self.cur:
self.cur.close()
self.conn.close()
return True
if __name__ == '__main__':
sql="select LeaveAmount from future.member where MobilePhone = '18607353919'"
RES = Do_Mysql().fetch_one(sql)
print(RES) | [
"leihuijuan18607353919"
] | leihuijuan18607353919 |
509b043958ecf41f0f38c5f2c9c22a9f3826f34b | 074279d6b63c9cd25c1353624710ed1fb422b30f | /j2ee模式-前端控制器模式.py | 53e5ab14544ddcb6c8ff3233c365a535f8179b88 | [] | no_license | qqizai/python36patterns | edd106f496a1aa7eda5d9070a6d82f142a808621 | 39052df13db9a54cb8322d87edbc2dbe6ff06a07 | refs/heads/master | 2022-11-12T14:01:32.341802 | 2020-06-29T02:23:46 | 2020-06-29T02:23:46 | 281,970,231 | 0 | 1 | null | 2020-07-23T14:13:31 | 2020-07-23T14:13:30 | null | UTF-8 | Python | false | false | 2,562 | py | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/10/9 0009 15:32
"""
前端控制器模式(Front Controller Pattern)是用来提供一个集中的请求处理机制,所有的请求都将由一个单一的处理程序处理。该处理程序可以做认证/授权/记录日志,或者跟踪请求,然后把请求传给相应的处理程序。以下是这种设计模式的实体。
前端控制器(Front Controller) - 处理应用程序所有类型请求的单个处理程序,应用程序可以是基于 web 的应用程序,也可以是基于桌面的应用程序。
调度器(Dispatcher) - 前端控制器可能使用一个调度器对象来调度请求到相应的具体处理程序。
视图(View) - 视图是为请求而创建的对象。
从java转化来,命名规范懒得改了。
"""
from abc import ABCMeta, abstractmethod
from monkey_print2 import print
class HomeView:
def show(self):
print('显示 Home 页面')
class StudentView:
def show(self):
print('显示 Student 页面')
class Dispatcher:
def __init__(self):
self.student_view = StudentView()
self.home_view = HomeView()
def dispatch(self, request: str):
if request.upper() == 'STUDENT':
self.student_view.show()
else:
self.home_view.show()
class FrontController:
def __init__(self):
self.__dispatcher = Dispatcher()
def is_authentic_user(self):
print("用户鉴权成功")
return True
def track_request(self, request):
print("被请求页面: " + request)
def dispatch_request(self, request):
self.track_request(request)
if self.is_authentic_user():
self.__dispatcher.dispatch(request)
if __name__ == '__main__':
front_controller = FrontController()
front_controller.dispatch_request("HOME")
front_controller.dispatch_request("STUDENT")
"""
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:49" 16:54:03 被请求页面: HOME
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:45" 16:54:03 用户鉴权成功
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:20" 16:54:03 显示 Home 页面
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:49" 16:54:03 被请求页面: STUDENT
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:45" 16:54:03 用户鉴权成功
"D:/coding2/python36patterns/j2ee模式-前端控制器模式.py:25" 16:54:03 显示 Student 页面
"""
| [
"909686719@qq.com"
] | 909686719@qq.com |
e225625a7101aae65a7497d1bbd3476b83a2339a | 983e8054677b225a3adfde8610c45089562a14e1 | /src/AST/StatementType/WhileStatement.py | bdba04ed35ed57b19e5be6f05ccd5f6337b4afcd | [] | no_license | Roooooobin/final-project-for-Practice-of-Compiler-Construction | 792c8e621b891d0936db7eb788b60d28f8d7b5bf | c1fa44080fa3612b73c5e709693e6419412a2d10 | refs/heads/master | 2020-08-27T23:05:43.109765 | 2019-11-08T01:41:56 | 2019-11-08T01:41:56 | 217,514,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | """
# -*- coding: utf-8 -*-
# @FileName: WhileStatement.py
# @Author : Robin
# @Time : 2019/11/2 8:58
"""
from src.AST.Statement import Statement
from src.utils import padding
class WhileStatement(Statement):
def __init__(self, expression, statement, symbol_table):
Statement.__init__(self)
self.expression = expression
self.statement = statement
self.symbol_table = symbol_table
def __str__(self):
out = "While(" + str(self.expression) + ")\n"
out += " " + str(self.statement) + "\n"
return out
def compile(self):
# 符号表中开一个loop
self.symbol_table.open_loop()
begin_label = self.symbol_table.get_begin_loop()
end_label = self.symbol_table.get_end_loop()
code = str(begin_label) + ":\n"
# while中的条件不成立则调至结束label
code += self.expression.compile() + "fjp " + str(end_label) + "\n"
# 执行statement
code += self.statement.compile()
code += "ujp " + str(begin_label) + "\n"
code += str(end_label) + ":\n"
self.symbol_table.close_loop()
return code
def serialize(self, level):
output = ""
if self.expression:
output += padding(level) + "WHILE:\n"
output += padding(level + 1) + self.expression.serialize(0)
else:
output += "WHILE"
if self.statement:
output += padding(level) + "THEN:\n"
output += self.statement.serialize(level + 1)
return output
| [
"35558127+Roooooobin@users.noreply.github.com"
] | 35558127+Roooooobin@users.noreply.github.com |
596bac80eab215fb0ec1623425822f1414b5821e | d9f1d1a9201d0a717ddc6ce6ff8181a252d686f4 | /apps/applications/earbud/qcc512x_qcc302x/QCC3020-AA_DEV-BRD-R2-AA/dfu/20190514144426/scripts/20190514144426.py | 32db395178f1250940a2be41f5e8642bd0aa6d17 | [] | no_license | hfutxqd/QCC_DEV | a9f155938a0cba26551d45e48813fd33e1c39c56 | fa440e13297f7e178803a6b5756a23332acd3ef5 | refs/heads/master | 2022-01-04T23:50:03.527914 | 2019-05-15T01:38:20 | 2019-05-15T01:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | #!/usr/bin/env python
# Automatically generated input script for dfu_file_generator.py
dfu_image_parameters = {
"gen_flash_image": "True",
"bank": "bank1"
}
flash_device_parameters = {
"block_size": 65536,
"boot_block_size": None,
"alt_image_offset": 2097152
}
host_tools_parameters = {
"devkit": r"C:\qtil\ADK_QCC512x_QCC302x_WIN_6.3.0.154",
"NvsCmd": r"C:\qtil\ADK_QCC512x_QCC302x_WIN_6.3.0.154\tools\bin\nvscmd.exe",
"SecurityCmd": r"C:\qtil\ADK_QCC512x_QCC302x_WIN_6.3.0.154\tools\bin\SecurityCmd.exe",
"UpgradeFileGen": r"C:\qtil\ADK_QCC512x_QCC302x_WIN_6.3.0.154\tools\bin\UpgradeFileGen.exe",
"crescendo_upd_config": r"G:\Fang_WorkNotes\project\W1\SW\W1_V005_ADK630\apps\applications\earbud\qcc512x_qcc302x\QCC3020-AA_DEV-BRD-R2-AA\dfu\20190514144426\scripts\20190514144426.upd",
"dfu_dir": r"G:\Fang_WorkNotes\project\W1\SW\W1_V005_ADK630\apps\applications\earbud\qcc512x_qcc302x\QCC3020-AA_DEV-BRD-R2-AA\dfu\20190514144426\output",
"folder_for_rsa_files": r"G:/Fang_WorkNotes/project/W1/SW/W1_V005_ADK630/apps/applications/earbud/qcc512x_qcc302x/QCC3020-AA_DEV-BRD-R2-AA/dfu",
}
flash0 = {
"flash_device": flash_device_parameters,
"dfu_image": dfu_image_parameters,
"host_tools": host_tools_parameters,
"chip_type": "QCC512X",
"encrypt": True,
"hardware_encrypted": False,
"encryption_file": r"G:/Fang_WorkNotes/project/W1/SW/W1_V005_ADK630/apps/applications/earbud/qcc512x_qcc302x/QCC3020-AA_DEV-BRD-R2-AA/efuse_key.txt",
"signing_mode": "all",
"layout": [
("curator_fs", {
"src_file" : r"G:\Fang_WorkNotes\project\W1\SW\W1_V005_ADK630\apps\applications\earbud\qcc512x_qcc302x\QCC3020-AA_DEV-BRD-R2-AA\dfu\20190514144426\input\curator_config_filesystem.xuv",
"src_file_signed": False,
"authenticate": True,
"capacity": 1024,
}),
("apps_p0", {
"src_file" : r"G:\Fang_WorkNotes\project\W1\SW\W1_V005_ADK630\apps\applications\earbud\qcc512x_qcc302x\QCC3020-AA_DEV-BRD-R2-AA\dfu\20190514144426\input\apps_p0_firmware.xuv",
"src_file_signed": True,
"authenticate": True,
"capacity": 589824,
}),
("apps_p1", {
"src_file" : r"G:\Fang_WorkNotes\project\W1\SW\W1_V005_ADK630\apps\applications\earbud\qcc512x_qcc302x\QCC3020-AA_DEV-BRD-R2-AA\dfu\20190514144426\input\earbud.xuv",
"authenticate": True,
"capacity": 524288,
}),
("device_ro_fs", {
"authenticate": True,
"capacity": 4096,
"inline_auth_hash": True,
}),
("rw_config", {
"capacity": 131072,
}),
("rw_fs", {
"capacity": 65536,
}),
("ro_cfg_fs", {
"src_file" : r"G:\Fang_WorkNotes\project\W1\SW\W1_V005_ADK630\apps\applications\earbud\qcc512x_qcc302x\QCC3020-AA_DEV-BRD-R2-AA\dfu\20190514144426\input\firmware_config_filesystem_dfu.xuv",
"authenticate": True,
"capacity": 131072,
}),
("ro_fs", {
"src_file" : r"G:\Fang_WorkNotes\project\W1\SW\W1_V005_ADK630\apps\applications\earbud\qcc512x_qcc302x\QCC3020-AA_DEV-BRD-R2-AA\dfu\20190514144426\input\customer_ro_filesystem_dfu.xuv",
"authenticate": True,
"capacity": 393216,
}),
]
}
flash1 = {
"flash_device": flash_device_parameters,
"layout": []
}
| [
"xihu.fang@bolutek.com"
] | xihu.fang@bolutek.com |
d81ca3d2f986e4c7af9c64432aef10385266e46b | 8cc30a27835e205a3476783106ca1605a6a85c48 | /amy/workshops/migrations/0066_merge.py | ef0455252831a8b0cfaf3e51343f4267be07ade1 | [
"MIT"
] | permissive | gaybro8777/amy | d968edc78bbd3f63f3353450334721628dbbc0f4 | 3cf99aed58a0f0acf83d2645a30d8408208ccea9 | refs/heads/develop | 2023-03-07T22:08:28.692700 | 2021-02-23T18:06:06 | 2021-02-23T18:06:06 | 341,930,505 | 0 | 0 | MIT | 2021-02-24T17:22:08 | 2021-02-24T14:40:43 | null | UTF-8 | Python | false | false | 304 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0064_dc_instructor_badge'),
('workshops', '0065_multiple_memberships'),
]
operations = [
]
| [
"piotr@banaszkiewicz.org"
] | piotr@banaszkiewicz.org |
b096305b5cc4c3410d6bd9eabf721a6570288e92 | f28c8f8e53ed41b6d1bccd02ba1d50045e214762 | /sortSpecial.py | 402474e07c3ed0c928f6ba9c8b4da838dd008841 | [] | no_license | keyplay/pythonLearning | 3461f5bcae54f8464f2617131f9a84630c417b4c | b8fccf0fbf0a835e30b9280d7369149ee967e481 | refs/heads/master | 2020-07-23T15:49:51.438309 | 2016-12-19T15:33:21 | 2016-12-19T15:33:21 | 66,745,818 | 0 | 0 | null | 2016-09-10T12:06:50 | 2016-08-28T02:39:37 | Python | UTF-8 | Python | false | false | 187 | py | #! python2.7
# sortSpecial.py - sort the tuple by name.
def by_name(t):
return t[0]
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
L2 = sorted(L, key=by_name)
print L2
| [
"noreply@github.com"
] | keyplay.noreply@github.com |
134b914fc5e20015e1f5c86296b15c48c344eab8 | d7e9cdf4e92388b897066010c1b8d12d22ada267 | /arduino/code/Nano_A0A1_LCDPrint.py | e94e78a7071271cb88b519007f71f1bd0b71e976 | [] | no_license | adavidson32/EOTG | 700c146944c98ea88f4288bed97e7ab7f738996a | 8ad02cfb0e71e972b7a8789ffaba2a60ab2d29ee | refs/heads/master | 2020-05-26T13:55:54.343848 | 2017-04-25T18:43:40 | 2017-04-25T18:43:40 | 85,002,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | include "LCD LIB HERE"
int A0_samples = 0; // value read from the pot
int A1_samples = 0;
void setup() {
Serial.begin(115200);
}
void loop() {
A0_samples = analogRead(0);
A1_samples = analogRead(1);
Serial.print("A0: "); Serial.print(A0_samples); Serial.print(" \t A1: "); Serial.println(A1_samples);
float A0_voltage = ((A0_samples / 1024.00) * 5.00);
float A1_voltage = ((A1_samples / 1024.00) * 5.00);
Serial.print("A0: "); Serial.print(A0_voltage); Serial.print("V \t A1:"); Serial.print(A1_voltage); Serial.println("V");
Serial.println(""); Serial.println(""); Serial.println(""); Serial.println(""); Serial.println("");
delay(500);
}
| [
"noreply@github.com"
] | adavidson32.noreply@github.com |
3b20726f7cd79414687d2480e0d2043e30abc44a | 5996263a6285f6388c1a60bf4abf81c3bd199ffb | /src/house_prices_functions.py | 03d02839f650bec90b04631d2b149a74d98fbf95 | [] | no_license | hamedrazavi/house_price_prediction_kaggle | c175169073e640b816f8ba152067d506688dec34 | c4395f40939200fe91fcb484ca061a3aa2696065 | refs/heads/master | 2020-03-14T11:30:24.127043 | 2018-07-20T11:46:26 | 2018-07-20T11:46:26 | 131,591,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,630 | py | import seaborn as sns
import copy

import lightgbm as lgb
import numpy as np
import pandas as pd
import xgboost as xgb
from matplotlib import pyplot as plt
from sklearn import model_selection, ensemble, linear_model
from sklearn import metrics
from sklearn.kernel_ridge import KernelRidge
def PlotCorr(df, feature):
    """Bar-plot `feature` against SalePrice, then print its value counts."""
    column = df[feature]
    sns.barplot(x=column, y=df["SalePrice"])
    print(column.value_counts())
def PlotCorr2(df, feature):
    """Bar-plot `feature` against PricePerArea, then print its value counts."""
    column = df[feature]
    sns.barplot(x=column, y=df["PricePerArea"])
    print(column.value_counts())
def combineTrTest(dfTr, dfTest):
    """Stack the train and test frames vertically with a fresh 0..n-1 index."""
    stacked = pd.concat(objs=[dfTr, dfTest], axis=0)
    return stacked.reset_index(drop=True)
def correlation_heatmap(df):
    """Draw an annotated Pearson-correlation heatmap of df's numeric columns."""
    _, ax = plt.subplots(figsize=(14, 12))
    # Diverging palette so positive/negative correlations read in opposite hues.
    colormap = sns.diverging_palette(220, 10, as_cmap=True)
    _ = sns.heatmap(
        df.corr(),
        cmap=colormap,
        square=True,
        cbar_kws={'shrink': .9},
        ax=ax,
        annot=True,  # write the correlation value in every cell
        linewidths=0.1, vmax=1.0, linecolor='white',
        annot_kws={'fontsize': 12}
    )
    plt.title('Pearson Correlation of Features', y=1.05, size=15)
def find_cv_error(Xtrain, ytrain):
    """Grid-search several regressors on (Xtrain, ytrain) and report RMSE.

    Each candidate model gets a small hyper-parameter grid searched with a
    10-split 50/50 shuffle CV (scoring = negative MSE, reported as RMSE).
    Returns [lowest RMSE, mean of the two lowest RMSEs].
    """
    clfList = [linear_model.LinearRegression(), ensemble.RandomForestRegressor(), ensemble.GradientBoostingRegressor(),
               xgb.XGBRegressor(), KernelRidge(), linear_model.BayesianRidge(), lgb.LGBMRegressor(verbose = -1)]
    cvSplit = model_selection.ShuffleSplit(n_splits=10, train_size=0.5, test_size=0.5, random_state=0)
    # Candidate hyper-parameter values shared by the grids below.
    # NOTE(review): etaList is defined but never used in any grid.
    maxDepthList = [2, 4]
    nEstimatorsList = [400, 500]
    num_leavesList = [4, 5]
    etaList = [0.1, 0.05, 0.01]
    rndStateList = [0, 1, 2]
    gammaList = [0]
    colsample_bytreeList = [0.4]
    alphaList = [4]
    degreeList = [1]
    gridBool = [True, False]
    # One grid per estimator, in the same order as clfList.
    paramGridList = [
        [{'fit_intercept': gridBool}], [{'max_depth': [4, 10], 'random_state': rndStateList}],
        [{'n_estimators': nEstimatorsList, 'max_depth': maxDepthList, 'random_state': rndStateList}],
        [{'max_depth': maxDepthList, 'gamma': gammaList, 'colsample_bytree': colsample_bytreeList}],
        [{'alpha': alphaList, 'degree': degreeList}], [{}],
        [{'num_leaves': num_leavesList, 'n_estimators': nEstimatorsList}]
    ]
    bestScoreList = []
    for clf, param in zip(clfList, paramGridList):
        bestSearch = model_selection.GridSearchCV(estimator=clf, param_grid=param,
                                                  cv=cvSplit, scoring='neg_mean_squared_error', n_jobs=4)
        bestSearch.fit(Xtrain, ytrain)
        bestParam = bestSearch.best_params_
        # Negative MSE back to RMSE.
        bestScore = round((-bestSearch.best_score_) ** 0.5, 5)
        # NOTE(review): the message says "with a runtime of seconds" but no
        # runtime value is ever interpolated.
        print('The best parameter for {} is {} with a runtime of seconds with an error of {}'.format(
            clf.__class__.__name__, bestParam, bestScore))
        clf.set_params(**bestParam)
        bestScoreList.append(bestScore)
    print("--" * 45, "\nMax cross-validation score is {}".format(round(min(bestScoreList), 5)))
    print("--" * 45,
          "\nAverage cross-validation score is {}".format(sum(sorted(bestScoreList, reverse=False)[0:2]) / 2))
    return [round(min(bestScoreList), 5), sum(sorted(bestScoreList, reverse=False)[0:2]) / 2]
def eval_cv(clf, X, y, cvNum):
    """Average RMSE of `clf` over `cvNum` random 50/50 train/test splits."""
    rmse_per_split = []
    for seed in range(cvNum):
        X_tr, X_te, y_tr, y_te = model_selection.train_test_split(
            X, y, test_size=0.5, train_size=0.5, random_state=seed)
        clf.fit(X_tr, y_tr)
        predictions = clf.predict(X_te)
        rmse_per_split.append(metrics.mean_squared_error(y_te, predictions) ** 0.5)
    return sum(rmse_per_split) / len(rmse_per_split)
# Thanks to Serigne on kaggle.com
class AveragingModels():
    """Averaging ensemble: fits independent copies of the base models and
    returns a coefficient-weighted average of their predictions.

    NOTE(review): the original referenced sklearn's `clone` and `np` without
    importing either, so fit()/predict() raised NameError at runtime.
    `copy.deepcopy` replaces `clone`; for the fit-then-predict flow used
    here the effect is the same (an independent, trainable copy).
    """
    def __init__(self, models, coeffs):
        # Unfitted base estimators and the weight applied to each one.
        self.models = models
        self.coeff = coeffs

    def fit(self, X, y):
        """Fit an independent copy of every base model on (X, y)."""
        self.models_ = [copy.deepcopy(m) for m in self.models]
        for model in self.models_:
            model.fit(X, y)
        return self

    def predict(self, X):
        """Return the coefficient-weighted average of the fitted models' predictions."""
        predictions = np.column_stack([
            model.predict(X) for model in self.models_
        ])
        pred = 0
        for i in range(len(self.models)):
            pred += self.coeff[i] * predictions[:, i]
        return pred
def rmsle_cv(model):
    """K-fold RMSE of `model` on the module-level dataset.

    NOTE(review): relies on module-level names `n_folds`, `X`, `y` (and `np`)
    that are not defined in this function — confirm they are set up by the
    calling notebook/script before use.
    """
    kf = model_selection.KFold(n_folds, shuffle=True, random_state=42).get_n_splits(X)
    rmse = np.sqrt(-model_selection.cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=kf))
    return (sum(rmse) / n_folds)
"srazavi@biorobpc30.epfl.ch"
] | srazavi@biorobpc30.epfl.ch |
f9023a1da5efba1124204d1d8a76778d9c15c29d | d18f74c0683fa412833fc7b68f737226dcf0f5df | /setup.py | 70e68c224d914b125b04f0aa01c8f602ff39fa0f | [] | no_license | phymhan/gomoku | ab22b19c2f59ea63aba3015f2b3ce53bf1b440e5 | e48e215fe24236ccccfa5edb0709a22bed4624b9 | refs/heads/master | 2021-08-28T23:06:50.620937 | 2017-12-13T07:49:45 | 2017-12-13T07:49:45 | 114,087,358 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | import cx_Freeze
# cx_Freeze build script: freezes fivechessonline21.py (plus pygame and the
# image/sound assets listed below) into a standalone executable.
executables = [cx_Freeze.Executable("fivechessonline21.py")]
cx_Freeze.setup(
    name = "Five-Chess",
    options = {"build_exe": {"packages": ["pygame"],
                             "include_files": ["./sources/pics/board.png",
                                               "./sources/pics/cp_k_29.png",
                                               "./sources/pics/cp_w_29.png",
                                               "./sources/pics/panel.png",
                                               "./sources/pics/catsmall.png",
                                               "./sources/music/BackgroundMusic.ogg",
                                               "./sources/music/Snd_click.ogg"]}},
    executables = executables
    )
##cx_Freeze.setup(
## name = "Five-Chess",
## options = {"build_exe": {"packages": ["pygame"],
## "include_files": ["board.png",
## "cp_k_29.png",
## "cp_w_29.png",
## "panel.png",
## "catsmall.png",
## "BackgroundMusic.ogg",
## "Snd_click.ogg"]}},
## executables = executables
## )
| [
"hanligong@gmail.com"
] | hanligong@gmail.com |
03f2726d315572406a731e6e09cafe15414c11c8 | b5020ca3f914d5f8ce1cde36376e37fa5f8b1322 | /weirdtextapi/weirdtextapi/wsgi.py | 57b28f1a806c42ec84b76a40c491de144f8a54c1 | [] | no_license | michalilski/weird-text-api | 848fc10a89b6671c46b2694108abea64c978267b | d60b132c9a4663b6b8482e4bc5d6af272d7d8eea | refs/heads/main | 2023-05-14T19:03:17.096316 | 2021-05-24T13:49:20 | 2021-05-24T13:49:20 | 370,341,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for weirdtextapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weirdtextapi.settings')
# Module-level WSGI entry point picked up by the application server.
application = get_wsgi_application()
| [
"michal_ilski@icloud.com"
] | michal_ilski@icloud.com |
54fdb90defd17f79a01648b7ef2c33d62cb46d3b | c4b8e1e09dedbccd37ca008ecaaca4438610bbaf | /cpmpy/sliding_sum.py | 01b48796c45a4a65f16a0e45cf1d93b7cf1cdcf1 | [
"MIT"
] | permissive | hakank/hakank | 4806598b98cb36dd51b24b0ab688f52dadfe9626 | c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2 | refs/heads/master | 2023-08-15T00:21:52.750270 | 2023-07-27T16:21:40 | 2023-07-27T16:21:40 | 11,933,517 | 336 | 97 | MIT | 2023-07-27T11:19:42 | 2013-08-06T20:12:10 | JavaScript | UTF-8 | Python | false | false | 1,355 | py | """
Sliding sum constraint in cpmpy.
From Global Constraint Catalogue
http://www.emn.fr/x-info/sdemasse/gccat/Csliding_sum.html
'''
sliding_sum(LOW,UP,SEQ,VARIABLES)
Purpose
Constrains all sequences of SEQ consecutive variables of the collection VARIABLES so that the
sum of the variables belongs to interval [LOW, UP].
Example
(
3, 7, 4,<1, 4, 2, 0, 0, 3, 4>
)
The example considers all sliding sequences of SEQ=4 consecutive values of <1, 4, 2, 0,0,3, 4>
collection and constraints the sum to be in [LOW,UP] = [3, 7]. The sliding_sum constraint holds
since the sum associated with the corresponding subsequences 1 4 2 0, 4 2 0 0, 2 0 0 3, and
0 0 3 4 are respectively 7, 6, 5 and 7.
'''
This cpmpy model was written by Hakan Kjellerstrand (hakank@gmail.com)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def sliding_sum_test(n=7,seq=4,low=3,up=7):
    """Enumerate all length-n arrays (domain 0..4) whose every `seq`-long
    sliding window sums to a value in [low, up]; print each solution and
    the total count."""
    x = intvar(0,4,shape=n,name="x")
    # low = intvar(0,10,name="low")
    # up = intvar(0,10,name="up")
    model = Model(sliding_sum(low,up,seq,x))
    ss = CPM_ortools(model)
    # OR-tools parameter tweaks used across the author's cpmpy models.
    ss.ort_solver.parameters.linearization_level = 0
    ss.ort_solver.parameters.cp_model_probing_level = 0
    num_solutions = ss.solveAll(display=x)
    print("num_solutions:", num_solutions)
sliding_sum_test()
| [
"hakank@gmail.com"
] | hakank@gmail.com |
45f9289d172657a7370a84867608022b3c9ae240 | e502d7d14fe9366f4ec25c6a55f974175a18dfcc | /backend/app.py | f6c312f11c6bfa07e0d1def8eb0d5fa2857e316d | [] | no_license | GameGodS3/donation-page | e8cb3adaa77e429ee7a577f62d662273620f8a2c | 11e4ea469e14bb4927fd58b8f36cadc82006d50d | refs/heads/master | 2023-01-09T23:27:50.344687 | 2020-04-21T10:47:50 | 2020-04-21T10:47:50 | 254,671,524 | 0 | 0 | null | 2020-11-09T09:37:27 | 2020-04-10T15:38:08 | CSS | UTF-8 | Python | false | false | 1,139 | py | from flask import Flask, request, render_template, session
import json
import requests
import razorpay
import os
# Razorpay credentials are required at import time; missing env vars abort startup.
KEY = os.environ['KEY']
SECRET = os.environ['SECRET']
client = razorpay.Client(auth=(KEY, SECRET))
app = Flask(__name__)
# NOTE(review): `session` is used below but no `app.secret_key` is set in this
# module — Flask sessions fail without one; confirm it is configured elsewhere.
@app.route('/health', methods=['GET'])
def health_check():
    """
    To check if the server is up or not
    """
    return "O.K", 200
@app.route('/', methods=['POST'])
def donation_logic():
    # Stores the requested amount in the session and renders the payment page.
    if request.method == 'POST':
        data = json.loads(request.data.decode('utf-8'))
        if 'payment_type' in data:
            if data['payment_type'] == "one_time":
                # The '00' suffix presumably converts a rupee amount string to
                # paise (x100) for Razorpay — confirm.
                session['amount'] = data['amount'] + '00'
                return render_template('app.html', amount=session['amount'])
            elif data['payment_type'] == "subscription":
                # Subscription payments are not implemented yet.
                pass
    # NOTE(review): every non-matching path falls through and returns None,
    # which Flask turns into a 500 response.
@app.route('/charge', methods=['POST'])
def app_charge():
    # Captures the client-created payment, then returns its details as JSON.
    amount = int(session['amount'])
    payment_id = request.form['razorpay_payment_id']
    client.payment.capture(payment_id, amount)
    session.pop('amount', 0)
    return json.dumps(client.payment.fetch(payment_id))
if __name__ == '__main__':
    app.run()
"gopikrishnans1996@gmail.com"
] | gopikrishnans1996@gmail.com |
4dd7cea9ac0e15ebbe0465448d35c50ca6cc3924 | f72c5434750740d01b046c2227baf88349112911 | /266B.py | c75d2cf6210a85707642a410bc52e900fccf749d | [] | no_license | urishabh12/practice | 00145e024daccacc3f631fae3671951f9e2d6f5a | 17143214a024b87a76afc6b4c33b8725885a78a1 | refs/heads/master | 2022-12-27T02:23:09.406093 | 2021-07-10T06:50:12 | 2021-07-10T06:50:12 | 193,316,611 | 0 | 0 | null | 2022-12-14T04:46:18 | 2019-06-23T06:59:51 | Jupyter Notebook | UTF-8 | Python | false | false | 342 | py | n,t=map(int, input().split())
def _swap_pass(queue):
    """One second of motion: every adjacent 'BG' pair becomes 'GB'.

    A swapped pair is skipped over, so each boy moves at most one place
    per pass — identical to the original scan-with-skip behaviour, but
    with explicit bounds instead of a try/except IndexError hack.
    """
    chars = list(queue)
    i = 0
    while i < len(chars) - 1:
        if chars[i] == 'B' and chars[i + 1] == 'G':
            chars[i], chars[i + 1] = chars[i + 1], chars[i]
            i += 2
        else:
            i += 1
    return ''.join(chars)

def queue_after(queue, seconds):
    """Return the queue string after `seconds` swap passes (Codeforces 266B)."""
    for _ in range(seconds):
        queue = _swap_pass(queue)
    return queue

if __name__ == '__main__':
    # n (queue length) is read to consume the input line but is not needed.
    n, t = map(int, input().split())
    print(queue_after(input(), t))
"urishabh42@gmail.com"
] | urishabh42@gmail.com |
a2b499ca95fb6dc521d0ed5ec7895c3d990795b7 | 32ba389bb5eec73c75db287d25644c3107270787 | /1--50/12. Integer to Roman.py | d85ceb91e9610db0ff7883f05e4db32a5a6f741d | [] | no_license | lulalulala/leetcode | 9297d7b5d7cb9a834f08f60d3d3d11f928c3d99a | a5f511073e59eaf877f1b90be3a8b2a3e1a1d6a7 | refs/heads/master | 2020-03-22T06:51:40.335752 | 2018-11-10T13:32:53 | 2018-11-10T13:32:53 | 139,662,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """Example 1:
Input: 3
Output: "III"
Example 2:
Input: 4
Output: "IV"
Example 3:
Input: 9
Output: "IX"
Example 4:
Input: 58
Output: "LVIII"
Explanation: L = 50, V = 5 and III = 3.
Example 5:
Input: 1994
Output: "MCMXCIV"
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4."""
class Solution:
    def intToRoman(self, num):
        """
        :type num: int
        :rtype: str

        Build the numeral from a per-decimal-place lookup table, peeling
        digits off the low end with divmod and joining in reverse.
        """
        digit_symbols = [
            ["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX"],
            ["X", "XX", "XXX", "XL", "L", "LX", "LXX", "LXXX", "XC"],
            ["C", "CC", "CCC", "CD", "D", "DC", "DCC", "DCCC", "CM"],
            ["M", "MM", "MMM"],
        ]
        parts = []
        place = 0
        while num > 0:
            num, digit = divmod(num, 10)
            if digit:
                parts.append(digit_symbols[place][digit - 1])
            place += 1
        return "".join(reversed(parts))
| [
"1366648494@qq.com"
] | 1366648494@qq.com |
c73953e48af931827b1da62eb65e647668cfd10d | 5e45f1d1d9f58aa1456777b0d75334d6efd43840 | /challenges/hackerrank/algorithms/dynamic/max_subarray/python/max_subarray.py | 7a4bd11931255b0775dd3b4438356b773e6b06e1 | [] | no_license | missingdays/nerdy.school | 604953dc9b3c38a0f71793f066ce2707aa980dae | 051673e0ebc54bc2f7e96a6477697d1d528dc45c | refs/heads/master | 2021-01-17T08:10:19.558851 | 2016-06-06T15:29:01 | 2016-06-06T15:29:01 | 59,897,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 missingdays <missingdays@missingdays>
#
# Distributed under terms of the MIT license.
"""
Maximum subarray problem solution
"""
def max_subarray(array):
    """Return a maximum-sum contiguous subarray (Kadane's algorithm).

    Fixes: the original returned `array[0:1]` for any all-non-positive
    input, even when a later element was larger; it now returns the single
    largest element in that case. Empty input yields [].
    """
    if not array:
        return []
    curr_sum = 0
    curr_index = 0
    best_sum = 0
    best_start_index = 0
    best_ending_index = 0
    found_positive = False  # did any window ever reach a positive sum?
    for i, value in enumerate(array):
        val = curr_sum + value
        if val > 0:
            if curr_sum == 0:
                # A new candidate window starts here.
                curr_index = i
            curr_sum = val
        else:
            curr_sum = 0
        if curr_sum > best_sum:
            found_positive = True
            best_sum = curr_sum
            best_start_index = curr_index
            best_ending_index = i
    if not found_positive:
        # All elements <= 0: the best contiguous subarray is the max element.
        peak = array.index(max(array))
        return array[peak:peak + 1]
    return array[best_start_index:best_ending_index + 1]
def sum_positive(array):
    """Sum of the strictly-positive elements; the maximum element if none."""
    total = 0
    for value in array:
        if value > 0:
            total += value
    if total:
        return total
    # No positive element contributed: fall back to the largest element.
    largest = array[0]
    for value in array[1:]:
        if value > largest:
            largest = value
    return largest
# For each test case print two answers on one line: the best contiguous
# subarray sum and the best (not necessarily contiguous) subsequence sum.
for i in range(int(input())):
    n = input()  # array length line; read but unused
    inp = list(map(int, input().split()))
    print(sum(max_subarray(inp)), end=" ")
    print(sum_positive(inp))
| [
"rebovykin@gmail.com"
] | rebovykin@gmail.com |
878bdb34e11bc1501de1b6b0dfd2018dfcf3aa4a | 63191be7f688591af69263972d68423d76fb5f74 | /geekshop/adminapp/views/categories.py | b42b65dd0c4181601279fe52418c7aef8c7ee7a5 | [] | no_license | IliaNiyazof/Django | 5eee4c226a1f06178fdbb5626444fff406886de7 | 052cb4f3f142c4224454ebac9fb27f63de9cbc47 | refs/heads/master | 2021-07-19T05:52:56.620026 | 2020-06-05T16:17:47 | 2020-06-05T16:17:47 | 166,776,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,332 | py | from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render, HttpResponseRedirect, reverse, get_object_or_404
from mainapp.models import ProductCategory
from adminapp.models.categories import ProductCategoryEditForm
@user_passes_test(lambda u: u.is_superuser)
def categories(request):
    """Superuser-only list view of all product categories."""
    title = 'админка/категории'
    categories_list = ProductCategory.objects.all()
    content = {
        'title': title,
        'objects': categories_list
    }
    return render(request, 'adminapp/categories/read.html', content)
@user_passes_test(lambda u: u.is_superuser)
def category_create(request):
    """Create a category: GET shows an empty form, valid POST saves and redirects."""
    title = 'категории/создание'
    if request.method == 'POST':
        category_form = ProductCategoryEditForm(request.POST, request.FILES)
        if category_form.is_valid():
            category_form.save()
            return HttpResponseRedirect(reverse('admin:categories'))
    else:
        category_form = ProductCategoryEditForm()
    content = {'title': title, 'update_form': category_form}
    return render(request, 'adminapp/categories/update.html', content)
@user_passes_test(lambda u: u.is_superuser)
def category_update(request, pk):
    """Edit an existing category; redirects back to this edit page on success."""
    title = 'категории/редактирование'
    edit_category = get_object_or_404(ProductCategory, pk=pk)
    if request.method == 'POST':
        edit_form = ProductCategoryEditForm(request.POST, request.FILES, instance=edit_category)
        if edit_form.is_valid():
            edit_form.save()
            return HttpResponseRedirect(reverse('admin:category_update', args=[edit_category.pk]))
    else:
        edit_form = ProductCategoryEditForm(instance=edit_category)
    content = {'title': title, 'update_form': edit_form}
    return render(request, 'adminapp/categories/update.html', content)
@user_passes_test(lambda u: u.is_superuser)
def category_delete(request, pk):
    """Soft delete: POST flags the category inactive instead of removing the row."""
    title = 'категории/удаление'
    category = get_object_or_404(ProductCategory, pk=pk)
    if request.method == 'POST':
        category.is_active = False
        category.save()
        return HttpResponseRedirect(reverse('admin:categories'))
    content = {'title': title, 'category_to_delete': category}
    return render(request, 'adminapp/categories/delete.html', content)
| [
"IFHRJCFY@yandex.ru"
] | IFHRJCFY@yandex.ru |
600a20e0412cec0307760847c1f728edf555ecb4 | 68f3c53b7701565e60951ebe5287cbff1cc9cbbc | /myWebsiteBuilder/views.py | a89227511fa99049af95e8d1a6347f013c73ba06 | [] | no_license | vishveshjain/websiteBuilder | adea7224167940287698dd189efbd72b1ca3c87e | 997f848b40a32cada6565179ef082f33d5e66880 | refs/heads/master | 2023-06-16T06:14:56.131037 | 2021-07-18T22:48:50 | 2021-07-18T22:48:50 | 383,830,410 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,159 | py | from django.http.response import JsonResponse
from django.shortcuts import redirect, render
from rest_framework.parsers import JSONParser
from .models import aboutMe, contactForm, websiteDetail,socialLink
from .forms import ContactForm, websiteDetailForm
from .serializers import websiteSerializer
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
def websiteDetails(request):
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = websiteDetailForm(request.POST)
# check whether it's valid:
if form.is_valid():
category = form.cleaned_data['category']
websiteName = form.cleaned_data['websiteName']
fbLink = form.cleaned_data['fbLink']
twittterLink = form.cleaned_data['twittterLink']
googleLink = form.cleaned_data['googleLink']
gitLink = form.cleaned_data['gitLink']
linkedinLink = form.cleaned_data['linkedinLink']
InstaLink = form.cleaned_data['InstaLink']
homepageContent = form.cleaned_data['homepageContent']
title = form.cleaned_data['title']
body = form.cleaned_data['body']
reg1 = websiteDetail(category=category, websiteName=websiteName, homepageContent=homepageContent)
reg1.save()
reg2 = socialLink(fbLink=fbLink, twittterLink=twittterLink, googleLink=googleLink, gitLink=gitLink, linkedinLink=linkedinLink, InstaLink=InstaLink)
reg2.save()
reg3 = aboutMe(title=title, body=body)
reg3.save()
return redirect('index-page')
else:
form=websiteDetailForm()
return render(request,'websiteDetails.html',{
'form':form
})
def index(request):
data = websiteDetail.objects.all()
socialLinkData = socialLink.objects.all().last()
dataCount=data.count()
baseUrl='https://source.unsplash.com/'
lastIndex = data[dataCount-1]
return render(request,'index.html',{
'baseUrl':baseUrl,
'websiteData': data,
'lastIndex': lastIndex,
'socialLink':socialLinkData,
})
def gallery(request):
data = websiteDetail.objects.all()
socialLinkData = socialLink.objects.all().last()
dataCount=data.count()
lastIndex = data[dataCount-1]
baseUrl='https://source.unsplash.com/'
return render(request,'gallery.html',
{
'numbers':range(1,10),
'baseUrl':baseUrl,
'websiteData': data,
'lastIndex': lastIndex,
'socialLink':socialLinkData
})
def about(request):
data = websiteDetail.objects.all()
socialLinkData = socialLink.objects.all().last()
aboutme = aboutMe.objects.all().last()
dataCount=data.count()
lastIndex = data[dataCount-1]
return render(request,'about.html',
{
'websiteData': data,
'lastIndex': lastIndex,
'socialLink':socialLinkData,
'aboutMe': aboutme,
})
def contact(request):
data = websiteDetail.objects.all()
socialLinkData = socialLink.objects.all().last()
dataCount=data.count()
lastIndex = data[dataCount-1]
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ContactForm(request.POST)
# check whether it's valid:
if form.is_valid():
# process the data in form.cleaned_data as required
# ...
# redirect to a new URL:
name = form.cleaned_data['name']
phone = form.cleaned_data['phone']
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
sender = form.cleaned_data['sender']
reg = contactForm(name=name, phone=phone, subject=subject,message=message,sender=sender)
reg.save()
# send_mail(subject, message, sender, recipients)
return redirect('submit-success')
# if a GET (or any other method) we'll create a blank form
else:
form = ContactForm()
return render(request,'contact.html',
{
'websiteData': data,
'lastIndex': lastIndex,
'socialLink':socialLinkData,
'form': form
})
def blog(request):
data = websiteDetail.objects.all()
socialLinkData = socialLink.objects.all().last()
dataCount=data.count()
lastIndex = data[dataCount-1]
return render(request,'blog.html',
{
'websiteData': data,
'lastIndex': lastIndex,
'socialLink':socialLinkData
})
def submit_success(request):
data = websiteDetail.objects.all()
socialLinkData = socialLink.objects.all().last()
dataCount=data.count()
lastIndex = data[dataCount-1]
return render(request,'form-success.html',
{
'websiteData': data,
'lastIndex': lastIndex,
'socialLink':socialLinkData
})
@csrf_exempt
def UserViewSet(request, id=0):
    """CRUD endpoint for websiteDetail rows: GET lists, POST adds, PUT updates
    (row id taken from the JSON body), DELETE removes by the URL `id`.

    NOTE(review): `JSONParser.parse(request)` calls the method unbound, so
    `request` is passed as `self` — this is normally written as
    `JSONParser().parse(request)` in DRF; confirm and fix.
    """
    if request.method == 'GET':
        queryset = websiteDetail.objects.all()
        serializer_class = websiteSerializer(queryset, many=True)
        return JsonResponse(serializer_class.data, safe=False)
    elif request.method == 'POST':
        websiteData = JSONParser.parse(request)
        serializer_class = websiteSerializer(data = websiteData)
        if serializer_class.is_valid():
            serializer_class.save()
            return JsonResponse('Added Successfully!', safe=False)
        return JsonResponse('Failed to Add', safe=False)
    elif request.method == 'PUT':
        websiteData = JSONParser.parse(request)
        website = websiteDetail.objects.get(id = websiteData['id'])
        serializer_class = websiteSerializer(website, data = websiteData)
        if serializer_class.is_valid():
            serializer_class.save()
            return JsonResponse('Updated Successfully!', safe=False)
        return JsonResponse('Failed to Update', safe=False)
    elif request.method == 'DELETE':
        website = websiteDetail.objects.get(id = id)
        website.delete()
        return JsonResponse('Deleted Successfully!', safe=False)
| [
"Vishvesh2851994@gmail.com"
] | Vishvesh2851994@gmail.com |
a7e3426263f54ac3afb4be232a310b06048ae37e | b199a222da41b8bf0e1b131a1c15b86e5eafbef2 | /Phase1/day15/homework.py | 1b1133ce156652f63993e02f941ac37affe9ff3f | [] | no_license | Nagisama4/Learn-python | 8e72714ba33d7b2b29de05f0207d4d9de8e1ee9b | 7a038d0ce5e1d98e12f6dd5d96ba2e8c7f7acce4 | refs/heads/master | 2020-04-28T04:39:55.118748 | 2019-06-05T00:42:52 | 2019-06-05T00:42:52 | 174,987,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | import math
class Company:
    """Abstract employee base: holds the name; subclasses must implement salary()."""
    def __init__(self, name):
        self.name = name
    def salary(self):
        # Subclass responsibility.
        raise NotImplementedError()
class Programmer(Company):
    """Programmer: paid a base salary plus a profit share."""
    def __init__(self, name, basement=0, profit=0):
        super().__init__(name)
        self.basement = basement
        self.profit = profit
    def salary(self):
        """Monthly pay: base ("basement") plus the profit bonus."""
        return self.profit + self.basement
class Program_test(Company):
    """Tester: paid a base salary plus 5 per bug found."""
    def __init__(self, name, basement2=0, bugs=0):
        super().__init__(name)
        self.basement2 = basement2
        self.bugs = bugs
    def salary(self):
        """Monthly pay: base plus 5 for every logged bug."""
        return self.basement2 + 5 * self.bugs
class Sales(Company):
    """Salesperson: paid a base salary plus 5% commission on sales."""
    def __init__(self, name, basement3=0, sales_value=0):
        super().__init__(name)
        self.basement3 = basement3
        self.sales_value = sales_value
    def salary(self):
        """Monthly pay: base plus a 5% cut of the sales volume."""
        return self.basement3 + 0.05 * self.sales_value
class CompanyManager:
    """Keeps the employee roster and aggregates the payroll."""
    def __init__(self):
        self.employees = []
    def calculate_all(self):
        """Total of salary() across every employee on the roster."""
        total = 0
        for employee in self.employees:
            total += employee.salary()
        return total
# Build a small roster covering every role and print the total payroll.
p01 = Programmer("全栈", 15000, 5000)
p02 = Programmer("算法", 18000, 7000)
pt01 = Program_test("前端", 5000, 30)
pt02 = Program_test("后端", 6000, 50)
s01 = Sales("销售代表", 3000, 20000)
s02 = Sales("分销", 2000, 15000)
manager = CompanyManager()
manager.employees.append(p01)
manager.employees.append(p02)
manager.employees.append(pt01)
manager.employees.append(pt02)
manager.employees.append(s01)
manager.employees.append(s02)
total_salary = manager.calculate_all()
print(total_salary)
"noreply@github.com"
] | Nagisama4.noreply@github.com |
620463d168213b56d9a8697f28f277ae7230f533 | 250d29acc7f868105328a0a4a010151e2977c987 | /Week02/Homework6.py | b5ec361c6d93a0044d77a1b300627472709aef7c | [] | no_license | nghiahhnguyen/Data-Science-Course-1-by-Ha-Xuan-Nguyen | 255401b31217bbc2d32f114df76bef6dc018124c | e366d49813e782e9e1fd558cfd0dd80e58e24303 | refs/heads/master | 2022-01-29T06:17:23.097140 | 2018-10-01T02:52:45 | 2018-10-01T02:52:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | M = [[1, 2, 3, 4, 5],
[3, 4, 2, 5, 6],
[1, 6, 3, 2, 5]]
m = len(M)
n = len(M[0])
l = []
for i in range(n):
cnt = 0
for j in range(m):
cnt += M[j][i]
l.append(cnt)
print(l)
| [
"34543734+huunghia160799@users.noreply.github.com"
] | 34543734+huunghia160799@users.noreply.github.com |
5c07062055fe3db9f15c9dcad65e2c571e705ce2 | c51cefe6e00a2b50b155f8a635729a4cf8504c1f | /setup.py | eb0708f8e753845c33ea72705d663bef5755a5c9 | [] | no_license | Raph40/Exame_package | c56d8e2781f570ac17602b9da824d908461f69ed | 07ed1d18a4a9b630cd7db170627cfc4732f0d48b | refs/heads/master | 2022-11-11T14:57:03.633566 | 2020-07-03T11:36:02 | 2020-07-03T11:36:02 | 276,882,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from distutils.core import setup
# NOTE(review): the original was missing the commas after the author /
# author_email / url arguments (a SyntaxError), used the unknown keyword
# 'descriptation' instead of 'description', and 'py_module' instead of
# distutils' 'py_modules'.
setup(name='hello',
      version='1.0',
      description='Hello Discriptation Module',
      author='Rafael',
      author_email='rafajoselopes@gmail.com',
      url='https://github.com/Raph40/Exame_package.git',
      py_modules=['Exame_package']
      )
| [
"noreply@github.com"
] | Raph40.noreply@github.com |
c587ec7413230286af6334350f20af42130ac04a | fa3f5dcfb0e01bffb823cd584db095d4bdcb3183 | /build/py_action_pkg/launch/maze_action_srv.launch.py | 541ad20c1cc8e9e23586b00098f9d2059ca09b0e | [] | no_license | adrianChoi11/ros2_gcamp | 59511a17b4c53da24db7fb5b65233770ae39e9c8 | 584a47d4566e52d578ec3a74f6387603825ca074 | refs/heads/master | 2023-08-28T14:37:56.559831 | 2021-10-06T02:00:08 | 2021-10-06T02:00:08 | 412,407,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | /home/apple/gcamp_ros2_ws/src/gcamp_ros2_basic/py_action_pkg/launch/maze_action_srv.launch.py | [
"choiyh1875@gmail.com"
] | choiyh1875@gmail.com |
5ece7943d3c757b1fec275554e2faaa97e44ca30 | 462232447fc046828a26dbc8a2225835bc812c1e | /labelpick.py | 05c390a9a85a97a6de3546e0ee41649546bb8b6a | [] | no_license | xmxoxo/Text-Opinion-Mining | 74e6bef618a8c55fa20a114f8b68b426269e984f | 3b6b2a14070eb3cf9446260f87d21a32ef5ed185 | refs/heads/master | 2023-02-22T22:58:04.496770 | 2021-01-29T01:52:50 | 2021-01-29T01:52:50 | 204,389,839 | 40 | 18 | null | 2019-08-26T03:38:39 | 2019-08-26T03:38:39 | null | UTF-8 | Python | false | false | 11,667 | py | #!/usr/bin/env python3
#coding:utf-8
__author__ = 'xmxoxo'
'''
NER预测结果提取工具
自动分析指定目录下的label_test.txt和token_test.txt,
提取出已经识别的结果,并保存到result_test.txt中
提取后的格式如下:
```
index 位置索引,从0开始
txt 文本内容
label 标签
```
提取结果样例:
```
index txt label
5 金庸 PER
65 提婆达多 PER
309 帝释 PER
```
'''
import os
import sys
import re
import pandas as pd
# 读入文件
def readtxtfile(fname):
    """Read a UTF-8 text file and return its entire contents as one string."""
    with open(fname, 'r', encoding='utf-8') as handle:
        return handle.read()
#保存文件
def savetofile(txt, filename):
    """Write str(txt) to `filename` as UTF-8; returns 1 on success."""
    with open(filename, 'w', encoding='utf-8') as handle:
        handle.write(str(txt))
    return 1
#主方法
def doprocess(path):
#读取原句子
print('正在读取原句')
fndat = './data/reviews.csv'
df_review = pd.read_csv(fndat)
#获取所有句子
lstReview = list(df_review['Reviews'])
print(lstReview[:5])
print('-'*30)
#获得子句索引
lstSubSeg = [getSubSeg(x) for x in lstReview]
print(lstSubSeg[:5])
print('-'*30)
#-----------------------------------------
tokenfile = os.path.join(path, 'token_test.txt')
labelfile = os.path.join(path, 'label_test.txt')
#读取索引信息
txttoken = pd.read_csv(tokenfile,delimiter="\t", header = None)
txtlbl = pd.read_csv(labelfile,delimiter="\t", header = None)
#合并
txtout = pd.merge(txttoken,txtlbl,left_index=True,right_index=True,how='outer')
mergefn = os.path.join(path, 'merge_test.txt')
txtout.to_csv(mergefn,index=False,sep="\t",header = None)
print(txtout.head(10))
print('-'*30)
#生成句子拆分标识索引号
#2019/8/30 这里有错误,要改成统一的字段名
f_index0 = txtout[txtout.columns[0]][txtout[txtout.columns[0]].isin(['[CLS]'])]
f_index1 = txtout[txtout.columns[0]][txtout[txtout.columns[0]].isin(['[SEP]'])]
lstSeg = list(zip(list(f_index0.index),list(f_index1.index)))
print(lstSeg[:10])
print(len(lstSeg))
#print(lstSeg[217:220])
#for i in range(len(lstSeg)):
# print('%d : %s' % (i, lstSeg[i]) )
#return 0
#-----------------------------------------
#返回索引所在的位置
#给数据增加ID号, lstSeg 记录着每条记录的开始与结束
#test:[(0, 39), (40, 70), (71, 86), (87, 100)]
#train:[(0, 10), (11, 28), (29, 53), (54, 82)]
def getid (index,lstS = lstSeg):
for i in range(len(lstS)):
tseg = lstS[i]
if tseg[0]<=index<tseg[1]:
return int(i+1)
break
return 0
#-----------------------------------------
#print(getid(5391, lstSeg) )
#print('-'*30)
#sys.exit()
#提取label
labels = ["B-ASP", "I-ASP", "B-OPI", "I-OPI"]
fout = txtout[txtout[txtout.columns[1]].isin(labels)]
print(fout.head(10))
print('-'*30)
#if not fout:
# print('数据错误...')
# return ''
#把标注数据结果提取出来,循环遍历记录
lstid = []
lstindex = []
lsttxt = []
lstlabel = []
lstSubPos = []
lstindex_new = []
lstSegIndex = []
seg = ''
index = 0
lastlbl = ''
fid = 0
subindex = 0 #当前句索引
for x in fout.index:
word = fout[fout.columns[0]][x]
lbl = fout[fout.columns[1]][x]
#ffid = getid(x)
subindex +=1
#跨句子
if x > lstSeg[fid][1]:
if seg and lastlbl:
#print ('当前id:%d' % ffid)
fid = getid(index)-1
lstid.append( fid+1 )
lstindex.append(index)
lsttxt.append(seg)
lstlabel.append(lastlbl[-3:])
#计算当前句位置
lstSegIndex.append(lstSeg[fid][0])
#2019/8/31 注意要多减1
indexnew = index - lstSeg[fid][0]-1
lstindex_new.append(indexnew)
#第几个子句
lstSubPos.append(getid(indexnew, lstS = lstSubSeg[fid]) )
seg = word
index = x
lastlbl = lbl
fid += 1
subindex = 0
continue
#2019/8/30 如果标注不连续也要进行处理,增加: or ( x - (index + len(seg)) )>1
if lbl[0] == 'B' or lastlbl[-3:] != lbl[-3:] or ( x - (index + len(seg)) )>1 :
if seg and lastlbl:
fid = getid(index)-1
lstid.append( fid+1 )
lstindex.append(index)
lsttxt.append(seg)
lstlabel.append(lastlbl[-3:])
#计算当前句位置
lstSegIndex.append(lstSeg[fid][0])
#print(x,fid,lstSeg[fid])
indexnew = index - lstSeg[fid][0]-1
lstindex_new.append(indexnew)
#第几个子句
lstSubPos.append(getid(indexnew, lstS = lstSubSeg[fid]) )
seg = word
index = x
else:
seg +=word
lastlbl = lbl
#if x>100:
#pass
#break
#循环结束后最后一条记录要处理
print('最后记录')
print(fid, seg , lastlbl, x)
if seg and lastlbl:
#lstid.append(fid+1)
fid = getid(index)-1
lstid.append( fid +1 )
lstindex.append(x)
lsttxt.append(seg)
lstlabel.append(lastlbl[-3:])
#计算当前句位置
lstSegIndex.append(lstSeg[fid][0])
#print(x,fid,lstSeg[fid])
indexnew = index - lstSeg[fid][0]
lstindex_new.append(indexnew)
#print(indexnew)
#第几个子句
lstSubPos.append(getid(indexnew, lstS = lstSubSeg[fid]) )
#转为字典
dictDat = {
'id':lstid,
'index':lstindex,
'txt':lsttxt,
'label':lstlabel,
'segIndex': lstSegIndex,
'index_new':lstindex_new, #本句索引位置
'subPos': lstSubPos, #所在分句
}
#转为DataFrame
outdf = pd.DataFrame(dictDat)
print(outdf.head(10))
print('-'*30)
#return 0
#----- 2019/8/30 以下部分合并到循环中去了-----
#outdf['id'] = outdf['index'].apply(getid)
#outdf = outdf[['id','index','txt','label']]
#print(outdf.head(10))
#print('-'*30)
#Todo: 还要把ASP和OPI进行组合下
#把索引转换成本句的索引, 用索引号减去lstSeg[句子id]即可
#outdf['index_new'] = outdf['index'].apply(lambda x: x - lstSeg[getid(x)-1][0])
#print(outdf.head(10))
#print('-'*30)
#求出子句的位置,放在字段subPos;相关字段:'id':第几句,要-1;'index_new':本句位置
#outdf['subPos'] = outdf.apply(lambda x: getid(x['index_new'], lstS=lstSubSeg[x['id']-1]), axis=1)
#print('标识子句位置suubPos:')
#print(outdf.head(10))
#print('-'*30)
#-----------------------------------------
#存个临时数据用于分析
outfile = os.path.join(path, 'seg_test.txt')
outdf.to_csv(outfile,index=False)
#return 0
#合并最后的结果,相关字段:id:第几句,subPos:第几个子句
#字段: id , index , txt, label , index_new , subPos
#循环遍历所有的记录,如果ID与subPos都相同,进行拼接
#最终结果字段: id, ASP, OPI
lstID = []
lstASP = []
lstOPI = []
lstAStar = []
lstOStar = []
cID = 0
cSub = 0
lastID = 0
lastSub = 0
lastTxt = ''
lastLbl = ''
txt = ''
lbl = ''
lastASP = ''
lastOPI = ''
lastAStar = ''
lastOStar = ''
for x in outdf.index:
cID = outdf['id'][x]
txt = outdf['txt'][x]
lbl = outdf['label'][x]
cSub = outdf['subPos'][x]
cPos = outdf['index_new'][x]
#判断是否同一个子句
if cID==lastID and cSub==lastSub:
#是同一个子句,判断填充内容
#2019/8/30 增加特殊情况,同一个子句中连续出现同一个标签
if (lastASP and lastOPI) or \
(lastASP and lbl=='ASP') or (lastOPI and lbl=='OPI' ):
if lastASP=='':lastASP = '_'
if lastOPI=='':lastOPI = '_'
lstID.append(lastID)
lstASP.append(lastASP)
lstOPI.append(lastOPI)
lastASP = ''
lastOPI = ''
lstAStar.append(lastAStar)
lstOStar.append(lastOStar)
lastAStar = ''
lastOStar = ''
else:
#不是同一句,之前又有数据,则先旧数据保存起来
if lastASP or lastOPI:
if lastASP=='':lastASP = '_'
if lastOPI=='':lastOPI = '_'
lstID.append(lastID)
lstASP.append(lastASP)
lstOPI.append(lastOPI)
lastASP = ''
lastOPI = ''
lstAStar.append(lastAStar)
lstOStar.append(lastOStar)
lastAStar = ''
lastOStar = ''
lastID = cID
lastTxt = txt
lastLbl = lbl
lastSub = cSub
if lbl=='ASP':
if not lastASP:
lastASP = lastTxt
lastAStar = cPos
if lbl=='OPI':
if not lastOPI:
lastOPI = lastTxt
lastOStar = cPos
#print(lastID,lastASP,lastOPI)
#print('-'*10)
#if x>10:
# break
#循环结果后还有结果要处理下
if lastASP or lastOPI:
if lastASP=='':lastASP = '_'
if lastOPI=='':lastOPI = '_'
lstID.append(lastID)
lstASP.append(lastASP)
lstOPI.append(lastOPI)
lstAStar.append(lastAStar)
lstOStar.append(lastOStar)
'''
print(lstID[:10])
print(lstASP[:10])
print(lstOPI[:10])
print('-'*30)
#return 0
'''
#转为字典
dictDat = {
'id':lstID,
'ASP':lstASP,
'OPI':lstOPI,
'A_start':lstAStar,
'O_start':lstOStar,
}
#转为DataFrame
outdf = pd.DataFrame(dictDat)
print(outdf.head(10))
#return 0
#保存提取的结果
outfile = os.path.join(path, 'picklabel_test.txt')
outdf.to_csv(outfile,index=False) #,sep="\t"
print('提取记录数: %d' % outdf.shape[0])
print('提取结果保存完成: %s' % outfile)
print('-'*30)
#把句子拆分子句,并返回各句子的起止索引号
# txt = '最近太忙一直没有空来评价,东西已试过是正品,擦在脸上勾称白嫩,是个不错的商品'
#返回结果: [(0, 13), (13, 22), (22, 31), (31, 38)]
def getSubSeg(txt):
    """Split *txt* into sub-clauses at spaces and Chinese punctuation.

    Returns one (start, end) index pair per sub-clause; each range
    includes its trailing delimiter character.
    """
    # Append a newline after every delimiter, then split on those newlines.
    pieces = re.sub(r'([ ,;。!])', r"\1\n", txt).splitlines()
    spans = []
    cursor = 0
    for piece in pieces:
        nxt = cursor + len(piece)
        spans.append((cursor, nxt))
        cursor = nxt
    return spans
#命令行方法
def maincli():
    """Command-line entry point: validate the target directory, then extract."""
    target = './output/'
    if len(sys.argv) > 1:
        target = sys.argv[1]
    if not os.path.exists(target):
        print('目录%s不存在,请检查!' % target)
        sys.exit(0)
    doprocess(target)
# Run the command-line entry point when executed as a script.
if __name__ == '__main__':
    pass  # no-op statement; has no effect here
    maincli()
| [
"xmxoxo@qq.com"
] | xmxoxo@qq.com |
489ee2dd4085318fa62c0d200e0001c0a15b6d01 | 38181cfc62625ca146e4911ebe9c1713124b3c81 | /counter.py | d3c3565ea35faa1a3757ce50a1bb9091c2b4dff7 | [] | no_license | hthomas60/Toolbox-Pickling | 86adec981ba8e8f4097de30cbb6846c9289afec8 | 2bd5ee350ced1af6c3a880016cfa53ad006e70ea | refs/heads/master | 2021-07-24T02:50:28.325198 | 2017-11-02T15:31:49 | 2017-11-02T15:31:49 | 109,283,399 | 0 | 0 | null | 2017-11-02T15:30:23 | 2017-11-02T15:30:22 | null | UTF-8 | Python | false | false | 2,361 | py | """ A program that stores and updates a counter using a Python pickle file"""
from os.path import exists
import sys
from pickle import dump, load
import pickle
import os.path
def update_counter(file_name, reset=False):
    """ Updates a counter stored in the file 'file_name'

    A new counter will be created and initialized to 1 if none exists or if
    the reset flag is True.

    If the counter already exists and reset is False, the counter's value will
    be incremented.

    file_name: the file that stores the counter to be incremented. If the file
               doesn't exist, a counter is created and initialized to 1.
    reset: True if the counter in the file should be reset.

    returns: the new counter value

    >>> update_counter('blah.txt',True)
    1
    >>> update_counter('blah.txt')
    2
    >>> update_counter('blah2.txt',True)
    1
    >>> update_counter('blah.txt')
    3
    >>> update_counter('blah2.txt')
    2
    """
    # Fresh counter when a reset is requested or no file exists yet.
    # (The original opened a new file with the invalid mode '' and then
    # incremented a brand-new counter to 2 instead of returning 1.)
    if reset or not os.path.exists(file_name):
        counter = 1
    else:
        # Read the previous value and bump it.
        with open(file_name, 'rb') as f:
            counter = pickle.load(f)
        counter += 1
    # Persist the new value; `with` guarantees the handle is closed.
    with open(file_name, 'wb') as f:
        pickle.dump(counter, f)
    return counter
"""
These files were used in my textmining project
"""
def loadbooks():
    """
    Load one book's text from gutenberg.org.  The book id is hard-coded and
    has to be manually changed for each book.
    """
    # NOTE(review): `requests` is never imported in this module, so calling
    # this raises NameError; add `import requests` (third-party) to use it.
    downloaded_book = requests.get('http://www.gutenberg.org/ebooks/1522.txt.utf-8').text
    return downloaded_book
def savebook(book_text, filename):
    """
    Save the text of a book into a file as a pickle.
    """
    # `with` guarantees the file is flushed and closed even if dump() raises.
    with open(filename, 'wb') as f:
        pickle.dump(book_text, f)
def opensavedbook(file):
    """
    Open a pickle file saved on disk and return its unpickled contents.
    """
    # `with` closes the handle; the original leaked it.
    with open(file, 'rb') as input_file:
        return pickle.load(input_file)
if __name__ == '__main__':
    # With at most one extra command-line argument, run the docstring tests.
    # NOTE(review): because the threshold is `<= 2`, a single file argument is
    # ignored; two or more arguments are required to reach the update branch,
    # which only ever uses sys.argv[1].
    if len(sys.argv) <= 2:
        import doctest
        doctest.testmod()
    else:
        print("new value is " + str(update_counter(sys.argv[1])))
| [
"noreply@github.com"
] | hthomas60.noreply@github.com |
91feb4ba59077254f4d6a9ed644bd5d3663554bf | 60bb3ef7dd8a147761918f1fa021918d6898202d | /.history/main_20210623141903.py | eee65d95a67254bb6a9d60f7a4da85315eba9d6c | [] | no_license | sanjayMamidipaka/bbtautomationscripts | c1d29d9ea5c0fa982a53895b10db50b66e475c8f | 12c35a3459cb0ead71ae616b2efad21c555cf8a0 | refs/heads/master | 2023-06-06T06:25:09.152797 | 2021-06-23T18:21:34 | 2021-06-23T18:21:34 | 362,836,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,370 | py | import pandas as pd
import numpy as np
import csv
import xlsxwriter
from timeprofile import timeprofile
from masterdata import masterdata
from virtualReference import virtualReference
from keyfigures import keyfigures
from planninglevels import planninglevels
from attributesaskf import attributesaskf
from attributes import attributes
#Steps:
# create class
# add import
# change writer
#change this line to the desired output path
# Destination workbook; every report class below writes one sheet into it.
output_path = '/Users/sanjaymamidipaka/Downloads/Energizer_Latest_output1.xlsx'
writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
paths = []
# NOTE(review): every prompt below says "masterdata path" (copy-paste), and
# none of these six inputs — nor `paths` — is used anywhere in this script.
masterdatapath = str(input('Enter the masterdata path: '))
plevelspath = str(input('Enter the masterdata path: '))
keyfigurespath = str(input('Enter the masterdata path: '))
attributesaskfpath = str(input('Enter the masterdata path: '))
timeprofilepath = str(input('Enter the masterdata path: '))
paa = str(input('Enter the masterdata path: '))
# Hard-coded CSV export bundles; only `energizerlatestpaths` is used below.
energizerpaths = ['/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_MASTERDATATYPES_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_KEYFIGURES_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_TIMEPROFILE_2021-05-04_21_01.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Production_ZSAPIBP1C_2021-05-04_21_01/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-04_21_01.csv']
natureswaypaths = ['/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_MASTERDATATYPES_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_PLEVELS_ATTRS_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_KEYFIGURES_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_ATTRIBUTES_AS_KEYFIGURE_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_TIMEPROFILE_2020-12-02_15_09.csv',
'/Users/sanjaymamidipaka/Downloads/natureswaydata/CFGSNA2_PA_ATTRIBUTES_2020-12-02_15_09.csv']
energizertestpaths = ['/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_MASTERDATATYPES_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_KEYFIGURES_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_TIMEPROFILE_2021-05-05_21_29.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_29_Test/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-05_21_29.csv']
energizerproductionspaths = ['/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_MASTERDATATYPES_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_KEYFIGURES_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_TIMEPROFILE_2021-05-05_21_32.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer_2021-05-05_21_32_Production/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-05_21_32.csv']
energizerlatestpaths = ['/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_MASTERDATATYPES_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_PLEVELS_ATTRS_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_KEYFIGURES_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_ATTRIBUTES_AS_KEYFIGURE_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_TIMEPROFILE_2021-05-05_23_58.csv',
'/Users/sanjaymamidipaka/Downloads/Energizer Latest/ZSAPIBP1C_PA_ATTRIBUTES_2021-05-05_23_58.csv']
# Run each report writer against the "Latest" bundle, one sheet at a time.
timeprofile_instance = timeprofile(writer, energizerlatestpaths)
timeprofile_instance.run()
masterdata_instance = masterdata(writer, energizerlatestpaths)
masterdata_instance.run()
virtualReference_instance = virtualReference(writer, energizerlatestpaths)
virtualReference_instance.run()
attributes_instance = attributes(writer, energizerlatestpaths)
attributes_instance.run()
planninglevels_instance = planninglevels(writer, energizerlatestpaths)
planninglevels_instance.run()
keyfigures_instance = keyfigures(writer, energizerlatestpaths)
keyfigures_instance.run()
attributesaskf_instance = attributesaskf(writer, energizerlatestpaths)
attributesaskf_instance.run()
writer.book.close() #close the workbook
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
415c39978ef734344c8c0e19216d0d80366e3e2d | f7db5372865fc77a048d19be47612852ae7225f8 | /teams_fbv/views.py | 1ac43a12b4105930216376480818e3d18993ac15 | [] | no_license | deannariddlespur/djangocrudapp | 73540ea4290ffec9fc6bfb985e218d08b4c4446b | 3269777115bf7877ea9b5542a400462a78f12003 | refs/heads/Team | 2016-09-10T19:52:22.609437 | 2015-08-13T19:46:14 | 2015-08-13T19:46:14 | 40,374,589 | 0 | 1 | null | 2015-08-13T19:46:14 | 2015-08-07T18:20:20 | HTML | UTF-8 | Python | false | false | 1,291 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.forms import ModelForm
from teams_fbv.models import Team
class TeamForm(ModelForm):
    # Model-backed form exposing only the editable Team fields.
    class Meta:
        model = Team
        fields = ['name', 'pages']
def team_list(request, template_name='teams_fbv/team_list.html'):
    """Render a page listing every Team."""
    context = {'object_list': Team.objects.all()}
    return render(request, template_name, context)
def team_create(request, template_name='teams_fbv/team_form.html'):
    """Show an empty team form; persist the team on a valid POST."""
    form = TeamForm(request.POST or None)
    if not form.is_valid():
        return render(request, template_name, {'form': form})
    form.save()
    return redirect('teams_fbv:team_list')
def team_update(request, pk, template_name='teams_fbv/team_form.html'):
    """Edit the existing team identified by *pk*."""
    team = get_object_or_404(Team, pk=pk)
    form = TeamForm(request.POST or None, instance=team)
    if not form.is_valid():
        return render(request, template_name, {'form': form})
    form.save()
    return redirect('teams_fbv:team_list')
def team_delete(request, pk, template_name='teams_fbv/team_confirm_delete.html'):
    """Ask for confirmation on GET; delete the team on POST."""
    team = get_object_or_404(Team, pk=pk)
    if request.method == 'POST':
        team.delete()
        return redirect('teams_fbv:team_list')
    return render(request, template_name, {'object': team})
| [
"webdevdea@gmail.com"
] | webdevdea@gmail.com |
36d9c91d69aca7968a17522c7b514c8a5289ecbf | 5fc40a32fcd5e0ce07dc7d565f59f526217e2ead | /candle.py | 879895d3828a90fbfa37c6f18cdbde4eedf92ec9 | [] | no_license | steiryx/raspberrypi-app | b276d75cda3ad8bba5f30f29600bb0bbd2297021 | b8abfe890f16211090da5ed9712512aebb2d0e8e | refs/heads/master | 2020-12-06T02:46:36.049250 | 2016-11-03T15:18:47 | 2016-11-03T15:18:47 | 65,949,665 | 0 | 0 | null | 2016-11-03T15:18:48 | 2016-08-17T23:38:31 | Python | UTF-8 | Python | false | false | 1,306 | py | import RPi.GPIO as GPIO
import time
import random
# GPIO pin (BCM numbering, see setup()) driving the LED via PWM.
LED = 18
def setup():
    """Initialise the GPIO pin and start PWM at full brightness."""
    global pwm
    # GPIO uses broadcom numbering (GPIO numbers)
    GPIO.setmode(GPIO.BCM)
    # Set the LED pin as an output
    GPIO.setup(LED, GPIO.OUT)
    # Start PWM on the LED pin at 200Hz with a
    # 100% duty cycle. At lower frequencies the LED
    # would flicker even when we wanted it on solidly
    pwm = GPIO.PWM(LED, 200)
    # Start at a brightness of 100%
    pwm.start(100)
def set_brightness(new_brightness):
    """Set the LED brightness (0-100) by changing the PWM duty cycle."""
    pwm.ChangeDutyCycle(new_brightness)
def flicker():
    """One flicker step: pick a random brightness and hold it briefly."""
    # Brightness is 0-99 (randrange excludes the stop value) and the hold
    # time is 0.01-0.09 s; tweak these values to change the flicker effect.
    set_brightness(random.randrange(0, 100))
    time.sleep(random.randrange(1, 10) * 0.01)
# The wrapper around the flicker function makes sure the
# GPIO hardware is cleaned up when the user presses CTRL-C
def loop():
    """Flicker forever; release the GPIO hardware on Ctrl-C or any exit."""
    try:
        while True:
            flicker()
    except KeyboardInterrupt:
        pass
    finally:
        # Runs on every exit path, so the pins are always released.
        GPIO.cleanup()
# setup the hardware
setup()
# start flickering (blocks until Ctrl-C)
loop()
| [
"steiryx@yahoo.com.ph"
] | steiryx@yahoo.com.ph |
241837974835bf97cb4b70de61bc88a3abd55f74 | 78bc431cec6d56fbf10771fbb36419c12ee97001 | /_archive/oauth.py | 83bb63d0449114c9eed41952e440e59219ca6438 | [] | no_license | njligames/email_sender | c9cb7a7d7a4719e182c2c1dffba4fcc578b896f1 | 2a0da7309e97ce7ee9b6e85235e84e06a0b3f2c5 | refs/heads/master | 2021-01-11T12:07:29.147215 | 2016-12-22T23:32:54 | 2016-12-22T23:32:54 | 76,519,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | import json
import flask
import httplib2
from apiclient import discovery
from oauth2client import client
app = flask.Flask(__name__)
@app.route('/')
def index():
    # NOTE(review): the two unconditional redirects below run on every
    # request, so all of the credential-handling code after them is
    # unreachable dead code.
    return flask.redirect(flask.url_for('static', filename='oauth2callback'))
    return flask.redirect(flask.url_for('static', filename='oauth2callback'))
    if 'credentials' not in flask.session:
        return flask.redirect(flask.url_for('static', filename='oauth2callback'))
    credentials = client.OAuth2Credentials.from_json(flask.session['credentials'])
    if credentials.access_token_expired:
        # Expired token: restart the OAuth2 flow.
        return flask.redirect(flask.url_for('static', filename='oauth2callback'))
    else:
        http_auth = credentials.authorize(httplib2.Http())
        # drive_service = discovery.build('drive', 'v2', http_auth)
        # files = drive_service.files().list().execute()
        try:
            # NOTE(review): `service` and `errors` are never defined in this
            # module, and the `print` statements below are Python 2 syntax.
            message = (service.users().messages().send(userId='me', body="message").execute())
            print 'Message Id: %s' % message['id']
            return json.dumps(message)
        except errors.HttpError, error:
            print 'An error occurred: %s' % error
            return json.dumps(error)
@app.route('/oauth2callback')
def oauth2callback():
    # Two-step OAuth2 web-server flow (oauth2client).
    flow = client.flow_from_clientsecrets(
        'client_secrets.json',
        scope='https://www.googleapis.com/auth/drive.metadata.readonly',
        redirect_uri=flask.url_for('static', filename='oauth2callback', _external=True),
        include_granted_scopes=True)
    if 'code' not in flask.request.args:
        # No authorization code yet: send the user to the consent page.
        auth_uri = flow.step1_get_authorize_url()
        return flask.redirect(auth_uri)
    else:
        # Provider redirected back with ?code=...: exchange it for
        # credentials and stash them (as JSON) in the session.
        auth_code = flask.request.args.get('code')
        credentials = flow.step2_exchange(auth_code)
        flask.session['credentials'] = credentials.to_json()
        return flask.redirect(flask.url_for('static', filename='index'))
if __name__ == '__main__':
    import uuid
    # Random per-run secret key for Flask session signing; sessions signed
    # before a restart become invalid afterwards.
    app.secret_key = str(uuid.uuid4())
    app.debug = True
    app.run()
"jamesfolk@Jamess-MacBook-Pro.local"
] | jamesfolk@Jamess-MacBook-Pro.local |
b99ba968add596a4b3cd78f541624aaa17f3ccc0 | dc685792ffd8bbc7e1cc69747203ebba2a7eea3a | /wikipedia_table/wikipedia_table/items.py | 30a3d9fc7b28e0394ec286574ca5214a71b64f4c | [] | no_license | zinon/scrapy | f702f96fb7b357d8144492e4c1137b40359a8507 | 652166a90d8acab142174f8036fc6ea627fa8eb7 | refs/heads/master | 2022-10-16T16:23:15.520027 | 2020-06-08T12:53:15 | 2020-06-08T12:53:15 | 266,174,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class WikipediaTableItem(scrapy.Item):
    """Scrapy item for rows scraped from a Wikipedia table (no fields yet)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| [
"zinonas@x1.fritz.box"
] | zinonas@x1.fritz.box |
21d99fda2332a08a6c601961863dace125bdfbf5 | aa5180780d25acf66d5b9ff53c0ef7d0e7768e72 | /pageRank.py | 129e49530e6a73b060dc386960f1132f42687c0e | [] | no_license | AnnnndOpen/miniSE | ab72bc0691ccdb7dcb729c583fdbe994fed60b66 | 871e2ae08b80704224aef93d8f1e00b915668f8c | refs/heads/master | 2021-09-02T06:51:11.143031 | 2017-12-31T06:54:21 | 2017-12-31T06:54:21 | 115,848,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,078 | py | import os
import json
def pageRank():
    """Compute HITS (hub/authority) and PageRank scores for the crawled
    pages listed in bookkeeping.json, then dump the results to disk.

    Pages live under WEBPAGES_RAW/<i>/<j>; a page is processed only when its
    '<j>notrashout' marker file exists.  Python 2 code (uses dict.has_key).
    Output files:
      * 'connection' : page id, out-degree, ids of linked known pages
      * 'pageRank'   : page id and PageRank score (only scores > 0)
      * 'pageRank2'  : page id, hub score, authority score
    """
    # bookkeeping.json maps page id ('dir/file') -> URL.
    fInput=open('bookkeeping.json','r')
    dic1=json.load(fInput)
    fInput.close()
    dica={}   # URL -> authority score
    dich={}   # URL -> hub score
    dic2={}   # URL -> page id (reverse of dic1)
    dicr={}   # URL -> PageRank score
    for value in dic1.values():
        dica.setdefault(value,float(1))
        dich.setdefault(value,float(1))
        dicr.setdefault(value,float(1))
    for key in dic1.keys():
        dic2.setdefault(dic1[key],key)
    dicp={}     # URL -> URLs linking to it (collected but unused below)
    dicn={}     # URL -> URLs it links to (known crawled pages only)
    dicnOfp={}  # URL -> total number of <a href> links found on the page
    # ---- Build the link graph from the raw pages. ----
    for i in range(0,75):
        for j in range(0,500):
            fpstring='WEBPAGES_RAW/%d/%dnotrashout'%(i,j)
            if os.path.exists(fpstring):
                numOfLink=0
                fInput=open('WEBPAGES_RAW/%d/%d'%(i,j))
                allText=fInput.read().lower()
                fInput.close()
                # Walk every '<a href="..."' occurrence in the page.
                startp=allText.find('<a href="')
                if startp>=0:
                    allText=allText[startp+9:]
                    startp=0
                    endp=allText.find('"')
                baseU=dic1['%d/%d'%(i,j)]
                dicTmp={}  # URLs already seen on this page (dedup)
                while startp>=0:
                    numOfLink=numOfLink+1
                    Url=''
                    subU=allText[:endp]
                    # Resolve the href against the page's own URL.
                    if subU.find('http://')==0:
                        Url=subU[7:]
                    elif subU.find('https://')==0:
                        Url=subU[8:]
                    elif subU.find('www.')==0:
                        Url=subU
                    else:
                        if (subU.find('../')==0):
                            # Parent-relative: strip one path segment per '../'.
                            Url=baseU
                            while subU.find('../')==0:
                                subU=subU[3:]
                                Url=Url[0:(Url.rfind('/'))]
                            Url=Url+'/'+subU
                        elif (subU.find('./')==0):
                            # Current-directory-relative link.
                            Url=baseU
                            while subU.find('./')==0:
                                subU=subU[2:]
                            Url=subU[:subU.rfind('/')+1]
                            Url=Url+subU
                        elif (subU.find('/')==0):
                            # Root-relative: keep only the host part of baseU.
                            if baseU.find('https://')==0:
                                tmpurl=baseU[8:]  # bugfix: was the undefined name 'base'
                                tmpint=tmpurl.find('/')
                                if tmpint<0:
                                    Url=baseU+subU
                                else:
                                    Url='https://'+tmpurl[:tmpint]+subU
                            elif baseU.find('http://')==0:
                                tmpurl = baseU[7:]  # bugfix: was the undefined name 'base'
                                tmpint = tmpurl.find('/')
                                if tmpint < 0:
                                    Url = baseU + subU
                                else:
                                    Url = 'http://' + tmpurl[:tmpint] + subU
                            else:
                                Url=baseU[:baseU.find('/')]+subU
                    if dicTmp.has_key(Url)==False:
                        dicTmp.setdefault(Url,1)
                        # Only edges to known crawled pages enter the graph.
                        if dica.has_key(Url):
                            dicn.setdefault(baseU,[]).append(Url)
                            dicp.setdefault(Url,[]).append(baseU)
                    allText=allText[endp:]
                    startp = allText.find('<a href="')
                    if startp>=0:
                        allText=allText[startp+9:]
                        startp=0
                        endp=allText.find('"')
                dicnOfp.setdefault(baseU,numOfLink)
        print('in folder %d'%(i))
    print('conection created')
    # ---- Persist the adjacency list. ----
    fOutput=open('connection','w')
    for key in dic1.keys():
        if dicn.has_key(dic1[key]):
            fOutput.write(key+' ')
            fOutput.write('%d '%(len(dicn[dic1[key]])))
            for j in range(0,len(dicn[dic1[key]])):
                fOutput.write(dic2[dicn[dic1[key]][j]]+' ')
            fOutput.write('\n')
    fOutput.close()
    # ---- Five iterations of HITS and PageRank (damping factor 0.85). ----
    maxa=0
    maxh=0
    maxr=0
    for i in range(0,5):
        dicTa={}
        dicTh={}
        dicTr={}
        for key in dic1.values():
            dicTa.setdefault(key,float(0))
            dicTh.setdefault(key,float(0))
            dicTr.setdefault(key,0.15)
        print('round %d'%(i+1))
        for key in dic1.values():
            if dicn.has_key(key):
                for j in range(0,len(dicn[key])):
                    tmpS=dicn[key][j]
                    # PageRank mass is divided by the TOTAL link count of the
                    # page (dicnOfp), not just its known out-links.
                    dicTr[tmpS]=dicTr[tmpS]+0.85*(float(dicr[key])/float(dicnOfp[key]))
                    dicTh[key]=dicTh[key]+dica[tmpS]
                    dicTa[tmpS]=dicTa[tmpS]+dich[key]
        # Swap in this round's scores and track the maxima for the headers.
        dich={}
        dica={}
        dicr={}
        for key in dicTr.keys():
            dicr.setdefault(key,dicTr[key])
            if dicTr[key]>maxr:
                maxr=dicTr[key]
        for key in dicTh.keys():
            dich.setdefault(key,dicTh[key])
            if dicTh[key]>maxh:
                maxh=dicTh[key]
        for key in dicTa.keys():
            dica.setdefault(key,dicTa[key])
            if dicTa[key]>maxa:
                maxa=dicTa[key]
    # ---- Write the final scores (first line of each file = the maxima). ----
    fOutput2=open('pageRank2','w')
    fOutput=open('pageRank','w')
    fOutput2.write('%d %d\n'%(maxh,maxa))
    fOutput.write('%.6f \n'%(maxr))
    for key in dic1.keys():
        a1=False
        a2=False
        if dich.has_key(dic1[key]):
            if dich[dic1[key]]>0:
                a1=True
        if dica.has_key(dic1[key]):
            if dica[dic1[key]]>0:
                a2=True
        if a1==True or a2==True:
            fOutput2.write(key+' ')
            if dich.has_key(dic1[key]):
                fOutput2.write('%d '%(dich[dic1[key]]))
            else:
                fOutput2.write('0 ')
            if dica.has_key(dic1[key]):
                fOutput2.write('%d '%(dica[dic1[key]]))
            else:
                fOutput2.write('0 ')
            fOutput2.write('\n')
        if dicr.has_key(dic1[key]):
            if dicr[dic1[key]]>0:
                fOutput.write(key+' ')
                fOutput.write('%.6f \n'%(dicr[dic1[key]]))
    fOutput.close()
    fOutput2.close()
# Kick off the whole pipeline when the module is run.
pageRank()
| [
"harold.jiang@outlook.com"
] | harold.jiang@outlook.com |
a8cbba0de93781000809b93f8575857658d4e504 | f36968a6d3195bdd2c56fd5dc47abb29bd627b0a | /fuzzers/cfctx_dataflow_svf_llc/fuzzer.py | c63a725311e5851cb81826e56454963bd405c328 | [
"Apache-2.0"
] | permissive | threatintel-c/fuzzbench | 1ef7b1da56c856948a0a9d5b2b1728a760660c6f | d620aa9b152ac11314eab2aa64910679398dfbea | refs/heads/master | 2023-07-13T22:11:14.737617 | 2021-08-27T16:15:49 | 2021-08-27T16:15:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration code for AFLplusplus fuzzer."""
import os
from fuzzers.cfctx_basic import fuzzer as aflplusplus_fuzzer
def build():  # pylint: disable=too-many-branches,too-many-statements
    """Build benchmark."""
    # Exported before delegating so the underlying cfctx_basic build can
    # pick them up from the environment — TODO confirm the consumer.
    os.environ["CGC_STRATEGY"] = "dataflow"
    os.environ["CGC_MAXMAP"] = "8388608"  # 8Mb
    aflplusplus_fuzzer.build("pcguard", "cmplog", "dict2file", "no_icp")
def fuzz(input_corpus, output_corpus, target_binary):
    """Run fuzzer."""
    # Delegate directly to the shared AFL++ fuzzing entry point.
    aflplusplus_fuzzer.fuzz(input_corpus, output_corpus, target_binary)
| [
"noreply@github.com"
] | threatintel-c.noreply@github.com |
6b2ebd43502013dff3246b7e195a17343a75b527 | b6dc31c578fbec0d028701a946670a884530a2c2 | /pythonDictionaries_Exercise2.py | 29cdce8b36cac285d5845750e83adf9eb9015264 | [] | no_license | dustinblainemyers/Digital_Crafts_Day5 | a324e6d75d7bd3d28a9ea361d7f209b9ce8c0865 | f29a9afc2dd8292601092d274d9d8ca3990fc1e3 | refs/heads/master | 2020-12-14T03:44:00.243459 | 2020-01-17T20:49:44 | 2020-01-17T20:49:44 | 234,626,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | #python nested dictionaries exercise
ramit = {
    'name': 'Ramit',
    'email': 'ramit@gmail.com',
    'interests': ['movies', 'tennis'],
    'friends': [
        {
            'name': 'Jasmine',
            'email': 'jasmine@yahoo.com',
            'interests': ['photography', 'tennis']
        },
        {
            'name': 'Jan',
            'email': 'jan@hotmail.com',
            'interests': ['movies', 'tv']
        }
    ]
}
# get email address of ramit
print(ramit['email'])  # -> ramit@gmail.com
# write expression that gets first of Ramit's interests
print(ramit['interests'][0])  # -> movies
# write a python expression that gets the email address of Jasmine
print(ramit['friends'][0]['email'])  # -> jasmine@yahoo.com
# write a python expression that gets the second of Jan's two interests.
print(ramit['friends'][1]['interests'][1])  # -> tv
| [
"dustinblaine.myers@gmail.com"
] | dustinblaine.myers@gmail.com |
ad56100aae986b9d39225d2009c1864556132f8f | 5a7a3447d434a458a7bb63f2aa11b64c284d5492 | /test/image_load.py | 834165562d2d100c68a6bd98d20ca2faaea7dd90 | [] | no_license | woshimayi/mypython | 35792e12036a7a05f12d3ef7006637b2b03f0e2e | 7f1eb38e8585bf6d2f21d3ad0f64dace61425875 | refs/heads/master | 2023-09-01T08:59:12.301836 | 2023-08-30T05:30:54 | 2023-08-30T05:30:54 | 130,017,052 | 4 | 0 | null | 2018-12-02T16:18:14 | 2018-04-18T06:50:36 | HTML | UTF-8 | Python | false | false | 622 | py | #coding=utf-8
import urllib
import re
def getHtml(url):
    """Fetch *url* and return the raw response body (Python 2 urllib)."""
    page = urllib.urlopen(url)
    try:
        return page.read()
    finally:
        # Always release the connection, even if read() raises.
        page.close()
def getImg(html):
    """Download every image whose tag matches src="...jpg" pic_ext, saving
    them as 0.jpg, 1.jpg, ... in the current directory (Python 2 urllib).
    Returns None (there is no return statement).
    """
    reg = r'src="(.+?\.jpg)" pic_ext'
    imgre = re.compile(reg)
    imglist = re.findall(imgre,html)
    x = 0
    for imgurl in imglist:
        urllib.urlretrieve(imgurl,'%s.jpg' % x)
        x+=1
# Fetch the search page, then download every matched image; getImg() returns
# None, so this Python 2 print statement just prints "None".
html = getHtml("http://cn.bing.com/images/search?q=%E8%8B%B1%E5%9B%BD%E8%AE%AE%E4%BC%9A%E5%A4%A7%E5%8E%A6%E6%81%90%E6%80%96%E8%A2%AD%E5%87%BB&FORM=ISTRTH&id=F1E1C03F7EB1F290F78351F68318CB06438FD2B9&cat=%E4%BB%8A%E6%97%A5%E7%83%AD%E5%9B%BE&lpversion=")
print getImg(html)
| [
"woshidamayi@Gmail.com"
] | woshidamayi@Gmail.com |
ef9743d94d29deebd30fc55ae31439a2db8b093d | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW06_20210715222321.py | fcf2188b6928a2756355ea80e53ded7f525f6620 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,763 | py | """
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
    """Return the names of all restaurants in *filename* serving *cuisine*."""
    with open(filename, 'r') as fh:
        lines = fh.readlines()
    matches = []
    for pos, line in enumerate(lines):
        if line.strip() == cuisine:
            # The restaurant name sits on the line just above its cuisine.
            matches.append(lines[pos - 1].strip())
    return matches
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
    """Map each cuisine type to the list of restaurant names serving it.

    Records are 4 lines each: name, cuisine, group, blank line.  Cuisines
    are keyed in order of first appearance.  (The original never closed the
    file, shadowed the builtin `dict`, and scanned the file three times.)
    """
    byCuisine = {}
    with open(filename, 'r') as source:
        content = source.readlines()
    for i in range(0, len(content), 4):
        name = content[i].strip()
        cuisine = content[i + 1].strip()
        # setdefault creates the bucket on first sight of a cuisine.
        byCuisine.setdefault(cuisine, []).append(name)
    return byCuisine
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
    """Write a restaurant directory grouped into Fast Food and Sit-down.

    Reads 4-line records (name, cuisine, group, blank) from *filename* and
    writes a numbered "name - cuisine" listing per group to *outputFilename*.
    The final sit-down line carries no trailing newline, matching the
    original output format.  (The original never closed either file, so the
    output was not guaranteed to be flushed.)
    """
    with open(filename, 'r') as source:
        content = source.readlines()
    fastfood = []
    sitdown = []
    for i in range(2, len(content), 4):
        restaurant = content[i - 2].strip()
        cuisine = content[i - 1].strip()
        group = content[i].strip()
        if group == 'Fast Food':
            fastfood.append(str(len(fastfood) + 1) + '. ' + restaurant + ' - ' + cuisine + '\n')
        else:
            sitdown.append(str(len(sitdown) + 1) + '. ' + restaurant + ' - ' + cuisine)
    # `with` guarantees the output file is flushed and closed.
    with open(outputFilename, 'w') as out:
        out.write('Restaurant Directory' + '\n')
        out.write('Fast Food' + '\n')
        out.writelines(fastfood)
        out.write('Sit-down' + '\n')
        out.write('\n'.join(sitdown))
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
    """Return (name, extra pay) tuples for employees working more than *hour* hours.

    The CSV columns are: name, age, wage, year hired, hours worked, with a
    header row.  Extra pay is (hours worked - hour) * hourly wage.
    """
    overtime = []
    with open(filename, 'r') as fh:
        fh.readline()  # skip the header row
        for row in fh:
            if not row.strip():
                continue  # tolerate blank lines (the original crashed on them)
            fields = row.strip().split(',')
            name = fields[0]
            wage = int(fields[2])
            hoursWorked = int(fields[4])
            if hoursWorked > hour:
                overtime.append((name, (hoursWorked - hour) * wage))
    return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
    # NOTE(review): this function is unfinished.  The loop body below appears
    # copied from extraHours(): `hour`, `wage`, `name` and `overtime` are all
    # undefined here (NameError at runtime), `year`/`age`/`yearHired` are
    # never used for the advertised average, nothing is returned, and the
    # file is never closed.
    file = open(filename, 'r')
    header = file.readline()
    content = file.readlines()
    for i in content:
        line = i.strip().split(',')
        age = int(line[1])
        yearHired = line[3]
        hoursWorked = int(line[4])
        if hoursWorked > hour:
            compensation = (hoursWorked - hour) * wage
            overtime.append((name, compensation))
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
# print(findCuisine('restaurants.txt', 'Mexican'))
#print(restaurantFilter('restaurants.txt'))
#print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
c505acbdebad70a8f1c3c18f825107eeff1f405a | a7971eb657d01b9d20373be128273d9d608ed0c6 | /problems/rlpy/Representations/Fourier.py | a0fc3aa7804ed1c4b988a43dce6023f1b08792aa | [] | no_license | yijiawang1/RSBO | 99dda028612ac43d4c9ba4177a9ccbc27d5e7110 | edbdcdce94c839857e04e4129cb7770f63e17336 | refs/heads/master | 2023-04-27T12:29:06.095076 | 2023-04-24T20:21:16 | 2023-04-24T20:21:16 | 238,536,148 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,195 | py | """Fourier representation"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import map
from past.utils import old_div
from .Representation import Representation
from numpy import indices, pi, cos, dot
from numpy.linalg import norm
import numpy
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
class Fourier(Representation):
    """ Fourier representation.
    Represents the value function using a Fourier series of the specified
    order (eg 3rd order, 5th order, etc).
    See Konidaris, Osentoski, and Thomas, "Value Function Approximation in
    Reinforcement Learning using Fourier Basis" (2011).
    http://lis.csail.mit.edu/pubs/konidaris-aaai11a.pdf
    """
    def __init__(self, domain, order=3, scaling=False):
        """
        :param domain: the problem :py:class:`~rlpy.Domains.Domain.Domain` to learn
        :param order: The degree of approximation to use in the Fourier series
            (eg 3rd order, 5th order, etc). See reference paper in class API.
        :param scaling: when True, scale each feature's learning rate by the
            inverse norm of its coefficient vector.
        """
        dims = domain.state_space_dims
        # One row per basis function: every coefficient vector in
        # {0, ..., order-1}^dims, giving order**dims features.
        self.coeffs = indices((order,) * dims).reshape((dims, -1)).T
        self.features_num = self.coeffs.shape[0]
        if scaling:
            coeff_norms = numpy.array(list(map(norm, self.coeffs)))
            coeff_norms[0] = 1.0  # first coefficient vector is all zeros; avoid 1/0
            self.alpha_scale = numpy.tile(old_div(1.0,coeff_norms), (domain.actions_num,))
        else:
            self.alpha_scale = 1.0
        super(Fourier, self).__init__(domain)
    def phi_nonTerminal(self, s):
        # Normalize the state into [0, 1] per dimension, then evaluate
        # cos(pi * c . s_norm) for every coefficient vector c.
        s_min, s_max = self.domain.statespace_limits.T
        norm_state = old_div((s - s_min), (s_max - s_min))
        return cos(pi * dot(self.coeffs, norm_state))
    def featureType(self):
        # Features are real-valued cosines in [-1, 1].
        return float
    def featureLearningRate(self):
        # Per-feature learning-rate scaling (see `scaling` in __init__).
        return self.alpha_scale
| [
"58349385+rlbo@users.noreply.github.com"
] | 58349385+rlbo@users.noreply.github.com |
27b40d5f4d6e34ca94bb8678bc5ab493499da234 | a1431c25ebd62daead742e0120a16253c4cf67ca | /env/bin/rst2pseudoxml.py | f5df7e1eeb081261c7377ab286cc4b34ed5a0fc3 | [] | no_license | KonradMarzec1991/my_MDB | f840cbf495c23272b3e39db68c241219a60d63bd | d77339a4c37a3d7ae21b6d28bd9644ce15130f10 | refs/heads/master | 2022-04-29T10:15:37.109422 | 2019-11-03T20:13:57 | 2019-11-03T20:13:57 | 207,375,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/home/konrad/PycharmProjects/my_mdb/env/bin/python3
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
# Best-effort: switch to the user's default locale. Docutils' generated
# front-end scripts deliberately swallow any failure here (upstream
# boilerplate), so the bare except is intentional.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)
# Run the Docutils publisher front end (reads sys.argv / stdin).
publish_cmdline(description=description)
| [
"konrimarzec@gmail.com"
] | konrimarzec@gmail.com |
cda99f750bc8ce22a58a5fcbe1b2f645c9ea9e7b | 0872b38585605d2a61b6b5f8819724b84850cb38 | /dashboard/admin.py | 869037259ea247c7bd00454fbba28989af86aa5d | [] | no_license | Ceribit/notichart | 90b775f1baa87ec14db22808c843379b834b40db | 2f1429cc4f413201b707080387cbdb1288f22e8b | refs/heads/master | 2020-03-22T04:11:27.687980 | 2018-07-05T05:31:58 | 2018-07-05T05:31:58 | 139,479,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Notes
# Register your models here.
# Expose the Notes model in the Django admin interface.
admin.site.register(Notes)
| [
"ceribit@gmail.com"
] | ceribit@gmail.com |
98e70e75d9edd1588cd18c4c8ded0686dd961158 | 3e3ae24a7cc47a0edc950c3ab5a046a8a889f092 | /selfpowers.py | 4a447e019a8ad2507879e05390ef7ff7a66d62eb | [] | no_license | diolaoyeyele/euler-programs | c5e79d8e2555fb2798ccfc04f654ca1d1e48b11b | 554c76962783114fbc975f09de6dc80e1e04059d | refs/heads/master | 2020-03-18T01:57:04.253683 | 2018-05-20T16:22:46 | 2018-05-20T16:22:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py |
d = []
def series(n):
    """Append the self-powers n**n, (n-1)**(n-1), ..., 1**1 (in that
    order) to the module-level accumulator list ``d``."""
    for k in range(n, 0, -1):
        d.append(k ** k)
# The lines below look like a pasted interactive-session transcript: the
# bare `sum(d)` and the literal 10405071317 are no-op expression
# statements when run as a script (those values were REPL output).
series(10)
sum(d)
10405071317
d = []
series(1000)
print(sum(d))
print(len(str(9110846700)))
| [
"didi.oyeyele@gmail.com"
] | didi.oyeyele@gmail.com |
1a17c0e753532ecf7f5f5d0b99fb308e5ec83ca9 | bdcab42a9124d7a3878a904076170bd4bff7451f | /src/hessian/random_sample_points.py | 44047bd5934ab4c7ec808b9b9c3a87972695717a | [] | no_license | hwang595/data_augmentation_playground | aa30685213083bb271ae56996d8aff831ef975ab | 5b11a5d5c2d9254b5ffa293eebf8e3e6269edd69 | refs/heads/master | 2021-01-25T09:14:30.059368 | 2020-03-01T21:33:06 | 2020-03-01T21:33:06 | 93,801,194 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,705 | py | import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
DIST_ = 20
def rand_point_generator(point_num=None):
    '''
    Generate two linearly separable 2-D point clouds of `point_num`
    points each.

    Label +1 points lie in x < 0, y > 0; label -1 points in x > 0, y < 0,
    both scaled by DIST_.

    :param point_num: number of distinct points to draw per class
    :return: (pos, neg) numpy arrays of shape (point_num, 3); the last
             column is the class label (+1 / -1)
    '''
    pos_data_points = []
    neg_data_points = []
    # BUG FIX: the original compared the pair [x, y] against stored
    # [x, y, label] triples, so its duplicate check could never match.
    # Track seen coordinates separately, and stop appending to a class
    # once it already has point_num points.
    seen_pos = set()
    seen_neg = set()
    while len(pos_data_points) < point_num or len(neg_data_points) < point_num:
        x_pos_ = np.random.randint(low=-1000, high=-100) / float(1000) * DIST_
        y_pos_ = np.random.randint(low=600, high=1400) / float(1000) * DIST_
        x_neg_ = np.random.randint(low=500, high=1500) / float(1000) * DIST_
        y_neg_ = np.random.randint(low=-1000, high=-200) / float(1000) * DIST_
        if len(pos_data_points) < point_num and (x_pos_, y_pos_) not in seen_pos:
            seen_pos.add((x_pos_, y_pos_))
            pos_data_points.append([x_pos_, y_pos_, 1])
        if len(neg_data_points) < point_num and (x_neg_, y_neg_) not in seen_neg:
            seen_neg.add((x_neg_, y_neg_))
            neg_data_points.append([x_neg_, y_neg_, -1])
    return np.array(pos_data_points), np.array(neg_data_points)
def find_point_with_distance(center_point_0=None, center_point_1=None, distance=None):
    """Return the point `distance` away from center_point_0, measured along
    the straight line toward center_point_1."""
    gap = center_point_1 - center_point_0
    unit_direction = gap / float(np.linalg.norm(gap))
    return center_point_0 + distance * unit_direction
def rand_point_generator_high_dim(point_num=None, dim=None, dist=None):
'''
param: point_num: num of data points we want for both pos and neg dataset
param: dim: in what dimension the data points in
param: dist: how far away we want the two data points
'''
np.random.seed(seed=42)
POS_HIGH_ = -200
POS_LOW_ = -1200
NEG_HIGH_ = 1800
NEG_LOW_ = 400
sigma_ = 0.1
pos_data_points = []
neg_data_points = []
pos_labels = []
neg_labels = []
tmp_pos_ = np.zeros(dim)
tmp_neg_ = np.zeros(dim)
# we randomly generate two data points first, then based on them, we further generate more
# data points
for i in range(dim):
tmp_pos_[i] = np.random.randint(low=POS_LOW_, high=POS_HIGH_) / float(1000)
tmp_neg_[i] = np.random.randint(low=NEG_LOW_, high=NEG_HIGH_) / float(1000)
# we generate another center by one center and distance predefined
while len(pos_data_points) < point_num or len(neg_data_points) < point_num:
pos_data_point = np.zeros(dim)
neg_data_point = np.zeros(dim)
for i in range(dim):
pos_data_point[i] = np.random.randint(low=POS_LOW_, high=POS_HIGH_) / float(1000) * dist
neg_data_point[i] = np.random.randint(low=NEG_LOW_, high=NEG_HIGH_) / float(1000) * dist
pos_data_points.append(pos_data_point)
neg_data_points.append(neg_data_point)
pos_labels.append(1)
neg_labels.append(-1)
'''
pos = tmp_pos_
new_neg = find_point_with_distance(tmp_pos_, tmp_neg_, distance=dist)
while len(pos_data_points) < point_num or len(neg_data_points) < point_num:
pos_data_point = np.zeros(dim)
neg_data_point = np.zeros(dim)
for i in range(dim):
pos_data_point[i] = np.random.normal(pos[i], sigma_)
neg_data_point[i] = np.random.normal(new_neg[i], sigma_)
pos_data_points.append(pos_data_point)
neg_data_points.append(neg_data_point)
pos_labels.append(1)
neg_labels.append(-1)
'''
return np.array(pos_data_points), np.array(neg_data_points), np.array(pos_labels), np.array(neg_labels)
def get_transformation(angle=None):
    '''
    Build a 2x2 rotation matrix for `angle` degrees (anti-clockwise).

    :param angle: rotation angle in degrees
    :return: 2x2 numpy.ndarray [[cos, -sin], [sin, cos]]
    '''
    theta = np.radians(angle)
    c, s = np.cos(theta), np.sin(theta)
    # Build the ndarray directly instead of round-tripping through the
    # deprecated numpy.matrix string constructor; the result is identical.
    return np.array([[c, -s], [s, c]])
if __name__ == "__main__":
    # Demo: generate 6-D points, project them to 2-D with PCA and plot.
    # NOTE: the seed here is redundant — rand_point_generator_high_dim
    # re-seeds with 42 internally anyway.
    np.random.seed(seed=42)
    X_pos, X_neg, y_pos, y_neg = rand_point_generator_high_dim(point_num=50, dim=6, dist=0.5)
    X = np.concatenate((X_pos, X_neg), axis=0)
    #plt.show()
    '''
    pca_pos = PCA(n_components=2)
    pca_neg = PCA(n_components=2)
    X_decomp_pos=pca_pos.fit_transform(X_pos)
    X_decomp_neg=pca_neg.fit_transform(X_neg)
    '''
    # Single PCA fitted on both classes together.
    pca = PCA(n_components=2)
    X_decomp = pca.fit_transform(X)
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # ax.scatter(X_pos[:, 0], X_pos[:, 1], X_pos[:, 2], c='r', marker='^')
    # ax.scatter(X_neg[:, 0], X_neg[:, 1], X_neg[:, 2], c='b', marker='s')
    # plt.show()
    #print(X_decomp_pos.shape)
    #print(X_decomp_neg.shape)
    plt.figure(2)
    # NOTE(review): plt.hold() was removed in matplotlib >= 3.0 — this
    # script presumably targets an older matplotlib; confirm before reuse.
    plt.hold(True)
    # First half of the rows are the positive class (red), second half
    # negative (blue), matching the concatenation order above.
    for i in range(X_decomp.shape[0]):
        if i < X_decomp.shape[0] / 2:
            plt.plot(X_decomp[i, 0], X_decomp[i, 1], '^r')
        else:
            plt.plot(X_decomp[i, 0], X_decomp[i, 1], '^b')
    #plt.plot(X_decomp_neg[:, 0], X_decomp_neg[:, 1], 'sb')
    plt.show()
    #print(np.linalg.norm(tmp_pos-new_neg))
    #print(tmp_pos.shape)
    #print(new_neg.shape)
    '''
    pos_data_points, neg_data_points=rand_point_generator(point_num=50)
    dataset = np.concatenate((pos_data_points, neg_data_points), axis=0)
    rotation_matrix = get_transformation(angle=60)
    pos_transformed = np.dot(pos_data_points[:,0:2], rotation_matrix)
    neg_transformed = np.dot(neg_data_points[:,0:2], rotation_matrix)
    fig = plt.figure(1)
    plt.scatter([x[0] for x in pos_data_points], [x[1] for x in pos_data_points], c='r')
    plt.scatter([x[0] for x in neg_data_points], [x[1] for x in neg_data_points], c='b')
    #fig_2 = plt.figure(2)
    plt.scatter([x[0] for x in pos_transformed], [x[1] for x in pos_transformed], c='r', marker='^')
    plt.scatter([x[0] for x in neg_transformed], [x[1] for x in neg_transformed], c='b', marker='^')
    plt.show()
    '''
| [
"hongyiwang@cs.wisc.edu"
] | hongyiwang@cs.wisc.edu |
4891003b91c46f136ba3b75ab7e48fc4bc66ceaf | 8cfc885d0d461a8dac456e819c69d763c6d21169 | /sirepo/modules.py | 3235090615af2b9a8b8f17f97a65613435151fd2 | [
"Apache-2.0"
] | permissive | radiasoft/sirepo | 07763cfed492b2b796394d2e15cab54061cd9a74 | baa21720a1749c49c291f040796da5ee4f2ae8f5 | refs/heads/master | 2023-08-22T07:24:47.736445 | 2023-08-18T20:06:04 | 2023-08-18T20:06:04 | 37,476,480 | 56 | 30 | Apache-2.0 | 2023-09-14T01:56:12 | 2015-06-15T16:16:24 | JavaScript | UTF-8 | Python | false | false | 2,383 | py | # -*- coding: utf-8 -*-
"""initialize modules based on mode
:copyright: Copyright (c) 2022 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
import importlib
def import_and_init(name, want_flask=False):
    """Import and initialize sirepo modules in dependency order for the
    entry-point module `name`, returning the module the caller asked for.

    :param name: fully-qualified entry module (e.g. "sirepo.server")
    :param want_flask: passed through to modules whose init needs it
    :raises AssertionError: when `name` is not a supported entry point
    """
    # `values` doubles as the kwargs source for each init_module call and
    # as a registry of already-initialized modules (keyed by basename).
    values = PKDict(
        want_apis=name != "sirepo.job_supervisor",
        want_flask=want_flask,
    )
    def _base(qual):
        # Basename of a dotted module path ("a.b.c" -> "c").
        return qual.split(".")[-1]
    def _i(to_import, kw):
        # Import `to_import`, call its init_module with the selected
        # values, and record it in the registry.
        pkdc("{}", to_import)
        m = importlib.import_module(to_import)
        m.init_module(**{k: values[k] for k in kw})
        values[_base(to_import)] = m
        return m
    # Modules must protect themselves from duplicate initialization
    # TODO(robnagler) eliminate this by storing global state in modules that is read by _i()
    _i("sirepo.srtime", [])
    _i("sirepo.flask", ["want_flask"])
    _i("sirepo.job", [])
    # Not a real initialization, but needed in values, and actually makes sense to do
    _i("sirepo.simulation_db", ["want_flask"])
    if name != "sirepo.pkcli.job_agent":
        _i("sirepo.auth_db", [])
        _i("sirepo.spa_session", ["want_flask"])
        _i("sirepo.cookie", [])
        _i("sirepo.auth", ["simulation_db"])
    # NOTE(review): this is a substring test, not equality — presumably
    # `==` was intended; confirm before changing.
    if name in "sirepo.pkcli.job_agent":
        return None
    m = _i("sirepo.uri_router", ["want_apis", "simulation_db"])
    if "sirepo.uri_router" == name:
        # Used only by sirepo.server so everything else should already be initialized
        return m
    m = _i("sirepo.uri", ["simulation_db", "uri_router"])
    _i("sirepo.http_request", ["simulation_db"])
    _i("sirepo.reply", ["simulation_db"])
    _i("sirepo.uri", ["simulation_db", "uri_router"])
    _i("sirepo.quest", ["reply", "http_request", "uri_router"])
    if name in ("sirepo.auth", "sirepo.uri", "sirepo.auth_db"):
        return values[_base(name)]
    if "sirepo.job_supervisor" == name:
        # job_supervisor doesn't need job_driver in its init so hack this
        values.job_driver = importlib.import_module("sirepo.job_driver")
        _i("sirepo.job_supervisor", ["job_driver"])
        _i("sirepo.job_driver", ["job_supervisor"])
        return values[_base(name)]
    if "sirepo.server" == name:
        return _i("sirepo.server", [])
    raise AssertionError(f"unsupported module={name}")
| [
"noreply@github.com"
] | radiasoft.noreply@github.com |
dd1298ed530cf7f181af02b10acee55f73549f85 | c444997e83ef7e88f6b617a324e7ac66541c1574 | /Q4/q4.py | 9e25d048cf90bb8af0b7ccc9d1a4bd89cfc029d9 | [] | no_license | YoheiSaito/LogMonitoring | 1386c8122e187874331307f519af7c87925d1c10 | c951055a80fd4dc13472b955f394211b147a05b2 | refs/heads/master | 2023-03-31T01:58:44.004036 | 2021-04-08T11:21:50 | 2021-04-08T11:21:50 | 355,876,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,059 | py | #!/usr/bin/env python3
import sys, os
import argparse
import csv
import math
import re
from datetime import datetime, date
def main():
    """Entry point: parse arguments, aggregate the ping-monitoring log,
    and print failure / high-load reports per IP and per network address.

    A ValueError raised at any stage (bad options or malformed log rows)
    is caught and printed instead of crashing.
    """
    try:
        # 1. Parse the command line and determine the input file name.
        args = parseCommandLine()
        # 2. Read the input file and collect well-formed rows.
        data = parseCSV(args.log_file)
        # 3. Re-aggregate the rows per IP (adding m-sample ping averages).
        data_byIP = accumerateByIP(data, args.m)
        # 4. Derive failure periods from the per-IP data.
        failure_periods = detectFailurePeriods(data_byIP, args.N)
        # 5. Print the failure periods.
        printFailurePeriods(failure_periods, "IP")
        # 6. Derive high-load periods from the per-IP data.
        highload_periods = detectHighLoadPeriods(data_byIP, args.m, args.t)
        # 7. Print the high-load periods.
        printHighLoadPeriods(highload_periods)
        # 8. Re-aggregate the data per network address.
        data_bynetaddress = accumerateByNetAddress(data_byIP)
        # 9. Derive failure periods per network address.
        failure_periods_bynetaddress = detectFailurePeriods(data_bynetaddress, args.N)
        # 10. Print those failure periods.
        printFailurePeriods(failure_periods_bynetaddress, "NetAddress")
    except ValueError as e:
        print(e)
# 1 コマンドライン引数を解析し, 入力ファイル名を決定する.
# argparseライブラリによって, コマンドライン引数を設定する
def parseCommandLine():
    """Define and parse the command-line options.

    Returns the argparse namespace (log_file, N, m, t); raises ValueError
    when m < 1 or t < 0 (argparse itself rejects non-integer values).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("log_file", help="監視結果を保存したカンマ区切形式のログファイル")
    parser.add_argument("--N", type=int, default=1,\
        help="連続してタイムアウトしたときに故障とみなす回数")
    parser.add_argument("--m", type=int, default=1,\
        help="負荷判断するためのping回数")
    parser.add_argument("--t", type=int, default=4294967295,\
        help="負荷判断のしきい値時間[ms]")
    args = parser.parse_args()
    if(args.m < 1):
        raise ValueError("mは1以上の整数値を入力してください")
    if(args.t < 0):
        raise ValueError("tは0以上の整数値を入力してください")
    return args
# 2 決定された入力ファイルを読み込み以下の形式で集計する.
# csvライブラリをつかって, CSVをパースする
# 要素が3以外の行は読み込まない.
def parseCSV(filename):
    """Read the monitoring log as comma-separated text.

    Returns a list of (line_index, [timestamp, address, ping]) tuples;
    rows that do not have exactly three fields are silently dropped.
    """
    with open(filename, newline='') as log:
        rows = csv.reader(log, delimiter=',')
        return [(idx, row) for idx, row in enumerate(rows) if len(row) == 3]
# 3 2で集計されたデータ形式をIPごとに集計し直す.
#
def accumerateByIP(data, m=1):
    """Group parsed log rows by IP address and append moving averages.

    :param data: list of (line_index, [timestamp, address, ping]) tuples
    :param m: window size for the moving ping average
    :return: dict mapping address -> list of
             (datetime, ping_or_None, avg_or_None) sorted by timestamp
    :raises ValueError: via data2datetime / data2ping on malformed rows
    """
    data_byIP = dict()
    for (line, data_strs) in data:
        key = data_strs[1]
        date = data2datetime(data_strs[0], line)
        ping = data2ping(data_strs[2], line)
        data_byIP.setdefault(key, []).append((date, ping))
    for key in data_byIP:
        # Log rows may arrive out of order, so sort by timestamp first.
        records = sorted(data_byIP[key], key=lambda x: x[0])
        # BUG FIX: the original kept iterating the pre-sort list here, so
        # the moving averages were computed on unsorted data and the
        # sorted list was immediately thrown away.
        appended_average = []
        for (i, (date, ping)) in enumerate(records):
            if i < m - 1:
                # Not enough samples yet for a full window.
                appended_average.append((date, ping, None))
                continue
            cnt = 0
            acc = 0
            for j in range(m):
                if records[i - j][1] is not None:
                    cnt += 1
                    acc += records[i - j][1]
            if acc != 0:
                appended_average.append((date, ping, acc / cnt))
            else:
                # Window contained only timeouts (or zero pings).
                appended_average.append((date, ping, None))
        data_byIP[key] = appended_average
    return data_byIP
# logfileのタイムスタンプをdatetimeに変換する
def data2datetime(date_str, line):
    """Parse a YYYYMMDDhhmmss log timestamp into a datetime.

    Raises ValueError carrying the 1-based line number on malformed input.
    """
    try:
        parsed = datetime.strptime(date_str, "%Y%m%d%H%M%S")
    except Exception:
        raise ValueError("タイムスタンプのフォーマットが不正 @line = " + str(line+1))
    return parsed
# logfileのpingをint/Noneに変換する
def data2ping(ping, line):
    """Convert a ping field to int, or None for the timeout marker '-'.

    Raises ValueError (with the line number) for anything else that is
    not an integer.
    """
    if ping == '-':
        return None
    try:
        return int(ping)
    except Exception:
        raise ValueError("ping値が不正 @line = " + str(line))
# IPごとに集計されたログデータから故障期間を求める
def detectFailurePeriods(data_ByIP, continuous_timeout):
    """Compute failure periods for every monitored address.

    Addresses whose record list is empty are skipped entirely (they get
    no key in the result).
    """
    failures = {}
    for addr, records in data_ByIP.items():
        if records:
            failures[addr] = detectFailurePeriod(records, continuous_timeout)
    return failures
# 一つのIPに対して, 故障期間を計算する
def detectFailurePeriod(info_list, continuous_timeout=1):
    """Scan one address's records for runs of timeouts (ping is None).

    A run of at least `continuous_timeout` consecutive timeouts counts as
    one failure period; a run still open at the end of the log has no
    recovery timestamp (reported as ongoing).
    """
    periods = []
    idx = 0
    total = len(info_list)
    while idx < total:
        stamp, ping, _avg = info_list[idx]
        if ping is not None:
            idx += 1
            continue
        # Start of a timeout run; walk forward to the recovery record.
        run_start = idx
        started_at = stamp
        recovered_at = None
        while idx < total:
            stamp, ping, _avg = info_list[idx]
            if ping is not None:
                recovered_at = stamp
                break
            idx += 1
        if idx - run_start >= continuous_timeout:
            periods.append(period_as_string(idx - run_start, started_at, recovered_at))
        idx += 1
    return periods
# 時間差分を文字列に変換する
def deltatime2str(delta):
    """Render a timedelta as a Japanese duration string ending in "間".

    Zero-valued units are omitted; a zero delta renders as just "間".
    """
    secs = delta.seconds % 60
    mins = delta.seconds // 60 % 60
    hours = delta.seconds // 3600
    parts = []
    if delta.days:
        parts.append("{}日".format(delta.days))
    if hours:
        parts.append("{}時間".format(hours))
    if mins:
        parts.append("{}分".format(mins))
    if secs:
        parts.append("{}秒".format(secs))
    return "".join(parts) + "間"
# 故障区間を表示する文字列に変換する.
def period_as_string(n, begin, end):
    """Format one detected period for display.

    :param n: number of consecutive records in the period
    :param begin: datetime the period started
    :param end: datetime it ended, or None when still ongoing
    :return: (n, start_str, end_str, duration_str); duration_str is ""
             for an ongoing period
    """
    fmt = "%Y年%m月%d日%H時%M分%S秒"
    start_str = datetime.strftime(begin, fmt)
    if end is not None:
        end_str = datetime.strftime(end, fmt)
        # The original called deltatime2str() twice here and discarded the
        # first result; once is enough.
        term = deltatime2str(end - begin)
    else:
        end_str = "継続中"
        term = ""
    return (n, start_str, end_str, term)
# 6 IPごとに集計されたログデータから過負荷期間を求める
def detectHighLoadPeriods(data_ByIP, m, t):
    """Compute high-load periods for every monitored address.

    Addresses whose record list is empty are skipped entirely.
    """
    highloads = {}
    for addr, records in data_ByIP.items():
        if records:
            highloads[addr] = detectHighLoadPeriod(records, m, t)
    return highloads
# 一つのIPに対して, 過負荷期間を計算する
def detectHighLoadPeriod(info_list, m, t):
    """Scan one address's records for stretches where the moving-average
    ping is at or above threshold `t` [ms].

    Records whose average is None neither start nor end a stretch; every
    stretch is reported regardless of its length. A stretch still open at
    the end of the log has no end timestamp.
    """
    periods = []
    idx = 0
    total = len(info_list)
    while idx < total:
        stamp, _ping, avg = info_list[idx]
        if avg is None or avg < t:
            idx += 1
            continue
        # Start of a high-load stretch; walk forward to the first record
        # whose average drops back below the threshold.
        run_start = idx
        started_at = stamp
        ended_at = None
        while idx < total:
            stamp, _ping, avg = info_list[idx]
            if avg is not None and avg < t:
                ended_at = stamp
                break
            idx += 1
        periods.append(period_as_string(idx - run_start, started_at, ended_at))
        idx += 1
    return periods
# 5 故障区間の標準出力
def printFailurePeriods(failure_periods, groupName="IP"):
    """Print a failure table: a header row, then each address followed by
    one row per period. Addresses without periods are omitted (the header
    is always printed)."""
    print("{}\t\t故障はじめ\t\t\t故障終わり\t\t\t期間".format(groupName))
    for addr, periods in failure_periods.items():
        if not periods:
            continue
        print(addr)
        for period in periods:
            print("\t\t{}\t{}\t{}".format(period[1], period[2], period[3]))
# 7 過負荷期間の標準出力
def printHighLoadPeriods(highload_periods):
    """Print a high-load table; prints nothing at all (not even the
    header) when the dict is empty."""
    if not highload_periods:
        return
    print("IP\t\t過負荷はじめ\t\t\t過負荷終わり\t\t\t期間")
    for addr, periods in highload_periods.items():
        if not periods:
            continue
        print(addr)
        for period in periods:
            print("\t\t{}\t{}\t{}".format(period[1], period[2], period[3]))
def accumerateByNetAddress(data_ByIP):
    """Merge per-IP record lists into per-network-address lists.

    Records from all IPs in the same network are combined and re-sorted
    by timestamp, since the interleaved logs may be out of order.
    """
    merged = {}
    for addr, records in data_ByIP.items():
        net = getNetAddressFromIP(addr)
        merged.setdefault(net, []).extend(records)
    return {net: sorted(recs, key=lambda rec: rec[0]) for net, recs in merged.items()}
def getNetAddressFromIP(IP):
    """Return the dotted-quad network address of an "A.B.C.D/prefix"
    string (e.g. "192.168.1.130/25" -> "192.168.1.128")."""
    parsed = re.match(r'(\d+)\.(\d+)\.(\d+)\.(\d+)/(\d+)$', IP)
    prefix = int(parsed.group(5))
    octets = [int(parsed.group(k)) for k in range(1, 5)]
    ip_value = (octets[0] << 24) | (octets[1] << 16) | (octets[2] << 8) | octets[3]
    netmask = ((1 << prefix) - 1) << (32 - prefix)
    network = ip_value & netmask
    return "{}.{}.{}.{}".format(
        (network >> 24) & 255,
        (network >> 16) & 255,
        (network >> 8) & 255,
        network & 255)
# Script entry point: parse args, aggregate the log, print the reports.
if __name__ == "__main__":
    main()
| [
"teyo.13324@gmail.com"
] | teyo.13324@gmail.com |
07ad821ad3e0604ebbb247747904fb17811db18e | f6e9cbe9b23f17e47a6305174fdb35ca1aa2a84f | /lib/miband2constants.py | 510dc553d14c8c0bd4703cb36e5313c905c6dc8e | [] | no_license | xoan/PyBand2 | 843d76122b41dacce6a77488061a534da776613c | da93949c6dc135dc30a4e2631769c644868084fc | refs/heads/master | 2020-03-21T22:32:12.789973 | 2018-06-29T09:37:46 | 2018-06-29T09:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,573 | py | UUID_SVC_MIBAND = "0000fee0-0000-1000-8000-00805f9b34fb"
# --- Mi Band 2 GATT service / characteristic UUIDs -------------------------
UUID_SVC_MIBAND2 = "0000fee1-0000-1000-8000-00805f9b34fb"
UUID_CHAR_AUTH = "00000009-0000-3512-2118-0009af100700"
UUID_SVC_ALERT = "00001802-0000-1000-8000-00805f9b34fb"
UUID_CHAR_ALERT = "00002a06-0000-1000-8000-00805f9b34fb"
UUID_SVC_ALERT_NOTIFICATION = "00001811-0000-1000-8000-00805f9b34fb"
UUID_CHAR_NEW_ALERT = "00002a46-0000-1000-8000-00805f9b34fb"
UUID_SVC_HEART_RATE = "0000180d-0000-1000-8000-00805f9b34fb"
UUID_CHAR_HRM_MEASURE = "00002a37-0000-1000-8000-00805f9b34fb"
UUID_CHAR_HRM_CONTROL = "00002a39-0000-1000-8000-00805f9b34fb"
# NOTE(review): BASE_UUID duplicates UUID_SVC_MIBAND2 and
# UUID_SERVICE_HEART_RATE duplicates UUID_SVC_HEART_RATE above —
# presumably two naming generations kept for compatibility.
BASE_UUID = "0000fee1-0000-1000-8000-00805f9b34fb"
UUID_SERVICE_HEART_RATE = "0000180d-0000-1000-8000-00805f9b34fb"
UUID_SERVICE_FIRMWARE_SERVICE = "00001530-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_FIRMWARE = "00001531-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_FIRMWARE_DATA = "00001532-0000-3512-2118-0009af100700"
UUID_UNKNOWN_CHARACTERISTIC0 = "00000000-0000-3512-2118-0009af100700"
UUID_UNKNOWN_CHARACTERISTIC1 = "00000001-0000-3512-2118-0009af100700"
UUID_UNKNOWN_CHARACTERISTIC2 = "00000002-0000-3512-2118-0009af100700"
# Alarms, Display and other configuration.
UUID_CHARACTERISTIC_CURRENT_TIME = "00002a2b-0000-1000-8000-00805f9b34fb"
UUID_CHARACTERISTIC_3_CONFIGURATION = "00000003-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_4_FETCH = "00000004-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_5_ACTIVITY_DATA = "00000005-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_6_BATTERY_INFO = "00000006-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_7_REALTIME_STEPS = "00000007-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_8_USER_SETTINGS = "00000008-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_AUTH = "00000009-0000-3512-2118-0009af100700"
UUID_CHARACTERISTIC_DEVICEEVENT = "00000010-0000-3512-2118-0009af100700"
# --- Alert levels for the immediate-alert characteristic -------------------
ALERT_LEVEL_NONE = 0
ALERT_LEVEL_MESSAGE = 1
ALERT_LEVEL_PHONE_CALL = 2
ALERT_LEVEL_VIBRATE_ONLY = 3
# --- Heart-rate monitor command opcodes ------------------------------------
HRM_COMMAND = 0x15
HRM_MODE_SLEEP = 0x00
HRM_MODE_CONTINUOUS = 0x01
HRM_MODE_ONE_SHOT = 0x02
CCCD_UUID = 0x2902
RESPONSE = 0x10
SUCCESS = 0x01
# --- Activity-data fetch protocol bytes ------------------------------------
COMMAND_ACTIVITY_DATA_START_DATE = 0x01
COMMAND_ACTIVITY_DATA_TYPE_ACTIVTY = 0x01
COMMAND_ACTIVITY_DATA_TYPE_UNKNOWN_2 = 0x02
COMMAND_ACTIVITY_DATA_XXX_DATE = 0x02
COMMAND_ENABLE_HR_SLEEP_MEASUREMENT = b'\x15\x00\x01'
COMMAND_DISABLE_HR_SLEEP_MEASUREMENT = b'\x15\x00\x00'
COMMAND_SET_PERIODIC_HR_MEASUREMENT_INTERVAL = 0x14
# --- Alarm weekday bit flags (bitmask, Monday = bit 0) ---------------------
ALARM_MON = 1
ALARM_TUE = 2
ALARM_WED = 4
ALARM_THU = 8
ALARM_FRI = 16
ALARM_SAT = 32
ALARM_SUN = 64
COMMAND_FIRMWARE_REBOOT = 0x05
| [
"trigork@gmail.com"
] | trigork@gmail.com |
be3034c5966a720e78812520ab4c3d13cd6e0f6f | 3ea89d88f05c5814fd2baa2bb3fc32bec64037a8 | /class/ajax.py | 9c87afc8e39f080a05074dd11bd86e17b5dee231 | [] | no_license | jiuin1104/Happy_Panel | 5e8d7a7666b6bb672c2f51ad652d2f848007ce58 | 757fd6129948589d2b17a67ea1e47d276ccdc5bb | refs/heads/main | 2023-03-04T03:45:22.334924 | 2021-02-23T15:27:02 | 2021-02-23T15:27:02 | 341,596,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,127 | py | #coding: utf-8
# +-------------------------------------------------------------------
# | 宝塔Linux面板
# +-------------------------------------------------------------------
# | Copyright (c) 2015-2016 宝塔软件(http://bt.cn) All rights reserved.
# +-------------------------------------------------------------------
# | Author: hwliang <hwl@bt.cn>
# +-------------------------------------------------------------------
from BTPanel import session,request
import public,os,json,time,apache,psutil
class ajax:
def GetApacheStatus(self,get):
a = apache.apache()
return a.GetApacheStatus()
    def GetProcessCpuPercent(self,i,process_cpu):
        # Accumulate CPU usage for pid `i` into `process_cpu`, keyed by
        # process name (so workers of the same daemon are summed).
        # NOTE(review): when a name is seen for the first time it gets one
        # 0.1s sample and then immediately a second one is added, so the
        # first hit counts two samples — confirm this is intentional.
        try:
            pp = psutil.Process(i)
            if pp.name() not in process_cpu.keys():
                process_cpu[pp.name()] = float(pp.cpu_percent(interval=0.1))
            process_cpu[pp.name()] += float(pp.cpu_percent(interval=0.1))
        except:
            # The process may have exited between listing and sampling.
            pass
    def GetNginxStatus(self,get):
        """Collect nginx load status: worker count/CPU/memory via `ps`
        and psutil, plus the stub_status counters fetched over HTTP.

        Returns a dict of status fields, or a returnMsg error dict when
        nginx is missing or anything fails.
        """
        try:
            if not os.path.exists('/www/server/nginx/sbin/nginx'): return public.returnMsg(False,'未安装nginx')
            process_cpu = {}
            # "ps | wc -l" counts a header/self line, hence the -1.
            worker = int(public.ExecShell("ps aux|grep nginx|grep 'worker process'|wc -l")[0])-1
            # RSS column summed in KB, converted to MB.
            workermen = int(public.ExecShell("ps aux|grep nginx|grep 'worker process'|awk '{memsum+=$6};END {print memsum}'")[0]) / 1024
            for proc in psutil.process_iter():
                if proc.name() == "nginx":
                    self.GetProcessCpuPercent(proc.pid,process_cpu)
            time.sleep(0.1)
            # Fetch the stub_status page (config ensured just below).
            self.CheckStatusConf()
            result = public.httpGet('http://127.0.0.1/nginx_status')
            tmp = result.split()
            data = {}
            # Token offsets differ between nginx builds that expose
            # request_time and those that do not.
            if "request_time" in tmp:
                data['accepts'] = tmp[8]
                data['handled'] = tmp[9]
                data['requests'] = tmp[10]
                data['Reading'] = tmp[13]
                data['Writing'] = tmp[15]
                data['Waiting'] = tmp[17]
            else:
                data['accepts'] = tmp[9]
                data['handled'] = tmp[7]
                data['requests'] = tmp[8]
                data['Reading'] = tmp[11]
                data['Writing'] = tmp[13]
                data['Waiting'] = tmp[15]
            data['active'] = tmp[2]
            data['worker'] = worker
            data['workercpu'] = round(float(process_cpu["nginx"]),2)
            data['workermen'] = "%s%s" % (int(workermen), "MB")
            return data
        except Exception as ex:
            public.WriteLog('信息获取',"Nginx负载状态获取失败: %s" % ex)
            return public.returnMsg(False,'数据获取失败!')
    def GetPHPStatus(self,get):
        # Fetch the load status of the php-fpm pool for get.version via
        # its JSON status endpoint; 'start time' is re-formatted from an
        # epoch value into a human-readable timestamp.
        try:
            version = get.version
            uri = "/phpfpm_"+version+"_status?json"
            result = public.request_php(version,uri,'')
            tmp = json.loads(result)
            fTime = time.localtime(int(tmp['start time']))
            tmp['start time'] = time.strftime('%Y-%m-%d %H:%M:%S',fTime)
            return tmp
        except Exception as ex:
            public.WriteLog('信息获取',"PHP负载状态获取失败: {}".format(public.get_error_info()))
            return public.returnMsg(False,'负载状态获取失败!')
    def CheckStatusConf(self):
        """Ensure an nginx vhost exposing /nginx_status (stub_status) to
        127.0.0.1 exists; writes it and reloads the service if missing.
        No-op when the panel is not running nginx."""
        if public.get_webserver() != 'nginx': return
        filename = session['setupPath'] + '/panel/vhost/nginx/phpfpm_status.conf'
        if os.path.exists(filename):
            # Already contains the status location: nothing to do.
            if public.ReadFile(filename).find('nginx_status')!=-1: return
        conf = '''server {
    listen 80;
    server_name 127.0.0.1;
    allow 127.0.0.1;
    location /nginx_status {
        stub_status on;
        access_log off;
    }
}'''
        public.writeFile(filename,conf)
        public.serviceReload()
    def GetTaskCount(self,get):
        """Return the number of background tasks not yet finished
        (status != '1')."""
        return public.M('tasks').where("status!=?",('1',)).count()
    def GetSoftList(self,get):
        """Build the software list from data/softList.conf, annotating
        each entry with its install status (marker-file check) and any
        matching pending task (task names look like "[Name-Version]...").
        """
        import json,os
        tmp = public.readFile('data/softList.conf')
        data = json.loads(tmp)
        tasks = public.M('tasks').where("status!=?",('1',)).field('status,name').select()
        for i in range(len(data)):
            data[i]['check'] = public.GetConfigValue('root_path')+'/'+data[i]['check']
            for n in range(len(data[i]['versions'])):
                # Match this software/version against the pending tasks.
                isTask = '1'
                for task in tasks:
                    tmp = public.getStrBetween('[',']',task['name'])
                    if not tmp:continue
                    tmp1 = tmp.split('-')
                    if data[i]['name'] == 'PHP':
                        # PHP is versioned per-task; others match by name only.
                        if tmp1[0].lower() == data[i]['name'].lower() and tmp1[1] == data[i]['versions'][n]['version']: isTask = task['status'];
                    else:
                        if tmp1[0].lower() == data[i]['name'].lower(): isTask = task['status']
                # Determine install status via the marker file.
                if data[i]['name'] == 'PHP':
                    data[i]['versions'][n]['task'] = isTask
                    checkFile = data[i]['check'].replace('VERSION',data[i]['versions'][n]['version'].replace('.',''))
                else:
                    data[i]['task'] = isTask
                    version = public.readFile(public.GetConfigValue('root_path')+'/server/'+data[i]['name'].lower()+'/version.pl')
                    if not version:continue
                    if version.find(data[i]['versions'][n]['version']) == -1:continue
                    checkFile = data[i]['check']
                data[i]['versions'][n]['status'] = os.path.exists(checkFile)
        return data
    def GetLibList(self,get):
        """Build the plugin list from data/libList.conf, annotating each
        entry with its install status and the action-link HTML."""
        import json,os
        tmp = public.readFile('data/libList.conf')
        data = json.loads(tmp)
        for i in range(len(data)):
            data[i]['status'] = self.CheckLibInstall(data[i]['check'])
            data[i]['optstr'] = self.GetLibOpt(data[i]['status'], data[i]['opt'])
        return data
def CheckLibInstall(self,checks):
for cFile in checks:
if os.path.exists(cFile): return '已安装'
return '未安装'
#取插件操作选项
def GetLibOpt(self,status,libName):
optStr = ''
if status == '未安装':
optStr = '<a class="link" href="javascript:InstallLib(\''+libName+'\');">安装</a>'
else:
libConfig = '配置'
if(libName == 'beta'): libConfig = '内测资料'
optStr = '<a class="link" href="javascript:SetLibConfig(\''+libName+'\');">'+libConfig+'</a> | <a class="link" href="javascript:UninstallLib(\''+libName+'\');">卸载</a>'
return optStr
#取插件AS
    def GetQiniuAS(self,get):
        """Read the stored access-key settings ("AK|SK|bucket|domain")
        for plugin get.name, creating an empty config file on first use.
        Returns {'AS': [...], 'info': <plugin metadata>}."""
        filename = public.GetConfigValue('setup_path') + '/panel/data/'+get.name+'As.conf'
        if not os.path.exists(filename): public.writeFile(filename,'')
        data = {}
        data['AS'] = public.readFile(filename).split('|')
        data['info'] = self.GetLibInfo(get.name)
        if len(data['AS']) < 3:
            # Not configured yet: present four empty fields to the UI.
            data['AS'] = ['','','','']
        return data
#设置插件AS
    def SetQiniuAS(self,get):
        """Save the access-key settings for plugin get.name (file is
        chmod 600 since it holds secrets), then run the plugin's backup
        script in "list" mode to verify the credentials actually work."""
        info = self.GetLibInfo(get.name)
        filename = public.GetConfigValue('setup_path') + '/panel/data/'+get.name+'As.conf'
        conf = get.access_key.strip() + '|' + get.secret_key.strip() + '|' + get.bucket_name.strip() + '|' + get.bucket_domain.strip()
        public.writeFile(filename,conf)
        public.ExecShell("chmod 600 " + filename)
        result = public.ExecShell(public.get_python_bin() + " " + public.GetConfigValue('setup_path') + "/panel/script/backup_"+get.name+".py list")
        if result[0].find("ERROR:") == -1:
            public.WriteLog("插件管理", "设置插件["+info['name']+"]AS!")
            return public.returnMsg(True, '设置成功!')
        return public.returnMsg(False, 'ERROR: 无法连接到'+info['name']+'服务器,请检查[AK/SK/存储空间]设置是否正确!')
#设置内测
    def SetBeta(self,get):
        """Register the user for the beta program against the vendor API;
        on success the registration record is cached in data/beta.pl as
        "username|qq|email". Returns the API's JSON response as a dict."""
        data = {}
        data['username'] = get.bbs_name
        data['qq'] = get.qq
        data['email'] = get.email
        result = public.httpPost(public.GetConfigValue('home') + '/Api/LinuxBeta',data);
        import json;
        data = json.loads(result)
        if data['status']:
            public.writeFile('data/beta.pl',get.bbs_name + '|' + get.qq + '|' + get.email);
        return data
#取内测资格状态
def GetBetaStatus(self,get):
try:
return public.readFile('data/beta.pl').strip()
except:
return 'False'
#获取指定插件信息
    def GetLibInfo(self,name):
        """Look up one plugin's metadata dict in data/libList.conf by its
        'opt' identifier; returns False when not found."""
        import json
        tmp = public.readFile('data/libList.conf')
        data = json.loads(tmp)
        for lib in data:
            if name == lib['opt']: return lib
        return False
#获取文件列表
    def GetQiniuFileList(self,get):
        """Run the plugin's backup script in "list" mode and return its
        JSON output; any failure (script error, bad JSON) is reported as
        a returnMsg error dict."""
        try:
            import json
            result = public.ExecShell(public.get_python_bin() + " " + public.GetConfigValue('setup_path') + "/panel/script/backup_"+get.name+".py list")
            return json.loads(result[0])
        except:
            return public.returnMsg(False, '获取列表失败,请检查[AK/SK/存储空间]设是否正确!');
#取网络连接列表
    def GetNetWorkList(self,get):
        """Return the current network connections (via psutil), one dict
        per connection with type/family/addresses/status and the owning
        process name, sorted by status descending.

        NOTE(review): netstat.pid can be None for connections psutil
        cannot attribute; psutil.Process(None) would then raise — confirm
        whether callers rely on that never happening.
        """
        import psutil
        netstats = psutil.net_connections()
        networkList = []
        for netstat in netstats:
            tmp = {}
            # SOCK_STREAM (1) => tcp, everything else reported as udp.
            if netstat.type == 1:
                tmp['type'] = 'tcp'
            else:
                tmp['type'] = 'udp'
            tmp['family'] = netstat.family
            tmp['laddr'] = netstat.laddr
            tmp['raddr'] = netstat.raddr
            tmp['status'] = netstat.status
            p = psutil.Process(netstat.pid)
            tmp['process'] = p.name()
            tmp['pid'] = netstat.pid
            networkList.append(tmp)
            del(p)
            del(tmp)
        networkList = sorted(networkList, key=lambda x : x['status'], reverse=True)
        return networkList
#取进程列表
    def GetProcessList(self,get):
        """Return a list of user-visible processes with CPU/memory/IO and
        thread statistics, sorted by CPU time then memory usage (both
        descending). Known system daemons (GoToProcess) and processes
        that disappear mid-scan are skipped."""
        import psutil,pwd
        Pids = psutil.pids()
        processList = []
        for pid in Pids:
            try:
                tmp = {}
                p = psutil.Process(pid)
                if p.exe() == "": continue
                tmp['name'] = p.name(); # process name
                if self.GoToProcess(tmp['name']): continue
                tmp['pid'] = pid; # process id
                tmp['status'] = p.status(); # process state
                tmp['user'] = p.username(); # owning user
                cputimes = p.cpu_times()
                tmp['cpu_percent'] = p.cpu_percent(0.1)
                tmp['cpu_times'] = cputimes.user # CPU time consumed (user)
                tmp['memory_percent'] = round(p.memory_percent(),3) # share of RAM
                pio = p.io_counters()
                tmp['io_write_bytes'] = pio.write_bytes # total bytes written
                tmp['io_read_bytes'] = pio.read_bytes # total bytes read
                tmp['threads'] = p.num_threads() # thread count
                processList.append(tmp)
                del(p)
                del(tmp)
            except:
                # Process exited (or access denied) while sampling: skip it.
                continue
        import operator
        # Two stable sorts: final order is cpu_times desc, ties by memory.
        processList = sorted(processList, key=lambda x : x['memory_percent'], reverse=True)
        processList = sorted(processList, key=lambda x : x['cpu_times'], reverse=True)
        return processList
#结束指定进程
    def KillProcess(self,get):
        """Kill the process with pid get.pid, refusing to kill 'python'
        (which would take down the panel itself); the action is written
        to the operation log."""
        #return public.returnMsg(False,'演示服务器,禁止此操作!');
        import psutil
        p = psutil.Process(int(get.pid))
        name = p.name()
        if name == 'python': return public.returnMsg(False,'KILL_PROCESS_ERR')
        p.kill()
        public.WriteLog('TYPE_PROCESS','KILL_PROCESS',(get.pid,name))
        return public.returnMsg(True,'KILL_PROCESS',(get.pid,name))
def GoToProcess(self,name):
ps = ['sftp-server','login','nm-dispatcher','irqbalance','qmgr','wpa_supplicant','lvmetad','auditd','master','dbus-daemon','tapdisk','sshd','init','ksoftirqd','kworker','kmpathd','kmpath_handlerd','python','kdmflush','bioset','crond','kthreadd','migration','rcu_sched','kjournald','iptables','systemd','network','dhclient','systemd-journald','NetworkManager','systemd-logind','systemd-udevd','polkitd','tuned','rsyslogd']
for key in ps:
if key == name: return True
return False
    def GetNetWorkIo(self,get):
        """Return network I/O samples recorded between get.start and get.end
        (unix timestamps), with 'addtime' formatted for charting by ToAddtime."""
        # Fetch network I/O rows for the requested time window
        data = public.M('network').dbfile('system').where("addtime>=? AND addtime<=?",(get.start,get.end)).field('id,up,down,total_up,total_down,down_packets,up_packets,addtime').order('id asc').select()
        return self.ToAddtime(data)
    def GetDiskIo(self,get):
        """Return disk I/O samples recorded between get.start and get.end
        (unix timestamps), with 'addtime' formatted for charting by ToAddtime."""
        # Fetch disk I/O rows for the requested time window
        data = public.M('diskio').dbfile('system').where("addtime>=? AND addtime<=?",(get.start,get.end)).field('id,read_count,write_count,read_bytes,write_bytes,read_time,write_time,addtime').order('id asc').select()
        return self.ToAddtime(data)
    def GetCpuIo(self,get):
        """Return CPU/memory usage samples recorded between get.start and
        get.end; tomem=True lets ToAddtime rescale raw 'mem' values."""
        # Fetch CPU/memory rows for the requested time window
        data = public.M('cpuio').dbfile('system').where("addtime>=? AND addtime<=?",(get.start,get.end)).field('id,pro,mem,addtime').order('id asc').select()
        return self.ToAddtime(data,True)
    def get_load_average(self,get):
        """Return load-average samples (1/5/15 min) recorded between get.start
        and get.end, with 'addtime' formatted for charting by ToAddtime."""
        data = public.M('load_average').dbfile('system').where("addtime>=? AND addtime<=?",(get.start,get.end)).field('id,pro,one,five,fifteen,addtime').order('id asc').select()
        return self.ToAddtime(data)
def ToAddtime(self,data,tomem = False):
import time
#格式化addtime列
if tomem:
import psutil
mPre = (psutil.virtual_memory().total / 1024 / 1024) / 100
length = len(data)
he = 1
if length > 100: he = 1
if length > 1000: he = 3
if length > 10000: he = 15
if he == 1:
for i in range(length):
data[i]['addtime'] = time.strftime('%m/%d %H:%M',time.localtime(float(data[i]['addtime'])))
if tomem and data[i]['mem'] > 100: data[i]['mem'] = data[i]['mem'] / mPre
return data
else:
count = 0
tmp = []
for value in data:
if count < he:
count += 1
continue
value['addtime'] = time.strftime('%m/%d %H:%M',time.localtime(float(value['addtime'])))
if tomem and value['mem'] > 100: value['mem'] = value['mem'] / mPre
tmp.append(value)
count = 0
return tmp
def GetInstalleds(self,softlist):
softs = ''
for soft in softlist['data']:
try:
for v in soft['versions']:
if v['status']: softs += soft['name'] + '-' + v['version'] + '|'
except:
pass
return softs
#获取SSH爆破次数
def get_ssh_intrusion(self):
fp = open('/var/log/secure','rb')
l = fp.readline()
intrusion_total = 0
while l:
if l.find('Failed password for root') != -1: intrusion_total += 1
l = fp.readline()
fp.close()
return intrusion_total
#申请内测版
def apple_beta(self,get):
try:
userInfo = json.loads(public.ReadFile('data/userInfo.json'))
p_data = {}
p_data['uid'] = userInfo['uid']
p_data['access_key'] = userInfo['access_key']
p_data['username'] = userInfo['username']
result = public.HttpPost(public.GetConfigValue('home') + '/api/panel/apple_beta',p_data,5)
try:
return json.loads(result)
except: return public.returnMsg(False,'AJAX_CONN_ERR')
except: return public.returnMsg(False,'AJAX_USER_BINDING_ERR')
def to_not_beta(self,get):
try:
userInfo = json.loads(public.ReadFile('data/userInfo.json'))
p_data = {}
p_data['uid'] = userInfo['uid']
p_data['access_key'] = userInfo['access_key']
p_data['username'] = userInfo['username']
result = public.HttpPost(public.GetConfigValue('home') + '/api/panel/to_not_beta',p_data,5)
try:
return json.loads(result)
except: return public.returnMsg(False,'AJAX_CONN_ERR')
except: return public.returnMsg(False,'AJAX_USER_BINDING_ERR')
def to_beta(self):
try:
userInfo = json.loads(public.ReadFile('data/userInfo.json'))
p_data = {}
p_data['uid'] = userInfo['uid']
p_data['access_key'] = userInfo['access_key']
p_data['username'] = userInfo['username']
public.HttpPost(public.GetConfigValue('home') + '/api/panel/to_beta',p_data,5)
except: pass
    def get_uid(self):
        """Return the bound bt.cn account uid from data/userInfo.json,
        or 0 when the panel is unbound or the file is unreadable."""
        try:
            userInfo = json.loads(public.ReadFile('data/userInfo.json'))
            return userInfo['uid']
        except: return 0
#获取最新的5条测试版更新日志
def get_beta_logs(self,get):
try:
data = json.loads(public.HttpGet(public.GetConfigValue('home') + '/api/panel/get_beta_logs'))
return data
except:
return public.returnMsg(False,'AJAX_CONN_ERR')
def get_other_info(self):
other = {}
other['ds'] = []
ds = public.M('domain').field('name').select()
for d in ds:
other['ds'].append(d['name'])
return ','.join(other['ds'])
def UpdatePanel(self,get):
try:
if not public.IsRestart(): return public.returnMsg(False,'EXEC_ERR_TASK')
import json
if int(session['config']['status']) == 0:
public.HttpGet(public.GetConfigValue('home')+'/Api/SetupCount?type=Linux')
public.M('config').where("id=?",('1',)).setField('status',1)
#取回远程版本信息
if 'updateInfo' in session and hasattr(get,'check') == False:
updateInfo = session['updateInfo']
else:
logs = public.get_debug_log()
import psutil,system,sys
mem = psutil.virtual_memory()
import panelPlugin
mplugin = panelPlugin.panelPlugin()
mplugin.ROWS = 10000
panelsys = system.system()
data = {}
data['ds'] = '' #self.get_other_info()
data['sites'] = str(public.M('sites').count())
data['ftps'] = str(public.M('ftps').count())
data['databases'] = str(public.M('databases').count())
data['system'] = panelsys.GetSystemVersion() + '|' + str(mem.total / 1024 / 1024) + 'MB|' + str(public.getCpuType()) + '*' + str(psutil.cpu_count()) + '|' + str(public.get_webserver()) + '|' +session['version']
data['system'] += '||'+self.GetInstalleds(mplugin.getPluginList(None))
data['logs'] = logs
data['client'] = request.headers.get('User-Agent')
data['oem'] = ''
data['intrusion'] = 0
data['uid'] = self.get_uid()
#msg = public.getMsg('PANEL_UPDATE_MSG');
data['o'] = ''
filename = '/www/server/panel/data/o.pl'
if os.path.exists(filename): data['o'] = str(public.readFile(filename))
sUrl = public.GetConfigValue('home') + '/api/panel/updateLinux'
updateInfo = json.loads(public.httpPost(sUrl,data))
if not updateInfo: return public.returnMsg(False,"CONNECT_ERR")
#updateInfo['msg'] = msg;
session['updateInfo'] = updateInfo
#检查是否需要升级
if updateInfo['is_beta'] == 1:
if updateInfo['beta']['version'] ==session['version']: return public.returnMsg(False,updateInfo)
else:
if updateInfo['version'] ==session['version']: return public.returnMsg(False,updateInfo)
#是否执行升级程序
if(updateInfo['force'] == True or hasattr(get,'toUpdate') == True or os.path.exists('data/autoUpdate.pl') == True):
if updateInfo['is_beta'] == 1: updateInfo['version'] = updateInfo['beta']['version']
setupPath = public.GetConfigValue('setup_path')
uptype = 'update'
httpUrl = public.get_url()
if httpUrl: updateInfo['downUrl'] = httpUrl + '/install/' + uptype + '/LinuxPanel-' + updateInfo['version'] + '.zip'
public.downloadFile(updateInfo['downUrl'],'panel.zip')
if os.path.getsize('panel.zip') < 1048576: return public.returnMsg(False,"PANEL_UPDATE_ERR_DOWN")
public.ExecShell('unzip -o panel.zip -d ' + setupPath + '/')
import compileall
if os.path.exists('/www/server/panel/runserver.py'): public.ExecShell('rm -f /www/server/panel/*.pyc')
if os.path.exists('/www/server/panel/class/common.py'): public.ExecShell('rm -f /www/server/panel/class/*.pyc')
if os.path.exists('panel.zip'):os.remove("panel.zip")
session['version'] = updateInfo['version']
if 'getCloudPlugin' in session: del(session['getCloudPlugin'])
if updateInfo['is_beta'] == 1: self.to_beta()
public.ExecShell("/etc/init.d/bt start")
public.writeFile('data/restart.pl','True')
return public.returnMsg(True,'PANEL_UPDATE',(updateInfo['version'],))
#输出新版本信息
data = {
'status' : True,
'version': updateInfo['version'],
'updateMsg' : updateInfo['updateMsg']
}
public.ExecShell('rm -rf /www/server/phpinfo/*')
return public.returnMsg(True,updateInfo)
except Exception as ex:
return public.get_error_info()
return public.returnMsg(False,"CONNECT_ERR")
#检查是否安装任何
def CheckInstalled(self,get):
checks = ['nginx','apache','php','pure-ftpd','mysql']
import os
for name in checks:
filename = public.GetConfigValue('root_path') + "/server/" + name
if os.path.exists(filename): return True
return False
#取已安装软件列表
def GetInstalled(self,get):
import system
data = system.system().GetConcifInfo()
return data
#取PHP配置
def GetPHPConfig(self,get):
import re,json
filename = public.GetConfigValue('setup_path') + '/php/' + get.version + '/etc/php.ini'
if public.get_webserver() == 'openlitespeed':
filename = '/usr/local/lsws/lsphp{}/etc/php/{}.{}/litespeed/php.ini'.format(get.version,get.version[0],get.version[1])
if os.path.exists('/etc/redhat-release'):
filename = '/usr/local/lsws/lsphp' + get.version + '/etc/php.ini'
if not os.path.exists(filename): return public.returnMsg(False,'PHP_NOT_EXISTS')
phpini = public.readFile(filename)
data = {}
rep = "disable_functions\s*=\s{0,1}(.*)\n"
tmp = re.search(rep,phpini).groups()
data['disable_functions'] = tmp[0]
rep = "upload_max_filesize\s*=\s*([0-9]+)(M|m|K|k)"
tmp = re.search(rep,phpini).groups()
data['max'] = tmp[0]
rep = u"\n;*\s*cgi\.fix_pathinfo\s*=\s*([0-9]+)\s*\n"
tmp = re.search(rep,phpini).groups()
if tmp[0] == '0':
data['pathinfo'] = False
else:
data['pathinfo'] = True
self.getCloudPHPExt(get)
phplib = json.loads(public.readFile('data/phplib.conf'))
libs = []
tasks = public.M('tasks').where("status!=?",('1',)).field('status,name').select()
phpini_ols = None
for lib in phplib:
lib['task'] = '1'
for task in tasks:
tmp = public.getStrBetween('[',']',task['name'])
if not tmp:continue
tmp1 = tmp.split('-')
if tmp1[0].lower() == lib['name'].lower():
lib['task'] = task['status']
lib['phpversions'] = []
lib['phpversions'].append(tmp1[1])
if public.get_webserver() == 'openlitespeed':
lib['status'] = False
get.php_version = "{}.{}".format(get.version[0],get.version[1])
if not phpini_ols:
phpini_ols = self.php_info(get)['phpinfo']['modules'].lower()
phpini_ols = phpini_ols.split()
for i in phpini_ols:
if lib['check'][:-3].lower() == i :
lib['status'] = True
break
if "ioncube" in lib['check'][:-3].lower() and "ioncube" == i:
lib['status'] = True
break
else:
if phpini.find(lib['check']) == -1:
lib['status'] = False
else:
lib['status'] = True
libs.append(lib)
data['libs'] = libs
return data
#获取PHP扩展
def getCloudPHPExt(self,get):
import json
try:
if 'php_ext' in session: return True
if not session.get('download_url'): session['download_url'] = 'http://download.bt.cn'
download_url = session['download_url'] + '/install/lib/phplib.json'
tstr = public.httpGet(download_url)
data = json.loads(tstr)
if not data: return False
public.writeFile('data/phplib.conf',json.dumps(data))
session['php_ext'] = True
return True
except:
return False
#取PHPINFO信息
def GetPHPInfo(self,get):
if public.get_webserver() == "openlitespeed":
shell_str = "/usr/local/lsws/lsphp{}/bin/php -i".format(get.version)
return public.ExecShell(shell_str)[0]
sPath = '/www/server/phpinfo'
if os.path.exists(sPath):
public.ExecShell("rm -rf " + sPath)
p_file = '/dev/shm/phpinfo.php'
public.writeFile(p_file,'<?php phpinfo(); ?>')
phpinfo = public.request_php(get.version,'/phpinfo.php','/dev/shm')
if os.path.exists(p_file): os.remove(p_file)
return phpinfo.decode()
#清理日志
def delClose(self,get):
if not 'uid' in session: session['uid'] = 1
if session['uid'] != 1: return public.returnMsg(False,'没有权限!')
if 'tmp_login_id' in session:
return public.returnMsg(False,'没有权限!')
public.M('logs').where('id>?',(0,)).delete()
public.WriteLog('TYPE_CONFIG','LOG_CLOSE')
return public.returnMsg(True,'LOG_CLOSE')
def __get_webserver_conffile(self):
webserver = public.get_webserver()
if webserver == 'nginx':
filename = public.GetConfigValue('setup_path') + '/nginx/conf/nginx.conf'
elif webserver == 'openlitespeed':
filename = public.GetConfigValue('setup_path') + "/panel/vhost/openlitespeed/detail/phpmyadmin.conf"
else:
filename = public.GetConfigValue('setup_path') + '/apache/conf/extra/httpd-vhosts.conf'
return filename
# 获取phpmyadmin ssl配置
def get_phpmyadmin_conf(self):
if public.get_webserver() == "nginx":
conf_file = "/www/server/panel/vhost/nginx/phpmyadmin.conf"
rep = r"listen\s*(\d+)"
else:
conf_file = "/www/server/panel/vhost/apache/phpmyadmin.conf"
rep = r"Listen\s*(\d+)"
return {"conf_file":conf_file,"rep":rep}
# 设置phpmyadmin路径
def set_phpmyadmin_session(self):
import re
conf_file = self.get_phpmyadmin_conf()
conf = public.readFile(conf_file["conf_file"])
rep = conf_file["rep"]
if conf:
port = re.search(rep,conf).group(1)
if session['phpmyadminDir']:
path = session['phpmyadminDir'].split("/")[-1]
ip = public.GetHost()
session['phpmyadminDir'] = "https://{}:{}/{}".format(ip, port, path)
# 获取phpmyadmin ssl状态
def get_phpmyadmin_ssl(self,get):
import re
conf_file = self.get_phpmyadmin_conf()
conf = public.readFile(conf_file["conf_file"])
rep = conf_file["rep"]
if conf:
port = re.search(rep, conf).group(1)
return {"status":True,"port":port}
return {"status":False,"port":""}
# 修改php ssl端口
def change_phpmyadmin_ssl_port(self,get):
if public.get_webserver() == "openlitespeed":
return public.returnMsg(False, 'OpenLiteSpeed 目前尚不支持该操作')
import re
try:
port = int(get.port)
if 1 > port > 65535:
return public.returnMsg(False, '端口范围不正确')
except:
return public.returnMsg(False, '端口格式不正确')
for i in ["nginx","apache"]:
file = "/www/server/panel/vhost/{}/phpmyadmin.conf".format(i)
conf = public.readFile(file)
if not conf:
return public.returnMsg(False,"没有找到{}配置文件,请尝试关闭ssl端口设置后再打开".format(i))
rulePort = ['80', '443', '21', '20', '8080', '8081', '8089', '11211', '6379']
if get.port in rulePort:
return public.returnMsg(False, 'AJAX_PHPMYADMIN_PORT_ERR')
if i == "nginx":
if not os.path.exists("/www/server/panel/vhost/apache/phpmyadmin.conf"):
return public.returnMsg(False, "没有找到 apache phpmyadmin ssl 配置文件,请尝试关闭ssl端口设置后再打开")
rep = r"listen\s*([0-9]+)\s*.*;"
oldPort = re.search(rep, conf)
if not oldPort:
return public.returnMsg(False, '没有检测到 nginx phpmyadmin监听的端口,请确认是否手动修改过文件')
oldPort = oldPort.groups()[0]
conf = re.sub(rep, 'listen ' + get.port + ' ssl;', conf)
else:
rep = r"Listen\s*([0-9]+)\s*\n"
oldPort = re.search(rep, conf)
if not oldPort:
return public.returnMsg(False, '没有检测到 apache phpmyadmin监听的端口,请确认是否手动修改过文件')
oldPort = oldPort.groups()[0]
conf = re.sub(rep, "Listen " + get.port + "\n", conf, 1)
rep = r"VirtualHost\s*\*:[0-9]+"
conf = re.sub(rep, "VirtualHost *:" + get.port, conf, 1)
if oldPort == get.port: return public.returnMsg(False, 'SOFT_PHPVERSION_ERR_PORT')
public.writeFile(file, conf)
public.serviceReload()
if i=="apache":
import firewalls
get.ps = public.getMsg('SOFT_PHPVERSION_PS')
fw = firewalls.firewalls()
fw.AddAcceptPort(get)
public.serviceReload()
public.WriteLog('TYPE_SOFT', 'SOFT_PHPMYADMIN_PORT', (get.port,))
get.id = public.M('firewall').where('port=?', (oldPort,)).getField('id')
get.port = oldPort
fw.DelAcceptPort(get)
return public.returnMsg(True, 'SET_PORT_SUCCESS')
# 设置phpmyadmin ssl
def set_phpmyadmin_ssl(self,get):
if public.get_webserver() == "openlitespeed":
return public.returnMsg(False, 'OpenLiteSpeed 目前尚不支持该操作')
if not os.path.exists("/www/server/panel/ssl/certificate.pem"):
return public.returnMsg(False,'面板证书不存在,请申请面板证书后再试')
if get.v == "1":
# nginx配置文件
ssl_conf = """server
{
listen 887 ssl;
server_name phpmyadmin;
index index.html index.htm index.php;
root /www/server/phpmyadmin;
#SSL-START SSL相关配置,请勿删除或修改下一行带注释的404规则
#error_page 404/404.html;
ssl_certificate /www/server/panel/ssl/certificate.pem;
ssl_certificate_key /www/server/panel/ssl/privateKey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:HIGH:!aNULL:!MD5:!RC4:!DHE;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
error_page 497 https://$host$request_uri;
#SSL-END
include enable-php.conf;
location ~ .*\.(gif|jpg|jpeg|png|bmp|swf)$
{
expires 30d;
}
location ~ .*\.(js|css)?$
{
expires 12h;
}
location ~ /\.
{
deny all;
}
access_log /www/wwwlogs/access.log;
}"""
public.writeFile("/www/server/panel/vhost/nginx/phpmyadmin.conf",ssl_conf)
import panelPlugin
get.sName = "phpmyadmin"
v = panelPlugin.panelPlugin().get_soft_find(get)
public.writeFile("/tmp/2",str(v["ext"]["phpversion"]))
# apache配置
ssl_conf = '''Listen 887
<VirtualHost *:887>
ServerAdmin webmaster@example.com
DocumentRoot "/www/server/phpmyadmin"
ServerName 0b842aa5.phpmyadmin
ServerAlias phpmyadmin.com
#ErrorLog "/www/wwwlogs/BT_default_error.log"
#CustomLog "/www/wwwlogs/BT_default_access.log" combined
#SSL
SSLEngine On
SSLCertificateFile /www/server/panel/ssl/certificate.pem
SSLCertificateKeyFile /www/server/panel/ssl/privateKey.pem
SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
SSLProtocol All -SSLv2 -SSLv3
SSLHonorCipherOrder On
#PHP
<FilesMatch \.php$>
SetHandler "proxy:unix:/tmp/php-cgi-{}.sock|fcgi://localhost"
</FilesMatch>
#DENY FILES
<Files ~ (\.user.ini|\.htaccess|\.git|\.svn|\.project|LICENSE|README.md)$>
Order allow,deny
Deny from all
</Files>
#PATH
<Directory "/www/wwwroot/bt.youbadbad.cn/">
SetOutputFilter DEFLATE
Options FollowSymLinks
AllowOverride All
Require all granted
DirectoryIndex index.php index.html index.htm default.php default.html default.htm
</Directory>
</VirtualHost>'''.format(v["ext"]["phpversion"])
public.writeFile("/www/server/panel/vhost/apache/phpmyadmin.conf", ssl_conf)
else:
if os.path.exists("/www/server/panel/vhost/nginx/phpmyadmin.conf"):
os.remove("/www/server/panel/vhost/nginx/phpmyadmin.conf")
if os.path.exists("/www/server/panel/vhost/apache/phpmyadmin.conf"):
os.remove("/www/server/panel/vhost/apache/phpmyadmin.conf")
public.serviceReload()
return public.returnMsg(True, '关闭成功')
public.serviceReload()
return public.returnMsg(True,'开启成功,请手动放行phpmyadmin ssl端口')
#设置PHPMyAdmin
def setPHPMyAdmin(self,get):
import re
#try:
filename = self.__get_webserver_conffile()
conf = public.readFile(filename)
if not conf: return public.returnMsg(False,'ERROR')
if hasattr(get,'port'):
mainPort = public.readFile('data/port.pl').strip()
rulePort = ['80','443','21','20','8080','8081','8089','11211','6379']
oldPort = "888"
if get.port in rulePort:
return public.returnMsg(False,'AJAX_PHPMYADMIN_PORT_ERR')
if public.get_webserver() == 'nginx':
rep = r"listen\s+([0-9]+)\s*;"
oldPort = re.search(rep,conf).groups()[0]
conf = re.sub(rep,'listen ' + get.port + ';\n',conf)
elif public.get_webserver() == 'apache':
rep = r"Listen\s+([0-9]+)\s*\n"
oldPort = re.search(rep,conf).groups()[0]
conf = re.sub(rep,"Listen " + get.port + "\n",conf,1)
rep = r"VirtualHost\s+\*:[0-9]+"
conf = re.sub(rep,"VirtualHost *:" + get.port,conf,1)
else:
filename = '/www/server/panel/vhost/openlitespeed/listen/888.conf'
conf = public.readFile(filename)
reg = r"address\s+\*:(\d+)"
tmp = re.search(reg,conf)
if tmp:
oldPort = tmp.groups(1)
conf = re.sub(reg,"address *:{}".format(get.port),conf)
if oldPort == get.port: return public.returnMsg(False,'SOFT_PHPVERSION_ERR_PORT')
public.writeFile(filename,conf)
import firewalls
get.ps = public.getMsg('SOFT_PHPVERSION_PS')
fw = firewalls.firewalls()
fw.AddAcceptPort(get)
public.serviceReload()
public.WriteLog('TYPE_SOFT','SOFT_PHPMYADMIN_PORT',(get.port,))
get.id = public.M('firewall').where('port=?',(oldPort,)).getField('id')
get.port = oldPort
fw.DelAcceptPort(get)
return public.returnMsg(True,'SET_PORT_SUCCESS')
if hasattr(get,'phpversion'):
if public.get_webserver() == 'nginx':
filename = public.GetConfigValue('setup_path') + '/nginx/conf/enable-php.conf'
conf = public.readFile(filename)
rep = r"php-cgi.*\.sock"
conf = re.sub(rep,'php-cgi-' + get.phpversion + '.sock',conf,1)
elif public.get_webserver() == 'apache':
rep = r"php-cgi.*\.sock"
conf = re.sub(rep,'php-cgi-' + get.phpversion + '.sock',conf,1)
else:
reg = r'/usr/local/lsws/lsphp\d+/bin/lsphp'
conf = re.sub(reg,'/usr/local/lsws/lsphp{}/bin/lsphp'.format(get.phpversion),conf)
public.writeFile(filename,conf)
public.serviceReload()
public.WriteLog('TYPE_SOFT','SOFT_PHPMYADMIN_PHP',(get.phpversion,))
return public.returnMsg(True,'SOFT_PHPVERSION_SET')
if hasattr(get,'password'):
import panelSite
if(get.password == 'close'):
return panelSite.panelSite().CloseHasPwd(get)
else:
return panelSite.panelSite().SetHasPwd(get)
if hasattr(get,'status'):
if conf.find(public.GetConfigValue('setup_path') + '/stop') != -1:
conf = conf.replace(public.GetConfigValue('setup_path') + '/stop',public.GetConfigValue('setup_path') + '/phpmyadmin')
msg = public.getMsg('START')
else:
conf = conf.replace(public.GetConfigValue('setup_path') + '/phpmyadmin',public.GetConfigValue('setup_path') + '/stop')
msg = public.getMsg('STOP')
public.writeFile(filename,conf)
public.serviceReload()
public.WriteLog('TYPE_SOFT','SOFT_PHPMYADMIN_STATUS',(msg,))
return public.returnMsg(True,'SOFT_PHPMYADMIN_STATUS',(msg,))
#except:
#return public.returnMsg(False,'ERROR');
    def ToPunycode(self,get):
        """Convert an IDN domain (get.domain) to its punycode (xn--) form,
        label by label, leaving pure-ASCII labels untouched.

        NOTE(review): this is Python-2 style code -- encoding to bytes then
        calling .split('.') with a str, and .decode('utf-8').encode('punycode'),
        will fail under Python 3. Confirm which interpreter runs this module.
        """
        import re
        get.domain = get.domain.encode('utf8')
        tmp = get.domain.split('.')
        newdomain = ''
        for dkey in tmp:
            # Labels containing non-ASCII bytes need punycode encoding
            match = re.search(u"[\x80-\xff]+",dkey)
            if not match:
                newdomain += dkey + '.'
            else:
                newdomain += 'xn--' + dkey.decode('utf-8').encode('punycode') + '.'
        return newdomain[0:-1]
#保存PHP排序
def phpSort(self,get):
if public.writeFile('/www/server/php/sort.pl',get.ssort): return public.returnMsg(True,'SUCCESS')
return public.returnMsg(False,'ERROR')
#获取广告代码
def GetAd(self,get):
try:
return public.HttpGet(public.GetConfigValue('home') + '/Api/GetAD?name='+get.name + '&soc=' + get.soc)
except:
return ''
#获取进度
def GetSpeed(self,get):
return public.getSpeed()
#检查登陆状态
def CheckLogin(self,get):
return True
#获取警告标识
def GetWarning(self,get):
warningFile = 'data/warning.json'
if not os.path.exists(warningFile): return public.returnMsg(False,'AJAX_WARNING_ERR')
import json,time;
wlist = json.loads(public.readFile(warningFile))
wlist['time'] = int(time.time())
return wlist
#设置警告标识
def SetWarning(self,get):
wlist = self.GetWarning(get)
id = int(get.id)
import time,json;
for i in xrange(len(wlist['data'])):
if wlist['data'][i]['id'] == id:
wlist['data'][i]['ignore_count'] += 1
wlist['data'][i]['ignore_time'] = int(time.time())
warningFile = 'data/warning.json'
public.writeFile(warningFile,json.dumps(wlist))
return public.returnMsg(True,'SET_SUCCESS')
#获取memcached状态
def GetMemcachedStatus(self,get):
import telnetlib,re;
conf = public.readFile('/etc/init.d/memcached')
result = {}
result['bind'] = re.search('IP=(.+)',conf).groups()[0]
result['port'] = int(re.search('PORT=(\d+)',conf).groups()[0])
result['maxconn'] = int(re.search('MAXCONN=(\d+)',conf).groups()[0])
result['cachesize'] = int(re.search('CACHESIZE=(\d+)',conf).groups()[0])
tn = telnetlib.Telnet(result['bind'],result['port'])
tn.write(b"stats\n")
tn.write(b"quit\n")
data = tn.read_all()
if type(data) == bytes: data = data.decode('utf-8')
data = data.replace('STAT','').replace('END','').split("\n")
res = ['cmd_get','get_hits','get_misses','limit_maxbytes','curr_items','bytes','evictions','limit_maxbytes','bytes_written','bytes_read','curr_connections'];
for d in data:
if len(d)<3: continue
t = d.split()
if not t[0] in res: continue
result[t[0]] = int(t[1])
result['hit'] = 1
if result['get_hits'] > 0 and result['cmd_get'] > 0:
result['hit'] = float(result['get_hits']) / float(result['cmd_get']) * 100
return result
#设置memcached缓存大小
def SetMemcachedCache(self,get):
import re
confFile = '/etc/init.d/memcached'
conf = public.readFile(confFile)
conf = re.sub('IP=.+','IP='+get.ip,conf)
conf = re.sub('PORT=\d+','PORT='+get.port,conf)
conf = re.sub('MAXCONN=\d+','MAXCONN='+get.maxconn,conf)
conf = re.sub('CACHESIZE=\d+','CACHESIZE='+get.cachesize,conf)
public.writeFile(confFile,conf)
public.ExecShell(confFile + ' reload')
return public.returnMsg(True,'SET_SUCCESS')
#取redis状态
def GetRedisStatus(self,get):
import re
c = public.readFile('/www/server/redis/redis.conf')
port = re.findall('\n\s*port\s+(\d+)',c)[0]
password = re.findall('\n\s*requirepass\s+(.+)',c)
if password:
password = ' -a ' + password[0]
else:
password = ''
data = public.ExecShell('/www/server/redis/src/redis-cli -p ' + port + password + ' info')[0];
res = [
'tcp_port',
'uptime_in_days', #已运行天数
'connected_clients', #连接的客户端数量
'used_memory', #Redis已分配的内存总量
'used_memory_rss', #Redis占用的系统内存总量
'used_memory_peak', #Redis所用内存的高峰值
'mem_fragmentation_ratio', #内存碎片比率
'total_connections_received',#运行以来连接过的客户端的总数量
'total_commands_processed', #运行以来执行过的命令的总数量
'instantaneous_ops_per_sec', #服务器每秒钟执行的命令数量
'keyspace_hits', #查找数据库键成功的次数
'keyspace_misses', #查找数据库键失败的次数
'latest_fork_usec' #最近一次 fork() 操作耗费的毫秒数
]
data = data.split("\n")
result = {}
for d in data:
if len(d)<3: continue
t = d.strip().split(':')
if not t[0] in res: continue
result[t[0]] = t[1]
return result
#取PHP-FPM日志
def GetFpmLogs(self,get):
import re
fpm_path = '/www/server/php/' + get.version + '/etc/php-fpm.conf'
if not os.path.exists(fpm_path): return public.returnMsg(False,'AJAX_LOG_FILR_NOT_EXISTS')
fpm_conf = public.readFile(fpm_path)
log_tmp = re.findall(r"error_log\s*=\s*(.+)",fpm_conf)
if not log_tmp: return public.returnMsg(False,'AJAX_LOG_FILR_NOT_EXISTS')
log_file = log_tmp[0].strip()
if log_file.find('var/log') == 0:
log_file = '/www/server/php/' +get.version + '/'+ log_file
return public.returnMsg(True,public.GetNumLines(log_file,1000))
#取PHP慢日志
def GetFpmSlowLogs(self,get):
import re
fpm_path = '/www/server/php/' + get.version + '/etc/php-fpm.conf'
if not os.path.exists(fpm_path): return public.returnMsg(False,'AJAX_LOG_FILR_NOT_EXISTS')
fpm_conf = public.readFile(fpm_path)
log_tmp = re.findall(r"slowlog\s*=\s*(.+)",fpm_conf)
if not log_tmp: return public.returnMsg(False,'AJAX_LOG_FILR_NOT_EXISTS')
log_file = log_tmp[0].strip()
if log_file.find('var/log') == 0:
log_file = '/www/server/php/' +get.version + '/'+ log_file
return public.returnMsg(True,public.GetNumLines(log_file,1000))
#取指定日志
def GetOpeLogs(self,get):
if not os.path.exists(get.path): return public.returnMsg(False,'AJAX_LOG_FILR_NOT_EXISTS')
return public.returnMsg(True,public.GetNumLines(get.path,1000))
#检查用户绑定是否正确
def check_user_auth(self,get):
m_key = 'check_user_auth'
if m_key in session: return session[m_key]
u_path = 'data/userInfo.json'
try:
userInfo = json.loads(public.ReadFile(u_path))
except:
if os.path.exists(u_path): os.remove(u_path)
return public.returnMsg(False,'宝塔帐户绑定已失效,请在[设置]页面重新绑定!')
pdata = {'access_key':userInfo['access_key'],'secret_key':userInfo['secret_key']}
result = public.HttpPost(public.GetConfigValue('home') + '/api/panel/check_auth_key',pdata,3)
if result == '0':
if os.path.exists(u_path): os.remove(u_path)
return public.returnMsg(False,'宝塔帐户绑定已失效,请在[设置]页面重新绑定!')
if result == '1':
session[m_key] = public.returnMsg(True,'绑定有效!')
return session[m_key]
return public.returnMsg(True,result)
#PHP探针
def php_info(self,args):
php_version = args.php_version.replace('.','')
php_path = '/www/server/php/'
if public.get_webserver() == 'openlitespeed':
php_path = '/usr/local/lsws/lsphp'
php_bin = php_path + php_version + '/bin/php'
php_ini = php_path + php_version + '/etc/php.ini'
if not os.path.exists('/etc/redhat-release'):
php_ini = php_path + php_version + '/etc/php/'+args.php_version+'/litespeed/php.ini'
tmp = public.ExecShell(php_bin + ' /www/server/panel/class/php_info.php')[0]
result = json.loads(tmp)
result['phpinfo'] = {}
result['phpinfo']['php_version'] = result['php_version']
result['phpinfo']['php_path'] = php_path
result['phpinfo']['php_bin'] = php_bin
result['phpinfo']['php_ini'] = php_ini
result['phpinfo']['modules'] = ' '.join(result['modules'])
result['phpinfo']['ini'] = result['ini']
result['phpinfo']['keys'] = { "1cache": "缓存器", "2crypt": "加密解密库", "0db": "数据库驱动", "4network": "网络通信库", "5io_string": "文件和字符串处理库", "3photo":"图片处理库","6other":"其它第三方库"}
del(result['php_version'])
del(result['modules'])
del(result['ini'])
return result
#取指定行
def get_lines(self,args):
if not os.path.exists(args.filename): return public.returnMsg(False,'指定日志文件不存在!')
s_body = public.ExecShell("tail -n {} {}".format(args.num,args.filename))[0]
return public.returnMsg(True,s_body)
| [
"colin0055@foxmail.com"
] | colin0055@foxmail.com |
327f91a56b1e16ae43e885378f571cc26fc8b87c | 2c3ad939c59c347a0d282fd5011cf6eb4309cbae | /Neues Textdokument.py | 9fd931bdf5682dced31e64e64bb15c7280be6f21 | [] | no_license | msleibi/Servers-Authorization-and-CRUD | 9bc10a3e5f0b2abe6d3061c91ab6906a9527f909 | 8cc883a92ea93f5c090848e90440008da1dad851 | refs/heads/master | 2020-06-04T02:52:46.949236 | 2019-07-31T19:22:49 | 2019-07-31T19:22:49 | 191,842,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,694 | py | from flask import Flask, render_template, request, redirect, url_for, flash
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
app = Flask(__name__)
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
@app.route('/restaurants/<int:restaurant_id>/menu')
def restaurantMenu(restaurant_id):
    """Render the menu page for one restaurant, listing all its menu items.

    NOTE(review): the bare '/' route supplies no restaurant_id, so visiting
    '/' will fail with a missing-argument error -- consider a default id.
    """
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id)
    return render_template(
        'menu.html', restaurant=restaurant, items=items, restaurant_id=restaurant_id)
@app.route('/restaurants/<int:restaurant_id>/new', methods=['GET', 'POST'])
def newMenuItem(restaurant_id):
    """Create a new menu item for a restaurant.

    GET renders the creation form; POST persists the submitted item and
    redirects back to the restaurant's menu with a flash message.
    """
    if request.method == 'POST':
        newItem = MenuItem(name=request.form['name'], description=request.form[
            'description'], price=request.form['price'], course=request.form['course'], restaurant_id=restaurant_id)
        session.add(newItem)
        session.commit()
        flash("new menu item created!")
        return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
    else:
        return render_template('newmenuitem.html', restaurant_id=restaurant_id)
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit',
           methods=['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
    """Edit an existing menu item.

    GET renders the edit form; POST applies the submitted changes and
    redirects back to the restaurant's menu.

    NOTE(review): only the 'name' field is persisted; any description/price/
    course edits from the form are ignored -- confirm this is intended.
    """
    editedItem = session.query(MenuItem).filter_by(id=menu_id).one()
    if request.method == 'POST':
        if request.form['name']:
            editedItem.name = request.form['name']
        session.add(editedItem)
        session.commit()
        flash("Menu Item has been edited")
        return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
    else:
        return render_template(
            'editmenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=editedItem)
# DELETE MENU ITEM SOLUTION
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete',
           methods=['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
    """Delete a menu item after confirmation.

    GET renders a confirmation page; POST removes the item from the database
    and redirects back to the restaurant's menu.
    """
    itemToDelete = session.query(MenuItem).filter_by(id=menu_id).one()
    if request.method == 'POST':
        session.delete(itemToDelete)
        session.commit()
        flash("Menu Item has been deleted")
        return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
    else:
        return render_template('deleteconfirmation.html', item=itemToDelete)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000) | [
"msalibi83@gmail.com"
] | msalibi83@gmail.com |
475f56cddea7da88cb0c4c18cc4e1649dc2a16ba | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_8/models/keytab_file_base64.py | e7f84a50b47b69c9b000c62ecaa4a1e8b70b635e | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 2,912 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.8, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_8 import models
class KeytabFileBase64(object):
    """Model for the base64-encoded keytab-file endpoint, generated from the
    FlashBlade 2.8 OpenAPI spec. This model declares no attributes of its own.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Intentionally empty: this model has no declared attributes.
    swagger_types = {
    }
    attribute_map = {
    }
    required_args = {
    }
    def __init__(
        self,
    ):
        """
        Keyword args: none -- the model declares no attributes.
        """
    def __setattr__(self, key, value):
        # Reject any attribute not declared in attribute_map
        # (i.e. everything, since this model declares none).
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `KeytabFileBase64`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Unset Property placeholders read back as None.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            return None
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(KeytabFileBase64, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, KeytabFileBase64):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
54aa25a623bcd141ceb60503e4862c6560334415 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_340/ch79_2020_04_08_17_16_37_430613.py | 3692e00bb61b220fb835ac8e529d71a5ac2851ad | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | def monta_dicionario(lista1, lista2):
dicionario={}
for i in range(len(lista1)):
dicionario[lista1[i]]=lista2[i]
| [
"you@example.com"
] | you@example.com |
63709722f02bfa86517a0dc1d5711143b4ff4af5 | 222108085d7c2bcd758a18efa405bcd8cd98f8b7 | /server/code/seed/stages.py | 1176fe1a6c366625a7bbc5b57788af0acd0565e4 | [] | no_license | mattdillabough/cnatra-lms | 13a201b078d30d789dc99e01b78c72e67ca671dc | 045d87e2ab7119348e4671dd3a8974815a9d3713 | refs/heads/master | 2023-04-19T17:58:42.602716 | 2021-04-23T18:49:51 | 2021-04-23T18:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # python imports
# project imports
from models.stages import StageModel
# records to insert
stages = [
{
'stage': 'N',
'stage_name': 'Navigation'
}
]
# function to insert
def insert_stages():
# delete records (if any)
StageModel.objects().delete()
# loop through records
for stage in stages:
# create record
stage_model = StageModel(**stage)
# insert record
stage_model.save() | [
"aneesh.kodali@gmail.com"
] | aneesh.kodali@gmail.com |
2a93de28f79bb717067bf87f3759571d9769cc98 | 7584372f28ce3b7337de570277ac743f047a0095 | /crawler/utils/dockerevent.py | 96083be033e50bbbb82d2d4cf7c476d28af22dfb | [
"Apache-2.0"
] | permissive | maheshbabugorantla/agentless-system-crawler | c87f35830024db3e274ad9340fbe1e4f31921cf7 | 376cc211b0037f4e2a9cc7df39127325dae4e39e | refs/heads/master | 2020-09-13T07:56:35.664099 | 2019-07-15T20:16:32 | 2019-07-15T20:16:32 | 222,702,730 | 0 | 0 | Apache-2.0 | 2019-11-19T13:28:49 | 2019-11-19T13:28:48 | null | UTF-8 | Python | false | false | 469 | py | """
Docker container event object
"""
class DockerContainerEvent(object):
def __init__(self, contId, imgId, event, etime):
self.contId = contId
self.imgId = imgId
self.event = event
self.eventTime = etime
def get_containerid(self):
return self.contId
def get_imgageid(self):
return self.imgId
def get_event(self):
return self.event
def get_eventTime(self):
return self.eventTime
| [
"nadgowda@us.ibm.com"
] | nadgowda@us.ibm.com |
3c87dcbcbbeaa1e6728588cac549519c87b9defb | 195fa04f47a56ae17b98223c7085e93f7b3ce167 | /transfer/买家OCR数据导入/asyncExport.py | 320b11d9d0f04e1fc1d68c85aab07f285c695ab6 | [] | no_license | huanghuizhou/pyTest | d2d0e4ecd242f116f8d2dc3691c3d27c9c7d26fd | 65ea660874edb7e02b5c34f00e072fc8d519c3cd | refs/heads/master | 2022-08-12T01:15:07.778255 | 2019-07-31T04:03:19 | 2019-07-31T04:03:19 | 149,726,497 | 0 | 0 | null | 2022-07-29T22:34:01 | 2018-09-21T07:27:36 | Python | UTF-8 | Python | false | false | 7,470 | py | #!/usr/bin/env python3
# coding=utf-8
import json
import logging
import os
import sys
import threading
import time
from concurrent import CountDownLatch
from queue import Queue, Empty
import pymysql
from dns import resolver
from pymysql.connections import Connection
# MYSQL_HOST = '192.168.2.203'
# MYSQL_PORT = 3306
# MYSQL_USER = 'gt_user'
# MYSQL_PASSWD = 'greatTao1314!@#$'
# stage
# MYSQL_HOST = '192.168.2.203'
# MYSQL_PORT = 3308
# MYSQL_USER = 'greattao'
# MYSQL_PASSWD = 'greatTao.5877'
#
# dev
# MYSQL_HOST = '192.168.2.203'
# MYSQL_USER = 'greatTao'
# MYSQL_PASSWD = 'greatTao.1314'
# MYSQL_PORT = 3306
MYSQL_HOST = 'localhost'
MYSQL_USER = 'root'
MYSQL_PASSWD = '123456'
MYSQL_PORT = 3306
#################################################################################
WORKER_COUNT = os.cpu_count() * 16
input_queue = Queue(WORKER_COUNT)
# output_queue = Queue(WORKER_COUNT)
count_lock = threading.Lock()
done_thread_count = 0
data_input_done = False
# domains has MX record
# with open('domain.txt') as f:
# mx_domains = set((x.strip() for x in f.readlines()))
def get_logger(name):
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
# Standard output handler
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter('%(levelname)s - %(name)s:%(lineno)s: %(message)s'))
log.addHandler(sh)
return log
logger = get_logger(__file__)
def new_mysql_conn() -> Connection:
return pymysql.connect(
host=MYSQL_HOST,
port=MYSQL_PORT,
user=MYSQL_USER,
password=MYSQL_PASSWD,
db='gttown_crm',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
def work(latch: CountDownLatch):
global done_thread_count
conn = new_mysql_conn()
count = 0
while True:
while True:
try:
data = input_queue.get(timeout=5)
except Empty:
if data_input_done:
with count_lock:
done_thread_count += 1
conn.commit()
conn.close()
latch.count_down()
print('[worker thread', threading.current_thread().name, 'done]')
return
else:
continue
else:
break
try:
insertMysql(conn, data)
count += 1
if count % 50 == 0:
conn.commit()
print("commit count is", count)
except Exception as e:
print(data, e, file=sys.stderr)
continue
# output_queue.put(ret)
def main():
global data_input_done
latch = CountDownLatch(WORKER_COUNT)
threads = [threading.Thread(target=work, args=(latch,), name='worker' + str(i), daemon=True) for i in
range(WORKER_COUNT)]
for thread in threads:
thread.start()
fileName = './data.json'
dataOpen = open(fileName)
dataList = json.loads(dataOpen.read())
for data in dataList:
input_queue.put(data)
data_input_done = True
# waiting for thread done
latch.wait()
def insertMysql(db, data):
company = ''
if 'company' in data:
company = data['company']
if (company.strip() == ''):
return
address = ''
if 'address' in data:
address = data['address']
country = ''
if 'country' in data:
country = data['country']
tel = ''
if 'tel' in data:
tel = data['tel']
if (len(str(tel)) > 20):
tel = tel[0:20]
fax = ''
if 'fax' in data:
fax = data['fax']
email = ''
if 'email' in data:
email = data['email']
email_status = 0
if (email != ''):
if (is_valid_email(email)):
email_status = 1
else:
email_status = 2
website = ''
if 'website' in data:
website = data['website']
if (len(str(website)) > 50 or website == 'N.A.' or website == 'n/a'):
website = ''
contact = '-'
if 'contact' in data:
contact = data['contact']
if contact == 'N.A.':
contact = '-'
position = ''
if 'position' in data:
position = data['position']
products = ''
if 'products' in data:
products = data['products']
skype = ''
if 'skype' in data:
skype = data['skype']
requirement_remark = ''
if 'requirement_remark' in data:
requirement_remark = data['requirement_remark']
industry = ''
if 'industry' in data:
industry = data['industry']
extra_data = ''
if 'extra_data' in data:
extra_data = json.dumps(data['extra_data'])
sql1 = """INSERT INTO channel_customer (company,country,address,company_website,contact_name,requirement_remark,extra_data,origin,last_update_time,audit_status,distribute_status,account_distribute_status,status,type,role,industry) value(%s,%s,%s,%s,%s,%s,%s,4,now(),2,1,1,1,1,1,%s)"""
sql2 = """INSERT INTO channel_contact (customer_id,email,fax,telephone_number,position,email_status,skype) value(%s,%s,%s,%s,%s,%s,%s)"""
sql3 = """INSERT INTO channel_product (customer_id,product_content) value(%s,%s)"""
try:
with db.cursor() as cursor:
cursor.execute(sql1,
(company, getCountry(country), address, website, contact, requirement_remark, extra_data,
industry))
id = cursor.lastrowid
cursor.execute(sql2,
(id, email, fax, tel, position, email_status, skype))
for product in products:
cursor.execute(sql3, (id, product))
print(id, 'inserted')
except Exception as e:
logger.warning('Failed to insert %s' % data, e)
time.sleep(2)
def is_valid_email(email: str) -> bool:
try:
# 查询DNS MX记录
resolver.query(email.split('@')[-1], 'MX')
# return True
except Exception:
return False
else:
return True
dataOpen = open('./country.json')
dataList = json.loads(dataOpen.read())
countryDict = {}
for data in dataList:
countryDict[data['Name_zh']] = data['_id']
countryEnDict = {}
for data in dataList:
countryEnDict[data['Name_en']] = data['_id']
def getCountry(country):
if country in countryEnDict:
return countryEnDict[country]
if country.replace(' ', '').find('香港') != -1 or country.find('Hong Kong') != -1:
return "HK"
if country.replace(' ', '').find('澳门') != -1:
return "MO"
if country.replace(' ', '').find('台湾') != -1 or country.find('Taiwan') != -1:
return "TW"
if is_ustr(country.replace(' ', '')) in countryDict:
return countryDict[is_ustr(country)]
return ''
def is_ustr(in_str):
out_str = ''
for i in range(len(in_str)):
if is_uchar(in_str[i]):
out_str = out_str + in_str[i]
else:
out_str = out_str + ''
return out_str
# 去除中文外字符
def is_uchar(uchar):
"""判断一个unicode是否是汉字"""
if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
return True
return False
if __name__ == '__main__':
print(time.localtime())
main()
# conn = new_mysql_conn()
# update_customer(conn, 'Costco Wholesale Corporation', ['jsinegal@costco.com'])
| [
"huanghuizhou@egtcp.com"
] | huanghuizhou@egtcp.com |
bc7a7b4bbd1891c00f0473aa1e114615174c94d0 | b364d4e4c2d66821c4256add68e8e964ba405ac0 | /gdp1/EscapeFromTheProgrammingLab/src/constants.py | 75f9b894de9115034b4db2fbc63b55e8b7eca81c | [] | no_license | brendanv7/VanAllen-GameDesign | bf318ca05308eacb9444a6282147b4326b225733 | 11ff0ba4dca198cfee5027f1934719dd69e23358 | refs/heads/master | 2023-03-21T05:05:48.822632 | 2021-03-11T17:49:29 | 2021-03-11T17:49:29 | 346,785,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
LIGHT_GREY = (190, 190, 190)
DARK_GREY = (100, 100, 100)
TAN = (230, 210, 180)
LIGHT_BLUE = (150, 185, 225)
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 721
| [
"brendan.vanallen1@marist.edu"
] | brendan.vanallen1@marist.edu |
ff93f81a89b9f25fa80f463b60f894e744aea0dd | 69a36ca23409b994a31759bad58971b197cad236 | /config/settings.py | 3445021fab5cfbc2c6ca87cdbd98f719463686c2 | [] | no_license | matt700395/awesome_repo | 56601cf817106df0e210e78c7bb1f11af1e60c3a | d49b1e55b6ade24f1f2058319ac6859b45e511bc | refs/heads/master | 2023-08-27T16:45:15.570115 | 2021-11-11T14:16:06 | 2021-11-11T14:16:06 | 427,026,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,346 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "cc)*5=(s+i2-&9x7&&&o+y7$g5!db3tvu85ykok#mwxf#6gir2"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
PROJECT_APPS = [
"core.apps.CoreConfig",
"users.apps.UsersConfig",
"rooms.apps.RoomsConfig",
]
THIRD_PARTY_APPS = []
INSTALLED_APPS = DJANGO_APPS + PROJECT_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(BASE_DIR, "uploads")
MEDIA_URL = "/media/"
# Auth
AUTH_USER_MODEL = "users.User"
| [
"matt7003@korea.ac.kr"
] | matt7003@korea.ac.kr |
5f9961456622b0f6d96037bb0a22309da6be1c47 | 957fda191066427f6e9ed3341e097b44bd9c9622 | /src/zzsn2021/utils/rundir.py | cee50dbaf550492b0b7a5acbd949e37a38288f3b | [
"MIT"
] | permissive | uuukaaasz/template | a9839fd5ca0f5445372bd1d6d5714e5712b7680c | 0cea7744701bb702d563acbf0cbd9ecf371f45bf | refs/heads/main | 2023-04-15T09:52:13.745674 | 2021-04-22T17:18:07 | 2021-04-22T17:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | from __future__ import annotations
import os
from datetime import datetime
import coolname # type: ignore
from dotenv import load_dotenv
load_dotenv()
def setup_rundir():
"""
Create a working directory with a randomly generated run name.
"""
date = datetime.now().strftime("%Y%m%d-%H%M")
name = coolname.generate_slug(2) # type: ignore
os.environ['RUN_NAME'] = f'{date}-{name}'
results_root = f'{os.getenv("RESULTS_DIR")}/{os.getenv("WANDB_PROJECT")}'
if os.getenv('RUN_MODE', '').lower() == 'debug':
run_dir = f'{results_root}/_debug/{os.getenv("RUN_NAME")}'
os.environ['WANDB_MODE'] = 'disabled'
else:
run_dir = f'{results_root}/{os.getenv("RUN_NAME")}'
os.makedirs(run_dir, exist_ok=True)
os.environ['RUN_DIR'] = run_dir
| [
"karol@piczak.com"
] | karol@piczak.com |
c474ddcdc642369145b11ba23644182f63331500 | 116a4a2fcd3e9c3d216f96103006c707daa6001a | /HelloDjango/apps/awards/migrations/0017_auto_20200726_0254.py | 1dccd60f9a05a0237dcea616506c43eae765cb60 | [] | no_license | Eldar1988/a_white_birds | 22d743ed1fa651062f070c0e81b7ac665be7a72a | 0430d5322b3a55b6f55e9541675d6670f5d8a518 | refs/heads/master | 2022-12-18T20:23:26.293059 | 2020-09-15T04:27:59 | 2020-09-15T04:27:59 | 283,169,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | # Generated by Django 3.0.6 on 2020-07-25 20:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awards', '0016_juryapproved_project'),
]
operations = [
migrations.RemoveField(
model_name='jury',
name='user',
),
migrations.DeleteModel(
name='Promocode',
),
migrations.AddField(
model_name='profile',
name='interview',
field=models.URLField(null=True, verbose_name='Ссылка на интервью (только для жюри)'),
),
migrations.AddField(
model_name='profile',
name='preview',
field=models.TextField(max_length=500, null=True, verbose_name='Краткая информация - один абзац (только для жюри)'),
),
migrations.AddField(
model_name='profile',
name='professional',
field=models.CharField(max_length=200, null=True, verbose_name='Профессия (только для жюри)'),
),
migrations.DeleteModel(
name='Jury',
),
]
| [
"elfarych@gmail.com"
] | elfarych@gmail.com |
ca4f2e41e9d066e4f5f7ccdf3227ca710af932bf | bd831e22b73e570abccd66d510c9e72224f34af6 | /tests/clients/test_rest_client.py | 98db7ea5c21208efdc09ae77ae2d95221f4f1f1a | [
"Apache-2.0"
] | permissive | binderjoe/sdu-commons | b971c56b1335bc281feba04dd123e506cc88650f | 7c6b609aced4beb166754c589db7dfc22239a7df | refs/heads/master | 2020-05-24T04:10:48.201303 | 2019-05-16T19:14:35 | 2019-05-16T19:14:35 | 187,087,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | import json
import pytest
import responses
from osdu_commons.clients.rest_client import (HttpClientException, HttpServerException, HttpUnrecognizedException,
RestClient)
TEST_BASE_URL = 'https://example.com'
@pytest.fixture()
def rest_client():
return RestClient(base_url=TEST_BASE_URL)
@responses.activate
def test_post_successful(rest_client):
responses.add(
responses.POST,
f'{TEST_BASE_URL}/Test',
json={},
status=200
)
rest_client.post(
json={'a': 'b'},
path=f'{TEST_BASE_URL}/Test',
params={'c': 'd'},
headers={'user-agent': 'test-test'}
)
assert len(responses.calls) == 1
call = responses.calls[0]
assert json.loads(call.request.body) == {'a': 'b'}
assert call.request.path_url == '/Test?c=d'
assert call.request.headers['user-agent'] == 'test-test'
@responses.activate
def test_post_400(rest_client):
responses.add(
responses.POST,
f'{TEST_BASE_URL}/Test',
json={},
status=400
)
with pytest.raises(HttpClientException):
rest_client.post(
json={'a': 'b'},
path=f'{TEST_BASE_URL}/Test',
)
@responses.activate
def test_post_500(rest_client):
responses.add(
responses.POST,
f'{TEST_BASE_URL}/Test',
json={},
status=500
)
with pytest.raises(HttpServerException):
rest_client.post(
json={'a': 'b'},
path=f'{TEST_BASE_URL}/Test',
)
@responses.activate
def test_post_raises_unrecognized_exception(rest_client):
responses.add(
responses.POST,
f'{TEST_BASE_URL}/Test',
json={},
status=999
)
with pytest.raises(HttpUnrecognizedException):
rest_client.post(
json={'a': 'b'},
path=f'{TEST_BASE_URL}/Test',
)
| [
"jbinder@Joes-MBP.guest.corp.microsoft.com"
] | jbinder@Joes-MBP.guest.corp.microsoft.com |
cdbcd65e95134f17dda941cd57c63ebf20839815 | 5e277e986ed8e9e8cff4317cc75b24df0f0342ed | /python/PromonXmlParser/PromonXmlParser.py | 79a7028bc9a4a79897fe0f367758ed695c138936 | [] | no_license | kimgudtjr/python | 57322ff78ff3e99b39f0b105c1706231e1403167 | aff09c9025e0dd2a68d9c9eb97bc63ab27f465e5 | refs/heads/master | 2021-01-01T18:01:56.004679 | 2014-05-29T06:55:26 | 2014-05-29T06:55:26 | 19,400,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,291 | py | import xml.etree.ElementTree as ET
import re
import getopt
import sys
import os
import sqlite3
class CSigDbInto:
def __init__(self,a_sToolName,a_sFilePath,a_sRegPath):
self.m_sToolName = a_sToolName
self.m_sFilePath = a_sFilePath
self.m_sRegPath = a_sRegPath
def InsertData(self):
self.m_db = sqlite3.connect('sig.db')
self.m_cur = self.m_db.cursor()
self.m_cur.execute('create table if not exists registry (action NUMERIC, tool NUMERIC, key TEXT)')
self.m_cur.execute('create table if not exists file (tool NUMERIC, action NUMERIC, full_path TEXT)')
self.m_cur.execute('create table if not exists tool (type text, detail text, name text, tool_num INTEGER PRIMARY KEY)')
self.m_cur.execute("select max(tool_num) from tool")
row = self.m_cur.fetchone()
total_tool = 0 if row[0] == None else row[0]+1
self.m_cur.execute( 'insert into tool values ( "", "", (?), (?))', (self.m_sToolName, total_tool) )
self.m_cur.execute('insert into registry values (2, (?), (?))', ( total_tool, self.m_sRegPath ) )
self.m_cur.execute('insert into file values ((?), 2, (?))', ( total_tool, self.m_sFilePath ) )
self.m_cur.close()
self.m_db.commit()
self.m_db.close()
class CProMonXmlParser:
def __init__(self,a_sFileName=None):
self.m_Root = None
self.m_ProcessList = None
self.m_EventList = None
self.m_RegSet = set()
self.m_FileSet = set()
self.m_sFilePath = None
self.m_sRegPath = None
if a_sFileName != None:
tree = ET.parse(a_sFileName)
self.m_Root = tree.getroot()
self.m_ProcessList = self.m_Root[0]
self.m_EventList = self.m_Root[1]
def GetList(self):
for event in self.m_EventList.findall('event'):
sOperation = event.find('Operation').text
sPath = event.find('Path').text
sResult = event.find('Result').text
sDetail = event.find('Detail').text
if (sOperation == "CreateFile") and (sResult == "SUCCESS"):
sPath = os.path.dirname(sPath)
self.m_FileSet.add(sPath)
m = re.match('Reg',sOperation)
if (m != None) and (sResult == "SUCCESS"):
sPath = os.path.dirname(sPath)
self.m_RegSet.add(sPath)
def Show(self):
print "[CreateFile List]"
index = 0
for k in self.m_FileSet:
print index,k
index = index + 1;
print "[RegAccess List]"
index = 0
for k in self.m_RegSet:
print index,k
index = index + 1;
def SelectGetIndex(self):
iFileIndex = raw_input("Select CreateFile Index : ")
iRegIndex = raw_input("Select Reg Index : ")
vListFile = list(self.m_FileSet)
vListReg = list(self.m_RegSet)
iFileIndex = int(iFileIndex)
iRegIndex = int(iRegIndex)
sFilePath = vListFile[iFileIndex]
sRegPath = vListReg[iRegIndex]
if sFilePath.find('AppData') != -1:
sFilePath = sFilePath.split('AppData')[1]
print sFilePath
print sRegPath
self.m_sFilePath = sFilePath
self.m_sRegPath = sRegPath
def Start(self):
self.GetList()
self.Show()
self.SelectGetIndex()
if __name__ == '__main__':
try:
sFileName = sys.argv[1]
c = CProMonXmlParser(sFileName)
c.Start()
sToolName = raw_input('Input Tools Name : ')
sFilePath = c.m_sFilePath
sRegPath = c.m_sRegPath
SigDb = CSigDbInto(sToolName,sFilePath,sRegPath)
SigDb.InsertData()
except:
print "Error"
| [
"kimgudtjr@naver.com"
] | kimgudtjr@naver.com |
97419284e839b43eb605855d9914c0f755e16bfd | 42d6e6251a8cbb1e8fab33d94b0a5ce33467bee1 | /fabpress/tasks/fs.py | 400599c169945ccb8bb2008ae6a8bbaf5bea6603 | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | agateblue/fabpress | f392d24cdb29a5eb85554e4b941ba5abb2a81168 | 5036575f5e80cf22fac355691537899e4e692891 | refs/heads/master | 2021-05-27T23:52:58.811715 | 2014-10-29T10:16:06 | 2014-10-29T10:16:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base
import os
from fabpress import utils
from fabric.api import hide, warn_only
class WPFilesDrop(base.ConfirmTask, base.TargetTask):
"""Remove all files of target, including target directory"""
name = "drop"
def operation(self, target):
command = 'find {0}/ -name "*" | xargs rm -rf '.format(os.path.join(utils.setting("path", target)))
self.subtask(base.run_target, target=target, command=command)
drop = WPFilesDrop()
class WPSymlink(base.AbstractBaseTask):
"""Create a symlink on local to target directory"""
name = "symlinks"
symlink_directory = ""
target = "local"
def operation(self, target, symlink_name):
path = os.path.join(utils.setting('path', self.target), self.symlink_directory, symlink_name)
self.info("Symlinking {0} to {1}".format(target, path))
command = "ln -s '{0}' '{1}'".format(target, path)
with hide('everything'), warn_only():
self.subtask(base.run_target, 'local', command)
class WPPluginSymlink(WPSymlink):
symlink_directory="wp-content/plugins"
class WPThemeSymlink(WPSymlink):
symlink_directory="wp-content/themes"
plugin_symlink = WPPluginSymlink()
theme_symlink = WPThemeSymlink()
| [
"contact@eliotberriot.com"
] | contact@eliotberriot.com |
65c63d03ce6dae0284f287af1d709c0f3f9895b9 | c0377e118c629965a42a05198bb9f2faa789db5f | /AGC044/A.py | ede868bb89e312bd9601ae597d15e634a1cf32f4 | [] | no_license | Craft055/AtCoder | 0e3a078951b1cc3c991d3d18b15b6ddc697c8f1f | 40011b7033717726de593f2781331d4c67b10dd1 | refs/heads/master | 2023-03-01T11:08:29.611377 | 2021-02-07T17:27:35 | 2021-02-07T17:27:35 | 240,670,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,516 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
def input():
return sys.stdin.readline()
def check_minus(coin_num, x, n, d):
sub_x = n - x
return coin_num + sub_x * d
def check_exponentiation(coin_num, plus_coin, multi, x, n, d):
coin_num += plus_coin
x *= multi
if x >= n:
sub_x = x - n
return coin_num + sub_x * d, coin_num, x
else:
return None, coin_num, x
def check_initial(n, d, div):
if n > d:
temp = n
counter = 0
while temp >= d:
temp /= div
counter += 1
return counter
else:
return None
def solve(i, test_case:list):
n = test_case[0]
a = test_case[1]
b = test_case[2]
c = test_case[3]
d = test_case[4]
# 初回操作 dを使う
coin_num = d
x = 1
ans_list = []
a_count = check_initial(a, d, 2)
b_count = check_initial(b, d, 3)
c_count = check_initial(c, d, 5)
# ひたすら5乗
if c_count is not None:
coin_num += c_count * d
x += c_count
while x < n:
coin_num += c
x *= 5
sub_x = x - n
ans_list.append(coin_num + sub_x * d)
# 一回戻す
coin_num -= c
x /= 5
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# 2乗から調べる
flag_4times = False
candidate, coin_num, x = check_exponentiation(coin_num, a, 2, x, n, d)
if candidate is not None:
flag_4times = True
ans_list.append(candidate)
# もう一回
candidate, coin_num, x = check_exponentiation(coin_num, a, 2, x, n, d)
if candidate is not None:
ans_list.append(candidate)
# 3乗
coin_num -= a
x /= 2
if flag_4times:
coin_num -= a
x /= 2
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
candidate, coin_num, x = check_exponentiation(coin_num, b, 3, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= b
x /= 3
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# 最初3乗
coin_num = d
x = 1
if b_count is not None:
coin_num += b_count * d
x += b_count
while x < n:
coin_num += b
x *= 3
sub_x = x - n
ans_list.append(coin_num + sub_x * d)
coin_num -= b
x /= 3
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# a2乗から調べる
flag_4times = False
candidate, coin_num, x = check_exponentiation(coin_num, a, 2, x, n, d)
if candidate is not None:
flag_4times = True
ans_list.append(candidate)
# もう一回
candidate, coin_num, x = check_exponentiation(coin_num, a, 2, x, n, d)
if candidate is not None:
ans_list.append(candidate)
# cの場合
coin_num -= a
x /= 2
if flag_4times:
coin_num -= a
x /= 2
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
candidate, coin_num, x = check_exponentiation(coin_num, c, 5, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= c
x /= 5
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# 最初a
coin_num = d
x = 1
if a_count is not None:
coin_num += a_count * d
x += a_count
print(coin_num, x)
while x < n:
coin_num += a
x *= 2
sub_x = x - n
ans_list.append(coin_num + sub_x * d)
coin_num -= a
x /= 2
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# b3乗から調べる
candidate, coin_num, x = check_exponentiation(coin_num, b, 3, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= b
x /= 3
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# c5倍の場合
candidate, coin_num, x = check_exponentiation(coin_num, c, 5, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= c
x /= 5
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# aの場合、1/4のパターンも考える
coin_num -= a
x /= 2
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# b3乗から調べる
candidate, coin_num, x = check_exponentiation(coin_num, b, 3, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= b
x /= 3
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# c5倍の場合
candidate, coin_num, x = check_exponentiation(coin_num, c, 5, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= c
x /= 5
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# aの場合、1/4のパターンも考える
coin_num -= a
x /= 2
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# b3乗から調べる
candidate, coin_num, x = check_exponentiation(coin_num, b, 3, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= b
x /= 3
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
# c5倍の場合
candidate, coin_num, x = check_exponentiation(coin_num, c, 5, x, n, d)
if candidate is not None:
ans_list.append(candidate)
coin_num -= c
x /= 5
# マイナスの場合でも比較
candidate = check_minus(coin_num, x, n, d)
ans_list.append(candidate)
print(min(ans_list))
def resolve():
test_case_num = int(input().rstrip())
test_case_list = [list(map(int, input().split())) for i in range(test_case_num)]
for i, test_case in enumerate(test_case_list):
solve(i, test_case)
if __name__ == "__main__":
resolve()
| [
"craftsman.jvyeu@gmail.com"
] | craftsman.jvyeu@gmail.com |
12513ea2b9098c18f048fcf10de5c443839e434d | 44b1ba9b6587a341ff79319de0a3c7d387988fbc | /accurate_demand/sys/vis.py | 50a4c53504195befe545f612a67f7f165f7777fb | [] | no_license | charmainewu/A-Deep-Learning-Perspective-on-Storage-Control | 93a65964d3612861bfca2b911fe5af1c2baad160 | f6fa9866495973b6d85d51b7a63c4a2ec237d7dc | refs/heads/master | 2022-11-28T00:36:18.548827 | 2020-07-28T15:30:34 | 2020-07-28T15:30:34 | 283,249,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 1 18:55:05 2020
@author: jmwu
"""
import warnings
warnings.filterwarnings("ignore")
from scipy.stats import norm
from sklearn import mixture
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
from datetime import datetime, date
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from keras.models import load_model
from seq2seq import SimpleSeq2Seq, Seq2Seq, AttentionSeq2Seq
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Times New Roman']
import matplotlib.pyplot as plt
deta = np.load("./data/cost_deta.npy")
ofl = np.load("./data/cost_ofl.npy")
nos = np.load("./data/cost_nos.npy")
mpc = np.load("./data/cost_mpc.npy")
thb = np.load("./data/cost_thb.npy")
rl = np.load("./data/cost_rl.npy")
s2slstm = np.load("./data/cost_seta.npy")
vectorlstm = np.load("./data/cost_veta.npy")
pointlstm = np.load("./data/cost_peta.npy")
x = list(range(100,3600,300))
fig, axs = plt.subplots(figsize=(6, 4),constrained_layout=True)
#axs.plot(x, nos[0:36:3]/ofl[0:36:3], linewidth=2,c = 'royalblue',marker = "o", markersize = 10, label='No Storage')
axs.plot(x, rl[0:36:3]/ofl[0:36:3], linewidth=2,marker = "p", markersize = 12,c='lightseagreen',label='RL')
axs.plot(x, mpc[0:36:3]/ofl[0:36:3], linewidth=2,marker = "h", markersize = 12,c='gold',label='MPC')
axs.plot(x, thb[0:36:3]/ofl[0:36:3], linewidth=2,marker = "<", markersize = 12,c='royalblue',label='THB')
axs.plot(x, deta[0:36:3]/ofl[0:36:3], linewidth=2,marker = "*", markersize = 12,c='darkgray',label='DETA')
axs.plot(x, s2slstm[0:36:3]/ofl[0:36:3], linewidth=2, marker = ">", markersize = 10,c='orange',label='SPTA')
axs.plot(x, vectorlstm[0:36:3]/ofl[0:36:3], linewidth=2, marker = "s", markersize = 10,c='salmon', label='VPTA')
axs.plot(x, pointlstm[0:36:3]/ofl[0:36:3], linewidth=2, marker = "v", markersize = 10,c='yellowgreen', label='PPTA')
plt.xlabel('Time/h', fontsize = 20)
plt.ylabel(r'$ \alpha $', fontsize = 20)
plt.xticks(fontsize = 17)
plt.yticks(fontsize = 17)
axs.legend(fontsize = 15)
plt.savefig("./figure/accur_sys.pdf",bbox_inches='tight') | [
"charmainewu@users.noreply.github.com"
] | charmainewu@users.noreply.github.com |
68d35f37ee51ac6ecc23ab134a6f07354933595d | fdf166ebd78925e94bc34f2a58395173e9736b34 | /upbeat/upbeat/urls.py | 7cda2c4247b8cf58e1e66b9563542fa5105eaf1a | [] | no_license | RommelMC/UpBeat | bc2d5997fd5fa0cd37ab2f7020468224c39f8aef | 01ba260f2dfe0f1d8643516857d3077c071c53ae | refs/heads/master | 2021-01-24T16:19:23.027116 | 2018-02-27T20:21:07 | 2018-02-27T20:21:07 | 123,182,906 | 0 | 0 | null | 2018-02-27T20:16:23 | 2018-02-27T20:16:22 | null | UTF-8 | Python | false | false | 763 | py | """upbeat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| [
"Rommel@Rommels-MacBook-Pro-2.local"
] | Rommel@Rommels-MacBook-Pro-2.local |
85a392d9adcd3c2433a4d76b8092bc3e7b0784f2 | df757c3b91f5bb4463f4f42d6823bc86cd0681d4 | /tests/e2e/iothub_e2e/aio/test_infrastructure.py | a2b02490f19900e0c3bf25cdf7d643b92541042c | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | BertKleewein/azure-iot-sdk-python | c40475fd45a3f465227217de3095d922615bc105 | f3e84fc448b023e147502ceb81d0b1389d0bbcfa | refs/heads/main | 2023-06-21T03:36:28.965410 | 2023-05-09T17:18:15 | 2023-05-09T17:18:15 | 247,747,815 | 0 | 0 | MIT | 2022-05-09T15:13:47 | 2020-03-16T15:32:21 | Python | UTF-8 | Python | false | false | 951 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
import pytest
import uuid
@pytest.mark.describe("ServiceHelper object")
class TestServiceHelper(object):
@pytest.mark.it("returns None when wait_for_event_arrival times out")
async def test_validate_wait_for_eventhub_arrival_timeout(self, service_helper):
# Because we have to support py27, we can't use `threading.Condition.wait_for`.
# make sure our stand-in functionality behaves the same way when dealing with
# timeouts. The 'non-timeout' case is exercised in every test that uses
# `service_helper.wait_for_eventhub_arrival`, so we don't need a specific test
# for that here.
# TODO: make this test unnecessary
event = await service_helper.wait_for_eventhub_arrival(uuid.uuid4(), timeout=2)
assert event is None
| [
"noreply@github.com"
] | BertKleewein.noreply@github.com |
c0cee1075b023c67dbe7a78d32cc9f8113950192 | 995aba01536c8ee1be6ccdd36e2ee90861687253 | /Rosalind/Adafruit_TCS34725_pigpio.py | 127ef417ad5f4e06c39f2322d7393418c5658309 | [] | no_license | profshanks/A_Stage_Robot | 54155da6e3436e8432625e98a100e4bbda3a8516 | 067b621f28cb5e204f8372dc8eb6c0c42e09608c | refs/heads/master | 2023-04-13T11:18:43.716296 | 2023-04-08T07:03:06 | 2023-04-08T07:03:06 | 143,154,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,163 | py | '''
The following adaptation of Tony D's Adafruit_TCS34275 module was hacked
together by Michael Greminger and Sam Shanks on 7/31/18 to allow the TCS34725
sensor to be operated remotely (one RPi controlling another across a WiFi
hotspot) using the pigpio library and daemon.
Tony D's original code has been commented out in places. The new lines of
code are flagged with "# _pigpio" to indicate that calls to that library.
Tony D. and Adafruit deserve the lion-share of the credit for what follows.
-profshanks-
'''
# The MIT License (MIT)
#
# Copyright (c) 2016 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import pigpio
import struct
TCS34725_ADDRESS = 0x29
TCS34725_ID = 0x12 # 0x44 = TCS34721/TCS34725, 0x4D = TCS34723/TCS34727
TCS34725_COMMAND_BIT = 0x80
TCS34725_ENABLE = 0x00
TCS34725_ENABLE_AIEN = 0x10 # RGBC Interrupt Enable
TCS34725_ENABLE_WEN = 0x08 # Wait enable - Writing 1 activates the wait timer
TCS34725_ENABLE_AEN = 0x02 # RGBC Enable - Writing 1 actives the ADC, 0 disables it
TCS34725_ENABLE_PON = 0x01 # Power on - Writing 1 activates the internal oscillator, 0 disables it
TCS34725_ATIME = 0x01 # Integration time
TCS34725_WTIME = 0x03 # Wait time (if TCS34725_ENABLE_WEN is asserted)
TCS34725_WTIME_2_4MS = 0xFF # WLONG0 = 2.4ms WLONG1 = 0.029s
TCS34725_WTIME_204MS = 0xAB # WLONG0 = 204ms WLONG1 = 2.45s
TCS34725_WTIME_614MS = 0x00 # WLONG0 = 614ms WLONG1 = 7.4s
TCS34725_AILTL = 0x04 # Clear channel lower interrupt threshold
TCS34725_AILTH = 0x05
TCS34725_AIHTL = 0x06 # Clear channel upper interrupt threshold
TCS34725_AIHTH = 0x07
TCS34725_PERS = 0x0C # Persistence register - basic SW filtering mechanism for interrupts
TCS34725_PERS_NONE = 0b0000 # Every RGBC cycle generates an interrupt
TCS34725_PERS_1_CYCLE = 0b0001 # 1 clean channel value outside threshold range generates an interrupt
TCS34725_PERS_2_CYCLE = 0b0010 # 2 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_3_CYCLE = 0b0011 # 3 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_5_CYCLE = 0b0100 # 5 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_10_CYCLE = 0b0101 # 10 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_15_CYCLE = 0b0110 # 15 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_20_CYCLE = 0b0111 # 20 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_25_CYCLE = 0b1000 # 25 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_30_CYCLE = 0b1001 # 30 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_35_CYCLE = 0b1010 # 35 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_40_CYCLE = 0b1011 # 40 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_45_CYCLE = 0b1100 # 45 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_50_CYCLE = 0b1101 # 50 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_55_CYCLE = 0b1110 # 55 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_60_CYCLE = 0b1111 # 60 clean channel values outside threshold range generates an interrupt
TCS34725_CONFIG = 0x0D
TCS34725_CONFIG_WLONG = 0x02 # Choose between short and long (12x) wait times via TCS34725_WTIME
TCS34725_CONTROL = 0x0F # Set the gain level for the sensor
TCS34725_ID = 0x12 # 0x44 = TCS34721/TCS34725, 0x4D = TCS34723/TCS34727
TCS34725_STATUS = 0x13
TCS34725_STATUS_AINT = 0x10 # RGBC Clean channel interrupt
TCS34725_STATUS_AVALID = 0x01 # Indicates that the RGBC channels have completed an integration cycle
TCS34725_CDATAL = 0x14 # Clear channel data
TCS34725_CDATAH = 0x15
TCS34725_RDATAL = 0x16 # Red channel data
TCS34725_RDATAH = 0x17
TCS34725_GDATAL = 0x18 # Green channel data
TCS34725_GDATAH = 0x19
TCS34725_BDATAL = 0x1A # Blue channel data
TCS34725_BDATAH = 0x1B
TCS34725_INTEGRATIONTIME_2_4MS = 0xFF # 2.4ms - 1 cycle - Max Count: 1024
TCS34725_INTEGRATIONTIME_24MS = 0xF6 # 24ms - 10 cycles - Max Count: 10240
TCS34725_INTEGRATIONTIME_50MS = 0xEB # 50ms - 20 cycles - Max Count: 20480
TCS34725_INTEGRATIONTIME_101MS = 0xD5 # 101ms - 42 cycles - Max Count: 43008
TCS34725_INTEGRATIONTIME_154MS = 0xC0 # 154ms - 64 cycles - Max Count: 65535
TCS34725_INTEGRATIONTIME_700MS = 0x00 # 700ms - 256 cycles - Max Count: 65535
TCS34725_GAIN_1X = 0x00 # No gain
TCS34725_GAIN_4X = 0x01 # 2x gain
TCS34725_GAIN_16X = 0x02 # 16x gain
TCS34725_GAIN_60X = 0x03 # 60x gain
# Lookup table for integration time delays.
INTEGRATION_TIME_DELAY = {
0xFF: 0.0024, # 2.4ms - 1 cycle - Max Count: 1024
0xF6: 0.024, # 24ms - 10 cycles - Max Count: 10240
0xEB: 0.050, # 50ms - 20 cycles - Max Count: 20480
0xD5: 0.101, # 101ms - 42 cycles - Max Count: 43008
0xC0: 0.154, # 154ms - 64 cycles - Max Count: 65535
0x00: 0.700 # 700ms - 256 cycles - Max Count: 65535
}
# Utility methods:
def calculate_color_temperature(r, g, b):
"""Converts the raw R/G/B values to color temperature in degrees Kelvin."""
# 1. Map RGB values to their XYZ counterparts.
# Based on 6500K fluorescent, 3000K fluorescent
# and 60W incandescent values for a wide range.
# Note: Y = Illuminance or lux
X = (-0.14282 * r) + (1.54924 * g) + (-0.95641 * b)
Y = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
Z = (-0.68202 * r) + (0.77073 * g) + ( 0.56332 * b)
# Check for divide by 0 (total darkness) and return None.
if (X + Y + Z) == 0:
return None
# 2. Calculate the chromaticity co-ordinates
xc = (X) / (X + Y + Z)
yc = (Y) / (X + Y + Z)
# Check for divide by 0 again and return None.
if (0.1858 - yc) == 0:
return None
# 3. Use McCamy's formula to determine the CCT
n = (xc - 0.3320) / (0.1858 - yc)
# Calculate the final CCT
cct = (449.0 * (n ** 3.0)) + (3525.0 *(n ** 2.0)) + (6823.3 * n) + 5520.33
return int(cct)
def calculate_lux(r, g, b):
"""Converts the raw R/G/B values to luminosity in lux."""
illuminance = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
return int(illuminance)
class TCS34725(object):
"""TCS34725 color sensor. Code modified to run remotely using pigpio"""
def __init__(self, pi, integration_time=TCS34725_INTEGRATIONTIME_2_4MS,
gain=TCS34725_GAIN_4X, address=TCS34725_ADDRESS, i2c=None, **kwargs):
"""Initialize the TCS34725 sensor."""
'''
Setup I2C interface for the device.
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address, **kwargs)
'''
self.pi2 = pi # _pigpio
self._device = self.pi2.i2c_open(1,0x29) # _pigpio
# Make sure we're connected to the sensor.
chip_id = self._readU8(TCS34725_ID)
if chip_id != 0x44:
raise RuntimeError('Failed to read TCS34725 chip ID, check your wiring.')
# Set default integration time and gain.
self.set_integration_time(integration_time)
self.set_gain(gain)
# Enable the device (by default, the device is in power down mode on bootup).
self.enable()
def _readU8(self, reg):
"""Read an unsigned 8-bit register."""
#return self._device.readU8(TCS34725_COMMAND_BIT | reg)
return self.pi2.i2c_read_byte_data(self._device,TCS34725_COMMAND_BIT | reg) # _pigpio
def _readU16LE(self, reg):
"""Read a 16-bit little endian register."""
#return self._device.readU16LE(TCS34725_COMMAND_BIT | reg)
(c, d) = self.pi2.i2c_read_i2c_block_data(self._device,(TCS34725_COMMAND_BIT | reg), 2) # _pigpio
neat = struct.unpack('H'*1, d) # _pigpio
return neat[0] # _pigpio
def _write8(self, reg, value):
"""Write a 8-bit value to a register."""
#self._device.write8(TCS34725_COMMAND_BIT | reg, value)
self.pi2.i2c_write_byte_data(self._device,TCS34725_COMMAND_BIT | reg, value) # _pigpio
def enable(self):
"""Enable the chip."""
# Flip on the power and enable bits.
self._write8(TCS34725_ENABLE, TCS34725_ENABLE_PON)
time.sleep(0.01)
self._write8(TCS34725_ENABLE, (TCS34725_ENABLE_PON | TCS34725_ENABLE_AEN))
def disable(self):
"""Disable the chip (power down)."""
# Flip off the power on and enable bits.
reg = self._readU8(TCS34725_ENABLE)
reg &= ~(TCS34725_ENABLE_PON | TCS34725_ENABLE_AEN)
self._write8(TCS34725_ENABLE, reg)
def set_integration_time(self, integration_time):
"""Sets the integration time for the TC34725. Provide one of these
constants:
- TCS34725_INTEGRATIONTIME_2_4MS = 2.4ms - 1 cycle - Max Count: 1024
- TCS34725_INTEGRATIONTIME_24MS = 24ms - 10 cycles - Max Count: 10240
- TCS34725_INTEGRATIONTIME_50MS = 50ms - 20 cycles - Max Count: 20480
- TCS34725_INTEGRATIONTIME_101MS = 101ms - 42 cycles - Max Count: 43008
- TCS34725_INTEGRATIONTIME_154MS = 154ms - 64 cycles - Max Count: 65535
- TCS34725_INTEGRATIONTIME_700MS = 700ms - 256 cycles - Max Count: 65535
"""
self._integration_time = integration_time
self._write8(TCS34725_ATIME, integration_time)
def get_integration_time(self):
"""Return the current integration time value. This will be one of the
constants specified in the set_integration_time doc string.
"""
return self._readU8(TCS34725_ATIME)
def set_gain(self, gain):
"""Adjusts the gain on the TCS34725 (adjusts the sensitivity to light).
Use one of the following constants:
- TCS34725_GAIN_1X = No gain
- TCS34725_GAIN_4X = 2x gain
- TCS34725_GAIN_16X = 16x gain
- TCS34725_GAIN_60X = 60x gain
"""
self._write8(TCS34725_CONTROL, gain)
def get_gain(self):
"""Return the current gain value. This will be one of the constants
specified in the set_gain doc string.
"""
return self._readU8(TCS34725_CONTROL)
def get_raw_data(self):
"""Reads the raw red, green, blue and clear channel values. Will return
a 4-tuple with the red, green, blue, clear color values (unsigned 16-bit
numbers).
"""
# Read each color register.
r = self._readU16LE(TCS34725_RDATAL)
g = self._readU16LE(TCS34725_GDATAL)
b = self._readU16LE(TCS34725_BDATAL)
c = self._readU16LE(TCS34725_CDATAL)
# Delay for the integration time to allow for next reading immediately.
time.sleep(INTEGRATION_TIME_DELAY[self._integration_time])
return (r, g, b, c)
def set_interrupt(self, enabled):
"""Enable or disable interrupts by setting enabled to True or False."""
enable_reg = self._readU8(TCS34725_ENABLE)
if enabled:
enable_reg |= TCS34725_ENABLE_AIEN
else:
enable_reg &= ~TCS34725_ENABLE_AIEN
self._write8(TCS34725_ENABLE, enable_reg)
time.sleep(1)
def clear_interrupt(self):
"""Clear interrupt."""
self._device.write8(0x66 & 0xff)
def set_interrupt_limits(self, low, high):
"""Set the interrupt limits to provied unsigned 16-bit threshold values.
"""
self._device.write8(0x04, low & 0xFF)
self._device.write8(0x05, low >> 8)
self._device.write8(0x06, high & 0xFF)
self._device.write8(0x07, high >> 8)
| [
"yeboa037@umn.edu"
] | yeboa037@umn.edu |
770c81235aff3f5430684a2acb08650f166984fa | 4f712782a533415a5062c5aaf8444ebb13839ff0 | /20200910_3/generator.py | a226489ada911e40a6cb083faf306b8576c6d60a | [] | no_license | TheGhost8/pythonprac | 607517d2db95959f78396e4654c03aa4099755cc | 6167e7c6803a5e9509dce0742cac8d62bf70f96a | refs/heads/master | 2023-05-04T14:57:42.669597 | 2021-05-17T09:09:33 | 2021-05-17T09:09:33 | 292,591,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | import random as rd
values = []
values.append(eval(input()))
for i in range(9): values.append(rd.randint(1,100))
print(values) | [
"vantank36@gmial.com"
] | vantank36@gmial.com |
4bc5adb9d6b6ee939e6b179dd53b509d2566a42f | a730bc9d3b37c2b0765d13420b7fee0c24bc00f7 | /components/models/exterior.py | ca011b7ee8041d9ac097fe2f819a94bb90981fca | [] | no_license | Aarongold1869/VanBuilder | fc2b442f5712552c1240c392c746cafe6ad3748d | 4c360d5094fdfc2d47c554542decd45a6395c0c3 | refs/heads/main | 2023-07-21T18:51:04.791821 | 2021-09-01T21:03:23 | 2021-09-01T21:03:23 | 382,648,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,294 | py | from django.db import models
from .categories import Category
# External Components
class ExteriorPaint(models.Model):
category = models.ForeignKey(Category,on_delete=models.PROTECT,default=None)
brand = models.CharField(max_length=30, null=True, blank=True)
color = models.CharField(max_length=30, null=True, blank=True)
unit_measure = models.CharField(max_length=30, null=True, blank=True)
cost_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
weight_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
description = models.TextField(null=True, blank=True)
class RoofRack(models.Model):
category = models.ForeignKey(Category,on_delete=models.PROTECT,default=None)
item = models.CharField(max_length=30, null=True, blank=True)
brand = models.CharField(max_length=30, null=True, blank=True)
dimensions = models.CharField(max_length=30, null=True, blank=True)
material = models.CharField(max_length=30, null=True, blank=True)
cost_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
weight_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
description = models.TextField(null=True, blank=True)
class HeadLights(models.Model):
category = models.ForeignKey(Category,on_delete=models.PROTECT,default=None)
brand = models.CharField(max_length=30, null=True, blank=True)
style = models.CharField(max_length=30, null=True, blank=True)
cost_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
weight_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
description = models.TextField(null=True, blank=True)
class Suspension(models.Model):
category = models.ForeignKey(Category,on_delete=models.PROTECT,default=None)
brand = models.CharField(max_length=30, null=True, blank=True)
style = models.CharField(max_length=30, null=True, blank=True)
cost_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
weight_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
description = models.TextField(null=True, blank=True)
class Window(models.Model):
category = models.ForeignKey(Category,on_delete=models.PROTECT,default=None)
brand = models.CharField(max_length=30, null=True, blank=True)
dimensions = models.CharField(max_length=30, null=True, blank=True)
cost_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
weight_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
description = models.TextField(null=True, blank=True)
class ExternalFeature(models.Model):
category = models.ForeignKey(Category,on_delete=models.PROTECT,default=None)
item = models.CharField(max_length=30, null=True, blank=True)
brand = models.CharField(max_length=30, null=True, blank=True)
cost_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
weight_per_unit = models.DecimalField(max_digits=15,decimal_places=2, null=True, blank=True)
description = models.TextField(null=True, blank=True)
| [
"aarongold1869@gmail.com"
] | aarongold1869@gmail.com |
dfaf750523ecbc531bef85d4f708bf9d7cb86301 | 4e5d7fef1eaee614560042a4128575ea0681b19a | /gem5art-env/lib/python3.6/codecs.py | 86c35b99bbc3ca082d8935403c523cffdedc29dc | [] | no_license | JinpengMiao/gem5_cache | eeeb3260aecfb8a61800933dee8488eb0bc562b1 | d218f78dc1708eacfd3250ed3cff5ed00011b5b8 | refs/heads/master | 2022-11-12T08:42:00.467107 | 2020-07-06T20:23:49 | 2020-07-06T20:23:49 | 277,221,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | /home/jinpeng/.pyenv/versions/3.6.9/lib/python3.6/codecs.py | [
"jinpeng.miao@outlook.com"
] | jinpeng.miao@outlook.com |
5531a83724096dfb63aacb9d71bde66816f09bb7 | a4ef92a5333c393d6351f1b92a224bc9a73ca393 | /machine-learning-maths/newtonmethods.py | 333e5c591c56286983abc41a6b1e52e8643dc3ce | [] | no_license | dksingh04/python_projects | d3375d933768596bbb24132c66e22963455f5708 | 8f4be4ce0433ee2ba142fbf0e09657f41dec8a14 | refs/heads/master | 2021-07-06T11:57:24.588164 | 2018-12-27T17:39:24 | 2018-12-27T17:39:24 | 43,363,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | '''
𝑦(𝑥)=𝑎0 +(𝑥−𝑥1)𝑎1 +(𝑥−𝑥1)(𝑥−𝑥2)𝑎2 +⋯+(𝑥−𝑥1)(𝑥−𝑥2)...(𝑥−𝑥𝑛)𝑎𝑛
where a0 = y1^1, a1 = y2^2 .... an = yn+1^(n+1)
yi^(j+1) = (yi^j - yj^j) / (xi - xj) where j = 1,2,3, 4 ... n and i = j+1 ... n+1
for e.g. y2^2 = (y2^1 - y1^1) / (x2 - x1) and y1^1 = y1 and y2^1 = y2 so on.
So in mathematical terms the Newton's formula can be written as
y(x) = a0 + ∑ [∏(x - xj)]ai where i = 1 .. n and j = 1 .. i+1
Implementation of Newtons Method for Linear Interpolation
'''
import numpy as np
from matplotlib import pyplot as plt
x = [0.0, 1.5, 2.8, 4.4, 6.1, 8.0]
y = [0.0, 0.9, 2.5, 6.6, 7.7, 8.0]
def findYp(xp, x, y):
n = len(x) - 1
Dy = np.zeros((n+1, n+1))
Dy[:,0] = y
#calculate the values for rest of the matrix
for j in range(n):
for i in range(j+1, n+1):
Dy[i, j+1] = (Dy[i,j] - Dy[j, j]) / (x[i] - x[j])
yp = Dy[0,0]
for i in range(n):
xProd = 1
for j in range(i+1):
xProd *= (xp - x[j])
yp += xProd * Dy[i+1, i+1]
return yp
xp = float(input('Enter x: '))
yp = findYp(xp, x, y)
print('For x = %.1f, y = %.1f' %(xp, yp))
'''
Output:
Enter x: 2.4
For x = 2.4, y = 1.72.4
'''
# Now plot a graph using matplotlib
xarray = np.linspace(x[0], x[-1], 50)
yarray = np.empty_like(xarray)
idx = 0
for xp in xarray:
yp = findYp(xp, x, y)
yarray[idx] = yp
idx += 1
plt.plot(x, y, "ro--", xarray, yarray)
plt.xlabel("x")
plt.ylabel("Y")
plt.title("Newton's Method of Linear Interpolation")
plt.legend(["linear", "newton's polynomial method"])
plt.show()
| [
"deepaksingh04@gmail.com"
] | deepaksingh04@gmail.com |
3a5f45accdfdbdc6c8d87ba4039032fc700555aa | 214bd135c12027aa8aef8180ad9ce1c05edc0237 | /Metrix.py | 9babf84eee186aaec38a26dfac3637597f3302bd | [] | no_license | Tanvish-IITG/BTP | c848da3002e40d596028927d7ed8d350c8b8c803 | 64d5f7f1139356a58ad9ee17c8f981e4c6d3b459 | refs/heads/master | 2023-08-21T19:04:43.563677 | 2021-10-15T03:59:28 | 2021-10-15T03:59:28 | 415,786,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | import torch
def IOU(y,label):
predicted_label = torch.argmax(y,dim = 1)
intersection = torch.stack([torch.logical_and(label == i,predicted_label == i).sum() for i in range(8)])
union = torch.stack([torch.logical_or(label == i,predicted_label == i).sum() for i in range(8)])
return torch.div(intersection,union)
def F_Score(y,label):
predicted_label = torch.argmax(y,dim = 1)
TP = torch.stack([torch.logical_and(label == i,predicted_label == i).sum() for i in range(8)])
FP = torch.stack([torch.logical_and(label != i,predicted_label == i).sum() for i in range(8)])
TN = torch.stack([torch.logical_and(label != i,predicted_label != i).sum() for i in range(8)])
FN = torch.stack([torch.logical_and(label == i,predicted_label != i).sum() for i in range(8)])
Precison = torch.div(TP,torch.add(TP , FP))
Recall = torch.div(TP,torch.add(TP , FN))
return Precison, Recall
def HarmonicMean(x,y):
return torch.div(torch.mul(2*x,y) , torch.add(x,y))
if __name__ == "__main__":
from VGG16 import VGG16_OCR
import DataLoader
trainDataset = DataLoader.DatasetSegmentation("./data/train/SUIMDATA/train_val","images","masks",transform=DataLoader.transformSeg)
model = VGG16_OCR()
| [
"Tanvish2001@gmail.com"
] | Tanvish2001@gmail.com |
ec6e3a87299b3f0b27c39ebb22357a57cd9e2f35 | 04afb34356de112445c3e5733fd2b773d92372ef | /Sem1/FP/S13/venv/Scripts/pip-script.py | ecfdd60747e705166efa7dda1830c8ac7fb753a9 | [] | no_license | AndreeaCimpean/Uni | a4e48e5e1dcecbc0c28ad45ddd3b0989ff7985c8 | 27df09339e4f8141be3c22ae93c4c063ffd2b172 | refs/heads/master | 2020-08-21T19:12:49.840044 | 2020-05-15T17:22:50 | 2020-05-15T17:22:50 | 216,222,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!D:\Facultate\UniRepo\Sem1\FP\S13\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"andreeacimpean.910@gmail.com"
] | andreeacimpean.910@gmail.com |
51071e102616ea0cafa25dbfadf9afbdc6a732f2 | 33d018fdd6ce541c617c22cce68680d1d63b6b8f | /ATL/ast_to_halide.py | 2dc2c49927934cfe6daf9579be8cd0093b91c9fc | [
"BSD-2-Clause"
] | permissive | Playfloor/atl | 82465fe55bc1c326ef2e28f311bb8f81a3832c41 | d8cb8f9b9e4f596fc9d3e4628ec69cecf623d0e8 | refs/heads/master | 2023-04-04T05:35:04.970642 | 2021-04-14T19:21:57 | 2021-04-14T19:21:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,134 | py |
from .prelude import *
from . import atl_types as T
from .frontend import AST
from .halide_ir import HIR, HIR_CPP_String
from .halide_ir import CJit as HIR_CJit
from fractions import Fraction
import numpy as np
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
class Compile:
def __init__(self, ast):
self._ast = ast
self._ctxt = Context()
self._ctxt.push()
self._vars = []
self._rdoms = []
self._funcs = []
self._params = []
self._imgs = []
self._stmts = []
self._outputs = []
self._rel_sizes = {}
# bind size variables to pipeline parameters
for sd in ast.sizes:
sz_param = HIR.Param( sd.name, HIR.i32 )
self._params.append(sz_param)
self._ctxt.set(sd.name, sz_param)
# bind variables to images or pipeline parameters
for vd in ast.vars:
def bind_var(typ=vd.type,name=vd.name):
if typ == T.num:
# create a parameter
v_param = HIR.Param( name, HIR.f64 )
self._params.append(v_param)
return v_param
elif type(typ) is T.Tensor:
assert not typ.has_tuples()
# create an image / func
n_dims = len(typ.shape())
v_img = HIR.Img( name, n_dims, HIR.f64 )
self._imgs.append(v_img)
return HIR.ImgFunc(v_img)
else:
assert type(typ) is T.Tuple
labels = ( typ.labels.names if typ.labels is not None else
[ str(i) for i in range(0,len(typ.types)) ] )
return tuple( bind_var(subtyp, Sym(f"{name}_{label}"))
for label, subtyp in zip(labels, typ.types) )
self._ctxt.set( vd.name, bind_var() )
# bind relations to boolean images
for rd in ast.relations:
rngs = [ self._get_range(sz) for sz in rd.sizes ]
self._rel_sizes[rd.name] = rngs
n_dims = len(rd.sizes)
v_img = HIR.Img( rd.name, n_dims, HIR.u8 )
self._imgs.append(v_img)
self._ctxt.set( rd.name, HIR.ImgFunc(v_img) )
# process all of the statements
body = ast.body
if type(body) is AST.Let:
for s in body.stmts:
self._stmts.append( self._compile_stmt(s) )
body = body.ret
# process the output, potentially as a tuple...
def bind_output(e=body, name=Sym('out')):
if type(e) is AST.Tuple:
args = [ bind_output(a, Sym(f"{name}_{i}"))
for i,a in enumerate(e.args) ]
return args
elif type(e.type) is T.Tensor:
assert type(e) is AST.Var
out_func = self._ctxt.get(e.name)
self._outputs.append(out_func)
return out_func
else:
assert e.type is T.num
out_sym = name
out_stmt = AST.assign( out_sym, e.type, e, e.srcinfo )
self._stmts.append( self._compile_stmt(out_stmt) )
out_func = self._ctxt.get(out_sym)
self._outputs.append(out_func)
return out_func
bind_output()
self._pipeline = HIR.Pipeline(
vars = self._vars,
rdoms = self._rdoms,
funcs = self._funcs,
params = self._params,
imgs = self._imgs,
stmts = self._stmts,
outputs = self._outputs,
)
#print(self._pipeline)
def make_cjit(self, vs, sizes, relations, out):
if hasattr(self, '_cjit'):
return
params = []
param_typs = []
img_ranges = []
out_ranges = []
# unpack sizes
for sz in sizes:
params.append(sz)
param_typs.append(int)
# unpack vars
def unpack_var(v):
if isinstance(v, tuple):
for subv in v:
unpack_var(subv)
elif type(v) is np.ndarray:
img_ranges.append(v.shape)
else:
assert type(v) is float
params.append(v)
param_typs.append(float)
for v in vs:
unpack_var(v)
# unpack relations
for R in relations:
assert type(R) is np.ndarray
img_ranges.append(R.shape)
def unpack_out(val):
if isinstance(val, tuple):
for o in val:
unpack_out(o)
else:
assert type(val) is np.ndarray
out_ranges.append(val.shape)
unpack_out(out)
# generate a cjit object...
self._cjit = HIR_CJit(self._ast,
self._pipeline,
params,
param_typs,
img_ranges,
out_ranges)
def cpp_str(self, vs, sizes, relations, out):
self.make_cjit(vs, sizes, relations, out)
print(self._pipeline)
return self._cjit.codestr()
def __call__(self, vs, sizes, relations, out):
self.make_cjit(vs, sizes, relations, out)
# we need to pack lists of the following things
params = []
imgs = []
outputs = []
# unpack the sizes into parameters first
for sz in sizes:
params.append(sz)
# then unpack the variables
def unpack_var(v):
if isinstance(v, tuple):
for subv in v:
unpack_var(subv)
elif type(v) is np.ndarray:
imgs.append(v)
else:
assert type(v) is float
params.append(v)
for v in vs:
unpack_var(v)
# finally unpack the relations
for R in relations:
assert type(R) is np.ndarray
imgs.append(R)
# unpack the output in case it's a tuple of buffers
def unpack_out(val=out):
if isinstance(val, tuple):
for o in val:
unpack_out(o)
else:
assert type(val) is np.ndarray
outputs.append(val)
unpack_out(out)
assert len(params) == len(self._pipeline.params)
assert len(imgs) == len(self._pipeline.imgs)
assert len(outputs) == len(self._pipeline.outputs)
#print('IMGS\n',imgs)
# invoke the pipeline
self._cjit(params, imgs, outputs)
#self._pipeline(params, imgs, outputs)
def _get_range(self,atl_rng):
if type(atl_rng) is int:
return HIR.Range( HIR.Const(0,HIR.i32), HIR.Const(atl_rng,HIR.i32) )
else:
assert type(atl_rng) is Sym
sz_p = self._ctxt.get(atl_rng)
assert type(sz_p) is HIR.Param
return HIR.Range( HIR.Const(0,HIR.i32), HIR.Eparam(sz_p) )
def _new_rdom(self, name, rng):
rdom = HIR.RDom(name, [rng])
self._rdoms.append(rdom)
self._ctxt.set(name, rdom)
return rdom
def _compile_stmt(self, stmt):
# create Func symbol and bind it
n_dim = 1 if stmt.type == T.num else len(stmt.type.shape())
func = HIR.Func(stmt.name, n_dim, HIR.f64)
self._ctxt.set(stmt.name, func)
self._funcs.append(func)
self._ctxt.push()
# bind argument/func pure-variables...
ast_rhs = stmt.rhs
if stmt.type == T.num:
arg_vars = [ HIR.Var(Sym('_0_')) ]
self._vars.append(arg_vars[0])
elif type(ast_rhs) is AST.Var:
# special case of tensor aliasing
shape = stmt.type.shape()
arg_vars = [ HIR.Var(Sym(f"_{i}_")) for i in range(0,len(shape)) ]
self._vars += arg_vars
rhs_func = self._ctxt.get(ast_rhs.name)
hir_rhs = HIR.FAccess(rhs_func, [ HIR.Evar(v) for v in arg_vars ])
ast_rhs = None
else:
arg_vars = []
typ = stmt.type
assert typ == ast_rhs.type
while type(ast_rhs) is AST.Gen:
hvar = HIR.Var( ast_rhs.name )
self._vars.append(hvar)
self._ctxt.set(ast_rhs.name, hvar)
arg_vars.append(hvar)
typ = typ.type
ast_rhs = ast_rhs.body
if typ != T.num:
print('*****\n*\n*\n*\n',ast_rhs)
assert typ == T.num
# compile expression
if ast_rhs is not None:
hir_rhs = self._compile_expr(ast_rhs)
self._ctxt.pop()
# return the HIR stmt
return HIR.PureDef( func, arg_vars, hir_rhs )
    def _compile_leaf(self,e):
        """Compile a leaf access chain (Access* over Proj* over Var) into an
        HIR parameter read or function access.

        Unwraps nested AST.Access nodes to collect index expressions, then
        AST.Proj nodes to statically project through tuples bound in the
        context, and finally emits either HIR.Eparam (scalar parameter) or
        HIR.FAccess (func access, clamped for input images).
        """
        #print('comp leaf ', e)
        # extract accesses
        # Indices accumulate outermost-last, so prepend each layer's indices.
        accesses = []
        while type(e) is AST.Access:
            accesses = [ self._compile_expr(ie) for ie in e.idx ] + accesses
            e = e.base
        # acc_e keeps the type of the fully-projected access target.
        acc_e = e
        # extract projections
        projs = []
        while type(e) is AST.Proj:
            projs.append(e.idx)
            e = e.arg
        assert type(e) is AST.Var, "expected particular nesting..."
        # unpack any tuple present by projecting statically
        #print('post proj unroll : ', e)
        var_tup = self._ctxt.get(e.name)
        # projs was collected inside-out, so apply in reverse (outermost first).
        for i in reversed(projs):
            assert isinstance(var_tup, tuple), (f"expected lookup of "
                                                f"{e.name} to be a tuple")
            var_tup = var_tup[i]
        x = var_tup
        # now certify that we got a function or param, and generate an access
        if type(x) is HIR.Param:
            assert acc_e.type == T.num
            return HIR.Eparam(x)
        else:
            assert isinstance(x, HIR.func)
            # if this is an input func, protect it with clamping
            # (out-of-bounds reads on inputs are clamped to [0, extent-1])
            if type(x) is HIR.ImgFunc:
                if type(acc_e.type) is not T.Tensor:
                    # Debug dump before the assert below fires.
                    print('XXX', acc_e.type)
                    print(acc_e)
                assert type(acc_e.type) is T.Tensor
                rngs = [ self._get_range(sz) for sz in acc_e.type.shape() ]
                m1 = lambda x: HIR.BinOp('-', x, HIR.Const(1,HIR.i32))
                clamp = lambda x,hi: HIR.Clamp( x,HIR.Const(0,HIR.i32), m1(hi) )
                accesses = [ clamp( ie, bd.extent )
                             for ie,bd in zip(accesses, rngs) ]
            # if this is a scalar, then plug in a default access...
            # (scalars are stored as 1-D funcs; read them at index 0)
            if e.type == T.num:
                assert len(accesses) == 0
                accesses = [ HIR.Const(0,HIR.i32) ]
            return HIR.FAccess( x, accesses )
    def _compile_expr(self,e):
        """Recursively lower one ATL AST expression node to an HIR expression.

        Dispatches on the node class across three families: value
        expressions, index arithmetic, and predicates. Tuple, Gen, and Let
        nodes are rejected here because they must be handled at the
        statement / top level.
        """
        eclass = type(e)
        # Expr Compile #
        if eclass is AST.Var or eclass is AST.Proj or eclass is AST.Access:
            return self._compile_leaf(e)
        elif eclass is AST.Const:
            return HIR.Const(e.val, HIR.f64)
        elif eclass is AST.BinOp:
            lhs = self._compile_expr(e.lhs)
            rhs = self._compile_expr(e.rhs)
            return HIR.BinOp(e.op, lhs, rhs)
        elif eclass is AST.Tuple:
            assert False, "Should not be trying to compile Tuples to Halide"
        elif eclass is AST.TensorLit:
            assert False, "TODO: handle tensor literals in Halide compilation"
        elif eclass is AST.Gen:
            assert False, (f"AST.Gen compilation should be handled "
                           f"at the statement level")
        elif eclass is AST.Sum:
            # Big summation: compile the body under a fresh reduction domain.
            N = self._get_range(e.range)
            self._ctxt.push()
            r = self._new_rdom(e.name, N)
            body = self._compile_expr(e.body)
            self._ctxt.pop()
            return HIR.BigSum(r, body)
        elif eclass is AST.BuiltIn:
            args = [ self._compile_expr(a) for a in e.args ]
            return e.f.halide_compile(*args)
        elif eclass is AST.Indicate:
            # Indicator: select body when the predicate holds, else 0.0.
            guard = self._compile_expr(e.pred)
            # maybe indicators should be fused to generators etc?
            return HIR.Select( guard, self._compile_expr(e.body),
                               HIR.Const(0.0,HIR.f64) )
        elif eclass is AST.Let:
            assert False, (f"AST.Let compilation should be handled "
                           f"at the top-level")
        # Index Exec #
        elif eclass is AST.IdxConst:
            # Only whole-number index constants are representable in HIR.
            assert e.val.denominator == 1
            return HIR.Const(e.val.numerator, HIR.i32)
        elif eclass is AST.IdxVar:
            x = self._ctxt.get(e.name)
            if type(x) is HIR.Var:
                return HIR.Evar(x)
            elif type(x) is HIR.RDom:
                return HIR.Erdom(x)
            else:
                assert False, f"unexpected IdxVar type: {type(x)}"
        elif eclass is AST.IdxSize:
            x = self._ctxt.get(e.name)
            assert type(x) is HIR.Param
            return HIR.Eparam(x)
        elif eclass is AST.IdxAdd or eclass is AST.IdxSub:
            op = '+' if eclass is AST.IdxAdd else '-'
            return HIR.BinOp( op, self._compile_expr(e.lhs),
                              self._compile_expr(e.rhs) )
        elif eclass is AST.IdxScale:
            assert e.coeff.denominator == 1
            coeff = HIR.Const(e.coeff.numerator, HIR.i32)
            return HIR.BinOp('*', coeff, self._compile_expr(e.idx))
        # Pred Exec #
        elif eclass is AST.Cmp:
            lhs = self._compile_expr(e.lhs)
            rhs = self._compile_expr(e.rhs)
            return HIR.BinOp(e.op, lhs, rhs)
        elif eclass is AST.Relation:
            # Relation lookup: clamp each index into the relation's bounds,
            # read the (u8) relation func, and test for non-zero.
            bds = self._rel_sizes[e.name]
            m1 = lambda x: HIR.BinOp('-', x, HIR.Const(1,HIR.i32))
            clamp = lambda x,hi: HIR.Clamp( x, HIR.Const(0,HIR.i32), m1(hi) )
            args = [ clamp( self._compile_expr(ie), bd.extent )
                     for ie,bd in zip(e.args, bds) ]
            Rfunc = self._ctxt.get( e.name )
            Acc = HIR.FAccess(Rfunc, args)
            return HIR.BinOp("!=",Acc,HIR.Const(0,HIR.u8))
        elif eclass is AST.Conj or eclass is AST.Disj:
            op = "and" if eclass is AST.Conj else "or"
            lhs = self._compile_expr(e.lhs)
            rhs = self._compile_expr(e.rhs)
            return HIR.BinOp(op, lhs, rhs)
| [
"gilbert@gilbertbernstein.com"
] | gilbert@gilbertbernstein.com |
71144ae74963ba84857ba71318cfb877dd80fabe | d01660d73df1ddd594304e2d761945209ef9b6a4 | /src/UIDonation.py | 7e74d8d35c5e0f0587852a105e036e49d5eb35b0 | [
"MIT"
] | permissive | cpepe/Astibot | 791df87cd7093dba3c7a9d448744c79f3114298d | 7a54c14b4334d1373fcb13c366102c4633405a52 | refs/heads/master | 2023-07-19T01:51:47.860698 | 2021-09-02T18:24:31 | 2021-09-02T18:24:31 | 400,248,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,370 | py | import math
from pyqtgraph.Qt import QtCore, QtGui
from PyQt5.QtWidgets import QFrame
from PyQt5.Qt import QIntValidator
from PyQt5.Qt import QDoubleValidator
import ctypes # Message box popup
import TradingBotConfig as theConfig
class UIDonation(QtGui.QWidget):
    """Modal donation window.

    Lets the user withdraw a BTC amount from their exchange account (via the
    transaction manager's TRNM_WithdrawBTC, which references a Coinbase Pro
    transfer in its success message) to the project's donation address.
    A 200 ms QTimer periodically refreshes the displayed BTC balance and
    enables/disables the donate button accordingly.
    """

    # Legal disclaimer text (displayed with an authorization checkbox elsewhere).
    STR_CHECKBOX_AUTHORIZATION_TEXT = "I accept to give the present software a full control of my account through the Application Programming Interface. It includes algorithm-based buying or selling of fiat or cryptocurrency money / assets. I understand the risks related to software-based trading and, by entering here my personal API keys access, I am the only responsible for the totality of every action that is performed by this software through the API system even in case of bug, undesired software behavior, unfavorable market, inappropriate buy or sell decision. I have trained myself in Simulation mode to understand the Software trading strategy and, by entering my API keys, I only give control to money / assets that I can afford to loose."
    # Qt stylesheets for the various widgets of this window.
    STR_BORDER_BLOCK_STYLESHEET = "QWidget {background-color : #151f2b;}"
    STR_QLABEL_STYLESHEET = "QLabel { background-color : #203044; color : white; font: 13px;}"
    STR_QLABEL_GREEN_STYLESHEET = "QLabel { background-color : #203044; color : #24b62e; font: bold 14px;}"
    STR_QLABEL_RED_STYLESHEET = "QLabel { background-color : #203044; color : #ff2e2e; font: bold 14px;}"
    STR_QLABEL_SMALL_STYLESHEET = "QLabel { background-color : #203044; color : #C2C2C2; font: 11px;}"
    STR_QCHECKBOX_STYLESHEET = "QCheckBox { background-color : #203044; color : white; font: 10px;}"
    STR_QCHECKBOX_LABEL_STYLESHEET = "QLabel { background-color : #203044; color : #C2C2C2; font: 10px;}"
    STR_QLABEL_TITLE_STYLESHEET = "QLabel { background-color : #203044; color : #81C6FE; font: bold 16px;}"
    STR_QFRAME_SEPARATOR_STYLESHEET = "background-color: rgb(20, 41, 58);"
    STR_QBUTTON_CLOSE_STYLESHEET = "QPushButton {background-color: #01599e; border-width: 2px; border-radius: 10px; border-color: white; font: bold 15px; color:white} QPushButton:pressed { background-color: #1d8d24 } QPushButton:hover { background-color: #002c4f }"
    STR_QBUTTON_WITHDRAW_ENABLED_STYLESHEET = "QPushButton {background-color: #23b42c; border-width: 2px; border-radius: 10px; border-color: white; font: bold 13px; color:white} QPushButton:pressed { background-color: #1d8d24 } QPushButton:hover { background-color: #1a821f }"
    STR_QBUTTON_WITHDRAW_DISABLED_STYLESHEET = "QPushButton {background-color: #9f9f9f; border-width: 2px; border-radius: 10px; border-color: white; font: bold 13px; color:white}"
    STR_QTEXTEDIT_STYLESHEET = "QLineEdit { background-color : #203044; color : white; font: bold 13px; border: 1px solid white; border-radius: 4px;} QLineEdit:focus {border: 2px solid #007ad9;}"
    # Width reserved for right-hand labels, in pixels (unused in this class as
    # visible here -- presumably shared with sibling UI classes; confirm).
    RIGHT_LABELS_WIDTH_IN_PX = 75

    def __init__(self, settings):
        """Build the window, start the balance-refresh timer, and lay out widgets.

        Args:
            settings: application settings object, kept as `theSettings`.
        """
        # Here, you should call the inherited class' init, which is QDialog
        QtGui.QWidget.__init__(self)
        print("UIDO - UI Donating constructor")
        # Application settings data instance
        self.theSettings = settings
        # Functional
        # BTCBalance starts at -1.0 as a "not yet retrieved" sentinel.
        self.BTCBalance = -1.0
        self.windowIsShown = False
        # Refresh displayed balance every 200 ms while the window is shown.
        self.timerRefreshBTCBalance = QtCore.QTimer()
        self.timerRefreshBTCBalance.timeout.connect(self.TimerRaisedRefreshBTCBalance)
        self.timerRefreshBTCBalance.start(200)
        self.withdrawHasBeenPerformed = False
        # Window settings
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.setWindowTitle('Astibot')
        self.setStyleSheet("background-color:#203044;")
        self.setWindowIcon(QtGui.QIcon("AstibotIcon.png"))
        self.setAutoFillBackground(True);
        self.setFixedSize(450, 350)
        # Build layout
        self.BuildWindowLayout()

    def EventWithdrawButtonClick(self):
        """Handle a click on the donate button: perform the BTC withdraw.

        Disables the button for the rest of this window session and reports
        success or failure via a message box. Note: the withdraw call blocks
        the UI loop, hence the explicit processEvents() beforehand.
        """
        print("UIDO - Withdraw Click")
        # Set to True to keep Withdraw button disabled during transaction
        self.withdrawHasBeenPerformed = True
        self.SetWithdrawEnabled(False)
        self.btnWithdrawForDonating.setText("Withdrawing...")
        QtGui.QApplication.processEvents() # Force UI to update previous lines, because we will block main UI loop
        # Perform withdraw
        withdrawRequestReturn = self.theTransactionManager.TRNM_WithdrawBTC(theConfig.CONFIG_BTC_DESTINATION_ADDRESS, float(self.txtDonationAmountEntry.text()))
        if (withdrawRequestReturn != "Error"):
            self.btnWithdrawForDonating.setText("Withdraw successful!")
            self.MessageBoxPopup("Your donation has been successfully sent: Thank you! Coinbase Pro Transfer ID is %s" % withdrawRequestReturn, 0)
        else:
            self.MessageBoxPopup("The withdraw failed, you will not be charged. Make sure you authorized the transfer feature when creating your API key.", 0)
            self.btnWithdrawForDonating.setText("Withdraw failed")

    def EventCloseButtonClick(self):
        """Handle a click on the Close button: hide the window."""
        print("UIDO - Close Click")
        self.HideWindow()

    def TimerRaisedRefreshBTCBalance(self):
        """Periodic (200 ms) refresh of the displayed balance.

        Only active while the window is shown. Enables the donate button when
        the balance covers the entered amount and that amount is at least the
        configured minimum; any non-numeric entry disables the button via the
        ValueError handler.
        """
        if (self.windowIsShown == True):
            # Retrieve balance data
            self.BTCBalance = self.theTransactionManager.TRNM_getBTCBalance()
            # Fast account refresh required in case the user would currently be withdrawing money, he would like to quickly see the update on the UI
            self.theTransactionManager.TRNM_ForceAccountsUpdate()
            try:
                if (float(self.BTCBalance) >= float(self.txtDonationAmountEntry.text()) and (float(self.txtDonationAmountEntry.text()) >= theConfig.MIN_CRYPTO_AMOUNT_REQUESTED_TO_SELL)):
                    # If donation has just been performed, do not enable Withdraw button again
                    if (self.withdrawHasBeenPerformed == False):
                        self.SetWithdrawEnabled(True)
                    self.lblAvailableBTCBalance.setText("%s BTC" % str(round(float(self.BTCBalance), 7)))
                else:
                    self.SetWithdrawEnabled(False)
                    self.lblAvailableBTCBalance.setText("%s BTC" % str(round(float(self.BTCBalance), 7)))
                self.btnWithdrawForDonating.setText("Donate %s BTC" % self.txtDonationAmountEntry.text())
            except ValueError:
                self.SetWithdrawEnabled(False)

    def SetWithdrawEnabled(self, bEnable):
        """Enable/disable the donate button and swap its stylesheet to match."""
        if (bEnable == True):
            self.btnWithdrawForDonating.setStyleSheet(self.STR_QBUTTON_WITHDRAW_ENABLED_STYLESHEET)
        else:
            self.btnWithdrawForDonating.setStyleSheet(self.STR_QBUTTON_WITHDRAW_DISABLED_STYLESHEET)
        self.btnWithdrawForDonating.setEnabled(bEnable)

    ## Styles:
    ## 0 : OK
    ## 1 : OK | Cancel
    ## 2 : Abort | Retry | Ignore
    ## 3 : Yes | No | Cancel
    ## 4 : Yes | No
    ## 5 : Retry | No
    ## 6 : Cancel | Try Again | Continue
    def MessageBoxPopup(self, text, style):
        """Show a native Windows message box (MessageBoxW); returns the
        pressed-button code. Windows-only (uses ctypes.windll)."""
        title = "Astibot Donating"
        return ctypes.windll.user32.MessageBoxW(0, text, title, style)

    def BuildWindowLayout(self):
        """Construct all widgets and lay them out in the window grid.

        Layout is a 3x3 root grid: thin border blocks on all four sides and
        the main content grid in the centre cell (1,1).
        """
        self.rootGridLayout = QtGui.QGridLayout()
        self.rootGridLayout.setContentsMargins(0, 0, 0, 0)
        self.mainGridLayout = QtGui.QGridLayout()
        self.mainGridLayout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.rootGridLayout)
        self.rootGridLayout.addLayout(self.mainGridLayout, 1, 1)
        rowNumber = 0
        # Root left and right
        self.rootLeftBlock = QtGui.QWidget()
        self.rootLeftBlock.setStyleSheet(self.STR_BORDER_BLOCK_STYLESHEET)
        self.rootLeftBlock.setFixedWidth(20)
        self.rootRightBlock = QtGui.QWidget()
        self.rootRightBlock.setStyleSheet(self.STR_BORDER_BLOCK_STYLESHEET)
        self.rootRightBlock.setFixedWidth(20)
        self.rootGridLayout.addWidget(self.rootLeftBlock, 0, 0, 3, 1)
        self.rootGridLayout.addWidget(self.rootRightBlock, 0, 2, 3, 1)
        # Root top and bottom
        self.rootTopBlock = QtGui.QWidget()
        self.rootTopBlock.setStyleSheet(self.STR_BORDER_BLOCK_STYLESHEET)
        self.rootTopBlock.setFixedHeight(20)
        self.rootBottomBlock = QtGui.QWidget()
        self.rootBottomBlock.setStyleSheet(self.STR_BORDER_BLOCK_STYLESHEET)
        self.rootBottomBlock.setFixedHeight(60)
        self.rootGridLayout.addWidget(self.rootTopBlock, 0, 0, 1, 3)
        self.rootGridLayout.addWidget(self.rootBottomBlock, 2, 0, 1, 3)
        # Body layout ===========================================================
        self.lblTitleDonating = QtGui.QLabel("Donate & Contribute to Astibot project")
        self.lblTitleDonating.setStyleSheet(self.STR_QLABEL_TITLE_STYLESHEET);
        self.mainGridLayout.addWidget(self.lblTitleDonating, rowNumber, 0, 1, 2)
        rowNumber = rowNumber + 1
        self.lblSubTitleDonating = QtGui.QLabel("If you like this project or if you make money with it: please donate to help me make this software better!")
        self.lblSubTitleDonating.setStyleSheet(self.STR_QLABEL_STYLESHEET);
        self.lblSubTitleDonating.setWordWrap(True)
        self.mainGridLayout.addWidget(self.lblSubTitleDonating, rowNumber, 0, 1, 2)
        rowNumber = rowNumber + 1
        # Available BTC Balance
        self.lblAvailableBTCBalanceText = QtGui.QLabel("<b>Available BTC Balance:</b>")
        self.lblAvailableBTCBalanceText.setStyleSheet(self.STR_QLABEL_STYLESHEET);
        self.lblAvailableBTCBalanceText.setFixedHeight(28)
        if (self.BTCBalance >= 0):
            # NOTE(review): round() here has no ndigits, so the balance is
            # rounded to a whole BTC -- elsewhere round(..., 7) is used;
            # confirm which precision is intended.
            if (self.BTCBalance >= theConfig.CONFIG_DONATION_DEFAULT_AMOUNT_IN_BTC):
                self.lblAvailableBTCBalance = QtGui.QLabel("%s BTC" % str(round(float(self.BTCBalance))))
            else:
                self.lblAvailableBTCBalance = QtGui.QLabel("%s BTC (insufficient funds)" % str(round(float(self.BTCBalance))))
        else:
            # Balance not yet retrieved (sentinel -1.0).
            self.lblAvailableBTCBalance = QtGui.QLabel("-- BTC")
        self.lblAvailableBTCBalance.setStyleSheet(self.STR_QLABEL_STYLESHEET);
        self.mainGridLayout.addWidget(self.lblAvailableBTCBalanceText, rowNumber, 0)
        self.mainGridLayout.addWidget(self.lblAvailableBTCBalance, rowNumber, 1)
        rowNumber = rowNumber + 1
        # Donation amount entry
        self.lblYourDonation = QtGui.QLabel("<b>Your donation (BTC):</b>")
        self.lblYourDonation.setStyleSheet(self.STR_QLABEL_STYLESHEET);
        self.lblYourDonation.setFixedHeight(28)
        self.txtDonationAmountEntry = QtGui.QLineEdit()
        self.txtDonationAmountEntry.setStyleSheet(self.STR_QTEXTEDIT_STYLESHEET)
        self.txtDonationAmountEntry.setFixedWidth(80)
        self.txtDonationAmountEntry.setText(str(theConfig.CONFIG_DONATION_DEFAULT_AMOUNT_IN_BTC))
        #self.txtDonationAmountEntry.changeEvent.connect(self.EventDonationAmountEntryChanged)
        self.mainGridLayout.addWidget(self.lblYourDonation, rowNumber, 0)
        self.mainGridLayout.addWidget(self.txtDonationAmountEntry, rowNumber, 1)
        rowNumber = rowNumber + 1
        # Withdraw button
        self.btnWithdrawForDonating = QtGui.QPushButton("Donate %s BTC" % theConfig.CONFIG_DONATION_DEFAULT_AMOUNT_IN_BTC)
        self.btnWithdrawForDonating.setStyleSheet(self.STR_QBUTTON_WITHDRAW_DISABLED_STYLESHEET)
        self.btnWithdrawForDonating.setFixedHeight(35)
        self.btnWithdrawForDonating.setFixedWidth(240)
        self.btnWithdrawForDonating.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.btnWithdrawForDonating.clicked.connect(self.EventWithdrawButtonClick)
        self.mainGridLayout.addWidget(self.btnWithdrawForDonating, rowNumber, 0, 1, 2, QtCore.Qt.AlignCenter)
        rowNumber = rowNumber + 1
        # Bottom buttons
        self.btnClose = QtGui.QPushButton("Close")
        self.btnClose.setStyleSheet(self.STR_QBUTTON_CLOSE_STYLESHEET)
        self.btnClose.setFixedWidth(120)
        self.btnClose.setFixedHeight(38)
        self.btnClose.clicked.connect(self.EventCloseButtonClick)
        self.btnClose.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.hBoxBottomButtons = QtGui.QHBoxLayout()
        self.hBoxBottomButtons.addWidget(self.btnClose, QtCore.Qt.AlignRight)
        self.rootBottomBlock.setLayout(self.hBoxBottomButtons)
        rowNumber = rowNumber + 1

    def UIDO_ShowWindow(self):
        """Show the window and re-arm the donate flow for a new session."""
        print("UIDO - Show")
        self.windowIsShown = True
        self.withdrawHasBeenPerformed = False
        # Force refresh
        self.TimerRaisedRefreshBTCBalance()
        self.show()

    def HideWindow(self):
        """Hide the window (stops the timer callback from doing any work)."""
        self.windowIsShown = False
        self.hide()

    def UIDO_SetTransactionManager(self, transactionManager):
        """Inject the transaction manager used for balance reads and withdraws."""
        self.theTransactionManager = transactionManager
| [
"noreply@github.com"
] | cpepe.noreply@github.com |
9ef068591ef73db8438f5bb5d3d6af12f69f50e4 | a3e5717ff98340e81a1b0c58f1b59dea55392299 | /nemo/collections/asr/models/classification_models.py | 5b331e0707d9bd30c67b53dc517e365aea06d001 | [
"Apache-2.0"
] | permissive | Tpt/NeMo | 92a11ff79f99f79609b073932491822cab1d3f07 | e816905e286df545ce344e865ea6cf8e4b0c85ca | refs/heads/main | 2023-04-06T03:41:15.246247 | 2021-03-09T22:47:18 | 2021-03-09T22:47:18 | 346,297,599 | 0 | 0 | Apache-2.0 | 2021-03-10T09:15:22 | 2021-03-10T09:15:21 | null | UTF-8 | Python | false | false | 31,493 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import tempfile
from abc import abstractmethod
from math import ceil
from typing import Dict, List, Optional, Union
import onnx
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.metrics.regression import MeanAbsoluteError, MeanSquaredError
from nemo.collections.asr.data import audio_to_label_dataset
from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.collections.asr.parts.perturb import process_augmentations
from nemo.collections.common.losses import CrossEntropyLoss, MSELoss
from nemo.collections.common.metrics import TopKClassificationAccuracy
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import *
from nemo.utils import logging, model_utils
__all__ = ['EncDecClassificationModel', 'EncDecRegressionModel', 'MatchboxNet']
class _EncDecBaseModel(ASRModel, ExportableEncDecModel):
    """Base class for encoder-decoder audio classification/regression models.

    Subclasses supply the preprocessor, encoder, decoder, loss, and metrics
    via the abstract ``_setup_*`` hooks; this class provides the shared
    forward pass, dataloader construction, and file-level transcription.
    """

    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        # Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
        # Global_rank and local_rank is set by LightningModule in Lightning 1.2.0
        self.world_size = 1
        if trainer is not None:
            self.world_size = trainer.num_nodes * trainer.num_gpus
        # Convert config to a DictConfig
        cfg = model_utils.convert_model_config_to_dict_config(cfg)
        # Convert config to support Hydra 1.0+ instantiation
        cfg = model_utils.maybe_update_config_version(cfg)
        self.is_regression_task = cfg.get('is_regression_task', False)
        # Change labels if needed
        self._update_decoder_config(cfg.labels, cfg.decoder)
        super().__init__(cfg=cfg, trainer=trainer)
        # Optional augmentations: spectrogram masking and crop/pad.
        if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:
            self.spec_augmentation = ASRModel.from_config_dict(self._cfg.spec_augment)
        else:
            self.spec_augmentation = None
        if hasattr(self._cfg, 'crop_or_pad_augment') and self._cfg.crop_or_pad_augment is not None:
            self.crop_or_pad = ASRModel.from_config_dict(self._cfg.crop_or_pad_augment)
        else:
            self.crop_or_pad = None
        # Subclass hooks build the actual network components.
        self.preprocessor = self._setup_preprocessor()
        self.encoder = self._setup_encoder()
        self.decoder = self._setup_decoder()
        self.loss = self._setup_loss()
        self._setup_metrics()

    @abstractmethod
    def _setup_preprocessor(self):
        """
        Setup preprocessor for audio data
        Returns: Preprocessor

        """
        pass

    @abstractmethod
    def _setup_encoder(self):
        """
        Setup encoder for the Encoder-Decoder network
        Returns: Encoder
        """
        pass

    @abstractmethod
    def _setup_decoder(self):
        """
        Setup decoder for the Encoder-Decoder network
        Returns: Decoder
        """
        pass

    @abstractmethod
    def _setup_loss(self):
        """
        Setup loss function for training
        Returns: Loss function

        """
        pass

    @abstractmethod
    def _setup_metrics(self):
        """
        Setup metrics to be tracked in addition to loss
        Returns: void

        """
        pass

    @property
    def input_types(self) -> Optional[Dict[str, NeuralType]]:
        # Use the preprocessor's sample rate for the audio type when available.
        if hasattr(self.preprocessor, '_sample_rate'):
            audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
        else:
            audio_eltype = AudioSignal()
        return {
            "input_signal": NeuralType(('B', 'T'), audio_eltype),
            "input_signal_length": NeuralType(tuple('B'), LengthsType()),
        }

    @property
    @abstractmethod
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        pass

    def forward(self, input_signal, input_signal_length):
        """Preprocess -> (crop/pad) -> (spec augment, train only) -> encode -> decode.

        Returns the decoder logits for the batch.
        """
        processed_signal, processed_signal_len = self.preprocessor(
            input_signal=input_signal, length=input_signal_length,
        )
        # Crop or pad is always applied
        if self.crop_or_pad is not None:
            processed_signal, processed_signal_len = self.crop_or_pad(
                input_signal=processed_signal, length=processed_signal_len
            )
        # Spec augment is not applied during evaluation/testing
        if self.spec_augmentation is not None and self.training:
            processed_signal = self.spec_augmentation(input_spec=processed_signal)
        encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)
        logits = self.decoder(encoder_output=encoded)
        return logits

    def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
        """Build the training dataloader from config (shuffle defaults to True)."""
        if 'shuffle' not in train_data_config:
            train_data_config['shuffle'] = True

        # preserve config
        self._update_dataset_config(dataset_name='train', config=train_data_config)

        self._train_dl = self._setup_dataloader_from_config(config=DictConfig(train_data_config))

        # Need to set this because if using an IterableDataset, the length of the dataloader is the total number
        # of samples rather than the number of batches, and this messes up the tqdm progress bar.
        # So we set the number of steps manually (to the correct number) to fix this.
        if 'is_tarred' in train_data_config and train_data_config['is_tarred']:
            # We also need to check if limit_train_batches is already set.
            # If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
            # and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
            if isinstance(self._trainer.limit_train_batches, float):
                self._trainer.limit_train_batches = int(
                    self._trainer.limit_train_batches
                    * ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
                )

    def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
        """Build the validation dataloader from config (shuffle defaults to False)."""
        if 'shuffle' not in val_data_config:
            val_data_config['shuffle'] = False

        # preserve config
        self._update_dataset_config(dataset_name='validation', config=val_data_config)

        self._validation_dl = self._setup_dataloader_from_config(config=DictConfig(val_data_config))

    def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
        """Build the test dataloader from config (shuffle defaults to False)."""
        if 'shuffle' not in test_data_config:
            test_data_config['shuffle'] = False

        # preserve config
        self._update_dataset_config(dataset_name='test', config=test_data_config)

        self._test_dl = self._setup_dataloader_from_config(config=DictConfig(test_data_config))

    def test_dataloader(self):
        # Returns the test dataloader if one was set up (implicitly None otherwise).
        if self._test_dl is not None:
            return self._test_dl

    def _setup_dataloader_from_config(self, config: DictConfig):
        """Create a DataLoader for (possibly tarred) audio-label datasets.

        Handles three dataset flavors: tarred classification datasets,
        streaming frame-level VAD datasets (batch size forced to 1), and
        plain classification-label datasets. Returns None if manifest paths
        are missing or the combination is unsupported (VAD + tarred).
        """
        OmegaConf.set_struct(config, False)
        config.is_regression_task = self.is_regression_task
        OmegaConf.set_struct(config, True)

        if 'augmentor' in config:
            augmentor = process_augmentations(config['augmentor'])
        else:
            augmentor = None

        featurizer = WaveformFeaturizer(
            sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor
        )
        shuffle = config['shuffle']

        # Instantiate tarred dataset loader or normal dataset loader
        if config.get('is_tarred', False):
            if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
                'manifest_filepath' in config and config['manifest_filepath'] is None
            ):
                logging.warning(
                    "Could not load dataset as `manifest_filepath` is None or "
                    f"`tarred_audio_filepaths` is None. Provided config : {config}"
                )
                return None

            if 'vad_stream' in config and config['vad_stream']:
                logging.warning("VAD inference does not support tarred dataset now")
                return None

            shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
            dataset = audio_to_label_dataset.get_tarred_classification_label_dataset(
                featurizer=featurizer,
                config=OmegaConf.to_container(config),
                shuffle_n=shuffle_n,
                global_rank=self.global_rank,
                world_size=self.world_size,
            )
            # Tarred datasets handle shuffling internally via shuffle_n.
            shuffle = False
            batch_size = config['batch_size']
            collate_func = dataset.collate_fn
        else:
            if 'manifest_filepath' in config and config['manifest_filepath'] is None:
                logging.warning(f"Could not load dataset as `manifest_filepath` is None. Provided config : {config}")
                return None

            if 'vad_stream' in config and config['vad_stream']:
                logging.info("Perform streaming frame-level VAD")
                dataset = audio_to_label_dataset.get_speech_label_dataset(
                    featurizer=featurizer, config=OmegaConf.to_container(config)
                )
                batch_size = 1
                collate_func = dataset.vad_frame_seq_collate_fn
            else:
                dataset = audio_to_label_dataset.get_classification_label_dataset(
                    featurizer=featurizer, config=OmegaConf.to_container(config)
                )
                batch_size = config['batch_size']
                collate_func = dataset.collate_fn

        return torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            collate_fn=collate_func,
            drop_last=config.get('drop_last', False),
            shuffle=shuffle,
            num_workers=config.get('num_workers', 0),
            pin_memory=config.get('pin_memory', False),
        )

    @torch.no_grad()
    def transcribe(self, paths2audio_files: List[str], batch_size: int = 4, logprobs=False) -> List[str]:
        """
        Generate class labels for provided audio files. Use this method for debugging and prototyping.

        Args:
            paths2audio_files: (a list) of paths to audio files. \
        Recommended length per file is approximately 1 second.
            batch_size: (int) batch size to use during inference. \
        Bigger will result in better throughput performance but would use more memory.
            logprobs: (bool) pass True to get log probabilities instead of class labels.

        Returns:

            A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files
        """
        if paths2audio_files is None or len(paths2audio_files) == 0:
            return []
        # We will store transcriptions here
        labels = []
        # Model's mode and device
        mode = self.training
        device = next(self.parameters()).device
        dither_value = self.preprocessor.featurizer.dither
        pad_to_value = self.preprocessor.featurizer.pad_to

        try:
            # Disable dither/padding for deterministic inference; restored in finally.
            self.preprocessor.featurizer.dither = 0.0
            self.preprocessor.featurizer.pad_to = 0

            # Switch model to evaluation mode
            self.eval()
            logging_level = logging.get_verbosity()
            logging.set_verbosity(logging.WARNING)
            # Work in tmp directory - will store manifest file there
            with tempfile.TemporaryDirectory() as tmpdir:
                with open(os.path.join(tmpdir, 'manifest.json'), 'w') as fp:
                    for audio_file in paths2audio_files:
                        # Placeholder label: the dataset requires one but it is unused here.
                        label = 0.0 if self.is_regression_task else self.cfg.labels[0]
                        entry = {'audio_filepath': audio_file, 'duration': 100000.0, 'label': label}
                        fp.write(json.dumps(entry) + '\n')

                config = {'paths2audio_files': paths2audio_files, 'batch_size': batch_size, 'temp_dir': tmpdir}

                temporary_datalayer = self._setup_transcribe_dataloader(config)
                for test_batch in temporary_datalayer:
                    logits = self.forward(
                        input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
                    )

                    if logprobs:
                        # dump log probs per file
                        for idx in range(logits.shape[0]):
                            labels.append(logits[idx])
                    else:
                        # Collect predicted labels for each configured top-k.
                        labels_k = []
                        top_ks = self._accuracy.top_k
                        for top_k_i in top_ks:
                            # replace top k value with current top k
                            self._accuracy.top_k = top_k_i
                            labels_k_i = self._accuracy.top_k_predicted_labels(logits)
                            labels_k.append(labels_k_i)

                        # convenience: if only one top_k, pop out the nested list
                        if len(top_ks) == 1:
                            labels_k = labels_k[0]

                        labels += labels_k
                        # reset top k to orignal value
                        self._accuracy.top_k = top_ks
                    del test_batch
        finally:
            # set mode back to its original value
            self.train(mode=mode)

            self.preprocessor.featurizer.dither = dither_value
            self.preprocessor.featurizer.pad_to = pad_to_value

            logging.set_verbosity(logging_level)
        return labels

    def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
        """
        Setup function for a temporary data loader which wraps the provided audio file.

        Args:
            config: A python dictionary which contains the following keys:
                paths2audio_files: (a list) of paths to audio files
                batch_size: (int) batch size to use during inference
                temp_dir: (str) directory containing the temporary 'manifest.json'

        Returns:
            A pytorch DataLoader for the given audio file(s).
        """
        dl_config = {
            'manifest_filepath': os.path.join(config['temp_dir'], 'manifest.json'),
            'sample_rate': self.preprocessor._sample_rate,
            'labels': self.cfg.labels,
            'batch_size': min(config['batch_size'], len(config['paths2audio_files'])),
            'trim_silence': False,
            'shuffle': False,
        }

        temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
        return temporary_datalayer

    @abstractmethod
    def _update_decoder_config(self, labels, cfg):
        pass
class EncDecClassificationModel(_EncDecBaseModel):
"""Encoder decoder Classification models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if cfg.get("is_regression_task", False):
raise ValueError(f"EndDecClassificationModel requires the flag is_regression_task to be set as false")
super().__init__(cfg=cfg, trainer=trainer)
    def _setup_preprocessor(self):
        """Instantiate the audio preprocessor from the model config."""
        return EncDecClassificationModel.from_config_dict(self._cfg.preprocessor)
    def _setup_encoder(self):
        """Instantiate the encoder network from the model config."""
        return EncDecClassificationModel.from_config_dict(self._cfg.encoder)
    def _setup_decoder(self):
        """Instantiate the decoder network from the model config."""
        return EncDecClassificationModel.from_config_dict(self._cfg.decoder)
    def _setup_loss(self):
        """Classification uses standard cross-entropy loss."""
        return CrossEntropyLoss()
    def _setup_metrics(self):
        """Track top-k classification accuracy, synced across workers per step."""
        self._accuracy = TopKClassificationAccuracy(dist_sync_on_step=True)
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v1.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.32% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x2x64-v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v1.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.68% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 35 classes) which obtains 97.12% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 30 classes) which obtains 97.29% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x1x64-v2-subset-task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2-subset-task.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.2% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-3x2x64-v2-subset-task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v2-subset-task.nemo",
description="MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.4% accuracy on test set.",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="MatchboxNet-VAD-3x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet_VAD_3x2.nemo",
description="Voice Activity Detection MatchboxNet model trained on google speech command (v2) and freesound background data, which obtains 0.992 accuracy on testset from same source and 0.852 TPR for FPR=0.315 on testset (ALL) of AVA movie data",
)
result.append(model)
return result
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
    """Neural-type signature of the model output: a (batch, dim) tensor of class logits."""
    return {"outputs": NeuralType(('B', 'D'), LogitsType())}
# PTL-specific methods
def training_step(self, batch, batch_nb):
    """Run one training step: forward pass, loss, and per-batch top-k accuracy logging."""
    self.training_step_end()
    signal, signal_len, labels, _labels_len = batch
    logits = self.forward(input_signal=signal, input_signal_length=signal_len)
    loss = self.loss(logits=logits, labels=labels)

    logs = {}
    logs['train_loss'] = loss
    logs['learning_rate'] = self._optimizer.param_groups[0]['lr']

    # Accumulate batch statistics, then report every configured top-k score.
    self._accuracy(logits=logits, labels=labels)
    for rank, score in enumerate(self._accuracy.compute()):
        logs[f'training_batch_accuracy_top@{rank}'] = score

    return {'loss': loss, 'log': logs}
def validation_step(self, batch, batch_idx, dataloader_idx=0):
    """Compute loss and accuracy statistics for a single validation batch."""
    signal, signal_len, labels, _labels_len = batch
    logits = self.forward(input_signal=signal, input_signal_length=signal_len)
    loss = self.loss(logits=logits, labels=labels)
    acc = self._accuracy(logits=logits, labels=labels)
    # Counts are carried per batch so the epoch hook can re-aggregate them.
    return {
        'val_loss': loss,
        'val_correct_counts': self._accuracy.correct_counts_k,
        'val_total_counts': self._accuracy.total_counts_k,
        'val_acc': acc,
    }
def test_step(self, batch, batch_idx, dataloader_idx=0):
    """Compute loss and accuracy statistics for a single test batch."""
    signal, signal_len, labels, _labels_len = batch
    logits = self.forward(input_signal=signal, input_signal_length=signal_len)
    loss = self.loss(logits=logits, labels=labels)
    acc = self._accuracy(logits=logits, labels=labels)
    # Counts are carried per batch so the epoch hook can re-aggregate them.
    return {
        'test_loss': loss,
        'test_correct_counts': self._accuracy.correct_counts_k,
        'test_total_counts': self._accuracy.total_counts_k,
        'test_acc': acc,
    }
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
    """Aggregate per-batch validation outputs into epoch-level loss and top-k scores."""
    loss_mean = torch.stack([step['val_loss'] for step in outputs]).mean()
    # Rebuild the accuracy metric state from the summed per-batch counts.
    self._accuracy.correct_counts_k = torch.stack([step['val_correct_counts'] for step in outputs]).sum(axis=0)
    self._accuracy.total_counts_k = torch.stack([step['val_total_counts'] for step in outputs]).sum(axis=0)
    scores = self._accuracy.compute()

    log = {'val_loss': loss_mean}
    for k, score in zip(self._accuracy.top_k, scores):
        log['val_epoch_top@{}'.format(k)] = score
    return {'log': log}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
    """Aggregate per-batch test outputs into epoch-level loss and top-k scores."""
    loss_mean = torch.stack([step['test_loss'] for step in outputs]).mean()
    # Rebuild the accuracy metric state from the summed per-batch counts.
    # (Counts are unsqueezed here, mirroring how they arrive from test batches.)
    self._accuracy.correct_counts_k = torch.stack(
        [step['test_correct_counts'].unsqueeze(0) for step in outputs]).sum(axis=0)
    self._accuracy.total_counts_k = torch.stack(
        [step['test_total_counts'].unsqueeze(0) for step in outputs]).sum(axis=0)
    scores = self._accuracy.compute()

    log = {'test_loss': loss_mean}
    for k, score in zip(self._accuracy.top_k, scores):
        log['test_epoch_top@{}'.format(k)] = score
    return {'log': log}
@typecheck()
def forward(self, input_signal, input_signal_length):
    # Delegate to the base encoder/decoder forward pass and return the logits unchanged.
    logits = super().forward(input_signal=input_signal, input_signal_length=input_signal_length)
    return logits
def change_labels(self, new_labels: List[str]):
    """
    Changes labels used by the decoder model. Use this method when fine-tuning on from pre-trained model.
    This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would
    use it if you want to use pretrained encoder when fine-tuning on a data in another dataset.

    If new_labels == self.decoder.vocabulary then nothing will be changed.

    Args:
        new_labels: list with new labels. Must contain at least 2 elements. Typically, \
            this is set of labels for the dataset.

    Returns: None
    """
    # Normalize plain lists to ListConfig so the == comparison below is apples-to-apples.
    if new_labels is not None and not isinstance(new_labels, ListConfig):
        new_labels = ListConfig(new_labels)

    if self._cfg.labels == new_labels:
        logging.warning(
            f"Old labels ({self._cfg.labels}) and new labels ({new_labels}) match. Not changing anything"
        )
    else:
        if new_labels is None or len(new_labels) == 0:
            raise ValueError(f'New labels must be non-empty list of labels. But I got: {new_labels}')

        # Update config
        self._cfg.labels = new_labels

        # Rebuild the decoder from a deep copy of its config with the new class count;
        # the old decoder is dropped, so its trained weights are discarded.
        decoder_config = self.decoder.to_config_dict()
        new_decoder_config = copy.deepcopy(decoder_config)
        self._update_decoder_config(new_labels, new_decoder_config)
        del self.decoder
        self.decoder = EncDecClassificationModel.from_config_dict(new_decoder_config)

        # Persist the new decoder config back into the model config (unlock, write, re-lock).
        OmegaConf.set_struct(self._cfg.decoder, False)
        self._cfg.decoder = new_decoder_config
        OmegaConf.set_struct(self._cfg.decoder, True)

        # Propagate the new label set to whichever dataset configs are present.
        if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
            self._cfg.train_ds.labels = new_labels

        if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
            self._cfg.validation_ds.labels = new_labels

        if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
            self._cfg.test_ds.labels = new_labels

        logging.info(f"Changed decoder output to {self.decoder.num_classes} labels.")
def _update_decoder_config(self, labels, cfg):
    """
    Set the decoder config's ``num_classes`` to match the given label set.

    Args:
        labels: The current labels of the model
        cfg: The config of the decoder which will be updated.
    """
    num_classes = len(labels)
    # Temporarily unlock the struct so the field can be written.
    OmegaConf.set_struct(cfg, False)
    target = cfg.params if 'params' in cfg else cfg
    target.num_classes = num_classes
    OmegaConf.set_struct(cfg, True)
class EncDecRegressionModel(_EncDecBaseModel):
    """Encoder decoder class for speech regression models.

    Model class creates training, validation methods for setting up data
    performing model forward pass.
    """

    @classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
        """
        This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.

        Returns:
            List of available pre-trained models.
        """
        # No pre-trained regression checkpoints are published yet.
        result = []
        return result

    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        # Guard: only configs that explicitly declare a regression task may build this class.
        if not cfg.get('is_regression_task', False):
            # Bug fix: the error message previously misspelled the class name as "EndDec...".
            raise ValueError(f"EncDecRegressionModel requires the flag is_regression_task to be set as true")
        super().__init__(cfg=cfg, trainer=trainer)

    def _setup_preprocessor(self):
        # Build the audio preprocessor from its config section.
        return EncDecRegressionModel.from_config_dict(self._cfg.preprocessor)

    def _setup_encoder(self):
        return EncDecRegressionModel.from_config_dict(self._cfg.encoder)

    def _setup_decoder(self):
        return EncDecRegressionModel.from_config_dict(self._cfg.decoder)

    def _setup_loss(self):
        # Regression is trained with mean squared error.
        return MSELoss()

    def _setup_metrics(self):
        # Track both MSE and MAE for validation/testing.
        self._mse = MeanSquaredError()
        self._mae = MeanAbsoluteError()

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        # One scalar regression value per example in the batch.
        return {"preds": NeuralType(tuple('B'), RegressionValuesType())}

    @typecheck()
    def forward(self, input_signal, input_signal_length):
        logits = super().forward(input_signal=input_signal, input_signal_length=input_signal_length)
        # Flatten the decoder output into a 1-D vector of per-example predictions.
        return logits.view(-1)

    # PTL-specific methods
    def training_step(self, batch, batch_idx):
        """One optimization step: forward pass, MSE loss, and MSE/MAE logging."""
        self.training_step_end()
        audio_signal, audio_signal_len, targets, targets_len = batch
        logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
        loss = self.loss(preds=logits, labels=targets)
        train_mse = self._mse(preds=logits, target=targets)
        train_mae = self._mae(preds=logits, target=targets)

        tensorboard_logs = {
            'train_loss': loss,
            'train_mse': train_mse,
            'train_mae': train_mae,
            'learning_rate': self._optimizer.param_groups[0]['lr'],
        }

        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):
        """Compute loss, MSE and MAE for one validation batch."""
        audio_signal, audio_signal_len, targets, targets_len = batch
        logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
        loss_value = self.loss(preds=logits, labels=targets)
        val_mse = self._mse(preds=logits, target=targets)
        val_mae = self._mae(preds=logits, target=targets)

        return {'val_loss': loss_value, 'val_mse': val_mse, 'val_mae': val_mae}

    def test_step(self, batch, batch_idx, dataloader_idx: int = 0):
        """Delegate to validation_step and re-key the val_* metrics for the test phase."""
        logs = self.validation_step(batch, batch_idx, dataloader_idx)
        # Bug fix: previously this read logs['test_mse'], a key validation_step
        # never produces, which raised KeyError on every test batch.
        return {'test_loss': logs['val_loss'], 'test_mse': logs['val_mse'], 'test_mae': logs['val_mae']}

    def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
        """Aggregate validation batches into epoch-level loss/MSE/MAE."""
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
        val_mse = self._mse.compute()
        val_mae = self._mae.compute()

        tensorboard_logs = {'val_loss': val_loss_mean, 'val_mse': val_mse, 'val_mae': val_mae}

        return {'val_loss': val_loss_mean, 'val_mse': val_mse, 'val_mae': val_mae, 'log': tensorboard_logs}

    def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
        """Aggregate test batches into epoch-level loss/MSE/MAE."""
        test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
        test_mse = self._mse.compute()
        test_mae = self._mae.compute()

        tensorboard_logs = {'test_loss': test_loss_mean, 'test_mse': test_mse, 'test_mae': test_mae}

        return {'test_loss': test_loss_mean, 'test_mse': test_mse, 'test_mae': test_mae, 'log': tensorboard_logs}

    @torch.no_grad()
    def transcribe(self, paths2audio_files: List[str], batch_size: int = 4) -> List[float]:
        """
        Generate class labels for provided audio files. Use this method for debugging and prototyping.

        Args:
            paths2audio_files: (a list) of paths to audio files. \
                Recommended length per file is approximately 1 second.
            batch_size: (int) batch size to use during inference. \
                Bigger will result in better throughput performance but would use more memory.

        Returns:
            A list of predictions in the same order as paths2audio_files
        """
        predictions = super().transcribe(paths2audio_files, batch_size, logprobs=True)
        return [float(pred) for pred in predictions]

    def _update_decoder_config(self, labels, cfg):
        # Regression always has a single output unit, regardless of `labels`.
        OmegaConf.set_struct(cfg, False)

        if 'params' in cfg:
            cfg.params.num_classes = 1
        else:
            cfg.num_classes = 1

        OmegaConf.set_struct(cfg, True)
class MatchboxNet(EncDecClassificationModel):
    """Empty subclass of EncDecClassificationModel (adds no behavior).

    NOTE(review): presumably kept as a named alias so existing checkpoints/configs
    referring to "MatchboxNet" keep working — confirm before removing.
    """

    pass
| [
"noreply@github.com"
] | Tpt.noreply@github.com |
4ff92b2ddacd27174d22952fad5f04a0b925d449 | f41637236b5f8c35a62c475e6b6bfb3026bc171e | /6.ArmstrongNumber.py | e968bdd227f58091ee6e15d68491bd05877660c3 | [] | no_license | rahulmis/ProblemSolvingWithPython | 5ba48984df21b9a0d5aa66bd07554e7e6d9a31ff | 2cffb7c1cc3e2a09a30541cb8a93f8ae4a868227 | refs/heads/master | 2023-01-20T07:47:52.453165 | 2020-11-28T07:25:15 | 2020-11-28T07:25:15 | 258,501,552 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | ####### 153 is ArmStrong ?
# 153 = (1**3)+(5**3)+(3**3)
# 153 = 1 + + 125 + 27
# 153 = 153 It Is a Armstrong Number
####### 21 is ArmStrong ?
# 21 = (2**2)+(1**2)
# 21 = 5 It Is Not a Armstrong Number
def ArmStrong(n1):
    """Scan 1..n1 and print every Armstrong number found.

    A number is an Armstrong number when the sum of its digits, each raised
    to the power of the digit count, equals the number itself (e.g. 153 =
    1**3 + 5**3 + 3**3). Non-Armstrong numbers are silently skipped.
    """
    for candidate in range(1, n1 + 1):
        digits = str(candidate)
        power = len(digits)
        total = sum(int(d) ** power for d in digits)
        if total == candidate:
            print("The NUmber Is a ArmStrong Number {}".format(candidate))
# Script entry point: read the upper bound from stdin (no validation; raises ValueError on non-numeric input).
n1 = int(input("Enter The Number Your Want To Check Armstrong Or Not ?"))
res = ArmStrong(n1) | [
"rahul.pm259.rm29@gmail.com"
] | rahul.pm259.rm29@gmail.com |
f0d12ffbb5b2a2710606cfa66dd46a0043678345 | daca7d8eaf207caf1413a46aae9dc2c86879d29b | /integration_tests/test_rally.py | 0bd7e019f16fef6ba0fb64e71ca487f39721849c | [] | no_license | OddBloke/centipede | aeba863605f310a3a7c5c06e9c0a31428a5d53e7 | 6fe3d73c79e93ada6d09b04029fadd8ae8682fed | refs/heads/master | 2016-09-06T10:05:13.397317 | 2011-12-20T17:25:47 | 2011-12-20T17:25:47 | 3,011,972 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import requests
from nose.tools import assert_true
def test_ui():
    # This test assumes the UI is running on http://127.0.0.1:8001 and is
    # configured to communicate with a Centipede instance configured with
    # Glasses Direct credentials. It also assumes that US26 has not been
    # modified since the test was written.
    page = requests.get('http://127.0.0.1:8001/tickets/view/US26/')
    expected_fragments = ['Drop down nav', 'Accepted', "As product manager",
                          'I need to be able to change']
    for fragment in expected_fragments:
        assert_true(fragment in page.content,
                    '"{0}" not on page.'.format(fragment))
def test_tasks():
    # Integration check: TA62 page should show the task summary, assignee and status.
    page = requests.get('http://127.0.0.1:8001/tickets/view/TA62/')
    expected_fragments = ['Document steps', 'Phil O', 'Completed']
    for fragment in expected_fragments:
        assert_true(fragment in page.content,
                    '"{0}" not on page.'.format(fragment))
| [
"daniel@daniel-watkins.co.uk"
] | daniel@daniel-watkins.co.uk |
eaea0c0d7d5806b6958ff04ebd0d712d4cb036af | abfea5e33f8807154d21ce27540a6de5281715b2 | /other_train/train_num_labels.py | 810954b5e5d0989b7c499de28e1e1bb89b448eb5 | [
"Apache-2.0"
] | permissive | aaxwaz/youtube-8m | c341fbc723f17e720f11bd57c392ae06ef44e0f7 | 3c3ceae83173d6b9eaef6072308a2804ba56bcf5 | refs/heads/master | 2021-09-20T08:36:12.886488 | 2018-08-07T03:15:50 | 2018-08-07T03:15:50 | 143,671,410 | 0 | 0 | Apache-2.0 | 2018-08-06T03:25:03 | 2018-08-06T03:25:03 | null | UTF-8 | Python | false | false | 28,661 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary for training Tensorflow models on the YouTube-8M dataset."""
import json
import os
import time
import eval_util
import export_model
import losses
import frame_level_models
import video_level_models
import readers
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
from tensorflow.python.client import device_lib
import utils
FLAGS = flags.FLAGS
# Silence TensorFlow's C++ logging below ERROR level.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# Flags are only registered when this file runs as the entry script, so that
# importing it as a module does not pollute the global flag namespace.
if __name__ == "__main__":
  # Dataset flags.
  flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
                      "The directory to save the model files in.")
  flags.DEFINE_string(
      "train_data_pattern", "",
      "File glob for the training dataset. If the files refer to Frame Level "
      "features (i.e. tensorflow.SequenceExample), then set --reader_type "
      "format. The (Sequence)Examples are expected to have 'rgb' byte array "
      "sequence feature as well as a 'labels' int64 context feature.")
  flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature "
                      "to use for training.")
  flags.DEFINE_string("feature_sizes", "1024", "Length of the feature vectors.")

  # Model flags.
  flags.DEFINE_bool(
      "frame_features", False,
      "If set, then --train_data_pattern must be frame-level features. "
      "Otherwise, --train_data_pattern must be aggregated video-level "
      "features. The model must also be set appropriately (i.e. to read 3D "
      "batches VS 4D batches.")
  flags.DEFINE_string(
      "model", "LogisticModel",
      "Which architecture to use for the model. Models are defined "
      "in models.py.")
  flags.DEFINE_bool(
      "start_new_model", False,
      "If set, this will not resume from a checkpoint and will instead create a"
      " new model instance.")

  # Training flags.
  flags.DEFINE_integer("num_gpu", 1,
                       "The maximum number of GPU devices to use for training. "
                       "Flag only applies if GPUs are installed")
  flags.DEFINE_integer("batch_size", 1024,
                       "How many examples to process per batch for training.")
  flags.DEFINE_string("label_loss", "CrossEntropyLoss",
                      "Which loss function to use for training the model.")
  flags.DEFINE_float(
      "regularization_penalty", 1.0,
      "How much weight to give to the regularization loss (the label loss has "
      "a weight of 1).")
  flags.DEFINE_float("base_learning_rate", 0.01,
                     "Which learning rate to start with.")
  flags.DEFINE_float("learning_rate_decay", 0.95,
                     "Learning rate decay factor to be applied every "
                     "learning_rate_decay_examples.")
  flags.DEFINE_float("learning_rate_decay_examples", 4000000,
                     "Multiply current learning rate by learning_rate_decay "
                     "every learning_rate_decay_examples.")
  flags.DEFINE_integer("num_epochs", 5,
                       "How many passes to make over the dataset before "
                       "halting training.")
  flags.DEFINE_integer("max_steps", None,
                       "The maximum number of iterations of the training loop.")
  flags.DEFINE_integer("export_model_steps", 10000000000,
                       "The period, in number of steps, with which the model "
                       "is exported for batch prediction.")
  flags.DEFINE_float("save_checkpoint_every_n_hour", 0.4,
                     "Save the checkpoint every n hours.")
  flags.DEFINE_integer("validate_every_n_training_steps", 100,
                       "eval on training for every n steps")

  # Other flags.
  flags.DEFINE_integer("num_readers", 12,
                       "How many threads to use for reading input files.")
  flags.DEFINE_string("optimizer", "AdamOptimizer",
                      "What optimizer class to use.")
  flags.DEFINE_float("clip_gradient_norm", 1.0, "Norm to clip gradients to.")
  flags.DEFINE_bool(
      "log_device_placement", False,
      "Whether to write the device on which every op will run into the "
      "logs on startup.")
def validate_class_name(flag_value, category, modules, expected_superclass):
  """Checks that the given string matches a class of the expected type.

  Scans `modules` in order; the first module that defines an attribute named
  `flag_value` decides the outcome.

  Args:
    flag_value: A string naming the class to instantiate.
    category: A string used further describe the class in error messages
      (e.g. 'model', 'reader', 'loss').
    modules: A list of modules to search for the given class.
    expected_superclass: A class that the given class should inherit from.

  Raises:
    FlagsError: If the given class could not be found or if the first class
      found with that name doesn't inherit from the expected superclass.

  Returns:
    True if a class was found that matches the given constraints.
  """
  for module in modules:
    found = getattr(module, flag_value, None)
    if not found:
      continue
    if issubclass(found, expected_superclass):
      return True
    raise flags.FlagsError("%s '%s' doesn't inherit from %s." %
                           (category, flag_value,
                            expected_superclass.__name__))
  raise flags.FlagsError("Unable to find %s '%s'." % (category, flag_value))
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=1000,
                           num_epochs=None,
                           num_readers=1):
  """Creates the section of the graph which reads the training data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_epochs: How many passes to make over the training data. Set to 'None'
      to run indefinitely.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of " + str(batch_size) + " for training.")
  with tf.name_scope("train_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find training files. data_pattern='" +
                    data_pattern + "'.")
    logging.info("Number of training files: %s.", str(len(files)))
    # File order is shuffled so batches mix content across shards.
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=True)
    # One parser op per reader thread, all feeding the same shuffle queue.
    training_data = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]
    # Joins the parallel reader outputs and shuffles examples within a
    # bounded buffer (capacity = 5x batch, at least 1 batch kept queued).
    return tf.train.shuffle_batch_join(
        training_data,
        batch_size=batch_size,
        capacity=batch_size * 5,
        min_after_dequeue=batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
def find_class_by_name(name, modules):
  """Searches the provided modules for the named class and returns it.

  Raises StopIteration if no module defines a truthy attribute `name`.
  """
  candidates = (getattr(module, name, None) for module in modules)
  return next(candidate for candidate in candidates if candidate)
def build_graph(reader,
                model,
                train_data_pattern,
                # NOTE(review): this default is instantiated once at import time
                # and shared across all calls that rely on it — confirm the loss
                # object is stateless before reusing it.
                label_loss_fn=losses.CrossEntropyLossNumLabels(),
                batch_size=1000,
                base_learning_rate=0.01,
                learning_rate_decay_examples=1000000,
                learning_rate_decay=0.95,
                optimizer_class=tf.train.AdamOptimizer,
                clip_gradient_norm=1.0,
                regularization_penalty=1,
                num_readers=1,
                num_epochs=None):
  """Creates the Tensorflow graph.

  This will only be called once in the life of
  a training model, because after the graph is created the model will be
  restored from a meta graph file rather than being recreated.

  Args:
    reader: The data file reader. It should inherit from BaseReader.
    model: The core model (e.g. logistic or neural net). It should inherit
      from BaseModel.
    train_data_pattern: glob path to the training data files.
    label_loss_fn: What kind of loss to apply to the model. It should inherit
      from BaseLoss.
    batch_size: How many examples to process at a time.
    base_learning_rate: What learning rate to initialize the optimizer with.
    optimizer_class: Which optimization algorithm to use.
    clip_gradient_norm: Magnitude of the gradient to clip to.
    regularization_penalty: How much weight to give the regularization loss
      compared to the label loss.
    num_readers: How many threads to use for I/O operations.
    num_epochs: How many passes to make over the data. 'None' means an
      unlimited number of passes.
  """
  global_step = tf.Variable(0, trainable=False, name="global_step")

  # One "tower" (model replica) per visible GPU, capped by --num_gpu;
  # CPU-only runs use a single tower.
  local_device_protos = device_lib.list_local_devices()
  gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
  gpus = gpus[:FLAGS.num_gpu]
  num_gpus = len(gpus)

  if num_gpus > 0:
    logging.info("Using the following GPUs to train: " + str(gpus))
    num_towers = num_gpus
    device_string = '/gpu:%d'
  else:
    logging.info("No GPUs found. Training on CPU.")
    num_towers = 1
    device_string = '/cpu:%d'

  # Decay is driven by examples seen (step * global batch), not by raw steps.
  learning_rate = tf.train.exponential_decay(
      base_learning_rate,
      global_step * batch_size * num_towers,
      learning_rate_decay_examples,
      learning_rate_decay,
      staircase=True)
  tf.summary.scalar('learning_rate', learning_rate)

  optimizer = optimizer_class(learning_rate)

  # Read one global batch and split it evenly across the towers.
  unused_video_id, model_input_raw, labels_batch, num_frames = (
      get_input_data_tensors(
          reader,
          train_data_pattern,
          batch_size=batch_size * num_towers,
          num_readers=num_readers,
          num_epochs=num_epochs))
  tf.summary.histogram("model/input_raw", model_input_raw)

  feature_dim = len(model_input_raw.get_shape()) - 1

  # L2-normalize features along the last (feature) dimension.
  model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

  tower_inputs = tf.split(model_input, num_towers)
  tower_labels = tf.split(labels_batch, num_towers)
  tower_num_frames = tf.split(num_frames, num_towers)
  tower_gradients = []
  tower_predictions = []
  tower_label_losses = []
  tower_reg_losses = []
  tower_num_label_losses = []
  for i in range(num_towers):
    # For some reason these 'with' statements can't be combined onto the same
    # line. They have to be nested.
    with tf.device(device_string % i):
      # Variables are shared across towers: reuse is enabled for all but the
      # first tower.
      with (tf.variable_scope(("tower"), reuse=True if i > 0 else None)):
        # Model variables live on the CPU in multi-GPU mode so every tower
        # can read/write them; single-GPU runs keep them on the GPU.
        with (slim.arg_scope([slim.model_variable, slim.variable], device="/cpu:0" if num_gpus!=1 else "/gpu:0")):
          result = model.create_model(
              tower_inputs[i],
              num_frames=tower_num_frames[i],
              vocab_size=reader.num_classes,
              labels=tower_labels[i])
          for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

          predictions = result["predictions"]
          #predictions_num_labels = result["predictions_num_labels"]
          tower_predictions.append(predictions)

          # The model may return its own loss; otherwise fall back to the
          # provided loss function.
          if "loss" in result.keys():
            label_loss = result["loss"]
          else:
            label_loss = label_loss_fn.calculate_loss(predictions, tower_labels[i])

          # NOTE(review): leftover debug print of the loss tensor — consider removing.
          print("\n\n\n", label_loss)

          if "regularization_loss" in result.keys():
            reg_loss = result["regularization_loss"]
          else:
            reg_loss = tf.constant(0.0)

          #num_label_target = tf.reduce_sum(tf.cast(tower_labels[i], tf.float32), 1, keepdims=True)
          #num_label_target = tf.log1p(num_label_target)
          #num_label_loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(num_label_target, predictions_num_labels))))
          #tower_num_label_losses.append(num_label_loss)

          reg_losses = tf.losses.get_regularization_losses()
          if reg_losses:
            reg_loss += tf.add_n(reg_losses)

          tower_reg_losses.append(reg_loss)

          # Adds update_ops (e.g., moving average updates in batch normalization) as
          # a dependency to the train_op.
          update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
          if "update_ops" in result.keys():
            update_ops += result["update_ops"]
          if update_ops:
            with tf.control_dependencies(update_ops):
              barrier = tf.no_op(name="gradient_barrier")
              with tf.control_dependencies([barrier]):
                label_loss = tf.identity(label_loss)

          tower_label_losses.append(label_loss)

          # Incorporate the L2 weight penalties etc.
          final_loss = regularization_penalty * reg_loss + label_loss
          gradients = optimizer.compute_gradients(final_loss,
              colocate_gradients_with_ops=False)
          tower_gradients.append(gradients)

  # Average losses across towers for reporting.
  label_loss = tf.reduce_mean(tf.stack(tower_label_losses))
  tf.summary.scalar("label_loss", label_loss)
  if regularization_penalty != 0:
    reg_loss = tf.reduce_mean(tf.stack(tower_reg_losses))
    tf.summary.scalar("reg_loss", reg_loss)

  #num_label_loss = tf.reduce_mean(tf.stack(tower_num_label_losses))
  #tf.summary.scalar("num_label_loss", num_label_loss)

  # Average gradients across towers, optionally clip, then apply once.
  merged_gradients = utils.combine_gradients(tower_gradients)

  if clip_gradient_norm > 0:
    with tf.name_scope('clip_grads'):
      merged_gradients = utils.clip_gradient_norms(merged_gradients, clip_gradient_norm)

  train_op = optimizer.apply_gradients(merged_gradients, global_step=global_step)

  # Publish the key tensors via named collections so the Trainer can recover
  # them after graph (re)construction or meta-graph import.
  tf.add_to_collection("global_step", global_step)
  tf.add_to_collection("loss", label_loss)
  tf.add_to_collection("predictions", tf.concat(tower_predictions, 0))
  tf.add_to_collection("input_batch_raw", model_input_raw)
  tf.add_to_collection("input_batch", model_input)
  tf.add_to_collection("num_frames", num_frames)
  tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
  tf.add_to_collection("train_op", train_op)
class Trainer(object):
  """A Trainer to train a Tensorflow graph.

  Owns the training session lifecycle: building or recovering the graph,
  running the training loop under a tf.train.Supervisor, periodically
  evaluating on the training batch, and exporting the model.
  """

  def __init__(self, cluster, task, train_dir, model, reader, model_exporter,
               log_device_placement=True, max_steps=None,
               export_model_steps=1000):
    """Creates a Trainer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed.
        None otherwise.
      task: A TaskSpec describing the job type and the task index.
    """
    self.cluster = cluster
    self.task = task
    # Only the master replica (index 0) writes checkpoints/summaries/exports.
    self.is_master = (task.type == "master" and task.index == 0)
    self.train_dir = train_dir
    self.config = tf.ConfigProto(
        allow_soft_placement=True,log_device_placement=log_device_placement)
    self.model = model
    self.reader = reader
    self.model_exporter = model_exporter
    self.max_steps = max_steps
    self.max_steps_reached = False
    self.export_model_steps = export_model_steps
    self.last_model_export_step = 0

    # if self.is_master and self.task.index > 0:
    #   raise StandardError("%s: Only one replica of master expected",
    #                       task_as_string(self.task))

  def run(self, start_new_model=False):
    """Performs training on the currently defined Tensorflow graph.

    Returns:
      A tuple of the training Hit@1 and the training PERR.
    """
    if self.is_master and start_new_model:
      self.remove_training_directory(self.train_dir)

    if not os.path.exists(self.train_dir):
      os.makedirs(self.train_dir)

    # Persist the model-defining flags next to the checkpoints and refuse to
    # resume into a directory trained with different flags.
    model_flags_dict = {
        "model": FLAGS.model,
        "feature_sizes": FLAGS.feature_sizes,
        "feature_names": FLAGS.feature_names,
        "frame_features": FLAGS.frame_features,
        "label_loss": FLAGS.label_loss,
    }
    # NOTE(review): the path uses FLAGS.train_dir while the rest of this
    # method uses self.train_dir — confirm these are always the same value.
    flags_json_path = os.path.join(FLAGS.train_dir, "model_flags.json")
    if os.path.exists(flags_json_path):
      existing_flags = json.load(open(flags_json_path))
      if existing_flags != model_flags_dict:
        logging.error("Model flags do not match existing file %s. Please "
                      "delete the file, change --train_dir, or pass flag "
                      "--start_new_model",
                      flags_json_path)
        logging.error("Ran model with flags: %s", str(model_flags_dict))
        logging.error("Previously ran with flags: %s", str(existing_flags))
        exit(1)
    else:
      # Write the file.
      with open(flags_json_path, "w") as fout:
        fout.write(json.dumps(model_flags_dict))

    target, device_fn = self.start_server_if_distributed()

    meta_filename = self.get_meta_filename(start_new_model, self.train_dir)

    with tf.Graph().as_default() as graph:
      # Either recover the graph from a meta file or build it fresh, then
      # pull the tensors of interest back out of the named collections.
      if meta_filename:
        saver = self.recover_model(meta_filename)

      with tf.device(device_fn):
        if not meta_filename:
          saver = self.build_model(self.model, self.reader)

        global_step = tf.get_collection("global_step")[0]
        loss = tf.get_collection("loss")[0]
        predictions = tf.get_collection("predictions")[0]
        labels = tf.get_collection("labels")[0]
        train_op = tf.get_collection("train_op")[0]
        init_op = tf.global_variables_initializer()

    # The Supervisor handles checkpointing, summaries and session recovery.
    sv = tf.train.Supervisor(
        graph,
        logdir=self.train_dir,
        init_op=init_op,
        is_chief=self.is_master,
        global_step=global_step,
        #save_model_secs=15 * 60,
        save_model_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
        #save_summaries_secs=120,
        save_summaries_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
        saver=saver)

    logging.info("%s: Starting managed session.", task_as_string(self.task))
    with sv.managed_session(target, config=self.config) as sess:
      try:
        logging.info("%s: Entering training loop.", task_as_string(self.task))
        while (not sv.should_stop()) and (not self.max_steps_reached):
          batch_start_time = time.time()
          _, global_step_val, loss_val, predictions_val, labels_val = sess.run(
              [train_op, global_step, loss, predictions, labels])
          seconds_per_batch = time.time() - batch_start_time
          examples_per_second = labels_val.shape[0] / seconds_per_batch

          if self.max_steps and self.max_steps <= global_step_val:
            self.max_steps_reached = True

          # Evaluate on the training batch only every N steps (master only).
          #if self.is_master and global_step_val % 10 == 0 and self.train_dir:
          if self.is_master and global_step_val % FLAGS.validate_every_n_training_steps == 0 and self.train_dir:
            eval_start_time = time.time()
            hit_at_one = eval_util.calculate_hit_at_one(predictions_val, labels_val)
            perr = eval_util.calculate_precision_at_equal_recall_rate(predictions_val,
                                                                      labels_val)
            gap = eval_util.calculate_gap(predictions_val, labels_val)
            eval_end_time = time.time()
            # NOTE(review): eval_time is computed but never used/logged.
            eval_time = eval_end_time - eval_start_time

            logging.info("training step " + str(global_step_val) + " | Loss: " + ("%.2f" % loss_val) +
                         " Examples/sec: " + ("%.2f" % examples_per_second) + " | Hit@1: " +
                         ("%.2f" % hit_at_one) + " PERR: " + ("%.2f" % perr) +
                         " GAP: " + ("%.2f" % gap))

            sv.summary_writer.add_summary(
                utils.MakeSummary("model/Training_Hit@1", hit_at_one),
                global_step_val)
            sv.summary_writer.add_summary(
                utils.MakeSummary("model/Training_Perr", perr), global_step_val)
            sv.summary_writer.add_summary(
                utils.MakeSummary("model/Training_GAP", gap), global_step_val)
            sv.summary_writer.add_summary(
                utils.MakeSummary("global_step/Examples/Second",
                                  examples_per_second), global_step_val)
            sv.summary_writer.flush()

            # Drop an empty marker file recording the step and training GAP.
            with open(FLAGS.train_dir + '/global_step_{%d}_training_GAP_{%.6f}.txt' % (global_step_val, gap), 'w') as f:
              f.write('\n')

            # Exporting the model every x steps
            time_to_export = ((self.last_model_export_step == 0) or
                              (global_step_val - self.last_model_export_step
                               >= self.export_model_steps))

            if self.is_master and time_to_export:
              self.export_model(global_step_val, sv.saver, sv.save_path, sess)
              self.last_model_export_step = global_step_val
          else:
            #logging.info("training step " + str(global_step_val) + " | Loss: " +
            #("%.2f" % loss_val) + " Examples/sec: " + ("%.2f" % examples_per_second))
            continue

      except tf.errors.OutOfRangeError:
        logging.info("%s: Done training -- epoch limit reached.",
                     task_as_string(self.task))

    logging.info("%s: Exited training loop.", task_as_string(self.task))
    sv.Stop()

  def export_model(self, global_step_val, saver, save_path, session):
    """Checkpoint the session and export the model for batch prediction."""
    # If the model has already been exported at this step, return.
    if global_step_val == self.last_model_export_step:
      return

    last_checkpoint = saver.save(session, save_path, global_step_val)

    model_dir = "{0}/export/step_{1}".format(self.train_dir, global_step_val)
    logging.info("%s: Exporting the model at step %s to %s.",
                 task_as_string(self.task), global_step_val, model_dir)

    self.model_exporter.export_model(
        model_dir=model_dir,
        global_step_val=global_step_val,
        last_checkpoint=last_checkpoint)

  def start_server_if_distributed(self):
    """Starts a server if the execution is distributed."""

    if self.cluster:
      logging.info("%s: Starting trainer within cluster %s.",
                   task_as_string(self.task), self.cluster.as_dict())
      server = start_server(self.cluster, self.task)
      target = server.target
      device_fn = tf.train.replica_device_setter(
          ps_device="/job:ps",
          worker_device="/job:%s/task:%d" % (self.task.type, self.task.index),
          cluster=self.cluster)
    else:
      # Non-distributed run: in-process session, no device placement function.
      target = ""
      device_fn = ""
    return (target, device_fn)

  def remove_training_directory(self, train_dir):
    """Removes the training directory."""
    try:
      logging.info(
          "%s: Removing existing train directory.",
          task_as_string(self.task))
      gfile.DeleteRecursively(train_dir)
    # NOTE(review): bare except silently swallows all failures (including
    # permission errors); only a log line is emitted.
    except:
      logging.error(
          "%s: Failed to delete directory " + train_dir +
          " when starting a new model. Please delete it manually and" +
          " try again.", task_as_string(self.task))

  def get_meta_filename(self, start_new_model, train_dir):
    """Return the latest checkpoint's .meta path, or None to build anew."""
    if start_new_model:
      logging.info("%s: Flag 'start_new_model' is set. Building a new model.",
                   task_as_string(self.task))
      return None

    latest_checkpoint = tf.train.latest_checkpoint(train_dir)
    if not latest_checkpoint:
      logging.info("%s: No checkpoint file found. Building a new model.",
                   task_as_string(self.task))
      return None

    meta_filename = latest_checkpoint + ".meta"
    if not gfile.Exists(meta_filename):
      logging.info("%s: No meta graph file found. Building a new model.",
                   task_as_string(self.task))
      return None
    else:
      return meta_filename

  def recover_model(self, meta_filename):
    """Import the saved meta graph and return its Saver."""
    logging.info("%s: Restoring from meta graph file %s",
                 task_as_string(self.task), meta_filename)
    return tf.train.import_meta_graph(meta_filename)

  def build_model(self, model, reader):
    """Find the model and build the graph."""

    # Resolve the loss and optimizer classes named by flags.
    label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()
    optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])

    build_graph(reader=reader,
                model=model,
                optimizer_class=optimizer_class,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                train_data_pattern=FLAGS.train_data_pattern,
                label_loss_fn=label_loss_fn,
                base_learning_rate=FLAGS.base_learning_rate,
                learning_rate_decay=FLAGS.learning_rate_decay,
                learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
                regularization_penalty=FLAGS.regularization_penalty,
                num_readers=FLAGS.num_readers,
                batch_size=FLAGS.batch_size,
                num_epochs=FLAGS.num_epochs)

    # max_to_keep=0 keeps every checkpoint ever written.
    return tf.train.Saver(max_to_keep=0, keep_checkpoint_every_n_hours=FLAGS.save_checkpoint_every_n_hour)
def get_reader():
# Convert feature_names and feature_sizes to lists of values.
feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
FLAGS.feature_names, FLAGS.feature_sizes)
if FLAGS.frame_features:
reader = readers.YT8MFrameFeatureReader(
feature_names=feature_names, feature_sizes=feature_sizes)
else:
reader = readers.YT8MAggregatedFeatureReader(
feature_names=feature_names, feature_sizes=feature_sizes)
return reader
class ParameterServer(object):
"""A parameter server to serve variables in a distributed execution."""
def __init__(self, cluster, task):
"""Creates a ParameterServer.
Args:
cluster: A tf.train.ClusterSpec if the execution is distributed.
None otherwise.
task: A TaskSpec describing the job type and the task index.
"""
self.cluster = cluster
self.task = task
def run(self):
"""Starts the parameter server."""
logging.info("%s: Starting parameter server within cluster %s.",
task_as_string(self.task), self.cluster.as_dict())
server = start_server(self.cluster, self.task)
server.join()
def start_server(cluster, task):
"""Creates a Server.
Args:
cluster: A tf.train.ClusterSpec if the execution is distributed.
None otherwise.
task: A TaskSpec describing the job type and the task index.
"""
if not task.type:
raise ValueError("%s: The task type must be specified." %
task_as_string(task))
if task.index is None:
raise ValueError("%s: The task index must be specified." %
task_as_string(task))
# Create and start a server.
return tf.train.Server(
tf.train.ClusterSpec(cluster),
protocol="grpc",
job_name=task.type,
task_index=task.index)
def task_as_string(task):
return "/job:%s/task:%s" % (task.type, task.index)
def main(unused_argv):
# Load the environment.
env = json.loads(os.environ.get("TF_CONFIG", "{}"))
# Load the cluster data from the environment.
cluster_data = env.get("cluster", None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
# Load the task data from the environment.
task_data = env.get("task", None) or {"type": "master", "index": 0}
task = type("TaskSpec", (object,), task_data)
# Logging the version.
logging.set_verbosity(tf.logging.INFO)
logging.info("%s: Tensorflow version: %s.",
task_as_string(task), tf.__version__)
# Dispatch to a master, a worker, or a parameter server.
if not cluster or task.type == "master" or task.type == "worker":
model = find_class_by_name(FLAGS.model,
[frame_level_models, video_level_models])()
reader = get_reader()
model_exporter = export_model.ModelExporter(
frame_features=FLAGS.frame_features,
model=model,
reader=reader)
Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,
FLAGS.log_device_placement, FLAGS.max_steps,
FLAGS.export_model_steps).run(start_new_model=FLAGS.start_new_model)
elif task.type == "ps":
ParameterServer(cluster, task).run()
else:
raise ValueError("%s: Invalid task_type: %s." %
(task_as_string(task), task.type))
if __name__ == "__main__":
app.run()
| [
"weimin@bb128-106-17-62.singnet.com.sg"
] | weimin@bb128-106-17-62.singnet.com.sg |
53b86d32c30e6b4c77462a29c5f4aa14ed4d539f | 341c2d65456f078d71a36bf884ed7e9447ee58ce | /AttProj/AttProj/settings.py | cf10280ce84c281071f9c681713d2ac8aae0295d | [] | no_license | HammurabiCode/ZYAttention | 0f933df13a47ff1ed47e62c3bd92cf11fa99376c | 0c49df94433d222df73c4774d16f6d4790a7046b | refs/heads/master | 2021-09-07T22:20:18.062566 | 2018-03-02T02:19:46 | 2018-03-02T02:19:46 | 105,235,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,266 | py | """
Django settings for AttProj project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vq@flfwa=@6-%i@)_mvb!q@85g^7)eo&=y_!ip@e%&74$geelj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '10.0.2.10', '106.14.196.64']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'AttApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AttProj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['AttApp/template'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AttProj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
] | [
"ranab@qq.com"
] | ranab@qq.com |
ffd865cba0f03999b1660056776ba841aa79f26e | 835f01535a33d2d7ea7450bb9a93be79b6f02adf | /Tout/MPSI1/Informatique/tp 17.06.py | fe914fb4b31e8c403e378c2755e940e2419441d7 | [] | no_license | OnlyAtN1ght/Info-2K19 | 8d338254186846d86fa1ba5aad41ec1bd5b78f4e | 19634fa43d14c0fdf08642f0ac620bdbcdb5a6cb | refs/heads/master | 2020-04-15T06:41:47.249921 | 2019-06-24T17:06:32 | 2019-06-24T17:06:32 | 164,469,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import matplotlib.pyplot as plt
import numpy as np
import math
def dichotomie(f,a,b,e,borne=False):
while b-a>e:
c=(a+b)/2
if f(a)*f(b)<=0:
b=c
else:
a=c
if(borne):
print(a,b)
return a
f=lambda x:x**3-3*x+2
g=lambda x:np.exp(x)-1-x**2
h=lambda x:np.exp(x)-x-2*x
def Trace_f(f,a,b):
X=np.linspace(a,b,200)
Y=[]
for x in X:
Y.append(f(x))
plt.plot(X,Y,linewidth=2,color='r')
| [
"simon.provot12@gmail.com"
] | simon.provot12@gmail.com |
c128a8fa36affaec3993dafefcfcb0b3c4b39c6c | f786209eec7afd96da1e6d17ab70a2a1a5600060 | /dependence/cocos2d-x-3.0beta2/template/multi-platform-js/proj.android/build_native.py | 561a0e89811478d9801290c1ea9b3c225b1957b9 | [] | no_license | pcjbird/MUIEditor | 47a1125c1d43301ff40b061af74b47b2a0114c3c | 27faa531fb6b952a7139229e4da3b109a42733d2 | refs/heads/master | 2021-01-20T13:47:38.343165 | 2014-07-09T05:21:21 | 2014-07-09T05:21:21 | 17,281,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,563 | py | #!/usr/bin/python
# build_native.py
# Build native codes
import sys
import os, os.path
import shutil
from optparse import OptionParser
def get_num_of_cpu():
''' The build process can be accelerated by running multiple concurrent job processes using the -j-option.
'''
try:
platform = sys.platform
if platform == 'win32':
if 'NUMBER_OF_PROCESSORS' in os.environ:
return int(os.environ['NUMBER_OF_PROCESSORS'])
else:
return 1
else:
from numpy.distutils import cpuinfo
return cpuinfo.cpu._getNCPUs()
except Exception:
print "Can't know cpuinfo, use default 1 cpu"
return 1
def check_environment_variables_sdk():
''' Checking the environment ANDROID_SDK_ROOT, which will be used for building
'''
try:
SDK_ROOT = os.environ['ANDROID_SDK_ROOT']
except Exception:
print "ANDROID_SDK_ROOT not defined. Please define ANDROID_SDK_ROOT in your environment"
sys.exit(1)
return SDK_ROOT
def check_environment_variables():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment"
sys.exit(1)
return NDK_ROOT
def select_toolchain_version():
'''Because ndk-r8e uses gcc4.6 as default. gcc4.6 doesn't support c++11. So we should select gcc4.7 when
using ndk-r8e. But gcc4.7 is removed in ndk-r9, so we should determine whether gcc4.7 exist.
Conclution:
ndk-r8e -> use gcc4.7
ndk-r9 -> use gcc4.8
'''
ndk_root = check_environment_variables()
if os.path.isdir(os.path.join(ndk_root,"toolchains/arm-linux-androideabi-4.8")):
os.environ['NDK_TOOLCHAIN_VERSION'] = '4.8'
print "The Selected NDK toolchain version was 4.8 !"
elif os.path.isdir(os.path.join(ndk_root,"toolchains/arm-linux-androideabi-4.7")):
os.environ['NDK_TOOLCHAIN_VERSION'] = '4.7'
print "The Selected NDK toolchain version was 4.7 !"
else:
print "Couldn't find the gcc toolchain."
exit(1)
def do_build(cocos_root, ndk_root, app_android_root,ndk_build_param,sdk_root,android_platform,build_mode):
ndk_path = os.path.join(ndk_root, "ndk-build")
# windows should use ";" to seperate module paths
platform = sys.platform
if platform == 'win32':
ndk_module_path = 'NDK_MODULE_PATH=%s;%s/external;%s/cocos' % (cocos_root, cocos_root, cocos_root)
else:
ndk_module_path = 'NDK_MODULE_PATH=%s:%s/external:%s/cocos' % (cocos_root, cocos_root, cocos_root)
num_of_cpu = get_num_of_cpu()
if ndk_build_param == None:
command = '%s -j%d -C %s %s' % (ndk_path, num_of_cpu, app_android_root, ndk_module_path)
else:
command = '%s -j%d -C %s %s %s' % (ndk_path, num_of_cpu, app_android_root, ''.join(str(e) for e in ndk_build_param), ndk_module_path)
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
elif android_platform is not None:
sdk_tool_path = os.path.join(sdk_root, "tools/android")
cocoslib_path = os.path.join(cocos_root, "cocos/2d/platform/android/java")
command = '%s update lib-project -t %s -p %s' % (sdk_tool_path,android_platform,cocoslib_path)
if os.system(command) != 0:
raise Exception("update cocos lib-project [ " + cocoslib_path + " ] fails!")
command = '%s update project -t %s -p %s -s' % (sdk_tool_path,android_platform,app_android_root)
if os.system(command) != 0:
raise Exception("update project [ " + app_android_root + " ] fails!")
buildfile_path = os.path.join(app_android_root, "build.xml")
command = 'ant clean %s -f %s -Dsdk.dir=%s' % (build_mode,buildfile_path,sdk_root)
os.system(command)
def copy_files(src, dst):
for item in os.listdir(src):
path = os.path.join(src, item)
# Android can not package the file that ends with ".gz"
if not item.startswith('.') and not item.endswith('.gz') and os.path.isfile(path):
shutil.copy(path, dst)
if os.path.isdir(path):
new_dst = os.path.join(dst, item)
os.mkdir(new_dst)
copy_files(path, new_dst)
def copy_resources(app_android_root):
# remove app_android_root/assets if it exists
assets_dir = os.path.join(app_android_root, "assets")
if os.path.isdir(assets_dir):
shutil.rmtree(assets_dir)
# copy resources
os.mkdir(assets_dir)
resources_dir = os.path.join(app_android_root, "../Resources")
if os.path.isdir(resources_dir):
copy_files(resources_dir, assets_dir)
# jsb project should copy javascript files and resources(shared with cocos2d-html5)
resources_dir = os.path.join(app_android_root, "../cocos2d/cocos/scripting/javascript/script")
copy_files(resources_dir, assets_dir)
def build(ndk_build_param,android_platform,build_mode):
ndk_root = check_environment_variables()
sdk_root = None
select_toolchain_version()
current_dir = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(current_dir, "../cocos2d")
app_android_root = current_dir
copy_resources(app_android_root)
if android_platform is not None:
sdk_root = check_environment_variables_sdk()
if android_platform.isdigit():
android_platform = 'android-'+android_platform
else:
print 'please use vaild android platform'
exit(1)
if build_mode is None:
build_mode = 'debug'
elif build_mode != 'release':
build_mode = 'debug'
do_build(cocos_root, ndk_root, app_android_root,ndk_build_param,sdk_root,android_platform,build_mode)
# -------------- main --------------
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-n", "--ndk", dest="ndk_build_param", help='parameter for ndk-build')
parser.add_option("-p", "--platform", dest="android_platform",
help='parameter for android-update.Without the parameter,the script just build dynamic library for project. Valid android-platform are:[10|11|12|13|14|15|16|17|18|19]')
parser.add_option("-b", "--build", dest="build_mode",
help='the build mode for java project,debug[default] or release.Get more information,please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
build(opts.ndk_build_param,opts.android_platform,opts.build_mode)
| [
"pcjbird@hotmail.com"
] | pcjbird@hotmail.com |
d600955f4d15ef0c2f02da65a5b111866a9f6a6d | 5f27b1540e2d8f0dfe45b036d8a2381dc94a614b | /src/comments_post.py | 6443f7737ecdfae688327f91cec5bcc6878f0e5e | [] | no_license | benji6/serverless-demo | f6d2beea2f5abc330d722b06b16e9dd5710a83ab | 76145423e59585f75cc64706c6827e762629b41e | refs/heads/master | 2020-05-05T10:11:40.339074 | 2019-04-07T08:49:33 | 2019-04-08T16:13:38 | 179,934,261 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import boto3
from boto3.dynamodb.conditions import Key
from datetime import datetime
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('comments_demo')
def handler(event, context):
item = {
'body': event['body'],
'dateCreated': datetime.now().isoformat(),
'userId': 'test-user',
}
table.put_item(Item=item)
return item
| [
"benji2357@gmail.com"
] | benji2357@gmail.com |
276c5eac0755d7e57d5eb0995fc692a9bc0b349e | f38bb187847005fc121bec01fe10042358004a87 | /introduction/firstprgoram.py | afffd90e68926b02d9b75ef0e157882d27a59167 | [] | no_license | loyolastalin/Python_Training_2020 | 17aaee735711ef3b4673479fb06f34fbf8bdba0b | dcee3c17615ff567b57afb67abbb45d63d72812a | refs/heads/master | 2023-08-14T23:44:43.897149 | 2021-10-05T08:20:28 | 2021-10-05T08:20:28 | 278,421,132 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | '''
Write a prgram to get two inputs from the use
and perfom addition operation and show the result
'''
# comments
" asdfsdf"
'''
'''
# Use input34
a = int(input("Enter the first number"))
b = int(input("Enter the second number"))
# processing
c = a + b
# Writting/ printing
print("The sum is " + c )
| [
"loyolastalin@gmail.com"
] | loyolastalin@gmail.com |
24fe9249b9c948114976eb03ee082aeb308f3db5 | 0907738cae581720a27b17d7a8f26dca22d777d1 | /iso/iso_ellipse.py | 85371eeeb7f538e1f8760dcc6a2c049552776b54 | [] | no_license | jyshangguan/hst_images | a9e9174214129fc986044532f32fd9e56b59f90a | 09e696eed949b41ef5c3581405b8f344c5cc1361 | refs/heads/master | 2021-05-28T11:41:18.613103 | 2015-02-14T08:11:43 | 2015-02-14T08:11:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | #This script is to run the stsdas.analyse.isophote.ellipse.
#Obtain the isophotes for galaxy with other sources in the image masked.
import os
import sys
import time
from pyraf import iraf
#This function is to read the input parameter file.
def ReadParList(file_name):
f = open(file_name)
lines = f.readlines()
f.close()
par_dic = {}
for line in lines:
p = line.split()
if not p[0].startswith('#'):
par_dic.update({p[0]:p[2]})
return par_dic
inPtPar = sys.argv[1] #Obtain parameters for the script from the command line.
inPar = ReadParList(inPtPar)
srcName = inPar['srcName']
imgFile = inPar['imgFile']
mskFile = inPar['mskFile']
xcntr = eval(inPar['xcntr'])
ycntr = eval(inPar['ycntr'])
tabFile = srcName+'_sbp.tab'
datFile = srcName+'_dat.txt'
iraf.imcopy(mskFile, imgFile+'.pl')
print mskFile+' is copied into .pl'
#Call the STSDAS.ANALYSIS.ISOPHOTE package
iraf.stsdas()
iraf.analysis()
iraf.isophote()
#Unlearn all the parameters for fitting
iraf.unlearn(iraf.ellipse.geompar)
iraf.unlearn(iraf.ellipse.controlpar)
iraf.unlearn(iraf.ellipse)
#Provide the first guess of the center
iraf.ellipse.geompar.x0 = xcntr
iraf.ellipse.geompar.y0 = ycntr
iraf.ellipse.maxsma = 600
print 'maxsma:', iraf.ellipse.maxsma
#Use interactive mode, otherwise we cannot obtain the isophote extend to sky.
iraf.ellipse.interactive = 'no'
#os.system('ds9 &')
#time.sleep(5)
#Clean the output file
if os.path.exists(tabFile):
os.remove(tabFile)
#Start fitting
iraf.ellipse(input=imgFile, output=tabFile)
print tabFile+' is generated!'
#Dump the table into ASCII .txt data
iraf.tdump(table=tabFile, datafile=datFile)
print datFile+' is generated!'
| [
"shangguan@pku.edu.cn"
] | shangguan@pku.edu.cn |
07ee5ca8244bc40fdcfdffc0e184e8d66225d837 | 91d13f45f8527c368ebc6e44c75142a043f0583b | /test_zappa_cookiecutter/users/tests/test_drf_urls.py | 5c5a28e94cb0566c442fdcd429e5dbf1a914a39c | [
"MIT"
] | permissive | Andrew-Chen-Wang/cookiecutter-django-lambda | 6beed03d82eeecf95281c7f03a279c9c8b2ca85c | c4c64e174f653205c399ffa683918141f2f058d7 | refs/heads/master | 2022-11-16T12:20:00.589856 | 2020-07-19T20:19:41 | 2020-07-19T20:19:41 | 280,943,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | import pytest
from django.urls import resolve, reverse
from test_zappa_cookiecutter.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
assert (
reverse("api:user-detail", kwargs={"username": user.username})
== f"/api/users/{user.username}/"
)
assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
assert reverse("api:user-list") == "/api/users/"
assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
assert reverse("api:user-me") == "/api/users/me/"
assert resolve("/api/users/me/").view_name == "api:user-me"
| [
"acwangpython@gmail.com"
] | acwangpython@gmail.com |
2bde1ba2abecbf9bf4d93ea2ea5458d64f41f428 | df9f39af208d6928f29705cb81ab160ec04f1f2e | /intentClassifyer.py | b12d58c54f6bcb169369bf628a5fbc0f2ff24933 | [] | no_license | aust1nsoz/Intro_Intent_Classification | 5e0fbc4fa377dfea4a7534eaf68e2f0220439169 | 3834c2b8bff9e8a3d7fd8671b9dc3183606eb5e8 | refs/heads/master | 2020-07-16T08:31:03.541170 | 2019-09-25T23:38:53 | 2019-09-25T23:38:53 | 205,754,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,624 | py | # -*- coding: utf-8 -*-
#Import Needed things
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
import nltk
import re
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Bidirectional, Embedding, Dropout
from keras.callbacks import ModelCheckpoint
#Function to load data set
#Extracting Fields Sentence, and Intent
def load_dataset(filename):
df = pd.read_csv(filename, encoding = "latin1", names = ["Sentence", "Intent"])
print(df.head())
intent = df["Intent"]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
#loading dataset named "Dataset.csv"
intent, unique_intent, sentences = load_dataset("Dataset.csv")
#Print First 5 sentences
print(sentences[:5])
#DOwnloading some needed packages. SHoudl already have them
nltk.download("stopwords")
nltk.download("punkt")
stemmer = LancasterStemmer()
#Cleaning data by removing things like puncuation and special characters
#secondly we lemmatiz them. THe process of changing all similar words to the
#same word i.e churches -> church
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = word_tokenize(clean)
#stemming
words.append([i.lower() for i in w])
return words
cleaned_words = cleaning(sentences)
print(len(cleaned_words))
print(cleaned_words[:2])
def create_tokenizer(words, filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def max_length(words):
return(len(max(words, key = len)))
word_tokenizer = create_tokenizer(cleaned_words)
vocab_size = len(word_tokenizer.word_index) + 1
max_length = max_length(cleaned_words)
print("Vocab Size = %d and Maximum length = %d" % (vocab_size, max_length))
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
encoded_doc = encoding_doc(word_tokenizer, cleaned_words)
def padding_doc(encoded_doc, max_length):
return(pad_sequences(encoded_doc, maxlen = max_length, padding = "post"))
padded_doc = padding_doc(encoded_doc, max_length)
padded_doc[:5]
print("Shape of padded docs = ",padded_doc.shape)
output_tokenizer = create_tokenizer(unique_intent, filters = '!"#$%&()*+,-/:;<=>?@[\]^`{|}~')
#tokenizer with filter changed
output_tokenizer.word_index
encoded_output = encoding_doc(output_tokenizer, intent)
encoded_output = np.array(encoded_output).reshape(len(encoded_output), 1)
encoded_output.shape
def one_hot(encode):
o = OneHotEncoder(sparse = False)
return(o.fit_transform(encode))
output_one_hot = one_hot(encoded_output)
output_one_hot.shape
from sklearn.model_selection import train_test_split
train_X, val_X, train_Y, val_Y = train_test_split(padded_doc, output_one_hot, shuffle = True, test_size = 0.2)
print("Shape of train_X = %s and train_Y = %s" % (train_X.shape, train_Y.shape))
print("Shape of val_X = %s and val_Y = %s" % (val_X.shape, val_Y.shape))
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
model = create_model(vocab_size, max_length)
model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.summary()
filename = 'model.h5'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
hist = model.fit(train_X, train_Y, epochs = 100, batch_size = 32, validation_data = (val_X, val_Y), callbacks = [checkpoint])
model = load_model("model.h5")
def predictions(text):
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", text)
test_word = word_tokenize(clean)
test_word = [w.lower() for w in test_word]
test_ls = word_tokenizer.texts_to_sequences(test_word)
print(test_word)
#Check for unknown words
if [] in test_ls:
test_ls = list(filter(None, test_ls))
test_ls = np.array(test_ls).reshape(1, len(test_ls))
x = padding_doc(test_ls, max_length)
pred = model.predict_classes(x)
return pred
def get_final_output(pred, classes):
predictions = pred[0]
classes = np.array(classes)
ids = np.argsort(-predictions)
classes = classes[ids]
predictions = -np.sort(-predictions)
for i in range(pred.shape[1]):
print("%s has confidence = %s" % (classes[i], (predictions[i])))
text = "Can you help me?"
pred = predictions(text)
get_final_output(pred, unique_intent)
# map an integer to a word
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
text = "bot or human?"
pred = predictions(text)
print(pred)
word = word_for_id(pred, output_tokenizer)
print(word)
| [
"austinsorenson96@gmail.com"
] | austinsorenson96@gmail.com |
a23d7b936809668c97dda106824c90e5f5105e0a | c1bc3bcb7d08851c57f28281bd932e7e4f0fd4b4 | /tasks_editor/forms.py | 8ad06f5278d5ae2d6ae2632c118b90bbd4a6cec4 | [] | no_license | tykva43/tasks_app | a4d07727d1e1a72efa39d99ade8ff56915b2e295 | 43bff9e55a95d8071a7dd7bee884887dc7cd42ef | refs/heads/master | 2023-04-13T18:48:38.598854 | 2021-04-28T08:55:03 | 2021-04-28T08:55:03 | 362,354,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import Textarea
from task.models import Task, Group, TaskList
class TaskForm(forms.ModelForm):
def __init__(self, user, group, *args, **kwargs):
super(TaskForm, self).__init__(*args, **kwargs)
self.fields['tasklist'].queryset = TaskList.objects.filter(group__users__id=user, group_id=group)#.filter(group__user=user)
# self.fields['group'].queryset = Group.objects.filter(users__id=user)
class Meta:
model = Task
exclude = ('completed_at', 'is_completed', 'group')
widgets = {
'description': Textarea(attrs={'rows': 6, 'cols': 18}),
}
class GroupForm(forms.ModelForm):
class Meta:
model = Group
fields = ('name', 'users', 'type')
class RegistrationForm(UserCreationForm):
email = forms.EmailField(max_length=254, help_text='This field is required')
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2',)
class TaskListForm(forms.ModelForm):
class Meta:
model = TaskList
fields = ('title',)
| [
"krikunova.olga@mail.ru"
] | krikunova.olga@mail.ru |
cff3803c5fb1185e6c3cbc89bd1ad5be3e322f61 | 8a0dd0e3d987482af35ae1813e4f64bce3d7ab4d | /examples/webhook/flask_skeleton.py | ef9510b07d806880f7c7befaf43cafe1f8ea3c0d | [
"MIT"
] | permissive | NextVisionDevelopers/telepota | a230e2e61762739f8ef30d589ed552f5486fb93c | 78bbd2bd9836edca5309016dc13262e5adf4c3ff | refs/heads/master | 2023-07-10T19:52:02.416666 | 2021-08-27T11:08:29 | 2021-08-27T11:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | import sys
from flask import Flask, request
import telepot
from telepot.loop import OrderedWebhook
"""
$ python2.7 flask_skeleton.py <token> <listening_port> <webhook_url>
Webhook path is '/webhook', therefore:
<webhook_url>: https://<base>/webhook
"""
def on_chat_message(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print('Chat Message:', content_type, chat_type, chat_id)
def on_callback_query(msg):
query_id, from_id, data = telepot.glance(msg, flavor='callback_query')
print('Callback query:', query_id, from_id, data)
# need `/setinline`
def on_inline_query(msg):
query_id, from_id, query_string = telepot.glance(msg, flavor='inline_query')
print('Inline Query:', query_id, from_id, query_string)
# Compose your own answers
articles = [{'type': 'article',
'id': 'abc', 'title': 'ABC', 'message_text': 'Good morning'}]
bot.answerInlineQuery(query_id, articles)
# need `/setinlinefeedback`
def on_chosen_inline_result(msg):
result_id, from_id, query_string = telepot.glance(msg, flavor='chosen_inline_result')
print('Chosen Inline Result:', result_id, from_id, query_string)
TOKEN = sys.argv[1]
PORT = int(sys.argv[2])
URL = sys.argv[3]
app = Flask(__name__)
bot = telepot.Bot(TOKEN)
webhook = OrderedWebhook(bot, {'chat': on_chat_message,
'callback_query': on_callback_query,
'inline_query': on_inline_query,
'chosen_inline_result': on_chosen_inline_result})
@app.route('/webhook', methods=['GET', 'POST'])
def pass_update():
webhook.feed(request.data)
return 'OK'
if __name__ == '__main__':
try:
bot.setWebhook(URL)
# Sometimes it would raise this error, but webhook still set successfully.
except telepot.exception.TooManyRequestsError:
pass
webhook.run_as_thread()
app.run(port=PORT, debug=True)
| [
"josephkiurire@gmail.com"
] | josephkiurire@gmail.com |
221eabeb7855ab26b445ce0626620cf82ea4dd10 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/series/a713022194c640d79ae14ee2e504dd88.py | eb7a127a4563a635852c50f164844820a748ca91 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 221 | py | def slices( N, size ):
if (size == 0 and len(N) > 0) or (len(N) < size):
raise ValueError('Bad input!')
return [ [ int(d) for d in N[s:s+size] ]
for s in range( len(N) - size + 1 ) ]
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
deca411428980e2f3479946e16bec2cf5d7bc3c3 | 516932b326f58f9dc7c008e379f80cafd820acc0 | /src/helixtariff/test/logic/test_user_tariff.py | efde456f49fbfc657f1b000a05a4043a1fc4b16b | [] | no_license | sand8080/helixtariff | ffa4021fac16876bbbad8a4a8f1c53a9e4fd71d7 | 0bb56ad9e954509961db6bf636bce3a541709b93 | refs/heads/master | 2020-12-24T14:57:01.276045 | 2012-07-12T14:59:56 | 2012-07-12T14:59:56 | 1,605,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | import unittest
from helixcore.error import RequestProcessingError
from helixtariff.test.logic.actor_logic_test import ActorLogicTestCase
class UserTariffTestCase(ActorLogicTestCase):
    """Integration tests for attaching, listing and detaching user tariffs."""

    # Arbitrary user id shared by all tests in this case.
    u_id = 22

    def test_add_user_tariff(self):
        """A tariff can be attached to a user."""
        t_id = self._add_tariff('tariff one', currency='RUB')
        self._add_user_tariff(t_id, self.u_id)

    def test_add_user_tariff_duplication(self):
        """Attaching the same tariff to the same user twice is rejected."""
        name = 'tariff one'
        t_id = self._add_tariff(name, currency='RUB')
        self._add_user_tariff(t_id, self.u_id)
        self.assertRaises(RequestProcessingError, self._add_user_tariff, t_id, self.u_id)

    def test_add_wrong_tariff(self):
        """Attaching a nonexistent tariff id is rejected."""
        self.assertRaises(RequestProcessingError, self._add_user_tariff, 555, self.u_id)

    def test_delete_user_tariff(self):
        """Detaching a tariff removes it from the user's tariff list."""
        t_id = self._add_tariff('t', currency='RUB')
        self._add_user_tariff(t_id, self.u_id)
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals([t_id], user_tariffs[0]['tariff_ids'])
        # Detach via the API under an authenticated session.
        sess = self.login_actor()
        req = {'session_id': sess.session_id, 'user_id': self.u_id,
            'tariff_ids': [t_id]}
        resp = self.delete_user_tariffs(**req)
        self.check_response_ok(resp)
        # A user with no tariffs no longer appears in the listing at all.
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals(0, len(user_tariffs))

    def test_get_user_tariffs(self):
        """Listing returns only tariffs actually attached to the user."""
        self._add_tariff('t0', currency='RUB')
        t_id_1 = self._add_tariff('t1', currency='RUB')
        # No tariffs attached yet -> empty listing.
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals(0, len(user_tariffs))
        self._add_user_tariff(t_id_1, self.u_id)
        # Only the attached tariff (t1) is reported, not t0.
        user_tariffs = self._get_user_tariffs([self.u_id])
        self.assertEquals(1, len(user_tariffs))
        self.assertEquals(self.u_id, user_tariffs[0]['user_id'])
        self.assertEquals([t_id_1], user_tariffs[0]['tariff_ids'])
# Run the suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main() | [
"sand8080@gmail.com"
] | sand8080@gmail.com |
9cc9a464a6dc4cce9662b5e9dc7d486f065d16bd | a59996c5ded42b065817b1393d57c8c8e1105d9d | /numbers.py | a7ecf0342f166069fb23b66213fe5d4c981a3d42 | [
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ElliotEvins/dsc-git-merge-lab-onl01-dtsc-ft-052620 | 4f2634458a101003bfdc5b02bff22a3c91fb9476 | b2387d4a112ef3f4b0b207595272ba261f3632c0 | refs/heads/master | 2022-09-12T00:13:16.606904 | 2020-05-27T19:49:04 | 2020-05-27T19:49:04 | 267,405,602 | 0 | 0 | NOASSERTION | 2020-05-27T19:14:08 | 2020-05-27T19:14:04 | null | UTF-8 | Python | false | false | 36 | py | for i in range(10):
print(i**3)
| [
"ElliotWEvins@gmail.com"
] | ElliotWEvins@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.