text stringlengths 38 1.54M |
|---|
from flask_restful import Resource
from flask import jsonify
from dnd5eApi.models.class_name import ClassName as ClassNameModel
from dnd5eApi.schema.class_name import class_name_schema
from dnd5eApi.schema.class_name import class_names_schema
class ClassName(Resource):
    """REST resource exposing D&D 5e character classes.

    GET with no id returns every class; GET with an id returns one class.
    """

    def get(self, id=None):
        """Return one class (by primary key) or all classes when id is None."""
        if id is None:
            results = ClassNameModel.query.all()
            class_names = class_names_schema.dump(results)
            return jsonify(class_names.data)
        # Query.get() returns None for a missing key rather than raising, so
        # test the result directly.  The old `except IntegrityError` caught an
        # exception that was never imported (a NameError if it ever fired) and
        # that Query.get() does not raise anyway.
        result = ClassNameModel.query.get(id)
        if result is None:
            return jsonify({"message": "ClassName could not be found."}), 400
        class_name = class_name_schema.dump(result)
        return jsonify(class_name.data)
|
import os
import sys
import sqlite3
import hashlib
# Directory the script was launched from (kept for reference; unused below).
current_directory = os.getcwd()
# Relative path to the pseudopotential SQLite database this script edits.
database_name = '../psps.db'
def db_check():
    """Return True when the database file exists; exit the script otherwise."""
    try:
        open(database_name)
        return True
        #database does exist
    except IOError as e:
        if e.args[0] == 2:
            # errno 2 == ENOENT: the file is simply missing.
            print "This database doesn't exist."
            sys.exit(1)
            #if database doesn't exist, warn user and quit
        else:
            # Any other I/O problem: report it and stop.
            print e
            sys.exit(1)
# Open the database (db_check() has already exited the script if it is absent).
if db_check() == True:
    db = sqlite3.connect(database_name)
    cursor = db.cursor()
def repeater(file_name):
    """Echo the proposed file name back for confirmation.

    An empty reply accepts *file_name* unchanged; any other reply prompts
    for a replacement name (a reply containing 'q' quits the script).
    """
    file_name_check = raw_input(file_name + "? ")
    if file_name_check != "":
        file_name = raw_input("Re-input file name or [Q]uit:")
        if 'q' in file_name.lower():
            print "User decided to quit."
            sys.exit(1)
    return file_name
    #gives user a second chance to enter file name in case they mistyped
def file_name_getter(type):
    """Prompt until the user supplies the name of an existing file.

    *type* is only interpolated into the prompt text.  Typing 'quit' (or
    quitting at the confirmation prompt) exits the script.
    """
    file_name = ""
    while os.path.isfile(file_name) == False:
        file_name = raw_input("What's the name of the " + type + " file?")
        if "quit" in file_name:
            print "User decided to quit."
            sys.exit(1)
        # Give the user a chance to correct a typo before validating.
        file_name = repeater(file_name)
        if os.path.isfile(file_name) == False:
            print "Invalid file given. Try again or type 'Quit'."
    return file_name
def file_path(only_file, only_path, name):
    """Join *only_path* and *only_file* into a full path.

    When the user typed a bare file name (no directory part, so
    only_file == name), the current working directory is used instead.
    """
    if only_file == name:
        # No directory component was given, so the file must be local.
        print "The file is in the current directory."
        only_path = os.getcwd()
        full_file = os.path.join(only_path, only_file)
    else:
        # A path was included with the file name; use it as-is.
        print "The path to the file was given."
        full_file = os.path.join(only_path, only_file)
    print ""
    return full_file
def file_info_getter(full_file):
    """Return the entire contents of *full_file* as one string."""
    with open(full_file, 'r') as handle:
        contents = handle.read()
    return contents
def empty(something):
    """Return True when *something* holds no usable text.

    Treats None, the empty string, a single space, and a lone newline as
    "nothing" -- the forms a blank database column comes back as.
    """
    # `is None` for the identity check (not `== None`); a membership test
    # replaces the chain of equality comparisons.
    return something is None or something in ("", " ", "\n")
# --- Read the abinet file and compute its md5 fingerprint --------------------
print "Make sure you are in the OPF directory."
file_name = file_name_getter("abinet")
only_path, only_file = os.path.split(os.path.normpath(file_name))
abinet_file = only_file
data = file_info_getter(abinet_file)
md5_list = []
m = hashlib.md5(data)
hash = m.hexdigest()
print "\nmd5: "
print hash
# The md5 of the abinet file is the key used to look rows up in `pseudos`.
cursor.execute(''' SELECT md5_abinet FROM pseudos ''')
md5_raw_list = cursor.fetchall()
for one_md5 in md5_raw_list:
    if one_md5[0] != None:
        md5 = one_md5[0].encode('ascii', 'ignore')
        md5_list.append(md5)
print "List of current md5's: "
print md5_list
print " "
# Stop unless this abinet is already registered; adding new rows is adder_db's job.
if hash not in md5_list:
    print "md5 abinet is not in pseudo table. Use adder_db to add this entry."
    sys.exit(1)
def fill_opts_check(raw_db, name):
    """Ask the user for the *name* ("fill" or "opts") file and reconcile it
    with the database row keyed by the global `hash`.

    If the stored column is empty the file's contents are written in;
    otherwise the file must match the stored value or the script aborts.
    Returns the value the database now holds for that column.
    """
    file_name = file_name_getter(name)
    only_path, only_file = os.path.split(os.path.normpath(file_name))
    full_file = file_path(only_file, only_path, file_name)
    data = file_info_getter(full_file)
    if empty(raw_db):
        # NOTE(review): `name` is interpolated into the SQL; it is only ever
        # "fill" or "opts" from this script, but never call this with
        # untrusted input.
        cursor.execute( ''' UPDATE pseudos SET ''' + name + '''_name=?, ''' + name + '''=? '''
            '''WHERE md5_abinet=? ''', (only_file, data, hash,))
        print name + " will be added in the database. Note this change will not take effect if you quit."
    else:
        # Column already populated: the local file must agree with it.
        info_db = raw_db.encode('ascii', 'ignore')
        if data != info_db:
            print "The " + name + " information does not match the current information in the database."
            sys.exit(1)
    # Read the value back so the caller always gets what the DB now holds.
    cursor.execute( ''' SELECT ''' + name + ''' FROM pseudos WHERE md5_abinet=? ''', (hash,))
    retrieved = cursor.fetchall()[0]
    correct_info = retrieved[0].encode('ascii', 'ignore')
    return correct_info
# Pull the stored fill/opts columns for this abinet and reconcile each with
# the user's local files.
cursor.execute( ''' SELECT fill, opts FROM pseudos WHERE md5_abinet=? ''', (hash,))
retrieved = cursor.fetchall()[0]
raw_fill_db = retrieved[0]
raw_opts_db = retrieved[1]
fill = fill_opts_check(raw_fill_db, "fill")
opts = fill_opts_check(raw_opts_db, "opts")
# The `edges` file: first number indexes the atom (used for the znucl lookup
# later); the next two are the N and L quantum numbers of the edge.
# NOTE(review): indexing the result of map() only works on Python 2, where
# map returns a list -- this script targets Python 2.
numbers = file_info_getter("edges")
list_numbers = map(int, numbers.split())
edges_z = list_numbers[0]
N = list_numbers[1]
L = list_numbers[2]
print "N = " + str(N)
print "L = " + str(L)
user_choice = ""
cursor.execute( '''SELECT id FROM core_potential''' )
id_list = cursor.fetchall()
temp_list = []
for id in id_list:
new_id = id[0]
temp_list.append(new_id)
id_list = temp_list
print("\nCurrent id's:"),
print id_list
print ""
for id in id_list:
cursor.execute( '''SELECT N, L, md5_abinet FROM core_potential WHERE id=?''', (id,))
retrieved = cursor.fetchall()[0]
try:
N_db = retrieved[0] + 1 -1
L_db = retrieved[1] + 1 -1
md5_abinet = retrieved[2].encode('ascii', 'ignore')
print "\nmd5_abinet, N, and L from database: " + md5_abinet + ", " + str(N_db) + ", " + str(L_db)
if N_db == N and L_db == L and md5_abinet == hash:
print "This database entry matches the information provided.\n"
user_choice = raw_input("Would you like to [Q]uit, or [A]dd or [O]verwrite the existing entry?")
if user_choice == "":
print "That's not one of the options."
sys.exit(1)
chosen_id = id
break
except AttributeError and TypeError:
print "N, L, and/or md5_abinet equaled None. That entry was skipped."
#functions used in user_choice if/elif statements
###############################################################################################################
def radius_calculator():
    """Read screen.shells and return its first number as a float rounded
    to two decimal places."""
    raw = file_info_getter("screen.shells")
    first_token = raw.split()[0]
    return round(float(first_token), 2)
def core_potential_file_getter(asked_for):
    """Return the text of one core-potential file ("vc_bare", "vpseud1" or
    "vvallel") from the zpawinfo directory for the current z, N, L edge.

    The atomic number z is derived from the local edges/typat/znucl files
    and cross-checked against the database before the lookup is trusted.
    """
    typat_zs = file_info_getter("typat")
    typat_zlist = typat_zs.split()
    typat_z = int(typat_zlist[edges_z -1])
    # edges gives which atom; typat maps that atom to an index into znucl.
    z_string = file_info_getter("znucl")
    z_list = z_string.split()
    z = int(z_list[typat_z -1])
    cursor.execute('''SELECT id FROM pseudos WHERE md5_abinet=?''', (hash,))
    main_id = cursor.fetchone()[0]
    cursor.execute('''SELECT z FROM main WHERE id=?''', (main_id,))
    z_db = cursor.fetchone()[0]
    if z != z_db:
        print "Something's wrong. znucl from database does not match znucl from file."
        sys.exit(1)
    # File names end with z###n##l## encoding the edge.
    edgename = "z%03in%02il%02i" % (z, N, L)
    possible_files = []
    list_files = os.listdir("zpawinfo")
    for one_file in list_files:
        if one_file.endswith(edgename):
            possible_files.append(one_file)
    for possible_file in possible_files:
        if possible_file.startswith("vc_bare"):
            vc_bare_file = os.path.join("zpawinfo", possible_file)
        elif possible_file.startswith("vpseud1"):
            vpseud1_file = os.path.join("zpawinfo", possible_file)
        elif possible_file.startswith("vvallel"):
            vvallel_file = os.path.join("zpawinfo", possible_file)
    # NOTE(review): if any of the three files is missing from zpawinfo, the
    # corresponding *_file name is never bound and the read below raises
    # NameError -- confirm the directory always holds all three.
    vc_bare_text = file_info_getter(vc_bare_file)
    vpseud1_text = file_info_getter(vpseud1_file)
    vvallel_text = file_info_getter(vvallel_file)
    if asked_for == "vc_bare":
        return vc_bare_text
    elif asked_for == "vpseud1":
        return vpseud1_text
    elif asked_for == "vvallel":
        return vvallel_text
def text_getter():
    """Return the contents of the vc_bare file in zpawinfo whose name ends
    with the radius suffix "R#.##"."""
    radius_tag = "R" + str(radius_calculator())
    # round() drops a trailing zero (e.g. 1.5 -> "R1.5"), so pad back to two
    # decimals to match the file-name convention.
    if len(radius_tag) == 4:
        radius_tag = radius_tag + "0"
    for candidate in os.listdir("zpawinfo"):
        if candidate.startswith("vc_bare") and candidate.endswith(radius_tag):
            text_file = os.path.join("zpawinfo", candidate)
    return file_info_getter(text_file)
def core_potential_files_update(id):
    """Refresh the vc_bare/vpseud1/vvallel text columns for *id* in the
    core_potential table from the files in zpawinfo."""
    columns = ("vc_bare", "vpseud1", "vvallel")
    texts = tuple(core_potential_file_getter(column) for column in columns)
    cursor.execute('''UPDATE core_potential SET vc_bare=?, vpseud1=?, vvallel=? WHERE id=?''',
                   texts + (id,))
def overwrite_shortcut(id):
    """Interactive overwrite menu for database entry *id*.

    Lets the user overwrite everything, just the core_potential files,
    just the radius, just the text_file, or quit.  Matching follows the
    if/elif order: quit first, then 't'+'r' together, 'c', 'r', 't', 'a'.
    """
    overwrite_choice = raw_input("Would you like to: \nOverwrite [A]ll information,"
        "\nOverwrite [c]ore_potential files, [r]adius, or [t]ext_file, \nor [Q]uit?")
    if "q" in overwrite_choice.lower():
        print "User decided to quit."
        sys.exit(1)
    elif "t" in overwrite_choice.lower() and "r" in overwrite_choice.lower():
        # Both radius and text_file requested in one answer.
        radius = radius_calculator()
        text = text_getter()
        cursor.execute('''UPDATE radii_info SET radius=?, text_file=? WHERE id=?''', (radius, text, id,))
        print "Overwrote text_file and radius."
    elif "c" in overwrite_choice.lower():
        core_potential_files_update(id)
        print "Overwrote core_potential files."
    elif "r" in overwrite_choice.lower():
        radius = radius_calculator()
        cursor.execute('''UPDATE radii_info SET radius=? WHERE id=?''', (radius, id,))
        print "Overwrote radius."
    elif "t" in overwrite_choice.lower():
        text = text_getter()
        cursor.execute('''UPDATE radii_info SET text_file=? WHERE id=?''', (text, id,))
        print "Overwrote text_file."
    elif "a" in overwrite_choice.lower():
        # Full overwrite: delete both rows, then rebuild them from files.
        cursor.execute('''DELETE FROM core_potential WHERE id=?''', (id,))
        cursor.execute('''DELETE FROM radii_info WHERE id=?''', (id,))
        cursor.execute('''INSERT INTO core_potential(id, md5_abinet, N, L) VALUES(?, ?, ?, ?)''',
            (id, hash, N, L,))
        radius = radius_calculator()
        cursor.execute('''INSERT INTO radii_info(id, radius) VALUES(?, ?)''', (id, radius,))
        core_potential_files_update(id)
        text = text_getter()
        cursor.execute('''UPDATE radii_info SET text_file=? WHERE id=?''', (text, id,))
        print "Overwrote all information."
############################################################################################
# ----- Act on the user's decision --------------------------------------------
if user_choice == "":
    # No matching entry was found: create fresh rows in both tables and
    # populate them from the local files.
    cursor.execute('''INSERT INTO core_potential(md5_abinet, N, L) VALUES(?, ?, ?)''', (hash, N, L,))
    id = cursor.lastrowid
    cursor.execute('''INSERT INTO radii_info(id) VALUES(?)''', (id,))
    core_potential_files_update(id)
    radius = radius_calculator()
    text = text_getter()
    cursor.execute('''UPDATE radii_info SET radius=?, text_file=? WHERE id=?''', (radius, text, id,))
    print "A new entry in the database was created."
elif "q" in user_choice.lower():
    print "User decided to quit."
    sys.exit(1)
elif "a" in user_choice.lower():
    # "Add": fill in only the columns that are currently blank.
    id = chosen_id
    print "\nFor id " + str(id) + " all missing information will be filled out."
    needed_list = []
    searched_dict = {}
    cursor.execute( '''SELECT vc_bare FROM core_potential WHERE id=?''', (id,))
    searched_dict["vc_bare"] = cursor.fetchall()[0][0]
    cursor.execute( '''SELECT vpseud1 FROM core_potential WHERE id=?''', (id,))
    searched_dict["vpseud1"] = cursor.fetchall()[0][0]
    cursor.execute( '''SELECT vvallel FROM core_potential WHERE id=?''', (id,))
    searched_dict["vvallel"] = cursor.fetchall()[0][0]
    cursor.execute( '''SELECT radius FROM radii_info WHERE id=?''', (id,))
    searched_dict["radius"] = cursor.fetchone()[0]
    cursor.execute( '''SELECT text_file FROM radii_info WHERE id=?''', (id,))
    searched_dict["text_file"] = cursor.fetchall()[0][0]
    # Any blank column goes on the to-fetch list.
    for name in searched_dict:
        print(name),
        thing = searched_dict[name]
        if empty(thing) == True:
            print "is blank."
            needed_list.append(name)
        else:
            print "has " + str(thing)[:10]
    if len(needed_list) != 0:
        for needed in needed_list:
            if needed == "vc_bare" or needed == "vpseud1" or needed == "vvallel":
                text = core_potential_file_getter(needed)
                cursor.execute('''UPDATE core_potential SET ''' + needed + '''=? WHERE id=?''',
                    (text, id,))
            elif needed == "text_file":
                text = text_getter()
                cursor.execute('''UPDATE radii_info SET text_file=? WHERE id=?''', (text, id,))
            elif needed == "radius":
                radius = radius_calculator()
                cursor.execute('''UPDATE radii_info SET radius=? WHERE id=?''', (radius, id,))
        print "\nMissing information was filled out in database."
    else:
        # Nothing missing: offer the overwrite menu instead.
        print "There is nothing that needs to be added.\n"
        overwrite_shortcut(id)
elif "o" in user_choice.lower():
    id = chosen_id
    overwrite_shortcut(id)
# Persist everything and close the connection.
db.commit()
db.close()
|
# A script that removes all your stared repos.
# > python3 reset_stars.py --token <token>
import argparse
import csv
import requests
import sys
def get_stars(token):
    """Return every repository starred by the authenticated user.

    Follows GitHub's Link-header pagination until no `next` page remains.
    Raises requests.HTTPError on a failed request.
    """
    repos = []
    url = "https://api.github.com/user/starred?per_page=100"
    # Authenticate with a header instead of gluing `&access_token=` onto the
    # URL: the old code re-appended the token to every paginated `next` link
    # (growing/duplicating the query string), and query-string tokens are
    # deprecated by the GitHub API.
    headers = {"Authorization": f"token {token}"} if token else {}
    while True:
        resp = requests.get(url, headers=headers)
        resp.raise_for_status()
        repos.extend(resp.json())
        url = resp.links.get('next', {}).get('url')
        if not url:
            break
    return repos
def main(token, read_only):
    """Unstar every starred repo; with *read_only* truthy, only list them."""
    stars = get_stars(token)
    print(f"Found {len(stars)} stars.")
    # The DELETE must carry the token too -- the original sent an
    # unauthenticated request, which GitHub rejects.
    headers = {"Authorization": f"token {token}"} if token else {}
    for star in stars:
        owner = star['owner']['login']
        repo = star['name']
        url = f"https://api.github.com/user/starred/{owner}/{repo}"
        print(f"Unstarring {owner}/{repo}")
        if not read_only:
            requests.delete(url, headers=headers)
if __name__ == '__main__':
    cl = argparse.ArgumentParser(description="This script removes all your stars.")
    cl.add_argument("--token", help="a session token for accessing stars")
    # store_true makes --read-only a boolean flag; without it argparse
    # demanded a value, so a bare `--read-only` crashed the parser.
    cl.add_argument("--read-only", action="store_true", help="lists stars to remove")
    args = cl.parse_args()
    print("Removing stars from your account")
    sys.exit(main(args.token, args.read_only))
|
from tensorflow.examples.tutorials.mnist import input_data
# Load the Fashion-MNIST data via the (deprecated) TF tutorial reader.
# one_hot=False keeps integer class labels; validation_size=0 puts every
# training example in the train split.
DATA_DIR = '../data/fashion'
fashion_mnist = input_data.read_data_sets(DATA_DIR, one_hot=False, validation_size=0)
train_images = fashion_mnist.train.images
#train_images = train_images.reshape((60000,28,28))
train_labels = fashion_mnist.train.labels
test_images = fashion_mnist.test.images
#test_images = test_images.reshape((10000,28,28))
test_labels = fashion_mnist.test.labels
#from keras import models
#from keras import layers
#
#model = models.Sequential()
#model.add(layers.Conv2D(32, (3, 3), activation='relu',
# input_shape=(28*28,)))
#model.add(layers.MaxPooling2D((2, 2)))
#model.add(layers.Conv2D(64, (3, 3), activation='relu'))
#model.add(layers.MaxPooling2D((2, 2)))
#model.add(layers.Conv2D(128, (3, 3), activation='relu'))
#model.add(layers.MaxPooling2D((2, 2)))
#model.add(layers.Conv2D(128, (3, 3), activation='relu'))
#model.add(layers.MaxPooling2D((2, 2)))
#model.add(layers.Dense(512, activation='relu'))
#model.add(layers.Dense(10, activation='softmax'))
#
#from keras import optimizers
#
#model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
# loss='binary_crossentropy',
# metrics=['acc'])
#
##
#train_images = train_images.reshape((60000,28*28))
#train_images = train_images.astype('float32')/255
#
#test_images = test_images.reshape((10000,28*28))
#test_images = test_images.astype('float32')/255
#
#from keras.utils import to_categorical
#
#train_labels = to_categorical(train_labels)
#test_labels = to_categorical(test_labels)
#
#model.fit(train_images, train_labels, epochs=5, batch_size=128)
#
#test_loss,test_acc = model.evaluate(test_images,test_labels)
#
#print('test_acc:',test_acc)
|
from __future__ import print_function, division, absolute_import
from .compression import compressions, default_compression
from .core import dumps, loads, maybe_compress, decompress, msgpack
from .serialize import (serialize, deserialize, Serialize, Serialized,
to_serialize, register_serialization)
from . import numpy
|
import numpy as np
from CamRunnable import camVideoStream
import math as m
from time import sleep
import cv2
frame_lim = 2000 # Number of independent frames to analyze
curr_frame = 0
previous_time = -1
curr_frame = 0
# Threaded camera reader: device 0, 30 fps, 640x480.
cam_holder = camVideoStream(0,30,640,480)
cam_holder.start()
# initialize the first frame in the video stream
firstFrame = None
min_area = 2500      # smallest contour area (px) treated as real motion
frame_refresh = 3    # re-capture the reference frame every N frames
while curr_frame <= frame_lim:
    a_frame, time = cam_holder.read()
    # Only process when the camera delivered a genuinely new frame.
    if time != previous_time:
        gray = cv2.cvtColor(a_frame,cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0) # blur to suppress pixel noise
        if (firstFrame is None) or (curr_frame%frame_refresh) == 0:
            # (Re)capture the reference frame motion is measured against.
            firstFrame = gray
            text = "Unoccupied"
            curr_frame = curr_frame + 1
            continue
        # compute the absolute difference between the current frame and
        # first frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=5)
        # NOTE(review): the 3-tuple return is the OpenCV 3.x API; OpenCV 4
        # returns (contours, hierarchy) -- confirm the installed version.
        (image, contours, hierarchy) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over the contours
        for c in contours:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < min_area:
                continue
            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(a_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"
        cv2.putText(a_frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        # show the frame and record if the user presses a key
        cv2.imshow("Security Feed", a_frame)
        cv2.imshow("Thresh", thresh)
        cv2.imshow("Frame Delta", frameDelta)
        previous_time = time
        curr_frame = curr_frame + 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # No fresh frame yet; sleep roughly one frame period.
        sleep(0.017)
cam_holder.stop()
cv2.destroyAllWindows()
color = "blue"

# If color is not red check green; if not green check purple; if not purple
# check blue.  A lookup table replaces the if/elif ladder.
_color_messages = {
    "red": "It's a red",
    "green": "It's a green",
    "purple": "It's a purple",
    "blue": "It's a blue",
}
if color in _color_messages:
    print(_color_messages[color])
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/custom-actions
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
import requests
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import AllSlotsReset,SlotSet
from search_course import Searcher
class ActionSearch(Action):
    """Search courses for the tracked keyword and suggest them to the user."""

    def name(self) -> Text:
        return "action_search"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        money = tracker.get_slot("amount-of-money")
        category = tracker.get_slot("category")
        keyword = tracker.get_slot("keyword")
        res = Searcher(keyword, 3)
        if res['results'] > 0:
            dispatcher.utter_message(text="Đây là gợi ý của tôi dành cho bạn:")
            for x in res['data']:
                dispatcher.utter_message(image=x['thumbnail'], text=x['title'])
        else:
            dispatcher.utter_message(text=f"Hiện không có khóa học về {keyword}")
        # Guard the slot update: the old code indexed res['data'][0] even when
        # the search came back empty, raising IndexError.  With no results the
        # slot is cleared so action_check_knowledge reports "no course".
        if res['data']:
            first = res['data'][0]
            knowledge = first['knowledge'] if first['knowledge'] is not None else []
        else:
            knowledge = None
        return [SlotSet("knowledge", knowledge)]
class ActionCheckKnowledge(Action):
    """Tell the user what a course teaches, based on the knowledge slot."""

    def name(self) -> Text:
        return "action_check_knowledge"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        knowledge = tracker.get_slot("knowledge")
        if knowledge is not None:
            dispatcher.utter_message(text="Trong khóa học này bạn sẽ học được:")
            dispatcher.utter_message(text=f"{knowledge}")
        else:
            dispatcher.utter_message(response="utter_no_course")
        return []
|
# Copyright © 2021 Novobi, LLC
# See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class OpportunityTask(models.Model):
    """A to-do item attached to a CRM lead/opportunity."""

    _name = "opportunity.task"
    _description = 'Opportunity Tasks'

    # Tag record describing the task (a many2one to a tag, not free text).
    name = fields.Many2one('opportunity.task.tag', string='Task', help='The task tag for Task.')
    # Owning lead/opportunity.
    lead_id = fields.Many2one('crm.lead', string='Lead/Opportunity')
    # False = Todo, True = Done (per the field's help text).
    status = fields.Boolean(string='Status', help='The status of task. It have two state: Todo and Done.')
    # Date the task is planned for.
    date = fields.Date(string='Date', help='The date planned for the task.')
|
import cv2
import numpy as np
# Detect 4-vertex contours (squares/rectangles) in the image and outline them.
image = cv2.imread('./assets/square.jpg')
# NOTE(review): this divides shape[0] by itself, so ratio is always 1.0; it
# looks like the denominator was meant to be a *resized* image's height --
# confirm against the tutorial this was taken from.
ratio = image.shape[0] / float(image.shape[0])
# Invert so dark shapes become bright before grayscale + threshold.
reverse = 255 - image
gray = cv2.cvtColor(reverse, cv2.COLOR_RGB2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
# NOTE(review): the 3-value unpack is the OpenCV 3.x findContours API.
_, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
    # Approximate the contour; exactly 4 vertices => treat as a square.
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    if len(approx) == 4:
        # Scale back to the original image's coordinates and draw.
        c = c.astype("float")
        c *= ratio
        c = c.astype("int")
        cv2.drawContours(image, [c], 0, (0, 255, 0), 2)
        print(approx)
# display
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""Represent a socket."""
from pytradfri.const import ATTR_DEVICE_STATE, ATTR_SWITCH_PLUG
class Socket:
    """Represent a socket."""

    def __init__(self, device, index):
        self.device = device
        self.index = index

    @property
    def state(self):
        """True when the plug reports a device state of 1 (on)."""
        return self.raw.get(ATTR_DEVICE_STATE) == 1

    @property
    def raw(self):
        """Return raw data that it represents."""
        return self.device.raw[ATTR_SWITCH_PLUG][self.index]

    def __repr__(self):
        return "<Socket #{} - name: {}, state: {}>".format(
            self.index, self.device.name, "on" if self.state else "off"
        )
|
class Solution:
    """LeetCode 27 -- remove every occurrence of val from nums in place."""

    def removeElement(self, nums, val):
        """Compact nums so its first k slots hold the non-val elements;
        return k.

        NOTE: the original class defined removeElement twice; the first
        (swap-based) definition was dead code silently shadowed by the
        second, so it has been removed along with its debug print.
        """
        write = -1  # index of the last kept element
        for read in range(len(nums)):
            if nums[read] != val:
                write += 1
                nums[write] = nums[read]
        return write + 1
if __name__ == '__main__':
    # Exercise removeElement on the three sample inputs.
    solver = Solution()
    for sample, target in ([0, 1, 2, 2, 3, 0, 4, 2], 2), ([3, 2, 2, 3], 2), ([1], 1):
        print(solver.removeElement(sample, target))
"""
Given: A positive integer n<=7
Return: The total number of permutations of length n, followed by a list of all
such permutations (in any order).
"""
def perm_help(fixed, to_perm):
    """Recursively build permutations.

    Returns a two-item list [count, permutations], where every permutation
    is *fixed* followed by some ordering of *to_perm*.  Each permutation is
    materialised as its own list (memory is not a concern here).
    """
    # Base case: one element left means one finished permutation.
    if len(to_perm) == 1:
        return [1, [fixed + to_perm]]
    # Otherwise fix each element in turn and recurse on the remainder.
    total = [0, []]
    for chosen in to_perm:
        remainder = [value for value in to_perm if value != chosen]
        count, perms = perm_help(fixed + [chosen], remainder)
        total[0] += count
        total[1] += perms
    return total
def perm(n):
    """Print the number of permutations of 1..n, then one permutation per
    line (space-separated)."""
    count, permutations = perm_help([], list(range(1, n + 1)))
    print(count)
    for one in permutations:
        print(*one, sep=" ")
# perm(7)
# output is a few thousand lines long
# run with `python3 perm.py > out.txt` |
import time
sent_1 = "I'm the first sentence."
print(sent_1)
# write your code here
# Pause ten seconds between printing the two sentences.
time.sleep(10)
sent_2 = "And I'm the second sentence."
print(sent_2)
|
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from pylab import grid
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from pylab import grid
from scipy.stats import norm
import matplotlib.mlab as mlab
from numpy import linalg as LA
from keras.models import Sequential,load_model
from keras.layers.core import Dense
from keras.callbacks import ModelCheckpoint
from keras.layers.recurrent import SimpleRNN
from keras.layers import TimeDistributed, Dense, Activation, Dropout
from keras.utils import plot_model
from keras import metrics
from keras import optimizers
from keras import regularizers
from keras import initializers
from keras import backend as K
import tensorflow as tf
# taking dataset from function
from generate_data_set_time_pulse import *
# To print network status
from print_status_1_inputs_paper import *
def cm2inch(*tupl):
    """Convert centimetre values to inches.

    Accepts either separate values (cm2inch(8.5, 7.5)) or a single tuple
    (cm2inch((8.5, 7.5))); returns a tuple of inches either way.
    """
    per_inch = 2.54
    values = tupl[0] if isinstance(tupl[0], tuple) else tupl
    return tuple(value / per_inch for value in values)
#Parameters:
sample_size_3 = 3       # number of sample trials plotted per network
mem_gap = 20            # memory gap passed to the trial generator
sample_size = 11 # Data set to print some results
lista_distancia_all =[]
# Generate a data Set to study the Network properties:
x_train, y_train,mask,seq_dur = generate_trials(sample_size,mem_gap)
test = x_train[0:1,:,:] # Here you select from the generated data set which is used for test status
test_set = x_train[0:20,:,:]
y_test_set = y_train[0:20,:,0]
plot_dir="plots_paper"
# Directory walked below for the saved trained networks.
r_dir="Networks_and_files/networks/.."
lista_neg=[]
lista_pos=[]
total =[]
lista_neg_porc=[]
lista_pos_porc=[]
lista_tot_porc=[]
# Walk the networks directory, load each saved model, and analyse the
# eigenvalue spectrum of its recurrent weight matrix.  (Python 2 script:
# note the statement-form prints below.)
for root, sub, files in os.walk(r_dir):
    files = sorted(files)
    for i,f in enumerate(files):
        print("file: ",f)
        #General network model construction:
        model = Sequential()
        model = load_model(r_dir+"/"+f)
        # Compiling model for each file:
        model.compile(loss = 'mse', optimizer='Adam', sample_weight_mode="temporal")
        model.load_weights(r_dir+"/"+f)
        print"-------------",i
        # Pull layer-0 weights: index [0] = input weights, [1] = recurrent.
        pesos = model.layers[0].get_weights()
        pesos__ = model.layers[0].get_weights()[0]
        pesos_in = pesos[0]
        pesos = model.layers[0].get_weights()[1]
        N_rec =len(pesos_in[0]) # it has to match the value of the recorded trained network
        neurons = N_rec
        colors = cm.rainbow(np.linspace(0, 1, neurons+1))
        print"-------------\n-------------"
        print"pesos:\n:",pesos
        print"-------------\n-------------"
        unidades = np.arange(len(pesos))
        conection = pesos
        print("array: ",np.arange(len(pesos)))
        #print("biases: ",biases)
        print"##########################"
        print"conection",conection
        print"##########################\n ##########################"
        histo_lista =[]
        array_red_list =[]
        conection_usar =conection
        # Eigen-decomposition of the recurrent weight matrix.
        w, v = LA.eig(conection_usar)
        print"Autovalores:\n", w
        print"Autovectores:\n",v
        print"Distancia:", np.sqrt(w.real*w.real+w.imag*w.imag)
        # Pair each eigenvalue with its real part / abs(real part) so the
        # dominant ones can be picked out with max().
        lista_dist = np.c_[w,w.real]
        lista_dist_2= np.c_[w,abs(w.real)]
        maximo = max(lista_dist, key=lambda item: item[1])
        maximo_2= max(lista_dist_2, key=lambda item: item[1])
        marcar = maximo[0]
        marcar_2= maximo_2[0]
        print"Primer elemento",maximo
        print"Maximo marcar",marcar
        # Oscillation frequency from the dominant eigenvalue (0 when real).
        frecuency=0
        if marcar_2.imag==0:
            frecuency =0
        else:
            frecuency =abs(float(marcar_2.imag)/(3.14159*float(marcar_2.real)))
        print "frecuency",frecuency
        ################ Fig Eigenvalues ########################
        plt.figure(figsize=cm2inch(8.5,7.5))
        plt.scatter(w.real,w.imag,color="hotpink",label="Eigenvalue spectrum\n ",s=2)#Total of: "+str(len(w.real))+" values")
        # for plotting circle line:
        a = np.linspace(0, 2*np.pi, 500)
        cx,cy = np.cos(a), np.sin(a)
        plt.plot(cx, cy,'--', alpha=.5, color="dimgrey") # draw unit circle line
        plt.scatter(marcar.real,marcar.imag,color="red", label="Eigenvalue maximum real part",s=5)
        plt.plot([0,marcar.real],[0,marcar.imag],'-',color="grey")
        plt.axvline(x=1,color="salmon",linestyle='--')
        plt.xticks(fontsize=4)
        plt.yticks(fontsize=4)
        plt.xlabel(r'$Re( \lambda)$',fontsize = 11)
        plt.ylabel(r'$Im( \lambda)$',fontsize = 11)
        plt.legend(fontsize= 5,loc=1)
        plt.savefig(plot_dir+"/autoval_"+str(i)+"_"+str(f)+"_.png",dpi=300, bbox_inches = 'tight')
        plt.close()
        # Plots
        # Plot the inner state of the network for a few sample stimuli.
        for sample_number in np.arange(sample_size_3):
            print ("sample_number",sample_number)
            print_sample = plot_sample(sample_number,1,neurons,x_train,y_train,model,seq_dur,i,plot_dir)
|
from django.http import HttpResponse
from django.template.loader import render_to_string
from user_agents import parse
from apps.student.decorators import decorator
from apps.student.services.reportservice import ReportService
from apps.student.queries.reportquery import ReportQuery
from apps.student.forms.report.reportdeleteform import ReportDeleteForm
from apps.student.forms.report.reportsearchform import ReportSearchForm
from apps.student.forms.report.reportsearchspform import ReportSearchSpForm
from apps.student.forms.search.pagingform import PagingForm
from apps.student.forms.search.sortform import SortForm
from apps.student.functions import function
# TODO: provisional page size chosen just to exercise the Paging code.
__limit = 2
@decorator.authenticate_async("index")
def index(request):
    """Render the report index page, with a mobile template for mobile UAs."""
    user_agent = parse(request.META['HTTP_USER_AGENT'])
    if user_agent.is_mobile:
        template = 'student/report/index_sp.html'
        context = {}
    else:
        template = 'student/report/index.html'
        context = {'authority_name': request.session['authority']}
    return HttpResponse(render_to_string(template, context, request=request))
@decorator.authenticate_async("search")
def search(request):
    """Validate the search form and run a first-page search using the
    default sort (target year, descending)."""
    form = ReportSearchForm(data=request.POST)
    if not form.is_valid():
        return None
    cleaned = form.cleaned_data
    return __search(
        request,
        cleaned['target_year'],
        cleaned['full_name'],
        cleaned['file_name'],
        'target-year-sort',
        'True')
@decorator.authenticate_async("sort")
def sort(request):
    """Re-run the session-stored search criteria with a new sort order."""
    form = SortForm(data=request.POST)
    if not form.is_valid():
        return None
    return __search(
        request,
        request.session['target_year'],
        request.session['full_name'],
        request.session['file_name'],
        form.cleaned_data['target_sort_item'],
        form.cleaned_data['target_descending_order'])
def __search(
        request,
        target_year,
        full_name,
        file_name,
        target_sort_item,
        target_descending_order):
    """Run the report query for page 1 and render the result-list fragment.

    The search criteria and sort state are stored in the session so that
    later sort()/paging() requests can re-run the same query.
    """
    offset = 0
    result_list_count = ReportQuery().custom_count(target_year, full_name, file_name)
    result_list = ReportQuery().custom_query(
        target_year, full_name, file_name,
        offset, __limit,
        target_sort_item, target_descending_order)
    # Persist criteria for subsequent sort()/paging() calls.
    request.session['target_year'] = target_year
    request.session['full_name'] = full_name
    request.session['file_name'] = file_name
    request.session['current_sort_item'] = target_sort_item
    request.session['current_descending_order'] = target_descending_order
    context = {
        'result_list': result_list,
        'result_list_count': result_list_count,
        'current_sort_item': target_sort_item,
        'current_descending_order': target_descending_order,
        'current_page': offset + 1,
        'limit': __limit,
        'authority_name': request.session['authority'],
    }
    return HttpResponse(render_to_string(
        'student/report/search_result.html', context, request=request))
@decorator.authenticate_async("search_sp")
def search_sp(request):
    """Smartphone search: validate the form and render the mobile result list."""
    form = ReportSearchSpForm(data=request.POST)
    if not form.is_valid():
        return None
    rows = ReportQuery().custom_query_sp(form.cleaned_data['search_value'])
    return HttpResponse(render_to_string(
        'student/report/search_result_sp.html',
        {'result_list': rows},
        request=request))
@decorator.authenticate_async("paging")
def paging(request):
    """Validate the paging form and move to the requested result page."""
    form = PagingForm(data=request.POST)
    if not form.is_valid():
        return None
    cleaned = form.cleaned_data
    return __paging(
        request,
        cleaned['current_page'],
        cleaned['previous'],
        cleaned['next'],
        cleaned['target_page'])
def __paging(request, current_page, previous, next, target_page):
    """Re-run the remembered search for another page and render the fragment."""
    offset, target_page = function.get_offset(
        previous, next, target_page, current_page, __limit)
    # Criteria and sort state were stored in the session by __search().
    target_year = request.session['target_year']
    full_name = request.session['full_name']
    file_name = request.session['file_name']
    current_sort_item = request.session['current_sort_item']
    current_descending_order = request.session['current_descending_order']
    result_list_count = ReportQuery().custom_count(target_year, full_name, file_name)
    result_list = ReportQuery().custom_query(
        target_year, full_name, file_name,
        offset, __limit,
        current_sort_item, current_descending_order)
    context = {
        'result_list': result_list,
        'result_list_count': result_list_count,
        'current_page': target_page,
        'limit': __limit,
        'current_sort_item': current_sort_item,
        'current_descending_order': current_descending_order,
        'authority_name': request.session['authority'],
    }
    return HttpResponse(render_to_string(
        'student/report/search_result.html', context, request=request))
@decorator.authenticate_admin_only_async("delete_report")
def delete_report(request):
    """Admin-only: delete one report, then re-render the current page."""
    form = ReportDeleteForm(data=request.POST)
    if not form.is_valid():
        return None
    ReportService().delete_report(form.cleaned_data['report_id'])
    page = form.cleaned_data['current_page']
    # previous/next both False => stay on the same page after deletion.
    return __paging(request, page, False, False, page)
@decorator.authenticate_download("download_report")
def download_report(request):
    """Stream the requested report (.docx) back to the requesting user as a download."""
    user_id = request.session['user_id']
    report_id = request.GET.get("report_id")
    file, file_name = ReportService().download_report(report_id, user_id)
    response = HttpResponse(
        file,
        content_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document")
    # BUG FIX: the header lacked the "attachment;" disposition and quotes
    # around the file name, which breaks downloads of names with spaces.
    response["Content-Disposition"] = 'attachment; filename="{}"'.format(file_name)
    return response
|
# Program: compute the fuel a car uses for a fixed trip.
import time

# Distance from city A to city C, in kilometres.
jarakKota=795
print('Pak Budi Mengendarai Mobil dari kota A ke kota C dengan jarak 795Km' )
# Fuel efficiency: kilometres travelled per litre.
jarakMinimal=12
# Litres consumed for the whole trip.
# (The original also assigned an unused `bensin = 1`; removed.)
bensinDigunakan=jarakKota/jarakMinimal
# Pause two seconds before showing the result.
time.sleep(2)
print('Pak Budi akan menghabiskan Bensin', str(bensinDigunakan) +' Liter')
print('untuk Perjalanan dari kota A ke Kota C')
print('Hati-Hati Pak Budi')
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Auto-register every installed app's admin.py with the admin site.
admin.autodiscover()
# NOTE(review): patterns('') is the pre-Django-1.8 style (removed in 1.10);
# fine for the Django version this project pins, but a plain list is the
# modern form.
urlpatterns = patterns('',
    url(r'^longitude/', include('longitude.urls')),
    url(r'^mygym/', include('mygym.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
|
def last_number_seen(n):
    """Solve one Counting Sheep case.

    Return the last multiple of n spoken when all ten digits have been
    seen, or None when n == 0 (the digits never change: insomnia).
    """
    if n == 0:
        return None
    seen = set()
    count = 0
    while len(seen) < 10:
        count += 1
        seen.update(str(count * n))
    return count * n


def main():
    """Read A-large.in and write one 'Case #i: answer' line per case.

    BUG FIXES vs. the original script:
    - the INSOMNIA branch always wrote 'Case #1' (str(1) instead of str(i));
    - output mixed trailing and leading newlines between the two branches;
    - the output file was opened in append mode, duplicating results on
      re-runs, and neither file was ever closed.
    """
    with open('A-large.in', 'r') as f:
        lines = f.readlines()
    t = int(lines[0])
    with open('A-large.out', 'w') as out:
        for i in range(1, t + 1):
            n = int(lines[i])
            result = last_number_seen(n)
            answer = 'INSOMNIA' if result is None else str(result)
            out.write('Case #{}: {}\n'.format(i, answer))


if __name__ == '__main__':
    main()
import numpy as np
import math
import matplotlib
import cv2
import glob
import shutil
import time
import sys
import os.path as osp
from matplotlib.patches import Polygon
from threading import Thread, Lock
from Queue import Queue
import threading
def add_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
# Add lib to PYTHONPATH
this_dir = osp.dirname(__file__)
lib_path = osp.join(this_dir, 'text')
add_path(lib_path)
from nms.gpu_nms import gpu_nms
from nms.cpu_nms import cpu_nms
import matplotlib.pyplot as plt
# Global matplotlib defaults used for the detection visualisations below.
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
# NOTE(review): chdir at import time; all relative paths below assume the
# caffe root as working directory.
import os
os.chdir(caffe_root)
import sys
sys.path.insert(0, 'python')
import caffe
from google.protobuf import text_format
from caffe.proto import caffe_pb2
def nms(dets, thresh, force_cpu=False, device_id=2):
    """Dispatch to either CPU or GPU NMS implementations."""
    if dets.shape[0] == 0:
        return []
    # GPU path is the default; force_cpu selects the CPU implementation.
    return cpu_nms(dets, thresh) if force_cpu else gpu_nms(dets, thresh, device_id=device_id)
def xcycwh_angle_to_x1y1x2y2x3y3x4y4(box, scale=(1, 1)):
    """Convert a rotated box (xc, yc, w, h, radians) to its four corners.

    The four corners of the axis-aligned box are rotated by *radians*
    about the centre, translated to (xc, yc) and scaled per-axis by
    *scale*.  Returns [x0, x1, x2, x3, y0, y1, y2, y3] — all x values
    first, then the matching y values (same order as the original code).

    BUG FIX: the default for *scale* was a mutable list ([1, 1]); a tuple
    avoids the shared-mutable-default pitfall.  Callers passing lists are
    unaffected.
    """
    xc, yc, width, height, radians = box
    half_w = 0.5 * width
    half_h = 0.5 * height
    cos_r = math.cos(radians)
    sin_r = math.sin(radians)
    # Centre-relative corners: top-left, top-right, bottom-right, bottom-left.
    corners = [(-half_w, -half_h), (half_w, -half_h),
               (half_w, half_h), (-half_w, half_h)]
    xs = [scale[0] * (cos_r * x - sin_r * y + xc) for x, y in corners]
    ys = [scale[1] * (sin_r * x + cos_r * y + yc) for x, y in corners]
    return xs + ys
def clip(x, min_value=0, max_value=float('inf')):
    """Clamp *x* into [min_value, max_value] and truncate to int."""
    clamped = np.clip(x, min_value, max_value)
    return int(clamped)
import os
cwd = os.getcwd()
# Python 2 print statement — this script targets Python 2.
print cwd
# Clear outputs of any previous run (rendered images and result txt files).
images = glob.glob("./examples/text/result/*.jpg")
results = glob.glob("./examples/text/result/*.txt")
removes = images + results
for f in removes:
    os.remove(f)
# load PASCAL VOC labels
labelmap_file = "./examples/text/labelmap_text.prototxt"
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
def get_labelname(labelmap, labels):
    """Map numeric label id(s) to display names via the prototxt labelmap.

    Accepts a single label or a list of labels; returns the matching
    display names in order.  Raises AssertionError when a label id is
    missing from the map (same contract as before).

    BUG FIX / modernisation: the index loop used Python-2-only xrange();
    iterating labelmap.item directly works on both Python 2 and 3 and
    drops the unused num_labels counter.
    """
    labelnames = []
    if type(labels) is not list:
        labels = [labels]
    for label in labels:
        found = False
        # Linear scan is fine: label maps are tiny.
        for item in labelmap.item:
            if label == item.label:
                found = True
                labelnames.append(item.display_name)
                break
        assert found == True
    return labelnames
image_dir = './examples/text/images'
image_list = glob.glob('{}/*.*'.format(image_dir))
image_list = sorted(image_list)
# Input resolution(s) to run the detector at (704 = 512 + 128 + 64).
image_resizes = [512 + 128 +64]
threshold = 0.6
nets = []
device_id = 0
for image_resize in image_resizes:
    model_def = './examples/text/model/deploy.prototxt'
    model_weights = './examples/text/model/demo.caffemodel'
    model_modify = './examples/text/model/final_deploy.prototxt'
    # Rewrite every prior-box "step:" line of the deploy prototxt so the
    # steps match the chosen input resolution, writing the patched model
    # to model_modify.
    lookup = 'step:'
    true_steps = [' step: {}'.format(2**(2+i)) for i in range(1,5)]
    for i in range(1,4):
        step = image_resize / (image_resize / 64.0 - 2*i)
        true_steps.append(' step: {}'.format(step))
    print true_steps
    f = open(model_modify, 'w')
    with open(model_def, 'r') as myFile:
        i = 0
        for num, line in enumerate(myFile, 1):
            if lookup in line:
                print 'found at line:', num
                f.write(true_steps[i]+'\r\n')
                i = i + 1
                continue
            f.write(line)
    f.close()
    # One net per configured resolution, each pinned to its own GPU.
    caffe.set_device(device_id)
    caffe.set_mode_gpu()
    nets.append(caffe.Net(model_modify, # defines the structure of the model
                model_weights, # contains the trained weights
                caffe.TEST)) # use test mode (e.g., don't perform dropout)
    device_id = device_id + 1
t = 0
total_time = 0
# Main loop: run every net (one per resolution/GPU) over each image via a
# worker-thread pool fed from a queue, collect detections, then draw and
# save the results.
for image_path in image_list:
    try:
        image = caffe.io.load_image(image_path)
        original_shape = image.shape
        original_image = image
    except:
        # NOTE(review): bare except silently stops the whole run on the
        # first unreadable image — confirm that is intended.
        break
    height, width, channels = image.shape
    im_size_min = np.min(image.shape[0:2])
    im_size_max = np.max(image.shape[0:2])
    plt.imshow(original_image)
    colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
    currentAxis = plt.gca()
    device_id = 0
    device_ids = range(len(image_resizes))
    original_images = [original_image] * len(image_resizes)
    my_queue = Queue()
    lock = Lock()
    # Each queued item is a (gpu id, input resolution) pair.
    params = zip(device_ids, image_resizes)
    for param in params:
        my_queue.put(param)
    # Detections accumulated across all workers; guarded by `lock`.
    detlist = []
    def worker():
        while True:
            global total_time
            global t
            #grabs host from queue
            id, resize = my_queue.get()
            image_resize_height = resize
            image_resize_width = resize
            caffe.set_device(id)
            caffe.set_mode_gpu()
            transformer = caffe.io.Transformer({'data': (1,3,image_resize_height,image_resize_width)})
            transformer.set_transpose('data', (2, 0, 1))
            transformer.set_mean('data', np.array([104,117,123])) # mean pixel
            transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
            transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
            image = original_image
            start = time.clock()
            nets[id].blobs['data'].reshape(1,3,image_resize_height,image_resize_width)
            transformed_image = transformer.preprocess('data', image)
            nets[id].blobs['data'].data[...] = transformed_image
            detections = nets[id].forward()['detection_out']
            total_time = total_time + (time.clock() - start)*1000.0
            t = t + 1
            print 'avearage running time ' + str(total_time/t)
            print (image_path)
            # Detection layout: [image_id, label, conf, xmin, ymin, xmax, ymax].
            det_label = detections[0,0,:,1]
            det_conf = detections[0,0,:,2]
            det_xmin = detections[0,0,:,3]
            det_ymin = detections[0,0,:,4]
            det_xmax = detections[0,0,:,5]
            det_ymax = detections[0,0,:,6]
            # Get detections with confidence higher than threshold
            top_indices = [i for i, conf in enumerate(det_conf) if conf >= threshold]
            top_conf = det_conf[top_indices]
            top_xmin = det_xmin[top_indices]
            top_ymin = det_ymin[top_indices]
            top_xmax = det_xmax[top_indices]
            top_ymax = det_ymax[top_indices]
            for i in xrange(top_conf.shape[0]):
                # Coordinates are normalised [0,1]; scale to pixel space and
                # clamp to the original image bounds.
                xmin = int(round(top_xmin[i] * image.shape[1]))
                ymin = int(round(top_ymin[i] * image.shape[0]))
                xmax = int(round(top_xmax[i] * image.shape[1]))
                ymax = int(round(top_ymax[i] * image.shape[0]))
                score = top_conf[i]
                xmin = max(0, int(round(top_xmin[i] * original_shape[1])))
                ymin = max(0, int(round(top_ymin[i] * original_shape[0])))
                xmax = min(original_shape[1]-1, int(round(top_xmax[i] * original_shape[1])))
                ymax = min(original_shape[0]-1, int(round(top_ymax[i] * original_shape[0])))
                coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
                try:
                    assert xmin <= xmax and ymin <= ymax, 'left must less than right'
                except:
                    # Skip degenerate boxes.
                    continue
                lock.acquire()
                detlist.append([xmin, ymin, xmax, ymax, score])
                lock.release()
            my_queue.task_done()
    # Spawn daemon workers and wait until every queued (gpu, size) pair is done.
    for j in xrange(10):
        a = Thread(target=worker)
        a.daemon = True
        a.start()
    my_queue.join()
    image_name = os.path.splitext(os.path.basename(image_path))[0]
    fp = open('./examples/text/result/res_{}.txt'.format(image_name),'w')
    if len(detlist) != 0:
        dets = np.array(detlist).astype(np.float32)
        #keep = nms(dets, 0.1)
        #dets = dets[keep, :]
        for j in range(dets.shape[0]):
            xmin, ymin, xmax, ymax, score = dets[j,:]
            color = colors[1]
            display_txt = '%.2f'%(score)
            coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
            currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='green', linewidth=2))
            currentAxis.text(xmin, ymin, display_txt)
            fp.write('{},{},{},{}\r\n'.format(int(xmin), int(ymin), int(xmax), int(ymax)))
    plt.savefig('./examples/text/result/{}'.format(os.path.basename(image_path)))
    plt.close()
    fp.close()
|
from django.db import models
# Create your models here.
class LoginTable(models.Model):
    """Simple login credential record (name, unique mail, password)."""
    # Explicit integer surrogate key (Django would add one automatically).
    id = models.AutoField(primary_key=True)
    # Display name of the user.
    name = models.CharField(max_length=30)
    # Login identifier; uniqueness enforced at the database level.
    mail = models.EmailField(max_length=50, unique=True)
    # NOTE(review): a plain CharField suggests passwords are stored in
    # plaintext — confirm hashing happens elsewhere, or use Django's auth
    # framework instead.
    password = models.CharField(max_length=50)
import serial
import time
import bluetooth
# BUG FIX: the original did `from datetime import datetime` and then called
# `datetime.datetime.now()` / `datetime.timedelta`, which raises
# AttributeError at runtime.  Import the names that are actually used.
from datetime import datetime, timedelta
from temps import *
try:
    from camion import camioncito
except ImportError:
    # camion is optional hardware support.
    pass

# Address and RFCOMM port of the receiving Bluetooth device.
bd_addr = "B8:27:EB:79:24:4F"
port = 1

# Previous/current readings for the freezer ("Con") and fridge ("Ref").
tConPast = None
tRefPast = None
tConNow = None
tRefNow = None
hConPast = None
hRefPast = None
hConNow = None
hRefNow = None
# Resend period in seconds, even when readings have not changed.
deltatie = 360
# Start "deltatie" seconds in the past so the first readings always send.
lastSentCon = datetime.now() - timedelta(seconds=deltatie)
lastSentRef = datetime.now() - timedelta(seconds=deltatie)
try:
    while True:
        conge,refri = getTemps()
        tConNow = conge["Temperatura"]
        tRefNow = refri["Temperatura"]
        hConNow = conge["Humedad"]
        hRefNow = refri["Humedad"]
        # Send freezer data when it changed or the resend period elapsed.
        if (tConNow != tConPast or hConNow != hConPast) or (datetime.strptime(conge["fecha"],"%Y-%m-%d %H:%M:%S") - lastSentCon).total_seconds() > deltatie:
            print(conge)
            sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            sock.connect((bd_addr, port))
            sock.send(str(conge))
            sock.close()
            lastSentCon = datetime.strptime(conge["fecha"],"%Y-%m-%d %H:%M:%S")
            time.sleep(0.5)
        # Send fridge data under the same conditions.
        if (tRefNow != tRefPast or hRefNow != hRefPast) or (datetime.strptime(refri["fecha"],"%Y-%m-%d %H:%M:%S") - lastSentRef).total_seconds() > deltatie:
            print(refri)
            sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            sock.connect((bd_addr, port))
            sock.send(str(refri))
            sock.close()
            lastSentRef = datetime.strptime(refri["fecha"],"%Y-%m-%d %H:%M:%S")
        tRefPast = tRefNow
        tConPast = tConNow
        hRefPast = hRefNow
        hConPast = hConNow
        time.sleep(180)
except KeyboardInterrupt:
    print('Apagando')
|
from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
# URL template for the Echo Nest "similar artists" endpoint; {name} is
# expanded per request.  The API key is interpolated once at import time.
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
                   "?api_key=%s&results=100&name={name}"
                   % settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
    """Query the Echo Nest API and persist the raw JSON response for *name*."""
    response = requests.get(str(API_URL.expand({'name': name})))
    response.raise_for_status()
    return SimilarResponse.objects.create(name=name, response=response.json())
def get_similar_from_db(name):
    """Look up a previously cached response; names are stored upper-cased."""
    normalized = name.upper()
    return SimilarResponse.objects.get(normalized_name=normalized)
def get_similar(name):
    """Return similar artist names, hitting the API only on a cache miss."""
    try:
        cached = get_similar_from_db(name)
    except SimilarResponse.DoesNotExist:
        cached = get_similar_from_api(name)
    return cached.artist_names
|
import os
import time
import datetime
# import enchant
import jieba
class HugoMarkdown:
    """Convert markdown articles from a docs tree into Hugo posts.

    Walks __srcDir, skips index/README/more pages, builds Hugo front
    matter (title, creation date, categories from the directory layout,
    tags/keywords from a jieba word-frequency analysis), inserts a
    <!--more--> summary marker, and writes the result to the mirrored
    path under __desDir.
    """
    # __srcDir = 'I:\src\hugo\docs' # source article directory
    # __desDir = 'I:\src\hugo\9ong\content\post' # destination directory
    __srcDir = 'I:\src\github-page\docs' # source article directory
    __desDir = 'I:\src\hugo\9ong\content\post' # destination directory
    def __init__(self):
        print("···HugoMarkdown···\n")
    # Walk every file in the source directory.
    def scanFiles(self):
        """Process every eligible markdown file under the source tree."""
        print("开始遍历源文章目录:",self.__srcDir,"\n")
        for root,dirs,files in os.walk(self.__srcDir):
            for file in files:
                print("\n-----开始处理文章:",os.path.join(root,file),"-----\n")
                if file=='index.md' or file == 'README.md' or file=='more.md':
                    print("忽略",file,"\n")
                    continue
                fileInfoDict = self.__getFileInfo(root,file)
                # Only .md files that live inside a sub-directory count as articles.
                if (fileInfoDict['fileExt'] != ".md") or (fileInfoDict['parentDir']==''):
                    print("忽略",file,"\n")
                    continue
                # Debug output.
                print(fileInfoDict,"\n")
                self.__adjustFIleContent(fileInfoDict)
    def __getFileInfo(self,root,file):
        """Collect path, name, category directories and timestamps for one file."""
        print("获取文章信息:\n")
        # Full path.
        filePath = os.path.join(root,file)
        # File name and extension.
        filename,fileExt = os.path.splitext(file)
        # Parent and grandparent directories become Hugo categories.
        parentDir = os.path.basename(root)
        grandpaDir = os.path.basename(os.path.dirname(root))
        if parentDir == "docs":
            parentDir = ""
        if grandpaDir == "docs" or grandpaDir == "互联网":
            grandpaDir = ""
        # Creation/modification timestamps, formatted as dates.
        fileCtime = self.__timeToDate(os.path.getctime(filePath),"%Y-%m-%d")
        fileMtime = self.__timeToDate(os.path.getmtime(filePath),"%Y-%m-%d")
        return {
            "filePath":filePath,
            "fileName":filename,
            "fileExt":fileExt,
            "parentDir":parentDir,
            "grandpaDir":grandpaDir,
            "fileCtime":fileCtime,
            "fileMtime":fileMtime
        }
    # Adjust the article content: front-matter meta, TOC and MORE markers.
    def __adjustFIleContent(self,fileInfoDict):
        """Prepend Hugo front matter plus a summary marker, then write the new file."""
        # Read the article and extract its keywords.
        print("读取文章内容...\n")
        with open(fileInfoDict['filePath'],"r",encoding="utf-8") as mdFile:
            content = mdFile.read().strip()
            fileInfoDict['keywords'] = self.__getKeywords(content,fileInfoDict['fileName'])
            content = self.__getMmeta(fileInfoDict) + self.__insertMoreToContent(content)
        # Write the converted article.
        self.__writeNewMarkdownFile(content,fileInfoDict)
    # Build the front-matter meta block.
    def __getMmeta(self,fileInfoDict):
        """Return the Hugo YAML front matter for one article."""
        print("准备文章meta信息:","\n")
        meta = ""
        metaTitle = "title: \""+fileInfoDict['fileName']+"\"\n"
        metaCJK = "isCJKLanguage: true\n"
        metaDate = "date: "+fileInfoDict['fileCtime']+"\n"
        metaCategories = "categories: \n"
        metaParentCategory = ""
        metaGrandpaCategory = ""
        metaTags = "tags: \n"
        metaTagsList = ""
        metaKeywords = "keywords: \n"
        metaKeywordsList = ""
        if fileInfoDict['grandpaDir']!='':
            metaGrandpaCategory = "- "+fileInfoDict['grandpaDir']+"\n"
        if fileInfoDict['parentDir']!='':
            metaParentCategory = "- "+fileInfoDict['parentDir']+"\n"
        if fileInfoDict['keywords']:
            for word in fileInfoDict['keywords']:
                metaTagsList += "- "+word+"\n"
                metaKeywordsList += "- "+word+"\n"
        meta = "---\n"+metaTitle+metaCJK+metaDate+metaCategories+metaGrandpaCategory+metaParentCategory+metaTags+metaTagsList+metaKeywords+metaKeywordsList+"---\n\n"
        print(meta,"\n")
        return meta
    # Insert <!--more--> into the article body.
    def __insertMoreToContent(self,content):
        """Place a <!--more--> marker after the TOC, or after line 5 otherwise."""
        tocFlag = '<!-- /TOC -->'
        if (content.find(tocFlag) != -1):
            print("发现",tocFlag,"\n")
            content = content.replace(tocFlag,tocFlag+"\n"+'<!--more-->'+"\n")
        else:
            print("没有发现",tocFlag,"\n")
            contents = content.splitlines()
            contentsLen = len(contents)
            if contentsLen>4:
                contents[4] = contents[4]+"\n"+'<!--more-->'+"\n"
            content = "\n".join(contents)
        print("插入<!--more-->...","\n")
        return content
    def __writeNewMarkdownFile(self,content,fileInfoDict):
        """Write the converted article to the mirrored destination path."""
        relativeFilePath = fileInfoDict['filePath'].replace(self.__srcDir,"")
        desFilePath = self.__desDir+relativeFilePath
        print("写入新文件:",desFilePath,"\n")
        desDirPath = os.path.dirname(desFilePath)
        if not os.path.exists(desDirPath):
            os.makedirs(desDirPath)
        with open(desFilePath,"w",encoding="utf-8") as nf:
            nf.write(content)
        if os.path.exists(desFilePath):
            print("----- 完成文章处理:",desFilePath," -----\n")
        else:
            print("---- 写入新文件失败! -----\n")
    # Convert a unix timestamp to a date string.
    def __timeToDate(self,timeStamp,format="%Y-%m-%d %H:%M:%S"):
        """Format *timeStamp* (seconds since epoch, local time) with *format*."""
        timeArray = time.localtime(timeStamp)
        # BUG FIX: the format parameter was ignored — the hard-coded full
        # "%Y-%m-%d %H:%M:%S" was always used, so callers requesting
        # "%Y-%m-%d" got a full datetime in the front-matter date field.
        return time.strftime(format, timeArray)
    # Extract the article's keywords.
    def __getKeywords(self,content,filename):
        """Return the article's keyword list from word-frequency statistics."""
        keywords = self.__wordStatistics(content,filename)
        keywordsList = sorted(keywords.items(), key=lambda item:item[1], reverse=True)
        keywordsList = keywordsList[0:50]
        keywordsList = self.__filterKeywords(keywordsList,filename)
        print("保留关键词:",keywordsList,"\n")
        return keywordsList
    # Word-frequency statistics over jieba-segmented text.
    def __wordStatistics(self,content,filename):
        """Count segmented words, skipping stop words and blanks."""
        stopwords = open('I:\src\github-page\script\python\stopwords.txt', 'r', encoding='utf-8').read().split('\n')[:-1]
        words_dict = {}
        temp = jieba.cut(content)
        for t in temp:
            if t in stopwords or t == 'unknow' or t.strip() == "":
                continue
            if t in words_dict.keys():
                words_dict[t] += 1
            else:
                words_dict[t] = 1
        return words_dict
    # Second filtering pass: keep words that appear in the title, with at
    # least 2 CJK characters or 3 other characters, and not pure digits.
    def __filterKeywords(self,keywordsList,filename):
        """Keep only title words long enough to be useful tags."""
        print("分析文章标签/关键词...\n")
        newKeywordsList = []
        for word,count in keywordsList:
            wordLen = len(word)
            if filename.find(word)!=-1:
                # BUG FIX: the original's elif chain also rejected 2-char
                # CJK words (len < 3 applied to them too), contradicting the
                # stated rule of "at least 2 CJK characters".
                if self.__isChinese(word):
                    if wordLen<2:
                        continue
                elif wordLen<3:
                    continue
                if word.isdigit():
                    continue
                newKeywordsList.append(word)
        return newKeywordsList
    def __isChinese(self,word):
        """True if *word* contains at least one CJK unified ideograph."""
        for ch in word:
            if '\u4e00' <= ch <= '\u9fff':
                return True
        return False
if __name__ == '__main__':
    # Entry point: convert every source article into a Hugo post.
    HugoMarkdown().scanFiles()
|
#Jhon Albert Arango Duque
#Dubel Fernando Giraldo Duque
#Sistemas Distribuidos- Gr 2
#UTP
#-----------------------------------------------------------
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
from datetime import datetime
import xmlrpclib
import threading
import time
import sys
import random
import os
IP = '127.0.0.1' # server bind address
PORT = 5006 # server port
#-----------------------------------------------------------
# Definiendo las clases que necesitamos para gestionar los archivos (directorios)
#Clase para el archivo
class FileC:
    """A file held by a client: name, owner address and last-modified stamp."""
    def __init__(self,name,ip,port,Datemod):
        # Attribute names kept in Spanish to match the rest of the code base.
        self.nombre=name
        self.ipcliente=ip
        self.puertocliente=port
        self.Datemod=Datemod
#-----------------------------------------------------------
#Clase para el cliente
class ClientC:
    """A connected client: its address and the files it currently holds."""
    def __init__(self,ip,port):
        self.Files=[]
        self.ipAddress=ip
        self.port=port
    def AddFileC(self,FileA):
        """Register a file object with this client."""
        self.Files.append(FileA)
    def RemoveFileC(self,FileA):
        """Remove FileA from this client's file list if present.

        BUG FIX: the original iterated over the non-existent attribute
        ``self.FileA`` (AttributeError at runtime) and removed elements
        from the list while iterating it.
        """
        if FileA in self.Files:
            self.Files.remove(FileA)
#-----------------------------------------------------------
#Clase para el archivo compartido
class FileSharedC:
    """A file published to the server, plus its write-permission list."""
    def __init__(self,ip,port,Name,Cont,DateMod):
        self.ip=ip
        self.port=port
        self.Name=Name
        self.Cont=Cont
        self.DateMod=DateMod
        # The owner ("ip:port") always starts with write permission.
        self.PerWriting=[ip+":"+str(port)]
#-----------------------------------------------------------
#Conexion RPC / Registro de Clases
class RequestHandler(SimpleXMLRPCRequestHandler):
    """Restrict XML-RPC requests to the conventional /RPC2 path."""
    rpc_paths = ('/RPC2',)
# Instantiate the XML-RPC server; this binds the listening socket at import time.
s = SimpleXMLRPCServer((IP,PORT),
requestHandler=RequestHandler,allow_none=True)
s.register_introspection_functions()
#-----------------------------------------------------------
#Clase que contiene todas las funciones ejecutadas por el servidor
class Functions:
    """All operations exposed by the server over XML-RPC.

    State: Clients (connected ClientC objects), PagControl (shared
    FileSharedC objects), Lockedfiles (files currently locked for writing).
    """
    def __init__(self):
        self.Clients=[]
        self.PagControl=[]
        self.Lockedfiles=[]
#-----------------------------------------------------------
# Logs every read and write performed by clients (appended to HistorialAcceso.txt).
    def RegisterAccess(self,Ip,Port,Name,Acct):
        access=open("HistorialAcceso.txt","a")
        access.write("CLiente:"+Ip+":"+Port+"|| Nombre de Archivo:"+Name+"|| Fecha/Hora:"+time.ctime(time.time())+"|| Accion:"+Acct+"\n")
        access.close()
        return 0
#-----------------------------------------------------------
# Removes a disconnected client's files from the server state: the shared
# list, the lock list, and the client registry.
    def RemoveFileServer(self,Ip,Port):
        Auxi1=[]
        for N in self.PagControl:
            if N.ip==Ip and N.port==Port:
                Auxi1.append(N.Name)
        # NOTE(review): removing elements from the lists below while
        # iterating them can skip entries — confirm this is acceptable here.
        for N in self.Lockedfiles:
            if N.ip==Ip and N.port==Port:
                self.Lockedfiles.remove(N)
        for N in self.Clients:
            if N.ipAddress==Ip and N.port==Port:
                self.Clients.remove(N)
        print Auxi1
        for Elem in Auxi1:
            print Elem
            print""
            for date in self.PagControl:
                print Elem
                print date.Name
                if str(Elem) == str(date.Name):
                    self.PagControl.remove(date)
                    # Also deletes the file from the server's disk.
                    os.remove(date.Name)
        return 0
#-----------------------------------------------------------
# Registers a connecting client and grants it random write permissions on
# the files already shared.
    def RegisterClient(self,ip,port):
        for cl in self.Clients:
            if cl.ipAddress==ip and cl.port==port:
                return "El Cliente ya se encuentra registrado"
        cli=ClientC(ip,port)
        # Random (coin-flip) write permission for each shared file.
        for perm in self.PagControl:
            Typepermi= random.randint(0,1)
            if Typepermi==1:
                perm.PerWriting.append(ip+":"+str(port))
        self.Clients.append(cli)
        return "Cliente registrado!"
    def LenCli(self):
        # Number of registered clients.
        return len(self.Clients)
    def listarpermisos(self,port,ip):
        # List the names of the files this client may write to.
        lista=[]
        for n in self.PagControl:
            for m in n.PerWriting:
                dato = ip+":"+str(port)
                if m == dato:
                    lista.append(n.Name)
        car=str(lista)
        return car
#-----------------------------------------------------------
# Copies the first client's files onto the second client.
    def FirstCopy(self,Ip,Port,contenido):
        for Pag in self.Clients[0].Files:
            self.Clients[1].Files.append(Pag)
        # NOTE(review): this loop relies on the leaked loop variable `Pag`
        # from the loop above, so only the LAST file's content is pushed —
        # looks unintended; confirm.
        for n in self.PagControl:
            if Pag.nombre == n.Name:
                Address = "http://"+Ip+":"+str(Port)
                Link1 = xmlrpclib.ServerProxy(Address)
                op=Link1.UpdateFileA(n.Name,n.Cont)
        return 1
    def LRU(self,cliente):
        # Evict the least-recently-modified file the client does not own.
        ip=cliente.ipAddress
        port=cliente.port
        cont=10000000000000000000000000000000000000000000000000
        for archivo in cliente.Files:
            if str(archivo.puertocliente) != str(port):
                if archivo.Datemod <= cont:
                    cont=archivo.Datemod
                    nombre=archivo.nombre
                    objetoarchivo=archivo
        # NOTE(review): if every file is owned by the client, objetoarchivo
        # is unbound and this raises UnboundLocalError — confirm callers
        # guarantee at least one foreign file.
        cliente.Files.remove(objetoarchivo)
        Address = "http://"+ip+":"+str(port)
        Link1 = xmlrpclib.ServerProxy(Address)
        op=Link1.RemoveFileE(nombre)
        return 0
    def clienteRet(self,ip,port):
        # Apply LRU eviction when a client holds 8 or more files.
        for client in self.Clients:
            if client.ipAddress== ip and client.port == port:
                if len(client.Files)>=8:
                    self.LRU(client)
        return 0
#-----------------------------------------------------------
# Registers a new file, distributes it to every client (evicting via LRU
# when a client is full) and grants random write permissions.
    def RegisterFile(self, Name, ip, port, Cont,DateMod):
        Fil = FileC(Name, ip, port,DateMod)
        for c in self.PagControl:
            if c.Name == Name:
                return "Archivo ya registrado"
        for cli in self.Clients:
            if len(cli.Files) < 8:
                cli.AddFileC(Fil)
            else:
                self.LRU(cli)
                cli.AddFileC(Fil)
        FileShared = FileSharedC(ip, port, Name, Cont, DateMod)
        # Coin-flip write permission for every client other than the owner.
        for p in self.Clients:
            permiso = random.randint(0,1)
            Address = p.ipAddress+":"+str(p.port)
            Address2 = ip+":"+str(port)
            if permiso == 1 and Address != Address2:
                FileShared.PerWriting.append(Address)
        self.PagControl.append(FileShared)
        # Push the content to every client once at least two are connected.
        if len(self.Clients) >= 2:
            for cli in self.Clients:
                i=cli.ipAddress
                p=cli.port
                Address = "http://"+i+":"+str(p)
                Link1 = xmlrpclib.ServerProxy(Address)
                op=Link1.UpdateFileA(Name,Cont)
        return 1
#-----------------------------------------------------------
# Returns the list of shared file names.
    def ListFile(self):
        listArch = []
        for i in self.PagControl:
            listArch.append(i.Name)
        return listArch
#-----------------------------------------------------------
# Lets a client read a file's content (or "Nope" when unknown).
    def SearchFileR (self, Name):
        for Conte in self.PagControl:
            if Conte.Name == Name:
                return Conte.Cont
        return "Nope"
#-----------------------------------------------------------
# Lets a client open a file for writing: locks it and returns the owner's
# address plus the current content.
    def SearchFileW (self, Name,AddressE):
        for Lock in self.Lockedfiles:
            if Lock.Name == Name:
                return "Locked",0,0
        for Conte in self.PagControl:
            if Conte.Name == Name:
                if AddressE in Conte.PerWriting:
                    self.Lockedfiles.append(Conte)
                    print "Permiso de escritura Establecido"
                    Address = "http://"+Conte.ip+":"+str(Conte.port)
                    return "Yes",Address, Conte.Cont
                else:
                    return "No-PerWriting",0,0
        # NOTE(review): falls through (returns None) when the name is
        # unknown — callers expecting a 3-tuple will fail; confirm.
#-----------------------------------------------------------
# Propagates an edited file's content to every client that holds it.
    def UpdatePagControl(self,Name,Contenido):
        horamodifico=time.time()
        for archiv in self.PagControl:
            if archiv.Name == Name:
                archiv.Cont=Contenido
                # NOTE(review): FileSharedC defines 'DateMod'; assigning
                # 'DateMode' creates a new attribute — likely a typo.
                archiv.DateMode=horamodifico
        for client in self.Clients:
            for fil in client.Files:
                if Name == fil.nombre:
                    Add="http://"+client.ipAddress+":"+str(client.port)
                    Link1 = xmlrpclib.ServerProxy(Add)
                    solo=Link1.UpdateFileA(Name,Contenido)
                    # NOTE(review): FileC defines 'Datemod'; 'DateMode'
                    # looks like the same typo as above.
                    fil.DateMode=horamodifico
        return 0
#-----------------------------------------------------------
# Removes a file from the locked list (releases the write lock).
    def DLocked(self, Name):
        for f in self.PagControl:
            if f.Name == Name:
                self.Lockedfiles.remove(f)
        return 0
#-----------------------------------------------------------
# Serve XML-RPC requests on a background thread so the main thread stays free.
t = threading.Thread(target=s.serve_forever)
s.register_instance(Functions())
#-----------------------------------------------------------
t.start()
|
from django.shortcuts import render, redirect
from django.http import Http404
import requests
from tmdbv3api import TV
from tmdbv3api import TMDb
import imdb
# NOTE(review): everything below runs at import time and performs many
# network calls to TMDb — the module fails to import when the API is
# unreachable, and the hard-coded API key should live in settings.
ia = imdb.IMDb()
tmdb = TMDb()
tmdb.api_key = '122a9fafd99452516fe83207465ce55d'
img1 = "http://image.tmdb.org/t/p/w500/"
tv = TV()
popular = tv.popular()
# li[i] = [name, air date (always ''), poster URL, overview] per show.
li = []
det = []
gen = []
nme = []
for i in popular:
    if i.name:
        name = i.name
    else:
        name = ''
    if i.id:
        ids = i.id
    else:
        ids = ''
    if i.poster_path:
        pp = i.poster_path
    else:
        pp = ''
    if i .overview:
        ow = i.overview
    else:
        ow = ''
    air = ''
    z = [name, air, img1+pp, ow]
    nme.append(ids)
    li.append(z)
# cast[i] = first cast names of show i, padded with '' so 5 entries exist.
cast = []
for i in nme:
    m = tv.details(i)
    name = []
    for i in m.credits['cast']:
        name.append(i['name'])
    name.append('')
    name.append('')
    name.append('')
    name.append('')
    name.append('')
    cast.append(name)
# gen[i] = [comma-joined genre names] of show i.
gen = []
for i in nme:
    m = tv.details(i)
    gener = []
    di = []
    for i in m.genres:
        gener.append(i['name'])
    ki = ", ".join(gener)
    di.append(ki)
    gen.append(di)
print(gen)
def trending_tv(request):
    """Render the trending-TV page.

    The template expects flat, numbered context keys (mn1..mn20, mu*, rd*,
    d*, gen1..gen20, cast1..cast100); build them from the module-level
    lists instead of spelling every key out by hand.

    BUG FIXES vs. the hand-written dict: 'rd8' read li[8][1] instead of
    li[7][1]; 'cast70' read cast[12][4] instead of cast[13][4]; and
    'cast93' was mistyped as a second 'cast98' key, so cast[18][2] was
    silently dropped by the duplicate-key overwrite.
    """
    context = {}
    for idx in range(20):
        num = idx + 1
        context['mn%d' % num] = li[idx][0]    # show name
        context['rd%d' % num] = li[idx][1]    # air date (always '')
        context['mu%d' % num] = li[idx][2]    # poster URL
        context['d%d' % num] = li[idx][3]     # overview
        context['gen%d' % num] = gen[idx][0]  # comma-joined genres
        # Five cast slots per show, numbered 1..100 across the 20 shows.
        for j in range(5):
            context['cast%d' % (idx * 5 + j + 1)] = cast[idx][j]
    return render(request, 'trending_tv.html', context)
|
#!/usr/bin/python
# Ask the user for the value to convert.
n1 = int(input('Digite um valor: '))
# Ask which base to convert to.
print('''Escolha uma das bases para conversão: 
[ 1 ] Converter para BINÁRIO
[ 2 ] Converter para OCTAL
[ 3 ] Converter para HEXADECIMAL.''')
print('')
n2 = int(input('Escolha uma opção: '))
# Dispatch on the chosen option; [2:] strips the 0b/0o/0x prefixes and the
# escape codes render the result in green.
if n2 == 1:
    print('Sua opção {}'.format(n1))
    print('Conversão para binário: {}{}{}'.format('\033[1;32m', bin(n1)[2:], '\033[m'))
elif n2 == 2:
    print('Sua opção {}'.format(n1))
    print('Conversão para octal: {}{}{}'.format('\033[1;32m', oct(n1)[2:], '\033[m'))
elif n2 == 3:
    # BUG FIX: this branch printed 'Sua opção: ' with no {} placeholder, so
    # the value passed to format() was silently dropped; now it matches the
    # other branches.
    print('Sua opção {}'.format(n1))
    print('Conversão para hexadecimal: {}{}{}'.format('\033[1;32m', hex(n1)[2:], '\033[m'))
else:
    print('Opção inválida. Tente novamente.')
# coding: utf-8
__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'
import pandas as pd
from sklearn.metrics import roc_auc_score
from scipy.optimize import minimize
import os
import pickle
import numpy as np
def restore_data(path):
    """Load a pickled object from *path*, returning an empty dict if missing.

    Fixes a file-handle leak in the original (the handle was opened and never
    closed); the context manager guarantees closure even if unpickling raises.
    """
    data = dict()
    if os.path.isfile(path):
        with open(path, 'rb') as file:
            data = pickle.load(file)
    return data
def cache_data(data, path):
    """Pickle *data* to *path* if the parent directory exists.

    Best-effort by design: prints a warning instead of raising when the
    directory is missing. Uses a context manager so the handle is closed
    even if pickle.dump raises (the original only closed on success).
    """
    if os.path.isdir(os.path.dirname(path)):
        with open(path, 'wb') as file:
            pickle.dump(data, file)
    else:
        print('Directory doesnt exists')
def get_0_1_val(fl):
    """Quantize a probability: 0 if below 0.24, 1 if above 0.76, else 2 (ambiguous)."""
    if fl < 0.24:
        return 0
    return 1 if fl > 0.76 else 2
def find_strange_test_pairs(pairs):
    """Build duplicate / non-duplicate adjacency maps from pairwise predictions.

    pairs: DataFrame with 'itemID_1', 'itemID_2' and 'probability' columns.
    Each probability is quantized with get_0_1_val; pairs labelled 1
    (duplicates) go into res_1 and pairs labelled 0 (non-duplicates) into
    res_0. For res_1 an incremental transitive closure is maintained in both
    directions as pairs are seen (order of rows therefore matters).

    Returns the tuple (res_0, res_1), each a dict mapping an item id to a
    list of related item ids.
    """
    count_1 = 0
    count_0 = 0
    save_index = dict()  # row index -> (item_1, item_2); debugging aid, not returned
    res_1 = dict()  # item -> items predicted to be duplicates of it
    res_0 = dict()  # item -> items predicted NOT to be duplicates of it
    # Much faster
    count = 0  # NOTE(review): unused
    # Work on raw numpy arrays instead of iterating the DataFrame row-wise.
    itemID_1 = pairs['itemID_1'].astype(np.int64).copy().values
    itemID_2 = pairs['itemID_2'].astype(np.int64).copy().values
    probability = pairs['probability'].astype(np.float64).copy().values
    for i in range(len(probability)):
        if i % 100000 == 0:
            print('Index: {}'.format(i))
        item_1 = int(itemID_1[i])
        item_2 = int(itemID_2[i])
        isDub = get_0_1_val(probability[i])
        save_index[i] = (item_1, item_2)
        if isDub == 1:
            # Forward
            count_1 += 1
            if item_1 in res_1:
                if item_2 not in res_1[item_1]:
                    res_1[item_1].append(item_2)
            else:
                res_1[item_1] = [item_2]
            # Pull in everything already known to duplicate item_2
            # (one step of transitive closure).
            if item_2 in res_1:
                for el in res_1[item_2]:
                    if el not in res_1[item_1] and el != item_1:
                        res_1[item_1].append(el)
            # Backward
            if item_2 in res_1:
                if item_1 not in res_1[item_2]:
                    res_1[item_2].append(item_1)
            else:
                res_1[item_2] = [item_1]
            if item_1 in res_1:
                for el in res_1[item_1]:
                    if el not in res_1[item_2] and el != item_2:
                        res_1[item_2].append(el)
        if isDub == 0:
            # Non-duplicates: record the symmetric relation only (no closure).
            count_0 += 1
            if item_1 in res_0:
                if item_2 not in res_0[item_1]:
                    res_0[item_1].append(item_2)
            else:
                res_0[item_1] = [item_2]
            if item_2 in res_0:
                if item_1 not in res_0[item_2]:
                    res_0[item_2].append(item_1)
            else:
                res_0[item_2] = [item_1]
    print('Total pairs 0: {}'.format(count_0))
    print('Total pairs 1: {}'.format(count_1))
    # Total pairs 0: 1768538
    # Total pairs 1: 1204751
    return res_0, res_1
def find_strange_pairs_small_array(res_0, res_1):
    """Find contradictory triples between the duplicate and non-duplicate maps.

    For every item *el*, each pair (elem1, elem2) of its duplicate candidates
    in res_1 is "strange" when elem1 and elem2 are also recorded as
    non-duplicates of each other in res_0 (in either direction) — duplicates
    of the same item should themselves be duplicates.

    Returns a dict mapping every ordered pair of each strange triple to 1.
    The original set the same six keys in two copy-pasted branches; this
    version deduplicates that into one helper with identical behavior.
    """
    strange_pairs = dict()

    def _flag_triple(a, b, c):
        # Mark every ordered pair of the triple as strange.
        for x, y in ((a, b), (b, a), (a, c), (c, a), (b, c), (c, b)):
            strange_pairs[(x, y)] = 1

    for el in res_1:
        dups = res_1[el]
        for i in range(len(dups)):
            for j in range(i + 1, len(dups)):
                elem1 = dups[i]
                elem2 = dups[j]
                # Contradiction in either direction of the res_0 relation.
                if elem2 in res_0.get(elem1, ()) or elem1 in res_0.get(elem2, ()):
                    _flag_triple(el, elem1, elem2)
    return strange_pairs
def intersect(a, b):
    """Return the distinct elements that appear in both *a* and *b*, as a list."""
    seen_in_b = set(b)
    return [item for item in dict.fromkeys(a) if item in seen_in_b]
def decrease_one_by_percent(prob, perc):
    """Pull *prob* towards 0.5 by removing *perc* of its distance above 0.5."""
    delta = (prob - 0.5) * perc
    return prob - delta
def increase_one_by_percent(prob, perc):
    """Push *prob* towards 1.0 by closing *perc* of the remaining gap."""
    gap = 1 - prob
    return prob + gap * perc
def increase_zero_by_percent(prob, perc):
    """Push a low *prob* towards 0.5 by closing *perc* of the gap to 0.5."""
    adjustment = (0.5 - prob) * perc
    return prob + adjustment
def get_new_probability(id1, id2, prob, res_0, res_1):
    """Adjust a pair's duplicate probability using graph evidence.

    res_0 / res_1 map item ids to their non-duplicate / duplicate neighbours
    (see find_strange_test_pairs). For confident-duplicate pairs
    (prob > 0.51): each contradictory neighbour (a duplicate of one item
    that is a non-duplicate of the other) pulls the probability 25% towards
    0.5, and each shared duplicate neighbour pushes it 7% towards 1.0.
    For confident-non-duplicate pairs (prob < 0.49): any shared duplicate
    neighbour resets the probability to 0.5.

    Cleanup vs original: removed the dead `if 0:` debug block, the
    commented-out loop and the unused perc_incr/perc_decr locals (the code
    always used the literals 0.25 and 0.07, kept here unchanged).
    """
    new_prob = prob
    ind1_elem0 = res_0.get(id1, [])  # non-duplicates of id1
    ind1_elem1 = res_1.get(id1, [])  # duplicates of id1
    ind2_elem0 = res_0.get(id2, [])  # non-duplicates of id2
    ind2_elem1 = res_1.get(id2, [])  # duplicates of id2
    if prob > 0.51:
        # Contradictions in both directions decrease confidence.
        for _ in intersect(ind1_elem0, ind2_elem1):
            new_prob = decrease_one_by_percent(new_prob, 0.25)
        for _ in intersect(ind2_elem0, ind1_elem1):
            new_prob = decrease_one_by_percent(new_prob, 0.25)
        # Shared duplicate neighbours increase confidence.
        for _ in intersect(ind1_elem1, ind2_elem1):
            new_prob = increase_one_by_percent(new_prob, 0.07)
    if prob < 0.49:
        # Any shared duplicate neighbour makes a "not duplicate" call suspect.
        if intersect(ind1_elem1, ind2_elem1):
            new_prob = 0.5
    return new_prob
def run_experiment(pairs, num):
    """Measure the AUC gain from graph-based probability adjustment.

    pairs: DataFrame with 'itemID_1', 'itemID_2', 'probability' and 'real'
    columns. num: experiment index, used only to name the on-disk cache of
    the (res_0, res_1) adjacency maps. Returns auc_new - auc_initial.
    """
    # print(pairs)
    auc_initial = roc_auc_score(pairs['real'].values, pairs['probability'].values)
    print('Auc for experiment: {}'.format(auc_initial))
    # Build (or restore) the duplicate / non-duplicate adjacency maps.
    res_cache_path = '../data_external/analysis/res_0_1_' + str(num) + '.pickle'
    if not os.path.isfile(res_cache_path):
        res_0, res_1 = find_strange_test_pairs(pairs)
        cache_data((res_0, res_1), res_cache_path)
    else:
        print('Restore res_0_1 from cache...')
        (res_0, res_1) = restore_data(res_cache_path)
    # strange_pairs = find_strange_pairs_small_array(res_0, res_1)
    # print('Strange pairs:', len(strange_pairs))
    # Dead branch: the original slow row-wise variant, kept for reference.
    if 0:
        pairs['probability_new'] = pairs['probability'].copy()
        count = 0
        for index, row in pairs.iterrows():
            if index % 100000 == 0:
                print('Index: {}'.format(index))
            item_1 = row['itemID_1']
            item_2 = row['itemID_2']
            prob = row['probability']
            new_prob = get_new_probability(item_1, item_2, prob, res_0, res_1)
            if abs(new_prob - prob) < 0.000001:
                count += 1
            pairs.set_value(index, 'probability_new', new_prob)
    # Much faster
    # Work on raw numpy arrays instead of DataFrame row iteration.
    count = 0
    probability_new = pairs['probability'].astype(np.float64).copy().values
    itemID_1 = pairs['itemID_1'].astype(np.int64).copy().values
    itemID_2 = pairs['itemID_2'].astype(np.int64).copy().values
    probability = pairs['probability'].astype(np.float64).copy().values
    for i in range(len(probability_new)):
        if i % 100000 == 0:
            print('Index: {}'.format(i))
        item_1 = int(itemID_1[i])
        item_2 = int(itemID_2[i])
        prob = probability[i]
        new_prob = get_new_probability(item_1, item_2, prob, res_0, res_1)
        if abs(new_prob - prob) > 0.000001:
            count += 1  # number of probabilities actually changed
        probability_new[i] = new_prob
        if i > 100000000:
            exit()  # NOTE(review): debugging guard; never triggers for real data sizes
    print('Replacements: ' + str(count))
    auc_new = roc_auc_score(pairs['real'].values, probability_new)
    print('Auc after replace: {}'.format(auc_new))
    improvement = auc_new - auc_initial
    return improvement
def find_mean_score(pairs):
    """Report each prediction set's AUC and the AUC of their averaged probabilities.

    pairs: mapping (or sequence) indexed 0..len-1 of DataFrames, each with
    'real' and 'probability' columns. Returns the AUC of the mean prediction.
    """
    total = len(pairs)
    for idx in range(total):
        frame = pairs[idx]
        score = roc_auc_score(frame['real'].values, frame['probability'].values)
        print('Independent AUC: {}'.format(score))
    # Average the probability columns across all prediction sets.
    merged = pairs[0].copy()
    merged['overall'] = merged['probability']
    for idx in range(1, total):
        merged['overall'] += pairs[idx]['probability']
    merged['overall'] /= total
    mean_auc = roc_auc_score(merged['real'].values, merged['overall'].values)
    print('Mean AUC: {}'.format(mean_auc))
    return mean_auc
def get_new_prob_mean(row, len1):
    """Sum of the len1 per-model 'probability_<i>' columns of *row*.

    Note: returns the raw sum, not the mean, matching the original.
    """
    return sum((row['probability_' + str(i)] for i in range(len1)), 0.0)
def get_new_prob(row, len1):
    """Merge len1 per-model 'probability_<i>' values from *row*.

    Returns their mean, except when every model agrees at an extreme:
    if all values are >= 0.99 the maximum is returned instead, and if all
    are <= 0.01 the minimum is returned.
    """
    values = [row['probability_' + str(i)] for i in range(len1)]
    merged = sum(values) / len1
    if all(v >= 0.99 for v in values):
        # Unanimous "duplicate": take the most confident vote.
        merged = max([0] + values)
    if all(v <= 0.01 for v in values):
        # Unanimous "not duplicate": take the most confident vote.
        merged = min([1] + values)
    return merged
def try_new_method(pairs):
    """Combine the prediction sets with get_new_prob and report the resulting AUC."""
    n_models = len(pairs)
    merged = pairs[0].copy()
    # Lay the per-model probabilities out as columns so get_new_prob can
    # read them row-wise.
    for idx in range(n_models):
        merged['probability_' + str(idx)] = pairs[idx]['probability']
    print('Start calcs...')
    merged['merged_prob'] = merged.apply(get_new_prob, args=(n_models,), axis=1)
    auc = roc_auc_score(merged['real'].values, merged['merged_prob'].values)
    print('New AUC: {}'.format(auc))
    return auc
# Script entry: load the four cached prediction sets and compare the simple
# mean ensemble against the custom merging rule.
pairs = dict()
for j in range(4):
    pairs_path = '../data_external/analysis/pairs_' + str(j) + '.hdf'
    print('Read from cache!')
    pairs[j] = pd.read_hdf(pairs_path, 'table')
mean_auc = find_mean_score(pairs)
new_auc = try_new_method(pairs)
print('Improvement: {}'.format(new_auc - mean_auc))
import ctypes

# Load the GHC-built shared library and hand control to its exported main().
_LIB_PATH = './dist-newstyle/build/x86_64-linux/ghc-8.10.4/project0-1.0.0/f/profile/build/profile/libprofile.so'
lib = ctypes.cdll.LoadLibrary(_LIB_PATH)
lib.main()
from django.db import models
class Post(models.Model):
    """A blog post: image, title, body text and a last-modified timestamp."""

    image = models.ImageField(upload_to='blogimages/')
    title = models.CharField(max_length=100)
    body = models.TextField()
    # auto_now=True refreshes the timestamp on every save.
    # FIX: the original also passed auto_created=True, which is an internal
    # Django flag (it marks the field as auto-created and hides it from
    # forms/migrations) — almost certainly a typo for auto_now_add=True.
    # NOTE(review): given the datecreated() method below, confirm whether a
    # creation timestamp (auto_now_add=True, without auto_now) was intended.
    time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.title

    def summary(self):
        """First 120 characters of the body, for list/preview views."""
        return self.body[:120]

    def datecreated(self):
        """Timestamp formatted like 'Jan  1 2021'.

        NOTE(review): '%e' is a POSIX extension and is not supported by
        strftime on Windows — confirm the deployment platform.
        """
        return self.time.strftime('%b %e %Y')
|
#!/usr/bin/python
# Copyright (c) 2015 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pprint import pprint
import os, sys, re
import logging
import cmdln
import requests
import json
from jinja2 import Environment, FileSystemLoader
from xml.etree import cElementTree as ET
import osc.conf, osc.core
import datetime
class Spec(object):
    """Very small RPM spec-file parser.

    Only the preamble — the lines before the first %package/%description
    section — is searched when reading or rewriting tags.
    """

    def __init__(self, fn=None):
        self.lines = []
        if fn is not None:
            self.lines = self._read(fn)

    def _read(self, fn):
        """Read *fn*, returning its lines with the trailing character stripped."""
        with open(fn, 'r') as fh:
            return [line[:-1] for line in fh.readlines()]

    def settag(self, tag, value):
        """Replace the value of *tag* in the preamble, preserving its whitespace."""
        tag = tag.capitalize()
        prefix = tag + ':'
        for idx, line in enumerate(self.lines):
            # Stop once the preamble ends.
            if line.startswith('%package ') or line.startswith('%description'):
                break
            if line.startswith(prefix):
                spaces = re.match(r'\s*', line[len(prefix):]).group()
                self.lines[idx] = "{}:{}{}".format(tag, spaces, value)

    def gettag(self, tag):
        """Return the value of *tag* from the preamble, or None if absent."""
        tag = tag.capitalize()
        prefix = tag + ':'
        for line in self.lines:
            if line.startswith('%package ') or line.startswith('%description'):
                break
            if line.startswith(prefix):
                return line[len(prefix):].lstrip()
        return None

    def __str__(self):
        return '\n'.join(self.lines)
class BoilderPlate(cmdln.Cmdln):
    # Command-line tool (Python 2, cmdln-based) that generates and updates
    # openSUSE Build Service packages for npm modules: it fetches package
    # metadata from the npm registry, renders spec/_service files from Jinja2
    # templates and maintains .changes files.
    # NOTE(review): Cmdln.__init__(self, args, kwargs) passes the tuple and
    # dict as two positional arguments; almost certainly meant
    # *args, **kwargs — confirm against the cmdln API.
    def __init__(self, *args, **kwargs):
        cmdln.Cmdln.__init__(self, args, kwargs)
        # Templates (spec.template, service.template) live next to this script.
        self.templates = Environment(loader = FileSystemLoader(os.path.dirname(__file__)))
        osc.conf.get_config()
    # Build the global option parser (cmdln hook).
    def get_optparser(self):
        parser = cmdln.CmdlnOptionParser(self)
        parser.add_option("--dry", action="store_true", help="dry run")
        parser.add_option("--debug", action="store_true", help="debug output")
        parser.add_option("--verbose", action="store_true", help="verbose")
        return parser
    # Configure logging from the global options (cmdln hook, runs after parsing).
    def postoptparse(self):
        level = None
        if self.options.debug:
            level = logging.DEBUG
        elif self.options.verbose:
            level = logging.INFO
        logging.basicConfig(level = level)
        self.logger = logging.getLogger(self.optparser.prog)
    # 'genspec' subcommand: print a rendered spec file for npm package *pkg*.
    @cmdln.option("-f", "--force", action="store_true",
                  help="force something")
    def do_genspec(self, subcmd, opts, pkg):
        """${cmd_name}: foo bar

        ${cmd_usage}
        ${cmd_option_list}
        """
        print self.genspec(self.get_templatedata(self.fetch_registry(pkg)))
    # 'genpkg' subcommand: create (or with --update refresh) the on-disk
    # package directory nodejs-<name> with spec, _service, .changes and tarball.
    @cmdln.option("-u", "--update", action="store_true",
                  help="update if exists")
    @cmdln.option("-f", "--force", action="store_true",
                  help="force something")
    def do_genpkg(self, subcmd, opts, name):
        """${cmd_name}: foo bar

        ${cmd_usage}
        ${cmd_option_list}
        """
        exists = False
        pkg = 'nodejs-{}'.format(name)
        dst = pkg
        specfn = pkg+'.spec'
        # Detect whether we are inside the package dir or above it.
        if os.path.exists(specfn):
            dst = '.'
            exists = True
        elif os.path.exists(os.path.join(dst, specfn)):
            exists = True
            specfn = os.path.join(dst, specfn)
        data = self.fetch_registry(name)
        if not exists:
            # Refuse to create a new package if it already exists in OBS.
            v = self.check_dln_exists('nodejs-{}'.format(name))
            if v is not None:
                self.logger.warn("Note: nodejs-{} exists in obs".format(name))
                self.logger.warn("current version: {}, obs version: {}".format(data['version'], v))
                return
        if not exists and dst != '.' and not os.path.exists(dst):
            # Inside an osc checkout create a tracked package dir, else a plain one.
            if os.path.exists('.osc'):
                osc.core.createPackageDir(dst)
            else:
                os.mkdir(dst)
        if exists:
            # Existing package: bump the Version tag in place.
            spec = Spec(specfn)
            oldver = spec.gettag('Version')
            if oldver is None:
                raise Exception("old version not defined?")
            if oldver == data['version'] and not opts.force:
                self.logger.info("same version exists")
                return
            if 'dependencies' in data:
                print data['dependencies']
            if opts.update:
                spec.settag('Version', data['version'])
                with open(specfn, 'w') as fh:
                    fh.write(str(spec))
                self.write_changes_file(dst, pkg, data)
                self.download(data['dist']['tarball'], dst)
        else:
            # New package: render spec and _service from templates.
            context = self.get_templatedata(data)
            with open(specfn, 'w') as fh:
                fh.write(self.genspec(context))
            with open("{}/_service".format(dst), 'w') as fh:
                fh.write(self.genservice(context))
            self.write_changes_file(dst, pkg, data)
            self.download(data['dist']['tarball'], dst)
    # Prepend a new "Update to version X" entry to the package .changes file.
    def write_changes_file(self, dst, pkg, data):
        lines = []
        fn = os.path.join(dst, "{}.changes".format(pkg))
        if os.path.exists(fn):
            with open(fn, 'r') as fh:
                lines = fh.readlines()
        # Write to a temp file, then rename atomically over the original.
        with open(fn+'.new', 'w') as fh:
            author = 'lnussel@suse.de' # FIXME
            fh.write('-' * 67 + '\n')
            fh.write("%s - %s\n" % (
                datetime.datetime.utcnow().strftime('%a %b %d %H:%M:%S UTC %Y'),
                author))
            fh.write('\n')
            fh.write("- Update to version %s:\n" % data['version'])
            fh.write('\n')
            fh.write(''.join(lines))
        os.rename(fn+'.new', fn)
    # Query OBS devel:languages:nodejs for *name*.
    # Returns: the built version string, '' if the package exists but no
    # version could be read, or None if the package does not exist.
    def check_dln_exists(self, name):
        apiurl = 'https://build.opensuse.org'
        self.logger.debug("checking devel:languages:nodejs")
        r = requests.get(apiurl + '/source/devel:languages:nodejs/{}'.format(name))
        if r.status_code == requests.codes.ok:
            r = requests.get(apiurl +
                '/build/devel:languages:nodejs/Tumbleweed/x86_64/_repository/{}?view=fileinfo'.format(name),
                stream = True)
            if r.status_code == requests.codes.ok:
                xml = ET.parse(r.raw)
                xml = xml.getroot()
                v = xml.find('version')
                if v is not None:
                    return v.text
            return ''
        return None
    # Download *url* into directory *dst*, skipping if the file already exists.
    def download(self, url, dst):
        fn = os.path.join(dst, os.path.basename(url))
        if os.path.exists(fn):
            return
        r = requests.get(url, stream=True)
        r.raise_for_status()
        with open(fn, 'w') as fh:
            for chunk in r.iter_content(4096):
                fh.write(chunk)
    # Fetch package metadata for *pkg* from the npm registry, using and
    # refreshing the local npm cache file (etag-based conditional GET).
    # Returns the metadata dict of the 'latest' dist-tag version.
    def fetch_registry(self, pkg):
        data = None
        fn = os.path.expanduser('~/.npm/registry.npmjs.org/{}/.cache.json'.format(pkg))
        if os.path.exists(fn):
            with open(fn, 'r') as fh:
                data = json.load(fh)
        headers = {
            'accept-encoding' : 'gzip',
            'accept' : 'application/json',
        }
        if data is not None:
            headers['etag'] = data['_etag']
            headers['if-none-match'] = data['_etag']
        url = "https://registry.npmjs.org/{}".format(pkg)
        r = requests.get(url, headers = headers)
        if data is None or r.status_code != 304:
            # Cache miss or stale: store the fresh response.
            r.raise_for_status()
            data = r.json()
            data['_etag'] = r.headers['etag']
            if not os.path.exists(os.path.dirname(fn)):
                os.makedirs(os.path.dirname(fn))
            with open(fn, 'w') as fh:
                fh.write(json.dumps(data))
        else:
            self.logger.debug("using cached data")
        version = data['dist-tags']['latest']
        return data['versions'][version]
    # Flatten npm registry metadata into the template context dict.
    # NOTE(review): only '=', 'v', '^' and '~' version prefixes are accepted;
    # ranges like '>=1.2' raise. The '^'/'~' semantics are approximated as '>='.
    def get_templatedata(self, data):
        context = {
            'name' : data['name'],
            'version' : data['version'],
            'source' : data['dist']['tarball'],
            'description' : data['description'],
            'license' : data['license'] if 'license' in data else 'FIXME',
            'url' : data['homepage'],
            'summary' : data['description'].split('\n')[0],
            }
        requires = []
        if 'dependencies' in data:
            for (k, v) in data['dependencies'].items():
                if v[0] == '=':
                    v = v[1:]
                if v[0] == 'v':
                    v = v[1:]
                if v[0] == '^' or v[0] == '~':
                    v = v[1:]
                    a = v.split('.')
                    requires.append("{} >= {}".format(k, v))
                    #let's keep it simple for now
                    #requires.append("{} < {}.{}.0".format(k, a[0], int(a[1])+1))
                else:
                    raise Exception("unsupported version specification {}".format(v[0]))
        context['requires'] = requires
        print requires
        return context
    # Render the spec file template with *context*.
    def genspec(self, context):
        t = self.templates.get_template('spec.template')
        return t.render(context)
    # Render the OBS _service template with *context*.
    def genservice(self, context):
        t = self.templates.get_template('service.template')
        return t.render(context)
if __name__ == "__main__":
    # Run the cmdln application and propagate its exit status.
    sys.exit(BoilderPlate().main())

# vim: sw=4 et
|
import RPi.GPIO as GPIO
import time
class Freq:
    """Holds the PWM frequencies for the top and bottom motors."""

    def __init__(self, tfreq, bfreq):
        self.tfreq, self.bfreq = tfreq, bfreq
class Pin:
    """Wraps a single GPIO pin number."""

    def __init__(self, pin):
        self.pin = pin
class DutyCycle:
    """Holds the duty cycles for the top and bottom motors."""

    def __init__(self, tduty, bduty):
        self.tduty, self.bduty = tduty, bduty
class Motor:
    # Wraps two hardware PWM channels: top on pin 11, bottom on pin 13.
    # NOTE(review): Freq.tfreq / Freq.bfreq are read as *class* attributes;
    # they are only assigned at module level below, so constructing a Motor
    # before those assignments raises AttributeError. The t/b constructor
    # arguments are ignored — confirm intent.
    def __init__(self, t, b):
        self.t = GPIO.PWM(11, Freq.tfreq)
        self.b = GPIO.PWM(13, Freq.bfreq)
    # NOTE(review): missing 'self' — calling motor.pwm(freq) binds the
    # instance to 'pin'. Looks like an unfinished helper; the created PWM
    # object is also discarded.
    def pwm(pin, freq):
        GPIO.PWM(pin, freq)
    # NOTE(review): missing 'self', and 'pwm' is not resolvable as a bare
    # name from inside a method — calling this raises NameError. Both
    # parameters are also ignored in favor of class attributes.
    def duty(tfreq, bfreq):
        pwm(Pin.pin, Freq.tfreq)
# Module-level configuration: both PWM channels run at 500 Hz on pin 11.
# These assignments monkey-patch *class* attributes that Motor.__init__ and
# Motor.duty read, so they must execute before any Motor is constructed.
Freq.tfreq = 500
Freq.bfreq = 500
Pin.pin = 11
import tokens
import lexer
import monkey_ast as ast
from typing import List, Dict, Callable
# Pratt-parser operator precedence levels; higher values bind tighter.
LOWEST = 0
ASSIGN = 1       # x = value
EQUALS = 2       # == or !=
LESSGREATER = 3  # > or <
SUM = 4          # + or -
PRODUCT = 5      # * or /
PREFIX = 6       # -x or !x
CALL = 7         # fn(x)
INDEX = 8        # arr[i]

# Maps an infix-operator token type to the precedence it parses at.
precedences = {
    tokens.EQ: EQUALS,
    tokens.ASSIGN: ASSIGN,
    tokens.NOT_EQ: EQUALS,
    tokens.LT: LESSGREATER,
    tokens.GT: LESSGREATER,
    tokens.PLUS: SUM,
    tokens.MINUS: SUM,
    tokens.SLASH: PRODUCT,
    tokens.ASTERISK: PRODUCT,
    tokens.LPAREN: CALL,
    tokens.LBRACKET: INDEX
}
class Parser:
    """Pratt (top-down operator precedence) parser for the Monkey language.

    Consumes tokens from a Lexer two at a time (cur_token / peek_token) and
    produces an ast.Program. Parse errors are collected in self.errors
    rather than raised; failed sub-parses return None.
    """
    def __init__(self, lexer: lexer.Lexer):
        self.lexer: lexer.Lexer = lexer
        self.errors: List[str] = []
        self.cur_token: tokens.Token = None
        self.peek_token: tokens.Token = None
        # Token type -> parser for expressions *starting* with that token.
        self.prefix_parse_functions: Dict[tokens.TokenType, Callable] = {
            tokens.IDENT: self.parse_identifier,
            tokens.INT: self.parse_integer_literal,
            tokens.STRING: self.parse_string_literal,
            tokens.TRUE: self.parse_boolean,
            tokens.FALSE: self.parse_boolean,
            tokens.BANG: self.parse_prefix_expression,
            tokens.MINUS: self.parse_prefix_expression,
            tokens.LPAREN: self.parse_grouped_expression,
            tokens.IF: self.parse_if_expression,
            tokens.FUNCTION: self.parse_function_literal,
            tokens.LBRACKET: self.parse_array_literal,
            tokens.LBRACE: self.parse_hash_literal,
            tokens.FOR: self.parse_for_expression,
            tokens.WHILE: self.parse_while_expression,
        }
        # Token type -> parser invoked with the already-parsed left operand.
        self.infix_parse_functions: Dict[tokens.TokenType, Callable] = {
            tokens.PLUS: self.parse_infix_expression,
            tokens.MINUS: self.parse_infix_expression,
            tokens.SLASH: self.parse_infix_expression,
            tokens.ASTERISK: self.parse_infix_expression,
            tokens.EQ: self.parse_infix_expression,
            tokens.NOT_EQ: self.parse_infix_expression,
            tokens.LT: self.parse_infix_expression,
            tokens.GT: self.parse_infix_expression,
            tokens.LPAREN: self.parse_call_expression,
            tokens.LBRACKET: self.parse_index_expression,
            tokens.ASSIGN: self.parse_assign_expression,
        }
        # Prime cur_token and peek_token.
        self.next_token()
        self.next_token()
    def register_prefix(self, token_type: tokens.TokenType, prefix_parse_function: Callable):
        """Register/override the prefix parser for a token type."""
        self.prefix_parse_functions[token_type] = prefix_parse_function
    def register_infix(self, token_type: tokens.TokenType, infix_parse_function: Callable):
        """Register/override the infix parser for a token type."""
        self.infix_parse_functions[token_type] = infix_parse_function
    def next_token(self):
        """Advance the two-token lookahead window by one token."""
        self.cur_token = self.peek_token
        self.peek_token = self.lexer.next_token()
    def parse_program(self):
        """Parse statements until EOF; returns the ast.Program root."""
        program = ast.Program()
        while self.cur_token.typ != tokens.EOF:
            statement = self.parse_statement()
            if statement is not None:
                program.statements.append(statement)
            self.next_token()
        return program
    def parse_statement(self):
        """Dispatch on the current token; defaults to an expression statement."""
        parse_typ_dict = {
            tokens.LET: self.parse_let_statement,
            tokens.RETURN: self.parse_return_statement,
            tokens.BREAK: self.parse_break_continue_statement,
            tokens.CONTINUE: self.parse_break_continue_statement,
        }
        return parse_typ_dict.get(self.cur_token.typ, self.parse_expression_statement)()
    def parse_let_statement(self):
        """Parse 'let <ident> = <expr>;'; returns None on malformed input."""
        statement = ast.LetStatement(token=self.cur_token)
        if not self.expect_peek(tokens.IDENT):
            return None
        statement.name = ast.Identifier(token=self.cur_token, value=self.cur_token.literal)
        if not self.expect_peek(tokens.ASSIGN):
            return None
        self.next_token()
        statement.value = self.parse_expression(LOWEST)
        # The trailing semicolon is optional.
        if self.peek_token_is(tokens.SEMICOLON):
            self.next_token()
        return statement
    def parse_return_statement(self):
        """Parse 'return <expr>;'."""
        statement = ast.ReturnStatement(token=self.cur_token)
        self.next_token()
        statement.return_value = self.parse_expression(LOWEST)
        if self.peek_token_is(tokens.SEMICOLON):
            self.next_token()
        return statement
    def parse_break_continue_statement(self):
        """Parse a bare 'break;' or 'continue;' statement."""
        if self.cur_token.typ == tokens.BREAK:
            statement = ast.BreakStatement(token=self.cur_token)
        elif self.cur_token.typ == tokens.CONTINUE:
            statement = ast.ContinueStatement(token=self.cur_token)
        else:
            return None
        if self.peek_token_is(tokens.SEMICOLON):
            self.next_token()
        return statement
    def parse_expression_statement(self):
        """Parse a bare expression used as a statement."""
        statement = ast.ExpressionStatement(token=self.cur_token)
        statement.expression = self.parse_expression(LOWEST)
        if self.peek_token_is(tokens.SEMICOLON):
            self.next_token()
        return statement
    def parse_expression(self, precedence: int):
        """Core Pratt loop: parse a prefix, then fold in infix operators
        while the next operator binds tighter than *precedence*."""
        prefix = self.prefix_parse_functions.get(self.cur_token.typ)
        if prefix is None:
            self.no_prefix_parse_function(self.cur_token.typ)
            return None
        left_exp = prefix()
        while not self.peek_token_is(tokens.SEMICOLON) and precedence < self.peek_precedence():
            infix = self.infix_parse_functions.get(self.peek_token.typ)
            if infix is None:
                return left_exp
            self.next_token()
            left_exp = infix(left_exp)
        return left_exp
    def no_prefix_parse_function(self, t: tokens.TokenType):
        """Record an error for a token with no registered prefix parser."""
        self.errors.append(f"no prefix parse function for {t} found")
    def parse_identifier(self) -> ast.Expression:
        """Parse a bare identifier."""
        return ast.Identifier(token=self.cur_token, value=self.cur_token.literal)
    def parse_integer_literal(self) -> ast.Expression:
        """Parse an integer literal; records an error if not parseable."""
        lit = ast.IntegerLiteral(token=self.cur_token)
        try:
            value = int(self.cur_token.literal)
        except BaseException as e:
            self.errors.append(str(e))
            return None
        lit.value = value
        return lit
    def parse_boolean(self) -> ast.Expression:
        """Parse 'true' or 'false'."""
        return ast.Boolean(
            token=self.cur_token,
            value=self.cur_token_is(tokens.TRUE)
        )
    def parse_string_literal(self) -> ast.Expression:
        """Parse a string literal."""
        return ast.StringLiteral(
            token=self.cur_token,
            value=self.cur_token.literal
        )
    def parse_array_literal(self) -> ast.Expression:
        """Parse '[e1, e2, ...]'."""
        array = ast.ArrayLiteral(token=self.cur_token)
        array.elements = self.parse_expression_list(tokens.RBRACKET)
        return array
    def parse_hash_literal(self) -> ast.Expression:
        """Parse '{k1: v1, k2: v2, ...}' into a list of (key, value) pairs."""
        hash = ast.HashLiteral(token=self.cur_token)
        hash.pairs = []
        while not self.peek_token_is(tokens.RBRACE):
            self.next_token()
            key = self.parse_expression(LOWEST)
            if not self.expect_peek(tokens.COLON):
                return None
            self.next_token()
            value = self.parse_expression(LOWEST)
            hash.pairs.append((key, value))
            # A comma is required between pairs but not before the brace.
            if not self.peek_token_is(tokens.RBRACE) and not self.expect_peek(tokens.COMMA):
                return None
        if not self.expect_peek(tokens.RBRACE):
            return None
        return hash
    def parse_if_expression(self) -> ast.Expression:
        """Parse 'if (<cond>) { ... } [else { ... }]'."""
        exp = ast.IfExpression(token=self.cur_token)
        if not self.expect_peek(tokens.LPAREN):
            return None
        self.next_token()
        exp.condition = self.parse_expression(LOWEST)
        if not self.expect_peek(tokens.RPAREN):
            return None
        if not self.expect_peek(tokens.LBRACE):
            return None
        exp.consequence = self.parse_block_statement()
        if self.peek_token_is(tokens.ELSE):
            self.next_token()
            if not self.expect_peek(tokens.LBRACE):
                return None
            exp.alternative = self.parse_block_statement()
        return exp
    def parse_for_expression(self) -> ast.Expression:
        """Parse 'for (<ident> in <iterable>) { ... }'."""
        cur_token = self.cur_token
        if not self.expect_peek(tokens.LPAREN):
            return None
        self.next_token()
        ident = ast.Identifier(token=self.cur_token, value=self.cur_token.literal)
        if not self.expect_peek(tokens.IN):
            return None
        self.next_token()
        iterator = self.parse_expression(LOWEST)
        if not self.expect_peek(tokens.RPAREN):
            return None
        if not self.expect_peek(tokens.LBRACE):
            return None
        body = self.parse_block_statement()
        return ast.ForExpression(
            token=cur_token,
            iterator=iterator,
            element=ident,
            body=body
        )
    def parse_while_expression(self) -> ast.Expression:
        """Parse 'while (<cond>) { ... }'."""
        cur_token = self.cur_token
        if not self.expect_peek(tokens.LPAREN):
            return None
        self.next_token()
        condition = self.parse_expression(LOWEST)
        if not self.expect_peek(tokens.RPAREN):
            return None
        if not self.expect_peek(tokens.LBRACE):
            return None
        body = self.parse_block_statement()
        return ast.WhileExpression(
            token = cur_token,
            condition = condition,
            body = body
        )
    def parse_block_statement(self) -> ast.BlockStatement:
        """Parse statements until the matching '}' (or EOF on malformed input)."""
        block = ast.BlockStatement(
            token=self.cur_token,
            statements=[])
        self.next_token()
        while not self.cur_token_is(tokens.RBRACE) and not self.cur_token_is(tokens.EOF):
            statement = self.parse_statement()
            if statement is not None:
                block.statements.append(statement)
            self.next_token()
        return block
    def parse_function_literal(self) -> ast.Expression:
        """Parse 'fn(<params>) { ... }'."""
        lit = ast.FunctionLiteral(token=self.cur_token)
        if not self.expect_peek(tokens.LPAREN):
            return None
        lit.parameters = self.parse_function_parameters()
        if not self.expect_peek(tokens.LBRACE):
            return None
        lit.body = self.parse_block_statement()
        return lit
    def parse_function_parameters(self) -> List[ast.Identifier]:
        """Parse a comma-separated identifier list ending with ')'."""
        identifiers: List[ast.Identifier] = []
        # Empty parameter list: '()'.
        if self.peek_token_is(tokens.RPAREN):
            self.next_token()
            return identifiers
        self.next_token()
        ident: ast.Identifier = ast.Identifier(token = self.cur_token, value= self.cur_token.literal)
        identifiers.append(ident)
        while self.peek_token_is(tokens.COMMA):
            self.next_token()
            self.next_token()
            ident = ast.Identifier(
                token=self.cur_token,
                value=self.cur_token.literal
            )
            identifiers.append(ident)
        if not self.expect_peek(tokens.RPAREN):
            return None
        return identifiers
    def parse_call_expression(self, function: ast.Expression) -> ast.Expression:
        """Infix parser for '(' after a callable expression."""
        exp = ast.CallExpression(
            token=self.cur_token,
            function=function
        )
        exp.arguments = self.parse_expression_list(tokens.RPAREN)
        return exp
    def parse_index_expression(self, left: ast.Expression) -> ast.Expression:
        """Infix parser for '[' after an indexable expression."""
        exp = ast.IndexExpression(
            token = self.cur_token,
            left = left
        )
        self.next_token()
        exp.index = self.parse_expression(LOWEST)
        if not self.expect_peek(tokens.RBRACKET):
            return None
        return exp
    def parse_expression_list(self, end: tokens.TokenType) -> List[ast.Expression]:
        """Parse a comma-separated expression list terminated by *end*."""
        args: List[ast.Expression] = []
        if self.peek_token_is(end):
            self.next_token()
            return args
        self.next_token()
        args.append(self.parse_expression(LOWEST))
        while self.peek_token_is(tokens.COMMA):
            self.next_token()
            self.next_token()
            args.append(self.parse_expression(LOWEST))
        if not self.expect_peek(end):
            return None
        return args
    def parse_prefix_expression(self) -> ast.Expression:
        """Parse '!<expr>' or '-<expr>' at PREFIX precedence."""
        expression = ast.PrefixExpression(
            token=self.cur_token,
            operator=self.cur_token.literal
        )
        self.next_token()
        expression.right = self.parse_expression(PREFIX)
        return expression
    def parse_infix_expression(self, left: ast.Expression) -> ast.Expression:
        """Parse '<left> <op> <right>' at the operator's own precedence
        (left-associative)."""
        expression = ast.InfixExpression(
            token=self.cur_token,
            operator=self.cur_token.literal,
            left=left
        )
        precedence = self.cur_precedence()
        self.next_token()
        expression.right = self.parse_expression(precedence)
        return expression
    def parse_assign_expression(self, left: ast.Expression) -> ast.Expression:
        """Parse '<name> = <value>' as an expression."""
        expression = ast.AssignExpression(
            token=self.cur_token,
            name=left
        )
        precedence = self.cur_precedence()
        self.next_token()
        expression.value = self.parse_expression(precedence)
        return expression
    def parse_grouped_expression(self) -> ast.Expression:
        """Parse a parenthesized expression '(<expr>)'."""
        self.next_token()
        exp = self.parse_expression(LOWEST)
        if not self.expect_peek(tokens.RPAREN):
            return None
        return exp
    def cur_token_is(self, t: tokens.TokenType) -> bool:
        """True if the current token has type *t*."""
        return self.cur_token.typ == t
    def peek_token_is(self, t: tokens.TokenType) -> bool:
        """True if the next token has type *t*."""
        return self.peek_token.typ == t
    def expect_peek(self, t: tokens.TokenType) -> bool:
        """Advance if the next token has type *t*; otherwise record an error."""
        if self.peek_token_is(t):
            self.next_token()
            return True
        else:
            self.peek_error(t)
            return False
    def peek_error(self, t: tokens.TokenType):
        """Record an 'unexpected token' error for the lookahead token."""
        message = f"expected next token to be {t}, got {self.peek_token.typ} instead"
        self.errors.append(message)
    def cur_precedence(self) -> int:
        """Precedence of the current token's infix operator (LOWEST if none)."""
        return precedences.get(self.cur_token.typ, LOWEST)
    def peek_precedence(self) -> int:
        """Precedence of the lookahead token's infix operator (LOWEST if none)."""
        return precedences.get(self.peek_token.typ, LOWEST)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'decryption.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from decryptText import decryptText
class Ui_windowDecryption(object):
def setupUi(self, windowDecryption):
windowDecryption.setObjectName("windowDecryption")
windowDecryption.resize(800, 600)
windowDecryption.setMinimumSize(800, 600)
windowDecryption.setMaximumSize(800, 600)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../Images/icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
windowDecryption.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(windowDecryption)
self.centralwidget.setObjectName("centralwidget")
self.buttonDecrypt = QtWidgets.QPushButton(self.centralwidget)
self.buttonDecrypt.setGeometry(QtCore.QRect(640, 480, 141, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.buttonDecrypt.setFont(font)
self.buttonDecrypt.setObjectName("buttonDecrypt")
self.buttonDecrypt.clicked.connect(self.decrypt)
self.labelD = QtWidgets.QLabel(self.centralwidget)
self.labelD.setGeometry(QtCore.QRect(440, 370, 31, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.labelD.setFont(font)
self.labelD.setObjectName("labelD")
self.labelN = QtWidgets.QLabel(self.centralwidget)
self.labelN.setGeometry(QtCore.QRect(60, 370, 31, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.labelN.setFont(font)
self.labelN.setObjectName("labelN")
self.inputN = QtWidgets.QPlainTextEdit(self.centralwidget)
self.inputN.setGeometry(QtCore.QRect(90, 370, 341, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.inputN.setFont(font)
self.inputN.setObjectName("inputN")
self.inputD = QtWidgets.QPlainTextEdit(self.centralwidget)
self.inputD.setGeometry(QtCore.QRect(470, 370, 161, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.inputD.setFont(font)
self.inputD.setObjectName("inputD")
self.outputText = QtWidgets.QTextEdit(self.centralwidget)
self.outputText.setGeometry(QtCore.QRect(60, 50, 571, 261))
font = QtGui.QFont()
font.setPointSize(12)
self.outputText.setFont(font)
self.outputText.setObjectName("outputText")
self.inputText = QtWidgets.QPlainTextEdit(self.centralwidget)
self.inputText.setGeometry(QtCore.QRect(60, 440, 571, 81))
font = QtGui.QFont()
font.setPointSize(12)
self.inputText.setFont(font)
self.inputText.setObjectName("inputText")
windowDecryption.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(windowDecryption)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
self.menubar.setObjectName("menubar")
windowDecryption.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(windowDecryption)
self.statusbar.setObjectName("statusbar")
windowDecryption.setStatusBar(self.statusbar)
self.retranslateUi(windowDecryption)
QtCore.QMetaObject.connectSlotsByName(windowDecryption)
def decrypt(self):
x = self.inputN.toPlainText()
y = self.inputD.toPlainText()
s = self.inputText.toPlainText()
if (not len(x) or not len(y) or not len(s)):
self.displayInfo("Enter Input.")
return
bad = False
if (x[0] == '0' or y[0] == '0'):
bad = True
for ch in x:
if (ch > '9' or ch < '0'):
bad = True
break
for ch in y:
if (ch > '9' or ch < '0'):
bad = True
break
if bad:
self.displayInfo("Input N or D must be positive integers and should not contain any leading zeros.")
return
n = int(x)
d = int(y)
self.outputText.setText(decryptText(s, n, d))
def displayInfo(self, message):
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Info")
msg.setText(message)
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.exec_()
    def retranslateUi(self, windowDecryption):
        """Set all user-visible strings (Qt Designer / pyuic convention)."""
        _translate = QtCore.QCoreApplication.translate
        windowDecryption.setWindowTitle(_translate("windowDecryption", "Text Decryption"))
        self.buttonDecrypt.setText(_translate("windowDecryption", "Decrypt Text"))
        self.labelD.setText(_translate("windowDecryption", "D :"))
        self.labelN.setText(_translate("windowDecryption", "N :"))
if __name__ == "__main__":
    # Standalone entry point: build the main window and run the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    windowDecryption = QtWidgets.QMainWindow()
    ui = Ui_windowDecryption()
    ui.setupUi(windowDecryption)
    windowDecryption.show()
    sys.exit(app.exec_())
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
# ### Load the data
# In[ ]:
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
data_for_ID = pd.read_csv('../input/test.csv')
# ### Take a look at the data
# In[ ]:
train_data.head()
# ### Drop features that do not appear to carry much information
# In[ ]:
train_data.drop(['PassengerId','Name','Ticket','Cabin'], axis=1, inplace=True)
test_data.drop(['PassengerId','Name','Ticket','Cabin'], axis=1, inplace=True)
# In[ ]:
all_data = pd.concat([test_data, train_data])
# ### Exploratory data analysis
# In[ ]:
# How does Pclass affect a passenger's chance of survival?
print(train_data[['Pclass', 'Survived']].groupby(['Pclass']).mean())
sns.catplot(x='Pclass', y='Survived', kind='bar', data=train_data)
# ### The class a passenger travelled in clearly affects the chance of survival
# In[ ]:
### How does sex affect a passenger's chance of survival?
print(train_data[['Sex', 'Survived']].groupby(['Sex']).mean())
sns.catplot(x='Sex', y='Survived', kind='bar', data=train_data)
# ### Most of the surviving passengers are women
# In[ ]:
# How does the fare paid affect the chance of survival?
g = sns.FacetGrid(train_data, col='Survived')
g = g.map(sns.distplot, "Fare")
# ### The more you paid, the better your chance of survival
# In[ ]:
# How does age affect the chance of survival?
g = sns.FacetGrid(train_data, col='Survived')
g = g.map(sns.distplot, "Age")
# ### Women and children were most likely boarded onto the lifeboats first
# In[ ]:
# How does the number of relatives aboard affect the chance of survival?
sns.catplot(x='SibSp', y='Survived', data=train_data, kind='bar')
# ### The fewer relatives a passenger had, the better the chance of survival
# ### Fill the gaps in the data and add new features
# In[ ]:
def munge_data(data):
    """Fill missing values and derive extra features for the Titanic data.

    Age gaps are filled with the per-sex median of *data* itself; Fare gaps
    with the per-class median of the combined module-level `all_data` frame.
    Adds one-hot columns for Sex/Embarked and a `Relatives` count, and
    returns the augmented DataFrame.
    """
    # Hoist the group medians out of the per-row lambda: the original version
    # recomputed the whole groupby for every row (O(n^2)). Results unchanged.
    age_medians = data.groupby("Sex")["Age"].median()
    data["Age"] = data["Age"].fillna(data["Sex"].map(age_medians))
    fare_medians = all_data.groupby("Pclass")["Fare"].median()
    data["Fare"] = data["Fare"].fillna(data["Pclass"].map(fare_medians))
    # Gender - numeric code
    genders = {"male": 1, "female": 0}
    data["Sex"] = data["Sex"].apply(lambda s: genders.get(s))
    # Gender - one-hot expansion
    gender_dummies = pd.get_dummies(data["Sex"], prefix="SexD", dummy_na=False)
    data = pd.concat([data, gender_dummies], axis=1)
    # Embarkment - numeric code ("U" marks a missing port)
    embarkments = {"U": 0, "S": 1, "C": 2, "Q": 3}
    data["Embarked"] = data["Embarked"].fillna("U").apply(lambda e: embarkments.get(e))
    # Embarkment - one-hot expansion
    embarkment_dummies = pd.get_dummies(data["Embarked"], prefix="EmbarkedD", dummy_na=False)
    data = pd.concat([data, embarkment_dummies], axis=1)
    # Number of relatives aboard (parents/children + siblings/spouses)
    data["Relatives"] = data["Parch"] + data["SibSp"]
    return data
# In[ ]:
train_data_munged = munge_data(train_data).drop(['EmbarkedD_0'],axis=1)
test_data_munged = munge_data(test_data)
# NOTE(review): 'EmbarkedD_0' is dropped from the train frame only, so train
# and test may carry different column sets -- confirm before predict() below.
# ### Three-fold cross-validation
# In[ ]:
cv = StratifiedKFold(train_data["Survived"], n_folds=3, shuffle=True, random_state=1)
# NOTE(review): this is the pre-0.18 sklearn.cross_validation API; modern
# sklearn replaces it with model_selection.StratifiedKFold(n_splits=...).
# ### Compare two algorithms: Random Forest and K Nearest Neighbors
# In[ ]:
alg = RandomForestClassifier(random_state=1, n_estimators=350, min_samples_split=6, min_samples_leaf=2)
# NOTE(review): the feature matrix passed below still contains the 'Survived'
# target column, leaking the label into the features during CV.
scores = cross_val_score(alg, train_data_munged, train_data_munged["Survived"], cv=cv)
print("Accuracy (random forest): {}".format(scores.mean()))
# In[ ]:
alg_ngbh = KNeighborsClassifier(n_neighbors=3)
scores = cross_val_score(alg_ngbh, train_data_munged, train_data_munged["Survived"], cv=cv)
print("Accuracy (k-neighbors): {}".format(scores.mean()))
# ### Use RandomForestClassifier for the final model
# In[ ]:
alg.fit(train_data_munged.drop(["Survived"],axis=1), train_data_munged["Survived"])
predictions = alg.predict(test_data_munged)
submission = pd.DataFrame({
    "PassengerId": data_for_ID["PassengerId"],
    "Survived": predictions
})
submission.to_csv("titanic-submission.csv", index=False)
|
class g2coreGuiBackendDRO:
    """Digital read-out backend: caches axis positions parsed from g2core
    status reports and tracks whether anything changed since the last poll."""

    def __init__(self):
        # Start "dirty" so the first hasChanges() poll reports True.
        self.changes = True
        self.positionInformation = {}

    def getValue(self, name):
        """Return the cached position of axis *name* (e.g. "x") as a float,
        or None when no value has been received yet."""
        key = "pos" + name
        if key in self.positionInformation:
            return float(self.positionInformation[key])
        return None

    def getValueAsText(self, name):
        """Return the cached position of axis *name* as display text,
        or "?" when the value is unknown."""
        key = "pos" + name
        if key in self.positionInformation:
            return str(self.positionInformation[key])
        return "?"

    def hasChanges(self):
        """Return the dirty flag and reset it (True at most once per update)."""
        value = self.changes
        self.changes = False
        return value

    def updateValue(self, status, name):
        """Copy field *name* from a status report into the cache, marking dirty."""
        if name in status:
            self.positionInformation[name] = status[name]
            self.changes = True

    def animate(self, data):
        """Extract a status report ("sr") from a parsed g2core message and
        cache any axis positions it carries. Accepts both a top-level
        {"sr": ...} message and a response wrapper {"r": {"sr": ...}}."""
        status = None
        if "sr" in data:
            status = data["sr"]
        elif "r" in data:
            if "sr" in data["r"]:
                status = data["r"]["sr"]
        if status is not None:  # identity test instead of `!= None`
            # Same six axis updates as before, without the copy-paste run.
            for axis in ("posx", "posy", "posz", "posa", "posb", "posc"):
                self.updateValue(status, axis)
|
"""Wk 4 Practical - Samuel Barrett 13038579"""
# Count the vowels in the user's name and report the totals.
# (The earlier commented-out attempt counted distinct vowels via substring
# containment instead of per-letter occurrences; it has been removed.)
user_name = input('Name: ').capitalize()
vowels = 'aeiouAEIOU'
cnt = 0
letter_cnt = len(user_name)
for letter in user_name:
    if letter.lower() in vowels:
        cnt += 1
print('Out of {} letters, {} has {} vowels'.format(letter_cnt, user_name, cnt))
|
# @Author : comma
# @Date : 2021-02-14 15:23
# Draw a circle of radius 200: back the turtle up 200 units, turn it 90
# degrees clockwise, then trace the circle.
import turtle
turtle.fd(-200)
turtle.right(90)
turtle.circle(200)
"""cursor.execute(
'INSERT INTO pokemon VALUES(%d, "%s", "%s","%s", %d, %d,%d, %d, %d,%d, %d, %d,%r)', int(row[0]), row[1], row[2], row[3], int(row[4]), int(row[5]), int(row[6]), int(row[7]), int(row[8]), int(row[9]), int(row[10]), bool(row[11]))"""
'''
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('sp_createUser',(_name,_email,_hashed_password))
data = cursor.fetchall()
if len(data) is 0:
conn.commit()
return json.dumps({'message':'User created successfully !'})
else:
return json.dumps({'error':str(data[0])})
'''
from flask import render_template, request, redirect, flash, url_for, session
from app import app, mysql
from app.admin import auth
'''
@app.route('/')
def index():
conn = mysql.connect()
cur = conn.cursor()
cur.execute('show tables;')
data = cur.fetchall()
cur.close()
print (data)
return 'HELLO'
'''
@app.route('/', methods=['GET','POST'])
def index():
    """Home page: GET renders the index (or redirects to login when not
    authenticated); POST renders a preview of the pokemons relation."""
    if request.method == 'GET':
        if session.get('auth'):
            return render_template('index.html')
        else:
            return redirect(url_for('login'))
    else:
        conn = mysql.connect()
        cur = conn.cursor()
        # NOTE(review): headers are read from `pokemon_mv` but the rows below
        # come from `pokemons` -- confirm the two relations share a schema.
        cur.execute('desc pokemon_mv;')
        data =cur.fetchmany(3)
        attrs = []
        for d in data:
            attrs.append(d[0])
        cur.execute('select * from pokemons;')
        data1 = cur.fetchmany(5)
        # NOTE(review): identical query executed twice; data1 and data2 hold
        # the same first five rows.
        cur.execute('select * from pokemons;')
        data2 = cur.fetchmany(5)
        cur.close()
        print(len(data1),len(data2))
        return render_template('relation.html', attrs=attrs,data1=data1, data2=data2, data=data1)
@app.route('/insert', methods=['POST'])
def insert():
    """Build (and currently only print) an INSERT statement from the posted
    form; the actual execute/commit is commented out below."""
    conn = mysql.connect()
    cursor = conn.cursor()
    row = []
    data = request.form
    for d in data:
        row.append(data[d])
    print(row)
    #insert some values into pokemon_stats and some in pokemon__details
    # SECURITY NOTE(review): the statement is assembled with %-formatting from
    # raw form input. If the commented-out cursor.execute(x) is ever
    # re-enabled, switch to a parameterized query (cursor.execute(sql, params)).
    x = 'INSERT INTO pokemons VALUES( NULL, %a, %a, %a, % d, % d, % d, % d, % d, % d, % d,%d, % r)' % (row[1], row[2], row[3], int(row[4]), int(row[5]), int(row[6]), int(row[7]), int(row[8]), int(row[9]), int(row[10]),int(row[11]) ,bool(row[12]))
    print(x)
    '''cursor.execute(x)
    conn.commit()
    cursor.close()'''
    return redirect('/')
'''
login to the site to view/edit rest of the file
'''
@app.route('/login',methods=['POST','GET'])
def login():
    """Render the login form on GET; check credentials on POST."""
    if request.method == 'GET':
        if session.get('auth'):
            flash('Already logged in')
            return redirect(url_for('index'))
        return render_template('login.html')
    # POST: validate the submitted credentials with the admin auth helper.
    if auth(request.form['username'],request.form['password']):
        flash('Successfully logged in')
        return redirect(url_for('index'))
    flash('Wrong credentials')
    return redirect(url_for('login'))
@app.route('/table/1')
def table1():
    """Render the full pokemons relation (view 1)."""
    conn = mysql.connect()
    cursor = conn.cursor()
    # Column names for the table header.
    cursor.execute('desc pokemons')
    attrs = [column[0] for column in cursor.fetchall()]
    # All rows for the table body.
    cursor.execute('select * from pokemons;')
    rows = cursor.fetchall()
    cursor.close()
    return render_template('relation.html',attrs=attrs,data=rows)
@app.route('/table/2')
def table2():
    """Render the full pokemons relation (view 2).

    NOTE: currently identical to table1 apart from the route.
    """
    conn = mysql.connect()
    cursor = conn.cursor()
    # Column names for the table header.
    cursor.execute('desc pokemons')
    attrs = [column[0] for column in cursor.fetchall()]
    # All rows for the table body.
    cursor.execute('select * from pokemons;')
    rows = cursor.fetchall()
    cursor.close()
    return render_template('relation.html',attrs=attrs,data=rows)
'''
name name
type1 type1
type2 type2
hp hp
attack attack
defense defense
sp_attack
sp_defense
speed
gen
leg
''' |
"""
Authentication functions for the routes
"""
from functools import wraps
import inspect
from flask import current_app, request, Response
import jwt
import requests
def token_required(f):
    """Checks whether token is valid or raises error 401."""
    @wraps(f)
    def decorated(*args, **kwargs):
        """Return function for decorator"""
        token_string = request.headers.get("Authorization")
        auth_url = current_app.config["AUTH_URL"]
        use_authentication = current_app.config["AUTHENTICATE_ROUTES"]
        if not use_authentication:
            # don't authenticate routes
            return f(*args, **kwargs)
        else:
            # authenticate routes: ask the auth service whether the token is valid
            r = requests.get(
                f"{auth_url}/status", headers={"Authorization": token_string}
            )
            if r.status_code == 200:
                auth_key = current_app.config["AUTH_KEY"]
                # strip "Bearer" prefix
                token_string = token_string.replace("Bearer ", "")
                # NOTE(review): jwt.decode() without algorithms= assumes
                # PyJWT 1.x; PyJWT >= 2.0 requires an explicit algorithms
                # list -- confirm the pinned dependency version.
                payload = jwt.decode(token_string, auth_key)
                token_username = payload["name"]
                # Inject the username only when the endpoint declares it.
                if "token_username" in inspect.getfullargspec(f).args:
                    kwargs["token_username"] = token_username
                return f(*args, **kwargs)
            else:
                return Response(
                    "User not authenticated",
                    401,
                    {"WWW-Authenticate": 'Basic realm="Login Required"'},
                )
    return decorated
def job_token_required(f):
    """Injects token content into endpoint.

    Decodes the job token from the Authorization header and, when the
    wrapped endpoint declares a ``token_job_id`` parameter, passes the
    decoded job id through to it. When authentication is disabled the
    endpoint is called unchanged.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        """Return function for decorator"""
        auth_string = request.headers.get("Authorization")
        use_authentication = current_app.config["AUTHENTICATE_ROUTES"]
        if not use_authentication:
            # don't authenticate routes
            return f(*args, **kwargs)
        else:
            auth_key = current_app.config["JOB_KEY"]
            # strip "Bearer" prefix
            token_string = auth_string.replace("Bearer ", "")
            try:
                payload = jwt.decode(token_string, auth_key)
                token_job_id = payload.get("job_id", "")
            except jwt.DecodeError:
                token_job_id = None
            # Fix: the original also assigned kwargs["token_job_id"]
            # unconditionally after this guard, which would raise TypeError
            # for endpoints that do not declare the parameter. Inject it
            # only when the endpoint accepts it.
            if "token_job_id" in inspect.getfullargspec(f).args:
                kwargs["token_job_id"] = token_job_id
            return f(*args, **kwargs)
    return decorated
|
from django.urls import path
from . import views
# Route table for this app; the view callables live in ./views.py.
urlpatterns = [
    path('',views.toLogin_view),
    path('index/',views.Login_view),
    path('toregister/',views.toregister_view),
    path('register/',views.Register_view),
    path('bgm.html/',views.bgm),
]
|
import copy
class Game:
    """Executes a handheld-console program (AoC 2020 day 8) and records
    whether it looped forever or terminated cleanly."""

    def __init__(self, acc):
        # Accumulator register, mutated by 'acc' instructions.
        self.acc = acc

    def run(self, commands):
        """Run *commands* until an instruction repeats (infinite loop,
        self.inf stays True) or the program counter falls off the end
        (clean termination, self.inf becomes False)."""
        self.inf = True
        self.at = 0
        running = True
        while running:
            running = self.execute(commands[self.at])
            if self.at >= len(commands):
                self.inf = False
                running = False

    def execute(self, command):
        """Execute one instruction; return False when it already ran once."""
        if command['exec'] == 1:
            return False
        op = command['code']
        if op == 'nop':
            self.at += 1
        elif op == 'acc':
            self.acc += int(command['value'])
            self.at += 1
        elif op == 'jmp':
            self.at += int(command['value'])
        command['exec'] += 1
        return True
# Part 2 driver: parse the program, then flip one nop<->jmp at a time and
# print the accumulator of the variant that terminates instead of looping.
f = open('input.txt')
commands = []
for line in f:
    data = line[:-1].split()
    commands.append({'code': data[0], 'value': data[1], 'exec': 0})
for i, command in enumerate(commands):
    # Work on a deep copy so each trial starts with fresh 'exec' counters.
    tst = copy.deepcopy(commands)
    if command['code'] == 'nop':
        tst[i]['code'] = 'jmp'
    elif command['code'] == 'jmp':
        tst[i]['code'] = 'nop'
    else: continue
    g = Game(0)
    g.run(tst)
    # A run that did not loop forever is the fixed program.
    if not g.inf: print(g.acc)
|
#
# Library
#
def int_to_string(lst):
    """
    Recursively convert a sequence of integers to a list of their strings.
    input = [0,1,2,3,4] ---- output == [str(0), str(1)...str(4)]

    Fix: the original had no base case for an empty input, so
    int_to_string([]) recursed forever (RecursionError); it now returns [].
    """
    if len(lst) == 0:
        return []
    if len(lst) == 1:
        return [str(lst[0])]
    # Split off the first element and recurse on both halves.
    return int_to_string(lst[:1]) + int_to_string(lst[1:])
#
# Script
#
def main():
    # Demo: convert the integers 0..4 to their string representations.
    lst = range(5)
    print(int_to_string(lst))
if __name__ == '__main__':
    main()
|
import pygame, sys, random
from pygame.locals import *
from random import randint
from functions import *
from parameters import *
from lower_functions import *
# --- one-time pygame / font / window setup ---
pygame.init()
pygame.mixer.init()
myfont = pygame.font.SysFont('Comic Sans MS', 50)
textsurface = myfont.render('Game Over!', False, BLACK, RED)
game_over_size = myfont.size('Game Over!')[0]
myfont2 = pygame.font.SysFont('Comic Sans MS', 20)
screen = pygame.display.set_mode((windowwidth, windowheight), 0, 32)
screen.fill(CYAN)
# ============
# --- initial game state for the first falling shape ---
space = False
old_rects = []
color = GREEN
colors = [RED, YELLOW, MAGENTA, GREEN, BLUE]
shape = "I"
shapes = ["I", "J", "L", "K", "o", "z", "s"]
orientation = 0 # orientation by how many times 90 degrees clockwise turned from original shape
orientations = [0, 1, 2, 3]
points = 0
score = myfont2.render('Score: ' + str(points), False, BLACK)
move_y = starting_y(shape, orientation) - rec_size # -rec_size because before drawing it gets incremented
lane = get_random_lane(shape, orientation)
# =============
start_time = pygame.time.get_ticks()
# example of show image on screen: coin = screen.blit(coin_Img, (x,get_coin_y(counter)))
# example of rectangle on screen: pygame.draw.rect(screen, WHITE, (top_left_corner_x, top_left_corner_y, x_length, y_length))
# --- main game loop: draw, handle input, land shapes, clear rows ---
while True:
    # === Fixed Screen Content ===
    screen.fill(CYAN)
    pygame.draw.rect(screen, WHITE, (hor_margin,0,windowwidth-2*hor_margin,windowheight-vert_margin))
    draw_old_rects(screen, old_rects)
    screen.blit(score, (hor_margin, windowheight-vert_margin))
    # === Draw Current Rectangles ===
    new_step, bottom, new_step_size = get_y(start_time, space, lane, old_rects, shape, orientation, move_y)
    if new_step and not bottom: move_y = new_step_size
    rects = draw_shape(screen, move_y, lane, color, shape, orientation)
    # === Event Management ===
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            # NOTE(review): lowest_y is computed but not used in this loop.
            lowest_y = max(shape_incr(shape, orientation), key = itemgetter(1))[1]
            if event.key == pygame.K_LEFT:
                if move_available(shape, orientation, lane, move_y, old_rects, "left"):
                    lane -= 1
            elif event.key == pygame.K_RIGHT:
                if move_available(shape, orientation, lane, move_y, old_rects, "right"):
                    lane += 1
            elif event.key == pygame.K_SPACE:
                # Space sets the drop flag that is passed to get_y.
                space = True
            elif event.key == pygame.K_f:
                # 'f' rotates the shape 90 degrees clockwise (wraps at 3).
                if orientation < 3:
                    new_orientation = orientation + 1
                else:
                    new_orientation = 0
                if move_available(shape, orientation, lane, move_y, old_rects, "turn"):
                    orientation = new_orientation
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    # === Reset All Parameters for new Rectangles ===
    if bottom:
        space = False
        for rect in rects:
            old_rects.append(rect)
        start_time = pygame.time.get_ticks()
        color = colors[randint(0, len(colors)-1)]
        shape = shapes[randint(0, len(shapes)-1)]
        orientation = orientations[randint(0,len(orientations)-1)] # orientation by how many times 90 degrees clockwise turned from original shape
        lane = get_random_lane(shape, orientation)
        move_y = starting_y(shape, orientation) - rec_size # -rec_size because before drawing it gets incremented
    # === Check for full rows, remove them and add points =====
    full_rows = get_full_rows(old_rects)
    old_rects = remove_row(old_rects, full_rows)
    points = update_score(full_rows, points)
    score = myfont2.render('Score: ' + str(points), False, BLACK)
    # ===== Check for any rects in the top row if yes then game over ====
    while game_over(old_rects):
        screen.fill(CYAN)
        pygame.draw.rect(screen, WHITE, (hor_margin,0,windowwidth-2*hor_margin,windowheight-vert_margin))
        draw_old_rects(screen, old_rects)
        screen.blit(textsurface,((windowwidth-game_over_size)/2,windowheight/3))
        screen.blit(score, (hor_margin, windowheight-vert_margin))
        # === Event Management ===
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        pygame.display.update()
    pygame.display.update()
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from bandit import *
# Generate the true mean-reward vectors for 'nb_tests' bandit problems; each
# action's mean is drawn uniformly from [min_mean, max_mean].
def generate_tests(nb_tests, nb_actions=10, min_mean=0, max_mean=100, variance=1):
    # NOTE: Python 2 code (xrange); gen_Q comes from the bandit module.
    tests = []
    for i in xrange(nb_tests):
        real_Qs = gen_Q(nb_actions, min_mean, max_mean, variance)
        tests.append(real_Qs)
    return tests
# Simulate several action-selection methods over several tests.
# - sel_action_methods is the list of methods to apply
# - tests is a set of tests generated with 'generate_tests'
# - tries sets the number of iterations for each test
# Returns the results as a list with one entry per method; each entry is a
# list of 'Result' objects, one per test.
def simulate_on_tests(tests, sel_action_methods, tries=100):
    results = []
    for sel_action_method in sel_action_methods:
        method_results = []
        for test_real_Qs in tests:
            method_result = simulate(test_real_Qs, tries, sel_action_method, updated_Q)
            method_results.append(method_result)
        results.append(method_results)
    return results
def cumulated_rewards(rewards):
    """Yield the running (cumulative) sum of *rewards*."""
    total = 0
    for reward_value in rewards:
        total += reward_value
        yield total
# Used with plot_methods_results to display how the reward evolves per
# iteration: for each method, the value at an iteration is the average of the
# rewards obtained at that iteration across all tests.
def method_rewards_average(tests_results, tests):
    rewards = list((result.rewards) for result in tests_results)
    # zip(*rewards) groups the rewards of all tests by iteration index.
    for iteration_rewards in zip(*rewards):
        average = mean(iteration_rewards)
        yield average
# Used with plot_methods_results to display, per iteration, the percentage of
# tests in which the best action was selected.
def method_percentage_sel_best_action(tests_results, tests):
    actions = list((result.actions for result in tests_results))
    # zip(*actions) groups the chosen actions of all tests by iteration index.
    for iteration_actions in zip(*actions):
        nb_good_actions = 0
        for test, action in zip(tests, iteration_actions):
            # The best action is the one with the highest true mean reward.
            best_action = max(test)
            best_action_index = test.index(best_action)
            if action == best_action_index:
                nb_good_actions += 1
        nb_good_actions = 100. * float(nb_good_actions) / len(tests)
        yield nb_good_actions
def mean(L):
    """Return the arithmetic mean of the non-empty sequence *L*."""
    total = sum(L)
    return total / len(L)
# Default curve colors assigned to methods, in order, by plot_methods_results.
COLORS = [
    'red', 'blue', 'grey', 'yellow', 'green'
]
# Draw a curve showing the evolution of a quantity computed from the
# simulation results; the quantity is produced by aggregate_func.
# tests : the tests the simulations were run on
# methods_names : the label associated with each method in the graph
# methods_tests_results : the per-method test results
def plot_methods_results(tests, methods_names, methods_tests_results, aggregate_func, xlabel='Iteration', ylabel='',colors=None):
    if colors is None:
        colors = COLORS
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # NOTE(review): `data` is rebound on every pass of the loop, so this
    # initial empty list is dead; only the in-loop value is ever used.
    data = []
    for method, tests_results, color in zip(methods_names, methods_tests_results, colors):
        aggregation = aggregate_func(tests_results, tests)
        data = list(aggregation)
        plt.plot(xrange(1, len(data)+1), data, color=color, label=method)
    plt.legend(loc='best')
    plt.show()
if __name__ == "__main__":
    # Compare five selection strategies on 500 random 10-armed bandit problems.
    tests = generate_tests(500, variance=10)
    epsilon1, epsilon2, tau, C = 0.9, 0.2, 20, 1000
    methods_names = [
        "Greedy epsilon ($\epsilon=%.2f$)" % (epsilon1,),
        "Greedy epsilon ($\epsilon=%.2f$)" % (epsilon2,),
        "Greedy",
        "Softmax ($\\tau=%.2f$)" % (tau,),
        "UCB ($C = %.2f$)" % (C,),
    ]
    methods = [
        epsilon_sel_action(lambda it:epsilon1),
        epsilon_sel_action(lambda it:epsilon2),
        greedy_sel_action,
        softmax_sel_action(lambda it:tau),
        # UCB exploration constant decays as C/(iteration+1).
        ucb_sel_action(lambda it:float(C)/(it+1)),
    ]
    results = simulate_on_tests(tests, methods)
    plot_methods_results(tests, methods_names, results, method_rewards_average, ylabel='Average reward')
    plot_methods_results(tests, methods_names, results, method_percentage_sel_best_action, ylabel='Percentage of selecting the best action')
|
import rclpy
from rclpy.node import Node
from machines.msg import Sensor,Scanner,MAMstatus
from std_msgs.msg import String
from pycomm3 import LogixDriver
import time
class MinimalPublisher(Node):
    """ROS2 node that commands the scanner on when the Z axis is in position
    and periodically reads the PLC's micrologix status words."""
    def __init__(self):
        super().__init__('onScanner')
        self.subscription = self.create_subscription(MAMstatus ,'status/zaxis',self.callback,10)
        # Bare reference keeps the subscription object alive (rclpy idiom).
        self.subscription
        self.pub = self.create_publisher(Scanner, 'status/scanner_on', 10)
        timer_period = 0.01 # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
    def callback(self,data):
        """When the Z axis is past 178.0, write the scanner-on words to the
        PLC. NOTE(review): the PLC IP is hard-coded and the 30 s sleep blocks
        this callback (and the 10 ms timer) while it waits -- confirm intent."""
        print("hello")
        c = LogixDriver('10.226.52.171')
        print(data.position)
        if data.position > 178.0:
            c.open()
            c.write(('Ros_test',1))
            c.write(('send_to_micrologix[0]', 32), ('send_to_micrologix[1]',0), ('send_to_micrologix[2]',55))
            print('scanner on')
            time.sleep(30)
            #c.write(('Ros_test',0))
            c.close()
        else:
            print("unable to on scanner")
    def timer_callback(self):
        """Poll the three micrologix status words from the PLC (every 10 ms).
        NOTE(review): publishing the Scanner message is currently commented out."""
        status = Scanner()
        c = LogixDriver('10.226.52.171')
        c.open()
        print('connected')
        #print(c.read('send_to_micrologix[0]'))
        #print(c.read('send_to_micrologix[1]'))
        #print(c.read('send_to_micrologix[2]'))
        status.data1 = c.read('send_to_micrologix[0]')[1]
        status.data2 = c.read('send_to_micrologix[1]')[1]
        status.data3 = c.read('send_to_micrologix[2]')[1]
        print(status.data1)
        #self.pub.publish(status)
        #print(status)
        c.close()
def main(args=None):
    """Initialise rclpy, spin the node until shutdown, then clean up."""
    rclpy.init(args=args)
    minimal_publisher = MinimalPublisher()
    rclpy.spin(minimal_publisher)
    minimal_publisher.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    main()
"""
Class to read individual match yaml files
Design:
Methods:
initialise
read yaml
parse yaml
insert into db
Call as follows:
def read_single_match(matchFile.yaml)
match = Match(matchFile.yaml)
match_info = match.metadata()
# add match metadata to match_master table
match_details = match.ballByBall()
# add match ball-by-ball to matches table
def read_all_matches(matchFolder)
for matchFile in matchFolder:
read_single_match(matchFile)
# add logging
"""
import yaml
class Match():
    """Reader for a single cricsheet match YAML file.

    Usage: m = Match(path); m.execute(); then m.meta / m.balldata hold the
    parsed metadata and (partial) ball-by-ball dictionaries.
    """
    def __init__(self, yaml_file):
        self.yaml_file = yaml_file
        # match id = file name minus directories ('/' or '\\') and extension
        self.match_id = self.yaml_file.split("/")[-1].split("\\")[-1].split(".")[0]
        self.raw_data = None   # unused for now (see read_yaml)
        self.meta = None       # match metadata dict, filled by execute()
        self.balldata = None   # ball-by-ball dict, filled by execute()
    def __str__(self):
        return f'Match_ID: {self.match_id}'
    def read_yaml(self, yaml_file):
        """Load and return the YAML contents of *yaml_file*.

        NOTE(review): a YAMLError is printed, not re-raised, in which case
        `data` is unbound and the return raises NameError.
        """
        # load yaml contents
        with open(yaml_file, 'r') as stream:
            try:
                data = (yaml.safe_load(stream))
            except yaml.YAMLError as exc:
                print(exc)
        #self.raw_data = data
        return data
    def execute(self):
        """Read the file and populate self.meta and self.balldata."""
        data = self.read_yaml(self.yaml_file)
        self.meta = self.parse_metadata(data)
        self.balldata = self.parse_ball_by_ball(data)
    def parse_metadata(self, data):
        """Flatten the 'info' section into a dict of match metadata."""
        # TODO: subclass of main Match class: MetaData Class. Store as attributes of the class instead of dict
        # TODO: list of required columns, read and add to dict in one go. likewise optional columns with None if not present
        meta = data['info']
        d_meta = dict()
        d_meta['match_id'] = self.match_id
        d_meta['match_type'] = meta['match_type']
        d_meta['date_start'] = meta['dates'][0] # first entry in dates field - first day of match if Test, match date otherwise
        d_meta['gender'] = meta['gender']
        d_meta['competition'] = meta['competition'] if 'competition' in meta else None
        d_meta['venue'] = meta['venue']
        d_meta['city'] = meta['city'] if 'city' in meta else None
        d_meta['overs'] = meta['overs'] if 'overs' in meta else None
        d_meta['team_a'], d_meta['team_b'] = meta['teams']
        d_meta['toss_decision'] = meta['toss']['decision']
        d_meta['toss_winner'] = meta['toss']['winner']
        if 'winner' in meta['outcome']:
            d_meta['outcome'] = 'won'
            d_meta['outcome_winner'] = meta['outcome']['winner']
            d_meta['outcome_by_type'], d_meta['outcome_by_value'] = next(iter(meta['outcome']['by'].items()))
        else:
            # No winner: record the result string (e.g. draw/no result).
            d_meta['outcome'] = meta['outcome']['result']
            d_meta['outcome_winner']= None
            d_meta['outcome_by_type']= None
            d_meta['outcome_by_value']= None
        # didn't parse: umpires, player of match, data_verison, created_date, revision_number
        return d_meta
    def parse_ball_by_ball(self, data):
        """Parse the 'innings' section (work in progress -- see TODOs).

        NOTE(review): d_ball is a single dict reused across innings, so only
        the last inning's 'innings'/'team' values survive the loop, and the
        per-delivery fields listed below are not yet populated.
        """
        # TODO: subclass of main Match class: MetaData Class. Store as attributes of the class instead of dict
        # TODO: list of required columns, read and add to dict in one go. likewise optional columns with None if not present
        balldata = data['innings']
        d_ball = dict()
        d_ball['match_id'] = self.match_id
        #d_ball['match_type'] = balldata['match_type']
        for inning in balldata:
            innings_name = list(inning.keys())[0]
            d_ball['innings'] = innings_name
            d_ball['team'] = inning[innings_name]['team']
            deliveries = inning[innings_name]['deliveries']
            #for delivery in deliveries:
            # for each delivery in deliveries:
            # over #
            # ball_in_over #
            # ball counter?
            # batsman
            # bowler
            # non-striker
            # runs_batsman
            # runs_extras
            # runs_total
            # extras_type
            # extras_value
            # batsman score counter?
            # innings score counter?
            # bowler balls bowled counter?
            # bowler runs conceded counter?
            # wicket (True/False)
            # wicket_player_out
            # wicket_fielder
            # wicket_kind
        return d_ball
#=============================================================================
# # from https://github.com/aadiuppal/cricsheet_db/blob/master/yaml_read.py
# def getBallDetails(self,myfile):
# stream = file(myfile,'r')
# balls_data = []
# matchid = get_id(myfile)
# data = yaml.load(stream)
# #print data.keys()
# for i in range(len(data['innings'])):
# innings = data['innings'][i].keys()[0]
# batting_team = data['innings'][i][innings]['team']
# for j in range(len(data['innings'][i][innings]['deliveries'])):
# ball_num = j+1
# over,batsman,bowler,non_striker,runs_batsman,runs_extras,runs_total,wicket_player,wicket_kind,wicket_fielder = self.get_ball_data(data['innings'][i][innings]['deliveries'][j])
# balls_data.append([matchid,innings,batting_team,ball_num,over,batsman,
# bowler,non_striker,runs_batsman,runs_extras,runs_total,wicket_player,wicket_kind,wicket_fielder])
# #print data['innings'][0]['1st innings']['deliveries'][0]
# #print data['innings'][0]['1st innings']['team']
# return balls_data
# def get_ball_data(self,dat):
# over = dat.keys()[0]
# batsman = dat[over]['batsman']
# bowler = dat[over]['bowler']
# non_striker = dat[over]['non_striker']
# runs_batsman = dat[over]['runs']['batsman']
# runs_extras = dat[over]['runs']['extras']
# runs_total = dat[over]['runs']['total']
# if 'wicket' in dat[over]:
# wicket_player = dat[over]['wicket']['player_out']
# wicket_kind = dat[over]['wicket']['kind']
# if 'fielders' in dat[over]['wicket']:
# wicket_fielder = dat[over]['wicket']['fielders'][0]
# else:
# wicket_fielder = None
# else:
# wicket_fielder,wicket_kind,wicket_player = None,None,None
# return [over,batsman,bowler,non_striker,runs_batsman,runs_extras,runs_total,wicket_player,wicket_kind,wicket_fielder]
# #=============================================================================
|
from . import OutputFormatter
from corpus_cleaner.components.cleaner_component import CleanerComponent
from corpus_cleaner.document import Document
import argparse
from typing import Iterable
from typing import Tuple, Optional
import os
class OutputFormatterMapper(CleanerComponent):
    """Pipeline component that writes documents with the configured output
    formatter and optionally records a per-file checkpoint marker."""
    def __init__(self, args: argparse.Namespace, output_formatter: OutputFormatter,
                 write_checkpoint_path: Optional[str] = None):
        super().__init__(args)
        self.output_formatter = output_formatter
        # Directory where checkpoint marker files are created (None = disabled).
        self.write_checkpoint_path = write_checkpoint_path
    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        # No component-specific CLI arguments.
        pass
    @staticmethod
    def check_args(args: argparse.Namespace):
        # No component-specific argument validation.
        pass
    def _write_checkpoint(self, e: str):
        # Touch an empty marker file named after the processed file;
        # '/' is replaced with '!' to make the name filesystem-safe.
        with open(os.path.join(self.write_checkpoint_path, e.replace('/', '!')), 'w') as f:
            pass
    def __call__(self, documents: Iterable[Document]) -> Tuple[int, Optional[Tuple], Optional[str]]:
        """Write all *documents* and return the last document's filename.

        NOTE(review): the declared return annotation does not match the
        actual return value (a single filename) -- confirm against callers.
        """
        self.output_formatter.init_writing()
        filename = None
        for document in documents:
            self.output_formatter._write_document(document)
            filename = document.filename
        self.output_formatter.end_writing()
        if self.write_checkpoint_path:
            self._write_checkpoint(filename)
        return filename
|
import numpy as np
from scipy.io import wavfile
def read_wav(filename):
    """Read a WAV file; return (sample_rate, samples scaled to floats)."""
    fs, samples = wavfile.read(filename)
    return fs, samples2float(samples)
def samples2float(data):
    """Scale integer PCM samples to floats by the dtype's maximum value."""
    # divide by the largest number for this data type
    full_scale = np.iinfo(data.dtype).max
    return 1. * data / full_scale
def write_wav(fs, data, filename):
    """Write float samples to *filename* as 32-bit integer PCM at rate fs."""
    wavfile.write(filename, fs, samples2int(data))
def samples2int(data):
    """Scale float samples to the full int32 range for PCM output."""
    scaled = data * np.iinfo(np.int32).max
    return np.array(scaled, dtype=np.int32)
def db(x):
    """Convert an amplitude ratio to decibels (20*log10)."""
    return 20 * np.log10(x)
def lvl(db):
    """Convert decibels back to an amplitude ratio (inverse of db()).
    Note: the parameter shadows the db() function name inside this scope."""
    return 10**(db/20.)
def rms(x):
    """Root-mean-square of the samples along axis 0."""
    mean_square = np.sum(x**2, axis=0) / len(x)
    return np.sqrt(mean_square)
def cut(fs, t0, length):
    """Slice selecting samples from t0 to t0+length (seconds) at rate fs."""
    start = int(fs * t0)
    stop = int(fs * (t0 + length))
    return slice(start, stop)
|
# This program calculates the greatest common divisor (GCD) of three
# numbers entered by the user.
num1 = int(input('Giv me the first number: '))
num2 = int(input('Giv me the second number: '))
num3 = int(input('Giv me the third number: '))
try:
    # The GCD can be at most the smallest of the three numbers, so start
    # there and count down until a common divisor is found.
    if num1 < num2 and num1 < num3:
        mcd = num1
    elif num2 < num1 and num2 < num3:
        mcd = num2
    else:
        mcd = num3
    while True:
        if num1%mcd==0 and num2%mcd==0 and num3%mcd==0:
            print('The mcd is',mcd)
            break
        else:
            mcd -= 1
except ZeroDivisionError:
    # Reached when the candidate divisor counts down to 0 (e.g. a 0 input).
    print('There was an error when dividing by 0')
"""college_second_hand URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,re_path,include
from mainsite import views
# Each view is registered twice (with and without a trailing slash) instead of
# relying on APPEND_SLASH redirects.  The second, duplicate registration of
# 'pay'/'pay/' that used to sit between change_password and pay_finish was
# removed: Django matches the first entry, so the duplicates were dead code.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('login', views.login),
    path('login/', views.login),
    path('logout', views.logout),
    path('logout/', views.logout),
    path('', views.index),
    path('userinfo',views.userinfo),
    path('userinfo/',views.userinfo),
    path('accounts/register', views.register),
    path('accounts/register/', views.register),
    path('myorder', views.myorder),
    path('myorder/', views.myorder),
    path('mymark', views.mymark),
    path('mymark/', views.mymark),
    path('pay', views.pay),
    path('pay/', views.pay),
    re_path(r'^product/(\d+)$', views.product, name = "product-url"),
    path('order_add', views.order_add),
    path('order_add/', views.order_add),
    path('order_remove', views.order_remove),
    path('order_remove/', views.order_remove),
    path('product_add', views.product_add),
    path('product_add/', views.product_add),
    path('myproduct', views.myproduct),
    path('myproduct/', views.myproduct),
    re_path(r'^product_modify/(\d+)$', views.product_modify),
    path('mark_add', views.mark_add),
    path('mark_add/', views.mark_add),
    path('mark_remove', views.mark_remove),
    path('mark_remove/', views.mark_remove),
    path('product_remove', views.product_remove),
    path('product_remove/', views.product_remove),
    path('search', views.search),
    path('search/', views.search),
    path('profile_modify', views.profile_modify),
    path('profile_modify/', views.profile_modify),
    path('change_password', views.change_password),
    path('change_password/', views.change_password),
    path('pay_finish', views.pay_finish),
    path('pay_finish/', views.pay_finish),
    path('pay_cancel', views.pay_cancel),
    path('pay_cancel/', views.pay_cancel),
    path('product_send', views.product_send),
    path('product_send/', views.product_send),
    path('order_receive', views.order_receive),
    path('order_receive/', views.order_receive),
    re_path(r'^comment/(\d+)$', views.comment),
    path('check', views.check),
    path('check/', views.check),
    path('checkall', views.checkall),
    path('checkall/', views.checkall),
    path('college_authenticate', views.college_authenticate),
    path('college_authenticate/', views.college_authenticate),
    path('college_authenticate_confirm', views.college_authenticate_confirm),
    path('college_authenticate_confirm/', views.college_authenticate_confirm),
    path('bind_payment', views.bind_payment),
    path('bind_payment/', views.bind_payment),
    path('bind_payment_confirm', views.bind_payment_confirm),
    path('bind_payment_confirm/', views.bind_payment_confirm),
]
|
# Generated by Django 3.2 on 2021-06-04 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Contact model (an enquiry form with preferred dates/times,
    student/parent names and contact details)."""
    dependencies = [
        ('hp', '0002_alter_blog_thumbnail'),
    ]
    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_date', models.DateField(help_text='※', verbose_name='ご希望日(第一希望)')),
                ('first_time', models.DateTimeField(help_text='※', verbose_name='ご希望時間帯(第一希望)')),
                ('second_date', models.DateField(blank=True, null=True, verbose_name='ご希望日(第二希望)')),
                ('second_time', models.DateTimeField(blank=True, help_text='※', null=True, verbose_name='ご希望時間帯(第二希望)')),
                ('parent_name', models.CharField(blank=True, help_text='※', max_length=50, null=True, verbose_name='保護者氏名')),
                ('student_name', models.CharField(help_text='※', max_length=50, verbose_name='生徒名')),
                ('birthday', models.DateField(help_text='※', verbose_name='生年月日')),
                ('phone_number', models.CharField(help_text='※', max_length=12, verbose_name='電話番号')),
                ('email', models.EmailField(help_text='※', max_length=254, verbose_name='メールアドレス')),
                ('text', models.CharField(blank=True, max_length=200, null=True, verbose_name='ご要望・ご質問')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
from .main import DistanceRaster
from .utils import rasterize, export_raster
|
"""
Classes from the 'DocumentManager' framework.
"""
# rubicon is only usable when an Objective-C runtime is present; otherwise
# fall back to a stub so this module can still be imported anywhere.
# NOTE(review): ValueError is what rubicon raises here on import — confirm
# this also covers the missing-package case (that would be ImportError).
try:
    from rubicon.objc import ObjCClass
except ValueError:
    def ObjCClass(name):
        # Stub: no Objective-C runtime available.
        return None
def _Class(name):
    """Return the Objective-C class *name*, or None when it cannot be resolved."""
    try:
        return ObjCClass(name)
    except NameError:
        # Raised when the class is missing from the runtime.
        return None
# Lazy lookups of the DocumentManager framework's Objective-C classes.
# Each name is None when the class is absent from the current runtime.
DOCConcreteLocation = _Class("DOCConcreteLocation")
DOCUserInterfaceStateStore = _Class("DOCUserInterfaceStateStore")
DOCSmartFolderDatabase = _Class("DOCSmartFolderDatabase")
DOCFrecencyBasedEvent = _Class("DOCFrecencyBasedEvent")
DOCHotFolderEvent = _Class("DOCHotFolderEvent")
DOCSmartFolderHit = _Class("DOCSmartFolderHit")
DOCTransitionUtils = _Class("DOCTransitionUtils")
UIDocumentBrowserActionDescriptor = _Class("UIDocumentBrowserActionDescriptor")
UIDocumentBrowserTransitionController = _Class("UIDocumentBrowserTransitionController")
DOCDocumentSource = _Class("DOCDocumentSource")
DOCSearchingDocumentSource = _Class("DOCSearchingDocumentSource")
DOCSourceSearchingContext = _Class("DOCSourceSearchingContext")
DOCErrorStore = _Class("DOCErrorStore")
DOCActivity = _Class("DOCActivity")
DOCDestructiveActivity = _Class("DOCDestructiveActivity")
DOCWeakProxy = _Class("DOCWeakProxy")
DOCItem = _Class("DOCItem")
DOCPromisedItem = _Class("DOCPromisedItem")
DOCExtensionInterface = _Class("DOCExtensionInterface")
DOCRemoteContext = _Class("DOCRemoteContext")
DOCKeyCommandRegistry = _Class("DOCKeyCommandRegistry")
DOCKeyboardFocusManager = _Class("DOCKeyboardFocusManager")
UIDocumentBrowserAction = _Class("UIDocumentBrowserAction")
DOCRemoteBarButton = _Class("DOCRemoteBarButton")
DOCRemoteUIBarButtonItem = _Class("DOCRemoteUIBarButtonItem")
DOCAppearance = _Class("DOCAppearance")
DOCSymbolicLocationURLWrapper = _Class("DOCSymbolicLocationURLWrapper")
DOCViewServiceErrorView = _Class("DOCViewServiceErrorView")
DOCRemoteBarButtonTrackingView = _Class("DOCRemoteBarButtonTrackingView")
DOCViewServiceErrorViewController = _Class("DOCViewServiceErrorViewController")
DOCDocBrowserVC_UIActivityViewController = _Class(
    "DOCDocBrowserVC_UIActivityViewController"
)
UIDocumentBrowserViewController = _Class("UIDocumentBrowserViewController")
DOCExportModeViewController = _Class("DOCExportModeViewController")
DOCRemoteViewController = _Class("DOCRemoteViewController")
DOCTargetSelectionBrowserViewController = _Class(
    "DOCTargetSelectionBrowserViewController"
)
# NOTE(review): API credentials are committed in plain text here; consider
# loading them from environment variables or an untracked config file, and
# rotating these keys since they have been exposed.
# OpenWeatherMap API Key
weather_api_key = "8543a6e0d86c0cc0975180820e40738f"
# Google API Key
g_key = "AIzaSyCVBSPIQJZEL1xB1quNlF08RzNFepamZts"
|
#!/usr/local/bin/python3.6.5
# -*- coding: utf-8 -*-
# @Time : 2018/7/26 PM6:48
# @Author : L
# @Email : L862608263@163.com
# @File : jiandan.py
# @Software: PyCharm
import urllib.request
import urllib.response
import re
import uuid
import base64
import datetime
""" 使用代理
import random
@staticmethod
def url_open(url, data=None):
try:
proxy_collection = [{'http': '39.137.77.66:8080'}, {'http': '43.239.79.55:3128'},
{'http': '185.22.174.65:1448'}, {'http': '117.191.11.78:8080'}]
proxy = random.choice(proxy_collection)
proxy_support = urllib.request.ProxyHandler(proxy)
opener = urllib.request.build_opener(proxy_support)
print(proxy)
USER_AGENTS = random.choice([
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
])
print(USER_AGENTS)
opener.addheaders = [('User-Agent', USER_AGENTS)]
response = opener.open(url, data=data)
return response.read()
except Exception as err:
print(err)
return None
"""
# https://baike.baidu.com/item/robots/5243374?fr=aladdin
# robots.txt 文件 对搜索引擎爬虫的建议性限制吧(君子协议, 该爬的还是照样爬)
class JianDanImage:
    """Crawler that downloads images from jandan.net's picture boards,
    optionally filtered by their OO/XX (up/down) vote ratio."""
    __ooxx_url = "http://jandan.net/ooxx/"
    __pic_url = "http://jandan.net/pic/"
    # Running count of download attempts (incremented for failures too).
    download_counter = 0
    @staticmethod
    def url_open(url, data=None):
        """Fetch *url* with a desktop Safari User-Agent.

        Returns the raw response bytes, or None on any error."""
        try:
            request = urllib.request.Request(url)
            request.add_header('User-Agent',
                               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) '
                               'Version/11.1 Safari/605.1.15')
            response = urllib.request.urlopen(request, data)
            return response.read()
        except Exception as e:
            print("url_open error", e)
            return None
    def fetch_image(self, from_page=1, to_page=300, filter_score=0, store_path="/Users/l/Desktop/jiandan"):
        """Crawl pages [from_page, to_page], keep images whose OO/(OO+XX)
        ratio exceeds *filter_score* (clamped to [0, 1]) and save them
        under *store_path*."""
        # Page keys are base64("YYYYMMDD-<page>#comments") — jandan's URL scheme.
        page_list = [self.__pic_url + str(base64.b64encode(
            ("%s-%s#comments" % (datetime.datetime.now().strftime('%Y-%m-%d').replace("-", ""), str(idx))).encode(
                'utf-8')), 'utf-8')
            for idx in range(from_page, to_page + 1)]
        self.download_counter = 0
        # Clamp the score threshold into [0, 1].
        if filter_score < 0:
            limit = 0
        elif filter_score > 1:
            limit = 1
        else:
            limit = filter_score
        current_page_number = 0
        for idx, url in enumerate(page_list):
            content = self.url_open(url)
            if content is None:
                continue
            print("第:" + str(idx + 1) + "页 链接:" + url)
            html_str = content.decode('utf-8')
            # Stop once the site reports the same page twice (ran past the end).
            load_page_number = int(re.findall('"current-comment-page">\[(.*?)]</span>', html_str)[0])
            if current_page_number == load_page_number:
                return
            current_page_number = load_page_number
            support_regular = 'OO</a> \[<span>(.*?)</span>]'
            oppose_regular = 'XX</a> \[<span>(.*?)</span>]'
            # NOTE(review): zero-vote entries are dropped from each list
            # independently, which can misalign OO/XX pairs (and the zip with
            # img_hash_list below) — verify against real pages.
            support_list = [float(idx) for idx in re.findall(support_regular, html_str) if float(idx) != 0.0]
            oppose_list = [float(idx) for idx in re.findall(oppose_regular, html_str) if float(idx) != 0.0]
            score_list = list(map(lambda x, y: x / (x + y), support_list, oppose_list))
            # img_hash_list = list(re.findall('img-hash">(.*?)</span>', html_str))
            img_hash_list = list(re.findall('wx3(.*?)"', html_str))
            # print(img_hash_list)
            if limit != 0:
                tuple_list = list(zip(img_hash_list, score_list))
                img_hash_list = [idx[0] for idx in list(filter(lambda x: x[1] > limit, tuple_list))]
            # Rebuild full image URLs and skip the "mw600" thumbnails.
            img_url = ["http://wx3" + idx for idx in img_hash_list]
            img_url = list(filter(lambda index: "mw600" not in index, img_url))
            for element in img_url:
                # pass
                print(img_url)
                self.download_img(element, store_path)
    def download_img(self, url, store_path):
        # TODO: maybe parallelise downloads with threads some day?
        data = self.url_open(url)
        if data is None:
            # NOTE(review): failed downloads still bump the counter and log
            # "writing" — looks unintended; confirm before trusting the count.
            self.download_counter += 1
            print("writing - {0}".format(self.download_counter))
            return
        # Name the file with a random UUID, keeping the original extension.
        img_type = "." + url.split(".")[-1]
        img_path = store_path + "/" + str(uuid.uuid4()) + img_type
        print(img_path)
        with open(img_path, "wb") as file:
            self.download_counter += 1
            print("writing - {0}".format(self.download_counter))
            file.write(data)
    def decode_hash_value(self, hash_value):
        """Decode a base64 image hash to text."""
        return self.base64_decode(hash_value)
    @staticmethod
    def base64_decode(input_value):
        """Base64-decode *input_value* and return it as a UTF-8 string."""
        return str(base64.b64decode(input_value), "utf-8")
# 下面这些都是源js代码,转换成py的,校验图片的(对于这次爬虫来说,作用并不大)
# 可以研究一下实现过程
# 固定的值
# t = "cPUY9gzQmFTubb1z4qdRFw7PQaN63Kgg"
# t = self.md5_encryption(t)
#
# p = self.md5_encryption(t[:16])
# o = self.md5_encryption(t[16:])
#
# r = 4
#
# m = hash_value[:r]
#
# c = p + self.md5_encryption(p + m)
#
# hash_value = hash_value[r:]
#
# l = self.base64_decode(hash_value)
#
# k = list(range(0, 256))
#
# b = [ord(c[idx % len(c)]) for idx in k]
#
# g = h = 0
#
# for _ in list(range(0, 256)):
# g = (g + k[h] + b[h]) % 256
# temp = k[h]
# k[h] = k[g]
# k[g] = temp
# h += 1
# print(k)
#
# u = ""
#
# l = list(l)
#
# q = g = h = 0
#
# for _ in range(0, len(l)):
# q = (q + 1) % 256
# g = (g + k[q]) % 256
# tmp = k[q]
# k[q] = k[g]
# k[g] = tmp
# u += chr(ord(l[h]) ^ (k[(k[q] + k[g]) % 256]))
# h += 1
#
# u = u[26:]
# @staticmethod
# def md5_encryption(input_value):
# md5_tool = hashlib.md5()
# md5_tool.update(input_value.encode("utf-8"))
# return md5_tool.hexdigest()
if __name__ == "__main__":
    # Run a full crawl with the default page range, score filter and path.
    image = JianDanImage()
    image.fetch_image()
|
import palmerpenguins
def get():
    """Return a slice of the Palmer Penguins dataset (placeholder data).

    Replace this with your own code to import your project's data.
    """
    wanted = [
        "bill_length_mm",
        "bill_depth_mm",
        "flipper_length_mm",
        "body_mass_g",
        "sex",
        "species",
    ]
    penguins = palmerpenguins.load_penguins()
    return penguins[wanted]
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, UpdateView,DeleteView
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Breed, Cat
class BreedList(LoginRequiredMixin, View):
    """List every Breed (login required)."""
    def get(self, request):
        bl = Breed.objects.all()
        ctx = {'breed_list': bl}
        return render(request,'breed/breed_list.html', ctx)
class BreedCreate(LoginRequiredMixin, CreateView):
    """Create a Breed (login required).

    LoginRequiredMixin must come BEFORE CreateView in the MRO so its
    dispatch() runs first; the original order (CreateView first) left the
    view accessible to anonymous users.
    """
    model = Breed
    fields = '__all__'
    success_url = reverse_lazy('cats:all')
class BreedUpdate(LoginRequiredMixin, UpdateView):
    """Edit an existing Breed (login required)."""
    model = Breed
    fields = '__all__'
    success_url = reverse_lazy('cats:all')
class BreedDelete(LoginRequiredMixin, DeleteView):
    """Confirm-and-delete view for Breed (login required)."""
    model = Breed
    # NOTE(review): `fields` is a form-view option; DeleteView does not use
    # it — confirm and consider removing.
    fields = '__all__'
    success_url = reverse_lazy('cats:all')
##Cat View
class CatList(LoginRequiredMixin, View):
    """List every Cat, plus the total breed count for the template."""
    def get(self, request):
        bc = Breed.objects.all().count()
        cl = Cat.objects.all()
        ctx = {'breed_count': bc, 'cats_list': cl}
        return render(request,'cats/cats_list.html', ctx)
class CatCreate(LoginRequiredMixin, CreateView):
    """Create a Cat (login required).

    LoginRequiredMixin must come BEFORE CreateView so its dispatch() runs
    first; the original order (CreateView first) left the view accessible
    to anonymous users.
    """
    model = Cat
    fields = '__all__'
    success_url = reverse_lazy('cats:all')
class CatUpdate(LoginRequiredMixin, UpdateView):
    """Edit an existing Cat (login required)."""
    model = Cat
    fields = '__all__'
    success_url = reverse_lazy('cats:all')
class CatDelete(LoginRequiredMixin, DeleteView):
    """Confirm-and-delete view for Cat (login required)."""
    model = Cat
    # NOTE(review): `fields` is unused by DeleteView — confirm and consider
    # removing.
    fields = '__all__'
    success_url = reverse_lazy('cats:all')
import pytest
def test_login(client):
    """An existing user sees the login page and can sign in."""
    response = client.get('/')
    assert b'<h2>Log in</h2>' in response.data
    assert b'New? sign up' in response.data
    # Credentials must be POSTed — the Flask test client ignores form data on
    # a GET, so the original get() never actually logged in.  Follow the
    # redirect so the landing page is what gets asserted on.
    response = client.post('/', data={'email': 'Thommond@protonmail.com',
                                      'password': 'PASSWORD'},
                           follow_redirects=True)
    assert b'<h2>Welcome to Simple Todo</h2>' in response.data
    client.get('/logout')
def test_register(client):
    """A new user can be created and can then log in with those credentials."""
    #Testing to see if a new user can be created and log in
    response = client.get('/register')
    assert b'<h2>Sign Up</h2>' in response.data
    assert b'Already a user? Log In' in response.data
    # Register, following the redirect back to the login page.
    response = client.post('/register', data={'email': 'joe@gmail.com',
                                            'password': 'qwerty'}, follow_redirects=True)
    assert b'<h2>Log In</h2>' in response.data
    # Log in with the account that was just created.
    response = client.post('/', data={'email': 'joe@gmail.com',
                                    'password': 'qwerty'}, follow_redirects=True)
    assert b'<h2>Welcome to Simple Todo</h2>' in response.data
    client.get('/logout')
|
# region headers
# escript-template v20190605 / stephane.bourdeaud@nutanix.com
# * author: jose.gomez@nutanix.com
# * version: 20200214
# task_type: Set Variable
# task_name: AwxAddHost
# description: Add host to AWX inventory
# endregion
# region capture Calm variables
# * Capture variables here. This makes sure Calm macros are not referenced
# * anywhere else in order to improve maintainability.
awx_username = '@@{awx.username}@@'  # AWX account name (Calm macro)
awx_password = '@@{awx.secret}@@'  # AWX account password (Calm secret)
awx_api = '@@{awx_ip}@@'  # AWX endpoint host/IP
awx_inventory_id = int('@@{awx_inventory_id}@@')  # numeric target inventory id
host_ip = '@@{address}@@'  # this VM's address, registered as the host name
# endregion
# region functions
def make_api_call(url,method,username,username_secret,payload):
    """Makes an API call to an external API.

    NOTE(review): urlreq, json and exit come from the Calm eScript sandbox,
    not from imports — this function only runs inside that runtime.
    Args:
        url: The URL for the external REST API endpoint.
        method: The REST method to use.
        username: The API user name.
        username_secret: The API user name password.
        payload: The JSON payload to include in the call.
    Returns:
        The API response.
    """
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    # The loop body always returns or exits, so this executes at most once.
    while True:
        print("Making a {} API call to {}".format(method, url))
        resp = urlreq(
            url,
            verb=method,
            auth='BASIC',
            user=username,
            passwd=username_secret,
            params=json.dumps(payload),
            headers=headers,
            verify=False
        )
        # deal with the result/response
        if resp.ok:
            return resp
        else:
            # Dump everything useful for debugging, then abort the task.
            print("Request failed")
            print("Headers: {}".format(headers))
            print("Payload: {}".format(json.dumps(payload)))
            print('Status code: {}'.format(resp.status_code))
            print('Response: {}'.format(
                json.dumps(
                    json.loads(resp.content),
                    indent=4)))
            exit(1)
def awx_add_host(api,username,password,inventory_id,host_ip,host_variables='',host_enabled=True):
    """Register *host_ip* in the AWX inventory *inventory_id*.

    Prints awx_host_id=<id> and exits 0 on success; exits 1 on failure.
    The two Python 2 `print` statements in the success/failure branches
    were converted to print() calls: the rest of the file already uses
    function-call syntax, and the old statements were a SyntaxError under
    Python 3.
    """
    # region prepare api call
    # Form method, url and headers for the API call
    api_port = "80"
    api_endpoint = "/api/v2/hosts/"
    url = "http://{}:{}{}".format(
        api,
        api_port,
        api_endpoint
    )
    method = "POST"
    # endregion
    # Compose the json payload
    payload = {
        'variables': host_variables,
        'name': host_ip,
        'enabled': host_enabled,
        'inventory': inventory_id
    }
    r = make_api_call(
        url,
        method,
        username,
        password,
        payload
    )
    if r.ok:
        resp = json.loads(r.content)
        # Expose the new host id as a Calm variable via stdout.
        print('awx_host_id={0}'.format(resp['id']))
        exit(0)
    else:
        print('Post request failed: {}'.format(r.content))
        exit(1)
# endregion
# Entry point: register this VM with AWX using the captured Calm variables.
awx_add_host(awx_api,awx_username,awx_password,awx_inventory_id,host_ip)
import subprocess
debug = True
def get_names(num_workers=1):
    """Return the worker VM names: worker1 .. workerN."""
    return ["worker" + str(idx) for idx in range(1, num_workers + 1)]
def launch_instances(names):
    """Launch one multipass VM per name and mount the shared worker dir."""
    commands = []
    for name in names:
        commands += [
            "multipass launch -d 25G -m 2G -n " + name + " --cloud-init cloud-config.yaml",
            "multipass mount /home/xekchansky/kursach/worker_dir " + name,
        ]
    for cmd in commands:
        if debug: print(cmd)
        proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        out, _err = proc.communicate()
        if debug: print(out.decode("utf-8"))
    if debug: print("Done")
def get_ips():
    """Parse `multipass list` output into {instance_name: ip}.

    NOTE(review): assumes a fixed 4-token header followed by exactly 6
    whitespace-separated tokens per row — verify against the installed
    multipass version's table layout.
    """
    if debug: print("getting ips")
    process = subprocess.Popen("multipass list".split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    # Skip the header tokens, then read name (col 0) and IP (col 2) per row.
    outs = output.split()[4:]
    IP_dict = dict()
    for i in range(0, len(outs) // 6):
        IP_dict[outs[6*i].decode("utf-8")] = outs[6*i+2].decode("utf-8")
    return IP_dict
def add_known_hosts(names, ips):
    """Collect the workers' ssh host keys into ~/.ssh/known_hosts, then smoke-test ssh.

    NOTE(review): the redirection uses ">" so known_hosts is overwritten, not
    appended to, and the ssh check below only runs against the last `name`
    left over from the loop — confirm both are intended.
    """
    #this somehow doesn't work (enterprets >> as adress)
    if debug: print("adding ips to known hosts")
    command = "ssh-keyscan -t rsa "
    for name in names:
        command += ips[name] + ' '
    command += "> ~/.ssh/known_hosts"
    if debug: print(command)
    # shell=True so the "> file" redirection is handled by the shell.
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    output, error = process.communicate()
    if debug: print(output.decode("utf-8"))
    if debug: print("checking ssh")
    command = "ssh " + str(ips[name]) + " ls"
    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    if debug: print(output.decode("utf-8"))
    #assert output.decode("utf-8") == "kursach"
    if debug: print("Done")
def delete_instance(name):
    """Delete one multipass instance by name."""
    if debug: print("deleting instance: ", name)
    cmd = "multipass delete " + name
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    out, _err = proc.communicate()
    if debug: print(out.decode("utf-8"))
def delete_instances(names):
    """Delete every named instance, then purge multipass state."""
    if debug: print("deleting instances")
    for worker in names:
        delete_instance(worker)
    proc = subprocess.Popen("multipass purge".split(), stdout=subprocess.PIPE)
    out, _err = proc.communicate()
    if debug: print(out.decode("utf-8"))
def get_libs(names, ips, CUDA=False, local=False):
    """Install the ML stack (optionally CUDA/cuDNN, then TensorFlow and
    Horovod) on every worker over ssh.

    Args:
        names: worker instance names.
        ips: {name: ip} mapping from get_ips().
        CUDA: also install the CUDA toolkit and the GPU TensorFlow build.
        local: use the local .deb CUDA installer instead of the network repo.
    """
    if debug: print("downloading libraries")
    for name in names:
        if debug: print(name)
        if debug: print(ips[name])
        #install CUDA
        if CUDA:
            if local:
                commands = ["wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin",
                            "sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600",
                            "wget https://developer.download.nvidia.com/compute/cuda/11.4.2/local_installers/cuda-repo-ubuntu2004-11-4-local_11.4.2-470.57.02-1_amd64.deb",
                            "sudo dpkg -i cuda-repo-ubuntu2004-11-4-local_11.4.2-470.57.02-1_amd64.deb",
                            "sudo apt-key add /var/cuda-repo-ubuntu2004-11-4-local/7fa2af80.pub",
                            "sudo apt-get update",
                            "sudo apt-get install cuda",
                            "cd /usr/local/cuda/samples/0_Simple/matrixMul",
                            "sudo make",
                            "cd",
                            "cd kursach/worker_dir",
                            "sudo dpkg -i libcudnn8_8.2.4.15-1+cuda11.4_amd64.deb"]
            else: #network
                commands = ["wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin",
                            "sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600",
                            "sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub",
                            'sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /"',
                            "sudo apt-get update",
                            "sudo apt-get -y install cuda",
                            #"cd /usr/local/cuda/samples/0_Simple/matrixMul",
                            #"sudo make",
                            #"cd",
                            #"cd kursach/worker_dir",
                            "sudo dpkg -i kursach/worker_dir/libcudnn8_8.2.4.15-1+cuda11.4_amd64.deb"
                            ]
            for command in commands:
                if debug: print(command)
                # shell=True: the whole "ssh <ip> <command>" line is one shell string.
                process = subprocess.Popen("ssh " + str(ips[name]) + " " +command, stdout=subprocess.PIPE, shell=True)
                #process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
                output, error = process.communicate()
                if debug: print(output.decode("utf-8"))
        #install tensorflow
        command = "ssh " + str(ips[name]) + " sudo pip install tensorflow"
        if CUDA: command += "-gpu"
        if debug: print(command)
        process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        if debug: print(output.decode("utf-8"))
        #install horovod
        command = "ssh " + ips[name] + " HOROVOD_WITH_TENSORFLOW=1 HOROVOD_WITH_MPI=1 HOROVOD_GPU=1" \
                  " sudo pip install horovod"
        if debug: print(command)
        process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        if debug: print(output.decode("utf-8"))
def run_horovod(names, ips):
    """Build the horovodrun launch command and print it when debug is on
    (the command is not executed)."""
    host_spec = "".join(str(ips[worker]) + ":1 " for worker in names)
    command = ("horovodrun -np " + str(len(names)) + " -H "
               + host_spec + "python3 kursach/worker_dir/main.py")
    if debug: print(command)
# --- driver: flip `delete` to 1 to tear the workers down instead of
# provisioning them end-to-end (launch, ssh keys, libraries, horovod cmd).
delete = 0
names = get_names(1)
if delete:
    delete_instances(names)
else:
    launch_instances(names)
    ips = get_ips()
    add_known_hosts(names, ips)
    get_libs(names, ips, CUDA=True, local=False)
    run_horovod(names, ips)
|
import sqlite3
# Connect (creating the file if needed) and ensure the clientes table exists.
# IF NOT EXISTS makes the script re-runnable: the original crashed with
# "table clientes already exists" on a second run.
conexion = sqlite3.connect("db/tkinter.db")
print("Conectado exitosamente")
cursor = conexion.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS clientes(id integer PRIMARY KEY, nombre text, apellido text)")
conexion.commit()  # persist the schema change
conexion.close()  # release the connection now that setup is done
from flask_restful import Resource, reqparse, abort, fields, marshal_with
from models.cliente import ClienteModel
# Parser for POST /cliente: every field is mandatory on creation.
cliente_post_args = reqparse.RequestParser()
cliente_post_args.add_argument("nome", type = str, help = "Nome do cliente precisa ser preenchido!", required = True)
cliente_post_args.add_argument("razao_social", type = str, help = "Razão social do cliente precisa ser preenchido!", required = True)
cliente_post_args.add_argument("cnpj", type = str, help = "CNPJ do cliente precisa ser preenchido!", required = True)
# Parser for PATCH: all fields optional; only supplied ones are applied.
cliente_patch_args = reqparse.RequestParser()
cliente_patch_args.add_argument("nome", type = str, help = "Nome do cliente")
cliente_patch_args.add_argument("razao_social", type = str, help = "Razão social")
cliente_patch_args.add_argument("cnpj", type = str, help = "CNPJ do cliente")
cliente_patch_args.add_argument("data_inclusao",type = str, help = "Data de inclusão")
# Marshalling schema used to serialize ClienteModel instances in responses.
resource_fields = {
    'codigo' : fields.Integer,
    'nome' : fields.String,
    'razao_social' : fields.String,
    'cnpj' : fields.String,
    'data_inclusao' : fields.DateTime
}
class Cliente(Resource):
    """Collection endpoint: create a client (POST) or list all clients (GET)."""
    @marshal_with(resource_fields)
    def post(self):
        """Create a client, stamping it with today's date; returns 201."""
        hoje = ClienteModel.pega_data_atual()
        args = cliente_post_args.parse_args()
        cliente = ClienteModel(nome = args['nome'], razao_social = args['razao_social'], cnpj = args['cnpj'], data_inclusao = hoje)
        cliente.salva_cliente()
        return cliente, 201 #Created
    def get(self):
        """Return every client as plain JSON (no field marshalling)."""
        return {'clientes': [cliente.json() for cliente in ClienteModel.query.all()]}
class Cliente_com_codigo(Resource):
    """Item endpoint keyed by *codigo*: fetch, partial-update or delete one client."""
    @marshal_with(resource_fields)
    def get(self, codigo):
        """Return the client with this codigo, or 404."""
        result = ClienteModel.verifica_codigo(codigo)
        if not result:
            abort(404, message = "Cliente não encontrado")
        return result
    @marshal_with(resource_fields)
    def patch(self, codigo):
        """Update only the fields present in the request body; 404 if absent."""
        args = cliente_patch_args.parse_args()
        result = ClienteModel.verifica_codigo(codigo)
        if not result:
            abort(404, message = "Cliente não encontrado")
        if args['nome']:
            result.nome = args['nome']
        if args['razao_social']:
            result.razao_social = args['razao_social']
        if args['cnpj']:
            result.cnpj = args['cnpj']
        if args['data_inclusao']:
            result.data_inclusao = args['data_inclusao']
        result.salva_cliente()
        return result
    def delete(self, codigo):
        """Delete the client with this codigo; 404 if absent, 204 on success."""
        result = ClienteModel.verifica_codigo(codigo)
        if not result:
            abort(404, message = "Cliente não encontrado")
        result.deleta_cliente()
        return '', 204 #No content
from .early_stopper import EarlyStopper
from .model_handler import ModelHandler
from .optimizers import Optimizer
from .runners import *
from .scheduler import Scheduler
from .learners import *
from .checkpoint_handler import CheckpointHandler
|
class BubbleSort:
    """Classic in-place bubble sort."""
    def sort(self, items):
        """Sort *items* ascending, mutating it in place (returns None)."""
        n = len(items)
        for done in range(n):
            # After each pass the largest remaining element has bubbled to
            # the end, so the inner scan shrinks by one.
            for k in range(n - 1 - done):
                if items[k] > items[k + 1]:
                    items[k], items[k + 1] = items[k + 1], items[k]
if __name__ == '__main__':
    # Read a comma-separated list of integers, sort it and print the result.
    bubbleSort = BubbleSort()
    numbers = str(input('Enter numbers(separated by comma) to be sorted(Press enter when done) :: ')).split(',')
    numbers = [int(number) for number in numbers]
    bubbleSort.sort(numbers)
    print('Sorted values are : {}'.format(numbers))
from turtle import forward, left, right, exitonclick, speed
# 1 ctverec (xsize, ysize=4)
# for i in range(4):
# forward(50)
# left(90)
# 2 ctvercova sit, velikost dale promennych x, y,
# for x in range(4):
# for y in range(4):
# for i in range(4):
# forward(50)
# left(90)
# forward(50)
# left(180)
# forward(200)
# left(90)
# exitonclick()
# 3 sestiuhelnik
# Exercise 3: hexagons.  Draws two mirrored clusters of hexagons — six
# hexagon "petals" turning left, then, after a 120° turn, one hexagon traced
# turning right.  NOTE(review): all three loops reuse the name `i`; harmless
# here since the bodies never read it, but confusing.
for i in range(2):
    for i in range(6):
        for i in range(6):
            forward(50)
            left(60)
        forward(50)
        left(60)
        forward(50)
        right(60)
    right(120)
    for i in range(6):
        forward(50)
        right(60)
    forward(50)
    left(60)
# Keep the window open until it is clicked.
exitonclick()
#print(" ", end='')
# Print a 10x10 multiplication table: a 3-space indent, a header row of
# column indices (width 3 each), a blank line, then one row per i with the
# row index followed by the products i*j.
print(" " * 3, end='')
for i in range(10):
    print(f"{i:3}", end='')
print()
print()
for i in range(10):
    print(f'{i}', end=' ')
    for j in range(10):
        wynik = i * j
        print(f'{wynik:3}', end='')
    print()
|
from functools import partial
from django.core.cache import cache
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework.response import Response
from core import models
from movies import serializers
from movies import permissions
from core.omdb import Omdb_API
class BaseListMovieViewSet(mixins.ListModelMixin,
                           mixins.CreateModelMixin,
                           mixins.DestroyModelMixin,
                           viewsets.GenericViewSet):
    """The view with predefined features for movies list endpoints.

    Supports list/create/destroy only (no retrieve/update) and requires
    token authentication.
    """
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated, permissions.IsUserOrReadOnly)
    def get_queryset(self):
        # Each user only ever sees their own entries, ordered by movie id.
        return self.queryset.filter(user=self.request.user).order_by('movie_id')
class FavouriteMovieViewSet(BaseListMovieViewSet):
    """The view for favourite movie model (per-user list/create/destroy)."""
    queryset = models.FavouriteMovie.objects.all()
    serializer_class = serializers.FavouriteMovieSerializer
class MovieToWatchViewSet(BaseListMovieViewSet):
    """The view for movie to watch model (per-user list/create/destroy)."""
    queryset = models.MovieToWatch.objects.all()
    serializer_class = serializers.MovieToWatchSerializer
class ReviewViewSet(viewsets.ModelViewSet):
    """The view for review model: full CRUD, filterable by user and movie_id."""
    serializer_class = serializers.ReviewSerializer
    queryset = models.Review.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated, permissions.IsUserOrReadOnly)
    filter_backends = [DjangoFilterBackend]
    filterset_fields = ['user', 'movie_id']
    def perform_create(self, serializer):
        """Ensure that saved user is authenticated user"""
        serializer.save(user=self.request.user)
    def perform_update(self, serializer):
        """Ensure that saved user is authenticated user"""
        serializer.save(user=self.request.user)
class MovieViewSet(viewsets.ViewSet):
    """
    View to display a list of movies from omdb. Provides the option of filtering the title and genre.
    Title is required.
    """
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    def list(self, request):
        """Search OMDB by title (required) and optional genre, caching for 1h."""
        api = Omdb_API()
        title = request.query_params.get('title')
        genre = request.query_params.get('genre')
        if not title:
            return Response("'title' parameter is required", status=status.HTTP_400_BAD_REQUEST)
        try:
            # Cache keyed on (title, genre); partial() defers the OMDB call so
            # it only runs on a cache miss.
            movie_list = cache.get_or_set(f'{title},{genre}',
                                          partial(api.search_movies, title=title, genre=genre),
                                          timeout=60 * 60)
        except Exception as e:
            # Broad catch is deliberate: any upstream failure maps to a 503.
            return Response(f'OMDB API does not work correctly. Original message: {e}',
                            status=status.HTTP_503_SERVICE_UNAVAILABLE)
        return Response(movie_list)
|
from database import Base, engine
# Importing db_model registers Item on Base's metadata so create_all sees it.
from db_model import Item
print("Creating Database ...")
# Create all tables known to Base's metadata (no-op for existing tables).
Base.metadata.create_all(engine)
from lxml import etree
import requests
url = 'https://www.chainnode.com/'
head ={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36"}
# Fetch the front page and decode the raw bytes to text.
str_data = requests.get(url,headers = head).content.decode()
#with open('bta.html','w',encoding="utf-8")as f:
#    f.write(str_data)
xpath_data = etree.HTML(str_data)
# Post titles and their links.
# NOTE(review): the predicate [wallhaven_spider] selects <div>s that contain
# a <wallhaven_spider> child element — that looks like a paste artifact and
# these XPaths may match nothing; verify against the live page.
data1 = xpath_data.xpath('//div[@class="post-item-wrap"]/div/div[wallhaven_spider]/div/h3/a[@class="link-dark-major font-bold bbt-block"]/text()')
data2 = xpath_data.xpath('//div[@class="post-item-wrap"]/div/div[wallhaven_spider]/div/h3/a[@class="link-dark-major font-bold bbt-block"]/@href')
print(len(data1))
print(data2)
#for i in data:
#    i = i.replace(' ','')
#    print(i)
|
import numpy
# Scores to convert to hensachi (Japanese standardized scores).
data = [50,24,50,55,55,72,76,54,57,]
# Population standard deviation of the scores.
var = (numpy.std(data))
# Arithmetic mean of the scores.
mean = sum(data) / len(data)
# Hensachi formula: 50 + 10 * (score - mean) / stddev, one line per person.
# The original loop reset its index every iteration (so it always read
# data[1]), used a bogus formula, and printed each value 9 times via an
# inner range(9) loop.
for num, score in enumerate(data, 1):
    pt = (score - mean) / var * 10 + 50
    print(f'{num}人目の偏差値 is {round(pt,1)}')
|
"""
WSGI config for oneup project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# load environment settings
try:
    from settings.environment import environment
except ImportError:
    # No per-deployment module present: fall back to the default settings.
    # (Catching only ImportError — the original bare `except:` also hid
    # genuine errors inside settings/environment.py.)
    environment = ''
if environment in ['development','testing','production']:
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oneup.settings.%s' % environment)
else:
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oneup.settings.default')
application = get_wsgi_application()
|
import numpy as np
def solve_sudoku(sudoku: np.array):
    """Solve *sudoku* (0 = empty cell) by backtracking.

    Prints and validates every solution found, then backtracks further, so
    the board is restored to its input state on return (returns None).
    """
    nrows, ncols = sudoku.shape
    # Sub-box edge length (3 for a standard 9x9 board).
    sdim = int(nrows ** 0.5)
    assert nrows == ncols, "sudoku dimensions must be equal."
    def _valid(r, c, n):
        # Digit n must not already appear in row r, column c, or the box.
        for i in range(nrows):
            if sudoku[r][i] == n:
                return False
        for j in range(ncols):
            if sudoku[j][c] == n:
                return False
        r_, c_ = (r // sdim) * sdim, (c // sdim) * sdim
        for i in range(sdim):
            for j in range(sdim):
                if sudoku[r_ + i][c_ + j] == n:
                    return False
        return True
    def _solve():
        # Find the first empty cell; try each digit, recurse, then undo.
        for row in range(ncols):
            for col in range(nrows):
                if sudoku[row][col] == 0:
                    for d in range(1, nrows + 1):
                        if _valid(row, col, d):
                            sudoku[row][col] = d
                            _solve()
                            sudoku[row][col] = 0
                    return
        # No empty cell left: a complete solution — print and sanity-check it.
        print(sudoku)
        _check(sudoku)
    def _check(s):
        # Verify each row, column and box contains exactly the digits 1..n
        # using a bitmask per group.
        for row in s:
            d = 0
            for i in range(nrows):
                d = d | (1 << row[i])
            assert (d >> 1) == ((1 << nrows) - 1)
        for col in s.T:
            d = 0
            for i in range(ncols):
                d = d | (1 << col[i])
            assert (d >> 1) == ((1 << ncols) - 1)
        # NOTE(review): this box loop starts every band at column c = sdim and
        # never advances c per box, so it does not cover all boxes — confirm.
        for i in range(sdim):
            r = i * sdim
            for j in range(sdim):
                c, d = sdim, 0
                for k in range(r, r + sdim):
                    for h in range(c, c + sdim):
                        d = d | (1 << s[k][h])
                assert (d >> 1) == ((1 << nrows) - 1)
        return True
    _solve()
if __name__ == '__main__':
    # Demo boards; 0 marks an empty cell.
    easy = np.array([
        [0, 0, 0, 2, 6, 0, 7, 0, 1],
        [6, 8, 0, 0, 7, 0, 0, 9, 0],
        [1, 9, 0, 0, 0, 4, 5, 0, 0],
        [8, 2, 0, 1, 0, 0, 0, 4, 0],
        [0, 0, 4, 6, 0, 2, 9, 0, 0],
        [0, 5, 0, 0, 0, 3, 0, 2, 8],
        [0, 0, 9, 3, 0, 0, 0, 7, 4],
        [0, 4, 0, 0, 5, 0, 0, 3, 6],
        [7, 0, 3, 0, 1, 8, 0, 5, 0],
    ])
    hard = np.array([
        [0, 2, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 6, 0, 0, 0, 0, 3],
        [0, 7, 4, 0, 8, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 3, 0, 0, 2],
        [0, 8, 0, 0, 4, 0, 0, 1, 0],
        [6, 0, 0, 5, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 7, 8, 0],
        [5, 0, 0, 0, 0, 9, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 4, 0],
    ])
    # Show the puzzle, then print every solution found.
    print(hard)
    solve_sudoku(hard)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Start of a LeNet-style CNN: two conv layers and the first FC layer."""

    def __init__(self):
        super().__init__()
        # 1 input channel -> 6 feature maps, 5x5 kernels.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        # 6 -> 16 feature maps, 5x5 kernels.
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
        # 16 maps of 5x5 flattened -> 120 hidden units.
        self.fc1 = nn.Linear(in_features=16 * 5 * 5, out_features=120)
|
from tkinter import *
import os
class Calculadora():
    """Tk calculator that works in binary, octal, decimal or hexadecimal,
    mirroring every entry across input fields for all four bases."""
    def __init__(self):
        ## Widget/layout settings for the GUI.
        self.principal = Tk()
        self.principal.title("Calculadora")
        self.principal.resizable(False,False) # forbid resizing the window
        self.ancho_boton = 4 # button width
        self.alto_boton = 1 # button height
        self.color_boton = "#A9BCF5" # button background colour
        self.fuente_btn = ('Consolas', '22') # button font
        self.padx_btn = 2
        self.pady_btn = 2
        self.pady_btn = 2 # NOTE(review): duplicate of the previous line — harmless, but probably a typo
        self.bg_gral = "#EFEFFB" # general background colour
        self.fuente_selec = ('Consolas', '12') # font for the base-selection radios
        # State used by the number-base calculator.
        self.hubo_resultado = False # True right after '=' produced a result
        self.input_text = StringVar() # text shown on the calculator's main display
        self.input_text_bin = StringVar() # base-2 mirror field
        self.input_text_oct = StringVar() # base-8 mirror field
        self.input_text_dec = StringVar() # base-10 mirror field
        self.input_text_hex = StringVar() # base-16 mirror field
        self.caracteres_especiales = ['*', '/', '-', '+', '(', ')'] # operator characters offered on the keypad
        self.un_caracter_especial = False # whether the user has typed one of those operators
        self.hasta_operador = int() # display characters typed up to the last operator; used to slice out the current operand
        self.ventana() # build the GUI
        self.principal.mainloop() # start the Tk event loop
    def On_Clik_btn(self, bton):
        """Handle a keypad press: append it to the display and refresh the
        per-base mirror fields (or start a fresh entry right after '=')."""
        if self.hubo_resultado: # '=' was pressed last; begin a new entry
            self.hasta_operador = 0
            self.limpiar_celdas(True) # clear every field
            self.hubo_resultado = False
        texto = self.campo_pantalla.get() + str(bton) # current display text plus the key just pressed
        if str(bton) in self.caracteres_especiales: # an operator key
            self.operacion = self.input_text_dec.get() ## remember the decimal value so far; eval() later runs the whole expression in base 10
            self.operacion = self.operacion + str(bton) # append the operator itself
            self.hasta_operador = len(texto) # characters typed up to (and including) the operator, so the next operand can be sliced out
            self.limpiar_celdas() # clear the mirror fields
            self.input_text.set(texto) # show the text on the main display
            self.un_caracter_especial = True # an operator has now been entered
        else:
            self.input_text.set(texto) # set() pushes the value into the widget
            texto = self.input_text.get()[self.hasta_operador:] ## keep only the digits typed after the last operator
            self.imprmir_en_celdas(texto) # refresh the base-conversion fields
    def imprmir_en_celdas(self, texto):
        """Fill the four mirror fields with `texto` converted from the
        currently selected base (1=bin, 2=oct, 3=dec, 4=hex)."""
        if self.base_numeric.get() == 1: # calculator is in binary mode
            decimal = self.bin_a_dec(texto) # convert via decimal — the easiest pivot to reach the other bases
            self.input_text_bin.set(texto) # already binary; no conversion needed
            self.input_text_oct.set(self.dec_a_baseN(decimal, 8)) # dec_a_baseN converts base 10 to any base
            self.input_text_dec.set(decimal)
            self.input_text_hex.set(self.dec_a_baseN(decimal, 16))
        elif self.base_numeric.get() == 2: # octal mode
            decimal = self.oct_a_dec(texto)
            self.input_text_bin.set(self.dec_a_baseN(decimal, 2))
            self.input_text_oct.set(texto)
            self.input_text_dec.set(decimal)
            self.input_text_hex.set(self.dec_a_baseN(decimal, 16))
        elif self.base_numeric.get() == 3: # decimal mode
            self.input_text_bin.set(self.dec_a_baseN(texto, 2))
            self.input_text_oct.set(self.dec_a_baseN(texto, 8))
            self.input_text_dec.set(texto)
            self.input_text_hex.set(self.dec_a_baseN(texto, 16))
        elif self.base_numeric.get() == 4: # hexadecimal mode
            decimal = self.hexa_a_dec(texto)
            self.input_text_bin.set(self.dec_a_baseN(decimal, 2))
            self.input_text_oct.set(self.dec_a_baseN(decimal, 8))
            self.input_text_dec.set(decimal)
            self.input_text_hex.set(texto)
    def bin_a_dec(self, binario):
        """Convert a base-2 number (int or digit string) to base 10."""
        decimal = 0
        numero_de_bits = len(str(binario))
        for n in range(numero_de_bits):
            # Walk the digits right to left, weighting each by 2**position.
            decimal = decimal + int(str(binario)[-(n + 1)]) * 2 ** n
        return decimal
    def oct_a_dec(self, octal):
        """Convert a base-8 number (int or digit string) to base 10."""
        decimal = 0
        numero_de_valores = len(str(octal))
        for n in range(numero_de_valores):
            # Same positional scheme as bin_a_dec, with weight 8**position.
            decimal = decimal + int(str(octal)[-(n + 1)]) * 8 ** n
        return decimal
    def dec_a_baseN(self, decimal, base):
        """Convert a base-10 number to `base` by repeated division,
        accumulating remainders from least to most significant."""
        conversion = ''
        while int(decimal) // base != 0:
            conversion = str(int(decimal) % base) + conversion
            decimal = int(decimal) // base
        # NOTE(review): digits >= 10 are emitted as two characters (e.g. 15
        # becomes "15", not "F"), so hex output with letters is never produced.
        return str(decimal) + conversion
    def hexa_a_dec(self, hexa):
        """Convert a base-16 number to base 10."""
        decimal = 0
        numero_de_valores = len(str(hexa))
        for n in range(numero_de_valores):
            if str(hexa)[-(n + 1)] >='0' and str(hexa)[-(n + 1)] <='9':
                decimal = decimal + int(str(hexa)[-(n + 1)]) * 16 ** n
            elif str(hexa)[-(n + 1)] >='A' and str(hexa)[-(n + 1)] <='F':
                # BUG(review): int('A') raises ValueError — this branch
                # presumably meant ord(str(hexa)[-(n + 1)]) - 55; hex input
                # containing letters currently crashes.
                decimal = decimal + (int(str(hexa)[-(n + 1)])-55) * 16 ** n
        return decimal
    def calcular(self):
        """Evaluate the pending expression when '=' is pressed and show the
        result converted to the selected base."""
        try:
            if len(self.operacion) > 0: # append the operand typed after the operator, then evaluate everything in base 10
                self.operacion = self.operacion + self.input_text_dec.get()
                resultado = str(eval(self.operacion)) # NOTE: eval() executes the string; input here only comes from the keypad
                if self.base_numeric.get() == 1: # pick the base in which to display the result
                    resultado = self.dec_a_baseN(resultado, 2)
                elif self.base_numeric.get() == 2:
                    resultado = self.dec_a_baseN(resultado, 8)
                elif self.base_numeric.get() == 4:
                    resultado = self.dec_a_baseN(resultado, 16)
                print(resultado)
                self.guardartxt(self.operacion) # persist operation + conversions to resultados.txt
                self.input_text.set(resultado)
                self.imprmir_en_celdas(resultado)
                self.operacion = ""
            else:
                self.limpiar_celdas(True)
            self.un_caracter_especial = False
            self.hubo_resultado = True
        except:
            # NOTE(review): the bare except hides every error, and
            # `resultado = "ERROR"` is never pushed to the display
            # (input_text is not set), so failures are silent.
            self.limpiar_celdas()
            resultado = "ERROR"
            self.hubo_resultado = True
    def limpiar_celdas(self, todo=True):
        """Clear the mirror fields; also clear the main display if `todo`."""
        self.input_text_bin.set("")
        self.input_text_oct.set("")
        self.input_text_dec.set("")
        self.input_text_hex.set("")
        if todo:
            self.input_text.set("")
    def cerrando_p(self):
        """Stop the Tk event loop and destroy the main window."""
        self.principal.quit()
        self.principal.destroy()
    def guardartxt(self, operacion):
        """Save the operation and its four base conversions to resultados.txt.

        NOTE(review): mode 'w' truncates, so only the most recent operation
        survives; append mode ('a') may have been intended.
        """
        file = open("resultados.txt", "w")
        file.write("Operación: %s" % operacion + os.linesep)
        file.write("Reseultados: binario = %s, octadecimal = %s, decimal = %s, hexadecimal = %s" %(str(self.input_text_bin.get()), str(self.input_text_oct.get()), str(self.input_text_dec.get()), str(self.input_text_hex.get())))
        file.close()
    def sele_base(self):
        """Rebuild the keypad, enabling/disabling digit buttons according to
        the selected base (binary disables 2-9, octal disables 8-9)."""
        self.base_numeric.set(self.base_numeric.get()) # NOTE(review): no-op; the radio already updated this IntVar
        self.botones.destroy() # throw the old keypad away and rebuild it below
        self.limpiar_celdas(True)
        if self.base_numeric.get() == 1:
            self.botone("disabled","disabled")
        elif self.base_numeric.get() == 2:
            self.botone("disabled","normal")
        else:
            self.botone("normal","normal")
    def botone(self, estado8a9 ="normal", estado2a7="normal"):
        """Create the keypad frame and all calculator buttons.

        estado8a9 controls buttons 8-9 (disabled in binary and octal);
        estado2a7 controls buttons 2-7 (disabled in binary).
        """
        self.botones = Frame(self.marco02)
        self.botones.pack(fill=X, expand=True, side=TOP, pady=2)
        # ROW 0 OF BUTTONS
        bton_c = Button(self.botones, text="BORRAR", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=9,
                        height=self.alto_boton, command=lambda: self.limpiar_celdas(True)).grid(row=0, column=0,
                                                                                               columnspan=2,
                                                                                               padx=self.padx_btn,
                                                                                               pady=self.pady_btn)
        bton_Iparent = Button(self.botones, text="(", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                              width=self.ancho_boton,
                              height=self.alto_boton, command=lambda: self.On_Clik_btn("(")).grid(row=0, column=2,
                                                                                                  padx=self.padx_btn,
                                                                                                  pady=self.pady_btn)
        bton_Fparent = Button(self.botones, text=")", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                              width=self.ancho_boton,
                              height=self.alto_boton, command=lambda: self.On_Clik_btn(")")).grid(row=0, column=3,
                                                                                                  padx=self.padx_btn,
                                                                                                  pady=self.pady_btn)
        bton_power = Button(self.botones, text="OFF", font=self.fuente_btn, bd=self.pady_btn, bg="#B22222",
                            width=self.ancho_boton,
                            height=self.alto_boton, command=lambda: self.cerrando_p()).grid(row=0, column=4, rowspan=1,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        # ROW 1 OF BUTTONS
        bton_7 = Button(self.botones, text="7", font=self.fuente_btn,state =estado2a7, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("7")).grid(row=1, column=0,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_8 = Button(self.botones, text="8", font=self.fuente_btn, state=estado8a9, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("8")).grid(row=1, column=1,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_9 = Button(self.botones, text="9", font=self.fuente_btn,state = estado8a9, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("9")).grid(row=1, column=2,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_div = Button(self.botones, text="/", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                          width=self.ancho_boton,
                          height=self.alto_boton, command=lambda: self.On_Clik_btn("/")).grid(row=1, column=3,
                                                                                              padx=self.padx_btn,
                                                                                              pady=self.pady_btn)
        # ROW 2 OF BUTTONS
        bton_4 = Button(self.botones, text="4",state =estado2a7, font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("4")).grid(row=2, column=0,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_5 = Button(self.botones, text="5",state =estado2a7, font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("5")).grid(row=2, column=1,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_6 = Button(self.botones, text="6",state =estado2a7, font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("6")).grid(row=2, column=2,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_mult = Button(self.botones, text="*", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                           width=self.ancho_boton,
                           height=self.alto_boton, command=lambda: self.On_Clik_btn("*")).grid(row=2, column=3,
                                                                                               padx=self.padx_btn,
                                                                                               pady=self.pady_btn)
        # '=' spans rows 1-4 in the rightmost column.
        bton_igual = Button(self.botones, text="=", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                            width=self.ancho_boton,
                            height=self.alto_boton, command=lambda: self.calcular()).grid(row=1, column=4,
                                                                                          padx=self.padx_btn,
                                                                                          pady=self.pady_btn,
                                                                                          rowspan=4,
                                                                                          sticky=N + S)
        # ROW 3 OF BUTTONS
        bton_1 = Button(self.botones, text="1", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("1")).grid(row=3, column=0,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_2 = Button(self.botones, text="2",state =estado2a7, font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("2")).grid(row=3, column=1,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_3 = Button(self.botones, text="3",state =estado2a7, font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("3")).grid(row=3, column=2,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn)
        bton_men = Button(self.botones, text="-", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                          width=self.ancho_boton,
                          height=self.alto_boton, command=lambda: self.On_Clik_btn("-")).grid(row=3, column=3,
                                                                                              padx=self.padx_btn,
                                                                                              pady=self.pady_btn)
        # ROW 4 OF BUTTONS
        bton_0 = Button(self.botones, text="0", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                        width=self.ancho_boton,
                        height=self.alto_boton, command=lambda: self.On_Clik_btn("0")).grid(row=4, column=0,
                                                                                            padx=self.padx_btn,
                                                                                            pady=self.pady_btn,
                                                                                            columnspan=3,
                                                                                            sticky=W + E)
        bton_sum = Button(self.botones, text="+", font=self.fuente_btn, bd=self.pady_btn, bg=self.color_boton,
                          width=self.ancho_boton,
                          height=self.alto_boton, command=lambda: self.On_Clik_btn("+")).grid(row=4, column=3,
                                                                                              padx=self.padx_btn,
                                                                                              pady=self.pady_btn)
    def calculadora_bases_numericas(self):
        """Size the window and build the display, the base-selection radio
        buttons with their mirror entry fields, and the keypad."""
        self.principal.geometry("420x590")
        # Frame for row 2, column 0.
        self.marco02 = Frame(self.principal, bg="#BDBDBD")
        self.marco02.pack(fill=Y, side=TOP, pady=20, padx=8, ipady=30)
        pantalla = Frame(self.marco02)
        pantalla.pack(fill=X, expand=True, side=TOP)
        self.campo_pantalla = Entry(pantalla, font=('Consolas', 20, 'bold'), width=22, bg="#A9F5A9", bd=20,
                                    insertwidth=4, textvariable=self.input_text, justify="right") # , state=DISABLED)
        self.campo_pantalla.pack(fill=X, side=TOP, expand=True)
        # Selected base: 1=binary, 2=octal, 3=decimal (default), 4=hex.
        self.base_numeric = IntVar()
        self.base_numeric.set(3)
        radios = Frame(self.marco02, bg= "#BDBDBD")
        radios.pack(fill = X, expand = True, side = TOP, pady=1)
        binarios = Radiobutton(radios, text="Binario", variable=self.base_numeric, bg="#BDBDBD", value=1,
                               font=self.fuente_selec,
                               command=lambda: self.sele_base()).grid(row=0, column=0, padx=25,sticky =W)
        input_binarios = Entry(radios, font=self.fuente_selec, width=20,insertwidth=4, textvariable=self.input_text_bin, justify="right").grid(row=0, column=1)
        octaldecimal = Radiobutton(radios, text="Octal decimal", variable=self.base_numeric, bg="#BDBDBD", value=2,
                                   font=self.fuente_selec,
                                   command=lambda: self.sele_base()).grid(row=1, column=0, padx=25,sticky =W)
        input_octaldecimal = Entry(radios, font=self.fuente_selec, width=20,insertwidth=4, textvariable=self.input_text_oct, justify="right").grid(row=1, column=1)
        decimal = Radiobutton(radios, text="Decimal", variable=self.base_numeric, bg="#BDBDBD", value=3,
                              font=self.fuente_selec,
                              command=lambda: self.sele_base()).grid(row=2, column=0, padx=25,sticky =W)
        input_decimal = Entry(radios, font=self.fuente_selec, width=20,insertwidth=4, textvariable=self.input_text_dec, justify="right").grid(row=2, column=1)
        hexadecimal = Radiobutton(radios, text="Hexadecimal", variable=self.base_numeric, bg="#BDBDBD", value=4,
                                  font=self.fuente_selec,
                                  command=lambda: self.sele_base()).grid(row=3, column=0, padx=25,sticky =W)
        input_hexadecimal = Entry(radios, font=self.fuente_selec, width=20,insertwidth=4, textvariable=self.input_text_hex, justify="right").grid(row=3, column=1)
        # Draw the keypad.
        self.botone()
    def ventana(self):
        """Configure the root window and wire up the close handler."""
        self.principal.config(bg=self.bg_gral,relief="ridge")
        self.calculadora_bases_numericas()
        self.principal.protocol("WM_DELETE_WINDOW", self.cerrando_p)
def main():
    """Entry point: build the calculator; its constructor blocks in
    mainloop until the window is closed."""
    Calculadora()

if __name__ == "__main__":
    main()
#!/usr/bin/env python
# Class for common Google sheet operations.
import os
import sys
import json
import doctest
import csv
import gspread
import string
from h1 import h1
from oauth2client.client import SignedJwtAssertionCredentials
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
import argparse
class Sheet:
    """ Handle google spreadsheet read and flatfile write operations.
    >>> sheet = Sheet('test-sheet', 'worksheet-name')
    >>> sheet.publish()
    True
    """
    # NOTE(review): Python 2 code (print statements) built on oauth2client's
    # long-deprecated SignedJwtAssertionCredentials.
    def __init__(self, sheet_name, worksheet=None):
        # Credentials come from the ACCOUNT_USER / ACCOUNT_KEY environment
        # variables; ACCOUNT_KEY stores its newlines escaped as literal "\n".
        self.options = None
        self.directory = os.path.dirname(os.path.realpath(__file__))
        # Ensure an output/ directory exists next to this script.
        if not os.path.isdir('%s/output' % self.directory):
            os.mkdir('%s/output' % self.directory)
        scope = ['https://spreadsheets.google.com/feeds']
        self.credentials = SignedJwtAssertionCredentials(
            os.environ.get('ACCOUNT_USER'),
            string.replace(os.environ.get('ACCOUNT_KEY'), "\\n", "\n"),
            scope)
        self.spread = gspread.authorize(self.credentials)
        self.sheet_name = sheet_name
        self.filters = None
        # NOTE(review): self.sheet is only assigned inside open_worksheet, so
        # the `if not self.sheet` checks in the methods below raise
        # AttributeError when no worksheet was ever opened.
        if worksheet:
            self.worksheet = self.open_worksheet(worksheet)
    def set_options(self, options):
        """ Set the objects options var.
        """
        self.options = options
        return options
    def slugify(self, slug):
        # Lowercase and replace spaces so the name is URL/filename friendly.
        return slug.lower().replace(' ', '-')
    def open_worksheet(self, worksheet):
        """ Open a spreadsheet, return a sheet object.
        >>> sheet = Sheet('test-sheet')
        >>> sheet.open_worksheet('worksheet-name')
        <Worksheet 'worksheet-name' id:od6>
        """
        self.sheet = self.spread.open(self.sheet_name).worksheet(worksheet)
        return self.sheet
    def publish(self, worksheet=None):
        """ Print out markup for a list.
        """
        # Rows are expected as [title, url, ...].
        if not self.sheet or worksheet:
            self.sheet = self.open_worksheet(worksheet)
        if not worksheet:
            worksheet = self.worksheet
        cell_list = worksheet.get_all_values()
        i = 0
        for row in cell_list:
            i += 1
            try:
                # Prefix a scheme when the URL cell lacks one.
                if 'http' in row[1]:
                    print '<li><a href="%s">%s</a></li>' % ( row[1], row[0] )
                else:
                    print '<li><a href="http://%s">%s</a></li>' % ( row[1], row[0] )
            except:
                # NOTE(review): bare except silently skips short/malformed rows.
                pass
    def adddupe(self, worksheet=None):
        """ Find sheets that have the same url in there twice, and
            add their PVs together.
        """
        if not self.sheet or worksheet:
            self.sheet = self.open_worksheet(worksheet)
        if not worksheet:
            worksheet = self.worksheet
        cell_list = worksheet.get_all_values()
        urls = []
        dupes = []
        dupecounts = {}
        # Pass 1: collect the URLs (column 2) that appear more than once.
        i = 0
        for row in cell_list:
            i += 1
            if row[1] in urls:
                dupes.append(row[1])
                dupecounts[row[1]] = 0
            else:
                urls.append(row[1])
        # Pass 2: sum the pageview counts (column 3) of each duplicated URL.
        i = 0
        for row in cell_list:
            i += 1
            if row[1] in dupes:
                dupecounts[row[1]] += int(row[2])
        print dupecounts
        # Pass 3: write the total into the first occurrence and blank out the
        # later occurrences (cells are 1-indexed in gspread).
        dupekills = []
        i = 0
        for row in cell_list:
            i += 1
            if row[1] in dupekills:
                # We've already added it to the sheet. Kill it.
                index = dupekills.index(row[1])
                del dupekills[index]
                worksheet.update_cell(i, 1, '')
                worksheet.update_cell(i, 2, '')
                worksheet.update_cell(i, 3, '')
            elif row[1] in dupes:
                worksheet.update_cell(i, 2, dupecounts[row[1]])
                dupekills.append(row[1])
        return True
    def dedupe(self, worksheet=None):
        """ Find sheets that have the same url in there twice, and
            kill the other one.
        """
        if not self.sheet or worksheet:
            self.sheet = self.open_worksheet(worksheet)
        if not worksheet:
            worksheet = self.worksheet
        cell_list = worksheet.get_all_values()
        urls = []
        dupes = []
        # Keep the first occurrence of each URL; blank out every repeat.
        i = 0
        for row in cell_list:
            i += 1
            if row[1] in urls:
                dupes.append(row[1])
                worksheet.update_cell(i, 1, '')
                worksheet.update_cell(i, 2, '')
                worksheet.update_cell(i, 3, '')
            else:
                urls.append(row[1])
        return True
    def fix(self, worksheet=None):
        """ Publish the data in whatever permutations we need.
        This assumes the spreadsheet's key names are in the first row.
        >>> sheet = Sheet('test-sheet', 'worksheet-name')
        >>> sheet.fix()
        True
        """
        if not self.sheet or worksheet:
            self.sheet = self.open_worksheet(worksheet)
        if not worksheet:
            worksheet = self.worksheet
        cell_list = worksheet.get_all_values()
        i = 0
        for row in cell_list:
            i += 1
            # If row[0] is blank then we're dealing with a GA row
            # that needs to be fixed.
            # row[0] should contain the title, row[1] the URL, row[2] the PVs.
            #if 'http://' in row[0]:
            if row[0] == '' and row[2] != '':
                # Get title.
                # If we have a blog post then it's a h1.
                # If we have an article it's some weird element in a printer-friendly page.
                extract = h1()
                # Normalize syndicated-domain URLs back to denverpost.com and
                # mirror the canonical URL into column 3.
                if 'newsfuze.com' in row[1]:
                    row[1] = row[1].replace('newsfuze', 'denverpost')
                    worksheet.update_cell(i, 3, row[1])
                elif 'dailycamera.com' in row[1]:
                    row[1] = row[1].replace('dailycamera', 'denverpost')
                    worksheet.update_cell(i, 3, row[1])
                elif 'mercurynews.com' in row[1]:
                    row[1] = row[1].replace('mercurynews', 'denverpost')
                    worksheet.update_cell(i, 3, row[1])
                elif 'timescall.com' in row[1]:
                    row[1] = row[1].replace('timescall', 'denverpost')
                    worksheet.update_cell(i, 3, row[1])
                elif row[1][0] == '/':
                    # Path-only URL: make it absolute.
                    row[1] = 'http://www.denverpost.com%s' % row[1]
                    worksheet.update_cell(i, 3, row[1])
                extract.content = extract.request(row[1])
                # Blogs have "blogs." in row[0], articles have "www."
                element = 'h1\ class="entry-title",h1'
                if 'www.denverpost.com' in row[1]:
                    value = extract.extract_anything("'Content\ Title'\ \:\ '(.*)',")
                elif 'cannabist.co' in row[1]:
                    value = extract.extract(element)
                elif 'theknow' in row[1]:
                    value = extract.extract(element)
                # NOTE(review): `value` is unassigned when none of the three
                # branches match — that case raises NameError/UnboundLocalError.
                if value:
                    try:
                        if '\\' in value:
                            value = value.replace('\\', '')
                        worksheet.update_cell(i, 1, value)
                    except:
                        print value
                # Move URL to the third column
                #worksheet.update_cell(i, 3, row[0])
        return True
def main(args):
    """ Process the worksheets named on the command line.
        For each worksheet: print its markup when --publish is set,
        otherwise dedupe it and fix its rows in place.
        Example command:
        $ python writesheet.py test
    """
    if args:
        sheet = Sheet('popular')
        sheet.set_options(args)
        # args.sheets is [[name, ...]] because argparse combines
        # action="append" with nargs="*".
        for worksheet in args.sheets[0]:
            sheet.worksheet = sheet.open_worksheet(worksheet)
            if args.publish == True:
                sheet.publish()
            else:
                print worksheet
                sheet.dedupe()
                sheet.fix()
if __name__ == '__main__':
    # Command-line entry point; worksheet names are positional arguments.
    parser = argparse.ArgumentParser(usage='$ python writesheet.py',
                                     description='',
                                     epilog='')
    parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true")
    parser.add_argument("-d", "--dupes", dest="dupes", default=False, action="store_true")
    parser.add_argument("-p", "--publish", dest="publish", default=False, action="store_true")
    parser.add_argument("sheets", action="append", nargs="*")
    args = parser.parse_args()
    # --verbose also runs the module doctests before the main work.
    if args.verbose:
        doctest.testmod(verbose=args.verbose)
    main(args)
|
from django.apps import AppConfig
class MinecraftableConfig(AppConfig):
    """Django application configuration for the Minecraftable app."""
    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'Minecraftable'
|
# Competitive-programming style input: two ints on the first line and a list
# on the second. NOTE(review): `a` is read but never used; presumably it is
# the length of `secs` — confirm against the problem statement.
a,b=map(int,input().split())
secs=list(map(int,input().split()))
c=0  # how many entries were consumed before the budget b ran out
for i in secs:
    b1=86400-i  # remaining seconds of the day (86400 = 24h)
    b=b-b1
    c=c+1
    if b<=0:  # budget exhausted; the entry that crosses zero is counted
        break
print(c)
|
# main.py
# CompareThreeNumbers_Python
#
# This program reads three numbers from the user and reports which one of
# them is the biggest.
#
# Python interpreter: 3.6
#
# Author: León Felipe Guevara Chávez
# email: leon.guevara@itesm.mx
# date: May 29, 2017
#
# Read the three numbers to compare.
number1 = float(input("Give me the first number: "))
number2 = float(input("Give me the second number: "))
number3 = float(input("Give me the third number: "))
# Determine the largest of the three values (ties resolve to the earliest
# argument, exactly like the original >= comparison chain).
biggest = max(number1, number2, number3)
# Report the result.
print(f"The biggest number among the three is: {biggest}")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 3 10:13:24 2019
@author: naeemsunesara
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# ---- HR attrition: label-encode categoricals, fit a random forest ----
df = pd.read_csv('hr_employee.csv')
categorical = df.select_dtypes(exclude = np.number )
numerical = df.select_dtypes(include = np.number)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# Encode every non-numeric column to integer labels.
# NOTE(review): assigning into a select_dtypes slice can trigger pandas'
# SettingWithCopyWarning; a .copy() would make intent explicit.
for x in list(categorical):
    categorical[x] = le.fit_transform(categorical[x])
# NOTE(review): positional axis argument is deprecated; use axis=1.
df = pd.concat([numerical,categorical], 1)
X = df[['MonthlyIncome','YearsAtCompany','Age','StockOptionLevel','OverTime']]
y = df['Attrition']
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split as tts
X_train, X_test, y_train, y_test = tts(X,y, test_size = 0.3, random_state = 42)
rfc = RandomForestClassifier()
rfc.fit(X_train,y_train)
y_pred = rfc.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,y_pred))
## Class Imbalance accuracy score = 0.86167 not good
from sklearn.metrics import classification_report
print(classification_report(y_test,y_pred))
# Plot feature importances, least to most important.
impf = pd.Series(rfc.feature_importances_, index = list(X))
impf = impf.sort_values(ascending=True)
impf.plot(kind = "barh")
# from imblearn.over_sampling import SMOTE
###### Grid Search CV #########
from sklearn.model_selection import GridSearchCV
params = {'max_depth':np.arange(1,10),
          'min_samples_split':np.arange(2,10),
          'max_features':np.arange(2,5)}
model = GridSearchCV(rfc, param_grid=params,cv=5)
model.fit(X,y)
#################
##### Class Weights Code #####
# NOTE(review): these class weights are computed but never passed to any
# estimator below.
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
                                                  np.unique(y),
                                                  y)
class_weight=dict(enumerate(class_weights))
############
#### ADABOOST ####
from sklearn.ensemble import AdaBoostClassifier
abc = AdaBoostClassifier(rfc)
abc.fit(X_train,y_train)
y_pred = abc.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,y_pred))
## Class Imbalance accuracy score = 0.86167 not good
from sklearn.metrics import classification_report
print(classification_report(y_test,y_pred))
##### airbnb Problem ########
df = pd.read_csv('dc_airbnb.csv')
# Strip percent signs and currency symbols so the columns can become numeric.
# NOTE(review): in pandas versions where str.replace defaults to regex=True,
# '$' is an end-of-string anchor and these replacements are no-ops; pass
# regex=False (or escape '\$') to be safe.
df['host_response_rate'] = df['host_response_rate'].str.replace('%','')
df['host_acceptance_rate'] = df['host_acceptance_rate'].str.replace('%','')
df['price'] = df['price'].str.replace('$','')
df['cleaning_fee'] = df['cleaning_fee'].str.replace('$','')
df['security_deposit'] = df['security_deposit'].str.replace('$','')
df = df.drop(['cleaning_fee','security_deposit','latitude','longitude'],1)
# Impute every column that has missing values with its mode.
miss = df.isna().sum()[df.isna().sum()>0].index.tolist()
for x in miss:
    df[x] = df[x].fillna(df[x].mode()[0])
df['price'] = df['price'].str.replace(',','')
df = df.astype({"host_response_rate": int, "host_acceptance_rate": int,"price":float})
df = df.drop(['city','state'],1)
categorical = df.select_dtypes(exclude = np.number)
numerical = df.select_dtypes(include = np.number)
for x in list(categorical):
    categorical[x] = le.fit_transform(categorical[x])
df = pd.concat([numerical,categorical], 1)
# Keep only listings priced at or below $200 for the regression.
df2 = df[df['price']<=200]
X = df2[['bedrooms','bathrooms','maximum_nights','accommodates']]
y = df2['price']
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor()
X_train, X_test, y_train, y_test = tts(X,y, test_size=0.3, random_state=42)
rfr.fit(X_train,y_train)
y_pred = rfr.predict(X_test)
from sklearn.metrics import r2_score
print(r2_score(y_test,y_pred))
impf = pd.Series(rfr.feature_importances_, index = list(X))
impf = impf.sort_values(ascending=True)
impf.plot(kind = "barh")
|
import io
import unittest
from vstruct.types import *
from dissect.filelab import *
class CommonTest(unittest.TestCase):
    """Exercise FileLab's on-demand attribute machinery on a small buffer."""
    def test_filelab(self):
        # Eight-byte backing "file"; its first byte is 'a' == 0x61.
        fd = io.BytesIO( b'asdfqwer' )
        class Woot(VStruct):
            # Three-byte struct: a uint8 followed by a uint16.
            def __init__(self):
                VStruct.__init__(self)
                self.one = uint8()
                self.two = uint16()
        class FooLab(FileLab):
            def __init__(self, fd, off=0):
                FileLab.__init__(self, fd, off=off)
                # Register lazy members; each callback runs on first access.
                self.addOnDemand('woot',self._getWoot)
                self.addOnDemand('baz',self._getFooBaz)
                self.addOnDemand('bars',self._getFooBars)
            def _getFooBaz(self):
                return 'foobaz'
            def _getFooBars(self):
                return ('foo','bar','baz')
            def _getWoot(self):
                # Parse a Woot struct at offset 0 of the backing file.
                return self.getStruct( 0, Woot )
        foo = FooLab(fd)
        self.assertEqual( foo['baz'], 'foobaz' )
        self.assertEqual( foo['bars'], ('foo','bar','baz') )
        # 'woot' is parsed lazily from fd; .one holds the first byte (0x61).
        self.assertEqual( foo['woot'].one, 0x61 )
|
"""Metadata for uniplot."""
__title__ = 'uplt-labview'
__version__ = '0.0.0'
__author__ = 'Sean Marshallsay'
__email__ = 'srm.1708@gmail.com'
__description__ = 'A uniplot parser for LabVIEW files.'
__homepage__ = 'https://github.com/Sean1708/uplt-labview'
__download__ = 'https://github.com/Sean1708/uplt-labview.git'
__license__ = 'MIT'
|
a = {'key1': 1, 'key2': 3, 'key3': 2}
b = {'key1': 1, 'key2': 2}

def inter_dict(a, b):
    """Return the key/value pairs that appear identically in both mappings.

    A key present in `a` with value None matches a key missing from `b`
    (because dict.get returns None), mirroring the original behavior.
    """
    return {key: val for key, val in a.items() if b.get(key) == val}

print(inter_dict(a, b))
|
# Build a nested {name, children} course hierarchy (faculty -> department ->
# course level -> course) from a scraped dump where each line is a Python
# literal tuple: (dept, number, title, description, credits, WQB).
file_reader = open('all_courses_output.txt', 'r')
lines = file_reader.readlines()
file_reader.close()  # BUG FIX: the handle was never closed
a = 0  # index of the next unparsed line
# Department codes grouped by faculty.
Science = ['ACMA', 'BISC', 'BPK', 'CHEM', 'EASC', 'MATH', 'MBB', 'PHYS', 'SCI', 'STAT']
AppliedScience = ['CMPT', 'ENSC', 'MACM', 'MSE', 'TEKX']
SocialScience = ['COGS', 'CRIM', 'ECON', 'ENGL', 'FNST', 'FREN', 'GA', 'GERO', 'GSWS', 'HIST',
                 'HS', 'HUM', 'IS', 'LAS', 'LBST', 'LING', 'PHIL', 'POL', 'PSYC', 'SA', 'WL']
# BUG FIX: this list was ['CA,"CMNS','IAT','PUB'] — a quoting typo that fused
# 'CA' and 'CMNS' into one bogus entry, so both departments fell through to
# "Other Courses".
Communication = ['CA', 'CMNS', 'IAT', 'PUB']
Environment = ['ARCH', 'ENV', 'EVSC', 'GEOG', 'PLAN', 'REM', 'SD']
scienceTypes = []
appliedScienceTypes = []
socialScienceTypes = []
communicationTypes = []
environmentTypes = []
otherTypes = []
# SECURITY NOTE: eval() executes each dump line as Python code. Only
# acceptable because the input file is generated locally; ast.literal_eval
# would be the safe drop-in for literal-only lines.
while a < len(lines):
    # Group consecutive lines of the same department...
    courseType = {}
    currentType = eval(lines[a])[0]
    courseLevels = []
    while a < len(lines) and eval(lines[a])[0] == currentType:
        # ...then by course level (first digit of the course number).
        courseLevel = {}
        currentLevel = eval(lines[a])[1][0]
        courses = []
        while a < len(lines) and eval(lines[a])[1][0] == currentLevel and eval(lines[a])[0] == currentType:
            course = {}
            course["name"] = currentType + ' ' + eval(lines[a])[1]
            course["title"] = eval(lines[a])[2]
            course["description"] = eval(lines[a])[3]
            course["credits"] = eval(lines[a])[4]
            course["WQB"] = eval(lines[a])[5]
            courses.append(course)
            a += 1
        courseLevel["name"] = currentLevel + "XX"
        courseLevel["children"] = courses
        courseLevels.append(courseLevel)
    courseType["name"] = currentType
    courseType["children"] = courseLevels
    # File each department under its faculty.
    if courseType["name"] in Science:
        scienceTypes.append(courseType)
    elif courseType["name"] in AppliedScience:
        appliedScienceTypes.append(courseType)
    elif courseType["name"] in SocialScience:
        socialScienceTypes.append(courseType)
    elif courseType["name"] in Communication:
        communicationTypes.append(courseType)
    elif courseType["name"] in Environment:
        environmentTypes.append(courseType)
    else:
        otherTypes.append(courseType)
# Assemble the root node of the hierarchy.
allCourses = {}
allCourses["name"] = "Courses"
allCourses["children"] = [{'name':'Faculty of Science', 'children': scienceTypes},{'name':'Faculty of Applied Science', 'children':appliedScienceTypes},
                          {'name':'Faculty of Arts and Social Science', 'children': socialScienceTypes},{'name':'Faculty of Communication, Art, and Technology', 'children': communicationTypes},
                          {'name':'Faculty of Environment', 'children': environmentTypes}, {'name':'Other Courses', 'children': otherTypes}]
# Serialize the hierarchy to JSON for the visualization layer.
import json
with open('Courses_File2.json', 'w') as fp:
    json.dump(allCourses, fp)
|
# -*- coding:utf-8 -*-
import datetime
import sys
import os
# Project root directory, e.g. /home/andy/flask_projects/bluelog
basedir = os.path.dirname(os.path.dirname(__file__))
# BUG FIX: sys.platform values are lowercase ('win32', 'linux', 'darwin'),
# so the original startswith('WIN') was always False and Windows wrongly
# received the POSIX four-slash SQLite prefix.
WIN = sys.platform.startswith('win')
if WIN:
    prefix = 'sqlite:///'  # Windows absolute paths take three slashes
else:
    prefix = 'sqlite:////'  # POSIX absolute paths take four
SQLALCHEMY_TRACK_MODIFICATIONS = False
class BaseConfig(object):
    """Shared configuration defaults for all environments.

    Secrets and mail settings come from environment variables; everything
    else is a sensible default that subclasses may override.
    """
    SECRET_KEY = os.getenv('SECRET_KEY', 'dev key')
    MAIL_SERVER = os.getenv('MAIL_SERVER')
    MAIL_USERNAME = os.getenv('MAIL_USERNAME')
    MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
    BLUELOG_EMAIL = os.getenv('BLUELOG_EMAIL')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # SQLAlchemy database URI: DATABASE_URL env var wins over the on-disk default.
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', prefix + os.path.join(basedir, 'date_dev.db'))
    # Number of blog posts per page
    BLUELOG_POST_PER_PAGE = 10
    # Number of comments per page on a single post
    BLUELOG_COMMENT_PER_PAGE = 10
    # The _sidebar.html template expects a dict of theme_name: display_name
    BLUELOG_THEMES = {'perfect_blue': 'Perfect Blue', 'black_swan': 'Black Swan'}
    # Threshold (seconds) above which a database query is reported as slow
    BLUELOG_SLOW_QUERY_THRESHOLD = 1
    # Posts per page in the admin (manage) view
    BLUELOG_MANAGE_POST_PER_PAGE = 10
    # Categories per page in the admin (manage) view
    BLUELOG_MANAGE_CATEGORY_PER_PAGE = 10
    # Comments per page in the admin (manage) view
    BLUELOG_MANAGE_COMMENT_PER_PAGE = 10
    # Kept for backward compatibility: the original (apparently misspelled)
    # name of the comments-per-page setting, in case templates/views read it.
    BLUELOG_MANAGE_MANAGE_PER_PAGE = 10
    REMEMBER_COOKIE_DURATION = datetime.timedelta(days=1)
    # REMEMBER_COOKIE_DURATION = 10
    PERMANENT_SESSION_LIFETIME = datetime.timedelta(days=1)
    # PERMANENT_SESSION_LIFETIME = datetime.timedelta(seconds=10)
class DevelopmentConfig(BaseConfig):
    """Configuration for local development (file-based SQLite database)."""
    # Same default URI as BaseConfig; restated here explicitly.
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', prefix + os.path.join(basedir, 'date_dev.db'))
    # Record query timings so slow queries can be reported.
    SQLALCHEMY_RECORD_QUERIES = True
    # Let redirects pass through instead of being intercepted by the debug toolbar.
    DEBUG_TB_INTERCEPT_REDIRECTS = False
class TestingConfig(BaseConfig):
    """Configuration for the test suite: CSRF disabled, throwaway in-memory DB."""
    TESTING = True
    WTF_CSRF_ENABLED = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' # in-memory database
class ProductionConfig(BaseConfig):
    """Production configuration: DATABASE_URL env var, or data.db under the project root."""
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', prefix + os.path.join(basedir, 'data.db'))
# Name -> config class lookup used by the application factory (create_app).
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig
}
|
import os
def get_package_data():
    """Return the package-data mapping for the astroquery.vo test suite.

    Collects glob patterns, one per data-file extension shipped with the
    tests (JSON, pickle, ECSV), all located under the ``data`` directory.
    """
    patterns = ('*.json', '*.pkl', '*.ecsv')
    paths_test = [os.path.join('data', pattern) for pattern in patterns]
    return {'astroquery.vo.tests': paths_test}
|
#
# Bug: 62211
# Title: [ yaim-wms ] Enable Glue 2.0 publishing
# Link: https://savannah.cern.ch/bugs/?62211
#
#
import os
from lib.Exceptions import *
def _check_publication(utils, lines, fmt):
    """Verify one ldapsearch output contains 'result: 0 Success'.

    Logs the outcome and raises GeneralError if the status is missing;
    `fmt` names the publication format ("glue1"/"glue2") in messages.
    """
    ok = 0
    for line in lines:
        if line.find("result: ") != -1:
            if line.split("result: ")[1].find("0 Success") != -1:
                ok = 1
    if ok == 0:
        # BUG FIX: the glue2 message previously said "ERRO:".
        utils.log_info("ERROR: Result for publication in %s format is not '0 Success'" % fmt)
        raise GeneralError("Error", "Result for publication in %s format is not '0 Success'" % fmt)
    utils.log_info("Check ok , result for publication in %s format is '0 Success'" % fmt)


def run(utils):
    """Regression test for bug 62211 (yaim-wms Glue 2.0 publishing).

    Queries the WMS BDII in both glue1 and glue2 formats, checks each for a
    successful result status, then checks the glue2 tree with GLUE Validator.
    Raises GeneralError on the first failed publication check.
    """
    bug = '62211'
    utils.log_info("Start regression test for bug %s" % (bug))
    utils.log_info("Get the publication in glue1 format")
    glue1 = utils.run_command("ldapsearch -x -H ldap://%s:2170 -b mds-vo-name=resource,o=grid" % (utils.get_WMS())).split("\n")
    utils.log_info("Get the publication in glue2 format")
    glue2 = utils.run_command("ldapsearch -x -H ldap://%s:2170 -b o=glue" % (utils.get_WMS())).split("\n")
    utils.log_info("Check the result status of each publication")
    # Same check for both formats; previously this code was duplicated inline.
    _check_publication(utils, glue1, "glue1")
    _check_publication(utils, glue2, "glue2")
    utils.log_info("Prepare directory to checkout GLUE Validator")
    os.makedirs("%s/trunk" % (utils.get_tmp_dir()))
    utils.log_info("Checkout GLUE Validator")
    utils.run_command("svn co http://svnweb.cern.ch/guest/gridinfo/glue-validator/trunk %s/trunk" % (utils.get_tmp_dir()))
    utils.log_info("Execute GLUE Validator")
    os.putenv("PYTHONPATH", "%s/trunk/lib" % (utils.get_tmp_dir()))
    utils.run_command("%s/trunk/bin/glue-validator -t glue2 -b \"o=glue\" -h %s -p 2170" % (utils.get_tmp_dir(), utils.get_WMS()))
    utils.log_info("Test OK")
    utils.log_info("End of regression test for bug %s" % (bug))
|
#!/usr/bin/env python
__author__ = 'rgolla'
import commands
import random
import subprocess
import shutil
from scipy.stats import norm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
from sklearn import metrics
from matplotlib.backends.backend_pdf import PdfPages
import pylab as pl
import argparse
def get_tag_results( key ):
    """Parse a two-column score file.

    Each line is "<label> <score>"; returns a pair of parallel lists:
    the labels (column 1, as strings) and the scores (column 2, scaled
    to percentages).

    The original opened the same file twice (once per column) and then
    called close() on handles the `with` blocks had already closed; a
    single pass does the same work.
    """
    labels = []
    scores = []
    with open( key ) as fh:
        for line in fh:
            parts = line.split()
            labels.append( parts[0] )
            scores.append( 100 * float( parts[1] ) )
    return labels, scores
# ---- command-line interface ------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument( '-pos', action='store', dest='pos', help='Text file with scores for positive images' )
parser.add_argument( '-neg', action='store', dest='neg', help='Text file with scores for negative images' )
parser.add_argument( '-output', action='store', dest='output', help='PDF file name to store output' )
results = parser.parse_args()
pos_filename = results.pos
neg_filename = results.neg
# Multi-page PDF collecting every figure produced below.
pp = PdfPages( results.output )
keyword = [ pos_filename, neg_filename ]
sorted_list = []  # NOTE(review): never used afterwards
# One histogram page per score file, with a fitted normal overlay.
for key in keyword:
    temp, data= get_tag_results( key )
    # Fit a normal distribution to the (percentage) confidence scores.
    ( mu, sigma ) = norm.fit( data )
    #histogram of the data
    n, bins, patches = plt.hist( data, 20, range=( 0, 100 ), facecolor='green', alpha=0.75 )
    # add a 'best fit' line
    y = mlab.normpdf( bins, mu, sigma )
    l = plt.plot( bins, y, 'r--', linewidth=2 )
    #plot
    plt.xlabel( key.split('.')[0] + ' frames confidence' )
    plt.ylabel( 'No_of_entries' )
    plt.title( r'$\mathrm{Histogram\ of\ IQ:}\ \mu=%.3f,\ \sigma=%.3f$' % ( mu, sigma ) )
    plt.grid( True )
    pp.savefig()
    plt.close()
with open( pos_filename ) as f3:
    pos = f3.readlines()
with open( neg_filename ) as f4:
    neg = f4.readlines()
#roc computation
# Ground truth: every positive-file line is labelled 1, negative-file line 0.
label_true1 = [ 1 for line in pos ]
label_true2 = [ 0 for line in neg ]
label_true = label_true1 + label_true2
# Scores come from column 2 of each input file (unscaled this time).
probs1 = [ float( line.split()[1] ) for line in pos ]
probs2 = [ float( line.split()[1] ) for line in neg ]
probs = probs1 + probs2
fpr, tpr, thresholds = metrics.roc_curve( label_true, probs )
#print label_true
#print probs
roc_auc = metrics.auc( fpr, tpr )
print( "Area under the ROC curve : %f" % ( roc_auc ) )
# Plot ROC curve
pl.clf()
pl.plot( fpr, tpr, label='ROC curve (area = %0.2f)' % ( roc_auc ) )
pl.plot( [0, 1], [0, 1], 'k--' )
pl.xlim( [0.0, 1.0] )
pl.ylim( [0.0, 1.0] )
pl.xlabel( 'False Positive Rate' )
pl.ylabel( 'True Positive Rate' )
pl.title( 'Receiver operating characteristic example' )
pl.legend( loc="lower right" )
pp.savefig()
pp.close()
|
###
### This file is part of Pyffle BBS.
###
### Pyffle BBS is free software: you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation, either version 3 of the License, or
### (at your option) any later version.
###
### Pyffle BBS is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with Foobar. If not, see <http://www.gnu.org/licenses/>.
###
###
## Models for SqlAlchemy version 6
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation, backref
from sqlalchemy import *
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.orm import sessionmaker
from pyffle_tables import *
from pyffle_data import *
from pyffle_editor import Editor
from datetime import datetime
import sys
import getpass
import os
import tempfile
def getIdentity():
    """Identify this module (name and version) to the host BBS."""
    ident = "pyffle_join v0.23"
    return ident
## Returns True if the version of pyffle is compatible this version of module
def confirmVersion(version):
    """Report compatibility with the host's version; currently every
    version is accepted."""
    return True
class PyffleModule:
    """JOIN module: lets the current user view message boards and toggle
    their subscription to them.

    `currentUser` and `data` are injected by the host BBS before use.
    """
    currentUser = None
    data = None

    def eventDispatched(self, event):
        # This module does not react to BBS events.
        pass

    ## UI for displaying a board list
    def listBoards(self, boards):
        """Print one numbered line per visible board; '*' marks joined boards."""
        ## Loop through board objects, display one numbered line / board
        self.data.stateChange("board_listboardsstart")
        self.data.util.cls()
        joinedBoardIds = self.data.getJoinedBoardids()
        self.data.util.println("\nBoards available (* = JOINed):\n")
        i = 1
        for board in boards:
            # Boards whose name starts with '__' are internal and hidden.
            if not board.name.startswith('__'):
                # Only list boards the user is allowed to READ (ACL + minimum level).
                if self.data.srmcheck(board.aclid,self.currentUser.username,"READ",minlevel=board.minreadlevel):
                    prefix = ' '
                    if board.id in joinedBoardIds:
                        prefix = ' * '
                    self.data.util.println(" [^" + str(i) + "^] " + prefix + str(board.name) + " - (" + board.description + ") ")
                    i = i + 1
        self.data.stateChange("board_listboardsend")

    ## UI for selecting message boards to join
    def join(self):
        """Interactive loop: prompt for a board number (toggle membership),
        [A]ll, [N]one, or [Q]uit."""
        self.data.stateChange("board_joinstart")
        self.data.stateChange("board_joinloopstart")
        userQuits = False
        while not userQuits:
            ## Load board objects, pass them to listBoards() to display
            boards = self.data.getBoards()
            self.listBoards(boards)
            ## Prompt user for board
            self.data.stateChange("board_joinpromptstart")
            choice = self.data.util.prompt("\n<Board number>, [^A^]ll, [^N^]one, [^Q^]uit JOIN> ")
            self.data.stateChange("board_joinpromptend")
            choice = choice.lower()
            if choice.isdigit():
                # Robustness fix: "0" previously toggled the LAST board via
                # Python's negative indexing, and numbers past the end raised
                # IndexError. Out-of-range input is now ignored.
                # NOTE(review): displayed numbers skip hidden/unreadable
                # boards, so they may not line up with this raw index —
                # confirm against listBoards().
                idx = int(choice)
                if 1 <= idx <= len(boards):
                    self.data.joinBoardToggle(boards[idx - 1].id)
            if choice == "q":
                self.data.stateChange("board_joinuserquit")
                userQuits = True
                break
            if choice == "a":
                self.data.stateChange("board_joinall")
                self.data.joinAll()
            if choice == "n":
                self.data.stateChange("board_unjoinall")
                self.data.unjoinAll()
            self.data.stateChange("board_joinloopend")
        self.data.stateChange("board_joinend")

    def go(self, command, args):
        """Module entry point: dispatch the 'join' command."""
        command = command.strip()
        if command == "join":
            self.data.stateChange("board_cmdjoinstart")
            self.join()
            self.data.stateChange("board_cmdjoinend")
|
#!/usr/bin/env python
import sys, array
import getopt
from ROOT import gROOT, TCanvas, TF1, TFile, gStyle, TFormula, TGraph, TGraphErrors, TH1D, TCutG, TH2D
def makeplots(files,path,name,c):
    """Draw histogram `path` from each file on canvas `c`, overlay the
    zcut-vs-mass reference curve, and save the canvas as a PNG.

    NOTE(review): only the first file's histogram is actually drawn
    (color==1); later files just print "blah" — presumably debug leftovers.
    """
    c.Clear()
    color=1
    for f in files:
        print path
        h=f.Get(path)
        #h.Print()
        h.SetLineColor(color)
        if color==1:
            h.Draw("")
        else:
            print "blah"
        # Hard-coded (mass [GeV], zcut [mm]) reference table.
        # NOTE(review): rebuilt on every loop iteration — loop-invariant,
        # could be hoisted; confirm original indentation placed it here.
        masses = array.array('d')
        zcuts = array.array('d')
        test = [
            [0.015, 39.072],
            [0.016, 39.9379],
            [0.017, 40.6044],
            [0.018, 41.1243],
            [0.019, 41.5353],
            [0.02, 41.874],
            [0.021, 42.1671],
            [0.022, 42.4281],
            [0.023, 42.6634],
            [0.024, 42.8838],
            [0.025, 43.095],
            [0.026, 43.303],
            [0.027, 43.5116],
            [0.028, 43.7343],
            [0.029, 43.961],
            [0.03, 43.9266],
            [0.031, 43.8693],
            [0.032, 43.5257],
            [0.033, 43.1753],
            [0.034, 42.8188],
            [0.035, 42.4538],
            [0.036, 42.0865],
            [0.037, 41.7118],
            [0.038, 41.3287],
            [0.039, 40.9453],
            [0.04, 40.5619],
            [0.041, 40.1773],
            [0.042, 39.7911],
            [0.043, 39.4081],
            [0.044, 39.0268],
            [0.045, 38.648],
            [0.046, 38.2636],
            [0.047, 37.8759],
            [0.048, 37.4939],
            [0.049, 37.1126],
            [0.05, 36.7295],
            [0.051, 36.3433],
            [0.052, 35.9514],
            [0.053, 35.571],
            [0.054, 35.1847],
            [0.055, 34.873],
            [0.056, 34.633],
            [0.057, 34.3992],
            [0.058, 34.1682],
            [0.059, 33.9416],
            [0.06, 33.7158],
        ]
        for l in test:
            masses.append(l[0])
            zcuts.append(l[1])
        # Overlay the zcut curve on the same pad.
        zcutgraph = TGraph(len(masses),masses,zcuts)
        zcutgraph.SetLineColor(4)
        zcutgraph.Draw("same")
        color+=1
    c.SaveAs(sys.argv[1]+"-"+name+".png")
def makenormplots(files,path,name,c):
    """Draw area-normalised copies of histogram `path` from each file,
    overlaid on canvas `c`, then save the canvas as a PNG.

    The first file is drawn as-is; each later file is copied into a new
    histogram with contents shifted down by one bin before drawing.
    NOTE(review): the reason for the one-bin shift is not evident here —
    presumably aligns binning conventions between inputs; confirm.
    """
    c.Clear()
    color=1
    for f in files:
        h=f.Get(path)#.Clone()
        #h.SetDirectory(0)
        h.SetLineColor(color)
        # Normalise to unit area so shapes are comparable.
        h.Scale(1/h.Integral())
        if color==1:
            #h.SetName("slice_36")
            h.GetXaxis().SetTitle("Vertex Z [mm]")
            h.GetYaxis().SetTitle("Arbitrary units")
            h.DrawCopy("")
        else:
            nbins = h.GetXaxis().GetNbins()
            # Same binning as h; bin i receives h's bin i+1 (one-bin shift).
            shiftedH = TH1D("shiftedH","test",h.GetNbinsX(),h.GetXaxis().GetXmin(),h.GetXaxis().GetXmax())
            for i in range(0,nbins-1):
                shiftedH.SetBinContent(i,h.GetBinContent(i+1))
                shiftedH.SetBinError(i,h.GetBinError(i+1))
            shiftedH.SetLineColor(color)
            shiftedH.DrawCopy("same")
        color+=1
    c.SaveAs(sys.argv[1]+"-"+name+".png")
# ---- command line: <output basename> <root file 1> <root file 2> -----------
options, remainder = getopt.gnu_getopt(sys.argv[1:], 'h', ['help',])
for opt, arg in options:
    if opt in ('-h', '--help'):
        print "\nUsage: "+sys.argv[0]+" <output basename> <root files>"
        print "Arguments: "
        print "\n"
        sys.exit(0)
# Exactly one basename plus two ROOT files are expected.
if (len(remainder)!=3):
    print sys.argv[0]+' <output basename> <root files>'
    sys.exit()
c = TCanvas("c","c",800,600);
outfile = TFile(remainder[0]+"-plots.root","RECREATE")
totalH = None
files=[]
keylists=[]
# Keep the TFile objects alive in a list so their histograms stay readable.
for filename in remainder[1:]:
    f=TFile(filename)
    files.append(f)
# NOTE(review): indentation lost in transit — these calls are assumed to run
# once after all files are opened; confirm they were not inside the loop.
makeplots(files,"zcut","zcut",c)
c.SetLogy(1)
makenormplots(files,"slice_36","slice-36",c)
#keylists.append(f.GetListOfKeys())
#print filename
#print f.GetListOfKeys()
#for blah in f.GetListOfKeys():
#    print blah
#if totalH is None:
#totalH=TH2D(h)
#totalH.SetDirectory(outfile)
#else:
#totalH.Add(h)
#for key in files[0].GetListOfKeys():
#print key
#print key.GetName()
#files[0].GetObjectUnchecked(key.GetName()).Draw()
#    print
outfile.Write()
outfile.Close()
sys.exit(0)
|
import zipfile
import re
import time
# File-name suffix of every entry inside channel.zip.
namesuff = '.txt'
# Starting "nothing" number for the puzzle chain.
nothing = '90052'
order = [nothing]
# I would just import ch4.py, but there are differences between the two
# returns a string - either the nothing or text if a nothing isn't found
def findnothing():
global string
zipobject = zipfile.ZipFile('channel.zip', 'r')
string = zipobject.open(str(nothing) + namesuff, 'r').read()
matchObj = re.search(r'Next nothing is (\d+)', string, flags=0)
if matchObj:
print zipobject.getinfo(str(nothing) + namesuff).comment,
return matchObj.group(1)
else:
return string
# iterate through all files (1001 to make sure we hit them all; not worried
# about going too far because it breaks when it finds no nothing)
for i in range(0, 1001):
try:
nothing = int(findnothing())
except ValueError:
print '\nValueError! There is no nothing here!\n String:\n%s' % findnothing()
break
|
from aiohttp import web
from ..app import app
from ..model.build import BUILD_STATES
# Authenticated GET endpoint; the docstring below is parsed as the Swagger
# spec for this route, so its YAML layout is significant.
@app.http_get("/api/buildstates")
@app.authenticated
async def get_buildstates(*_):
    """
    Returns a list of all buildstates.
    ---
    description: Returns a list of buildstates.
    tags:
        - BuildStates
    produces:
        - text/json
    responses:
        "200":
            description: successful
        "500":
            description: internal server error
    """
    # BUILD_STATES is a static list, so the total count is simply its length.
    data = {"total_result_count": len(BUILD_STATES), "results": BUILD_STATES}
    return web.json_response(data)
|
'''
Phan Le Son
mail: plson03@gmail.com
'''
import pyaudio
import numpy as np
import BF.Parameter as PAR
import sys
import time
import math
# ---- shared capture state (mutated from the PortAudio callback thread) ----
flgIsStop = False      # request flag: callback terminates the stream when True
idxFrame = 0           # ring-buffer slot the callback writes next
idxFrameLoad = 0       # ring-buffer slot the reader consumes next
flgLoad = [True]*PAR.CNTBUF   # per-slot flags: True once the reader consumed it
CHANNELS = 10
CHUNK = PAR.N # PAR.m*PAR.N/CHANNELS
FORMAT = pyaudio.paInt24 # paInt8
RATE = PAR.fs # sample rate
# 24-bit samples packed into 32-bit words: 3/4 of the raw 32-bit word count.
FRAMELEN = int(CHUNK * CHANNELS*3/4)
# Ring buffer holding PAR.CNTBUF packed frames as delivered by the callback.
raw_data_frames = np.zeros((FRAMELEN*PAR.CNTBUF),dtype=np.int32)
Frames_10Chnnl = np.zeros((CHUNK,CHANNELS))
# Unpacked samples for one frame (CHANNELS * CHUNK int32 values).
raw_data = np.zeros(CHANNELS * CHUNK, dtype=np.int32)
Frame_N_m = np.zeros((PAR.N,PAR.m), dtype=np.int32)
Beam_N = np.zeros((PAR.N,),dtype=np.int32)
def callback(in_data, frame_count, time_info, status):
    """PortAudio stream callback: copy the raw capture buffer into the next
    ring-buffer slot, mark it unconsumed, and advance the write index.

    Returns (raw_data, flag) per the PyAudio callback contract; the stream
    is completed once flgIsStop has been set by Stop_Read().
    """
    global idxFrame
    global raw_data_frames
    global flgIsStop
    global flgLoad
    #if flgLoad[idxFrame]==False:
    #    print("Lost samples")
    # Mark the slot as freshly written (not yet consumed by Read()).
    flgLoad[idxFrame] = False
    # NOTE(review): np.fromstring is deprecated; np.frombuffer is the modern
    # equivalent — confirm before upgrading NumPy.
    raw_data_frames[idxFrame * FRAMELEN:(idxFrame + 1) * FRAMELEN] = np.fromstring(in_data,np.int32)
    # Advance the writer index around the ring.
    if idxFrame < PAR.CNTBUF - 1:
        idxFrame = idxFrame + 1
    else:
        idxFrame = 0
    if (flgIsStop == True):
        return (raw_data, pyaudio.paComplete)
    return (raw_data, pyaudio.paContinue)
class Mic_Array_Read(object):
    '''
    Reading, demux and shifting microphones from ALSA
    the output is 8 channel microphone signal 16000Hz
    '''
    def __init__(self):
        """Open a PyAudio capture stream on the Microchip microphone array.

        Scans every audio device for a name containing 'Microchip'; exits
        the process if none is found.
        """
        self.p = pyaudio.PyAudio()
        nMics = self.p.get_device_count()
        micIndex = None
        for ind in range(0, nMics):
            info = self.p.get_device_info_by_index(ind)
            devname = info['name']
            print(devname)
            if 'Microchip' in devname:
                micIndex = ind
        if (micIndex is not None):
            print("Connecting to Microchip Microphone Device")
            self.stream = self.p.open(format=FORMAT,
                                      channels=CHANNELS,
                                      rate=RATE,
                                      input=True,
                                      input_device_index=micIndex,
                                      # name = name,
                                      frames_per_buffer=CHUNK,
                                      stream_callback=callback)
            # start the stream
        else:
            print("*** Could not find Autonomous Microphone Array Device ")
            sys.exit()
    def Read(self):
        """Block until the callback has produced a new frame, then unpack it.

        Returns (Frame_N_m, Beam_N, Dir): the PAR.m microphone channels,
        the beamformed channel (column 8), and the direction value
        (column 9, first sample).
        """
        global idxFrame, idxFrameLoad
        global raw_data
        self.stream.start_stream()
        # Busy-wait (with a short sleep) until the writer moves past the reader.
        while idxFrame == idxFrameLoad:
            time.sleep(0.001)
        flgLoad[idxFrameLoad] = True
        byte_data = raw_data_frames[idxFrameLoad * FRAMELEN:(idxFrameLoad + 1) * FRAMELEN]
        '''
        raw_data[0::4] = ((byte_data[0::3] & 0x00FFFFFF) << 8)
        raw_data[1::4] = ((byte_data[0::3] & 0xFF000000) >> 16) | ((byte_data[1::3] & 0x0000FFFF) << 16)
        raw_data[2::4] = ((byte_data[1::3] & 0xFFFF0000) >> 8) | ((byte_data[2::3] & 0x000000FF) << 24)
        raw_data[3::4] = (byte_data[2::3] & 0xFFFFFF00)
        '''
        # Unpack 3 x 32-bit packed words into 4 x 24-bit samples, each left
        # aligned in a 32-bit word. NOTE(review): the byte-order shuffle is
        # assumed to match the Microchip array's packing — confirm against
        # the device documentation before modifying.
        raw_data[0::4] = ((byte_data[0::3] & 0x000000FF) << 8) | \
            ((byte_data[0::3] & 0x0000FF00) << 8) | ((byte_data[0::3] & 0x00FF0000) << 8)
        raw_data[1::4] = ((byte_data[0::3] & 0xFF000000) >> 16) | \
            ((byte_data[1::3] & 0x000000FF) << 16) | ((byte_data[1::3] & 0x0000FF00) << 16)
        raw_data[2::4] = ((byte_data[1::3] & 0x00FF0000) >> 8) | \
            ((byte_data[1::3] & 0xFF000000) >> 8) | ((byte_data[2::3] & 0x000000FF) << 24)
        raw_data[3::4] = (byte_data[2::3] & 0x0000FF00) | \
            (byte_data[2::3] & 0x00FF0000) | (byte_data[2::3] & 0xFF000000)
        Data_Calc = raw_data / 256 # correct for building 24 bit data left aligned in 32bit words
        # Advance the consumer index around the ring.
        if (idxFrameLoad <PAR.CNTBUF - 1):
            idxFrameLoad = idxFrameLoad + 1
        else:
            idxFrameLoad = 0
        # Split channels: 0..PAR.m-1 raw mics, 8 beamformed, 9 direction.
        # The factor 20 is a fixed gain applied to both signal outputs.
        Frames_10Chnnl = np.reshape(Data_Calc, (CHUNK, CHANNELS))
        Frame_N_m = 20*np.array(Frames_10Chnnl[:,0:PAR.m])
        Beam_N = 20*np.array(Frames_10Chnnl[:,8])
        Dir = Frames_10Chnnl[0,9]
        return Frame_N_m, Beam_N, Dir
    def Stop_Read(self):
        """Ask the callback to complete the stream, then tear everything down."""
        global flgIsStop
        #thread_DOA.join()
        flgIsStop = True
        # Give the callback time to observe the flag before closing.
        time.sleep(1)
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
    def ForgetOldData(self):
        """Reset both ring-buffer indices, discarding any buffered audio."""
        global idxFrame, idxFrameLoad
        idxFrame = 0
        idxFrameLoad = 0
def Get_IdxFrame():
    # NOTE(review): takes no 'self' — if this was defined at class scope it
    # cannot be called on an instance; presumably intended as module-level.
    # Returns the callback's current write index into the ring buffer.
    global idxFrame
    return idxFrame
|
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class company(models.Model):
    # NOTE(review): PEP 8 would name this Company, but renaming would break
    # existing references and migrations, so the lowercase name is kept.
    name=models.CharField(max_length=30)   # company name
    ceo=models.CharField(max_length=20)    # chief executive's name
    city=models.CharField(max_length=20)   # headquarters city
    def get_absolute_url(self):
        # Canonical detail page for this record, via the 'detail' URL pattern.
        return reverse('detail',kwargs={'pk':self.pk})
|
#!/usr/bin/env python
###############################################################################
## ##
## Copyright (c) Calix Networks, Inc. ##
## All rights reserved. ##
## ##
###############################################################################
##
## VERSION : 1.0
## DATE : 2015/07/28 16:52:08
##
## Changes in V1.0 (2016.09.18 by qshi)
## The Infor,Warning and Error mssage change to be well-formed
## (i.e. changes from <Error> to <Error> ...</Error>) in Ex-25985
## Also the ending label added for debug and verbose log.
##
## Changes in V0.9 (2016.08 by simon)
## -p option added for platfrom automation test added in EX-26288
##
## Changes in V0.8 (2016.07.13 by qshi)
## desc field added in EX-26170
##
## Changes in V0.7 (2016.04.28 by qshi)
## verbose dump to log feature added
##
## Changes in V0.6 (2016.02.19 by qshi)
## Add htDebug sub-class
##
## Changes in V0.5 (2016.01 by qshi)
## Support XInclude (See ESA-760)
##
## Changes in V0.4 (2016.01 by qshi)
## Move the htTestCommon out of this file and make it as
## individual file.
##
## Changes in v0.3 (2015.11 by Kenny)
## Re-run the failed ports(less than or equal three) for POTS Tone
##
## Changes in v0.2 (20150.10 by qshi)
## Re-run the failed port(s) for DSL Analog test
## Save the Summary for each test
## Save the Debug info if one test failed
## Show the summary and debug when "-d" specified
##
## AUTHOR : Qing Shi
## DESCRIPTION : Primarily used for manufacturing test
##
###############################################################################
import sys
import subprocess
import getopt
from xml.etree import ElementTree as ET
from xml.etree import ElementInclude as EI
from htLog import *
from htError import *
from htBaseCommand import *
from htDefault import *
import hstUtil
from hstUtil import *
from htSensors import *
from htDsl import *
from htBcm import *
from htPots import *
from htTelnet import *
from htDebug import *
# ESA-760, support up to 2 depth of XInclude parsing (nested <xi:include> levels)
m_XINCLUDE_DEPTH = 2
# XPath prefix: search all subelements, on all levels beneath the current element.
m_XPATH_PATTERN = ".//"
HELPMSG = " *** Manufacturing Test Suite *** \n \
\n \
Usage: ./htFactory.py [-t <TestItem> | [-v] [-n] [-f] ] [-r] [-V] [-d] [-D] [-h] [-p] [-l <count>]\n \
-h : show the help menu \n \
-v : Verbose mode. Display the output on console \n \
-n : Do *Not* output the final checking result(PASSED/FAILD) \n \
Applies for some version test items. \n \
-f : Forced to kill cardmgr. Used for DSL test items only.\n \
-r : Re-run the test again \n \
-V : Show the version of this script \n \
-d : Dump the last run log of one test item in Debug log\n \
-D : Dump the last run log of one test item in Detail log\n \
-A : Dump *All* test items output from Detail log\n \
-p : Run platform software automation cases \n \
-l : Run the platform software automation case repeatedly with <count> times \n \
-t : Test the items. Avaialbe items are shown below: \n \
ddr : Memory Test \n \
cpldv : Show the CPLD version \n \
-n -v option may be needed \n \
cpldnv : Show the CPLD version on network card\n \
-n -v option may be needed \n \
cpldrw : Verify the CPLD R/W operation \n \
powerver1 : Show version of Power chip 1 on MB \n \
powerver2 : Show version of Power chip 2 on MB \n \
powerver3 : Show version of Power chip 3 on MB \n \
powerver4 : Show version of Power chip 4 on DB \n \
powerver5 : Show version of Power chip 5 on DB \n \
powerver6 : Show version of Power chip 6 on DB \n \
-n -v option may be needed for power version test \n \
power4 : Verify Voltage and Current of Power chip 4 on DB \n \
Applies for Combo and Overlay cards. \n \
power5 : Verify Voltage and Current of Power chip 5 on DB \n \
power6 : Verify Voltage and Current of Power chip 6 on DB \n \
\n \
cpu : CPU test \n \
i2c : I2C checking \n \
i2csfp : I2C SFP model present checking \n \
pci : PCI checking \n \
emmc : EMMC checking \n \
sensors : Sensors checking \n \
hotsc : Hot Swap Controller checking \n \
rtc : RTC checking \n \
timing : Si5341 Timing status checking \n \
slotid : Slot id checking \n \
vcpvp : BCM VP chip detect \n \
\n \
bpeth : Backplane Ethernet checking \n \
ftaeth : FTA Ethernet checking \n \
bpmate : Slot to Slot Ethernet checking \n \
ftauart : FTA UART checking \n \
matesiglb : Mate Signal test for only one card via loopback \n \
-f must be specified as cardmgr needs to be killed \n \
matesig : Mate Signal test for two cards \n \
-f must be specified as cardmgr needs to be killed \n \
\n \
ledon : Turn on all LEDs on(some with green color and blink) \n \
ledy : Changes some LED color to yellow and blink \n \
ledoff : Turn off all LEDs \n \
\n \
slottrlb : Slot to Slot traffic test via backplane loopback \n \
katanas : Katana Sensors checking \n \
katanam : Katana Memory Test \n \
katanatrlb : Katana Traffic via Internal Loopback Test \n \
katanatrsfp : Katana Traffic via SFP loopback modules Test \n \
sgmiika2cpu : SGMII(Katana to CPU) traffic test \n \
sfp : SFP loopback modules Test(No Katana) \n \
applicable for VCP cards \n \
\n \
afe : DSL AFE port test \n \
dsla : DSL Analog Test \n \
dslbd : DSL Bonding Test \n \
dsllb : DSL Loop Test \n \
dslv : DSL BLV Test \n \
dlvclk : DSL DLV Test including 64K clock \n \
Do *Not* run this on both cards at the same time \n \
vcc10gkrlb : 10G-KR test via VCC loopback connector \n \
applicable for DSL SLV Test \n \
or VCP 10G Kr lb \n \
vccoob : OOB test via VCC loopback connector(DSL SLV Test) \n \
*All DSL* test itesm must be followed by -f option except \n \
afe and vccoob. \n \
vcp64kclk : 64K Reference clock on VCP cards \n \
cpu2vplink : Link test between CPU and VP chip for VCP \n \
vphpi : HPI bus test \n \
subhpi : HPI bus test for subscriber board on E3-48c r2 \n \
uphpi : HPI bus test for board with uplink DSP on E3-48c r2 \n \
dslula : Uplink DSL Analog test \n \
uplinkafe : Uplink AFE port test \n \
\n \
potst : POTS Tone Test \n \
potslv : POTS Loop Voltage Test \n \
potsrv : POTS Ring Voltage Test \n \
potsb : POTS Battery Test \n \
potslc : POTS Loop Current Test \n \
\n \
dooropen : Test whether the Door is open for E3-48c r2 \n \
doorclose : Test whether the Door is close for E3-48c r2 \n \
almpin : Alarm pin test for E3-48c r2 \n \
linepwr1 : Line Power 1 test for E3-48c r2 \n \
linepwr2 : Line Power 2 test for E3-48c r2 \n \
linepwr3 : Line Power 3 test for E3-48c r2 \n \
phyconn : Physical connection using two 2 RJ21 connectors for E3-48c \n \
killcard : Util only. \n \
-f option must be specified \n \
potsbistcfg : Util only for POTS BIST limitations. \n \
available for Combo card only \n \
\n \
Example: \n \
1) Test the PCI: htFactory.py -t pci \n \
2) Dump the last run of PCI: htFactory.py -t pci -d \n \
3) Dump all verbose log: htFactory.py -A \n \
"
#EX-26077
def usage():
    # Help text is board-specific; fetched from hstUtil rather than HELPMSG.
    print getSpecBoardHlpMsg()
def show_version():
    # Script version; keep in sync with the changelog in the file header.
    print "Version 1.0"
class testCmdParse:
    """Accumulates one or more XML command elements belonging to a single
    test item and produces the matching handler object.

    Commands may be chained via the 'next' attribute: ready() returns False
    while more chained commands are expected, and getCmdHandler() selects a
    handler class based on the first command's 'type' attribute.
    """
    def __init__(self,cmd):
        # Index of the command last examined by ready().
        self.curCmdIdx = -1
        # Stays True until a chained (next=yes) command is encountered.
        self.singleCmd = True
        self.inCmd = []
        self.inCmd.append(cmd)
        self.outCmdHandler = None
    def add(self,newchild):
        # Append a chained command element to the pending list.
        self.inCmd.append(newchild);
    def ready(self):
        """Return True when no further chained command is expected."""
        self.curCmdIdx += 1
        #Is there any command follows?
        cmdNext = self.inCmd[self.curCmdIdx].get(g_CMDNEXT)
        if cmdNext is None:
            return True
        else:
            if cmdNext.lower() == g_CMDNEXT_YES:
                self.singleCmd = False
                return False
            else:
                return True
    def getCmdHandler(self):
        """Instantiate and return the handler for the accumulated commands,
        chosen by the first command's type attribute (None if unnamed)."""
        cmdName = self.inCmd[0].get(g_CMDNAME)
        if not cmdName:
            print "The Command is Empty!"
            return None
        cmdType = self.inCmd[0].get(g_CMDTYPE)
        cmdNext = self.inCmd[0].get(g_CMDNEXT)
        #create one special class if one type keyword specified
        if cmdType:
            #Temp sensors is special case
            if cmdType == g_CMDTYPE_SENSORS:
                self.outCmdHandler = htTestSensors(self.inCmd)
            elif cmdType == g_CMDTYPE_DSLHMI:
                self.outCmdHandler = htTestDsl(self.inCmd)
            elif cmdType == g_CMDTYPE_BCM:
                self.outCmdHandler = htTestBcm(self.inCmd)
            elif cmdType == g_CMDTYPE_POTS:
                self.outCmdHandler = htTestPots(self.inCmd)
            elif cmdType == g_CMDTYPE_MB_TELNET:
                self.outCmdHandler = htTelnet(self.inCmd)
            elif cmdType == g_CMDTYPE_DBG:
                self.outCmdHandler = htDebug(self.inCmd)
            elif cmdType == g_CMDTYPE_UTIL:
                self.outCmdHandler = htTestDefault(self.inCmd)
            else:
                self.outCmdHandler = htTestDefault(self.inCmd)
        else:
            self.outCmdHandler = htTestDefault(self.inCmd)
        return self.outCmdHandler
# ---- command-line flag globals (set by parseCmdLine, read by runCmds) ----
# Selected test item (upper-cased), or None if -t was not given.
tstItem_name = None
arg_verbose = False
# Display final checked result?
arg_rstShown = True
# Forced to kill cardmgr?
arg_forced = False
# Re-run the test again (-r)?
arg_repeated = False
# Dump single test item?
arg_dump_single_dbg = False
arg_dump_single_detail = False
# Run platform sw automation?
arg_plat_sw_automation = False
# Loop count of running platform sw automation repeatedly
arg_plat_sw_loop_count = 1
def parseCmdLine():
    """Parse command-line options into the module-level flag globals.

    Exits via sys.exit on -h/-V/-A or on a getopt error; all other options
    only set flags that runCmds() later reads.
    """
    global tstItem_name, arg_verbose, arg_rstShown, \
        arg_forced, arg_repeated, arg_dump_single_dbg, \
        arg_dump_single_detail, arg_plat_sw_automation, \
        arg_plat_sw_loop_count
    try:
        opts, args = getopt.getopt(sys.argv[1:], 't:vnfrdDAVhpl:')
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    # BUG FIX: the original tested `opt in ('-t')` — ('-t') is a plain
    # string, so that was a substring test that only worked by accident.
    # Plain equality is what is meant.
    for opt, arg in opts:
        if opt == '-t':
            tstItem_name = arg.upper()
        elif opt == '-v':
            arg_verbose = True
        elif opt == '-n':
            arg_rstShown = False
        elif opt == '-f':
            arg_forced = True
        elif opt == '-r':
            arg_repeated = True
        elif opt == '-d':
            arg_dump_single_dbg = True
        # EX-25985
        elif opt == '-D':
            arg_dump_single_detail = True
        elif opt == '-V':
            show_version()
            sys.exit(0)
        # EX-25985
        elif opt == '-A':
            milog = miLog()
            milog.dumpAll()
            sys.exit(0)
        elif opt == '-h':
            usage()
            sys.exit(2)
        elif opt == '-p':
            arg_plat_sw_automation = True
        elif opt == '-l':
            # SECURITY FIX: was eval(arg) — arbitrary code execution from a
            # command-line argument. int() is all that is needed here.
            arg_plat_sw_loop_count = int(arg)
        else:
            sys.exit(2)
def runCmds():
    """Locate the XML command mapping for the requested test item, run each
    mapped command through its handler, and record summary/detail/debug logs.

    Reads the module-level flags set by parseCmdLine(); prints the final
    PASS/FAIL verdict unless -n was given. Falls back to the default ('1517')
    command map when the board ID cannot be read.
    """
    #1517 is VDSLR2 EqptType. This is demo. We should first get the EqptType from BID
    if tstItem_name is not None:
        if tstItem_name.upper() == "ALL":
            print g_ERR_TBD
            exit(0)
    cmdMapFile = None
    pCmdMapping = None
    boardInfo = getBidEx()
    if boardInfo is not None:
        if arg_plat_sw_automation:
            setPlatSwAutomation()
        # Pick the per-equipment-type XML command mapping file.
        cmdMapFile = getCmdMapFile(boardInfo['EqptType'])
        if cmdMapFile is not None:
            pCmdMapping = ET.parse(cmdMapFile)
            # ESA-760
            # Expand up to m_XINCLUDE_DEPTH levels of <xi:include> elements.
            depth = 0
            while (depth < m_XINCLUDE_DEPTH):
                EI.include(pCmdMapping.getroot())
                depth += 1
            # debug
            #ET.dump(pCmdMapping.getroot())
        else:
            print g_ERR_CMD_MAPPING_FILE_NOT_FOUND
            exit(-1)
    else:
        print g_ERR_USE_DEFAULT_CMD_MAP
        cmdMapFile = getCmdMapFile('1517')
        if cmdMapFile is not None:
            pCmdMapping = ET.parse(cmdMapFile)
        else:
            print g_ERR_CMD_MAPPING_FILE_NOT_FOUND
            exit(-1)
    bTstItem = False
    # Walk the test-item table until the requested item is found.
    for tstKey, tstVal in getTstItemMapping(boardInfo['EqptType']):
        testCmd = tstKey.upper()
        if testCmd == tstItem_name:
            bTstItem = True
            cmdParse = None
            testCmd = None
            childNotEmpty = False
            cmdMappingList=pCmdMapping.findall(tstVal)
            # ESA-760
            # Retry with a recursive XPath if the plain lookup found nothing.
            if not cmdMappingList:
                itemXPath = m_XPATH_PATTERN + tstVal
                cmdMappingList = pCmdMapping.findall(itemXPath)
            # ESAA-523
            milog = miLog()
            passCnt = 0
            failCnt = 0
            summaryMsg = ""
            detailMsg = ""
            debugMsg = ""
            tstNameFmtStr = "\nTestName:[{}] \n".format(tstItem_name)
            tstNameEndFmtStr = "\nTestName:[/{}] \n".format(tstItem_name)
            summaryMsg = tstNameFmtStr + summaryMsg
            summaryMsg = summaryMsg + g_HEADERFMTSTR
            startTime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(time.time()))
            tmpDebugStr = ""
            tmpDetailStr = ""
            # -d / -D: only dump the previous run's log, do not run anything.
            if arg_dump_single_dbg:
                milog.dumpTest(tstItem_name)
                continue
            if arg_dump_single_detail:
                milog.dumpTest(tstItem_name,g_DUMP_DETAIL_STR)
                continue
            # ESA-1227
            debugMsg = tstNameFmtStr
            milog.prt_debug(debugMsg)
            # ESA-747
            if not cmdMappingList:
                testResult = g_cmp_FAIL
                tmpDebugStr= g_ERR_ITEM_NOT_FOUND_F(cmdMapFile)
            else:
                # # ESA-1227
                detailMsg = tstNameFmtStr
                milog.prt_detail(detailMsg)
                # debugMsg = tstNameFmtStr
                # milog.prt_debug(debugMsg)
                testResult = g_cmp_PASS
                # ESA-1476. Only get the last item as the findall returns a list
                # containing all matching elements in *document* order.
                cmdMappingList = cmdMappingList[-1:]
                for cmdMappingElem in cmdMappingList:
                    for child in cmdMappingElem.getchildren():
                        #print child.attrib['name'],':',child.text.strip()
                        # Start a new command chain or extend the current one.
                        if cmdParse is None:
                            childNotEmpty = True
                            cmdParse = testCmdParse(child)
                        else:
                            cmdParse.add(child)
                        if cmdParse:
                            # ready() is False while more chained commands follow.
                            if cmdParse.ready():
                                testCmdHandler = cmdParse.getCmdHandler()
                                if testCmdHandler is not None:
                                    testCmdHandler.preProcess(verbose=arg_verbose, \
                                        rstshown=arg_rstShown, killcard=arg_forced,\
                                        repeated=arg_repeated)
                                    testCmdHandler.process()
                                    # ESAA-480
                                    tmpResult = testCmdHandler.postProcess()
                                    # ESA-1227
                                    #tmpDetailStr = testCmdHandler.getDetailMsg()
                                    # ESA-1283. Move log from the end to here since there
                                    # is one case of hybrid(e.g. SGMIIKA2CPU test which is
                                    # the combination of BCM and linux commands).
                                    # EX-25985 does the same as debug log(add ending label)
                                    # to get the detail information for the phyconn test
                                    # if this test pass.
                                    #tmpDetailStr = tmpDetailStr + tstNameEndFmtStr
                                    #milog.prt_detail(tmpDetailStr)
                                    if tmpResult == g_cmp_FAIL:
                                        testResult = g_cmp_FAIL
                                    # ESAA-523
                                    #if testCmdHandler.getDebugMsg() is not None:
                                    #    tmpDebugStr = tmpDebugStr + testCmdHandler.getDebugMsg()
                                    # EX-25985
                                    #tmpDebugStr = tmpDebugStr + tstNameEndFmtStr
                                    # ESA-1283 Put log there since there is one hybrid case.
                                    #milog.prt_debug(tmpDebugStr)
                                    cmdParse = None
                                else:
                                    print g_ERR_HANDLER_NOT_FOUND
                        else:
                            print g_ERR_TEST_CMD_EMPTY
                #The cmdParse should be None. If it's not, there is one possibility
                #that the value of next in last command of config xml file(config/xxx.xml)
                #is still yes. To cover this inadvertent incorrectness, add code below
                if cmdParse is not None:
                    #No ready() invoked. Forced to run the handler
                    testCmdHandler = cmdParse.getCmdHandler()
                    if testCmdHandler is not None:
                        testCmdHandler.preProcess(verbose=arg_verbose, \
                            rstshown=arg_rstShown, killcard=arg_forced,\
                            repeated=arg_repeated)
                        testCmdHandler.process()
                        # ESAA-480
                        tmpResult = testCmdHandler.postProcess()
                        #tmpDetailStr = testCmdHandler.getDetailMsg()
                        # ESA-1283
                        # EX-25985 does the same as debug log(add ending label)
                        #tmpDetailStr = tmpDetailStr + tstNameEndFmtStr
                        #milog.prt_detail(tmpDetailStr)
                        if tmpResult == g_cmp_FAIL:
                            testResult = g_cmp_FAIL
                        # ESAA-523
                        #tmpDebugStr = tmpDebugStr + testCmdHandler.getDebugMsg()
                        # EX-25985
                        #tmpDebugStr = tmpDebugStr + tstNameEndFmtStr
                        # ESA-1283
                        #milog.prt_debug(tmpDebugStr)
                        cmdParse = None
                    else:
                        print g_ERR_HANDLER_NOT_FOUND
                # Flush the detail log (and collect debug info on failure).
                if childNotEmpty:
                    tmpDetailStr = testCmdHandler.getDetailMsg()
                    # ESA-1283. Move log from the end to here since there
                    # is one case of hybrid(e.g. SGMIIKA2CPU test which is
                    # the combination of BCM and linux commands).
                    # EX-25985 does the same as debug log(add ending label)
                    # to get the detail information for the phyconn test
                    # if this test pass.
                    tmpDetailStr = tmpDetailStr + tstNameEndFmtStr
                    milog.prt_detail(tmpDetailStr)
                    if testResult == g_cmp_FAIL:
                        tmpDebugStr = tmpDebugStr + testCmdHandler.getDebugMsg()
            if testResult == g_cmp_FAIL:
                # ESAA-523
                #if testCmdHandler.getDebugMsg() is not None:
                #tmpDebugStr = tmpDebugStr + testCmdHandler.getDebugMsg()
                # EX-25985
                tmpDebugStr = tmpDebugStr + tstNameEndFmtStr
                # ESA-1283 Put log there since there is one hybrid case.
                milog.prt_debug(tmpDebugStr)
            # ESAA-523
            endTime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(time.time()))
            if testResult == g_cmp_PASS:
                passCnt += 1
            else:
                failCnt += 1
            tmpStr = "| {:>4} | {:>4} | {} | {} |\n".format(str(passCnt),str(failCnt),startTime,endTime)
            summaryMsg = summaryMsg + tmpStr
            summaryMsg = summaryMsg + g_FOOTERFMRSTR
            milog.prt_summary(summaryMsg)
            # ESAA-480
            if arg_rstShown:
                print testResult
    # No item matched: show usage (no -t given) or a not-found error.
    if bTstItem == False:
        if tstItem_name is None:
            usage()
        else:
            print g_ERR_TEST_ITEM_NOT_FOUND
if __name__ == "__main__":
    # Parse the command line (populates the module-level arg_* globals),
    # then run the configured command sequence the requested number of times.
    parseCmdLine()
    while (arg_plat_sw_loop_count > 0):
        runCmds()
        arg_plat_sw_loop_count -= 1
|
#import matplotlib
#matplotlib.use('Agg')
import numpy as np
import glob
import string
import os
import sys
import matplotlib.pyplot as plt
from scipy.fftpack import fft,fftfreq, rfft
#from mpl_toolkits.basemap import Basemap, shiftgrid
from matplotlib.patches import Polygon
from matplotlib.colors import LogNorm
from cmath import *
import colorsys
from scipy import stats
from matplotlib.ticker import FuncFormatter
import matplotlib.ticker
import modules
from collections import OrderedDict
import operator
from netCDF4 import Dataset
from scipy.odr import Model, RealData, ODR, Data
from pylab import *
import pandas as pd
import matplotlib.dates as dates
import psutil
import gc
import matplotlib.gridspec as gridspec
# Derive the species name from the directory layout: the species is the
# 4th path component up from the current working directory.
present_dir = os.getcwd()
paths = present_dir.split("/")
species = paths[-4]
start_year = 2009
end_year = 2011
#read in obs ts data (hourly, half-processed global surface file for species)
obs_fname = '/work/home/db876/observations/surface/%s/process/GLOBAL_SURFACE_%s_2009_2011_H_HP.nc'%(species,species)
obs_refs,obs_raw_time,obs_ref_time,obs_datetime_time,obs_var,obs_lats,obs_lons,obs_alt,obs_groups,obs_raw_class,obs_anthrome_class,gap_inds = modules.read_obs_all(obs_fname,species,start_year,end_year)
#read in std model data (GEOS-Chem 4x5 standard run)
model_fname = '/work/home/db876/plotting_tools/model_files/GEOSCHEM_SURFACE_2009_2011_v1001_4x5_GEOS5_H_STD.nc'
model_raw_time,model_ref_time,model_datetime_time,model_std_var,lat_e,lon_e,lat_c,lon_c,grid_size,gridbox_count = modules.read_model_all(model_fname,species,start_year,end_year)
#get observational location tags
#EU = europe, AF = africa, NA = north america, SA = south america, ANT = antarctica, ARC = arctic, O = oceanic, OC = oceania, AS = asia
tags = modules.get_tags(obs_refs)
#--------------------------------------------------------
#load in periodic lsp data (Lomb-Scargle periodogram stats for the observations)
obs_grp = Dataset('../obs_SURFACE_H/LSP_stats.nc')
# Directories for three GEOS-Chem sensitivity ensembles.  Each list is a
# flattened 5x5 grid: NOx scaling (0.25, 0.5, 1.0, 2.0, 4.0) crossed with a
# second factor at the same scalings --
#   a: anthropogenic NMVOC, b: biogenic NMVOC, c: O3 dry deposition.
# The unscaled STD run sits at the centre of each grid.
# NOTE(review): entries named '...NMVOC1.0' (without the A/B/DRYDEPO3 tag)
# appear in all three lists -- presumably the shared 1.0-scaling runs; confirm
# the directories exist under these exact names.
alt_model_dirs_a = ['GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25ANMVOC0.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25ANMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25NMVOC1.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25ANMVOC2.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25ANMVOC4.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5ANMVOC0.25',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5ANMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5NMVOC1.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5ANMVOC2.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5ANMVOC4.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0ANMVOC0.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0ANMVOC0.5',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_STD','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0ANMVOC2.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0ANMVOC4.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0ANMVOC0.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0ANMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0NMVOC1.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0ANMVOC2.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0ANMVOC4.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0ANMVOC0.25',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0ANMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0NMVOC1.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0ANMVOC2.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0ANMVOC4.0']
alt_model_dirs_b = ['GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25BNMVOC0.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25BNMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25NMVOC1.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25BNMVOC2.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25BNMVOC4.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5BNMVOC0.25',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5BNMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5NMVOC1.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5BNMVOC2.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5BNMVOC4.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0BNMVOC0.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0BNMVOC0.5',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_STD','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0BNMVOC2.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0BNMVOC4.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0BNMVOC0.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0BNMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0NMVOC1.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0BNMVOC2.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0BNMVOC4.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0BNMVOC0.25',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0BNMVOC0.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0NMVOC1.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0BNMVOC2.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0BNMVOC4.0']
alt_model_dirs_c = ['GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25DRYDEPO30.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25DRYDEPO30.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25NMVOC1.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25DRYDEPO32.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.25DRYDEPO34.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5DRYDEPO30.25',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5DRYDEPO30.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5NMVOC1.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5DRYDEPO32.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX0.5DRYDEPO34.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0DRYDEPO30.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0DRYDEPO30.5',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_STD','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0DRYDEPO32.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX1.0DRYDEPO34.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0DRYDEPO30.25','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0DRYDEPO30.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0NMVOC1.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0DRYDEPO32.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX2.0DRYDEPO34.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0DRYDEPO30.25',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0DRYDEPO30.5','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0NMVOC1.0','GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0DRYDEPO32.0',
'GEOSCHEM_SURFACE_v1001_4x5_GEOS5_H_NOX4.0DRYDEPO34.0']
#if 'rig' in obs_refs:
#    obs_refs[obs_refs.index('rig')] = 'rig_photo'
# Per-site observed seasonal waveforms (sites x hourly samples over one year).
obs_seasonal_waveforms = obs_grp.variables['seasonal_waveform'][:]
#-----------------------------------
#get area
areas = ['SW_NA','NW_NA','NE_NA','CE_NA','SE_NA','C_NA','S_EU','SW_EU','CW_EU','CS_EU','C_EU','E_EU','NW_EU','N_EU','NE_AS','SE_AS']
area_boundaries,area_tags,area_labels = modules.area_dicts()
# Integrated |model - obs| seasonal-waveform bias, per area x ensemble member,
# one array per ensemble (a/b/c) and per season.
diff_wf_s_a_spring = np.empty((len(areas),len(alt_model_dirs_a)))
diff_wf_s_a_summer = np.empty((len(areas),len(alt_model_dirs_a)))
diff_wf_s_a_autumn = np.empty((len(areas),len(alt_model_dirs_a)))
diff_wf_s_a_winter = np.empty((len(areas),len(alt_model_dirs_a)))
diff_wf_s_b_spring = np.empty((len(areas),len(alt_model_dirs_b)))
diff_wf_s_b_summer = np.empty((len(areas),len(alt_model_dirs_b)))
diff_wf_s_b_autumn = np.empty((len(areas),len(alt_model_dirs_b)))
diff_wf_s_b_winter = np.empty((len(areas),len(alt_model_dirs_b)))
diff_wf_s_c_spring = np.empty((len(areas),len(alt_model_dirs_c)))
diff_wf_s_c_summer = np.empty((len(areas),len(alt_model_dirs_c)))
diff_wf_s_c_autumn = np.empty((len(areas),len(alt_model_dirs_c)))
diff_wf_s_c_winter = np.empty((len(areas),len(alt_model_dirs_c)))
#cut vals into seasons
# Build a reference hourly axis over 2008 and derive boolean season masks.
# NOTE(review): the [:-19] trim leaves 8766 samples, presumably matching the
# length of the stored seasonal waveforms -- confirm against LSP_stats.nc.
# `datetime` is available here via the `from pylab import *` star import.
start = datetime.datetime(year = 2008, month = 1, day = 1, hour = 0, minute = 0)
end = datetime.datetime(year = 2009, month = 1, day = 1, hour = 0, minute = 0)
ref_date_dt = pd.date_range(start,end,freq='H')[:-19]
months = np.array([d.strftime('%m') for d in ref_date_dt]).astype('int')
# DJF / MAM / JJA / SON masks over the reference year.
valid_inds_winter = (months < 3) | (months ==12)
valid_inds_spring = (months >=3) & (months <6)
valid_inds_summer = (months >= 6) & (months <9)
valid_inds_autumn = (months >= 9) & (months <12)
# For every ensemble member m: load the three alternative-model LSP files,
# then for every area compute the season-integrated absolute difference
# between the area-mean modelled and observed seasonal waveforms.
for m in range(len(alt_model_dirs_a)):
    print 'point 1'
    print m
    print '../%s/LSP_stats.nc'%(alt_model_dirs_a[m])
    print '../%s/LSP_stats.nc'%(alt_model_dirs_b[m])
    print '../%s/LSP_stats.nc'%(alt_model_dirs_c[m])
    alt_model_grp_a = Dataset('../%s/LSP_stats.nc'%(alt_model_dirs_a[m]))
    alt_model_grp_b = Dataset('../%s/LSP_stats.nc'%(alt_model_dirs_b[m]))
    alt_model_grp_c = Dataset('../%s/LSP_stats.nc'%(alt_model_dirs_c[m]))
    alt_model_seasonal_waveforms_a = alt_model_grp_a.variables['seasonal_waveform'][:]
    alt_model_seasonal_waveforms_b = alt_model_grp_b.variables['seasonal_waveform'][:]
    alt_model_seasonal_waveforms_c = alt_model_grp_c.variables['seasonal_waveform'][:]
    # NOTE(review): `day` and `year` are recomputed every iteration but never
    # read in this loop -- they look like leftovers from an earlier version.
    day = np.arange(0,24,1)
    year = np.linspace(0,12,8766,endpoint=False)
    count = 0
    for a in range(len(areas)):
        area = areas[a]
        print area
        area_grid = area_boundaries[area]
        area_tag = area_tags[area]
        area_label = area_labels[area]
        # Boolean mask of obs sites that fall inside this area.
        cut_test = modules.area_cut(area,obs_lats,obs_lons,tags,area_grid,area_tag)
        if np.all(cut_test == False):
            # No sites in the area: mark every season/ensemble cell as missing.
            diff_wf_s_a_spring[a,m] = np.NaN
            diff_wf_s_a_summer[a,m] = np.NaN
            diff_wf_s_a_autumn[a,m] = np.NaN
            diff_wf_s_a_winter[a,m] = np.NaN
            diff_wf_s_b_spring[a,m] = np.NaN
            diff_wf_s_b_summer[a,m] = np.NaN
            diff_wf_s_b_autumn[a,m] = np.NaN
            diff_wf_s_b_winter[a,m] = np.NaN
            diff_wf_s_c_spring[a,m] = np.NaN
            diff_wf_s_c_summer[a,m] = np.NaN
            diff_wf_s_c_autumn[a,m] = np.NaN
            diff_wf_s_c_winter[a,m] = np.NaN
        else:
            # Area-mean seasonal waveform per season, obs and each ensemble.
            obs_sites = obs_seasonal_waveforms[cut_test,:]
            obs_s_w_spring = np.nanmean(obs_sites[:,valid_inds_spring],axis=0)
            obs_s_w_summer = np.nanmean(obs_sites[:,valid_inds_summer],axis=0)
            obs_s_w_autumn = np.nanmean(obs_sites[:,valid_inds_autumn],axis=0)
            obs_s_w_winter = np.nanmean(obs_sites[:,valid_inds_winter],axis=0)
            model_sites_a = alt_model_seasonal_waveforms_a[cut_test,:]
            model_sites_b = alt_model_seasonal_waveforms_b[cut_test,:]
            model_sites_c = alt_model_seasonal_waveforms_c[cut_test,:]
            alt_model_s_w_a_spring = np.nanmean(model_sites_a[:,valid_inds_spring],axis=0)
            alt_model_s_w_a_summer = np.nanmean(model_sites_a[:,valid_inds_summer],axis=0)
            alt_model_s_w_a_autumn = np.nanmean(model_sites_a[:,valid_inds_autumn],axis=0)
            alt_model_s_w_a_winter = np.nanmean(model_sites_a[:,valid_inds_winter],axis=0)
            alt_model_s_w_b_spring = np.nanmean(model_sites_b[:,valid_inds_spring],axis=0)
            alt_model_s_w_b_summer = np.nanmean(model_sites_b[:,valid_inds_summer],axis=0)
            alt_model_s_w_b_autumn = np.nanmean(model_sites_b[:,valid_inds_autumn],axis=0)
            alt_model_s_w_b_winter = np.nanmean(model_sites_b[:,valid_inds_winter],axis=0)
            alt_model_s_w_c_spring = np.nanmean(model_sites_c[:,valid_inds_spring],axis=0)
            alt_model_s_w_c_summer = np.nanmean(model_sites_c[:,valid_inds_summer],axis=0)
            alt_model_s_w_c_autumn = np.nanmean(model_sites_c[:,valid_inds_autumn],axis=0)
            alt_model_s_w_c_winter = np.nanmean(model_sites_c[:,valid_inds_winter],axis=0)
            # Season-integrated absolute bias (sum over hourly samples).
            diff_wf_s_a_spring[a,m] = np.sum(np.abs(alt_model_s_w_a_spring - obs_s_w_spring))
            diff_wf_s_a_summer[a,m] = np.sum(np.abs(alt_model_s_w_a_summer - obs_s_w_summer))
            diff_wf_s_a_autumn[a,m] = np.sum(np.abs(alt_model_s_w_a_autumn - obs_s_w_autumn))
            diff_wf_s_a_winter[a,m] = np.sum(np.abs(alt_model_s_w_a_winter - obs_s_w_winter))
            diff_wf_s_b_spring[a,m] = np.sum(np.abs(alt_model_s_w_b_spring - obs_s_w_spring))
            diff_wf_s_b_summer[a,m] = np.sum(np.abs(alt_model_s_w_b_summer - obs_s_w_summer))
            diff_wf_s_b_autumn[a,m] = np.sum(np.abs(alt_model_s_w_b_autumn - obs_s_w_autumn))
            diff_wf_s_b_winter[a,m] = np.sum(np.abs(alt_model_s_w_b_winter - obs_s_w_winter))
            diff_wf_s_c_spring[a,m] = np.sum(np.abs(alt_model_s_w_c_spring - obs_s_w_spring))
            diff_wf_s_c_summer[a,m] = np.sum(np.abs(alt_model_s_w_c_summer - obs_s_w_summer))
            diff_wf_s_c_autumn[a,m] = np.sum(np.abs(alt_model_s_w_c_autumn - obs_s_w_autumn))
            diff_wf_s_c_winter[a,m] = np.sum(np.abs(alt_model_s_w_c_winter - obs_s_w_winter))
        count+=1
    #remove unneeded variables
    # NOTE(review): best-effort memory release; several of these names (e.g.
    # ave_obs_param, obs_s_w, alt_model_s_w_a) are never assigned in this
    # loop, so the bare except silently aborts the whole del-list on the
    # first missing name.
    try:
        del ave_obs_param
        del ave_alt_model_param_a
        del ave_alt_model_param_b
        del ave_alt_model_param_c
        del alt_model_seasonal_waveforms_a
        del alt_model_seasonal_waveforms_b
        del alt_model_seasonal_waveforms_c
        del obs_s_w
        del alt_model_s_w_a
        del alt_model_s_w_b
        del alt_model_s_w_c
        del area_grid
        del area_tag
        del area_label
        del cut_test
    except:
        pass
    gc.collect()
    print '\n'
# Indices into the flattened 5x5 ensemble grid grouping cells by season panel.
# NOTE(review): these index arrays are defined but not referenced in the
# plotting loop below -- confirm whether they are still needed.
spring_inds = np.array([0,2,4,6,16,18,20,22,32,34,36,38,48,50,52,54])
summer_inds = np.array([1,3,5,7,17,19,21,23,33,35,37,39,49,51,53,55])
autumn_inds = np.array([8,10,12,14,24,26,28,30,40,42,44,46,56,58,60,62])
winter_inds = np.array([9,11,13,15,25,27,29,31,41,43,45,47,57,59,61,63])
# Interactive loop: repeatedly build a 16-area x 4-season grid of bias plots
# for the ensemble chosen at the prompt, until the user answers anything
# other than 'T'.
plotter = 'T'
while plotter == 'T':
    fig = plt.figure(figsize = (14,13))
    fig.patch.set_facecolor('white')
    # 16 GridSpecs of 2x2 axes each: one 2x2 block (spring/summer/autumn/
    # winter) per geographic area, tiled 4 across x 4 down.
    gs1 = gridspec.GridSpec(2, 2)
    gs1.update(top=0.99,bottom=0.79,left=0.04,right=0.27,wspace=0,hspace=0)
    ax1 = plt.subplot(gs1[0, 0])
    ax2 = plt.subplot(gs1[0, 1])
    ax3 = plt.subplot(gs1[1, 0])
    ax4 = plt.subplot(gs1[1, 1])
    gs2 = gridspec.GridSpec(2, 2)
    gs2.update(top=0.99,bottom=0.79,left=0.28, right=0.51,wspace=0,hspace=0)
    ax5 = plt.subplot(gs2[0, 0])
    ax6 = plt.subplot(gs2[0, 1])
    ax7 = plt.subplot(gs2[1, 0])
    ax8 = plt.subplot(gs2[1, 1])
    gs3 = gridspec.GridSpec(2, 2)
    gs3.update(top=0.99,bottom=0.79,left=0.52, right=0.75,wspace=0,hspace=0)
    ax9 = plt.subplot(gs3[0, 0])
    ax10 = plt.subplot(gs3[0, 1])
    ax11 = plt.subplot(gs3[1, 0])
    ax12 = plt.subplot(gs3[1, 1])
    gs4 = gridspec.GridSpec(2, 2)
    gs4.update(top=0.99,bottom=0.79,left=0.76, right=0.99,wspace=0,hspace=0)
    ax13 = plt.subplot(gs4[0, 0])
    ax14 = plt.subplot(gs4[0, 1])
    ax15 = plt.subplot(gs4[1, 0])
    ax16 = plt.subplot(gs4[1, 1])
    gs5 = gridspec.GridSpec(2, 2)
    gs5.update(top=0.78,bottom=0.58,left=0.04, right=0.27,wspace=0,hspace=0)
    ax17 = plt.subplot(gs5[0, 0])
    ax18 = plt.subplot(gs5[0, 1])
    ax19 = plt.subplot(gs5[1, 0])
    ax20 = plt.subplot(gs5[1, 1])
    gs6 = gridspec.GridSpec(2, 2)
    gs6.update(top=0.78,bottom=0.58,left=0.28, right=0.51,wspace=0,hspace=0)
    ax21 = plt.subplot(gs6[0, 0])
    ax22 = plt.subplot(gs6[0, 1])
    ax23 = plt.subplot(gs6[1, 0])
    ax24 = plt.subplot(gs6[1, 1])
    gs7 = gridspec.GridSpec(2, 2)
    gs7.update(top=0.78,bottom=0.58,left=0.52, right=0.75,wspace=0,hspace=0)
    ax25 = plt.subplot(gs7[0, 0])
    ax26 = plt.subplot(gs7[0, 1])
    ax27 = plt.subplot(gs7[1, 0])
    ax28 = plt.subplot(gs7[1, 1])
    gs8 = gridspec.GridSpec(2, 2)
    gs8.update(top=0.78,bottom=0.58,left=0.76, right=0.99,wspace=0,hspace=0)
    ax29 = plt.subplot(gs8[0, 0])
    ax30 = plt.subplot(gs8[0, 1])
    ax31 = plt.subplot(gs8[1, 0])
    ax32 = plt.subplot(gs8[1, 1])
    gs9 = gridspec.GridSpec(2, 2)
    gs9.update(top=0.57,bottom=0.37,left=0.04, right=0.27,wspace=0,hspace=0)
    ax33 = plt.subplot(gs9[0, 0])
    ax34 = plt.subplot(gs9[0, 1])
    ax35 = plt.subplot(gs9[1, 0])
    ax36 = plt.subplot(gs9[1, 1])
    gs10 = gridspec.GridSpec(2, 2)
    gs10.update(top=0.57,bottom=0.37,left=0.28, right=0.51,wspace=0,hspace=0)
    ax37 = plt.subplot(gs10[0, 0])
    ax38 = plt.subplot(gs10[0, 1])
    ax39 = plt.subplot(gs10[1, 0])
    ax40 = plt.subplot(gs10[1, 1])
    gs11 = gridspec.GridSpec(2, 2)
    gs11.update(top=0.57,bottom=0.37,left=0.52, right=0.75,wspace=0,hspace=0)
    ax41 = plt.subplot(gs11[0, 0])
    ax42 = plt.subplot(gs11[0, 1])
    ax43 = plt.subplot(gs11[1, 0])
    ax44 = plt.subplot(gs11[1, 1])
    gs12 = gridspec.GridSpec(2, 2)
    gs12.update(top=0.57,bottom=0.37,left=0.76, right=0.99,wspace=0,hspace=0)
    ax45 = plt.subplot(gs12[0, 0])
    ax46 = plt.subplot(gs12[0, 1])
    ax47 = plt.subplot(gs12[1, 0])
    ax48 = plt.subplot(gs12[1, 1])
    gs13 = gridspec.GridSpec(2, 2)
    gs13.update(top=0.36,bottom=0.16,left=0.04, right=0.27,wspace=0,hspace=0)
    ax49 = plt.subplot(gs13[0, 0])
    ax50 = plt.subplot(gs13[0, 1])
    ax51 = plt.subplot(gs13[1, 0])
    ax52 = plt.subplot(gs13[1, 1])
    gs14 = gridspec.GridSpec(2, 2)
    gs14.update(top=0.36,bottom=0.16,left=0.28, right=0.51,wspace=0,hspace=0)
    ax53 = plt.subplot(gs14[0, 0])
    ax54 = plt.subplot(gs14[0, 1])
    ax55 = plt.subplot(gs14[1, 0])
    ax56 = plt.subplot(gs14[1, 1])
    gs15 = gridspec.GridSpec(2, 2)
    gs15.update(top=0.36,bottom=0.16,left=0.52, right=0.75,wspace=0,hspace=0)
    ax57 = plt.subplot(gs15[0, 0])
    ax58 = plt.subplot(gs15[0, 1])
    ax59 = plt.subplot(gs15[1, 0])
    ax60 = plt.subplot(gs15[1, 1])
    gs16 = gridspec.GridSpec(2, 2)
    gs16.update(top=0.36,bottom=0.16,left=0.76, right=0.99,wspace=0,hspace=0)
    ax61 = plt.subplot(gs16[0, 0])
    ax62 = plt.subplot(gs16[0, 1])
    ax63 = plt.subplot(gs16[1, 0])
    ax64 = plt.subplot(gs16[1, 1])
    axes = [ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8,ax9,ax10,ax11,ax12,ax13,ax14,ax15,ax16,ax17,ax18,ax19,ax20,ax21,ax22,ax23,ax24,ax25,ax26,ax27,ax28,ax29,ax30,ax31,ax32,ax33,ax34,ax35,ax36,ax37,ax38,ax39,ax40,ax41,ax42,ax43,ax44,ax45,ax46,ax47,ax48,ax49,ax50,ax51,ax52,ax53,ax54,ax55,ax56,ax57,ax58,ax59,ax60,ax61,ax62,ax63,ax64]
    # User picks which sensitivity ensemble and plot style to show.
    set_type = raw_input('\nANMVOC ,BNMVOC or DRYDEPO3?\n')
    plot_grid = raw_input('\nbox or contour?\n')
    area_count = 0
    ax_count = 0
    for area in areas:
        print area
        #ax.axis('off')
        area_label = area_labels[area]
        #ANMVOC cut
        area_grid_a_spring = diff_wf_s_a_spring[area_count,:]
        area_grid_a_summer = diff_wf_s_a_summer[area_count,:]
        area_grid_a_autumn = diff_wf_s_a_autumn[area_count,:]
        area_grid_a_winter = diff_wf_s_a_winter[area_count,:]
        #BNMVOC cut
        area_grid_b_spring = diff_wf_s_b_spring[area_count,:]
        area_grid_b_summer = diff_wf_s_b_summer[area_count,:]
        area_grid_b_autumn = diff_wf_s_b_autumn[area_count,:]
        area_grid_b_winter = diff_wf_s_b_winter[area_count,:]
        #drydepo3 cut
        area_grid_c_spring = diff_wf_s_c_spring[area_count,:]
        area_grid_c_summer = diff_wf_s_c_summer[area_count,:]
        area_grid_c_autumn = diff_wf_s_c_autumn[area_count,:]
        area_grid_c_winter = diff_wf_s_c_winter[area_count,:]
        # Select the ensemble matching the user's choice.
        # NOTE(review): an unrecognised answer leaves area_grid_* unset and
        # raises NameError at the reshape below.
        if set_type == 'ANMVOC':
            area_grid_spring = area_grid_a_spring
            area_grid_summer = area_grid_a_summer
            area_grid_autumn = area_grid_a_autumn
            area_grid_winter = area_grid_a_winter
        elif set_type == 'BNMVOC':
            area_grid_spring = area_grid_b_spring
            area_grid_summer = area_grid_b_summer
            area_grid_autumn = area_grid_b_autumn
            area_grid_winter = area_grid_b_winter
        elif set_type == 'DRYDEPO3':
            area_grid_spring = area_grid_c_spring
            area_grid_summer = area_grid_c_summer
            area_grid_autumn = area_grid_c_autumn
            area_grid_winter = area_grid_c_winter
        #set min and max
        # Colour scale limits are species-specific.
        # NOTE(review): a species outside this list leaves minval/maxval unset
        # and fails below -- confirm the directory layout guarantees one of
        # these four.
        if species == 'O3':
            minval = 0
            maxval = 100000
            t = [0,100000]
            t_str = ['0','+100000']
            cb_label = 'Integrated Seasonal Bias'
            cts = np.linspace(0,100000,40)
        elif species == 'NO':
            minval = 0
            maxval = 100000
            t = [0,100000]
            t_str = ['0','+100000']
            cb_label = 'Integrated Seasonal Bias'
            cts = np.linspace(0,100000,40)
        elif species == 'NO2-MOLYBDENUM':
            minval = 0
            maxval = 300000
            t = [0,300000]
            t_str = ['0','+300000']
            cb_label = 'Integrated Seasonal Bias'
            cts = np.linspace(0,300000,40)
        elif species == 'CO':
            minval = 0
            maxval = 2000000
            t = [0,2000000]
            t_str = ['0','+2000000']
            cb_label = 'Integrated Seasonal Bias'
            cts = np.linspace(0,2000000,40)
        cmap = matplotlib.cm.jet
        # Reshape the flat 25-member ensemble back to the 5x5 scaling grid
        # and mask missing (NaN) areas so they render white.
        area_grid_spring = np.reshape(area_grid_spring,(5,5))
        area_grid_summer = np.reshape(area_grid_summer,(5,5))
        area_grid_autumn = np.reshape(area_grid_autumn,(5,5))
        area_grid_winter = np.reshape(area_grid_winter,(5,5))
        masked_array_spring = np.ma.array(area_grid_spring, mask=np.isnan(area_grid_spring))
        masked_array_summer = np.ma.array(area_grid_summer, mask=np.isnan(area_grid_summer))
        masked_array_autumn = np.ma.array(area_grid_autumn, mask=np.isnan(area_grid_autumn))
        masked_array_winter = np.ma.array(area_grid_winter, mask=np.isnan(area_grid_winter))
        # NOTE(review): set_bad mutates the shared matplotlib jet colormap
        # instance for the whole process.
        cmap.set_bad('w',1.)
        if plot_grid == 'box':
            pl = axes[ax_count].pcolor(masked_array_spring,vmin = minval,vmax=maxval,cmap =cmap)
            pl = axes[ax_count+1].pcolor(masked_array_summer,vmin = minval,vmax=maxval,cmap =cmap)
            pl = axes[ax_count+2].pcolor(masked_array_autumn,vmin = minval,vmax=maxval,cmap =cmap)
            pl = axes[ax_count+3].pcolor(masked_array_winter,vmin = minval,vmax=maxval,cmap =cmap)
        elif plot_grid == 'contour':
            pl = axes[ax_count].contourf(masked_array_spring,cts,vmin = minval,vmax=maxval,cmap =cmap)
            pl = axes[ax_count+1].contourf(masked_array_summer,cts,vmin = minval,vmax=maxval,cmap =cmap)
            pl = axes[ax_count+2].contourf(masked_array_autumn,cts,vmin = minval,vmax=maxval,cmap =cmap)
            pl = axes[ax_count+3].contourf(masked_array_winter,cts,vmin = minval,vmax=maxval,cmap =cmap)
        # Blank tick labels: the gridlines only delimit the 5x5 cells.
        axes[ax_count].set_xticks([1,2,3,4])
        axes[ax_count+1].set_xticks([1,2,3,4])
        axes[ax_count+2].set_xticks([1,2,3,4])
        axes[ax_count+3].set_xticks([1,2,3,4])
        axes[ax_count].set_xticklabels(['','','',''])
        axes[ax_count+1].set_xticklabels(['','','',''])
        axes[ax_count+2].set_xticklabels(['','','',''])
        axes[ax_count+3].set_xticklabels(['','','',''])
        axes[ax_count].set_yticks([1,2,3,4])
        axes[ax_count+1].set_yticks([1,2,3,4])
        axes[ax_count+2].set_yticks([1,2,3,4])
        axes[ax_count+3].set_yticks([1,2,3,4])
        axes[ax_count].set_yticklabels(['','','',''])
        axes[ax_count+1].set_yticklabels(['','','',''])
        axes[ax_count+2].set_yticklabels(['','','',''])
        axes[ax_count+3].set_yticklabels(['','','',''])
        #ax.set_yticklabels(['0.25','0.5','1.0','2.0','4.0'])
        #axes[ax_count].axes.get_xaxis().set_visible(False)
        #axes[ax_count].axes.get_yaxis().set_visible(False)
        #axes[ax_count+1].axes.get_xaxis().set_visible(False)
        #axes[ax_count+1].axes.get_yaxis().set_visible(False)
        #axes[ax_count+2].axes.get_xaxis().set_visible(False)
        #axes[ax_count+2].axes.get_yaxis().set_visible(False)
        #axes[ax_count+3].axes.get_xaxis().set_visible(False)
        #axes[ax_count+3].axes.get_yaxis().set_visible(False)
        axes[ax_count].grid(b=True,which='major',color='white',linestyle='--',linewidth=0.42)
        axes[ax_count+1].grid(b=True,which='major',color='white',linestyle='--',linewidth=0.42)
        axes[ax_count+2].grid(b=True,which='major',color='white',linestyle='--',linewidth=0.42)
        axes[ax_count+3].grid(b=True,which='major',color='white',linestyle='--',linewidth=0.42)
        axes[ax_count+1].text(0.81, 0.85, area_label, ha='center', va='center',transform=axes[ax_count+1].transAxes,fontsize=15)
        area_count+=1
        ax_count+=4
    #plt.tight_layout(pad = 1.5)
    fig.subplots_adjust(bottom=0.08)
    fig.subplots_adjust(left=0.10)
    fig.text(0.5, 0.12, set_type, ha='center',fontsize=30)
    fig.text(0.01, 0.5, 'NOx', va='center', rotation='vertical',fontsize=30)
    # Shared horizontal colorbar for all panels.
    cbar_ax = fig.add_axes([0.58, 0.07, 0.35, 0.06])
    cb = fig.colorbar(pl,orientation='horizontal',cax=cbar_ax,ticks=t)
    cb.set_ticklabels(t_str)
    cb.ax.tick_params(labelsize=20)
    cb.set_label(cb_label,fontsize=20)
    cb.set_clim(minval, maxval)
    plt.show()
    plotter = raw_input('\nAnother plot? T or F?\n')
|
from app import app, db
from flask import render_template, request, redirect, url_for, flash, jsonify, json, send_from_directory, make_response, abort
from flask_jwt import JWT, jwt_required, current_identity
from sqlalchemy.sql import text
from app.models import User, Wish, wishes
from bs4 import BeautifulSoup
from werkzeug.datastructures import ImmutableMultiDict
from werkzeug.utils import secure_filename
from sqlalchemy.sql import exists
from random import randint
import requests
import urlparse
import datetime
import send
import time
import os
import random
@app.route('/')
def home():
    """Render the public landing page."""
    landing_page = render_template('home.html')
    return landing_page
@app.route('/application')
def Application():
    """Serve the single-page application entry point from the static folder."""
    spa_index = app.send_static_file('index.html')
    return spa_index
# SQL for fetching all wishes belonging to one user.  The user id is bound as
# the named parameter :id at execution time.  The previous text compared
# `wishes.uid = uid` -- the column against itself -- which is always true and
# therefore returned every wish in the table regardless of user.
select = """SELECT wish.pid, wish.name, wish.description, wish.url, wish.thumbnail FROM wish JOIN wishes ON wishes.pid = wish.pid WHERE wishes.uid = :id"""
def timeinfo():
    """Return today's date as a 'DD Mon YYYY' string (e.g. '05 Mar 2021')."""
    stamp = time.strftime("%d %b %Y")
    return stamp
@app.route("/api/users/register", methods=["POST"])
def signup():
if not request.form:
abort(400)
name = request.form['name']
email = request.form['email']
pword = request.form['pword']
age = request.form['age']
sex = request.form['gender']
made_on = timeinfo()
if db.session.query(User).filter_by(email = email).first() is not None and db.session.query(User).filter_by(pword = pword) is not None:
return jsonify(error=True, info={}, message="This email and password cannot be used for signing up")
while True:
uid = randint(450000,470000)
if not db.session.query(exists().where(User.uid == str(uid))).scalar():
break
user = User(uid,name,age,sex,email,pword,made_on)
db.session.add(user)
db.session.commit()
info = {'uid': user.uid, 'email': user.email,'name': user.name, 'age': user.age, 'sex': user.sex, 'made_on': user.made_on, 'uri': user.url}
return jsonify(error=None, info={'info': info}, message="Everything was fine")
@app.route("/api/users/<int:uid>/swishlist", methods=["GET","POST"])
def swishlist(uid):
user = db.session.query(User).filter_by(uid=uid).first()
if request.method == "POST":
lst = []
if not request.json:
flash(str(user)+" Sorry but something is went wrong, our bad")
abort(400)
if user:
if 'emails' in request.json:
lst.append(request.json['email1'])
lst.append(request.json['email2'])
lst.append(request.json['email3'])
if not lst:
flash(str(user)+" Sorry but something is went wrong, our bad")
abort(400)
from_name = user.name
from_addr = user.email
Topic = "The best Wishlist"
userpage = app.config["LINK"]
message = "This was my wish list check it out and tell me what you think later! " + "" + userpage
for i in lst:
send(i,user.name,user.email,Topic,message)
info = {'persons': lst}
response = jsonify({"error":None,"info":info,"message":"Success"})
return response
else:
flash(str(user)+" Sorry but we couldnt find you in our emails that were entered")
abort(404)
elif request.method == "GET":
if user:
wishlst = []
wishes = db.session.get_bind().execute(select, id=user.id)
if wishes:
for i in wishes:
wish = {'pid': i["pid"], 'name': i["name"], 'description': i["description"], 'url': i["url"], 'thumbnail_url': i["thumbnail"]}
wishlst.append(wish)
errors = None
message = "Success"
info = {"wishes": wishlst, "user": user.name}
else:
errors = True
message = "Something went wrong we couldnt find the wishes you were looking for"
info = {"wishes": wishlst, "user": user.name}
return jsonify(error=errors, info=info, message=message)
else:
flash(str(user)+" Sorry but we couldn't find you in our database")
abort(404)
@app.route("/api/users/login", methods=["POST"])
def login():
if not request.json:
flash("Something went wrong on our end. Please give us a moment")
abort(400)
if 'email' not in request.json and 'pword' not in request.json:
abort(400)
email = request.json['email']
pword = request.json['pword']
if db.session.query(User).filter_by(email = email).first() is None and db.session.query(User).filter_by(email = email).first() is None :
flash("Something went wrong on our end please give us a moment we cannot find that email in our database")
abort(400)
else:
uid=db.session.getbind().execute("""SELECT uid FROM Users WHERE email={}""".format(str(email)))
return jsonify(error=None, info={'user': uid}, message="Everything was fine")
@app.route("/api/users/<int:uid>/wishlist/<int:pid>", methods=["DELETE"])
def delete_item(uid, pid):
user = db.session.query(User).filter_by(id=uid).first()
wish = db.session.query(Wish).filter_by(pid=pid).first()
if user and wish :
wish = db.session.query(Wish).filter_by(pid=pid).first()
if wish:
db.session.delete(wish)
db.session.commit()
info = {'info': wish.pid, 'title': wish.name}
else:
flash("Something went wrong on our side please give us a moment")
abort(404)
return jsonify(error=None,info={'info': info}, message="Everything went fine")
@app.route('/api/thumbnails', methods=['GET'])
def thumbnails():
    """Echo back the 'url' field of the JSON payload as the thumbnail info."""
    if not request.json:
        flash("Something went wrong on our side please give us a moment")
        abort(400)
    info = request.json['url']
    # A falsy url is reported as an error, otherwise the call succeeded.
    errors, message = (None, "OK") if info else (True, "Something went wrong")
    return jsonify(error=errors, info={'info': info}, message=message)
@app.route("/api/users/<int:uid>/wishlist", methods=["GET","POST"])
def wishlist(uid):
user = db.session.query(User).filter_by(uid=uid).first()
if request.method == "POST":
if not request.json:
flash("Something went wrong on our side please give us a moment")
abort(400)
exp = ('name' not in request.json and 'thumbnail_url' not in request.json and 'description' not in request.json and 'url' not in request.json)
if exp:
flash("Something went wrong on our side please give us a moment")
abort(400)
while True:
pid = randint(450000,470000)
if not db.session.query(exists().where(Wish.pid == str(pid))).scalar():
break
name = request.json['name']
description = request.json['description']
url = request.json['url']
thumbnail = request.json['thumbnaill']
if user:
wish = Wish(pid,name,description,url,thumbnail)
db.session.add(wish)
db.session.commit()
err = None
info = {'pid': wish.pid, 'name': wish.name, 'description': wish.description, 'url': wish.url, 'thumbnail': wish.thumbnail}
else:
flash("Something went wrong on our side please give us a moment")
abort(404)
return jsonify(error=None, data={'info': info}, message="Everything is fine")
elif request.method == "GET":
if user:
wishlst = []
#FIGURE THIS OUT
query = text("""SELECT wish.item_id, wish.title, wish.description, wish.url, wish.thumbnail FROM wish INNER JOIN users_wishes ON users_wishes.wish_id = wish.item_id WHERE users_wishes.user_id = :id""")
wishes = db.session.get_bind().execute(query, id=user.id)
if wishes:
for i in wishes:
wish = {'pid': wish["pid"], 'name': wish["name"], 'description': wish["description"], 'url': wish["url"], 'thumbnail': wish["thumbnail"]}
wishlst.append(wish)
errors = None
message = "Success"
info = {"wishes": wishlst}
else:
errors = True
message = "No wishes found"
info = {"wishes": wishlst}
else:
flash("Something went wrong on our side please give us a moment")
abort(404)
return jsonify(error=errors, info=info, message=message)
@app.after_request
def add_header(response):
    """Attach IE compatibility and 10-minute public caching headers."""
    for header_name, header_value in [
            ('X-UA-Compatible', 'IE=Edge,chrome=1'),
            ('Cache-Control', 'public, max-age=600')]:
        response.headers[header_name] = header_value
    return response
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page together with a 404 status code."""
    body = render_template('404.html')
    return body, 404
if __name__ == '__main__':
    # Run the development server.  The port must be an int -- the previous
    # string "5000" relied on implicit coercion inside werkzeug.
    # NOTE: debug=True on host 0.0.0.0 exposes the interactive debugger to
    # the whole network; never use this configuration in production.
    app.run(debug=True, host="0.0.0.0", port=5000)
|
from character import Character
from catalogue import loot
class Mage(Character):
    """A spell-casting character that learns new abilities by reading books."""

    def __init__(self, name, current_room, health, focus, gold, atk, defense, description):
        super(Mage, self).__init__(name, current_room, health, focus, gold, atk, defense, description)
        self.special_attacks = []     # combat abilities learned from books
        self.non_combat_special = []  # utility abilities learned from books
        self.trapped = []
        self.minions = []

    def read(self, book):
        """Learn the ability taught by `book`, then trigger its read effect."""
        entry = loot[book]
        if entry.s_type == 'combat':
            self.special_attacks.append(entry.ability)
        elif entry.s_type == 'non-combat':
            self.non_combat_special.append(entry.ability)
        # The on-read side effect fires regardless of the ability's type.
        entry.on_read(entry.ability)
|
from .class_lists import ClassList
from .class_list_supply_item import ClassListSupplyItem
from .package_types import PackageType
from .supply_items import SupplyItem
from .supply_types import SupplyType
from .user_classes import UserClass |
import tkinter as tk
import tkinter.messagebox
import sqlite3
import os
import time
from tkinter import ttk
def menu_system():
def st_data():#學生資料頁面
def combo_change(event): #連動式下拉選單
st_level = st_level_combo.get()
if st_level:
comboclass["values"] = studentClasses.get(st_level)
else:
comboclass.set([])
def insertdata():
localtime = time.localtime(time.time())#紀錄時間
urtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
stna = st_name.get() #姓名
stsc = school_name.get() #學校
stl = st_level_combo.get() #學制級別
stlc = comboclass.get() #年級
stcls = st_class.get() #班級
sts = sex.get() #性別
stadr = st_address.get() #住家地址
stp = st_phone.get() #連絡電話
stec = st_EC.get() #緊急聯絡人
stecp = st_EC_phone.get() #緊急聯絡人電話
stm = st_mail.get() #電子郵件
stre = st_remark.get() #備註欄
now_tag = 1
if sts == 1:
sts = '男生'
else:
sts = '女生'
conn = sqlite3.connect( 'Student_achievement_data.sqlite' )
sqlstr = "insert into studentdata values( '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}');".format(stna, stsc, stl, stlc, stcls, sts, stadr, stp, stec, stecp, stm, stre, now_tag, urtime)
conn.execute(sqlstr)
conn.commit() #更新資料庫用
tk.messagebox.showinfo('success', '新增成功')
conn.close()
        def rebackpage():
            # Back to the menu: destroy this window and rebuild it from scratch.
            window.destroy()
            menu_system()
        # (locking the other options)
        # page title
        basdata_title = tk.Label(window, text='學生基本資料',font=("新細明體", 16) , width=20).place(x=330, y=20)
        # student name
        st_name = tk.StringVar()
        st_name_labe = ttk.Label(window, text='學生姓名:', width=20).place(x=320, y=70)
        st_name_entry = ttk.Entry(window, textvariable=st_name, width=15).place(x=380, y=70)
        # school attended
        school_name= tk.StringVar()
        st_school_labe = ttk.Label(window, text='就讀學校:', width=20).place(x=320, y=110)
        st_school_entry = ttk.Entry(window, textvariable=school_name, width=20).place(x=380, y=110)
        # school level
        st_school_level = ttk.Label(window, text='學制級別:', width=20).place(x=320, y=150)
        # mapping: school level -> grade names (drives the cascading combobox)
        studentClasses = {'國小': ['一年級', '二年級', '三年級', '四年級', '五年級', '六年級'],
                          '國中': ['一年級', '二年級', '三年級'],
                          '高中': ['一年級', '二年級', '三年級'],
                          }
        # school-level dropdown; selection triggers combo_change
        st_level_combo = ttk.Combobox(window, values=tuple(studentClasses.keys()), width=8)
        st_level_combo.place(x=380, y=150)
        st_level_combo.bind('<<ComboboxSelected>> ', combo_change)
        # grade/year
        st_school_class = tk.Label(window, text='就讀年級:')
        st_school_class.place(x=480, y=150)
        comboclass = ttk.Combobox(window,width=10)
        comboclass.place(x=550, y=150)
        # class
        st_class = tk.StringVar()
        st_class_labe = ttk.Label(window, text='班級:', width=20).place(x=650, y=150)
        st_class_entry = ttk.Entry(window, textvariable=st_class, width=10).place(x=690, y=150)
        # sex: 1 = male, 0 = female (defaults to male)
        sex_label = tk.Label(window, text='性別:').place(x=345, y=190)
        sex = tkinter.IntVar(value=1)
        st_sex_boy = tk.Radiobutton(window, variable=sex, value=1, text='男')
        st_sex_boy.place(x=390, y=190)
        st_sex_girl = tk.Radiobutton(window, variable=sex, value=0, text='女')
        st_sex_girl.place(x=450, y=190)
        # home address
        st_address = tk.StringVar()
        st_address_labe = ttk.Label(window, text='住家地址:', width=20).place(x=320, y=230)
        st_address_entry = ttk.Entry(window, textvariable=st_address, width=40).place(x=380, y=230)
        # contact phone
        st_phone = tk.StringVar()
        st_phone_labe = ttk.Label(window, text='聯絡電話:', width=20).place(x=320, y=270)
        st_phone_entry = ttk.Entry(window, textvariable=st_phone, width=20).place(x=380, y=270)
        # emergency contact and the contact's phone
        st_EC = tk.StringVar()
        st_EC_labe = ttk.Label(window, text='緊急聯絡人:', width=20).place(x=310, y=310)
        st_EC_entry = ttk.Entry(window, textvariable=st_EC, width=15).place(x=380, y=310)
        st_EC_phone = tk.StringVar()
        st_EC_phone_labe = ttk.Label(window, text='電話 :', width=20).place(x=520, y=310)
        st_EC_phone_entry = ttk.Entry(window, textvariable=st_EC_phone, width=20).place(x=560, y=310)
        # e-mail
        st_mail = tk.StringVar()
        st_mail_labe = ttk.Label(window, text='e-mail :', width=20).place(x=330, y=350)
        st_mail_entry = ttk.Entry(window, textvariable=st_mail, width=40).place(x=380, y=350)
        # remarks
        st_remark = tk.StringVar()
        st_remark_labe = ttk.Label(window, text='備註欄 :', width=20).place(x=330, y=390)
        st_remark_entry = ttk.Entry(window, textvariable=st_remark, width=40).place(x=380, y=390)
        # save to sqlite
        btnDown = tk.Button(window, text = '新 增', width=10, command=insertdata)
        btnDown.place(x=420, y=500)
        # back to the menu
        btnDown2 = tk.Button(window, text = '返 回', width=10, command=rebackpage)
        btnDown2.place(x=520, y=500)
    def insert_grades():  # grade-entry page
        def sql_compute(sqlstr):
            """Run *sqlstr* against the database and return all fetched rows."""
            conn = sqlite3.connect('Student_achievement_data.sqlite')
            cursor = conn.execute(sqlstr)
            rows = cursor.fetchall()
            conn.commit()
            conn.close()
            return rows
        def st_info(event):  # cascading dropdown: school -> grade
            # NOTE(review): these lookups build SQL with str.format on combobox
            # values — quote-fragile and injectable; should use "?" placeholders.
            nameinfo = st_name_combo.get()
            if nameinfo:
                st_gr_combo['values'] = datachange(sql_compute("select grade2 from studentdata where school = '{}'".format(nameinfo)))
            else:
                st_gr_combo.set([])
        def st_info2(event):  # grade -> class
            gradeinfo = st_gr_combo.get()
            if gradeinfo:
                st_cl_combo['values'] = datachange(sql_compute("select class from studentdata where grade2 = '{}'".format(gradeinfo)))
            else:
                st_cl_combo.set([])
        def st_info3(event):  # class -> student name
            classinfo = st_cl_combo.get()
            gradeinfo = st_gr_combo.get()
            if classinfo:
                st_p_combo['values'] = datachange(sql_compute("select name from studentdata where class = '{}' and grade2 = '{}' ".format(classinfo,gradeinfo )))
            else:
                st_p_combo.set([])
        def school_year():  # school-year choices: current ROC year back 12 years
            localtime = time.localtime(time.time())  # current time
            u = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            y = u.split('-')
            p = int(y[0]) - 1911  # Gregorian year -> ROC (Minguo) year
            rows=[p-r for r in range(12)]
            year = ['{} 學年度'.format(k) for k in rows]
            return year
        def school_year_2():  # semester choices
            year2 = ('上學期', '下學期')
            return year2
        def exam_type():  # exam-type choices
            exam = ('第一次段考(期中考)', '第二次段考(期中考)', '第三次段考(期末考)', '第四次段考(期末考)')
            return exam
def datachange(data):# 資料型態轉換
rows = [r[0] for r in data]
return list(set(rows))
def addinfo():
localtime = time.localtime(time.time())#紀錄時間
urtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
schn = st_name_combo.get()#校名
stgr = st_gr_combo.get()#年級
stcl = st_cl_combo.get()#班級
stn = st_p_combo.get()#姓名
scy = schoolyear_combo.get()#學年度
scy2 = schoolyear2_combo.get()#上下學期
scy3 = schoolyear3_combo.get() #段考
sb1 = st_point1.get()
p1 = point_combo1.get()
sb2 = st_point2.get()
p2 = point_combo2.get()
sb3 = st_point3.get()
p3 = point_combo3.get()
sb4 = st_point4.get()
p4 = point_combo4.get()
sb5 = st_point5.get()
p5 = point_combo5.get()
sb6 = st_point6.get()
p6 = point_combo6.get()
sb7 = st_point7.get()
p7 = point_combo7.get()
sb8 = st_point8.get()
p8 = point_combo8.get()
now_tag = 1
conn = sqlite3.connect( 'Student_achievement_data.sqlite' )
sqlstr = "insert into studentgrades values('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}');".format(schn, stgr, stcl, stn, scy, scy2, scy3, p1, sb1, p2, sb2, p3, sb3, p4, sb4, p5, sb5, p6, sb6, p7, sb7, p8, sb8, now_tag, urtime)
conn.execute(sqlstr)
conn.commit() #更新資料庫用
tk.messagebox.showinfo('success', '新增成功')
conn.close()
        def rebackpage1():  # back to the menu
            # Destroy this window and rebuild the main menu from scratch.
            window.destroy()
            menu_system()
def delSource1(): #刪除
name_nums = st_p_combo.get()
if name_nums:
conn = sqlite3.connect( 'Student_achievement_data.sqlite' )
sqlstr = "select * from studentgrades where studentname = {}" .format( name_nums )
cursor=conn.execute(sqlstr)
row = cursor.fetchone()
#print(row)
if row == None:
tk.messagebox.showinfo('Fail', "{} not exist!".format(name_nums))
else:
sqlstr = "update studentgrades set tag=0 where studentname ='{}'".format( name_nums )
conn.execute(sqlstr)
conn.commit() #更新資料庫用
tk.messagebox.showinfo('success', '刪除成功')
conn.close()
else:
tk.messagebox.showinfo('Fail', '請選擇姓名')
#title
basdata_title = tk.Label(window, text='學生成績登錄',font=("新細明體", 16) , width=20).place(x=330, y=20)
#學校
st_name_labe = ttk.Label(window, text='就讀學校 :').place(x=280, y=80)
sample = sql_compute("select school from studentdata where tag = 1")
st_name_combo = ttk.Combobox(window, values=datachange(sample), width=10)
st_name_combo.place(x=360, y=80)
st_name_combo.bind('<<ComboboxSelected>> ', st_info)
#年級
st_gr_labe = ttk.Label(window, text='年級 :').place(x=480, y=80)
st_gr_combo = ttk.Combobox(window, width=10)
st_gr_combo.place(x=520, y=80)
st_gr_combo.bind('<<ComboboxSelected>> ', st_info2)
#班級
st_cl_labe = ttk.Label(window, text='班級 :').place(x=640, y=80)
st_cl_combo = ttk.Combobox(window, width=10)
st_cl_combo.place(x=680, y=80)
st_cl_combo.bind('<<ComboboxSelected>> ', st_info3)
#姓名
st_p_labe = ttk.Label(window, text='學生姓名 :').place(x=280, y=120)
st_p_combo = ttk.Combobox(window, width=10)
st_p_combo.place(x=360, y=120)
#學生基本資訊
#st_msg = tk.StringVar()
#msg_lable = tk.Label(window, textvariable=st_msg, font = (8), fg='blue', justify='left')
#msg_lable.place(x=280, y=110)
#學年度/學期/考試種類
st_schoolyear_labe = ttk.Label(window, text='學年度/分類:').place(x=280, y=160)
schoolyear_combo = ttk.Combobox(window,values=school_year(), width=10)
schoolyear_combo.place(x=360, y=160)
schoolyear_combo.current(0)
schoolyear2_combo= ttk.Combobox(window,values=school_year_2(), width=10)
schoolyear2_combo.place(x=470, y=160)
schoolyear2_combo.current(0)
schoolyear3_combo= ttk.Combobox(window,values=exam_type(), width=20)
schoolyear3_combo.place(x=580, y=160)
schoolyear3_combo.current(0)
#學生分數一
subject = ('選擇科目:','國文', '英文', '數學', '自然', '社會', '生物', '歷史', '地理', '公民', '理化', '物理', '化學', '健康與體育', '英文作文', '國文作文')
st_point1 = tk.StringVar()
st_point1_labe = ttk.Label(window, text='填入分數一 :').place(x=280, y=200)
point_combo1 = ttk.Combobox(window, values=subject, width=10)
point_combo1.place(x=360, y=200)
point_combo1.current(0)
st_point1_entry = ttk.Entry(window, textvariable=st_point1, width=15).place(x=470, y=200)
st_point2 = tk.StringVar()
st_point2_labe = ttk.Label(window, text='填入分數二 :').place(x=280, y=240)
point_combo2 = ttk.Combobox(window, values=subject, width=10)
point_combo2.place(x=360, y=240)
point_combo2.current(0)
st_point2_entry = ttk.Entry(window, textvariable=st_point2, width=15).place(x=470, y=240)
st_point3 = tk.StringVar()
st_point3_labe = ttk.Label(window, text='填入分數三 :').place(x=280, y=280)
point_combo3 = ttk.Combobox(window, values=subject, width=10)
point_combo3.place(x=360, y=280)
point_combo3.current(0)
st_point3_entry = ttk.Entry(window, textvariable=st_point3, width=15).place(x=470, y=280)
st_point4 = tk.StringVar()
st_point4_labe = ttk.Label(window, text='填入分數四 :').place(x=280, y=320)
point_combo4 = ttk.Combobox(window, values=subject, width=10)
point_combo4.place(x=360, y=320)
point_combo4.current(0)
st_point4_entry = ttk.Entry(window, textvariable=st_point4, width=15).place(x=470, y=320)
st_point5 = tk.StringVar()
st_point5_labe = ttk.Label(window, text='填入分數五 :').place(x=280, y=360)
point_combo5 = ttk.Combobox(window, values=subject, width=10)
point_combo5.place(x=360, y=360)
point_combo5.current(0)
st_point5_entry = ttk.Entry(window, textvariable=st_point5, width=15).place(x=470, y=360)
st_point6 = tk.StringVar()
st_point6_labe = ttk.Label(window, text='填入分數六 :').place(x=280, y=400)
point_combo6 = ttk.Combobox(window, values=subject, width=10)
point_combo6.place(x=360, y=400)
point_combo6.current(0)
st_point6_entry = ttk.Entry(window, textvariable=st_point6, width=15).place(x=470, y=400)
st_point7 = tk.StringVar()
st_point7_labe = ttk.Label(window, text='填入分數七 :').place(x=280, y=440)
point_combo7 = ttk.Combobox(window, values=subject, width=10)
point_combo7.place(x=360, y=440)
point_combo7.current(0)
st_point7_entry = ttk.Entry(window, textvariable=st_point7, width=15).place(x=470, y=440)
st_point8= tk.StringVar()
st_point8_labe = ttk.Label(window, text='填入分數八 :').place(x=280, y=480)
point_combo8 = ttk.Combobox(window, values=subject, width=10)
point_combo8.place(x=360, y=480)
point_combo8.current(0)
st_point8_entry = ttk.Entry(window, textvariable=st_point8, width=15).place(x=470, y=480)
#顯示新增成果
#lstStudent = tk.Listbox(window, width=380)
#lstStudent.place(x=280, y=430, width=500, height=100)
#登錄sqlite
btnDown = tk.Button(window, text='新 增', width=10,command=addinfo)
btnDown.place(x=360, y=550)
#返回
btnDown2 = tk.Button(window, text='返 回', width=10,command=rebackpage1)
btnDown2.place(x=500, y=550)
#刪除
btnDown3 = tk.Button(window, text='刪 除', width=10,command=delSource1)
btnDown3.place(x=640, y=550)
    def grades_search():  # grade-lookup page
        def sql_compute(sqlstr):
            """Run *sqlstr* against the database and return all fetched rows."""
            conn = sqlite3.connect('Student_achievement_data.sqlite')
            cursor = conn.execute(sqlstr)
            rows = cursor.fetchall()
            conn.commit()
            conn.close()
            return rows
        def st_info(event):  # cascading dropdown: school -> grade
            # NOTE(review): every lookup here builds SQL with str.format —
            # quote-fragile and injectable; should use "?" placeholders.
            nameinfo = st_name_combo.get()
            if nameinfo:
                st_gr_combo['values'] = datachange(sql_compute("select grade from studentgrades where school = '{}'".format(nameinfo)))
            else:
                st_gr_combo.set([])
        def st_info2(event):  # grade -> class
            gradeinfo = st_gr_combo.get()
            if gradeinfo:
                st_cl_combo['values'] = datachange(sql_compute("select class from studentgrades where grade = '{}'".format(gradeinfo)))
            else:
                st_cl_combo.set([])
        def st_info3(event):  # class -> student name
            classinfo = st_cl_combo.get()
            gradeinfo = st_gr_combo.get()
            if classinfo:
                st_p_combo['values'] = datachange(sql_compute("select studentname from studentgrades where class = '{}' and grade = '{}' ".format(classinfo, gradeinfo )))
            else:
                st_p_combo.set([])
        def school_year(event):  # student -> recorded school years
            nameinfo = st_p_combo.get()
            classinfo = st_cl_combo.get()
            gradeinfo = st_gr_combo.get()
            if nameinfo:
                schoolyear_combo['values'] =datachange(sql_compute("select year from studentgrades where studentname ='{}' and class = '{}' and grade = '{}' ".format(nameinfo, classinfo, gradeinfo)))
            else:
                schoolyear_combo.set([])
        def school_year_2(event):  # school year -> recorded semesters
            yearinfo = schoolyear_combo.get()
            nameinfo = st_p_combo.get()
            classinfo = st_cl_combo.get()
            gradeinfo = st_gr_combo.get()
            if yearinfo:
                schoolyear2_combo['values'] =datachange(sql_compute("select semester from studentgrades where studentname ='{}' and class = '{}' and grade = '{}' and year ='{}'".format(nameinfo, classinfo, gradeinfo, yearinfo)))
            else:
                schoolyear2_combo.set([])
        def exam_type(event):  # semester -> recorded exams
            yearinfo = schoolyear_combo.get()
            nameinfo = st_p_combo.get()
            classinfo = st_cl_combo.get()
            gradeinfo = st_gr_combo.get()
            semesterinfo = schoolyear2_combo.get()
            if semesterinfo:
                schoolyear3_combo['values'] =datachange(sql_compute("select exam from studentgrades where studentname ='{}' and class = '{}' and grade = '{}' and year ='{}' and semester = '{}'".format(nameinfo, classinfo, gradeinfo, yearinfo, semesterinfo)))
            else:
                schoolyear3_combo.set([])
def datachange(data):# 資料型態轉換
rows = [r[0] for r in data]
return list(set(rows))
def datachange2(data):# 資料型態轉換 tuple >list
for r in data:
rows = list(r)
return rows[0:23]
        def rangegread(data):
            # Columns 7..22 of a studentgrades row alternate subject / score;
            # join the eight pairs into one display line.  The placeholder
            # subject '選擇科目:' (unused slot) renders as blank.
            season = ''
            for i in range(7, 23, 2):
                season += ('' if data[i] =='選擇科目:' else data[i]) + ' ' + ('' if data[i+1] =='' else data[i+1]) + ' '
            lstStudent.insert('end', season)
        def result_insert(data):
            # Render one grade record into the listbox: header lines first,
            # then the subject/score line via rangegread.
            r1 = '學校: ' + data[0]
            lstStudent.insert(0, r1)
            r2 = '年級/班級: ' + data[1] + data[2]
            lstStudent.insert(1, r2)
            r3 = '姓名: ' + data[3]
            lstStudent.insert(2, r3)
            r4 = data[4] +' '+ data[5] +'\n'+ data[6]+' '+'分數:'
            lstStudent.insert(3, r4)
            rangegread(data)
        def searchdata():
            # Collect every filter value and fetch the matching grade record.
            # NOTE(review): SQL built with str.format — injectable; use "?"
            # placeholders.
            sch = st_name_combo.get()
            grd = st_gr_combo.get()
            cla = st_cl_combo.get()
            stn = st_p_combo.get()
            ye = schoolyear_combo.get()
            sem = schoolyear2_combo.get()
            examd = schoolyear3_combo.get()
            data = datachange2(sql_compute("select * from studentgrades where school = '{}' and grade = '{}' and class = '{}' and studentname = '{}' and year = '{}' and semester ='{}' and exam ='{}'".format(sch, grd, cla, stn, ye ,sem,examd)))
            result_insert(data)
        def rebackpage2():  # back to the menu
            window.destroy()
            menu_system()
        # page title
        basdata_title = tk.Label(window, text='學生成績查詢',font=("新細明體", 16) , width=20).place(x=330, y=20)
        # school (active records only)
        st_name_labe = ttk.Label(window, text='就讀學校 :').place(x=280, y=80)
        sample = sql_compute("select school from studentgrades where tag = 1")
        st_name_combo = ttk.Combobox(window, values=datachange(sample), width=10)
        st_name_combo.place(x=360, y=80)
        st_name_combo.bind('<<ComboboxSelected>> ', st_info)
        # grade
        st_gr_labe = ttk.Label(window, text='年級 :').place(x=480, y=80)
        st_gr_combo = ttk.Combobox(window, width=10)
        st_gr_combo.place(x=520, y=80)
        st_gr_combo.bind('<<ComboboxSelected>> ', st_info2)
        # class
        st_cl_labe = ttk.Label(window, text='班級 :').place(x=640, y=80)
        st_cl_combo = ttk.Combobox(window, width=10)
        st_cl_combo.place(x=680, y=80)
        st_cl_combo.bind('<<ComboboxSelected>> ', st_info3)
        # student name
        st_p_labe = ttk.Label(window, text='學生姓名 :').place(x=280, y=120)
        st_p_combo = ttk.Combobox(window, width=10)
        st_p_combo.place(x=360, y=120)
        st_p_combo.bind('<<ComboboxSelected>> ', school_year)
        # student basic info (disabled)
        #st_msg = tk.StringVar()
        #msg_lable = tk.Label(window, textvariable=st_msg, font = (8), fg='blue', justify='left')
        #msg_lable.place(x=280, y=110)
        # school year / semester / exam type
        st_schoolyear_labe = ttk.Label(window, text='學年度/分類:').place(x=280, y=160)
        schoolyear_combo = ttk.Combobox(window, width=10)
        schoolyear_combo.place(x=360, y=160)
        schoolyear_combo.bind('<<ComboboxSelected>> ', school_year_2)
        schoolyear2_combo= ttk.Combobox(window, width=10)  # semester
        schoolyear2_combo.place(x=470, y=160)
        schoolyear2_combo.bind('<<ComboboxSelected>> ', exam_type)
        schoolyear3_combo= ttk.Combobox(window, width=20)  # which exam
        schoolyear3_combo.place(x=580, y=160)
        # result listbox
        lstStudent = tk.Listbox(window, width=380)
        lstStudent.place(x=280, y=220, width=500, height=100)
        # (label-based result display, disabled)
        #LT = tk.StringVar()
        #lstStudent = tk.Label(window, textvariable = LT, font=(10),fg='blue', justify='left')
        #lstStudent.place(x=280, y=200)
        # run the query
        btnDown = tk.Button(window, text = '查詢', command=searchdata ,width=10)
        btnDown.place(x=360, y=550)
        # back to the menu
        btnDown2 = tk.Button(window, text = '返 回', command=rebackpage2, width=10)
        btnDown2.place(x=520, y=550)
    # main window
    window = tk.Tk()
    window.geometry('850x600')
    window.title('學生成績登錄系統-Menu')
    window.resizable(0, 0)
    def menu_view():
        # menu buttons; features not implemented yet are left DISABLED
        place_null = tk.Label(window, text='', width=20).pack(anchor=tk.NW, ipady=5, pady=2)
        st_menu = ttk.Button(window, text='學生資料登錄', width=20, command=st_data, state=tk.NORMAL).pack(anchor=tk.NW, ipady=5, pady=2)
        st_grades = ttk.Button(window, text='學期成績登錄', width=20, command=insert_grades, state=tk.NORMAL).pack(anchor=tk.NW, ipady=5, pady=2)
        st_grades_inquire = ttk.Button(window, text='學期成績查詢', width=20,command=grades_search, state=tk.NORMAL).pack(anchor=tk.NW, ipady=5, pady=2)
        st_grades_data = ttk.Button(window, text='學生資料查詢', width=20, state=tk.DISABLED).pack(anchor=tk.NW, ipady=5, pady=2)
        st_grades_ana = ttk.Button(window, text='歷年成績分析', width=20, state=tk.DISABLED).pack(anchor=tk.NW, ipady=5, pady=2)
        page_back = ttk.Button(window, text='返回登錄頁面', width=20, state=tk.DISABLED).pack(anchor=tk.NW, ipady=5, pady=2)
    menu_view()
    window.mainloop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.