text stringlengths 38 1.54M |
|---|
import numpy as np
import matplotlib.pyplot as plt

# Intro demo: basic NumPy array constructors and simple matplotlib line plots.

x = np.array([1, 2, 3])
print(x)
# array([1, 2, 3])
y = np.arange(10) # like Python's range, but returns an array
print(y)
# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.linspace(0, 2, 4) # create an array with 4 equally spaced points starting with 0 and ending with 2.
print(b)
plt.figure(1)
plt.plot(b)  # single argument: values are plotted against their indices
plt.ylabel("b")
onemore = np.linspace(-np.pi, np.pi, 50)  # 50 samples over one full period
print(onemore)
a_sine_thing = np.sin(onemore)  # element-wise sine
plt.figure(2)
plt.plot(onemore)
plt.ylabel("onemore")
plt.figure(3)
plt.plot(a_sine_thing)
plt.ylabel("a_sine_thing")
plt.show()  # blocks until all three figure windows are closed
|
from feature_generation.datasets.Heatmap import Heatmap
class MoocImages(Heatmap):
    """Heatmap dataset wrapper for the "mooc-images" corpus.

    Labels are continuous post-test scores looked up per subject in the
    metadata table.
    """

    def __init__(self):
        super().__init__("mooc-images")
        # Column of the metadata file that holds the subject identifier.
        self.subject_id_column = "subject"
        # Label column: post-test score.
        self.label = "posttest"
        # Scores are continuous, not class labels.
        self.labels_are_categorical = False

    def heatmap_label(self, metadata_file, id):
        """Return the post-test score for subject *id* as an int."""
        row = metadata_file[metadata_file[self.subject_id_column] == id]
        return int(row[self.label])

    def subject_id(self, file_reference):
        """Subject id is the second-to-last component of the reference path."""
        parts = file_reference.reference.split("/")
        return int(parts[-2])

    def __str__(self):
        return super().__str__()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 21:55:15 2019
@author: Xiaoyin
"""
import numpy as np
import random
import math
import cv2
import os
import glob
import shutil
def confidence_(logits):
    """Confidence of one prediction: -log of the smallest softmax probability."""
    probabilities = softmax(logits)
    return -1 * math.log(min(probabilities))
def confidence(predicted_label_dict, predicted_logits_dict):
    """Count positive predictions and compute their mean confidence.

    Args:
        predicted_label_dict: sequence of predicted labels (1 == positive).
        predicted_logits_dict: per-sample logits; entry [i][0] holds the
            logit vector for sample i.

    Returns:
        (target, avg_confidence): the number of samples predicted positive
        and their average confidence. When no sample is positive the
        average is 0 (the original divided by zero in that case).
    """
    total_confidence = 0
    target = 0
    for i in range(len(predicted_label_dict)):
        if predicted_label_dict[i] == 1:
            target += 1
            total_confidence += confidence_(predicted_logits_dict[i][0])
    # Guard: avoid ZeroDivisionError when nothing was predicted positive.
    avg_confidence = total_confidence / target if target else 0
    return target, avg_confidence
def softmax(array):
    """Numerically stable softmax.

    Subtracting the maximum logit before exponentiating prevents
    OverflowError for large inputs (the original raised for logits
    around 710+) while leaving the result mathematically unchanged.

    Args:
        array: sequence of logits.

    Returns:
        list of probabilities summing to 1 (empty list for empty input,
        matching the original behavior).
    """
    if not len(array):
        return []
    peak = max(array)
    exps = [math.exp(value - peak) for value in array]
    total = sum(exps)
    return [e / total for e in exps]
def make(a, b, image, normal_size=224):
    """Copy a square patch of *image* with top-left corner (a, b).

    Args:
        a, b: top-left row/column of the patch.
        image: source image indexable as image[row][col][channel].
        normal_size: side length of the square patch.

    Returns:
        float ndarray of shape (normal_size, normal_size, 3) whose values
        are truncated to integers, matching the original per-element
        int() loop.
    """
    patch = np.zeros((normal_size, normal_size, 3))
    src = np.asarray(image)
    # Vectorized copy replaces the O(normal_size^2 * 3) Python loop;
    # astype(int) truncates toward zero exactly like int() did per element.
    patch[:, :, :] = src[a:a + normal_size, b:b + normal_size, :3].astype(int)
    return patch
def copy(x1, y1, x2, y2, image):
    """Copy the rectangular region of *image* anchored at (x1, y1).

    Args:
        x1, y1: top-left row/column of the region.
        x2, y2: opposite corner; only |x1-x2| and |y1-y2| (the region
            height and width) are used.
        image: source image indexable as image[row][col][channel].

    Returns:
        float ndarray of shape (|x1-x2|, |y1-y2|, 3) with values truncated
        to integers, matching the original per-element int() loop.
    """
    height = abs(x1 - x2)
    width = abs(y1 - y2)
    patch = np.zeros((height, width, 3))
    src = np.asarray(image)
    # Vectorized copy of the region; astype(int) truncates like int().
    patch[:, :, :] = src[x1:x1 + height, y1:y1 + width, :3].astype(int)
    return patch
def tensor(a, b, c):
    """Create a float zero tensor of shape (a, b, c).

    Replaces the original construction, which appended the *same*
    np.zeros((b, c)) object a times to a list before copying it with
    np.asarray — one direct allocation gives the identical result.
    """
    return np.zeros((a, b, c))
def pic_cut(image, n, normal_size=224):
    """Resize *image* to (n*normal_size)^2 and split it into n*n tiles.

    Tiles are returned row by row, left to right.
    """
    side = n * normal_size
    image = cv2.resize(image, (side, side), interpolation=cv2.INTER_CUBIC)
    tiles = []
    row = 0
    for _ in range(n):
        col = 0
        for _ in range(n):
            tiles.append(make(row, col, image, normal_size))
            col += normal_size
        row += normal_size
    return tiles
def random_cut_(image, normal_size=224):
    """Cut one random normal_size patch from *image*.

    Returns:
        (patch, row, col): the patch and its top-left corner.
    """
    row = int(random.random() * (len(image) - normal_size))
    col = int(random.random() * (len(image[0]) - normal_size))
    return make(row, col, image, normal_size), row, col
def random_cut(image, n, normal_size=224):
    """Cut *n* independently positioned random patches from *image*."""
    patches = []
    for _ in range(n):
        row = int(random.random() * (len(image) - normal_size))
        col = int(random.random() * (len(image[0]) - normal_size))
        patches.append(make(row, col, image, normal_size))
    return patches
def enhance(gray_image):
    """Square-law contrast enhancement, in place: v -> (v/255)^2 * 255.

    Mutates and returns *gray_image*; dark values are suppressed more
    than bright ones. For integer arrays the assignment truncates,
    exactly as the original element-wise loop did.
    """
    image = gray_image
    for row in range(len(image)):
        for col in range(len(image[row])):
            value = image[row][col]
            image[row][col] = (value / 255) * (value / 255) * 255
    return image
def gradient(image):
    """First-order gradient magnitude map: |dx| + |dy| per pixel.

    Args:
        image: 2-D ndarray (grayscale).

    Returns:
        float ndarray of the same shape where
        result[x, y] = |f[x+1, y] - f[x, y]| + |f[x, y+1] - f[x, y]|
        for x < rows-1 and y < cols-1; the last row and column stay 0,
        matching the original loop bounds.
    """
    row, column = image.shape
    moon_f = np.copy(image).astype("float")
    result = np.zeros((row, column))
    # Vectorized equivalent of the original per-pixel double loop.
    gx = np.abs(moon_f[1:, :-1] - moon_f[:-1, :-1])
    gy = np.abs(moon_f[:-1, 1:] - moon_f[:-1, :-1])
    result[:-1, :-1] = gx + gy
    return result
def pack_images_up(images, photo):
    """Convert a list of images into 224x224 outputs per *photo* mode.

    Modes:
        'gradient': grayscale -> cubic resize -> gradient map.
        'gray':     grayscale -> cubic resize.
        'enhance':  grayscale -> square-law enhance -> cubic resize.
        other:      plain resize of the raw image.

    Note: cv2.cvtColor requires an np.ndarray with dtype uint8, hence the
    astype conversion in every branch.
    """
    packed_images = []
    if photo == 'gradient':
        for raw in images:
            img = np.array(raw).astype(np.uint8)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            # gradient() expects the resized grayscale tile.
            resized = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
            packed_images.append(gradient(resized))
    elif photo == 'gray':
        for raw in images:
            img = np.array(raw).astype(np.uint8)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            packed_images.append(cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC))
    elif photo == 'enhance':
        for raw in images:
            img = np.array(raw).astype(np.uint8)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            packed_images.append(cv2.resize(enhance(img), (224, 224), interpolation=cv2.INTER_CUBIC))
    else:
        for raw in images:
            img = np.array(raw).astype(np.uint8)
            packed_images.append(cv2.resize(img, (224, 224)))
    return packed_images
def pack_image_up_(image, photo):
    """Convert a single image per *photo* mode ('gradient', 'gray', other).

    Bug fix: the original used two independent `if` statements, so for
    photo == 'gradient' the trailing `else` (paired only with the 'gray'
    test) overwrote the computed gradient map with the raw uint8 image.
    An if/elif/else chain preserves each branch's result.

    Returns:
        ndarray: gradient map, resized grayscale image, or the raw image
        cast to uint8 depending on *photo*.
    """
    if photo == 'gradient':
        img = np.array(image).astype(np.uint8)
        # cv2.cvtColor requires an np.ndarray with dtype uint8.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        packed_image = gradient(cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC))
    elif photo == 'gray':
        img = np.array(image).astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        packed_image = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
    else:
        img = np.array(image)
        packed_image = img.astype(np.uint8)
    return packed_image
def new_txt():
    """Create ./test_txt under the current directory plus 19 empty files
    named 1.txt .. 19.txt, printing each created name.

    Fixes over the original: file handles are closed (with-statement),
    the path uses os.path.join instead of a hard-coded Windows '\\'
    separator, and the directory is created before being announced.
    """
    directory = os.path.join(os.getcwd(), 'test_txt')
    if not os.path.exists(directory):
        os.makedirs(directory)
    print("Created directory:" + "test_txt")
    print("The created TXT files:")
    for file in range(1, 20):
        print(str(file) + '.txt')
        # 'w' creates/truncates; the with-block closes the handle promptly.
        with open(os.path.join(directory, str(file) + '.txt'), "w"):
            pass
def record(k,area,packed_images,image,output_dir,red=255,green=255,blue=255):
    '''
    Write the results for run *k* under output_dir/<k>: the flattened
    hit-mask `area` as area.txt, each flagged tile as images/<n>.jpg, and
    two overlay images with the tiles' gradient maps painted in.
    '''
    normal_size=224  # tile side length used throughout this module
    images=packed_images
    count=0
    graph=[]   # `area` refolded into an n x n grid of rows
    array=[]   # current row being accumulated
    n=int(math.sqrt(len(area)))  # area is assumed to be a flattened square grid
    # Recreate output_dir/<k> and output_dir/<k>/images from scratch.
    if os.path.exists(output_dir+'/'+str(k)):
        shutil.rmtree(output_dir+'/'+str(k))
    os.makedirs(output_dir+'/'+str(k))
    if os.path.exists(output_dir+'/'+str(k)+'/images'):
        shutil.rmtree(output_dir+'/'+str(k)+'/images')
    os.makedirs(output_dir+'/'+str(k)+'/images')
    output_dir=output_dir+'/'+str(k)
    # Fold the flat `area` sequence into rows of length n.
    for i in range(len(area)):
        array.append(area[i])
        count+=1
        if count%n ==0:
            graph.append(array)
            array=[]
            count=0
    # Persist the grid as whitespace-separated text. Open mode 'x' fails
    # if the file exists, hence the explicit remove first.
    if os.path.exists(output_dir+'/area.txt'):
        os.remove(output_dir+'/area.txt')
    f = open(output_dir+'/area.txt','x')
    for i in range(len(graph)):
        print(graph[i])
        for j in range(len(graph[i])):
            f.write(str(graph[i][j]))
            f.write(' ')
        f.write('\n')
    f.close()
    # Reload as a 2-D array; from here len(area) is the grid side n.
    area=np.loadtxt(output_dir+'/area.txt')
    image_=tensor(len(area)*normal_size,len(area)*normal_size,3)
    images_=[]
    # Collect only the tiles flagged 1, in row-major order.
    for i in range(len(area)):
        for j in range(len(area)):
            if area[i][j] ==1:
                images_.append(images[len(area)*i+j])
    # `count` is 0 here: len(area) was n*n, so the fold above ended
    # exactly on a row boundary and reset it. It now numbers saved tiles.
    for i in range(len(area)):
        for j in range(len(area[i])):
            if area[i][j]==1:
                #cv2.imwrite(output_dir+'/fuck'+str(count)+'.jpg',images[count])
                cv2.imwrite(output_dir+'/images/'+str(count)+'.jpg',images_[count])
                images_[count]=cv2.cvtColor(images_[count],cv2.COLOR_RGB2GRAY)
                # Paint the tile's gradient onto the source image (red only)
                # and onto a blank canvas (caller-chosen colour).
                image=draw_map(i*normal_size,j*normal_size,image,gradient(images_[count]),red,0,0)
                image_=draw_map(i*normal_size,j*normal_size,image_,gradient(images_[count]),red,green,blue)
                count+=1
    cv2.imwrite(output_dir+'/draw_image'+str(k)+'.jpg',image)
    cv2.imwrite(output_dir+'/draw_image_'+str(k)+'.jpg',image_)
def resort(area,packed_images,predicted_label_dict,predicted_logits_dict,image,normal_size=224):
    """Partition tiles by predicted label and mark positives on the image.

    For every tile predicted 1: set its cell in `area` (in place), keep
    the tile and its logits, and draw a green rectangle on `image` at the
    tile's grid position.

    Returns:
        (area, image, logits_dict, images_1): updated mask, annotated
        image, logits of the positive tiles, and the positive tiles.
    """
    logits_dict=[]
    images_1=[]  # tiles predicted positive
    images_0=[]  # tiles predicted negative (collected but never returned)
    n=int(math.sqrt(len(area)))  # grid side: area is a flattened n x n mask
    for i in range(len(packed_images)):
        if predicted_label_dict[i]==1:
            area[i]=1;
            images_1.append(packed_images[i])
            logits_dict.append(predicted_logits_dict[i])
            # Tile i sits at row i//n, column i%n of the grid.
            y=normal_size*(int((i)/n))
            x=normal_size*((i)%n)
            cv2.rectangle(image,(x,y),(x+normal_size,y+normal_size),(0,255,0),3)
        else:
            images_0.append(packed_images[i])
    return area,image,logits_dict,images_1
def gradient_function(max_target, max_avg_score, target, avg_score, preference):
    """Return True when the current best (max_*) strictly beats the candidate.

    Weighted objective used by the 'quick' method in focal:
        'amount':   0.7 * target + 0.3 * avg_score
        'accuracy': 0.4 * target + 0.7 * avg_score

    Bug fix: the right-hand side previously reused max_avg_score instead
    of the candidate's avg_score, so the candidate's average score was
    never actually compared.
    """
    if preference == 'amount':
        if max_target * 0.7 + max_avg_score * 0.3 > target * 0.7 + avg_score * 0.3:
            return True
    if preference == 'accuracy':
        if max_target * 0.4 + max_avg_score * 0.7 > target * 0.4 + avg_score * 0.7:
            return True
    return False
def max_function(max_target, max_avg_score, target, avg_score, preference):
    """Return True when the candidate strictly beats the current best (max_*).

    Mirror of gradient_function with the comparison reversed; same
    weighted objectives per preference.

    Bug fix: the right-hand side previously reused max_avg_score instead
    of the candidate's avg_score, so the candidate's average score was
    never actually compared.
    """
    if preference == 'amount':
        if max_target * 0.7 + max_avg_score * 0.3 < target * 0.7 + avg_score * 0.3:
            return True
    if preference == 'accuracy':
        if max_target * 0.4 + max_avg_score * 0.7 < target * 0.4 + avg_score * 0.7:
            return True
    return False
def get_batch(images, a, b):
    """Return the inclusive slice images[a..b] as one batch."""
    return images[a:b + 1]
def draw_map(a, b, image, images, red=255, green=0, blue=0):
    """Paint (blue, green, red) into *image* wherever the 224x224 mask
    *images* is >= 20, anchoring the mask's origin at (a, b).

    Mutates and returns *image* (channel order is BGR, as in OpenCV).
    """
    normal_size = 224
    for row in range(normal_size):
        for col in range(normal_size):
            # Threshold: only sufficiently strong mask values get painted.
            if images[row][col] >= 20:
                pixel = image[a + row][b + col]
                pixel[0] = blue
                pixel[1] = green
                pixel[2] = red
    return image
'''
def draw(images_path='./predictions/images/',image_path='./predictions/prediction.jpg',area_path='./predictions/area.txt',red=255,green=0,blue=0):
aborted ! ! ! !
draw the image according to the images,area
i forget waht the fuck it is ,too 3.24
normal_size=224
area=np.loadtxt(area_path)
images_=[]
images_path = os.path.join(images_path, '*.jpg')
name=[]
for image_file in glob.glob(images_path):
img = cv2.imread(image_file,0)
#print(image_file)
#img = cv2.resize(img,(224,224),interpolation=cv2.INTER_CUBIC)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#print(img)
img=gradient(img)
images_.append(img)
label = int(image_file.split('_')[-2].split('_')[-1])
name.append(label)
images=[]
for i in range(len(name)):
for j in range(len(name)):
if name[j] ==i:
images.append(images_[j])
#sort
image=cv2.imread(image_path)
count=0
for i in range(len(area)):
for j in range(len(area[i])):
if area[i][j]==1:
#print(i,j)
#print(len(images),count)
image=draw_map(i*normal_size,j*normal_size,image,images[count],red,green,blue)
count+=1
cv2.imwrite('./predictions/draw_image.jpg',image)
def draw_(images_path,image_path,area_path='./predictions/area.txt',red=255,green=0,blue=0):
draw the image according to the images,area
default :
images_path: target (images) size:(224,224)
aborted ! ! !
normal_size=224
area=np.loadtxt(area_path)
images_=[]
images_path = os.path.join(images_path, '*.jpg')
name=[]
for image_file in glob.glob(images_path):
img = cv2.imread(image_file,0)
#print(image_file)
#img = cv2.resize(img,(224,224),interpolation=cv2.INTER_CUBIC)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#print(img)
img=gradient(img)
images_.append(img)
label = int(image_file.split('_')[-2].split('_')[-1])
name.append(label)
images=[]
for i in range(len(name)):
for j in range(len(name)):
if name[j] ==i:
images.append(images_[j])
#sort
image=cv2.imread(image_path)
image = cv2.resize(image,(len(area)*normal_size,len(area)*normal_size),interpolation=cv2.INTER_CUBIC)
image_=tensor(len(area)*normal_size,len(area)*normal_size,3)
count=0
for i in range(len(area)):
for j in range(len(area[i])):
if area[i][j]==1:
#print(i,j)
#print(len(images),count)
image=draw_map(i*normal_size,j*normal_size,image,gradient(images[count]),red,green,blue)
image_=draw_map(i*normal_size,j*normal_size,image_,gradient(images[count]),red,green,blue)
count+=1
cv2.imwrite('../'+images_path+'/draw_image.jpg',image)
cv2.imwrite('../'+images_path+'/last_image_.jpg',image_)
'''
def joint(path,Gradient=False,red=255,green=255,blue=255):
    '''
    Reassemble the per-tile outputs under path/images_unet into one large
    canvas, painting only the tiles flagged in path/area.txt, and write
    the result to path/uimage.jpg (plus a hard-coded secondary copy).
    '''
    images_path=path+'/images_unet/'
    area_path=path+'/area.txt'
    normal_size=224
    area=np.loadtxt(area_path)
    images_=[]
    n=len(area)
    # Blank canvas large enough for the full n x n tile grid.
    image=tensor(n*normal_size,n*normal_size,3)
    images_path = os.path.join(images_path, '*.jpg')
    name=[]
    for image_file in glob.glob(images_path):
        img = cv2.imread(image_file,0)  # flag 0 -> load as grayscale
        images_.append(img)
        # Tile order is encoded in the filename between underscores.
        # NOTE(review): the trailing .split('_')[-1] is a no-op here.
        label = int(image_file.split('_')[-2].split('_')[-1])
        name.append(label)
    images=[]
    count=0
    # Re-sort tiles by their encoded index (glob order is arbitrary).
    for i in range(len(name)):
        for j in range(len(name)):
            if name[j] ==i:
                images.append(images_[j])
    for i in range(len(area)):
        for j in range(len(area[i])):
            if area[i][j]==1:
                if Gradient :
                    # Paint the tile's gradient map instead of the tile.
                    image=draw_map(i*normal_size,j*normal_size,image,gradient(images[count]),red,green,blue)
                else:
                    image=draw_map(i*normal_size,j*normal_size,image,images[count],red,green,blue)
                count+=1
    cv2.imwrite(path+'/uimage.jpg',image)
    # HACK: machine-specific absolute output path — fails on any other box.
    cv2.imwrite('E:\\zxtdeeplearning\\crack\\matlab_proc\\start\\1.png',image)
def paint(path,red=255,green=0,blue=0):
    '''
    Overlay the gradient map of each flagged tile (tiles in path/images,
    mask in path/area.txt) onto path/prediction.jpg, saving the result as
    path/beauty.jpg.
    '''
    images_path=path+'/images/'
    area_path=path+'/area.txt'
    print(area_path)
    normal_size=224
    area=np.loadtxt(area_path)
    images_=[]
    #n=len(area)
    #image=tensor(n*normal_size,n*normal_size,3)
    image=cv2.imread(path+'/prediction.jpg')
    images_path = os.path.join(images_path, '*.jpg')
    name=[]
    for image_file in glob.glob(images_path):
        img = cv2.imread(image_file,0)  # flag 0 -> load as grayscale
        images_.append(img)
        # Tile order is encoded in the filename between underscores.
        # NOTE(review): the trailing .split('_')[-1] is a no-op here.
        label = int(image_file.split('_')[-2].split('_')[-1])
        name.append(label)
    images=[]
    count=0
    # Re-sort tiles by their filename-encoded index (glob order is arbitrary).
    for i in range(len(name)):
        for j in range(len(name)):
            if name[j] ==i:
                images.append(images_[j])
    for i in range(len(area)):
        for j in range(len(area[i])):
            if area[i][j]==1:
                image=draw_map(i*normal_size,j*normal_size,image,gradient(images[count]),red,green,blue)
                count+=1
    cv2.imwrite(path+'/beauty.jpg',image)
def clear(path):
    """Delete every file directly under *path* and every file one level
    deeper inside its immediate subdirectories.

    The subdirectories themselves are kept, and nesting deeper than one
    level is not touched (matching the original behavior).
    """
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isfile(entry_path):
            os.remove(entry_path)
        else:
            for child in os.listdir(entry_path):
                child_path = os.path.join(entry_path, child)
                if os.path.isfile(child_path):
                    os.remove(child_path)
|
import random
from .base import BaseCommand
class HeadToHeadCommand(BaseCommand):
    """Return the results of the previous games between two players."""

    command_term = 'head-to-head'
    url_path = 'api/match/head_to_head/'
    help = (
        'Use the `head-to-head` command to see all results between two players.\n'
        'For example, `@poolbot head-to-head @danny @marin` will return number '
        'of wins for each player.'
    )

    def process_request(self, message):
        """Fetch win counts for the two mentioned players and format a reply.

        Returns a user-facing string in every case: missing mentions, no
        recorded games, API failure, or the win summary.
        """
        mentioned_user_ids = self._find_user_mentions(message)
        try:
            player1 = mentioned_user_ids[0]
            player2 = mentioned_user_ids[1]
        except IndexError:
            # Fewer than two @-mentions were found in the message.
            return 'Sorry, I was unable to find two users in that message...'
        response = self.poolbot.session.get(
            self._generate_url(),
            params={
                'player1': player1,
                'player2': player2,
            }
        )
        if response.status_code == 200:
            data = response.json()
            player1_wins = data[player1]
            player2_wins = data[player2]
            # Ties resolve in favour of player2 (strict '>' comparison).
            most_wins = player1 if player1_wins > player2_wins else player2
            most_loses = player2 if most_wins == player1 else player1
            reply_text = (
                '{winner} has won {winner_win_count} games. '
                '{loser} has only won {loser_win_count}! '
                'This gives a win ratio of {winner_ratio} for {winner}! '
            )
            try:
                winning_percentage = ((data[most_wins] * 100) / sum(data.values()))
            except ZeroDivisionError:
                # Neither player has any recorded games yet.
                return (
                    '{player1} and {player2} are yet to record any games!'.format(
                        player1=self.poolbot.get_username(player1),
                        player2=self.poolbot.get_username(player2)
                    )
                )
            return reply_text.format(
                winner=self.poolbot.get_username(most_wins),
                loser=self.poolbot.get_username(most_loses),
                winner_win_count=data[most_wins],
                loser_win_count=data[most_loses],
                winner_ratio='{percent:.0f}%'.format(percent=winning_percentage)
            )
        else:
            return 'Sorry, I was unable to get head to head data!'
|
import matplotlib.pyplot as plt
import networkx as nx
import pprint
from collections import defaultdict
def readFromPayek():
    """Read "euler.net" (Pajek format) and pretty-print each node's
    adjacency list."""
    G = nx.read_pajek("euler.net")
    # Convert to a plain Graph before building the adjacency dict.
    G1 = nx.Graph(G)
    adjacency = {node: [] for node in G1.nodes}
    for node in G1.nodes:
        for neighbour in G1[node].keys():
            adjacency[node].append(neighbour)
    pprint.pprint(adjacency)

readFromPayek()
|
import pessoa1
class Funcionario(pessoa1.Pessoa):
    """Employee (Funcionario): a Pessoa with an internal id and a job title.

    NOTE(review): __init__ does not call super().__init__() — confirm
    Pessoa requires no initialization of its own.
    """
    def __init__(self):
        # idPessoa
        self._id = 0      # employee id (read-only via the property below)
        self._cargo = ""  # job title (cargo)
    # Getter for id (read-only: no setter is defined)
    @property
    def id(self):
        return self._id
    # Getter and setter for cargo
    @property
    def cargo(self):
        return self._cargo
    @cargo.setter
    def cargo(self, val):
        self._cargo = val
import random
from graphics import *
import Learn
class Creature:
    """A creature spawned at a random position on the Learn world.

    Bug fixes: __init__ previously stored x, y, health and age in local
    variables (never on self), and draw() tested the builtin `type`
    instead of self.type and read the never-assigned self.x / self.y,
    so type-0 creatures crashed with AttributeError on draw.
    """

    def __init__(self, type):
        self.type = type
        # Random spawn position inside the world bounds.
        self.x = random.randrange(0, Learn.width)
        self.y = random.randrange(0, Learn.height)
        self.health = 100
        self.age = 0
        #sight=3
        #record
        self.draw()

    def draw(self):
        # Type 0 creatures are rendered as a single point on the window.
        if self.type == 0:
            point = Point(self.x, self.y)
            point.draw(Learn.win)

    def move(self):
        pass
"""API Endpoints relating to users"""
import bcrypt
from flask import Blueprint
from flask_restful import Api, Resource, request
from kite.api.response import Error, Fail, Success
from kite.api.v2.parsers.user_parse import post_parser, put_parser
from kite.models import User, db
from kite.settings import FORUM_ADMIN, LOGGER
class UserLookup(Resource):
    """REST resource for a single user: GET / PUT / DELETE by username."""

    def get(self, username):
        """Get info on a user.

        Args:
            username: Username to lookup.

        Returns: (Success JSON, 200), or (Fail JSON, 404) when unknown.
        """
        LOGGER.debug({"Requested user": username})
        user = User.get_user(username)
        if user is not None:
            user_json = user.to_json()
            return Success(user_json).to_json(), 200
        return Fail(f"user {username} not found").to_json(), 404

    def put(self, username):
        """Update user info.

        Only the fields present (non-None) in the parsed payload change.

        Args:
            username: The user to be updated.

        Returns: (Success JSON, 200), or (Fail JSON, 404) when unknown.
        """
        args = put_parser.parse_args(strict=True)
        user = User.get_user(username)
        if user is not None:
            if args.is_admin is not None:
                user.is_admin = args.is_admin
            if args.bio is not None:
                user.bio = args.bio
            if args.is_mod is not None:
                user.is_mod = args.is_mod
            if args.displayName is not None:
                user.displayName = args.displayName
            if args.password is not None:
                # Re-hash with a fresh salt; the raw password is never stored.
                user.pw_hash = bcrypt.hashpw(
                    args.password.encode("utf8"), bcrypt.gensalt()
                )
            db.session.commit()
            data = {"message": f"{username} updated"}
            return Success(data).to_json(), 200
        return Fail(f"user {username} does not exist").to_json(), 404

    def delete(self, username):
        """Delete a user.

        Args:
            username: The user to be deleted.

        Returns: (Success JSON, 204), or (Fail JSON, 404) when unknown.
        """
        user = User.get_user(username)
        if user is not None:
            user.delete()
            return Success(None).to_json(), 204
        return Fail(f"user {username} does not exist").to_json(), 404
class UserList(Resource):
    """REST resource for the user collection: create (POST) and list (GET)."""

    def post(self):
        """Create a new user.

        Required in Payload:
            username: Username of the new user to be created.
            password: Password of the user to be created.
        Optional in Payload:
            bio: Bio of the user to be created.
            displayName: Display name of the user to be created.

        Returns: (Success JSON, 201), or (Fail JSON, 400) when the
        username is already taken.
        """
        args = post_parser.parse_args(strict=True)
        LOGGER.info({"Args": args})
        user = User.get_user(args.username)
        if user is None:
            # Hash with a per-user salt; never store the raw password.
            hashed = bcrypt.hashpw(args.password.encode("utf8"), bcrypt.gensalt())
            record = User(
                username=args.username,
                pw_hash=hashed,
                bio=args.bio,
                displayName=args.displayName,
            )
            record.save()
            data = {"message": f"user {args.username} created"}
            return Success(data).to_json(), 201
        return Fail(f"user {args.username} exists").to_json(), 400

    def get(self):
        """Get list of all users.

        Returns: (Success JSON with the serialized list, 200); the list
        may be empty.
        """
        # The original declared an unused `user_filter = {}` local; removed.
        users = User.get_all()
        users_json = [res.to_json() for res in users]
        return Success({"users": users_json}).to_json(), 200
# Blueprint wiring: expose the v2 user endpoints on
# /api/v2/users (collection) and /api/v2/users/<username> (single user).
users_bp_v2 = Blueprint("users", __name__)
api = Api(users_bp_v2)
api.add_resource(UserList, "/api/v2/users")
api.add_resource(UserLookup, "/api/v2/users/<string:username>")
|
# range() demo. A bare range(...) expression builds a lazy range object and
# discards it when run as a script; wrap in list(...) to materialize values.
range(5)
range (0,5)
for i in range(5):
    print(i)
range(5, 10)          # start, stop (stop is exclusive)
list(range (5, 10))
list(range(10, 15))
list(range(0,10, 2))  # start, stop, step
|
# python 3.6
"""网络爬虫和搜索引擎
"""
def quick_sort_pages(ranks, pages):
    """Sort *pages* by descending rank.

    Replaces the hand-rolled recursive quicksort, whose first-element
    pivot degrades to O(n^2) time and O(n) recursion depth (RecursionError
    around ~1000 already-ordered pages), with the built-in Timsort.

    Args:
        ranks: {page: rank value}.
        pages: list of pages (or any falsy value, which is returned as-is).

    Returns:
        pages ordered best-rank first; the original object for empty or
        single-element input, matching the original behavior.
    """
    if not pages or len(pages) <= 1:
        return pages
    return sorted(pages, key=lambda page: ranks[page], reverse=True)
def ordered_search(index, ranks, keyword):
    """Return the pages matching *keyword*, best-ranked first."""
    return quick_sort_pages(ranks, lookup(index, keyword))
def lucky_search(index, ranks, keyword):
    """Return the single best-ranked page for *keyword*, or None if no match."""
    pages = lookup(index, keyword)
    if not pages:
        return None
    # max() keeps the earliest page on rank ties, exactly like the
    # original strict '>' scan did.
    return max(pages, key=lambda page: ranks[page])
def compute_ranks(graph):
    """Page rank algorithm.

    rank(page, 0) = 1/npages
    rank(page, t) = (1-d)/npages
                    + sum(d * rank(p, t-1) / outlinks(p))
                      over all pages p that link to this page

    Args:
        graph: {page: [all urls in this page]}
    Returns:
        {page: rank value}
    """
    d = 0.8  # damping factor
    numloops = 10
    npages = len(graph)
    ranks = {page: 1.0 / npages for page in graph}
    for _ in range(numloops):
        ranks = {
            page: (1 - d) / npages
            + sum(d * ranks[node] / len(graph[node])
                  for node in graph if page in graph[node])
            for page in graph
        }
    return ranks
cache = {
'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipies:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbonzo beans.
<li> Crush them in a blender.
<li> Add 3 tablesppons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def crawl_web(seed):
    """Crawl pages starting from *seed*.

    Collects the links on each fetched page and keeps following links
    that have not been crawled yet.

    Args:
        seed: start URL for the crawl.
    Returns:
        index: {keyword: [url, url, ...]}
        graph: {page: [all urls linked from that page]}
    """
    tocrawl = [seed]
    crawled = []
    graph = {}  # <url>, [list of pages it links to]
    index = {}
    while tocrawl:
        page = tocrawl.pop()
        if page in crawled:
            continue
        content = get_page(page)
        add_page_to_index(index, page, content)
        outlinks = get_all_links(content)
        graph[page] = outlinks
        union(tocrawl, outlinks)
        crawled.append(page)
    return index, graph
def get_page(url):
    """Return the cached content for *url*, or None when not cached."""
    # TODO(zx): fetch real internet pages via urllib instead of the cache.
    return cache.get(url)
def get_next_target(page):
    """Find the next link url in the page content.

    Scans for an anchor (<a href="url">...) and extracts the quoted url.

    Args:
        page: page content, or any suffix of it.
    Returns:
        (url, end_quote): the first link and the index of its closing
        quote, or (None, 0) when no anchor remains.
    """
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote
def get_all_links(page):
    """Collect every link url appearing in the page content.

    Repeatedly extracts the first remaining anchor and advances past it.

    Args:
        page: page content.
    Returns:
        list of all urls found, in document order.
    """
    links = []
    url, endpos = get_next_target(page)
    while url:
        links.append(url)
        page = page[endpos:]
        url, endpos = get_next_target(page)
    return links
def union(a, b):
    """Append to *a*, in place, each element of *b* not already in *a*.

    Preserves the order of *b* and keeps *a*'s existing contents first.
    """
    for item in b:
        if item not in a:
            a.append(item)
def add_page_to_index(index, url, content):
    """Index every whitespace-separated word of *content* under *url*.

    Args:
        index: {keyword: [url, url, ...]} — mutated in place.
        url: page url.
        content: page content.
    """
    for word in content.split():
        add_to_index(index, word, url)
def add_to_index(index, keyword, url):
    """Add *url* to the posting list for *keyword* in *index*.

    Args:
        index: {keyword: [url, url, ...]} — mutated in place.
        keyword: keyword string.
        url: page url.
    """
    # setdefault replaces the explicit membership test: one lookup instead
    # of two, with identical resulting state.
    index.setdefault(keyword, []).append(url)
def lookup(index, keyword):
    """Return the url list for *keyword*, or None when it is not indexed."""
    # dict.get defaults to None, matching the original branch pair.
    return index.get(keyword)
if __name__ == "__main__":
    # Demo run: crawl the bundled cache, rank the pages, then search.
    index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
    ranks = compute_ranks(graph)
    #print(ranks)
    #>>> {'http://udacity.com/cs101x/urank/kathleen.html': 0.11661866666666663,
    #'http://udacity.com/cs101x/urank/zinc.html': 0.038666666666666655,
    #'http://udacity.com/cs101x/urank/hummus.html': 0.038666666666666655,
    #'http://udacity.com/cs101x/urank/arsenic.html': 0.054133333333333325,
    #'http://udacity.com/cs101x/urank/index.html': 0.033333333333333326,
    #'http://udacity.com/cs101x/urank/nickel.html': 0.09743999999999997}
    #print(lucky_search(index, ranks, 'Hummus'))
    #>>> http://udacity.com/cs101x/urank/kathleen.html
    #print(lucky_search(index, ranks, 'the'))
    #>>> http://udacity.com/cs101x/urank/nickel.html
    #print(lucky_search(index, ranks, 'babaganoush'))
    #>>> None
    print(ordered_search(index, ranks, 'Hummus'))
    #>>> ['http://udacity.com/cs101x/urank/kathleen.html',
    #    'http://udacity.com/cs101x/urank/nickel.html',
    #    'http://udacity.com/cs101x/urank/arsenic.html',
    #    'http://udacity.com/cs101x/urank/hummus.html',
    #    'http://udacity.com/cs101x/urank/index.html']
    print(ordered_search(index, ranks, 'the'))
    #>>> ['http://udacity.com/cs101x/urank/nickel.html',
    #    'http://udacity.com/cs101x/urank/arsenic.html',
    #    'http://udacity.com/cs101x/urank/hummus.html',
    #    'http://udacity.com/cs101x/urank/index.html']
    print(ordered_search(index, ranks, 'babaganoush'))
    #>>> None
|
import collections
class Solution:
    def fourSumCount(self, A, B, C, D):
        """Count tuples (i, j, k, l) with A[i] + B[j] + C[k] + D[l] == 0.

        Meet-in-the-middle: tally every pairwise sum from A and B, then
        for each C/D pair look up how many A/B pairs cancel it.
        O(n^2) time and space instead of the brute-force O(n^4).

        :type A: List[int]
        :type B: List[int]
        :type C: List[int]
        :type D: List[int]
        :rtype: int
        """
        ab_sums = collections.Counter(a + b for a in A for b in B)
        return sum(ab_sums[-(c + d)] for c in C for d in D)
# Ad-hoc stdin driver: reads the four arrays one per line, formatted like
# "[1,2,3]" (strip only removes '[' / ']' at the very ends of the line).
S = Solution()
A = list(map(int, input().strip('[').strip(']').split(',')))
B = list(map(int, input().strip('[').strip(']').split(',')))
C = list(map(int, input().strip('[').strip(']').split(',')))
D = list(map(int, input().strip('[').strip(']').split(',')))
print(S.fourSumCount(A, B, C, D))
|
import tkinter as tk

# Minimal tkinter menu demo: a File menu with two commands, plus empty
# Edit and View menus separated on the menu bar.

def my_command():
    # Handler for File > New.
    print("command")

def my_command2():
    # Handler for File > Old.
    print("commanded")

root = tk.Tk() # all our code goes here
root.geometry('800x600')
menu_bar = tk.Menu(root)
file_menu = tk.Menu(menu_bar, tearoff=0) # all file menu-items will be added here next
edit_menu = tk.Menu(menu_bar, tearoff=0)
view_menu = tk.Menu(menu_bar, tearoff=0)
# accelerator only displays the shortcut text; underline=0 marks the mnemonic letter.
file_menu.add_command(label="New", accelerator='Ctrl+N', compound='left', underline=0, command=my_command)
file_menu.add_command(label="Old", accelerator='Ctrl+O', compound='left', underline=0, command=my_command2)
menu_bar.add_cascade(label='File', menu=file_menu)
menu_bar.add_cascade(label='Edit', menu=edit_menu)
menu_bar.add_separator()
menu_bar.add_cascade(label='View', menu=view_menu)
root.config(menu=menu_bar)
root.mainloop()  # blocks until the window is closed
|
"""Decorators for use with SimpleRPG"""
from discord.ext import commands
from ..exceptions import HasNoCharacterException
def has_character():
    """Command check: only invokers that already have a character may run.

    The predicate raises HasNoCharacterException when no character can be
    fetched or loaded for the message author, so downstream error
    handlers can report it; otherwise the command proceeds.
    """
    def predicate(ctx):
        character = ctx.bot.get_or_load_character(ctx.message.author.id)
        if not character:
            raise HasNoCharacterException
        return True
    return commands.check(predicate)
|
import numpy as np
from matplotlib import pyplot as plt

# Animates a planar 3-link arm: each frame computes the joint positions by
# forward kinematics from the cumulative link angles and redraws the plot.

plt.figure(1)
plt.ion()  # interactive mode so draw()/pause() animates without blocking
l1 = 1  # link lengths
l2 = 1
l3 = 1
theta1 = 10  # joint angles in degrees
theta2 = -5
theta3 = -5
i=0
while i<60:
    # Forward kinematics: each joint adds its angle to the running total.
    # NOTE(review): l1 is used for every link; presumably l2 and l3 were
    # intended for the second and third segments — confirm.
    p1 = [l1*np.cos(np.radians(theta1)), l1*np.sin(np.radians(theta1))]
    p2 = [p1[0] + l1*np.cos(np.radians(theta2 + theta1)), p1[1] + l1*np.sin(np.radians(theta2 + theta1))]
    p3 = [p2[0] + l1*np.cos(np.radians(theta3 + theta2 + theta1)), p2[1] + l1*np.sin(np.radians(theta3 + theta2 + theta1))]
    plt.clf()
    plt.plot([0, p1[0], p2[0], p3[0]], [0, p1[1], p2[1], p3[1]], '-*')
    # Per-frame joint angle increments.
    theta1 = theta1 + 5
    theta2 = theta2 - 1
    theta3 = theta3 + 3
    plt.grid()
    plt.ylim([-5,5])
    plt.xlim([-5,5])
    plt.draw()
    plt.pause(0.001)
    i = i + 1
plt.ioff()
plt.show()
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains PME free switch tests.
"""
import time
from enum import IntEnum
from ducktape.mark import matrix
from ignitetest.services.ignite import IgniteService
from ignitetest.services.ignite_app import IgniteApplicationService
from ignitetest.services.utils.control_utility import ControlUtility
from ignitetest.services.utils.ignite_configuration import IgniteConfiguration
from ignitetest.services.utils.ignite_configuration.cache import CacheConfiguration
from ignitetest.services.utils.ignite_configuration.discovery import from_ignite_cluster
from ignitetest.utils import ignite_versions, cluster, ignore_if
from ignitetest.utils.enum import constructible
from ignitetest.utils.ignite_test import IgniteTest
from ignitetest.utils.version import DEV_BRANCH, V_2_8_0, IgniteVersion, LATEST
from ignitetest.services.utils.ssl.ssl_params import is_ssl_enabled
@constructible
class LoadType(IntEnum):
    """
    Background load type applied during the PME-free switch test.
    """
    NONE = 0          # no additional load
    EXTRA_CACHES = 1  # many extra transactional caches configured
    LONG_TXS = 2      # long-running transactions kept open across the switch
class PmeFreeSwitchTest(IgniteTest):
    """
    Tests PME free switch scenarios.
    """
    NUM_NODES = 9  # server node budget; @cluster reserves 2 extra slots for client apps
    EXTRA_CACHES_AMOUNT = 100  # caches created for the EXTRA_CACHES load type

    @cluster(num_nodes=NUM_NODES + 2)
    @ignore_if(lambda version, globals: version < V_2_8_0 and is_ssl_enabled(globals))
    @ignite_versions(str(DEV_BRANCH), str(LATEST))
    @matrix(load_type=[LoadType.NONE, LoadType.EXTRA_CACHES, LoadType.LONG_TXS])
    def test(self, ignite_version, load_type):
        """
        Tests PME-free switch scenario (node stop).

        :param ignite_version: Ignite version string under test.
        :param load_type: LoadType value (or its name) selecting the background load.
        :return: dict with measured latency/duration results.
        """
        data = {}
        caches = [CacheConfiguration(name='test-cache', backups=2, atomicity_mode='TRANSACTIONAL')]
        l_type = LoadType.construct_from(load_type)
        # Checking PME (before 2.8) vs PME-free (2.8+) switch duration, but
        # focusing on switch duration (which depends on caches amount) when long_txs is false and
        # on waiting for previously started txs before the switch (which depends on txs duration) when long_txs is true.
        if l_type is LoadType.EXTRA_CACHES:
            for idx in range(1, self.EXTRA_CACHES_AMOUNT):
                caches.append(CacheConfiguration(name="cache-%d" % idx, backups=2, atomicity_mode='TRANSACTIONAL'))
        config = IgniteConfiguration(version=IgniteVersion(ignite_version), caches=caches, cluster_state="INACTIVE")
        # Two slots are kept free for the streamer client applications below.
        num_nodes = self.available_cluster_size - 2
        self.test_context.logger.info("Nodes amount calculated as %d." % num_nodes)
        ignites = IgniteService(self.test_context, config, num_nodes=num_nodes)
        ignites.start()
        # Baseline auto-adjust exists only since 2.8; disable it so the manual
        # node stop below does not trigger a baseline change.
        if IgniteVersion(ignite_version) >= V_2_8_0:
            ControlUtility(ignites).disable_baseline_auto_adjust()
        ControlUtility(ignites).activate()
        client_config = config._replace(client_mode=True,
                                        discovery_spi=from_ignite_cluster(ignites, slice(0, num_nodes - 1)))
        long_tx_streamer = IgniteApplicationService(
            self.test_context,
            client_config,
            java_class_name="org.apache.ignite.internal.ducktest.tests.pme_free_switch_test.LongTxStreamerApplication",
            params={"cacheName": "test-cache"},
            startup_timeout_sec=180)
        if l_type is LoadType.LONG_TXS:
            long_tx_streamer.start()
        single_key_tx_streamer = IgniteApplicationService(
            self.test_context,
            client_config,
            java_class_name="org.apache.ignite.internal.ducktest.tests.pme_free_switch_test."
                            "SingleKeyTxStreamerApplication",
            params={"cacheName": "test-cache", "warmup": 1000},
            startup_timeout_sec=180)
        single_key_tx_streamer.start()
        # Trigger the switch by stopping the last server node.
        ignites.stop_node(ignites.nodes[num_nodes - 1])
        single_key_tx_streamer.await_event("Node left topology", 60, from_the_beginning=True)
        if l_type is LoadType.LONG_TXS:
            time.sleep(30)  # keeping txs alive for 30 seconds.
            long_tx_streamer.stop_async()
        single_key_tx_streamer.await_event("Node left topology", 60, from_the_beginning=True)
        single_key_tx_streamer.await_event("APPLICATION_STREAMED", 60)  # waiting for streaming continuation.
        single_key_tx_streamer.stop()
        data["Worst latency (ms)"] = single_key_tx_streamer.extract_result("WORST_LATENCY")
        data["Streamed txs"] = single_key_tx_streamer.extract_result("STREAMED")
        data["Measure duration (ms)"] = single_key_tx_streamer.extract_result("MEASURE_DURATION")
        data["Server nodes"] = num_nodes
        return data
|
"""
Generate a dataset containing features for each of the specified log datasets.
"""
import os
import pandas as pd
from global_constants import RESULTS_DIR
from src.data_config import DataConfigs
from src.helpers.data_manager import DataManager
from src.utils import get_vocabulary_indices, get_token_counts_batch
from global_utils import get_num_true_clusters
from sklearn.metrics import silhouette_score, calinski_harabasz_score, \
davies_bouldin_score
from analysis.utils import get_intra_cluster_spread, get_inter_cluster_spread, \
split_counts_per_cluster, get_labels_from_true_assignments, \
get_avg_entropy, get_sum_entropy, get_flat_entropy
from analysis.constants import NAME, VOCAB_SIZE, TRUE_CLUSTER_COUNT, \
TOKEN_COUNT_AVG_ENTROPY_A0, TOKEN_COUNT_AVG_ENTROPY_A1, \
TOKEN_COUNT_SUM_ENTROPY, TOKEN_COUNT_FLAT_ENTROPY, INTRA_CLUSTER_SPREAD, \
INTER_CLUSTER_SPREAD, SILHOUETTE_SCORE, CALINSKI_HARABASZ_SCORE, \
DAVIES_BOULDIN_SCORE
# NOTE(review): N_SAMPLES is never used below -- presumably a leftover or
# intended for future sampling; confirm before removing.
N_SAMPLES = 50

# Log datasets to compute properties for.
data_configs = [
    DataConfigs.Android,
    DataConfigs.Apache,
    DataConfigs.BGL,
    DataConfigs.Hadoop,
    DataConfigs.HDFS,
    DataConfigs.HealthApp,
    DataConfigs.HPC,
    DataConfigs.Linux,
    DataConfigs.Mac,
    DataConfigs.OpenSSH,
    DataConfigs.OpenStack,
    DataConfigs.Proxifier,
    DataConfigs.Spark,
    DataConfigs.Thunderbird,
    DataConfigs.Windows,
    DataConfigs.Zookeeper,
]

# Column name -> list of per-dataset values; becomes one CSV row per dataset.
data = {
    NAME: [],
    VOCAB_SIZE: [],
    TRUE_CLUSTER_COUNT: [],
    TOKEN_COUNT_AVG_ENTROPY_A1: [],
    TOKEN_COUNT_AVG_ENTROPY_A0: [],
    TOKEN_COUNT_SUM_ENTROPY: [],
    TOKEN_COUNT_FLAT_ENTROPY: [],
    INTRA_CLUSTER_SPREAD: [],
    INTER_CLUSTER_SPREAD: [],
    SILHOUETTE_SCORE: [],
    CALINSKI_HARABASZ_SCORE: [],
    DAVIES_BOULDIN_SCORE: [],
}

for data_config in data_configs:
    name = data_config['name']
    data_manager = DataManager(data_config)
    tokenized_logs = data_manager.get_tokenized_logs()
    true_assignments = data_manager.get_true_assignments()
    v_indices = get_vocabulary_indices(tokenized_logs)
    # C: token-count representation of the logs (presumably one row per log,
    # one column per vocabulary token -- TODO confirm in src.utils).
    C = get_token_counts_batch(tokenized_logs, v_indices)
    count_cluster_split = split_counts_per_cluster(C, true_assignments)
    true_labels = get_labels_from_true_assignments(true_assignments)
    # Clustering-quality scores computed against the ground-truth labels.
    si_score = silhouette_score(C, true_labels)
    ch_score = calinski_harabasz_score(C, true_labels)
    db_score = davies_bouldin_score(C, true_labels)
    print('{}: {}'.format(name, len(v_indices)))
    data[NAME].append(name)
    data[VOCAB_SIZE].append(len(v_indices))
    data[TRUE_CLUSTER_COUNT].append(get_num_true_clusters(true_assignments))
    data[TOKEN_COUNT_AVG_ENTROPY_A0].append(get_avg_entropy(C, 0))
    data[TOKEN_COUNT_AVG_ENTROPY_A1].append(get_avg_entropy(C, 1))
    data[TOKEN_COUNT_SUM_ENTROPY].append(get_sum_entropy(C))
    data[TOKEN_COUNT_FLAT_ENTROPY].append(get_flat_entropy(C))
    data[SILHOUETTE_SCORE].append(si_score)
    data[CALINSKI_HARABASZ_SCORE].append(ch_score)
    data[DAVIES_BOULDIN_SCORE].append(db_score)
    data[INTRA_CLUSTER_SPREAD].append(
        get_intra_cluster_spread(count_cluster_split, C))
    data[INTER_CLUSTER_SPREAD].append(
        get_inter_cluster_spread(count_cluster_split, C))

# Persist all collected properties as a single CSV.
path = os.path.join(RESULTS_DIR, 'dataset_properties.csv')
df = pd.DataFrame(data)
df.to_csv(path, index=False)
|
# Generate a flat, disk-shaped structuring element.
#
# A pixel is in the neighborhood if the Euclidean distance between the pixel
# and the origin is no greater than the radius.
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
from keras.applications.vgg16 import VGG16
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
# In[14]:
# Process Model: load VGG16 pre-trained weights and prepare one image batch.
model = VGG16()
image = load_img("D:/Projects/VGG16/cars.jpg", target_size=(224, 224))
image = img_to_array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))  # add batch dimension
image = preprocess_input(image)
# Generate predictions
pred = model.predict(image)
print('Predicted:', decode_predictions(pred, top=3)[0])
np.argmax(pred[0])  # NOTE(review): result is discarded; presumably the hard-coded 668 below came from here -- confirm
# Grad-CAM algorithm: weight the last conv feature maps by the gradient of the
# chosen class score, then average over channels.
specoutput=model.output[:, 668]  # score of ImageNet class index 668 -- TODO confirm it matches the prediction
last_conv_layer = model.get_layer('block5_conv3')
grads = K.gradients(specoutput, last_conv_layer.output)[0]
pooled_grads = K.mean(grads, axis=(0, 1, 2))  # per-channel importance weights
iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([image])
for i in range(512):  # weight each of the 512 channels of block5_conv3
    conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
heatmap=np.mean(conv_layer_output_value, axis=-1)
# Heatmap post processing: ReLU then normalize to [0, 1].
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
plt.matshow(heatmap)
plt.show()
# Superimposing heatmap onto the original image and saving the result.
import cv2
img = cv2.imread("D:/Projects/VGG16/cars.jpg")
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = heatmap * 0.4 + img  # blend: 40% heatmap over the photo
cv2.imwrite('heatmap.jpg', superimposed_img)
|
# String identifiers used to categorize spells.
DAMAGE_SPELL = "damage_spell"
HEAL_SPELL = "heal_spell"
SINGLE_TARGET_SPELL = "single_target_spell"
|
import copy
import math
import os
import numpy as np
from OpenGL import GL
from OpenGL import GLU
from prototype import quaternion as QUAT
def to_seg(value, precs=4):
    """Convert an angle in radians to sexagesimal [degrees, minutes, seconds].

    :param value: angle in radians.
    :param precs: decimal places to round the seconds component to (default 4).
    :return: list [whole_degrees, whole_minutes, rounded_seconds].
    """
    # Fix: the original used locals named `min` and `sec`-like names, shadowing
    # the builtin min(); arithmetic is unchanged.
    degrees_value = (360 / (2 * math.pi)) * value  # radians -> decimal degrees
    whole_degrees = int(degrees_value)
    minutes_value = (degrees_value - whole_degrees) * 60
    whole_minutes = int(minutes_value)
    seconds = round((minutes_value - whole_minutes) * 60, precs)
    return [whole_degrees, whole_minutes, seconds]
def float_to_hex(value):
    """Scale a normalized [0, 1] color component to the 0-255 channel range."""
    return 255 * value
def plot_list(list_of_objects):
    """Console menu: toggle visibility of drawn objects, then re-plot them all.

    Loops until the user enters -1 (or invalid/out-of-range input, which
    returns immediately). Each pass prints the object list, flips the chosen
    object's visibility and re-plots every object.

    NOTE(review): calls graph_object.get_color_name(), which is not defined on
    GraphicalObject in this file -- confirm the drawn objects provide it.
    """
    os.system("clear")
    do = True
    while do:
        print("LISTA DE OBJETOS DIBUJADOS\n\n")
        index = 0
        for graph_object in list_of_objects:
            print(graph_object.get_color_name(), " \t Indice[", index, "]\n")
            index += 1
        print("\n\n[-1] Omitir\t")
        index = input("""
        \n Si el Objeto es Visible , se oculta
        Si esta oculto, se hace visible
        Opcion >>>:\t
        """)
        # Bare except: any non-integer input silently aborts the menu.
        try:
            index = int(index)
            if index >= len(list_of_objects):
                return None
            else:
                pass
        except:
            return None
        if index == -1:
            # Sentinel: leave the loop after one final re-plot below.
            do = False
            pass
        else:
            who_object = list_of_objects[index]
            if who_object.is_visible():
                who_object.hide()
            else:
                who_object.show()
        # Re-plot everything so the visibility change takes effect.
        for ref_object in list_of_objects:
            ref_object.plot()
def control(list_of_objects, glu_context=GLU):
    """Console menu: pick one visible object and apply a transformation to it.

    Offers rotation about the object's own axes, rotation about an arbitrary
    vector, scaling and translation. Returns None on any invalid input.

    NOTE(review): SECURITY -- vectors/scales/points are read with eval() on raw
    console input; acceptable for a local demo, unsafe with untrusted input.
    NOTE(review): glu_context is only referenced in the commented-out camera
    branch at the bottom.
    """
    os.system("clear")
    print("LISTA DE OBJETOS DIBUJADOS\n\n")
    index = 0
    exist = False
    # List only visible objects; remember whether any exists at all.
    for graph_object in list_of_objects:
        if graph_object.is_visible():
            print(graph_object.get_color_name(), " \t Indice[", index, "]\n")
            exist = True
        else:
            pass
        index += 1
    index = input("Opcion >>>:\t")
    if exist:
        pass
    else:
        return None
    # Bare except: any non-integer input aborts.
    try:
        index = int(index)
        if index >= len(list_of_objects):
            return None
        else:
            pass
    except:
        return None
    who_object = list_of_objects[index]
    option = input("""
    [1]Rotar - Sobre si mismo
    [2]Rotar - Respecto a un Vector
    [3]Escalar
    [4]Trasladar
    [9]CAMARA
    """)
    try:
        option = int(option)
    except:
        return None
    if option == 1:
        # Rotate about the object's own axis: translate to origin, rotate,
        # translate back to the saved center.
        option = input("""
        [1]X
        [2]Y
        [3]Z
        """)
        try:
            option = int(option)
        except:
            return None
        who_object.update_center()
        ref_center = copy.copy(who_object.get_center())
        angle = input("Angulo >>>:\t")
        try:
            angle = float(angle)
        except:
            return None
        if option == 1:
            who_object.translate()
            who_object.rotate(angle, "x", True)
            who_object.translate(ref_center)
        elif option == 2:
            who_object.translate()
            who_object.rotate(angle, "y", True)
            who_object.translate(ref_center)
        elif option == 3:
            who_object.translate()
            who_object.rotate(angle, "z", True)
            who_object.translate(ref_center)
        else:
            return None
    elif option == 2:
        # Rotate about a world axis or an arbitrary user-supplied vector.
        option = input("""
        [1]X
        [2]Y
        [3]Z
        [4]Otro
        """)
        try:
            option = int(option)
        except:
            return None
        angle = input("Angulo >>>:\t")
        try:
            angle = float(angle)
        except:
            return None
        if option == 1:
            who_object.rotate(angle, "x", True)
        elif option == 2:
            who_object.rotate(angle, "y", True)
        elif option == 3:
            who_object.rotate(angle, "z", True)
        elif option == 4:
            vector = eval(input("Vector >>>:\t"))  # NOTE(review): eval on user input
            try:
                vector[0]
            except:
                return None
            who_object.rotate(angle, vector)
        else:
            return None
    elif option == 3:
        escala = eval(input("Escala >>>:\t"))  # NOTE(review): eval on user input
        try:
            escala = list(escala)
        except:
            return None
        who_object.scale(None, escala)
    elif option == 4:
        to_point = eval(input("Punto >>>:\t"))  # NOTE(review): eval on user input
        try:
            to_point == list(to_point)
        except:
            return None
        who_object.translate(Point(to_point[0], to_point[1], to_point[2]))
    else:
        return None
    """
    elif option == 9:
        to_point = eval(input("Punto >>>:\t"))
        try:
            to_point == list(to_point)
        except:
            return None
        glu_context.gluLookAt(to_point[0], to_point[1], to_point[2], 0, 0, 0, 0, 1, 0)
    """
class Color:
    """RGB color holder with in-place conversion to other color models.

    NOTE(review): the conversion methods overwrite R/G/B with the target
    model's components (e.g. H/S/L), so the attribute names become misleading
    after a conversion. The triple-quoted strings inside the converters are
    kept verbatim: they contain the inverse transforms back to RGB.
    """
    # Class-level defaults (instances overwrite them in __init__).
    R = 0.0
    G = 0.0
    B = 0.0
    id_name = ""

    def __init__(self, r=0.0, g=0.0, b=0.0):
        self.R = r
        self.G = g
        self.B = b
        self.id_name = "Void"

    def get_rgb(self):
        """Return the components as an [R, G, B] list."""
        return [self.R, self.G, self.B]

    def get_r(self):
        return self.R

    def get_g(self):
        return self.G

    def get_b(self):
        return self.B

    def set_name(self, name):
        self.id_name = name

    def set_color(self, color):
        """Set components from an [r, g, b] sequence."""
        self.R = color[0]
        self.G = color[1]
        self.B = color[2]

    def get_name(self):
        return self.id_name

    def set_entity_color(self):
        """Push this color into the current OpenGL state."""
        GL.glColor3f(self.R, self.G, self.B)

    def color_mode(self,type=None):
        """Convert the stored color in place to the requested model.

        :param type: 'rgb' (no-op, default), 'cmyk', 'yuv', 'hsl' or 'hsv'.
            NOTE(review): the parameter shadows the builtin ``type``; renaming
            it would change the keyword interface, so it is only flagged here.
        """
        if type is None:
            type = 'rgb'
        else:
            pass
        if type == 'rgb':
            pass
        elif type == 'cmyk':
            self.convert_cmyk()
        elif type == 'yuv':
            self.convert_yuv()
        elif type == 'hsl':
            self.convert_hsl()
        elif type == 'hsv':
            self.convert_hsv()
        else:
            print("ERROR - Modo de Color Invalido")
            pass

    def convert_cmyk(self):
        """Overwrite R/G/B with the C/M/Y components (K is discarded)."""
        r_p = float_to_hex(self.R)/255
        g_p = float_to_hex(self.G)/255
        b_p = float_to_hex(self.B)/255
        k = 1 - max([r_p, g_p, b_p])
        if k == 1:
            # Pure black: avoid division by zero in the formulas below.
            c = m = y = 0
        else:
            c = (1 - r_p - k) / (1 - k)
            m = (1 - g_p - k) / (1 - k)
            y = (1 - b_p - k) / (1 - k)
        self.R = c
        self.G = m
        self.B = y
        """#SI SE DESEA VOLVER A RGB
        self.R = (1-c)*(1-k)
        self.G = (1-m)*(1-k)
        self.B = (1-y)*(1-k)
        """

    def convert_yuv(self):
        """Overwrite R/G/B with Y/U/V via the standard conversion matrix."""
        yuv_matrix = [[0.299, 0.587, 0.114],
                      [-0.147, -0.289, 0.436],
                      [0.615, -0.515, -0.100]]
        rgb_vector = [self.R, self.G, self.B]
        yuv_matrix = np.matrix(yuv_matrix)
        rgb_vector = np.matrix(rgb_vector)
        yuv_vector = yuv_matrix*rgb_vector.T
        yuv_vector = yuv_vector.T
        yuv_vector = yuv_vector.tolist()
        self.R = yuv_vector[0][0]
        self.G = yuv_vector[0][1]
        self.B = yuv_vector[0][2]
        """#SI SE DESEA VOLVER A RGB
        rgb_matrix = [[1, 0, 1.14],
        [1, -0.396, -0.581],
        [1, 2.029, 0]]
        rgb_matrix = np.matrix(rgb_matrix)
        yuv_vector = np.matrix(yuv_vector[0])
        yuv_vector = yuv_vector.T
        rgb_vector = rgb_matrix*yuv_vector
        rgb_vector = rgb_vector.T
        rgb_vector = rgb_vector.tolist()
        self.R = rgb_vector[0][0]
        self.G = rgb_vector[0][1]
        self.B = rgb_vector[0][2]
        """

    def convert_hsl(self):
        """Overwrite R/G/B with H (degrees), S and L."""
        r_p = float_to_hex(self.R) / 255
        g_p = float_to_hex(self.G) / 255
        b_p = float_to_hex(self.B) / 255
        max_value = max([r_p, g_p, b_p])
        min_value = min([r_p, g_p, b_p])
        # Hue depends on which channel dominates; grey (max == min) has hue 0.
        if max_value == min_value:
            H = 0
        elif max_value == r_p:
            H = (60 * ((g_p - b_p)/(max_value - min_value)) +360) % 360
        elif max_value == g_p:
            H = 60 * ((b_p - r_p)/(max_value - min_value)) + 120
        elif max_value == b_p:
            H = 60 * ((r_p - g_p)/(max_value - min_value)) +240
        L = (max_value + min_value)/2
        if max_value == min_value:
            S = 0
        elif L <= 0.5:
            S = (max_value - min_value)/(2*L)
        elif L > 0.5:
            S = (max_value - min_value) / (2 - (2 * L))
        self.R = H
        self.G = S
        self.B = L
        """#SI SE DESEA VOLVER A RGB
        C = (1 - math.fabs(2*L - 1)) * S
        X = C*(1 - math.fabs((H / 60) % 2 - 1))
        m = L - C / 2
        if 0 <= H and H < 60:
            self.R = C + m
            self.G = X + m
            self.B = 0 + m
        elif 60 <= H and H < 120:
            self.R = X + m
            self.G = C + m
            self.B = 0 + m
        elif 120 <= H and H < 180:
            self.R = 0 + m
            self.G = C + m
            self.B = X + m
        elif 180 <= H and H < 240:
            self.R = 0 + m
            self.G = X + m
            self.B = C + m
        elif 240 <= H and H < 300:
            self.R = X + m
            self.G = 0 + m
            self.B = C + m
        elif 300 <= H and H < 360:
            self.R = C + m
            self.G = 0 + m
            self.B = X + m
        """

    def convert_hsv(self):
        """Overwrite R/G/B with H (degrees), S and V."""
        r_p = float_to_hex(self.R) / 255
        g_p = float_to_hex(self.G) / 255
        b_p = float_to_hex(self.B) / 255
        max_value = max([r_p, g_p, b_p])
        min_value = min([r_p, g_p, b_p])
        if max_value == min_value:
            H = 0
        elif max_value == r_p:
            H = 60 *(((g_p - b_p) / (max_value - min_value)) % 6)
        elif max_value == g_p:
            H = 60 * (((b_p - r_p) / (max_value - min_value)) + 2)
        elif max_value == b_p:
            H = 60 * (((r_p - g_p) / (max_value - min_value)) + 4)
        if max_value == 0:
            S = 0
        else:
            S = (max_value-min_value)/max_value
        V = max_value
        self.R = H
        self.G = S
        self.B = V
        """#SI SE DESEA VOLVER A RGB
        C = V * S
        X = C*(1 - math.fabs((H / 60) % 2 - 1))
        m = V - C
        if 0 <= H and H < 60:
            self.R = C + m
            self.G = X + m
            self.B = 0 + m
        elif 60 <= H and H < 120:
            self.R = X + m
            self.G = C + m
            self.B = 0 + m
        elif 120 <= H and H < 180:
            self.R = 0 + m
            self.G = C + m
            self.B = X + m
        elif 180 <= H and H < 240:
            self.R = 0 + m
            self.G = X + m
            self.B = C + m
        elif 240 <= H and H < 300:
            self.R = X + m
            self.G = 0 + m
            self.B = C + m
        elif 300 <= H and H < 360:
            self.R = C + m
            self.G = 0 + m
            self.B = X + m
        """
class Point:
    """A 3D point/vector with component access, an optional label and
    angle/norm helpers."""

    # Class-level defaults, kept for backward compatibility with code that
    # reads attributes off the class itself.
    x_component = 0
    y_component = 0
    z_component = 0
    it_self_name = ""

    def __init__(self, x=0, y=0, z=0):
        self.x_component = x
        self.y_component = y
        self.z_component = z
        self.it_self_name = None  # optional label; query via get_point_name()

    def comp_x(self):
        """Return the x component."""
        return self.x_component

    def comp_y(self):
        """Return the y component."""
        return self.y_component

    def comp_z(self):
        """Return the z component."""
        return self.z_component

    def get_coord(self):
        """Return the coordinates as an [x, y, z] list."""
        return [self.x_component, self.y_component, self.z_component]

    def get_unitary(self):
        """Return the normalized (unit-length) vector as [x, y, z].

        Raises ZeroDivisionError for the zero vector.
        """
        # Fix: norm() was recomputed three times; compute it once.
        norm = self.norm()
        return [self.x_component / norm, self.y_component / norm, self.z_component / norm]

    def conjugate(self):
        """Negate all three components in place (reflection through the origin)."""
        self.x_component = -1 * self.x_component
        self.y_component = -1 * self.y_component
        self.z_component = -1 * self.z_component

    def set_coords(self, coords):
        """Set components from an [x, y, z] sequence."""
        self.x_component = coords[0]
        self.y_component = coords[1]
        self.z_component = coords[2]

    def set_point_name(self, name=None):
        """Assign a label; calling without a name only prints a notice."""
        if name is None:
            print("No Label Name Changed")
        else:
            self.it_self_name = name

    def get_point_name(self):
        """Return the label, or False when the point is unnamed."""
        if self.it_self_name is None:
            return False
        else:
            return self.it_self_name

    def norm(self):
        """Return the Euclidean distance from the origin."""
        return math.sqrt(self.x_component**2 + self.y_component**2 + self.z_component**2)

    def _delta_and_norm(self, reference_point):
        """Return ((dx, dy, dz), norm) of this point relative to
        reference_point (the origin when None). Shared by angle_x/y/z."""
        if reference_point is None:
            reference_point = Point(0, 0, 0)
        a_x = self.x_component - reference_point.comp_x()
        b_y = self.y_component - reference_point.comp_y()
        c_z = self.z_component - reference_point.comp_z()
        return (a_x, b_y, c_z), math.sqrt(a_x**2 + b_y**2 + c_z**2)

    def angle_x(self, reference_point=None):
        """Angle (radians) between the +X axis and the vector from reference_point."""
        (a_x, _, _), norm = self._delta_and_norm(reference_point)
        return math.acos(a_x / norm)

    def angle_y(self, reference_point=None):
        """Angle (radians) between the +Y axis and the vector from reference_point."""
        (_, b_y, _), norm = self._delta_and_norm(reference_point)
        return math.acos(b_y / norm)

    def angle_z(self, reference_point=None):
        """Angle (radians) between the +Z axis and the vector from reference_point."""
        (_, _, c_z), norm = self._delta_and_norm(reference_point)
        return math.acos(c_z / norm)

    def update(self, x=0, y=0, z=0):
        """Overwrite all three components."""
        self.x_component = x
        self.y_component = y
        self.z_component = z
class Edge:
    """A colored line segment between two Point instances, with distance and
    axis-angle helpers."""

    # Class-level defaults, kept for backward compatibility.
    point_A = None
    point_B = None
    edge_norm = 0
    color = None
    it_self_name = ""

    def __init__(self, p_a=None, p_b=None, color=None):
        # Missing endpoints default to the origin; missing color to black.
        self.point_A = Point(0, 0, 0) if p_a is None else p_a
        self.point_B = Point(0, 0, 0) if p_b is None else p_b
        self.color = Color() if color is None else color
        self.it_self_name = None
        self.edge_norm = 0

    def set_edge_name(self, name=None):
        """Assign a label; calling without a name only prints a notice."""
        if name is None:
            print("No Label Name Changed")
        else:
            self.it_self_name = name

    def get_edge_name(self):
        """Return the label, or False when the edge is unnamed."""
        return False if self.it_self_name is None else self.it_self_name

    def set_color(self, color):
        self.color = color

    def get_color(self):
        return self.color

    def dist_x(self):
        """Signed x distance from point A to point B."""
        return self.point_B.comp_x() - self.point_A.comp_x()

    def dist_y(self):
        """Signed y distance from point A to point B."""
        return self.point_B.comp_y() - self.point_A.comp_y()

    def dist_z(self):
        """Signed z distance from point A to point B."""
        return self.point_B.comp_z() - self.point_A.comp_z()

    def norm(self):
        """Return the segment length; the value is also cached on edge_norm."""
        self.edge_norm = math.sqrt(self.dist_x()**2 + self.dist_y()**2 + self.dist_z()**2)
        return self.edge_norm

    def angle_x(self):
        """Angle (radians) between the segment and the +X axis."""
        return math.acos(self.dist_x() / self.norm())

    def angle_y(self):
        """Angle (radians) between the segment and the +Y axis."""
        return math.acos(self.dist_y() / self.norm())

    def angle_z(self):
        """Angle (radians) between the segment and the +Z axis."""
        return math.acos(self.dist_z() / self.norm())

    def get_point_a(self):
        return self.point_A

    def get_point_b(self):
        return self.point_B
class GraphicalObject:
    """A wireframe object: named Points joined by colored Edges, drawable via
    OpenGL (GL_LINES) and transformable by rotation, scaling and translation."""

    # Class-level defaults (instances overwrite them in __init__).
    point_collection = {}
    edge_collection = {}
    point_set = []
    last_point_index = 0
    last_edge_index = 0
    center = None
    precs = 3
    name = ""
    visible = False

    def __init__(self):
        self.point_set = set(self.point_set)
        self.last_point_index = 0
        self.last_edge_index = 0
        self.center = Point(0, 0, 0)
        self.point_collection = {}
        self.edge_collection = {}
        self.precs = 3  # rounding precision for the computed center
        self.name = "Void"
        self.visible = False
    ################################################################
    def set_precs(self, precs):
        self.precs = precs
    def get_precs(self):
        return self.precs
    ################################################################
    def set_name(self, new_name):
        self.name = new_name
    def get_name(self):
        return self.name
    ################################################################
    def update_last_point_index(self):
        self.last_point_index += 1
    def last_point_name(self):
        """Return the next auto-generated point name ('p_<n>') and advance the counter."""
        self.update_last_point_index()
        return "p_" + str(self.last_point_index - 1)
    ###*********************************************************###
    def update_last_edge_index(self):
        self.last_edge_index += 1
    def last_edge_name(self):
        """Return the next auto-generated edge name ('e_<n>') and advance the counter."""
        self.update_last_edge_index()
        return "e_" + str(self.last_edge_index - 1)
    ################################################################
    def show(self):
        self.visible = True
    def hide(self):
        self.visible = False
    def is_visible(self):
        return self.visible
    ################################################################
    def update_center(self):
        """Recompute the centroid of all points, rounded to self.precs places."""
        auxiliar_list = list(self.point_set)
        num_of_points = len(auxiliar_list)
        x_carry = 0
        y_carry = 0
        z_carry = 0
        for point_ref in auxiliar_list:
            x_carry += point_ref.comp_x()
            y_carry += point_ref.comp_y()
            z_carry += point_ref.comp_z()
        x_carry = round(x_carry / num_of_points, self.precs)
        y_carry = round(y_carry / num_of_points, self.precs)
        z_carry = round(z_carry / num_of_points, self.precs)
        self.center.update(x_carry, y_carry, z_carry)
    def get_center(self):
        return self.center
    ################################################################
    # NOTE(review): mutable default argument -- the same Point(0, 0, 0)
    # instance is shared across calls that omit ref_point, and push_point may
    # rename/mutate it. Prefer ref_point=None with an in-body default.
    def push_point(self, ref_point=Point(0, 0, 0), name=None, verbose = False):
        """Register a point under an explicit or auto-generated name."""
        if name is None:
            if not ref_point.get_point_name():
                name = self.last_point_name()
                ref_point.set_point_name(name)
            else:
                pass
        else:
            ref_point.set_point_name(name)
        self.point_collection[ref_point.get_point_name()] = ref_point
        self.point_set.add(ref_point)
        if verbose:
            print("\nPoint:\t(", ref_point.comp_x(), ",", ref_point.comp_y(), ",", ref_point.comp_z(), ") Add-ed /=/ Name:\t", ref_point.get_point_name(), "\n")
    ################################################################
    def get_point(self, name):
        return self.point_collection[name]
    ################################################################
    def push_edge(self, point_1, point_2, color=None, name=None, verbose=False):
        """Register an edge between two points, auto-registering any endpoint
        not already in the point collection."""
        try:
            self.point_collection[point_1.get_point_name()]
        except:
            if verbose:
                print("\nError - No Point with name ", point_1.get_point_name())
            point_1.set_point_name(self.last_point_name())
            self.point_collection[point_1.get_point_name()] = point_1
            self.point_set.add(point_1)
            if verbose:
                print("\nCreate Point ", point_1.get_point_name())
        if verbose:
            print(self.last_point_index)
        try:
            self.point_collection[point_2.get_point_name()]
        except:
            if verbose:
                print("\nError - No Point with name ", point_2.get_point_name())
            point_2.set_point_name(self.last_point_name())
            self.point_collection[point_2.get_point_name()] = point_2
            self.point_set.add(point_2)
            if verbose:
                print("\n Create Point ", point_2.get_point_name())
                print(self.last_point_index)
        if name is None:
            name = self.last_edge_name()
        else:
            pass
        ref_edge = Edge(point_1, point_2, color)
        ref_edge.set_edge_name(name)
        self.edge_collection[name] = ref_edge
    ################################################################
    def init(self):
        pass
    def plot(self, gl_context=GL):
        """Draw all edges as GL_LINES; no-op when the object is hidden."""
        if self.visible:
            pass
        else:
            return None
        gl_context.glBegin(GL.GL_LINES)
        for edge_name, edge_ref in self.edge_collection.items():
            color = edge_ref.get_color()
            gl_context.glColor3f(color.get_r(), color.get_g(), color.get_b())
            gl_context.glVertex3f(edge_ref.get_point_a().comp_x(), edge_ref.get_point_a().comp_y(), edge_ref.get_point_a().comp_z())
            gl_context.glVertex3f(edge_ref.get_point_b().comp_x(), edge_ref.get_point_b().comp_y(), edge_ref.get_point_b().comp_z())
        gl_context.glEnd()
    ################################################################
    def rotate(self, angle=0, vector=None, its_unitary=False, sign="+", verbose=False):
        """Rotate every point about *vector* (quaternion-based, via QUAT.rotate);
        defaults to the object's own center when vector is None."""
        if vector is None:
            self.update_center()
            vector = self.center.get_coord()
        else:
            pass
        for point_ref in list(self.point_set):
            carry_coord = QUAT.rotate(point_ref.get_coord(), vector, angle, sign, its_unitary, verbose)
            point_ref.update(carry_coord[0], carry_coord[1], carry_coord[2])
    # NOTE(review): mutable default argument fact_vector=[1, 1, 1]; harmless
    # here because it is never mutated, but fragile if that changes.
    def scale(self, gen_fact=1, fact_vector=[1, 1, 1]):
        """Scale all points; a non-None gen_fact overrides fact_vector with a
        uniform [gen_fact]*3 scale."""
        if gen_fact is None:
            pass
        else:
            fact_vector = [gen_fact, gen_fact, gen_fact]
        for label_ref, point_ref in self.point_collection.items():
            carry_coord = point_ref.get_coord()
            point_ref.update(carry_coord[0]*fact_vector[0], carry_coord[1]*fact_vector[1], carry_coord[2]*fact_vector[2])
    def translate(self, move_point=None):
        """Translate all points by move_point; when None, move the object so
        its center lands on the origin."""
        if move_point is None:
            self.update_center()
            move_point_vector = self.get_center().get_coord()
            move_point_vector[0] = -1 * move_point_vector[0]
            move_point_vector[1] = -1 * move_point_vector[1]
            move_point_vector[2] = -1 * move_point_vector[2]
        else:
            move_point_vector = move_point.get_coord()
        for point_ref in list(self.point_set):
            carry_coord = point_ref.get_coord()
            point_ref.update(carry_coord[0]+move_point_vector[0], carry_coord[1]+move_point_vector[1],
                             carry_coord[2]+move_point_vector[2])
    ################################################################
    def show_points(self):
        """Print every registered point with its name and coordinates."""
        for point_ref in list(self.point_set):
            print("\tPoint:\t",point_ref.get_point_name()," at (", point_ref.comp_x(), " ,",
                  point_ref.comp_y(), " ,", point_ref.comp_z(), ")")
    def show_edges(self):
        """Print every registered edge with its endpoints and color."""
        sorted(self.edge_collection)  # NOTE(review): result discarded; dict iteration below is unsorted
        for edge_name, edge_ref in self.edge_collection.items():
            print("\tEdge:\t", edge_ref.get_edge_name(), " between ", edge_ref.get_point_a().get_point_name(),
                  " and ", edge_ref.get_point_b().get_point_name() + " , Color: ", edge_ref.color.get_rgb())
    #################################################################
    def get_edge_collection(self):
        return self.edge_collection
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './ui/login_form.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtSql
# Connect to the local PostgreSQL database through Qt's SQL driver.
db = QtSql.QSqlDatabase.addDatabase('QPSQL')
db.setHostName("localhost")
db.setPort(5433)
db.setDatabaseName("bama_prj")
db.setUserName("postgres")
db.setPassword("51136616")  # NOTE(review): hard-coded credentials; move to config/env.
if db.open():
    for item in db.tables():
        print(item)
    print(db.databaseName())
    print(db.driverName())
else:
    print("NOOOOOOOOOOOOOO")
myquery = QtSql.QSqlQuery()
myquery.exec_("select * from amain_car_info")
# Bug fix: QSqlQuery has no fetchall() (that is a Python DB-API cursor method),
# so the original line raised AttributeError. Iterate the result set with
# next()/value() and collect each row as a list of column values.
rows = []
while myquery.next():
    record = myquery.record()
    rows.append([myquery.value(i) for i in range(record.count())])
print(rows)
import tkinter
from tkinter import *
from process_module import process
from output_module import output
def send():
    """Handle the Send button: echo the user's message, append the bot reply.

    Reads the entry box, clears it, writes both sides of the exchange into the
    (normally read-only) chat log, and hands the reply to the output module
    (presumably speech/secondary output -- TODO confirm output_module).
    """
    msg = EntryBox.get("1.0",'end-1c').strip()  # whole entry text minus the trailing newline
    EntryBox.delete("0.0",END)
    if msg != '':
        ChatBox.config(state=NORMAL)  # temporarily unlock the read-only log
        ChatBox.insert(END, "You: " + msg + '\n\n')
        ChatBox.config(foreground="#446665", font=("Verdana", 12 ))
        res = process(msg)  # project pipeline computes the bot's reply
        ChatBox.insert(END, "Bot: " + res + '\n\n')
        ChatBox.config(state=DISABLED)  # lock the log again
        ChatBox.yview(END)  # scroll to the newest message
        output(res)
# Build the fixed-size chatbot window: chat log (top), entry box and Send
# button (bottom), scrollbar on the right.
root = Tk()
root.title("Chatbot")
root.geometry("400x500")
root.resizable(width=FALSE, height=FALSE)  # fixed window size
ChatBox = Text(root, bd=0, bg="white", height="8", width="50", font="Arial",)
ChatBox.config(state=DISABLED)  # read-only; send() unlocks it while writing
scrollbar = Scrollbar(root, command=ChatBox.yview, cursor="heart")
ChatBox['yscrollcommand'] = scrollbar.set  # keep scrollbar in sync with the log
SendButton = Button(root, font=("Verdana",12,'bold'), text="Send", width="12", height=5,
                    bd=0, bg="#f9a602", activebackground="#3c9d9b",fg='#000000',
                    command= send )
EntryBox = Text(root, bd=0, bg="white",width="29", height="5", font="Arial")
# Absolute-pixel layout inside the 400x500 window.
scrollbar.place(x=376,y=6, height=386)
ChatBox.place(x=6,y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)
root.mainloop()
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 20 13:20:03 2018
@author: XIECHEN
"""
import requests
import json
import time
starttime = time.asctime(time.localtime(time.time()))  # human-readable start stamp (kept for parity with logs)
starttime1 = time.time()  # numeric start time for duration reporting


def getjson(ocoo):
    """Query the Baidu Directions transit API for a route from *ocoo* to the
    fixed destination, retrying forever on network errors.

    :param ocoo: origin as a "lat,lng" WGS84 string.
    :return: decoded JSON response as a dict.
    """
    # Bug fix: the original URL literal was unterminated (the closing quote
    # after "ak=" was missing), so the file did not parse.
    # TODO: supply a real Baidu API key instead of the placeholder.
    url = ('http://api.map.baidu.com/direction/v2/transit?origin=' + ocoo +
           '&destination=31.1431889586,121.6581503809&coord_type=wgs84&tactics_incity=4&ak=YOUR_AK_HERE')
    while True:
        try:
            response = requests.get(url=url, timeout=5)
            break
        except requests.exceptions.ConnectionError:
            print('ConnectionError -- please wait 10 sec')
            time.sleep(10)
        except requests.exceptions.ChunkedEncodingError:
            print('ChunkedEncodingError -- please wait 10 sec')
            time.sleep(10)
        except Exception:  # narrowed from a bare except: keep retrying, but let KeyboardInterrupt through
            print('Unknow error -- please wait 10 sec')
            time.sleep(10)
    html = response.text
    decodejson = json.loads(html)
    return decodejson


# Read "id,...,lat,lng" rows, query a transit route for each, and write
# "id,duration,distance" lines to the output file.
# (Converted from Python 2 print statements to Python 3 print() calls.)
file_object = open(r'C:\Users\XIECHEN\Desktop\test1.txt', 'r')
file_object2 = open(r'C:\Users\XIECHEN\Desktop\output.txt', 'w')
count = 0
try:
    for line in file_object:
        count = count + 1
        spline = line.split(',')
        idn = spline[0]
        coor = spline[4].strip() + ',' + spline[5].strip()
        decodejson = getjson(coor)
        if decodejson.get('status') == 0:  # status 0 means the request succeeded
            result = decodejson.get('result')
            routes = result.get('routes')
            # Duration and distance of the first suggested route.
            if len(routes) > 0:
                time2 = routes[0].get('duration')
                distance = routes[0].get('distance')
                file_object2.write(str(idn) + ',' + str(time2) + ',' + str(distance) + '\n')
            # Progress report every 10 processed rows.
            if count % 10 == 0:
                finishtime1 = time.time()
                print(count)
                print('duration:', (finishtime1 - starttime1) / 60.0, 'mins')
        else:
            print(str(coor) + ',' + str(decodejson.get('status')) + ',' + decodejson.get('message'))
finally:
    file_object.close()
    file_object2.close()
print('finish')
# -*- coding: utf-8 -*-
import datetime
import logging
import telegram
from django.utils import timezone
from tgbot.handlers import commands
from tgbot.handlers import static_text as st
from tgbot.handlers import manage_data as md
from tgbot.handlers import keyboard_utils as kb
from tgbot.handlers.utils import handler_logging
from tgbot.models import User
from tgbot.poetry import Poetry
from tgbot.tasks import broadcast_message
from tgbot.utils import convert_2_user_time, extract_user_data_from_update, get_chat_id
logger = logging.getLogger('default')
@handler_logging()
def send_more(update, context):
    """Replace the current message with a freshly loaded poem and its keyboard."""
    chat_id = extract_user_data_from_update(update)['user_id']
    user = User.get_user(update, context)
    poem_text, poem_id = Poetry(user).load_poem()
    context.bot.edit_message_text(
        chat_id=chat_id,
        message_id=update.callback_query.message.message_id,
        text=poem_text,
        reply_markup=kb.make_keyboard_for_start_command(poem_id),
        parse_mode=telegram.ParseMode.MARKDOWN,
    )
@handler_logging()
def add_to_fav(update, context):
    """Store the poem referenced by the callback in the user's favourites and
    confirm it by editing the message."""
    logger.info('Начинаем процесс добавления в избранное')
    chat_id = extract_user_data_from_update(update)['user_id']
    user = User.get_user(update, context)
    # The poem id travels in the callback payload after the '#' separator.
    query = update.callback_query
    query.answer()
    poem_id = query.data.split('#')[1]
    logger.info(f'Добавляем в избранное стих #{poem_id}')
    Poetry(user).add_to_fav(poem_id)
    context.bot.edit_message_text(
        chat_id=chat_id,
        message_id=update.callback_query.message.message_id,
        text=st.add_to_fav_success,
        reply_markup=kb.make_keyboard_for_start_command(poem_id),
        parse_mode=telegram.ParseMode.MARKDOWN,
    )
@handler_logging()
def view_fav(update, context):
    """Open the favourites browser: an alphabet keyboard of author initials."""
    chat_id = extract_user_data_from_update(update)['user_id']
    user = User.get_user(update, context)
    first_chars = Poetry(user).get_authors(only_first_chars=True)
    context.bot.edit_message_text(
        chat_id=chat_id,
        message_id=update.callback_query.message.message_id,
        text=st.choose_author,
        reply_markup=kb.make_alphabetical_keyboard(first_chars),
        parse_mode=telegram.ParseMode.MARKDOWN,
    )
@handler_logging()
def show_authors(update, context):
    """List full author names whose last name starts with the selected letter."""
    chat_id = extract_user_data_from_update(update)['user_id']
    user = User.get_user(update, context)
    # The selected letter travels in the callback payload after '#'.
    query = update.callback_query
    query.answer()
    selected_char = query.data.split('#')[1]
    authors = Poetry(user).get_authors(only_first_chars=False, last_name_first_char=selected_char)
    context.bot.edit_message_text(
        chat_id=chat_id,
        message_id=update.callback_query.message.message_id,
        text=st.choose_author_full,
        reply_markup=kb.make_authors_keyboard(authors),
        parse_mode=telegram.ParseMode.MARKDOWN,
    )
@handler_logging()
def show_author_poems(update, context):
    """List the favourite poems of the author chosen via the callback."""
    chat_id = extract_user_data_from_update(update)['user_id']
    user = User.get_user(update, context)
    # The author name travels in the callback payload after '#'.
    query = update.callback_query
    query.answer()
    author = query.data.split('#')[1]
    poems = Poetry(user).get_poems(author)
    logger.info(poems)
    context.bot.edit_message_text(
        chat_id=chat_id,
        message_id=update.callback_query.message.message_id,
        text=st.choose_poem,
        reply_markup=kb.make_poems_keyboard(poems),
        parse_mode=telegram.ParseMode.MARKDOWN,
    )
@handler_logging()
def show_poem_by_id(update, context):
    """Render the poem selected from the favourites list."""
    chat_id = extract_user_data_from_update(update)['user_id']
    user = User.get_user(update, context)
    # The poem id travels in the callback payload after '#'.
    query = update.callback_query
    query.answer()
    poem_id = query.data.split('#')[1]
    poetry = Poetry(user)
    poem = poetry.get_poem_by_id(poem_id)
    context.bot.edit_message_text(
        chat_id=chat_id,
        message_id=update.callback_query.message.message_id,
        text=poetry.format_poem(poem),
        reply_markup=kb.make_btn_keyboard(),
        parse_mode=telegram.ParseMode.MARKDOWN,
    )
@handler_logging()
def back_to_main_menu_handler(update, context): # callback_data: BUTTON_BACK_IN_PLACE variable from manage_data.py
    """Redraw the main /start menu in place of the current message.

    The get_user_and_created call is kept for its side effect (it ensures a
    DB row exists for the user); the previously unused `created` and
    `payload` locals were dead code and have been removed.
    """
    User.get_user_and_created(update, context)
    user_id = extract_user_data_from_update(update)['user_id']
    context.bot.edit_message_text(
        chat_id=user_id,
        text=st.welcome,
        message_id=update.callback_query.message.message_id,
        reply_markup=kb.make_keyboard_for_start_command(),
        parse_mode=telegram.ParseMode.MARKDOWN
    )
@handler_logging()
def secret_level(update, context): #callback_data: SECRET_LEVEL_BUTTON variable from manage_data.py
    """ Pressed 'secret_level_button_text' after /start command"""
    user_id = extract_user_data_from_update(update)['user_id']
    # Stats come straight from the Django ORM: total users and users whose
    # `updated_at` falls within the last 24 hours.
    text = "Congratulations! You've opened a secret room👁🗨. There is some information for you:\n" \
           "*Users*: {user_count}\n" \
           "*24h active*: {active_24}".format(
        user_count=User.objects.count(),
        active_24=User.objects.filter(updated_at__gte=timezone.now() - datetime.timedelta(hours=24)).count()
    )
    # Replaces the menu message with the stats text (no keyboard attached).
    context.bot.edit_message_text(
        text=text,
        chat_id=user_id,
        message_id=update.callback_query.message.message_id,
        parse_mode=telegram.ParseMode.MARKDOWN
    )
def broadcast_decision_handler(update, context): #callback_data: CONFIRM_DECLINE_BROADCAST variable from manage_data.py
    """Handle the admin's Confirm/Decline answer for a pending broadcast.

    Confirm: queue the broadcast to every user via the celery task and show
    a 'message sent' notice. Decline: keep the original message text.
    """
    broadcast_decision = update.callback_query.data[len(md.CONFIRM_DECLINE_BROADCAST):]
    # to_dict() gives JSON-serializable entities for the celery payload.
    entities_for_celery = update.callback_query.message.to_dict().get('entities')
    entities = update.callback_query.message.entities
    text = update.callback_query.message.text
    if broadcast_decision == md.CONFIRM_BROADCAST:
        # Bug fix: a trailing comma made this a 1-tuple (st.msg_sent,),
        # which was then passed to edit_message_text as `text`.
        admin_text = st.msg_sent
        user_ids = list(User.objects.all().values_list('user_id', flat=True))
        broadcast_message.delay(user_ids=user_ids, message=text, entities=entities_for_celery)
    else:
        admin_text = text
    context.bot.edit_message_text(
        text=admin_text,
        chat_id=update.callback_query.message.chat_id,
        message_id=update.callback_query.message.message_id,
        entities=None if broadcast_decision == md.CONFIRM_BROADCAST else entities
    )
import functools
import time
def calculate_time(func):
    """Decorator that prints how long each call to *func* took.

    Generalized: the original wrapper accepted exactly one positional
    argument, so it broke on any function with a different signature.
    functools.wraps preserves the wrapped function's name/docstring.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print(func.__name__ + " took " + str(end - start))
        return result
    return wrapper
@calculate_time
def multipication(numbers):
    """Return a list with the cube of every element of *numbers*.

    (Name kept as-is for existing callers.)
    """
    return [number ** 3 for number in numbers]
@calculate_time
def collection(numbers):
    """Return a list with every element of *numbers* increased by 2."""
    return [number + 2 for number in numbers]
# Demo: run both decorated functions on 0..9; each call prints its timing.
print(multipication(range(10)))
print("")
print(collection(range(10)))
|
#!/usr/bin/env python
__title__ = 'MakeChart'
__version__ = 0.2
__author__ = "Ryan McGreal ryan@quandyfactory.com"
__homepage__ = "http://quandyfactory.com/projects/56/makechart"
__copyright__ = "(C) 2009 by Ryan McGreal. Licenced under GNU GPL 2.0\nhttp://www.gnu.org/licenses/old-licenses/gpl-2.0.html"
"""
MakeChart is a simple script written in Python that takes an array and generates a bar chart.
"""
def add_sep(n, sep=','):
    """
    Adds a separator (default comma) to long numbers.

    Fixed version of the dzone snippet: the original grouped the digits
    from the wrong end (add_sep(1234567) returned '7,654,321'), dropped the
    decimal part for negative numbers, and grouped the '-' sign as a digit.
    """
    parts = str(n).split('.')  # deal with decimals
    s = parts[0]
    if len(s) < 4:
        return str(n)
    decimal = '.%s' % parts[1] if len(parts) > 1 else ''
    sign = ''
    if s.startswith('-'):
        sign, s = '-', s[1:]
    # Group from the right: reverse, chunk into threes, reverse back.
    rev = s[::-1]
    groups = [rev[i:i + 3] for i in range(0, len(rev), 3)]
    return '%s%s%s' % (sign, sep.join(groups)[::-1], decimal)
def get_highest_value(dataset, column):
    """
    Return the highest value in *column* across *dataset*.

    Uses a single O(n) max() pass instead of fully sorting the data
    (O(n log n)) just to read off the last element.
    """
    return max(datum[column] for datum in dataset)
def make_ratio(highest_value, scale):
    """Return the pixels-per-unit ratio used to size bars against *scale*."""
    return float(scale) / int(highest_value)
def vertical(string):
    """Render *string* one character per line by joining with <br> tags."""
    return '<br>'.join(string) + '<br>'
def make_chart(dataset, caption, unit=''):
    """
    Makes an HTML bar chart out of a dataset.

    Each datum is indexed as (label, value, ...); bar heights are scaled so
    the tallest bar is 200px (see make_ratio).
    """
    highest_value = get_highest_value(dataset, 1)
    ratio = make_ratio(highest_value, 200)
    bars = [
        '<td title="%s: %s %s" class="bar"><div style="height: %spx"></div></td>'
        % (datum[0], add_sep(datum[1]), unit, int(int(datum[1]) * ratio))
        for datum in dataset
    ]
    labels = ['<td>%s</td>' % (vertical(datum[0])) for datum in dataset]
    parts = ['<table class="makechart">']
    parts.append('<caption>%s</caption>' % (caption))
    parts.append('<tr class="bar">')
    parts.append('\n'.join(bars))
    parts.append('</tr>')
    parts.append('<tr class="label">')
    parts.append('\n'.join(labels))
    parts.append('</tr>')
    parts.append('</table>')
    return '\n'.join(parts)
def make_css():
    """
    Generates basic CSS to display the bar chart
    """
    rules = (
        'table.makechart { border-collapse: collapse; border: 1px solid #ccc; font-size: 1em; }',
        'table.makechart caption { font-weight: bold; font-size: 130%; text-align: center; }',
        'table.makechart th, table.makechart td { border: 1px solid #ccc; padding: 0; }',
        'table.makechart .bar td { height: 300px; text-align: center; vertical-align: bottom; }',
        'table.makechart .bar td div { text-align: center; width: 100%; background: red; color: white; }',
        'table.makechart .label td { text-align: center; vertical-align: top; padding: 1px; padding-bottom: 1em; background: #eef; color: darkblue; font-size: .8em; }',
    )
    return '\n'.join(rules)
def make_html(chart, css=make_css()):
    """
    Makes an HTML page.

    NOTE: the css default is evaluated once at definition time (kept for
    interface compatibility with existing callers).
    """
    document = [
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" ',
        '"http://www.w3.org/TR/html4/strict.dtd">',
        '<html lang="en">',
        '<head>',
        '<meta name="author" content="Ryan McGreal">',
        '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">',
        '<meta http-equiv="Content-Style-Type" content="text/css">',
        '<meta name="generator" content="MakeChart; url=http://quandyfactory.com/projects/56/makechart">',
        '<title>Make Chart Example</title>',
        '<style type="text/css">@import "/static/styles/style.css";',
        css,
        '</style>',
        '</head>',
        '<body>',
        chart,
        '</body>',
        '</html>',
    ]
    return '\n'.join(document)
|
import flask
from flask import render_template, Flask, redirect, json ,jsonify , url_for
from flask import request
import device_mgmt_class
from flask import jsonify
import database
import smtplib
device_detail_array = ['dev_id', 'dev_name' ,'dev_console', 'dev_mgmt','dev_power', 'used_by', 'dev_topo']
app = Flask(__name__)
app.config['SECRET_KEY'] = 'you-will-never-guess'
app.config['TESTING'] = False
@app.route('/', methods=['GET','POST'])
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in (POST) or show the login page / resume a session (GET).

    Session state is a plain 'user' cookie set here and checked by the
    other views.
    """
    error = ""
    if request.method == 'POST':
        username = request.form['n_userid']
        # NOTE(review): the password field is passed as "" — login_user
        # appears to validate the username only; confirm against
        # database.login_user before relying on this for auth.
        local_user_detail = device_mgmt_class.User(username,"")
        ret_val = database.login_user(local_user_detail)
        if ret_val is True:
            ''' If the user is authenticated redirect the use
                to the device detail page
            '''
            print("coming here")
            resp = flask.make_response(redirect(url_for('get_device_page')))
            resp.set_cookie('user', username)
            return resp
        else:
            return render_template('login.html', error="Invalid Username or Password")
    else:
        #Get method
        username = request.cookies.get('user')
        if username is None:
            resp = flask.make_response(render_template('login.html', error=error))
            return resp
        else:
            '''
            The cookie is set, which means that the user is already authenticated
            redirect the user to device detail page'''
            resp = flask.make_response(redirect(url_for('get_device_page')))
            return resp
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Create a new user from the signup form (POST) or show the page (GET)."""
    error = ""
    if request.method == 'POST':
        username = request.form['n_reg_userid']
        email = request.form['n_reg_email']
        local_user_instance = device_mgmt_class.User(username, email)
        return_str = database.insert_user_data(local_user_instance)
        """
        Get the username and email and insert into the DB.
        """
        if len(return_str):
            error = return_str
        else:
            error="User created Successfully. \n Login to Continue.."
        # NOTE(review): `error` is computed but the unconditional redirect
        # below cannot carry it, so neither message reaches the user —
        # confirm whether it should be flashed or rendered instead.
        return flask.make_response(redirect(url_for('login')))
    else:
        return render_template('login.html', error=error)
@app.route('/device_detail/reserve_device', methods=['POST'])
def reserve_device():
    """Reserve a device: upsert the posted JSON device record into the DB."""
    if request.method == 'POST':
        jsondata = request.get_json()
        '''
        Reserve the device for the user requested for and update the DB.
        '''
        local_device_instance = device_mgmt_class.DeviceMgmt(jsondata['dev_id'],
                                                            jsondata['dev_name'],
                                                            jsondata['dev_console'],
                                                            jsondata['dev_mgmt'],
                                                            jsondata['dev_power'],
                                                            jsondata['dev_topo'],
                                                            jsondata['used_by'])
        database.add_update_device(local_device_instance)
        return render_template('device_detail.html')
    else:
        # Unreachable: the route only accepts POST (see methods above).
        pass
    return 0
@app.route('/device_detail/request_device', methods=['POST'])
def request_device():
    """Email the current holder of a device to request it.

    Expects JSON with 'used_by' (current holder) and 'dev_name'. Mailing is
    best-effort: on failure the page is re-rendered with an error message.
    """
    if request.method == 'POST':
        jsondata = request.get_json()
        try:
            msg_to = jsondata['used_by'] + '@cisco.com'
            msg_sub = 'Device Request : ' + jsondata['dev_name']
            TEXT = 'User ' + request.cookies.get('user') + " is requesting device :" + jsondata['dev_name']
            message = 'Subject: {}\n\n{}'.format(msg_sub, TEXT)
            s = smtplib.SMTP('localhost')
            s.sendmail('dev_mgr', msg_to, message)
            s.quit()
            return render_template('device_detail.html')
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; SMTP/key errors still land here.
            return render_template('device_detail.html', error="Error sending Email")
    else:
        return render_template('device_detail.html')
@app.route('/device_detail/save_edit_device', methods=['POST'])
def device_detail_operation():
    """Create or update a device record from the posted JSON form data.

    dev_id == 0 means "new device": the DB assigns the real id, which is
    copied back onto the instance. The duplicated `form_device_id`
    assignment and the unused `error` local were removed.
    """
    if request.method == 'POST':
        jsondata = request.get_json()
        form_device_id = jsondata['dev_id']
        device_name = jsondata['dev_name']
        device_console = jsondata['dev_console']
        device_mgmt = jsondata['dev_mgmt']
        device_power = jsondata['dev_power']
        device_topo = jsondata['dev_topo']
        # NOTE(review): used_by is sent as None — presumably saving an edit
        # never changes ownership; confirm add_update_device semantics.
        used_by = None
        local_device_instance = device_mgmt_class.DeviceMgmt(form_device_id, device_name,device_console,device_mgmt,
                                                             device_power, device_topo, used_by)
        device_id = database.add_update_device(local_device_instance)
        if form_device_id == 0:
            local_device_instance.dev_id = device_id
    return flask.make_response(redirect(url_for('get_device_page')))
@app.route('/device_detail/',methods=['GET'])
def get_device_page():
    '''
    Render the device-detail page; bounce unauthenticated users to /login.
    The 'user' cookie (set by login()) is the session marker.
    '''
    username = request.cookies.get('user')
    if username is None:
        # Unused `error` local removed: redirect() cannot carry a message;
        # the login view renders its own text.
        return redirect(url_for('login'))
    return render_template('device_detail.html')
@app.route('/device_detail/get_device', methods=['GET', 'POST','DELETE'])
def fetch_all_devices():
    '''
    GET: return every device as a JSON list of dicts keyed by
    device_detail_array. DELETE: remove the device named in the JSON body.
    (POST is accepted by the route but has no handler branch.)
    '''
    if request.method == 'GET':
        data = database.get_device_details()
        # zip() pairs each column name with the matching tuple slot,
        # replacing the manual range(len(...)) index loop.
        list_dict = [dict(zip(device_detail_array, entry)) for entry in data]
        return jsonify(list_dict)
    elif request.method == 'DELETE':
        jsondata = request.get_json()
        database.delete_device(jsondata['dev_id'])
        return get_device_page()
@app.route('/logout',methods=["POST"])
def logout():
    '''
    Delete the session cookie, which de-authenticates the user.
    :return: redirect response to the login page
    '''
    username = request.cookies.get('user')
    print(username)
    resp = flask.make_response(redirect('/login'))
    # Bug fix: delete_cookie's second positional argument is the cookie
    # *path*, not its value; passing the username meant the cookie was
    # deleted for the wrong path and the session never actually ended.
    resp.delete_cookie('user')
    return resp
def invoke_db_conn():
    '''
    Setup the DB connections and create the DB if they don't exist.
    '''
    print("Connecting to DB.....")
    connect = database.database_conn()
    if connect is None:
        print("No Db Connection")
    else:
        print("Connection Succeeded", "\n")
    # NOTE(review): table creation runs even when the connection check above
    # failed — confirm whether it should be inside the else branch.
    database.check_and_create_device_table()
    database.check_and_create_user_table()
if __name__ == '__main__':
    # Ensure the DB and tables exist before serving requests.
    invoke_db_conn()
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to
    # the whole network — disable debug for any non-local deployment.
    app.run(debug=True,host='0.0.0.0',port='5500')
|
# -*- coding: utf-8 -*-
import time
from datetime import timedelta, datetime, date
import urllib, urllib2
import os
import json
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
from django.shortcuts import render_to_response
from django.contrib.auth.models import User, Group, Permission
from django.contrib import auth
from django.db.models import Q
from core.jsonresponse import JsonResponse, create_response
from core.dateutil import get_today
from core.exceptionutil import full_stack, unicode_full_stack
from webapp.modules.cms import request_util
template_path_items = os.path.dirname(__file__).split(os.sep)
TEMPLATE_DIR = '%s/templates/webapp' % template_path_items[-1]
########################################################################
# get_category: render the "article list" page
########################################################################
def get_category(request):
    """Render the article-list page using the caller's template directory."""
    # template_name is expected on the request (set upstream); the resolved
    # directory is passed along via request.template_dir.
    request.template_dir = '%s/%s' % (TEMPLATE_DIR, request.template_name)
    return request_util.get_article_list(request)
########################################################################
# get_article: render the "article content" page
########################################################################
def get_article(request):
    """Render a single article page using the caller's template directory."""
    request.template_dir = '%s/%s' % (TEMPLATE_DIR, request.template_name)
    return request_util.get_article(request)
########################################################################
# get_demo_home_page: render the "demo home" page
########################################################################
def get_demo_home_page(request):
    """Render the demo home page using the caller's template directory."""
    request.template_dir = '%s/%s' % (TEMPLATE_DIR, request.template_name)
    return request_util.get_demo_home_page(request)
"""For each node in a binary tree, create
a new duplicate node, and insert the duplicate
as the left child of the original node."""
class TreeNode:
    """Simple binary-tree node.

    Exposes the payload as both `.data` (original attribute) and `.val`:
    the sibling helpers in this file (get_nodes, insert) read `node.val`,
    which raised AttributeError when only `.data` existed.
    """
    def __init__(self, data):
        self.data = data
        self.val = data  # alias so get_nodes()/insert() work
        self.left = None
        self.right = None
def doubleTree(root):
    """Insert a duplicate of every original node as that node's left child.

    Rewritten as a single post-order recursion. The original collected all
    values and then re-searched the tree per value (O(n^2)), and it broke
    on trees containing duplicate values (insert() could splice copies into
    both subtrees). Children are processed *before* the duplicate is
    spliced in, so freshly inserted copies are never revisited.
    """
    if root is None:
        return None
    doubleTree(root.left)
    doubleTree(root.right)
    duplicate = TreeNode(root.data)
    duplicate.left = root.left
    root.left = duplicate
    return root
def get_nodes(root, array):
    """Preorder traversal: append each node's value (root, left, right) to *array*."""
    if root is not None:
        array.append(root.val)
        get_nodes(root.left, array)
        get_nodes(root.right, array)
def insert(val, root):
    # Find a node whose value equals *val* and splice a duplicate of it in
    # as its left child (the old left subtree hangs off the duplicate).
    if root is None:
        return
    if val == root.val:
        newNode = TreeNode(val)
        temp = root.left
        root.left = newNode
        newNode.left = temp
        return
    # NOTE(review): both recursive calls run from each parent, so if *val*
    # occurs in more than one subtree every occurrence gets a duplicate —
    # with repeated values in the tree this inserts more copies than the
    # one-per-call contract suggests.
    insert(val, root.left)
    insert(val, root.right)
from typing import Dict
from copy import deepcopy
import logging
import warnings
logger = logging.getLogger(__name__)
def reformat_config(config: Dict) -> Dict:
    """
    Reformat old config files to enable their use for new versions of xopt.

    Raise a bunch of warnings so it annoys people into updating their config files

    Parameters
    ----------
    config: Dict
        Old config file to be checked

    Returns
    -------
    new_config: Dict
        Updated config file
    """
    # work on a copy so the caller's dict is left untouched
    new_config = deepcopy(config)

    # --- xopt section: drop retired keywords ---
    if 'algorithm' in new_config['xopt']:
        warnings.warn('`algorithm` keyword no longer allowed in xopt config, removing')
        del new_config['xopt']['algorithm']
    if 'verbose' in new_config['xopt']:
        warnings.warn('`verbose` keyword no longer allowed in xopt config')
        del new_config['xopt']['verbose']

    # --- simulation section: rename `function`, relocate `templates` ---
    if 'function' in new_config['simulation']:
        warnings.warn('`function` keyword no longer allowed in simulation config, moving to `evaluate`')
        new_config['simulation']['evaluate'] = new_config['simulation'].pop('function')
    if 'templates' in new_config['simulation']:
        warnings.warn('`templates` keyword no longer allowed in simulation config, '
                      'moving to `options`')
        _move_templates_to_options(new_config, new_config['simulation'])

    # --- vocs section: drop retired keywords, relocate `templates` ---
    for ele in ['name', 'description', 'simulation']:
        if ele in new_config['vocs']:
            logger.warning(
                f'`{ele}` keyword no longer allowed in vocs config, removing')
            del new_config['vocs'][ele]

    if 'templates' in new_config['vocs']:
        logger.warning('`templates` keyword no longer allowed in vocs config, '
                       'moving to simulation `options`')
        _move_templates_to_options(new_config, new_config['vocs'])

    return new_config


def _move_templates_to_options(new_config: Dict, section: Dict) -> None:
    """Pop `templates` from *section* into simulation `options` (creating it).

    Replaces the duplicated try/except-KeyError blocks of the original with
    a single setdefault-based helper; behavior is unchanged.
    """
    templates = section.pop('templates')
    new_config['simulation'].setdefault('options', {})['templates'] = templates
|
# Generated by Django 3.1.4 on 2021-02-05 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the `message` model
    # (id, mobile, content, wp) on top of posts migration 0055.

    dependencies = [
        ('posts', '0055_auto_20210122_1659'),
    ]

    operations = [
        migrations.CreateModel(
            name='message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile', models.IntegerField(blank=True)),
                ('content', models.CharField(max_length=500)),
                # wp: flag telling downstream code to also send via WhatsApp
                ('wp', models.BooleanField(default=False, verbose_name='Send Whatsapp Message')),
            ],
        ),
    ]
|
'''
Created on Feb 6, 2016
@author: vagif
'''
import datetime
from lib.ShelveStorage import ShelveStorage
class VaultStorage():
    """Persistent per-vault bookkeeping: status, dates and the archive index.

    Backed by a ShelveStorage file named after the region and vault.
    Fixes over the original: dict.has_key (Python-2-only) replaced with
    `in`; mutable default arguments replaced with None sentinels; and
    updateArchive() now keys on its *filename* parameter instead of
    reading `archive.filename` (which ignored the parameter and crashed
    for plain-dict archives).
    """
    ## TODO Make the time format the same as AWS uses
    __TIME_FORMAT = "%Y%m%d_%H%M%S"
    __PATTERN_VAULT_STORAGE_FILE = 'vault_%s_%s'
    __KEY_ARCHIVES = 'archives'
    __KEY_LAST_INVENTORY_DATE = 'lastInventoryDate'
    __KEY_UPDATED_DATE = 'updatedDate'
    __KEY_STATUS = 'status'
    STATUS_VALUE_READY = 'ready'
    STATUS_VALUE_WAITING_INVENTORY = 'waiting inventory'

    def __init__(self, region, vaultName, storageFolder = "~/.aws"):
        self.region = region
        self.vault = vaultName
        self.shelveFile = self.__PATTERN_VAULT_STORAGE_FILE % (region, vaultName)
        self.storage = ShelveStorage(storageFolder, self.shelveFile)

    def createStorage(self):
        """Initialise a fresh store: ready status, current timestamp, no archives."""
        self.setStatus(self.STATUS_VALUE_READY)
        self.setCurrentUpdatedDate()
        self.setArchives({})

    def isStatusReady(self):
        """Return True when the stored status equals STATUS_VALUE_READY."""
        return self.STATUS_VALUE_READY == self.getStatus()

    def getStatus(self):
        with self.storage as d:
            if self.__KEY_STATUS in d:  # `in` replaces py2-only has_key()
                return d[self.__KEY_STATUS]
        return None

    def setStatus(self, status = None):
        with self.storage as d:
            d[self.__KEY_STATUS] = status

    def getUpdatedDate(self):
        with self.storage as d:
            if self.__KEY_UPDATED_DATE in d:
                return d[self.__KEY_UPDATED_DATE]
        return None

    def setUpdatedDate(self, date):
        with self.storage as d:
            d[self.__KEY_UPDATED_DATE] = date

    def setCurrentUpdatedDate(self):
        """Stamp the store with the current local time."""
        self.setUpdatedDate(self.__getCurrentTime())

    def getLastInventoryDate(self):
        with self.storage as d:
            if self.__KEY_LAST_INVENTORY_DATE in d:
                return d[self.__KEY_LAST_INVENTORY_DATE]
        return None

    def setLastInventoryDate(self, date):
        with self.storage as d:
            d[self.__KEY_LAST_INVENTORY_DATE] = date

    def getArchives(self):
        """Return the filename->archive mapping ({} when none stored)."""
        with self.storage as d:
            if self.__KEY_ARCHIVES in d:
                return d[self.__KEY_ARCHIVES]
        return {}

    def setArchives(self, archives = None):
        # None sentinel instead of a shared mutable {} default
        with self.storage as d:
            d[self.__KEY_ARCHIVES] = {} if archives is None else archives

    def isArchiveExist(self, filename):
        ## TODO check the archive size as well
        ## TODO throw exception if doesn't match
        return self.getArchive(filename) is not None

    def getArchive(self, filename):
        archives = self.getArchives()
        if filename in archives:
            return archives[filename]
        return None

    def updateArchive(self, filename, archive = None):
        """Store or replace *archive* under *filename*.

        Bug fix: the original keyed the entry on `archive.filename`,
        ignoring the *filename* parameter entirely.
        """
        archives = self.getArchives()
        archives[filename] = {} if archive is None else archive
        self.setArchives(archives)

    def removeArchive(self, filename):
        archives = self.getArchives()
        if filename in archives:
            del archives[filename]
            self.setArchives(archives)

    def __getCurrentTime(self):
        return datetime.datetime.now().strftime(self.__TIME_FORMAT)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_IrOx_Active_Learning_OER]
# language: python
# name: conda-env-PROJ_IrOx_Active_Learning_OER-py
# ---
# # OER Volcano for IrOx systems
#
# ***
# # Import Modules | TEMP NEW
# %%capture
# %load_ext autoreload
# %autoreload 2
import os
print(os.getcwd())
import sys
# +
# # %%capture
sys.path.insert(
0, os.path.join(
os.environ["PROJ_irox"],
"workflow"))
sys.path.insert(
0, os.path.join(
os.environ["PROJ_irox"],
"data"))
from an_data_processing import load_df
# #############################################################################
# Python Modules
import pickle
import numpy as np
import plotly.graph_objs as go
# #############################################################################
# My Modules
from oxr_reaction.oxr_rxn import ORR_Free_E_Plot
from oxr_reaction.oxr_plotting_classes.oxr_plot_volcano import Volcano_Plot
# #############################################################################
# Project Data
from proj_data_irox import (
proj_dir_name,
smart_format_dict,
gas_molec_dict,
scaling_dict_ideal,
scaling_dict_fitted,
exp_irox_lim_pot,
data_dir,
groupby_props,
axis_label_font_size,
axis_tick_labels_font_size,
oer_systems_to_plot,
irox_bulk_color_map)
# #############################################################################
# Local Imports
from plotting.my_plotly import (
my_plotly_plot,
add_minor_ticks,
add_duplicate_axes,
)
# from layout import layout
# from layout2 import layout
# -
import pandas as pd
# # Script Inputs
# +
# Script inputs: toggle saving/experimental traces and set the axis window.
save_plot = False
plot_exp_traces = True

plot_range = {
    # y-limits are listed high-to-low, i.e. the axis is drawn inverted
    "y": [2., 1.4],
    "x": [1., 2.],
    }
# + [markdown] toc-hr-collapsed=true
# # Read and Process Data Frame
# -
# ## Read dataframe from file
data_dir
# +
# Load the processed dataframes; only the adsorption-energy frame (df_ads)
# is used below, aliased as df_m.
df_pourbaix, df_ads, df_surf = load_df(
    from_file=False,
    root_dir=data_dir,
    data_dir=data_dir,
    file_name="df_master.pickle",
    process_df=True,
    )

df_m = df_ads
# +
df_m.columns.tolist()
short_cols_list = [
'bulk_system',
'facet',
'adsorbate',
'coverage_type',
'ooh_direction',
'ads_e',
'elec_energy',
# 'total_magmom',
# 'abs_magmom',
# 'path_short',
# 'name_i',
# 'max_force',
# 'sum_force',
# 'elem_num_dict',
# 'incar_parsed',
# 'init_atoms',
'atoms_object',
# 'N_atoms',
# 'dipole_correction',
# 'path',
# 'name_i_2',
# 'name_i_3',
# 'priority',
'surface_type',
]
# -
# # ORR_Free_E_Plot Instance
# Free-energy plot container; series are added to it in the grouping loop
# below. Columns "adsorbate"/"ads_e" of df_m supply state and energy.
ORR_PLT = ORR_Free_E_Plot(
    free_energy_df=None,
    state_title="adsorbate",
    free_e_title="ads_e",
    smart_format=smart_format_dict,
    color_list=None,
    rxn_type="OER")
# +
# smart_format_dict
# -
# # Processing Data
# +
# Reorder rows so all non-IrO3 entries come first and IrO3 entries last
# (affects plotting order only).
new_index_order = [] + \
    df_m[df_m.bulk_system != "IrO3"].index.tolist() + \
    df_m[df_m.bulk_system == "IrO3"].index.tolist() + \
    []
df_m = df_m.loc[new_index_order]
# -
# # TEMP Changing data manualy just slightly for better visiblity in OER plot
# +
# index_i = df_m[
# (df_m.bulk_system == "IrO3_rutile-like") & \
# (df_m.facet == "100") & \
# (df_m.coverage_type == "o_covered_2") & \
# (df_m.adsorbate == "o")
# ].iloc[0:].index[0]
# # 2.840912 eV
# # df_m.loc[274, "ads_e"] = 2.78
# # df_m.loc[274, "ads_e"] = 2.838
# df_m.loc[index_i, "ads_e"] = 2.838
# index_i = df_m[
# (df_m.bulk_system == "IrO3_rutile-like") & \
# (df_m.facet == "110") & \
# (df_m.coverage_type == "o_covered") & \
# (df_m.adsorbate == "o")
# ].iloc[0:].index[0]
# # 2.62689
# df_m.loc[index_i, "ads_e"] = 2.63
# +
# Properties attached to each plotted series.
prop_name_list = [
    'bulk_system',
    # 'coverage',
    'coverage_type',
    'facet',
    'surface_type',
    ]

df_dict_i = dict()
grouped = df_m.groupby(groupby_props, sort=False)
for i_ind, (name, group) in enumerate(grouped):
    df_i = group
    # Group key tuple joined into a unique series id, e.g. "IrO2_110_..."
    name_i = "_".join(list(name))
    print("name:", name_i)

    # if name_i == "IrO3_rutile-like_100_o_covered_2_NaN":
    # Only complete groups (no NaN energies) that are whitelisted in
    # oer_systems_to_plot become series on ORR_PLT.
    if not any([np.isnan(i) for i in df_i.elec_energy.tolist()]):
        if name_i in oer_systems_to_plot:
            print("ADDING SYSTEM")
            ORR_PLT.add_series(
                df_i,
                plot_mode="all",
                overpotential_type="OER",
                property_key_list=prop_name_list,
                add_overpot=False,
                name_i=name_i,
                )

    df_dict_i[name_i] = df_i
# -
ORR_PLT.series_list
# + active=""
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# -
# # Import Modules
# %load_ext autoreload
# %autoreload 2
# +
# %%capture
import pandas as pd
# Setting Custom Paths ********************************************************
# *****************************************************************************
import os; import sys
sys.path.append(".."); sys.path.append("../..")
sys.path.insert(0, os.path.join(
os.environ["PROJ_col_iro2"],
"data"))
# Python Modules **************************************************************
# *****************************************************************************
from plotly import io as pyio
import chart_studio.plotly as py
import plotly.graph_objs as go
from IPython.display import HTML
# My Modules ******************************************************************
# *****************************************************************************
from oxr_reaction.oxr_plotting_classes.oxr_plot_2d_volcano import (
Volcano_Plot_2D)
from plotting.my_plotly import my_plotly_plot
# -
# # Script Inputs
save_plot = True
# # Read OER Data
# +
# from sc_procc_manual_data import (
# ORR_PLT,
# # TEMP
# df_ads_e,
# # df_list,
# corrections_dict,
# oxy_ref, hyd_ref,
# )
# df_list
from proj_data_col_iro2 import proj_dir_name
# -
# # 2D Volcano Plot Instance
smart_format_dict
# +
# Build the 2D volcano from the assembled free-energy plot; traces and
# layout are extracted for the figure cells below.
VP = Volcano_Plot_2D(
    ORR_PLT,
    plot_range={
        "x": [+0.9, +2.0],
        "y": [-0.5, +2.0],
        },
    smart_format_dict=smart_format_dict,
    )

data = VP.traces
layout = VP.get_plotly_layout()
# -
# # Plotting
layout = VP.get_plotly_layout()
# ## Small Plot
# +
# # %%capture
# #############################################################################
# Small-figure layout. 37.795275591 is presumably px-per-cm (8x6 cm
# canvas) — TODO confirm.
layout_override = dict(
    width=8 * 37.795275591,
    height=6 * 37.795275591,
    showlegend=False,
    margin=go.layout.Margin(
        autoexpand=None,
        b=8,
        l=8,
        pad=None,
        r=5,
        t=5,
        ),
    paper_bgcolor="white",
    plot_bgcolor="white",
    )

layout.update(dict(xaxis=dict(
    dtick=0.2,
    )))

# #############################################################################
# Best-effort marker-size bump; traces without markers are skipped.
# NOTE(review): bare except silently swallows all errors here.
for trace_i in data:
    try:
        trace_i.marker.size = 12
    except:
        pass

fig = go.Figure(
    data=data,
    layout=layout.update(layout_override))
# my_plotly_plot(
# figure=fig,
# plot_name="out_plot_00_small",
# write_pdf=True,
# )
# +
# fig.show()
# -
# ## Medium Plot
# +
# Medium-figure layout (24x14 of the same unit as above, legend on).
layout_override = {
    "width": 24 * 37.795275591,
    "height": 14 * 37.795275591,
    "showlegend": True,
    }

fig = go.Figure(
    data=data,
    layout=layout.update(layout_override))
# my_plotly_plot(
# figure=fig,
# plot_name="out_plot_00_medium",
# write_pdf=True,
# )
# -
fig.show()
|
# -*- coding: utf-8 -*-
import pandas as pd
# Demo: explore missing values in the UFO-reports dataset.
url = "http://bit.ly/uforeports"
data = pd.read_csv(url)
print(data)
# Per-cell null mask (first rows) and rows where City is missing.
print(data.isnull().head())
print(data[data.City.isnull()])
# Null count per column and overall shape before filling.
print(data.isnull().sum())
print(data.shape)
#data = data.dropna(how = "any")
#data = data.dropna(subset = ['City','Colors Reported'],how = "all")
# Fill missing shapes in place ('Belirsiz' is Turkish, ~'undefined').
data['Shape Reported'].fillna(value='Belirsiz',inplace =True)
print(data['Shape Reported'].value_counts(dropna=False))
print(data.shape)
|
"""
Blind simulation:
ICs - /home/app/reseed/snapshot_099
Final snapshot - /home/app/reseed/IC.gadget3
"""
import sys
sys.path.append("/Users/lls/Documents/mlhalos_code")
import matplotlib
matplotlib.rcParams.update({'axes.labelsize': 18})
import numpy as np
from mlhalos import parameters
from mlhalos import machinelearning as ml
from scripts.ellipsoidal import predictions as ST_pred
from scripts.paper_plots import roc_plots
import matplotlib.pyplot as plt
from scripts.ellipsoidal import predictions as ell_pred
def particle_in_out_class(initial_parameters):
    """Label particles +1 ("in") when their halo id lies inside
    [min_halo_number, max_halo_number], otherwise -1 ("out")."""
    halo_ids = initial_parameters.final_snapshot['grp']
    in_range = (halo_ids >= initial_parameters.min_halo_number) & \
               (halo_ids <= initial_parameters.max_halo_number)
    return np.where(in_range, 1, -1).astype("int")
############### ROC CURVES ###############
if __name__ == "__main__":
    # ic_training = parameters.InitialConditionsParameters(path="/Users/lls/Documents/CODE")
    # mass_threshold_in_out = ic_training.halo[400]['mass'].sum()

    # Blind-simulation parameters: ICs + final snapshot from the reseed50 run.
    ic = parameters.InitialConditionsParameters(initial_snapshot="/Users/lls/Documents/CODE/reseed50/IC.gadget3",
                                                final_snapshot="/Users/lls/Documents/CODE/reseed50/snapshot_099",
                                                load_final=True, min_halo_number=0, max_halo_number=400,
                                                min_mass_scale=3e10, max_mass_scale=1e15)

    # Change ic.ids_IN and ic.ids_OUT to be IN or OUT depending on whether they are in halos of mass larger than the
    # mass of halo 400 in ic_training and not that of halo 400 in ic. Change ids_IN, ids_OUT, and ic.max_halo_number.
    # We have that halo 409 in ic has the same mass as halo 400 in ic_training - hard code this for now.
    ic.max_halo_number = 409

    # Load or calculate true labels
    # NOTE(review): bare except — any np.load failure (not just a missing
    # file) triggers the recompute-and-save path.
    try:
        true_labels = np.load("/Users/lls/Documents/CODE/reseed50/predictions/true_labels.npy")
    except:
        print("Recalculating true labels")
        true_labels = particle_in_out_class(ic)
        np.save("/Users/lls/Documents/CODE/reseed50/predictions/true_labels.npy", true_labels)

    # Pre-computed classifier probabilities for the two feature sets.
    density_pred = np.load("/Users/lls/Documents/CODE/reseed50/predictions/density_predicted_probabilities.npy")
    density_shear_pred = np.load("/Users/lls/Documents/CODE/reseed50/predictions/shear_density_predicted_probabilities"
                                 ".npy")

    fpr_den, tpr_den, auc_den, thr = ml.roc(density_pred, true_labels)
    # fpr_shear_den, tpr_shear_den, auc_shear_den, thr_shear = ml.roc(density_shear_pred, true_labels)

    # Analytic-model (EPS / Sheth-Tormen) predicted labels for comparison.
    EPS_predicted_label = np.load("/Users/lls/Documents/CODE/reseed50/predictions/EPS_predicted_label.npy")
    ST_predicted_label = np.load("/Users/lls/Documents/CODE/reseed50/predictions/ST_predicted_label.npy")

    fpr_EPS, tpr_EPS = ST_pred.get_fpr_tpr_ellipsoidal_prediction(EPS_predicted_label, true_labels)
    fpr_ST, tpr_ST = ST_pred.get_fpr_tpr_ellipsoidal_prediction(ST_predicted_label, true_labels)
# # ROC curves + EPS + ST for blind simulation
#
# # pred_all = np.array([density_pred, density_shear_pred])
# # true_all = np.array([true_labels, true_labels])
# #
# # fig = roc_plots.get_multiple_rocs(pred_all, true_all, labels=["Density", "Density+Shear"],
# # add_EPS=True, fpr_EPS=fpr_EPS, tpr_EPS=tpr_EPS,
# # add_ellipsoidal=True, fpr_ellipsoidal=fpr_ST, tpr_ellipsoidal=tpr_ST)
# #
# # plt.savefig("/Users/lls/Documents/CODE/reseed50/predictions/ROCs_blind_sim.pdf")
#
#
# ######################## Fractional difference with training simulation ###########################
#
# # training data
#
# ic_training = parameters.InitialConditionsParameters(path="/Users/lls/Documents/CODE/")
#
# pred_density_training = np.load("/Users/lls/Documents/CODE/stored_files/shear/classification/density_only/"
# "predicted_den.npy")
# true_density_training = np.load("/Users/lls/Documents/CODE/stored_files/shear/classification/density_only/true_den.npy")
# fpr_den_training, tpr_den_training, auc_den_training, threshold = ml.roc(pred_density_training, true_density_training)
# del pred_density_training
# del true_density_training
#
# EPS_IN_label_training = np.load("/Users/lls/Documents/CODE/stored_files/all_out/not_rescaled/EPS_predictions_IN.npy")
# EPS_OUT_label_training = np.load("/Users/lls/Documents/CODE/stored_files/all_out/not_rescaled/EPS_predictions_OUT.npy")
# fpr_EPS_training, tpr_EPS_training = roc_plots.get_EPS_labels(EPS_IN_label_training, EPS_OUT_label_training)
#
# path = "/Users/lls/Documents/CODE/stored_files/shear/classification/den+den_sub_ell+den_sub_prol/"
# pred_shear_training = np.load(path + "predicted_den+den_sub_ell+den_sub_prol.npy")
# true_shear_training = np.load(path + "true_den+den_sub_ell+den_sub_prol.npy")
# fpr_shear_training, tpr_shear_training, auc_shear_training, threshold = ml.roc(pred_shear_training,
# true_shear_training)
#
# den_f = np.load("/Users/lls/Documents/CODE/stored_files/shear/shear_quantities/features/density_features.npy")
# fpr_ST_training, tpr_ST_training = ell_pred.get_fpr_tpr_from_features(den_f, mass_range="in",
# initial_parameters=ic_training,
# window_parameters=None,
# beta=0.485, gamma=0.615, a=0.707)
#
# figure, ax = plt.subplots(figsize=(8,6))
# cols = ["#8856a7", "#7ea6ce"]
#
# ax.plot(fpr_den/fpr_den_training, tpr_den/tpr_den_training, lw=1.5, color=cols[0],
# label=r"$\mathrm{Density} (\Delta \mathrm{AUC} = $" + ' %.3f' % (auc_den - auc_den_training))
# ax.plot(fpr_shear_den/fpr_shear_training, tpr_shear_den/tpr_shear_training, lw=1.5, color=cols[1],
# label=r"$\mathrm{Density+Shear} (\Delta \mathrm{AUC} = $" + ' %.3f' % (auc_den - auc_den_training))
#
# plt.scatter(fpr_EPS/fpr_EPS_training, tpr_EPS/tpr_EPS_training, color="k", s=30)
# plt.scatter(fpr_ST/fpr_ST_training, tpr_ST/tpr_ST_training, color="k", marker="^", s=30)
#
# ax.set_xlabel(r'$\mathrm{FPR_{blind}/FPR_{training}}$', fontsize=20)
# ax.set_ylabel(r'$\mathrm{TPR_{blind}/TPR_{training}}$', fontsize=20)
#
# plt.legend(loc="best", frameon=False)
# plt.savefig("/Users/lls/Documents/CODE/reseed50/predictions/ROCs_differences.pdf")
|
from starter2 import *
from collections import defaultdict
import fnmatch
class plot():
    """Container object that connects one image file with its parameters.

    fname      -- path (or link path) of the image file.
    parameters -- dict of parameter values parsed from the filename
                  (e.g. core_id, frame).
    """
    def __init__(self,fname=".",parameters=None):
        self.fname=fname
        # BUG FIX: a mutable default ({}) would be shared between every plot
        # created without explicit parameters.
        self.parameters={} if parameters is None else parameters
class core_target():
    """Lightweight record of core-target quantities read from an HDF5 group."""
    # dataset names copied out of the group, in the original order
    _FIELDS = ('min_density', 'nzones', 'particle_index',
               'peak_density', 'peak_id', 'peak_location')
    def __init__(self, h5ptr=None):
        self.q = {}
        if h5ptr:
            for field in self._FIELDS:
                self.q[field] = h5ptr[field][()]
class product():
    r"""Collects plot images (or scalar values) for one column of an HTML report.

    regexp: the regular expression used to match files and extract parameters
        from filenames.  Should look something like
        "/path/to/file/%s_projection_c(\d\d\d\d).png" % this_simname
        The first %s gets replaced by simname;
        (\d\d\d\d) is four digits enclosed in parentheses to make a group, so
        u301_projection_c0012.png
        would be matched by
        r"%s_projection_c(\d\d\d\d).png" % this_simname
    name: title on the column
    style: how to display the cell.  Options are
        single: just make an img tag with each file name
        value: also then takes
            fname: name of hdf5 file containing records
            field: name of field.
            It expects a record that looks like fname.h5[field]['core_id']
        (core_id, frames, numbertest, value_target_file and string select the
        other *_render methods below)
    width: pixel width used for <img>/<video> tags
    number_format: printf-style format used by the value renderers
    data_dir / link_dir: where image files live on disk, and the path prefix
        used when linking them from the generated HTML
    """
    def __init__(self, name="P", regexp=None,myglob="glob",
                 parameters=None,style='single',width=200,
                 fname=None,field=None,number_format="%0.2e", data_dir=None,link_dir=None):
        if regexp is not None:
            self.regexp=re.compile(regexp)
            self.re_string=regexp
        self.name=name
        # BUG FIX: the default used to be the mutable list ['core_id','frame'],
        # shared between every instance; None preserves the effective default.
        self.parameters=['core_id','frame'] if parameters is None else parameters
        # BUG FIX: myglob was accepted but never stored, so check_glob()
        # always raised AttributeError.
        self.myglob=myglob
        self.plots=defaultdict(lambda: list())  # core_id -> [plot, ...]
        self.style=style
        self.field=field
        self.fname=fname
        self.data_dir=data_dir
        self.link_dir=link_dir
        # pick the render method matching the requested style
        if style=='single':
            self.render = self.single_render
        elif style == 'core_id':
            self.render = self.core_id_render
        elif style == 'frames':
            self.render = self.frame_render
        elif style == 'value':
            self.render = self.value_render
        elif style == 'numbertest':
            self.render = self.number_render
        elif style == 'value_target_file':
            self.render = self.value_render_target_file
        elif style == 'string':
            self.render = self.string_render
        else:
            self.render = None
        self.width=width
        self.number_format=number_format
    def render_head(self):
        """Header cell for this column."""
        return "<th> %s </th>"%self.name
    def check_glob(self):
        """Debug helper: show what the raw glob pattern matches."""
        print("check glob")
        file_list = glob.glob(self.myglob)
        print(self.myglob)
        print(file_list)
    def get_frames(self,verbose=False):
        """Scan data_dir for files matching the regexp and collect them,
        keyed by core_id, as plot objects pointing into link_dir."""
        data_path = self.data_dir+self.re_string
        print("Get", self.re_string)
        dirname = os.path.dirname(data_path)
        file_list = glob.glob(dirname+"/*")
        for fname in file_list:
            short_name = os.path.basename(fname)
            match = self.regexp.match(short_name)
            if 0:
                #Regular Expressions got you down?  Flip to 1 for a trace.
                print(fname)
                print('======')
                print('R',self.re_string)
                print('D',self.data_dir)
                print('P',data_path)
                print('N',dirname)
                print('F',fname)
                print('S',short_name)
                print('X',self.regexp)
                print('M',match)
            if match is None:
                continue
            mygroups = match.groups()
            link_name = self.link_dir+"/"+short_name
            params = dict(zip(self.parameters,mygroups))
            core_id = int(params['core_id'])
            #trim off leading /
            while link_name[0]=="/":
                link_name=link_name[1:]
            myplot = plot(link_name,params)
            self.plots[core_id].append(myplot)
    def core_id_render(self,core_id):
        """Cell that just shows the core id."""
        return "<td>%s</td>"%core_id
    def frame_render(self,core_id):
        """Cell with one image plus a button per frame that swaps the image
        in place via the set_image javascript helper ('x' if no images)."""
        if len( self.plots[core_id]) == 0:
            img = "x"
        else:
            if len(self.parameters) > 1 and len(self.plots[core_id]) > 1:
                #get a sort key that isn't core_id, and order frames by it
                for p in self.parameters:
                    if p != 'core_id':
                        sort_key = p
                        break
                self.plots[core_id] = sorted( self.plots[core_id],key= lambda this_plot : this_plot.parameters[sort_key])
            img_tag_template = '<a h<figure><a href="%s"><img src="%s" width=%s id = %s></a><figcaption>%s</figcaption></figure>\n'
            fname = self.plots[core_id][0].fname
            caption = ""
            myid = "%s_c%04d"%(self.name, core_id)
            # NOTE(review): mynext/myback and back_fname/next_fname below are
            # currently unused; they appear to support not-yet-enabled
            # back/next buttons.  Kept for reference.
            mynext = "%s_c%04d_next"%(self.name, core_id)
            myback = "%s_c%04d_back"%(self.name, core_id)
            img = img_tag_template%(fname, fname, self.width,myid,"")
            for nplot,this_plot in enumerate(self.plots[core_id]):
                next_frame = nplot+1
                if next_frame == len(self.plots[core_id]):
                    next_frame=0
                back_fname = self.plots[core_id][nplot-1].fname
                next_fname = self.plots[core_id][next_frame].fname
                img += "<button onclick=set_image('%s','%s')> n%04d</button>\n"%(myid,this_plot.fname,int(this_plot.parameters['frame']))
        out1 = "<td>%s</td>"%img
        return out1
    def string_render(self,core_id):
        """Cell from an hdf5 file of strings: fname[field] parallel to
        fname['core_ids'].  Comma-separated values are each run through
        number_format ('-1' if the core is missing)."""
        if 'values' not in self.__dict__:
            # lazy-load and cache the whole table on first use
            fptr = h5py.File(self.fname,'r')
            values = fptr[self.field].asstr()[()]
            core_ids = fptr['core_ids'][()]
            self.values = dict(zip(core_ids,values))
            fptr.close()
        if core_id in self.values:
            values=str(self.values[core_id])
            parts = values.split(",")
            n = len(parts)
            out_str = self.number_format*n%tuple([str(v) for v in parts])
            print(out_str)
        else:
            out_str = '-1'
        return "<td>%s </td>"%out_str
    def value_render(self,core_id):
        """Cell from an hdf5 file of numbers: fname[field] parallel to
        fname['core_ids'], formatted with number_format ('-1' if missing)."""
        if 'values' not in self.__dict__:
            # lazy-load and cache the whole table on first use
            fptr = h5py.File(self.fname,'r')
            values = fptr[self.field][()]
            core_ids = fptr['core_ids'][()]
            self.values = dict(zip(core_ids,values))
            fptr.close()
        if core_id in self.values:
            out_str = self.number_format%self.values[core_id]
        else:
            out_str = '-1'
        return "<td>%s </td>"%out_str
    def value_render_target_file(self,core_id):
        """Cell from a target hdf5 file: one group per core, read into
        core_target records keyed by peak_id; renders self.field of the
        matching target ('-1' if missing)."""
        if 'targets' not in self.__dict__:
            fptr = h5py.File(self.fname,'r')
            self.targets={}
            for group in fptr:
                peak_id = fptr[group]['peak_id'][()]
                self.targets[peak_id] = core_target(h5ptr=fptr[group])
            fptr.close()
        if core_id in self.targets:
            out_str = self.number_format%self.targets[core_id].q[self.field]
        else:
            out_str = '-1'
        return "<td>%s </td>"%out_str
    def number_render(self,core_id):
        """Test cell: a random number."""
        out = "<td>%0.2f</td>"%np.random.random()
        return out
    def single_render(self,core_id):
        """Cell with the first image (or an mp4 video) for this core,
        'x' if there is none."""
        if len( self.plots[core_id]) == 0:
            img = "x"
        else:
            fname = self.plots[core_id][0].fname
            suffix = fname.split(".")[-1]
            img_tag_template = '<a h<figure><a href="%s"><img src="%s" width=%s></a><figcaption>%s</figcaption></figure>'
            img = img_tag_template%(fname, fname, self.width,"")
            if suffix == 'mp4':
                img_tag_template = '<video width=%s controls> <source src=%s type="video/mp4" >video</video>'
                img = img_tag_template%(self.width, fname)
        out1 = "<td>%s</td>"%img
        return out1
|
import PySimpleGUI as sg
import threading
import os
import re
from start import start
from setup import setup
from caption import caption_setup
from main import main
import utils, overnight
from unfollower import unfollow
# Window title and fixed window dimensions (pixels).
WINDOW_TITLE = 'IG Upload Helper'
x = 650
y = 750
sg.theme('Dark')   # Add a touch of color
# All the stuff inside your window.
# Directory containing this script (path minus the file name).
base_path = os.path.realpath(__file__)[:-len(os.path.basename(__file__))]
# creates accounts.txt if it doesn't exist
accounts_path = os.path.join(base_path, "accounts.txt")
utils.create_file(accounts_path, "")
accounts = [account for account in open("accounts.txt", "r").read().split("\n") if account] # gets all accounts
# Account pickers are only shown when there is more than one account.
accounts_visible = False
if len(accounts) > 1:
    accounts_visible = True
barrier = sg.Text("|", font=("Ariel 15"))
barrier_visible = sg.Text("|", font=("Ariel 15"), visible=accounts_visible)
if len(accounts) >= 1:
    default_account = accounts[0]
else:
    # No accounts at all: nothing to do, bail out before building the window.
    sg.popup("No accounts found!")
    exit()
# Window layout: setup section on top, bot-running controls below.
layout = [
    [sg.Text("IG Upload Helper", font=("Ariel 16 bold"), justification='center', size=(x,1))],
    [sg.Text("")],
    [sg.Text("First Time (for each account)", font=("Ariel 14 bold"))],
    [sg.Button("Setup", size=(8,2)), ],
    [sg.Text("Select account:", visible=accounts_visible), sg.DropDown(accounts, key='-SELECT_ACCOUNT-', default_value=default_account, visible=accounts_visible), barrier_visible, sg.Text("Setup files:"), sg.Button("Descriptions"), sg.Button("Hashtags"), sg.Button("Caption")],
    [sg.Text("-------------------------------------------------------------------------------------------------------------------------------------------------------")],
    [sg.Text("Run Bot", font=("Ariel 14 bold"))],
    [sg.Text("Scrape and upload", font="Ariel 11 bold")],
    [sg.Text('Enter the username of the account you want to scrape:'), sg.InputText(key='-SCRAPE_USERNAME-', size=(41,0))],
    [sg.Text("Enter the timestamp of your last post (if nothing is entered, it will be taken from the 'last_timestamp.txt' file):")],
    [sg.InputText(key = '-TIMESTAMP-', size=(11,0)), sg.Button('epochconverter.com')],
    [sg.Text("Enter how many posts you want to posts from the user:"), sg.InputText(key='-NUM_POSTS-', default_text='25', size=(6,0))],
    [sg.Text("Select your account:", visible=accounts_visible), sg.DropDown(accounts, key='-ACCOUNT-', default_value=default_account, visible=accounts_visible), sg.Button('Start', bind_return_key=True)],
    [sg.Text("-------------------------------------------------------------------------------------------------------------------------------------------------------")],
    [sg.Text("Scrape without uploading", font="Ariel 11 bold")],
    [sg.Text("Scrape multiple accounts to use for later:")],
    [sg.Text("Enter the accounts separated by a comma (e.g., 'instagram,cristiano,jlo')")],
    [sg.InputText(key='-ACCOUNTS-', size=(25,0))],
    [sg.Text("Select your account:"), sg.DropDown(accounts, key='-OVERNIGHT_ACCOUNT-', default_value=default_account, visible=accounts_visible), sg.Button('Scrape')],
    [sg.Text("")],
    [sg.Button('Cancel', size=(8,2))]
]
# Create the Window
window = sg.Window(WINDOW_TITLE, layout, size=(x, y))
# Event Loop to process "events" and get the "values" of the inputs
while True:
    event, values = window.read()
    # "Start" portion of Run
    if event == 'epochconverter.com':
        # Open the converter site in the default browser (Windows-only call).
        os.startfile('https://www.epochconverter.com/')
    if event == 'Start':
        account = values['-ACCOUNT-']
        scrape_username = values["-SCRAPE_USERNAME-"]
        # Re-prompt until non-empty; popup_get_text returns None on Cancel.
        while not scrape_username:
            if scrape_username is None:
                quit()
            scrape_username = sg.popup_get_text("Username cannot be blank.")
        # checks if the account has already been scraped
        with open(os.path.join(f"accounts/{account}", "scraped_accounts.txt"), "a+") as f:
            f.seek(0)
            scraped_accounts = f.read().split("\n")
        is_continue = "Yes"
        if scrape_username in scraped_accounts:
            is_continue = sg.popup_yes_no("Warning", "You have already scraped posts from that user. Are you sure you want to scrape them again?")
        while not is_continue:
            if is_continue is None:
                quit()
            is_continue = sg.popup_yes_no("Warning", "You have already scraped posts from that user. Are you sure you want to scrape them again?")
        if is_continue == "Yes":
            scrape_username = scrape_username.strip()
            input_timestamp = values["-TIMESTAMP-"]
            num_posts = values["-NUM_POSTS-"]
            # Accept 1-100.  NOTE(review): re.search matches a valid number
            # anywhere in the string (e.g. "5 x" passes), then int() below
            # would raise -- confirm inputs are always plain numbers.
            regex = r"\b([1-9]|[1-8][0-9]|9[0-9]|100)\b"
            while not re.search(regex, num_posts):
                if num_posts is None:
                    quit()
                num_posts = sg.popup_get_text("Input must be a number between 1-100.")
            num_posts = int(num_posts)
            main(scrape_username, input_timestamp, num_posts, account)
    if event == "Setup":
        username = setup()
    if event == "Descriptions":
        accounts = [account for account in open("accounts.txt", "r").read().split("\n") if account]
        if len(accounts) < 1:
            sg.Popup("No accounts added")
            # NOTE(review): this 'break' exits the event loop and closes the
            # app; 'continue' may have been intended.  Same in the two
            # branches below.
            break
        if len(accounts) == 1:
            username = accounts[0]
        else:
            username = values["-SELECT_ACCOUNT-"]
        description_path = os.path.join(base_path, f"{username}/descriptions.txt")
        os.startfile(description_path)
    if event == "Hashtags":
        accounts = [account for account in open("accounts.txt", "r").read().split("\n") if account]
        if len(accounts) < 1:
            sg.Popup("No accounts added")
            break
        if len(accounts) == 1:
            username = accounts[0]
        else:
            username = values["-SELECT_ACCOUNT-"]
        hashtags_path = os.path.join(base_path, f"{username}/hashtags.json")
        utils.setup_hashtags(hashtags_path)
    if event == "Caption":
        accounts = [account for account in open("accounts.txt", "r").read().split("\n") if account]
        if len(accounts) < 1:
            sg.Popup("No accounts added")
            break
        if len(accounts) == 1:
            username = accounts[0]
        else:
            username = values["-SELECT_ACCOUNT-"]
        caption_setup(username)
    if event == "Scrape":
        utils.overnight_scrape(values["-ACCOUNTS-"], values["-OVERNIGHT_ACCOUNT-"])
    # NOTE(review): no "Unfollow" button exists in the layout above, so this
    # branch appears unreachable from the UI -- confirm before removing.
    if event == "Unfollow":
        _u = sg.popup_get_text("Enter your username:")
        _p = sg.popup_get_text("Enter your password:")
        unfollow(_u, _p)
    if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel
        break
window.close()
|
import os
import re
import imageio
from itertools import islice
from PIL import Image
#from skimage import transform,io
import cv2
def generateVideo(path, output_name):
    """Collect 'frame_<n>_*' images under *path* (skipping 'rigid' frames),
    sort them by frame number, resize to 1600x912 and write them to
    *output_name* at 4 fps.
    """
    sorted_files = []
    for file in os.listdir(path):
        if file.startswith('frame') and "rigid" not in file:
            complete_path = os.path.join(path, file)
            try:
                img = Image.open(complete_path)  # open the image file
                img.verify()  # verify that it is, in fact, an image
                # BUG FIX: the pattern is now a raw string; '\d' in a plain
                # string is an invalid escape (SyntaxWarning on modern Python).
                m = re.search(r'frame_(\d*?)_', file)
                if not m:
                    print("key not found")
                else:
                    sorted_files.append((int(m.group(1)), complete_path))
            except (IOError, SyntaxError):
                # Image.verify raises on truncated/corrupt files; skip them.
                print('Bad file:', complete_path)
    print('output video: ' + output_name)
    writer = imageio.get_writer(output_name, fps=4)
    sorted_files = sorted(sorted_files, key=lambda x: x[0])
    for (k, im) in sorted_files:
        img = imageio.imread(im)
        img = cv2.resize(img, (1600, 912), interpolation = cv2.INTER_AREA)
        writer.append_data(img)
    writer.close()
# Build one video per '*color*' image directory under this run folder
# (Windows-style paths throughout).
path = "images\\run_2019_10_01_smooth_3"
image_dirs = []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
    for dir in d:
        if 'color' in dir:
            image_dirs.append(os.path.join(r, dir))
output_path = path + '\\videos\\'
for image_dir in image_dirs:
    # Derive the output name from the directory path relative to the run.
    output_file = image_dir.replace(path + '\\', '')
    output_file = output_path + output_file.replace('\\', '_') + '.avi'
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    print(output_file)
    generateVideo(image_dir, output_name = output_file)
|
import json
import paho.mqtt.client as mqtt
import yaml
from functools import partial
def on_connect(topic, client, userdata, flags, rc):
    """Paho connect callback; *topic* is pre-bound with functools.partial."""
    print("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe(topic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Debug handler: print topic and raw payload."""
    print(msg.topic + " " + str(msg.payload))
def on_message_and_trans(trans_client, client, userdata, msg):
    """Forward *msg* from the source broker to the target broker.

    On publish failure, attempt to reconnect the underlying target client;
    the failed message itself is not retried.
    """
    try:
        trans_client.publish(msg.payload)
    except Exception as e:
        print(e)
        # BUG FIX: TransClient defines no connect() method of its own, so
        # trans_client.connect(...) raised AttributeError; reconnect the
        # wrapped paho client instead.
        trans_client.client.connect(trans_client.config.get("target_host"), trans_client.config.get("target_port"))
def on_publish(client, userdata, mid):
    """Publish-acknowledged callback; intentionally a no-op."""
    pass
    # print("mid", str(mid), sep=" ,")
class Config:
    """Thin read-only wrapper around a YAML configuration file."""
    def __init__(self, path):
        with open(path, "r") as config_file:
            self.info = yaml.safe_load(config_file)
    def get(self, key, default=""):
        """Return the configured value for *key*, or *default* if absent."""
        return self.info.get(key, default)
class TransClient():
    """Publishes forwarded messages to the target MQTT broker."""
    def __init__(self, config_path):
        self.config = Config(config_path)
        self.client = mqtt.Client()
        # BUG FIX: credentials and callbacks must be configured *before*
        # connect(), otherwise the CONNECT packet is sent without them.
        self.client.username_pw_set(self.config.get("target_user"), self.config.get("target_password"))
        self.client.on_connect = partial(on_connect, self.config.get("target_topic"))
        self.client.on_publish = on_publish
        self.client.connect(self.config.get("target_host"), self.config.get("target_port"))
        # Last forwarded timestamp; the dedup logic below is currently disabled.
        self.time_stamp = None
    def publish(self, data):
        """Publish *data* to the configured target topic."""
        # NOTE(review): timestamp-based dedup was disabled; kept for reference.
        # time_stamp = json.loads(data)["meta"]["t"]
        # if self.time_stamp != time_stamp:
        #     self.client.publish(self.config.get("target_topic"), data)
        #     self.time_stamp = time_stamp
        self.client.publish(self.config.get("target_topic"), data)
class ListenClient():
    """Subscribes to the source broker and forwards messages to *trans_client*."""
    def __init__(self, config_path, trans_client):
        self.config = Config(config_path)
        self.listen_client = mqtt.Client()
        # BUG FIX: credentials and callbacks must be configured *before*
        # connect(); previously they were set after the connection was opened.
        self.listen_client.username_pw_set(self.config.get("source_user"), self.config.get("source_password"))
        self.listen_client.on_connect = partial(on_connect, self.config.get("source_topic"))
        self.listen_client.on_message = partial(on_message_and_trans, trans_client)
        self.listen_client.on_publish = on_publish
        self.listen_client.connect(self.config.get("source_host"), self.config.get("source_port"))
    def loop(self):
        """Block forever, dispatching incoming messages."""
        self.listen_client.loop_forever()
class Trans:
    """Wires a ListenClient to a TransClient over one shared config file."""
    def __init__(self, config_path):
        forwarder = TransClient(config_path)
        self.trans_client = forwarder
        self.listen_client = ListenClient(config_path, forwarder)
# Script entry point: bridge messages from the source broker to the target.
if __name__ == '__main__':
    Trans("./info.yaml").listen_client.loop()
|
from django.contrib.admin.utils import model_ngettext
from jinja2.nodes import Mod
from rest_framework.serializers import ModelSerializer
from .models import *
class ProvinceSerializer(ModelSerializer):
    """Serializes a Province (id, name)."""
    class Meta:
        model = Province
        fields = ["id", "name"]
class LocationSerializer(ModelSerializer):
    """Serializes a Location (id, address, province)."""
    class Meta:
        model = Location
        fields = ["id", "address", "province"]
class UserSerializer(ModelSerializer):
    """Serializes a User, with the password write-only and the location nested."""
    # BUG FIX: the original assigned the serializer *class* instead of an
    # instance, so DRF never treated it as a declared field.  read_only keeps
    # the input side (a plain location id) backward-compatible.
    location = LocationSerializer(read_only=True)
    class Meta:
        model = User
        fields = ["id", "username", "password", "first_name", "last_name", "company_name", "sex", "email", "phone", "avatar", "role", "location" ]
        extra_kwargs = {
            # BUG FIX: DRF expects a boolean here, not the string 'true'
            # (which only worked by being truthy).
            'password': {'write_only': True}
        }
    def create(self, validated_data):
        """Create a user, hashing the raw password before saving."""
        user = User(**validated_data)
        user.set_password(validated_data['password'])
        user.save()
        return user
class CareerSerializer(ModelSerializer):
    """Serializes a Career (id, name)."""
    class Meta:
        model = Career
        fields = ["id", "name"]
class TagSerializer(ModelSerializer):
    """Serializes a Tag (id, content)."""
    class Meta:
        model = Tag
        fields = ["id", "content"]
class BenefitSerializer(ModelSerializer):
    """Serializes a Benefit (id, name)."""
    class Meta:
        model = Benefit
        fields = ["id", "name"]
class RecruitmentSerializer(ModelSerializer):
    """Serializes a Recruitment with its tags and benefits fully nested."""
    # NOTE(review): nested many=True serializers make these fields writable
    # only if create()/update() handle them explicitly -- confirm usage is
    # read-only.
    tag = TagSerializer(many=True)
    benefit = BenefitSerializer(many=True)
    class Meta:
        model = Recruitment
        fields = ["id", "title", "recruiter", "form", "created_date", "active", "career", "tag", "benefit"]
class ApplySerializer(ModelSerializer):
    """Serializes an Apply (job application) record."""
    class Meta:
        model = Apply
        fields = ["id", "title", "CV", "candidate", "recruitment", "created_date"]
class CommentSerializer(ModelSerializer):
    """Serializes a Comment with its author and target references."""
    class Meta:
        model = Comment
        fields = ["id", "content", "file", "created_date", "commenter", "commented"]
|
from analyze_network import get_weights
import numpy as np
import matplotlib.pyplot as plt
# For each trained model, compare the second layer's weights (wd[arguments[2]])
# against the original population-layer weights and plot the sorted per-unit
# mean absolute difference across the three output columns.
wm_abs_dif_list = []
for arguments in [('/home/nhoang1/saralab/popgen-hmm-dl/dl/TD_pop/TD_pop_model.hdf5', 'pop', 'TD', 'TD'),
                 ('/home/nhoang1/saralab/popgen-hmm-dl/dl/dupl_pop/dupl_pop_model.hdf5', 'pop_1', 'TD_1', 'duplicate'),
                 ('/home/nhoang1/saralab/popgen-hmm-dl/dl/rand_pop/rand_pop_model.hdf5', 'pop_2', 'TD_2', 'random')]:
    # arguments = (model path, pop layer name, comparison layer name, label)
    wd = get_weights(arguments[0])
    wm_pop_orig = wd[arguments[1]]
    wm = wd[arguments[2]]
    # Columns of the population layer: constant / bottleneck / nat. selection.
    constant = wm_pop_orig[:,0]
    bottleneck = wm_pop_orig[:,1]
    nat_sel = wm_pop_orig[:,2]
    # NOTE(review): wm_pop (a column-permuted copy) is built but never used
    # below -- wm_dif compares against wm_pop_orig.  Presumably leftover from
    # an earlier experiment; confirm before deleting.
    wm_pop = np.zeros(wm_pop_orig.shape)
    wm_pop[:,0] = nat_sel
    wm_pop[:,1] = constant
    wm_pop[:,2] = bottleneck
    wm_dif = wm - wm_pop_orig
    wm_abs_dif = np.absolute(wm_dif)
    # Mean |difference| over the 3 outputs, sorted for plotting.
    wm_abs_dif_list.append(np.sort(np.sum(wm_abs_dif, axis=1)/3.0))
# plt.figure(1)
# plt.plot(np.arange(1,257),np.sort(wm_abs_dif, axis=0))
# plt.legend(['< 0, nat_sel','= 0, constant','> 0, bottleneck'])
# plt.title('Weights for '+arguments[3] +', separate')
# plt.show()
# plt.figure(2)
# plt.plot(np.arange(1,257),np.sort(np.sum(wm_abs_dif, axis=1)/3.0))
# plt.title('Weights for '+arguments[3] +', combined')
# plt.show()
#
# plt.close()
# Overlay the three sorted difference curves (256 units each).
plt.plot(np.arange(1,257),wm_abs_dif_list[0])
plt.plot(np.arange(1,257),wm_abs_dif_list[1])
plt.plot(np.arange(1,257),wm_abs_dif_list[2])
plt.title('Summed weight abs dif for three outputs')
plt.legend(["Tajima's D", 'population duplicate', 'random'])
plt.show()
|
# import colorgram
# colors = colorgram.extract('spot.jpg', 20)
# image_colors=[]
# for i in colors:
# image_colors.append((i.rgb.r,i.rgb.g,i.rgb.b))
# print(image_colors)
import turtle as turtle_module
import random
# Hirst-style dot painting: a 10x10 grid of randomly coloured dots.
# Palette extracted from an image (see the commented colorgram code above).
colors = [(216, 148, 92), (221, 78, 57), (45, 94, 146), (151, 64, 91), (232, 219, 93), (217, 65, 85), (22, 27, 41), (40, 22, 29),
          (120, 167, 197), (40, 19, 14), (194, 139, 159), (159, 72, 56), (35, 132, 91), (123, 181, 142), (69, 167, 94), (236, 222, 6)]
turtle_module.colormode(255)  # allow 0-255 RGB tuples
t = turtle_module.Turtle()
t.penup()
t.hideturtle()
t.speed('fastest')
# Walk to the bottom-left corner of the grid, then face right.
t.setheading(235)
t.forward(300)
t.setheading(0)
for i in range(100):
    t.dot(10, random.choice(colors))
    t.forward(30)
    if (i+1) % 10 == 0:
        # End of a row: step up one row and return to the left edge.
        t.left(90)
        t.forward(30)
        t.left(90)
        t.forward(300)
        t.left(90)
        t.left(90)
# Keep the window open until it is clicked.
screen = turtle_module.Screen()
screen.exitonclick()
|
import shelve
def get_data(id: int, type: str):
    """Load the stored set for *type* ('plan' or 'timetable') of user *id*.

    Returns an empty dict when *type* is unrecognized.
    NOTE: parameter names shadow the builtins id/type; kept for compatibility.
    """
    with shelve.open('database' + str(id) + '.txt') as db:
        if type in ('plan', 'timetable'):
            return db[type]
    return {}
def add_data(id: int, type: str, action: str):
    """Append *action* to user *id*'s stored 'plan' or 'timetable' set.

    BUG FIX 1: shelve (without writeback=True) hands back a *copy* on read,
    so mutating it in place was silently lost; the set is now written back.
    BUG FIX 2: a list is unhashable and cannot go into a set, so the
    timetable entry is stored as a tuple of the space-separated parts.
    """
    with shelve.open('database' + str(id) + '.txt') as db:
        if type == 'plan':
            plans = db['plan']
            plans.add(action)
            db['plan'] = plans
        if type == 'timetable':
            slots = db['timetable']
            slots.add(tuple(action.split(' ')))
            db['timetable'] = slots
def register(id: int):
    """Initialize empty 'plan' and 'timetable' sets for user *id*."""
    with shelve.open('database' + str(id) + '.txt') as db:
        for key in ('plan', 'timetable'):
            db[key] = set()
class Database:
    """In-memory registry of user ids; creates on-disk storage on add."""
    def __init__(self):
        self.users = list()
    def add_user(self, id: int):
        """Track *id* and initialize its shelve storage."""
        self.users.append(id)
        register(id)
    def get_users(self):
        """Return the list of registered user ids."""
        return self.users
|
#!/usr/bin/env python
import code, re
try:
import here
except ImportError:
import sys
import os.path as op
sys.path.insert(0, op.abspath(op.join(op.dirname(__file__), '..')))
import here
from pprint import pprint
import csv
import codecs
from cStringIO import StringIO
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    (Standard Python 2 recipe: the csv module cannot handle unicode directly,
    so rows are written UTF-8-encoded into a StringIO queue, decoded back to
    unicode, then re-encoded to the target encoding.)
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # incremental encoder for the *target* encoding
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        # every cell is expected to be a unicode string
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
def unicode_csv_reader(unicode_csv_data,
                       encoding='utf-8',
                       **kwargs):
    """Yield rows of unicode cells from an iterable of unicode lines.

    (Python 2 recipe: relies on the py2-only `unicode` builtin.)
    """
    # csv.py doesn't do Unicode; encode temporarily as UTF-8:
    csv_reader = csv.reader(utf_8_encoder(unicode_csv_data, encoding),
                            **kwargs)
    for row in csv_reader:
        # decode UTF-8 back to Unicode, cell by cell:
        yield [unicode(cell, encoding) for cell in row]
def utf_8_encoder(unicode_csv_data, encoding):
    """Yield each line of *unicode_csv_data* encoded to *encoding* bytes."""
    return (line.encode(encoding) for line in unicode_csv_data)
def parse_questions(file_name, encoding='utf8'):
    """Yield each CSV row of *file_name*, skipping the 'CODE' header row.

    NOTE(review): the file is closed only after the generator is fully
    exhausted; an abandoned generator leaks the handle -- confirm callers
    always iterate to the end.
    """
    csv_file = codecs.open(file_name, 'r', encoding)
    #reader = csv.reader(csv_file)
    reader = unicode_csv_reader(csv_file, encoding, delimiter=',')
    for row in reader:
        if row[0] == 'CODE':
            continue
        yield row
    csv_file.close()
def is_different(dict1, dict2):
    """Return True when *dict2* is empty/None, or when any tracked question
    field differs between the two dicts."""
    if not dict2:
        return True
    keys = ('text', 'alternatives', 'author', 'alternatives_sorted',
            'category', 'correct', 'location', 'points_value')
    return any(dict1[key] != dict2[key] for key in keys)
if __name__ == '__main__':
    # CLI entry point (Python 2): import questions from a CSV file into
    # MongoDB.  Usage: script.py [-v|--verbose] questions.csv
    from models import *
    from pymongo.objectid import InvalidId, ObjectId
    import settings
    db = connection[settings.DATABASE_NAME]
    import sys, os
    args = sys.argv[1:]
    if '--verbose' in args:
        args.remove('--verbose')
        verbose = True
    elif '-v' in args:
        args.remove('-v')
        verbose = True
    else:
        verbose = False
    filename = os.path.abspath(args[0])
    # Fallback category when a CSV row has none.
    default_category = db.Category.find_one({'name': 'Tour guide'})
    assert default_category
    # Map of lower-cased category name -> ObjectId.
    _cats = dict((x['name'].lower().strip(), x['_id'])
                 for x in db.Category.find())
    for row in parse_questions(filename):
        category=None
        _id = None
        try:
            (code, __, text, correct,
             alt1, alt2, alt3, alt4, alt_ordered,
             points_value, category) = row[:11]
        except ValueError:
            # Row has the wrong number of columns: dump it and abort.
            print
            print row
            print len(row)
            print
            raise
        # An optional 12th column holds an existing question ObjectId.
        if len(row) == 12:
            _id = row[11]
        if category:
            category = _cats[category.lower().strip()]
        else:
            category = default_category['_id']
        text = text.strip()
        if not text.endswith('?'):
            text += '?'
            print "ADDING ? TO:", repr(text)
        # Look up an existing question by id (preferred) or by exact text.
        if _id:
            question = db.Question.find_one({'_id': ObjectId(_id)})
        else:
            question = db.Question.find_one({'text': text})
        if not question:
            question = db.Question()
        # Snapshot the existing record so unchanged questions can be skipped.
        if hasattr(question, '_id'):
            orig_question = dict(question)
        else:
            orig_question = None
        location = db.Location.find_one({'code': code.upper()})
        if not location:
            print "CODE", repr(code)
            raise ValueError("Unrecognized code %r" % code)
        if question['location'] != location['_id']:
            question['location'] = location['_id']
        question['text'] = text
        # Normalize booleans spelled in lower case.
        if correct.lower() in ('true', 'false'):
            correct = correct.capitalize()
        question['correct'] = correct
        if alt1.lower() in ('true', 'false'):
            alt1 = alt1.capitalize()
        if alt2.lower() in ('true', 'false'):
            alt2 = alt2.capitalize()
        alternatives = [x.strip() for x in (alt1, alt2, alt3, alt4)
                        if x.strip()]
        question['alternatives'] = alternatives
        question['category'] = category
        # Sanity check only; the row is still imported.
        if correct not in alternatives:
            print "ERROR",
            print repr(correct), "not in alternatives", repr(alternatives)
            #print alternatives
        points_value = int(points_value)
        question['points_value'] = points_value
        # Skip the save when nothing changed.
        if not is_different(dict(question), orig_question):
            continue
        if verbose:
            # Show the question and ask: Y = save, a = save all silently,
            # n = skip this one.
            d = dict(question)
            del d['add_date']
            del d['modify_date']
            print question['text']
            print "\tCorrect:", repr(question['correct'])
            print "\tAlternatives:", repr(question['alternatives'])
            print "\tLocation:", unicode(db.Location.find_one({'_id': question['location']}))
            print "\tCategory:", unicode(db.Category.find_one({'_id': question['category']}))
            print "\tPoints value:", question['points_value']
            i = raw_input('Save? [Y/a/n] ').strip().lower()
            if i == 'a':
                verbose = False
            elif i == 'n':
                print "SKIP"
                continue
        question.save()
    if verbose:
        print
    print "There are now", db.Question.find().count(), "questions!"
|
from django.shortcuts import render, redirect, get_object_or_404
from account.decorators import manager_required
from account.forms import User, UpdateUserForm
from care_point.forms import UpdateManagerForm
from care_point.models import Manager
@manager_required
def managers(request):
    """List every manager."""
    managers = Manager.objects.all()
    return render(request, 'care_point/manager/manager.html', {'managers': managers})
@manager_required
def manager_details(request, manager_id):
    """Show one manager; 404 when the id is unknown."""
    manager = get_object_or_404(Manager, pk=manager_id)
    return render(request, 'care_point/manager/manager_details.html', {'manager': manager})
@manager_required
def manager_update(request, manager_id):
    """Edit a manager together with its linked user account (same pk).

    On GET, `request.POST or None` leaves both forms unbound (no validation
    errors shown); on POST both forms must validate before either is saved.
    """
    manager = get_object_or_404(Manager, pk=manager_id)
    user = get_object_or_404(User, pk=manager_id)
    manager_form = UpdateManagerForm(data=request.POST or None, instance=manager)
    user_form = UpdateUserForm(data=request.POST or None, instance=user)
    if request.method == 'POST':
        if manager_form.is_valid() and user_form.is_valid():
            # commit=False returns the instances without writing; both are
            # then saved explicitly.
            manager = manager_form.save(commit=False)
            user = user_form.save(commit=False)
            manager.save()
            user.save()
            return redirect('care_point:managers')
    return render(request, 'care_point/manager/manager_update.html',
                  {'manager_form': manager_form, 'user_form': user_form})
@manager_required
def manager_delete(request, manager_id):
    """Delete a manager and its backing user account (shared primary key).

    Uses get_object_or_404 -- consistent with the other views in this module
    -- so an unknown id yields a 404 instead of an unhandled
    DoesNotExist (500).
    """
    get_object_or_404(Manager, pk=manager_id).delete()
    get_object_or_404(User, pk=manager_id).delete()
    return redirect('care_point:managers')
|
#!/usr/bin/env python3
# call with parameter: MongoDB URI.
import random
import sys
import time
import statistics
import pprint
import datetime
from _datetime import date,timedelta
from multiprocessing import Process
from pymongo import MongoClient, WriteConcern
# Number of processes to launch
processesNumber = 16 # must be less than the number of items in deviceList
processesList = []
# constants
# One IoT device id per worker process (indexed by process id).
deviceList = ["PTA101","PTA299","BRA001","FRZ191","FRB980","AUS009","JPY891","JPY791","ITI112","SPL556","UKA198","NLO220","DEO987","ISO008","RUA177","CAR788","USH401","USJ465"]
startDate = datetime.datetime(2020,1,1) # first day to inject data
days = 140 # number of days to inject
# Returns a new temperature using delta and min/max values
def changeTemp(temp,min,max,delta):
    """Random-walk *temp* by randint(0, delta) - delta/2, reflecting the
    step when the result would leave [min, max].

    NOTE: min/max shadow the builtins; names kept for caller compatibility.
    """
    variation=random.randint(0,delta)-(delta/2)
    candidate = temp + variation
    if min <= candidate <= max:
        return candidate
    return temp - variation
# Main processes code
def run(process_id, uri):
    """Worker: generate `days` days of hourly temperature documents for the
    device at deviceList[process_id] and insert them into world.iot.

    Each document holds up to 60 per-minute measures (a random number of
    0-3 measures is dropped to simulate gaps) plus per-hour aggregates.
    """
    id=deviceList[process_id]
    print("process", process_id, "connecting to MongoDB... for device ",id)
    connection = MongoClient(host=uri, socketTimeoutMS=10000, connectTimeoutMS=10000, serverSelectionTimeoutMS=10000)
    iot_collection = connection.world.get_collection("iot", write_concern=WriteConcern(w=1, wtimeout=8000))
    for j in range(days):
        currentDate = startDate + timedelta(days=j)
        temp = random.randint(17,23)  # fresh starting temperature each day
        for i in range(24):
            missed = random.randint(0,3) #simulates missing measures
            values = []
            tempList = []
            for k in range(60-missed):
                values.append({ "measureMinute":k,"measuredValue": temp})
                tempList.append(temp)
                # random-walk the temperature within [13, 29] by up to +/-2.5
                temp = changeTemp(temp,13,29,5)
            doc = {
                "id" : deviceList[process_id],
                "measureDate" : currentDate + timedelta(hours=i),
                "measureUnit" : "°C",
                "periodAvg" : statistics.mean(tempList),
                "periodMax" : max(tempList),
                "periodMin" : min(tempList),
                "missedMeasures" : missed,
                "recordedMeasures" : 60-missed,
                "values" : values
            }
            #pprint.pprint(doc)
            iot_collection.insert_one(doc)
        print('%s - process %s - id %s - date %s' % (time.strftime("%H:%M:%S"), process_id, id, currentDate))
# Main
if __name__ == '__main__':
    # Expects exactly one argument: the MongoDB connection URI.
    if len(sys.argv) != 2:
        print("You forgot the MongoDB URI parameter!")
        print(" - example: mongodb://mongo1,mongo2,mongo3/test?replicaSet=replicaTest&retryWrites=true")
        print(" - example: mongodb+srv://user:password@cluster0-abcde.mongodb.net/test?retryWrites=true")
        exit(1)
    mongodb_uri = str(sys.argv[1])
    print("launching", str(processesNumber), "processes...")
    # Creation of processesNumber processes
    for i in range(processesNumber):
        process = Process(target=run, args=(i, mongodb_uri))
        processesList.append(process)
    # launch processes
    for process in processesList:
        process.start()
    # wait for processes to complete
    for process in processesList:
        process.join()
|
import os
import json
import requests
from bs4 import BeautifulSoup
# Base Google Images search URL; the query term is appended in
# download_images().
google_image="https://www.google.com/search?biw=1600&tbm=isch&source=hp&biw=&bih=783&ei=r8RAYLO4B-2Z4-EP1_S2uAw&"
# Browser-like User-Agent so Google serves the full results page.
user_agent={"User-Agent":'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36',
}
'''url=urllib.request.urlopen(google_image)
html=url.read()
print(url.getcode())
print(html)
print(os.getcwd())
'''
# NOTE(review): hard-coded, user-specific working directory -- this script
# only runs on this one machine as-is.
os.chdir("C:\\Users\\Kunal\\Desktop")
print(os.getcwd())
# Name of the folder the downloaded images are saved into.
Images_py="images"
def folder_create():
    """Ensure the image output folder exists, then start the download flow."""
    if not os.path.exists(Images_py):
        os.mkdir(Images_py)
    download_images()
def download_images():
    """Prompt for a search term and a count, scrape Google Images, and save
    the results into the Images_py folder.

    NOTE(review): the scraping logic looks broken against the real page:
    the class selector 'rg_i.Q4LuWd' (dot-joined) will not match elements
    carrying two separate classes, an <img> tag's .text is not JSON (so
    json.loads will raise), and 'Q4LuWd' is unlikely to be a JSON key.
    Confirm against a saved copy of the page before relying on this.
    """
    data=input("What are you looking for :")
    no_images=int(input("How many images do you want :"))
    print (f"searching for {no_images} best images of {data}........")
    search_url = google_image +"q="+ data
    print(search_url)
    response=requests.get(search_url,headers=user_agent )
    html=response.text
    soup=BeautifulSoup(html,"html.parser")
    results=soup.find_all("img",{'class':'rg_i.Q4LuWd'}, limit=no_images)
    print(results)
    image_links = []
    for result in results:
        text = result.text
        print(text)
        text_dict=json.loads(text)
        link=text_dict['Q4LuWd']
        image_links.append(link)
    print(f'found {len(image_links)} images')
    x = len(image_links)
    if x == 0:
        print("CANT DOWNLOAD")
    else:
        print("start downloading....")
        # Save each image as <term><index>.jpg inside the output folder.
        for i , image_link in enumerate (image_links):
            response=requests.get(image_link)
            image_name=Images_py+"/"+data+str(i+1)+ ".jpg"
            with open(image_name,"wb")as file:
                file.write(response.content)
    print("DONE")
# Script entry point: ensure the output folder exists and start downloading.
folder_create()
|
import time
class TokenBucket(object):
    """Implements token bucket algorithm.

    https://en.wikipedia.org/wiki/Token_bucket
    """
    def __init__(self, fill_rate, capacity):
        self._fill_rate = float(fill_rate)   # tokens accrued per second
        self._capacity = float(capacity)     # maximum tokens held
        self._count = float(capacity)        # bucket starts full
        self._last_fill = time.time()
        self.throttle_count = 0              # consecutive denied requests
    def check_and_consume(self):
        """Returns True if there is currently at least one token, and reduces
        it by one.
        """
        # Only bother accruing tokens when we might be short.
        if self._count < 1.0:
            self._fill()
        if self._count >= 1.0:
            self._count -= 1.0
            self.throttle_count = 0
            return True
        self.throttle_count += 1
        return False
    def __len__(self):
        """Returns current number of discrete tokens."""
        return int(self._count)
    def _fill(self):
        """Fills bucket with accrued tokens since last fill."""
        now = time.time()
        elapsed = now - self._last_fill
        if elapsed < 0:
            # Clock moved backwards; skip this fill.
            return
        self._count = min(
            self._count + self._fill_rate * elapsed,
            self._capacity,
        )
        self._last_fill = now
|
from math import *
def isPrime(n):
    """Trial-division primality test.

    NOTE(review): deliberately returns True for n < 2 (0, 1, negatives),
    exactly like the original -- facteurs_prime2 below relies on
    isPrime(1) being True to terminate, so the quirk must be preserved.
    """
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            # divisor divides n evenly, so n is composite
            return False
        divisor += 1
    return True
print(isPrime(1001))  # 1001 = 7*11*13 -> False
#crible à revoir
def eratosthene(n):
liste = [i for i in range(n)]
i = 2
for i in range(n):
if isPrime(i):
for j in range(i+2,n):
try:
liste.remove(j*i)
except ValueError:
pass
return liste
def primeList(n):
    """Return every prime in [2, n) by testing each candidate with isPrime."""
    return [candidate for candidate in range(2, n) if isPrime(candidate)]
print(eratosthene(45))   # demo: sieve output below 45
print(primeList(100))    # demo: primes below 100
# prime factorisation
def facteurs_prime2(n):
    """Return the prime factorisation of n as a list, with multiplicity.

    facteurs_prime2(1001) -> [7, 11, 13]; facteurs_prime2(12) -> [2, 2, 3].

    Fix: the previous version divided each candidate out at most once per
    pass (losing repeated factors: 12 -> [2, 3]), used float division
    (n = n/i), and returned [1] for n = 1. This version is self-contained
    integer trial division.
    """
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        # Divide out each factor completely before moving on, so repeated
        # factors are recorded with their multiplicity.
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        # Whatever remains is a prime factor larger than sqrt(original n).
        factors.append(n)
    return factors
print(facteurs_prime2(1001))  # expected: [7, 11, 13]
|
# Generated by Django 3.1.1 on 2020-09-26 12:33
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Bookmark.user: drop the old single-user field and re-add
    it as an optional many-to-many relation (related_name='bookmarks'),
    so one bookmark can belong to several users.

    NOTE(review): removing then re-adding the field discards any existing
    user links stored in the old column.
    """

    dependencies = [
        ('auctions', '0010_bookmark'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='bookmark',
            name='user',
        ),
        migrations.AddField(
            model_name='bookmark',
            name='user',
            field=models.ManyToManyField(blank=True, related_name='bookmarks', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
'''
Created on 2017. 6. 6.
@author: Joonki
'''
from rest_framework import serializers
from reviews.models import Review, Like, Comment
class ReviewSerializer(serializers.ModelSerializer):
    """Serializes Review instances, exposing the author's username read-only."""

    # Fix: the source was misspelled 'user.userame', which raises an
    # AttributeError at serialization time; the User field is 'username'.
    user = serializers.ReadOnlyField(source='user.username')

    class Meta:
        model = Review
        fields = ('id','user','type','registeredDateTime','updatedDateTime','title','content', 'likeUsers')
class LikeSerializer(serializers.ModelSerializer):
    """Flat serializer for Like rows: review, user and timestamp."""
    class Meta:
        model = Like
        fields = ('review','user','registeredDateTime')
class CommentSerializer(serializers.ModelSerializer):
    """Flat serializer for Comment rows: review, user, content, timestamp."""
    class Meta:
        model = Comment
        fields = ('review','user','content','registeredDateTime')
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class FormUser(forms.ModelForm):
    """User creation form where the username doubles as the e-mail address
    and the password is hashed via set_password before saving."""

    def save(self, commit=True):
        """Save the user, mirroring username into email and hashing the password."""
        user = super(FormUser, self).save(commit=False)
        # The username field holds an e-mail address (see EmailInput below),
        # so copy it into the email field as well.
        user.email = self.cleaned_data["username"]
        # Hash the raw password instead of storing it in plain text.
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user

    class Meta:
        model = User
        fields = ['username', 'first_name', 'password']
        widgets = {
            'username': forms.EmailInput(attrs={'class': 'validate'}),
            'first_name': forms.TextInput(attrs={'class': 'validate'}),
            'password': forms.PasswordInput(attrs={'class': 'validate'})
        }
        # Fix: Django reads the key 'required' (not 'require'); with the
        # misspelled key these custom messages were never displayed.
        error_messages = {
            'username': {
                'required': 'Username e obrigatorio.'
            },
            'first_name': {
                'required': 'Nome e obrigatorio.'
            },
            'password': {
                'required': 'Senha e obrigatorio.'
            },
        }
|
from jeu import *
from ui import *
import sys #Pour l'interface
from PyQt5 import QtGui, QtCore, QtWidgets, uic #Importations d'éléments de PyQt5
from pygame import mixer
import numpy as np
import unittest
#classes : Entite, Joueur(entite), Ia(entite), Plateau(nd.array), Cameleon, Nourriture, Arbitre, Mise
class TestJeu(unittest.TestCase):
    """Tests for the game board, the chameleon, the referee, the AI and the stake."""
    def setUp(self):
        # A QApplication is mandatory before any UI class can be created.
        # NOTE(review): building a new QApplication in every setUp may fail
        # once one already exists -- verify across multiple tests.
        app = QtWidgets.QApplication(sys.argv)
        self.c = Cameleon()  # create a chameleon
        self.p = Plateau(7, 7, self.c, PlateauUi(self,Arbitre(self,self.c),1))  # create a board
        self.a = Arbitre(self.p,self.c)  # create a referee
        self.a.mise_en_place_jetons()  # randomly place the tokens on the board
        self.ia = Ia()  # create an AI
        self.m = Mise()  # and a stake
    def testInit_plateau(self):  # board-related checks
        self.assertEqual(self.c.pos,(3,0))  # chameleon starts at the expected square
        self.assertEqual(self.p[0, 0],-1)  # the four corners must hold -1
        self.assertEqual(self.p[0, -1], -1)
        self.assertEqual(self.p[-1, 0], -1)
        self.assertEqual(self.p[-1, -1], -1)
        self.assertEqual(self.p[3, 0],9)  # the chameleon is encoded as 9
        self.assertEqual(self.a.jetons_restants(),25)  # 25 tokens left (none eaten)
        self.p.calcul_menu(self.c.pos[0],self.c.pos[1])  # refresh the menu
        # NOTE(review): assertTrue(x, msg) never fails here -- the second
        # argument is a message; assertIsInstance was probably intended.
        self.assertTrue(type(self.p.menu),'list')
        self.assertEqual(len(self.p.menu),5)
    def testBouger(self):
        self.c.bouger(2, self.p)  # move the chameleon by two squares
        self.assertEqual(self.p[3, 0],0)  # old square is cleared
        self.assertEqual(self.p[1, 0],9)  # new square carries the chameleon
    def testProjection(self):  # projection() returns a theoretical chameleon position
        self.assertEqual(self.c.projection(self.c.pos[0],self.c.pos[1],2),(1,0))
    def testCalcul_menu(self):  # tests for calcul_menu
        self.p.calcul_menu(3, 0)  # refresh the menu
        self.assertTrue((np.array(self.p[3,1:6]) == np.array(self.p.menu)).all())  # each entry matches the row slice
        self.p.calcul_menu(0, 2)  # one more for good measure
        self.assertTrue((np.array(self.p[1:6,2]) == np.array(self.p.menu)).all())
    def testInit_ia(self):  # AI creation
        self.assertTrue(self.ia.id == 'Ordi')  # its name must be 'Ordi'
    def testInit_mise(self):  # stake creation
        self.assertEqual(self.m.mise, 1)  # must start at 1
    def test_incrementer_mise(self):  # stake increments
        self.m.incrementer_mise()  # raise it once
        self.assertEqual(self.m.mise, 2)  # it must reach 2
        for i in range (6):  # raise it six more times
            self.m.incrementer_mise()
        self.assertEqual(self.m.mise, 5)  # capped at 5
if __name__ == "main":
unittest.main()
|
# Generated by Django 3.0.6 on 2020-05-20 22:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make UserActivationToken.used nullable, so an unused token can hold
    no timestamp until it is consumed."""

    dependencies = [
        ('authentication', '0015_useractivationtoken'),
    ]
    operations = [
        migrations.AlterField(
            model_name='useractivationtoken',
            name='used',
            field=models.DateTimeField(null=True),
        ),
    ]
|
from sympy import symbols
from .utils import sympify_expr
from ..numeric_method import NumericMethod
from .utils import error_absoluto, error_relativo
class Newton2Method(NumericMethod):
    """Modified (second-order) Newton root-finding method, suited to roots
    of multiplicity > 1: x_{n+1} = x_n - f*f' / (f'^2 - f*f'')."""

    def calculate(self, params):
        """Iterate the modified Newton formula until error <= tol, f(x) == 0,
        the denominator vanishes, or n_iter iterations are used up.

        params keys (strings): tol, x0, nIters, fx, dfx, d2fx, tipo_error
        (2 selects relative error, anything else absolute).
        Returns the response dict built by init_response(), filled with the
        iteration table and either 'aproximado' or 'error'.

        NOTE(review): eval() on caller-supplied strings executes arbitrary
        code -- acceptable only if params are trusted input.
        """
        tol = eval(params["tol"])
        xa = eval(params["x0"])
        n_iter = eval(params["nIters"])
        f = params["fx"]
        f_prima = params["dfx"]
        f_dos_prima = params["d2fx"]
        tipo_error = eval(params["tipo_error"])
        calcular_error = error_relativo if tipo_error == 2 else error_absoluto
        response = self.init_response()
        contador = 0
        # Seed the error above tol so the loop runs at least once.
        error = tol + 1
        x = symbols("x")
        f = sympify_expr(f)
        f_prima = sympify_expr(f_prima)
        f_dos_prima = sympify_expr(f_dos_prima)
        response["funcion_in"] = str(f)
        response["f_prima"] = str(f_prima)
        response["f_dos_prima"] = str(f_dos_prima)
        # Evaluate f, f' and f'' at the initial guess.
        fx = f.evalf(subs={x: xa})
        dfx = f_prima.evalf(subs={x: xa})
        d2fx = f_dos_prima.evalf(subs={x: xa})
        denominador = (dfx**2)-(fx*d2fx)
        while ((error > tol) and (fx != 0) and (denominador != 0) and
               (contador < n_iter)):
            # Record the current iterate before stepping (no error on row 0).
            err_fm = "{e:.2e}".format(e=error) if contador != 0 else ""
            fx_fm = "{fx:.2e}".format(fx=fx)
            iteracion = [contador, str(xa), fx_fm, err_fm]
            response["iteraciones"].append(iteracion)
            # Modified Newton step.
            xn = xa - (fx*dfx)/denominador
            fx = f.evalf(subs={x: xn})
            dfx = f_prima.evalf(subs={x: xn})
            d2fx = f_dos_prima.evalf(subs={x: xn})
            denominador = (dfx**2)-(fx*d2fx)
            error = calcular_error(xn, xa)
            xa = xn
            contador = contador + 1
        # Append the final iterate to the table.
        fx_fm = "{fx:.2e}".format(fx=fx)
        err_fm = "{e:.2e}".format(e=error) if contador != 0 else ""
        iteracion = [contador, str(xa), fx_fm, err_fm]
        response["iteraciones"].append(iteracion)
        if fx == 0:
            # response["raiz"] = str(xa)
            # NOTE(review): if f(x0) == 0 the loop never runs and xn is
            # undefined here, raising NameError -- verify upstream inputs.
            response["aproximado"] = str(xn)
        elif error < tol:
            response["aproximado"] = str(xn)
        elif denominador == 0:
            response["error"] = "Denominador es igual a cero"
        else:
            response["error"] = "El método fracasó en {} iteraciones"\
                .format(n_iter)
        return response

    def get_description(self):
        """Return (description, required-parameters) strings for the UI."""
        return "Este metodo encuentra la raiz de una función aún si tiene \
        raices multiples", "Se necesita tol, x0, n_iter, funcion, f_prima y \
        f_dos_prima"

    def init_response(self):
        """Return the empty response skeleton used by calculate()."""
        response = dict()
        response["iteraciones"] = []
        response["error"] = ""
        return response
|
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
im = Image.open('./../image/lena.png')
print(im.size)
img = np.array(im)  # PIL Image -> numpy array (H, W, C)
print(img.shape)
img = img[:,:,0:3]  # keep RGB only, dropping any alpha channel
b = img[:,:,0:1]    # first channel, kept as H x W x 1
plt.imshow(img, 'Blues')  # colormap is ignored for 3-channel data
# Fix: savefig must come before show() -- with a blocking show() the current
# figure is empty once the window closes, so out.png was saved blank.
plt.savefig('out.png')
plt.show()
# File: train_embedder.py
# Creation: Saturday September 19th 2020
# Author: Arthur Dujardin
# Contact: arthur.dujardin@ensg.eu
# arthurd@ifi.uio.no
# --------
# Copyright (c) 2020 Arthur Dujardin
# Basic imports
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
import torchvision.transforms as transforms
from pytorch_pretrained_bert import BertTokenizer
# img2poem package
from img2poem.trainers.embedder import PoeticEmbedderTrainer
from img2poem.nn import PoeticEmbedder
from img2poem.datasets import PoeticEmbeddedDataset
from img2poem.utils import count_parameters
if __name__ == "__main__":
ROOT = "../"
FILENAME = f"{ROOT}/data/images/image-Sentiment-polarity-DFE.csv"
IMAGE_DIR = f"{ROOT}/data/images/crowdflower/sentiment"
RESNET_SENTIMENT_STATE = f'{ROOT}/models/resnet50_sentiment.pth.tar'
RESNET_SCENE_STATE = f'{ROOT}/models/resnet50_scene.pth.tar'
BATCH_SIZE = 32
LR = 1e-4
SPLIT = 0.9
print(f"\n0. Hyper params...")
print(f"\t------------------------")
print(f"\tBatch size: {BATCH_SIZE}")
print(f"\tLearning Rate: {LR}")
print(f"\tSplit ratio: {SPLIT}")
print(f"\t------------------------")
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
max_seq_len = tokenizer.max_model_input_sizes['bert-base-uncased']
max_seq_len = 128
transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor()
])
print("\n1. Load the dataset...")
dataset = PoeticEmbeddedDataset(FILENAME, IMAGE_DIR,
tokenizer=tokenizer,
max_seq_len=max_seq_len,
transform=transform)
print(f"Dataset ids size: {dataset.ids.shape}")
print(f"Dataset images size: {dataset.images.shape}")
print(f"Dataset token ids size: {dataset.token_ids.shape}")
train_size = int(SPLIT * len(dataset))
dev_size = len(dataset) - train_size
train_dataset, test_dataset = random_split(dataset, [train_size, dev_size])
train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
eval_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
print("\n2. Build the model...")
model = PoeticEmbedder(embedding_dim=512, alpha=0.2)
model.from_pretrained(sentiment_state=RESNET_SENTIMENT_STATE,
scene_state=RESNET_SCENE_STATE)
model.fine_tune()
print(f'The model has {count_parameters(model):,} trainable parameters')
print("\n3. Build the trainer...")
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adadelta(model.parameters(), lr=LR)
trainer = PoeticEmbedderTrainer(model, optimizer, criterion)
print("\n4. Train...")
trainer.fit(train_loader, eval_loader, epochs=100)
|
import streamlit as st
from PIL import Image
import pickle
import numpy as np
import pandas as pd
import time
st.set_option('deprecation.showfileUploaderEncoding', False)
# Load the pickled model.
# Fix: close the file handle deterministically instead of leaking it.
# NOTE(review): unpickling executes arbitrary code -- only load trusted files.
with open('flight.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
def predict_price(Total_Stops, Jounary_day, Jounary_Month, Deep_hour, Deep_min,
                  Arival_hour, Arival_min,Duration_hour, Duration_min,
                  Airline_AirIndia, Airline_GoAir, Airline_IndiGo,
                  Airline_JetAirways, Airline_JetAirwaysBusiness,
                  Airline_Multiplecarriers,
                  Airline_MultiplecarriersPremiumeconomy, Airline_SpiceJet,
                  Airline_Trujet, Airline_Vistara, Airline_VistaraPremiumeconomy,
                  Destination_Cochin, Destination_Delhi, Destination_Hyderabad,
                  Destination_Kolkata, Destination_NewDelhi, Source_Chennai,
                  Source_Delhi, Source_Kolkata, Source_Mumbai):
    """Run the pickled regression model on one flight described by journey
    fields plus one-hot airline/destination/source flags; return the
    predicted price as a float."""
    feature_row = [Total_Stops, Jounary_day, Jounary_Month, Deep_hour, Deep_min,
                   Arival_hour, Arival_min, Duration_hour, Duration_min,
                   Airline_AirIndia, Airline_GoAir, Airline_IndiGo,
                   Airline_JetAirways, Airline_JetAirwaysBusiness,
                   Airline_Multiplecarriers,
                   Airline_MultiplecarriersPremiumeconomy, Airline_SpiceJet,
                   Airline_Trujet, Airline_Vistara, Airline_VistaraPremiumeconomy,
                   Destination_Cochin, Destination_Delhi, Destination_Hyderabad,
                   Destination_Kolkata, Destination_NewDelhi, Source_Chennai,
                   Source_Delhi, Source_Kolkata, Source_Mumbai]
    # Single-row 2D array of float64, as the model expects.
    features = np.array([feature_row], dtype=np.float64)
    return float(model.predict(features))
# Category orders below must match the column order of predict_price's
# signature (which mirrors the model's training columns).
AIRLINE_COLUMNS = ['Air India', 'GoAir', 'IndiGo', 'Jet Airways',
                   'Jet Airways Business', 'Multiple carriers',
                   'Multiple carriers Premium economy', 'SpiceJet',
                   'Trujet', 'Vistara', 'Vistara Premium economy']
DESTINATION_COLUMNS = ['Cochin', 'Delhi', 'Hyderabad', 'Kolkata', 'New Delhi']
SOURCE_COLUMNS = ['Chennai', 'Delhi', 'Kolkata', 'Mumbai']


def _one_hot(choice, columns):
    """Return one 0/1 flag per column; all zeros for the dropped base
    category ('Air Asia' airline, 'Banglore' source/destination)."""
    return [1 if choice == column else 0 for column in columns]


def main():
    """Streamlit page: collect flight details and show the predicted price."""
    st.title("50_Startups")
    html_temp ="""
    <div style="background-color:black; padding:10px">
    <h2 style="color:white;text-align:center;">50_Startups</h2>
    </div>
    """
    st.markdown(html_temp,unsafe_allow_html=True)
    # NOTE(review): Total_Stops is passed through as the raw text-input
    # string; a non-numeric entry will fail the float conversion downstream.
    Total_Stops = st.text_input("Total Stops")
    Jounary_Day = st.date_input('Jounary_Day')
    Jounary_day = pd.to_datetime(Jounary_Day,format="%Y-%m-%d").day
    Jounary_Month = pd.to_datetime(Jounary_Day,format="%Y-%m-%d").month
    Depature = st.time_input('Depature Time')
    Deep_hour = int(pd.to_datetime(Depature, format ="%H:%M:%S").hour)
    Deep_min = int(pd.to_datetime(Depature, format ="%H:%M:%S").minute)
    Arival = st.time_input('Arival Time')
    Arival_hour = int(pd.to_datetime(Arival, format ="%H:%M:%S").hour)
    Arival_min = int(pd.to_datetime(Arival, format ="%H:%M:%S").minute)
    Duration_hour = abs(Arival_hour-Deep_hour)
    Duration_min = abs(Arival_min-Deep_min)
    airline = st.selectbox('Select Airlines name:',('IndiGo', 'Air India', 'Jet Airways', 'SpiceJet','Multiple carriers', 'GoAir', 'Vistara', 'Air Asia','Vistara Premium economy', 'Jet Airways Business', 'Multiple carriers Premium economy', 'Trujet'))
    # The giant if/elif ladders are replaced by table-driven one-hot encoding.
    airline_flags = _one_hot(airline, AIRLINE_COLUMNS)
    Destination = st.selectbox('Select Destination name:',('New Delhi', 'Banglore', 'Cochin', 'Kolkata', 'Delhi', 'Hyderabad'))
    # Fix: 'New Delhi' used to be compared against 'New_Delhi' (underscore),
    # so the New Delhi flag could never be set.
    destination_flags = _one_hot(Destination, DESTINATION_COLUMNS)
    Source = st.selectbox('Select Source name:',('Banglore', 'Kolkata', 'Delhi', 'Chennai', 'Mumbai'))
    # Fix: the Chennai branch assigned 'SourceKolkata' (missing underscore),
    # leaving Source_Kolkata undefined and crashing the predict call.
    source_flags = _one_hot(Source, SOURCE_COLUMNS)
    if st.button("Predict"):
        output = predict_price(Total_Stops, Jounary_day, Jounary_Month,
                               Deep_hour, Deep_min, Arival_hour, Arival_min,
                               Duration_hour, Duration_min,
                               *airline_flags, *destination_flags, *source_flags)
        st.success(round(output))
    if st.button("About"):
        st.header("By Hritwick Goyal")
        st.subheader("Intern")
|
from PyObjCTools.TestSupport import TestCase
import WebKit
class TestDOMHTMLElement(TestCase):
    """PyObjC bridge checks for WebKit's DOMHTMLElement class."""

    # Fix: renamed from 'testMehods' (typo) to 'testMethods'; the 'test'
    # prefix is kept so the runner still discovers it.
    def testMethods(self):
        # isContentEditable must be bridged as returning an Objective-C BOOL.
        self.assertResultIsBOOL(WebKit.DOMHTMLElement.isContentEditable)
|
import datetime
import json
import os

from logging import *
# Rules
def createRulesFile(folder, *args):
    """
    Creates a rules file for the given folder.
    Args:
    - folder: string, a path for the folder which we are creating the rule file.
    - *args: strings, all the rules that will constitute the rules file.
    *args has the following structure:
        1. - filename type
        2. - folder type
        3. - keywords

    NOTE(review): RULES_FILE, log and errorPrint are expected to come from
    elsewhere in the project (none is defined in this module).
    """
    log("createRulesFile")
    try:
        if not os.path.isfile(folder + RULES_FILE):
            # Fix: 'datetime' was never imported, and a datetime object is
            # not JSON-serializable anyway -- store an ISO-8601 string.
            dic = {"filename": args[0],
                   "type": args[1],
                   "creationDate": datetime.datetime.today().isoformat(),
                   "keywords": args[2]}
            with open(folder + RULES_FILE, 'w') as json_file:
                json.dump(dic, json_file)
        else:
            errorPrint("Folder already initiated...")
    except (OSError, IndexError):
        # Narrowed from a bare except so programming errors are not hidden.
        errorPrint("Unable to create rule file for: {}".format(folder))
def readRulesFile(folder):
    """
    Reads and returns the rules for a folder.
    Args:
    - folder, string, the path of the folder where the rules file will be read from.
    Returns:
    - the parsed rules dict, or None when the file is missing or unreadable.
    """
    log("readRulesFile")
    try:
        if os.path.isfile(folder + RULES_FILE):
            # Fix: the mode string was being passed to json.load instead of
            # open() -- json.load(open(path), "r") raises a TypeError. The
            # context manager also closes the file deterministically.
            with open(folder + RULES_FILE, "r") as json_file:
                return json.load(json_file)
        else:
            errorPrint("Rule files does not exist in {}. Please init the folder.".format(folder))
    except (OSError, ValueError):
        # ValueError covers json.JSONDecodeError for malformed files.
        errorPrint("Unable to read rule file from {}".format(folder))
|
from flask import Flask, jsonify
from flask_socketio import SocketIO
from flask_cors import CORS
from RPi import GPIO
from helpers.Database import Database
from SerialPort import SerialPort
import threading
serialPort = SerialPort()
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)  # use Broadcom pin numbering
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the web client
socketio = SocketIO(app)
hallsensor = 4  # BCM pin of the hall sensor (unused in the visible code)
f = 0  # forward-toggle seed; only read as forward()'s default argument
r = 0  # reverse-toggle seed; only read as reverse()'s default argument
eindpoint = '/api/vi/'  # unused in the visible code; 'vi' may be a typo for 'v1' -- verify
conn = Database(app=app, host='192.168.4.1', port=3306, user='mct', password='mct', db='projectdb')
@app.route('/')
def hallo():
    """Health-check endpoint."""
    return "Server is running"
def start():
    """Send the 'Start' serial command, log it to the database, and re-arm.

    NOTE(review): re-schedules itself every second with no stop condition,
    so once triggered it keeps sending and inserting rows indefinitely.
    """
    serialPort.send("Start")
    threading.Timer(1, start).start()
    data = conn.set_data("INSERT INTO historiek(GebruikersID, ActionID, waarde) VALUES (%s, %s, %s)", [102, 1, 1])
    print("threading works")
    return data
def go():
    """Send the 'Forward' serial command once per second, recursively re-armed."""
    serialPort.send("Forward")
    threading.Timer(1, go).start()
    print("Sending forward signal")
def stop():
    """Send the 'Backward' serial command once per second, recursively re-armed.

    NOTE(review): despite the name, this drives the motor backwards rather
    than stopping it.
    """
    serialPort.send("Backward")
    threading.Timer(1, stop).start()
    print("Sending reverse signal")
@socketio.on("connect")
def connecting():
socketio.emit("connected")
print("Connection with client established")
threading.Timer(1, start).start()
@socketio.on("forward")
def forward(fSignal=f):
print("Going forward")
if fSignal % 2 == 0:
fSignal += 1
threading.Timer(1, go).start()
data = conn.set_data("INSERT INTO historiek(GebruikersID, ActionID, waarde) VALUES (%s, %s, %s)", [102, 5, 1])
return data
else:
fSignal += 1
threading.Timer(1, go).cancel()
data = conn.set_data("INSERT INTO historiek(GebruikersID, ActionID, waarde) VALUES (%s, %s, %s)", [102, 5, 0])
return data
@socketio.on("reverse")
def reverse(rSignal=r):
print("Going backwards")
if rSignal % 2 == 0:
rSignal += 1
threading.Timer(1, stop).start()
data = conn.set_data("INSERT INTO historiek(GebruikersID, ActionID, waarde) VALUES (%s, %s, %s)", [102, 5, 1])
return data
else:
rSignal += 1
threading.Timer(1, stop).cancel()
data = conn.set_data("INSERT INTO historiek(GebruikersID, ActionID, waarde) VALUES (%s, %s, %s)", [102, 5, 0])
return data
if __name__ == '__main__':
    # Bind on all interfaces so the robot is reachable over the LAN.
    socketio.run(app=app, host="0.0.0.0", port="5000")
|
from bs4 import BeautifulSoup as bs
from splinter import Browser
import requests
import pandas as pd
import time
import re
# NOTE(review): hard-coded absolute chromedriver path; headless=False opens
# a visible browser window for every scrape.
executable_path = {'executable_path': 'C:\\Users\\Murtadha Almayahi\\Documents\\Python Scripts\\chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
# NASA Mars News
def mars_news_title():
    """Scrape mars.nasa.gov/news and return the latest article's title."""
    url = 'https://mars.nasa.gov/news'
    browser.visit(url)
    time.sleep(3)  # let the JS-rendered list load before parsing
    html = browser.html
    soup = bs(html, "html.parser")
    latest_news_container = soup.find('div', class_= "image_and_description_container")
    latest_news_text = latest_news_container.find('div', class_ ="list_text")
    news_title = latest_news_text.find('div', class_ = 'content_title').text
    return news_title
def mars_news_p():
    """Scrape mars.nasa.gov/news and return the latest article's teaser text.

    NOTE(review): duplicates mars_news_title's whole page fetch; the two
    could share one visit if performance matters.
    """
    url = 'https://mars.nasa.gov/news'
    browser.visit(url)
    time.sleep(3)  # let the JS-rendered list load before parsing
    html = browser.html
    soup = bs(html, "html.parser")
    latest_news_container = soup.find('div', class_= "image_and_description_container")
    latest_news_text = latest_news_container.find('div', class_ ="list_text")
    news_p = latest_news_text.find('div',class_= 'article_teaser_body').text
    return news_p
# JPL Mars Space Images - Featured Image
def mars_featured_image():
    """Open JPL's Mars space-images page, click through to the full-size
    featured image, and return its absolute URL."""
    url = 'https://www.jpl.nasa.gov'
    images = '/spaceimages/?search=&category=Mars'
    browser.visit(url+images)
    time.sleep(3)
    button = browser.find_by_id('full_image')
    button.click()
    html = browser.html
    time.sleep(3)  # wait for the fancybox overlay to render
    soup = bs(html, 'html.parser')
    featured_image = soup .find('a', class_= "button fancybox")['data-fancybox-href']
    featured_image_url = url + featured_image
    return featured_image_url
# Mars Weather
def mars_weather():
    """Return the first tweet text containing 'Sol' from @MarsWxReport.

    NOTE(review): returns None when no matching string is found; Twitter's
    markup changes frequently, so verify this still matches.
    """
    url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url)
    time.sleep(3)
    html = browser.html
    soup = bs(html, 'html.parser')
    mars_weather = soup.find(string=re.compile("Sol"))
    mars_weather  # no-op expression; kept from the original
    return mars_weather
# Mars Facts
def mars_profile():
    """Scrape the Mars facts table from space-facts.com and return it as an
    HTML table (Description -> Value)."""
    url = 'https://space-facts.com/mars/'
    browser.visit(url)
    time.sleep(3)
    html = browser.html
    soup = bs(html, 'html.parser')
    table = soup.find('table')
    # pandas parses the first table found on the page.
    mars_planet_profile = pd.read_html(str(table))[0]
    mars_planet_profile = mars_planet_profile.rename(columns={0: 'Description', 1: 'Value'}).set_index('Description')
    mars_profile_html = mars_planet_profile.to_html(index = True, header =True)
    return mars_profile_html
# Mars Hemisphere
def mars_hemispheres():
    """Scrape astrogeology.usgs.gov for the four Mars hemisphere images and
    return a list of {'title', 'image_url'} dicts."""
    url = 'https://astrogeology.usgs.gov'
    enhanced = '/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url+enhanced)
    time.sleep(3)
    html = browser.html
    soup = bs(html, 'html.parser')
    hemispheres = soup.find_all('div', class_ = 'item')
    hemisphere_image_urls = []
    # Visit each hemisphere's detail page to grab its full-size download link.
    for hemisphere in hemispheres:
        hemisphere_title = hemisphere.find('h3').text
        title = hemisphere_title.replace('Enhanced','')
        hemisphere_url = hemisphere.find('a')['href']
        browser.visit(url + hemisphere_url)
        html = browser.html
        soup = bs(html, 'html.parser')
        downloads = soup.find('div', class_='downloads')
        image_url = downloads.find('a')['href']
        hemisphere_image_urls.append({'title': title,'image_url': image_url})
    return hemisphere_image_urls
# Scrape Function
def scrape():
    """Run every Mars scraper and collect the results into a single dict."""
    return {
        "news_title": mars_news_title(),
        "news_p": mars_news_p(),
        "featured_image_url": mars_featured_image(),
        "weather": mars_weather(),
        "profile": mars_profile(),
        "hemisphere_urls": mars_hemispheres(),
    }
# Simple interactive calculator menu.
a=int(input("Ingrese el primer numero: "))
b=int(input("Ingrese el segundo numero: "))
print('La receta auténtica del alioli:\n1- Sumar \n2- Restar \n3- Multiplicar \n4- Dividir \n5- Salir')
opcion=int(input("Ingrese una opcion: "))
if opcion == 1:
    resultado=a+b
    print("el resultado de la suma es",resultado)
elif opcion == 2:
    resultado=a-b
    print("el resultado de la resta es",resultado)
elif opcion == 3:
    resultado=a*b
    print("el resultado de la multiplicacion es",resultado)
elif opcion == 4:
    # Fix: guard the (integer) division against b == 0 instead of crashing
    # with ZeroDivisionError.
    if b == 0:
        print("no se puede dividir por cero")
    else:
        resultado=a//b
        print("el resultado de la division es",resultado)
elif opcion == 5:
    print("Adios!")
    exit()
else:
    print ("opcion incorrecta")
|
from PIL import ImageGrab
import numpy as np
import cv2
from .constant import MALE, FEMALE
# bgr
# Grayscale badge templates (flag 0 = IMREAD_GRAYSCALE).
# NOTE(review): cv2.imread silently returns None when a file is missing,
# which only surfaces later inside matchTemplate -- verify the paths.
TEMPLATE_MALE = cv2.imread("./image/male.png", 0)
# TEMPLATE_MALE = TEMPLATE_MALE[:, :, 0]
TEMPLATE_FEMALE = cv2.imread("./image/female.png", 0)
# TEMPLATE_FEMALE = TEMPLATE_FEMALE[:, :, 2]
DEBUG_NUM = 0  # counter for the commented-out debug screenshots below
def get_gender(x, y):
    """Grab the screen and classify the gender badge near window position
    (x, y) by template-matching against the male/female images.

    Returns MALE or FEMALE, or 0 when neither template reaches 0.9
    normalized correlation.
    """
    global DEBUG_NUM
    img = np.array(ImageGrab.grab())  # full-screen screenshot as an array
    # img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Crop the badge region; the offsets assume a fixed window layout -- TODO confirm.
    img = img[y + 43: y + 66, x + 38: x + 248, :]
    # cv2.imwrite('./test_{!s}.png'.format(DEBUG_NUM), img)
    # DEBUG_NUM += 1
    # Channel 0 is matched for male and channel 2 for female; presumably
    # chosen to maximize badge contrast -- verify against the templates.
    corr_male = cv2.matchTemplate(img[:, :, 0], TEMPLATE_MALE, cv2.TM_CCOEFF_NORMED)
    corr_male = np.max(corr_male)
    corr_female = cv2.matchTemplate(img[:, :, 2], TEMPLATE_FEMALE, cv2.TM_CCOEFF_NORMED)
    corr_female = np.max(corr_female)
    print('%.3f, %.3f' % (corr_male, corr_female))
    if max(corr_female, corr_male) < 0.9:
        return 0  # no confident match
    return MALE if corr_male > corr_female else FEMALE
#!/usr/bin/env python
# __author__ = '北方姆Q'
# -*- coding: utf-8 -*-
import jieba, os, re
from gensim.corpora import WikiCorpus
def get_wiki_text():
    """Extract plain text from a zhwiki XML dump into wiki.zh.txt,
    one article per line, using gensim's WikiCorpus."""
    outp = "../../data/wiki/wiki.zh.txt"
    inp = "../../data/wiki/zhwiki-20190720-pages-articles-multistream.xml.bz2"
    space = " "
    output = open(outp, 'w', encoding='utf-8')
    # gensim's WikiCorpus class handles the Wikipedia dump format.
    # NOTE(review): gensim's docs use dictionary={} to skip building a
    # vocabulary; [] is passed here -- verify it behaves identically.
    wiki = WikiCorpus(inp, lemmatize=False, dictionary=[])
    # get_texts yields each article as a token list with punctuation already
    # stripped; write it back as one space-joined line.
    for text in wiki.get_texts():
        output.write(space.join(text) + "\n")
    output.close()
def remove_words():
    """Keep only Chinese characters and whitespace from each line of
    data/wiki.zh.zh.txt, writing the result to data/wiki.zh.txt.

    Fix: neither file handle was ever closed, so buffered output could be
    lost; both are now managed by context managers. The regex is also
    compiled once instead of on every line.
    """
    pattern = re.compile('[\n\s*\r\u4e00-\u9fa5]')
    with open('data/wiki.zh.zh.txt', 'r', encoding='utf-8') as inp, \
         open('data/wiki.zh.txt', 'w', encoding='utf-8') as output:
        for line in inp:
            output.write("".join(pattern.findall(line)))
def separate_words():
    """Segment each line of data/wiki.zh.txt with jieba and write the
    space-joined tokens to data/wiki.corpus.txt, one line per input line.

    Fix: neither file handle was ever closed, so buffered output could be
    lost; both are now managed by context managers.
    """
    with open('data/wiki.zh.txt', 'r', encoding='utf-8') as inp, \
         open('data/wiki.corpus.txt', 'w', encoding='utf-8') as output:
        for line in inp:
            seg_list = jieba.cut(line.strip())
            output.write(' '.join(seg_list) + '\n')
# get_wiki_text()
# remove_words()
# separate_words()
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from .models import Person
from .forms import PersonForm
from django.core import serializers
import json
# Create your views here.
@login_required
def persons_list(request):
    """Render every Person in the person.html template."""
    persons = Person.objects.all() # SELECT * FROM PERSON
    return render(request, 'person.html', {'persons': persons})
@login_required
def persons_new(request):
    """Create a Person: render the form on GET/invalid POST, save on valid POST."""
    form = PersonForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        return redirect('person_list')
    return render(request, 'person_form.html', {'form': form})
@login_required
def persons_update(request, id):
    """Edit an existing Person (404 if missing); save on valid POST."""
    person = get_object_or_404(Person, pk=id)
    form = PersonForm(request.POST or None, request.FILES or None, instance = person)
    if form.is_valid():
        form.save()
        return redirect('person_list')
    return render(request, 'person_form.html', {'form': form})
@login_required
def persons_delete(request, id):
    """Delete a Person on POST; GET renders the confirmation page first."""
    person = get_object_or_404(Person, pk=id)
    #form = PersonForm(request.POST or None, request.FILES or None, instance = person)
    if request.method == 'POST':
        person.delete()
        return redirect('person_list')
    return render(request, 'person_delete_confirm.html', {'person': person})
@login_required
def persons_search(request):
    """Return persons whose first name starts with ?first_name=..., as JSON."""
    first_name = request.GET.get('first_name')
    # SELECT * FROM PERSONS WHERE FIRST_NAME LIKE 'some-name%'
    persons = Person.objects.filter(first_name__startswith=first_name)
    persons = [ person_serializer(person) for person in persons ]
    return HttpResponse(json.dumps(persons), content_type='application/json')
def person_serializer(person):
    """Reduce a Person to a JSON-safe dict of its name fields."""
    serialized = {
        'first_name': person.first_name,
        'last_name': person.last_name,
    }
    return serialized
|
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework import viewsets
from .serializers import UserSerializer
from .models import User
from rest_framework.response import Response
from django.views.decorators.http import require_http_methods
import json
# Create your views here.
@require_http_methods(['POST'])
def getUserDetail(request, username, password):
    """Return profile fields for the user matching the given credentials.

    NOTE(review): credentials travel in the URL path and the password is
    compared in plain text -- consider moving them to the POST body and
    using Django's hashed-password authentication.
    """
    users = User.objects.filter(username=username,
        password=password).values('firstName', 'lastName', 'email')
    users = list(users)
    # Fix: JsonResponse only serializes dicts unless safe=False; returning
    # a list without it raises TypeError at runtime.
    return JsonResponse(status=200, data=users, safe=False)
# def registerUser(request):
# user = User() |
import datetime
import typing
import kubernetes.client
class V1ScopeSelector:
    """Typed stub for the Kubernetes V1ScopeSelector API object."""
    # Optional list of scoped-resource selector requirements.
    match_expressions: typing.Optional[
        list[kubernetes.client.V1ScopedResourceSelectorRequirement]
    ]
    def __init__(
        self,
        *,
        match_expressions: typing.Optional[
            list[kubernetes.client.V1ScopedResourceSelectorRequirement]
        ] = ...
    ) -> None: ...
    def to_dict(self) -> V1ScopeSelectorDict: ...
def to_dict(self) -> V1ScopeSelectorDict: ...
class V1ScopeSelectorDict(typing.TypedDict, total=False):
    """Dict form of V1ScopeSelector as produced by to_dict()."""
    matchExpressions: typing.Optional[
        list[kubernetes.client.V1ScopedResourceSelectorRequirementDict]
    ]
|
class ListNode(object):
    """A node of a singly linked list."""
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def reverseList(self, head):
        """Reverse a singly linked list in place and return the new head."""
        reversed_head = None
        node = head
        while node:
            following = node.next
            node.next = reversed_head
            reversed_head = node
            node = following
        return reversed_head

    def reorderList(self, head):
        """
        Reorder L0->L1->...->Ln-1->Ln into L0->Ln->L1->Ln-1->... in place.
        :type head: ListNode
        :rtype: void
        """
        if not head:
            return
        # Find the node just before the second half with slow/fast pointers.
        slow = fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        # Detach the second half and reverse it.
        second = slow.next
        slow.next = None
        second = self.reverseList(second)
        # Weave the reversed second half into the first half.
        first = head
        while second:
            after_second = second.next
            second.next = first.next
            first.next = second
            first = second.next
            second = after_second

    def list2linked(self, List):
        """Build a linked list from a non-empty Python list; return its head."""
        head = ListNode(List[0])
        tail = head
        for value in List[1:]:
            tail.next = ListNode(value)
            tail = tail.next
        return head
if __name__ == '__main__':
    # Demo: reorder 1->2->3->4->5 into 1->5->2->4->3 and print each value.
    nums = [1, 2, 3, 4, 5]
    sol = Solution()
    head = sol.list2linked(nums)
    sol.reorderList(head)
    while head:
        # BUG FIX: `print head.val` is Python-2-only syntax (SyntaxError on
        # Python 3); the parenthesized form works on both 2 and 3.
        print(head.val)
        head = head.next
|
import os
import dbus
import XlibGetWindowId
import KActivities
#######################################################################
# Function returns the window id for the current window #
#######################################################################
def _getWindowId():
    """Return the X11 window id of the current (G)Vim window, or 0 if unknown."""
    # 1) Honour an explicit v:windowid if Vim exposes one.
    try:
        wid = 0
        var = "v:windowid"
        varExists = vim.eval('exists("' + var + '")')
        if not varExists == "0":
            wid = vim.eval(var)
        if not wid == "0":
            return wid
    except:
        pass
    # 2) GVIM: ask X for windows belonging to the current process.
    for wid in XlibGetWindowId.getWindowIdsForCurrentProcess():
        return wid
    # 3) Terminal Vim: fall back to the WINDOWID environment variable.
    try:
        wid = int(os.getenv("WINDOWID"))
        return wid
    # BUG FIX: os.getenv returns None when WINDOWID is unset, which makes
    # int() raise TypeError (not ValueError); catch both so we return 0
    # instead of crashing.
    except (TypeError, ValueError):
        return 0
#######################################################################
# Returns the url for the current file #
#######################################################################
def _urlForCurrentDocument(suf = ":p"):
try:
document = vim.eval('expand("<afile>' + suf + '")')
if document is None:
document = vim.eval('expand("%' + suf + '")')
if os.path.exists("/" + document):
return "file://" + document
return None
except:
return None
#######################################################################
# Activities related methods #
#######################################################################
# Lazily-created process-wide ResourceInstance shared by the handlers below.
kde_activities_resourceinstance = None
def kde_activities_ResourceInstance():
    """Return the shared KActivities ResourceInstance, creating it on first use."""
    global kde_activities_resourceinstance
    if kde_activities_resourceinstance is None:
        # Bound to this window so KDE can associate the resource with it.
        kde_activities_resourceinstance = KActivities.ResourceInstance(_getWindowId(), "gvim")
    return kde_activities_resourceinstance
def kde_activities_FocussedIn():
    """Report the currently focussed document's URL to KActivities."""
    url = _urlForCurrentDocument()
    if url is not None:
        kde_activities_ResourceInstance().setUri(url)
def kde_activities_FocussedOut():
    """No-op on focus-out; the resource instance keeps its last URI."""
    pass
def kde_activities_Link():
    """Link the current document to the active KDE activity."""
    url = _urlForCurrentDocument()
    if url is None:
        return
    KActivities.LinkResourceToActivity(url)
def kde_activities_Unlink():
    """Unlink the current document from the active KDE activity."""
    url = _urlForCurrentDocument()
    if url is None:
        return
    KActivities.UnlinkResourceFromActivity(url)
def kde_activities_LinkDirectory():
    """Link the current document's directory to the active KDE activity."""
    url = _urlForCurrentDocument(":p:h")
    if url is None:
        return
    KActivities.LinkResourceToActivity(url)
def kde_activities_UnlinkDirectory():
    """Unlink the current document's directory from the active KDE activity."""
    url = _urlForCurrentDocument(":p:h")
    if url is None:
        return
    KActivities.UnlinkResourceFromActivity(url)
|
from django.conf.urls import url
from . import views
# URL routes for the support-survey workflow.
urlpatterns = [
    # Create a new support survey.
    url(
        r'^create-support-survey/$',
        views.create_support_survey,
        name='create_support_survey'
    ),
    # Fill in an existing survey, identified by its primary key.
    url(r'^support-survey/(?P<pk>\w+)$',
        views.complete_support_survey,
        name='complete_support_survey'
    )
]
|
from flask import Flask, render_template, request
from flask_wtf import CSRFProtect
from config import Config
from helpers import df, get_musicbrainz_info, get_my_year_album
import wikipedia
import requests
# Flask application setup: load configuration and enable CSRF protection.
app = Flask(__name__)
app.config.from_object(Config)
csrf = CSRFProtect(app)
@app.route('/')
def index():
    """Render the landing page with the selectable years and a visit counter."""
    list_of_years = df['Release Date'].unique().tolist()
    list_of_years.sort(reverse=True)
    # The hit counter is best-effort: any network/JSON problem just hides it.
    try:
        counter = requests.get('https://api.countapi.xyz/get/musicalista/key').json()['value']
    # BUG FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt.
    except Exception:
        counter = ''
    return render_template('index.html', years=list_of_years, counter=counter)
@app.route('/results', methods=['POST'])
def results():
    """Show the chosen album for the submitted year, with an optional wiki page."""
    if request.method == "POST":
        # Code for the temporary counter. Best-effort: never fail the request
        # over the counter service (consistent with index()).
        try:
            requests.get('https://api.countapi.xyz/hit/musicalista/key')
        except Exception:
            pass
        year = request.form['year']
        year_album = get_my_year_album(int(year))
        album_info = get_musicbrainz_info(year_album)
        # The Wikipedia lookup is optional; fall back to empty on any failure.
        try:
            wiki = wikipedia.page(f"{album_info['Album']} {album_info['Artist Name']} {album_info['Release Date']}")
        # BUG FIX: narrowed from a bare `except:`.
        except Exception:
            wiki = ''
        return render_template('results.html', album_info=album_info, wiki=wiki)
    return "404"
    # return render_template('results.html')
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import botocore.exceptions
from tempest.lib import decorators
from ec2api_tempest_plugin import base
from ec2api_tempest_plugin import config
CONF = config.CONF
class VpnGatewayTest(base.EC2TestCase):
    """EC2-API tests for VPN gateway create/delete and VPC attach/detach."""

    # CIDR for the throwaway VPC the gateway is attached to.
    VPC_CIDR = '10.41.0.0/20'
    vpc_id = None

    @classmethod
    @base.safe_setup
    def setUpClass(cls):
        """Create a shared VPC; skip the class when VPC or VPNaaS is disabled."""
        super(VpnGatewayTest, cls).setUpClass()
        if not base.TesterStateHolder().get_vpc_enabled():
            raise cls.skipException('VPC is disabled')
        base.check_vpnaas_enabled()
        data = cls.client.create_vpc(CidrBlock=cls.VPC_CIDR)
        cls.vpc_id = data['Vpc']['VpcId']
        cls.get_vpc_waiter().wait_available(cls.vpc_id)
        cls.addResourceCleanUpStatic(cls.client.delete_vpc, VpcId=cls.vpc_id)

    @decorators.idempotent_id('d38c0185-782c-4da3-b02c-9cd7bf91b001')
    def test_create_delete_vpn_gateway(self):
        """Create a VPN gateway, delete it, and verify it is gone."""
        data = self.client.create_vpn_gateway(
            Type='ipsec.1', AvailabilityZone=CONF.aws.aws_zone)
        vgw_id = data['VpnGateway']['VpnGatewayId']
        vgw_clean = self.addResourceCleanUp(
            self.client.delete_vpn_gateway, VpnGatewayId=vgw_id)
        self.get_vpn_gateway_waiter().wait_available(vgw_id)
        self.client.delete_vpn_gateway(VpnGatewayId=vgw_id)
        self.cancelResourceCleanUp(vgw_clean)
        self.get_vpn_gateway_waiter().wait_delete(vgw_id)
        # After deletion the gateway either still shows up in state 'deleted'
        # or the describe call fails with NotFound -- both are acceptable.
        try:
            data = self.client.describe_vpn_gateways(
                VpnGatewayIds=[vgw_id])
            self.assertEqual(1, len(data['VpnGateways']))
            self.assertEqual('deleted', data['VpnGateways'][0]['State'])
        except botocore.exceptions.ClientError as ex:
            self.assertEqual('InvalidVpnGatewayID.NotFound',
                             ex.response['Error']['Code'])

    @decorators.idempotent_id('1d76b335-57ba-449a-9751-af75a8a7d11c')
    def test_attach_detach_vpn_gateway(self):
        """Attach a VPN gateway to the class VPC, then detach it again."""
        data = self.client.create_vpn_gateway(
            Type='ipsec.1', AvailabilityZone=CONF.aws.aws_zone)
        vgw_id = data['VpnGateway']['VpnGatewayId']
        self.addResourceCleanUp(self.client.delete_vpn_gateway,
                                VpnGatewayId=vgw_id)
        self.get_vpn_gateway_waiter().wait_available(vgw_id)
        data = self.client.attach_vpn_gateway(VpnGatewayId=vgw_id,
                                              VpcId=self.vpc_id)
        attach_clean = self.addResourceCleanUp(
            self.client.detach_vpn_gateway,
            VpnGatewayId=vgw_id, VpcId=self.vpc_id)
        self.assertIn('VpcAttachment', data)
        self.assertEqual(self.vpc_id, data['VpcAttachment']['VpcId'])
        attach_waiter = self.get_vpn_gateway_attachment_waiter()
        attach_waiter.wait_available(vgw_id, 'attached')
        data = self.client.detach_vpn_gateway(VpnGatewayId=vgw_id,
                                              VpcId=self.vpc_id)
        self.cancelResourceCleanUp(attach_clean)
        attach_waiter.wait_delete(vgw_id)
        # Some deployments report an empty VpcAttachments list instead of an
        # entry in state 'detached'; treat both as detached.
        data = self.client.describe_vpn_gateways(VpnGatewayIds=[vgw_id])
        self.assertEqual(
            'detached',
            (data['VpnGateways'][0]['VpcAttachments'] or
             [{'State': 'detached'}])[0]['State'])
|
# Created by MechAviv
# [Athena Pierce] | [10200]
# Maple Road : Split Road of Destiny
# Bowman job introduction dialogue for Athena Pierce (NPC 10200).
sm.setSpeakerID(10200)
sm.sendNext("Bowmen are blessed with dexterity and power, taking charge of long-distance attacks, providing support for those at the front line of the battle. Very adept at using landscape as part of the arsenal.")
sm.setSpeakerID(10200)
if sm.sendAskYesNo("Would you like to experience what it's like to be a Bowman?"):
    # Player accepted: lock skills/controls for the trial and record the quest.
    sm.setTemporarySkillSet(0)
    sm.setDirectionMode(True, 0)
    sm.setStandAloneMode(True)
    sm.createQuestWithQRValue(32219, "4000026")
    # Unhandled Message [47] Packet: 2F 01 00 00 00 B0 83 08 00 00 00 00 00 2E 02 00 00 00 00 00 80 05 BB 46 E6 17 02 00 00
    # Warp the player into the trial map.
    sm.warp(1020300, 0)
else:
    sm.setSpeakerID(10200)
    sm.sendNext("If you wish to experience what it's like to be a Bowman, come see me again.")
|
from django.db import models
from django.conf import settings
from django.core.validators import FileExtensionValidator
from spectra.models import *
import os
from uuid import uuid4
from django.db import models
from django.core.files.storage import FileSystemStorage
class OverwriteStorage(FileSystemStorage):
    """Storage backend that replaces an existing file with the same name.

    Django's default storage deduplicates by mangling the new filename;
    here an upload whose name already exists simply overwrites the old file.
    """

    def _save(self, name, content):
        # Delete any previous file so the incoming content takes its name.
        if self.exists(name):
            self.delete(name)
        return super(OverwriteStorage, self)._save(name, content)

    def get_available_name(self, name, max_length=None):
        # The requested name is always "available" because _save overwrites.
        # BUG FIX: max_length now defaults to None to match Django's
        # Storage.get_available_name signature, which may call this method
        # without the argument.
        return name
class UserFile(models.Model):
    '''
    Spectra and metadata files uploaded by users.
    -- Owner is optional, which allows anonymous user uploads.
    '''
    # Uploading user; nullable so anonymous uploads are possible.
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete = models.CASCADE,
        blank = True,
        null = True)
    # The uploaded file. OverwriteStorage replaces an existing file with the
    # same name instead of renaming the new upload.
    file = models.FileField(
        # ~ upload_to = path_and_rename,
        upload_to = 'uploads/',
        validators = [
            FileExtensionValidator(
                allowed_extensions = ['mzml', 'mzxml', 'fid', 'csv'])
        ],
        storage = OverwriteStorage()
    )
    # File extension, stored separately (populated elsewhere, not derived here).
    extension = models.CharField(max_length = 255, blank = True)
    upload_date = models.DateTimeField(auto_now_add = True, blank = False)
    # Library the file belongs to; (file, library) pairs are unique (see Meta).
    library = models.ForeignKey('chat.Library', on_delete = models.CASCADE,
        blank = False, null = False)
    # ~ def replace_video(self):
    # ~ self.file.save(os.path.basename(self.file.path), File(open(video_path ,"wb")), save=True)
    # ~ os.remove(video_path)
    # ~ os.remove(old_path)
    #spectra = models.ManyToManyField('spectra.Spectra', blank = True)
    class Meta:
        unique_together= (('file', 'library'),)
    # ~ def extension(self):
    # ~ name, extension = os.path.splitext(self.file.name)
    # ~ return extension
|
import test_runner
import time
import math
import os
from odrive.enums import *
from test_runner import *
teensy_code_template = """
float position = 0; // between 0 and 1
float velocity = 1; // [position per second]
void setup() {
pinMode({pwm_gpio}, OUTPUT);
}
// the loop routine runs over and over again forever:
void loop() {
int high_microseconds = 1000 + (int)(position * 1000.0f);
digitalWrite({pwm_gpio}, HIGH);
delayMicroseconds(high_microseconds);
digitalWrite({pwm_gpio}, LOW);
// Wait for a total of 20ms.
// delayMicroseconds() only works well for values <= 16383
delayMicroseconds(10000 - high_microseconds);
delayMicroseconds(10000);
position += velocity * 0.02;
while (position > 1.0)
position -= 1.0;
}
"""
class TestPwmInput():
    """
    Verifies the PWM input.
    The Teensy generates a PWM signal that goes from 0% (1ms high) to 100% (2ms high)
    in 1 second and then resumes at 0%.
    Note: this test is currently only written for ODrive 3.6 (or similar GPIO layout).
    """
    def get_test_cases(self, testrig: TestRig):
        # Yield one case per PWM-capable GPIO; each tuple is
        # (gpio number, mapping min, mapping max, odrive gpio component).
        for odrive in testrig.get_components(ODriveComponent):
            if odrive.yaml['board-version'].startswith('v3.'):
                # Run a separate test for each PWM-capable GPIO. Use different min/max settings for each test.
                test_cases = [(1, -50, 200, odrive.gpio1),
                              (2, 20, 400, odrive.gpio2),
                              (3, -1000, 0, odrive.gpio3),
                              (4, -20000, 20000, odrive.gpio4)]
            elif odrive.yaml['board-version'].startswith('v4.'):
                # Run a separate test for each PWM-capable GPIO. Use different min/max settings for each test.
                test_cases = [(14, -50, 200, odrive.gpio14),
                              (19, 20, 400, odrive.gpio19),
                              (20, -20000, 20000, odrive.gpio20),
                              (21, -1000, 0, odrive.gpio21)]
            else:
                raise Exception(f"unknown board version {odrive.yaml['board-version']}")
            for test_case in test_cases:
                # Expand each case to every Teensy GPIO wired to this ODrive pin.
                yield AnyTestCase(*[(odrive,) + tuple(test_case[:-1]) + (teensy_gpio,tf,) for teensy_gpio, tf in testrig.get_connected_components(test_case[-1], TeensyGpio)])
    def run_test(self, odrive: ODriveComponent, odrive_gpio_num: int, min_val: float, max_val: float, teensy_gpio: Component, logger: Logger):
        # Program the Teensy to emit the sweeping PWM signal on the wired pin.
        teensy = teensy_gpio.parent
        code = teensy_code_template.replace("{pwm_gpio}", str(teensy_gpio.num))
        teensy.compile_and_program(code)
        logger.debug("Set up PWM input...")
        # Map the PWM input onto axis0's input_pos over [min_val, max_val].
        odrive.disable_mappings()
        setattr(odrive.handle.config, f'gpio{odrive_gpio_num}_mode', GPIO_MODE_PWM)
        pwm_mapping = getattr(odrive.handle.config, f'gpio{odrive_gpio_num}_pwm_mapping')
        pwm_mapping.endpoint = odrive.handle.axis0.controller._input_pos_property
        pwm_mapping.min = min_val
        pwm_mapping.max = max_val
        odrive.save_config_and_reboot()
        # Record input_pos for 5 s; it should trace a 1 s-period sawtooth
        # spanning the configured [min_val, max_val] range.
        data = record_log(lambda: [odrive.handle.axis0.controller.input_pos], duration=5.0)
        full_scale = max_val - min_val
        slope, offset, fitted_curve = fit_sawtooth(data, min_val, max_val)
        test_assert_eq(slope, full_scale / 1.0, accuracy=0.001)
        test_curve_fit(data, fitted_curve, max_mean_err = full_scale * 0.05, inlier_range = full_scale * 0.05, max_outliers = len(data[:,0]) * 0.01)
# Test list consumed by the test_runner harness.
tests = [TestPwmInput()]
if __name__ == '__main__':
    test_runner.run(tests)
|
# Copyright 2019 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Edward Chee <echee@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "echee@scalyr.com"
import os
import shutil
import tempfile
import datetime
import threading
from io import open
import scalyr_agent.util as scalyr_util
from scalyr_agent.builtin_monitors.docker_monitor import DockerMonitor
from scalyr_agent.builtin_monitors.docker_monitor import ContainerChecker
from scalyr_agent.builtin_monitors.docker_monitor import _get_containers
from scalyr_agent.test_base import ScalyrTestCase
from scalyr_agent.test_util import ScalyrTestUtils
from scalyr_agent.util import FakeClock, FakeClockCounter
import mock
from mock import patch
from mock import Mock
__all__ = ["DockerMonitorTest"]
# Paths to the container-log fixtures copied into the temp log dir by setUp().
BASE_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
FIXTURES_DIR = os.path.join(BASE_DIR, "../fixtures")
CONTAINER_LOG_FIXTURE_PATH_1 = os.path.join(
    FIXTURES_DIR, "docker_container_logs/docker-printer-stdout.log"
)
# Same fixture but with a deliberately corrupted timestamp on the last line.
CONTAINER_LOG_FIXTURE_PATH_2 = os.path.join(
    FIXTURES_DIR, "docker_container_logs/docker-printer-stdout-invalid-ts.log"
)
class ContainerCheckerTestCase(ScalyrTestCase):
    """Tests for ContainerChecker.__get_last_request_for_log()."""

    def setUp(self):
        super(ContainerCheckerTestCase, self).setUp()
        self._temp_data_dir = tempfile.mkdtemp()
        self._temp_log_dir = tempfile.mkdtemp()
        # BUG FIX: the scratch directories were leaked on every run; remove
        # them after each test (even on failure).
        self.addCleanup(shutil.rmtree, self._temp_data_dir, True)
        self.addCleanup(shutil.rmtree, self._temp_log_dir, True)
        shutil.copy(CONTAINER_LOG_FIXTURE_PATH_1, self._temp_log_dir)
        shutil.copy(CONTAINER_LOG_FIXTURE_PATH_2, self._temp_log_dir)

    @mock.patch("scalyr_agent.builtin_monitors.docker_monitor.DockerClient")
    def test_get_last_request_for_log(self, mock_docker_client):
        """The last-request time comes from the final log timestamp when one
        exists and falls back to the checker's start time otherwise."""
        mock_docker_client.containers.return_value = []
        monitor_config = {
            "module": "scalyr_agent.builtin_monitors.docker_monitor",
            "log_mode": "syslog",
            "readback_buffer_size": 5 * 1024,
        }
        container_checker = ContainerChecker(
            config=monitor_config,
            logger=mock.Mock(),
            socket_file=None,
            docker_api_version=None,
            host_hostname=None,
            data_path=self._temp_data_dir,
            log_path=self._temp_log_dir,
        )
        # We mock start time which is used in the calculation to make tests more stable
        mock_start_time = 12345
        mock_start_time_dt = datetime.datetime.utcfromtimestamp(mock_start_time)
        container_checker._ContainerChecker__start_time = mock_start_time
        # 1. Existing log file is not available, should use start_time timestamp
        expected_result = scalyr_util.seconds_since_epoch(mock_start_time_dt)
        result = container_checker._ContainerChecker__get_last_request_for_log(
            path="not.exist.123"
        )
        self.assertEqual(result, expected_result)
        # 2. Existing log file, but with no valid data / timestamp, should fall back to start_time
        file_name = "test.1"
        with open(os.path.join(self._temp_log_dir, file_name), "w") as fp:
            fp.write("mock data\n")
        expected_result = scalyr_util.seconds_since_epoch(mock_start_time_dt)
        result = container_checker._ContainerChecker__get_last_request_for_log(
            path=file_name
        )
        self.assertEqual(result, expected_result)
        # 3. File exists, but it contains invalid / corrupted timestamp
        file_name = "docker-printer-stdout-invalid-ts.log"
        expected_result = scalyr_util.seconds_since_epoch(mock_start_time_dt)
        result = container_checker._ContainerChecker__get_last_request_for_log(
            path=file_name
        )
        self.assertEqual(result, expected_result)
        # 4. Existing log file is always, should use time stamp from the last log line
        file_name = "docker-printer-stdout.log"
        # Last log line in that log file looks like this: "2020-10-27T17:18:12.878281177Z Line 291"
        start_time_dt = datetime.datetime(2020, 10, 27, 17, 18, 12, 878281)
        expected_result = scalyr_util.seconds_since_epoch(start_time_dt)
        result = container_checker._ContainerChecker__get_last_request_for_log(
            path=file_name
        )
        self.assertEqual(result, expected_result)
class DockerMonitorTest(ScalyrTestCase):
    """This test exercises various different user-agent fragments returned by DockerMonitor.
    It also captures the situation where the monitor is polled before it has obtained a version, in which case it must
    return a base fragment indicating docker but no version.
    Implementation: A MonitorsManager and DockerMonitor is started. But the docker api lib is completely mocked out.
    The _initialize() method is also mocked with a fake method
    """

    def parse_file_as_json(self, filename):
        """Read a fixture file and decode its contents as JSON."""
        result = {}
        with open(filename, "r") as f:
            content = f.read()
            result = scalyr_util.json_decode(content)
        return result

    def get_data_filename(self, name):
        """Return the absolute path of a fixture in the data/ directory."""
        base = os.path.dirname(os.path.realpath(__file__))
        return os.path.join(base, "data", name)

    def get_mock_docker_client(self, data_file):
        """Build a mock docker client whose containers() returns fixture data."""
        filename = self.get_data_filename(data_file)
        data = self.parse_file_as_json(filename)
        client = Mock()
        client.containers.return_value = data
        return client

    def test_get_containers_no_include_no_exclude(self):
        """Test get containers when no glob filters are applied"""
        expected_ids = [
            "87d533137e70601d17a4bf9d563b11d528ba81e31e2299eb8d4ab2ef5a54f0c0",
            "84afc8ee4d726544e77dcfe22c6f5be04f36b115ea99c0510d612666f83da1ce",
            "e511aaf76add81d41c1536b2a93a17448d9f9d3c6a3f8a5abab1d9a582c397fc",
            "cfcb7f7d1481c805f91fd6c8c120300a586978fa2541a58026bd930eeeda3e36",
            "b8757266fd6a9efcb40a2881215d8d3642e6f8a0cfdcbb941382d002f31dcca4",
            "343c05b55c1b0cebca6df0309a4892974c5ed3013731433461ffe7442223659a",
            "58ded582ebf14063bff3b15cd3962035be9904b73318796f1119d71472801d23",
            "c4eacccbf687f408c708b12fa875ab55f4d6d0aad8e2eec489ba9712e041bd14",
        ]
        client = self.get_mock_docker_client("containers-running.json")
        containers = _get_containers(client)
        self.assertEqual(len(expected_ids), len(containers))
        for cid in expected_ids:
            self.assertTrue(cid in containers)

    def test_get_containers_include_no_exclude(self):
        """Test get_containers when an include filter but no exclude filter is applied"""
        expected_ids = [
            "e511aaf76add81d41c1536b2a93a17448d9f9d3c6a3f8a5abab1d9a582c397fc",
            "cfcb7f7d1481c805f91fd6c8c120300a586978fa2541a58026bd930eeeda3e36",
            "58ded582ebf14063bff3b15cd3962035be9904b73318796f1119d71472801d23",
            "c4eacccbf687f408c708b12fa875ab55f4d6d0aad8e2eec489ba9712e041bd14",
        ]
        glob_list = {"include": ["*include1*", "*include2*"]}
        client = self.get_mock_docker_client("containers-running.json")
        containers = _get_containers(client, glob_list=glob_list)
        self.assertEqual(len(expected_ids), len(containers))
        for cid in expected_ids:
            self.assertTrue(cid in containers)

    def test_get_containers_no_include_exclude(self):
        """Test get_containers when no include filter is applied but an exclude filter is"""
        expected_ids = [
            "87d533137e70601d17a4bf9d563b11d528ba81e31e2299eb8d4ab2ef5a54f0c0",
            "84afc8ee4d726544e77dcfe22c6f5be04f36b115ea99c0510d612666f83da1ce",
            "b8757266fd6a9efcb40a2881215d8d3642e6f8a0cfdcbb941382d002f31dcca4",
            "343c05b55c1b0cebca6df0309a4892974c5ed3013731433461ffe7442223659a",
            "58ded582ebf14063bff3b15cd3962035be9904b73318796f1119d71472801d23",
            "c4eacccbf687f408c708b12fa875ab55f4d6d0aad8e2eec489ba9712e041bd14",
        ]
        glob_list = {"exclude": ["*include1-exclude*", "*include2-exclude*"]}
        client = self.get_mock_docker_client("containers-running.json")
        containers = _get_containers(client, glob_list=glob_list)
        self.assertEqual(len(expected_ids), len(containers))
        for cid in expected_ids:
            self.assertTrue(cid in containers)

    def test_get_containers_include_exclude(self):
        """Test get_containers when both an include and an exclude filter are applied"""
        expected_ids = [
            "b8757266fd6a9efcb40a2881215d8d3642e6f8a0cfdcbb941382d002f31dcca4",
            "343c05b55c1b0cebca6df0309a4892974c5ed3013731433461ffe7442223659a",
            "58ded582ebf14063bff3b15cd3962035be9904b73318796f1119d71472801d23",
            "c4eacccbf687f408c708b12fa875ab55f4d6d0aad8e2eec489ba9712e041bd14",
        ]
        glob_list = {"include": ["*include*"], "exclude": ["*exclude*"]}
        client = self.get_mock_docker_client("containers-running.json")
        containers = _get_containers(client, glob_list=glob_list)
        self.assertEqual(len(expected_ids), len(containers))
        for cid in expected_ids:
            self.assertTrue(cid in containers)

    @mock.patch("scalyr_agent.builtin_monitors.docker_monitor.docker")
    def test_user_agent_fragment(self, mocked_docker):
        def fake_init(self):
            """Simulate syslog mode (null container checker). Init the version variable and it's lock"""
            self._DockerMonitor__container_checker = None
            self._DockerMonitor__version_lock = threading.RLock()
            self._DockerMonitor__version = None
        with mock.patch.object(DockerMonitor, "_initialize", fake_init):
            manager_poll_interval = 30
            fake_clock = FakeClock()
            manager, _ = ScalyrTestUtils.create_test_monitors_manager(
                config_monitors=[
                    {
                        "module": "scalyr_agent.builtin_monitors.docker_monitor",
                        "log_mode": "syslog",
                    }
                ],
                extra_toplevel_config={
                    "user_agent_refresh_interval": manager_poll_interval
                },
                null_logger=True,
                fake_clock=fake_clock,
            )
            counter_lock = threading.Lock()
            fragment_polls = FakeClockCounter(fake_clock, num_waiters=2)
            counter = {"callback_invocations": 0}
            detected_fragment_changes = []
            # Mock the callback (that would normally be invoked on ScalyrClientSession
            def augment_user_agent(fragments):
                counter_lock.acquire()
                try:
                    counter["callback_invocations"] += 1
                    detected_fragment_changes.append(fragments[0])
                finally:
                    counter_lock.release()
            # Decorate the get_user_agent_fragment() function as follows:
            # Each invocation increments the FakeClockCounter
            # Simulate the following race condition:
            # 1. The first 10 polls by MonitorsManager is such that DockerMonitor has not yet started. Therefore,
            #    the docker version is None
            # 2. After the 20th poll, docker version is set
            # 3. After the 30th poll, docker mode changes to docker_api|raw
            # 4. After the 40th poll, docker mode changes to docker_api|api
            #
            # Note: (3) and (4) do not happen in real life. We force these config changes to test permutations
            # of user agent fragments for different config scenarios
            #
            # Total number of times the user_agent_callback is called should be twice:
            # - once for when docker version is None (fragment is 'docker=true')
            # - once for when docker version changes to a real number
            fake_docker_version = "18.09.2"
            docker_mon = manager.monitors[0]
            original_get_user_agent_fragment = docker_mon.get_user_agent_fragment
            original_monitor_config_get = docker_mon._config.get
            def fake_get_user_agent_fragment():
                result = original_get_user_agent_fragment()
                fragment_polls.increment()
                return result
            def fake_fetch_and_set_version():
                # Simulate slow-to-start DockerMonitor where version is set only after 10th poll by MonitorsManager
                # Thus, polls 0-9 return in version=None which ultimately translates to 'docker=true' fragment
                docker_mon._DockerMonitor__version_lock.acquire()
                try:
                    if fragment_polls.count() < 10:
                        docker_mon._DockerMonitor__version = None
                    else:
                        docker_mon._DockerMonitor__version = fake_docker_version
                finally:
                    docker_mon._DockerMonitor__version_lock.release()
            def fake_monitor_config_get(key):
                # Fake the return values from MonitorConfig.get in order to exercise different permutations of
                # user_agent fragment.
                if key == "log_mode":
                    if fragment_polls.count() < 20:
                        return "syslog"
                    else:
                        return "docker_api"
                elif key == "docker_raw_logs":
                    if fragment_polls.count() < 30:
                        return True
                    else:
                        return False
                else:
                    return original_monitor_config_get(key)
            @patch.object(docker_mon, "get_user_agent_fragment")
            @patch.object(docker_mon, "_fetch_and_set_version")
            @patch.object(docker_mon._config, "get")
            def start_test(m3, m2, m1):
                m1.side_effect = fake_get_user_agent_fragment
                m2.side_effect = fake_fetch_and_set_version
                m3.side_effect = fake_monitor_config_get
                manager.set_user_agent_augment_callback(augment_user_agent)
                manager.start_manager()
                fragment_polls.sleep_until_count_or_maxwait(
                    60, manager_poll_interval, maxwait=3.0
                )
                m1.assert_called()
                m2.assert_called()
                m3.assert_called()
                manager.stop_manager(wait_on_join=False)
                fake_clock.advance_time(increment_by=manager_poll_interval)
                # NOTE: assertEquals is a deprecated alias; use assertEqual.
                self.assertEqual(fragment_polls.count(), 60)
                self.assertEqual(counter["callback_invocations"], 4)
                self.assertEqual(
                    detected_fragment_changes,
                    [
                        "docker=true",
                        "docker=18.09.2|syslog",
                        "docker=18.09.2|docker_api|raw",
                        "docker=18.09.2|docker_api|api",
                    ],
                )
            start_test()  # pylint: disable=no-value-for-parameter
|
## https://leetcode.com/problems/climbing-stairs/
class Solution:
    def climbStairs(self, n: int) -> int:
        """Count the distinct ways to climb n steps taking 1 or 2 at a time."""
        memo = [0] * n
        return self.Climb_Stairs(0, n, memo)

    def Climb_Stairs(self, i, n, dp):
        """Ways to reach step n from step i, memoized in dp (0 = uncomputed)."""
        if i > n:
            return 0
        if i == n:
            return 1
        if dp[i] == 0:
            dp[i] = self.Climb_Stairs(i + 1, n, dp) + self.Climb_Stairs(i + 2, n, dp)
        return dp[i]
|
# Generated by Django 2.2.3 on 2019-07-30 10:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated second step of the song app's initial schema: adds the
    relational fields once both 'song' 0001 and the user model exist.

    NOTE(review): this migration has presumably been applied; do not edit the
    field definitions here -- make a new migration instead.
    """

    initial = True

    dependencies = [
        ('song', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='songmenu',
            name='creator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mysongmenus', to=settings.AUTH_USER_MODEL, verbose_name='所属用户'),
        ),
        migrations.AddField(
            model_name='songmenu',
            name='tags',
            field=models.ManyToManyField(db_constraint=False, default='华语', related_name='songmenus', to='song.SongMenuTag', verbose_name='标签'),
        ),
        # NOTE(review): null=True has no effect on ManyToManyField (Django
        # reports fields.W340); kept as generated.
        migrations.AddField(
            model_name='songmenu',
            name='user',
            field=models.ManyToManyField(blank=True, db_constraint=False, null=True, related_name='songmenus', to=settings.AUTH_USER_MODEL, verbose_name='收藏用户'),
        ),
        migrations.AddField(
            model_name='song',
            name='album',
            field=models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='songs', to='song.Album', verbose_name='所属专辑'),
        ),
        migrations.AddField(
            model_name='song',
            name='singer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='songs', to='song.Singer', verbose_name='所属歌手'),
        ),
        # NOTE(review): null=True is likewise a no-op on this ManyToManyField.
        migrations.AddField(
            model_name='song',
            name='songmenu',
            field=models.ManyToManyField(blank=True, db_constraint=False, null=True, related_name='songs', to='song.SongMenu', verbose_name='所属歌单'),
        ),
        migrations.AddField(
            model_name='singer',
            name='category',
            field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.DO_NOTHING, related_name='singers', to='song.SingerCategory', verbose_name='分类'),
        ),
        migrations.AddField(
            model_name='singer',
            name='user',
            field=models.OneToOneField(db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='singer', to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
        migrations.AddField(
            model_name='comment',
            name='menu',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='comments', to='song.SongMenu', verbose_name='被评论歌单'),
        ),
        migrations.AddField(
            model_name='comment',
            name='parent_comment',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='song.Comment'),
        ),
        migrations.AddField(
            model_name='comment',
            name='song',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='comments', to='song.Song', verbose_name='被评论歌曲'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.DO_NOTHING, related_name='comments', to=settings.AUTH_USER_MODEL, verbose_name='评论者'),
        ),
        migrations.AddField(
            model_name='album',
            name='singer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='albums', to='song.Singer', verbose_name='所属歌手'),
        ),
    ]
|
def success(x):
    """Wrap x as a successful computation: invokes the success continuation."""
    def run(k_continue, k_fail):
        return k_continue(x)
    return run
def error(x):
    """Wrap x as a failed computation: invokes the failure continuation."""
    def run(k_continue, k_fail):
        return k_fail(x)
    return run
def get_account(name):
    """Look up the account number for a name; fail for unknown names."""
    accounts = {"Irek": 1, "John": 2, "Alex": 3, "Nick": 1}
    if name in accounts:
        return success(accounts[name])
    return error("No account associated with name '%s'" % name)
def get_balance(account):
    """Look up the balance for an account; fail for unknown accounts."""
    balances = {1: 1000000, 2: 75000}
    if account in balances:
        return success(balances[account])
    return error("No balance associated with account #%s" % account)
def qualified_amount(balance):
    """Succeed with the balance when it qualifies for a loan (> 200000)."""
    if balance <= 200000:
        return error("Insufficient funds for loan, current balance is %s" % balance)
    return success(balance)
def bind(mval, mf):
    """Sequence mval into mf, threading the failure continuation through.

    If mval fails, mf is never invoked and the failure propagates directly.
    """
    def run(on_success, on_failure):
        return mval(lambda result: mf(result)(on_success, on_failure), on_failure)
    return run
def unit(x):
    """Monadic return: lift a plain value into a successful computation."""
    return success(x)
def get_loan(name):
    """Run the loan pipeline for a name and return a human-readable verdict."""
    # Chain the lookups in continuation-passing style: each step runs only if
    # the previous one succeeded; the first failure short-circuits straight
    # to the failure continuation.
    mval = bind(unit(name), lambda name:
    bind(get_account(name), lambda account:
    bind(get_balance(account), lambda balance:
    qualified_amount(balance))))
    on_qualified = lambda loan: "qualified for amount: %s" % loan
    on_disqualified = lambda why: "not qualified for loan, reason given: %s" % why
    return mval(on_qualified, on_disqualified)
# Demo: compute a loan verdict for each name and print one line per name.
names = ["Irek", "John", "Alex", "Fred"]
loans = map(get_loan, names)
for name, loan in zip(names, loans):
    # BUG FIX: `print "..."` is Python-2-only syntax; the parenthesized
    # single-argument form works on both Python 2 and 3.
    print("%s: %s" % (name, loan))
|
'''
Created on Feb 24, 2012
@author: mkiyer
'''
import logging
import argparse
import os
import subprocess
import xml.etree.ElementTree as etree
from base import check_executable, check_sam_file
# Process exit codes.
RETCODE_SUCCESS = 0
RETCODE_ERROR = 1
# Text shown by the argparse command-line interface.
DESCRIPTION = "Chimerascan2 chimeric transcript (gene fusion) discovery tool"
DEFAULT_OUTPUT_DIR = "chimerascan2_out"
CONFIG_XML_ROOT_NAME = "chimerascan2_config"
# library strand types
LIB_FR_UNSTRANDED = 0
LIB_FR_FIRSTSTRAND = 1
LIB_FR_SECONDSTRAND = 2
#def get_genome_orientation(r, library_type):
# if library_type == LibraryTypes.FR_FIRSTSTRAND:
# if r.is_read2:
# return OrientationTags.FIVEPRIME
# else:
# return OrientationTags.THREEPRIME
# elif library_type == LibraryTypes.FR_SECONDSTRAND:
# if r.is_read1:
# return OrientationTags.FIVEPRIME
# else:
# return OrientationTags.THREEPRIME
# return OrientationTags.NONE
def indent_xml(elem, level=0):
    """ in-place XML prettyprint formatter

    Recursively inserts newline/indent whitespace into each element's
    .text and .tail so etree.tostring() output is human-readable.
    Only whitespace-only (or empty) text/tail values are rewritten, so
    meaningful character data is preserved.
    """
    i = "\n" + level*"  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # note: after this loop `elem` is the LAST child, whose tail is
        # dedented back to the parent's level (standard effbot recipe)
        for elem in elem:
            indent_xml(elem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def get_command_line_parser():
    """Build and return the chimerascan2 argument parser.

    Bug fix: the original constructed the parser but fell off the end,
    so callers such as RunConfig.from_command_line() received None.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument("--config", dest="config_file", default=None,
                        help="Config file (XML format) with chimerascan "
                        "parameters")
    parser.add_argument("-o", "--output_dir", dest="output_dir",
                        default=DEFAULT_OUTPUT_DIR)
    parser.add_argument("bam_file")
    return parser
class RunConfig(object):
    """Holds the chimerascan2 run settings.

    Values are layered: built-in defaults, then an optional XML config file,
    then command-line arguments (highest precedence).
    """
    # (attribute name, default value) pairs; drives __init__/update_xml/to_xml
    attrs = (("velveth_bin", "velveth"),
             ("velvetg_bin", "velvetg"),
             ("samtools_bin", "samtools"),
             ("config_file", None),
             ("bam_file", None),
             ("output_dir", None))

    def __init__(self):
        for attrname, attrval in RunConfig.attrs:
            setattr(self, attrname, attrval)

    @staticmethod
    def from_command_line():
        """Build a RunConfig from sys.argv and return it.

        Bug fix: update_command_line() previously returned None, so this
        factory returned None as well.
        """
        parser = get_command_line_parser()
        config = RunConfig()
        return config.update_command_line(parser)

    def update_command_line(self, parser):
        """Apply parsed command-line arguments (and any --config XML file).

        Command-line values are applied after the XML file so they win.
        Returns self so calls can be chained.
        """
        args = parser.parse_args()
        if (args.config_file is not None):
            if not os.path.exists(args.config_file):
                parser.error("config file %s not found" % (args.config_file))
            self.update_xml(args.config_file)
        # Bug fix: the parsed arguments were previously never applied.
        self.config_file = args.config_file
        self.output_dir = args.output_dir
        self.bam_file = args.bam_file
        return self

    def update_xml(self, xmlfile):
        """Load settings from *xmlfile*; missing/empty tags keep the defaults."""
        tree = etree.parse(xmlfile)
        root = tree.getroot()
        for attrname, defaultval in RunConfig.attrs:
            attrval = root.findtext(attrname)
            # treat absent, empty, or literal "None" text as "use default"
            if (attrval is None) or (not attrval) or (attrval == "None"):
                attrval = defaultval
            setattr(self, attrname, attrval)

    def to_xml(self):
        """Serialize the non-None settings to a pretty-printed XML string."""
        root = etree.Element(CONFIG_XML_ROOT_NAME)
        for attrname, defaultval in RunConfig.attrs:
            val = getattr(self, attrname)
            if val is not None:
                elem = etree.SubElement(root, attrname)
                elem.text = str(val)
        # indent for pretty printing
        indent_xml(root)
        return etree.tostring(root)

    def is_valid(self):
        """Check binaries, required Python packages and the input BAM file.

        Returns True only when everything needed for a run is present;
        problems are logged, not raised.
        """
        valid = True
        # check third-party software programs
        for attrname in ("velveth_bin", "velvetg_bin", "samtools_bin"):
            prog = getattr(self, attrname)
            if check_executable(prog):
                logging.debug("Checking for '%s' binary... found" % prog)
            else:
                logging.error("'%s' binary not found or not executable" % prog)
                valid = False
        #
        # check third-party python packages
        #
        # check for bx python library
        # (bug fix: "except ImportError, e" is Python-2-only syntax)
        try:
            import bx.intervals.intersection
            logging.debug("Checking for 'bx python' library... found")
        except ImportError:
            logging.error("Package 'bx python' not found")
            valid = False
        # check for pysam library
        try:
            import pysam
            logging.debug("Checking for 'pysam' library... found")
        except ImportError:
            logging.error("Package 'pysam' not found")
            valid = False
        # check sorted abundant reads bam file
        if not check_sam_file(self.bam_file, isbam=True):
            logging.error("Input file %s missing/corrupt" % (self.bam_file))
            valid = False
        return valid
|
#!/usr/bin/env python
# coding: utf-8
# **Imports**
# In[ ]:
import numpy as np
import pandas as pd
import random as rnd
from os import *
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn as sk
get_ipython().magic(u'matplotlib inline')
import os
# **Data Input**
# In[ ]:
# Load the Kaggle Titanic train/test splits and show the available columns.
input_dir = "../input/"
train_df = pd.read_csv(input_dir + "train.csv")
test_df = pd.read_csv(input_dir + "test.csv")
for frame in (train_df, test_df):
    print(frame.columns)
# **Data Test / Variable Setting**
# In[ ]:
# Quick group-by summaries, then boolean masks over the training data that
# the rule-based predictors below combine.
myvars = ['Sex', 'Survived', 'Pclass']
print(train_df[myvars].groupby(['Pclass']).mean())
print(train_df[['Sex', 'Survived']].groupby(['Sex']).mean())
CHILD = train_df['Age'] < 15          # NaN ages compare False here
CREW = train_df['Fare'] == 0
ALONE = train_df['SibSp'] == 0
NOTALONE = train_df['SibSp'] > 0
ALLCLASS = train_df['Pclass']
C1 = train_df['Pclass'] == 1
C2 = train_df['Pclass'] == 2
C3 = train_df['Pclass'] == 3
SM = train_df['Sex'] =='male'
SF = train_df['Sex'] == 'female'
# Bug fix: '&' binds tighter than '<', so the original
# "train_df['Age'] < 15 & C1" evaluated "15 & C1" first and produced the
# wrong mask. The comparison must be parenthesized before combining.
CHILD1 = (train_df['Age'] < 15) & C1
CHILD2 = (train_df['Age'] < 15) & C2
CHILD3 = (train_df['Age'] < 15) & C3
FCHILD = CHILD & SF
MCHILD = CHILD & SM
# In[ ]:
# Earlier exploratory plots, kept commented out for reference.
#pclassgraph = sns.barplot(train_df['Pclass'], train_df['Survived']*100, hue = train_df['Sex'])
#plt.show()
#pclassgraph.set(xlabel='Passenger Class', ylabel='Percent Survived')
#plt.show()
#childrengraph = sns.barplot(train_df['Pclass'], train_df['Survived']*100, hue = Children)
#childrengraph.set(xlabel ='Passenger Class', ylabel='Percent Survived')
#plt.show()
# Survival by class, split by sibling count / female-alone status.
# NOTE(review): positional sns.barplot(x, y, hue=...) relies on an older
# seaborn signature; recent versions require keyword arguments -- confirm
# the pinned seaborn version before upgrading.
siblingsgraph = sns.barplot(train_df['Pclass'], train_df['Survived'], hue = train_df['SibSp'])
plt.show()
femalealonegraph = sns.barplot(train_df['Pclass'], train_df['Survived'], hue = ALONE & SF)
plt.show()
femalenotalonegraph = sns.barplot(train_df['Pclass'], train_df['Survived'], hue = NOTALONE & SF)
plt.show()
# In[ ]:
# Evaluate a few hand-made survival rules on the training data, then apply
# the best rule to the *test* set for the submission file.
pred_survived_train = SF
print(pd.crosstab(pred_survived_train, train_df['Survived']))
print(np.mean(pred_survived_train == train_df['Survived']))
pred_survived_train2 = ((C1 | C2) & SF) | (CHILD == True)
print(pd.crosstab(pred_survived_train2, train_df['Survived']))
print(np.mean(pred_survived_train2 == train_df['Survived'])*100)
pred_survived_train3 = ((C1 | C2) & SF) | (CHILD == True)
print(pd.crosstab(pred_survived_train3, train_df['Survived']))
print(np.mean(pred_survived_train3 == train_df['Survived'])*100)
pred_survived_train4 = ((C1 | C2) & SF) | (CHILD == True) | (C3 & ALONE & SF)
print(pd.crosstab(pred_survived_train4, train_df['Survived']))
print(np.mean(pred_survived_train4 == train_df['Survived'])*100)
# Bug fix: the original assigned the TRAIN-set predictions to test_df, whose
# row count differs, so the submission was wrong. Rebuild the same rule
# (rule 4) from the test-set columns instead.
T_SF = test_df['Sex'] == 'female'
T_CHILD = test_df['Age'] < 15
T_C12 = (test_df['Pclass'] == 1) | (test_df['Pclass'] == 2)
T_C3_ALONE_F = (test_df['Pclass'] == 3) & (test_df['SibSp'] == 0) & T_SF
pred_survived_test = (T_C12 & T_SF) | T_CHILD | T_C3_ALONE_F
test_df['Survived'] = pred_survived_test.astype(int)
# Write the Kaggle submission: one row per test passenger.
submission = test_df[['PassengerId', 'Survived']]
submission.to_csv("sub5.csv", index = None)
|
#!/usr/bin/env python
test_numbers = [1, 4, 5, 7, 2, 3, 6, 8, 9]
def merge_sort(input_numbers):
    """Return a new list with the contents of *input_numbers* in ascending order.

    Bug fix: the original recursed on the two halves but discarded both the
    recursive results and the merged result, and returned None -- so it never
    actually sorted anything. It now returns the merged, sorted list.
    """
    if len(input_numbers) <= 1:
        # base case: 0 or 1 items are already sorted; return a copy
        return list(input_numbers)
    mid = len(input_numbers) // 2  # // keeps the index an int on Python 3
    left = merge_sort(input_numbers[:mid])
    right = merge_sort(input_numbers[mid:])
    return _merge(left, right)

def _merge(list1, list2):
    """Merge two already-sorted lists into one sorted list (stable)."""
    merged = []
    i = j = 0
    # repeatedly take the smaller head element
    while i < len(list1) and j < len(list2):
        if list1[i] <= list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # one of these is empty; extend with whatever remains of the other
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
def sort(list1, list2):
    """Merge two sorted lists into a single sorted list and return it.

    Fix: the debug prints used the Python-2-only print statement; they are
    now written as calls, which behave identically on Python 2 and 3.
    """
    print("sort() %s %s" % (list1, list2))
    return_list = []
    i = 0
    j = 0
    # compare at beginning of the lists, taking the smaller head each time
    while i < len(list1) and j < len(list2):
        if (list1[i] <= list2[j]):
            return_list.append(list1[i])
            i += 1
        else:
            return_list.append(list2[j])
            j += 1
    # just stick on the rest of list1
    while i < len(list1):
        return_list.append(list1[i])
        i += 1
    # just stick on the rest of list2
    while j < len(list2):
        return_list.append(list2[j])
        j += 1
    print("return_list %s " % return_list)
    return return_list
merge_sort(test_numbers)
|
from tools.RedisToSession import Session
import requests
# Shared configuration for the China Mobile (10086.cn) scraping helpers:
# session objects, user agents, endpoint URLs and message-code maps.
constant = {
    # token expiry time, in seconds
    'expireTime': 60 * 60,
    'mySession': Session(),
    # mobile-browser user agent
    'mobileUA': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',
    # desktop-browser user agent
    'PCUA': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
    # shared requests session (Flask's own session is stored client-side)
    'netSession': requests.session(),
    # extra parameters merged into assembled requests
    'otherParams': {
        'channelID': '12014',
        'type': '01'
    },
    # default request headers
    'headers': {
        'accept': "application/json, text/javascript, */*; q=0.01",
        'accept-encoding': 'gzip,deflate,br',
        'accept-language': 'zh-CN, zh;q = 0.8',
        'Connection': 'keep-alive',
        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',
        #'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',
        'referer': 'https://login.10086.cn/html/login/touch.html',
        'x-requested-With': 'XMLHttpRequest',
        'cache-control': "no-cache",
        'Upgrade-Insecure-Requests': '1',
    },
    # all China Mobile API endpoints used by this project
    'urls': {
        'getRdmdAndCaptchaCode': 'https://login.10086.cn/captchazh.htm?type=05',
        'checkNum': 'https://login.10086.cn/chkNumberAction.action',
        'sendRequestForVerifyTextCode': 'https://login.10086.cn/sendRandomCodeAction.action',
        'getNumArea': 'http://touch.10086.cn/i/v1/res/numarea/',
        'getMeal': 'http://touch.10086.cn/i/v1/busi/plan/',
        'getPersonInfo': 'http://touch.10086.cn/i/v1/cust/info/',
        'getArtifact': 'https://login.10086.cn/login.htm',
        'getTHXDData': 'https://shop.10086.cn/i/v1/fee/detailbillinfojsonp/',
        'sendTemporaryIDRandomCode': 'https://shop.10086.cn/i/v1/fee/detbillrandomcodejsonp/',
        'sendTemporaryIDRandomImage': 'http://shop.10086.cn/i/authImg',
        'authTemporaryID': 'https://shop.10086.cn/i/v1/fee/detailbilltempidentjsonp/',
        'quitQuery': 'http://shop.10086.cn/i/v1/auth/userlogout',
        'getPaymentRecords': 'http://shop.10086.cn/i/v1/cust/his/'
    },
    # this project's error-code map (values are Chinese display strings)
    'errorCode': {
        '100000': u'参数错误',
        '100001': u'非移动电话号码',
        '100002': u'验证码发送失败',
        '100003': u'获得assertAcceptURL,artifact失败',
        '100004': u'没有登录信息',
        '100005': u'cookies获取不全',
        '100006': u'无有效用户名',
        '100007': u'未登录,请完成之前登录步骤',
        '100008': u'rd和cc的session未写入',
        '100009': u'个人信息获取失败',
        '100010': u'号码信息获取失败',
        '100011': u'临时身份认证失败',
        '100012': u'短信验证码与图片验证码发送失败',
        '100013': u'获取通话详单失败',
        '100014': u'无有效服务密码',
        '100015': u'用户名或密码错误,请核实后重新输入',
        '100016': u'服务器错误',
        '100017': u'token验证失败',
        '100018': u'获取缴费记录失败',
        '100019': u'未知错误',
        '100020': u'验证图片发送失败',
        '100021': u'套餐信息获取失败'
    },
    # success-code map (values are Chinese display strings)
    'successCode': {
        '110001': u'发送成功,请等待接收',
        '110002': u'认证成功',
        '110003': u'获取成功',
        '110004': u'临时身份认证成功',
        '110005': u'短信验证码与图片验证码发送完毕,如未收到,请稍后刷新本页面',
        '110006': u'获取通话详单成功',
        '110007': u'获取token成功',
        '110008': u'获取缴费记录成功',
        '110009': u'token已存在',
        '110010': u'验证图片发送成功',
        '110011': u'验证码发送成功,如未收到请稍后再试',
        '110012': u'退出成功'
    },
}
# Code -> Chinese display-string lookups for China Mobile account fields.
ydMap = {
    # subscriber service status
    'status': {
        '00': '正常',
        '01': '单向停机',
        '02': '停机',
        '03': '预销户 ',
        '04': '销户',
        '05': '过户',
        '06': '改号',
        '99': '此号码不存在',
    },
    # customer (VIP) level
    'level': {
        '000': '保留',
        '100': '普通客户',
        '300': '普通大客户',
        '301': '钻石卡大客户',
        '302': '金卡大客户',
        '303': '银卡大客户',
        '304': '贵宾卡大客户',
    },
    # real-name registration state
    'realNameInfo': {
        '1': '未登记',
        '2': '已登记',
        '3': '已审核',
    },
    # star rating of the account
    'starLevel': {
        '0': '0星级用户',
        '1': '1星级用户',
        '2': '2星级用户',
        '3': '3星级用户',
        '4': '4星级用户',
        '5': '5星级用户',
        '6': '五星金',
        '7': '五星钻',
    },
    # payment method
    'payType': {
        '01': '现金交费',
        '02': '充值卡充值',
        '03': '银行托收',
        '04': '营销活动预存受理',
        '05': '积分换话费业务受理',
        '06': '第三方支付',
        '07': '手机钱包',
        '08': '空中充值',
        '09': '代理商渠道办理',
        '10': '批量冲销',
        '11': '调账',
        '12': '其他',
    },
    # payment channel
    'payChannel': {
        '01': '营业厅',
        '02': '网上营业厅',
        '03': '掌上营业厅',
        '04': '短信营业厅',
        '05': '手机营业厅',
        '06': '第三方支付',
        '07': '银行',
        '08': '空中充值',
        '09': '移动商城',
        '99': '其他',
    },
}
|
"""
/api/views/legoset.py
Views for /legoset
"""
import re
from flask import Blueprint, jsonify, request
from api.controllers.auth import authenticate, verify_admin
from api.controllers.legoset import search, update_stock_levels
from api.errors import FlaskError, exception_json_response
blueprint = Blueprint('legoset', __name__)
@blueprint.route('/legoset/search/<id>', methods=['GET'])
def find_legoset_view(id):
    """
    Return json repr of LegoSet
    :param id: LEGO set id
    :rtype: json
    :raises: ValueError
    :raises: FlaskError
    """
    try:
        # Reject anything that is not a 5-7 digit set number.
        if re.match(r'^\d{5,7}$', id) is None:
            raise ValueError
        authenticate(request.args.get('token'))
        return jsonify({'result': search(int(id))})
    except ValueError:
        bad_query = FlaskError('Please supply a valid query (a 5 to 7 digit integer)', status_code=400)
        return bad_query.json_response()
    except FlaskError as flask_err:
        return flask_err.json_response()
@blueprint.route('/legoset/update', methods=['POST'])
def update():
    """
    Refresh stock levels; called by an AWS Lambda every 6 hours
    (30 datapoints therefore cover roughly one week of stock data).
    :returns: json result - 'success' or 'unauthorized'
    :rtype: json
    """
    payload = request.get_json(force=True)
    if not verify_admin(payload['token']):
        return jsonify({'result': 'unauthorized'})
    update_stock_levels()
    return jsonify({'result': 'success'})
|
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Conv1D, MaxPooling1D
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
def join_windows(x, y, mode = 'alternate'):
    """Combine two equally-shaped window matrices into one CNN input matrix.

    Bug fix: the original returned the undefined name ``clasS`` (NameError)
    and never filled ``cnn_input`` or built the class vector.

    Args:
        x: array [n_windows, n_samples] of class-0 windows (e.g. rest).
        y: array [n_windows, n_samples] of class-1 windows (e.g. finger).
        mode: 'alternate' interleaves x/y rows; any other value stacks
            all x rows first, then all y rows.

    Returns:
        (cnn_input, classes): inputs of shape [2*n_windows, n_samples] and a
        0/1 label vector of length 2*n_windows aligned with the rows.
    """
    n_windows, n_samples = x.shape
    cnn_input = np.zeros((2 * n_windows, n_samples))
    classes = np.zeros(2 * n_windows)
    if mode == 'alternate':
        # even rows come from x (class 0), odd rows from y (class 1)
        cnn_input[0::2] = x
        cnn_input[1::2] = y
        classes[1::2] = 1
    else:
        # stacked: first all of x, then all of y
        cnn_input[:n_windows] = x
        cnn_input[n_windows:] = y
        classes[n_windows:] = 1
    return cnn_input, classes
def convert_windows_to_cnn_inputs(windows_rest, windows_finger, test_ratio = 0.7):
    """Split rest/finger windows into training and test CNN inputs.

    NOTE(review): despite its name, ``test_ratio`` is used as the TRAINING
    fraction here -- confirm with callers before renaming.
    """
    n_windows, _ = windows_rest.shape
    n_train = int(test_ratio * n_windows)
    x_training, y_training = join_windows(windows_rest[:n_train, :],
                                          windows_finger[:n_train, :])
    x_test, y_test = join_windows(windows_rest[n_train:, :],
                                  windows_finger[n_train:, :])
    return x_training, y_training, x_test, y_test
def generate_conv_neural_network(x_training, y_training, x_test, y_test):
    """Build, compile and fit a tiny 1-D CNN classifier.

    Bug fix: the original fit/validation call referenced the undefined names
    ``predictors_training``/``class_training``/``predictors_test``/``class_test``
    instead of the function's own parameters, raising NameError at runtime.

    Returns the fitted model so callers can evaluate or reuse it.
    """
    c = Sequential()
    # input_shape [2, 1]: two interleaved samples, one channel
    c.add(Conv1D(filters = 4, kernel_size = 1, activation = 'relu', input_shape = [2, 1]))
    c.add(MaxPooling1D(1))
    c.add(Flatten())
    c.add(Dense(units = 2, activation = 'softmax'))
    c.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    c.fit(x_training, y_training, batch_size = 128,
          epochs = 5, validation_data = (x_test, y_test))
    return c
|
import asyncio
import websockets
from websockets.exceptions import ConnectionClosedError
from processor import Processor
from flask import Flask, render_template, request, Response
from queue import LifoQueue
import threading
import json
import time
class Brain():
    """Per-channel state for the websocket frame pipeline.

    Tracks, keyed by channel name: a LIFO queue of processed frames, a
    Processor, a mutable run-flag list shared with the worker thread, and
    the time of the last message.
    """
    def __init__(self):
        self.channels = {}     # channel -> LifoQueue of processed frames
        self.processors = {}   # channel -> Processor instance
        self.running = {}      # channel -> [bool] flag shared with the worker
        self.last_visit = {}   # channel -> unix time of the last message

    def clean_disconnected_clients(self):
        """Drop all state for channels silent for more than 5 seconds.

        Bug fix: the original iterated ``dict(self.last_visit.items())``
        directly, which yields only the KEYS, so the two-name unpacking
        raised ValueError. Iterate a snapshot of the items instead, so
        entries can be deleted while scanning.
        """
        for channel, last_visit in list(self.last_visit.items()):
            if time.time() - last_visit > 5:
                del self.last_visit[channel]
                del self.running[channel]
                del self.channels[channel]
                del self.processors[channel]

    async def consumer_handler(self, websocket, path):
        """Receive JSON frames from an eye client and dispatch each to its
        channel's Processor on a worker thread (skipping frames while the
        previous one is still being processed)."""
        try:
            async for message in websocket:
                data = json.loads(message)
                channel = data['channel']
                # drop this frame if the channel's worker is still busy
                if channel in self.running and self.running[channel][0]:
                    continue
                if channel not in self.channels:
                    # first frame from this channel: create its state lazily
                    self.channels[channel] = LifoQueue(-1)
                    self.processors[channel] = Processor()
                self.running[channel] = [True]
                self.last_visit[channel] = time.time()
                t = threading.Thread(target=self.processors[channel].process, args=(self.running[channel], self.channels[channel], data['frame']))
                t.start()
                t.join()
        except ConnectionClosedError:
            print('Connection closed for an eye client')
            self.clean_disconnected_clients()
# Single module-level Brain shared by the websocket server and the HTTP routes.
brain = Brain()
def start_ws(ip='0.0.0.0', port=8889):
    """Serve the shared brain's consumer_handler over websockets, forever."""
    loop = asyncio.get_event_loop()
    loop.run_until_complete(websockets.serve(brain.consumer_handler, ip, port))
    loop.run_forever()
import logging
# Quiet werkzeug's per-request access logging.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'  # NOTE(review): hard-coded secret -- replace for production
@app.route('/eye/<channel>')
def eye(channel):
    # Serve the camera-client page. The channel parameter is unused
    # server-side; presumably eye.html reads it from the URL -- confirm.
    return render_template('eye.html')
@app.route('/')
def display():
    # Serve the viewer page that embeds the MJPEG /video_feed streams.
    return render_template('display.html')
@app.route('/channels')
def channels():
    # List the currently-known channel names as a JSON array.
    return json.dumps(list(brain.running.keys()))
def gen(channel):
    """Yield the newest processed JPEG frame for *channel* as multipart
    MJPEG parts, forever.

    NOTE(review): while the channel has no queue yet, this loop spins
    without sleeping (busy-wait) -- consider a short sleep; confirm intended.
    """
    while True:
        if channel not in brain.channels:
            continue
        processed_results = brain.channels[channel]
        # Take the newest frame (LIFO), then drop any stale backlog.
        frame = processed_results.get()
        with processed_results.mutex:
            processed_results.queue.clear()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
    # MJPEG stream endpoint; requires a ?channel=<name> query parameter.
    channel = request.args.get('channel')
    if not channel:
        # NOTE(review): returning None makes Flask raise a 500; an explicit
        # 400 response would be clearer -- confirm intended.
        return
    return Response(gen(channel),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    print("running")
    # Run the Flask HTTP server on 8888 in a background thread, then block
    # the main thread serving websockets on 8889.
    http = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0', 'port': 8888})
    http.start()
    http.join(0.1)  # brief join only; does not wait for the server to exit
    start_ws('0.0.0.0', 8889)
|
from helpers.db import Db
class RetrieveCompanyData:
    """Fetches and reshapes the headcount figures for the django project.

    Attributes:
        db (Db): headcount Db instance
        query (str): text of the last executed query
    """

    def __init__(self):
        self.db = Db('company_headcount/headcount.db')
        self.query = None

    def main(self):
        """Fetch the rows once and derive both result shapes from them.

        Returns:
            tuple: (list of months, dict mapping company -> monthly headcounts)
        """
        rows = self.retrieve_data()
        return self.parse_months(rows), self.parse_company_data(rows)

    def retrieve_data(self):
        """Run the three-table join and return every company/month/headcount row."""
        self.query = """
        select c.company, h.month, h.headcount from headcount h
        join company_headcount ch on ch.headcount_id = h.id
        join company c on ch.company_id = c.id"""
        return self.db.fetchall(self.query)

    def parse_months(self, data):
        """Return the distinct months of *data* in first-appearance order.

        Arguments:
            data (list): rows as dictionaries with a 'month' key.
        """
        return list(dict.fromkeys(row['month'] for row in data))

    def parse_company_data(self, data):
        """Group the distinct headcount values by capitalized company name.

        Arguments:
            data (list): rows as dictionaries with 'company' and 'headcount' keys.

        Returns:
            dict: company name -> {'headcount': [distinct values in order]}
        """
        company_data = {}
        for row in data:
            counts = company_data.setdefault(row['company'].capitalize(),
                                             {'headcount': []})['headcount']
            if row['headcount'] not in counts:
                counts.append(row['headcount'])
        return company_data
|
import pickle
# Fix: open the pickle inside a context manager so the handle is closed
# deterministically (the original leaked the open file object).
with open('data.p', 'rb') as f:
    data = pickle.load(f)
print("HELLO FROM load_data.py", data)
x = input()
# Accept exactly the codes the original 28-branch if/elif ladder accepted:
# 01, 02, the contiguous run 20-40, and 51, 52, 53, 55, 58.
VALID_CODES = {'01', '02', '51', '52', '53', '55', '58'} | {str(n) for n in range(20, 41)}
print('OK' if x in VALID_CODES else 'Error')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import json
import requests
from pymongo import MongoClient
from InstagramAPI import InstagramAPI
# TODO: Fill in username and password
# NOTE(review): this rebinding shadows the imported InstagramAPI class.
InstagramAPI = InstagramAPI("<username>", "<password>")
InstagramAPI.login()
# Accounts whose 2016 activity will be summarized.
users = [
    'happycatclub',
    'yiyun.zhu',
    'dd_max_',
]
# Public profile endpoint returning account JSON (used to resolve user ids).
url_get_user_id = 'https://www.instagram.com/{0}/?__a=1'
# Since 2016-01-01 00:00:00
min_timestamp = '1451606400'
# Until 2016-12-31 23:59:59
max_timestamp = '1483228799'
# MongoDB: summaries are written to instagram.summary on localhost.
client = MongoClient("localhost", 27017, connect=False)
db = client.instagram
summary = db.summary
# For each account: resolve its numeric id, fetch all 2016 posts, aggregate
# engagement counters, then persist one summary document per user.
for user in users:
    message = 'Fetching ' + user
    content = json.loads(requests.get(url_get_user_id.format(user)).content)
    user_id = str(content.get('user').get('id'))
    user_feeds = InstagramAPI.getTotalUserFeed(user_id,
                                               minTimestamp=min_timestamp,
                                               maxTimestamp=max_timestamp,
                                               message=message)
    feeds = len(user_feeds)
    likes = 0
    comments = 0
    reviews = 0
    # Sum per-post counters; missing keys count as zero.
    for feed in user_feeds:
        likes += feed.get('like_count', 0)
        comments += feed.get('comment_count', 0)
        reviews += int(feed.get('view_count', 0))
    print 'feeds: ' + str(feeds) \
          + ', likes: ' + str(likes) \
          + ', comments: ' + str(comments) \
          + ', reviews: ' + str(reviews)
    # Restore data
    # NOTE(review): Collection.insert is deprecated in modern pymongo
    # (insert_one) -- confirm the pinned pymongo version.
    summary.insert({
        'user': user,
        'feeds': feeds,
        'likes': likes,
        'comments': comments,
        'reviews': reviews,
    })
|
# -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
from .models import Chart
class ChartDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single Chart; login required."""
    model = Chart
    # Look charts up by their title (not pk) in URLs.
    slug_field = "title"
    slug_url_kwarg = "title"
    def get_context_data(self, **kwargs):
        context = super(ChartDetailView, self).get_context_data(**kwargs)
        # Add jwt with single title to context here.
        return context
class ChartListView(LoginRequiredMixin, ListView):
    """List page for all Charts; login required."""
    model = Chart
    def get_context_data(self, **kwargs):
        context = super(ChartListView, self).get_context_data(**kwargs)
        # Add jwt with all titles to context here.
        return context
|
import pygame
from jeu.fleche import Fleches
from jeu.joueur import Joueur
class Gestion:
    """Game session state: score, settings, and the player/arrow objects.

    Bug fix: the original ``class Gestion`` line was missing the trailing
    colon, which is a SyntaxError.
    """

    def __init__(self):
        self.score = 10
        self.appreciation = ""
        self.vitesse = 9.81       # speed value; unit/usage not visible here -- confirm
        self.mode = 2
        self.musique = ".mp3"
        self.rythme = 70.5
        self.page = 3
        self.nbjoueur = 1
        self.joueurs = Joueur()
        self.fleches = Fleches()

    def afficher(self):
        """Render the screen (not yet implemented)."""
        pass

    def naviguer(self):
        """Handle menu navigation (not yet implemented)."""
        pass
|
import os
from collections import defaultdict
from datetime import date
import shutil
# Class object that can hold a file's path and contents, as well as provides a method for returning
# the contents of the file with a header and footer
class TextFile:
    """A text file's name/location plus helpers to read and frame its contents."""

    def __init__(self, file_name, directory):
        self.file_name = file_name
        self.directory = directory
        self.file_path = directory + os.sep + file_name

    def return_file(self):
        """Return an open read handle; the CALLER is responsible for closing it."""
        return open(self.file_path, 'r')

    def return_text(self):
        """Return the file's full contents.

        Bug fix: the original left the file handle open; a context manager
        now closes it deterministically.
        """
        with open(self.file_path, 'r') as f:
            return f.read()

    def insert_file(self):
        """Return the file's text wrapped in header/footer marker lines."""
        return ('----------{file_path}----------'
                '\n'
                '{file_text}'
                '\n'
                '----------{file_path}----------'
                '\n').format(file_path=self.file_path, file_text=self.return_text())

    def __repr__(self):
        return 'file at : ' + self.file_path
# Generates a timestamped directory path to place today's files in.
# Generates a timestamped directory path to place today's files in.
def today_archive_path(directory):
    """Return directory/<YYYYMMDD> for today's date (the path is not created)."""
    stamp = date.today().strftime('%Y%m%d')
    return directory + os.sep + stamp
# Moves all files from one directory to another.
# Moves all files from one directory to another.
def move_files(source_dir, target_dir):
    """Relocate every entry of *source_dir* into *target_dir*."""
    for entry in os.listdir(source_dir):
        shutil.move(os.path.join(source_dir, entry), target_dir)
# Copies all files from one directory to another.
# Copies all files from one directory to another.
def copy_files(source_dir, target_dir):
    """Copy every entry of *source_dir* into *target_dir* (originals kept)."""
    for entry in os.listdir(source_dir):
        shutil.copy(os.path.join(source_dir, entry), target_dir)
# Walks through all subdirectories of a directory and returns an object with the files within grouped by name.
# Walks through all subdirectories of a directory and returns an object with the files within grouped by name.
def extract_directory(directory):
    """Recursively collect TextFile objects under *directory*, grouped by file name."""
    grouped = defaultdict(list)
    for current_dir, _subdirs, names in os.walk(directory, topdown=True):
        for name in names:
            grouped[name].append(TextFile(name, current_dir))
    return grouped
# Writes all files to the specified directory
# Writes all files to the specified directory
def write_files(compiled_files, directory):
    """For each group, write the newline-joined framed contents to directory/<name>."""
    for file_name, files in compiled_files.items():
        text = '\n'.join(f.insert_file() for f in files)
        with open(directory + os.sep + file_name, "w") as out:
            out.write(text)
# Daily rotation: archive today's files, promote tomorrow's files to today,
# re-seed tomorrow from the template, then rebuild the combined archive.
if __name__ == '__main__':
    # e.g. 'daily archive/20240131'; os.mkdir fails if run twice on one day
    today_path = today_archive_path('daily archive')
    os.mkdir(today_path)
    move_files('today', today_path)
    move_files('tomorrow', 'today')
    copy_files('template', 'tomorrow')
    compiled_files = extract_directory('daily archive')
    write_files(compiled_files, 'archive compiled')
|
import numpy as np
import sys
"""This script implements a two-class logistic regression model.
"""
class logistic_regression(object):
    """Two-class logistic regression with +1/-1 labels.

    The model is P(y=1|x) = sigmoid(W.x); weights can be trained by full
    gradient descent (fit_GD), mini-batch gradient descent (fit_BGD) or
    stochastic gradient descent (fit_SGD).
    """

    def __init__(self, learning_rate, max_iter):
        self.learning_rate = learning_rate  # step size for each weight update
        self.max_iter = max_iter            # number of epochs / SGD steps

    def fit_GD(self, X, y):
        """Train with full-batch gradient descent.

        Args:
            X: An array of shape [n_samples, n_features].
            y: An array of shape [n_samples,]. Only contains 1 or -1.

        Returns:
            self: Returns an instance of self.
        """
        n_samples, n_features = X.shape
        self.assign_weights(np.zeros(n_features))
        for _ in range(self.max_iter):
            # average negative gradient over the whole training set
            w_add = np.zeros(n_features)
            for j in range(n_samples):
                w_add += self.learning_rate * (-self._gradient(X[j], y[j]))
            self.W += w_add / n_samples
        return self

    def fit_BGD(self, X, y, batch_size):
        """Train with mini-batch gradient descent.

        Bug fixes vs. the original:
        * the fallback for batch_size > n_samples built indices with
          np.linspace, which yields floats (invalid indices) and skips
          samples; np.arange(n_samples) is used instead
        * the averaged gradient is divided by the actual batch size,
          not the full sample count

        Args:
            X: An array of shape [n_samples, n_features].
            y: An array of shape [n_samples,]. Only contains 1 or -1.
            batch_size: An integer.

        Returns:
            self: Returns an instance of self.
        """
        n_samples, n_features = X.shape
        self.assign_weights(np.zeros(n_features))
        for _ in range(self.max_iter):
            if batch_size <= n_samples:
                # sample (with replacement) a random mini-batch of indices
                mini_batch = np.random.choice(n_samples, batch_size)
            else:
                # batch larger than the data: fall back to the full set
                mini_batch = np.arange(n_samples)
            w_add = np.zeros(n_features)
            for j in mini_batch:
                w_add += self.learning_rate * (-self._gradient(X[j], y[j]))
            self.W += w_add / len(mini_batch)
        return self

    def fit_SGD(self, X, y):
        """Train with stochastic gradient descent (one random sample per step).

        Args:
            X: An array of shape [n_samples, n_features].
            y: An array of shape [n_samples,]. Only contains 1 or -1.

        Returns:
            self: Returns an instance of self.
        """
        n_samples, n_features = X.shape
        self.assign_weights(np.zeros(n_features))
        for _ in range(self.max_iter):
            j = np.random.randint(n_samples)
            self.W += self.learning_rate * (-self._gradient(X[j], y[j]))
        return self

    def _gradient(self, _x, _y):
        """Gradient of the cross-entropy loss w.r.t. self.W for one sample.

        Args:
            _x: An array of shape [n_features,].
            _y: An integer. 1 or -1.

        Returns:
            _g: An array of shape [n_features,].
        """
        # d/dW of log(1 + exp(-y W.x)) = -y x sigmoid(-y W.x)
        c_exp = np.exp(-_y * np.dot(self.W, _x))
        return c_exp / (1 + c_exp) * (-_y) * _x

    def get_params(self):
        """Return the weight vector W (shape [n_features,]); exits if unfitted."""
        if self.W is None:
            print("Run fit first!")
            sys.exit(-1)
        return self.W

    def predict_proba(self, X):
        """Predict class probabilities for samples in X.

        Args:
            X: An array of shape [n_samples, n_features].

        Returns:
            preds_proba: array of shape [n_samples, 2]; column 0 is
            P(y=+1), column 1 is P(y=-1). Rows sum to 1.
        """
        n_samples = X.shape[0]
        preds_proba = np.zeros((n_samples, 2))
        _s = np.matmul(X, self.W)
        _logit = 1 / (1 + np.exp(-_s))   # sigmoid of the decision scores
        preds_proba[:, 0] = _logit
        preds_proba[:, 1] = 1 - _logit
        return preds_proba

    def predict(self, X):
        """Predict class labels (+1/-1) for samples in X; ties at 0.5 go to +1.

        Args:
            X: An array of shape [n_samples, n_features].

        Returns:
            preds: An array of shape [n_samples,]. Only contains 1 or -1.
        """
        n_samples = X.shape[0]
        preds = np.ones(n_samples)
        _s = np.matmul(X, self.W)
        _logit = 1 / (1 + np.exp(-_s))
        preds[_logit < 0.5] = -1
        return preds

    def score(self, X, y):
        """Mean accuracy of self.predict(X) with respect to y.

        Args:
            X: An array of shape [n_samples, n_features].
            y: An array of shape [n_samples,]. Only contains 1 or -1.
        """
        preds = self.predict(X)
        return np.sum(preds == y) / y.shape[0]

    def assign_weights(self, weights):
        """Directly set the weight vector; returns self for chaining."""
        self.W = weights
        return self
|
import Utility as Util
from Utility import *
from Particle import *
import MyGame
# Module-wide tuning constants. Most are derived from the window size
# (Util.W / Util.H) so the game scales with resolution.
gPlayer = None                 # the AnimalPlayer instance; set in AnimalPlayer.__init__
gWorldSize = Util.WH
gCheckAngle = 0.707            # cos(45 deg); usage not visible in this file -- confirm
gFriction = 10.0
gMaxSpeed = 60.0
gGround = Util.H * 0.05        # y coordinate of the floor (see updateMove)
gJump = Util.H * 1.8           # initial vertical jump velocity
gVel = Util.W * 0.8            # max horizontal speed when attacking/chasing
gWalk = Util.W * 0.3           # max horizontal speed while wandering
gIncVel = Util.W * 1.6         # horizontal acceleration
gDecVel = Util.W * 2.5         # horizontal deceleration
gRange = Util.W * 0.3          # horizontal distance at which enemies may attack
gAnimalSize = [Util.H*.18]*2   # nominal (w, h) of an animal sprite
gGravity = Util.gGravity*0.8
class AnimalBase(Scatter):
    """Base widget for all animals.

    Gives each instance a random size/texture and simple side-scroller
    physics: horizontal velocity with acceleration/deceleration, gravity,
    jumps, a floor at gGround, and bounces off the world's walls/ceiling.
    (Scatter presumably comes via Utility's star import -- confirm.)
    """
    # overridden to True by AnimalPlayer
    isPlayer = False
    def __init__(self, res):
        Scatter.__init__(self, size=self.size)
        # moved programmatically only; disable touch transforms
        self.do_translation = False
        self.do_rotation = False
        self.do_scale = False
        self.res = res
        # random size between 50% and 100% of the nominal animal size
        self.size = mul(gAnimalSize, fRand(0.5,1.0))
        self.radius = Util.getDist(self.size) * 0.707
        with self.canvas:
            Color(1,1,1)
            Rectangle(size=self.size, texture=self.res.getTex())
        # debug button
        '''
        self.btn = Button(size=self.size, background_color=[1,1,1,.5])
        self.btn.bind(on_press=self.onTouch)
        self.add_widget(self.btn)
        '''
        # spawn at a random position anywhere inside the world
        self.pos = (fRand(0.0, gWorldSize[0]), fRand(0.0, gWorldSize[1]))
        self.life = 5              # hit points; AnimalPlayer overrides to 100
        self.collide = False       # True for one frame after hitting a wall
        self.bDead = False
        self.vel = 0.0             # horizontal velocity
        self.jump = 0.0            # vertical velocity
        self.bJump = False         # currently airborne from a jump
        self.wallJump = False      # wall-jump boost already applied
        self.inc_vel = gIncVel
        self.dec_vel = gDecVel
        self.maxVel = gVel
        self.dir_left = False      # input state: moving left
        self.dir_right = False     # input state: moving right
    def onTouch(self, instance):
        # debug-button callback (see the commented block above)
        self.setJump()
    def setJump(self, bIgnore=False, ratio=1.0):
        """Start a jump; ignored while airborne unless bIgnore is set."""
        if self.bJump and not bIgnore:
            return
        self.bJump = True
        self.jump = gJump * ratio
    def set_wallJump(self):
        """Apply a small one-time upward boost when touching a wall."""
        if not self.wallJump:
            self.jump += gJump * 0.1
            self.wallJump = True
    def setFx(self):
        """Spawn a 'star' particle burst above this animal (if a game exists)."""
        if MyGame.gMyGame:
            emitter = MyGame.gMyGame.FxMgr.get_emitter('star')
            emitter.pos = add(self.pos, mul(self.size, (0.5, 1.0)))
            emitter.play_particle()
    def setDamage(self, damage=1):
        """Subtract hit points, play the hit effect, and die at zero life."""
        if self.bDead:
            return
        self.life -= damage
        #self.res.playSnd()
        self.setFx()
        if self.life <= 0:
            self.life = 0
            self.setDead()
    def setDead(self):
        """Mark as dead and detach from the widget tree (idempotent)."""
        if self.bDead:
            return
        self.bDead = True
        if self.parent:
            self.parent.remove_widget(self)
    def set_turn(self):
        """Reverse the current walking direction."""
        if self.dir_left:
            self.set_right()
        elif self.dir_right:
            self.set_left()
    def set_left(self, *args):
        # changing direction resets horizontal speed
        self.vel = 0.0
        self.dir_left = True
        self.dir_right = False
    def set_right(self, *args):
        self.vel = 0.0
        self.dir_left = False
        self.dir_right = True
    def release_all(self):
        self.dir_left = False
        self.dir_right = False
    def release_left(self, *args):
        self.dir_left = False
    def release_right(self, *args):
        self.dir_right = False
    def inc_velocity(self):
        """Accelerate toward maxVel in the held direction (frame-rate scaled)."""
        vel = self.inc_vel * getFrameTime()
        if self.jump < 0.0:
            # while falling: fall faster and steer harder
            self.jump -= gGravity * 1.5 * getFrameTime()
            vel*=1.75
        if self.dir_left:
            vel = -vel
        self.vel += vel
        if abs(self.vel) > self.maxVel:
            self.vel = self.maxVel if self.vel > 0.0 else -self.maxVel
    def dec_velocity(self):
        """Decelerate toward zero when no direction is held."""
        vel = abs(self.vel)
        if vel == 0.0:
            return
        vel -= self.dec_vel * getFrameTime()
        if vel < 0.0:
            self.vel = 0.0
            return
        self.vel = vel if self.vel > 0.0 else -vel
    def onUpdate(self):
        """Per-frame update: keep hopping, steer, then integrate movement."""
        if not self.bJump:
            self.setJump()
        if self.dir_left or self.dir_right:
            self.inc_velocity()
        else:
            self.dec_velocity()
        self.updateMove()
    def updateMove(self):
        """Integrate position; handle floor landing, ceiling and wall bounces."""
        self.collide = False
        vel = mul((self.vel, self.jump), getFrameTime())
        self.pos = add(self.pos, vel)
        if self.pos[1] < gGround:
            # landed: snap to the floor and reset jump state
            self.pos = (self.pos[0], gGround)
            self.wallJump = False
            self.jump = 0.0
            if self.bJump:
                self.bJump = False
        elif self.pos[1] > gWorldSize[1] - self.size[1]:
            # ceiling: reflect position and invert vertical velocity
            self.pos = (self.pos[0], (gWorldSize[1]-self.size[1])*2-self.pos[1])
            self.jump = -self.jump
        if self.pos[1] > gGround :
            # airborne: apply gravity
            self.jump -= gGravity * getFrameTime()
        if self.pos[0] < 0:
            # left wall: reflect and flag the collision
            self.pos = (-self.pos[0], self.pos[1])
            self.vel = -self.vel
            self.collide=True
            self.set_wallJump()
        elif self.pos[0] > gWorldSize[0] - self.size[0]:
            # right wall: reflect and flag the collision
            self.pos = ((gWorldSize[0]-self.size[0])*2-self.pos[0], self.pos[1])
            self.vel = -self.vel
            self.collide=True
            self.set_wallJump()
class AnimalEnemy(AnimalBase, StateMachine):
    """Enemy animal driven by a small state machine.

    States cycle between idling and walking; enemies flagged as preAttack
    may spot the player within gRange, wind up (ATTACKREADY) and then chase
    (ATTACK). Taking damage interrupts into STUN, which leads to ATTACK.
    """
    # state ids; index 0 is a do-nothing placeholder StateItem
    STATE_NONE = 0
    STATE_IDLE = 1
    STATE_WALK = 2
    STATE_STUN = 3
    STATE_ATTACKREADY = 4
    STATE_ATTACK = 5
    class State_Idle(StateItem):
        """Stand still for 0.5-1.5s, then walk; may spot the player."""
        def __init__(self, actor):
            self.actor = actor
        def onEnter(self):
            self.actor.release_all()
            self.idle_time =fRand(0.5,1.5)
        def onUpdate(self):
            self.idle_time -= getFrameTime()*1.0
            if self.idle_time < 0.0:
                self.setState(self.actor.STATE_WALK)
            self.actor.checkPlayer()
    class State_Walk(StateItem):
        """Wander at walking speed for 1-3s; hop off walls; may spot the player."""
        def __init__(self, actor):
            self.actor = actor
        def onEnter(self):
            self.walk_time = fRand(1.0,3.0)
            # pick a random initial direction
            self.actor.set_left() if nRand(0,1) else self.actor.set_right()
            self.actor.maxVel = gWalk
        def onUpdate(self):
            if self.actor.collide:
                # bounced off a wall: turn around and hop
                self.actor.set_turn()
                self.actor.setJump(ratio=0.7)
            self.walk_time -= getFrameTime()
            if self.walk_time < 0.0:
                self.setState(self.actor.STATE_IDLE)
            self.actor.checkPlayer()
    class State_Stun(StateItem):
        """Freeze for 1s after taking damage, then retaliate (ATTACK)."""
        def __init__(self, actor):
            self.actor = actor
        def onEnter(self):
            self.actor.release_all()
            self.stun_time = 1.0
        def onUpdate(self):
            self.stun_time -= getFrameTime()*1.0
            if self.stun_time < 0.0:
                self.setState(self.actor.STATE_ATTACK)
    class State_AttackReady(StateItem):
        """Wind-up: pause 1s, then attack if the player is still in range."""
        def __init__(self, actor):
            self.actor = actor
        def onEnter(self):
            self.actor.release_all()
            self.time = 1.0
        def onUpdate(self):
            self.time -= getFrameTime()*1.0
            if self.time < 0.0:
                if self.actor.playerInRange():
                    self.setState(self.actor.STATE_ATTACK)
                else:
                    self.setState(self.actor.STATE_WALK)
    class State_Attack(StateItem):
        """Chase the player at full speed, hopping continuously."""
        def __init__(self, actor):
            self.actor = actor
        def onEnter(self):
            self.actor.maxVel = gVel
        def onUpdate(self):
            if not self.actor.bJump:
                self.actor.setJump()
            # steer toward the player's current x position
            if gPlayer.pos[0] > self.actor.pos[0]:
                self.actor.set_right()
            else:
                self.actor.set_left()
    def __init__(self, res):
        AnimalBase.__init__(self, res)
        StateMachine.__init__(self)
        # state list indices must match the STATE_* ids above
        self.addState(StateItem())
        self.addState(self.State_Idle(self))
        self.addState(self.State_Walk(self))
        self.addState(self.State_Stun(self))
        self.addState(self.State_AttackReady(self))
        self.addState(self.State_Attack(self))
        self.setState(self.STATE_IDLE)
        # roughly half of the enemies are aggressive
        self.preAttack = nRand(0,1)
        self.preAttackRange = gRange
    def playerInRange(self):
        """True when the player is horizontally within preAttackRange."""
        return True if abs(gPlayer.pos[0] - self.pos[0]) < self.preAttackRange else False
    def checkPlayer(self):
        """Enter ATTACKREADY if aggressive and the player is close; report it."""
        if self.preAttack and self.playerInRange():
            self.setState(self.STATE_ATTACKREADY)
            return True
        return False
    def setDamage(self, damage=1):
        # taking a hit interrupts the current state with a forced STUN
        AnimalBase.setDamage(self, damage)
        if not self.bDead:
            self.setState(self.STATE_STUN, True)
    def onUpdate(self):
        """Per-frame update: run the AI state, then the shared physics."""
        StateMachine.update(self)
        if self.dir_left or self.dir_right:
            self.inc_velocity()
        else:
            self.dec_velocity()
        self.updateMove()
class AnimalPlayer(AnimalBase):
    """The player's avatar; constructing one registers it as the global gPlayer."""
    isPlayer = True
    def __init__(self, res):
        global gPlayer
        super().__init__(res)
        gPlayer = self
        self.life = 100  # starting hit points
class AIMgr:
    """Owns all AI animals: spawns/despawns them, updates them each frame,
    and resolves pairwise collisions (including with the player)."""
    def __init__(self, parent_layer):
        # parent_layer is the UI layer that displays the animal widgets.
        self.ai_list = []
        self.player = None
        self.parent_layer = parent_layer
    def setPlayer(self, player) :
        self.player = player
    def add_ai(self, res, num=1):
        """Spawn *num* AnimalEnemy instances from resource *res* and attach
        each to the parent layer."""
        for i in range(num):
            self.ai_list.append(AnimalEnemy(res))
            self.parent_layer.add_widget(self.ai_list[-1])
    def remove_ai(self):
        """Mark every AI dead and forget them all."""
        for i in self.ai_list:
            i.setDead()
        self.ai_list = []
    def check_collide(self, a, b):
        """Elastic-style collision response between animals *a* and *b*.

        When they overlap and are approaching each other, the relative
        velocity along the separation axis is exchanged as an impulse.
        A steep contact angle (one animal landing on the other) also
        bounces the upper one and damages the lower one when the player
        is involved.
        """
        dist = getDist(a.pos, b.pos)
        # average of the two radii, i.e. the overlap threshold
        radius = (a.radius + b.radius) * 0.5
        if dist < radius:
            v1 = (a.vel, a.jump)
            v2 = (b.vel, b.jump)
            vVel = sub(v1, v2)
            vDir = normalize(sub(b.pos, a.pos))
            # component of relative velocity along the a->b axis
            dot = sum(mul(vVel, vDir))
            if dot > 0.0:
                # only react when they are moving toward each other
                vVel = mul(vDir, dot)
                b.vel = b.vel + vVel[0]
                b.jump = b.jump + vVel[1]
                a.vel = a.vel - vVel[0]
                a.jump = a.jump - vVel[1]
                a.collide = True
                b.collide = True
                if vDir[1] < -gCheckAngle:
                    # a came down on top of b: a bounces, b takes the hit
                    a.setJump(True, 0.8)
                    if a.isPlayer or b.isPlayer:
                        b.setDamage()
                elif vDir[1] > gCheckAngle:
                    # b came down on top of a
                    b.setJump(True, 0.8)
                    if a.isPlayer or b.isPlayer:
                        a.setDamage()
    def onUpdate(self):
        """Per-frame update: tick every AI, test each unordered pair (plus the
        player) exactly once for collisions, then drop the dead ones."""
        deadList = []
        # working copy so each pair is checked only once: after handling i,
        # it is removed from the candidate list for subsequent animals
        animals = copy(self.ai_list)
        animals.append(self.player)
        for i in self.ai_list:
            i.onUpdate()
            if i.bDead:
                deadList.append(i)
            for j in animals:
                if i is not j:
                    self.check_collide(i,j)
            animals.remove(i)
        for i in deadList:
            self.ai_list.remove(i)
|
def my_function(x: int) -> int:
    """
    Demonstrates type hints: Python never enforces them at runtime, but an
    IDE or type checker may warn.  The hints say x should be an int and an
    int comes back.
    """
    # exact type comparison -- rejects subclasses, so it can surprise with inheritance
    if type(x) == int:
        print('yep its an int')
    # isinstance() accepts subclasses too, making it the inheritance-safe check
    if isinstance(x, int):
        return x + 2
    return 0
# 5 is an int: prints 'yep its an int', then 7
print(my_function(5))
# "5" is a str: both checks fail, so this just prints 0 (no error raised)
print(my_function("5"))
class Animal:
    """Empty base class used below to contrast isinstance() with type() checks."""
class Dog(Animal):
    """Subclass: isinstance(Dog(), Animal) is True, but type(Dog()) is Dog, not Animal."""
d = Dog()
# isinstance follows the inheritance chain, so this prints
if isinstance(d, Animal):
    print('dogs are animals')
# type(d) is Dog, not Animal, so this branch does NOT run
if type(d) == Animal:
    print('dogs are animals 2')
my_list = []
# why don't we get an index error?
# `and` short-circuits: the first operand (empty list) is falsy,
# so the subscript on the right is never evaluated.
# False and ANY == False
# reversed order would fail: if my_list[0] == 'happy' and my_list:
if my_list and my_list[0] == 'happy':
    print('my list is good')
else:
    print('my list is bad')
# `or` short-circuits the other way: it only evaluates the right side
# when the left side is falsy.
# False or True is True; False or False is False
#
# so this ordering WOULD raise IndexError on an empty list:
# if my_list[0] == 'happy' or my_list:
#     print('my list is good')
# else:
#     print('my list is bad')
# if statements read from left to right, down paren levels
# enumerate: the for-each loop that yields both the index AND the element
for i, x in enumerate(['a', 'b', 'c', 'd', 'e']):
    print(i, x)
# now that you're out of 201 almost, you're free and encouraged to use this thing.
# i fight with myself about enumerate, one of the best python features.
# manual index loop -- what enumerate replaces
i = 0
while i < len(my_list):
    # do something here
    i += 1
# index-only loop; prefer iterating the list directly when possible
for i in range(len(my_list)):
    pass
def game_not_finished(board):
    """Stub: would report whether play should continue on *board* (returns None for now)."""
    return None
def play_game(board):
    """Stub: would advance the game state stored in *board* by one turn."""
    return None
game_board = []
# game_not_finished() currently returns None (falsy), so this body never runs
while game_not_finished(game_board):
    play_game(game_board)
x = 2
y = 3; z = 3
""" boolean flags are decent ways to track things
better to keep the condition in the while loop statement if possible.
"""
finished = False
while not finished:
    # do stuff
    if x == 2 and y == 3:
        finished = True
    # other stuff to do
# better: put the exit condition directly in the while header
# (here x == 2 and y == 3 already, so the body never executes)
while not (x == 2 and y == 3):
    # do stuff
    pass
|
import sys

# Redirect stdin so input() reads the bundled test file; the recursion-limit
# bump is a contest-judge convention (no recursion is actually used here).
sys.stdin = open('perfect_square.txt')
sys.setrecursionlimit(10 ** 6)

T = int(input())
for test_case in range(1, T + 1):
    # S is an N x N grid holding the values 1..N*N in some arrangement.
    N = int(input())
    S = [list(map(int, input().split())) for i in range(N)]
    dx = [0, 0, 1, -1]
    dy = [1, -1, 0, 0]
    # check[v] == 1 when the cell holding value v has a 4-neighbour holding
    # v + 1, i.e. a consecutive walk can continue from v.
    check = [0] * (N ** 2 + 1)
    max_num = 0
    for j in range(N):
        for k in range(N):
            for i in range(4):
                if 0 <= j + dx[i] < N and 0 <= k + dy[i] < N:
                    if S[j][k] - S[j + dx[i]][k + dy[i]] == -1:
                        check[S[j][k]] = 1
    # Scan from the largest value down: every maximal run of 1s in `check`
    # is one consecutive path.  Keep the longest run; on ties prefer the
    # smaller starting number (num).  The path length is run length + 1.
    count = 0
    num = 0
    for i in range(N ** 2, -1, -1):
        if check[i]:
            count += 1
        else:
            if max_num <= count:
                max_num = count
                num = i + 1
            count = 0
    print('#{} {} {}'.format(test_case, num, max_num + 1))
# -*- coding: utf-8 -*-
import scrapy
class PatchCategorySpider(scrapy.Spider):
    """Crawl the Path of Exile 'Content Update' forum search results and
    scrape the headings and change lines of every patch-note thread."""
    name = "patch_category"
    start_urls = [
        "https://www.pathofexile.com/search/results/Content+Update/search-within/threads/forums/366/page/1"
    ]

    def parse(self, response):
        # Visit every thread linked from the results table ...
        thread_links = response.css('td.content div.content a')
        yield from response.follow_all(thread_links, self.parse_patch_notes)
        # ... then walk the pagination to the remaining result pages.
        yield from response.follow_all(response.css('div.pagination a'), self.parse)

    def parse_patch_notes(self, response):
        # First div.content is the opening post with the patch notes.
        post = response.css("div.content")[0]
        out = {'changes': [], 'headings': []}
        #last 3 are report/additional info so we exclude
        out['headings'] = [heading.get() for heading in post.css("strong::text")[:-3]]
        out['changes'] = [change.get() for change in post.css('li::text')]
        yield out
|
import json
import urllib.parse
import urllib.request
from http.cookiejar import CookieJar
class Site():
    """Minimal MediaWiki API client built on urllib with cookie sessions.

    The ``search``/``list_*`` methods implement the API's old
    "query-continue" protocol: call once without the continue flag, then
    call again with it set to page through results.  Pagination state is
    kept on the instance between calls.
    """

    def __init__(self,
                 host=None,
                 apiurl='/w/api.php',
                 timeout=100,
                 srlimit=500,
                 apfrom=None,
                 aplimit=5000,
                 bllimit=5000,
                 aulimit=5000,
                 aclimit=5000,
                 rclimit=5000,
                 lelimit=5000,
                 ):
        """Create a client for the wiki at *host* (e.g. ``https://en.wikipedia.org``).

        Raises an Exception when *host* is missing.  The ``*limit``
        parameters are per-request page sizes for the corresponding list
        endpoints.
        """
        if not host:
            raise(Exception("host not defined"))
        self.host = host
        self.apiurl = apiurl
        self.url = '%s%s' % (self.host, self.apiurl)
        self.format = 'json'
        # Cookie jar keeps the login session alive across requests.
        self.cj = CookieJar()
        self.opener = urllib.request.build_opener(
            urllib.request.HTTPCookieProcessor(self.cj)
        )
        self.token = None
        # NOTE(review): *timeout* is accepted but currently unused -- confirm
        # before relying on it.
        # Remember the configured limits so a fresh (non-continue) listing
        # can reset the per-instance pagination counters.
        self.defaults = {}
        self.defaults['srlimit'] = srlimit
        self.defaults['aplimit'] = aplimit
        self.defaults['aclimit'] = aclimit
        self.defaults['bllimit'] = bllimit
        self.defaults['rclimit'] = rclimit
        self.defaults['lelimit'] = lelimit
        self.srlimit = srlimit
        self.apfrom = apfrom
        self.aplimit = aplimit
        self.bllimit = bllimit
        self.aulimit = aulimit
        self.aclimit = aclimit
        self.rclimit = rclimit
        self.lelimit = lelimit
        self.search_info = {}
        self.aufinished = False

    def return_json(self, data):
        """Decode a UTF-8 ``bytes`` API response into a Python object."""
        return json.loads(bytes.decode(data, 'utf-8'))

    def sitematrix(self):
        """Return the raw ``action=sitematrix`` response (wiki farm overview)."""
        t = {}
        t['action'] = 'sitematrix'
        t['format'] = self.format
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        return self.return_json(f.read())

    def login(self, username=None, password=None):
        """Log in, handling the two-step NeedToken handshake.

        A falsy *username* is a no-op (BUGFIX: the original fell through to
        an unbound local and raised NameError in that case).
        """
        self.username = username
        if not username:
            return
        t = {}
        t['action'] = 'login'
        t['lgname'] = username
        t['lgpassword'] = password
        t['format'] = self.format
        self.cj.clear()
        params = urllib.parse.urlencode(t)
        f = self.opener.open(self.url, params.encode('utf-8'))
        d = f.read()
        try:
            d = self.return_json(d)
            self.token = d['login']['token']
        except Exception as e:
            raise(Exception('Unable to login:', e))
        if d['login']['result'] == 'NeedToken':
            # Retry with the token the first round handed back.
            t['lgtoken'] = d['login']['token']
            params = urllib.parse.urlencode(t)
            f = self.opener.open(self.url, params.encode('utf-8'))
            d = f.read()
            try:
                d = self.return_json(d)
                self.token = d['login']['lgtoken']
            except Exception as e:
                raise(Exception('Unable to login:', e))

    def logout(self):
        """End the current session; raises when already logged out."""
        t = {}
        t['action'] = 'logout'
        t['format'] = self.format
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
        except Exception as e:
            raise(Exception('Already logged out'))

    def list_backlinks(self, title=None, blcontinue=False, blfilterredir='all', blredirect=False):
        """Return titles of pages that link to *title* (single request).

        NOTE(review): *blcontinue* is accepted but not implemented --
        confirm whether pagination was intended here.
        """
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'backlinks'
        t['bllimit'] = self.bllimit
        t['blfilterredir'] = blfilterredir
        t['bltitle'] = title
        if blredirect:
            t['blredirect'] = ''
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
        except Exception:
            pass
        retval = []
        try:
            for x in d['query']['backlinks']:
                retval.append(x['title'])
        except Exception:
            pass
        return retval

    def list_allcategories(self, **kargs):
        """Return a batch of category names; pass ``accontinue=True`` to resume."""
        acfrom = kargs.get('acfrom', None)
        acto = kargs.get('acto', None)
        accontinue = kargs.get('accontinue', None)
        acprefix = kargs.get('acprefix', None)
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'allcategories'
        t['aclimit'] = kargs.get('aclimit', self.aclimit)
        t['acdir'] = kargs.get('acdir', 'ascending')
        if acfrom:
            t['acfrom'] = acfrom
        if acto:
            t['acto'] = acto
        if acprefix:
            t['acprefix'] = acprefix
        if not accontinue:
            # Fresh listing: reset the saved continuation state.
            self.search_info = {}
            self.aclimit = self.defaults['aclimit']
        else:
            # aclimit < 0 is the "exhausted" sentinel set below.
            if self.aclimit < 0:
                return []
            t['acfrom'] = self.search_info['acfrom']
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            self.search_info = {}
            try:
                self.search_info['acfrom'] = \
                    d['query-continue']['allcategories']['acfrom']
            except Exception:
                pass
            retval = []
            try:
                for x in d['query']['allcategories']:
                    retval.append(x['*'])
            except Exception:
                pass
            # BUGFIX: this used self.srlimit (copy-paste from search()), so a
            # continued category listing never terminated.
            if len(retval) < self.aclimit:
                self.aclimit = -1
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def list_all(self, **kargs):
        """Return a batch of page titles; pass ``apcontinue=True`` to resume."""
        apcontinue = kargs.get('apcontinue', False)
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'allpages'
        t['aplimit'] = self.aplimit
        t['apdir'] = kargs.get('apdir', 'ascending')
        t['apnamespace'] = kargs.get('apnamespace', '0')
        t['apfilterredir'] = kargs.get('apfilterredir', 'all')
        apfrom = kargs.get('apfrom', None)
        apto = kargs.get('apto', None)
        if apfrom:
            t['apfrom'] = apfrom
        if apto:
            t['apto'] = apto
        if not apcontinue:
            # Fresh listing: reset the saved continuation state.
            self.search_info = {}
            self.aplimit = self.defaults['aplimit']
        else:
            # aplimit < 0 is the "exhausted" sentinel set below.
            if self.aplimit < 0:
                return []
            t['apfrom'] = self.search_info['apfrom']
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            self.search_info = {}
            try:
                self.search_info['apfrom'] = \
                    d['query-continue']['allpages']['apfrom']
            except Exception:
                pass
            retval = []
            try:
                for x in d['query']['allpages']:
                    retval.append(x['title'])
            except Exception:
                pass
            # BUGFIX: this used self.srlimit (copy-paste from search()), so a
            # continued page listing never terminated.
            if len(retval) < self.aplimit:
                self.aplimit = -1
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def search(self, s, srcontinue=False):
        """Full-text search for *s*; pass ``srcontinue=True`` to resume."""
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'search'
        t['srsearch'] = s
        t['srlimit'] = self.srlimit
        if not srcontinue:
            # BUGFIX: was 'self.serach_info' (typo), so stale offsets were
            # never cleared when starting a new search.
            self.search_info = {}
            self.srlimit = self.defaults['srlimit']
        if srcontinue and self.srlimit < 0:
            return []
        # Only reuse the saved offset when continuing the SAME query string.
        if srcontinue and s == self.search_info.get('string', ''):
            t['sroffset'] = self.search_info['offset']
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            self.search_info = {}
            self.search_info['string'] = s
            try:
                self.search_info['offset'] = \
                    d['query-continue']['search']['sroffset']
            except Exception:
                pass
            retval = []
            try:
                for x in d['query']['search']:
                    retval.append(x['title'])
            except Exception:
                pass
            # A short batch means the result set is exhausted.
            if len(retval) < self.srlimit:
                self.srlimit = -1
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def listall(self, srcontinue=False):
        """Simpler allpages listing using self.apfrom for continuation.

        NOTE(review): overlaps with list_all(); raises when the response has
        no query-continue block (i.e. on the final page).
        """
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'allpages'
        t['aplimit'] = self.aplimit
        if not srcontinue:
            self.apfrom = None
        if srcontinue and self.apfrom:
            t['apfrom'] = self.apfrom
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            self.apfrom = d['query-continue']['allpages']['apfrom']
            retval = []
            for x in d['query']['allpages']:
                retval.append(x['title'])
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def userdailycontribs(self, username=None, daysago=0):
        """Return the UserDailyContribs extension data for *username*.

        Falls back to the login name; silently returns None on bad input.
        """
        if not username and self.username:
            username = self.username
        if not username:
            return
        if daysago < 0:
            return
        params = urllib.parse.urlencode({
            'action': 'userdailycontribs',
            'format': self.format,
            'user': username,
            'daysago': daysago,
        })
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            return d
        except Exception as e:
            raise(Exception('Data not found', e))

    def list_allusers(self, **kargs):
        """Return a batch of user names; pass ``aucontinue=True`` to resume.

        ``self.aufinished`` becomes True when the final batch has been seen.
        """
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'allusers'
        # Pass straight through any supported value parameters ...
        for x in ['aufrom', 'auto', 'audir', 'augroup', 'auexcludegroup', 'aurights', 'auprop', 'aulimit']:
            if kargs.get(x, None):
                t[x] = kargs[x]
        # ... and flag parameters (presence-only, value is empty).
        for x in ['auwitheditsonly', 'auactiveusers']:
            if kargs.get(x, None):
                t[x] = ''
        aucontinue = kargs.get('aucontinue', None)
        t['aulimit'] = t.get('aulimit', self.aulimit)
        if not aucontinue:
            self.aufrom = None
            self.aufinished = False
        if aucontinue and self.aufrom:
            t['aufrom'] = self.aufrom
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            try:
                self.aufrom = d['query-continue']['allusers']['aufrom']
            except Exception:
                # No continuation block: this was the last batch.
                self.aufinished = True
            retval = []
            for x in d['query']['allusers']:
                retval.append(x['name'])
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def list_recentchanges(self, **kargs):
        """Return a batch of recent changes as dicts keyed by rcprop fields.

        Pass ``rccontinue`` to resume; ``self.rcfinished`` marks the end.
        """
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'recentchanges'
        t['rcprop'] = '|'.join(kargs.get('rcprop', ['title', 'ids', 'type', 'user']))
        t['rclimit'] = self.rclimit
        rctype = kargs.get('rctype', None)
        if rctype:
            t['rctype'] = rctype
        rcstart = kargs.get('rcstart', None)
        rcstop = kargs.get('rcstop', None)
        rccontinue = kargs.get('rccontinue', None)
        if not rccontinue:
            self.rcstart = None
            self.rcfinished = False
        if rccontinue and self.rcstart:
            t['rcstart'] = self.rcstart
        if rccontinue:
            t['rccontinue'] = rccontinue
        params = urllib.parse.urlencode(t)
        # rcstart/rcstop are appended raw (not urlencoded), preserving the
        # original behaviour for timestamp values.
        if rcstart:
            params = '%s&rcstart=%s' % (params, rcstart)
        if rcstop:
            params = '%s&rcstop=%s' % (params, rcstop)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            try:
                self.rcstart = d['query-continue']['recentchanges']['rcstart']
            except Exception:
                self.rcfinished = True
            retval = []
            for x in d['query']['recentchanges']:
                tmp_retval = {}
                for y in t['rcprop'].split('|'):
                    if y == 'ids':
                        # 'ids' expands into four id fields in the response
                        for z in ['rcid', 'pageid', 'revid', 'old_revid']:
                            tmp_retval[z] = x[z]
                    else:
                        tmp_retval[y] = x[y]
                retval.append(tmp_retval)
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def list_logevents(self, **kargs):
        """Return a batch of log events as dicts keyed by leprop fields.

        Pass ``lecontinue`` to resume; ``self.lefinished`` marks the end.
        """
        t = {}
        t['format'] = self.format
        t['action'] = 'query'
        t['list'] = 'logevents'
        letype = kargs.get('letype', None)
        if letype:
            t['letype'] = letype
        t['leprop'] = '|'.join(kargs.get('leprop', ['ids', 'title', 'type', 'user', 'timestamp', 'comment', 'details', 'action']))
        leaction = kargs.get('leaction', None)
        if leaction:
            t['leaction'] = leaction
        lestart = kargs.get('lestart', None)
        if lestart:
            t['lestart'] = lestart
        leend = kargs.get('leend', None)
        if leend:
            t['leend'] = leend
        ledir = kargs.get('ledir', None)
        if ledir:
            t['ledir'] = ledir
        leuser = kargs.get('leuser', None)
        if leuser:
            t['leuser'] = leuser
        letitle = kargs.get('letitle', None)
        if letitle:
            t['letitle'] = letitle
        leprefix = kargs.get('leprefix', None)
        if leprefix:
            t['leprefix'] = leprefix
        letag = kargs.get('letag', None)
        if letag:
            t['letag'] = letag
        t['lelimit'] = kargs.get('lelimit', self.lelimit)
        lecontinue = kargs.get('lecontinue', None)
        if not lecontinue:
            self.lestart = None
            self.lefinished = False
        if lecontinue and self.lestart:
            t['lestart'] = self.lestart
        if lecontinue:
            t['lecontinue'] = lecontinue
        params = urllib.parse.urlencode(t)
        f = self.opener.open('%s?%s' % (self.url, params))
        d = f.read()
        try:
            d = self.return_json(d)
            try:
                self.lestart = d['query-continue']['logevents']['lestart']
            except Exception:
                self.lefinished = True
            retval = []
            for x in d['query']['logevents']:
                tmp_retval = {}
                for y in t['leprop'].split('|'):
                    if y == 'ids':
                        for z in ['logid', 'pageid']:
                            tmp_retval[z] = x[z]
                    elif y == 'details':
                        # details have no fixed schema; intentionally skipped
                        pass
                    else:
                        tmp_retval[y] = x[y]
                retval.append(tmp_retval)
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def close(self):
        """Close the underlying connection if one exists.

        BUGFIX: guarded -- ``self.conn`` is never created by this class, so
        the original unconditionally raised AttributeError.
        """
        conn = getattr(self, 'conn', None)
        if conn is not None:
            conn.close()
class Page():
    """Page-level API operations (existence check, fetch, edit, delete)
    performed through a bound :class:`Site` instance.

    Call :meth:`set_title` then :meth:`get` before :meth:`edit`/:meth:`delete`
    so ``self.edittoken`` is populated.
    """

    def __init__(self, site):
        """Bind to *site* and set the default info props / tokens to request."""
        # prop=info fields requested by get()
        self.inprop = [
            'protection',
            'talkid',
            'watched',
            'subjectid',
            'url',
            'readable',
            'preload',
            'displaytitle'
        ]
        # action tokens requested by get()
        self.intoken = [
            'edit',
            'delete',
            'protect',
            'move',
            'block',
            'unblock',
            'email',
            'import',
            'watch'
        ]
        self.site = site

    def exists(self, title):
        """Check which titles exist on the wiki.

        *title* may be a single title string or an iterable of titles
        (BUGFIX: a bare string used to be joined character-by-character).
        Returns ``{True: [existing...], False: [missing...]}``.
        """
        if isinstance(title, str):
            title = [title]
        t = {}
        t['action'] = 'query'
        t['prop'] = 'revisions'
        t['titles'] = '|'.join(title)
        # BUGFIX: was 'vprop', which is not a MediaWiki parameter; the
        # revisions prop parameter is 'rvprop'.
        t['rvprop'] = 'timestamp'
        t['format'] = self.site.format
        params = urllib.parse.urlencode(t)
        f = self.site.opener.open('%s?%s' % (self.site.url, params))
        d = f.read()
        try:
            d = self.site.return_json(d)
            tmp = list(d['query']['pages'].keys())
            retval = {}
            retval[True] = []
            retval[False] = []
            for k in tmp:
                # Existing pages carry a 'revisions' entry; missing ones don't.
                if 'revisions' in d['query']['pages'][k]:
                    retval[True].append(d['query']['pages'][k]['title'])
                else:
                    retval[False].append(d['query']['pages'][k]['title'])
            return retval
        except Exception as e:
            raise(Exception('Data not found', e))

    def set_title(self, title):
        """Select the page that subsequent get/edit/delete calls operate on."""
        self.title = title

    def _set_edittoken(self, data):
        """Cache the edit token from a query-info response onto self.edittoken."""
        x = data['query']['pages']
        for i in x:
            if x[i].get('edittoken', None):
                self.edittoken = x[i]['edittoken']
                return

    def _set_content(self, data):
        """Cache the latest revision wikitext from a response onto self.text."""
        x = data['query']['pages']
        for i in x:
            if x[i].get('revisions', None):
                self.text = x[i]['revisions'][0]['*']
                return

    def get(self, inprop=None, intoken=None):
        """Fetch info, tokens, and current wikitext for the selected title.

        *inprop*/*intoken* default to the lists set in ``__init__``.
        Side effects: populates ``self.edittoken`` and ``self.text``.
        """
        if not inprop:
            inprop = self.inprop
        if not intoken:
            intoken = self.intoken
        # BUGFIX: the original joined self.inprop/self.intoken here, silently
        # discarding any caller-supplied lists.
        inprop = '|'.join(inprop)
        intoken = '|'.join(intoken)
        params = urllib.parse.urlencode({
            'format': self.site.format,
            'action': 'query',
            'prop': 'info|revisions',
            'titles': self.title,
            'inprop': inprop,
            'intoken': intoken,
            'rvlimit': 1,
            'rvprop': 'content',
        })
        f = self.site.opener.open('%s?%s' % (self.site.url, params))
        d = f.read()
        try:
            d = self.site.return_json(d)
            self._set_edittoken(d)
            self._set_content(d)
            return d
        except Exception as e:
            raise(Exception('Data not found', e))

    def edit(self, text=None, minor=True, bot=True, force_edit=False, createonly=False, nocreate=False, md5=None, assert_='user', notminor=False, section=None, summary=None, appendtext=None, prependtext=None):
        """Save *text* to the selected page (requires a prior get() for the token).

        Skips the request when *text* equals the currently cached wikitext
        unless *force_edit* is set.  Flag parameters map to presence-only
        API flags; returns the decoded API response.
        """
        # Ensure self.text exists so the no-op comparison below is safe even
        # when get() was never called.
        if not vars(self).get('text', None):
            self.text = None
        if not force_edit and text == self.text:
            print('Ignoring edit...')
            return
        t = {}
        t['format'] = self.site.format
        t['action'] = 'edit'
        t['title'] = self.title
        t['text'] = text
        t['assert'] = assert_
        if appendtext:
            t['appendtext'] = appendtext
        if prependtext:
            t['prependtext'] = prependtext
        if summary:
            t['summary'] = summary
        if section:
            t['section'] = section
        if minor:
            t['minor'] = ''
        if notminor:
            t['notminor'] = ''
        if bot:
            t['bot'] = ''
        if createonly:
            t['createonly'] = ''
        if nocreate:
            t['nocreate'] = ''
        if md5:
            t['md5'] = md5
        t['token'] = self.edittoken
        params = urllib.parse.urlencode(t)
        self.site.addheaders = [('Content-Type', 'multipart/form-data')]
        f = self.site.opener.open(self.site.url, params.encode('utf-8'))
        d = f.read()
        try:
            d = self.site.return_json(d)
            return d
        except Exception as e:
            raise(Exception('Data not found', e))

    def delete(self, **kargs):
        """Delete *title* (keyword arg), optionally with *reason*.

        Requires ``self.edittoken`` (call get() first); silently returns
        when no title is given.
        """
        title = kargs.get('title', None)
        reason = kargs.get('reason', None)
        if not title:
            return
        t = {}
        t['action'] = 'delete'
        t['title'] = title
        t['token'] = self.edittoken
        t['format'] = self.site.format
        if reason:
            t['reason'] = reason
        params = urllib.parse.urlencode(t).encode('utf-8')
        f = self.site.opener.open(self.site.url, params)
        d = f.read()
        try:
            d = self.site.return_json(d)
            return d
        except Exception as e:
            raise(Exception('Data not found', e))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.