text
stringlengths
38
1.54M
#!/usr/bin/env python3
import click
import json
import os
import yaml

from drain import drain


def regex_from_yaml(f):
    """Return the list of 'regex' values from an open YAML file, or [] if f is None.

    The YAML file is expected to be a sequence of mappings each carrying a
    'regex' key (used for log preprocessing).
    """
    if not f:
        return []
    # BUGFIX: yaml.load() without an explicit Loader is deprecated since
    # PyYAML 5.1 and can execute arbitrary constructors on untrusted input.
    # safe_load restricts parsing to plain YAML tags, which is all we need.
    rs = yaml.safe_load(f)
    return [r['regex'] for r in rs]


def common_start(ctx, args):
    """Hook run before each subcommand; currently a no-op placeholder."""
    pass


def common_end(ctx):
    """Hook run after each subcommand: persist and/or print the parse tree."""
    if ctx.obj['tree_file']:
        ctx.obj['parser'].saveTree(ctx.obj['tree_file'])
    if ctx.obj['print_tree']:
        # saveTree() with no filename returns the serialized tree.
        print(ctx.obj['parser'].saveTree())


@click.group()
@click.option('-st', default=0.4, help='Similarity threshold')
@click.option('-depth', default=4, help='Depth of all leaf nodes')
@click.option('-max-child', default=100, help='Max number of children of an internal node')
@click.option('-regex-file', type=click.File('r'), help='Yaml file of regexes for log preprocessing')
@click.option('-tree-file', default=None, help='File for reading/writing log sig parse tree')
@click.option('-print-tree', is_flag=True, default=False, help='Print log sig parse tree to stdout')
@click.option('-verbose', is_flag=True, default=False)
@click.pass_context
def cli(ctx, **args):
    """Build the shared drain LogParser and stash it on the click context."""
    rex = regex_from_yaml(args['regex_file'])
    parser = drain.LogParser(rex=rex, depth=args['depth'], st=args['st'],
                             maxChild=args['max_child'], verbose=args['verbose'])
    # Resume from a previously saved parse tree when one exists on disk.
    if args['tree_file'] and os.path.exists(args['tree_file']):
        parser.loadTree(args['tree_file'])
    ctx.obj = {
        'parser': parser,
        'verbose': args['verbose'],
        'tree_file': args['tree_file'],
        'print_tree': args['print_tree'],
    }


@cli.command()
@click.option('-i', help='Input file, logs to process')
@click.pass_context
def batch(ctx, **args):
    """Parse a whole log file in one shot."""
    common_start(ctx, args)
    ctx.obj['parser'].parse(args['i'])
    common_end(ctx)


@cli.command()
@click.pass_context
def stream(ctx, **args):
    """Parse log lines from stdin, emitting one JSON record per line."""
    common_start(ctx, args)
    stdin_text = click.get_text_stream('stdin')
    for line in stdin_text:
        logClu = ctx.obj['parser'].parseLine(line)
        params = ctx.obj['parser'].extract_parameters(logClu, line)
        r = logClu.to_dict()
        r['params'] = params
        print(json.dumps(r))
    common_end(ctx)


if __name__ == '__main__':
    cli()
from pathlib import Path
from os import listdir
from PIL import Image
import pytesseract
from pytesseract import image_to_string


def main():
    """Run OCR (image_to_string) over every file in ./sourceimg and print the results."""
    src_dir = Path(r'sourceimg').absolute()
    for entry in listdir(src_dir):
        img_path = src_dir / entry
        print(img_path)
        # print(type(image_to_string(Image.open(img_path))))
        # Encode to ASCII, dropping anything OCR produced outside that range.
        ocr_text = image_to_string(Image.open(img_path))
        print(ocr_text.encode('ascii', errors='ignore'))


if __name__ == "__main__":
    main()
## Grant Gasser
## Leetcode 187 Repeated DNA Sequences
## 9/11/19


def findRepeatedDnaSequences(s):
    """
    Finds 10-letter sequences that appear >= 2 times.

    Args:
        s (string): string of chars 'A', 'T', 'C', 'G'

    Returns:
        two_or_more (set): set of the 10-letter subsequences seen at least twice

    Raises:
        ValueError: if s is shorter than 10 characters.
    """
    # A string shorter than 10 chars (including '') cannot contain a window.
    if len(s) < 10:
        raise ValueError("String must be of length 10+. String: '{}' is too small.".format(s))

    ten_letter_counts = {}
    two_or_more = set()

    # Slide a 10-character window across the string.
    i = 0
    while (i + 10) <= len(s):
        sub = s[i:i + 10]
        # Count this window; unseen substrings start at 0.
        ten_letter_counts[sub] = ten_letter_counts.get(sub, 0) + 1
        # BUGFIX: two_or_more is a set, so the original .append() raised
        # AttributeError on the first repeat; sets use .add().
        if ten_letter_counts[sub] >= 2:
            two_or_more.add(sub)
        i += 1

    return two_or_more


def main():
    print(findRepeatedDnaSequences('AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT'))


# Guarded so importing this module does not trigger the demo run.
if __name__ == '__main__':
    main()
from django.test import TestCase
from django.template.loader import render_to_string
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from lists.models import Item,List
from lists.views import home_page
import re


class HomePageTest(TestCase):
    """Tests that '/' resolves to home_page and renders home.html."""

    @staticmethod
    def remove_csrf(html_code):
        # Strip the CSRF hidden <input> so rendered template HTML can be
        # compared with response HTML (each render gets a fresh token).
        csrf_regex = r'<input[^>]+csrfmiddlewaretoken[^>]+>'
        return re.sub(csrf_regex, '', html_code)

    def test_root_url_resolves_to_home_page_view(self):
        found=resolve("/")
        self.assertEqual(found.func,home_page)

    def test_home_page_returns_correct_html(self):
        request=HttpRequest()
        response=home_page(request)
        expected_html=render_to_string("home.html",request=request)
        self.assertEqual(self.remove_csrf(response.content.decode()),
                         self.remove_csrf(expected_html))


class NewListTest(TestCase):
    """POSTs to /lists/new create a List with its first Item and redirect."""

    def test_saving_a_POST_request(self):
        self.client.post("/lists/new", data={"item_text":"A new list item"})
        self.assertEqual(Item.objects.count(),1)
        new_item=Item.objects.first()
        self.assertEqual(new_item.text,"A new list item")

    def test_redirect_after_POST(self):
        response=self.client.post("/lists/new", data={"item_text": "A new list item"})
        new_list=List.objects.first()
        self.assertRedirects(response,"/lists/%d/"%new_list.id)


class NewItemTest(TestCase):
    """POSTs to /lists/<id>/add_item attach an Item to that specific list."""

    def test_can_save_a_POST_request_to_an_existing_list(self):
        # other_list exists only to prove the item lands on correct_list.
        other_list=List.objects.create()
        correct_list=List.objects.create()
        self.client.post("/lists/%d/add_item"%correct_list.id,data={"item_text":"A new item for an existing list"})
        self.assertEqual(Item.objects.count(),1)
        new_item=Item.objects.first()
        self.assertEqual(new_item.text,"A new item for an existing list")
        self.assertEqual(new_item.list,correct_list)

    def test_redirects_to_list_view(self):
        other_list=List.objects.create()
        correct_list=List.objects.create()
        response=self.client.post("/lists/%d/add_item"%(correct_list.id),data={"item_text":"A new item for an existing list"})
        self.assertRedirects(response,"/lists/%d/"%correct_list.id)


class ListViewTest(TestCase):
    """GET /lists/<id>/ shows only that list's items via list.html."""

    def test_displays_only_items_for_that_list(self):
        correct_list=List.objects.create()
        Item.objects.create(text="itemey 1",list=correct_list)
        Item.objects.create(text="itemey 2",list=correct_list)
        other_list = List.objects.create()
        Item.objects.create(text="other list item 1", list=other_list)
        Item.objects.create(text="other list item 2", list=other_list)
        response=self.client.get("/lists/%d/"%correct_list.id)
        self.assertContains(response,"itemey 1")
        self.assertContains(response,"itemey 2")
        self.assertNotContains(response, "other list item 1")
        self.assertNotContains(response, "other list item 2")

    def test_passes_correct_list_to_template(self):
        other_list=List.objects.create()
        correct_list=List.objects.create()
        response=self.client.get("/lists/%d/"%(correct_list.id))
        self.assertEqual(response.context["list"],correct_list)

    def test_uses_list_template(self):
        list_=List.objects.create()
        response=self.client.get("/lists/%d/" %list_.id)
        self.assertTemplateUsed(response,"list.html")


class ListAndItemModelsTest(TestCase):
    """Model-level round trip: Items save against a List and read back in order."""

    def test_saving_and_retrieving_item(self):
        list_=List()
        list_.save()

        first_item=Item()
        first_item.text="The first list item"
        first_item.list=list_
        first_item.save()

        second_item=Item()
        second_item.text="Item the second"
        second_item.list = list_
        second_item.save()

        saved_list=List.objects.first()
        self.assertEqual(saved_list,list_)

        saved_item=Item.objects.all()
        self.assertEqual(saved_item.count(),2)

        first_saved_item=saved_item[0]
        second_saved_item=saved_item[1]
        self.assertEqual(first_saved_item.text,"The first list item")
        self.assertEqual(first_saved_item.list,list_)
        self.assertEqual(second_saved_item.text,"Item the second")
        self.assertEqual(second_saved_item.list,list_)
# -*- coding: utf-8 -*-
# Kivy GUI client for a meeting recorder/transcriber: records mic audio,
# streams WAV data to/from a remote server over a simple framed TCP protocol,
# and displays transcribed sentences with playback/summary controls.
# NOTE(review): this file was recovered from a line-collapsed dump; block
# indentation is reconstructed and should be verified against the original.
from kivy.uix.slider import Slider
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.core.text import LabelBase,DEFAULT_FONT
from kivy.graphics import Color, Rectangle
from functools import partial
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.uix.popup import Popup
from kivy.uix.spinner import Spinner
from kivy.uix.label import Label
from kivy.uix.slider import Slider
import os
import pyaudio
import numpy as np
import time
import wave
import socket
import multiprocessing
import threading
import json
import random
from kivy.core.window import Window
import tkinter
from tkinter import ttk
from multiprocessing import Process
from kivy.properties import StringProperty
import math
#import japanize_kivy

LabelBase.register(DEFAULT_FONT,'myfont.ttc')

# Protocol command IDs (first 2 bytes of each framed packet).
SET = 0
SUM = 1
WAV = 2
PLAY = 3
INPUT = 4
CON = 5
GIJI = 6

BAFFER = 40960*2  # playback double-buffer size in bytes
#port = 50005
port = 9012
MSGLEN = 8192  # fixed frame size for the socket protocol
#add = "18.179.223.246"
#add = "127.0.0.1"
add = "ec2-18-179-223-246.ap-northeast-1.compute.amazonaws.com"


class AudioRecorder_Player:
    """ A Class For Audio: records from the mic, streams playback from the
    server, uploads WAV files, and receives transcription text. """

    def __init__(self):
        self.audio_file = ""
        # Event used as a stop flag for record/playback loops.
        self.paused = threading.Event()
        self.CHUNK = 4096
        self.FORMAT = pyaudio.paInt16  # 16bit
        self.CHANNELS = 1  # monaural
        self.fs = 48000            # sample rate (Hz)
        self.silent_th = 2         # seconds of silence that end a take
        self.threshold = 0.1       # normalized amplitude gate for voice
        self.rec_on = 0
        self.pac = bytes()         # outgoing packet accumulator
        self.sig_len = 2           # minimum take length (seconds) worth sending
        self.off_set = 0
        self.MSGlen = 0
        self.loading = 0           # 1 while a background chunk download runs
        self.seek = 0              # 1 when the user moved the seek bar
        self.play_stop = 0
        self.ProgressBar = None
        self.popup = None

    def recordAudio(self,box):
        """Record mic audio; on silence, ship takes longer than sig_len to the server."""
        stop_counter = 0
        length = 0
        pa = pyaudio.PyAudio()
        self.box = box
        with open('Config.json') as f:
            df = json.load(f)
        mic_id = df['mic_id']
        stream = pa.open(rate=self.fs, channels=self.CHANNELS, format=self.FORMAT,
                         input=True, input_device_index= mic_id,
                         frames_per_buffer=self.CHUNK)
        while True:
            print('stand-by')
            # Grab one chunk of audio.
            data = stream.read(self.CHUNK)
            # Convert to ndarray, normalized to [-1, 1].
            x = np.frombuffer(data, dtype="int16")
            x = x / 32768.0
            # Prepend a header: sample rate, sample width (2), channel count.
            self.pac += self.fs.to_bytes(4,'big')
            self.pac += int(2).to_bytes(2,'big')
            self.pac += self.CHANNELS.to_bytes(2,'big')
            # Voice detected: keep recording until silence lasts silent_th seconds.
            if x.max() > self.threshold:
                self.pac += data
                length += 1
                while True:
                    data = stream.read(self.CHUNK)
                    self.pac += data
                    length += 1
                    x = np.frombuffer(data, dtype="int16") / 32768.0
                    if x.max() <= self.threshold:
                        stop_counter += 1
                        # Below threshold for silent_th seconds: end this take.
                        if stop_counter >= (self.fs * self.silent_th / self.CHUNK):
                            stop_counter = 0
                            # Send only takes longer than sig_len seconds.
                            if length * self.CHUNK > self.fs * self.sig_len:
                                run_thread(self.recieve_text,[WAV,self.pac])
                            self.pac = bytes()
                            break
            if self.paused.is_set():
                # Stop recording and release the device.
                stream.stop_stream()
                stream.close()
                pa.terminate()
                # Reset the flag to its initial state.
                self.paused.clear()
                break

    def playAudio(self,wav_id):
        """Stream WAV data for wav_id from the server and play it, driving the seek bar."""
        pa = pyaudio.PyAudio()
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
            client.connect((add, port))
            pac = wav_id.to_bytes(5,'big')
            send_pac(client,PLAY,pac,None)
            print('ファイル名送信完了')
            r_cmd,MSG = recieve_pac(client)
            print('ファイル情報受け取り完了')
            # Header: framerate(4) | samplewidth(2) | channels(2) | nframes(rest).
            framerate = int.from_bytes(MSG[0:4], 'big')
            self.samplewidth = int.from_bytes(MSG[4:6], 'big')
            nchanneles = int.from_bytes(MSG[6:8],'big')
            nframes = int.from_bytes(MSG[8:],'big')
            # Seek bar setup.
            print('NFRAMES:',nframes)
            self.seek_bar = self.PlayB.parent.children[0]
            self.seek_bar.max = nframes
            self.seek_bar.min = 0
            self.seek_bar.value = 0
            self.off_set = 0
            self.loading = 0
            self.pause_c = 0
            print("STREAMING")
            print("Channel num : ", nchanneles)
            print("Sample width : ", self.samplewidth)
            print("Sampling rate : ", framerate)
            stream = pa.open(rate=framerate, channels=nchanneles,
                             format=pa.get_format_from_width(self.samplewidth),
                             output=True, frames_per_buffer=self.CHUNK)
            # Acknowledge receipt of the header.
            send_pac(client,PLAY,'ok'.encode(),None)
            print('first CHUNK recieve')
            self.r_cmd,MSG = recieve_pac(client)
            print('最初のチャンク受け取り完了')
            if self.r_cmd == 1:
                # r_cmd == 1 means this was the only chunk: play it directly.
                stream.write(MSG)
            else:
                # NOTE(review): 'int24' is not a valid numpy dtype — the
                # non-16-bit path would raise; confirm intended behavior.
                if self.samplewidth == 2:
                    self.data_array = np.frombuffer(MSG,dtype = 'int16')
                else:
                    self.data_array = np.frombuffer(MSG,dtype = 'int24')
                baffer_pos = 0
                # Double-buffered playback: play one half while the other downloads.
                while self.r_cmd == 0:
                    print('PLAY!')
                    print('start offset:', self.off_set)
                    DA = self.data_array[baffer_pos:baffer_pos+int(BAFFER/2/self.samplewidth)]
                    for i in range(0,int(BAFFER/2/self.CHUNK/2)):
                        stream.write(DA[int(i*self.CHUNK*2/self.samplewidth):int((i+1)*self.CHUNK*2/self.samplewidth)].tobytes())
                        self.off_set += self.CHUNK*2/self.samplewidth
                        self.seek_bar.value += self.CHUNK*2/self.samplewidth
                        # Seek bar moved by the user: abort this buffer and re-seek.
                        if self.seek_bar.value != self.off_set:
                            self.off_set = self.seek_bar.value
                            self.seek = 1
                            print('break')
                            break
                        if self.paused.is_set():
                            # Paused: poll once a second; give up after 5s.
                            while self.paused.is_set():
                                print('pausing')
                                time.sleep(1)
                                self.pause_c += 1
                                if self.pause_c >=5:
                                    self.r_cmd = 1
                                    self.paused.clear()
                                    break
                            else:
                                continue
                            break
                    print('end offset:',self.off_set)
                    if self.r_cmd ==0:
                        print('NEXT CHUNK recieve')
                        if self.loading == 0:
                            if self.seek == 1:
                                # Seek: synchronously refill from the new offset.
                                print('SEEK')
                                baffer_pos = 0
                                self.streaming(client,baffer_pos)
                            else:
                                # Prefetch the next half-buffer in the background.
                                run_thread(self.streaming,[client,baffer_pos])
                                if baffer_pos == 0:
                                    baffer_pos = int(BAFFER/2/self.samplewidth)
                                else:
                                    baffer_pos = 0
                        else:
                            # Previous download still running: wait for it.
                            while self.loading == 1:
                                print('loading')
                                time.sleep(1)
                # Final partial buffer, unless playback timed out while paused.
                if self.pause_c < 5:
                    if baffer_pos == 0:
                        baffer_pos = int(BAFFER/2/self.samplewidth)
                    else:
                        baffer_pos = 0
                    print('LAST CHUNK PLAY!!')
                    DA = self.data_array[baffer_pos:baffer_pos+self.MSGlen]
                    # Play chunk by chunk, then the trailing remainder.
                    for i in range(0,int(BAFFER/2/self.CHUNK/2)):
                        stream.write(DA[int(i*self.CHUNK*2/self.samplewidth):int((i+1)*self.CHUNK*2/self.samplewidth)].tobytes())
                        self.off_set += self.CHUNK*2/self.samplewidth
                        self.seek_bar.value += self.CHUNK*2/self.samplewidth
                    stream.write(DA[int((i+1)*self.CHUNK*2/self.samplewidth):self.MSGlen].tobytes())
                    self.seek_bar.value = nframes
            stream.close()
            pa.terminate()
            client.close()
            # Restore the play button to its idle state.
            self.PlayB.state = 'normal'
            self.PlayB.text = '再生'
            self.play_stop = 0
            self.pause_c = 0
            self.r_cmd = 0

    def recieve_text(self,type_ID,pac):
        """Send pac (if any) and receive transcription records, adding a
        Sentence_Layout row to self.box for each (wav_id, type, text) record."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
            client.connect((add, port))
            if pac:
                if self.ProgressBar:
                    send_pac(client,type_ID,pac,self.ProgressBar)
                else:
                    send_pac(client,type_ID,pac,None)
            if self.popup:
                self.popup.open()
            r_cmd,r_packet = recieve_pac(client)
            len_sum = 0
            # Each record: wav_id(5) | type(1) | text_len(5) | text(text_len).
            while True:
                wav_id =int.from_bytes(r_packet[len_sum:len_sum+5], 'big')
                type_ = int.from_bytes(r_packet[len_sum+5:len_sum+6], 'big')
                text_len = int.from_bytes(r_packet[len_sum+6:len_sum+11], 'big')
                text_r = r_packet[len_sum+11:len_sum+11+text_len]
                print('MSG_len:',len(r_packet))
                print('Id:',wav_id)
                print('test_len:',text_len)
                len_sum = len_sum+11+text_len
                text_r = text_r.decode('utf-8')
                text_r = clean_text(text_r)
                # Row height grows with wrapped text (~30 chars per line).
                S_height = 40
                for t in text_r:
                    if len(t) > 30:
                        S_height += math.ceil(len(t)/30)*40
                S_Layout = Sentence_Layout()
                S_Layout.height = S_height+20
                print('text_r',text_r)
                # Retry until the widget tree for the new row is built.
                while True:
                    try:
                        S_Layout.children[1].children[2].text ='\n'.join(text_r)
                        S_Layout.children[0].text = S_Layout.children[0].values[type_]
                        break
                    except IndexError:
                        print('Error')
                        # self.popup.content.children[0].text = 'An error occurred. Please try again.'
                S_Layout.y += self.box.ypos
                S_Layout.children[1].children[1].wav_id = wav_id
                self.box.add_widget(S_Layout)
                self.box.ypos += 50+S_height
                if self.box.ypos > self.box.height:
                    self.box.height = self.box.ypos+S_height
                if len_sum >= len(r_packet):
                    break
            if self.popup:
                self.popup.dismiss()

    def input_wav(self,fname):
        """Read a local WAV file and upload it (header + frames) for transcription."""
        waveFile = wave.open(fname, 'r')
        buf = waveFile.readframes(-1)
        waveFile.close()
        # Read the WAV metadata.
        # NOTE(review): these getters are called after close(); Wave_read caches
        # the parameters so this works in CPython, but reading before close()
        # would be safer — confirm.
        # Channels: 1 for mono, 2 for stereo, 6 for 5.1ch.
        nchanneles = waveFile.getnchannels()
        # Bytes per sample: 2 -> 16bit, 3 -> 24bit, etc.
        self.samplewidth = waveFile.getsampwidth()
        # Sampling rate; 44.1k for a normal CD.
        framerate = waveFile.getframerate()
        # Number of audio frames.
        nframes = waveFile.getnframes()
        print("Channel num : ", nchanneles)
        print("Sample width : ", self.samplewidth)
        print("Sampling rate : ", framerate)
        print("Frame num : ", nframes)
        self.pac = bytes()
        self.pac += framerate.to_bytes(4,'big')
        self.pac += self.samplewidth.to_bytes(2,'big')
        self.pac += nchanneles.to_bytes(2,'big')
        self.pac += buf
        self.recieve_text(INPUT,self.pac)

    def streaming(self,client,baffer_pos):
        """Download the next half-buffer of audio into self.data_array at baffer_pos."""
        self.loading = 1
        if self.seek == 1:
            # header 1 tells the server to restart from the seek position.
            header = 1
            print('SEEK OFF SET:',int(self.off_set))
            self.seek = 0
        else:
            header = 0
        q = int(self.seek_bar.value).to_bytes(MSGLEN-2,'big')
        send_pac(client,header,q,None)
        r_cmd,MSG = recieve_pac(client)
        if self.samplewidth == 2:
            MSG = np.frombuffer(MSG,dtype = 'int16')
        else:
            MSG = np.frombuffer(MSG,dtype = 'int24')
        self.r_cmd = r_cmd
        self.MSGlen = len(MSG)
        print('BAFFERLEN',len(MSG))
        print('DOWNLOAD')
        self.data_array[baffer_pos:baffer_pos+len(MSG)] = MSG
        self.loading = 0
        if r_cmd == 1:
            # Server signalled the final chunk.
            print('LAST CHUNK RECIEVE')
            client.close()


def run_thread(target,args):
    """Run target(*args) on a daemon thread so the GUI stays responsive."""
    audio_thread = threading.Thread(target=target,args=args)
    audio_thread.setDaemon(True)
    audio_thread.start()


def send_pac(client,type_ID,q,ProgressBar):
    """Send payload q framed as: [type(2)|len(MSGLEN-2)] then MSGLEN-byte chunks.
    Optionally drives a Kivy ProgressBar and closes its popup when done."""
    print('connect to' , add, 'port:' ,port)
    print(len(q))
    if ProgressBar:
        # ProgressBar.max = len(q)+(int(len(q)/MSGLEN)+1)*2
        ProgressBar.max = len(q)+2
        ProgressBar.value = 0
    offset = 0
    packet = bytearray(MSGLEN)
    packet[0:2] = type_ID.to_bytes(2,'big')
    packet[2:] = len(q).to_bytes(MSGLEN-2,'big')
    client.send(packet)
    while offset < len(q):
        packet[:] = q[offset:offset+MSGLEN]
        send_len = client.send(packet)
        offset += send_len
        if ProgressBar:
            ProgressBar.value = offset
    if ProgressBar:
        ProgressBar.value = len(q)
        ProgressBar.parent.popup_close()
    print('sended')


def recieve_pac(client):
    """Receive one framed message; returns (r_cmd, payload bytearray)."""
    MSGLEN = 8192
    data_len = 0
    offset = 0
    data_info = bytes()
    # First, read the fixed-size header frame: [cmd(2)|payload_len(MSGLEN-2)].
    while data_len < MSGLEN:
        tmp = client.recv(MSGLEN)
        data_info += tmp
        data_len = len(data_info)
        print('datalen_rec')
    r_cmd = int.from_bytes(data_info[0:2], 'big')
    data_len = int.from_bytes(data_info[2:MSGLEN],'big')
    MSG = bytearray(data_len)
    # Any bytes past the header already belong to the payload.
    offset += len(data_info)-MSGLEN
    MSG[:offset]=data_info[MSGLEN:]
    while offset < data_len:
        start_t = time.time()
        tmp = client.recv(MSGLEN)
        MSG[offset:offset+len(tmp)] = tmp
        offset += len(tmp)
    return r_cmd, MSG


def clean_text(text):
    """Strip spaces from transcribed text and split it into sentences."""
    ngw= [' ','　']
    text = text.replace(' ','')
    text = text.replace('　','')  # NOTE(review): presumably full-width space — confirm against original
    text = text.split('\n')
    tc = []
    if len(text) > 1:
        # Multi-line text: re-split on '。' sentence terminators.
        for t in text:
            tmp = t.split('。')[:-1]
            for t in tmp:
                tc.append(t)
        return tc
    return text


class REC_Button(ToggleButton):
    """Toggle button that starts/stops the background mic recorder."""

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.CHUNK = 4094
        self.FORMAT = pyaudio.paInt16  # 16bit
        self.CHANNELS = 1  # monaural
        self.fs = 16000
        self.silent_th = 2
        self.threshold = 0.1
        self.rec_on = 0
        self.id = 0
        self.pac = bytes()
        self.sig_len = 2
        self.recorder = AudioRecorder_Player()
        self.id = 0

    def on_press(self):
        if self.state == 'down':
            # Start recording into the sentence box widget.
            run_thread(self.recorder.recordAudio,[self.parent.parent.children[1].children[0],])
            self.text = '録音中'
        else:
            self.recorder.paused.set()
            self.text = '録音開始'


class Sentence(TextInput):
    pass


class Type_Spinner(Spinner):
    pass


class Play_Button(ToggleButton):
    """Per-sentence play/pause button bound to a server-side wav_id."""

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.player = AudioRecorder_Player()

    def on_press(self):
        pass

    def on_release(self):
        if self.player.play_stop == 0:
            # Idle -> start playback.
            self.player.PlayB = self
            self.state = 'down'
            run_thread(self.player.playAudio,[self.wav_id,])
            self.player.play_stop = 1
            self.text = '停止'
        else:
            if self.player.paused.is_set():
                # Resume.
                self.player.paused.clear()
                self.state == 'down'  # NOTE(review): comparison, not assignment — likely a bug, left as-is
                self.text = '停止'
            else:
                # Pause.
                self.player.paused.set()
                self.state = 'normal'
                self.text = '再生'


class SettingMenu(BoxLayout):
    popup_close = ObjectProperty(None)


class LoadingMenu(BoxLayout):
    popup_close = ObjectProperty(None)


class InputMenu(BoxLayout):
    """File-chooser popup content for uploading an existing WAV file."""
    popup_close = ObjectProperty(None)

    def wav_send(self):
        print(self.children[1].selection)
        if self.children[1].selection:
            fname = self.children[1].selection[0]
            content = LoadingMenu(popup_close=self.popup_close)
            self.player.popup = Popup(title='', content=content, size_hint=(0.3, 0.3), auto_dismiss=False)
            self.player.ProgressBar = self.children[2]
            # Upload on a separate thread so the UI does not block.
            audio_thread = threading.Thread(target=self.player.input_wav,args=(fname,))
            audio_thread.start()

    def set_player(self,player):
        self.player = player


class SummaryMenu(BoxLayout):
    """Minutes popup: shows full text / summary / tasks from text0.json and
    can send the edited minutes (GIJI) back to the server."""
    popup_close = ObjectProperty(None)

    def text_output(self):
        with open('text0.json') as f:
            df = json.load(f)
        if 'texts' in df:
            return str(df['texts'])
        else:
            return

    def summary_output(self):
        with open('text0.json') as f:
            df = json.load(f)
        if 'summary' in df:
            return str(df['summary'])
        else:
            return

    def task_output(self):
        with open('text0.json') as f:
            df = json.load(f)
        if 'task' in df:
            return str(df['task'])
        else:
            return

    def send_giji(self):
        # Body text.
        if self.children[1].children[4].text:
            t = self.children[1].children[4].text.encode()
            pac = int(len(t)).to_bytes(4,'big')
            pac += t
        else:
            pac = int(0).to_bytes(4,'big')
        # Summary.
        if self.children[1].children[2].text:
            t = self.children[1].children[2].text.encode()
            pac += int(len(t)).to_bytes(4,'big')
            pac += t
        else:
            pac += int(0).to_bytes(4,'big')
        # Tasks.
        if self.children[1].children[0].text:
            # NOTE(review): reads children[2] under the children[0] guard —
            # probably should be children[0]; left as-is.
            t = self.children[1].children[2].text.encode()
            pac += int(len(t)).to_bytes(4,'big')
            pac += t
        else:
            pac += int(0).to_bytes(4,'big')
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
            client.connect((add, port))
            send_pac(client,GIJI,pac,None)
        return


class TestInput(TextInput):
    pass


class input_spinner(Spinner):
    """Spinner listing input (microphone) devices; persists choice to Config.json."""

    def __init__(self,**kwargs):
        self.set_miclist()
        super().__init__(**kwargs)

    def set_miclist(self):
        p = pyaudio.PyAudio()
        mic_list = [x for x in range(0,p.get_device_count())]
        name_list = []
        for index in range(0, p.get_device_count()):
            if p.get_device_info_by_index(index)['maxInputChannels'] > 0:
                mic_list[index] = p.get_device_info_by_index(index)['name']
                name_list.append(p.get_device_info_by_index(index)['name'])
        self.values = name_list
        with open('Config.json') as f:
            df = json.load(f)
        self.text = mic_list[df['mic_id']]
        self.mic_list = mic_list

    def on_text(self,text,a):
        with open('Config.json') as f:
            df = json.load(f)
        df['mic_id']= [i for i,k in enumerate(self.mic_list) if k == self.text][0]
        # NOTE(review): writes 'config.json' (lower case) but reads 'Config.json';
        # case-sensitive filesystems would diverge — confirm intent.
        with open('config.json', 'w') as f:
            json.dump(df, f, ensure_ascii=False)


class output_spinner(Spinner):
    """Spinner listing output (speaker) devices; persists choice to Config.json."""

    def __init__(self,**kwargs):
        self.set_splist()
        super().__init__(**kwargs)

    def set_splist(self):
        p = pyaudio.PyAudio()
        sp_list = [x for x in range(0,p.get_device_count())]
        name_list = []
        for index in range(0, p.get_device_count()):
            if p.get_device_info_by_index(index)['maxOutputChannels'] > 0:
                sp_list[index] = p.get_device_info_by_index(index)['name']
                name_list.append(p.get_device_info_by_index(index)['name'])
        self.values = name_list
        with open('Config.json') as f:
            df = json.load(f)
        self.text = sp_list[df['sp_id']]
        self.sp_list = sp_list

    def on_text(self,text,a):
        with open('Config.json') as f:
            df = json.load(f)
        df['sp_id']= [i for i,k in enumerate(self.sp_list) if k == self.text][0]
        with open('config.json', 'w') as f:
            json.dump(df, f, ensure_ascii=False)


class Setting_Button(Button):
    def on_press(self):
        content = SettingMenu(popup_close=self.popup_close)
        self.popup = Popup(title='設定画面', content=content, size_hint=(1, 1), auto_dismiss=False)
        self.popup.open()

    def popup_close(self):
        self.popup.dismiss()


class Summary_Button(Button):
    """Collects all sentence texts, requests a server-side summary (SUM),
    stores texts/summary/tasks in text0.json, then opens the minutes popup."""

    def on_press(self):
        result = ''
        suma = ''
        task = ''
        imp = ''
        print('Sentence_Lay:',self.parent.parent.children[1].children[0].children[1])
        if self.parent.parent.children[1].children[0].children[1]:
            # Gather non-empty sentence texts (children are in reverse order).
            tmp = [a.children[1].children[2].text for a in reversed(self.parent.parent.children[1].children[0].children) if a.children[1].children[2].text != '']
            task = "\n".join([a.children[1].children[2].text for a in reversed(self.parent.parent.children[1].children[0].children) if a.children[0].text == 'タスク'])
            print('Giji list:', tmp)
            result = "\n".join(tmp)
            if len(tmp) > 1:
                pac = bytes()
                tmp ="\n".join(tmp)
                pac += tmp.encode()
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
                    client.connect((add, port))
                    send_pac(client,SUM,pac,None)
                    rcmd, r_packet = recieve_pac(client)
                    print('reieved')
                suma = r_packet.decode()
                # Split the summary into sentences ending with '。'.
                suma = suma.split('。')
                suma = [a+ '。' for a in suma]
                suma = suma[:-1]
                # Sentences marked important, unless already in the summary.
                imp = [a.children[1].children[2].text for a in reversed(self.parent.parent.children[1].children[0].children) if a.children[0].text == '重要' and a.children[1].children[2].text not in suma]
                imp = "\n".join(imp)+ "\n".join(suma)
        with open('text0.json') as f:
            df = json.load(f)
        df['texts']=result
        df['summary'] = imp
        df['task'] = task
        with open('text0.json', 'w') as f:
            json.dump(df, f, ensure_ascii=False)
        content = SummaryMenu(popup_close=self.popup_close)
        self.popup = Popup(title='議事録', content=content, size_hint=(1, 1), auto_dismiss=False)
        self.popup.open()

    def popup_close(self):
        self.popup.dismiss()


class Input_Button(Button):
    def on_press(self):
        audio_player = AudioRecorder_Player()
        audio_player.box = self.parent.parent.children[1].children[0]
        content = InputMenu(popup_close=self.popup_close,)
        content.set_player(audio_player)
        self.popup = Popup(title='ファイル選択', content=content, size_hint=(1, 1), auto_dismiss=False)
        self.popup.open()

    def popup_close(self):
        self.popup.dismiss()


class Text_Layout(FloatLayout):
    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.ypos = 0  # running y offset for stacked sentence rows


class Sentence_Layout(BoxLayout):
    pass


class Seek_Bar(Slider):
    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.touch_sl = False

    def on_touch_up(self, touch):
        # Latch that the user touched the seek bar at least once.
        if self.collide_point(*touch.pos) and self.touch_sl == False:
            self.touch_sl = True


class Button_Layout(BoxLayout):
    pass


class MyRoot(BoxLayout):
    orientation='vertical'

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.rec_on = 0


class Meeting4App(App):
    """Kivy application entry point."""

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.id = 0  # y offset for dynamically added text rows

    def add_text(self,box,a):
        # NOTE(review): speech_text is not defined in this file — this callback
        # is only reachable via the commented-out Clock.schedule_interval below.
        text,self.dirnum = speech_text(self.dirnum)
        def on_enter(ti):
            print("on_enter[%s]" % (ti.text))
            print(ti.cursor[0])
        if text:
            for t in text:
                textinput = Sentence(text=str(t))
                textinput.y = self.id
                textinput.bind(on_text_validate=on_enter)
                text_play = FloatLayout()
                text_play.add_widget(textinput)
                pb = Play_Button()
                pb.file_name = text[t][1]
                pb.y = self.id
                text_play.add_widget(pb)
                box.add_widget(text_play)
                self.id += 60

    def build(self):
        root = MyRoot()
        #Clock.schedule_interval(partial(self.add_text,root.children[1].children[0]), 1.0 / 60.0)
        return root


if __name__ == '__main__':
    # Device discovery: collect valid input/output device indices.
    p = pyaudio.PyAudio()
    mic_ids = []
    sp_ids = []
    for index in range(0, p.get_device_count()):
        if p.get_device_info_by_index(index)['maxInputChannels'] > 0:
            mic_ids.append(index)
        if p.get_device_info_by_index(index)['maxOutputChannels'] > 0:
            sp_ids.append(index)
    # NOTE(review): Config.json is unconditionally reset to defaults here,
    # then re-read — any saved device choice is overwritten on startup.
    df = dict()
    df['mic_id'] = 0
    df['sp_id'] = 0
    with open('./Config.json', 'w') as f:
        json.dump(df, f, ensure_ascii=False)
    with open('./Config.json') as f:
        df = json.load(f)
    if df['mic_id'] not in mic_ids:
        df['mic_id'] = mic_ids[0]
    if df['sp_id'] not in sp_ids:
        df['sp_id'] = sp_ids[0]
    with open('./Config.json', 'w') as f:
        json.dump(df, f, ensure_ascii=False)
    pac = bytes(1)
    # Session start handshake (SET) with the server.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((add, port))
        send_pac(client,SET,pac,None)
    app = Meeting4App()
    #app.dirnum = len([f.name for f in os.scandir('../Server/wav_file') if not f.name.startswith('.')])
    app.run()
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('malware_toolkit', '0015_feature_functions'), ] operations = [ migrations.RemoveField( model_name='feature_functions', name='feature', ), migrations.RemoveField( model_name='feature_functions', name='function', ), migrations.DeleteModel( name='Feature_Functions', ), ]
# Read n from stdin and print the n-th Fibonacci number
# (0-indexed: fib(0)=0, fib(1)=1).
n = int(input())
a, b = 0, 1
for _ in range(n):
    a, b = b, a + b
print(a)
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Nicolas VERDIER (contact@n1nj4.eu)
# Pupy is under the BSD 3-Clause license. see the LICENSE file at the root of the project for the detailed licence terms
"""
abstraction layer over rpyc streams to handle different transports and integrate obfsproxy pluggable transports
"""
# NOTE: Python 2 source (print statements); recovered from a line-collapsed
# dump — indentation reconstructed.

__all__=["PupyAsyncTCPStream", "PupyAsyncUDPStream"]

from rpyc.core.stream import Stream
from ..buffer import Buffer
import sys, socket, time, errno, logging, traceback, string, random
from rpyc.lib.compat import select, select_error, BYTES_LITERAL, get_exc_errno, maxint
from PupySocketStream import addGetPeer

try:
    import multiprocessing
    Process=multiprocessing.Process
    Lock=multiprocessing.Lock
    Event=multiprocessing.Event
except ImportError:
    # multiprocessing not available on android ?
    import threading
    Process=threading.Thread
    Lock=threading.Lock
    Event=threading.Event


# NOTE(review): this local class shadows the addGetPeer imported above from
# PupySocketStream — presumably intentional fallback; confirm.
class addGetPeer(object):
    """ add some functions needed by some obfsproxy transports"""
    def __init__(self, peer):
        self.peer=peer
    def getPeer(self):
        return self.peer


def monitor(st):
    """Debug helper: periodically dump the sizes/contents of a stream's buffers."""
    while True:
        print "upstream: %s %s"%(len(st.upstream),repr(st.upstream.peek()))
        print "downstream: %s %s"%(len(st.downstream), repr(st.downstream.peek()))
        print "buf_in: %s %s"%(len(st.buf_in), st.buf_in.peek())
        print "buf_out: %s %s"%(len(st.buf_out), st.buf_out.peek())
        time.sleep(3)


class PupyAsyncStream(Stream):
    """ Pupy asynchrone stream implementation.

    Polling-based stream: the client side runs a poller loop that pushes
    pending outgoing data (or an empty keep-alive message) to the server via
    pull_data() and feeds whatever comes back through the transport into the
    upstream buffer. Subclasses implement pull_data() over TCP or UDP.
    """

    def __init__(self, dstconf, transport_class, transport_kwargs):
        super(PupyAsyncStream, self).__init__()
        self.active=True
        # buffers for streams (raw protocol data, tagged with a session cookie)
        self.buf_in=Buffer()
        self.buf_out=Buffer()
        self.buf_tmp=Buffer()
        self.cookie=''.join(random.SystemRandom().choice("abcdef0123456789") for _ in range(32))
        self.buf_in.cookie=self.cookie
        self.buf_out.cookie=self.cookie
        self.buf_tmp.cookie=self.cookie
        # buffers for transport (obfuscated/deobfuscated data)
        self.upstream=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.downstream=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.upstream_lock=Lock()
        self.downstream_lock=Lock()
        self.transport=transport_class(self, **transport_kwargs)
        # Poll interval backs off from 0 up to max_pull_interval seconds.
        self.max_pull_interval=2
        self.pull_interval=0
        self.pull_event=Event()
        self.MAX_IO_CHUNK=32000*100 #3Mo because it is a async transport
        self.client_side=self.transport.client
        if self.client_side:
            # Only the client actively polls the server.
            self.poller_thread=Process(target=self.poller_loop)
            self.poller_thread.daemon=True
            self.poller_thread.start()
        self.on_connect()

    def on_connect(self):
        self.transport.on_connect()

    def close(self):
        """closes the stream, releasing any system resources associated with it"""
        print "closing stream !"
        self.active=False
        self.buf_in.cookie=None
        self.buf_out.cookie=None

    @property
    def closed(self):
        """tests whether the stream is closed or not"""
        return not self.active

    def fileno(self):
        """returns the stream's file descriptor"""
        # No underlying persistent socket: polling transport has no fd.
        raise NotImplementedError()

    def poll(self, timeout):
        """indicates whether the stream has data to read (within *timeout* seconds)"""
        return (len(self.upstream) > 0) or self.closed

    def read(self, count):
        """Block until *count* deobfuscated bytes are available, triggering
        pulls while waiting. Returns the bytes, or None on internal error
        (exceptions are swallowed and logged)."""
        try:
            #print "reading :%s"%count
            while True:
                #with self.downstream_lock: #because downstream write in upstream
                if not self.active:
                    raise EOFError("connexion closed")
                if len(self.upstream)>=count:
                    if not self.active:
                        raise EOFError("connexion closed")
                    #print "%s read upstream !"%count
                    return self.upstream.read(count)
                self.pull()
                time.sleep(0.01) #it seems we can actively wait here with only perf enhancement
                #if len(self.upstream)<count:
                #    self.upstream.wait(0.1)#to avoid active wait
        except Exception as e:
            logging.debug(traceback.format_exc())

    def pull_data(self, data):
        """ function called at each "tick" (poll interval). It takes the data
        to send, send it with a unique cookie, and must return the obfuscated
        data retrieved. """
        raise NotImplementedError()

    def pull(self):
        """ make a pull if we are on the client side, else do nothing """
        if not self.client_side:
            return
        # Reset the backoff and wake the poller immediately.
        self.pull_interval=0
        self.pull_event.set()

    def poller_loop(self):
        """Client-side loop: ship downstream data (or an empty keep-alive) to
        the server, feed the response into the transport, and back off the
        poll interval when idle."""
        empty_message=None
        while self.active:
            try:
                data_to_send=None
                if len(self.downstream)>0:
                    with self.upstream_lock:
                        data_to_send=self.downstream.read()
                else:
                    if empty_message is None :
                        # no data, let's generate an empty encoded message to pull
                        self.buf_tmp.drain()
                        self.transport.upstream_recv(self.buf_tmp)
                        empty_message=self.downstream.read()
                    data_to_send=empty_message
                received_data=b""
                try:
                    received_data=self.pull_data(data_to_send)
                except IOError as e:
                    print "IOError: %s"%e
                    print "closing connection"
                    self.close()
                with self.downstream_lock:
                    if received_data:
                        self.buf_in.write(received_data)
                        self.transport.downstream_recv(self.buf_in)
                if not self.pull_event.wait(self.pull_interval):
                    # Timed out (no explicit pull requested): back off.
                    self.pull_interval+=0.01
                    if self.pull_interval>self.max_pull_interval:
                        self.pull_interval=self.max_pull_interval
                #print "pull interval: %s"%self.pull_interval
                self.pull_event.clear()
            except Exception as e:
                logging.debug(traceback.format_exc())
                time.sleep(self.pull_interval)

    def write(self, data):
        """Queue *data* for the server and request an immediate pull."""
        if not self.active:
            raise EOFError("connexion closed")
        with self.upstream_lock:
            self.buf_out.write(data)
            self.transport.upstream_recv(self.buf_out)
        self.pull()


class PupyAsyncTCPStream(PupyAsyncStream):
    """Async stream whose each pull is one short-lived TCP exchange."""

    def __init__(self, dstconf, transport_class, transport_kwargs={}):
        self.hostname=dstconf[0]
        self.port=dstconf[1]
        super(PupyAsyncTCPStream, self).__init__(dstconf, transport_class, transport_kwargs)

    def pull_data(self, data):
        # Try every resolved address family until one connects (IPv4/IPv6).
        s = None
        last_exc=None
        for res in socket.getaddrinfo(self.hostname, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
            except socket.error as msg:
                s = None
                last_exc=msg
                continue
            try:
                s.connect(sa)
            except socket.error as msg:
                s.close()
                s = None
                last_exc=msg
                continue
            break
        if s is None:
            raise last_exc
        #print "sending %s"%repr(data)
        s.sendall(data)
        total_received=b""
        #print "receiving ..."
        # Read the full response until EOF or a 15s timeout.
        s.settimeout(15)
        while True:
            try:
                data = s.recv(4096)
                if not data:
                    break
                total_received+=data
            except socket.timeout:
                break
        #print "received: %s"%repr(total_received)
        s.close()
        return total_received


class PupyAsyncUDPStream(PupyAsyncStream):
    """Async stream whose each pull is one UDP exchange."""

    def __init__(self, dstconf, transport_class, transport_kwargs={}):
        self.hostname=dstconf[0]
        self.port=dstconf[1]
        super(PupyAsyncUDPStream, self).__init__(dstconf, transport_class, transport_kwargs)

    def pull_data(self, data):
        s = None
        last_exc=None
        for res in socket.getaddrinfo(self.hostname, self.port, socket.AF_UNSPEC, socket.SOCK_DGRAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
            except socket.error as msg:
                s = None
                last_exc=msg
                continue
            try:
                s.connect(sa)
            except socket.error as msg:
                s.close()
                s = None
                last_exc=msg
                continue
            break
        if s is None:
            raise last_exc
        #print "sending %s"%repr(data)
        s.sendall(data)
        total_received=b""
        #print "receiving ..."
        s.settimeout(15)
        while True:
            try:
                data = s.recv(4096)
                if not data:
                    break
                total_received+=data
            except socket.timeout:
                break
        #print "received: %s"%repr(total_received)
        s.close()
        return total_received
""" n! means n × (n − 1) × ... × 3 × 2 × 1 For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! """ import math temp = str(format(math.factorial(100))) #removes scientific notation sum = 0 for i in range(0,len(temp)): sum += int(temp[i]) print(sum)
#!/usr/bin/env python
# coding: utf-8
"""Pulls Apple-Health-derived tables from Postgres and joins daily sleep
with meditation sessions into rolling-window features, exporting CSVs."""

import pandas
import psycopg2
from psycopg2 import sql
from configparser import ConfigParser
import numpy

path = 'C:/Users/tonyr/Desktop/Self Education/Production Files/apple_health_export/'
import os
prodfiles = 'C:/Users/tonyr/desktop/Self Education/Production Files/'
os.chdir(prodfiles)


class data_analysis():

    def __init__(self):
        # No state to initialise (the original's `self = self` was a no-op).
        pass

    def config(self, filename='database.ini', section='postgresql'):
        """Read the connection parameters for `section` from an ini file.

        Raises if the section is missing so misconfiguration fails loudly.
        """
        parser = ConfigParser()
        parser.read(filename)
        db = {}
        if parser.has_section(section):
            for key, value in parser.items(section):
                db[key] = value
        else:
            raise Exception('Section {0} not found in the {1} file'.format(section, filename))
        return db

    def connect(self, table):
        """Return the full contents of `table` as a DataFrame.

        Returns an empty DataFrame when the connection or query fails
        (the error is printed, matching the original best-effort behavior).
        """
        DF = pandas.DataFrame()
        conn = None
        try:
            # Quote the table name as an SQL identifier instead of raw
            # string concatenation (avoids injection / odd identifiers).
            command = sql.SQL("SELECT * FROM {}").format(sql.Identifier(table))
            params = self.config(filename='database.ini', section='postgresql')
            conn = psycopg2.connect(**params)
            cur = conn.cursor()
            cur.execute(command)
            colnames = [desc[0] for desc in cur.description]
            # Passing columns= here also works for an empty result set; the
            # original's separate `DF.columns = colnames` raised on 0 rows.
            DF = pandas.DataFrame(cur.fetchall(), columns=colnames)
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)
        finally:
            if conn is not None:
                conn.close()
        return DF

    def mindfulg(self):
        """Meditation hours per day, split by whether the session ended
        within 30 minutes of falling asleep."""
        d = self.connect('mindfulwithsleepanalysis')
        mindfulgrouped = d.groupby(
            ['TheDate', 'endedWithinThirtyMinsOfSleeping'],
            as_index=False)['unithours'].sum()
        return mindfulgrouped

    def changeMeditationValues(self, row):
        """Map the 0/1 flag to a human-readable label.

        NOTE(review): "Mediation" typo kept byte-identical -- downstream
        CSV consumers may match on the exact label; confirm before fixing.
        """
        if row['endedWithinThirtyMinsOfSleeping'] == 1:
            return 'Meditation Ended Within 30 Minutes of Sleeping'
        elif row['endedWithinThirtyMinsOfSleeping'] == 0:
            return 'Mediation Ended More than 30 Minutes before Sleeping'

    def sleep(self):
        """Daily sleep hours from the grouped CSV export with rolling stats,
        indexed by date and restricted to data after 2019-07-01."""
        path = 'C:/Users/tonyr/Desktop/Self Education/Production Files/apple_health_export/'
        sleep = pandas.read_csv(path + 'Grouped/grouped_SleepAnalysis.csv')[['sum', 'TheDate']]
        sleep = sleep.rename(columns={"sum": "SleepTime",
                                      "TheDate": "SleepDate"})
        sleep['SleepDate'] = pandas.to_datetime(sleep['SleepDate'])
        sleep['Sleep_Rolling_ThreeSum'] = sleep['SleepTime'].rolling(window=3).sum().fillna(0)
        sleep['Sleep_Rolling_ThreeDayAverage'] = sleep['SleepTime'].rolling(window=3).mean().fillna(0)
        sleep['Sleep_Rolling_FiveDayAverage'] = sleep['SleepTime'].rolling(window=5).mean().fillna(0)
        # presumably data before July 2019 is incomplete -- TODO confirm cutoff
        sleep = sleep[sleep['SleepDate'] > '2019-07-01']
        sleep.sort_values(by=['SleepDate'], inplace=True)
        sleep.set_index('SleepDate', inplace=True)
        return sleep

    def meditation(self):
        """Daily meditation hours indexed by date (from mindfulg)."""
        meditation = self.mindfulg()
        meditation = meditation.rename(columns={"unithours": "MeditationTime",
                                                "TheDate": "MeditationDate"})
        meditation.sort_values(by=['MeditationDate'], inplace=True)
        meditation.set_index('MeditationDate', inplace=True)
        return meditation

    def sm(self):
        """Join sleep and meditation by date and derive rolling features.

        Rows with SleepTime <= 1 hour are dropped as unreliable tracking.
        """
        sleep = self.sleep()
        meditation = self.meditation()
        sm = sleep.join(meditation, lsuffix='_sleep', rsuffix='_meditation').fillna(0)
        sm = sm[sm['SleepTime'] > 1]
        # 1 on days with any meditation, 0 otherwise.
        sm['MeditationCategorical'] = numpy.where(sm['MeditationTime'] == 0, 0, 1)
        sm['MeditationCategorical_Rolling_ThreeSum'] = sm['MeditationCategorical'].rolling(window=3).sum().fillna(0)
        sm['MeditationCategorical_Rolling_FiveSum'] = sm['MeditationCategorical'].rolling(window=5).sum().fillna(0)
        sm['Meditation_Rolling_ThreeSum'] = sm['MeditationTime'].rolling(window=3).sum().fillna(0)
        sm['Meditation_Rolling_FiveSum'] = sm['MeditationTime'].rolling(window=5).sum().fillna(0)
        sm['Meditation_Rolling_ThreeDayAverage'] = sm['MeditationTime'].rolling(window=3).mean().fillna(0)
        sm['Meditation_Rolling_FiveDayAverage'] = sm['MeditationTime'].rolling(window=5).mean().fillna(0)
        sm['endedWithinThirtyMinsOfSleeping'] = sm.apply(
            lambda row: self.changeMeditationValues(row), axis=1)
        return sm


if __name__ == '__main__':
    path = 'C:/Users/tonyr/Desktop/Self Education/Production Files/apple_health_export/'
    a = data_analysis()
    sm = a.sm()
    sleephappy = a.connect('sleephappy')
    sleephappy.to_csv(prodfiles + 'sleephappy.csv', index=False)
    heartrate = a.connect('heartratecompare')
    heartrate_concat = a.connect('heartratecompareconcat')
    heartrate_concat.to_csv(prodfiles + 'heartratecompare_concat.csv', index=False)
    calories_burned = a.connect('caloriesburned')
    calories_burned.to_csv(prodfiles + 'caloriesburned.csv', index=False)
    mindfulgrouped = a.mindfulg().groupby(
        ['TheDate', 'endedWithinThirtyMinsOfSleeping'],
        as_index=False)['unithours'].sum()
import pygame width = 640 height = 480 radius = 100 stroke = 1 pygame.init() window = pygame.display.set_mode((width,height)) window.fill(pygame.Color(255,255,255)) while True: pygame.draw.circle(window, pygame.Color(255,0,0), (width/2, height/2), radius,stroke) pygame.display.update()
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-10-15 15:19 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('workshop', '0001_initial'), ] operations = [ migrations.AddField( model_name='jobtype', name='description', field=models.CharField(blank=True, max_length=15000, null=True, verbose_name='Descrição'), ), migrations.AddField( model_name='jobtype', name='slug_name', field=models.SlugField(default=1, max_length=200, unique=True), preserve_default=False, ), migrations.AlterField( model_name='job', name='job_type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='job_type', to='workshop.JobType', verbose_name='Tipo'), ), ]
# 0 ~ 9 : ball or goal # . : plane field # # : object # Returns: String[] # column = x, row = y import random class RollingBalls: def restorePattern(self, start, target): ## initial process # W = len(start[0]) H = len(start) start = list(start) target = list(target) # make outrange wall start.append("#" * W) target.append("#" * W) start = list(map(lambda s: s + '#', start)) target = list(map(lambda s: s + '#', target)) # functions def searchGoal(): res = [] for y in range(H): for x in range(W): now = target[y][x] if(ord(now) >= ord('0') and ord(now) <= ord('9')): res.append((x, y)) return res def isBall(c): res = False if c == '.' or c == '#' else True return res def isDirection(x_mv, y_mv): if x_mv == 1: return 0 elif y_mv == -1: return 1 elif x_mv == -1: return 2 elif y_mv == 1: return 3 else: return -1 def isBallCommand(x, y, x_mv, y_mv, res): rest = '.' while rest == '.': x += x_mv y += y_mv rest = start[y][x] if isBall(start[y][x]): # str 書き換え tmp2 = list(start[y]) tmp2[x] = '.' start[y] = "".join(tmp2) tmp2 = list(target[y]) tmp2[x] = '.' 
target[y] = "".join(tmp2) # return [str(y) + " " + str(x) + " " + str(isDirection(x_mv, y_mv))] else: return res ## functions2 # return list has stopable from-directions(0: <-, 1: shita) def searchStopableDirection(x, y): ans = [] if start[y][x+1] != '.': ans.append(0) if start[y-1][x] != '.': ans.append(1) if start[y][x-1] != '.': ans.append(2) if start[y+1][x] != '.': ans.append(3) return ans ## function3 # def initialize(goals): ans = [] for e in goals: nx = e[0] ny = e[1] if isBall(start[ny][nx]): # str 書き換え tmp2 = list(start[ny]) tmp2[nx] = '#' start[ny] = "".join(tmp2) tmp2 = list(target[ny]) tmp2[nx] = '#' target[ny] = "".join(tmp2) # else: ans.append(e) return ans ## function4 # def isXYmove(vec): if vec == 0: return (-1, 0) elif vec == 1: return (0, 1) elif vec == 2: return (1, 0) else: return (0, -1) def searchStartBall(): res = [] for y in range(H): for x in range(W): now = start[y][x] if(ord(now) >= ord('0') and ord(now) <= ord('9')): res.append((x, y)) return res def rewritePreWallPoint(x, y, x_mv, y_mv): rest = '.' while rest == '.': x += x_mv y += y_mv rest = start[y][x] x -= x_mv y -= y_mv # str 書き換え tmp2 = list(start[y]) tmp2[x] = '0' start[y] = "".join(tmp2) # def rollBalls(x, y, vec): # str 書き換え tmp2 = list(start[y]) tmp2[x] = '.' 
start[y] = "".join(tmp2) # move = isXYmove(vector) rewritePreWallPoint(x, y, move[0], move[1]) return [str(y) + " " + str(x) + " " + str(vector)] ## main statement # search goal goals = searchGoal() number_of_ball = len(goals) max_turn = number_of_ball * 20 goals = initialize(goals) ans = [] # kurikaeshi cnt = 1000 vector = 0 while len(ans) < max_turn and cnt > 0: # one block O(10^5) flag = True while flag: flag = False goals = searchGoal() # for loop o for i in range(len(goals)): # O(10^3) nx = goals[i][0] ny = goals[i][1] dirlist = searchStopableDirection(nx, ny) tmp = [] for e in dirlist: # O(10^2) if tmp == []: if e == 0: tmp = isBallCommand(nx, ny, -1, 0, tmp) elif e == 1: tmp = isBallCommand(nx, ny, 0, 1, tmp) elif e == 2: tmp = isBallCommand(nx, ny, 1, 0, tmp) elif e == 3: tmp = isBallCommand(nx, ny, 0, -1, tmp) if tmp != []: # str 書き換え tmp2 = list(start[ny]) tmp2[nx] = '#' start[ny] = "".join(tmp2) tmp2 = list(target[ny]) tmp2[nx] = '#' target[ny] = "".join(tmp2) # if tmp != []: flag = True ans += tmp # for loop c # one block cnt -= 1 begins = searchStartBall() for e in begins: nx = e[0] ny = e[1] tmp3 = [] muki = isXYmove(vector) if start[ny + muki[1]][nx + muki[0]] == '.': tmp3 = rollBalls(nx, ny, vector) ans += tmp3 flag3 = True while flag3: ransu = random.randint(0,3) if(ransu != vector): vector = ransu flag3 = False #kurikaeshi toji if len(ans) > max_turn: ans2 = [""]*max_turn for i in range(max_turn): ans2[i] = ans[i] return tuple(ans2) else: return tuple(ans)
import pprint def run(world): pp = pprint.PrettyPrinter(indent=4) for dev in world: print "====================" print str(dev) pp.pprint(world[dev])
import dataset import matplotlib.pyplot as plt import numpy as np xs,ys = dataset.get_beans(100) w = 0.1 plt.title("Siz-Toxicity Functon",fontsize=12) plt.xlabel("Size") plt.ylabel("Toxicity") yPre = w * xs plt.scatter(xs,ys) plt.plot(xs,yPre) print("修正前图像") plt.show() es = (ys-yPre)**2 sum_e = np.sum(es) sum_e = (1/100)*sum_e ws = np.arange(0,3,0.1) es = [] for w in ws: yPre = w * xs e = (1/100)*np.sum((ys-yPre)**2) es.append(e) plt.plot(ws,es) plt.title("Cost Function", fontsize=12) plt.xlabel("w") plt.ylabel("e") print("代价函数") plt.show() wMin= np.sum(xs*ys)/np.sum(xs*xs) print("最小点w:"+str(wMin)) yPre = wMin * ws plt.plot(ws,yPre) plt.scatter(xs,ys) plt.title("Siz-Toxicity Functon After",fontsize=12) plt.xlabel("Size") plt.ylabel("Toxicity") print("修正后图像") plt.show()
# -*- coding: utf-8 -*- """ Appication script for Advance Algorithmic trading Created on Wed Nov 7 09:14:12 2018 @author: akira This scripts simulates some bernouillis experiences and adjust it with the bayesian statistics before printing as a char; it does not require any parameter """ import numpy as np from scipy import stats from matplotlib import pyplot if __name__ == "__main__" : # Creating a list containing the number of trials; (each trial is a bernouilli trial) number_of_trial = [0,2,10,20,50,500] # Initialize an experience consisting of 500 trials data = stats.bernoulli.rvs(0.5,size = number_of_trial[-1]) # Discretize the x axis into 100 separate values x = np.linspace(0,1,100) # Launch the experimentation for i,N in enumerate(number_of_trial): #Accumulating the number of heads for the current element of number_of_trial heads = data[:N].sum() #Create an axis subplot for each update ax = pyplot.subplot(len(number_of_trial)/2,2,i+1) ax.set_title("%s trials,%s heads"%(N,heads)) #Adding labels to axis and hiding labels on Y-Axis pyplot.xlabel("$(PH)$, Probability of heads") pyplot.ylabel("Density") #Formatting output for the first experimentation : ie i = 0 if i == 0: pyplot.ylim([0.0,2.0]) pyplot.setp(ax.get_yticklabels(), visible = False) #Create a beta distribution to represent the experimentation y = stats.beta.pdf(x, heads + 1 , 1+ N - heads) #plot it pyplot.plot(x,y,label = "Observe %d tosses, \n %d heads"%(N,heads)) pyplot.fill_between(x, 0, y, color="#aaaadd", alpha=0.5) #expand plot to cover the allocated screen and show it pyplot.tight_layout() pyplot.show()
from django.shortcuts import render from .models import Timesheet def index(request): latest_entry = Timesheet.objects.all() context = {'latest_entry': latest_entry} return render(request, 'timecardApp/index.html', context)
#链接:https://www.zhihu.com/question/60868436/answer/307219795 ###对scrPath中250文件进行随机抽取175个,到datPath中 ### ### import random import os import shutil def random_copyfile(srcPath,dstPath,lastpath,numfiles): name_list=list(os.path.join(srcPath,name) for name in os.listdir(srcPath)) random_name_list=list(random.sample(name_list,numfiles)) last=[ item for item in name_list if item not in random_name_list ] if not os.path.exists(dstPath): os.mkdir(dstPath) for oldname in random_name_list: shutil.copyfile(oldname,oldname.replace(srcPath, dstPath)) for file in last: shutil.copyfile(file,file.replace(srcPath, lastpath)) srcPath='/home/kcao/test/tmp/AGAC_training' dstPath = '/home/kcao/test/tmp/kcao_train_data' lastpath='/home/kcao/test/tmp/kcao_test_data' random_copyfile(srcPath,dstPath,lastpath,175) ------------------------------------------------------------------------------------------------------------------------------------------ ###知识点总结: #1.通常习惯的写法,合并目录 >>> import os >>> os.path.join('/hello/','good/boy/','doiido') 输出:'/hello/good/boy/doiido' #2.os.listdir() #3.shutil #是一种高层次的文件操作工具 #类似于高级API,而且主要强大之处在于其对文件的复制与删除操作更是比较支持好。 #参考:https://www.jianshu.com/p/b4c87aa6fd24 shutil.mv文件移动 >>>shutil.move('C:/Users/xiaoxinsoso/Desktop/aaa', 'C:/Users/xiaoxinsoso/Desktop/bbb') # 把aaa目录移动到bbb目录下
color =input("Choose a color: ") plural_noun =input("Choose a plural noun: ") celebrity =input("Choose a celebrity: ") print("Rose are " + color) print(plural_noun + " are blue") print("I love " + celebrity)
import torch
from torch.utils.data import Dataset
from torch import nn, optim
from torchsummary import summary
import torch.nn.functional as F

import data.flic_dataset as flic_dataset
import models.DeepNet as deepnet
import models.simplenet as simplenet
import models.residualNet as resnet


def evaluate(model, testloader, device):
    """Return the mean MSE loss of `model` over `testloader` as a tensor.

    Despite the callers' naming, this is a loss (lower is better), not an
    accuracy.
    """
    model.eval()
    with torch.no_grad():
        loss_sum = 0
        for images, labels in testloader:
            images = images.to(device)
            labels = labels.to(device)
            pred = model(images)
            loss = F.mse_loss(labels, pred)
            loss_sum += loss
    print("loss average: {:.3f}".format(loss_sum / len(testloader)))
    return loss_sum / len(testloader)


def train_model(model, criterion, optimizer, dataload, weight_path, test_loader,
                device, num_epochs=3):
    """Train `model` for `num_epochs`, checkpointing weights and evaluating
    on `test_loader` every 5 epochs.

    Returns the list of test-set mean losses gathered at the checkpoints.
    (The original kept a separate `epoch_num` counter that always mirrored
    `epoch`; it has been removed.)
    """
    test_accuracy_list = []
    for epoch in range(num_epochs):
        model.train()
        epoch_loss = 0
        print("epoch: " + str(epoch))
        for images, labels in dataload:
            optimizer.zero_grad()
            images = images.to(device)
            labels = labels.to(device)
            pred = model(images)
            loss = criterion(pred, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        if epoch % 5 == 0:
            print("save_weight for epoch " + str(epoch))
            torch.save(model.state_dict(), weight_path + str(epoch))
            print("test accuracy after epoch" + str(epoch) + "\n")
            # evaluate() returns a mean MSE loss tensor; stored as numpy.
            test_accuracy = evaluate(model, test_loader, device)
            test_accuracy_list.append(test_accuracy.cpu().numpy())
        print(f"Training loss: {epoch_loss/len(dataload)}")
    return test_accuracy_list


if __name__ == '__main__':
    # Select which architecture / weight directory / learning rate to use.
    model_select = "mpii"
    if model_select == "deepnet":
        model = deepnet.DeepNet(9)
        weight_path = "./deepnet_weight/weight_epoch"
        lr = 0.00001
    elif model_select == "resnet":
        model = resnet.ResidualNet(9)
        weight_path = "./residual_weight/weight_epoch"
        lr = 0.0001
    elif model_select == "simplenet":
        model = simplenet.simpleNet(9)
        weight_path = "./simple_weight/weight_epoch"
        lr = 0.0001
    elif model_select == "mpii":
        # 16 joints for the MPII dataset instead of FLIC's 9.
        model = resnet.ResidualNet(16)
        weight_path = "./mpii_weight/weight_epoch"
        lr = 0.00001

    print("init model")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()
    model = model.to(device)
    summary(model, input_size=(3, 220, 220))
    optimizer = optim.Adam(model.parameters(), lr=lr)
    loss = nn.modules.loss.MSELoss()

    print("Load dataset")
    trainset = flic_dataset.mydataset("./data/FLIC/train_joints.csv", "./data/FLIC/images")
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
    testset = flic_dataset.mydataset("./data/FLIC/test_joints.csv", "./data/FLIC/images")
    testloader = torch.utils.data.DataLoader(testset)

    print("start training")
    test_list = train_model(model, loss, optimizer, trainloader, weight_path,
                            testloader, device, 101)
    print(test_list)
import cProfile, pstats, random import numpy as np import Main.NumpyNN as NN SIZE = [2, 3, 1] ITER = 1 def func(array): return [array[0] ^ array[1]] def getinp(x): return np.array([random.choice([0, 1]) for _ in range(x)]) def test(size, iters, func): nn = NN.NeuralNet(size) tests = [] for _ in range(100): inp = getinp(size[0]) tests.append((inp, func(inp))) #Function to test whether the nn has solved the function def solved(): num = 0 for i in tests: conc = list(map(round, nn.feedforward(i[0]))) if conc == i[1]: num += 1 pct = round(num*1.0/len(tests), 3) print (pct, ' completed') return(True if pct > .95 else False) rounds = 0 while not solved(): rounds += 1 nn.adapt(iters, [(inp, func(inp)) for inp in [getinp(size[0]) for _ in range(2000)]]) print(rounds*iters, 'Generations to solve') if __name__ == "__main__": #pr = cProfile.Profile() #pr.enable() test(SIZE, ITER, func) #pr.disable() #ps = pstats.Stats(pr).sort_stats('time') #ps.print_stats()
# coding=utf-8 from pytest_bdd import ( scenario ) @scenario('../features/dynamodb_recovery-pending_replication_count.feature', 'pending_replication_count - green') def test_alarm_green(): pass
# author azure
# 1. 老男孩好声音选秀大赛评委在打分的时候, 可以进行输入.
#    假设老男孩有 10 个评委, 让 10 个评委进行打分.
#    要求: 分数必须大于 5 分, 小于等于 10 分.
#
# count = 1
# while count <= 10:
#     fen = int(input("请第%s号评委打分:" % count))
#     if fen <= 5 or fen > 10:
#         print("你打的分数无效, 请重新打分")
#         continue
#     else:
#         print("第%s评委打的是:%s " % (count, fen))
#         count = count + 1
import string import random total= string.ascii_letters + string.digits + string.punctuation length=random.randint(8,13) password="".join(random.sample(total,length)) print(password)
import pandas as pd import pickle class Preprocessor(): def prepare_dataset(self, df): return df[:3000] def prepare_test_dataset(self, df): idx = [124, 162, 174, 184, 185, 292, 460, 464, 521, 527, 574, 588, 625, 705, 763, 842, 854, 901, 902, 1006, 1043, 1050, 1102, 1274, 1326, 1381, 1396, 1404, 1409, 1619, 1656, 1867, 1882, 1884, 2025, 2100, 2266, 2277, 2311, 2329, 2365, 2441, 2655, 2742, 2811, 2902, 2913, 2943, 2965, 2977] test_data_temp = df.copy() test_data_temp = test_data_temp.drop(idx, axis=0) return test_data_temp
from datetime import datetime def printClock(): now = datetime.now() clock = "%02d:%02d" % (now.hour,now.minute) print clock return clock
from gql import gql, Client from gql.transport.requests import RequestsHTTPTransport sample_transport=RequestsHTTPTransport( url='http://acad-overflow.herokuapp.com/v1/graphql', use_json=True, headers={ "Content-type": "application/json", }, verify=False ) client = Client( retries=3, transport=sample_transport, fetch_schema_from_transport=True, )
import roll import char import cli class Attacks: def __init__(self, templates=None): self.templates = templates if templates else [] def add_template(self, template, character): template = template.strip() if not template: return "Usage: attack <roll>" try: roll.get_rolls(self.apply_template(template, character)) except ValueError as e: return f"Invalid attack: {e}" self.templates.append(template) return "Added attack." def attack(self, template, character): print(roll.rolls_string(character.roll(template))) def list(self, character): cli.print_list("Attacks", self.templates) return ( "func", [ (lambda i: lambda: self.attack(self.templates[i], character))(i) for i in range(len(self.templates)) ], ) def list_deletes(self): cli.print_list("Delete attack", self.templates) return ( "func", [ (lambda i: lambda: self.templates.remove(self.templates[i]))(i) for i in range(len(self.templates)) ], ) def to_json(self): return {"templates": self.templates} @staticmethod def from_json(data): return Attacks(data["templates"])
# ROS Client Library for Python
import rclpy
# Handles the creation of nodes
from rclpy.node import Node
# Handles string messages
from std_msgs.msg import String

from userInterfaceManager import createUI


def msg_to_val(msg):
    """Extract the payload from a std_msgs String message."""
    return msg.data


# button title -> (start command, stop command)
buttonsDict = {
    "CT11": ("echo 'command send ct11'", ""),
    "launch lidar": ("echo 'launch lidar'", "echo 'stop lidar'"),
    "launch long control": ("echo 'long_control set true'", "echo 'long_control set false'")
}

# topic name -> ((lower, upper) display bounds, converter, message type)
textDict = {
    "novatel/bottom/bestpos": ((10, 1000), msg_to_val, String),
    "raptor_dbw_interface/imu": ((1, 100), msg_to_val, String),
    "raptor_dbw_interface/ctstate": ((0, 20), msg_to_val, String),
    "raptor_dbw_interface/wheel_speed_report": ((10, 11), msg_to_val, String),
    "raptor_dbw_interface/imu_error": ((10, 500), msg_to_val, String),
    "raptor_dbw_interface/gps_covariance": ((100, 300), msg_to_val, String)
}


class UserInterface_driver(Node):
    """Node that builds the UI and wires one subscriber per text field."""

    def __init__(self):
        # Initiate the Node class's constructor and give it a name
        super().__init__('UserInterface_driver')
        self.subscribers = []
        self.subscription = self.create_subscription(String, 'test', self.listener_callback, 10)
        self.subscription  # prevent unused variable warning
        # Create the UI; returns one callback per entry of textDict, in order.
        self.subFuncs = createUI(buttonsDict=buttonsDict, textDict=textDict)
        # Create subscribers with the topic type and name from textDict and
        # the callback from self.subFuncs.
        for i, (topic, (bounds, converter, msg_type)) in enumerate(textDict.items()):
            # BUG FIX: the original passed textDict[key][0] -- the (lower,
            # upper) bounds tuple -- as the message type; element 2 holds the
            # actual message class. Confirm against userInterfaceManager.
            self.subscribers.append(
                self.create_subscription(msg_type, f"UI_sub_{topic}", self.subFuncs[i], 10))

    def listener_callback(self, msg):
        # NOTE(review): the original referenced listener_callback without
        # defining it (AttributeError at startup); minimal logging handler
        # added -- replace with the intended behavior if one exists.
        self.get_logger().info(f"heard: {msg.data}")


def main(args=None):
    """Initialise rclpy, spin the UI node, and shut down cleanly."""
    rclpy.init(args=args)
    userInterface_driver = UserInterface_driver()
    # Spin the node so the callbacks are invoked.
    rclpy.spin(userInterface_driver)
    # Destroy the node explicitly
    userInterface_driver.destroy_node()
    # Shutdown the ROS client library for Python
    rclpy.shutdown()


if __name__ == '__main__':
    main()
from autobahn.twisted.websocket import WebSocketClientFactory, \ WebSocketClientProtocol, connectWS from twisted.python import log from twisted.internet import reactor #from pprint import pprint import sys import json class CoinbaseExchangeClientProtocol(WebSocketClientProtocol): def onOpen(self): msg = json.dumps({"type": "subscribe", "product_id": "BTC-USD"}) res = self.sendMessage(msg.encode('utf-8'), isBinary=False) print(res) def onMessage(self, payload, isBinary): if not isBinary: msg = json.loads(payload.decode('utf8')) print(msg) #pprint(msg) if __name__ == '__main__': log.startLogging(sys.stdout) factory = WebSocketClientFactory("wss://ws-feed.exchange.coinbase.com", debug=True) factory.protocol = CoinbaseExchangeClientProtocol connectWS(factory) reactor.run()
from sgfmill import sgf
import argparse
import sys
from sgfmill import ascii_boards
from sgfmill import sgf_moves
import numpy as np
import os
import pickle

## Data format
# {
#   "b": [][], "w": [][], "e": [][],   # boolean 19x19 planes: black/white/empty
#   "b_w_level": (),                   # (black dan, white dan)
#   "isBlack": bool,                   # whose move "next" is
#   "next": ()                         # coordinates of the next move
# }


def ConvertSgfToTrainingData(filename, all_data):
    """Append one training sample per move of a 19x19 Chinese-rules game.

    Each sample is a snapshot of the board *before* the move plus the move
    itself. Non-Chinese-rules or non-19x19 games are skipped silently.
    """
    with open(filename, "rb") as f:
        try:
            sgf_game = sgf.Sgf_game.from_bytes(f.read())
        except ValueError:
            raise Exception("bad sgf file")
    g_size = sgf_game.get_size()
    g_rule = None
    w_level = "1d"
    b_level = "1d"
    root_node = sgf_game.get_root()
    # Get rule (Chinese/Japanese) and the players' ranks (1d ... 9d).
    if root_node.has_property('RU'):
        g_rule = root_node.get('RU')
    if root_node.has_property('WR'):
        w_level = root_node.get('WR')
    if root_node.has_property('BR'):
        b_level = root_node.get('BR')
    if g_rule != "Chinese" or g_size != 19:
        return

    black = np.zeros((19, 19), dtype=bool)
    white = np.zeros((19, 19), dtype=bool)
    empty = np.ones((19, 19), dtype=bool)

    level = [1, 1]
    if b_level[-1] == "d":
        level[0] = int(b_level[:-1])
    # BUG FIX: the original tested b_level's suffix again here, so the white
    # rank was recorded whenever *black* happened to be a dan player.
    if w_level[-1] == "d":
        level[1] = int(w_level[:-1])
    level = tuple(level)

    for node in sgf_game.get_main_sequence():
        who, move = node.get_move()  # renamed from `next` (shadowed builtin)
        if move is not None:
            all_data.append({
                "b": black.copy(),
                "w": white.copy(),
                "e": empty.copy(),
                "b_w_level": level,
                "isBlack": who == "b",
                "next": move
            })
            if who == "b":
                black[move[0]][move[1]] = True
            else:
                white[move[0]][move[1]] = True
            empty[move[0]][move[1]] = False


if __name__ == "__main__":
    data_folder = "Data"
    output_filename = "all_data"
    output_num = 1
    all_data = []
    for dirs in os.listdir(data_folder):
        if dirs == ".DS_Store":
            continue
        print(len(all_data), "In ", dirs)
        for file in os.listdir(data_folder + "/" + dirs):
            if file == ".DS_Store":
                continue
            ConvertSgfToTrainingData(data_folder + "/" + dirs + "/" + file, all_data)
            # Flush to disk in ~300k-sample shards to bound memory use.
            if len(all_data) > 300000:
                with open(output_filename + "_" + str(output_num) + ".pickle", "wb") as f:
                    print("start writing ", output_num)
                    pickle.dump(all_data, f)
                all_data = []
                # BUG FIX: the original incremented before printing, so it
                # announced finishing the *next* shard's number.
                print("Finished writing ", output_num)
                output_num = output_num + 1
    # Final partial shard.
    with open(output_filename + "_" + str(output_num) + ".pickle", "wb") as f:
        pickle.dump(all_data, f)
from functools import update_wrapper
from weakref import WeakValueDictionary, WeakKeyDictionary

import torch
import torch.nn.functional as F

import pyro.distributions as dist
from pyro.poutine.messenger import Messenger
from pyro.poutine.runtime import effectful

__all__ = [
    "LocalReparameterizationMessenger",
    "FlipoutMessenger"
]


def _get_base_dist(distribution):
    # Unwrap nested Independent wrappers to reach the underlying distribution.
    while isinstance(distribution, dist.Independent):
        distribution = distribution.base_dist
    return distribution


def _is_reparameterizable(distribution):
    # Only Normal and Delta base distributions admit the closed-form
    # reparameterizations implemented below.
    if distribution is None:
        # bias terms may be None, which does not prevent reparameterization
        return True
    return isinstance(_get_base_dist(distribution), (dist.Normal, dist.Delta))


def _get_loc_var(distribution):
    # Return the (mean, variance) pair of a distribution, or (tensor, 0)
    # for a plain tensor, or (None, None) for a missing bias.
    if distribution is None:
        return None, None
    if torch.is_tensor(distribution):
        # distribution might be a pyro param, which is equivalent to a delta distribution
        return distribution, torch.zeros_like(distribution)
    distribution = _get_base_dist(distribution)
    return distribution.mean, distribution.variance


class _ReparameterizationMessenger(Messenger):
    """Base class for reparameterization of sampling sites where a transformation
    of a stochastic by a deterministic variable allows for analytically calculating
    (or approximation) the distribution of the result and sampling the result instead
    of the original stochastic variable. See subclasses for examples.

    Within the context of this messenger, functions in the REPARAMETERIZABLE_FUNCTIONS
    attribute will have the outputs sampled instead of the inputs to the weight and bias
    attributes. This can reduce gradient noise.

    For now, reparameterization is limited to F.linear and F.conv, which are used by the
    corresponding nn.Linear and nn.Conv modules in pytorch."""
    # TODO check if transposed convolutions could be added as well, might be useful for Bayesian conv VAEs

    REPARAMETERIZABLE_FUNCTIONS = ["linear", "conv1d", "conv2d", "conv3d"]

    def __init__(self, reparameterizable_functions=None):
        # `reparameterizable_functions` may be None (use the default list),
        # a single function name, or a list/tuple of names on torch.nn.functional.
        super().__init__()
        if reparameterizable_functions is None:
            reparameterizable_functions = self.REPARAMETERIZABLE_FUNCTIONS
        elif isinstance(reparameterizable_functions, str):
            reparameterizable_functions = [reparameterizable_functions]
        elif isinstance(reparameterizable_functions, (list, tuple)):
            reparameterizable_functions = list(reparameterizable_functions)
        else:
            raise ValueError(f"Unrecognized type for argument 'reparameterizable_functions. Must be str, list or "
                             f"None, but go '{reparameterizable_functions.__class__.__name__}'.")
        self.reparameterizable_functions = reparameterizable_functions

    def __enter__(self):
        # deps maps sampled tensors to distributon object to check if local reparameterization is possible.
        # I'm using a weakref dictionary here for memory efficiency -- a standard dict would create references to all
        # kinds of intermediate tensors, preventing them from being garbage collected. This would be a problem if the
        # Messenger is used as a context outside of a training loop. Ideally I would like to use a WeakKeyDictionary,
        # since I would expect that the samples from the distribution are much less likely to be kept around than the
        # distribution object itself. I'm using id(tensor) as dictionary keys in order to avoid creating references to
        # the samples from the distributions. However this still means that the self.deps dictionary will keep growing
        # if the distribution objects from the model/guide are kept around.
        self.deps = WeakValueDictionary()
        # Remember the original F.* functions so they can be restored on exit.
        self.original_fns = [getattr(F, name) for name in self.reparameterizable_functions]
        self._make_reparameterizable_functions_effectful()
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the patched torch.nn.functional entries and drop state.
        self._reset_reparameterizable_functions()
        del self.deps
        del self.original_fns
        return super().__exit__(exc_type, exc_val, exc_tb)

    def _make_reparameterizable_functions_effectful(self):
        # Monkey-patch each F.<name> with an effectful wrapper so calls are
        # routed through pyro's messenger stack (type "reparameterizable").
        for name, fn in zip(self.reparameterizable_functions, self.original_fns):
            effectful_fn = update_wrapper(effectful(fn, type="reparameterizable"), fn)
            setattr(F, name, effectful_fn)

    def _reset_reparameterizable_functions(self):
        for name, fn in zip(self.reparameterizable_functions, self.original_fns):
            setattr(F, name, fn)

    def _pyro_post_sample(self, msg):
        # Record which distribution produced each sampled tensor, keyed by
        # the tensor's id (see the comment in __enter__ for why).
        if id(msg["value"]) not in self.deps:
            self.deps[id(msg["value"])] = msg["fn"]

    def _pyro_reparameterizable(self, msg):
        # Intercept a patched F.* call: if its weight (and bias, if present)
        # came from reparameterizable distributions, sample the output
        # instead and mark the site as done.
        if msg["fn"].__name__ not in self.reparameterizable_functions:
            return
        if msg["done"]:
            raise ValueError(f"Trying to reparameterize a {msg['fn'].__name__} site that has already been processed. "
                             f"Did you use multiple reparameterization messengers for the same function?")
        args = list(msg["args"])
        kwargs = msg["kwargs"]
        x = kwargs.pop("input", None) or args.pop(0)
        # if w is in args, so must have been x, therefore w will now be the first argument in args if not in kwargs
        w = kwargs.pop("weight", None) or args.pop(0)
        # bias might be None, so check explicitly if it's in kwargs -- if it is positional, x and w
        # must have been positional arguments as well
        b = kwargs.pop("bias") if "bias" in kwargs else args.pop(0)
        if id(w) in self.deps:
            w_fn = self.deps[id(w)]
            b_fn = self.deps[id(b)] if b is not None else None
            if torch.is_tensor(x) and _is_reparameterizable(w_fn) and _is_reparameterizable(b_fn):
                msg["value"] = self._reparameterize(msg, x, w_fn, w, b_fn, b, *args, **kwargs)
                msg["done"] = True

    def _reparameterize(self, msg, x, w_loc, w_var, b_loc, b_var, *args, **kwargs):
        # Subclass hook: return a sample of the layer's output distribution.
        raise NotImplementedError


class LocalReparameterizationMessenger(_ReparameterizationMessenger):
    """Implements local reparameterization: https://arxiv.org/abs/1506.02557"""

    def _reparameterize(self, msg, x, w_fn, w, b_fn, b, *args, **kwargs):
        # Output of an affine map of Gaussians is Gaussian: compute its
        # mean/variance analytically and sample once per activation.
        w_loc, w_var = _get_loc_var(w_fn)
        b_loc, b_var = _get_loc_var(b_fn)
        loc = msg["fn"](x, w_loc, b_loc, *args, **kwargs)
        var = msg["fn"](x.pow(2), w_var, b_var, *args, **kwargs)
        # ensure positive variances to avoid NaNs when taking square root
        var = var + var.lt(0).float().mul(var.abs() + 1e-6).detach()
        scale = var.sqrt()
        return dist.Normal(loc, scale).rsample()


def _pad_right_like(tensor1, tensor2):
    # Append singleton dims to tensor1 until it has tensor2's rank
    # (for broadcasting).
    while tensor1.ndim < tensor2.ndim:
        tensor1 = tensor1.unsqueeze(-1)
    return tensor1


def _rand_signs(*args, **kwargs):
    # Uniformly random +/-1 tensor with the given shape arguments.
    return torch.rand(*args, **kwargs).gt(0.5).float().mul(2).sub(1)


class FlipoutMessenger(_ReparameterizationMessenger):
    """Implements flipout: https://arxiv.org/abs/1803.04386"""

    # Rank of the per-sample input for each supported function (feature dims
    # used to split off the batch shape of x).
    FUNCTION_RANKS = {"linear": 1, "conv1d": 2, "conv2d": 3, "conv3d": 4}

    def _reparameterize(self, msg, x, w_fn, w, b_fn, b, *args, **kwargs):
        # Decorrelate weight perturbations across batch elements by flipping
        # the shared perturbation with per-example random sign vectors.
        fn = msg["fn"]
        w_loc, _ = _get_loc_var(w_fn)
        loc = fn(x, w_loc, None, *args, **kwargs)
        # x might be one dimensional for a 1-d input with a single datapoint to F.linear, F.conv always has a batch dim
        batch_shape = x.shape[:-self.FUNCTION_RANKS[fn.__name__]] if x.ndim > 1 else tuple()
        # w might be 1-d for F.linear for a 0-d output
        output_shape = (w_loc.shape[0],) if w_loc.ndim > 1 else tuple()
        input_shape = (w_loc.shape[1],) if w_loc.ndim > 1 else (w_loc.shape[0],)
        if not hasattr(w, "sign_input"):
            # Cache the random sign vectors on the sampled weight tensor so a
            # weight sample reuses the same signs across calls.
            w.sign_input = _pad_right_like(_rand_signs(batch_shape + input_shape, device=loc.device), x)
            w.sign_output = _pad_right_like(_rand_signs(batch_shape + output_shape, device=loc.device), x)
        w_perturbation = w - w_loc
        perturbation = fn(x * w.sign_input, w_perturbation, None, *args, **kwargs) * w.sign_output
        output = loc + perturbation
        if b is not None:
            b_loc, b_var = _get_loc_var(b_fn)
            bias = _pad_right_like(dist.Normal(b_loc, b_var.sqrt()).rsample(batch_shape), output)
            output += bias
        return output
"""Scrape skincare product reviews from femaledaily.com into SkincareReview.csv.

Walks category pages -> product pages -> review cards, collecting one row per
review. Optional fields that are missing on a card are recorded as NaN.
"""
import pandas as pd
import numpy as np
from selenium import webdriver
from time import sleep

# Accumulators: one entry per scraped review card.
username = []
skincond = []
recommend = []
datereview = []
review = []
rating = []
produk = []
merk = []
kategori = []
price = []

home = 'https://femaledaily.com/category/skincare'
path = 'chromedriver.exe'

driver = webdriver.Chrome(path)
driver.get(home)
sleep(5)  # let the JS-rendered landing page load

# Collect (title, href) for every category link on the landing page.
category_column = driver.find_elements_by_class_name('jsx-3413472294.category-landing-column')
list_of_category_title = []
list_of_category_href = []
for column in category_column:
    for cat in column.find_elements_by_tag_name('a'):
        list_of_category_title.append(cat.text)
        list_of_category_href.append(cat.get_attribute('href'))

# Strip the trailing page number so a page index can be appended below.
list_of_category_href = [href[:-1] for href in list_of_category_href]
categories = list(zip(list_of_category_title, list_of_category_href))

for cattittle, cathref in categories:  # category pages
    # BUG FIX: the original read `for page in [:]:`, which is a SyntaxError.
    # Iterate explicit page numbers; widen the range to scrape more pages.
    for page in range(1, 2):
        driver.get(cathref + str(page))
        try:
            sleep(5)
            # Collect (title, href) for every product on this category page.
            product_list_title = []
            product_list_href = []
            product_column = driver.find_elements_by_class_name('jsx-2681188818.product-item')
            for product_item in product_column:
                anchors = product_item.find_elements_by_tag_name('a')
                product_list_title.append(anchors[1].text)
                product_list_href.append(anchors[1].get_attribute('href'))
            product_list_href = [href[:-1] for href in product_list_href]
            products = list(zip(product_list_title, product_list_href))

            # Renamed from the original `for product, prodhref in product:` which
            # shadowed the list it was iterating.
            for prodtitle, prodhref in products:
                for page2 in range(1, 2):  # product review pages
                    driver.get(prodhref + str(page2))
                    try:
                        # Wait for the page before querying review cards
                        # (the original slept after the lookup).
                        sleep(5)
                        card = driver.find_elements_by_class_name('jsx-992468192.item')
                        for item in card:
                            username.append(item.find_element_by_class_name('username').text)
                            review.append(item.find_element_by_class_name('text-content').text)
                            # Rating = number of filled star icons on the card.
                            rating.append(len(item.find_element_by_class_name('cardrv-starlist').find_elements_by_class_name('icon-ic_big_star_full')))
                            merk.append(driver.find_element_by_tag_name('h2').text)
                            kategori.append(cattittle)
                            produk.append(prodtitle)
                            # Optional fields: record NaN when absent on the card.
                            try:
                                skincond.append(item.find_element_by_class_name('skin').text)
                            except Exception:
                                skincond.append(np.nan)
                            try:
                                recommend.append(item.find_element_by_class_name('recommend').text)
                            except Exception:
                                recommend.append(np.nan)
                            try:
                                datereview.append(item.find_element_by_class_name('date.review-date').text)
                            except Exception:
                                datereview.append(np.nan)
                            try:
                                price.append(driver.find_element_by_class_name('jsx-992468192.product-price').text)
                            except Exception:
                                price.append(np.nan)
                    except Exception:
                        # Skip a product page that failed to load/parse.
                        pass
        except Exception:
            # Skip a category page that failed to load/parse.
            pass

driver.close()

df = pd.DataFrame({'Category': kategori,
                   'Merk': merk,
                   'Product': produk,
                   'Price': price,
                   'UserName': username,
                   'SkinCond_Age': skincond,
                   'Recommend': recommend,
                   'PostDate': datereview,
                   'Review': review,
                   'Rating': rating})
df.to_csv('SkincareReview.csv', index=False)
# ライブラリの読み込み import os.path as osp import dotenv from utils.init_dotenv import input_env_vals def main(): # .env ファイル → 環境変数 (読み込む) dotenv_path = osp.join(osp.dirname(__file__), '.env') if not osp.isfile(dotenv_path): # .env ファイルがないので、初期設定する。 input_env_vals(dotenv_path) dotenv.load_dotenv(dotenv_path) # ボットの起動 from slackbot.bot import Bot bot = Bot() print("\n[Info] bot の呼び出しに成功しました! 起動します...") bot.run() if __name__ == "__main__": main()
# Connect Four (7x6) against an alpha-beta computer player (Python 2).
# Usage: script.py <one-move|interactive> <board-file> <computer-next|human-next> <depth>
# The board file holds 42 digit characters: 0 = empty, 1 = computer, 2 = human.
import sys
import re

# argv[2] is the board-state file; every digit character becomes one cell.
inputfile1 = sys.argv[2]
table =[]
try:
    file1=open(inputfile1, 'r')
    data=[]
    line=0
    for line in file1:
        for ch in line:
            if ch!="\n":
                data.append(int(ch))
except IOError:
    sys.exit("\nError.\nFile name invalid.\n")

#represents the table
def printcomputer(a=[]):
    # Print the board (row index 5 on top) and dump it to computer.txt, with a
    # trailing "2" marking that the human moves next.
    # NOTE(review): mutable default argument; every caller passes a board.
    def piece(a):
        # Cell value -> display character.
        if a==0:
            return "0"
        if a==1:
            return "1"
        if a==2:
            return "2"
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[5][0]),piece(a[5][1]),piece(a[5][2]),piece(a[5][3]),piece(a[5][4]),piece(a[5][5]),piece(a[5][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[4][0]),piece(a[4][1]),piece(a[4][2]),piece(a[4][3]),piece(a[4][4]),piece(a[4][5]),piece(a[4][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[3][0]),piece(a[3][1]),piece(a[3][2]),piece(a[3][3]),piece(a[3][4]),piece(a[3][5]),piece(a[3][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[2][0]),piece(a[2][1]),piece(a[2][2]),piece(a[2][3]),piece(a[2][4]),piece(a[2][5]),piece(a[2][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[1][0]),piece(a[1][1]),piece(a[1][2]),piece(a[1][3]),piece(a[1][4]),piece(a[1][5]),piece(a[1][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[0][0]),piece(a[0][1]),piece(a[0][2]),piece(a[0][3]),piece(a[0][4]),piece(a[0][5]),piece(a[0][6]))
    print " ---------------------- "
    # Persist the board state for the next invocation.
    f=open("computer.txt", "w+")
    f.write("%s%s%s%s%s%s%s"%(piece(a[5][0]),piece(a[5][1]),piece(a[5][2]),piece(a[5][3]),piece(a[5][4]),piece(a[5][5]),piece(a[5][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[4][0]),piece(a[4][1]),piece(a[4][2]),piece(a[4][3]),piece(a[4][4]),piece(a[4][5]),piece(a[4][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[3][0]),piece(a[3][1]),piece(a[3][2]),piece(a[3][3]),piece(a[3][4]),piece(a[3][5]),piece(a[3][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[2][0]),piece(a[2][1]),piece(a[2][2]),piece(a[2][3]),piece(a[2][4]),piece(a[2][5]),piece(a[2][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[1][0]),piece(a[1][1]),piece(a[1][2]),piece(a[1][3]),piece(a[1][4]),piece(a[1][5]),piece(a[1][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[0][0]),piece(a[0][1]),piece(a[0][2]),piece(a[0][3]),piece(a[0][4]),piece(a[0][5]),piece(a[0][6])))
    f.write("\n")
    f.write("2")

def printhuman(a=[]):
    # Same as printcomputer, but dumps to human.txt with a trailing "1"
    # (computer moves next). NOTE(review): duplicated code; kept verbatim.
    def piece(a):
        if a==0:
            return "0"
        if a==1:
            return "1"
        if a==2:
            return "2"
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[5][0]),piece(a[5][1]),piece(a[5][2]),piece(a[5][3]),piece(a[5][4]),piece(a[5][5]),piece(a[5][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[4][0]),piece(a[4][1]),piece(a[4][2]),piece(a[4][3]),piece(a[4][4]),piece(a[4][5]),piece(a[4][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[3][0]),piece(a[3][1]),piece(a[3][2]),piece(a[3][3]),piece(a[3][4]),piece(a[3][5]),piece(a[3][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[2][0]),piece(a[2][1]),piece(a[2][2]),piece(a[2][3]),piece(a[2][4]),piece(a[2][5]),piece(a[2][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[1][0]),piece(a[1][1]),piece(a[1][2]),piece(a[1][3]),piece(a[1][4]),piece(a[1][5]),piece(a[1][6]))
    print " ---------------------- "
    print " | %s| %s| %s| %s| %s| %s| %s|"%(piece(a[0][0]),piece(a[0][1]),piece(a[0][2]),piece(a[0][3]),piece(a[0][4]),piece(a[0][5]),piece(a[0][6]))
    print " ---------------------- "
    f=open("human.txt", "w+")
    f.write("%s%s%s%s%s%s%s"%(piece(a[5][0]),piece(a[5][1]),piece(a[5][2]),piece(a[5][3]),piece(a[5][4]),piece(a[5][5]),piece(a[5][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[4][0]),piece(a[4][1]),piece(a[4][2]),piece(a[4][3]),piece(a[4][4]),piece(a[4][5]),piece(a[4][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[3][0]),piece(a[3][1]),piece(a[3][2]),piece(a[3][3]),piece(a[3][4]),piece(a[3][5]),piece(a[3][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[2][0]),piece(a[2][1]),piece(a[2][2]),piece(a[2][3]),piece(a[2][4]),piece(a[2][5]),piece(a[2][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[1][0]),piece(a[1][1]),piece(a[1][2]),piece(a[1][3]),piece(a[1][4]),piece(a[1][5]),piece(a[1][6])))
    f.write("\n")
    f.write("%s%s%s%s%s%s%s"%(piece(a[0][0]),piece(a[0][1]),piece(a[0][2]),piece(a[0][3]),piece(a[0][4]),piece(a[0][5]),piece(a[0][6])))
    f.write("\n")
    f.write("1")

def checkstatus(grid=[]):
    # Count 4-in-a-row lines for the computer (player 1) and return that count.
    # NOTE(review): `b` (human's count) is computed but unused here; winhuman
    # below returns it.
    win1=[1,1,1,1]
    win2=[2,2,2,2]
    a=0
    b=0
    #Check horizontally
    for i in range(6):
        for j in range(4):
            if [grid[i][j],grid[i][j+1],grid[i][j+2],grid[i][j+3]]==win1:
                a+=1
            if [grid[i][j],grid[i][j+1],grid[i][j+2],grid[i][j+3]]==win2:
                b+=1
    #Check vertically
    for i in range(7):
        for j in range(3):
            if [grid[j][i], grid[j+1][i], grid[j+2][i], grid[j+3][i]]==win1:
                a+=1
            if [grid[j][i], grid[j+1][i], grid[j+2][i], grid[j+3][i]]==win2:
                b+=1
    #Check diagonally
    for j in range(3):
        for i in range(4):
            if [grid[j][i], grid[j+1][i+1], grid[j+2][i+2], grid[j+3][i+3]]==win1:
                a+=1
            if [grid[j][i], grid[j+1][i+1], grid[j+2][i+2], grid[j+3][i+3]]==win2:
                b+=1
    for j in range(3):
        for i in range(6,2,-1):
            if [grid[j][i], grid[j+1][i-1], grid[j+2][i-2], grid[j+3][i-3]]==win1:
                a+=1
            if [grid[j][i], grid[j+1][i-1], grid[j+2][i-2], grid[j+3][i-3]]==win2:
                b+=1
    return a

def winhuman(grid=[]):
    # Same scan as checkstatus but returns the human's (player 2) count.
    win1=[1,1,1,1]
    win2=[2,2,2,2]
    a=0
    b=0
    #Check horizontally
    for i in range(6):
        for j in range(4):
            if [grid[i][j],grid[i][j+1],grid[i][j+2],grid[i][j+3]]==win1:
                a+=1
            if [grid[i][j],grid[i][j+1],grid[i][j+2],grid[i][j+3]]==win2:
                b+=1
    #Check vertically
    for i in range(0,7):
        for j in range(0,3):
            if [grid[j][i], grid[j+1][i], grid[j+2][i], grid[j+3][i]]==win1:
                a+=1
            if [grid[j][i], grid[j+1][i], grid[j+2][i], grid[j+3][i]]==win2:
                b+=1
    #Check diagonally
    for j in range(3):
        for i in range(4):
            if [grid[j][i], grid[j+1][i+1], grid[j+2][i+2], grid[j+3][i+3]]==win1:
                a+=1
            if [grid[j][i], grid[j+1][i+1], grid[j+2][i+2], grid[j+3][i+3]]==win2:
                b+=1
    for j in range(3):
        for i in range(6,2,-1):
            if [grid[j][i], grid[j+1][i-1], grid[j+2][i-2], grid[j+3][i-3]]==win1:
                a+=1
            if [grid[j][i], grid[j+1][i-1], grid[j+2][i-2], grid[j+3][i-3]]==win2:
                b+=1
    return b

def humanMoves(grid=[]):
    # For each non-full column, record the column index and its lowest empty row.
    column=[]; rows=[]
    for col in range(7):
        for row in range(6):
            if grid[row][col]==0:
                column.append(col)
                rows.append(row)
                break
    return column, rows

# Matches a non-digit character at the start of the input.
isNum=re.compile("[^0-9]")

def humanmove(grid, x):
    # Prompt until x is a valid 1-based column number, then drop a human piece
    # (value 2) into that column's lowest empty row.
    # NOTE(review): int(x) still raises for mixed input like "1a" -- the regex
    # only checks the first character; confirm intended input format.
    column, rows = humanMoves(grid)
    if isNum.match(x)==None and x!='':
        x=int(x)-1
    while x not in column:
        print "Invalid Move"
        x=raw_input('n: ')
        if isNum.match(x)==None and x!='':
            x=int(x)-1
    grid[rows[column.index(x)]][x]=2

# Reshape the flat digit list into 6 rows of 7, then reverse so that
# table[0] is the bottom row.
p=0
table=[[data[p],data[p+1],data[p+2],data[p+3],data[p+4],data[p+5],data[p+6]],
       [data[p+7],data[p+8],data[p+9],data[p+10],data[p+11],data[p+12],data[p+13]],
       [data[p+14],data[p+15],data[p+16],data[p+17],data[p+18],data[p+19],data[p+20]],
       [data[p+21],data[p+22],data[p+23],data[p+24],data[p+25],data[p+26],data[p+27]],
       [data[p+28],data[p+29],data[p+30],data[p+31],data[p+32],data[p+33],data[p+34]],
       [data[p+35],data[p+36],data[p+37],data[p+38],data[p+39],data[p+40],data[p+41]]]
table.reverse()

#valid moves
# Column preference order: centre first, edges last.
order=[3,2,4,1,5,0,6]
def validMoves(grid):
    # [row, col] of the lowest empty cell of each playable column, in `order`.
    global order
    moves=[]
    for col in order:
        for row in range(0,6):
            if grid[row][col]==0:
                moves.append([row,col])
                break
    return moves

def move(grid,x,who):
    # Play the x-th valid move (index into validMoves) for player `who`.
    val=validMoves(grid)
    grid[val[x][0]][val[x][1]]=who

#Alpha Beta Pruning
def alphabeta(grid, depth):
    # Alpha-beta search from the computer's (player 1) perspective.
    # Returns [best-move index into validMoves, best value].
    def ab(grid, depth, high, low):
        # Root: try each computer move and keep the best minimax value.
        items=[]; v=-10000000
        for a,s in validMoves(grid):
            grid[a][s]=1
            v=max(v, abmin(grid, depth-1, high, low))
            items.append(v)
            grid[a][s]=0
        largest=max(items)
        dex=items.index(largest)
        return [dex, largest]
    def abmax(grid, depth, high, low):
        # Maximizing (computer) node.
        moves=validMoves(grid)
        if(depth==0 or not moves):
            return eval(grid)
        v=-10000000
        for a,s in moves:
            grid[a][s]=1
            v=max(v, abmin(grid, depth-1, high, low))
            grid[a][s]=0
            if v >= low:
                return v  # beta cutoff
            high=max(high, v)
        return v
    def abmin(grid, depth, high, low):
        # Minimizing (human) node.
        moves=validMoves(grid)
        if(depth==0 or not moves):
            return eval(grid)
        v=+10000000
        for a,s in moves:
            grid[a][s]=2
            v=min(v, abmax(grid, depth-1, high, low))
            grid[a][s]=0
            if v <= high:
                return v  # alpha cutoff
            low=min(low, v)
        return v
    return ab(grid, depth, -10000000, +10000000)

#IDS
def iterative_deepening(grid):
    # Returns the index of the computer's chosen move.
    # NOTE(review): despite the name, no deepening happens -- the local
    # `depth=1` is unused and the search runs once at the global CLI depth `d`.
    global order
    depth=1
    res=alphabeta(grid, d)
    return res[0]

#Eval Function
def eval(grid=[]):
    # Leaf evaluation: 0 if the human has any 3-in-a-row threat, else `j`
    # (the last loop index). NOTE(review): shadows the builtin eval().
    w2=[2,2,2]
    i=0
    j=0
    #Check horizontally
    for i in range(6):
        for j in range(4):
            if [grid[i][j],grid[i][j+1],grid[i][j+2]]==w2:
                return 0
    #Check vertically
    for i in range(7):
        for j in range(4):
            if [grid[j][i], grid[j+1][i], grid[j+2][i]]==w2:
                return 0
    #Check diagonally
    for j in range(3):
        for i in range(4):
            if [grid[j][i], grid[j+1][i+1], grid[j+2][i+2]]==w2:
                return 0
    for j in range(3):
        for i in range(6,2,-1):
            if [grid[j][i], grid[j+1][i-1], grid[j+2][i-2]]==w2:
                return 0
    #print j+3
    return j

#MAIN
# CLI: argv[1] = game mode, argv[3] = who moves next, argv[4] = search depth.
gamemode=sys.argv[1]
state=sys.argv[3]
dep=sys.argv[4]
d=int(dep)
if gamemode=='one-move' or gamemode=='interactive':
    print ""
else:
    print('%s : unrecognized game mode' % gamemode)
    sys.exit(1)
if state=='computer-next' or state=='human-next':
    print ""
else:
    print('%s : unrecognized game state' % state)
    sys.exit(1)
#Human plays first
if gamemode=='interactive':
    if state=='human-next':
        printhuman(table)
        while validMoves(table):
            y=checkstatus(table)
            human=winhuman(table)
            print "Score: Computer =",y,", Human =",human
            n=raw_input("Enter column no: ")
            print "Human's move:"
            humanmove(table, n)
            printhuman(table)
            move(table, iterative_deepening(table), 1)
            print "Computer's Move:"
            printcomputer(table)
    #Computer plays first
    else:
        while validMoves(table):
            move(table, iterative_deepening(table), 1)
            print "Computer's Move:"
            printcomputer(table)
            y=checkstatus(table)
            human=winhuman(table)
            print "Score: Computer =",y,", Human =",human
            n=raw_input("Enter column no: ")
            print "Human's Move:"
            humanmove(table, n)
            printhuman(table)
# Final score once the board is full (or in one-move mode).
y=checkstatus(table)
human=winhuman(table)
if y==human:
    print "Score: Draw"
else:
    print "Score: Computer =",y,", Human =",human
import logging import pytest from page_request.pagerequest import ApiMethod from data import canshu2 from mysql import mysqlDB import allure from common import get_path path=get_path.get_login() logging.basicConfig(level=logging.DEBUG) casedate1=canshu2.ExcelData2().openexl(path,'Sheet5') # casedate1[0][3]=array[0] #pysplit.get_cookie.cookie(login) print(casedate1) # 根据登录接口返回的 accountid,在数据库user_account_address中查询,删除第一条记录 @allure.epic("药房网APP") @allure.feature("删除地址模块") @allure.step("删除地址模块") @allure.title("删除地址模块") @pytest.mark.parametrize("caseid1,host,path,params,method,rowid,exceptvalue", casedate1) def test_tianjiadizhi(caseid1,host,path,params,method,rowid,exceptvalue,loginssid): headers={} headers["Cookie"]=loginssid.get_ssid() sql='SELECT id FROM user_account_address WHERE accountid={0} LIMIT 1'.format(loginssid.get_accountid()) db = mysqlDB.DB() addressid = db.query(sql)[0]['id'] db.close() param=eval(params) param['id']=addressid res1 = ApiMethod(host, path, headers, param, method) resaddress = res1.jiekouqingqiu().json()["code"] print('param:',param) assert resaddress==1
# Traffic-sign dataset loading/preprocessing (GTSRB, BelgiumTSC, LISA)
# plus HDF5 bookkeeping helpers for training runs.
import h5py, cv2
import csv, time, os.path
import matplotlib.pyplot as plt
import numpy as np
from six.moves import cPickle
from sklearn import model_selection as ms

# function to process a single image
def processImage(prefix, size, gtReader, proc_type=None, is_lisa=False, class_match=None):
    # Read every annotation row, crop the sign ROI, resize to `size`, optionally
    # enhance ("clahe", "1sigma", "2sigma"; anything else is a no-op), and
    # return (images, labels). `class_match` maps LISA class names to labels.
    images = []
    labels = []
    for row in gtReader:
        if is_lisa:
            # box = (y1, y2, x1, x2) taken from the LISA annotation columns
            params = {"name": row[0], \
                      "box": (int(row[3]), int(row[5]), int(row[2]), int(row[4])), \
                      "label": class_match[row[1]] if row[1] in class_match.keys() else None}
            if params['label'] is None: # No such class
                print(row[1])
                continue
        else:
            # GTSRB/Belgium annotation columns; label is in column 7
            params = {"name": row[0], \
                      "box": (int(row[4]), int(row[6]), int(row[3]), int(row[5])), \
                      "label": int(row[7])}
        image = cv2.imread(prefix + params["name"])
        if image.shape[2] != 3: # Gray?
            print(params["name"])
        # image = image[...,::-1] # BGR to RGB
        image = image[params["box"][0]:params["box"][1], params["box"][2]:params["box"][3]] # Crop the ROI
        image = cv2.resize(image, size) # Resize images
        if proc_type is None:
            pass
        elif proc_type == "clahe":
            # Contrast-limited adaptive histogram equalization on the L channel.
            # lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB) # BGR to Lab space
            lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) # BGR to Lab space
            tmp = np.zeros((lab.shape[0],lab.shape[1]), dtype=lab.dtype)
            tmp[:,:] = lab[:,:,0] # Get the light channel of LAB space
            clahe = cv2.createCLAHE(clipLimit=2,tileGridSize=(4,4)) # Create CLAHE object
            light = clahe.apply(tmp) # Apply to the light channel
            lab[:,:,0] = light # Merge back
            # image = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB) # LAB to RGB
            image = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) # LAB to RGB
        elif proc_type == "1sigma" or proc_type == "2sigma":
            # Per-channel min-max normalization to mean +/- k*std.
            # R, G, B = image[:,:,0], image[:,:,1], image[:,:,2] # RGB channels
            B, G, R = image[:,:,0], image[:,:,1], image[:,:,2]
            if proc_type == "1sigma":
                param = 1
            else: # "2sigma"
                param = 2
            # image[:,:,0] = cv2.normalize(R, None, R.mean() - param * R.std(), R.mean() + param * R.std(), cv2.NORM_MINMAX)
            image[:,:,0] = cv2.normalize(B, None, B.mean() - param * B.std(), B.mean() + param * B.std(), cv2.NORM_MINMAX)
            image[:,:,1] = cv2.normalize(G, None, G.mean() - param * G.std(), G.mean() + param * G.std(), cv2.NORM_MINMAX)
            # image[:,:,2] = cv2.normalize(B, None, B.mean() - param * B.std(), B.mean() + param * B.std(), cv2.NORM_MINMAX)
            image[:,:,2] = cv2.normalize(R, None, R.mean() - param * R.std(), R.mean() + param * R.std(), cv2.NORM_MINMAX)
        if not hasattr(image, 'shape'):
            print(image)
            print(params["name"])
        images.append(image) # Already uint8
        labels.append(params["label"])
    return images, labels

# function for reading the images
# arguments: path to the traffic sign data, for example './GTSRB/Training'
# returns: list of images, list of corresponding labels
def readTrafficSigns_GT(rootpath, size, process=None, training=True):
    '''Reads traffic sign data for German Traffic Sign Recognition Benchmark.

    Arguments: path to the traffic sign data, for example './GTSRB/Training'
    Returns: list of images, list of corresponding labels'''
    images = [] # images
    labels = [] # corresponding labels
    # loop over all 43 classes
    if training:
        for c in range(0,43):
            prefix = rootpath + '/' + format(c, '05d') + '/' # subdirectory for class
            gtFile = open(prefix + 'GT-'+ format(c, '05d') + '.csv') # annotations file
            gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file
            next(gtReader) # skip header
            # loop over all images in current annotations file
            imgs, lbls = processImage(prefix, size, gtReader, process)
            images = images + imgs
            labels = labels + lbls
            gtFile.close()
    else:
        # Test split: one flat annotations file two levels up.
        gtFile = open(rootpath + "/../../GT-final_test.csv") # annotations file
        gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file
        next(gtReader) # skip header
        # loop over all images in current annotations file
        imgs, lbls = processImage(rootpath + '/', size, gtReader, process)
        images = images + imgs
        labels = labels + lbls
        gtFile.close()
    return images, labels

# Function to read the Belgium TSC datasets
def readTrafficSigns_Belgium(rootpath, size, process=None, training=True):
    '''Reads traffic sign data for the Belgium TSC benchmark (62 classes),
    using the same per-class directory layout as GTSRB.

    Arguments: path to the traffic sign data
    Returns: list of images, list of corresponding labels'''
    images = [] # images
    labels = [] # corresponding labels
    # loop over all classes
    for c in range(0,62):
        prefix = rootpath + '/' + format(c, '05d') + '/' # subdirectory for class
        gtFile = open(prefix + 'GT-'+ format(c, '05d') + '.csv') # annotations file
        gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file
        next(gtReader) # skip header
        # loop over all images in current annotations file
        imgs, lbls = processImage(prefix, size, gtReader, process)
        images = images + imgs
        labels = labels + lbls
        gtFile.close()
    return images, labels

# Function to read categories of the LISA dataset
def readLISACategories(rootpath):
    # Parse categories.txt into {class name -> integer label}. Returns the map
    # and the number of distinct labels ("thruTrafficMergeLeft" is an alias).
    # Read categories
    f = open("{}categories.txt".format(rootpath))
    content = f.readlines()
    # Get categories
    count = 0
    class_match = {}
    for line in content:
        splitted = (line.strip().split(': ')[-1]).split(', ')
        for c in splitted:
            if c == "thruTrafficMergeLeft":
                class_match[c] = class_match["thruMergeLeft"] # Duplicated
                continue
            class_match[c] = count
            count += 1
    class_num = len(class_match.keys()) - 1
    f.close()
    return class_match, class_num

# Function to read the LISA dataset
def readTrafficSigns_LISA(rootpath, size, process=None, training=True):
    # Read all LISA frame annotations and return an 80/20 train/test split.
    # NOTE(review): assumes `rootpath` ends with '/' -- folder names are
    # concatenated directly onto it; confirm against callers.
    class_match, class_num = readLISACategories(rootpath)
    images = []
    labels = []
    # All folder names
    folders = []
    folders += ["aiua120214-{}".format(i) for i in range(0, 3)]
    folders += ["aiua120306-{}".format(i) for i in range(0, 2)]
    folders += ["vid{}".format(i) for i in range(0, 12)]
    # Read all annotations
    for folder in folders:
        folder = rootpath + folder
        under = os.listdir(folder)
        # Descend into the "frame*" subdirectory if one exists.
        for u in under:
            if u.startswith("frame"):
                folder = '/'.join([folder, u])
                break
        annotations = folder + "/frameAnnotations.csv"
        gtFile = open(annotations)
        gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file
        next(gtReader) # skip header
        imgs, lbls = processImage(folder + "/", size, gtReader, process, True, class_match)
        images = images + imgs
        labels = labels + lbls
        gtFile.close()
    trainImages, testImages, trainLabels, testLabels = ms.train_test_split(images, labels, test_size=0.2, random_state=542)
    return trainImages, trainLabels, testImages, testLabels, class_num

# Get the directory and number of classes for different image sets
def getDirFuncClassNum(root, dataset="GT"):
    # Map a dataset name to (root, train dir, test dir, reader fn, class count).
    train_dir, test_dir, readTrafficSigns = None, None, None
    class_num = -1
    if dataset == "GT":
        root = '/'.join([root, "GTSRB/"])
        train_dir = '/'.join([root, "Final_Training/Images"])
        test_dir = '/'.join([root, "Final_Test/Images"])
        readTrafficSigns = readTrafficSigns_GT
        class_num = 43
    elif dataset == "Belgium":
        root = '/'.join([root, "BelgiumTSC"])
        train_dir = '/'.join([root, "Training"])
        test_dir = '/'.join([root, "Testing"])
        readTrafficSigns = readTrafficSigns_Belgium
        class_num = 62
    elif dataset == "LISA":
        root = '/'.join([root, "LISA"])
        train_dir = None
        test_dir = None
        readTrafficSigns = readTrafficSigns_LISA
        class_num = 46 # 1 duplicated, 47
    else:
        raise Exception("No such dataset!")
    return root, train_dir, test_dir, readTrafficSigns, class_num

# Get preprocessed image sets
def getImageSets(root, resize_size, dataset="GT", preprocessing=None, printing=True):
    # Load (or build and cache as a pickle) the preprocessed train/test sets.
    root, train_dir, test_dir, readTrafficSigns, class_num = getDirFuncClassNum(root, dataset)
    trainImages, trainLabels, testImages, testLabels = None, None, None, None
    # "original" keys the cache file; it is also passed to the readers, where it
    # matches no preprocessing branch and therefore acts as a no-op.
    preprocessing = preprocessing if (preprocessing is not None) else "original"
    ## If pickle file exists, read the file
    if os.path.isfile(root + "/processed_images_{}_{}_{}_{}.pkl".format(resize_size[0], resize_size[1], dataset, preprocessing)):
        f = open(root + "/processed_images_{}_{}_{}_{}.pkl".format(resize_size[0], resize_size[1], dataset, preprocessing), 'rb')
        trainImages = cPickle.load(f, encoding="latin1")
        trainLabels = cPickle.load(f, encoding="latin1")
        testImages = cPickle.load(f, encoding="latin1")
        testLabels = cPickle.load(f, encoding="latin1")
        f.close()
    ## Else, read images and write to the pickle file
    else:
        print("Process {} dataset with {} and size {}, saved to {}.".format(dataset, preprocessing, resize_size, root))
        start = time.time()
        if dataset == "GT" or dataset == "Belgium":
            trainImages, trainLabels = readTrafficSigns(train_dir, resize_size, preprocessing, True)
            testImages, testLabels = readTrafficSigns(test_dir, resize_size, preprocessing, False)
        else: # LISA
            trainImages, trainLabels, testImages, testLabels, class_num = readTrafficSigns(root, resize_size, preprocessing)
            print(class_num)
        print("Training and testing Image preprocessing finished in {:.2f} seconds".format(time.time() - start))
        f = open(root + "/processed_images_{}_{}_{}_{}.pkl".format(resize_size[0], resize_size[1], dataset, preprocessing), 'wb')
        for obj in [trainImages, trainLabels, testImages, testLabels]:
            cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
        f.close()
    if printing:
        # Spot-check two samples visually.
        print(trainImages[42].shape)
        plt.imshow(trainImages[42])
        plt.show()
        print(testImages[21].shape)
        plt.imshow(testImages[21])
        plt.show()
    return root, trainImages, trainLabels, testImages, testLabels, class_num

# h5 files initialization
def init_h5py(filename, epoch_num, max_total_batch):
    # Create an HDF5 log file with config/cost/time/accuracy groups and return
    # the open handle; the file is closed and the exception re-raised on error.
    f = h5py.File(filename, 'w')
    try:
        # config group for some common params
        config = f.create_group('config')
        config.attrs["total_epochs"] = epoch_num

        # cost group for training and validation cost
        cost = f.create_group('cost')
        loss = cost.create_dataset('loss', (epoch_num,))
        loss.attrs['time_markers'] = 'epoch_freq'
        loss.attrs['epoch_freq'] = 1
        train = cost.create_dataset('train', (max_total_batch,)) # Set size to maximum theoretical value
        train.attrs['time_markers'] = 'minibatch'

        # time group for batch and epoch time
        t = f.create_group('time')
        loss = t.create_dataset('loss', (epoch_num,))
        train = t.create_group('train')
        start_time = train.create_dataset("start_time", (1,), dtype='float64')
        start_time.attrs['units'] = 'seconds'
        end_time = train.create_dataset("end_time", (1,), dtype='float64')
        end_time.attrs['units'] = 'seconds'
        train_batch = t.create_dataset('train_batch', (max_total_batch,)) # Same as above

        # accuracy group for training and validation accuracy
        acc = f.create_group('accuracy')
        acc_v = acc.create_dataset('valid', (epoch_num,))
        acc_v.attrs['time_markers'] = 'epoch_freq'
        acc_v.attrs['epoch_freq'] = 1
        acc_t = acc.create_dataset('train', (max_total_batch,))
        acc_t.attrs['time_markers'] = 'minibatch'

        # Mark which batches are the end of an epoch
        time_markers = f.create_group('time_markers')
        time_markers.attrs['epochs_complete'] = epoch_num
        train_batch = time_markers.create_dataset('minibatch', (epoch_num,))

        # Inference accuracy
        infer = f.create_group('infer_acc')
        infer_acc = infer.create_dataset('accuracy', (1,))
    except Exception as e:
        f.close() # Avoid hdf5 runtime error or os error
        raise e # Catch the exception to close the file, then raise it to stop the program
    return f

# Create directories for files/plots saving
def create_dir(current_dir, subs, model, devices):
    # Build <current_dir>/<sub>/<model>/<device> for every sub/device pair.
    for sub in subs:
        path = os.path.join(current_dir, sub)
        if not os.path.exists(path):
            os.makedirs(path)
        path = os.path.join(path, model)
        if not os.path.exists(path):
            os.makedirs(path)
        temp_path = path
        for device in devices:
            path = os.path.join(temp_path, device)
            if not os.path.exists(path):
                os.makedirs(path)

if __name__ == '__main__':
    root = "/Users/moderato/Downloads/"
    resize_size = (48, 48)
    # print(getImageSets(root, resize_size, dataset="LISA", process=None, printing=True))
import math t = int(raw_input()) for i in range(t): n = int(raw_input()) result = 0 for j in range(1, (n / 2) + 1): if n % j == 0: result += j print result
# Author: Mathieu Blondel # License: BSD import numpy as np from sklearn.base import ClassifierMixin from sklearn.preprocessing import LabelBinarizer from .base import BaseClassifier from .dataset_fast import get_dataset from .dual_cd_fast import _dual_cd class LinearSVC(BaseClassifier, ClassifierMixin): def __init__(self, C=1.0, loss="hinge", max_iter=1000, tol=1e-3, permute=True, shrinking=True, warm_start=False, random_state=None, callback=None, verbose=0, n_jobs=1): self.C = C self.loss = loss self.max_iter = max_iter self.tol = tol self.permute = permute self.shrinking = shrinking self.warm_start = warm_start self.random_state = random_state self.callback = callback self.verbose = verbose self.n_jobs = n_jobs self.coef_ = None def _get_loss(self): loss = {"l1": 1, "hinge": 1, "l2": 2, "squared_hinge": 2} return loss[self.loss] def fit(self, X, y): n_samples, n_features = X.shape rs = self._get_random_state() self.label_binarizer_ = LabelBinarizer(neg_label=-1, pos_label=1) Y = np.asfortranarray(self.label_binarizer_.fit_transform(y), dtype=np.float64) n_vectors = Y.shape[1] ds = get_dataset(X) if not self.warm_start or self.coef_ is None: self.coef_ = np.zeros((n_vectors, n_features), dtype=np.float64) self.dual_coef_ = np.zeros((n_vectors, n_samples), dtype=np.float64) for i in xrange(n_vectors): _dual_cd(self, self.coef_[i], self.dual_coef_[i], ds, Y[:, i], self.permute, self.C, self._get_loss(), self.max_iter, rs, self.tol, self.shrinking, self.callback, verbose=self.verbose) return self
import utils from config import * states = ['md','fl', 'co', 'mi', 'la', 'ga', 'or', 'il', 'wa', 'tx'] cities = {'md':'baltimore','fl':'miami','co':'denver','mi':'detroit','la':'new orleans','ga':'atlanta','or':'portland','il':'chicago','wa':'seattle','tx':'houston'} distances = [] for state in states: # connect to the psql database db, context = cfg_init(state) # download the data for the city sql = 'SELECT distance, "H7X001", "H7X002", "H7X003", "H7X004", "H7X005", "H7Y003" FROM distxdem;' city_dist = pd.read_sql(sql, db["con"]) # add city city_dist['city'] = cities[state] # record distances.append(city_dist) df = pd.concat(distances) df.to_csv('../data/results/supermarket_distance.csv')
import board
import displayio
import digitalio
from adafruit_st7735r import ST7735R
from adafruit_rgb_display import color565
import adafruit_rgb_display.st7735 as st7735

# Display control pins. The raw board pins are passed straight to
# displayio.FourWire; the DigitalInOut wrappers (needed by the alternative
# adafruit_rgb_display driver imported above) were deliberately commented out.
tft_cs = board.A2 #digitalio.DigitalInOut(board.A2)
tft_dc = board.A3 #digitalio.DigitalInOut(board.A3)
tft_rc = board.A4 #digitalio.DigitalInOut(board.A4)

spi = board.SPI()

# Free any display previously bound to this bus before re-initialising
# (required by displayio; order matters: release -> bus -> driver).
displayio.release_displays()
dbus = displayio.FourWire(spi,command=tft_dc,chip_select=tft_cs, reset=tft_rc)
display = ST7735R(dbus, width=128, height=160, colstart=0, rowstart=0, bgr=True)

# Bitmap is streamed from flash; the file is intentionally left open while
# the image is on screen (presumably required by OnDiskBitmap -- confirm).
f = open("/img.bmp", "rb")
odb = displayio.OnDiskBitmap(f)
face = displayio.TileGrid(odb,pixel_shader=displayio.ColorConverter())

# NOTE(review): Group(max_size=...) is an older CircuitPython API --
# confirm the target firmware version still accepts it.
splash = displayio.Group(max_size=10)
splash.append(face)
display.show(splash)

# Keep the script alive so the splash stays on screen.
while True:
    pass
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import random import getopt import sys def gen_rand_nums(count, filename): rand_seed = os.urandom(32) random.seed(rand_seed) with open(filename, "wb") as f: for i in range(0, count): tmp = str(random.randint(0, 13 * count)) + " " f.write(tmp) f.close() def usage(): print '''Usage: -n, --nums Set the amount of the random nums. -h, --help Show this help. -o, --output Change the output filename. ''' if __name__ == '__main__': try: opts, args = getopt.getopt(sys.argv[1:], 'hn:o:', ["help", "nums= ", "output="]) except getopt.GetoptError as err: print str(err) usage() sys.exit(2) output = "sort_file.txt" num_count = 1000 for o, a in opts: if o in ("-h", "--help"): usage() sys.exit() elif o in ("-n", "--nums"): num_count = int(a) elif o in ("-o", "--output"): output = a else: assert False, "unhandled option" gen_rand_nums(num_count, output)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Yearly bankruptcy-prediction experiments (2005-2019) with logistic regression.

Compares six variants: plain logit (Basic), grid-searched class weights (WC),
SMOTE oversampling (SM), and the same three on frames carrying industry
dummies (Ind / WCI / SMI).  Created on Thu Nov 19 14:59:35 2020.

@author: evaferreira
"""

import itertools
import warnings

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             precision_score, recall_score)
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler  # RobustScaler is less prone to outliers.

warnings.filterwarnings("ignore")

DATA_ROOT = '/Users/evaferreira/Downloads/Thesis/Thesis_Rep/PoC'
N_YEARS = 15     # one train/test pair per year
YEARS = ['2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012',
         '2013', '2014', '2015', '2016', '2017', '2018', '2019']
N_FEAT = 16      # base financial-ratio feature columns
N_FEAT_IND = 26  # feature columns once industry dummies are appended
LABELS = ['No Bankruptcy', 'Bankruptcy']


def load_frames(train_dir, test_dir, suffix=''):
    """Load the 15 yearly train/test CSV pairs, indexed by PERMCO_Y."""
    train, test = [], []
    for i in range(N_YEARS):
        tr = pd.read_csv('%s/%s/dftrain%d%s.csv' % (DATA_ROOT, train_dir, i, suffix))
        tr.set_index(['PERMCO_Y'], inplace=True)
        train.append(tr)
        te = pd.read_csv('%s/%s/dftest%d%s.csv' % (DATA_ROOT, test_dir, i, suffix))
        te.set_index(['PERMCO_Y'], inplace=True)
        test.append(te)
    return train, test


#%%
def plot_confusion_matrix(cm, classes, normalize=True,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # row-normalize: each true class sums to 1
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, fontsize=14)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


#%%
rob_scaler = RobustScaler()


def scale_first_columns(train_dfs, test_dfs, n_feat=N_FEAT):
    """Robust-scale the first *n_feat* columns of every frame, in place.

    Single-step ``.loc`` assignment fixes the original chained
    ``df.iloc[:, :16][col] = ...`` which wrote into a temporary copy
    (pandas SettingWithCopy) and could silently leave the frames unscaled.
    NOTE(review): as in the original, the scaler is re-fit on each test
    frame; fitting on test data leaks test statistics -- confirm intended.
    """
    for tr, te in zip(train_dfs, test_dfs):
        for col in tr.columns[:n_feat]:
            tr.loc[:, col] = rob_scaler.fit_transform(tr[col].values.reshape(-1, 1))
            te.loc[:, col] = rob_scaler.fit_transform(te[col].values.reshape(-1, 1))


def _metrics(y_test, predictions):
    """(f1, accuracy, recall, precision) for one year's test set."""
    return (f1_score(y_test, predictions), accuracy_score(y_test, predictions),
            recall_score(y_test, predictions), precision_score(y_test, predictions))


def _fit_and_score(logreg, df, df_t, n_feat):
    """Fit on the first *n_feat* columns of df; predict/score on df_t."""
    logreg.fit(df.iloc[:, :n_feat], df.iloc[:, n_feat:])
    predictions = logreg.predict(df_t.iloc[:, :n_feat])
    y_test = df_t.iloc[:, n_feat:]['y_t+2'].values
    return (predictions, y_test) + _metrics(y_test, predictions)


#%% model variants ------------------------------------------------------------
def logreg_nosm_nowc(df, df_t):
    """Plain logistic regression: no oversampling, no class weighting."""
    return _fit_and_score(LogisticRegression(), df, df_t, N_FEAT)


def tuning(df, i=False):
    """Grid-search class_weight by F1; i=True = industry frames (26 features)."""
    gs_w = [{0: 1000, 1: 100}, {0: 1000, 1: 10}, {0: 1000, 1: 1.0},
            {0: 500, 1: 1.0}, {0: 400, 1: 1.0}, {0: 300, 1: 1.0},
            {0: 200, 1: 1.0}, {0: 150, 1: 1.0}, {0: 100, 1: 1.0},
            {0: 99, 1: 1.0}, {0: 10, 1: 1.0}, {0: 0.01, 1: 1.0},
            {0: 0.01, 1: 10}, {0: 0.01, 1: 100}, {0: 0.001, 1: 1.0},
            {0: 0.005, 1: 1.0}, {0: 1.0, 1: 1.0}, {0: 1.0, 1: 0.1},
            {0: 10, 1: 0.1}, {0: 100, 1: 0.1}, {0: 10, 1: 0.01},
            {0: 1.0, 1: 0.01}, {0: 1.0, 1: 0.001}, {0: 1.0, 1: 0.005},
            {0: 1.0, 1: 10}, {0: 1.0, 1: 99}, {0: 1.0, 1: 100},
            {0: 1.0, 1: 150}, {0: 1.0, 1: 200}, {0: 1.0, 1: 300},
            {0: 1.0, 1: 400}, {0: 1.0, 1: 500}, {0: 1.0, 1: 1000},
            {0: 10, 1: 1000}, {0: 100, 1: 1000}, {0: 4, 1: 96}]
    param_grid = {"class_weight": gs_w}
    grid = GridSearchCV(LogisticRegression(), param_grid, scoring="f1", verbose=2)
    n_feat = N_FEAT_IND if i else N_FEAT
    grid.fit(df.iloc[:, :n_feat], df.iloc[:, n_feat:])
    return grid.best_params_


def logreg_nosm_wc(df, df_t):
    """Logit with grid-searched class weights (no oversampling)."""
    return _fit_and_score(LogisticRegression(**tuning(df)), df, df_t, N_FEAT)


def _smote_variant(df, df_t, n_feat):
    """SMOTE-oversample the minority class, then fit a weighted logit."""
    # SMOTE Technique (OverSampling) After splitting and Cross Validating
    sm = SMOTE(sampling_strategy='minority', random_state=42)
    # NOTE(review): fit_sample was renamed fit_resample in newer imblearn
    Xsm_train, ysm_train = sm.fit_sample(df.iloc[:, :n_feat], df.iloc[:, n_feat:])
    w = {0: 0.01, 1: 1.0}
    logreg = LogisticRegression(class_weight=w)
    logreg.fit(Xsm_train, ysm_train)
    predictions = logreg.predict(df_t.iloc[:, :n_feat])
    y_test = df_t.iloc[:, n_feat:]['y_t+2'].values
    return (predictions, y_test) + _metrics(y_test, predictions)


def logreg_sm_nowc(df, df_t):
    """SMOTE + weighted logit on the base (16-feature) frames."""
    return _smote_variant(df, df_t, N_FEAT)


def logreg_nosm_nowc_ind(df, df_t):
    """Industry frames with fixed class weights.

    (With tuned weights recall is higher but F1 is worse.)
    """
    w = {0: 0.01, 1: 1.0}
    return _fit_and_score(LogisticRegression(class_weight=w), df, df_t, N_FEAT_IND)


def logreg_sm_nowc_ind(df, df_t):
    """SMOTE + weighted logit on the industry (26-feature) frames."""
    return _smote_variant(df, df_t, N_FEAT_IND)


def logreg_nosm_wc_ind(df, df_t):
    """Industry frames with grid-searched class weights."""
    return _fit_and_score(LogisticRegression(**tuning(df, True)), df, df_t, N_FEAT_IND)


#%% experiment driver ---------------------------------------------------------
def run_yearly(model_fn, train_dfs, test_dfs, progress=None):
    """Run *model_fn* on each yearly pair.

    Predictions and targets are reshaped to column vectors so they can be
    vstacked into pooled confusion matrices; metrics stay per-year lists.
    """
    preds, y_tests = [], []
    f1s, accs, recalls, precisions = [], [], [], []
    for i in range(N_YEARS):
        p, y, f1, acc, rec, prec = model_fn(train_dfs[i], test_dfs[i])
        preds.append(p.reshape(p.shape[0], 1))
        y_tests.append(y.reshape(y.shape[0], 1))
        f1s.append(f1)
        accs.append(acc)
        recalls.append(rec)
        precisions.append(prec)
        if progress:
            print(progress, i)
    return preds, y_tests, f1s, accs, recalls, precisions


def report(y_tests, preds, title, tag, f1s, accs, recalls, precisions):
    """Pooled confusion matrix (next to a perfect-classifier reference) and
    mean metrics across the 15 years."""
    predict_cm = confusion_matrix(np.vstack(y_tests), np.vstack(preds))
    actual_cm = confusion_matrix(np.vstack(y_tests), np.vstack(y_tests))
    fig = plt.figure(figsize=(16, 8))
    fig.add_subplot(221)
    plot_confusion_matrix(predict_cm, LABELS, title=title, cmap=plt.cm.Blues)
    fig.add_subplot(222)
    plot_confusion_matrix(actual_cm, LABELS,
                          title="Confusion Matrix \n (with 100% accuracy)",
                          cmap=plt.cm.Greens)
    print(tag, np.mean(f1s), np.mean(accs), np.mean(recalls), np.mean(precisions))


def plot_single_year(y, p, title, with_reference=False):
    """Confusion matrix for a single year (grey colormap, thesis figures)."""
    predict_cm = confusion_matrix(y, p)
    fig = plt.figure(figsize=(16, 8))
    fig.add_subplot(221)
    plot_confusion_matrix(predict_cm, LABELS, title=title, cmap=plt.cm.Greys)
    if with_reference:
        fig.add_subplot(222)
        plot_confusion_matrix(confusion_matrix(y, y), LABELS,
                              title="Confusion Matrix \n (with 100% accuracy)",
                              cmap=plt.cm.Greys)


#%% base frames
df_list, df_test = load_frames('train_dfs', 'test_dfs')
scale_first_columns(df_list, df_test)

#%% 1) plain logit
predictions, y_tests, f1s, accs, recalls, precisions = run_yearly(
    logreg_nosm_nowc, df_list, df_test)
report(y_tests, predictions,
       "DTH w/o Oversampling w/o Weighed-Class \n Confusion Matrix",
       'Simple Logit (f1, acc, recall, precision):',
       f1s, accs, recalls, precisions)

#%% 2) tuned class weights
predictions_wc, y_tests_wc, f1s_wc, accs_wc, recalls_wc, precisions_wc = run_yearly(
    logreg_nosm_wc, df_list, df_test, progress='///////////////')
report(y_tests_wc, predictions_wc,
       "DTH w/o Oversampling w/ Weighted-Class \n Confusion Matrix",
       'WC (f1, acc, recall, precision):',
       f1s_wc, accs_wc, recalls_wc, precisions_wc)

#%% 3) SMOTE oversampling
predictions_sm, y_tests_sm, f1s_sm, accs_sm, recalls_sm, precisions_sm = run_yearly(
    logreg_sm_nowc, df_list, df_test)
report(y_tests_sm, predictions_sm,
       "DTH w/ Oversampling w/o Weighted-Class \n Confusion Matrix",
       'SM (f1, acc, recall, precision):',
       f1s_sm, accs_sm, recalls_sm, precisions_sm)

#%% 4) industry dummies
df_ind, df_tind = load_frames('train_dfs_ind', 'test_dfs_ind', suffix='ind')
scale_first_columns(df_ind, df_tind)  # only the first 16 columns, as before
predictions_i, y_tests_i, f1s_i, accs_i, recalls_i, precisions_i = run_yearly(
    logreg_nosm_nowc_ind, df_ind, df_tind)
report(y_tests_i, predictions_i,
       "DTH w/o Oversampling w/o Weighted-Class (Industries) \n Confusion Matrix",
       'I (f1, acc, recall, precision):',
       f1s_i, accs_i, recalls_i, precisions_i)

#%% 5) industry dummies + SMOTE
predictions_smi, y_tests_smi, f1s_smi, accs_smi, recalls_smi, precisions_smi = run_yearly(
    logreg_sm_nowc_ind, df_ind, df_tind)
report(y_tests_smi, predictions_smi,
       "DTH w/ Oversampling w/o Weighted Class (Industries) \n Confusion Matrix",
       'SMI (f1, acc, recall, precision):',
       f1s_smi, accs_smi, recalls_smi, precisions_smi)

#%% 6) industry dummies + tuned class weights
# Frames are reloaded/rescaled from CSV first, exactly as the original did.
df_ind, df_tind = load_frames('train_dfs_ind', 'test_dfs_ind', suffix='ind')
scale_first_columns(df_ind, df_tind)
predictions_wci, y_tests_wci, f1s_wci, accs_wci, recalls_wci, precisions_wci = run_yearly(
    logreg_nosm_wc_ind, df_ind, df_tind, progress='/////////////////')
report(y_tests_wci, predictions_wci,
       "DTH w/o Oversampling w/ Weighted-Class (Industries) \n Confusion Matrix",
       'WCI (f1, acc, recall, precision):',
       f1s_wci, accs_wci, recalls_wci, precisions_wci)

#%% per-year summary tables
rec_df = pd.DataFrame(list(zip(recalls, recalls_wc, recalls_sm,
                               recalls_i, recalls_wci, recalls_smi)),
                      columns=['Basic', 'WC', 'SM', 'Ind', 'WCI', 'SMI'],
                      index=YEARS)
# rec_df.to_csv('rec_df_logit.csv')

f1_df = pd.DataFrame(list(zip(f1s, f1s_wc, f1s_sm, f1s_i, f1s_wci, f1s_smi)),
                     columns=['Basic', 'WC', 'SM', 'Ind', 'WCI', 'SMI'],
                     index=YEARS)
# f1_df.to_csv('f1_df_logit.csv')

#%% single-year (2008, index 3) confusion matrices
plot_single_year(y_tests[3], predictions[3],
                 "Simple Logit (2008) \n Confusion Matrix")
plot_single_year(y_tests_wci[3], predictions_wci[3],
                 "Logit TUNI (2008) \n Confusion Matrix")
plot_single_year(y_tests_sm[3], predictions_sm[3],
                 "Logit SM (2008) \n Confusion Matrix", with_reference=True)
import sys
import re
from pyspark import SparkContext

if __name__ == "__main__":
    # ---- argument parsing ---------------------------------------------------
    if len(sys.argv) < 4:
        print >> sys.stderr, "spark-submit MainPy2.py [HDFS file]\n" \
            + "[searcher] [number to get] [Medium (Video, Book, etc.] [Genre (as gen:'War')] [Rating (as rat:4 for over 4 stars) or top (for top in genre)\n" \
            + "[recommender] [customerID] [Medium] [Genre] [Rating]\n"
        exit(-1)

    sc = SparkContext()
    act = sys.argv[2]

    if act == 'recommender':
        cID = sys.argv[3].strip("'")
        toDisplay = 5  # the recommender always shows 5 items
    else:
        toDisplay = sys.argv[3]
    item = sys.argv[4]

    # Optional trailing args: a genre filter ("gen:...") and/or a rating
    # selector ("rat:N" or "top").
    if len(sys.argv) > 5:
        if sys.argv[5].startswith('gen:'):
            gen = sys.argv[5].strip("'")
            # was `len(sys.argv) is 7`: identity comparison on an int is
            # implementation-defined; use equality
            if len(sys.argv) == 7:
                rating = sys.argv[6]
            else:
                rating = ''
        else:
            gen = ''
            rating = sys.argv[5]
    else:
        rating = ''
        gen = ''

    # Rows are ';\t'-separated. Observed field usage: [3] medium,
    # [4] sales rank, [5] similar-item ids, [7] genres, [8] rating blob,
    # [9] customer ids -- confirm against the dataset schema.
    baseRDD = sc.textFile(sys.argv[1]).map(lambda line: line.split(';\t')).filter(lambda line: line[3].strip() == item)

    if act == 'searcher':
        if rating.startswith('rat:'):
            # average star rating is the 5th ':'-field of column 8
            baseRDD = baseRDD.filter(lambda line: float(line[8].split(':')[4]) >= float(rating[4:]))
            if gen.startswith('gen:'):
                baseRDD = baseRDD.filter(lambda line: gen[4:] in line[7])
        elif rating.startswith('top'):
            # best sales rank first; rank 0 means "unranked", so drop it
            baseRDD = baseRDD.filter(lambda line: int(line[4]) > 0).sortBy(lambda line: int(line[4]))
            if gen.startswith('gen:'):
                baseRDD = baseRDD.filter(lambda line: gen[4:] in line[7])
    elif act == 'recommender':
        # ranked items this customer reviewed, minus their "similar" links
        baseRDD = sc.textFile(sys.argv[1]).map(lambda line: line.split(';\t')).filter(lambda line: cID in line[9]).filter(lambda line: line[3].strip() == item).filter(lambda line: int(line[4]) > 0)
        similar = baseRDD.filter(lambda line: not 'NULL' in line[5]).map(lambda line: (line[1], line[5])).flatMapValues(lambda line: line.split()).map(lambda line: (line[1], line[0]))
        baseRDD = baseRDD.keyBy(lambda line: line[1]).subtractByKey(similar).map(lambda line: (line[1][4],line[1])).sortBy(lambda line: int(line[0]))
        if gen.startswith('gen:'):
            baseRDD = baseRDD.filter(lambda line: gen[4:] in line[1][7])
    else:
        print("Operation not supported.")
        # NOTE(review): execution still falls through and dumps the
        # medium-filtered RDD below -- confirm this is intended.

    # ---- output -------------------------------------------------------------
    textFile = open("output", "w")
    for item in baseRDD.take(int(toDisplay)):
        print(item)
        # newline added: records were previously concatenated with no separator
        textFile.write(str(item) + "\n")
    textFile.close()  # was never closed: buffered output could be lost
    sc.stop()
import socket

from fastapi import FastAPI
from redis import Redis

# Module-level singletons: the FastAPI app and a Redis client.
# Host "redis" resolves by service name (presumably docker-compose /
# kubernetes DNS -- confirm deployment).
app = FastAPI()
redis = Redis(host="redis", port=6379)


@app.get("/")
def root():
    """Increment the shared 'visits' counter in Redis and return
    (container hostname, total visit count)."""
    visits = redis.incr("visits")
    return socket.gethostname(), visits
from sklearn.decomposition import SparsePCA
import numpy as np
import cv2
from matplotlib import pyplot as plt
from numpy import *
import math
import os
from PIL import Image
import glob
import pandas as pd


# -----Pre-processing the images from the dataset
def get_image(filename):
    """Load `filename` as a (1, 3, 224, 224) RGB uint8 array, model-ready."""
    img = cv2.imread(filename)                  # Read image in BGR order
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Change to RGB order
    img = cv2.resize(img, (224, 224))           # Resize to 224*224 to fit model
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)                # (height, width, ch) -> (ch, height, width)
    img = img[np.newaxis, :]                    # Extend to (example, channel, heigth, width)
    return img


# -----Step 1 (SPCA): project the raw features to 1024 sparse components
X = pd.read_csv('mylist.csv', delimiter=None)
X = X.drop(X.columns[0], axis=1)  # first column is the saved row index
X = np.array(X)

pca = SparsePCA(n_components=1024)
x = pca.fit_transform(X)
pd.DataFrame(x).to_csv('n_components_full.csv')


# -----Step 2: mapping from Euclidean space to Hamming space
def hamming_z(x):
    """Map SPCA codes *x* (rows = images, cols = dims) to Hamming space.

    Returns:
      z -- relaxed sinusoidal embedding with values in [-1, 1]
      y -- its binarization: 1 where z <= 0, else -1

    Every (dimension j, frequency k) pair is scored with an analytic
    delta; the `col` best-ranked pairs define the output dimensions.
    NOTE(review): z uses a factor ``k * 11`` while delta uses ``(k+1)*pi``
    -- the 11 looks like a leftover constant; preserved as in the original.
    """
    x = np.array(x)
    row, col = x.shape
    epsilon = 1e-7

    # Per-column value range (vectorized; replaces the original
    # element-by-element Python scan over every cell).
    col_min = x.min(axis=0)
    col_max = x.max(axis=0)

    # Score every (dimension j, frequency k) pair.
    scored = []
    for j in range(col):
        span = col_max[j] - col_min[j]
        for k in range(col):
            if span == 0:  # constant feature: degenerate score (avoids overflow)
                delta = 1
            else:
                delta = 1 - math.exp((-(epsilon ** 2) * 0.5 *
                                      (((k + 1) * math.pi) * 1.0 / span) ** 2))
            scored.append([delta, [j, k]])
    scored.sort()  # ascending delta; the first `col` pairs are kept

    z = np.zeros((row, col))
    y = np.zeros((row, col))
    t = 0  # binarization threshold
    for v in range(col):
        j0, k0 = scored[v][1]  # v-th best pair: source dim j0, frequency k0
        span = col_max[j0] - col_min[j0]
        for u in range(row):
            if span == 0:  # constant column maps to a constant code
                z[u][v] = 1
            else:
                z[u][v] = math.sin(math.pi / 2 +
                                   (k0 * 11 * x[u][j0]) * 1.0 * math.pi / span)
            y[u][v] = 1 if z[u][v] <= t else -1
    return z, y


X = pd.read_csv('n_components_full.csv', delimiter=None)
X = X.drop(X.columns[0], axis=1)
X = np.array(X)
print(X.shape, X)  # was a Python-2 print statement (file otherwise targets py3)

z, y = hamming_z(X)
pd.DataFrame(z).to_csv('z_values_full.csv')
pd.DataFrame(y).to_csv('y_values_full.csv')
import sys
import serial
import threading
import time

# Seconds between bridge polls; each direction runs on its own Timer chain.
s_task_interval = 1


def initUart(com='COM0', baudrate=19200):
    """Build (but do not open) a serial port configured for the bridge."""
    ser = serial.Serial()
    ser.port = com
    ser.baudrate = baudrate
    return ser


def uart_sent(ser, string):
    """Write raw data to `ser`, opening the port on first use."""
    if not ser.isOpen():
        ser.open()
    ser.write(string)


def uart_receive(ser, num):
    """Read exactly `num` bytes from `ser`, opening the port on first use."""
    if not ser.isOpen():
        ser.open()
    return ser.read(num)


def periodic_bridge_task(recvSer, sendSer, interval):
    """Forward any pending bytes recvSer -> sendSer, then re-arm the timer."""
    num = recvSer.inWaiting()
    if num != 0:
        uart_sent(sendSer, uart_receive(recvSer, num))
    threading.Timer(interval, periodic_bridge_task,
                    (recvSer, sendSer, interval)).start()


def main(argv=None):
    """Bidirectional serial bridge; argv: PORT_A PORT_B BAUD."""
    if len(sys.argv) < 4:  # was an unguarded IndexError on missing args
        print("usage: %s PORT_A PORT_B BAUD" % sys.argv[0])
        return 2
    com = [sys.argv[1], sys.argv[2]]
    baud = int(sys.argv[3])  # argv is str; pyserial expects a numeric baudrate
    mySerA = initUart(com[0], baud)
    mySerB = initUart(com[1], baud)
    mySerA.open()
    mySerB.open()

    global s_task_interval
    # Stagger the two directions by half an interval so the polls alternate.
    periodic_bridge_task(mySerA, mySerB, s_task_interval)
    time.sleep(s_task_interval / 2)
    periodic_bridge_task(mySerB, mySerA, s_task_interval)

    # Timer threads do all the work; sleep instead of the original
    # `while True: pass` busy-wait that pinned a CPU core.
    while True:
        time.sleep(s_task_interval)


if __name__ == "__main__":
    sys.exit(main())
import random
import heapq  # required by Solution_heap: heapify/heappushpop were undefined
from typing import List


class Solution:
    """Kth largest element via randomized quickselect (average O(n), in-place).

    Note: mutates `nums` (partial ordering) as a side effect.
    """

    def findKthLargest(self, nums: List[int], k: int) -> int:
        def partition(nums, l, r, pivot_idx):
            # Lomuto partition: move pivot to r, pack values <= pivot to the
            # left, return the pivot's final resting index.
            pivot = nums[pivot_idx]
            nums[r], nums[pivot_idx] = nums[pivot_idx], nums[r]
            a = l
            for i in range(l, r):
                if nums[i] <= pivot:
                    nums[i], nums[a] = nums[a], nums[i]
                    a += 1
            nums[a], nums[r] = nums[r], nums[a]
            return a

        def sort(nums, l, r, K):
            # Quickselect: place the Kth smallest (0-indexed) element at
            # position K; only the side containing K is recursed into.
            if l < r:
                pivot_idx = random.randint(l, r)  # inclusive
                pivot_idx = partition(nums, l, r, pivot_idx)
                if pivot_idx == K:
                    return
                if pivot_idx < K:
                    sort(nums, pivot_idx + 1, r, K)
                else:
                    sort(nums, l, pivot_idx - 1, K)

        L = len(nums)
        sort(nums, 0, L - 1, L - k)  # Kth largest == (L-k)th smallest
        return nums[L - k]


class Solution_heap:
    """Kth largest via a size-k min-heap: O(n log k) time, O(k) extra space."""

    def findKthLargest(self, nums, k):
        heap = nums[:k]
        heapq.heapify(heap)
        for n in nums[k:]:
            # keep only the k largest seen so far; the root is their minimum,
            # i.e. the Kth largest overall (smaller values get popped out)
            heapq.heappushpop(heap, n)
        return heap[0]
import socket # field computer local_ip = "172.16.1.76" local_port = 25565 buf_size = 1024 server_msg = "hellorobot!" bytes_to_send = str.encode(server_msg) udp_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) udp_socket.bind((local_ip, local_port)) print(f"UDP server up and listening at {local_ip}@{local_port}") # change to robot ip send_to_addr = "172.16.1.10" send_to_port = 25566 while True: udp_socket.sendto(bytes_to_send, (send_to_addr, send_to_port)) msg, addr = udp_socket.recvfrom(buf_size) print(f"received {msg} from {addr}")
# -*- coding: utf-8 -*- from Acquisition import aq_inner from plone import api from plone.app.content.browser.foldercontents import (FolderContentsTable , FolderContentsBrowserView , FolderContentsView) class PrenotazioniFolderContentsTable(FolderContentsTable): """ The foldercontents table WITH NO BUTTONS """ @property def buttons(self): ''' Custom buttons ''' roles = api.user.get_roles() if 'Manager' in roles: return return [] class PrenotazioniFolderContentsView(FolderContentsView): ''' The foldercontents CUSTOMIZED ''' def contents_table(self): ''' Custom contetn-folder ''' table = PrenotazioniFolderContentsTable(aq_inner(self.context), self.request) return table.render() class PrenotazioniFolderContentsBrowserView(FolderContentsBrowserView): table = PrenotazioniFolderContentsTable
# EDA Back Pain
'''Exploratory data analysis and model comparison for a lower-back-pain dataset.
Ref: https://towardsdatascience.com/an-exploratory-data-analysis-on-lower-back-pain-6283d0b0123
https://www.kaggle.com/nasirislamsujan/exploratory-data-analysis-lower-back-pain?scriptVersionId=5589885'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plot
import seaborn as sb
sb.set()
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV # StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier, plot_importance
from sklearn import model_selection

#importing dataset
dataset = pd.read_csv(r'D:\Programming Tutorials\Machine Learning\Projects\Datasets\Dataset_spine.csv')
dataset.head(3)
#unnecessary column
dataset.iloc[:, -1].head()
#deleting unnecessary column
del dataset['Unnamed: 13']
#change the Column names. Same job done by below method as list
'''1st way to rename the columns, no need to follow any order since they are dict entries'''
dataset.rename(columns = {"Col1" : "pelvic_incidence", "Col2" : "pelvic_tilt",
                          "Col3" : "lumbar_lordosis_angle", "Col4" : "sacral_slope",
                          "Col5" : "pelvic_radius", "Col6" : "degree_spondylolisthesis",
                          "Col7" : "pelvic_slope", "Col8" : "direct_tilt",
                          "Col9" : "thoracic_slope", "Col10" : "cervical_tilt",
                          "Col11" : "sacrum_angle", "Col12" : "scoliosis_slope",
                          "Class_att" : "class"}, inplace = True)
'''2nd way to rename the columns, order needs to be followed since is a list of titles'''
dataset.columns = ['pelvic_incidence', 'pelvic_tilt', 'lumbar_lordosis_angle', 'sacral_slope',
                   'pelvic_radius', 'degree_spondylolisthesis', 'pelvic_slope', 'direct_tilt',
                   'thoracic_slope', 'cervical_tilt', 'sacrum_angle', 'scoliosis_slope', 'class']
'''Summary of the dataset, gives descriptive statistics with the central tendency, dispersion
and shape of dataset distribution excluding NaN values. Works with numeric values but not
categorical values'''
dataset.shape
dataset.describe()
dataset.info()
# checking for missing values in each feature column, unblock for % count
dataset.isnull().sum()#/len(df)*100
#shows count as barchart w.r.t specified class, checks for imbalanced dataset
dataset['class'].value_counts().plot.bar()
# 1st way to find correlation between features as heatmap
plot.subplots(figsize = (10, 6)) #doesnt clutter unlike plot.figure(figsize=(10,6))
sb.heatmap(dataset.corr(), annot = True, cmap = 'viridis')
# 2nd way to find correlation between features as heatmap that gives only 1 diagonal
mask = np.array(dataset.corr())
mask[np.tril_indices_from(mask)] = False
fig, ax = plot.subplots(figsize = (10, 8))
sb.heatmap(dataset.corr(), vmax = .8, square = True, annot = True, cmap = 'viridis', mask = mask)
# 3rd way of custom correlation between each pair of features w.r.t output
sb.pairplot(dataset, hue = 'class')
# Histogram of distribution of each feature
dataset.hist(figsize = (10, 12), bins = 20, color = '#007959AA')
plot.title("Features Distribution")
plot.show()
'''1st way of boxplot for outliers'''
plot.subplots(figsize = (15, 6))
dataset.boxplot(patch_artist = True, sym = "k.")
plot.xticks(rotation = 45)
'''2nd way of boxplot for outliers'''
# Label encoding of the output variable. Algorithms like XGBoost takes numerical values
encoder = LabelEncoder()
dataset['class'] = encoder.fit_transform(dataset['class'])
# boxplot for outliers
feature_space = dataset.columns.values
number_of_columns = 12
# NOTE(review): precedence bug? `- 1 / number_of_columns` divides only the 1;
# `(len(feature_space) - 1) / number_of_columns` was probably intended.
number_of_rows = len(feature_space) - 1 / number_of_columns
plot.figure(figsize = (number_of_columns, 5 * number_of_rows))
for i in range(0, len(feature_space)):
    plot.subplot(number_of_rows + 1, number_of_columns, i + 1)
    sb.set_style('whitegrid')
    sb.boxplot(dataset[feature_space[i]], color = 'green', orient = 'v')
    plot.tight_layout()
# To check distribution-Skewness
plot.figure(figsize = (2 * number_of_columns, 5 * number_of_rows))
for k in range(0, len(feature_space)):
    plot.subplot(number_of_rows + 1, number_of_columns, k + 1)
    sb.distplot(dataset[feature_space[k]], kde = True)
# Visualization with barplot and normal distribution plot
for j, features in enumerate(list(dataset.columns)[:-1]):
    fg = sb.FacetGrid(dataset, hue = 'class', height = 5)
    fg.map(sb.distplot, features).add_legend()
dataset.pelvic_slope[dataset.scoliosis_slope == 1].median()
sb.boxplot(data = dataset, x = 'class', y = 'pelvic_slope', color = 'g')
'''3rd way to detect & remove outliers by function'''
# Function to detect outliers
minimum = 0
maximum = 0
def detect_outlier(feature):
    # Tukey fences: flag the feature when any value falls outside 1.5*IQR.
    first_q = np.percentile(feature, 25)
    third_q = np.percentile(feature, 75)
    IQR = third_q-first_q #IQR is the distance between 3rd Quartile and 1st Quartile
    IQR *= 1.5
    minimum = first_q - IQR #acceptable minimum value
    maximum = third_q + IQR #acceptable maximum value
    flag = False
    if(minimum > np.min(feature)):
        flag = True
    if(maximum < np.max(feature)):
        flag = True
    return flag
# Detecting outliers using above function
X = dataset.iloc[:, :-1] #taking all the columns except the output column
for i in range(len(X.columns)):
    if(detect_outlier(X[X.columns[i]])):
        print('"', X.columns[i], '"', 'contains Outliers!')
# Function to remove outliers
def remove_outlier(feature):
    #use tukey method to remove outliers. whiskers are set at 1.5 times IQR
    first_q = np.percentile(X[feature], 25)
    third_q = np.percentile(X[feature], 75)
    IQR = third_q-first_q
    IQR *= 1.5
    minimum = first_q - IQR #acceptable minimum value
    maximum = third_q + IQR #acceptable maximum value
    median = X[feature].median()
    #values beyond the acceptance range are considered outliers. replace them with median of that feature
    X.loc[X[feature] < minimum, feature] = median
    X.loc[X[feature] > maximum, feature] = median
# Removing outliers
# NOTE(review): the nested duplicate loop repeats the same work len(X.columns)
# times — presumably only one loop was intended (result is the same, just slower).
for i in range(len(X.columns)):
    for i in range(len(X.columns)):
        remove_outlier(X.columns[i])
'''Re-checking using the same outlier detection methods above'''
# 1st way of boxplot after removing outliers to verify
plot.subplots(figsize = (15, 6))
X.boxplot(patch_artist = True, sym = "k.")
plot.xticks(rotation = 45)
# 2nd way of boxplot for outliers
plot.figure(figsize = (number_of_columns, 5 * number_of_rows))
for i in range(0, len(feature_space)):
    plot.subplot(number_of_rows + 1, number_of_columns, i + 1)
    sb.set_style('whitegrid')
    sb.boxplot(dataset[feature_space[i]], color = 'green', orient = 'v')
    plot.tight_layout()
# To check distribution-Skewness
plot.figure(figsize = (2 * number_of_columns, 5 * number_of_rows))
for k in range(0, len(feature_space)):
    plot.subplot(number_of_rows + 1, number_of_columns, k + 1)
    sb.distplot(dataset[feature_space[k]], kde = True)
# Visualization with barplot and normal distribution plot
for j, features in enumerate(list(dataset.columns)[:-1]):
    fg = sb.FacetGrid(dataset, hue = 'class', height = 5)
    fg.map(sb.distplot, features).add_legend()
dataset.pelvic_slope[dataset.scoliosis_slope == 1].median()
sb.boxplot(data = dataset, x = 'class', y = 'pelvic_slope', color = 'g')
'''Recheck complete'''
# Feature Scaling
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(X)
# converting the scaled data into pandas dataframe
scaled_dataset = pd.DataFrame(data = scaled_data, columns = X.columns)
scaled_dataset.head(3)
# Splitting into training & test dataset
X = scaled_dataset
y = dataset['class']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.15, random_state = 0)
# probing for the best classification algorithm using KFold CV
models = []
names = ['Logistic Regression', 'Random Forest', 'ANN', 'Gaussian NB',
         'DecisionTree Classifier', 'XGBClassifier']
models.append((LogisticRegression(solver = 'liblinear')))
models.append(RandomForestClassifier(n_estimators = 100))
models.append((MLPClassifier()))
models.append((GaussianNB()))
models.append((DecisionTreeClassifier()))
models.append((XGBClassifier()))
models
# NOTE(review): newer scikit-learn requires shuffle=True when random_state is set.
kfold = model_selection.KFold(n_splits = 5, random_state = 7)
for i in range(0, len(models)):
    cv_result = model_selection.cross_val_score(models[i], X_train, y_train, cv = kfold, scoring = 'accuracy')
    score = models[i].fit(X_train, y_train)
    prediction = models[i].predict(X_test)
    acc_score = accuracy_score(y_test, prediction)
    print ('-' * 40)
    print ('{0}: {1}'.format(names[i], acc_score))
'''Fitting the dataset to the appropriate ML model to predict & compare with test data
as per the accuracy above'''
# Random Forest classifier
randomForest = RandomForestClassifier(n_estimators = 100)
y_pred_rf = randomForest.fit(X_train, y_train).predict(X_test)
print ('-' * 40)
print ('Accuracy score:')
print (accuracy_score(y_test, y_pred_rf))
print ('-' * 40)
print ('Confusion Matrix:')
print (confusion_matrix(y_test, y_pred_rf))
print ('-' * 40)
print ('Classification Matrix:')
print (classification_report(y_test, y_pred_rf))
# Naive Bayes classifier
classifier_gnb = GaussianNB()
y_pred_gnb = classifier_gnb.fit(X_train, y_train).predict(X_test)
# accuracy of the classification
accuracy_score(y_test, y_pred_gnb)
confusion_matrix(y_test, y_pred_gnb)
# MLP classifier
classifier_mlp = MLPClassifier()
y_pred_mlp = classifier_mlp.fit(X_train, y_train).predict(X_test)
# accuracy of the classification
accuracy_score(y_test, y_pred_mlp)
confusion_matrix(y_test, y_pred_mlp)
# SVM classifier
classifier_svc = SVC(kernel = 'linear')
y_pred_svc = classifier_svc.fit(X_train, y_train).predict(X_test)
# accuracy of the classification
accuracy_score(y_test, y_pred_svc)
confusion_matrix(y_test, y_pred_svc)
# XGBoost classifier
classifier_xgb = XGBClassifier()
y_pred_xgb = classifier_xgb.fit(X_train, y_train).predict(X_test)
# accuracy of the classification
accuracy_score(y_test, y_pred_xgb)
confusion_matrix(y_test, y_pred_xgb)
'''Tuning for optimal hyper parameters using Grid Search '''
# probing optimal batch size
batch_Size = [8, 16, 32, 50, 64, 100, 128]
# probing optimal no of epochs
epochs = [10, 50, 100, 150, 200]
# probing for best optimizer
optimizers = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
# probing for optimizer learning rate
learn_rate = [0.001, 0.01, 0.1, 0.2 ,0.3]
# probing for momentum
momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
# probing for weight initialization mode
initialization = ['normal', 'zero', 'uniform', 'glorot_normal', 'glorot_uniform',
                  'he_normal', 'he_uniform', 'lecun_uniform']
# probing for optimal activation
activation = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']
# dropout is best combined with a weight constraint such as the max norm constraint
weights = [1, 2, 3, 4, 5]
# probing for best dropout rate
dropout_rate = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# probing for no of neurons in hidden layers
no_of_neurons = [1, 5, 10, 15, 20, 25, 30]
# NOTE(review): these keys (batch_size, optimizer, init, ...) are Keras-style
# parameters — scikit-learn's MLPClassifier does not accept them, so this grid
# search will raise. Confirm the intended estimator (e.g. a KerasClassifier wrapper).
param_grid = dict(batch_size = batch_Size, epochs = epochs, optimizer = optimizers,
                  learn_rate = learn_rate, momentum = momentum, init = initialization,
                  activation = activation, weight_constraint = weights,
                  dropout_rate = dropout_rate, neurons = no_of_neurons)
grid = GridSearchCV(estimator = MLPClassifier(), param_grid = param_grid, n_jobs = -1)
gSearch = grid.fit(X, y)
best_params = gSearch.best_params_
best_accuracy = gSearch.best_score_
# summarize results
print("Best score: %f using params %s" % (gSearch.best_score_, gSearch.best_params_))
means = gSearch.cv_results_['mean_test_score']
stds = gSearch.cv_results_['std_test_score']
params = gSearch.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
# Histogram for important features
fig = plot.subplots(figsize = (10, 4))
plot_importance(classifier_xgb)
# Marginal plot to study relationship between 2 numeric variables. Central chart display their correlation
sb.set(style = 'white', color_codes = True)
sb.jointplot(x = X['pelvic_slope'], y = y, kind = 'kde', color = 'skyblue')
'''Using Random Forest for important features'''
# Taken from EDA Wine, make required changes
# Using in-built feature_importance function. Convert the data into numeric by OneHotEncoding
# NOTE(review): RandomForestRegressor is never imported, and Y_train/Y_test are
# undefined (train_test_split produced y_train/y_test) — this section will raise as-is.
model = RandomForestRegressor(random_state = 1, max_depth = 10)
dataset = pd.get_dummies(dataset)
model.fit(X_train, Y_train)
# After fitting the model, plot the feature importance graph
features = dataset.columns
importances = model.feature_importances_
# top 6 features
indices = np.argsort(importances)[-6:]
plot.title('Feature Importances')
plot.barh(range(len(indices)), importances[indices], color = 'b', align = 'center')
plot.yticks(range(len(indices)), [features[i] for i in indices])
plot.xlabel('Relative Importance')
plot.show()
# Naive Bayes classifier
NBclassifier = GaussianNB()
NBclassifier.fit(X_train, Y_train)
nb_yPred = NBclassifier.predict(X_test)
# accuracy of the classification
accuracy_nb = accuracy_score(nb_yPred, Y_test) * 100
confusion_matrix(nb_yPred, Y_test)
print(accuracy_nb)
'''Using XGBoost classifier for important features'''
# NOTE(review): `xgb` is undefined — only XGBClassifier was imported; this call will raise.
xgbclassifier = xgb()
xgb_yPred = xgbclassifier.fit(X_train, Y_train).predict(X_test)
accuracy_xgb = accuracy_score(xgb_yPred, Y_test)
confusion_matrix(xgb_yPred, Y_test)
print(accuracy_xgb)
# After fitting the model, plot histogram feature importance graph
fig, ax = plot.subplots(figsize = (10, 4))
plot_importance(xgbclassifier, ax = ax)
# Marginal plot allows to study the relationship between 2 numeric variables. The central chart display their correlation
sb.set(style = "white", color_codes = True)
#Not working, need to probe~~~~~
sb.jointplot(x = x['alcohol'], y = y, kind = 'kde', color = 'skyblue')
"""
881. Boats to Save People
Medium

The i-th person has weight people[i], and each boat can carry a maximum weight of limit.

Each boat carries at most 2 people at the same time, provided the sum of the weight of
those people is at most limit.

Return the minimum number of boats to carry every given person. (It is guaranteed each
person can be carried by a boat.)

Example 1:
Input: people = [1,2], limit = 3
Output: 1
Explanation: 1 boat (1, 2)

Example 2:
Input: people = [3,2,2,1], limit = 3
Output: 3
Explanation: 3 boats (1, 2), (2) and (3)

Example 3:
Input: people = [3,5,3,4], limit = 5
Output: 4
Explanation: 4 boats (3), (3), (4), (5)

Note:
1 <= people.length <= 50000
1 <= people[i] <= limit <= 30000
"""
# FIX: List was used in the annotation below without being imported,
# raising NameError when the class body was evaluated.
from typing import List


class Solution:
    def numRescueBoats(self, people: List[int], limit: int) -> int:
        """Return the minimum number of boats, two people max per boat.

        Greedy two-pointer over the sorted weights: the heaviest remaining
        person always takes a boat; the lightest joins them only if the
        pair fits within *limit*. O(n log n) time, sorts *people* in place.
        """
        people.sort()
        j = 0                   # lightest person not yet boarded
        k = len(people) - 1     # heaviest person not yet boarded
        boats = 0
        while j < k:
            pj = people[j]
            pk = people[k]
            if pj + pk <= limit:
                # two people board
                j += 1
            k -= 1              # heaviest always boards this boat
            boats += 1
        if j == k:
            boats += 1          # one person left, own boat
        return boats
import os
from PyQt5 import QtWidgets, QtCore, QtGui


class ControlWidget(QtWidgets.QWidget):
    """Grid of jog-arrow buttons, one pair per configured axis."""

    # Emitted as (axis_name, target_limit, pressed?) when a button is
    # pressed (True) or released (False).
    run_state = QtCore.pyqtSignal(str, int, bool)

    def __init__(self, axes):
        # axes: iterable of (axis_name, options-dict) pairs; options hold
        # 'orientation' and (presumably) 'limits' — see NOTE in exec_.
        super().__init__()
        self.axes = axes
        self.init_ui()

    def init_ui(self):
        """Lay out an up/down or left/right button pair for every axis."""
        layout = QtWidgets.QGridLayout()
        row, column = 1, 1
        for axis_name, axis_opts in self.axes:
            orientation = axis_opts.get('orientation')
            if orientation == 'vertical':
                self.button_one = ArrowButton('up')
                self.button_two = ArrowButton('down')
                layout.addWidget(self.button_one, row-1, column, QtCore.Qt.AlignBottom)
                layout.addWidget(self.button_two, row+1, column, QtCore.Qt.AlignTop)
                column = column + 2
            else:
                self.button_one = ArrowButton('left')
                self.button_two = ArrowButton('right')
                layout.addWidget(self.button_one, row, 0, QtCore.Qt.AlignRight)
                layout.addWidget(self.button_two, row, 2, QtCore.Qt.AlignLeft)
                row = row + 2
            # NOTE(review): limits is None when the axis options omit
            # 'limits' (as in the __main__ demo below), which makes the
            # lambdas in exec_ fail with TypeError on press — confirm.
            limits = axis_opts.get('limits')
            self.button_one.setFixedSize(50,50)
            self.button_two.setFixedSize(50,50)
            self.exec_(axis_name, limits)
        self.setLayout(layout)
        return

    def exec_(self, axis_name, limits):
        """Wire the current button pair to emit run_state on press/release."""
        self.button_one.pressed.connect(lambda: self.run_state.emit(axis_name, limits[1], True))
        self.button_two.pressed.connect(lambda: self.run_state.emit(axis_name, limits[0], True))
        self.button_one.released.connect(lambda: self.run_state.emit(axis_name, limits[1], False))
        self.button_two.released.connect(lambda: self.run_state.emit(axis_name, limits[0], False))


class ArrowButton(QtWidgets.QToolButton):
    """Tool button styled with a directional arrow image and rounded corners."""

    def __init__(self, orientation):
        # orientation: 'up', 'down', 'left' or 'right' — selects the arrow
        # image and which corners get rounded.
        super().__init__()
        self.orientation = orientation
        self.init_ui()

    def init_ui(self):
        """Build the pressed/unpressed stylesheets for this orientation."""
        # NOTE(review): Windows-style separators are normalized to '/' so the
        # Qt stylesheet url() works; path is relative to this module.
        image_path = os.path.join(os.path.dirname(__file__),'..\\images\\')
        image_path = image_path.replace("\\","/")
        pressed = image_path + '{}_arrow_pressed.png'
        unpressed = image_path + '{}_arrow_unpressed.png'
        orientation = self.orientation
        button_style = '''
            background-image: url({image});
            background-repeat: no-repeat;
            background-position: center;
            background-color: {background};
            border-{corner1}-radius: 10px;
            border-{corner2}-radius: 10px;
            border-color: rgb(33,33,33);
            border-style: outset;
            border-width: 1.5px;
        '''
        params_pressed = {
            'image': pressed.format(orientation),
            'background': 'rgb(53,53,53)',
            'corner1': 'bottom-{}'.format(orientation),
            'corner2': 'top-{}'.format(orientation),
        }
        params_unpressed = {
            'image': unpressed.format(orientation),
            'background': 'rgb(61,61,61)',
            'corner1': 'bottom-{}'.format(orientation),
            'corner2': 'top-{}'.format(orientation),
        }
        # 'bottom-up' / 'top-down' are not valid CSS corners, so vertical
        # arrows override with explicit corner names.
        if orientation == 'up':
            params_unpressed['corner1'] = 'top-right'
            params_unpressed['corner2'] = 'top-left'
            params_pressed['corner1'] = params_unpressed['corner1']
            params_pressed['corner2'] = params_unpressed['corner2']
        elif orientation == 'down':
            params_unpressed['corner1'] = 'bottom-right'
            params_unpressed['corner2'] = 'bottom-left'
            params_pressed['corner1'] = params_unpressed['corner1']
            params_pressed['corner2'] = params_unpressed['corner2']
        self.button_style_pressed = button_style.format(**params_pressed)
        self.button_style_unpressed = button_style.format(**params_unpressed)
        self.setStyleSheet(self.button_style_unpressed)

    def mousePressEvent(self, ev):
        # Swap to the pressed stylesheet, then let Qt handle the event.
        self.setStyleSheet(self.button_style_pressed)
        QtWidgets.QToolButton.mousePressEvent(self, ev)

    def mouseReleaseEvent(self, ev):
        # Restore the idle stylesheet, then let Qt handle the event.
        self.setStyleSheet(self.button_style_unpressed)
        QtWidgets.QToolButton.mouseReleaseEvent(self, ev)


if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication([])
    axes = [
        ('y axis', {
            'orientation': 'vertical',
        }),
        ('x axis', {
            'orientation': 'horizontal',
        }),
    ]
    c = ControlWidget(axes)
    c.show()
    app.exec_()
#!/usr/bin/python3
""" Test module for storing Square class test cases. """
import unittest
from models.base import Base
from models.square import Square
from unittest.mock import patch
from io import StringIO
from time import sleep
import os

print_on = 0  # <-- Set to 1 to activate printing of the tests.


class TestSquare(unittest.TestCase):
    """ TestSquare class for storing the unittest methods and cases. """

    if print_on == 1:
        green = "\033[92m"  # <-- Stores the green text color format.
        reset = "\033[0m"  # <-- Stores the reset text color format.
        print(green + "." + "~ " * 11 + "| test_square.py module. |" +
              "~ " * 11 + reset)
        sleep(1)

    # Tests from 9-main.py ---------------------------------------------------|
    def test_9_Square(self):
        """ Test cases for Square class objects, from 9-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 9-main.py " +
                  "~" * 19 + reset)
        s1 = Square(5)
        # Test print(s1)
        case_string = "[Square] (1) 0/0 - 5\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .area() == 25
        self.assertEqual(s1.area(), 25)
        # Test .display()
        case_string = "#####\n" * 5
        with patch('sys.stdout', new=StringIO()) as fake_out:
            s1.display()
            self.assertEqual(fake_out.getvalue(), case_string)
        s2 = Square(2, 2)
        # Test print(s2)
        case_string = "[Square] (2) 2/0 - 2\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s2)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .area() == 4
        self.assertEqual(s2.area(), 4)
        # Test .display()
        case_string = "  ##\n" * 2
        with patch('sys.stdout', new=StringIO()) as fake_out:
            s2.display()
            self.assertEqual(fake_out.getvalue(), case_string)
        s3 = Square(3, 1, 3)
        # Test print(s3)
        case_string = "[Square] (3) 1/3 - 3\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s3)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .area() == 9
        self.assertEqual(s3.area(), 9)
        # Test .display()
        case_string = "\n" * 3 + " ###\n" * 3
        with patch('sys.stdout', new=StringIO()) as fake_out:
            s3.display()
            self.assertEqual(fake_out.getvalue(), case_string)

    # Tests from 10-main.py --------------------------------------------------|
    def test_10_Square(self):
        """ Test cases for Square class objects, from 10-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 10-main.py " +
                  "~" * 18 + reset)
        s1 = Square(5)
        # Test print(s1)
        case_string = "[Square] (1) 0/0 - 5\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .size()
        self.assertEqual(s1.size, 5)
        s1.size = 10
        case_string = "[Square] (1) 0/0 - 10\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test pass a "string" to size.
        with self.assertRaises(TypeError):
            s1.size = "9"

    # Tests from 11-main.py --------------------------------------------------|
    def test_11_Square(self):
        """ Test cases for Square class objects, from 11-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 11-main.py " +
                  "~" * 18 + reset)
        # Test Create a Square
        s1 = Square(5)
        case_string = "[Square] (1) 0/0 - 5\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .update(10)
        s1.update(10)
        case_string = "[Square] (10) 0/0 - 5\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .update(1, 2)
        s1.update(1, 2)
        case_string = "[Square] (1) 0/0 - 2\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .update(1, 2, 3)
        s1.update(1, 2, 3)
        case_string = "[Square] (1) 3/0 - 2\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .update(1, 2, 3, 4)
        s1.update(1, 2, 3, 4)
        case_string = "[Square] (1) 3/4 - 2\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .update(x=12)
        s1.update(x=12)
        case_string = "[Square] (1) 12/4 - 2\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .update(size=7, y=1)
        s1.update(size=7, y=1)
        case_string = "[Square] (1) 12/1 - 7\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test .update(size=7, id=89, y=1)
        s1.update(size=7, id=89, y=1)
        case_string = "[Square] (89) 12/1 - 7\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)

    # Test from 13-main.py - update(**kwargs) --------------------------------|
    def test_13_Squares(self):
        """ Test cases for Square class objects, from 13-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 13-main.py " +
                  "~" * 19 + reset)
        s1 = Square(10, 2, 1)
        case_string = "[Square] (1) 2/1 - 10\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Return Dictionary
        s1_dictionary = s1.to_dictionary()
        case_dict = {'id': 1, 'size': 10, 'x': 2, 'y': 1}
        # Test if the dictionary is the correct one.
        self.assertEqual(s1_dictionary, case_dict)
        # Test the type of the return.
        self.assertEqual(type(s1_dictionary), dict)
        s2 = Square(1, 1)
        case_string = "[Square] (2) 1/0 - 1\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s2)
            self.assertEqual(fake_out.getvalue(), case_string)
        s2.update(**s1_dictionary)
        case_string = "[Square] (1) 2/1 - 10\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s2)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test that r1 is different than r2
        self.assertFalse(s1 == s2)

    # Copy for Square of Tests from 1-main.py --------------------------------|
    def test_1_Square(self):
        """ Test cases for Square class objects, from 1-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 19 + " Testing cases from 1-main.py " +
                  "~" * 19 + reset)
        r1 = Square(10)
        self.assertEqual(r1.id, 1)
        r2 = Square(2)
        self.assertEqual(r2.id, 2)
        r3 = Square(10, 0, 0, 12)
        self.assertEqual(r3.id, 12)

    # Copy for Square of Tests from 2-main.py --------------------------------|
    def test_2_Square(self):
        """ Test cases for Square class objects, from 2-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 2-main.py " +
                  "~" * 19 + reset)
        with self.assertRaises(TypeError):
            s = Square(10, "2")
        with self.assertRaises(ValueError):
            s = Square(10, 2)
            s.width = -10
        with self.assertRaises(TypeError):
            s = Square(10, 2)
            s.x = {}
        with self.assertRaises(ValueError):
            Square(10, 2, -1)

    # Copy for Square of Tests from 3-main.py --------------------------------|
    def test_3_Square(self):
        """ Test cases for Square class objects, from 3-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 3-main.py " +
                  "~" * 19 + reset)
        s1 = Square(3, 2)
        self.assertEqual(s1.area(), 9)
        s2 = Square(2)
        self.assertEqual(s2.area(), 4)
        s3 = Square(8)
        self.assertEqual(s3.area(), 64)

    # Copy for Square of Test from 4-main.py ---------------------------------|
    def test_4_Squares(self):
        """ Test cases for Square class objects, from 4-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 4-main.py " +
                  "~" * 19 + reset)
        # Test display Square(4)
        case_string = ("####\n" * 4)
        r = Square(4)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            r.display()
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test display Square(2)
        case_string = ("##\n" * 2)
        r = Square(2)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            r.display()
            self.assertEqual(fake_out.getvalue(), case_string)

    # Copy for Square of Test from 5-main.py ---------------------------------|
    def test_5_Squares(self):
        """ Test cases for Square class objects, from 5-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 5-main.py " +
                  "~" * 19 + reset)
        # Test display Square(4, 2, 1, 12)
        case_string = "[Square] (12) 2/1 - 4\n"
        r1 = Square(4, 2, 1, 12)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test print Square(5, 5, 1)
        case_string = "[Square] (1) 5/1 - 5\n"
        r2 = Square(5, 5, 1)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r2)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test, str()
        a = str(r1)
        self.assertEqual(a, "[Square] (12) 2/1 - 4")
        # Test, str()
        a = str(r2)
        self.assertEqual(a, "[Square] (1) 5/1 - 5")

    # Copy for Square of Test from 6-main.py ---------------------------------|
    def test_6_Squares(self):
        """ Test cases for Square class objects, from 6-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 6-main.py " +
                  "~" * 19 + reset)
        # Test display Square(2, 3, 2, 2)
        case_string = "\n\n" + "   ##\n" * 2
        r = Square(2, 3, 2)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            r.display()
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test display Square(3)
        case_string = '\n' + "  ###\n" * 3
        r = Square(3, 2, 1)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            r.display()
            self.assertEqual(fake_out.getvalue(), case_string)

    # Copy for Square of Test from 7-main.py - update(*args) -----------------|
    def test_7_Squares(self):
        """ Test cases for Square class objects, from 7-main.py"""
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 20 + " Testing cases from 7-main.py " +
                  "~" * 19 + reset)
        # Test Square(10, 10, 10, 10)
        case_string = "[Square] (10) 10/10 - 10\n"
        r1 = Square(10, 10, 10, 10)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test update(89)
        case_string = "[Square] (89) 10/10 - 10\n"
        r1.update(89)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test update(89, 2)
        case_string = "[Square] (89) 10/10 - 2\n"
        r1.update(89, 2)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test update(89, 2, 3)
        case_string = "[Square] (89) 3/10 - 2\n"
        r1.update(89, 2, 3)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test update(89, 2, 3, 4)
        case_string = "[Square] (89) 3/4 - 2\n"
        r1.update(89, 2, 3, 4)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r1)
            self.assertEqual(fake_out.getvalue(), case_string)
        # Test update(89, 2, 3, 4, 5)
        # NOTE(review): the comment above says a 5th argument, but the call
        # below repeats update(89, 2, 3, 4) — confirm whether the extra-arg
        # case was meant to be exercised here.
        case_string = "[Square] (89) 3/4 - 2\n"
        r1.update(89, 2, 3, 4)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(r1)
            self.assertEqual(fake_out.getvalue(), case_string)

    # Test for update() method.
    def test_Square_update(self):
        """ Test cases for the .display() """
        if print_on == 1:
            green = "\033[92m"
            reset = "\033[0m"
            print(green + "~" * 19 + " Testing Square.update() method. " +
                  "~" * 17 + reset)
        # Case when both *args and **kwargs are used.
        s = Square(1)
        s.update(2, id=100)  # <-- expected to remain unaltered by kwargs.
        case_string = "[Square] (2) 0/0 - 1\n"
        with patch('sys.stdout', new=StringIO()) as fake_out:
            print(s)
            self.assertEqual(fake_out.getvalue(), case_string)

    # Teardown method for resetting the count of instances in Base class.-----|
    def tearDown(self):
        """ Resets the Base class counter after each test unit. """
        Base._Base__nb_objects = 0
        # Best-effort cleanup of any serialization files tests may create.
        try:
            os.remove("Rectangle.json")
        except Exception as e:
            pass
        try:
            os.remove("Square.json")
        except Exception as e:
            pass
        try:
            os.remove("Rectangle.csv")
        except Exception as e:
            pass
        try:
            os.remove("Square.csv")
        except Exception as e:
            pass


if __name__ == '__main__':
    unittest.main()
# [ ] for x = 6, use range(x) to print the numbers 1 through 6

# FIX: the original set x = 7 and printed a running product (factorial)
# instead of the numbers themselves; the exercise comment asks for the
# numbers 1 through 6, so print each loop value for x = 6.
x = 6
for num in range(1, x + 1):
    print(num)
import pandas as pd import numpy as np from datetime import datetime, date def age_calc(born): """"takes in the date of a customer and calulates the age""" born = born.date() today = date.today() return today.year - born.year - ((today.month, today.day) < (born.month, born.day)) def bogo(df): """Calculates if a customer took up the BOGO offer. Takes in a data frame""" if df['offer_type']=='bogo': if df['offer_completed']==df['offer_viewed']: return df['offer_completed'] if df['offer_completed']>df['offer_viewed']: return df['offer_viewed'] if df['offer_completed']<df['offer_viewed']: return df['offer_completed'] else: return 0 else: return 0 def discount(df): """Calculates if a customer took up the Discount offer. Takes in a data frame""" if df['offer_type']=='discount': if df['offer_completed']==df['offer_viewed']: return df['offer_completed'] if df['offer_completed']>df['offer_viewed']: return df['offer_viewed'] if df['offer_completed']<df['offer_viewed']: return df['offer_completed'] else: return 0 else: return 0 def info(df): """Calculates if a customer viewed an Informational offer. Takes in a data frame""" if df['offer_type']=='informational': if df['offer_viewed']==df['offer_received']: return df['offer_viewed'] if df['offer_viewed']< df['offer_received']: return df['offer_viewed'] if df['offer_viewed']==df['offer_received']: return df['offer_received'] else: return 0 def offer_time(df): """Calculates how quickly an offer waas taken up after it was viewed. Takes in a data frame""" if df['discount_taken']>0 or df['bogo_taken']>0: return (df['offer_completed']- df['offer_viewed']) else: return 0 def info_time(df): """Calculates how quickly an informational offer is viewed. Takes in a data frame""" if df['info_viewed']>0: return (df['offer_viewed']-df['offer_received']) else: return 0
# imports
import os
import io
import re
# import tokenize
# import json
# import numpy as np
import pandas as pd
import stringdist


def create_filenames_df(data_path, out_path, save_csv=False, given_class_company=None):
    """Walk data_path and return a DataFrame of the .sol files found.

    Columns: root (path below class/company), file_name, class, company.
    When given_class_company is a (class, company) pair it overrides the
    first two path components; otherwise they are popped from each root.
    Optionally persists the result as out_path/df_files.csv.
    """
    # create a pandas dataframe with the filenames in our dataset
    df_files_dict = {
        'root': [],
        'file_name': [],
    }
    for root, subdirs, files in os.walk(data_path):
        for file_name in files:
            # store the path relative to data_path
            df_files_dict['root'].append(root[len(data_path):])
            df_files_dict['file_name'].append(file_name)
    df_files = pd.DataFrame.from_dict(df_files_dict)
    root_list = df_files['root'].values
    root_list = [root.split('/') for root in root_list]
    if given_class_company is None:
        # first two path components are interpreted as class and company;
        # pop() mutates root_list so the remainder becomes the new 'root'
        df_files['class'] = [r.pop(0) for r in root_list]
        df_files['company'] = [r.pop(0) for r in root_list]
    else:
        df_files['class'] = given_class_company[0]
        df_files['company'] = given_class_company[1]
    df_files['root'] = ["/".join(r) for r in root_list]
    file_name_list = df_files['file_name'].values
    df_files['extension'] = [e.split('.')[-1] for e in file_name_list]
    # filter all files that are not .sol
    # (pop drops the helper 'extension' column while using it as the mask)
    df_files = df_files[df_files.pop('extension') == 'sol']
    if save_csv:
        df_files.to_csv(os.path.join(out_path, 'df_files.csv'))
    return df_files


def get_filename_for_row(row, data_path, with_root=True, comments=False):
    """Build the on-disk path for a df_files row.

    with_root=True nests the root folders under data_path/class/company;
    with_root=False flattens them into an underscore-joined filename prefix.
    comments=True targets the companion 'comments_<name>' file instead.
    """
    # helper func to get file-path
    data_path = os.path.join(
        data_path,
        row.loc['class'],
        row.loc['company'],
    )
    filename = row.loc['file_name']
    root_folders = [x for x in row.loc['root'].split('/') if x not in ['', '.', '..']]
    if not with_root:
        root_name = "_".join(root_folders)
        root_name = "{}_".format(root_name) if len(root_name) > 0 else root_name
        filename = "{}{}".format(root_name, filename)
    else:
        data_path = os.path.join(
            data_path,
            "/".join(root_folders),
        )
    if comments:
        filename = "comments_{}".format(filename)
    filename_out = os.path.join(data_path, filename)
    return filename_out


# TODO: remove
def get_filename_old_for_row(row, data_path):
    """Legacy path builder kept for reference; superseded by get_filename_for_row."""
    # helper func to get file-path
    filename = os.path.join(
        data_path,
        row.loc['class'],
        row.loc['company'],
        row.loc['root'],
        row.loc['file_name']
    )
    return filename


def read_src(file_name):
    """Return the full file contents as one string (comments included)."""
    # read in contents of files as string
    # including comments
    with open(file_name, 'r') as f:
        return f.read()


def read_src_nocomments(file_name, return_also_comments=False):
    """Read a Solidity file, stripping pragma lines and comments.

    Returns the cleaned source string, or (source, comments) when
    return_also_comments is True.
    NOTE(review): the comment patterns below are line-oriented heuristics
    (e.g. '//' inside a string literal would be treated as a comment) and
    use non-raw pattern strings like '/\*' — confirm acceptable, and
    consider raw strings to silence invalid-escape warnings.
    """
    # read in contents of files as string
    # strips pragma statements and comments
    with open(file_name, 'r') as f:
        src_list = []
        comments_list = []
        open_multiline_comment = False
        for line in f.readlines():
            # skip pragma
            if re.match('pragma solidity .*;', line.strip()):
                # comments_list.append(line)
                continue
            # single line comments
            if re.match('//', line.strip()):
                comments_list.append(line)
                continue
            # multiline comments: start
            if re.match('/\*', line.strip()):
                if not re.search('\*/', line.strip()):
                    open_multiline_comment = True
                comments_list.append(line)
                continue
            # multiline comments: end
            if re.match('\*/', line.strip()) or re.match('\*\*/', line.strip()):
                open_multiline_comment = False
                comments_list.append(line)
                continue
            # in-progress multiline comment
            if re.match('\*', line.strip()) or open_multiline_comment:
                if re.search('\*/', line.strip()):
                    open_multiline_comment = False
                comments_list.append(line)
                continue
            # inline comments: split the trailing '//...' off the code line
            if re.search('[\s]+//', line) or re.search('//[\s]+', line):
                inline_comment = re.findall('[\s]*//.*\n', line)
                comments_list.extend(inline_comment)
                line = re.sub('[\s]*//.*\n', '\n', line)
            # add current src line
            src_list.append(line)
        # add final newline
        src_list.append('\n')
        comments_list.append('\n')
    # if 'todebug.sol' in file_name:
    #     print(file_name)
    #     print(src_list)
    #     print(comments_list)
    if return_also_comments:
        return ''.join(src_list), ''.join(comments_list)
    else:
        return ''.join(src_list)


def get_file_src(row, data_path, with_root):
    """Row-apply helper: load the raw source (with comments) into row['src']."""
    row['src'] = read_src(
        file_name=get_filename_for_row(row, data_path, with_root, comments=False)
    )
    return row


def get_file_comments(row, data_path, with_root):
    """Row-apply helper: load the saved comments file into row['comments']."""
    row['comments'] = read_src(
        file_name=get_filename_for_row(row,
                                       data_path,
                                       with_root,
                                       comments=True)
    )
    return row


def get_file_src_nocomments(row, data_path):
    """Row-apply helper: load comment-stripped source into row['src']."""
    row['src'] = read_src_nocomments(
        file_name=get_filename_for_row(row, data_path),
        return_also_comments=False,
    )
    return row


def get_file_src_and_comments(row, data_path):
    """Row-apply helper: split a file into row['src'] and row['comments']."""
    row['src'], row['comments'] = read_src_nocomments(
        file_name=get_filename_for_row(row, data_path),
        return_also_comments=True,
    )
    return row


def save_df_with_some_cols_as_len(df, out_path, name, cols):
    """Write df to out_path/df_<name>.csv, replacing the text columns in
    `cols` by their lengths so the CSV stays small."""
    # save without src to csv
    df_out = df.copy(deep=True)
    for column in cols:
        if column in list(df):
            df_out[column] = df[column].apply(lambda x: len(x))
    df_out.to_csv(os.path.join(out_path, 'df_{}.csv'.format(name)))


def save_src_to(row, out_path, new_name, with_root_folders=True):
    """Row-apply helper: write row's src (and comments, when present) under
    out_path/new_name/class/company[/root...], creating folders as needed.

    new_name == 'joined' switches to the 'joined_src'/'joined_comments'
    columns instead of 'src'/'comments'.
    """
    nested_folders = [new_name, row.loc['class'], row.loc['company']]
    if with_root_folders:
        root_folders = [x for x in row.loc['root'].split('/') if x not in ['', '.', '..']]
        nested_folders.extend(root_folders)
    # create each directory level on demand
    mkdir_path = out_path
    for folder in nested_folders:
        mkdir_path = os.path.join(mkdir_path, folder)
        if not os.path.exists(mkdir_path):
            os.mkdir(mkdir_path)
    file_name = get_filename_for_row(
        row,
        data_path=os.path.join(out_path, new_name),
        with_root=with_root_folders,
        comments=False,
    )
    file_name_comments = get_filename_for_row(
        row,
        data_path=os.path.join(out_path, new_name),
        with_root=with_root_folders,
        comments=True,
    )
    if new_name == 'joined':
        src = row.loc['joined_src']
        comments = row.loc['joined_comments']
    else:
        src = row.loc['src']
        comments = row.loc['comments']
    with open(file_name, 'w')as f:
        f.write(src)
    if 'comments' in list(row.index.values):
        with open(file_name_comments, 'w')as f:
            f.write(comments)


def run_on_local_files(data_path, out_path):
    """End-to-end pass: index .sol files, split src/comments, save lengths
    CSV and cleaned copies under out_path/cleaned."""
    # run
    df = create_filenames_df(data_path, out_path, save_csv=True)
    df = df.apply(get_file_src_and_comments, axis=1, args=(data_path,))
    # save df
    save_df_with_some_cols_as_len(
        df, out_path=out_path, name='files', cols=['src', 'comments'])
    # save src
    _ = df.apply(save_src_to, axis=1,
                 args=(out_path, 'cleaned'))


if __name__ == '__main__':
    DATA_PATH = '/home/ourownstory/Documents/SOL/data/'
    # zeppelin_folder = '/home/ourownstory/Documents/SOL/data/Zeppelin/Zeppelin/'
    # os.listdir(data_path)
    OUT_PATH = '/home/ourownstory/Documents/SOL/derived/test/'
    run_on_local_files(DATA_PATH, OUT_PATH)
# 숫자 8개를 입력받아 리스트에 넣음 data = list(map(int, input().split())) # 1로 시작하는 경우 if data[0] == 1: # 오름차순으로 정렬되어 있다면 if data == sorted(data): print('ascending') # 정렬되어 있지 않다면 else: print('mixed') # 8로 시작하는 경우 elif data[0] == 8: # 내림차순으로 정렬되어 있다면 if data == sorted(data, reverse=True): print('descending') # 정렬되어 있지 않다면 else: print('mixed') # 1 또는 8로 시작하지 않는 경우 else: print('mixed')
import json
import logging
import paho.mqtt.client as mqtt
import re

LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)


class OpenstackMqtt(object):
    """Small wrapper around paho-mqtt for the OpenStack firehose broker.

    Exposes pluggable on_connect/on_message user callbacks and helpers to
    subscribe to the gerrit event topics of a project.
    """

    def __init__(self, connection='firehose.openstack.org'):
        # `connection` is the broker hostname; connect() must be called
        # explicitly before starting the loop.
        self.client = mqtt.Client()
        self.connection = connection
        self._on_connect = None   # user callback, set via the on_connect property
        self._on_message = None   # user callback, set via the on_message property
        self.client.on_connect = self.on_client_connect
        self.client.on_message = self.on_client_message
        # self.client.connect(self.connection)

    @property
    def on_connect(self):
        """User callback invoked with a status string after a successful connect."""
        return self._on_connect

    @on_connect.setter
    def on_connect(self, value):
        self._on_connect = value

    @property
    def on_message(self):
        """User callback invoked with a dict describing each zuul comment event."""
        return self._on_message

    @on_message.setter
    def on_message(self, value):
        self._on_message = value

    def connect(self):
        """Open the connection to the MQTT broker."""
        self.client.connect(self.connection)

    def on_client_connect(self, client, userdata, flags, rc):
        # paho-level connect hook; forwards a readable note to the user callback
        LOG.debug('Connected with result code ' + str(rc))
        if self.on_connect:
            self.on_connect('Connected to {}'.format(self.connection))

    def add_subscribe(self, project):
        """Subscribe to the gerrit event topics for `project`."""
        self.client.subscribe('gerrit/{}/change-abandoned'.format(project))
        self.client.subscribe('gerrit/{}/change-merged'.format(project))
        self.client.subscribe('gerrit/{}/comment-added'.format(project))
        # self.client.subscribe('gerrit/{}/topic-changed'.format(project))
        self.client.subscribe('gerrit/{}/merge-failed'.format(project))

    def on_client_message(self, client, userdata, msg):
        """paho-level message hook: forwards zuul-authored events to on_message.

        NOTE(review): assumes every payload carries author/username, change,
        comment and eventCreatedOn keys — confirm this holds for all of the
        subscribed topics (e.g. merge-failed).
        """
        LOG.debug('New message received: {}'.format(msg.topic))
        payload = json.loads(msg.payload)
        # only zuul's comments (CI results) are of interest here
        if payload['author']['username'] == 'zuul':
            LOG.debug('Payload info: {}'.format(json.dumps(payload, indent=4)))
            # topic = msg.topic[msg.topic.rfind('/')+1:]
            # info = None
            # LOG.debug('Content: {}'.format(
            #     json.dumps(payload, indent=4, sort_keys=True)))
            if self.on_message:
                return_dict = {'change_id': payload['change']['id'],
                               'number': payload['change']['number'],
                               'comment': payload['comment'],
                               'date': payload['eventCreatedOn'],
                               'author': payload['author']['username'],
                               'commit_message':
                                   payload['change']['commitMessage']
                               }
                self.on_message(return_dict)

    def is_verified(self, payload):
        """Return True when the payload is a zuul comment containing 'Verified+1'."""
        comment = payload.get('comment', None)
        author = payload.get('author', {}).get('username', None)
        if not comment or not author:
            return False
        if 'Verified+1' in comment and author == 'zuul':
            LOG.debug('Verified')
            return True
        else:
            return False

    def parse_commit_message(self, commit_message):
        """Strip the first (title) line and the trailing Change-Id line."""
        new_message = re.sub(r'^.*\n', '', commit_message)
        new_message = re.sub(r'\nChange-Id.*', '', new_message)
        return new_message

    def start(self):
        """Start the paho network loop in a background thread."""
        self.client.loop_start()
# @author Nayara Souza # UFCG - Universidade Federal de Campina Grande # AA - Basico nk = input().split() id = input().split() n = int(nk[0]) k = int(nk[1]) for i in range(n): if (i >= k): break k -= i print(id[k-1])
class Solution: def combinationSum4(self, nums: List[int], target: int) -> int: nums.sort() dp = [0 for i in range(target+1)] dp[0]=1 for i in range(1, target+1): for num in nums: if num>i: break dp[i] += dp[i-num] return dp[-1]
import requests, json, logging #API at https://hadoop.apache.org/docs/r2.6.0/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html class YarnRestApi(): def __init__(self, hostname, port=8088): self.hostname = hostname self.port = port #Returns dictionary of parsed JSON data def getRequest(self, path, payload={}): url = "http://"+self.hostname+":"+str(self.port)+"/"+path logging.debug("Requesting url: "+url) try: return json.loads(requests.get(url, params=payload).text) except: return "" def getApplicationMasters(self): return self.getRequest("ws/v1/cluster/apps") def getApplicationMasterInfo(self, applicationMasterId): return self.getRequest("ws/v1/cluster/apps/"+applicationMasterId) def getRunningApplicationMasters(self): return self.getRequest("ws/v1/cluster/apps", {"state":"RUNNING"}) def getNodesInfo(self): return self.getRequest("ws/v1/cluster/nodes") def getNodesRunningContainers(self): nodes = self.getNodesInfo() workers = [] if nodes and nodes["nodes"] and nodes["nodes"]["node"]: for node in nodes["nodes"]["node"] : #API to talk to nodemanager talks to node itself y = YarnRestApi(node["nodeHTTPAddress"].split(":")[0], node["nodeHTTPAddress"].split(":")[1]) nodeInfo = y.getRequest("ws/v1/node/apps") if nodeInfo and nodeInfo["apps"] and nodeInfo["apps"]["app"] and nodeInfo["apps"]["app"][0]: if "containerids" in nodeInfo["apps"]["app"][0]: workers.append( node["nodeHTTPAddress"].split(":")[0] ) return workers #Returns an array of applicationIds def getRunningApplicationMasterIds(self): ids = "" try: runningAppMaster = self.getRunningApplicationMasters() if runningAppMaster is not None: for appMaster in runningAppMaster["apps"]["app"]: if "id" in appMaster: ids = appMaster["id"]+","+ids except: return [] #Remove last element if its empty ids = ids.split(",") if ids[-1] == "": del ids[-1] return ids def getNumContainersForApplicationMaster(self, applicationMasterId): return self.getRequest("ws/v1/cluster/apps/"+applicationMasterId)["app"]["runningContainers"]
import streamlit as st
from datetime import datetime
import database as db
import pandas as pd


# function to verify department id
def verify_department_id(department_id):
    """Return True when `department_id` exists in the department_record table."""
    verify = False
    conn, c = db.connection()
    with conn:
        c.execute(
            """
            SELECT id
            FROM department_record;
            """
        )
        # linear scan over all ids; acceptable for the small tables expected here
        for id in c.fetchall():
            if id[0] == department_id:
                verify = True
                break
    conn.close()
    return verify


# function to show the details of department(s) given in a list (provided as a parameter)
def show_department_details(list_of_departments):
    """Render department row(s) in Streamlit: a Series for one, a DataFrame for many."""
    department_titles = ['Department ID', 'Department name', 'Description',
                         'Contact number', 'Alternate contact number',
                         'Address', 'Email ID']
    if len(list_of_departments) == 0:
        st.warning('No data to show')
    elif len(list_of_departments) == 1:
        department_details = [x for x in list_of_departments[0]]
        series = pd.Series(data = department_details, index = department_titles)
        st.write(series)
    else:
        department_details = []
        for department in list_of_departments:
            department_details.append([x for x in department])
        df = pd.DataFrame(data = department_details, columns = department_titles)
        st.write(df)


# function to generate unique department id using current date and time
def generate_department_id():
    """Return an id of the form 'D-<SSMMHH>-<YYMMDD>' built from the current time."""
    id_1 = datetime.now().strftime('%S%M%H')
    id_2 = datetime.now().strftime('%Y%m%d')[2:]
    id = f'D-{id_1}-{id_2}'
    return id


# function to show the doctor id and name of doctor(s) given in a list (provided as a parameter)
def show_list_of_doctors(list_of_doctors):
    """Render (doctor id, name) tuples as a Streamlit table."""
    doctor_titles = ['Doctor ID', 'Name']
    if len(list_of_doctors) == 0:
        st.warning('No data to show')
    else:
        doctor_details = []
        for doctor in list_of_doctors:
            doctor_details.append([x for x in doctor])
        df = pd.DataFrame(data = doctor_details, columns = doctor_titles)
        st.write(df)


# function to fetch department name from the database for the given department id
def get_department_name(dept_id):
    """Return the name stored for `dept_id` (assumes the id was verified first)."""
    conn, c = db.connection()
    with conn:
        c.execute(
            """
            SELECT name
            FROM department_record
            WHERE id = :id;
            """,
            { 'id': dept_id }
        )
    return c.fetchone()[0]


# class containing all the fields and methods required to work with the departments' table in the database
class Department:

    def __init__(self):
        # all fields start empty; they are filled in by the add/update flows below
        self.name = str()
        self.id = str()
        self.description = str()
        self.contact_number_1 = str()
        self.contact_number_2 = str()
        self.address = str()
        self.email_id = str()

    # method to add a new department record to the database
    def add_department(self):
        """Collect new-department fields from the Streamlit form and INSERT them."""
        st.write('Enter department details:')
        self.name = st.text_input('Department name')
        self.description = st.text_area('Description')
        self.contact_number_1 = st.text_input('Contact number')
        contact_number_2 = st.text_input('Alternate contact number (optional)')
        # store NULL instead of an empty string for the optional number
        self.contact_number_2 = (lambda phone : None if phone == '' else phone)(contact_number_2)
        self.address = st.text_area('Address')
        self.email_id = st.text_input('Email ID')
        self.id = generate_department_id()
        save = st.button('Save')

        # executing SQLite statements to save the new department record to the database
        if save:
            conn, c = db.connection()
            with conn:
                c.execute(
                    """
                    INSERT INTO department_record
                    (
                        id, name, description, contact_number_1,
                        contact_number_2, address, email_id
                    )
                    VALUES (
                        :id, :name, :desc, :phone_1, :phone_2,
                        :address, :email_id
                    );
                    """,
                    {
                        'id': self.id, 'name': self.name,
                        'desc': self.description,
                        'phone_1': self.contact_number_1,
                        'phone_2': self.contact_number_2,
                        'address': self.address, 'email_id': self.email_id
                    }
                )
            st.success('Department details saved successfully.')
            st.write('The Department ID is: ', self.id)
            conn.close()

    # method to update an existing department record in the database
    def update_department(self):
        """Verify a department id, show its current row, then UPDATE the editable fields."""
        id = st.text_input('Enter Department ID of the department to be updated')
        if id == '':
            st.empty()
        elif not verify_department_id(id):
            st.error('Invalid Department ID')
        else:
            st.success('Verified')
            conn, c = db.connection()

            # shows the current details of the department before updating
            with conn:
                c.execute(
                    """
                    SELECT *
                    FROM department_record
                    WHERE id = :id;
                    """,
                    { 'id': id }
                )
                st.write('Here are the current details of the department:')
                show_department_details(c.fetchall())

            st.write('Enter new details of the department:')
            # note: name is intentionally not editable here, only the fields below
            self.description = st.text_area('Description')
            self.contact_number_1 = st.text_input('Contact number')
            contact_number_2 = st.text_input('Alternate contact number (optional)')
            self.contact_number_2 = (lambda phone : None if phone == '' else phone)(contact_number_2)
            self.address = st.text_area('Address')
            self.email_id = st.text_input('Email ID')
            update = st.button('Update')

            # executing SQLite statements to update this department's record in the database
            if update:
                with conn:
                    c.execute(
                        """
                        UPDATE department_record
                        SET description = :desc,
                        contact_number_1 = :phone_1,
                        contact_number_2 = :phone_2,
                        address = :address,
                        email_id = :email_id
                        WHERE id = :id;
                        """,
                        {
                            'id': id, 'desc': self.description,
                            'phone_1': self.contact_number_1,
                            'phone_2': self.contact_number_2,
                            'address': self.address,
                            'email_id': self.email_id
                        }
                    )
                st.success('Department details updated successfully.')
                conn.close()

    # method to delete an existing department record from the database
    def delete_department(self):
        """Verify a department id, show the row, and DELETE it after confirmation."""
        id = st.text_input('Enter Department ID of the department to be deleted')
        if id == '':
            st.empty()
        elif not verify_department_id(id):
            st.error('Invalid Department ID')
        else:
            st.success('Verified')
            conn, c = db.connection()

            # shows the current details of the department before deletion
            with conn:
                c.execute(
                    """
                    SELECT *
                    FROM department_record
                    WHERE id = :id;
                    """,
                    { 'id': id }
                )
                st.write('Here are the details of the department to be deleted:')
                show_department_details(c.fetchall())

                confirm = st.checkbox('Check this box to confirm deletion')
                if confirm:
                    delete = st.button('Delete')

                    # executing SQLite statements to delete this department's record from the database
                    # NOTE(review): unlike the other writes, this DELETE is not
                    # wrapped in its own `with conn:` commit context — confirm
                    # the change is actually committed before conn.close().
                    if delete:
                        c.execute(
                            """
                            DELETE FROM department_record
                            WHERE id = :id;
                            """,
                            { 'id': id }
                        )
                        st.success('Department details deleted successfully.')
            conn.close()

    # method to show the complete department record
    def show_all_departments(self):
        """Display every row of department_record."""
        conn, c = db.connection()
        with conn:
            c.execute(
                """
                SELECT *
                FROM department_record;
                """
            )
            show_department_details(c.fetchall())
        conn.close()

    # method to search and show a particular department's details in the database using department id
    def search_department(self):
        """Verify a department id and display that single row."""
        id = st.text_input('Enter Department ID of the department to be searched')
        if id == '':
            st.empty()
        elif not verify_department_id(id):
            st.error('Invalid Department ID')
        else:
            st.success('Verified')
            conn, c = db.connection()
            with conn:
                c.execute(
                    """
                    SELECT *
                    FROM department_record
                    WHERE id = :id;
                    """,
                    { 'id': id }
                )
                st.write('Here are the details of the department you searched for:')
                show_department_details(c.fetchall())
            conn.close()

    # method to show the list of doctors working in a particular department (using department id)
    def list_dept_doctors(self):
        """Verify a department id and list the doctors whose department_id matches."""
        dept_id = st.text_input('Enter Department ID to get a list of doctors working in that department')
        if dept_id == '':
            st.empty()
        elif not verify_department_id(dept_id):
            st.error('Invalid Department ID')
        else:
            st.success('Verified')
            conn, c = db.connection()
            with conn:
                c.execute(
                    """
                    SELECT id, name
                    FROM doctor_record
                    WHERE department_id = :dept_id;
                    """,
                    { 'dept_id': dept_id }
                )
                st.write('Here is the list of doctors working in the',
                         get_department_name(dept_id), 'department:')
                show_list_of_doctors(c.fetchall())
            conn.close()
from flask import Flask
from flask_wtf import FlaskForm
from flask import flash, url_for, redirect, render_template, request, escape
from wtforms import Form, BooleanField, StringField, PasswordField, validators
from flask_wtf.csrf import CSRFProtect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import os
from werkzeug.security import generate_password_hash, check_password_hash

#csrf = CSRFProtect()
app = Flask(__name__)
#app.config['WTF_CSRF_ENABLED'] = True
csrf = CSRFProtect()
csrf.init_app(app)

SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///appdb.db'
db = SQLAlchemy(app)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#app.config['WTF_CSRF_CHECK_DEFAULT'] =False

#keep track of registered users
# NOTE(review): a single module-level global holds the "logged in" user, so
# login state is shared by every client and worker process — confirm this
# app is only meant for single-user use.
currentUser = ''


class User(db.Model):
    # registered account; password is stored as a werkzeug hash
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    password = db.Column(db.String(120), unique=False, nullable=False)
    phone = db.Column(db.String(10), nullable=True)   # 2-factor phone number


class History(db.Model):
    # one spell-check submission per row
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), nullable=False)
    words = db.Column(db.String(100), nullable=False)
    result = db.Column(db.String(200))


class LoginHistory(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), nullable=False)
    # NOTE(review): default=datetime.now() is evaluated once at import time,
    # so every row gets the same timestamp; passing the callable
    # (default=datetime.now) is presumably what was intended — confirm.
    login = db.Column(db.DateTime, default=datetime.now(), nullable=False)
    logout = db.Column(db.DateTime, default=datetime.now(), nullable=False)


db.create_all()


class RegistrationForm(FlaskForm):
    """Fields for /register: username, password and 2-factor phone."""
    username = StringField('Username', [validators.Length(min=4, max=50, message=(u'username must be between 4 and 25 chars long')), validators.DataRequired()], id='uname')
    password = PasswordField('Password', [validators.Length(min=4, max=25), validators.DataRequired()], id='pword')
    twofactor = StringField('2-factor phone', [validators.Length(min=4,max=15), validators.DataRequired()], id='2fa')


class LoginForm(FlaskForm):
    """Fields for /login: same trio as registration."""
    username = StringField('Username', [validators.Length(min=4, max=25, message=(u'username must be between 4 and 25 chars long')), validators.DataRequired()], id='uname')
    password = PasswordField('Password', [validators.Length(min=4, max=25), validators.DataRequired()], id='pword')
    twofactor = StringField('2-factor phone', [validators.Length(min=4,max=15), validators.DataRequired()], id='2fa')


class SpellCheckForm(FlaskForm):
    """Free-text input submitted for spell checking."""
    words = StringField('Enter words', [validators.Length(min=1), validators.DataRequired()], id='inputtext')


class UserQueryForm(FlaskForm):
    """Admin form: look up another user's spell-check history."""
    username = StringField('Enter username', [validators.Length(min=1), validators.DataRequired()], id='userquery')


class LoginHistoryForm(FlaskForm):
    """Admin form: look up another user's login history."""
    username = StringField('Enter username', [validators.Length(min=1), validators.DataRequired()], id='userid')


@app.route('/register', methods=['GET','POST'])
def register():
    """Create a new account; rejects duplicate usernames."""
    form = RegistrationForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        username = form.username.data
        #if user in registered:
        # duplicate-username check against the User table
        if (db.session.query(User.id).filter_by(username=username).scalar() is not None):
            return render_template('register_fail.html')
        #registered[user] = {"password":escape(form.password.data), "phone":escape(form.twofactor.data)}
        user = User(username=username, password=generate_password_hash(form.password.data), phone=form.twofactor.data)
        db.session.add(user)
        db.session.commit()
        return render_template('register_success.html')
        #else:
        #    return "<html><h1 id='success'>Registration failure!</h1></html>"
    return render_template('register.html', form=form)


@app.route('/login', methods=['GET', 'POST'])
def login():
    """Match username+phone, then verify the password hash; logs a LoginHistory row."""
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        username = escape(form.username.data)
        phone = escape(form.twofactor.data)
        user = db.session.query(User).filter_by(username=username, phone=phone).scalar()
        # print(user.username)
        if(user is not None):
            # NOTE(review): currentUser is set before the password check
            # succeeds, so a wrong password still "logs in" the global
            # user — confirm whether this is intended.
            global currentUser
            currentUser = username
            if (check_password_hash(user.password, form.password.data)):
                record = LoginHistory(username=username)
                db.session.add(record)
                db.session.commit()
                return render_template('login_success.html')
            else:
                return '<html><h1 id="result">Incorrect username or password!</h1> <form method="post"><input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/></form></html>'
        else:
            return '<html><h1 id="result">user does not exist!</h1> <form method="post"><input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/></form></html>'
    return render_template('login.html', form=form)


@app.route('/spell_check', methods=['GET','POST'])
def spell_check():
    """Record the submitted words for the current user and show the result page."""
    global currentUser
    if(currentUser == ''):
        return '<html><h1>login first!</h1></html>'
    else:
        form = SpellCheckForm(request.form)
        if request.method == 'POST' and form.validate():
            # result column is left empty here; only the submission is stored
            record = History(username=currentUser, words=form.words.data, result='')
            db.session.add(record)
            db.session.commit()
            return render_template('spell_check_result.html')
        return render_template('spell_check.html', form=form)


@app.route('/history', methods=['GET','POST'])
def history():
    """Show spell-check history; 'admin' may query any user, others see their own."""
    global currentUser
    #print("current user:" + currentUser)
    if currentUser == 'admin':
        form = UserQueryForm(request.form)
        if request.method == 'POST' and form.validate():
            history = History.query.filter_by(username=form.username.data).all()
            return render_template('history.html', len=len(history), history=history)
        return render_template('user_query.html', form=form)
    if(currentUser !='' and currentUser is not None):
        history = History.query.filter_by(username=currentUser).all()
        return render_template('history.html', len=len(history), history=history)
    else:
        return '<html><h1>Access denied</h1></html>'


@app.route('/history/query<id>', methods=['GET'])
def query_history(id):
    """Show one history record; the username filter keeps users to their own rows."""
    global currentUser
    query = History.query.filter_by(id=id, username=currentUser).scalar()
    #print(query)
    if(query is not None):
        return render_template('query.html', query=query)
    else:
        return '<html><h1>Access denied</h1></html>'


@app.route('/login_history', methods=['GET','POST'])
def login_history():
    """Admin-only: show the login history for a queried user."""
    global currentUser
    if currentUser == 'admin':
        form = LoginHistoryForm(request.form)
        if request.method == 'POST' and form.validate():
            history = LoginHistory.query.filter_by(username=form.username.data).all()
            return render_template('login_history.html', len=len(history), history=history)
        return render_template('user_query.html', form=form)
    else:
        return '<html><h1>Access denied</h1></html>'
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Should be moved to stir_tools

import os
# NOTE(review): `commands` is a Python-2-only module and appears unused here,
# while print() is used as a function — confirm the target interpreter.
import commands
import sys
import shutil

from utils import apple
from utils import spm_tools as spm


class Phantom_Preparation(object):
    """Converts Analyze activity/attenuation phantoms to interfile (.hv/.v)
    volumes resampled to the target scanner voxel grid."""

    def __init__(self, spm_run, act_map, att_map, scanner, scanner_target_size, pet_image=False):
        # act_map / att_map: paths to the Analyze .hdr activity and
        # attenuation maps; pet_image: optional original PET image path.
        self.emiss_hdr = act_map
        self.att_hdr = att_map
        self.original_pet = pet_image
        # scanner_target_size: target voxel size as (x, y, z)
        self.target_size = scanner_target_size
        self.scanner = scanner
        self.phantom_dir = os.path.dirname(self.emiss_hdr)
        self.spm_run = spm_run
        self.logfile = os.path.join(self.phantom_dir, "Phantom_Preparation.log")

    def run(self):
        """Run the preparation and return the updated (.hv) emission and attenuation paths."""
        print("Preparing phantoms for the simulation...")
        self.phantom_preparation()
        return self.emiss_hdr, self.att_hdr

    def phantom_preparation(self):
        """
        Generates .hv and .v files from Analyze images and fits the size to the scanner
        """
        # convert every provided image to float format in place
        for i in [self.emiss_hdr, self.att_hdr, self.original_pet]:
            if i:
                rcommand = "cambia_formato_hdr %s %s fl " % (i,i)
                apple.osrun(rcommand,self.logfile)

        zpix, zsize, xpix, xsize, ypix, ysize = apple.read_analyze_header(self.emiss_hdr,self.logfile)

        # new matrix dimensions chosen so the field of view is preserved
        # at the target voxel size
        new_x_dims = int(xsize*xpix/self.target_size[0])
        new_y_dims = int(ysize*ypix/self.target_size[1])
        new_z_dims = int(zsize*zpix/self.target_size[2])

        for i in [self.emiss_hdr, self.att_hdr, self.original_pet]:
            if i:
                output_i = i[0:-4]+"_" + self.scanner + ".hdr"
                # resample the matrix, then regenerate a matching Analyze header
                rcommand = 'cambia_matriz_imagen_hdr %s %s %s %s %s novecino' % (i, output_i, new_x_dims, new_y_dims, new_z_dims)
                apple.osrun(rcommand,self.logfile)
                rcommand = 'gen_hdr %s %s %s %s fl %s %s %s 0' % (output_i[0:-4], new_x_dims, new_y_dims, new_z_dims, self.target_size[0], self.target_size[1], self.target_size[2])
                apple.osrun(rcommand,self.logfile)

                if i == self.emiss_hdr:
                    # If the input is the activity map it applies positron range and non-colinearity
                    output_img = output_i[0:-3] + "img"
                    mfile = os.path.join(self.phantom_dir, "smooth.m")
                    smoothed = spm.smoothing_xyz(self.spm_run,mfile,output_i,2,2,2,"s",self.logfile)
                    shutil.move(smoothed,output_img)

                if i == self.att_hdr:
                    # If the input is the attenuation map it removes higher values
                    rcommand = "cambia_valores_de_un_intervalo %s %s 1 10000000000000 1 " % (output_i,output_i)
                    apple.osrun(rcommand,self.logfile)

                # the .v interfile payload is a copy of the raw .img data,
                # paired with a freshly written .hv interfile header
                shutil.copy(output_i[0:-3] + "img", output_i[0:-3] + "v")
                apple.write_interfile_header(output_i[0:-3] + "hv", new_x_dims, self.target_size[0], new_y_dims, self.target_size[1],new_z_dims, self.target_size[2])

        # point the object at the new interfile headers for downstream use
        self.emiss_hdr = self.emiss_hdr[0:-4]+"_" + self.scanner + ".hv"
        self.att_hdr = self.att_hdr[0:-4]+"_" + self.scanner + ".hv"
import django_filters from django_filters import DateFilter, CharFilter from .models import * class ContentFilter(django_filters.FilterSet): # start_date = DateFilter(field_name='updated', lookup_expr='gte') # end_date = DateFilter(field_name='updated', lookup_expr='lte') note = CharFilter(field_name='month_name', lookup_expr='icontains') class Meta: model = Content fields = '__all__' exclude = ['title', 'month_name', 'monthly_active_user', 'global_rank', 'country_traffic', 'social_media_traffic', 'updated'] class BrandFilter(django_filters.FilterSet): title = CharFilter(field_name='title', lookup_expr='icontains') language = CharFilter(field_name='language', lookup_expr='icontains') website = CharFilter(field_name='website', lookup_expr='icontains') # start_date = DateFilter(field_name='created', lookup_expr='gte') # end_date = DateFilter(field_name='created', lookup_expr='lte') class Meta: model = Brand fields = '__all__' exclude = ['slug', 'title', 'website', 'language', 'created']
#Cal Hasie #11/5/12 #Lab 9- 9.8 #This program will capitalize the beginning of the users sentences. def main(): #Takes in the users sentences. user = input("Please enter the sentences you wish: ") #Splits apart each sentence sentence_list = user.split('.') #Prints the list of sentences print(sentence_list) #Returns each sentence with the first letter capitalized. for sentence in sentence_list: sentence = sentence.strip() print(sentence.capitalize()) main()
import zipfile
import os
import argparse
from oauth2client import file, client, tools
from httplib2 import Http
from googleapiclient.discovery import build
import io
import httplib2
from pathlib import Path
import re
import webbrowser


# ================================================
# Create a temporary zip file
# ================================================
def zipTheFiles(zipFileN):
    """Zip every sibling of the ZipAndShip folder into <zipFileN>.zip.

    Assumes the current working directory is the ZipAndShip folder, so
    '../<entry>' reaches the files being archived -- TODO confirm.
    """
    entries = []
    skip = re.compile(r'ZipAndShip')
    # BUG FIX: the loop variable was named `file`, shadowing the `file`
    # module imported from oauth2client above.
    for entry in os.listdir(os.path.dirname(runFolderPath)):
        if not skip.search(entry):
            entries.append(entry)
    with zipfile.ZipFile(zipFileN + ".zip", "w") as zf:
        for f in entries:
            zf.write('../' + f, f)
            print(f)
    return True


# ================================================
# Send to Google Drive
# ================================================
def shipIt(zipFileN):
    """Authenticate against Google Drive (OAuth2) and upload the zip file."""
    # Authentication
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
    SCOPES = 'https://www.googleapis.com/auth/drive.file'
    store = file.Storage(os.path.join(runFolderPath, 'storage.json'))
    creds = store.get()
    if not creds or creds.invalid:
        print("make new storage data file ")
        flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
        creds = tools.run_flow(flow, store, flags) \
            if flags else tools.run(flow, store)
    DRIVE = build('drive', 'v3', http=creds.authorize(Http()))
    # Upload
    file_name = (zipFileN + '.zip')
    metadata = {'name': file_name, 'mimeType': None}
    # BUG FIX: this message contained mojibake (a broken emoji); plain ASCII now.
    print("Uploading your zip file... : " + file_name)
    res = DRIVE.files().create(body=metadata, media_body=file_name).execute()
    if res:
        print('Uploaded "%s" (%s)' % (file_name, res['mimeType']))


# ================================================
# Remove the zip file
# ================================================
def removeZipFile(zipFileN):
    """Delete the temporary archive; tolerate it never having been created."""
    zip_path = zipFileN + ".zip"
    # Guard: zipping may have failed before the archive was written, and this
    # runs from a `finally` block -- an unconditional remove would raise.
    if os.path.exists(zip_path):
        os.remove(zip_path)


# ================================================
# Run and handle the process
# ================================================
runFolderPath = os.path.dirname(os.path.abspath(__file__))

# BUG FIX: the credentials check used a Windows-only "\\" separator; use a
# portable join so the script also works on POSIX systems.
if os.path.isfile(os.path.join(runFolderPath, "credentials.json")):
    zipFileName = input("What do you want to call the zip file?: ")
    try:
        zipTheFiles(zipFileName)
    except Exception:  # was a bare `except:`; keep the friendly exit prompt
        input("Oops! Failed to zip the files! Press Enter key to exit..")
    else:
        shipIt(zipFileName)
        askToOpen = input("Do you want to open your drive? y/n: ")
        if askToOpen == "y" or askToOpen == "yes":
            webbrowser.open('https://drive.google.com/drive/my-drive', new=2)
    finally:
        removeZipFile(zipFileName)
else:
    print('You MUST set up google api modules and include "credentials.json" in this folder!')
    print('1. Install google api modules: pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib oauth2client')
    print('2. Go to: https://developers.google.com/drive/api/v3/quickstart/python')
    print('3. Click on Enable Drive API and download the clinet config.')
    input('4. Place the client config in the ZipAndShip folder and run the app again.')
    exit()
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2013 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
#    TFL.SDG.C.Arg_List
#
# Purpose
#    Model C argument lists
#
# Revision Dates
#    28-Jul-2004 (CT) Creation
#     3-Aug-2004 (CT) Don't redefine the value of `Decl`
#    12-Aug-2004 (MG) `default_cgi` added
#    13-Aug-2004 (CT) `base_indent2` replaced by `base_indent * 2`
#    23-Feb-2005 (CED) `apidoc_tex_format` defined
#    08-Dec-2005 (MG) Bugfixes
#    12-Dec-2005 (CT) `Regexp` import fixed
#    26-Feb-2012 (MG) `__future__` imports added
#    ««revision-date»»···
#--

from __future__ import absolute_import, division, print_function, unicode_literals

from _TFL import TFL
from _TFL.pyk import pyk
from _TFL.predicate import un_nested
from _TFL.Regexp import *

import _TFL._SDG._C.Node
import _TFL._SDG._C.Expression
import _TFL._SDG._C.Var


class Arg_List (TFL.SDG.C.Node) :
    """Model C argument lists"""

    # Single children group `Decl`; `default_cgi` aliases it so children
    # passed without an explicit group land in the declaration group.
    children_group_names = (default_cgi, ) = (TFL.SDG.C.Node.Decl, )

    # All three output formats render the declaration children separated by
    # a comma plus twice the base indentation.
    h_format = c_format = apidoc_tex_format = \
        """%(:sep=%(base_indent * 2)s, :*decl_children:)s"""

    # Matches either a bare `void` or a `<type> <name>` argument declaration.
    # NOTE(review): TFL.Regexp proxies the groups of the last match as
    # attributes (`.void`, `.type`, `.name`) -- confirm in _TFL.Regexp.
    arg_pat = Regexp \
        ( r"^"
          r"(?: "
          r" (?P<void> void)"
          r"| (?P<type> .+) \s+ (?P<name> [_a-z][_a-z0-9]*)"
          r")"
          r"$"
        , re.VERBOSE | re.IGNORECASE
        )

    def __init__ (self, * children, ** kw) :
        """Accept children as nodes or as strings (single comma-separated
           string or one string per argument) and convert them to nodes.
        """
        children = un_nested (children)
        children = self._convert_children (children)
        self.__super.__init__ (* children, ** kw)
    # end def __init__

    def _convert_children (self, children) :
        # A single string argument is treated as a comma-separated list.
        if len (children) == 1 and isinstance (children [0], pyk.string_types) :
            children = [c.strip () for c in children [0].split (",")]
        result = []
        for c in children :
            if isinstance (c, pyk.string_types) :
                if self.arg_pat.match (c) :
                    if self.arg_pat.void :
                        # A bare `void` becomes an expression node
                        c = TFL.SDG.C.Expression (self.arg_pat.void)
                    else :
                        # `<type> <name>` becomes a variable declaration node
                        c = TFL.SDG.C.Var \
                            (self.arg_pat.type, self.arg_pat.name)
                else :
                    raise TFL.SDG.Invalid_Node (self, c)
            # Every child belongs to the declaration group and drops any
            # trailing text (the separator is supplied by the format).
            c.cgi = self.Decl
            c.trailer = ""
            result.append (c)
        return result
    # end def _convert_children

# end class Arg_List

if __name__ != "__main__" :
    TFL.SDG.C._Export ("*")
### __END__ TFL.SDG.C.Arg_List
import math


def main():
    """Read N and N (T, A) ratio pairs; print the minimal final vote total."""
    N = int(input())
    TA = [ list(map(int, input().split())) for _ in range(N) ]
    # ans = f(N, TA)
    ans = editorial(N, TA)
    print(ans)


def test_ceil_mod():
    """https://stackoverflow.com/questions/14822184/is-there-a-ceiling-equivalent-of-operator-in-python

    Sanity check: the integer ceiling idiom `a // b + (a % b != 0)` matches
    math.ceil for all small positive pairs (math.ceil on floats can lose
    precision for huge values, as the linked question shows).

    >>> import math
    >>> math.ceil(11520000000000000102.9) == 11520000000000000000
    True
    """
    for i in range(1, 10 ** 5):
        for j in range(1, 10 ** 2):
            assert math.ceil(i / j) == (i // j + (i % j != 0)), (i, j)
            assert math.ceil(j / i) == (j // i + (j % i != 0)), (j, i)


def editorial(N, TA):
    """Editorial solution: keep running counts (A, B); for each reported
    ratio (x, y) scale up to the smallest multiple n with n*x >= A and
    n*y >= B (votes never decrease)."""
    A, B = 1, 1
    for x, y in TA:
        # n = max(math.ceil(A / x), math.ceil(B / y))
        # integer ceiling avoids float precision loss on big counts
        n = max(A // x + (A % x != 0), B // y + (B % y != 0))
        A, B = n * x, n * y
    ans = A + B
    return ans


def f(N, TA):
    """Original case-by-case attempt (kept for reference; `editorial` is used)."""
    nt, na = TA[0]
    bef_t, bef_a = nt, na
    for t, a in TA[1:]:
        if (bef_t, bef_a) == (t, a):
            # no change in the ratio
            pass
        elif (t, a) == (1, 1):
            # tied ratio, so equalize both counts
            m = max(nt, na)
            nt, na = m, m
        elif t == 1:
            na = nt * a
        elif a == 1:
            nt = na * t
        elif (bef_t, bef_a) == (1, 1):
            # 1:1 -> 3,3
            # 3:2 -> 3,2(NG)  6:4 -> 6,4(OK)
            i = 1
            while nt <= t or na <= a:
                nt, na = t * i, a * i
                i += 1
        elif bef_t == t and bef_a < a:
            # 2:3 -> 4,6
            # 2:7 -> 4,14
            na = na // bef_a * a
        elif bef_t < t and bef_a == a:
            nt = nt // bef_t * t
        else:
            aa = na // a
            tt = nt // t
            if aa > 0 and tt > 0 and aa == tt:
                pass
            else:
                # Round each count up to the next multiple of its ratio part,
                # then scale the smaller side to match the larger multiplier.
                x = nt % t
                y = na % a
                if x != 0:
                    add_t = t - x
                    nt += add_t
                if y != 0:
                    add_a = a - y
                    na += add_a
                aa = na // a
                tt = nt // t
                if aa > tt:
                    nt = t * aa
                elif aa < tt:
                    na = a * tt
                # (nt + x): (na + y) = t: a
                # na * t + y * t = (nt + x) * a
                # y * t = (nt + x) * a - na * t
                # y = (nt + x) * a / t - na
                # x,y >= 0
                # (nt + x) / t is int
                # a / t is int
        bef_t, bef_a = t, a
        # print(nt, na, (t, a))
    ans = nt + na
    return ans


if __name__ == '__main__':
    main()
from argparse import ArgumentParser, SUPPRESS
from invisibleroads_macros.disk import link_path
from invisibleroads_macros.iterable import sort_dictionary
from invisibleroads_macros.text import unicode_safely
from six.moves import getcwd
from sys import argv

from . import ToolScript, corral_arguments, run_script
from ..configurations import get_default_key, parse_data_dictionary_from
from ..exceptions import DataParseError
from ..models import Result
from ..types import StringType, get_data_type, RESERVED_ARGUMENT_NAMES


class RunScript(ToolScript):
    """Command-line script that runs a tool with arguments parsed from argv."""

    def run(self, args):
        """Parse tool arguments, stage them into a Result folder, run the tool.

        Returns a list of (key + '.error', message) pairs when argument
        parsing fails; otherwise delegates to ``run_script``.
        """
        tool_definition, data_folder = super(RunScript, self).run(args)
        tool_name = tool_definition['tool_name']
        argument_parser = ArgumentParser(tool_name)
        # Accept (and hide) the positional tool name so it does not confuse
        # the per-tool argument parsing below.
        argument_parser.add_argument(
            'tool_name', nargs='?', help=SUPPRESS, type=unicode_safely)
        argument_parser.add_argument(
            '--target_folder', type=unicode_safely, metavar='FOLDER')
        argument_parser = configure_argument_parser(
            argument_parser, tool_definition)
        # parse_known_args ignores unrecognized flags; keys are ordered to
        # match the tool definition's argument_names.
        raw_arguments = sort_dictionary(argument_parser.parse_known_args(
            argv[2:])[0].__dict__, tool_definition['argument_names'])
        try:
            result_arguments = parse_data_dictionary_from(
                raw_arguments, getcwd(), '*', tool_definition)
        except DataParseError as e:
            return [(k + '.error', v) for k, v in e.message_by_name.items()]
        result = Result.spawn(data_folder)
        # Link (not copy) argument files into the result's source folder.
        result_arguments = corral_arguments(result.get_source_folder(
            data_folder), result_arguments, link_path)
        result_folder = result.get_folder(data_folder)
        target_folder = raw_arguments.get('target_folder')
        run_script(
            tool_definition, result_arguments, result_folder, target_folder,
            external_folders='*')


def configure_argument_parser(argument_parser, tool_definition):
    'Expose tool arguments as command-line arguments'
    for k in tool_definition['argument_names']:
        if k in RESERVED_ARGUMENT_NAMES:
            continue
        d = {}
        d['metavar'] = get_metavar(k)
        # Arguments without a default in the tool definition are mandatory.
        if not get_default_key(k, tool_definition):
            d['required'] = True
        argument_parser.add_argument('--' + k, type=unicode_safely, **d)
    return argument_parser


def get_metavar(key):
    """Derive an argument-parser metavar from *key*'s data type (or suffix)."""
    data_type = get_data_type(key)
    metavar = data_type.suffixes[0]
    if data_type == StringType:
        if key.endswith('_folder'):
            metavar = 'FOLDER'
        elif key.endswith('_path'):
            metavar = 'PATH'
    return metavar.upper()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/10/31 9:37 AM
# @Author  : Ganodermaking

from openpyxl import load_workbook


class Excel:
    """Thin openpyxl wrapper: read test-case rows and record actual results."""

    def __init__(self, filename, sheet_name):
        """Open *filename* and select the worksheet named *sheet_name*."""
        self.filename = filename
        workbook = load_workbook(filename)
        self.wb = workbook
        self.ws = workbook[sheet_name]

    def write(self, row, actual_code, actual_msg):
        """Store the actual code/message in columns 6 and 7 of *row*, then save."""
        for column, value in ((6, actual_code), (7, actual_msg)):
            self.ws.cell(row=row, column=column).value = value
        self.wb.save(self.filename)

    def read(self):
        """Return every row after the header as a dict.

        Column 2 -> 'url', column 3 -> 'data', column 4 -> 'code' (as int);
        the first column is intentionally skipped.
        """
        records = []
        for sheet_row in list(self.ws.rows)[1:]:
            record = {}
            for index, cell in enumerate(sheet_row):
                if index == 1:
                    record['url'] = cell.value
                elif index == 2:
                    record['data'] = cell.value
                elif index == 3:
                    record['code'] = int(cell.value)
            records.append(record)
        return records
from commitizen import factory, out
from commitizen.config import BaseConfig


class Example:
    """Show an example so people understands the rules."""

    def __init__(self, config: BaseConfig, *args):
        """Keep the configuration and build the matching committer."""
        self.config: BaseConfig = config
        self.cz = factory.commiter_factory(config)

    def __call__(self):
        """Write the committer's example commit message to the output."""
        example_text = self.cz.example()
        out.write(example_text)
'''
Created on Nov 10, 2015

@author: Jonathan
'''
from collections import Counter


def emailsLargest(courses):
    """Return the emails of everyone in the most popular course.

    Each record in *courses* is "course:section:email". The result joins the
    emails (in sorted-record order) of the course with the most records;
    ties go to the alphabetically first course name.

    BUG FIX: the original counted the first occurrence of each course as 0
    (``else: sizes[name] += 1``), i.e. it stored occurrences - 1. The argmax
    (and its tie-breaking) was unaffected because the offset was uniform,
    but the counts were wrong; Counter stores true occurrence counts.
    """
    counts = Counter(course.split(":")[0] for course in courses)
    # max over the sorted names keeps the first name on ties, as before.
    largest = max(sorted(counts), key=counts.get)
    emails = [course.split(":")[2]
              for course in sorted(courses)
              if course.split(":")[0] == largest]
    return " ".join(emails)


if __name__ == '__main__':
    pass
# -*- code: utf-8 -*- class UninitializedConfiguration(Exception): pass class MongoDBException(Exception): pass
from typing import List


class Solution:
    def reverseString(self, s: List[str]) -> None:
        """
        Do not return anything, modify s in-place instead.
        """
        left, right = 0, len(s) - 1
        # Swap symmetric pairs, walking both indices toward the middle.
        while left < right:
            s[left], s[right] = s[right], s[left]
            left += 1
            right -= 1


sol = Solution()
s = ['a', 'b', 'q']
sol.reverseString(s)
print(s)
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import json import codecs import sys import pandas as pd import numpy as np import requests from tqdm import tqdm import random import csv from html_processor import normalize_text def select_random_tagged_works(n=75): random.seed(42) source_file_trials = 'data/kadist.json' dest_file_trials = 'data/trials.json' gsheet_csv_trials = 'data/trials.csv' trials = [] with codecs.open(source_file_trials, 'rb', 'utf-8') as f: for work in json.loads(f.read()): if 'description' in work: if 'user_tags' in work and work['user_tags']: if '_thumbnails' in work and 'medium' in work['_thumbnails']: artist_name = ', '.join([x['post_title'] for x in work['_artists']]) permalink = work['permalink'] title = work['title'] tags = work['user_tags'] description = normalize_text(work['description']) thumbnail_url = work['_thumbnails']['medium']['url'] trials.append({ "artist_name": artist_name, "title": title, "description": description, "user_tags": tags, "thumbnail": thumbnail_url, "permalink": permalink, }) # # select sample # random.shuffle(trials) sample = trials[:n] # # write json output # with codecs.open(dest_file_trials, 'wb', 'utf-8') as f: f.write(json.dumps(sample, ensure_ascii=False, indent=True)) print('\n *', "written", len(sample), "trials", "\n" ) # # now write a csv for gsheet import # df = pd.DataFrame(sample) df['user_tags'] = df['user_tags'].str.join(',') df['thumbnail'] = df['thumbnail'].apply(lambda url: '=IMAGE("%s", 1)' % (url)) df.to_csv(gsheet_csv_trials, sep=',', encoding='utf-8', index=False) return sample if __name__ == '__main__': samples = select_random_tagged_works()
#!/usr/bin/python3

import multiprocessing
import multiprocessing.connection
import re
import subprocess
import sys
import time


# launch an agent in it's own process
def launch_agent(id, v):
    """Run one sudoku agent subprocess with its cell id and starting value."""
    p = subprocess.run(["./bin/sudoku_agent", str(id), v])


# launch 81 agents in parallel
def launch_agents(starting_values):
    """Start one process per starting value; return their sentinels."""
    sentinels = []
    for i, v in enumerate(starting_values):
        p = multiprocessing.Process(target=launch_agent, args=(i, v.strip()))
        p.start()
        sentinels.append(p.sentinel)
    return sentinels


# print the values as a 9x9 grid
def print_board(values):
    """Print *values* (a flat sequence of 81 cells) as a 9x9 grid."""
    for i in range(9):
        for j in range(9):
            x = i * 9 + j
            print(values[x], end=" ")
        print("")
    print("")


# use distributed madara agents to solve sudoku
def main():
    """Read the puzzle, launch the agents, wait, then print the solution."""
    filename = sys.argv[1]
    # read in the problem, print board, launch agents
    with open(filename, "r") as problem:
        starting_values = [v.strip() for v in problem.readlines()]
    print("\nStarting Configuration\n")
    print_board(starting_values)
    print(f"Launching {len(starting_values)} agents\n")
    sentinels = launch_agents(starting_values)
    # poll until every agent process has exited
    while len(multiprocessing.connection.wait(sentinels)) < len(sentinels):
        time.sleep(1)
    # read in the solution (saved by an agent), print board
    with open('out/sudoku.kkb', 'r') as karl:
        text = karl.read()
    data = re.findall("agents.([0-9]+).value=([0-9])", text)
    solution = [0] * 81
    for i, j in data:
        solution[int(i)] = j
    print_board(solution)


if __name__ == "__main__":
    # BUG FIX: was `len(sys.argv) is 2` -- identity comparison against an int
    # literal is implementation-dependent (and a SyntaxWarning on 3.8+);
    # value equality is what was meant.
    assert len(sys.argv) == 2, "You must supply a filename"
    main()
import os
import requests
import operator
import re
import json
from datetime import datetime, timedelta
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
CORS(app)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)

# imported late on purpose: mt_threads needs `app`/`db` defined above
from mt_threads import *


@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the single-page front end."""
    return render_template('index.html')


@app.route('/start', methods=['POST'])
def get_counts():
    """Normalize the submitted URL and acknowledge the request.

    BUG FIX: the original ended with a bare ``return`` (None), which makes
    Flask raise "view function did not return a valid response"; return an
    empty 204 response instead.
    """
    # get url
    data = json.loads(request.data.decode())
    url = data["url"]
    if 'http' not in url[:4]:
        url = 'http://' + url
    print("get_counts: url is --->{}".format(url))
    return '', 204


@app.route("/results", methods=['GET'])
def get_results():
    """Return all Results rows recorded in the last 3 days."""
    now = datetime.now()
    pickup_actions_time = now - timedelta(days=3)
    results = Results.query.filter(Results.time > pickup_actions_time).all()
    return str(results), 200


if __name__ == '__main__':
    try:
        monitor = ChangeMonitor('/tmp/testContent')
        # monitor.add_copy_imgs_job()
        # monitor.add_since_request_job()
        # monitor.start()
        app.run(host='0.0.0.0')
    except (KeyboardInterrupt, SystemExit):
        print("Shutdown monitor!")
        monitor.shutdown()
class cat:
    """Minimal demo class: shows that __init__ runs automatically on creation."""

    def __init__(self):
        # Called automatically when the instance is created.
        print("cat被创建了")  # "the cat was created"

    def eat(self):
        # Ordinary methods must be called explicitly through the instance.
        print("小猫爱吃鱼")  # "the kitten loves eating fish"


c = cat()  # __init__ is invoked automatically at instantiation
c.eat()  # ordinary methods must be called explicitly in code
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'DetailsView.ui' # # Created: Wed Apr 25 11:24:59 2018 # by: pyside-uic 0.2.15 running on PySide 1.2.4 # # WARNING! All changes made in this file will be lost! from PySide import QtCore, QtGui class Ui_DetailsView(object): def setupUi(self, DetailsView): DetailsView.setObjectName("DetailsView") DetailsView.resize(563, 689) self.informationLabel = QtGui.QLabel(DetailsView) self.informationLabel.setGeometry(QtCore.QRect(10, 20, 91, 17)) self.informationLabel.setObjectName("informationLabel") self.informationValueLabel = QtGui.QTextEdit(DetailsView) self.informationValueLabel.setGeometry(QtCore.QRect(10, 40, 541, 641)) self.informationValueLabel.setObjectName("informationValueLabel") self.retranslateUi(DetailsView) QtCore.QMetaObject.connectSlotsByName(DetailsView) def retranslateUi(self, DetailsView): DetailsView.setWindowTitle(QtGui.QApplication.translate("DetailsView", "More details", None, QtGui.QApplication.UnicodeUTF8)) self.informationLabel.setText(QtGui.QApplication.translate("DetailsView", "Details:", None, QtGui.QApplication.UnicodeUTF8))
Given an integer array nums, return the number of range sums that lie in [lower, upper] inclusive. Range sum S(i, j) is defined as the sum of the elements in nums between indices i and j (i ≤ j), inclusive. Note: A naive algorithm of O(n²) is trivial. You MUST do better than that. Example: Given nums = [-2, 5, -1], lower = -2, upper = 2, Return 3. The three ranges are : [0, 0], [2, 2], [0, 2] and their respective sums are: -2, -1, 2. Credits: Special thanks to @dietpepsi for adding this problem and creating all test cases.
import graphene

from schema.event import CreateEvent, UpdateEvent, DeleteEvent


class Mutation(graphene.ObjectType):
    """Root GraphQL mutation type: exposes event create/update/delete."""
    create_event = CreateEvent.Field()
    update_event = UpdateEvent.Field()
    delete_event = DeleteEvent.Field()
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc from __future__ import unicode_literals from snapshottest import GenericRepr, Snapshot snapshots = Snapshot() snapshots['TestCase01CreateTransitionAPITestCase::test_case status'] = 200 snapshots['TestCase01CreateTransitionAPITestCase::test_case body'] = { 'description': 'string', 'from_state': { 'from_state_id': 1, 'name': 'string' }, 'name': 'string', 'to_state': { 'name': 'string', 'to_state_id': 1 }, 'transition_id': 1 } snapshots['TestCase01CreateTransitionAPITestCase::test_case header_params'] = { 'content-language': [ 'Content-Language', 'en' ], 'content-length': [ '152', 'Content-Length' ], 'content-type': [ 'Content-Type', 'application/json' ], 'vary': [ 'Accept-Language, Origin', 'Vary' ], 'x-frame-options': [ 'DENY', 'X-Frame-Options' ] } snapshots['TestCase01CreateTransitionAPITestCase::test_case name'] = 'transition_0' snapshots['TestCase01CreateTransitionAPITestCase::test_case description'] = 'description_0' snapshots['TestCase01CreateTransitionAPITestCase::test_case from_state'] = GenericRepr('<State: State object (1)>') snapshots['TestCase01CreateTransitionAPITestCase::test_case to_state'] = GenericRepr('<State: State object (2)>')
#! /usr/bin/env python
# coding=utf-8
#================================================================
#   Copyright (C) 2018 * Ltd. All rights reserved.
#
#   Editor      : VIM
#   File name   : test.py
#   Author      : YunYang1994
#   Created date: 2018-12-20 11:58:21
#   Description : Debug script: reads one training example from a COCO
#                 tfrecord, draws its ground-truth boxes, and fills the
#                 three YOLOv3 y_true target tensors for inspection.
#
#================================================================

import cv2
import numpy as np
import tensorflow as tf
from PIL import Image
from core.dataset import dataset, Parser
from core import utils

INPUT_SIZE = 416
BATCH_SIZE = 1
EPOCHS = 313        # NOTE(review): unused in this script
SHUFFLE_SIZE = 1    # NOTE(review): unused in this script

sess = tf.Session()

classes = utils.read_coco_names('./data/coco.names')
num_classes = len(classes)
train_tfrecord = "../COCO/tfrecords/coco_train0000.tfrecords"
anchors = utils.get_anchors('./data/coco_anchors.txt')

# Check whether the image resize is handled correctly; debug=True makes the
# parser return raw (image, gt_boxes) pairs instead of training targets.
parser = Parser(416, 416, anchors, num_classes, debug=True)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=None)

example = trainset.get_next()

image, gt_boxes = sess.run(example)
image, gt_boxes = image[0], gt_boxes[0]

# Draw every ground-truth box onto the image for visual inspection.
n_box = len(gt_boxes)
for i in range(n_box):
    image = cv2.rectangle(image,
                          (int(float(gt_boxes[i][0])), int(float(gt_boxes[i][1]))),
                          (int(float(gt_boxes[i][2])), int(float(gt_boxes[i][3]))),
                          (255, 0, 0), 2)
image = Image.fromarray(np.uint8(image))

# Anchor indices per detection scale (13x13, 26x26, 52x52 grids).
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
grid_sizes = [[INPUT_SIZE // x, INPUT_SIZE // x] for x in (32, 16, 8)]

box_centers = (gt_boxes[:, 0:2] + gt_boxes[:, 2:4]) / 2  # the center of box
box_sizes = gt_boxes[:, 2:4] - gt_boxes[:, 0:2]  # the height and width of box

# Convert gt_boxes in place from corner (x1,y1,x2,y2) to center/size form.
gt_boxes[:, 0:2] = box_centers
gt_boxes[:, 2:4] = box_sizes

y_true_13 = np.zeros(shape=[grid_sizes[0][0], grid_sizes[0][1], 3, 5 + num_classes], dtype=np.float32)
y_true_26 = np.zeros(shape=[grid_sizes[1][0], grid_sizes[1][1], 3, 5 + num_classes], dtype=np.float32)
y_true_52 = np.zeros(shape=[grid_sizes[2][0], grid_sizes[2][1], 3, 5 + num_classes], dtype=np.float32)
y_true = [y_true_13, y_true_26, y_true_52]

anchors_max = anchors / 2.
anchors_min = -anchors_max
valid_mask = box_sizes[:, 0] > 0

# Discard zero rows.
wh = box_sizes[valid_mask]
# Set the center of all boxes as the origin of their coordinates and
# correct their coordinates.
wh = np.expand_dims(wh, -2)
boxes_max = wh / 2.
boxes_min = -boxes_max

# IoU between each ground-truth box and each anchor, both centered at origin.
intersect_mins = np.maximum(boxes_min, anchors_min)
intersect_maxs = np.minimum(boxes_max, anchors_max)
intersect_wh = np.maximum(intersect_maxs - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[:, 0] * anchors[:, 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)

# Find best anchor for each true box
best_anchor = np.argmax(iou, axis=-1)

# Write each ground-truth box into the scale that owns its best anchor.
for t, n in enumerate(best_anchor):
    for l in range(3):
        if n not in anchor_mask[l]:
            continue
        i = np.floor(gt_boxes[t, 0] / INPUT_SIZE * grid_sizes[l][1]).astype('int32')
        j = np.floor(gt_boxes[t, 1] / INPUT_SIZE * grid_sizes[l][0]).astype('int32')
        k = anchor_mask[l].index(n)
        c = gt_boxes[t, 4].astype('int32')
        print(j, i, k)
        y_true[l][j, i, k, 0:4] = gt_boxes[t, 0:4]
        y_true[l][j, i, k, 4] = 1.
        y_true[l][j, i, k, 5 + c] = 1.
        print(y_true[l][j, i, k])

# (A large commented-out duplicate implementation of the y_true fill that
# followed here was removed; see version control history if it is needed.)
# -*- coding: utf-8 -*-
from Classes import Job


def locate_min(sum_list):
    """Return the smallest value in *sum_list* and every index holding it."""
    smallest = min(sum_list)
    min_indexes = [idx for idx, value in enumerate(sum_list)
                   if value == smallest]
    return smallest, min_indexes


def create_jobs(input):
    """Build Job objects from an m x n matrix.

    :param input: matrix of (duration, extra) pairs, one row per machine
    :return: flat list of Job objects
    """
    jobs = []
    for machine_index, machine_row in enumerate(input):
        for entry in machine_row:
            jobs.append(Job(entry[0], machine_index, entry[1]))
    return jobs


def get_jobs_by_machine(machine_id, jobs_list):
    """Return the jobs from *jobs_list* belonging to machine *machine_id*."""
    return [job for job in jobs_list if job.machine_id == machine_id]


def get_max_sequences(sequences_list):
    """Return the indexes of every sequence whose makespan is the maximum."""
    max_value = max(seq.makespan for seq in sequences_list)
    return [idx for idx, seq in enumerate(sequences_list)
            if seq.makespan == max_value]


def get_min_sequences(sequences_list):
    """Return the indexes of every sequence whose makespan is the minimum."""
    min_value = min(seq.makespan for seq in sequences_list)
    return [idx for idx, seq in enumerate(sequences_list)
            if seq.makespan == min_value]
import sys
from subprocess import run
# BUG FIX: was `from path import getcwd` -- `getcwd` lives in the standard
# library `os` module, not in the third-party `path` package.
from os import getcwd


def get_dependencies(file_name):
    """Return the resolved shared-library paths *file_name* depends on.

    Runs `ldd` and keeps the last whitespace-separated column of each line
    (equivalent to `awk '{print $7}'`); the first two entries (the vdso and
    the requested library header line) are dropped.
    """
    out = []
    output = run(
        [
            "ldd",
            file_name,
        ],
        capture_output=True,
    )
    # raw ldd output
    dependencies = output.stdout.decode().split("\n")
    # only last column (awk '{print $7}')
    for library in dependencies:
        out.append(library.split(" ")[-1])
    out = out[2:]
    return out


def create_folder(file_name):
    """Return the path components of *file_name*'s directory.

    Currently only computes (and prints) the target path; the actual
    directory creation is still TODO.
    """
    root = getcwd()
    print(root)
    path = file_name.split('/')[:-1]
    print(path)
    # TODO: actually create the directory, e.g.
    # os.makedirs("/".join(path), exist_ok=True)
    return path


def cp_to_chroot(file_name):
    """Copy *file_name* into the chroot tree (not implemented yet)."""
    pass


def main(file_name):
    """Print the dependency list of *file_name*."""
    # (removed an unused `library = sys.argv[1]` binding)
    list_of_deps = get_dependencies(file_name)
    print(list_of_deps)


if __name__ == "__main__":
    print(f"library: {sys.argv[1]}")
    create_folder('/usr/local/lib/libiconv.so.7.0')
    main(sys.argv[1])
class Solution(object):
    def backspaceCompare(self, S, T):
        """
        :type S: str
        :type T: str
        :rtype: bool

        Compare the two strings after applying '#' as a backspace: each '#'
        deletes the most recent surviving character (if any).
        """
        def apply_backspaces(text):
            # Simulate typing with a stack; '#' pops the last kept character.
            kept = []
            for ch in text:
                if ch == '#':
                    if kept:
                        kept.pop()
                else:
                    kept.append(ch)
            return kept

        return apply_backspaces(S) == apply_backspaces(T)
from socket import gethostbyname, gethostname, socket, AF_INET, SOCK_STREAM
from time import ctime, time

from HiLens.utils import set_temperature_and_humidity, get_command

HOST = ''           # empty host: bind on all interfaces
PORT = 21567
BUFSIZ = 1024       # receive buffer size in bytes
ADDR = (HOST, PORT)


def start_listen():
    """Accept TCP clients and process their comma-separated sensor payloads.

    Each payload is expected to carry 4 or 5 comma-separated fields which are
    forwarded to set_temperature_and_humidity. The reply is either a pending
    command from get_command or a timestamped echo of the raw data.
    """
    print("start listen !!!!!!!!!!!!!!!!!!!!!!")
    myname = gethostname()
    print(myname)
    myaddr = gethostbyname(myname)
    print(myname + ", " + myaddr)
    print("start listen !!!!!!!!!!!!!!!!!!!!!! now !!!!!")
    tcpSerSock = socket(AF_INET, SOCK_STREAM)
    tcpSerSock.bind(ADDR)
    tcpSerSock.listen(5)
    try:
        # Serve one client at a time, forever; Ctrl-C falls through to the
        # `finally` and closes the listening socket.
        while True:
            print('waiting for connection...')
            tcpCliSock, addr = tcpSerSock.accept()
            print('...connnecting from:', addr)
            while True:
                data = tcpCliSock.recv(BUFSIZ)
                if not data:
                    break
                if len(data) < 3:
                    break
                # NOTE(review): str(data)[2:-1] strips the b'...' repr
                # wrapper; this assumes an ASCII payload -- non-ASCII bytes
                # would arrive escaped. A .decode() would behave differently.
                str_data = str(data)[2:-1]
                split_data = str_data.split(',')
                if len(split_data) < 4:
                    break
                if len(split_data) >= 5:
                    set_temperature_and_humidity(split_data[0], split_data[1], split_data[2], split_data[3], split_data[4])
                else:
                    set_temperature_and_humidity(split_data[0], split_data[1], split_data[2], split_data[3])
                # tcpCliSock.send('[%s] %s' %(bytes(ctime(),'utf-8'),data))
                # Prefer sending a queued command; otherwise echo with a timestamp.
                command = get_command(time())
                if command is not None:
                    tcpCliSock.send(('%s' % command).encode())
                else:
                    tcpCliSock.send(('[%s] %s' % (ctime(), data)).encode())
            tcpCliSock.close()
    finally:
        tcpSerSock.close()


if __name__ == '__main__':
    start_listen()
import datetime
import sqlalchemy
from flask_login import UserMixin
from sqlalchemy import orm
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy_serializer import SerializerMixin

from data.db_session import SqlAlchemyBase

# NOTE(review): datetime, orm, and the werkzeug password helpers are unused in
# this module -- possibly kept for symmetry with sibling models; confirm
# before removing.


class Direction(SqlAlchemyBase, UserMixin, SerializerMixin):
    """Admission direction (study programme) table."""
    __tablename__ = 'direction'

    # surrogate primary key
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    # faculty the direction belongs to
    facult = sqlalchemy.Column(sqlalchemy.String)
    # unique human-readable name of the direction
    name = sqlalchemy.Column(sqlalchemy.String, unique=True)
    budget_places = sqlalchemy.Column(sqlalchemy.Integer)
    is_budget = sqlalchemy.Column(sqlalchemy.Boolean)
    # is_fiz / is_inf: presumably whether physics / informatics are required
    # entrance subjects -- verify against the code that sets them
    is_fiz = sqlalchemy.Column(sqlalchemy.Boolean)
    is_inf = sqlalchemy.Column(sqlalchemy.Boolean)
    subjects = sqlalchemy.Column(sqlalchemy.String)
# Chat server
import socket, select


# Function to broadcast chat messages to all connected clients
def broadcast_data(sender, message):
    """Send *message* to every client except the server socket and *sender*.

    Broken connections are closed and dropped from connection_list.
    """
    byte_message = bytes(message, 'UTF-8')
    # BUG FIX: iterate over a snapshot -- the original mutated
    # connection_list while iterating it, which skips elements.
    # (Also: the original loop variable shadowed the `socket` module.)
    for client in list(connection_list):
        # Do not send the message to master socket
        # and the client who has send us the message
        if client is not server_socket and client is not sender:
            try:
                client.send(byte_message)
            except Exception:
                # handle broken socket connections
                client.close()
                connection_list.remove(client)


if __name__ == "__main__":
    # List to keep track of socket descriptors
    connection_list = []
    # Remember each client's address for disconnect messages.
    peer_addresses = {}
    PORT = 8888
    RECV_BUFFER = 4096

    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(("0.0.0.0", PORT))
    server_socket.listen(10)

    # Add server socket to the list of readable connections
    connection_list.append(server_socket)

    print("Chat server started on port %s" % str(PORT))

    server_running = True
    while server_running:
        # Get the list sockets which are ready to be read through select
        read_sockets = select.select(connection_list, [], [])[0]

        for sock in read_sockets:
            # New connection
            if sock is server_socket:
                # A new connection recieved through server_socket
                new_sock, addr = server_socket.accept()
                connection_list.append(new_sock)
                peer_addresses[new_sock] = addr
                print("Client (%s, %s) connected" % addr)
                broadcast_data(new_sock, "[%s:%s] entered room\n" % addr)
            # Incoming message from a client
            else:
                try:
                    data = sock.recv(RECV_BUFFER).decode('utf-8')
                except Exception:
                    # BUG FIX: the original reported the *last accepted*
                    # client's address here; use the failing socket's own.
                    addr = peer_addresses.pop(sock, ('?', '?'))
                    broadcast_data(sock, "Client (%s, %s) is offline\n" % addr)
                    print("Client (%s, %s) is offline" % addr)
                    sock.close()
                    connection_list.remove(sock)
                    continue
                if data:
                    formatted_message = "<{0}> {1}".format(sock.getpeername()[0], data)
                    print(formatted_message)
                    broadcast_data(sock, formatted_message)
                # NOTE(review): an empty recv (clean client close) leaves the
                # dead socket in connection_list, as in the original --
                # consider removing it here as well.

    server_socket.close()
from ._CBUS import * from ._Kylin import * from ._PosCalib import * from ._Sonar import * from ._VirtualRC import * from ._ZGyro import *
# 3.4 -- invite the initial guest list
guests = ['John', 'Sam', 'Tim']
for guest in guests:
    print(f"{guest}, you are invited for dinner")

# 3.5 -- one guest cancels; replace them and re-invite everyone
guests = ['John', 'Sam', 'Tim']
print(f"{guests[0]} can't make it")
guests[0] = "Michael"
for guest in guests:
    print(f"{guest}, you are invited for dinner")

# 3.6 -- bigger venue found; add three more guests
guests = ['John', 'Sam', 'Tim']
#print(f"{guests[0]} can't make it")
guests[0] = "Michael"
for guest in guests:
    print(f"{guest}, I have found a bigger space")
guests.insert(0, 'Trevor')
guests.insert(2, 'Joe')
guests.append('Ben')
for guest in guests:
    print(f"{guest}, you are invited for dinner")

# 3.7 -- table shrinks to two seats; uninvite four, keep two, then empty the list
guests = ['John', 'Sam', 'Tim']
#print(f"{guests[0]} can't make it")
guests[0] = "Michael"
guests.insert(0, 'Trevor')
guests.insert(2, 'Joe')
guests.append('Ben')
print("I can only invite two people for dinner")
for _ in range(4):
    removed = guests.pop()
    print(f"{removed}, I'm sorry I can't invite you for dinner")
for guest in guests:
    print(f"{guest}, you are still invited")
del guests[0]
del guests[0]
print(guests)
Import("*")  # SCons: pulls exported build variables (presumably incl. `platform`) into scope
import glob, os, SCons
import platform as p

# Base compiler/linker settings shared by every build type.
CCFLAGS=['-Wall','-fPIC']
LINKFLAGS=[]
CPPDEFINES=[]
OPTIMIZE=['-O3', '-fno-strict-aliasing']

# Build flavor selected on the command line: scons type=<debug|release|gprof|efence|default>
buildtype = ARGUMENTS.get('type', 'default')

if buildtype == "debug":
    CCFLAGS.append('-g')
    LINKFLAGS.append('-g')
    CCFLAGS.append('-DDEBUG=1')
elif buildtype == 'release':
    CPPDEFINES.append('NDEBUG')
    CCFLAGS.append(OPTIMIZE)
elif buildtype == 'gprof':
    # profiling build: instrument for gprof
    CCFLAGS.append('-g')
    CCFLAGS.append('-pg')
    LINKFLAGS.append('-pg')
elif buildtype == 'efence':
    # link against Electric Fence for heap-overrun debugging
    CCFLAGS.append('-g')
    LINKFLAGS.append('-lefence')
elif buildtype == 'default':
    # optimized build with debug info and logging enabled
    CCFLAGS.append('-g')
    CCFLAGS.append(OPTIMIZE)
    LINKFLAGS.append('-g')
    CCFLAGS.append('-DLOGGING_ENABLED=1')
else:
    # Python 2 print statements: this SConstruct targets a Python 2 SCons.
    print "ERROR:"
    print "ERROR: Unrecognized build type : '"+buildtype+"'";
    failed=1  # NOTE(review): `failed` is never read; the build continues anyway -- confirm intended

#linux only
#
# By default, we are building on linux
#
LINKFLAGS.append('-rdynamic')
LINKFLAGS.append('-Wall')
# NOTE(review): `platform` (unqualified) must come from Import("*") above --
# it is not the `platform` module, which was imported as `p`.
LINKFLAGS.append('-Lbuild/' + platform + '/c-ares/lib/')
LINKFLAGS.append('-Lbuild/' + platform + '/libav-0.7.3/lib/')
#LINKFLAGS.append('-lrt') # Does not work on ubuntu 11.10 - needs to be after -lcares

# We've only so far enforced this on Linux based compiles, not Mac OS X yet.
#
CCFLAGS.append('-Werror')
CCFLAGS.append('-Wno-unused-result')
CCFLAGS.append('-Ibuild/' + platform + '/libav-0.7.3/include/')

CC='gcc'
CXX='g++'
AR='ar'
RANLIB='ranlib'

#LIBPATH missing
env = Environment(CPPPATH=['#'], CCFLAGS=CCFLAGS, LINKFLAGS=LINKFLAGS, CPPDEFINES=CPPDEFINES, CXXFLAGS="-std=c++0x", CC=CC, CXX=CXX, AR=AR, RANLIB=RANLIB)

progs = []
Export( ["env", "progs", "CCFLAGS"] )
SConscript( "VET/SConscript" )
from peewee import *

from model.account import Account
from model.base_entity import BaseEntity


class Folder(BaseEntity):
    """A mail folder owned by an Account, optionally nested under a parent Folder."""

    # Folder display name.
    name = TextField()
    # Self-referential FK: the containing folder, or NULL for a top-level folder.
    parent = ForeignKeyField('self', null=True, related_name='folders')
    # Owning account; Account instances gain a `folders` backref.
    account = ForeignKeyField(Account, related_name="folders")
    # Defaults to True — presumably marks whether this folder's emails are
    # tracked/synced; confirm against the code that reads this flag.
    with_emails = BooleanField(default=True)

    class Meta:
        # Explicit DB table name.
        db_table = 'folders'
        # Composite UNIQUE index: a folder name must be unique within the same
        # (parent, account) pair. The trailing True marks the index as unique.
        indexes = ((('name', 'parent', 'account'), True),)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author :AmosWu
# Date :2019/1/26
# Features : Print all daffodils number on the screen


def is_daffodil(n):
    """Return True if the three-digit number n is a daffodil (Armstrong)
    number: equal to the sum of the cubes of its decimal digits."""
    # divmod avoids the float round-trip of int(n / 100) used previously.
    hundreds, rest = divmod(n, 100)
    tens, ones = divmod(rest, 10)
    return n == hundreds ** 3 + tens ** 3 + ones ** 3


# Scan all three-digit numbers and print each daffodil number on its own line.
for i in range(100, 1000):
    if is_daffodil(i):
        print('%d' % i)
"""
Example 3-1
===========

Finding Factorial with Recursion

Author: Adnan Umer <u.adnan@outlook.com>
"""


def fact(n):
    """Return the factorial of n, computed recursively.

    The base case n <= 1 yields 1; otherwise n! = n * (n - 1)!.
    """
    return 1 if n <= 1 else n * fact(n - 1)


# Demonstrate the function on 6.
fact6 = fact(6)
print('Factorial of 6 is', fact6)
import pytest

from hls2dash.lib import MPDRepresentation


def test_segment_duration_precision():
    """Segment durations near 4.64s must round to the same timescale ticks."""
    obj = MPDRepresentation.Segment(4.64, False)
    obj.setTimescale(48000)
    assert obj.asXML() == '        <S d="222720" />\n'
    # FIX: these assertions previously tested `obj` again, so the
    # almost-4.64 segment (the whole point of the test) was never checked.
    obj2 = MPDRepresentation.Segment(4.63999375, False)
    obj2.setTimescale(48000)
    assert obj2.asXML() == '        <S d="222720" />\n'
    assert obj2.asXML() != '        <S d="222719" />\n'
#!/usr/bin/env python3
'''
Author: Alexander Roth
Date: 2016-03-13
'''
import sys


def main(args):
    """For each line of the input file, print the intersection of the two
    integer sets encoded on that line."""
    in_file = args[1]
    # Iterate the generator directly instead of the manual
    # next()/StopIteration loop used previously.
    for line in read_file(in_file):
        first_set, second_set = gen_sets(line)
        print_result(check_sets(first_set, second_set))


def read_file(filename):
    """Yield each line of *filename*, stripped of surrounding whitespace."""
    with open(filename, 'r') as in_file:
        for line in in_file:
            yield line.strip()


def gen_sets(line):
    """Parse a line of the form "a,b,...;x,y,..." into [first_set, second_set].

    Comma-separated integers before the ';' form the first set; the token
    containing ';' contributes its left half to the first set and its right
    half starts the second set, which collects the remaining integers.
    """
    master = []
    current = []
    for item in line.split(','):
        if item.isdigit():
            current.append(int(item))
        else:
            end, begin = item.split(';')
            current.append(int(end))
            master.append(set(current))
            current = [int(begin)]
    # FIX: the second set was previously snapshotted immediately after the
    # ';' token, so every integer after the split point was silently dropped
    # (e.g. "1,2;3,4" produced [{1, 2}, {3}]). Append it once the remaining
    # items have been collected.
    master.append(set(current))
    return master


def check_sets(first_set, second_set):
    """Return the intersection of the two sets."""
    return first_set & second_set


def print_result(result):
    """Print the intersection as comma-separated integers, or a blank line
    when it is empty."""
    if result:
        print(','.join(str(i) for i in result))
    else:
        print()


def print_arguments(arg):
    """Print usage and exit with a non-zero status."""
    print('python3 {0} <file>'.format(arg))
    sys.exit(1)


if __name__ == '__main__':
    if len(sys.argv) == 2:
        main(sys.argv)
    else:
        print_arguments(sys.argv[0])
from hx3dtoolkit.config import config
import shutil
import os


class RemoveDirectoryCommand:
    """Command object that deletes a directory tree, if it exists.

    Emits '[D] ...' diagnostics when config.debug_mode is enabled.
    """

    def __init__(self, directory, **options):
        # Path of the directory to remove.
        self.directory = directory
        # Extra options are stored but not used here — presumably consumed by
        # callers or sibling commands; confirm against the command runner.
        self.options = options

    def execute(self):
        """Remove self.directory recursively; missing directory is a no-op.

        Errors from the removal itself are suppressed (ignore_errors=True).
        Returns 0 — apparently an exit/status code convention; confirm how
        the command runner interprets it.
        """
        if os.path.exists(self.directory):
            if config.debug_mode:
                print('[D] Removing `{}`'.format(self.directory))
            shutil.rmtree(self.directory, ignore_errors=True)
        else:
            if config.debug_mode:
                print('[D] Directory {} does not exist. Not removing...'.format(self.directory))
        return 0