seq_id
string
text
string
repo_name
string
sub_path
string
file_name
string
file_ext
string
file_size_in_byte
int64
program_lang
string
lang
string
doc_type
string
stars
int64
dataset
string
pt
string
api
list
34921941272
import numpy as np import pandas as pd import torch import pickle import random import re from tqdm import tqdm from transformers.modeling_utils import PoolerAnswerClass data_path={ 'mfc': '/data/news/mfc/', 'gvfc': '/data/news/GVFC/GVFC/GVFC_headlines_and_annotations.xlsx', 'twitter': '/data/tweet/twitter/', 'immi': '/data/tweet/immi/', 'fora': '/data/debate/issue_framing/data/dialogue/' } all_issue_map={ 'climate':0, 'deathpenalty':1, 'guncontrol':2, 'immigration':3, 'samesex':4, 'tobacco':5, 'aca':6, 'abort':7, 'immig':3, 'isis':8, 'guns':2, 'lgbt':4 } def save_obj(obj, name): with open('obj' + name + '.pkl', 'wb+') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name,path=None): if path is None: with open('obj' + name + '.pkl', 'rb') as f: return pickle.load(f) else: with open(path+'obj' + name + '.pkl', 'rb') as f: return pickle.load(f) def clean(text): pattern1='(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]' pattern2='@([^@ ]*)' pattern3='pic.twitter.com/.*' text=re.sub(pattern1,' [URL]',text) text=re.sub(pattern2,' [MENTION]',text) text=re.sub(pattern3,' [PIC]',text) text=re.sub('\xa0','',text) return text def split_data(data, issue_prepare=False): n=len(data['text']) idx=list(range(n)) random.shuffle(idx) if issue_prepare: train,dev,test={"text": [], "label": [],'issue':[]},{"text": [], "label": [],'issue':[]},{"text": [], "label": [],'issue':[]} else: train,dev,test={"text": [], "label": []},{"text": [], "label": []},{"text": [], "label": []} for i in idx[:int(0.7*n)]: train['text'].append(data['text'][i]) train['label'].append(data['label'][i]) if issue_prepare: train['issue'].append(data['issue'][i]) for i in idx[int(0.7*n):int(0.9*n)]: dev['text'].append(data['text'][i]) dev['label'].append(data['label'][i]) if issue_prepare: dev['issue'].append(data['issue'][i]) for i in idx[int(0.9*n):]: test['text'].append(data['text'][i]) test['label'].append(data['label'][i]) if issue_prepare: 
test['issue'].append(data['issue'][i]) return train,dev,test def split_data_for_fewshot(data, num_class, shot=1): n=len(data['text']) idx=list(range(n)) random.shuffle(idx) train,dev,test={"text": [], "label": []},{"text": [], "label": []},{"text": [], "label": []} label_set=list(range(num_class)) used=[] label_dict={l:[] for l in label_set} for i in idx: for j in range(num_class): if data['label'][i][0][j]==1: label_dict[j].append(i) print({k:len(label_dict[k]) for k in label_dict}) for l in label_dict: if len(label_dict[l])>shot: k,j=0,0 while k<shot: if j<len(label_dict[l]) and label_dict[l][j] not in used: train['text'].append(data['text'][label_dict[l][j]]) train['label'].append(data['label'][label_dict[l][j]]) used.append(label_dict[l][j]) k+=1 j+=1 else: j+=1 if j>len(label_dict[l]):break else: for j in range(len(label_dict[l])): if label_dict[l][j] not in used: train['text'].append(data['text'][label_dict[l][j]]) train['label'].append(data['label'][label_dict[l][j]]) used.append(label_dict[l][j]) if len(label_dict[l])>2*shot: k,j=0,0 while k<shot: if j<len(label_dict[l]) and label_dict[l][j] not in used: dev['text'].append(data['text'][label_dict[l][j]]) dev['label'].append(data['label'][label_dict[l][j]]) used.append(label_dict[l][j]) k+=1 j+=1 else: j+=1 if j>len(label_dict[l]):break else: for j in range(len(label_dict[l])): if label_dict[l][j] not in used: dev['text'].append(data['text'][label_dict[l][j]]) dev['label'].append(data['label'][label_dict[l][j]]) used.append(label_dict[l][j]) return train,dev def read_mfc_issue(path='/remote-home/xymou/Frame/framework/data/news/mfc/', data_type='article', issue='climate'): if data_type=='article': data=load_obj('article_data_multi',path) elif data_type=='sentence': data=load_obj('sentence_data_multi',path) else: raise Exception('Undefined data type! 
Choose from [article, sentence]') return data[issue] def read_mfc(path='/remote-home/xymou/Frame/framework/data/news/mfc/', data_type='article', issue='all',issue_prepare=False): print('Reading data from MFC dataset!') if issue!='all': issues= [issue] else: issues= ['climate', 'deathpenalty', 'guncontrol', 'immigration', 'samesex', 'tobacco'] if issue_prepare: data = {'text':[], 'label':[], 'issue':[]} else: data = {'text':[], 'label':[]} for i in issues: tmp = read_mfc_issue(path, data_type, issue=i) data['text'].extend(tmp['text']) data['label'].extend(tmp['label']) if issue_prepare: data['issue'].extend([all_issue_map[i]]*len(tmp['label'])) return data def read_gvfc(path='/remote-home/xymou/Frame/framework/data/news/GVFC/GVFC/GVFC_headlines_and_annotations.xlsx'): print('Reading data from GVFC dataset!') df=pd.read_excel(path) data,label=[],[] for i in tqdm(range(len(df))): text=df.loc[i,'news_title'] if df.loc[i,'Q1 Relevant']==1 and df.loc[i,'Q3 Theme1']!=99: data.append(text.lower()) tmp=[df.loc[i,'Q3 Theme1']-1] if df.loc[i,'Q3 Theme2']!=99: tmp.append(df.loc[i,'Q3 Theme2']-1) label.append(tmp) return {"text": data, "label": label} def read_twitter_issue(path='/remote-home/xymou/Frame/sample_test/Weakly/', issue='aca'): tweet_processed=load_obj('tweet_processed', path) label_map={ 0:0,1:1,2:2,3:3,4:4,5:6,6:7,7:8,8:9,9:10,10:11,11:12,12:5,13:13 } text,label=[],[] for key in tweet_processed: if issue in tweet_processed[key]['issue']: tmp=tweet_processed[key]['text'] tmp=clean(tmp.lower()) tmp=re.sub('\#','',tmp) res = [label_map[k-1] for k in tweet_processed[key]['frame'] if k not in [15,16,17]] if len(res): label.append(res) text.append(tmp) return {'text':text,'label':label} def read_twitter(path='/remote-home/xymou/Frame/sample_test/Weakly/', issue='all', issue_prepare = True): print('Reading data from Twitter-framing dataset!') if issue == 'all': issues=['aca','abort','immig','isis','guns','lgbt'] else: issues = [issue] if issue_prepare: data = {'text':[], 
'label':[], 'issue':[]} else: data = {'text':[], 'label':[]} for i in issues: tmp = read_twitter_issue(path, i) for j in range(len(tmp['label'])): data['text'].append(tmp['text'][j]) data['label'].append(tmp['label'][j]) if issue_prepare: data['issue'].append(all_issue_map[i]) return data def read_immi(path='/remote-home/xymou/Frame/framework/data/tweet/immi/' , issue='issue_specific', issue_prepare=False): #这里的issue其实是ftype print('Reading data from immigration twitter dataset!') text, label = [],[] data = load_obj(issue, path) if issue=='issue_generic': labels = ['Cultural Identity','Capacity and Resources','Security and Defense','Quality of Life', 'Crime and Punishment','Policy Prescription and Evaluation','Morality and Ethics','External Regulation and Reputation','Health and Safety', 'Political Factors and Implications','Public Sentiment','Economic','Fairness and Equality','Legality, Constitutionality, Jurisdiction' ] else: labels = ['Victim: Global Economy','Threat: Fiscal', 'Hero: Cultural Diversity', 'Threat: Public Order', 'Threat: Jobs', 'Victim: Humanitarian', 'Threat: National Cohesion','Hero: Integration','Victim: Discrimination','Victim: War','Hero: Worker'] label_map={l:labels.index(l) for l in labels} for i in range(len(data['text'])): text.append(clean(data['text'][i].lower())) label.append([label_map[k] for k in data['label'][i]]) if issue == 'issue_generic' and issue_prepare: return {'text':text, 'label':label, 'issue':[all_issue_map['immigration']]*len(label)} return {'text':text,'label':label} def read_fora(path='/remote-home/xymou/Frame/framework/data/debate/issue_framing/data/dialogue/'): print('Reading data from Fora dataset!') text, label=[],[] data = load_obj('fora_data',path) for i in range(len(data['label'])): text.append(data['text'][i]) label.append(data['label'][i]) return {'text':text, 'label':label} data_func_map={ 'mfc':read_mfc, 'gvfc':read_gvfc, 'twitter':read_twitter, 'immi':read_immi, 'fora':read_fora } def read_data(config): 
dataset = config['dataset'] if dataset not in data_func_map: raise KeyError('Current dataset is not mapped to data_read function! Please define the read data function for this dataset!') func = data_func_map[dataset] config.pop('dataset') if dataset != 'mfc': config.pop('data_type') if dataset in ['gvfc','fora']: config.pop('issue') config['path'] = data_path[dataset] return func(**config) def convert_to_one_hot(label, label_num): print('# of labels:', label_num) res= [] for l in label: tmp=[0]*label_num for i in range(len(l)): tmp[l[i]]=1 res.append(torch.tensor(tmp, dtype=torch.float32).view(1,-1)) return res from torch.utils.data import Dataset class mydata(Dataset): def __init__(self, data, tokenizer, padding_idx=0, max_len=None): self.text_lengths=[len(seq) for seq in data['text']] self.max_len=max_len if self.max_len is None: self.max_len=max(self.text_lengths) self.num_sequences = len(data["text"]) self.data=data for i in range(len(data['text'])): data['text'][i] = tokenizer.encode(data['text'][i], max_length=self.max_len) def __len__(self): return self.num_sequences def __getitem__(self, index): return {"text":self.data['text'][index], "label":self.data['label'][index], "issue":self.data['issue'][index]}
xymou/Frame_Detection
data/load_data_multi.py
load_data_multi.py
py
11,117
python
en
code
1
github-code
36
[ { "api_name": "pickle.dump", "line_number": 37, "usage_type": "call" }, { "api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 37, "usage_type": "attribute" }, { "api_name": "pickle.load", "line_number": 43, "usage_type": "call" }, { "api_name": "pickle.load", ...
28580043069
from django.shortcuts import render from rest_framework import generics from rest_framework.permissions import IsAuthenticated from order.models import Order from order.serializers.order import OrderSerializer class OrderView(generics.ListCreateAPIView): queryset = Order.objects.all() serializer_class = OrderSerializer permission_classes = [IsAuthenticated] def get_queryset(self): user = self.request.user queryset = Order.objects.filter(created_by=user) return queryset class OrderDetails(generics.RetrieveUpdateAPIView): queryset = Order.objects.all() serializer_class = OrderSerializer permission_classes = [IsAuthenticated] def get_queryset(self): user = self.request.user queryset = Order.objects.filter(created_by=user) return queryset def orders(request): orders = Order.objects.filter(created_by=request.user) print(orders) return render(request, "home/orders.html", {"orders": orders})
PROFabdalla/payment_app
order/views.py
views.py
py
999
python
en
code
0
github-code
36
[ { "api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 9, "usage_type": "attribute" }, { "api_name": "rest_framework.generics", "line_number": 9, "usage_type": "name" }, { "api_name": "order.models.Order.objects.all", "line_number": 10, "usage_type": "c...
448240955
# -*- coding: utf-8 -*- """ Spyder Editor This is a temporary script file. """ import numpy as np import keras from keras.models import Model from keras.layers import Dense,Activation,Input from keras.callbacks import ModelCheckpoint X = np.random.normal(0,1,(100,8)) Y = np.random.normal(0,1,(100,1)) X.shape batch = 32 valX,valY = np.random.normal(0,1,(100,8)),np.random.normal(0,1,(100,1)) class LossHistory(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.losses = [] self.val_loss=[] self.weights= [] def on_epoch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) self.val_loss.append(logs.get('val_loss')) self.weights.append(self.model.get_weights()) name = 'weights'+'_'+str(batch)+'.h5' self.model.save_weights(name) def keras_models(X,Y,kernel_init = 'random_uniform',output_activation = 'tanh',input_activation = 'relu', validation_data = [valX,valY]): losses = LossHistory() ip = Input(batch_shape=(batch,X.shape[1])) layer1 = Dense(32, kernel_initializer=kernel_init)(ip) layer2 = Activation(input_activation)(layer1) out = Dense(Y.shape[1],activation = output_activation)(layer2) model = Model(inputs = ip,output = out) model.compile(optimizer='adam',loss = 'mean_squared_error') filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True) callbacks_list = [losses]#,checkpoint] model.fit(X,Y,validation_data=validation_data,batch_size=batch,epochs=100,callbacks=callbacks_list,verbose=1) return model,losses model = keras_models(X,Y,kernel_init = 'random_uniform',output_activation = 'tanh',input_activation = 'relu', validation_data = [valX,valY])
avilin66/Pyspark_codes
keras_basic_model.py
keras_basic_model.py
py
1,902
python
en
code
1
github-code
36
[ { "api_name": "numpy.random.normal", "line_number": 14, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 14, "usage_type": "attribute" }, { "api_name": "numpy.random.normal", "line_number": 15, "usage_type": "call" }, { "api_name": "numpy.rando...
19337719477
import pygame import math from global_timer import TimedObject class BossDeathCutscene(TimedObject): def __init__(self, map_center_pos, boss) -> None: self.lifetime = 999990 self.map_center_pos = map_center_pos self.boss = boss self.boss.global_timer.attach(self) self.boss.in_active_spell_action = True self.boss.defeated = True self.boss_in_map_center = False self.emotion_was_sayed = False self.emotion_was_sayed2 = False #--------- self.wave_points = [] self.wave_segment_size = 3 def timer_tick(self): self.boss.in_active_spell_action = True if self.boss_in_map_center == True: self.lifetime -= 1 if self.lifetime <= 40: self.boss.center_pos = [9999,9999] font = pygame.font.SysFont('arial', 40) score = font.render("You win!", True, (255, 255, 255)) self.boss.surface.blit(score, (300, 200)) self.boss.snake.food.hide() if self.lifetime <= 15: if self.emotion_was_sayed2 == False: self.emotion_was_sayed2 = True self.boss.aniki.play() self.boss.center_pos = [9999,9999] font = pygame.font.SysFont('arial', 20) score = font.render("Thank you for your attention", True, (255, 255, 255)) self.boss.surface.blit(score, (300, 260)) #-------- self.boss.center_pos = [9999,9999] font = pygame.font.SysFont('arial', 20) score = font.render("created by Jordenskraften", True, (255, 255, 255)) self.boss.surface.blit(score, (300, 290)) else: if self.lifetime %3 == 0: self.boss.color = (255,0,0) else: self.boss.color = (125,0,0) #взрываем её else: #если босс дошел до центра if ( abs(self.boss.center_pos[0] - self.map_center_pos[0]) <= 1 and abs(self.boss.center_pos[1] - self.map_center_pos[1]) <= 1 ): self.boss_in_map_center = True self.lifetime = 110 if self.emotion_was_sayed == False: self.emotion_was_sayed = True self.boss.boss_death.play() self.boss.create_floating_text("Okay, you got me!", True) self.boss.snake.food.hide() #print("boss in center") else: #тута двигаем босса к центру d_x = self.map_center_pos[0] - self.boss.center_pos[0] d_y = self.map_center_pos[1] - 
self.boss.center_pos[1] distance = math.sqrt(d_x**2 + d_y**2) step_x = d_x / distance * 1.5 step_y = d_y / distance * 1.5 dir = (step_x,step_y) self.boss.center_pos = [ self.boss.center_pos[0] + dir[0], self.boss.center_pos[1] + dir[1] ] self.boss.snake.food.hide() if len(self.wave_points) >= 1: for p in self.wave_points: self.move_wave_segment(p) self.draw_wave_segment(p) self.check_for_snakes_bodies_collision(p) def enter(): pass def death(self): self.boss.global_timer.detach(self) self.boss.in_active_spell_action = True self.boss.defeated = True self.boss.base_abilities_cd = 999999 self.boss.active_abilities_cd = 999999 self.boss.boss_ultimate_ability_cd = 999999 self.boss.minions_cd = 999999 del(self)
jordenskraften/snake-python
boss_death_cutscene.py
boss_death_cutscene.py
py
3,964
python
en
code
0
github-code
36
[ { "api_name": "global_timer.TimedObject", "line_number": 4, "usage_type": "name" }, { "api_name": "pygame.font.SysFont", "line_number": 27, "usage_type": "call" }, { "api_name": "pygame.font", "line_number": 27, "usage_type": "attribute" }, { "api_name": "pygame.f...
27048798228
import zedlib import pygame import math class GameSprite: def __init__(self, image, x, y): self.image = image self.rect = self.image.get_rect() self.position = zedlib.Position(x, y) self.x_acceleration = 0.0 self.y_acceleration = 0.0 self.x_velocity = 0.0 self.y_velocity = 0.0 self.max_y_velocity = None self.max_x_velocity = None self.move_x = 0.0 self.move_y = 0.0 self.update_rect_x() self.update_rect_y() def draw(self, surface, camera = None): """ Draw image on a given surface, a zedlib.Camera can also be used """ if camera: surface.blit(self.image, camera.apply(self.rect)) else: surface.blit(self.image, self.rect) def update_rect_x(self): """ Update x position of the rect, from self.position """ self.rect.x = self.position.get_position()[0] def update_rect_y(self): """ Update y position of the rect, from self.position """ self.rect.y = self.position.get_position()[1] def update_movement(self, collisions=[]): """ Update the position of rect and handle collisions """ self.apply_acceleration() if self.move_x and self.move_y: movement = self.get_diagonal_movement(math.fabs(self.move_x)) self.move_x = math.copysign(movement[0], self.move_x) self.move_y = math.copysign(movement[1], self.move_y) self.move_x += self.x_velocity self.position.move_x(self.move_x) self.handle_horizonal_collisions(collisions) self.move_x = 0.0 self.move_y += self.y_velocity self.position.move_y(self.move_y) self.handle_vertical_collisions(collisions) self.move_y = 0.0 def apply_acceleration(self): self.x_velocity += self.x_acceleration self.y_velocity += self.y_acceleration if self.max_x_velocity: if self.x_velocity > self.max_x_velocity: self.x_velocity = self.max_x_velocity if self.max_y_velocity: if self.y_velocity > self.max_y_velocity: self.y_velocity = self.max_y_velocity def handle_horizonal_collisions(self, collisions): """ Stop rect from moving through collisions horizontally """ self.update_rect_x() collision_objects = pygame.sprite.spritecollide(self, collisions, False) for 
collision_obj in collision_objects: collision_obj.horizontal_collide(self) self.position.set_x(self.rect.x) if collision_objects: self.collision_occured() def handle_vertical_collisions(self, collisions): """ Stop rect from moving through collisions vertically """ self.update_rect_y() collision_objects = pygame.sprite.spritecollide(self, collisions, False) for collision_obj in collision_objects: collision_obj.vertical_collide(self) self.position.set_y(self.rect.y) if collision_objects: self.collision_occured() def collision_occured(self): """ Called when sprite has collided with an object """ pass def get_diagonal_movement(self, speed): """ Reduce diagonal movement to be equal to normal movement speed """ move_speed = math.sqrt( (speed*speed)/2.0 ) return (move_speed, move_speed)
JoeZlonicky/ZedLib
zedlib/game_sprite.py
game_sprite.py
py
3,403
python
en
code
0
github-code
36
[ { "api_name": "zedlib.Position", "line_number": 10, "usage_type": "call" }, { "api_name": "math.fabs", "line_number": 43, "usage_type": "call" }, { "api_name": "math.copysign", "line_number": 44, "usage_type": "call" }, { "api_name": "math.copysign", "line_num...
69954683303
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2020/3/21 0021 # @Author : justin.郑 3907721@qq.com # @File : covid.py # @Desc : 获取疫情数据 import json import time import demjson import jsonpath import requests import pandas as pd from io import BytesIO from PIL import Image from bs4 import BeautifulSoup def covid_163(indicator="实时"): """ 网易-新冠状病毒 https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&#map_block https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other& :return: 返回指定 indicator 的数据 :rtype: pandas.DataFrame """ url = "https://c.m.163.com/ug/api/wuhan/app/data/list-total" headers = { "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36", } payload = { "t": int(time.time() * 1000), } r = requests.get(url, params=payload, headers=headers) data_json = r.json() # data info url = "https://news.163.com/special/epidemic/" r = requests.get(url, headers=headers) soup = BeautifulSoup(r.text, "lxml") data_info_df = pd.DataFrame( [ item.text.strip().split(".")[1] for item in soup.find("div", attrs={"class": "data_tip_pop_text"}).find_all( "p" ) ] ) data_info_df.columns = ["info"] # 中国历史时点数据 hist_today_df = pd.DataFrame( [item["today"] for item in data_json["data"]["chinaDayList"]], index=[item["date"] for item in data_json["data"]["chinaDayList"]], ) # 中国历史累计数据 hist_total_df = pd.DataFrame( [item["total"] for item in data_json["data"]["chinaDayList"]], index=[item["date"] for item in data_json["data"]["chinaDayList"]], ) # 中国实时数据 current_df = pd.DataFrame.from_dict(data_json["data"]["chinaTotal"]) # 世界历史时点数据 outside_today_df = pd.DataFrame( [item["today"] for item in data_json["data"]["areaTree"]], index=[item["name"] for item in data_json["data"]["areaTree"]], ) # 世界历史累计数据 outside_total_df = pd.DataFrame( [item["total"] for item in data_json["data"]["areaTree"]], index=[item["name"] for item in 
data_json["data"]["areaTree"]], ) # 全球所有国家及地区时点数据 all_world_today_df = pd.DataFrame( jsonpath.jsonpath(data_json["data"]["areaTree"], "$..today"), index=jsonpath.jsonpath(data_json["data"]["areaTree"], "$..name"), ) # 全球所有国家及地区累计数据 all_world_total_df = pd.DataFrame( jsonpath.jsonpath(data_json["data"]["areaTree"], "$..total"), index=jsonpath.jsonpath(data_json["data"]["areaTree"], "$..name"), ) # 中国各地区时点数据 area_total_df = pd.DataFrame( [item["total"] for item in data_json["data"]["areaTree"][0]["children"]], index=[item["name"] for item in data_json["data"]["areaTree"][0]["children"]], ) # 中国各地区累计数据 area_today_df = pd.DataFrame( [item["today"] for item in data_json["data"]["areaTree"][0]["children"]], index=[item["name"] for item in data_json["data"]["areaTree"][0]["children"]], ) # 疫情学术进展 url_article = "https://vip.open.163.com/api/cms/topic/list" payload_article = { "topicid": "00019NGQ", "listnum": "1000", "liststart": "0", "pointstart": "0", "pointend": "255", "useproperty": "true", } r_article = requests.get(url_article, params=payload_article) article_df = pd.DataFrame(r_article.json()["data"]).iloc[:, 1:] # 咨询 url_info = "https://ent.163.com/special/00035080/virus_report_data.js" payload_info = { "_": int(time.time() * 1000), "callback": "callback", } r_info = requests.get(url_info, params=payload_info, headers=headers) data_info_text = r_info.text data_info_json = demjson.decode(data_info_text.strip(" callback(")[:-1]) if indicator == "数据说明": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return data_info_df if indicator == "中国实时数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return current_df if indicator == "中国历史时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return hist_today_df if indicator == "中国历史累计数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return hist_total_df if indicator == "世界历史时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return outside_today_df if indicator == "世界历史累计数据": 
print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return outside_total_df if indicator == "全球所有国家及地区时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return all_world_today_df elif indicator == "全球所有国家及地区累计数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return all_world_total_df elif indicator == "中国各地区时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return area_today_df elif indicator == "中国各地区累计数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return area_total_df elif indicator == "疫情学术进展": return article_df elif indicator == "实时资讯新闻播报": return pd.DataFrame(data_info_json["list"]) elif indicator == "实时医院新闻播报": return pd.DataFrame(data_info_json["hospital"]) elif indicator == "前沿知识": return pd.DataFrame(data_info_json["papers"]) elif indicator == "权威发布": return pd.DataFrame(data_info_json["power"]) elif indicator == "滚动新闻": return pd.DataFrame(data_info_json["scrollNews"]) def covid_dxy(indicator="湖北"): """ 20200315-丁香园接口更新分为国内和国外 丁香园-全国统计-info 丁香园-分地区统计-data 丁香园-全国发热门诊一览表-hospital 丁香园-全国新闻-news :param indicator: ["info", "data", "hospital", "news"] :type indicator: str :return: 返回指定 indicator 的数据 :rtype: pandas.DataFrame """ url = "https://3g.dxy.cn/newh5/view/pneumonia" r = requests.get(url) r.encoding = "utf-8" soup = BeautifulSoup(r.text, "lxml") # news-china text_data_news = str( soup.find_all("script", attrs={"id": "getTimelineServiceundefined"}) ) temp_json = text_data_news[ text_data_news.find("= [{") + 2 : text_data_news.rfind("}catch") ] if temp_json: json_data = pd.DataFrame(json.loads(temp_json)) chinese_news = json_data[ ["title", "summary", "infoSource", "provinceName", "sourceUrl"] ] # news-foreign text_data_news = str(soup.find_all("script", attrs={"id": "getTimelineService2"})) temp_json = text_data_news[ text_data_news.find("= [{") + 2 : text_data_news.rfind("}catch") ] json_data = pd.DataFrame(json.loads(temp_json)) foreign_news = json_data # data-domestic data_text = 
str(soup.find("script", attrs={"id": "getAreaStat"})) data_text_json = json.loads( data_text[data_text.find("= [{") + 2 : data_text.rfind("catch") - 1] ) big_df = pd.DataFrame() for i, p in enumerate(jsonpath.jsonpath(data_text_json, "$..provinceName")): temp_df = pd.DataFrame(jsonpath.jsonpath(data_text_json, "$..cities")[i]) temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) domestic_city_df = big_df data_df = pd.DataFrame(data_text_json).iloc[:, :7] data_df.columns = ["地区", "地区简称", "现存确诊", "累计确诊", "-", "治愈", "死亡"] domestic_province_df = data_df[["地区", "地区简称", "现存确诊", "累计确诊", "治愈", "死亡"]] # data-global data_text = str( soup.find("script", attrs={"id": "getListByCountryTypeService2true"}) ) data_text_json = json.loads( data_text[data_text.find("= [{") + 2: data_text.rfind("catch") - 1] ) global_df = pd.DataFrame(data_text_json) # info dxy_static = soup.find(attrs={"id": "getStatisticsService"}).get_text() data_json = json.loads( dxy_static[dxy_static.find("= {") + 2 : dxy_static.rfind("}c")] ) china_statistics = pd.DataFrame( [ time.strftime( "%Y-%m-%d %H:%M:%S", time.localtime(data_json["modifyTime"] / 1000) ), data_json["currentConfirmedCount"], data_json["confirmedCount"], data_json["suspectedCount"], data_json["curedCount"], data_json["deadCount"], data_json["seriousCount"], data_json["suspectedIncr"], data_json["currentConfirmedIncr"], data_json["confirmedIncr"], data_json["curedIncr"], data_json["deadIncr"], data_json["seriousIncr"], ], index=[ "数据发布时间", "现存确诊", "累计确诊", "境外输入", "累计治愈", "累计死亡", "现存重症", "境外输入较昨日", "现存确诊较昨日", "累计确诊较昨日", "累计治愈较昨日", "累计死亡较昨日", "现存重症较昨日", ], columns=["info"], ) foreign_statistics = pd.DataFrame.from_dict( data_json["foreignStatistics"], orient="index" ) global_statistics = pd.DataFrame.from_dict( data_json["globalStatistics"], orient="index" ) # hospital url = ( "https://assets.dxycdn.com/gitrepo/tod-assets/output/default/pneumonia/index.js" ) payload = {"t": str(int(time.time()))} r = requests.get(url, 
params=payload) hospital_df = pd.read_html(r.text)[0].iloc[:, :-1] if indicator == "中国疫情分省统计详情": return domestic_province_df if indicator == "中国疫情分市统计详情": return domestic_city_df elif indicator == "全球疫情分国家统计详情": return global_df elif indicator == "中国疫情实时统计": return china_statistics elif indicator == "国外疫情实时统计": return foreign_statistics elif indicator == "全球疫情实时统计": return global_statistics elif indicator == "中国疫情防控医院": return hospital_df elif indicator == "实时播报": return chinese_news elif indicator == "中国-新增疑似-新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][0]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-现存确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][1]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-现存疑似-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][2]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-治愈-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][3]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-死亡-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][4]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-非湖北新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][0]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-湖北新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][1]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-湖北现存确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][2]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-非湖北现存确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][3]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-治愈-死亡-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][4]["imgUrl"]).content) ) img_file.show() elif 
indicator == "国外-国外新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["foreignTrendChart"][0]["imgUrl"]).content) ) img_file.show() elif indicator == "国外-国外累计确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["foreignTrendChart"][1]["imgUrl"]).content) ) img_file.show() elif indicator == "国外-国外死亡-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["foreignTrendChart"][2]["imgUrl"]).content) ) img_file.show() elif indicator == "国外-重点国家新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][0]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-日本新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][1]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-意大利新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][2]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-伊朗新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][3]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-美国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][4]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-法国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][5]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-德国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][6]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-西班牙新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][7]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-韩国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][8]["imgUrl"] ).content ) ) img_file.show() else: try: data_text = str(soup.find("script", attrs={"id": 
"getAreaStat"})) data_text_json = json.loads( data_text[data_text.find("= [{") + 2 : data_text.rfind("catch") - 1] ) data_df = pd.DataFrame(data_text_json) sub_area = pd.DataFrame( data_df[data_df["provinceName"] == indicator]["cities"].values[0] ) if sub_area.empty: return print("暂无分区域数据") sub_area.columns = ["区域", "现在确诊人数", "确诊人数", "疑似人数", "治愈人数", "死亡人数", "id"] sub_area = sub_area[["区域", "现在确诊人数", "确诊人数", "疑似人数", "治愈人数", "死亡人数"]] return sub_area except IndexError as e: print("请输入省/市的全称, 如: 浙江省/上海市 等") def covid_baidu(indicator="湖北"): """ 百度-新型冠状病毒肺炎-疫情实时大数据报告 https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_1 :param indicator: 看说明文档 :type indicator: str :return: 指定 indicator 的数据 :rtype: pandas.DataFrame """ url = "https://huiyan.baidu.com/openapi/v1/migration/rank" payload = { "type": "move", "ak": "kgD2HiDnLdUhwzd3CLuG5AWNfX3fhLYe", "adminType": "country", "name": "全国", } r = requests.get(url, params=payload) move_in_df = pd.DataFrame(r.json()["result"]["moveInList"]) move_out_df = pd.DataFrame(r.json()["result"]["moveOutList"]) url = "https://opendata.baidu.com/api.php" payload = { "query": "全国", "resource_id": "39258", "tn": "wisetpl", "format": "json", "cb": "jsonp_1580470773343_11183", } r = requests.get(url, params=payload) text_data = r.text json_data_news = json.loads( text_data.strip("/**/jsonp_1580470773343_11183(").rstrip(");") ) url = "https://opendata.baidu.com/data/inner" payload = { "tn": "reserved_all_res_tn", "dspName": "iphone", "from_sf": "1", "dsp": "iphone", "resource_id": "28565", "alr": "1", "query": "肺炎", "cb": "jsonp_1606895491198_93137", } r = requests.get(url, params=payload) json_data = json.loads(r.text[r.text.find("({") + 1 : r.text.rfind(");")]) spot_report = pd.DataFrame(json_data["Result"][0]["DisplayData"]["result"]["items"]) # domestic-city url = "https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_1" r = requests.get(url) soup = BeautifulSoup(r.text, "lxml") data_json = 
demjson.decode(soup.find(attrs={"id": "captain-config"}).text) big_df = pd.DataFrame() for i, p in enumerate( jsonpath.jsonpath(data_json["component"][0]["caseList"], "$..area") ): temp_df = pd.DataFrame( jsonpath.jsonpath(data_json["component"][0]["caseList"], "$..subList")[i] ) temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) domestic_city_df = big_df domestic_province_df = pd.DataFrame(data_json["component"][0]["caseList"]).iloc[ :, :-2 ] big_df = pd.DataFrame() for i, p in enumerate( jsonpath.jsonpath(data_json["component"][0]["caseOutsideList"], "$..area") ): temp_df = pd.DataFrame( jsonpath.jsonpath( data_json["component"][0]["caseOutsideList"], "$..subList" )[i] ) temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) outside_city_df = big_df outside_country_df = pd.DataFrame( data_json["component"][0]["caseOutsideList"] ).iloc[:, :-1] big_df = pd.DataFrame() for i, p in enumerate( jsonpath.jsonpath(data_json["component"][0]["globalList"], "$..area") ): temp_df = pd.DataFrame( jsonpath.jsonpath(data_json["component"][0]["globalList"], "$..subList")[i] ) temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) global_country_df = big_df global_continent_df = pd.DataFrame(data_json["component"][0]["globalList"])[ ["area", "died", "crued", "confirmed", "confirmedRelative"] ] if indicator == "热门迁入地": return move_in_df elif indicator == "热门迁出地": return move_out_df elif indicator == "今日疫情热搜": return pd.DataFrame(json_data_news["data"][0]["list"][0]["item"]) elif indicator == "防疫知识热搜": return pd.DataFrame(json_data_news["data"][0]["list"][1]["item"]) elif indicator == "热搜谣言粉碎": return pd.DataFrame(json_data_news["data"][0]["list"][2]["item"]) elif indicator == "复工复课热搜": return pd.DataFrame(json_data_news["data"][0]["list"][3]["item"]) elif indicator == "热门人物榜": return pd.DataFrame(json_data_news["data"][0]["list"][4]["item"]) elif indicator == "历史疫情热搜": return 
pd.DataFrame(json_data_news["data"][0]["list"][5]["item"]) elif indicator == "搜索正能量榜": return pd.DataFrame(json_data_news["data"][0]["list"][6]["item"]) elif indicator == "游戏榜": return pd.DataFrame(json_data_news["data"][0]["list"][7]["item"]) elif indicator == "影视榜": return pd.DataFrame(json_data_news["data"][0]["list"][8]["item"]) elif indicator == "小说榜": return pd.DataFrame(json_data_news["data"][0]["list"][9]["item"]) elif indicator == "疫期飙升榜": return pd.DataFrame(json_data_news["data"][0]["list"][10]["item"]) elif indicator == "实时播报": return spot_report elif indicator == "中国分省份详情": return domestic_province_df elif indicator == "中国分城市详情": return domestic_city_df elif indicator == "国外分国详情": return outside_country_df elif indicator == "国外分城市详情": return outside_city_df elif indicator == "全球分洲详情": return global_continent_df elif indicator == "全球分洲国家详情": return global_country_df def covid_hist_city(city="武汉市"): """ 疫情历史数据 城市 https://github.com/canghailan/Wuhan-2019-nCoV 2019-12-01开始 :return: 具体城市的疫情数据 :rtype: pandas.DataFrame """ url = "https://raw.githubusercontent.com/canghailan/Wuhan-2019-nCoV/master/Wuhan-2019-nCoV.json" r = requests.get(url) data_json = r.json() data_df = pd.DataFrame(data_json) return data_df[data_df["city"] == city] def covid_hist_province(province="湖北省"): """ 疫情历史数据 省份 https://github.com/canghailan/Wuhan-2019-nCoV 2019-12-01开始 :return: 具体省份的疫情数据 :rtype: pandas.DataFrame """ url = "https://raw.githubusercontent.com/canghailan/Wuhan-2019-nCoV/master/Wuhan-2019-nCoV.json" r = requests.get(url) data_json = r.json() data_df = pd.DataFrame(data_json) return data_df[data_df["province"] == province] if __name__ == "__main__": # 历史数据 # epidemic_hist_city_df = covid_hist_province() # print(epidemic_hist_city_df) # epidemic_hist_province_df = covid_hist_province(province="湖北省") # print(epidemic_hist_province_df) covid_dxy_df = covid_163() print(covid_dxy_df)
justinzm/gopup
gopup/event/covid.py
covid.py
py
24,122
python
en
code
2,477
github-code
36
[ { "api_name": "time.time", "line_number": 32, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 34, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 38, "usage_type": "call" }, { "api_name": "bs4.BeautifulSoup", "line_num...
734577939
# https://randerson112358.medium.com/email-spam-detection-using-python-machine-learning-abe38c889855 # https://blog.textedly.com/spam-text-message-examples #Import libraries import numpy as numpy import pandas as panda import nltk from nltk.corpus import stopwords import string #Load the data # dataFrame = panda.read_csv('db/emails.csv') dataFrame = panda.read_csv('db/test.csv') # print(dataFrame.head(5)) #Print the shape (Get the number of rows and cols) result = dataFrame.shape # print (result) #Get the column names dataFrame.columns # print(dataFrame.columns) #Checking for duplicates and removing them dataFrame.drop_duplicates(inplace=True) # result = dataFrame.shape # print (result) #Show the number of missing (NAN, NaN, na) data for each column result = dataFrame.isnull().sum() # print (result) #Need to download stopwords # nltk.download('stopwords') # Tokenization (a list of tokens), will be used as the analyzer # 1.Punctuations are [!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~] # 2.Stop words in natural language processing, are useless words (data). def process_text(text): # 1 Remove punctuation text_without_punctuation = [char for char in text if char not in string.punctuation] text_without_punctuation = ''.join(text_without_punctuation) # 2 Remove Stop Words text_without_stop_words = [word for word in text_without_punctuation.split() if word.lower() not in stopwords.words('english')] # 3 Return a list of clean words return text_without_stop_words #Show the Tokenization (a list of tokens ) # print (dataFrame['text'].head().apply(process_text)) # Convert the text into a matrix of token counts. 
from sklearn.feature_extraction.text import CountVectorizer messages_bow = CountVectorizer(analyzer=process_text).fit_transform(dataFrame['text']) # print (messages_bow) #Split data into 80% training & 20% testing data sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(messages_bow, dataFrame['spam'], test_size=0.20, random_state=0) #Get the shape of messages_bow # messages_bow.shape # print (messages_bow.shape) # Create and train the Multinomial Naive Bayes classifier which is suitable for classification with discrete features (e.g., word counts for text classification) from sklearn.naive_bayes import MultinomialNB classifier = MultinomialNB() classifier.fit(X_train, y_train) # #Print the predictions # print(classifier.predict(X_train)) # # # print ('divider') # # Print the actual values # print(y_train.values) #Evaluate the model on the training data set from sklearn.metrics import classification_report,confusion_matrix, accuracy_score pred = classifier.predict(X_train) print(classification_report(y_train, pred)) print('Confusion Matrix: \n', confusion_matrix(y_train, pred)) print() print('Accuracy: ', accuracy_score(y_train, pred)) # #Print the predictions # print('Predicted value: ', classifier.predict(X_test)) # print ('divider') # #Print Actual Label # print('Actual value: ', y_test.values) #Evaluate the model on the test data set from sklearn.metrics import classification_report,confusion_matrix, accuracy_score pred = classifier.predict(X_test) print(classification_report(y_test ,pred )) print('Confusion Matrix: \n', confusion_matrix(y_test,pred)) print() print('Accuracy: ', accuracy_score(y_test,pred))
hatem-elsheref/email-spam-detection
main.py
main.py
py
3,394
python
en
code
0
github-code
36
[ { "api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call" }, { "api_name": "string.punctuation", "line_number": 48, "usage_type": "attribute" }, { "api_name": "nltk.corpus.stopwords.words", "line_number": 52, "usage_type": "call" }, { "api_name": "n...
71873734503
import pygame as pg pg.init() COLOR_INACTIVE = pg.Color('lightskyblue3') COLOR_ACTIVE = pg.Color('dodgerblue2') FONT = pg.font.Font(None, 32) class InputBox: def __init__(self, x, y, w, h, text=''): self.rect = pg.Rect(x, y, w, h) self.color = COLOR_INACTIVE self.text = text self.saved_text = '' self.txt_surface = FONT.render(text, True, self.color) self.active = False def handle_event(self, event): if event.type == pg.MOUSEBUTTONDOWN: # If the user clicked on the input_box rect. if self.rect.collidepoint(event.pos): # Toggle the active variable. self.active = not self.active else: self.active = False # Change the current color of the input box. self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE if event.type == pg.KEYDOWN: if self.active: if event.key == pg.K_RETURN: #print(self.text) self.saved_text = self.text self.text = '' elif event.key == pg.K_BACKSPACE: self.text = self.text[:-1] else: self.text += event.unicode # Re-render the text. self.txt_surface = FONT.render(self.text, True, self.color) def update(self): # Resize the box if the text is too long. width = max(200, self.txt_surface.get_width()+10) self.rect.w = width def draw(self, screen): # Blit the text. screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5)) # Blit the rect. pg.draw.rect(screen, self.color, self.rect, 2)
andreidumitrescu95/Python-Sorting-Algorithm-Visualizer
Helper/input_helper.py
input_helper.py
py
1,768
python
en
code
3
github-code
36
[ { "api_name": "pygame.init", "line_number": 3, "usage_type": "call" }, { "api_name": "pygame.Color", "line_number": 4, "usage_type": "call" }, { "api_name": "pygame.Color", "line_number": 5, "usage_type": "call" }, { "api_name": "pygame.font.Font", "line_numbe...
16823182799
''' author: Ramayan Mardi email: jaymisra.programmer@gmail.com ======================================= :about: This is script file of the game, :name: Flappy Game :version: 1.0.1 :requirement: python -> 3.10.0 or upper pygame -> 2.5.2 This script file controls, all the game related stuff i.e game state, rendering etc. :class Game: controls all the above stuff. :function main: entry point of the game. ''' import pygame from pygame.locals import * import os # import in-built module/component from component.utils import * from component.flappy import Flappy from component.pipes import ObsticalControler, MovingImage from component.ui import GameUI all = ("Game", "main") # absolute path of the current files ASSERT_PATH = "assert" DATA_FILENAME = "data.json" # Constants SCREEN_SIZE = (640, 480) TITLE = "Flappy Game" class Game: ''':class: control the whole game, handles game state, handles rendering, handles inputs, handles I/O in file. ''' FPS = None # keep information of the the frame-per-second of the game all = ("setup", "update", "new_game", "run") def __init__(self, window_size: tuple[int, int], window_title: str): self.screen = pygame.display.set_mode(window_size) pygame.display.set_caption(window_title) # clock for the game controls the game time. self.clock = pygame.time.Clock() # store all the game data need for processing game self._gamedata = dict() # decide whether the game updates or not self._allow_update = True # decide whether the game-over or not self._gameover = False # store the game state self._game_state = "Menu" def setup(self) -> None: ''':method: used to load all the assert for the game''' # fetching all the data from the file. self._gamedata = fetch(os.path.join(ASSERT_PATH, DATA_FILENAME)) # fetching all content for the about file. with open(os.path.join(ASSERT_PATH, "about.txt"), "r") as f: about_data = f.read() # load entity for the game. 
flappy_images = [] for i in range(3): flappy_images.append(load_image(self._gamedata["yellowbird"][i], convert=(False, True))) # position of the player is in middle of the screen. self.flappy = Flappy(flappy_images=flappy_images, weight=self._gamedata["entity"]["weight"], fly_speed=self._gamedata["entity"]["fly_speed"], pos=(320, 184)) # load all the obstical for the game. green_pipe = load_image(self._gamedata["game_objects"]["pipe-green"], convert=(True, False)) self.obstical = ObsticalControler(green_pipe) # load all the environment for the game. self.background_day_image = MovingImage(load_image( self._gamedata["game_objects"]["background-day"], SCREEN_SIZE, (True, False)), (0, 0)) self.base_image = MovingImage(load_image(self._gamedata["game_objects"]["base"], (640, 112), convert=(True, False)), (0, 368)) # load all sounds for the game. self.hit = pygame.mixer.Sound(self._gamedata["sfx"]["hit"]) self.wing = pygame.mixer.Sound(self._gamedata["sfx"]["wing"]) self.point = pygame.mixer.Sound(self._gamedata["sfx"]["point"]) self.die = pygame.mixer.Sound(self._gamedata["sfx"]["die"]) # load all the UI for the game. self.gameui = GameUI(self._gamedata) # text box for the game. self.textbox = TextBox(about_data, (0, 0), fontname=self._gamedata["font"]["gamefont"], fontcolor="#F5EBEB", fontsize=22) def update(self, delta_time: float, **kw) -> None: ''':method: used to update all the game related stuff. kw: contains all the inputs data. ''' # game code ------------------------------------- if self._game_state == "Menu": self.screen.fill("#383838") self.gameui.show_gamemenu(self.screen) self.gameui.show_about_btn(self.screen) ''' if continuation of the game is possible than the continue button gets highlighted. ''' self.gameui.gamemenu_boxlayout.chlidren[1].active = self._gamedata["continue"] # trigger the functions according to the button pressed. # `New Game` button pressed. 
if self.gameui.gamemenu_boxlayout.chlidren[0].pressed: self._game_state = "Start New Game" # game state changed ''' only the the assert would draw and nothing gets updated. ''' self._allow_update = False # `Continue` button pressed and continuation is possible. elif self.gameui.gamemenu_boxlayout.chlidren[1].pressed and self._gamedata["continue"]: # placing entity to the previous position. self.flappy.rect.topleft = self._gamedata["entity"]["pos"] # placing all the obstical for the game to the previous position. self.obstical._custom_pipe_pos(self._gamedata["pipes_pos"]["toppipe_list"], self._gamedata["pipes_pos"]["bottompipe_list"]) # placing all the environment for the game to the previous position. self.base_image.moving_images["img1"][1].topleft = self._gamedata["other_entity"]["base_pos"]["img1"] self.base_image.moving_images["img2"][1].topleft = self._gamedata["other_entity"]["base_pos"]["img2"] self.background_day_image.moving_images["img1"][1].topleft = self._gamedata["other_entity"]["background_pos"]["img1"] self.background_day_image.moving_images["img2"][1].topleft = self._gamedata["other_entity"]["background_pos"]["img2"] # set previous score. self.obstical.score = self._gamedata["score"] self.obstical.previous_score = self._gamedata["previous_score"] # start the game as previous self._game_state = "Start New Game" self._allow_update = False # `Setting` button is pressed. elif self.gameui.gamemenu_boxlayout.chlidren[2].pressed: ''' active the fps button which is using by the game. ''' if self._gamedata["fps"] == 30: self.gameui.settingmenu_boxlayout_1.chlidren[1].active = True else: self.gameui.settingmenu_boxlayout_1.chlidren[2].active = True # changed the game state ''' setting game state is required because in those blocks of settings. 
content a code that are updated in each frame when the game state is set to be "Settings" ''' self._game_state = "Settings" # `About` button is pressed elif self.gameui.about_btn.pressed: # same reason as like the `Setting` button. self._game_state = "About" # Gets updated in each frame when 'Setting' is activate. elif self._game_state == "Settings": self.screen.fill("#383838") self.gameui.show_settingmenu(self.screen) if kw["K_x"]: self._game_state = "Menu" # trigger functions when the button inside the setting menu get pressed. children = self.gameui.settingmenu_boxlayout_1.chlidren # TODO: make the below code more clean. # handles the state of fps buttons and game fps setting if children[2].pressed: children[2].active = True # updates the entire game fps self._gamedata["fps"] = 60 elif children[1].pressed: children[2].active = False # updates the entire game fps self._gamedata["fps"] = 30 children[1].active = not children[2].active # Gets updated in each frame when 'About' is activate. elif self._game_state == "About": # textbox content all the details for the game gets visible. self.textbox.blit(self.screen) if kw["K_x"]: self._game_state = "Menu" # Gets updated in each frame when 'Start New Game' is activate. elif self._game_state == "Start New Game": # called when the game is not over and ready to play. if not self._gameover: # drawing game background. self.background_day_image.blit(self.screen) self.flappy.blit(self.screen) # entity. # drawing game environment self.obstical.toppipe.draw(self.screen) self.obstical.bottompipe.draw(self.screen) self.base_image.blit(self.screen) self.gameui.show_number(self.screen, str(self.obstical.score)) # if allow update is True if self._allow_update: # update all the entities. 
self.background_day_image.move_image(self.screen, 50, delta_time, (-1, 0)) self.flappy.update(delta_time, **kw) self.obstical.update(delta_time) self.base_image.move_image(self.screen, 100, delta_time, (-1, 0)) # check collision of entity with the pipes if self.obstical.collision(self.flappy.collision_rect): self.hit.play() self._allow_update = False self._gameover = True # check collision of the entity with the base if self.base_image.collision(self.flappy.collision_rect): self.hit.play() self._allow_update = False self._gameover = True ''' play the sound when the entity flap it's wings. ''' if kw["K_SPACE"]: self.wing.play() ''' play sound when the point get incresed ''' if self.obstical.score > self.obstical.previous_score: self.point.play() self.obstical.previous_score = self.obstical.score else: # message show when update is False ''' This message is show during the game start, continuation of the game, and also during the game get paused. ''' self.gameui.start_message(self.screen) ''' below code handles the functionality of the pause mechanism by manipulating :attr self._allow_update: ''' if kw["K_p"] and self._allow_update: self._allow_update = False elif kw["K_p"] and not self._allow_update: self._allow_update = True elif kw["K_SPACE"] and not self._allow_update: self._allow_update = True ''' during the game is running, and the back button is pressed then the continuation data gets save, and continuation gets updated. ''' if kw["K_x"]: # store the entity data. self._gamedata["entity"] = { "weight": self.flappy.weight, "fly_speed": self.flappy.fly_speed, "pos": self.flappy.rect.center} # store the list of pipe position as a data of both top-pipes and bottom-pipes. self._gamedata["pipes_pos"] = { "toppipe_list": [pipe.rect.topleft for pipe in self.obstical.toppipe_list], "bottompipe_list": [pipe.rect.topleft for pipe in self.obstical.bottompipe_list]} # store the data of other entities i.e environment stuff. 
self._gamedata["other_entity"] = {"base_pos": { "img1": self.base_image.moving_images["img1"][1].topleft, "img2": self.base_image.moving_images["img2"][1].topleft }, "background_pos": { "img1": self.background_day_image.moving_images["img1"][1].topleft, "img2": self.background_day_image.moving_images["img2"][1].topleft} } # continuation get activate and score is preserved. self._gamedata["continue"] = True self._gamedata["score"] = self.obstical.score self._gamedata["previous_score"] = self.obstical.previous_score # back to the main menu self._game_state = "Menu" self.new_game() # called when the game gets over. elif self._gameover: # store high-score data if score is greater. if self.obstical.score > self._gamedata["highscore"]: self._gamedata["highscore"] = self.obstical.score # if high-score is greater than '0' it gets displayed on screen. if self._gamedata["highscore"]: self.gameui.show_highscore(self.screen, self._gamedata["highscore"]) # finally game-over message would be shown. self.gameui.gameover_message(self.screen) if kw["K_x"]: # back to the main menu self._game_state = "Menu" # reset the game over button self.new_game() self._gamedata["continue"] = False if kw["K_r"] and self._gameover: # resume the game and its state self.new_game() def new_game(self) -> None: ''':method: used to set-up new game.''' # reset all the values. self._allow_update = True self.obstical.generate_pipe() self.obstical.score = 0 self.obstical.previous_score = self.obstical.score self.flappy.rect.topleft = (480 // 2, 368 // 2) self._gameover = False def run(self) -> None: ''':method: main-loop of the game.''' # load all the assert for the game. self.setup() # running main-loop of the game running = True while running: # shortcut keys responsible for controlling inputs in the whole game. 
self.ShortCuts = {"K_SPACE": False, "K_r": False, "K_x": False, "K_p": False} # tracking the pygame events for event in pygame.event.get(): if event.type == pygame.QUIT or \ (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE): running = False # check key presses for the game if event.type == pygame.KEYDOWN: if event.key == pygame.K_SPACE: self.ShortCuts["K_SPACE"] = True if event.key == pygame.K_p: self.ShortCuts["K_p"] = True if event.key == pygame.K_r: self.ShortCuts["K_r"] = True if event.key == pygame.K_x: self.ShortCuts["K_x"] = True # delta time of the entire game. delta_time = self.clock.tick(self._gamedata["fps"]) / 1000.0 # update the whole game. self.update(delta_time, **self.ShortCuts) pygame.display.update() # quit the game save(os.path.join(ASSERT_PATH, DATA_FILENAME), self._gamedata) pygame.quit() exit() def main(): ''':function: entry point of the game.''' pygame.init() # initializing pygame Game(SCREEN_SIZE, TITLE).run() if __name__ == '__main__': main()
ramayanbindas/Flappy
flappy.py
flappy.py
py
16,627
python
en
code
1
github-code
36
[ { "api_name": "pygame.display.set_mode", "line_number": 46, "usage_type": "call" }, { "api_name": "pygame.display", "line_number": 46, "usage_type": "attribute" }, { "api_name": "pygame.display.set_caption", "line_number": 47, "usage_type": "call" }, { "api_name":...
30668828069
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ## # @file genCode.py # @brief 根据json文件生成错误码头文件以及文档 # 头文件:./release/tuya_error_code.y # 文档 :./release/tuya_error_code.md # @author huatuo # @version 1.0.0 # @date 2021-09-27 # @note 支持参数1修改文件名,如:"base",使用默认命名则传递空:"" # @note 支持参数2传入指定模块列表,参数形式支持如下: # "compA compB compC" # "compA, compB, compC" import json import os import sys import shutil import codecs from datetime import date # 为了兼容python2和python3 if sys.version_info < (3, 0): reload(sys) sys.setdefaultencoding('utf8') file_head_template = """/******************************************************************* * File: tuya_###MODULE###_error_code.h * Author: auto generate by tuya code gen system * Date: ###DATE### * Description:this file defined the error code of tuya IOT * you can change it manully if needed * Copyright(C),2018-2020, tuya inc, www.tuya.comm *******************************************************************/ #ifndef TUYA_###MODULE_UPPER###_ERROR_CODE_H #define TUYA_###MODULE_UPPER###_ERROR_CODE_H #ifdef __cplusplus extern "C" { #endif """ file_foot_template = """ #define ERRCODE2STRING(errcode) #errcode #define TUYA_ERROR_STRING(errcode) ("[ErrCode: " ERRCODE2STRING(errcode) "]") #define TUYA_CHECK_NULL_RETURN(x, y)\\ do{\\ if (NULL == (x)){\\ TAL_PR_ERR("%s null", #x);\\ return (y);\\ }\\ }while(0) #define TUYA_CHECK_NULL_GOTO(x, label)\\ do{\\ if (NULL == (x)){\\ TAL_PR_ERR("%s null", #x);\\ goto label;\\ }\\ }while(0) #define TUYA_CALL_ERR_LOG(func)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)){\\ TAL_PR_ERR("ret:%d", rt);\\ }\\ }while(0) #define TUYA_CALL_ERR_GOTO(func, label)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)){\\ TAL_PR_ERR("ret:%d", rt);\\ goto label;\\ }\\ }while(0) #define TUYA_CALL_ERR_RETURN(func)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)){\\ TAL_PR_ERR("ret:%d", rt);\\ return (rt);\\ }\\ }while(0) #define TUYA_CALL_ERR_RETURN_VAL(func, y)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)){\\ TAL_PR_ERR("ret:%d", rt);\\ return (y);\\ }\\ 
}while(0) #define TUYA_CALL_ERR_LOG_SEQ_RETURN_VAL(func, y, point)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)){\\ TAL_PR_ERR("ret:%d", rt);\\ INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\ return (y);\\ }\\ }while(0) #define TUYA_CALL_ERR_LOG_SEQ_RETURN(func, point)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)){\\ TAL_PR_ERR("ret:%d", rt);\\ INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\ return (rt);\\ }\\ }while(0) #define TUYA_CALL_ERR_LOG_SEQ_GOTO(func, label)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)){\\ TAL_PR_ERR("ret:%d", rt);\\ INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\ goto label;\\ }\\ }while(0) #define TUYA_CALL_ERR_LOG_SEQ(func)\\ do{\\ rt = (func);\\ if (OPRT_OK != (rt)) {\\ TAL_PR_ERR("ret:%d", rt);\\ INSERT_ERROR_LOG_SEQ_DEC((point), rt);\\ }\\ }while(0) #define TUYA_CHECK_NULL_LOG_SEQ_RETURN(x, y, point)\\ do{\\ if (NULL == (x)){\\ TAL_PR_ERR("%s null", #x);\\ INSERT_ERROR_LOG_SEQ_DEC((point), y);\\ return (y);\\ }\\ }while(0) #define TUYA_CHECK_NULL_LOG_SEQ_GOTO(x, point, label)\\ do{\\ if (NULL == (x)){\\ TAL_PR_ERR("%s null", #x);\\ INSERT_ERROR_LOG_SEQ_NULL((point));\\ goto label;\\ }\\ }while(0) #ifdef __cplusplus } #endif #endif """ marco_head_template = """ /**************************************************************************** the error code marco define for module ###MODULE### ****************************************************************************/ """ class codegen(): def __init__(self, file_name="", enable_list=[]): self.output_path = "./release" self.modules = None # json self.file_name = "_" + file_name if len(file_name) else "" self.enable_list = enable_list print("file_name: ", file_name) print("enable_list: ", enable_list) def load_modules(self): module_file = codecs.open("./module.json", "r", "utf-8") self.modules = json.load(module_file, encoding='utf-8')['TuyaEmbeddedErrcode'] # 判断该组件错误码是否要被处理 def _modules_is_ok(self, module): if (len(module['errcode']) == 0): # print("module length is 0: ", module['name']) return False if 
(module['offset']>255) or (len(module['errcode'])>255): print("module over offset: ", module['name']) return False if self.enable_list == []: # 使能列表为空中则生成全部错误码 return True if module['name'] not in self.enable_list: # 模块没有被使能 return False return True def _gen_tbl_header(self, module_name, index, errcode_cnt): tbl_header = "\n## " + str(index) + ". module " + '`' + module_name + '`' + "\n\n" if errcode_cnt > 0: tbl_header = tbl_header + " No. | Name | Value | message" + "\n" tbl_header = tbl_header + "-------|-------|-------|--------" + "\n" return tbl_header def _gen_marco_item(self, module_name, errcode, offset, index): marco_item = "" marco_msg = "" marco_val = "" # prefix, global error code not have module_name marco_item_prefix = "OPRT"+ "_" + module_name.upper() + "_" marco_item_prefix_global = "OPRT" + "_" # only one key in error code for key in errcode: if module_name == "global": marco_item = marco_item_prefix_global + key else: marco_item = marco_item_prefix + key error_val = "-%#06x" % ((offset<<8) + index) marco_val = "(%s)" % error_val marco_msg = str(int(error_val, 16)) + ", " + errcode[key] return marco_item, marco_val, marco_msg def gen_md(self): file_content = "# Tuya Embedded software error code define" + "\n" module_index = 0 for module in self.modules: if not self._modules_is_ok(module): continue module_index += 1 file_content = file_content + self._gen_tbl_header(module["name"].upper(), module_index, len(module['errcode'])) index = 0 for errcode in module["errcode"]: marco_item, marco_val, marco_msg = self._gen_marco_item(module["name"], errcode, module["offset"], index) marco_item, marco_val, marco_msg = '`'+marco_item+'`', '`'+marco_val+'`', '`'+marco_msg+'`' index += 1 file_content = file_content + str(index) + "|" + marco_item + "|" + str(marco_val) + "|" + marco_msg file_content = file_content + "\n" # 生成.md文件 file_name = self.output_path + "/tuya_###MODULE###_error_code.md".replace("_###MODULE###", self.file_name) fd = open(file_name, 'w') 
fd.write(file_content) fd.close() # print("OK!") return def gen_marco(self, module): define = "#define" marco_content_head = marco_head_template.replace("###MODULE###", module["name"].upper()) marco_content_body = "" marco_item_prefix = "OPRT"+ "_" + module["name"].upper() + "_" index = 0 for errcode in module["errcode"]: marco_item, marco_val, marco_msg = self._gen_marco_item(module["name"], errcode, module["offset"], index) index += 1 # appened to the marco content marco_define = "%(d)s %(i)-50s %(v)+8s //%(m)s" % \ {'d':define, 'i':marco_item, 'v':marco_val, 'm':marco_msg} marco_content_body = marco_content_body + marco_define + '\n' max_marco_cnt = define + " " + marco_item_prefix + "ERRCODE_MAX_CNT" + " " + str(index) marco_content_body = marco_content_body + max_marco_cnt + '\n\n' marco_content = marco_content_head + marco_content_body return marco_content def gen_file(self): file_name = self.output_path +"/tuya_###MODULE###_error_code.h".replace("_###MODULE###", self.file_name) fd = open(file_name, 'w') # head file_head = file_head_template.replace("_###MODULE###", self.file_name) file_head = file_head.replace("_###MODULE_UPPER###", self.file_name.upper()) file_head = file_head.replace("###DATE###", str(date.today())) fd.write(file_head) # marco for module in self.modules: if not self._modules_is_ok(module): continue marco_str = self.gen_marco(module) fd.write(marco_str) fd.write(file_foot_template) fd.close() # print("OK!") return def dogen(self): # load all module description self.load_modules() # clean output path shutil.rmtree(self.output_path, ignore_errors=True) os.mkdir(self.output_path) # gen .h file # print("generate errcode .h file...") rt = self.gen_file() # gen .md file # print("generate errcode .md file...") self.gen_md() return if __name__ == '__main__': file_name = "" if len(sys.argv) > 1: file_name = sys.argv[1] enable_list = [] if len(sys.argv) > 2: enable_list_str = sys.argv[2] enable_list = enable_list_str.replace(',', ' ').split() gen = 
codegen(file_name, enable_list) gen.dogen()
tuya/tuyaos-development-board-t2
software/TuyaOS/scripts/error_code/genCode.py
genCode.py
py
9,935
python
en
code
2
github-code
36
[ { "api_name": "sys.version_info", "line_number": 27, "usage_type": "attribute" }, { "api_name": "sys.setdefaultencoding", "line_number": 29, "usage_type": "call" }, { "api_name": "codecs.open", "line_number": 198, "usage_type": "call" }, { "api_name": "json.load",...
43319941106
import torch import shutil import torch.optim as optim import numpy as np from torch import nn from torch.utils.data import DataLoader from torchvision import transforms from utils import AverageMeter, min_max_normalize, save_checkpoint from dataset import DAEDataset from model import ModelRec transform = transforms.Compose([transforms.Resize((300, 400)), transforms.ToTensor(), transforms.Lambda(lambda tensor: min_max_normalize(tensor))]) #working directory root = '/content' train_dataset = DAEDataset(root, mode='train', transform=transform) val_dataset = DAEDataset(root, mode='val', transform=transform) train_loader = DataLoader(dataset=train_dataset, batch_size=16, shuffle=True) val_loader = DataLoader(dataset=val_dataset, batch_size=2, shuffle=True) def train(train_loader, val_loader, model, optimizer, scheduler, criterion, num_epochs): best_loss = 1 for epoch in range(num_epochs): scheduler.step() train_loss = AverageMeter() val_loss = AverageMeter() model.train() for input, target in train_loader: input = input.cuda() target = target.cuda() optimizer.zero_grad() output = model(input) loss = criterion(output, target) train_loss.up_date(loss, input.size(0)) loss.backward() optimizer.step() print('Epoch {} of {}, Train_loss: {:.3f}'.format(epoch + 1, num_epochs, train_loss.avg)) model.eval() for input, target in val_loader: input = input.cuda() target = target.cuda() output = model(input) loss = criterion(output, target) val_loss.up_date(loss, input.size(0)) is_best = val_loss.avg < best_loss best_loss = min(val_loss.avg, best_loss) save_checkpoint({ 'epoch': epoch + 1, 'arch': experiment, 'state_dict': model.state_dict(), 'best_prec1': best_loss, 'optimizer': optimizer.state_dict(), }, is_best) print('Epoch {} of {}, Val_loss: {:.3f}'.format(epoch + 1, num_epochs, val_loss.avg)) def main(): torch.cuda.empty_cache() model = ModelRec().cuda() criterion = nn.BCELoss().cuda() optimizer = optim.Adam(model.parameters()) scheduler = 
torch.optim.lr_scheduler.StepLR(optimizer, step_size = 50, gamma=0.1) experiment = "DAE_Conv" num_epochs = 200 #checkpoint = torch.load('/content/checkpoint.pth.tar') #model.load_state_dict(checkpoint['state_dict']) train(train_loader, val_loader, model, optimizer, scheduler, criterion, num_epochs) if __name__ == '__main__': main()
ANnick2908/Data_Science_Projects
Denoising_Autoencoder/train.py
train.py
py
2,622
python
en
code
0
github-code
36
[ { "api_name": "torchvision.transforms.Compose", "line_number": 14, "usage_type": "call" }, { "api_name": "torchvision.transforms", "line_number": 14, "usage_type": "name" }, { "api_name": "torchvision.transforms.Resize", "line_number": 14, "usage_type": "call" }, { ...
7175166932
import matplotlib.pyplot as plt import cv2 import time start_time = time.time() # Titik awal x1 = 1 y1 = 7 # Titik Akhir x2 = 7 y2 = 15 dy = y2-y1 dx = x2-x1 step = dx if dx > dy else dy xInc = dx / step yInc = dy / step x=x1 y=y1 len=y2+x2 # x axis value list. x_number_list = [] x_number_list.append(x1) # y axis value list. y_number_list = [] y_number_list.append(y1) for i in range(1,len): x = x + xInc y = y + yInc xBulat=int(x) yBulat=int(y) x_number_list.append(xBulat) y_number_list.append(yBulat) if(x>=x2 and y>=y2): break end_time = time.time() delta_time = end_time-start_time print("Execution Time : ",delta_time," ms") from guppy import hpy h = hpy() print (h.heap()) # Draw point based on above x, y axis values. plt.scatter(x_number_list, y_number_list, s=10) # Set chart title. plt.title("Algoritma DDA") # Set x, y label text. plt.xlabel("X") plt.ylabel("Y") plt.show()
albirrkarim/dda-bresenham
dda.py
dda.py
py
966
python
en
code
0
github-code
36
[ { "api_name": "time.time", "line_number": 5, "usage_type": "call" }, { "api_name": "time.time", "line_number": 56, "usage_type": "call" }, { "api_name": "guppy.hpy", "line_number": 62, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.scatter", "line_nu...
25544326890
import numpy as np import cvxpy as cp import matplotlib.pyplot as plt # double integrator # state is [x, vx, y, vy] T = 0.1 # timestep in seconds A = np.kron(np.eye(2),np.array([[1, T], [0, 1]])) B = np.kron(np.eye(2), np.array([[0.5*T**2], [T]])) class MPC: def __init__(self, prediction_horizon = 10, number_of_agents = 1, umax=100.0, obstacles=[]): self.N = prediction_horizon self.num_agents = number_of_agents self.obstacles = obstacles self.umax = umax self.solution_X = None self.solution_U = None # updated once solve is called return def plot(self): if self.solution_X is None: print("Please call solve first") return # plot the trajectories plt.figure() for i in range(self.num_agents): xs = [x[0] for x in self.solution_X[i]] ys = [x[2] for x in self.solution_X[i]] plt.plot(xs, ys, label=f"Agent {i}") plt.scatter([xs[0]], [ys[0]], marker="o", label=None) plt.scatter([xs[-1]], [ys[-1]], marker="x", label=None) plt.grid() plt.xlabel("x [m]") plt.ylabel("y [m]") plt.legend() # plt.show() plt.savefig("plot.png") def solve(self, x0, xT): # x0 is a list of initial conditions # xT is a list of target states ### constuct the MPC problem X = [[cp.Variable(4) for _ in range(self.N)] for i in range(self.num_agents)] # X[i][k] is the i-th agents state at time k U = [[cp.Variable(2) for _ in range(self.N-1)] for i in range(self.num_agents)] ### create constraints constraints = [] # initial and final conditions for i in range(self.num_agents): constraints.append(X[i][0] == x0[i]) constraints.append(X[i][-1] == xT[i]) # dynamics constraints for i in range(self.num_agents): for k in range(self.N-1): # at each timestep constraints.append(X[i][k+1] == A @ X[i][k] + B @ U[i][k]) # input constraints for i in range(self.num_agents): for k in range(self.N-1): constraints.append(cp.norm(U[i][k], "inf") <= self.umax) ### construct the objective function objective = sum(sum(cp.sum_squares(uk) for uk in Ui) for Ui in U) ### call a solver prob = cp.Problem(cp.Minimize(objective), 
constraints) prob.solve() ### save the trajectory self.solution_X = [[x.value for x in Xi] for Xi in X] self.solution_U = [[u.value for u in Ui] for Ui in U] ### return instantaneous control input return [u[0] for u in self.solution_U] mpc = MPC(number_of_agents=2) x0 = [np.array([0,0,0,0]), np.array([1,0,0,0])] xT = [np.array([1,0,1,0]), np.array([0,0,1,0])] mpc.solve(x0, xT) mpc.plot()
dev10110/interagent_mpc
no_obstacle_mpc.py
no_obstacle_mpc.py
py
2,690
python
en
code
0
github-code
36
[ { "api_name": "numpy.kron", "line_number": 11, "usage_type": "call" }, { "api_name": "numpy.eye", "line_number": 11, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 11, "usage_type": "call" }, { "api_name": "numpy.kron", "line_number": 12, ...
73857162662
import math from SynRD.publication import Publication, Finding, VisualFinding, TAXONOMY import numpy as np import pandas as pd import statsmodels as sm import statsmodels.stats.weightstats class Assari2019Baseline(Publication): DEFAULT_PAPER_ATTRIBUTES = { 'id': 'assari2019baseline', 'length_pages': 15, 'authors': ['Shervin Assari', 'Mohsen Bazargan'], 'journal': 'International Journal of Environmental Research and Public Health', 'year': 2019, 'current_citations': 9, #number of citations the paper has or how many people have cited it? 'base_dataframe_pickle': 'assari2019ability_dataframe.pickle' } RACE_MAP = { 1: "White", 2: "Black" } GENDER_MAP = { 1: "Man", 2: "Woman" } FILENAME = 'assari2019baseline' COLUMN_MAP = {"V2102": "Race", "V103": "Gender", "V2000": "Age", "V2007": "Education", "V2020": "Income", "V2637": "Smoking", "V2623": "BMI", "V2681": "HTN", "V13214": "Exercise", "V2203": "Depressive symptoms", "V915": "Health", "V1860": "Weight", "V15003": "Response pattern", "V836": "Stroke wave 1", "V4838": "Stroke wave 2", "V10225": "Stroke wave 3", "V12305": "Stroke wave 4", "V15944": "Stroke wave 5", "V12302": "Any stroke"} corr_df = None means = None dead = None def __init__(self, dataframe=None): super(Assari2019Baseline, self).__init__(dataframe=dataframe) self.FINDINGS = self.FINDINGS + [ Finding(self.finding_5_1, description="finding_5_1", text="""Blacks were younger, had higher number of chronic medical conditions at baseline in comparison to Whites.""", finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS), Finding(self.finding_5_2, description="finding_5_2", text="""Relative to White people, Black individuals had also lower educational attainment (p < 0.05 for all).""", finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS), Finding(self.finding_5_3, description="finding_5_3", text="""Blacks also reported worse self-rated health (SRH) than Whites (Table 1).""", finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS), 
Finding(self.finding_5_6, description="finding_5_6", text="""Similarly, overall, people had 12.53 years of schooling at baseline (95%CI = 12.34-12.73).""", finding_type=TAXONOMY.DESCRIPTIVE_STATISTICS), Finding(self.finding_5_7, description="finding_5_7", text="""A comparison of racial groups showed higher educational attainment in Whites (12.69, 95%CI=12.48-12.90) than Blacks (11.37,95%CI = 10.90-11.84). Thus, on average, Whites had more than 1.3 years higher years [sic] of schooling than Blacks...""", finding_type=TAXONOMY.MEAN_DIFFERENCE.value.BETWEEN_CLASS), Finding(self.finding_5_8, description="finding_5_8", text="""Of the 177 that died, 121 were White (68.36%) and 56 were Black (31.64%).""", finding_type=TAXONOMY.DESCRIPTIVE_STATISTICS), Finding(self.finding_5_9, description="finding_5_9", text="""Of the 177 that died, 33 were obese (18.64%) and 144 were not obese (81.36%) at baseline.""", finding_type=TAXONOMY.DESCRIPTIVE_STATISTICS), Finding(self.finding_6_1, description="finding_6_1", text="""In bivariate association, race was not associated with death due to cerebrovascular (unadjusted HR for Blacks compared to Whites = 0.78, 95% CI = 0.55-1.11), suggesting that Whites and Blacks had similar risk of future cerebrovascular mortality over 25 years.""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_2, description="finding_6_2", text="""In bivariate association, baseline obesity was not associated with future risk of cerebrovascular mortality (Unadjusted HR for Blacks compared to Whites = 0.84, 95% CI = 0.45-1.56), suggesting that Whites and Blacks had a similar risk of future cerebrovascular mortality over 25 years.""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_3, description="finding_6_3", text="""Race (Black) was negatively associated with education and income""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_4, 
description="finding_6_4", text="""[race (Black) was]... positively associated with depressive symptoms, hypertension, and obesity.""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_5, description="finding_6_5", text="""Blacks more frequently smoked and less frequently exercised.""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_6, description="finding_6_6", text="""Race was not associated with cerebrovascular death.""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_7, description="finding_6_7", text="""Baseline obesity was associated with female gender and less education, income, smoking, and exercise.""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_8, description="finding_6_8", text="""Obesity at baseline was associated with depressive symptoms and hypertension at baseline.""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION), Finding(self.finding_6_9, description="finding_6_9", text="""Obesity at baseline was not associated with cerebrovascular death in the pooled sample (Table 2).""", finding_type=TAXONOMY.CORRELATION.value.PEARSON_CORRELATION) ] def _get_any_stroke_if_died(self, x): response_pattern = str(x["Response pattern"]) if "4" not in response_pattern: return 0 # patient did not die for i in range(5): if x[f"Stroke wave {i + 1}"] == 1: return 1 return 0 def _recreate_dataframe(self, filename='assari2019baseline_dataframe.pickle'): data = pd.read_csv('data/DS0001/04690-0001-Data.tsv', sep='\t') data = data[self.COLUMN_MAP.keys()] data.rename(columns=self.COLUMN_MAP, inplace=True) data = data[(data["Race"] == 1) | (data["Race"] == 2)] # 1 = white, 2 = Black data["Educational attainment"] = data.apply(lambda x: 1 if x["Education"] >= 12 else 0, axis=1) data["Obesity"] = data.apply(lambda x: 1 if x["BMI"] > 30 else 0, axis=1) data["Health binary"] = data.apply(lambda x: 1 if x["Health"] in 
[1, 2, 3] else 0, axis=1) data["Death to cerebrovascular disease"] = data.apply(lambda x: self._get_any_stroke_if_died(x), axis=1) data['Exercise'] = data['Exercise'].replace(-99.0, 0) data.drop(columns=['Stroke wave 1', 'Stroke wave 2', 'Stroke wave 3', 'Stroke wave 4', 'Stroke wave 5','Response pattern', 'Any stroke'], inplace=True) data.to_pickle(filename) return data def get_corr(self): if self.corr_df is None: corr_df = self.dataframe[['Race', 'Age', 'Gender', 'Education', 'Income', 'Smoking', 'Exercise', 'Depressive symptoms', 'HTN', 'Obesity', 'Death to cerebrovascular disease']] self.corr_df = corr_df.corr() return self.corr_df def get_race_pools_with_means(self): if self.means is None: black_pool = self.dataframe.loc[self.dataframe['Race'] == 2] white_pool = self.dataframe.loc[self.dataframe['Race'] == 1] black_pool_means, white_pool_means = self._get_adjusted_means(black_pool), self._get_adjusted_means(white_pool) means = pd.concat([black_pool_means, white_pool_means]) means['Race'] = ['Black', 'White'] means.set_index('Race', inplace=True) self.means = means return self.means def _get_adjusted_means(self, data_sample): temp_means = np.around(sm.stats.weightstats.DescrStatsW(data_sample, weights=data_sample['Weight']).mean, 4) return pd.DataFrame(data=[temp_means], columns=data_sample.columns) def get_dead(self): if self.dead is None: self.dead = self.dataframe.loc[self.dataframe['Death to cerebrovascular disease'] == 1] return self.dead def finding_5_1(self): """Blacks were younger, had higher number of chronic medical conditions at baseline in comparison to Whites.""" means = self.get_race_pools_with_means() black_age = means['Age']['Black'] white_age = means['Age']['White'] black_htn = means['HTN']['Black'] white_htn = means['HTN']['White'] values = [black_age, white_age, black_htn, white_htn] soft_finding = black_age < white_age and black_htn > white_htn return (values, soft_finding, values) def finding_5_2(self): """Relative to White people, Black 
individuals had also lower educational attainment (p < 0.05 for all).""" means = self.get_race_pools_with_means() black_education = means['Education']['Black'] white_education = means['Education']['White'] values = [black_education, white_education] soft_finding = black_education < white_education return (values, soft_finding, values) def finding_5_3(self): """Blacks also reported worse self-rated health (SRH) than Whites (Table 1).""" means = self.get_race_pools_with_means() black_health = means['Health']['Black'] white_health = means['Health']['White'] values = [black_health, white_health] soft_finding = black_health > white_health # note 1 = excellent, 5 = poor return (values, soft_finding, values) def finding_5_4(self): """The overall prevalence of DM was 5.73%, (95%CI = 4.80-6.82).""" pass def finding_5_5(self): """DM was more common in Blacks (9.22%, 95%CI = 7.75-10.95) than Whites (5.25%, 95%CI = 4.2.4-6.50).""" pass def finding_5_6(self): """Similarly, overall, people had 12.53 years of schooling at baseline (95%CI = 12.34-12.73).""" means = self._get_adjusted_means(self.dataframe) years_schooling = means['Education'][0] # soft_finding = round(years_schooling, 2) == 12.53 soft_finding = np.allclose(float(round(years_schooling, 2)), 12.53, atol=0.2) return ([years_schooling], soft_finding, [years_schooling]) def finding_5_7(self): """A comparison of racial groups showed higher educational attainment in Whites (12.69, 95%CI=12.48-12.90) than Blacks (11.37,95%CI = 10.90-11.84). Thus, on average, Whites had more than 1.3 years higher years [sic] of schooling than Blacks...""" means = self.get_race_pools_with_means() white_education = means['Education']['White'] black_education = means['Education']['Black'] values = [white_education, black_education] soft_finding = white_education > black_education + 1.2 return (values, soft_finding, values) def finding_5_8(self): """Of the 177 that died, 121 were White (68.36%) and 56 were Black (31.64%). 
Note that we were unable to reproduce this result.""" dead = self.get_dead() total = dead.shape[0] black_count = dead.loc[dead['Race'] == 2].shape[0] white_count = dead.loc[dead['Race'] == 1].shape[0] values = [total, white_count, black_count] # soft_finding = total == 177 and white_count == 121 and black_count == 56 white_percentage = float(white_count) / float(total) soft_finding = np.allclose(white_percentage, 0.68, atol=0.05) return (values, soft_finding, values) def finding_5_9(self): """Of the 177 that died, 33 were obese (18.64%) and 144 were not obese (81.36%) at baseline. Note that we were unable to reproduce this result.""" dead = self.get_dead() total = dead.shape[0] obese_count = dead.loc[dead['Obesity'] == 1].shape[0] not_obese_count = dead.loc[dead['Obesity'] == 0].shape[0] values = [total, obese_count, not_obese_count] # soft_finding = total == 177 and obese_count == 33 and not_obese_count == 144 obese_percentage = float(obese_count) / float(total) soft_finding = np.allclose(obese_percentage, 0.18, atol=0.05) return (values, soft_finding, values) def finding_6_1(self): """In bivariate association, race was not associated with death due to cerebrovascular (unadjusted HR for Blacks compared to Whites = 0.78, 95% CI = 0.55-1.11), suggesting that Whites and Blacks had similar risk of future cerebrovascular mortality over 25 years.""" corr_df = self.get_corr() corr_race_death = corr_df['Race'].loc['Death to cerebrovascular disease'] soft_finding = abs(corr_race_death) < 0.03 return ([corr_race_death], soft_finding, [corr_race_death]) def finding_6_2(self): """In bivariate association, baseline obesity was not associated with future risk of cerebrovascular mortality (Unadjusted HR for Blacks compared to Whites = 0.84, 95% CI = 0.45-1.56), suggesting that Whites and Blacks had a similar risk of future cerebrovascular mortality over 25 years.""" corr_df = self.get_corr() corr_obesity_death = corr_df['Obesity'].loc['Death to cerebrovascular disease'] 
soft_finding = abs(corr_obesity_death) < 0.03 return ([corr_obesity_death], soft_finding, [corr_obesity_death]) # TODO: check that race correlation is for Black def finding_6_3(self): """Race (Black) was negatively associated with education and income""" corr_df = self.get_corr() values = [corr_df['Race'].loc['Education'], corr_df['Race'].loc['Income']] soft_finding = all(x < 0 for x in values) return (values, soft_finding, values) # TODO: check that race correlation is for Black def finding_6_4(self): """[race (Black) was]... positively associated with depressive symptoms, hypertension, and obesity. Note that we were unable to reproduce this result.""" corr_df = self.get_corr() values = [corr_df['Race'].loc['Depressive symptoms'], corr_df['Race'].loc['HTN'], corr_df['Race'].loc['Obesity']] soft_finding = all(x > 0 for x in values) return (values, soft_finding, values) # TODO: check that race correlation is for Black def finding_6_5(self): """Blacks more frequently smoked and less frequently exercised.""" # implies positive correlation with smoking and negative with exercise corr_df = self.get_corr() values = [corr_df['Race'].loc['Smoking'], corr_df['Race'].loc['Exercise']] soft_finding = values[0] > 0 and values[1] < 0 return (values, soft_finding, values) # TODO: check that race correlation is for Black def finding_6_6(self): """Race was not associated with cerebrovascular death.""" # same as finding_6_1? 
corr_df = self.get_corr() corr_race_death = corr_df['Race'].loc['Death to cerebrovascular disease'] soft_finding = abs(corr_race_death) < 0.05 return ([corr_race_death], soft_finding, [corr_race_death]) # TODO: check that gender correlation is for female def finding_6_7(self): """Baseline obesity was associated with female gender and less education, income, smoking, and exercise.""" corr_df = self.get_corr() values = [corr_df['Obesity'].loc['Gender'], corr_df['Obesity'].loc['Education'], corr_df['Obesity'].loc['Income'], corr_df['Obesity'].loc['Smoking'], corr_df['Obesity'].loc['Exercise']] soft_finding = values[0] > 0 and all(x < 0 for x in values[1:]) return (values, soft_finding, values) def finding_6_8(self): """Obesity at baseline was associated with depressive symptoms and hypertension at baseline. Note that we were unable to reproduce this result.""" corr_df = self.get_corr() values = [corr_df['Obesity'].loc['Depressive symptoms'], corr_df['Obesity'].loc['HTN']] soft_finding = all(x > 0 for x in values) return (values, soft_finding, values) def finding_6_9(self): """Obesity at baseline was not associated with cerebrovascular death in the pooled sample (Table 2).""" # same as finding_6_2? 
corr_df = self.get_corr() corr_obesity_death = corr_df['Obesity'].loc['Death to cerebrovascular disease'] soft_finding = abs(corr_obesity_death) < 0.05 return ([corr_obesity_death], soft_finding, [corr_obesity_death]) def finding_6_10(self): """According to Model 1 in the pooled sample, baseline obesity did not predict cerebrovascular mortality (HR = 0.86, 0.49-1.51), independent of demographic, socioeconomic, health behaviors, and health factors at baseline.""" pass def finding_6_11(self): """According to Model 2, race interacted with baseline obesity on outcome (HR = 3.17, 1.09-9.21), suggesting a stronger association between baseline obesity and future risk for cerebrovascular deaths for Blacks, in comparison to Whites (Table 3).""" pass def finding_6_12(self): """As Model 3 shows, obesity did not predict the outcome in Whites (HR = 0.69, 0.31-1.53).""" pass def finding_6_13(self): """Model 4 shows that obesity predicts risk of cerebrovascular mortality for Blacks (HR = 2.51, 1.43-4.39) (Table 4).""" pass
DataResponsibly/SynRD
SynRD/papers/assari2019baseline.py
assari2019baseline.py
py
18,601
python
en
code
0
github-code
36
[ { "api_name": "SynRD.publication.Publication", "line_number": 10, "usage_type": "name" }, { "api_name": "SynRD.publication.Finding", "line_number": 43, "usage_type": "call" }, { "api_name": "SynRD.publication.TAXONOMY.MEAN_DIFFERENCE", "line_number": 46, "usage_type": "at...
25651671407
from typing import Iterable, Tuple, TypeVar, Callable, Any, List, Dict, Union
import math
import numpy as np
import os.path
import torch
import torchaudio
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import warnings
import pandas as pd

import plots
from utils import validate_audio

# Useful references for the dataloading using iterable datasets:
# https://medium.com/speechmatics/how-to-build-a-streaming-dataloader-with-pytorch-a66dd891d9dd
# https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset
# https://discuss.pytorch.org/t/example-for-torch-utils-data-iterabledataset/101175/13
# https://github.com/pytorch/pytorch/issues/13246#issuecomment-905703662
# https://discuss.pytorch.org/t/implementing-an-infinite-loop-dataset-dataloader-combo/35567/4


def interpolate(x, ratio):
    '''
    Interpolate the x to have equal time steps as targets
    Input:
        x: (batch_size, time_steps, class_num)
    Output:
        out: (batch_size, time_steps*ratio, class_num)
    '''
    # Nearest-neighbour upsampling along the time axis: each frame is
    # duplicated `ratio` times via an inserted axis + repeat + reshape.
    (batch_size, time_steps, classes_num) = x.shape
    upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
    upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
    return upsampled


def convert_output_format_polar_to_cartesian(in_dict):
    # Converts each event entry [frame, class, azimuth_deg, elevation_deg]
    # into [frame, class, x, y, z] on the unit sphere.
    out_dict = {}
    for frame_cnt in in_dict.keys():
        if frame_cnt not in out_dict:
            out_dict[frame_cnt] = []
        for tmp_val in in_dict[frame_cnt]:
            ele_rad = tmp_val[3] * np.pi / 180.
            azi_rad = tmp_val[2] * np.pi / 180
            tmp_label = np.cos(ele_rad)
            x = np.cos(azi_rad) * tmp_label
            y = np.sin(azi_rad) * tmp_label
            z = np.sin(ele_rad)
            out_dict[frame_cnt].append([tmp_val[0], tmp_val[1], x, y, z])
    return out_dict


def _read_audio(fname: str, directory_root: str,
                resampler: Union[torch.nn.Sequential, None],
                trim_seconds: int = -1) -> Tuple[torch.Tensor, int, float]:
    '''  trim_seconds = to limit how many seconds to load '''
    # Returns (audio_tensor, sample_rate, duration_seconds).
    fpath = os.path.join(directory_root, fname)
    metadata = torchaudio.info(fpath)
    # -1 means "load everything" (torchaudio convention for num_frames).
    num_frames = trim_seconds if trim_seconds == -1 else trim_seconds * metadata.sample_rate
    this_audio, fs = torchaudio.load(fpath, num_frames=num_frames)
    duration_seconds = this_audio.shape[-1] / fs
    assert validate_audio(this_audio), f'ERROR: {fname} audio is not valid.'
    if resampler is not None:
        this_audio = resampler(this_audio)
    # NOTE(review): torch.tensor() on a tensor copies and will warn; kept as-is.
    return torch.tensor(this_audio, dtype=torch.float), fs, duration_seconds


def _read_time_array(fname: str, directory_root: str) -> List:
    ''' Time arrays are the full list of events for a whole audio file. This is before any parsing'''
    fpath = os.path.join(directory_root, fname)
    # The metadata CSV lives alongside the wav: swap the foa/mic folder and the
    # extension to locate it.
    fpath_csv = fpath.replace('mic', 'metadata').replace('foa', 'metadata').replace('wav', 'csv')
    this_time_array = pd.read_csv(fpath_csv, header=None).values
    return this_time_array


def load_output_format_file(fname: str, directory_root: str):
    """ Adapted from the official baseline.
    Loads DCASE output format csv file and returns it in dictionary format
    :param _output_format_file: DCASE output format CSV
    :return: _output_dict: dictionary
    """
    fpath = os.path.join(directory_root, fname)
    fpath_csv = fpath.replace('mic', 'metadata').replace('foa', 'metadata').replace('wav', 'csv')
    _output_dict = {}
    _fid = open(fpath_csv, 'r')
    # next(_fid)
    for _line in _fid:
        _words = _line.strip().split(',')
        _frame_ind = int(_words[0])
        if _frame_ind not in _output_dict:
            _output_dict[_frame_ind] = []
        if len(_words) == 5:  # polar coordinates
            _output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4])])
        elif len(_words) == 6:  # cartesian coordinates
            _output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4]), float(_words[5])])
    _fid.close()
    return _output_dict


def _add_rotated_label_each_frame(label, time_array4frame_event, start_frame, rotation_pattern=None):
    """ From Sony """
    # Writes the (optionally rotated) unit direction vector of one event into
    # the label tensor for 10 consecutive label frames.
    event_class = time_array4frame_event[1]
    azi_rad = time_array4frame_event[3] / 180 * np.pi
    ele_rad = time_array4frame_event[4] / 180 * np.pi
    if rotation_pattern:
        azi_reflection, azi_rotation, ele_reflection = rotation_pattern
    else:
        azi_reflection, azi_rotation, ele_reflection = [1, 0, 1]  # if None, no rotation
    rotated_azi_rad = azi_reflection * azi_rad + azi_rotation
    rotated_ele_rad = ele_reflection * ele_rad
    x_axis = 1 * np.cos(rotated_ele_rad) * np.cos(rotated_azi_rad)
    y_axis = 1 * np.cos(rotated_ele_rad) * np.sin(rotated_azi_rad)
    z_axis = 1 * np.sin(rotated_ele_rad)
    label[0, event_class, start_frame: start_frame + 10] = x_axis
    label[1, event_class, start_frame: start_frame + 10] = y_axis
    label[2, event_class, start_frame: start_frame + 10] = z_axis
    return (label)


def _get_labels(time_array, start_sec, fs, chunk_size_audio, rotation_pattern=None, multi_track=False, num_classes=12):
    """
    [frame number (int)], [active class index (int)], [track number index (int)], [azimuth (int)], [elevation (int)]
    Frame, class, and
    track enumeration begins at 0. Frames correspond to a temporal resolution of 100msec.
    Azimuth and elevation angles are given in degrees, rounded to the closest integer value,
    with azimuth and elevation being zero at the front, azimuth ϕ∈[−180∘,180∘], and
    elevation θ∈[−90∘,90∘]. Note that the azimuth angle is increasing counter-clockwise
    (ϕ=90∘ at the left).
    """
    # This 100 is the sampling frequency of the labels
    # And the 10 for index_diff stuff, is the desired sampling frequency, to match the spectrograms.
    # So the spectrograms use a step_size = 240, with fs = 24000, which is 10 ms
    # Therefore, here they have 100 / 10 = 10
    # My intuition is that a different step_size, would require to change this
    # TODO Is this really ok? Needs verification
    num_axis = 3  # X, Y, Z
    num_class = num_classes
    num_frame = round(chunk_size_audio / fs * 100) + 1  # Each frame == 100 ms (0.1 seconds)
    label = np.zeros([num_axis, num_class, num_frame])
    end_sec = start_sec + chunk_size_audio / fs
    index_diff = int(math.modf(start_sec * 10)[0] * 10)  # get second decimal place
    num_frame_wide = (int(np.ceil(end_sec * 10)) - int(np.floor(start_sec * 10)) + 1) * 10
    # "+ 1" is buffer for numerical error, such as index_diff=3 and num_frame_wide=130

    if not multi_track:
        # Single-track: all simultaneous events share one (3, class, time) tensor.
        label_wide = np.zeros([num_axis, num_class, num_frame_wide])
        for index, frame in enumerate(range(int(np.floor(start_sec * 10)), int(np.ceil(end_sec * 10)))):
            # Rows of the metadata array whose frame column matches this frame;
            # up to 3 simultaneous events are supported (shape (k, 5), k<=3).
            time_array4frame = time_array[time_array[:, 0] == frame]
            if time_array4frame.shape == (1, 5):
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[0], index * 10, rotation_pattern)
            elif time_array4frame.shape == (2, 5):
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[0], index * 10, rotation_pattern)
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[1], index * 10, rotation_pattern)
            elif time_array4frame.shape == (3, 5):
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[0], index * 10, rotation_pattern)
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[1], index * 10, rotation_pattern)
                label_wide = _add_rotated_label_each_frame(label_wide, time_array4frame[2], index * 10, rotation_pattern)
        label = label_wide[:, :, index_diff: index_diff + num_frame]
    else:
        # TODO This is not ready
        # Multi-track: one tensor per overlapping track, stacked at the end.
        label_wide_1 = np.zeros([num_axis, num_class, num_frame_wide])
        label_wide_2 = np.zeros([num_axis, num_class, num_frame_wide])
        label_wide_3 = np.zeros([num_axis, num_class, num_frame_wide])
        for index, frame in enumerate(range(int(np.floor(start_sec * 10)), int(np.ceil(end_sec * 10)))):
            time_array4frame = time_array[time_array[:, 0] == frame]
            if time_array4frame.shape == (1, 5):
                label_wide_1 = _add_rotated_label_each_frame(label_wide_1, time_array4frame[0], index * 10, rotation_pattern)
            elif time_array4frame.shape == (2, 5):
                label_wide_1 = _add_rotated_label_each_frame(label_wide_1, time_array4frame[0], index * 10, rotation_pattern)
                label_wide_2 = _add_rotated_label_each_frame(label_wide_2, time_array4frame[1], index * 10, rotation_pattern)
            elif time_array4frame.shape == (3, 5):
                label_wide_1 = _add_rotated_label_each_frame(label_wide_1, time_array4frame[0], index * 10, rotation_pattern)
                label_wide_2 = _add_rotated_label_each_frame(label_wide_2, time_array4frame[1], index * 10, rotation_pattern)
                label_wide_3 = _add_rotated_label_each_frame(label_wide_3, time_array4frame[2], index * 10, rotation_pattern)
        label = np.stack((
            label_wide_1[:, :, index_diff: index_diff + num_frame],
            label_wide_2[:, :, index_diff: index_diff + num_frame],
            label_wide_3[:, :, index_diff: index_diff + num_frame]
        ))
    return (label)


def _read_fnames(directory_root: str, list_dataset: str) -> List:
    """Reads the fnames in the list_dataset. Each fname corresponds to a single wav file in the dataset.
    This to prepare the dataset, before loading any audio or labels."""
    fnames = []
    fpath = os.path.join(directory_root, 'list_dataset', list_dataset)
    for fname in pd.read_table(fpath, header=None).values.tolist():
        if isinstance(fname, List):
            fname = fname[0]
        # Strip a leading "<dataset_dir>/" prefix so fnames stay relative to
        # directory_root.
        parent_dir = directory_root.split('/')[-1] + '/'
        if parent_dir in fname:
            fname = fname.replace(parent_dir, '')
        fnames.append(fname)
    return fnames


def get_adpit_labels_for_file(_desc_file: Dict, _nb_label_frames: int, num_classes: int = 13) -> np.ndarray:
    """
    ADAPATED from csl_feature_class from the baseline, with modifications to remove the dependcy to the class.

    Reads description file and returns classification based SED labels and regression based DOA labels
    for multi-ACCDOA with Auxiliary Duplicating Permutation Invariant Training (ADPIT)
    :param _desc_file: metadata description file
    :return: label_mat: of dimension [nb_frames, 6, 4(=act+XYZ), max_classes]
    """
    se_label = np.zeros((_nb_label_frames, 6, num_classes))  # [nb_frames, 6, max_classes]
    x_label = np.zeros((_nb_label_frames, 6, num_classes))
    y_label = np.zeros((_nb_label_frames, 6, num_classes))
    z_label = np.zeros((_nb_label_frames, 6, num_classes))

    for frame_ind, active_event_list in _desc_file.items():
        if frame_ind < _nb_label_frames:
            active_event_list.sort(key=lambda x: x[0])  # sort for ov from the same class
            active_event_list_per_class = []
            for i, active_event in enumerate(active_event_list):
                active_event_list_per_class.append(active_event)
                if i == len(active_event_list) - 1:  # if the last
                    if len(active_event_list_per_class) == 1:  # if no ov from the same class
                        # a0----
                        active_event_a0 = active_event_list_per_class[0]
                        se_label[frame_ind, 0, active_event_a0[0]] = 1
                        x_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[2]
                        y_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[3]
                        z_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[4]
                    elif len(active_event_list_per_class) == 2:  # if ov with 2 sources from the same class
                        # --b0--
                        active_event_b0 = active_event_list_per_class[0]
                        se_label[frame_ind, 1, active_event_b0[0]] = 1
                        x_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[2]
                        y_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[3]
                        z_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[4]
                        # --b1--
                        active_event_b1 = active_event_list_per_class[1]
                        se_label[frame_ind, 2, active_event_b1[0]] = 1
                        x_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[2]
                        y_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[3]
                        z_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[4]
                    else:  # if ov with more than 2 sources from the same class
                        # ----c0
                        active_event_c0 = active_event_list_per_class[0]
                        se_label[frame_ind, 3, active_event_c0[0]] = 1
                        x_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[2]
                        y_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[3]
                        z_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[4]
                        # ----c1
                        active_event_c1 = active_event_list_per_class[1]
                        se_label[frame_ind, 4, active_event_c1[0]] = 1
                        x_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[2]
                        y_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[3]
                        z_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[4]
                        # ----c2
                        active_event_c2 = active_event_list_per_class[2]
                        se_label[frame_ind, 5, active_event_c2[0]] = 1
                        x_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[2]
                        y_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[3]
                        z_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[4]
                elif active_event[0] != active_event_list[i + 1][0]:  # if the next is not the same class
                    if len(active_event_list_per_class) == 1:  # if no ov from the same class
                        # a0----
                        active_event_a0 = active_event_list_per_class[0]
                        se_label[frame_ind, 0, active_event_a0[0]] = 1
                        x_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[2]
                        y_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[3]
                        z_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[4]
                    elif len(active_event_list_per_class) == 2:  # if ov with 2 sources from the same class
                        # --b0--
                        active_event_b0 = active_event_list_per_class[0]
                        se_label[frame_ind, 1, active_event_b0[0]] = 1
                        x_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[2]
                        y_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[3]
                        z_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[4]
                        # --b1--
                        active_event_b1 = active_event_list_per_class[1]
                        se_label[frame_ind, 2, active_event_b1[0]] = 1
                        x_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[2]
                        y_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[3]
                        z_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[4]
                    else:  # if ov with more than 2 sources from the same class
                        # ----c0
                        active_event_c0 = active_event_list_per_class[0]
                        se_label[frame_ind, 3, active_event_c0[0]] = 1
                        x_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[2]
                        y_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[3]
                        z_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[4]
                        # ----c1
                        active_event_c1 = active_event_list_per_class[1]
                        se_label[frame_ind, 4, active_event_c1[0]] = 1
                        x_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[2]
                        y_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[3]
                        z_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[4]
                        # ----c2
                        active_event_c2 = active_event_list_per_class[2]
                        se_label[frame_ind, 5, active_event_c2[0]] = 1
                        x_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[2]
                        y_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[3]
                        z_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[4]
                    active_event_list_per_class = []

    label_mat = np.stack((se_label, x_label, y_label, z_label), axis=2)  # [nb_frames, 6, 4(=act+XYZ), max_classes]
    return label_mat


def get_labels_for_file(_desc_file, _nb_label_frames, num_classes: int = 13):
    """
    ADAPTED from csl_feature_class from the baseline, with modifications to remove the dependcy to the class.
    Reads description file and returns classification based SED labels and regression based DOA labels

    :param _desc_file: metadata description file
    :return: label_mat: of dimension [nb_frames, 3*max_classes], max_classes each for x, y, z axis,
    """
    # If using Hungarian net set default DOA value to a fixed value greater than 1 for all axis. We are choosing a fixed value of 10
    # If not using Hungarian net use a deafult DOA, which is a unit vector. We are choosing (x, y, z) = (0, 0, 1)
    se_label = np.zeros((_nb_label_frames, num_classes))
    x_label = np.zeros((_nb_label_frames, num_classes))
    y_label = np.zeros((_nb_label_frames, num_classes))
    z_label = np.zeros((_nb_label_frames, num_classes))

    for frame_ind, active_event_list in _desc_file.items():
        if frame_ind < _nb_label_frames:
            for active_event in active_event_list:
                se_label[frame_ind, active_event[0]] = 1
                x_label[frame_ind, active_event[0]] = active_event[2]
                y_label[frame_ind, active_event[0]] = active_event[3]
                z_label[frame_ind, active_event[0]] = active_event[4]
    label_mat = np.concatenate((se_label, x_label, y_label, z_label), axis=1)

    # Refortmat as ACCDOA:
    output = torch.zeros(size=(3, num_classes, label_mat.shape[0]))
    output = output.numpy()
    for i in range(se_label.shape[-1]):
        # coso = se_label[:, i] > 0.5
        # print(np.count_nonzero(coso))
        ss = x_label[:, i]
        # bazinga = torch.stack([torch.from_numpy(x_label[:, i]´), y_label[:, i], z_label[:, i]], dim=0)
        bazinga = np.stack([x_label[:, i], y_label[:, i], z_label[:, i]])
        output[:, i, :] = bazinga
    output = torch.from_numpy(output)
    # Normalize the direction vectors to unit length (epsilon avoids div by 0).
    norm = torch.linalg.vector_norm(output, ord=2, dim=-3)
    output = output / (norm + 1e-10)

    if torch.any(torch.isnan(output)):
        raise ValueError('ERROR: NaNs in the otuput labels')

    ####sampler = resampler(scale_factor=(10))  # TODO: This is incompatible with my test of backeds
    ####output = sampler(output)
    #output = interpolate(output.detatch().cpu().numpy(), 10)  # TODO Not tested
    output = torch.repeat_interleave(output, 10, dim=-1)  # TODO his is better, but still gets bad when the output size is large
    return output.numpy()

    # NOTE(review): unreachable — the function already returned above.
    return label_mat


def _random_slice(audio: torch.Tensor, fs: int, chunk_size_audio: float, trim_wavs: int,
                  clip_length_seconds: int = 60) \
        -> Tuple[torch.Tensor, int]:
    """Returns a random slice of an audio and the corresponding starting time in sencods (useful to extract labels) """
    # Now we do it in seconds
    if trim_wavs > 0:
        star_min_sec, start_max_sec = 2, math.floor(trim_wavs - (chunk_size_audio/fs + 2))
    else:
        star_min_sec, start_max_sec = 0, math.floor(clip_length_seconds - chunk_size_audio/fs)
    if star_min_sec == start_max_sec:
        start_sec = star_min_sec
    else:
        # Random integer start second, capped so the slice fits in the audio.
        start_sec = np.round(np.random.randint(star_min_sec, min((audio.shape[-1] - chunk_size_audio / 2) / fs, start_max_sec), 1))[0]
    start_index = start_sec * fs
    sliced_audio = audio[:, start_index: start_index + round(chunk_size_audio)]
    return sliced_audio, start_sec


def _fixed_slice(audio: torch.Tensor, fs: int, chunk_size_audio: float) -> Tuple[torch.Tensor, int]:
    """Returns a fixed slice of an audio and its corresponding time array (label)"""
    start_sec = 5  # Hardcoded start at 5 seconds
    start_sample = start_sec * fs
    sliced_audio = audio[:, start_sample : int(start_sample + chunk_size_audio)]
    return sliced_audio, start_sec


class resampler(nn.Sequential):
    # Thin wrapper around nn.functional.interpolate used to resample label tensors.
    def __init__(self, scale_factor=(1, 0.1)):
        super().__init__()
        self.scale_factor = scale_factor

    def forward(self, input):
        out = nn.functional.interpolate(input, scale_factor=self.scale_factor, mode='nearest')
        return out


class InfiniteDataLoader(DataLoader):
    ''' DataLoader that keeps returning batches even after the dataset is exhausted.
    Useful when the __getitem__ of the dataset returns a random slice.

    Ref:
    https://gist.github.com/MFreidank/821cc87b012c53fade03b0c7aba13958
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Initialize an iterator over the dataset.
        self.dataset_iterator = super().__iter__()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            batch = next(self.dataset_iterator)
        except StopIteration:
            # Dataset exhausted, use a new fresh iterator.
            self.dataset_iterator = super().__iter__()
            batch = next(self.dataset_iterator)
        return batch


class DCASE_SELD_Dataset(Dataset):
    """Dataset for the DCASE SELD (Task3), supports version 2021 and 2022.
    This dataset first loads all the audio and labels to memory.
    In the getitem, it returns a slice, from wavs.
    This dataset is a map dataset, so each "epoch" will see each wav file only once.
    But the slice of each wav can be randomly selected.
    The audios and labels are stored in memory, but the slices are computed at runtime.

    Parameters:
        directory_root - Path to the the directory that contains 'foa_dev', 'metadata', 'list_dataset'
        list_dataset - File with the wav filenames that we want to load. Filesnames are relative to directory_root.
        trim_wavs - Trim wavs to this number of seconds when loading audio, so we load shorter wavs.
        chunk_size - Size of the chunkds (slices) returned in getitem. In samples.
        chuck_mode - {'full', 'fixed', 'random'} Where the getitem:
            - Full - Returns the full wav and labels. Useful for validation, and to compute statistics.
            - Fixed - Returns a slice at fixed start time of each wav. Useful for debugging.
            - Random - Returns a random slice each time.
        return_fname - Returns fname during the getitem
        multi_track - Enables multi-track ACCDOA for the labels
        ignore_labels - Use this to avoid returning labels in the get item. Useful for evaluation mode when there are no labels.
        labels_backend - Method to extract the labels. Currently baseline works best, as it is based on the official baseline code.
    """
    def __init__(self,
                 directory_root: str = './data/',
                 list_dataset: str = 'dcase2021t3_foa_overfit_vrgpu.txt',
                 trim_wavs: float = -1,  # in seconds
                 chunk_size: int = 48000,  # in samples
                 chunk_mode: str = 'fixed',
                 return_fname: bool = False,
                 multi_track: bool = False,
                 num_classes: int = 13,
                 ignore_labels: bool = False,
                 labels_backend: str = 'sony',
                 pad_labels: bool = True):
        super().__init__()
        self.directory_root = directory_root
        self.list_dataset = list_dataset  # list of wav filenames , e.g. './data_dcase2021_task3/foa_dev/dev-val/fold5_room1_mix001.wav'
        self.chunk_size_audio = chunk_size
        self.chunk_mode = chunk_mode
        self.trim_wavs = trim_wavs  # Trims the inputs wavs to the selected length in seconds
        self.return_fname = return_fname
        self.multi_track = multi_track
        self.num_classes = num_classes
        self.ignore_labels = ignore_labels  # This is to avoid loading labels. Useful when doing evaluation.
        self.labels_backend = labels_backend  # Code to use when extracting labels from CSVs. For multitrack, we need the baseline. {'sony', 'backend'}
        self.pad_labels = pad_labels  # This is just to take into account that spectrograms will pad . Use when backend == baseline, and model = CRNN
        self.resampler = None

        if self.multi_track and self.labels_backend == 'sony':
            warnings.warn('WARNING: When using multi-track labels, we should use the baseline back end.')

        self._fnames = []
        self._audios = {}
        self.durations = {}
        self._fs = {}  # Per wav
        self._time_array_dict = {}  # Per wav

        # Load full wavs and time_arrays to memory
        self._fnames = _read_fnames(directory_root=self.directory_root, list_dataset=self.list_dataset)
        for fname in self._fnames:
            audio, fs, duration = _read_audio(fname=fname, directory_root=self.directory_root,
                                              resampler=self.resampler, trim_seconds=self.trim_wavs)
            if not self.ignore_labels:
                if self.labels_backend == 'sony':
                    time_array = _read_time_array(fname=fname, directory_root=self.directory_root)
                elif self.labels_backend == 'baseline':
                    time_array = load_output_format_file(fname=fname, directory_root=self.directory_root)
                    time_array = convert_output_format_polar_to_cartesian(time_array)
                    # NOTE(review): multi-track uses duration * 100 frames while
                    # single-track uses duration * 10 — confirm this asymmetry is intended.
                    if self.multi_track:
                        time_array = get_adpit_labels_for_file(_desc_file=time_array,
                                                               _nb_label_frames=math.ceil(duration * 100),
                                                               num_classes=self.num_classes)
                    else:
                        time_array = get_labels_for_file(_desc_file=time_array,
                                                         _nb_label_frames=math.ceil(duration * 10),
                                                         num_classes=num_classes)
                self._time_array_dict[fname] = time_array
            self._audios[fname] = audio
            self._fs[fname] = fs
            self.durations[fname] = duration
        self.__validate()
        print(self)

    def __validate(self):
        # Internal sanity check: all per-wav dictionaries must be aligned.
        assert len(self._fnames) == len(self._audios), 'Fnames and audios should have the same count'
        assert len(self._fnames) == len(self.durations), 'Fnames and durations should have the same count'
        assert len(self._fnames) == len(self._fs), 'Fnames and fs should have the same count'
        if not self.ignore_labels:
            assert len(self._fnames) == len(self._time_array_dict), 'Fnames and time_arrays should have the same count'

    def __len__(self):
        return len(self._fnames)

    def get_fnames(self):
        return self._fnames

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of unique wav files : {}\n'.format(len(self._fnames))
        fmt_str += '    Root Location: {}\n'.format(self.directory_root)
        fmt_str += '    List of files: {}\n'.format(self.list_dataset)
        fmt_str += '    Chunk size: {}\n'.format(self.chunk_size_audio)
        fmt_str += '    Chunk Mode: {}\n'.format(self.chunk_mode)
        fmt_str += '    Trim audio: {}\n'.format(self.trim_wavs)
        fmt_str += '    Multi_track: {}\n'.format(self.multi_track)
        fmt_str += '    Ignore labels: {}\n'.format(self.ignore_labels)
        fmt_str += '    Labels Backend: {}\n'.format(self.labels_backend)
        return fmt_str

    def __getitem__(self, item):
        fname = self._fnames[item]
        audio = self._audios[fname]
        fs = self._fs[fname]
        duration = self.durations[fname]
        if not self.ignore_labels:
            time_array = self._time_array_dict[fname]
        else:
            time_array = None

        # Select a slice
        if self.chunk_mode == 'fixed':
            audio, start_sec = _fixed_slice(audio, fs, chunk_size_audio=self.chunk_size_audio)
            labels_duration = self.chunk_size_audio
        elif self.chunk_mode == 'random':
            audio, start_sec = _random_slice(audio, fs, chunk_size_audio=self.chunk_size_audio,
                                             trim_wavs=self.trim_wavs, clip_length_seconds=duration)
            labels_duration = self.chunk_size_audio
        elif self.chunk_mode == 'full':
            start_sec = 0
            labels_duration = audio.shape[-1]

        if not self.ignore_labels:
            if self.labels_backend == 'sony':
                label = _get_labels(time_array, start_sec=start_sec, fs=fs, chunk_size_audio=labels_duration,
                                    rotation_pattern=None, multi_track=self.multi_track,
                                    num_classes=self.num_classes)
            elif self.labels_backend == 'custom':
                # NOTE(review): _get_labels_custom is not defined in this chunk —
                # presumably defined elsewhere; verify it exists.
                label = _get_labels_custom(time_array, start_sec=start_sec, fs=fs,
                                           chunk_size_audio=labels_duration,
                                           num_classes=self.num_classes)
            else:
                if not self.multi_track:
                    start_frame = int(start_sec) * 10
                    end_frame = start_frame + round(labels_duration / fs * 100)
                    if self.pad_labels:
                        end_frame += 1
                    label = time_array[..., start_frame: end_frame]
                    #raise NotImplementedError
                else:
                    # TODO Hardcoded fs for laels at 100 ms
                    start_frame = int(start_sec) * 10
                    end_frame = start_frame + math.ceil(labels_duration / fs * 100) + 1
                    #label = get_adpit_labels_for_file(_desc_file=time_array, _nb_label_frames=math.ceil(duration * 10), num_classes=self.num_classes)
                    if end_frame > time_array.shape[0]:
                        # Zero-pad the label when the requested window overruns it.
                        label = np.concatenate([time_array,
                                                np.zeros([end_frame - start_frame - time_array.shape[0],
                                                          *time_array.shape[1:]])], axis=0)
                    else:
                        label = time_array[start_frame: end_frame, ...]
                        if label.shape[0] < end_frame - start_frame:
                            label = np.concatenate([label,
                                                    np.zeros([end_frame - start_frame - label.shape[0],
                                                              *label.shape[1:]])], axis=0)
        else:
            label = np.empty(1)

        if self.return_fname:
            return audio, torch.from_numpy(label.astype(np.float32)), fname
        else:
            return audio, torch.from_numpy(label.astype(np.float32))


def test_dataset_train_iteration(num_iters=100, batch_size=32, num_workers=4):
    # Here we test a typical train iteration, with the map dataset, but with infinite dataloader
    # The main idea is that we dont have epochs, but iterations.
# This supports batching, and multiple workers # This looks OK, each "epoch" samples each wavs only once, but with infinite dataloader we itierate foreacher import matplotlib.pyplot as plt import seaborn as sns from itertools import islice dataset_train = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022', list_dataset='dcase2022_devtrain_all.txt', chunk_size=int(24000 * 1.27), chunk_mode='random', trim_wavs=-1, return_fname=True) loader_train = InfiniteDataLoader(dataset_train, batch_size=batch_size, num_workers=num_workers, shuffle=True, drop_last=True) # Begin iterating ctr = 0 ctr_fnames = {} for (x_batch, y_batch, fnames) in islice(loader_train, num_iters): if ctr < 5: print(f'iter: {ctr}') print(f'x_batch.shape: {x_batch.shape}') print(f'y_batch.shape: {y_batch.shape}') print(torch.mean(x_batch, dim=(-1, -2))) for fname in fnames: if fname in ctr_fnames: ctr_fnames[fname] += 1 else: ctr_fnames[fname] = 1 ctr += 1 if ctr > 5: break # Display counter of how many times each wav was sliced print(f'There are {len(ctr_fnames)} unique fnames.') f, ax = plt.subplots(figsize=(10, 15)) df = pd.DataFrame(list(ctr_fnames.items())) df.columns = ['fname', 'count'] sns.barplot(x="count", y="fname", data=df, label="count", color="b") sns.despine(left=True, bottom=True) plt.show() # Display wav durations f, ax = plt.subplots(figsize=(10, 15)) df = pd.DataFrame(list(dataset_train.durations.items())) df.columns = ['fname', 'duration'] sns.barplot(x="duration", y="fname", data=df, label="duration", color="b") sns.despine(left=True, bottom=True) plt.show() def _get_padders(chunk_size_seconds: float = 1.27, duration_seconds: float = 60.0, overlap: float = 0.5, audio_fs=24000, labels_fs=100): # Wavs: fs = audio_fs audio_full_size = fs * duration_seconds audio_chunk_size = round(fs * chunk_size_seconds) ###audio_pad_size = math.ceil(audio_full_size / audio_chunk_size) + math.ceil(audio_fs / labels_fs * 1) audio_pad_size = 
(math.ceil(audio_full_size / audio_chunk_size) * audio_chunk_size) - audio_full_size audio_padder = nn.ConstantPad1d(padding=(0, audio_pad_size), value=0.0) audio_step_size = math.floor(audio_chunk_size * overlap) # Labels: labels_fs = labels_fs # 100 --> 10 ms labels_full_size = labels_fs * duration_seconds labels_chunk_size = round(labels_fs * chunk_size_seconds) + 1 labels_pad_size = math.ceil(labels_full_size / labels_chunk_size) * labels_chunk_size - labels_full_size labels_padder = nn.ConstantPad2d(padding=(0, labels_pad_size, 0, 0), value=0.0) labels_step_size = math.ceil(labels_chunk_size * overlap) # Additional padding, in case the labels are shorter than the audio while True: #num_chunks_audio = math.ceil(audio_full_size / audio_chunk_size) #num_chunks_labels = math.ceil(labels_full_size / labels_chunk_size) num_chunks_audio = (audio_full_size + audio_pad_size) / audio_chunk_size num_chunks_labels = (labels_full_size + labels_pad_size) / labels_chunk_size if num_chunks_labels < num_chunks_audio: labels_pad_size += labels_chunk_size labels_padder = nn.ConstantPad2d(padding=(0, labels_pad_size, 0, 0), value=0.0) else: break audio_padding = {'padder': audio_padder, 'chunk_size': audio_chunk_size, 'hop_size': audio_step_size, 'full_size': audio_full_size} labels_padding = {'padder': labels_padder, 'chunk_size': labels_chunk_size, 'hop_size': labels_step_size, 'full_size': labels_full_size} return audio_padding, labels_padding def test_validation_clean(): # Here I am testing how to do the validation # The idea is that I want to iterate the full wavs, to get the predictions # So we get full length audio and labels from the dataset # Then we split into chunks manually # And iterate over wavs, using a dataloader for each one # Other useful function, torch.chunks, torch.split batch_size = 32 # This depends on GPU memory dataset = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022', list_dataset='dcase2022_devtrain_all.txt', 
chunk_mode='full', trim_wavs=-1, return_fname=True) spec = torchaudio.transforms.Spectrogram( n_fft=512, win_length=512, hop_length=240, ) all_labels = [] print(f'Iterating {len(dataset)} fnames in dataset.') for i in range(len(dataset)): # Analyze audio in full size audio, labels, fname = dataset[i] duration = dataset.durations[fname] all_labels.append(labels) print(f'Full audio:') print(audio.shape) print(f'Full spec:') print(spec(audio).shape) print(f'Full labels:') print(labels.shape) audio_padding, labels_padding = _get_padders(chunk_size_seconds=1.27, duration_seconds=math.floor(duration), overlap=1, # Other values e.g. 32/128 are ok, audio_fs=24000, labels_fs=100) # To process audio in GPU, split into chunks (that can be overlapped) audio = audio_padding['padder'](audio) audio_chunks = audio.unfold(dimension=1, size=audio_padding['chunk_size'], step=audio_padding['hop_size']).permute((1, 0, 2)) labels = labels_padding['padder'](labels) labels_chunks = labels.unfold(dimension=-1, size=labels_padding['chunk_size'], step=labels_padding['hop_size']).permute((2,0,1,3)) print(f'Full padded audio:') print(audio.shape) print(f'Full padded labels:') print(labels.shape) tmp = torch.utils.data.TensorDataset(audio_chunks, labels_chunks) loader = DataLoader(tmp, batch_size=batch_size, shuffle=False, drop_last=False) # Loader per wav to get batches for ctr, (audio, labels) in enumerate(loader): print(f'Processing batch {ctr}') outo = spec(audio) print(f'Audio shape = {audio.shape}') print(f'Spec shape = {outo.shape}') print(f'Labels shape = {labels.shape}') assert outo.shape[-1] == labels.shape[-1], \ 'Wrong shapes, the spectrogram and labels should have the same number of frames. 
def test_validation_histograms():
    """Iterate full-length wavs from the dataset and analyze their labels.

    Validation idea: load each wav at full length (audio + labels), so the
    labels can later be chunked manually and fed through a per-wav
    DataLoader.  Update 15.06.2022: this is also very useful to analyze
    the labels themselves.
    """
    dataset = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                 list_dataset='dcase2022_devtrain_all.txt',
                                 chunk_mode='full',
                                 trim_wavs=-1,
                                 return_fname=True,
                                 num_classes=13,
                                 multi_track=False)

    all_labels = []
    print(f'Iterating {len(dataset)} fnames in dataset.')
    for i in range(len(dataset)):
        # Audio is loaded too, but only the labels are analyzed here.
        _audio, labels, _fname = dataset[i]
        all_labels.append(labels)

    # Histogram over the whole split, then over a single wav (plots too).
    count_active_classes(all_labels)
    count_active_classes(all_labels[0:1])


def count_active_classes(all_labels: List, detection_threshold: float = 0.5):
    """Show a histogram of active (detected) frames per class.

    A class counts as active in a frame when the L2 norm over the
    coordinate axis (dim=-3) exceeds ``detection_threshold``.

    Tip: call it with only one label to also get the per-wav plots:
        count_active_classes(all_labels[0:1])
    """
    import plots
    import matplotlib.pyplot as plt
    import seaborn as sns

    if len(all_labels) == 1:
        n_classes = list(range(all_labels[0].shape[-2]))
        plots.plot_labels_cross_sections(all_labels[0], n_classes=n_classes,
                                         plot_cartesian=True)
        plots.plot_labels(all_labels[0], n_classes=n_classes,
                          savefig=False, plot_cartesian=True)

    all_count_detections = {}
    for this_label in all_labels:
        # Norm across the coordinate axis -> [classes, frames] activity map.
        vec_norms = torch.linalg.vector_norm(this_label, ord=2, dim=-3)
        for cls in range(this_label.shape[-2]):
            # Frames where this class is active; counting True entries
            # directly replaces the old len(mask.nonzero(...)) detour.
            mask_detected_events = vec_norms[cls, :] > detection_threshold
            n_detected = int(mask_detected_events.sum().item())
            all_count_detections[cls] = all_count_detections.get(cls, 0) + n_detected

    fig, ax = plt.subplots(figsize=(10, 15))
    df = pd.DataFrame(list(all_count_detections.items()),
                      columns=['class_id', 'count'])
    sns.barplot(x="class_id", y="count", data=df, label="class_id", color="b")
    sns.despine(left=False, bottom=False)
    plt.show()


def test_multi_track():
    """Manual test matrix for the dataset options.

    Check (manually):
        - chunk_mode: {'fixed', 'random', 'full'}
        - multi_track: True, False
        - labels_backend: {'sony', 'baseline'}

    Update 21.07.2022: both backends look good for single ACCDOA; at
    least they look the same.
    """
    dataset = DCASE_SELD_Dataset(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                                 list_dataset='dcase2022_devtrain_debug.txt',
                                 chunk_mode='full',          # test sony and baseline
                                 chunk_size=30480,           # 30480, 61200, 122640, 144000, with fixed chunk
                                 trim_wavs=-1,
                                 return_fname=True,
                                 num_classes=13,
                                 multi_track=False,          # test sony and baseline
                                 labels_backend='baseline',  # test sony and baseline
                                 pad_labels=True)            # True only for spectrograms
    audio, labels, fname = dataset[0]

    # Multi-track labels carry an extra leading track axis; plot one track.
    this_label = labels[2] if len(labels.shape) > 3 else labels
    plots.plot_labels(this_label)

    # Deliberate stop while debugging: this still fails when using full
    # wavs with the baseline backend (the size is not correct, it seems to
    # crop somewhere), and the vanilla multitrack puts all the activity in
    # the first track.
    raise ValueError


def compare_backends():
    """Compare the 'sony' vs 'baseline' label backends on one wav.

    The two backends produce labels at different rates, so each direction
    resamples one of them before computing a squared error.
    """
    wav_id = 42

    # Shared constructor arguments; only the backend differs.
    common = dict(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2022',
                  list_dataset='dcase2022_devtrain_all.txt',
                  chunk_mode='full',
                  chunk_size=30480,
                  trim_wavs=-1,
                  return_fname=True,
                  num_classes=13,
                  multi_track=False)
    dataset_sony = DCASE_SELD_Dataset(labels_backend='sony', **common)
    dataset_baseline = DCASE_SELD_Dataset(labels_backend='baseline', **common)

    audio_sony, labels_sony, fname_sony = dataset_sony[wav_id]
    audio_base, labels_base, fname_base = dataset_baseline[wav_id]

    class t_transform(nn.Sequential):
        """Nearest-neighbor resampler along the label time axis."""

        def __init__(self, scale_factor=(1, 0.1)):
            super().__init__()
            self.scale_factor = scale_factor

        def forward(self, input):
            return nn.functional.interpolate(input,
                                             scale_factor=self.scale_factor,
                                             mode='nearest')

    # Direction 1: downsample the sony labels to the baseline rate.
    target_transform = t_transform()
    labels_sony_downsample = target_transform(labels_sony[None, ...])[0]
    labels_sony_padded = torch.zeros_like(labels_base)
    labels_sony_padded[:, :, 0:labels_sony_downsample.shape[-1]] = labels_sony_downsample
    error = (labels_base - labels_sony_padded) ** 2
    print(f'Error = {error.sum()}')

    # Direction 2: upsample the baseline labels to the sony rate.
    target_transform2 = t_transform(scale_factor=(1, 10))
    labels_base_padded = target_transform2(labels_base[None, ...])[0]
    labels_sony_padded = torch.zeros_like(labels_base_padded)
    labels_sony_padded[:, :, 0:labels_sony.shape[-1]] = labels_sony
    error = (labels_base_padded - labels_sony_padded) ** 2
    print(f'Error = {error.sum()}')


def compare_backends_no_pad():
    """Compare the 'sony' vs 'baseline' backends over a whole split, unpadded.

    Update 22.07.2022: this seems ok for now; there is a slight mismatch
    in total length between the backends, and the sony backend chops up
    some events, but for now the baseline backend is workable.
    """
    # NOTE: the original also built a pair of dcase2022 datasets here and
    # immediately overwrote them with these; that dead construction is gone.
    common = dict(directory_root='/m/triton/scratch/work/falconr1/sony/data_dcase2021_task3',
                  list_dataset='dcase2021t3_foa_devtest.txt',
                  chunk_mode='full',
                  chunk_size=30480,
                  trim_wavs=-1,
                  return_fname=True,
                  num_classes=13,
                  multi_track=False)
    dataset_sony = DCASE_SELD_Dataset(labels_backend='sony', **common)
    dataset_baseline = DCASE_SELD_Dataset(labels_backend='baseline', **common)

    for wav_id in range(len(dataset_sony)):
        audio_sony, labels_sony, fname_sony = dataset_sony[wav_id]
        audio_base, labels_base, fname_base = dataset_baseline[wav_id]
        # Crop the sony labels to the baseline length before comparing.
        error = (labels_sony[..., 0:labels_base.shape[-1]] - labels_base) ** 2
        print(f'Error = {error.sum()}')

    # Look at some of them
    wav_id = 1
    audio_sony, labels_sony, fname_sony = dataset_sony[wav_id]
    audio_base, labels_base, fname_base = dataset_baseline[wav_id]
    plots.plot_labels_cross_sections(labels_sony, title='Sony')
    plots.plot_labels_cross_sections(labels_base, title='Baseline')


if __name__ == '__main__':
    from utils import seed_everything

    seed_everything(1234, mode='balanced')
    test_multi_track()
    test_validation_histograms()
    test_dataset_train_iteration()  # OK, I am happy
    test_validation_clean()         # seems ok, except when using overlaps
    print('End of test')
rfalcon100/seld_dcase2022_ric
dataset/dcase_dataset.py
dcase_dataset.py
py
51,279
python
en
code
6
github-code
36
[ { "api_name": "numpy.pi", "line_number": 45, "usage_type": "attribute" }, { "api_name": "numpy.pi", "line_number": 46, "usage_type": "attribute" }, { "api_name": "numpy.cos", "line_number": 48, "usage_type": "call" }, { "api_name": "numpy.cos", "line_number": ...
70848016743
import sys
import os.path
import re
import warnings
from io import StringIO

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices
import six


class PerfData():
    """Thin reader around a CSV file of performance results."""

    __DATETIME_HEADER__ = "start-time"
    __PERF_HEADER__ = __DATETIME_HEADER__ + ",response-time"

    def __init__(self, filename):
        self._filename = filename

    def data(self):
        """Return the whole results file as one string."""
        with open(self._filename) as f:
            return f.read()

    def headers(self):
        """Header line that starts each run section."""
        return self.__PERF_HEADER__

    def datetime_headers(self):
        """Name of the column to parse as datetimes."""
        return self.__DATETIME_HEADER__


class PerformanceRunIterator():
    """Iterate the individual runs inside one results blob.

    A results file is several CSV sections concatenated together, each
    starting with the same header line; each step yields one section
    (header included) as text.
    """

    def __init__(self, data, header):
        self._data = data
        self._current_index = 0
        self._perf_header = header

    def __iter__(self):
        self._header_indexes = [m.start() for m in re.finditer(self._perf_header, self._data)]
        self._header_indexes.append(len(self._data))
        return self

    def __next__(self):
        # BUG FIX: this method was named `next` only, which satisfies the
        # Python 2 iterator protocol but breaks `for run in iterator` on
        # Python 3 (which calls __next__).
        if self._current_index + 1 >= len(self._header_indexes):
            raise StopIteration
        line = self._line_at_index(self._current_index)
        self._current_index = self._current_index + 1
        return line

    next = __next__  # keep Python 2 compatibility (this file still uses six)

    def _line_at_index(self, position):
        start = self._header_indexes[position]
        end = self._header_indexes[position + 1]
        return six.text_type(self._data[start:end])


def read_throughput_data(filename):
    """Read every run from `filename`, trim its edges, and concatenate."""
    perf_data = PerfData(filename)
    sections = []
    for run in PerformanceRunIterator(perf_data.data(), perf_data.headers()):
        run_dataframe = pd.read_csv(StringIO(run),
                                    parse_dates=[perf_data.datetime_headers()])
        trimmed_section = trim_edges(run_dataframe)
        if len(trimmed_section) > 0:
            sections.append(trimmed_section)
    # DataFrame.append was removed in pandas 2.0 (and was quadratic in a
    # loop); collect the sections and concatenate once instead.
    df = pd.concat(sections) if sections else pd.DataFrame()
    # Reset the index because it is a Frankenstein of smaller indexes
    df = df.reset_index().drop('index', axis=1)
    return df


def trim_edges(data):
    """Keep only samples between the first and last one-second bucket.

    NOTE(review): the resampled index spans the run's full time range, so
    this filter is very permissive — confirm it really trims warm-up /
    cool-down as intended.
    """
    indexes = data.set_index('start-time').resample('1S').aggregate(lambda x: 1).index
    test_start_time = indexes[0]
    test_end_time = indexes[-1]
    return data[(data['start-time'] >= test_start_time) & (data['start-time'] <= test_end_time)]


def process_throughput_data(data):
    """Bucket responses per second; return (resampler, per-second counts)."""
    buckets = data.set_index('start-time')['response-time'].resample('1S')
    # NOTE(review): dict-style renaming in aggregate() is deprecated in
    # newer pandas ("nested renaming") — verify against the pinned version.
    throughput_data_set = buckets.aggregate({"throughput": lambda x: 0 if x.count() == 0 else x.count()})
    throughput_data_set = throughput_data_set.reset_index()
    # fillna(method='ffill') is deprecated; .ffill() is the same operation.
    throughput_data_set = throughput_data_set.ffill()
    return buckets, throughput_data_set


def generate_fit_line(data):
    """Fit latency ~ throughput with an inverse-Gaussian GLM.

    Returns the throughput domain, the predicted latency over it, and the
    (rounded) maximum observed throughput.
    """
    y, x = dmatrices('latency ~ throughput', data=data, return_type='dataframe')
    fit = sm.GLM(y, x,
                 family=sm.families.InverseGaussian(sm.families.links.inverse_squared)).fit()
    max_throughput = data['throughput'].max()
    min_throughput = data['throughput'].min()
    domain = np.arange(min_throughput, max_throughput)
    # Design matrix for prediction: intercept column + throughput column.
    prediction_inputs = np.ones((len(domain), 2))
    prediction_inputs[:, 1] = domain
    fit_line = fit.predict(prediction_inputs)
    return domain, fit_line, round(max_throughput)


if __name__ == '__main__':
    matplotlib.style.use('ggplot')
    matplotlib.rcParams['figure.figsize'] = 9, 6
    matplotlib.rcParams['legend.loc'] = 'best'
    matplotlib.rcParams['figure.dpi'] = 120
    # We'll need these packages for plotting fit lines
    warnings.filterwarnings('ignore')

    performanceResultsFile = sys.argv[1]
    # Explicit check instead of assert: asserts vanish under `python -O`.
    if not os.path.isfile(performanceResultsFile):
        sys.exit('Missing performance results file')

    compareDatasets = False
    if compareDatasets and not os.path.isfile('old_perfResults.csv'):
        sys.exit('Missing old performance results file "old_perfResults.csv"')

    goData = read_throughput_data(performanceResultsFile)
    throughputBuckets, throughputData = process_throughput_data(goData)
    if compareDatasets:
        oldGoData = read_throughput_data('old_perfResults.csv')
        oldThroughputBuckets, oldThroughputData = process_throughput_data(oldGoData)

    goData['throughput'] = throughputBuckets.transform(len).reset_index()['response-time']
    goData.columns = ['start-time', 'latency', 'throughput']
    if compareDatasets:
        oldGoData['throughput'] = oldThroughputBuckets.transform(len).reset_index()['response-time']
        oldGoData.columns = ['start-time', 'latency', 'throughput']

    domain, goFitLine, xLimit = generate_fit_line(goData)
    if compareDatasets:
        oldDomain, oldGoFitLine, oldXLimit = generate_fit_line(oldGoData)

    fig, ax = plt.subplots()
    # Change the value of `c` to change the color. http://matplotlib.org/api/colors_api.html
    ax = goData.plot(ax=ax, kind='scatter', x='throughput', y='latency',
                     c='b', marker='.', alpha=0.2)
    ax.plot(domain, goFitLine, c='b', lw=2)  # Plot the fit line
    if compareDatasets:
        ax = oldGoData.plot(ax=ax, kind='scatter', x='throughput', y='latency',
                            c='r', marker='.', alpha=0.2)
        ax.plot(oldDomain, oldGoFitLine, c='r', lw=2)  # Plot the fit line
        # NOTE(review): the legend labels only make sense with both datasets
        # plotted, so it lives inside this branch.
        ax.legend(['after', 'before'])

    # To update x & y axis range change the parameters in function set_(x/y)lim(lower_limit, uppper_limit)
    ax.autoscale(True)
    ax.autoscale_view(True, True, True)
    plt.xlabel('Throughput (requests/sec)')
    plt.ylabel('Latency (sec)')
    plt.title('Headroom plot', y=1.05)
    plt.plot()

    filenameForPlot = performanceResultsFile[:-4] + "Plot.png"
    plt.savefig(filenameForPlot)
    print("saving graph to " + filenameForPlot)
cloudfoundry/credhub-perf-release
src/headroomplot/headroomplot.py
headroomplot.py
py
5,644
python
en
code
0
github-code
36
[ { "api_name": "matplotlib.use", "line_number": 8, "usage_type": "call" }, { "api_name": "re.finditer", "line_number": 43, "usage_type": "call" }, { "api_name": "six.text_type", "line_number": 60, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_...
4100346454
"""URL routes for myapp's project and employee views."""
from django.urls import path

from myapp import views

urlpatterns = [
    # Creation / listing endpoints.
    path('project/create', views.projectcreate),
    path('project/show', views.projectshow),
    path('employee/create', views.employeecreate),
    path('employee/show', views.employeeshow),
    # Project detail and management, keyed by primary key.
    path('project/view/<int:id>', views.projectview, name='project/view'),
    path('project/addemployeetoproject/<int:id>', views.addemployeetoproject),
    path('project/delete/<int:id>', views.projectdelete),
    path('project/update/<int:id>', views.projectupdate),
    # Employee detail and management, keyed by primary key.
    path('employee/view/<int:id>', views.employeeview, name='employee/view'),
    path('employee/asignprojecttoemployee/<int:id>', views.asignprojecttoemployee),
    path('employee/update/<int:id>', views.employeeupdate),
    path('employee/delete/<int:id>', views.employeedelete),
]
mudassir-cm/m2m
myapp/urls.py
urls.py
py
802
python
en
code
0
github-code
36
[ { "api_name": "django.urls.path", "line_number": 5, "usage_type": "call" }, { "api_name": "myapp.views.projectcreate", "line_number": 5, "usage_type": "attribute" }, { "api_name": "myapp.views", "line_number": 5, "usage_type": "name" }, { "api_name": "django.urls....
2927331509
import os
import time
from pathlib import Path

# Competitive-programming boilerplate written into newly created .cpp files.
template = "#include<bits/stdc++.h>\n#define mod 1000000007\n#define fastio ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL)\n#define pb push_back\n#define mp make_pair\n#define ll long long int\n#define fi first\n#define se second\n#define vll std::vector<ll> vl\n#define ld long double\nusing namespace std;\nint main()\n{\nfastio;\n\n\tll i,j,cnt=0,n;\n\nreturn 0;\n}"


def _create_if_absent(filename, content):
    """Create `filename` in the cwd with `content`, refusing to overwrite.

    Prints the same status messages as before and pauses briefly after a
    successful creation so the user can read them.  This helper replaces
    the duplicated cpp/py branches of the original script.
    """
    target = Path(os.path.join(os.getcwd(), filename))
    if target.is_file():
        print("File exist, Enter a valid File name")
        return
    # `with` guarantees the handle is closed even if the write fails.
    with open(target, "w+") as f:
        f.write(content)
    print('File created !')
    time.sleep(2)


while True:
    fname = input('Enter the cpp filename without file extension\n')
    choice = input('Choose the option of required extension -\n1. cpp\n2. py\n')
    try:
        ext = int(choice)
    except ValueError:
        # Previously a non-numeric answer crashed the script with ValueError.
        print('Select Valid option for extension')
        continue
    if ext == 1:
        _create_if_absent(fname + ".cpp", template)
    elif ext == 2:
        # .py files start empty (no template), matching the original behavior.
        _create_if_absent(fname + ".py", "")
    else:
        print('Select Valid option for extension')
Aditya20kul/450-DSA-questions
createFile.py
createFile.py
py
1,387
python
en
code
2
github-code
36
[ { "api_name": "os.getcwd", "line_number": 8, "usage_type": "call" }, { "api_name": "pathlib.Path", "line_number": 11, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 11, "usage_type": "call" }, { "api_name": "os.path", "line_number": 11, ...
23495813882
import datetime import tkinter.messagebox as tm from tkinter import * import tkinter.ttk as ttk import sqlite3 from PIL import ImageTk,Image path="logo1.png" sum=0 def myfunction(event): canvas.configure(scrollregion=canvas.bbox("all"), width=1328, height=455) def Numberonly1(event): global sum item1 = (m1.get()) sum += item1 def Numberonly2(event): global sum item2 = (m2.get()) sum += item2 def Numberonly3(event): global sum item3 = (m3.get()) sum += item3 def Numberonly4(event): global sum item4 = (m4.get()) sum += item4 def Numberonly5(event): global sum item5 = (m5.get()) sum += item5 def Numberonly6(event): global sum item6 = (m6.get()) sum += item6 def Numberonly16(): global sum s.set(sum) avg = (sum / 6) answer.set(round(avg, 2)) def logged(): s = str(datetime.datetime.now()) tm.showinfo("Log", "Entry created successfully at " + s) def Database(): global conn, cursor conn = sqlite3.connect("Student.db") cursor = conn.cursor() cursor.execute( "CREATE TABLE IF NOT EXISTS STUDENT (SNO INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,FirstName TEXT, MiddleName TEXT, LastName TEXT, DateOfBirth INTEGER, MonthOfBirth TEXT, YearOfBirth INTEGER, Gender TEXT, EmailID TEXT, Contact1 TEXT, Contact2 TEXT, Hobbies TEXT, PermanentAddress TEXT, Pincode TEXT, Locality TEXT, City TEXT, PO TEXT, PS TEXT, Lifestyle TEXT, State TEXT, Country TEXT, ParentsName TEXT, ParentsAddress TEXT, ParentsOccupation TEXT, ParentsContact TEXT, ParentsEmail TEXT, GuardianName TEXT, GuardianAddress TEXT, GuardianOccupation TEXT, GuardianContact TEXT, GuardianEmail TEXT, Class12Stream TEXT, English INTEGER, Vernacular INTEGER, Mathematics INTEGER, Physics INTEGER, Chemistry INTEGER, ComputerScience INTEGER, Class12Percentage INTEGER, Class12Aggregate INTEGER)") conn.commit() def Errorcheck1(event): str1 = firstname.get() for i in range(len(str1)): p1 = str1[i] p2 = ord(p1) if ((p2 < 65) or ((p2 > 90) and (p2 < 97)) or (p2 > 122)): tm.showerror("Error", "Invalid First Name") tm.showinfo("my message", 
"Re-enter your first name") firstname.set("") break def Errorcheck2(event): str1 = middlename.get() for i in range(len(str1)): p1 = str1[i] p2 = ord(p1) if ((p2 < 65) or ((p2 > 90) and (p2 < 97)) or (p2 > 122)): tm.showerror("Error", "Invalid Middle Name") tm.showinfo("my message", "Re-enter your Middle name") middlename.set("") break def Errorcheck3(event): str1 = lastname.get() for i in range(len(str1)): p1 = str1[i] p2 = ord(p1) if ((p2 < 65) or ((p2 > 90) and (p2 < 97)) or (p2 > 122)): tm.showerror("Error", "Invalid Last Name") tm.showinfo("my message", "Re-enter your Middle name") lastname.set("") break def Errorcheck9(event): str1 = parent.get() for i in range(len(str1)): p1 = str1[i] p2 = ord(p1) if ((p2 < 65) or ((p2 > 90) and (p2 < 97)) or (p2 > 122) or (p2!=32)): tm.showerror("Error", "Invalid Parents Name") tm.showinfo("my message", "Re-enter your Parents name") parent.set("") break def Errorcheck10(event): str1 = guardian.get() for i in range(len(str1)): p1 = str1[i] p2 = ord(p1) if ((p2 < 65) or ((p2 > 90) and (p2 < 97)) or (p2 > 122) or (p2!=32)): tm.showerror("Error", "Invalid Guardian Name") tm.showinfo("my message", "Re-enter your Guardian name") guardian.set("") break def Errorcheck4(event): try: str1 = int(cl6a.get()) str2 = cl6b.get() str3 = int(cl6c.get()) if(type(str1) is not int or type(str3) is not int): raise ValueError("Error in type occured") if ((str3 % 400 == 0) or (str3 % 4 == 0 and str3 % 100 != 0)): pc = 1 else: pc = 0 if (((str1 > 28) and (str2 == "February") and (pc == 0))): tm.showerror("Error", "Invalid Date Entered") tm.showinfo("my message", "Re-enter your Date Of Birth") cl6a.set("") cl6b.set("") cl6c.set("") except ValueError as ve: print(ve) def Errorcheck5(event): str1 = phone1.get() if(len(str1)>10): tm.showerror("Error", "Invalid Contact Number Entered") tm.showinfo("my message", "Re-enter your Contact Number") phone1.set("") def Errorcheck7(event): str1 = phone3.get() if (len(str1) > 10): tm.showerror("Error", "Invalid 
Contact Number Entered") tm.showinfo("my message", "Re-enter your Contact Number") phone3.set("") def Errorcheck6(event): str1 = phone2.get() if (len(str1) > 10): tm.showerror("Error", "Invalid Contact Number Entered") tm.showinfo("my message", "Re-enter your Contact Number") phone2.set("") def Errorcheck8(event): str1 = phone4.get() if (len(str1) > 10): tm.showerror("Error", "Invalid Contact Number Entered") tm.showinfo("my message", "Re-enter your Contact Number") phone4.set("") def DatabaseAdd(): Database() global conn, cursor cursor.execute( "INSERT INTO STUDENT(FirstName, MiddleName, LastName, DateOfBirth, MonthOfBirth, YearOfBirth, Gender, EmailID, Contact1, Contact2, Hobbies, PermanentAddress, Pincode, Locality, City, PO, PS, Lifestyle, State, Country, ParentsName, ParentsAddress, ParentsOccupation, ParentsContact, ParentsEmail, GuardianName, GuardianAddress, GuardianOccupation, GuardianContact, GuardianEmail, Class12Stream, English, Vernacular, Mathematics, Physics, Chemistry, ComputerScience, Class12Percentage, Class12Aggregate) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (str(firstname.get()), str(middlename.get()), str(lastname.get()), str(cl6a.get()), str(cl6b.get()), str(cl6c.get()), str(i1.get()), str(email1.get()), str(phone1.get()), str(phone2.get()), str(hobby.get()), str(address1.get()), str(pincode.get()), str(locality.get()), str(city.get()), str(po.get()), str(ps.get()), str(i2.get()), str(state.get()), str(cl7a.get()), str(parent.get()), str(parentaddress.get()), str(parentoccupation.get()), str(phone3.get()), str(email2.get()), str(guardian.get()), str(guardaddress.get()), str(guardoccupation.get()), str(phone4.get()), str(email3.get()), str(c31a.get()), str(m1.get()), str(m2.get()), str(m3.get()), str(m4.get()), str(m5.get()), str(m6.get()), str(answer.get()), str(s.get()))) conn.commit() firstname.set(""), middlename.set(""), lastname.set(""), cl6a.set(""), 
cl6b.set(""), cl6c.set(""), i1.set( ""), email1.set(""), phone1.set(""), phone2.set(""), hobby.set(""), address1.set(""), pincode.set( ""), locality.set(""), city.set(""), po.set(""), ps.set(""), i2.set(""), state.set(""), cl7a.set( ""), parent.set(""), parentaddress.set(""), parentoccupation.set(""), phone3.set(""), email2.set( ""), guardian.set(""), guardaddress.set(""), guardoccupation.set(""), phone4.set(""), email3.set( ""), c31a.set(""), m1.set("0"), m2.set("0"), m3.set("0"), m4.set("0"), m5.set("0"), m6.set("0"), answer.set( "0"), s.set("0") cursor.close() conn.close() logged() def DatabaseView(): Database() frame1 = Toplevel() global conn, cursor frame1.title("View Contents") w = 450 h = 75 ws = root.winfo_screenwidth() hs = root.winfo_screenheight() x = (ws / 2) - (w / 2) y = (hs / 2) - (h / 2) frame1.geometry('%dx%d+%d+%d' % (w, h, x, y)) def Viewall(): Database() ViewFrame = Toplevel() cursor.execute("SELECT * FROM STUDENT") conn.commit() fetch = cursor.fetchall() scrollbarx = Scrollbar(ViewFrame, orient=HORIZONTAL) scrollbary = Scrollbar(ViewFrame, orient=VERTICAL) tree = ttk.Treeview(ViewFrame, columns=( "SNo", "FirstName", "MiddleName", "LastName", "DateOfBirth", "MonthOfBirth", "YearOfBirth", "Gender", "EmailID", "Contact1", "Contact2", "Hobbies", "PermanentAddress", "Pincode", "Locality", "City", "PO", "PS", "Lifestyle", "State", "Country", "ParentsName", "ParentsAddress", "ParentsOccupation", "ParentsContact", "ParentsEmail", "GuardianName", "GuardianAddress", "GuardianOccupation", "GuardianContact", "GuardianEmail", "Class12Stream", "English", "Vernacular", "Mathematics", "Physics", "Chemistry", "ComputerScience", "Class12Percentage", "Class12Aggregate"), selectmode=EXTENDED, yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set) scrollbary.config(command=tree.yview) scrollbary.pack(side=RIGHT, fill=Y) scrollbarx.config(command=tree.xview) scrollbarx.pack(side=BOTTOM, fill=X) tree.heading('SNo', text="SNo", anchor=CENTER), 
tree.heading('FirstName', text="FirstName", anchor=CENTER), tree.heading('MiddleName', text="MiddleName", anchor=CENTER), tree.heading( 'LastName', text="LastName", anchor=CENTER), tree.heading('DateOfBirth', text="DateOfBirth", anchor=CENTER), tree.heading('MonthOfBirth', text="MonthOfBirth", anchor=CENTER), tree.heading( 'YearOfBirth', text="YearOfBirth", anchor=CENTER), tree.heading('Gender', text="Gender", anchor=CENTER), tree.heading('EmailID', text="EmailID", anchor=CENTER), tree.heading( 'Contact1', text="Contact1", anchor=CENTER), tree.heading('Contact2', text="Contact2", anchor=CENTER), tree.heading('Hobbies', text="Hobbies", anchor=CENTER), tree.heading( 'PermanentAddress', text="PermanentAddress", anchor=CENTER), tree.heading('Pincode', text="Pincode", anchor=CENTER), tree.heading( 'Locality', text="Locality", anchor=CENTER), tree.heading('City', text="City", anchor=CENTER), tree.heading('PO', text="PO", anchor=CENTER), tree.heading( 'PS', text="PS", anchor=CENTER), tree.heading('Lifestyle', text="Lifestyle", anchor=CENTER), tree.heading('State', text="State", anchor=CENTER), tree.heading( 'Country', text="Country", anchor=CENTER), tree.heading('ParentsName', text="ParentsName", anchor=CENTER), tree.heading('ParentsAddress', text="ParentsAddress", anchor=CENTER), tree.heading( 'ParentsOccupation', text="ParentsOccupation", anchor=CENTER), tree.heading('ParentsContact', text="ParentsContact", anchor=CENTER), tree.heading( 'ParentsEmail', text="ParentsEmail", anchor=CENTER), tree.heading('GuardianName', text="GuardianName", anchor=CENTER), tree.heading( 'GuardianAddress', text="GuardianAddress", anchor=CENTER), tree.heading('GuardianOccupation', text="GuardianOccupation", anchor=CENTER), tree.heading( 'GuardianContact', text="GuardianContact", anchor=CENTER), tree.heading('GuardianEmail', text="GuardianEmail", anchor=CENTER), tree.heading( 'Class12Stream', text="Class12Stream", anchor=CENTER), tree.heading('English', text="English", anchor=CENTER), 
tree.heading( 'Vernacular', text="Vernacular", anchor=CENTER), tree.heading('Mathematics', text="Mathematics", anchor=CENTER), tree.heading('Physics', text="Physics", anchor=CENTER), tree.heading( 'Chemistry', text="Chemistry", anchor=CENTER), tree.heading('ComputerScience', text="ComputerScience", anchor=CENTER), tree.heading( 'Class12Percentage', text="Class12Percentage", anchor=CENTER), tree.heading('Class12Aggregate', text="Class12Aggregate", anchor=CENTER) tree.column('#0', stretch=NO, minwidth=0, width=0), tree.column('#1', stretch=NO, minwidth=0, width=140), tree.column('#2', stretch=NO, minwidth=0, width=140), tree.column( '#3', stretch=NO, minwidth=0, width=140), tree.column('#4', stretch=NO, minwidth=0, width=140), tree.column('#5', stretch=NO, minwidth=0, width=140), tree.column( '#6', stretch=NO, minwidth=0, width=140), tree.column('#7', stretch=NO, minwidth=0, width=150), tree.column('#8', stretch=NO, minwidth=0, width=150), tree.column( '#9', stretch=NO, minwidth=0, width=150), tree.column('#10', stretch=NO, minwidth=0, width=140), tree.column('#11', stretch=NO, minwidth=0, width=140), tree.column( '#12', stretch=NO, minwidth=0, width=140), tree.column('#13', stretch=NO, minwidth=0, width=140), tree.column('#14', stretch=NO, minwidth=0, width=140), tree.column( '#15', stretch=NO, minwidth=0, width=140), tree.column('#16', stretch=NO, minwidth=0, width=140), tree.column('#17', stretch=NO, minwidth=0, width=140), tree.column( '#18', stretch=NO, minwidth=0, width=140), tree.column('#19', stretch=NO, minwidth=0, width=140), tree.column('#20', stretch=NO, minwidth=0, width=140), tree.column( '#21', stretch=NO, minwidth=0, width=140), tree.column('#22', stretch=NO, minwidth=0, width=140), tree.column('#23', stretch=NO, minwidth=0, width=140), tree.column( '#24', stretch=NO, minwidth=0, width=140), tree.column('#25', stretch=NO, minwidth=0, width=140), tree.column('#26', stretch=NO, minwidth=0, width=140), tree.column( '#27', stretch=NO, minwidth=0, 
width=140), tree.column('#28', stretch=NO, minwidth=0, width=140), tree.column('#29', stretch=NO, minwidth=0, width=140), tree.column( '#30', stretch=NO, minwidth=0, width=140), tree.column('#31', stretch=NO, minwidth=0, width=140), tree.column('#32', stretch=NO, minwidth=0, width=140), tree.column( '#33', stretch=NO, minwidth=0, width=140), tree.column('#34', stretch=NO, minwidth=0, width=140), tree.column('#35', stretch=NO, minwidth=0, width=140), tree.column( '#36', stretch=NO, minwidth=0, width=140), tree.column('#37', stretch=NO, minwidth=0, width=140), tree.column('#38', stretch=NO, minwidth=0, width=140), tree.column( '#39', stretch=NO, minwidth=0, width=140) tree.pack() for data in fetch: tree.insert('', 'end', values=data) cursor.close() conn.close() def Search(): Database() ViewFrame = Toplevel() scrollbarx = Scrollbar(ViewFrame, orient=HORIZONTAL) scrollbary = Scrollbar(ViewFrame, orient=VERTICAL) tree = ttk.Treeview(ViewFrame, columns=( "SNo", "FirstName", "MiddleName", "LastName", "DateOfBirth", "MonthOfBirth", "YearOfBirth", "Gender", "EmailID", "Contact1", "Contact2", "Hobbies", "PermanentAddress", "Pincode", "Locality", "City", "P.O", "P.S", "Lifestyle", "State", "Country", "ParentsName", "ParentsAddress", "ParentsOccupation", "ParentsContact", "ParentsEmail", "GuardianName", "GuardianAddress", "GuardianOccupation", "GuardianContact", "GuardianEmail", "Class12Stream", "English", "Vernacular", "Mathematics", "Physics", "Chemistry", "ComputerScience", "Class12Percentage", "Class12Aggregate"), selectmode=EXTENDED, yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set) scrollbary.config(command=tree.yview) scrollbary.pack(side=RIGHT, fill=Y) scrollbarx.config(command=tree.xview) scrollbarx.pack(side=BOTTOM, fill=X) tree.heading('SNo', text="SNo", anchor=CENTER), tree.heading('FirstName', text="FirstName", anchor=CENTER), tree.heading('MiddleName', text="MiddleName", anchor=CENTER), tree.heading( 'LastName', text="LastName", anchor=CENTER), 
tree.heading('DateOfBirth', text="DateOfBirth", anchor=CENTER), tree.heading('MonthOfBirth', text="MonthOfBirth", anchor=CENTER), tree.heading( 'YearOfBirth', text="YearOfBirth", anchor=CENTER), tree.heading('Gender', text="Gender", anchor=CENTER), tree.heading('EmailID', text="EmailID", anchor=CENTER), tree.heading( 'Contact1', text="Contact1", anchor=CENTER), tree.heading('Contact2', text="Contact2", anchor=CENTER), tree.heading('Hobbies', text="Hobbies", anchor=CENTER), tree.heading( 'PermanentAddress', text="PermanentAddress", anchor=CENTER), tree.heading('Pincode', text="Pincode", anchor=CENTER), tree.heading( 'Locality', text="Locality", anchor=CENTER), tree.heading('City', text="City", anchor=CENTER), tree.heading('P.O', text="P.O", anchor=CENTER), tree.heading( 'P.S', text="P.S", anchor=CENTER), tree.heading('Lifestyle', text="Lifestyle", anchor=CENTER), tree.heading('State', text="State", anchor=CENTER), tree.heading( 'Country', text="Country", anchor=CENTER), tree.heading('ParentsName', text="ParentsName", anchor=CENTER), tree.heading('ParentsAddress', text="ParentsAddress", anchor=CENTER), tree.heading( 'ParentsOccupation', text="ParentsOccupation", anchor=CENTER), tree.heading('ParentsContact', text="ParentsContact", anchor=CENTER), tree.heading( 'ParentsEmail', text="ParentsEmail", anchor=CENTER), tree.heading('GuardianName', text="GuardianName", anchor=CENTER), tree.heading( 'GuardianAddress', text="GuardianAddress", anchor=CENTER), tree.heading('GuardianOccupation', text="GuardianOccupation", anchor=CENTER), tree.heading( 'GuardianContact', text="GuardianContact", anchor=CENTER), tree.heading('GuardianEmail', text="GuardianEmail", anchor=CENTER), tree.heading( 'Class12Stream', text="Class12Stream", anchor=CENTER), tree.heading('English', text="English", anchor=CENTER), tree.heading( 'Vernacular', text="Vernacular", anchor=CENTER), tree.heading('Mathematics', text="Mathematics", anchor=CENTER), tree.heading('Physics', text="Physics", anchor=CENTER), 
tree.heading( 'Chemistry', text="Chemistry", anchor=CENTER), tree.heading('ComputerScience', text="ComputerScience", anchor=CENTER), tree.heading( 'Class12Percentage', text="Class12Percentage", anchor=CENTER), tree.heading('Class12Aggregate', text="Class12Aggregate", anchor=CENTER) tree.column('#0', stretch=NO, minwidth=0, width=0), tree.column('#1', stretch=NO, minwidth=0, width=140), tree.column('#2', stretch=NO, minwidth=0, width=140), tree.column( '#3', stretch=NO, minwidth=0, width=140), tree.column('#4', stretch=NO, minwidth=0, width=140), tree.column('#5', stretch=NO, minwidth=0, width=140), tree.column( '#6', stretch=NO, minwidth=0, width=140), tree.column('#7', stretch=NO, minwidth=0, width=140), tree.column('#8', stretch=NO, minwidth=0, width=140), tree.column( '#9', stretch=NO, minwidth=0, width=140), tree.column('#10', stretch=NO, minwidth=0, width=140), tree.column('#11', stretch=NO, minwidth=0, width=140), tree.column( '#12', stretch=NO, minwidth=0, width=140), tree.column('#13', stretch=NO, minwidth=0, width=140), tree.column('#14', stretch=NO, minwidth=0, width=140), tree.column( '#15', stretch=NO, minwidth=0, width=140), tree.column('#16', stretch=NO, minwidth=0, width=140), tree.column('#17', stretch=NO, minwidth=0, width=140), tree.column( '#18', stretch=NO, minwidth=0, width=140), tree.column('#19', stretch=NO, minwidth=0, width=140), tree.column('#20', stretch=NO, minwidth=0, width=140), tree.column( '#21', stretch=NO, minwidth=0, width=140), tree.column('#22', stretch=NO, minwidth=0, width=140), tree.column('#23', stretch=NO, minwidth=0, width=140), tree.column( '#24', stretch=NO, minwidth=0, width=140), tree.column('#25', stretch=NO, minwidth=0, width=140), tree.column('#26', stretch=NO, minwidth=0, width=140), tree.column( '#27', stretch=NO, minwidth=0, width=140), tree.column('#28', stretch=NO, minwidth=0, width=140), tree.column('#29', stretch=NO, minwidth=0, width=140), tree.column( '#30', stretch=NO, minwidth=0, width=140), 
tree.column('#31', stretch=NO, minwidth=0, width=140), tree.column('#32', stretch=NO, minwidth=0, width=140), tree.column( '#33', stretch=NO, minwidth=0, width=140), tree.column('#34', stretch=NO, minwidth=0, width=140), tree.column('#35', stretch=NO, minwidth=0, width=140), tree.column( '#36', stretch=NO, minwidth=0, width=140), tree.column('#37', stretch=NO, minwidth=0, width=140), tree.column('#38', stretch=NO, minwidth=0, width=140), tree.column( '#39', stretch=NO, minwidth=0, width=140) tree.pack() if st.get() != "": cursor.execute("SELECT * FROM `STUDENT` WHERE `FirstName` LIKE ?", ('%' + str(st.get()) + '%',)) conn.commit() fetch = cursor.fetchall() for data in fetch: tree.insert('', 'end', values=data) cursor.close() conn.close() def Reset(): st.set("") Button(frame1, text="View All", command=Viewall).pack(side=LEFT, anchor=N, padx=10, pady=10) Button(frame1, text="Search", command=Search).pack(side=LEFT, anchor=N, padx=10, pady=10) st = StringVar() Entry(frame1, textvariable=st, width=30).pack(side=LEFT, anchor=N, padx=5, pady=11) st.get() Button(frame1, text="Reset", command=Reset).pack(side=LEFT, anchor=N, padx=10, pady=10) frame1.resizable(0, 0) def Exit(): result = tm.askquestion('Inventory Management v1.3', 'Are you sure you want to exit?', icon="warning") if result == 'yes': root.destroy() cursor.close() conn.close() exit() def Chnglog(): tm.showinfo("Changelog", "v1.0 - Only GUI \nv1.1 - Accepts inputs and saves it to text file \nv1.2 - Open previous logs\nv1.3 - SQLite3 Database integration") def About(): tm.showinfo("About", "Python GUI Project\nInventory Management v1.3") root = Tk() sizex = 5000 sizey = 4000 posx = 100 posy = 100 root.wm_geometry("%dx%d+%d+%d" % (sizex, sizey, posx, posy)) # create a drop down menu menu = Menu(root) root.title("Student Admission System") root.config(menu=menu) # file menu file = Menu(menu, tearoff=0) menu.add_cascade(label="File", menu=file) file.add_command(label="Open File", command=DatabaseView) 
file.add_separator() file.add_command(label="Exit", command=Exit) # help menu hlp = Menu(menu, tearoff=0) menu.add_cascade(label="Help", menu=hlp) hlp.add_command(label="About", command=About) hlp.add_command(label="Changelog", command=Chnglog) myframe = Frame(root, relief=GROOVE, width=sizex, height=sizey, bd=1) myframe.place(x=5, y=200) img = ImageTk.PhotoImage(Image.open(path)) #The Label widget is a standard Tkinter widget used to display a text or image on the screen. panel = Label(root, image = img) #The Pack geometry manager packs widgets in rows or columns. panel.place(x=40,y=30) canvas = Canvas(myframe) frame = Frame(canvas, bg="light blue") myscrollbar1 = Scrollbar(myframe, orient="vertical", command=canvas.yview) canvas.configure(yscrollcommand=myscrollbar1.set) myscrollbar1.pack(side="right", fill="y") myscrollbar2 = Scrollbar(myframe, orient="horizontal", command=canvas.xview) canvas.configure(xscrollcommand=myscrollbar2.set) myscrollbar2.pack(side="bottom", fill="x") canvas.pack(side="left") canvas.create_window((0, 0), window=frame, anchor='nw') frame.bind("<Configure>", myfunction) # data() root.configure(bg="black") label = Label(root, text="APPLICATION FORM OF ST.THOMAS' COLLEGE ") label.config(font=("Baskerville Old Face", 34, 'bold'), fg="blue") label.place(x=220, y=75) l4s = Label(frame, text="Personal Details :-", bg="green", fg="yellow") l4s.config(font=("Courier", 25, 'bold')) l4s.grid(row=3, column=0, pady=50, sticky="W") l5 = Label(frame, text="First Name", bg="light blue") l5.config(font=("Aeril", 20)) l5.grid(row=5, column=0) firstname = StringVar() el5a = Entry(frame, width=30, textvariable=firstname) el5a.config(font=("Aeril", 15)) el5a.bind('<Leave>',Errorcheck1) el5a.grid(row=5, column=1, sticky="W", columnspan=2) l5b = Label(frame, text="Middle Name", bg="light blue") l5b.config(font=("Aeril", 20)) l5b.grid(row=6, column=0, pady=50) middlename = StringVar() el5b = Entry(frame, width=30, textvariable=middlename) 
el5b.config(font=("Aeril", 15)) el5b.bind('<Leave>',Errorcheck2) el5b.grid(row=6, column=1, sticky="W", columnspan=2) l5c = Label(frame, text="Last Name", bg="light blue") l5c.config(font=("Aeril", 20)) l5c.grid(row=7, column=0) lastname = StringVar() el5c = Entry(frame, width=30, textvariable=lastname) el5c.config(font=("Aeril", 15)) el5c.bind('<Leave>',Errorcheck3) el5c.grid(row=7, column=1, sticky="W", columnspan=2) # DATE OF BIRTH l6 = Label(frame, text="Date Of Birth", bg="light blue") l6.config(font=("Aerial", 20)) l6.grid(row=8, column=0, pady=50) cl6a = ttk.Combobox(frame, values=[i for i in range(1, 32)]) cl6a.set("DATE") cl6a.bind("<<ComboboxSelected>>") cl6a.config(font=("Aerial", 15), width='15') cl6a.grid(row=8, column=1, sticky="W", columnspan=2) cl6b = ttk.Combobox(frame, values=["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]) cl6b.set("MONTH") cl6b.bind("<<ComboboxSelected>>") cl6b.config(font=("Aerial", 15), width='15') cl6b.place(x=690, y=411) cl6c = ttk.Combobox(frame, values=[i for i in range(1975, 2019)]) cl6c.bind('<Leave>',Errorcheck4) cl6c.set("YEAR") cl6c.bind("<<ComboboxSelected>>") cl6c.config(font=("Aerial", 15), width='15') cl6c.place(x=920, y=411) # GENDER l7 = Label(frame, text="Gender", bg="light blue") l7.config(font=("Aerial", 20)) l7.grid(row=9, column=0) i1 = StringVar() r1 = Radiobutton(frame, text="Male", value="Male", variable=i1) r1.config(font=("Aerial", 15)) r1.grid(row=9, column=1, sticky="W", columnspan=2) r2 = Radiobutton(frame, text="Female", value="Female", variable=i1) r2.config(font=("Aerial", 15)) r2.place(x=610, y=496) r3 = Radiobutton(frame, text="Others", value="Others", variable=i1) r3.config(font=("Aerial", 15)) r3.place(x=780, y=496) # EMAIL l8 = Label(frame, text="Email ID", bg="light blue") l8.config(font=("Aerial", 20)) l8.grid(row=10, column=0, pady=40) email1 = StringVar() el8 = Entry(frame, width=50, textvariable=email1) 
el8.config(font=("Aeril", 15)) el8.grid(row=10, column=1, sticky="W") # CONTACT NO 1 l9 = Label(frame, text="Contact Number 1", bg="light blue") l9.config(font=("Aerial", 20)) l9.grid(row=11, column=0) phone1 = StringVar() el9 = Entry(frame, width=30, textvariable=phone1) el9.bind('<Leave>',Errorcheck5) el9.config(font=("Aeril", 15)) el9.grid(row=11, column=1, sticky="W") # CONTACT NO 2 l10 = Label(frame, text="Contact Number 2", bg="light blue") l10.config(font=("Aerial", 20)) l10.grid(row=12, column=0, pady=40) phone2 = StringVar() el10 = Entry(frame, width=30, textvariable=phone2) el10.config(font=("Aeril", 15)) el10.bind('<Leave>',Errorcheck6) el10.grid(row=12, column=1, sticky="W") # HOBBIES l11 = Label(frame, text="Hobbies", bg="light blue") l11.config(font=("Aerial", 20)) l11.grid(row=14, column=0) hobby = StringVar() el11 = Entry(frame, width=50, textvariable=hobby) el11.config(font=("Aeril", 15)) el11.grid(row=14, column=1, sticky="W") l4s = Label(frame, text="Residential Details :-", bg="green", fg="yellow") l4s.config(font=("Courier", 25, 'bold')) l4s.grid(row=15, column=0, pady=50) # PERMANENT ADDRESS l12 = Label(frame, text="Permanent Address", bg="light blue") l12.config(font=("Aerial", 20)) l12.grid(row=17, column=0) address1 = StringVar() el12 = Entry(frame, width=80, textvariable=address1) el12.config(font=("Aeril", 15)) el12.grid(row=17, column=1, sticky="W") # PINCODE l13 = Label(frame, text="Pincode", bg="light blue") l13.config(font=("Aerial", 20)) l13.grid(row=18, column=0, pady=50) pincode = StringVar() el13 = Entry(frame, width=15, textvariable=pincode) el13.config(font=("Aeril", 15)) el13.grid(row=18, column=1, sticky="W") # LOCALITY l14 = Label(frame, text="Locality", bg="light blue") l14.config(font=("Aerial", 20)) l14.grid(row=20, column=0) locality = StringVar() el14 = Entry(frame, width=20, textvariable=locality) el14.config(font=("Aeril", 15)) el14.grid(row=20, column=1, sticky="W") # CITY l12 = Label(frame, text="City", bg="light 
blue") l12.config(font=("Aerial", 20)) l12.grid(row=22, column=0, pady=45) city = StringVar() el12 = Entry(frame, width=20, textvariable=city) el12.config(font=("Aeril", 15)) el12.grid(row=22, column=1, sticky="W") # PO l13 = Label(frame, text="Post Office(P.O)", bg="light blue") l13.config(font=("Aerial", 20)) l13.grid(row=24, column=0) po = StringVar() el13 = Entry(frame, width=20, textvariable=po) el13.config(font=("Aeril", 15)) el13.place(x=462, y=1335) # PS l14 = Label(frame, text="Police Station(P.S)", bg="light blue") l14.config(font=("Aerial", 20)) l14.place(x=850, y=1330) ps = StringVar() el14 = Entry(frame, width=20, textvariable=ps) el14.config(font=("Aeril", 15)) el14.place(x=1182, y=1335) # Urban/rural l15 = Label(frame, text="Lifestyle", bg="light blue") l15.config(font=("Aerial", 20)) l15.grid(row=30, column=0, pady=45) i2 = StringVar() r1 = Radiobutton(frame, text="Urban", value="Urban", variable=i2) r1.config(font=("Aerial", 15)) r1.grid(row=30, column=1, sticky="W", columnspan=2) r2 = Radiobutton(frame, text="Rural", value="Rural", variable=i2) r2.config(font=("Aerial", 15)) r2.place(x=600, y=1413) # State l16 = Label(frame, text="State", bg="light blue") l16.config(font=("Aerial", 20,)) l16.grid(row=31, column=0, pady=10) state = StringVar() el16 = Entry(frame, width=20, textvariable=state) el16.config(font=("Aeril", 15)) el16.grid(row=31, column=1, sticky="W") # Country l17 = Label(frame, text="Country", bg="light blue") l17.config(font=("Aerial", 20,)) l17.grid(row=32, column=0, pady=30) cl7a = ttk.Combobox(frame, values=["Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua & Barbuda", "Argentina", "Armenia", "Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrai", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria", "Burkina Faso", "Burundi", "Cabo Verde", "Cambodia", "Cameroon", "Canada", "Central African Republic (CAR)", 
"Chad", "Chile", "China", "Colombia", "Comoros", "Costa Rica", "Cote d'Ivoire", "Croatia", "Cuba", "Cyprus", "Czechia", "Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Eswatini (formerly Swaziland)", "Ethiopia", "Fiji", "Finland", "France", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Kosovo", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Liechtenstein", "Lithuania", "Luxembourg", "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malt", "Marshall Islands", "Mauritius", "Mexico", "Micronesia", "Moldova", "Monaco", "Mongolia", "Montenegro", "Morocco", "Mozambique", "Myanmar(formerly Burma)", "Namibia", "Nauru" , "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "North Korea", "North Macedonia (formerly Macedonia)", "Norway", "Oman", "Pakistan", "Palau", "Palestine", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland", "Portugal", "Qatar", "Romania", "Russia", "Rwanda", "Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines", "Samoa", "San Marino", "Sao Tome and Principe", "Saudi Arabia", "Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia", "Solomon Islands", "Somalia", "South Africa", "South Korea", "South Sudan", "Spain", "Sri Lanka", "Sudan", "Suriname", "Sweden", "Switzerland", "Syria", "Taiwan", "Tajikistan", "Tanzania", "Thailand", "Timor-Leste", "Togo,Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda", "Ukraine", "United Arab Emirates (UAE)", "United Kingdom (UK)", "United States of America (USA)", "Uruguay", "Uzbekistan", "Vanuatu", "Vatican City 
(Holy See)", "Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe"]) cl7a.set("Select A Country") cl7a.bind("<<ComboboxSelected>>") cl7a.config(font=("Aerial", 15), width='30') cl7a.grid(row=32, column=1, sticky="W", columnspan=2) l18s = Label(frame, text="Parents' Details :-") l18s.config(font=("Courier", 25, 'bold')) l18s.grid(row=33, column=0, pady=40, sticky="W") # Parent's name l19 = Label(frame, text="Parents Name", bg="light blue") l19.config(font=("Aerial", 20,)) l19.grid(row=34, column=0, pady=10) parent = StringVar() el19 = Entry(frame, width=20, textvariable=parent) el19.config(font=("Aeril", 15)) el19.grid(row=34, column=1, sticky="W") # Parent's address l20 = Label(frame, text="Parents Address", bg="light blue") l20.config(font=("Aerial", 20,)) l20.grid(row=35, column=0, pady=30) parentaddress = StringVar() el20 = Entry(frame, width=30, textvariable=parentaddress) el20.config(font=("Aeril", 15)) el20.grid(row=35, column=1, sticky="W") # Parent's occupation l21 = Label(frame, text="Parents Occupation", bg="light blue") l21.config(font=("Aerial", 20,)) l21.grid(row=36, column=0, pady=20) parentoccupation = StringVar() el21 = Entry(frame, width=20, textvariable=parentoccupation) el21.config(font=("Aeril", 15)) el21.grid(row=36, column=1, sticky="W") # Parents' contact l22 = Label(frame, text="Parents Contact", bg="light blue") l22.config(font=("Aerial", 20,)) l22.grid(row=37, column=0, pady=20) phone3 = StringVar() el22 = Entry(frame, width=20, textvariable=phone3) el22.config(font=("Aeril", 15)) el22.bind('<Leave>',Errorcheck7) el22.grid(row=37, column=1, sticky="W") # Parents' email l23 = Label(frame, text="Parents Email", bg="light blue") l23.config(font=("Aerial", 20,)) l23.grid(row=38, column=0, pady=20) email2 = StringVar() el23 = Entry(frame, width=20, textvariable=email2) el23.config(font=("Aeril", 15)) el23.grid(row=38, column=1, sticky="W") # Guardian's Name l24 = Label(frame, text="Guardian Name", bg="light blue") l24.config(font=("Aerial", 
20,)) l24.grid(row=39, column=0, pady=30) guardian = StringVar() el24 = Entry(frame, width=20, textvariable=guardian) el24.config(font=("Aeril", 15)) el24.grid(row=39, column=1, sticky="W") # Guardian's address l25 = Label(frame, text="Guardian Address", bg="light blue") l25.config(font=("Aerial", 20,)) l25.grid(row=40, column=0, pady=20) guardaddress = StringVar() el25 = Entry(frame, width=30, textvariable=guardaddress) el25.config(font=("Aeril", 15)) el25.grid(row=40, column=1, sticky="W") # Guardians' occupation l26 = Label(frame, text="Guardian Occupation", bg="light blue") l26.config(font=("Aerial", 20,)) l26.grid(row=41, column=0, pady=20) guardoccupation = StringVar() el26 = Entry(frame, width=20, textvariable=guardoccupation) el26.config(font=("Aeril", 15)) el26.grid(row=41, column=1, sticky="W") # Guardians' contact l27 = Label(frame, text="Guardian Contact", bg="light blue") l27.config(font=("Aerial", 20,)) l27.grid(row=42, column=0, pady=20) phone4 = StringVar() el27 = Entry(frame, width=20, textvariable=phone4) el27.config(font=("Aeril", 15)) el27.bind('<Leave>',Errorcheck8) el27.grid(row=42, column=1, sticky="W") # Guardians' email l28 = Label(frame, text="Guardian Email", bg="light blue") l28.config(font=("Aerial", 20,)) l28.grid(row=43, column=0, pady=20) email3 = StringVar() el28 = Entry(frame, width=20, textvariable=email3) el28.config(font=("Aeril", 15)) el28.grid(row=43, column=1, sticky="W") l29s = Label(frame, text="Educational Details :-", bg="green", fg="yellow") l29s.config(font=("Courier", 25, 'bold')) l29s.grid(row=44, column=0, pady=40, sticky="W") # Stream l30 = Label(frame, text="Class 12 Stream", bg="light blue") l30.config(font=("Aerial", 20,)) l30.grid(row=45, column=0, pady=30) c31a = ttk.Combobox(frame, values=["PMC-Comp", "PMC-B", "PMC-Comm", "PMC-Arts"]) c31a.set("Class 12 Stream") c31a.bind("<<ComboboxSelected>>") c31a.config(font=("Aerial", 15), width='20') c31a.grid(row=45, column=1, sticky="W", columnspan=2) l30 = 
Label(frame, text="According to selection , choose your subjects and enter corresponding marks", bg="light blue") l30.config(font=("Aerial", 20,)) l30.grid(row=46, column=0, pady=30, columnspan=3, sticky="W") m1 = IntVar() m2 = IntVar() m3 = IntVar() m4 = IntVar() m5 = IntVar() m6 = IntVar() answer = IntVar() s = IntVar() cb1 = Checkbutton(frame, text="English") cb1.config(font=("Aerial", 15)) cb1.grid(row=47, column=0) cben1 = Entry(frame, width=10, textvariable=m1) cben1.config(font=("Aeril", 15)) cben1.bind("<Leave>", Numberonly1) cben1.grid(row=47, column=1, sticky="W") cb2 = Checkbutton(frame, text="Vernacular") cb2.config(font=("Aerial", 15)) cb2.grid(row=48, column=0, pady=45) cben2 = Entry(frame, width=10, textvariable=m2) cben2.config(font=("Aeril", 15)) cben2.bind("<Leave>", Numberonly2) cben2.grid(row=48, column=1, sticky="W") cb3 = Checkbutton(frame, text="Mathematics") cb3.config(font=("Aerial", 15)) cb3.grid(row=49, column=0, pady=15) cben3 = Entry(frame, width=10, textvariable=m3) cben3.config(font=("Aeril", 15)) cben3.bind("<Leave>", Numberonly3) cben3.grid(row=49, column=1, sticky="W") cb4 = Checkbutton(frame, text="Physics") cb4.config(font=("Aerial", 15)) cb4.grid(row=50, column=0, pady=15) cben4 = Entry(frame, width=10, textvariable=m4) cben4.config(font=("Aeril", 15)) cben4.bind("<Leave>", Numberonly4) cben4.grid(row=50, column=1, sticky="W") cb5 = Checkbutton(frame, text="Chemistry") cb5.config(font=("Aerial", 15)) cb5.grid(row=51, column=0, pady=15) cben5 = Entry(frame, width=10, textvariable=m5) cben5.config(font=("Aeril", 15)) cben5.bind("<Leave>", Numberonly5) cben5.grid(row=51, column=1, sticky="W") cb6 = Checkbutton(frame, text="Computer_Science") cb6.config(font=("Aerial", 15)) cb6.grid(row=52, column=0, pady=15) cben6 = Entry(frame, width=10, textvariable=m6) cben6.config(font=("Aeril", 15)) cben6.bind("<Leave>", Numberonly6) cben6.grid(row=52, column=1, sticky="W") cal_but = Button(frame, padx=10, bd=7, font=("Helvetica", 10, "bold"), 
width=15, text="Calculate Percentage", bg="blue", command=Numberonly16).grid(row=62, column=0, pady=10) l35 = Label(frame, text="Class 12 percentage", bg="light blue") l35.config(font=("Aerial", 20,)) l35.grid(row=53, column=0, pady=30) cben16 = Entry(frame, width=10, textvariable=answer, state=DISABLED) cben16.config(font=("Aeril", 15)) cben16.grid(row=53, column=1, sticky="W") l36 = Label(frame, text="Class 12 Aggregate", bg="light blue") l36.config(font=("Aerial", 20,)) l36.grid(row=54, column=0, pady=30) cben17 = Entry(frame, width=10, textvariable=s, state=DISABLED) cben17.config(font=("Aeril", 15)) cben17.grid(row=54, column=1, sticky="W") cb19 = Checkbutton(frame, text="I agree to the terms and conditions and hereby declare to abide by the rules and regulations of the college", bg="light green") cb19.config(font=("Aerial", 15)) cb19.grid(row=66, column=0, pady=15, columnspan=3) sub_but = Button(frame, padx=10, bd=7, font=("Helvetica", 10, "bold"), width=15, text="SUBMIT", bg="red", fg="white", command=DatabaseAdd).grid(row=67, column=0, padx=100) # Thanks l16p = Label(frame, text="Thank", bg="light blue") l16p.config(font=("Aerial", 20)) l16p.grid(row=400, column=750) # You l15 = Label(frame, text="You", bg="light blue") l15.config(font=("Aerial", 20)) l15.grid(row=400, column=800) # So much l15 = Label(frame, text="So Much Visit Again", bg="light blue") l15.config(font=("Aerial", 20)) l15.grid(row=400, column=850) root.mainloop()
Adrish1999/Python-GUI
Reg_Form_Without_Login.py
Reg_Form_Without_Login.py
py
54,535
python
en
code
0
github-code
36
[ { "api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 61, "usage_type": "attribute" }, { "api_name": "tkinter.messagebox.showinfo", "line_number": 62, "usage_type": "call" }, { "api_name...
19107043906
# -*- coding: utf-8 -*- """ Created on Wed Oct 3 10:54:37 2018 @author: Administrator """ from greedyBandit import GreedyBandit from decGreedyBandit import DecGreedyBandit from optiBandit import OptiBandit from UCBBandit import UCBBandit from TSBandit import TSBandit import matplotlib.pyplot as plt times = 10000 #Simulation times greedy = GreedyBandit(0,0) #Greedy Bandit dec_greedy = DecGreedyBandit(0) #Decreasing Greedy Bandit opti = OptiBandit(0) #Optimistic Initial Value Bandit ucb = UCBBandit(0) #UCB Bandit ts = TSBandit(0) #TS Bandit #Function Arguments m1=0.9 m2=0.5 m3=0.2 e=0.1 #Run Epsilon Greedy greedy_result = greedy.run(m1,m2,m3,e,times) #Run Decreasing Epsilon Greedy dec_greedy_result = dec_greedy.run(m1,m2,m3,times) #Run Optimistic Initial Value opti_result = opti.run(m1,m2,m3,e,times) #Run UCB ucb_result = ucb.run(m1,m2,m3,times) #Run Thompson Sampling ts_result = ts.run(m1,m2,m3,times) #Plot the result plt.plot(greedy_result,label="Epsilon Greedy") plt.plot(dec_greedy_result,label="Decreasing Epsilon Greedy") plt.plot(opti_result,label="Optimistic Initial Value") plt.plot(ucb_result,label="UCB1") plt.plot(ts_result,label="Thompson Sampling") #Show the graph plt.legend(loc='upper right') plt.show()
JJ-Tom-Li/Reinforcement-Machine-Learning
Programming Project -1 -MAB/main.py
main.py
py
1,356
python
en
code
0
github-code
36
[ { "api_name": "greedyBandit.GreedyBandit", "line_number": 19, "usage_type": "call" }, { "api_name": "decGreedyBandit.DecGreedyBandit", "line_number": 20, "usage_type": "call" }, { "api_name": "optiBandit.OptiBandit", "line_number": 21, "usage_type": "call" }, { "a...
22905033389
import requests import json import re req=requests.Session() header={ "user-agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36" } url="http://www.122.gov.cn/m/map/select" page=req.get(url=url,headers=header) print(page.text) pattern=re.compile(r"<i sfdm=\"(\d+)\" sfmc=\"(.*?)\" ym=\"(.*?)\" fzjg=\"(.*?)\".*?sftjb=\"(.*?)\"></i>",re.S) d=re.findall(pattern,page.text) s={} for i in d: s[i[0]]={"address":i[1],"url":i[2],"cp":i[3],"sftjb":i[4]} print(s) json.dump(s,open("./info.json","w"))
nanxung/testProject
test.py
test.py
py
585
python
en
code
0
github-code
36
[ { "api_name": "requests.Session", "line_number": 4, "usage_type": "call" }, { "api_name": "re.compile", "line_number": 18, "usage_type": "call" }, { "api_name": "re.S", "line_number": 18, "usage_type": "attribute" }, { "api_name": "re.findall", "line_number": ...
15239706297
# Downloadable Modules import os import socket import argparse import atexit import time from threading import Thread # Self-made Modules import database_utility as db_util from ping_utility import ping_service from handlers import conn_handler import schemas import config # Startup of server # Initialize Variables HOST = '127.0.0.1' PORT = 10000 OPEN_CONNECTION_LIMIT = 100 # File and Storage Schemas in the database file_info = schemas.file storage_info = schemas.storage # Parse Arguments parser = argparse.ArgumentParser() parser.add_argument('--host', type=str, default=config.HOST, help='IP for the server') parser.add_argument('--port', type=int, default=config.PORT, help='Port for the server') parser.add_argument('--open_conn_limit', type=int, default=config.OPEN_CONNECTION_LIMIT, help='The limit of number of open connections') args = parser.parse_args() args_dict = vars(args) # Create Database print("Connecting to database ...") db_handler = db_util.DB_Interface() db_handler.connect_db() print("Connection Successful") # Start Ping Service print("Starting Ping service ...") ping_thread = Thread(target = ping_service, args= (db_handler, storage_info["table_name"], "ip")) ping_thread.daemon = True ping_thread.start() print("Ping Started") # Create an open socket for accepting connections print("Creating Socket for acceptings connections") open_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) open_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) open_socket.bind((args.host, args.port)) open_socket.listen(args.open_conn_limit) print("Socket Started") @atexit.register def cleanup(): print("Closing connection to socket") open_socket.close() while True: # Accept connection conn, addr = open_socket.accept() print("Got a connection from {}".format(addr)) db_handler_thread = db_util.DB_Interface() db_handler_thread.connect_db() handler_thread = Thread( target = conn_handler, args=(conn, addr, db_handler_thread)) handler_thread.start()
arhamchopra/decentralized-file-storage
server/server.py
server.py
py
2,094
python
en
code
1
github-code
36
[ { "api_name": "schemas.file", "line_number": 23, "usage_type": "attribute" }, { "api_name": "schemas.storage", "line_number": 24, "usage_type": "attribute" }, { "api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call" }, { "api_name": "config...
902272609
from multiprocessing import Process,Queue import os,time def write(q): print('启动写子进程%s' % os.getpid()) for chr in ["A","B","C","D"]: q.put(chr) time.sleep(1) print('结束写子进程%s' % os.getpid()) def read(q): print('启动读子进程%s'% os.getpid()) while True: value= q.get(True) print("value= "+value) print('结束读子进程%s'% os.getpid()) if __name__=='__main__': print('父进程开始') #父进程创建队列,并传递哥子进程 q = Queue() pw = Process(target=write,args=(q,)) pr = Process(target=read,args=(q,)) pw.start() pr.start() # pw.join() #pr进程是个死循环,无法等待其结束,只能强行结束 pr.terminate() print('父进程结束')
hughgo/Python3
基础代码/进程/10 进程间通信.py
10 进程间通信.py
py
805
python
en
code
10
github-code
36
[ { "api_name": "os.getpid", "line_number": 6, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 9, "usage_type": "call" }, { "api_name": "os.getpid", "line_number": 10, "usage_type": "call" }, { "api_name": "os.getpid", "line_number": 13, "...
25451901336
from flask import Blueprint from marketplace import db, login_required from marketplace.models import Item, Tag tag_item = Blueprint('tag_item', __name__) @tag_item.route('/tag_item/<item_id>/<tag>') @login_required def tag_an_item(item_id, tag): # Get matching item matching_items = db.session.query(Item).filter_by(id=item_id).all() if len(matching_items) == 0: return "No matching items found!" if len(matching_items) > 1: return "Too many items found!" # Get existing item tags and ensure not already there item_tags = matching_items[0].tags exists = False for existing_tag in item_tags: if existing_tag.name == tag: exists = True if exists: return "Already exists!" # If not, see if the tag is already in the tag database tag_t = "" matching_tags = db.session.query(Tag).filter_by(name=tag).all() if len(matching_tags) == 0: # No? Create tag tag_t = Tag(tag) db.session.add(tag_t) db.session.commit() else: # Add item to new/existing tag tag_t = matching_tags[0] # Pair up item with tag matching_items[0].tags.append(tag_t) db.session.commit() return "Added tag!"
adicu/marketplace
marketplace/routes/tag_item.py
tag_item.py
py
1,239
python
en
code
3
github-code
36
[ { "api_name": "flask.Blueprint", "line_number": 5, "usage_type": "call" }, { "api_name": "marketplace.db.session.query", "line_number": 12, "usage_type": "call" }, { "api_name": "marketplace.models.Item", "line_number": 12, "usage_type": "argument" }, { "api_name"...
71984366824
import pandas as pd from sklearn import metrics from sklearn import preprocessing from chapter5 import config from chapter5 import model_dispatcher from common import utils def run(fold): df = pd.read_csv(config.CENSUS_FILE_FOLDS) # 目的変数を変換 target_mapping = {"<=50K": 0, ">50K": 1} df.loc[:, "income"] = df["income"].map(target_mapping) ftrs = utils.exclude_cols_from_df(df, ("kfold", "income")) # すべて質的変数のデータなので、すべてのカラムの欠損値を同様に補完 for col in ftrs: df.loc[:, col] = df[col].astype(str).fillna("NONE") # ラベルエンコード # one hot エンコードに対し決定木系は時間がかかるため for col in ftrs: lbl = preprocessing.LabelEncoder() lbl.fit(df[col]) df.loc[:, col] = lbl.transform(df[col]) # 引数と一致しない番号を学習に、さもなくば検証に利用 df_train = df[df.kfold != fold].reset_index(drop=True) df_valid = df[df.kfold == fold].reset_index(drop=True) x_train = df_train[ftrs].values x_valid = df_valid[ftrs].values # 学習 mdl = model_dispatcher.models["xgb"](n_jobs=-1) mdl.fit(x_train, df_train.income.values) # AUCを計算 # predict_proba で [[クラス「0」の確率、クラス「1」の確率]] の配列を取得できる valid_preds = mdl.predict_proba(x_valid)[:, 1] auc = metrics.roc_auc_score(df_valid.income.values, valid_preds) print(f"Fold={fold}, AUC={auc}") if __name__ == "__main__": for i in range(5): run(i)
YasudaKaito/aaamlp_transcription
project/src/chapter5/census_lbl_xgb.py
census_lbl_xgb.py
py
1,606
python
en
code
0
github-code
36
[ { "api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call" }, { "api_name": "chapter5.config.CENSUS_FILE_FOLDS", "line_number": 13, "usage_type": "attribute" }, { "api_name": "chapter5.config", "line_number": 13, "usage_type": "name" }, { "api_name":...
670561323
#!/usr/bin/env python2 # -*- coding: utf-8 -*- import pandas as pd import datetime import matplotlib.pyplot as plt import paths as paths from DataBaseProxy import DataBaseProxy from util import Utility util = Utility() dbp = DataBaseProxy() year = 2017 month = 5 day = 6 #km macchine per enjoy e car2go in una settimana start = datetime.datetime(year, month, day, 0, 0, 0) end = datetime.datetime(year, month +2, day, 23, 59, 0) end2 = datetime.datetime(year, month, day, 23,59,0) def clean_durations(df): df = df[df.duration < df.duration.quantile(0.99)] df = df[df.duration > df.duration.quantile(0.01)] return df def duration_per_car(df) : out_df= pd.DataFrame() out_df["plate"] = df.plate out_df['duration'] = df.duration dur_per_car = out_df.groupby('plate', as_index = False).sum() return dur_per_car def bookings_per_car(df): df_freq = df.groupby('plate').count() df_freq = df_freq[['_id']].copy() df_freq = df_freq.rename(columns={'_id': 'freq'}) return df_freq def parkings_per_car(df) : out_df= pd.DataFrame() out_df["plate"] = df.plate out_df['number_of_parkings'] = df.duration dur_per_car = out_df.groupby('plate', as_index = False).count() return dur_per_car def total_dur_per_car(df, df2): provider = util.get_provider(df) color = util.get_color(df) df = clean_durations(df) dur_per_car = duration_per_car(df) freq_per_car = bookings_per_car(df2) fig, ax = plt.subplots(1, 1, figsize=(9,10)) my_xticks = dur_per_car.plate # print len(my_xticks) ax.plot(dur_per_car.index, dur_per_car.duration, linestyle='-', marker='x',color=color) # ax.set_xticks(my_xticks) ax.set_title("min per car - " + provider) ax.set_xlabel("Plate") ax.set_ylabel("Total minutes") plt.show() dur_per_car.set_index('plate', inplace=True) dur_per_car['freq'] = freq_per_car['freq'] dur_per_car.dropna() return dur_per_car def total_dist_per_car_no_outliers (df): provider = util.get_provider(df) color = util.get_color(df) df = clean_durations(df) dur_per_car = duration_per_car(df) std = 
dur_per_car['duration'].std() avg = dur_per_car['duration'].median() normalized_durations = dur_per_car[(dur_per_car['duration'] >= (avg-std)) & (dur_per_car['duration'] <= (avg+std))] fig, ax = plt.subplots(1, 1, figsize=(9,10)) # my_xticks = normalized_durations.plate # print len(my_xticks) # plt.xticks(normalized_durations.index, my_xticks) plt.plot(normalized_durations.index, normalized_durations['duration'], linestyle='-', marker='x',color=color) ax.set_title("min per car in std - " + provider) ax.set_xlabel("Plate") ax.set_ylabel("Total minutes") plt.show() def hist_dur_freq(column, df, df_source, data): provider = util.get_provider(df_source) color = util.get_color(df_source) if column == "duration": xlabel = "min" else : xlabel = "" if column == "freq": df = df.dropna() fig, ax = plt.subplots(2, 4, figsize=(20,10)) fig.suptitle(provider + ' - ' + column + ' distributions') #uncleaned data ax[0,0].hist(df[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True) ax[0,0].set_title("CDF - " + column) ax[0,0].set_xlabel(xlabel) ax[1,0].hist(df[column], 50, facecolor=color, alpha=0.75) ax[1,0].set_title("PDF - " + column) ax[1,0].set_xlabel(xlabel) #filtering - only cars with at least 3 parkings at day df = df[df.freq > 30] ax[0,1].hist(df[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True) ax[0,1].set_title("filtered CDF - " + column) ax[0,1].set_xlabel(xlabel) ax[1,1].hist(df[column], 50, facecolor=color, alpha=0.75) ax[1,1].set_title("filtered PDF - " + column) ax[1,1].set_xlabel(xlabel) #divided per number of days ax[0,2].hist(df[column]/data["valid_days"], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True) ax[0,2].set_title("filtered CDF per day - " + column) ax[0,2].set_xlabel(xlabel) ax[1,2].hist(df[column]/data["valid_days"], 50, facecolor=color, alpha=0.75) ax[1,2].set_title("filtered PDF per day - " + column) ax[1,2].set_xlabel(xlabel) #divided per number of days in interval 
ax[0,3].hist(df[column]/data["cleaned_valid_days"], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True) ax[0,3].set_title("filtered CDF per day clnd - " + column) ax[0,3].set_xlabel(xlabel) ax[1,3].hist(df[column]/data["cleaned_valid_days"], 50, facecolor=color, alpha=0.75) ax[1,3].set_title("filtered PDF per day clnd - " + column) ax[1,3].set_xlabel(xlabel) res = { column+"_mean" : df[column].mean(), column+"_median": df[column].median(), column+"_std" : df[column].std(), column+"_mean_valid_days" : (df[column]/data["valid_days"]).mean(), column+"_median_valid_days": (df[column]/data["valid_days"]).median(), column+"_std_valid_days" : (df[column]/data["valid_days"]).std(), column+"_mean_valid_days_clnd" : (df[column]/data["cleaned_valid_days"]).mean(), column+"_median_valid_days_clnd": (df[column]/data["cleaned_valid_days"]).median(), column+"_std_valid_days_clnd" : (df[column]/data["cleaned_valid_days"]).std() } fig.savefig(paths.plots_path3+"_"+provider+"_"+column+"_parkings_tats.png", bbox_inches='tight') return df,res # #enjoy_parkings = dbp.query_parkings_df('enjoy','Torino', start, end) #car2go_parkings = dbp.query_parkings_df('car2go','Torino', start, end) #enjoy_parkings.to_pickle(paths.enjoy_parkings_pickle_path, None) #car2go_parkings.to_pickle(paths.car2go_parkings_pickle_path, None) enjoy = pd.read_pickle(paths.enjoy_pickle_path, None) car2go = pd.read_pickle(paths.car2go_pickle_path, None) enjoy_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path, None) car2go_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path, None) #enj_data = util.get_valid_days(enjoy,start,end) #c2g_data = util.get_valid_days(car2go,start,end) #enjoy_parkings_duration = duration_per_car(enjoy_parkings) #enj_park_duration_freq = total_dur_per_car(enjoy_parkings, enjoy) #total_dist_per_car_no_outliers(enjoy) #enj_clean, enj_data["park_stats_duration"] = hist_dur_freq("duration", enj_park_duration_freq, enjoy, enj_data) #enj_clean, 
enj_data["park_stats_freq"] = hist_dur_freq("freq", enj_park_duration_freq, enjoy, enj_data) # #car2go_parkings_duration = duration_per_car(car2go_parkings) #car2go_park_duration_freq = total_dur_per_car(car2go_parkings, car2go) #total_dist_per_car_no_outliers(car2go) #c2g_clean, c2g_data["park_stats_duration"] = hist_dur_freq("duration", car2go_park_duration_freq, car2go, c2g_data) #c2g_clean, c2g_data["park_stats_freq"] = hist_dur_freq("freq", car2go_park_duration_freq, car2go, c2g_data) """ Avg parking time per car (valid days) """ #enj_clean["duration_per_day"] = enj_park_duration_freq["duration"]/(enj_data["cleaned_valid_days"]) #enj_clean["freq_per_day"] = enj_park_duration_freq["freq"]/(enj_data["cleaned_valid_days"]) #c2g_clean["duration_per_day"] = car2go_park_duration_freq["duration"]/(c2g_data["cleaned_valid_days"]) #c2g_clean["freq_per_day"] = car2go_park_duration_freq["freq"]/(enj_data["cleaned_valid_days"]) # # #fig,ax =plt.subplots(1, 1, figsize=(9,10)) #enj_clean.hist(ax=ax, color=util.get_color(enjoy)) #fig2,ax2 = plt.subplots(1, 1, figsize=(9,10)) #c2g_clean.hist(ax=ax2, color=util.get_color(car2go)) ''' come informazione ho il numero di minuti in cui è stata ferma la macchina, e il numero di prenotazioni che questa ha ricevuto ''' #total_dist_per_car_no_outliers(enjoy_parkings) #dur_per_car['index'] = dur_per_car['index'] / (dur_per_car['index'].sum()) #dur_per_car.hist(bins=100, cumulative=True, normed=True) #df2 = parkings_per_car(enjoy_parkings) #enjoy_parkings_duration['count'] = df2['number_of_parkings'] # #df = enjoy_parkings[ # (enjoy_parkings.plate == 'EZ049TY') # ]
michelelt/MyTool
Analysis/parkingsAnalysis.py
parkingsAnalysis.py
py
8,244
python
en
code
0
github-code
36
[ { "api_name": "util.Utility", "line_number": 10, "usage_type": "call" }, { "api_name": "DataBaseProxy.DataBaseProxy", "line_number": 11, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 19, "usage_type": "call" }, { "api_name": "datetime.d...
975733751
""" File: model_ops.py Author: Tomáš Daniš Login: xdanis05 Description: Module holding functions implementing model operations - such as training, predicting or evaluating """ import torch.nn as nn import torch.nn.utils as tut import torch.optim as optim import torch from tqdm import tqdm import torchtext.data as torch_data import conf from baseline import totext _DEBUG = False # Debugging flag def train(net, train_iterator, dev_iterator, vocabs, epoch_num=4, lr=0.002): """ Train a given model on the given dataset :param net: Model to train :param train_iterator: Iterator through a training set of the dataset to use :param dev_iterator: Iterator through a development set of the dataset to use :param vocabs: Vocabularies used in the dataset, only used for debugging :param epoch_num: Number of epochs to train for :param lr: Learning rate to train with :return: None """ net.train() criterion = nn.CrossEntropyLoss() parameters = filter(lambda p: p.requires_grad, net.parameters()) optimizer = optim.Adamax(parameters, lr=lr) # Training loop for epoch in tqdm(range(epoch_num), total=epoch_num, desc="Epoch"): epoch_loss = 0 # Epoch loop for i, batch in tqdm(enumerate(train_iterator), total=len(train_iterator), desc="Iteration"): net.train() if _DEBUG: q = totext(batch.question[0],vocabs[0],batch_first=False) d = totext(batch.document[0],vocabs[0],batch_first=False) a1 = totext(batch.answer1[0],vocabs[0],batch_first=False) a2 = totext(batch.answer2[0],vocabs[0],batch_first=False) print("* "*20+"NEXT"+"* "*20) print(d[0]) print("* " * 20 + "Question" + "* " * 20) print(q[0]) print("* " * 20 + "Answers" + "* " * 20) print(a1[0]) print(a2[0]) optimizer.zero_grad() out = net(batch) loss = criterion(out, batch.correct) loss.backward() tut.clip_grad_norm_(parameters, 0.5) optimizer.step() epoch_loss += loss.item() # At the end of an epoch, evaluate the current performance on the development set with torch.no_grad(): net.eval() dev_loss = 0 j = 0 correct = 0 total = 0 for j, 
val_batch in enumerate(dev_iterator): out = net(val_batch) total += val_batch.correct.size(0) loss = criterion(out, val_batch.correct) dev_loss += loss.item() _, pred_indexes = torch.max(out.data, 1) correct += (pred_indexes == val_batch.correct).sum().item() print('Epoch: {0}, Train loss: {1}, Dev loss: {2}, Dev accuracy: {3}%'.format( epoch, epoch_loss/len(train_iterator), dev_loss/(j+1), correct*100/total)) def predict(net, input, fields): """ Predict answers for the given input :param net: Model to predict with :param input: Input to predict on :param fields: Structure of the data the model expects :return: Predictions for the given inputs """ net.eval() example = torch_data.Example.fromlist(input, fields) dataset = torch_data.Dataset([example]) iterator = torch_data.Iterator(dataset, batch_size=1) net_in = next(iter(iterator)) return predict_batch(net, net_in) def predict_batch(net, batch): """ Predicts a single batch using the model provided :param net: Model to predict with :param batch: Batch to predict on :return: Predictions """ with torch.no_grad(): out = net(batch) _, predicted = torch.max(out.data, 1) return predicted def eval_model(net, val_iter): """ Evaluate a model's performance on the given test set :param net: Model to evaluate :param val_iter: Data to evaluate on :return: A tuple with the first item being the accuracy. The second item is a list of F1 scores for all classes in the task. """ correct = 0 total = 0 cm = conf.ConfusionMatrix([0, 1]) net.eval() with torch.no_grad(): for batch in val_iter: total += batch.correct.size(0) prediction = predict_batch(net, batch) cm.add_entry(batch.correct.tolist(), prediction.tolist()) correct += (prediction == batch.correct).sum().item() return correct/total, cm.get_f1()
AgiNetz/FIT-VUT-projects
Thesis - Machine Comprehension using Commonsense Knowledge/Source codes/Baseline/model_ops.py
model_ops.py
py
4,576
python
en
code
0
github-code
36
[ { "api_name": "torch.nn.CrossEntropyLoss", "line_number": 36, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 36, "usage_type": "name" }, { "api_name": "torch.optim.Adamax", "line_number": 39, "usage_type": "call" }, { "api_name": "torch.optim", ...
42186826955
import discord from discord.ext import commands from discord.utils import get import json from discord.ext.commands import has_permissions, MissingPermissions import asyncio bot = commands.Bot(command_prefix="!") @bot.event async def on_ready(): print("Ticket bot running...") @bot.command() async def help_me(ctx): em = discord.Embed(title="Auroris Tickets Help", description="", color=0x00a8ff) await ctx.send(embed=em) #@bot.command() async def new(ctx, message_content = ""): await bot.wait_until_ready() with open("data.json") as f: data = json.load(f) ticket_number = int(data["ticket-counter"]) ticket_number += 1 ticket_channel = await ctx.guild.create_text_channel("ticket-{}".format(ticket_number)) await ticket_channel.set_permissions(ctx.guild.get_role(ctx.guild.id), send_messages=False, read_messages=False) for role_id in data["valid-roles"]: role = ctx.guild.get_role(role_id) await ticket_channel.set_permissions(role, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True) await ticket_channel.set_permissions(ctx.author, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True) em = discord.Embed(title="New ticket from {}#{}".format(ctx.author.name, ctx.author.discriminator), description= "{}".format(message_content), color=0x00a8ff) await ticket_channel.send(embed=em) pinged_msg_content = "" non_mentionable_roles = [] if data["pinged-roles"] != []: for role_id in data["pinged-roles"]: role = ctx.guild.get_role(role_id) pinged_msg_content += role.mention pinged_msg_content += " " if role.mentionable: pass else: await role.edit(mentionable=True) non_mentionable_roles.append(role) await ticket_channel.send(pinged_msg_content) for role in non_mentionable_roles: await role.edit(mentionable=False) data["ticket-channel-ids"].append(ticket_channel.id) data["ticket-counter"] = 
int(ticket_number) with open("data.json", 'w') as f: json.dump(data, f) created_em = discord.Embed(title="Auroris Tickets", description="Your ticket has been created at {}".format(ticket_channel.mention), color=0x00a8ff) await ctx.send(embed=created_em) @bot.command() async def close(ctx): with open('data.json') as f: data = json.load(f) if ctx.channel.id in data["ticket-channel-ids"]: channel_id = ctx.channel.id def check(message): return message.author == ctx.author and message.channel == ctx.channel and message.content.lower() == "close" try: em = discord.Embed(title="Interest Tickets", description="Are you sure you want to close this ticket? Reply with `close` if you are sure.", color=0x00a8ff) await ctx.send(embed=em) await bot.wait_for('message', check=check, timeout=60) await ctx.channel.delete() index = data["ticket-channel-ids"].index(channel_id) del data["ticket-channel-ids"][index] with open('data.json', 'w') as f: json.dump(data, f) except asyncio.TimeoutError: em = discord.Embed(title="Auroris Tickets", description="You have run out of time to close this ticket. Please run the command again.", color=0x00a8ff) await ctx.send(embed=em) @bot.command() async def interest(ctx): page = discord.Embed ( title = 'Which sport are you interested in?', description = ''' '⚽': soccer ball '⚾': baseball '🥎': softball '🏀': basketball '🏐': volleyball Please click sport emoji you are interested in. ''', colour = discord.Colour.orange() ) message = await ctx.send(embed = page) await message.add_reaction('⚽') #soccer ball await message.add_reaction('⚾') #baseball await message.add_reaction('🥎') #softball await message.add_reaction('🏀') #basketball await message.add_reaction('🏐') #volleyball def check(reaction, user): return user == ctx.author reaction = None while True: if str(reaction) == '⚽': await new(ctx, message_content="Welcome to Soccer Ball Ticket! 
If you wanna close, please type !close") elif str(reaction) == '⚾': await new(ctx, message_content="Welcome to Baseball Ticket! If you wanna close, please type !close") elif str(reaction) == '🥎': await new(ctx, message_content="Welcome to Softball Ticket! If you wanna close, please type !close") elif str(reaction) == '🏀': await new(ctx, message_content="Welcome to Basketball Ticket! If you wanna close, please type !close") elif str(reaction) == '🏐': await new(ctx, message_content="Welcome to Volleyball Ticket! If you wanna close, please type !close") try: reaction, user = await bot.wait_for('reaction_add', timeout = 30.0, check = check) await message.remove_reaction(reaction, user) except: break await message.clear_reactions() bot.run('ODczMjQ2MjI2MDEzOTEzMTA5.YQ1n7A.PSRzth7b-scgFehCtJEs6yhGI_8') #bot.run('ODcyNDEwOTc4MTkxNTczMDQy.YQpeCQ.FCeibb_4ee1NkZAo2irmLhH2fLI')
stevewoz1234567890/disocrd-bot
ticket_bot.py
ticket_bot.py
py
5,611
python
en
code
0
github-code
36
[ { "api_name": "discord.ext.commands.Bot", "line_number": 8, "usage_type": "call" }, { "api_name": "discord.ext.commands", "line_number": 8, "usage_type": "name" }, { "api_name": "discord.Embed", "line_number": 16, "usage_type": "call" }, { "api_name": "json.load",...
9567324814
import logging
import airflow
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from utils.slugify import slugify

# Default arguments applied to every task of this DAG.
args = {
    'owner': 'airflow',
    'start_date': airflow.utils.dates.days_ago(2)
}

# Economic-activity categories (SCIAN-style id + human-readable name).
categorias = [
    {'actividad_id': 11, 'actividad': "Agricultura, ganaderia, aprovechamiento forestal, pesca y caza"},
    {'actividad_id': 21, 'actividad': "Mineria"},
    {'actividad_id': 22, 'actividad': "Electricidad, agua y suministro de gas por ductos al consumidor final"},
    {'actividad_id': 23, 'actividad': "Construccion"},
    {'actividad_id': 31, 'actividad': "Industrias manufactureras"},
    {'actividad_id': 43, 'actividad': "Comercio al por mayor"},
    {'actividad_id': 46, 'actividad': "Comercio al por menor"},
    {'actividad_id': 48, 'actividad': "Transporte, correos y almacenamiento"},
    {'actividad_id': 51, 'actividad': "Informacion en medios masivos"},
    {'actividad_id': 52, 'actividad': "Servicios financieros y de seguros"},
    {'actividad_id': 53, 'actividad': "Servicios inmobiliarios y de alquiler de bienes muebles e intangibles"},
    {'actividad_id': 54, 'actividad': "Servicios profesionales, cientificos y tecnicos"},
    {'actividad_id': 55, 'actividad': "Direccion de corporativos y empresas"},
    {'actividad_id': 56, 'actividad': "Apoyo a los negocios y manejo de desechos y serv. de remediacion"},
    {'actividad_id': 61, 'actividad': "Servicios educativos"},
    {'actividad_id': 62, 'actividad': "Servicios de salud y de asistencia social"},
    {'actividad_id': 71, 'actividad': "Serv. de esparcimiento culturales y deportivos, y otros serv. recreativos"},
    {'actividad_id': 72, 'actividad': "Servicios de alojamiento temporal y de preparacion de alimentos y bebidas"},
    {'actividad_id': 81, 'actividad': "Otros servicios excepto actividades del gobierno"},
    {'actividad_id': 93, 'actividad': "Actividades del gobierno y organismos internacionales extraterritoriales"},
]

dag = DAG(
    dag_id='03_siem_informacion_empresarial',
    default_args=args,
    schedule_interval='@monthly')

start_node = DummyOperator(task_id='inicio', dag=dag)
end_node = DummyOperator(task_id='fin', dag=dag)


def tareas_categorias(categorias):
    """Create one placeholder task per category and chain them sequentially.

    The first task hangs off ``start_node``; each subsequent task follows the
    previous one. Every task is also wired directly into ``end_node``,
    reproducing the original graph shape exactly.
    """
    anterior = None
    for categoria in categorias:
        tarea = DummyOperator(task_id=slugify(categoria['actividad'])[:20], dag=dag)
        if anterior is None:
            start_node >> tarea
        else:
            anterior >> tarea
        anterior = tarea
        # NOTE: intentionally inside the loop — every task fans into 'fin',
        # matching the original set_downstream(end_node) placement.
        tarea >> end_node


tareas_categorias(categorias)
erikriver/mixtli-etc
dags/03_siem_informacion_empresarial.py
03_siem_informacion_empresarial.py
py
2,866
python
es
code
2
github-code
36
[ { "api_name": "airflow.utils.dates.days_ago", "line_number": 12, "usage_type": "call" }, { "api_name": "airflow.utils", "line_number": 12, "usage_type": "attribute" }, { "api_name": "airflow.models.DAG", "line_number": 58, "usage_type": "call" }, { "api_name": "ai...
3883658721
# =============================================================
# Imports
# =============================================================
import logging
import smtplib

from server.utils import notification

# =============================================================
# Constant
# =============================================================

MAIL_SERVER = 'mail.haligonia.home.com'
ESXI_CONTROLLER_ADDRESS = 'esxicontroller@mail.haligonia.home.com'

# =============================================================
# Source
# =============================================================


class notificationDispatch(object):
    """
    Message dispatcher for the ESXi controller framework.

    Builds a notification email from a template and relays it through the
    configured mail server.
    """

    # Destination address of the notification being dispatched
    __destination = None

    # Template type of the notification being dispatched
    __msg_type = None

    # Fully formatted message object, ready to send
    __message = None

    # Class logger
    __logger = None

    def __init__(self, log_level=logging.INFO):
        """
        Default constructor.

        :param log_level: logging verbosity for this dispatcher
        :return:
        """
        self.__logger = logging.getLogger("ESXiController - VmNotificationDispatch")
        self.__logger.setLevel(log_level)
        return

    def send_notification(self, dest_address, msg_type, reason, configs):
        """
        Format and dispatch a notification email.

        :param dest_address: recipient address
        :param msg_type: template type of the message to send
        :param reason: the event that triggered the notification
        :param configs: configuration payload embedded in the message
        :return:
        """
        # Remember the request details; __setup_message reads them.
        self.__destination = dest_address
        self.__msg_type = msg_type

        # Open an SMTP session against our mail server first
        # (same side-effect order as before: connect, then format).
        session = smtplib.SMTP(MAIL_SERVER)

        # Build the outgoing message from the template.
        self.__setup_message(reason, configs)

        # Relay the message and disconnect.
        session.sendmail(ESXI_CONTROLLER_ADDRESS,
                         self.__destination,
                         self.__message.as_string())
        session.quit()
        return

    def __setup_message(self, reason, configs):
        """
        Build the notification email body for the current request.

        :param reason: the event that triggered the notification
        :param configs: the vm configs
        :return:
        """
        # Fetch the template for the requested type, then fill it in.
        self.__message = notification.get(self.__msg_type)
        self.__message = notification.format(self.__destination,
                                             self.__message,
                                             reason,
                                             configs)
        return
CaptFrank/EsxiController
server/utils/notification/notificationdispatch.py
notificationdispatch.py
py
2,687
python
en
code
0
github-code
36
[ { "api_name": "logging.INFO", "line_number": 43, "usage_type": "attribute" }, { "api_name": "logging.getLogger", "line_number": 49, "usage_type": "call" }, { "api_name": "server.utils", "line_number": 69, "usage_type": "name" }, { "api_name": "smtplib.SMTP", "...
22776733908
import json
import sys
import os
import exceptions as e
import dev
from decouple import config
import time
import itemdat
import pathlib

# Base directory for all data files. Windows-style "\\" separators are used
# throughout this module, so it is effectively Windows-only.
pathto = str(pathlib.Path().absolute())

devmsg = dev.Dev.SendMessage()
devmsg("Setting up RPG module...")


class RPG():
    """Console RPG front-end: profile selection, inventory and battles.

    All user-facing text is Russian; devmsg() emits English debug output.
    """

    def __call__(self):
        self.start_up()

    def get_user_inv(self, profile):
        """Print the name and lore of every item in the profile's inventory.

        :param profile: profile slot number (1-3)
        :raises e.Fatals.CantFindPlayerDataFiles: when the inventory is null
        :raises e.JSONErrors.CantParseInventoryData: when an inventory entry
            has no matching definition in data/items.json
        """
        devmsg("Getting user inv data...")
        with open((pathto + f"\\playerdata\\player{profile}.json"), "r") as file:
            userdata = json.load(file)
        inv = userdata["INV"]
        if inv is None:
            raise e.Fatals.CantFindPlayerDataFiles(f"PLAYER{profile}.JSON IS EMPTY")
        with open((pathto + "\\data\\items.json"), "r", encoding='utf-8') as file:
            itemd = json.load(file)
        itemdata = itemd["ITEMDATA"]
        for i in inv:
            try:
                item_name = itemdata[i]["NAME"]
                item_lore = itemdata[i]["INFO"]
                print("\nПредмет:\n" + item_name + "\n" + item_lore)
            except KeyError:
                raise e.JSONErrors.CantParseInventoryData(f"PLAYER{profile}.JSON HAS ILLEGAL ITEMDATA")

    def start_up(self):
        """Interactive entry point: draw the banner, let the user pick a
        profile, then dispatch to inventory / item-grant / battle actions."""
        # Mark each of the three save slots as occupied ('полон') or free ('пуст').
        prof = {}
        for i in [1, 2, 3]:
            with open((pathto + f"\\playerdata\\player{i}.json"), "r") as file:
                devmsg(f"Checking file number {i}...")
                profiledata = json.load(file)
            isocc = profiledata["?EMPTY"]
            # "?EMPTY" == False means the slot already holds a character.
            if not isocc:
                prof[f"prof{i}"] = 'полон'
            else:
                prof[f"prof{i}"] = 'пуст'
        profstr = ("Профиль 1 - " + prof["prof1"]
                   + "\nПрофиль 2 - " + prof["prof2"]
                   + "\nПрофиль 3 - " + prof["prof3"])
        # Animated "XAI" banner.
        print("▀████ ▐████▀ ▄████████ ▄█ ")
        time.sleep(0.2)
        print(" ███▌ ████▀ ███ ███ ███ ")
        time.sleep(0.2)
        print(" ███ ▐███ ███ ███ ███▌ ")
        time.sleep(0.2)
        print(" ▀███▄███▀ ███ ███ ███▌ ")
        time.sleep(0.2)
        print(" ████▀██▄ ▀███████████ ███▌ ")
        time.sleep(0.2)
        print(" ▐███ ▀███ ███ ███ ███ ")
        time.sleep(0.2)
        print(" ▄███ ███▄ ███ ███ ███ ")
        time.sleep(0.2)
        print("████ ███▄ ███ █▀ █▀ \n")
        print("Добро пожаловать в XAI!\n")
        print("Введите 1 чтобы начать")
        print("Введите 2 чтобы выйти")
        # FIX: input() never raises ValueError, so the old retry loop around
        # this line was dead code; a plain prompt behaves identically.
        answ = input("Ваш ввод:\n")
        if answ == "2":
            devmsg("Exiting...")
            exit()
        elif answ == "1":
            devmsg('User input = {"answ": "1"}')
            print("Вы выбрали начать.")
            print("Введите номер профиля для открытия/создания. Если профиль полон, то он будет открыт, иначе он будет создан.")
            print(profstr)
            # Re-prompt until an integer is entered. FIX: the old
            # `profile_chosen == ""` check after int() was unreachable.
            while True:
                try:
                    profile_chosen = int(input("Введите номер профиля...\n"))
                    break
                except ValueError:
                    print("Это не число!")
            devmsg(f"User chose to open profile{profile_chosen}.json")
            devmsg("Trying to open profile data...")
            if 1 <= profile_chosen <= 3:
                devmsg(f"Profile{profile_chosen} exists and will be opened")
                profiledata = self.Profile.new_profile(self, profile_chosen)
            else:
                devmsg(f"Profile{profile_chosen} doesnt exists!")
                print("Профиль не существует! Выходим...")
                time.sleep(1)
                exit()
            print(f"Вы выбрали и открыли профиль {profile_chosen}. Что теперь?")
            print("Доступные функции:")
            print("1 - просмотреть инвентарь")
            print("2 - добавить предметы в инвентарь (не стабильно). Предметы - это ID с 001 до 008 включительно.")
            print("3 - начать битву")
            print("4 - выйти.")
            while True:
                try:
                    answ = int(input("Введите число.\n"))
                    break
                except ValueError:
                    print("Это не число!")
            if answ == 4:
                devmsg("Closing...")
                exit()
            elif answ == 2:
                self.Profile.give(self, profile_chosen)
            elif answ == 1:
                self.get_user_inv(profile_chosen)
            elif answ == 3:
                enemy = itemdat.generate_random_mob(itemdat.Item.Use.GetPlayerLocation(profile_chosen))
                itemdat.Item.Use.battle(itemdat.Item.Use, profile_chosen, enemy)
            # Keep the console open until the user presses Enter.
            answ2 = input("\n")

    class Profile():
        """Save-slot management: creation, loading and item grants."""

        def __call__(self, slotnum, playername):
            # NOTE(review): new_profile() does not accept a playername
            # argument, so calling a Profile instance this way would raise
            # TypeError — confirm whether this path is ever used.
            self.new_profile(slotnum, playername)

        # Template written to disk when a fresh profile is created.
        sample_profile_data = {
            "?EMPTY": False,
            "ENDINGS_COMPLETED": {
                "POSITIVE": False,
                "NEGATIVE": False,
                "MIDDLE": False,
                "SHREK": False,
                "OUTBREAK": False,
                "SECRET": False,
                "S_DEATH": False,
                "TRUTH": False
            },
            "LOCATIONS VISITED": {
                "FIELDS": False,
                "BROKEN_TOWN": False,
                "ABANDONED_FARMS": False,
                "TEMPLE": False,
                "MOUNT_VILLAGE": False,
                "SUMMIT": False,
                "LAB": False,
                "HARDMODE_LOCS": {
                    "HOPELESS_FIELDS": False,
                    "REMNANTS_OF_TOWN": False,
                    "BURNT_FARMS": False,
                    "FORBIDDEN_TEMPLE": False,
                    "HIGH_PEAKS": False,
                    "LAST_SUMMIT": False,
                    "CLONE_LAB": False
                }
            },
            "DEATH_AMOUNT": 0,
            "HM_ON": False,
            "INV": [],
            "CURRENT_LOCATION": "FIELDS",
            "BALANCE": 0,
            "ENEMIES_SLAIN": {
                "NORMAL": {
                    "FIELDS": 0,
                    "BROKEN_TOWN": 0,
                    "ABANDONED_FARMS": 0,
                    "TEMPLE": 0,
                    "MOUNT_VILLAGE": 0,
                    "SUMMIT": 0,
                    "LAB": 0
                },
                "HM": {
                    "HOPELESS_FIELDS": 0,
                    "REMNANTS_OF_TOWN": 0,
                    "BURNT_FARMS": 0,
                    "FORBIDDEN_TEMPLE": 0,
                    "HIGH_PEAKS": 0,
                    "LAST_SUMMIT": 0,
                    "CLONE_LAB": 0,
                    "STRONGHOLD": 0
                },
                # NOTE(review): "EXPERIENCE" sits inside ENEMIES_SLAIN rather
                # than at the top level — looks misplaced, but existing save
                # files may rely on it, so the schema is left untouched.
                "EXPERIENCE": 0
            }
        }

        def new_profile(self, slotnum: int):
            """Open slot ``slotnum``, creating it from the template when free.

            :param slotnum: save-slot number
            :return: the loaded profile data dict
            :raises e.Fatals.CantFindDataFiles: when the slot number is out
                of range
            NOTE(review): the range check accepts 0 while the UI only offers
            1-3; player0.json presumably does not exist — confirm.
            """
            if 0 <= slotnum <= 3:
                with open((pathto + f"\\playerdata\\player{slotnum}.json"), "r", encoding='utf-8') as file:
                    devmsg(f"Creating new profile with number {slotnum}...")
                    profiledata = json.load(file)
                isempty = profiledata["?EMPTY"]
                if not isempty:
                    # Slot already holds a character: open it instead of
                    # overwriting the save.
                    devmsg(f"Cant overwrite an existing file with number '{slotnum}'")
                    devmsg("Looking for solution...")
                    time.sleep(1)
                    with open((pathto + f"\\playerdata\\player{slotnum}.json"), "r", encoding='utf-8') as file:
                        devmsg(f"Opening the file instead")
                        profiledata = json.load(file)
                    return profiledata
                else:
                    playername = input("Введите имя нового персонажа\n")
                    # NOTE(review): this mutates the class-level template, so
                    # the chosen name leaks into later profile creations.
                    self.Profile.sample_profile_data["NAME"] = playername.capitalize()
                    devmsg(f"Dumping sample data into existing json file...")
                    with open((pathto + f"\\playerdata\\player{slotnum}.json"), "w", encoding="utf-8") as file:
                        json.dump(self.Profile.sample_profile_data, file, indent=4, sort_keys=True, ensure_ascii=False)
                    with open((pathto + f"\\playerdata\\player{slotnum}.json"), "r", encoding='utf-8') as file:
                        profiledata = json.load(file)
                    return profiledata
            else:
                devmsg(f"Couldnt create a new profile with number {slotnum}, as it doesn't exist!")
                print("Профиль не существует...")
                time.sleep(1)
                raise e.Fatals.CantFindDataFiles("Профиль не существует")

        def give(self, profile):
            """Grant an item (entered by id) to the profile's inventory.

            :param profile: save-slot number of the receiving profile
            :raises e.FileErrors.CantGenerateLoot: when the id is unknown
            """
            with open((pathto + f"\\playerdata\\player{profile}.json"), "r", encoding='utf-8') as file:
                devmsg(f"Loading profile with number {profile}...")
                profiledata = json.load(file)
            print("Выберите айди предмета, чтобы дать персонажу. АЙДИ можно посмотреть на вики, то есть на\nhttps://github.com/Maxuss/XAI/wiki/ID-предметов")
            id_item = input("Введите айди\n")
            try:
                with open((pathto + "\\data\\items.json"), "r", encoding='utf-8') as file:
                    devmsg("Opening itemdata...")
                    itemd = json.load(file)
                itemdata = itemd["ITEMDATA"]
                # KeyError here means the entered id is unknown.
                _current = itemdata[id_item]
                profiledata["INV"].append(id_item)
                with open((pathto + f"\\playerdata\\player{profile}.json"), "w", encoding='utf-8') as file:
                    devmsg("Dumping data to profile...")
                    json.dump(profiledata, file, indent=4, sort_keys=True, ensure_ascii=False)
                print("Предмет добавлен!\nПерезайдите, чтобы посмотреть данные инвентаря!")
            except KeyError:
                devmsg("Item with this ID doesn't exist!")
                print("Такой предмет не существует.")
                # BUG FIX: time.sleep() requires an argument; the bare call
                # raised TypeError here and masked the intended exception.
                time.sleep(1)
                raise e.FileErrors.CantGenerateLoot("Предмет не существует.")
Maxuss/XAI
rpg.py
rpg.py
py
11,367
python
en
code
1
github-code
36
[ { "api_name": "pathlib.Path", "line_number": 10, "usage_type": "call" }, { "api_name": "dev.Dev.SendMessage", "line_number": 12, "usage_type": "call" }, { "api_name": "dev.Dev", "line_number": 12, "usage_type": "attribute" }, { "api_name": "json.load", "line_n...
40967939697
from django.db import models # null=True, blank=True это значит что данное поле может быть пустым, т.е. аватар не обязателен NULLABLE = {'blank': True, 'null': True} class Student(models.Model): first_name = models.CharField(max_length=150, verbose_name='имя') # обязательно last_name = models.CharField(max_length=150, verbose_name='фамилия') # обязательно avatar = models.ImageField(upload_to='students/', verbose_name='аватар', **NULLABLE) # не обязательно т.к. есть **NULLABLE # для email у моделей есть специяльное поле, здесь такой метод применен для эксперимента email = models.CharField(max_length=150, verbose_name='@email', unique=True, **NULLABLE) comment = models.TextField(verbose_name='комментарий менеджера', **NULLABLE) is_active = models.BooleanField(default=True, verbose_name='активный') def __str__(self): return f'{self.first_name}, {self.last_name}' # def delete(self, *args, **kwargs): # """Переопределение метода delete, теперь он деактивирует записи""" # self.is_active = False # self.save() class Meta: verbose_name = 'студент' verbose_name_plural = 'студенты' ordering = ('last_name',) class Subject(models.Model): title = models.CharField(max_length=150, verbose_name='название') description = models.TextField(verbose_name='описание') student = models.ForeignKey(Student, on_delete=models.CASCADE, verbose_name='студент') def __str__(self): return f'{self.title}' class Meta: verbose_name = 'предмет' verbose_name_plural = 'предметы'
DSulzhits/06_3_20_1_django_ORM
main/models.py
models.py
py
1,958
python
ru
code
0
github-code
36
[ { "api_name": "django.db.models.Model", "line_number": 7, "usage_type": "attribute" }, { "api_name": "django.db.models", "line_number": 7, "usage_type": "name" }, { "api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call" }, { "api_name": "...
1593524531
import sqlite3

# Open (or create) the local employee database.
conn = sqlite3.connect('employee.db')
c = conn.cursor()

# One-time schema setup, kept for reference:
# c.execute("""CREATE TABLE employees (
#             first text,
#             last text,
#             pay integer
#             )""")

# Example insert, kept for reference:
# c.execute("INSERT INTO employees VALUES ('Mary', 'oza', 70000)")

# Persist pending writes (a no-op while the statements above stay commented out).
conn.commit()

# Dump the whole employees table.
c.execute("SELECT * FROM employees")
print(c.fetchall())

# FIX: the commit that used to sit here was dead code — a SELECT changes
# nothing, so there is nothing to commit before closing.
conn.close()
Parth-Ps/python
sqlite3_database/employees.py
employees.py
py
347
python
en
code
0
github-code
36
[ { "api_name": "sqlite3.connect", "line_number": 3, "usage_type": "call" } ]
14625474319
from django.shortcuts import render,redirect
from .forms import RegisterForm

# Create your views here.


def register(request):
    """Handle user registration; on success redirect to the 'next' target."""
    # Read the `next` parameter from either the GET or the POST request.
    # In a GET request `next` arrives via the URL: /?next=value
    # In a POST request it arrives via the form: <input type="hidden" name="next" value="{{ next }}"/>
    redirect_to = request.POST.get('next', request.GET.get('next', ''))
    # A POST request means the user submitted registration data.
    if request.method =='POST':
        # Instantiate a form from the request.
        # request.POST is a dict-like structure holding the submitted fields:
        # username, password and email. Use it to build a registration form.
        auth_forms=RegisterForm(request.POST)
        # Validate the submitted data.
        if auth_forms.is_valid():
            # Data is valid: persist the new user to the database.
            auth_forms.save()
            # Registration succeeded; send the user to the redirect target.
            #return redirect(reverse('users:success'))
            return redirect(redirect_to)
    # Not a POST: the user is viewing the registration page — show an empty form.
    else:
        auth_forms = RegisterForm()
    # Render the template.
    # For a GET request the form is empty; for a POST whose data failed
    # validation, the form is re-rendered with the error messages attached.
    return render(request, 'users/register.html', {'auth_forms': auth_forms,'next':redirect_to})
rainy0824/blog_project
users/views.py
views.py
py
1,686
python
zh
code
0
github-code
36
[ { "api_name": "forms.RegisterForm", "line_number": 15, "usage_type": "call" }, { "api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call" }, { "api_name": "forms.RegisterForm", "line_number": 26, "usage_type": "call" }, { "api_name": "djang...
2940420975
import datetime


# An object for representing a package to be delivered.
class Package():

    def __init__(self, package_id, address, city, state, zip,
                 delivery_deadline, mass, special_notes,
                 arrival_time="8:00 AM", required_truck=-1, deliver_with=None):
        """Create a package.

        :param package_id: an integer which is unique to each package
        :param address: the address this package needs to be delivered to
        :param city: the city this package needs to be delivered to
        :param state: the state this package needs to be delivered to
        :param zip: the zip code this package needs to be delivered to
        :param delivery_deadline: the time by which this package must be
            delivered (a datetime, or a string such as "EOD")
        :param mass: the weight of the package, in kilograms
        :param special_notes: notes that may modify handling of this package
        :param arrival_time: the time this package arrives at the hub
        :param required_truck: truck this package must travel on (-1 = any)
        :param deliver_with: ids of other packages that must be delivered
            with this one (defaults to an empty list)
        """
        self.package_id = package_id
        self.address = address
        self.city = city
        self.state = state
        self.zip = zip
        self.delivery_deadline = delivery_deadline
        self.mass = mass
        self.special_notes = special_notes
        self.arrival_time = arrival_time
        self.required_truck = required_truck
        # BUG FIX: the previous `deliver_with=[]` default was a shared
        # mutable — every Package created without the argument aliased the
        # same list, so one package's co-delivery edits leaked into all.
        self.deliver_with = [] if deliver_with is None else deliver_with
        # "at the hub", "en route", or "delivered".
        self.delivery_status = "at the hub"
        # Time the package was delivered (None until delivery).
        self.delivery_time = None
        # Truck number the package was delivered on (-1 = not yet delivered).
        self.delivered_on = -1
        # Time the package was loaded onto a truck.
        self.loaded_at = None

    def delivery_deadline_for_sort(self):
        """Return a sortable deadline.

        String deadlines (e.g. "EOD") sort last by mapping to the end of the
        current day; datetime deadlines are returned unchanged.
        """
        if isinstance(self.delivery_deadline, str):
            # NOTE(review): microsecond=99999 looks like a typo for 999999,
            # but it is preserved to keep existing sort behavior identical.
            return datetime.datetime.now().replace(hour=23, minute=59,
                                                   second=59, microsecond=99999)
        return self.delivery_deadline

    def __str__(self):
        # Human-readable one-line summary of the package state.
        return (
            f'(package_id: "{str(self.package_id).zfill(2)}" | address: "{self.address}"'
            f' | delivery_deadline: "{self.delivery_deadline}" | city: "{self.city}" | zipcode: "{self.zip}" | mass: "{self.mass}"'
            f' | loaded_at: "{self.loaded_at}" | delivery_status: "{self.delivery_status}" | delivery_time: "{self.delivery_time}" | delivered_on truck: "{self.delivered_on}")'
        )

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        """Two packages are equal iff their package ids are equal.

        The exact-type check (rather than isinstance) is kept deliberately so
        subclass comparison semantics stay identical to the original.
        """
        if type(self) == type(other):
            return self.package_id == other.package_id
        return False
joshsizer/wgu_projects
wgu_data_structures_and_algorithms_2/package.py
package.py
py
2,855
python
en
code
0
github-code
36
[ { "api_name": "datetime.datetime.now", "line_number": 56, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute" } ]
28798444261
# I pledge my honor that I have abided by the Stevens Honor System.
# Zachary Jones
# HW6 Problem 2

import datetime


def get_date():
    """Prompt the user for a date string in M/D/YYYY form and return it."""
    date = str(input('Enter date M/D/YYYY: '))
    return date


def validate_date(date):
    """Print whether *date* parses as M/D/YYYY.

    :param date: the date string to check
    :return: True when the string is a valid date, False otherwise.
        (The boolean return is new but backward-compatible: the original
        returned None, which no caller used.)
    """
    # `fmt` instead of `format`, to avoid shadowing the builtin.
    fmt = '%m/%d/%Y'
    try:
        datetime.datetime.strptime(date, fmt)
    except ValueError:
        print('{} is an invalid date.'.format(date))
        return False
    else:
        print('{} is a valid date.'.format(date))
        return True


# FIX: guard the interactive entry point — the unguarded top-level call
# made importing this module block on stdin.
if __name__ == '__main__':
    validate_date(get_date())
Eric-Wonbin-Sang/CS110Manager
2020F_hw6_submissions/joneszachary/ZacharyJonesCH7P2.py
ZacharyJonesCH7P2.py
py
464
python
en
code
0
github-code
36
[ { "api_name": "datetime.datetime.strptime", "line_number": 16, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute" } ]
74318817382
try:
    from urlparse import urljoin
except ImportError:
    # python3 compatibility
    from urllib.parse import urljoin

from zope.dottedname.resolve import resolve


def get_page_url(skin_name, page_mappings, page_id):
    """Return the mapped path for ``page_id``, or '/' when unknown.

    ``skin_name`` is accepted for interface symmetry with the other helpers
    but is not used here.
    """
    fallback = '/'
    if page_id is not None:
        return page_mappings[page_id].get('path', '/')
    return fallback


def get_page_class(skin_name, page_mappings, page_id=None,
                   fallback=None, default_pages=None):
    """Return the page class for a given skin name and page mapping.

    Without a page id, return the given ``fallback`` if truthy, otherwise
    the default page class registered for the skin. With a page id, return:

    * the mapping's match for the given skin if defined,
    * the mapping's own 'fallback' entry if defined,
    * otherwise the fallback computed above.
    """
    # FIX: was the `x and x or y` anti-pattern, which is exactly `x or y`.
    fallback = fallback or resolve(default_pages[skin_name])
    if not page_id:
        return fallback
    page_class_mapping = page_mappings[page_id].get('page_class', None)
    if page_class_mapping is not None:
        result = page_class_mapping.get(
            skin_name, page_class_mapping.get('fallback', None))
        if result:
            # Preserve original semantics: fall back when resolve() yields
            # a falsy object.
            return resolve(result) or fallback
    return fallback


def page_factory(base_url, browser, default_page_class, page_mappings,
                 skin_name, page_id=None, **kwargs):
    """Instantiate the page object matching ``page_id`` (or the default).

    The page's base_url is the mapping's path joined onto ``base_url``;
    without a page id, ``base_url`` is used as-is with the default class.
    """
    # FIX: removed a dead `url = base_url` assignment that both branches
    # immediately overwrote.
    if page_id is None:
        url = base_url
        page_class = default_page_class
    else:
        path = page_mappings[page_id]['path']
        page_class = get_page_class(
            skin_name,
            page_mappings,
            page_id=page_id,
            fallback=default_page_class)
        url = urljoin(base_url, path)
    page = page_class(browser, base_url=url, **kwargs)
    return page
davidemoro/pytest-pypom-navigation
pypom_navigation/util.py
util.py
py
1,916
python
en
code
2
github-code
36
[ { "api_name": "zope.dottedname.resolve.resolve", "line_number": 29, "usage_type": "call" }, { "api_name": "zope.dottedname.resolve.resolve", "line_number": 38, "usage_type": "call" }, { "api_name": "urllib.parse.urljoin", "line_number": 56, "usage_type": "call" } ]
11171979751
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Teht 1 (Task 1): scatter plot of employee age vs. salary.
df = pd.read_csv('emp-dep.csv')
df.plot.scatter('age', 'salary')
plt.title('Työntekijät ja palkat')  # "Employees and salaries"
# NOTE(review): the x axis holds 'age' but the label says 'Palkat'
# (salaries) — possibly swapped; confirm against the assignment.
plt.xlabel('Palkat')
plt.show()

# Department head-counts as a vertical bar chart
# (kind="barh" would flip it horizontal).
count = df['dname'].value_counts()
count.plot(kind="bar")
plt.show()

# Same counts via seaborn: value_counts() reshaped into a two-column frame.
count = pd.DataFrame(df['dname'].value_counts()).reset_index()
count.columns = ['dname', 'count']
sns.barplot(x='dname', y='count', data=count)
plt.show()

# x/y flipped: the horizontal variant of the same chart.
sns.barplot(x='count', y='dname', data=count)
plt.show()

# Teht 3 (Task 3): age-group distribution.
count_age = df['age_group'].value_counts();
count_age.plot(kind="bar")
plt.show()

# Gender share as a pie chart.
# NOTE(review): startangle is the magic constant 0xde4db3ef — presumably a
# joke/obfuscation; confirm the intended starting angle.
gvc = df['gender'].value_counts()
gvc.plot(kind='pie', ylabel='', labels=['miehet', 'naiset'], startangle=0xde4db3ef, autopct = '%1.1f%%')
plt.show()

# Grouped bar chart: head-count per age group, split by gender.
cag = df.groupby(['age_group', 'gender']).size().unstack()
# NOTE(review): the axes from subplots() is immediately overwritten by
# cag.plot(), so the empty figure created here is unused.
fig, ax = plt.subplots()
ax = cag.plot(kind='bar')
ax.legend(['miehet', 'naiset'])
# Tick every 1 unit on the y axis (counts are integers).
plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1))
plt.show()
emilsto/Data-analytics-and-machinelearning
week37/t1/t1.py
t1.py
py
1,019
python
en
code
0
github-code
36
[ { "api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.title", "line_number": 12, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name" }, { "api_name": "matplotlib.p...
73434601064
import pickle
from tqdm import tqdm
import os
import pandas as pd
import numpy as np
from statsmodels.tsa.arima.model import ARIMA
from pmdarima.arima import auto_arima


def arima_model(test_codes, csv_filename, folder_path, n_output):
    """Evaluate a per-series ARIMA forecast over the last ``n_output`` points.

    For each code whose series is non-constant and whose data file exists,
    hold out the last ``n_output`` observations, let pmdarima pick the
    (p, d, q) order, refit with statsmodels, forecast the held-out horizon
    and record the MSE. The per-code losses are also pickled to
    'my_list_ARIMA.pkl'.

    :param test_codes: iterable of series codes to evaluate
    :param csv_filename: CSV with one row per code, including 'code', 'min'
        and 'max' columns (min == max flags a constant series)
    :param folder_path: directory prefix containing one '<code>.csv' per
        series; the series values are read from its second column
    :param n_output: forecast horizon (number of trailing points held out)
    :return: tuple of (mean MSE across evaluated codes, list of per-code MSEs)
    """
    df = pd.read_csv(csv_filename)
    # FIX: removed the dead self-assignment `n_output = n_output`.
    loss = []
    for code in tqdm(test_codes):
        row = df[df['code'] == code].iloc[0]
        filename = folder_path + code + '.csv'
        # Skip constant series and missing data files.
        if row['min'] != row['max'] and os.path.isfile(filename):
            df_temp = pd.read_csv(filename)
            values = df_temp.iloc[:, 1]
            valid_values = values[:-n_output]
            actual_values = values[-n_output:]
            # auto_arima searches for the best (p, d, q); refit that order
            # with statsmodels to produce the forecast.
            order = auto_arima(valid_values, seasonal=False, stepwise=True, trace=False).order
            model = ARIMA(valid_values, order=order)
            model_fit = model.fit()
            predictions = model_fit.predict(start=len(valid_values),
                                            end=len(valid_values) + n_output - 1)
            mse = np.mean((predictions - actual_values) ** 2)
            loss.append(mse)
    print(np.mean(loss))
    print(loss)
    # Persist the per-code losses for later inspection.
    with open('my_list_ARIMA.pkl', 'wb') as file:
        pickle.dump(loss, file)
    return np.mean(loss), loss
stergioa/masterThesis4
src/forecasting_models/trash/test_ARIMA.py
test_ARIMA.py
py
1,313
python
en
code
0
github-code
36
[ { "api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call" }, { "api_name": "tqdm.tqdm", "line_number": 19, "usage_type": "call" }, { "api_name": "os.path.isfile", "line_number": 24, "usage_type": "call" }, { "api_name": "os.path", "line_number":...
14151407552
import logging
from datetime import datetime
from pythonjsonlogger import jsonlogger
from src.config import LOG_LEVEL
import os

# NOTE(review): `path` is assigned but never used in this module.
path = os.path

# Root logger — everything in the application propagates here.
logger = logging.getLogger()
logHandler = logging.StreamHandler()
# File journal; the relative path assumes the process CWD is the project root.
fileHandler = logging.FileHandler("logger/journals/log_file.log")


class CustomJsonFormatter(jsonlogger.JsonFormatter):
    """JSON log formatter adding an ISO-8601 UTC timestamp and an
    upper-cased level field to every record."""

    def add_fields(self, log_record, record, message_dict):
        super(CustomJsonFormatter, self).add_fields(log_record, record, message_dict)
        if not log_record.get('timestamp'):
            # this doesn't use record.created, so it is slightly off
            now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            log_record['timestamp'] = now
        if log_record.get('level'):
            log_record['level'] = log_record['level'].upper()
        else:
            log_record['level'] = record.levelname


formatter = CustomJsonFormatter('%(timestamp)s %(level)s %(name)s %(message)s')

# Attach the file-journal handler to the logger.
# NOTE(review): the JSON formatter is set only on the stream handler below;
# the file handler writes with the default formatter — confirm intended.
logger.addHandler(fileHandler)
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
logger.setLevel(LOG_LEVEL)
Safonovdv91/web_gymkhana_bot_server
logger/logger.py
logger.py
py
1,168
python
en
code
1
github-code
36
[ { "api_name": "os.path", "line_number": 9, "usage_type": "attribute" }, { "api_name": "logging.getLogger", "line_number": 11, "usage_type": "call" }, { "api_name": "logging.StreamHandler", "line_number": 12, "usage_type": "call" }, { "api_name": "logging.FileHandl...
74644371943
import sys import pygame import pygame.gfxdraw class Rubrika: def __init__(self, x, y, a, b, szin, szoveg): self.x = x self.y = y self.a = a self.b = b self.szin = szin self.szoveg = szoveg def negyzet(self, kepernyo): return pygame.Rect(self.x, self.y, self.a, self.b) def logo(kepernyo): logo = pygame.image.load("mil_logo.gif") kepernyo.blit(logo, (435, 60)) def inditas_negyzet(): return pygame.Rect(412, 310, 200, 60) def dicsoseg_negyzet(): return pygame.Rect(412, 380, 200, 60) def menu_kilepes_negyzet(): return pygame.Rect(412, 450, 200, 60) def fomenu(kepernyo): kepernyo.fill((0, 0, 60)) logo(kepernyo) fomenu_elemek = [Rubrika(412, 310, 200, 60, pygame.Color(255, 255, 255), "Játék indítása"), Rubrika(412, 380, 200, 60, pygame.Color(255, 255, 255), "Dicsőséglista"), Rubrika(412, 450, 200, 60, pygame.Color(255, 255, 255), "Kilépés")] for f in fomenu_elemek: negyzet_rajzol(kepernyo, f, 0) szoveg_negyzetbe(kepernyo, f) pygame.display.update() def segitseg_rubrika(): return [Rubrika(12, 306, 75, 60, pygame.Color(255, 255, 255), ""), Rubrika(122, 306, 75, 60, pygame.Color(255, 255, 255), "")] def segitseg_negyzet(): '''visszaadja a segítségek dobozainak koordinátáit a kattintáshoz''' return [pygame.Rect(12, 306, 75, 60), pygame.Rect(122, 306, 75, 60)] def segitseg_logo(kepernyo, hely, fajl): fajlbol = pygame.image.load(fajl) kepernyo.blit(fajlbol, (hely.x + 5, hely.y + 5)) def negyzet_rajzol(kepernyo, rubrika, teli): if teli == 1: pygame.gfxdraw.box(kepernyo, rubrika.negyzet(kepernyo), rubrika.szin) pygame.gfxdraw.rectangle(kepernyo, pygame.Rect(rubrika.x, rubrika.y, rubrika.a+2, rubrika.b+2), pygame.Color(255, 255, 255)) elif teli == 0: pygame.gfxdraw.rectangle(kepernyo, rubrika.negyzet(kepernyo), rubrika.szin) def szoveg_negyzetbe(kepernyo, forras): betustilus = pygame.font.SysFont("Bahnschrift SemiLight", 24) szoveg = betustilus.render(forras.szoveg, 1, (255, 255, 255)) kepernyo.blit(szoveg, (forras.x + 10, forras.y + (forras.b)/2)) def valasz_negyzet(): return [ 
pygame.Rect(12, 500, 500, 100), pygame.Rect(12+500, 500, 500, 100), pygame.Rect(12, 500+100, 500, 100), pygame.Rect(12+500, 500+100, 500, 100) ] def kerdes_valasz_betoltes(kepernyo, forras, statusz): '''betölti a következő kérdést és válaszlehetőségeket, a statusz 0,1,2 értéket vehet fel: 0=alaphelyzet, 1=jó válasz, 2=rossz válasz - ettől függően változik a jó válasz négyzetének színe''' kerdes = Rubrika(12, 384, 1000, 100, pygame.Color(255, 255, 255), forras.kerdes) kategoria = Rubrika(12, 236, 200, 50, pygame.Color(255, 255, 255), forras.kategoria) valaszok = [ Rubrika(12, 500, 500, 100, pygame.Color(0, 0, 60), "A: {}".format(forras.a)), Rubrika(12+500, 500, 500, 100, pygame.Color(0, 0, 60), "B: {}".format(forras.b)), Rubrika(12, 500+100, 500, 100, pygame.Color(0, 0, 60), "C: {}".format(forras.c)), Rubrika(12+500, 500+100, 500, 100, pygame.Color(0, 0, 60), "D: {}".format(forras.d)) ] if statusz == 0: pass elif statusz == 1: for jo in valaszok: if jo.szoveg[0] == forras.valasz: jo.szin = pygame.Color(50, 100, 0) elif statusz == 2: for jo in valaszok: if jo.szoveg[0] == forras.valasz: jo.szin = pygame.Color(150, 0, 0) negyzet_rajzol(kepernyo, kerdes, 0) szoveg_negyzetbe(kepernyo, kerdes) negyzet_rajzol(kepernyo, kategoria, 0) szoveg_negyzetbe(kepernyo, kategoria) for v in valaszok: negyzet_rajzol(kepernyo, v, 1) szoveg_negyzetbe(kepernyo, v) pygame.display.update() def hatter(kepernyo): segitseg_hely = segitseg_rubrika() kepernyo.fill((0, 0, 60)) logo(kepernyo) segitseg_logo(kepernyo, segitseg_hely[0], "kozonseg.png") segitseg_logo(kepernyo, segitseg_hely[1], "felezes.png") kilepes_rubrika = Rubrika(854, 20, 150, 60, pygame.Color(60, 0, 0), "Kilépés") negyzet_rajzol(kepernyo, kilepes_rubrika, 1) szoveg_negyzetbe(kepernyo, kilepes_rubrika) def pontok_betoltes(kepernyo, forras): pont_rajz = Rubrika(762, 306, 250, 60, pygame.Color(0, 0, 60), "Nyeremény: {} Ft".format(forras)) if "100000 Ft" in pont_rajz.szoveg or "1500000 Ft" in pont_rajz.szoveg or "40000000 Ft" in 
pont_rajz.szoveg: pont_rajz.szin = pygame.Color(220, 180, 0) negyzet_rajzol(kepernyo, pont_rajz, 1) szoveg_negyzetbe(kepernyo, pont_rajz) def idozito_keret(kepernyo, szam): idozito = Rubrika(482, 306, 60, 60, pygame.Color(0, 0, 60), "{}".format(szam)) negyzet_rajzol(kepernyo, idozito, 1) szoveg_negyzetbe(kepernyo, idozito) pygame.display.update() def kezdokepernyo(kepernyo): '''kirajzolja az ablakba a kezdőképernyő felületét, a rubrikák koordinátái az 1024*720-as ablak pixelei alapján lettek megadva''' kepernyo.fill((0, 0, 60)) logo(kepernyo) kezdo_rubrikak = [Rubrika(362, 232, 300, 60, pygame.Color(0, 0, 60), "Játékos neve"), Rubrika(437, 500, 150, 60, pygame.Color(0, 0, 150), "Játék indítása"), Rubrika(854, 20, 150, 60, pygame.Color(60, 0, 0), "Kilépés"), Rubrika(362, 322, 300, 60, pygame.Color(0, 0, 30), "")] for rubrika in kezdo_rubrikak: negyzet_rajzol(kepernyo, rubrika, 1) szoveg_negyzetbe(kepernyo, rubrika) def szint_negyzetek(): return [pygame.Rect(257, 412, 150, 60), pygame.Rect(437, 400, 150, 60), pygame.Rect(617, 400, 150, 60)] def inditogomb_negyzet(): return pygame.Rect(437, 500, 150, 60) def kilepes_negyzet(): return pygame.Rect(854, 20, 150, 60) def felhasznalo_rubrika(): return Rubrika(362, 322, 300, 60, pygame.Color(0, 0, 30), "") def szintvalaszto_negyzetek(kepernyo, statusz): '''statusz információ: Ha 0, akkor alaphelyzet. 
Ha 1, Kezdő = világoskék, Ha 2 = Normal = világoskék, Ha 3 = Extrem = világoskék''' szintek = [Rubrika(257, 412, 150, 60, pygame.Color(0, 0, 60), "Kezdő"), Rubrika(437, 412, 150, 60, pygame.Color(0, 0, 60), "Normál"), Rubrika(617, 412, 150, 60, pygame.Color(0, 0, 60), "Extrém")] if statusz == 0: pass elif statusz == 1: szintek[0].szin = pygame.Color(0, 0, 150) elif statusz == 2: szintek[1].szin = pygame.Color(0, 0, 150) elif statusz == 3: szintek[2].szin = pygame.Color(0, 0, 150) for rubrika in szintek: negyzet_rajzol(kepernyo, rubrika, 1) szoveg_negyzetbe(kepernyo, rubrika) def szint_hibauzenet(kepernyo): hibahely = Rubrika(437, 590, 150, 60, pygame.Color(0, 0, 60), "Válassz szintet!") szoveg_negyzetbe(kepernyo, hibahely) def segitseg_megjelenes(obj, kepernyo, statusz): '''paraméterként megadott Segitoeszkoz objektumot rajzolja körbe egy négyzettel, ha fel lett használva piros színnel''' if statusz == 0: pass elif statusz == 1: obj.szin = pygame.Color(150, 0, 0) negyzet_rajzol(kepernyo, obj, 0) pygame.display.update() def kozonsegszavazat(kepernyo, statusz, *forras): if statusz == 0: kozonseg_rubrika = Rubrika(212, 306, 245, 60, pygame.Color(255, 255, 255), "") elif statusz == 1: forras = str(forras) forras = forras[2:-3] kozonseg_rubrika = Rubrika(212, 306, 245, 60, pygame.Color(255, 255, 255), "{}".format(forras)) negyzet_rajzol(kepernyo, kozonseg_rubrika, 0) szoveg_negyzetbe(kepernyo, kozonseg_rubrika) pygame.display.update() def jatek_vege(kepernyo, forras, statusz): if statusz: vege_rubrika = Rubrika(312, 310, 400, 60, pygame.Color(0, 0, 60), "Megnyerted a játékot! Nyereményed: {} Ft".format(forras)) else: vege_rubrika = Rubrika(312, 310, 400, 60, pygame.Color(0, 0, 60), "Legközelebb jobban megy majd! 
Nyereményed: {} Ft".format(forras)) jvege = pygame.Surface((1024, 720)) jvege.fill((0, 0, 60)) logo(jvege) kepernyo.blit(jvege, (0, 0)) szoveg_negyzetbe(kepernyo, vege_rubrika) kilepes_rubrika = Rubrika(854, 20, 150, 60, pygame.Color(60, 0, 0), "Kilépés") negyzet_rajzol(kepernyo, kilepes_rubrika, 1) szoveg_negyzetbe(kepernyo, kilepes_rubrika) pygame.display.update() def megallas(kepernyo): megallas_rubrika = Rubrika(12, 156, 150, 50, pygame.Color(0, 100, 0), "Megállok") negyzet_rajzol(kepernyo, megallas_rubrika, 0) szoveg_negyzetbe(kepernyo, megallas_rubrika) def megallas_negyzet(): return pygame.Rect(12, 156, 150, 50) def teljesites_ido_UI(ido): ido = int(ido) perc = ido//60000 mp = (ido - (perc*60000)) // 1000 return "{} perc {} másodperc".format(perc, mp) def dicsoseglista_UI(kepernyo, forras): '''grafikusan megjeleníti a toplistát - 20 legjobb játékos eredményét, a forras parameter egy tömb referenciája, amelyben Jatekos objektumok vannak''' kepernyo.fill((0, 0, 60)) adat_rubrikak = [Rubrika(12, 12, 250, 38, pygame.Color(255, 255, 255), "NÉV"), Rubrika(12+250, 12, 250, 38, pygame.Color(255, 255, 255), "PONTSZÁM"), Rubrika(12+500, 12, 250, 38, pygame.Color(255, 255, 255), "NEHÉZSÉGI SZINT"), Rubrika(12+750, 12, 250, 38, pygame.Color(255, 255, 255), "TELJESÍTÉSI IDŐ")] for rubrika in adat_rubrikak: negyzet_rajzol(kepernyo, rubrika, 0) szoveg_negyzetbe(kepernyo, rubrika) listaadatok = [] for f in forras: telj_ido = teljesites_ido_UI(f.ido) listaadatok.append([f.nev, f.pontszam, f.nehezseg, telj_ido, f.segitseg]) kezdo_x = 12 kezdo_y = 50 for i in range(20): try: f_nev = listaadatok[i][0] except IndexError: f_nev = "" try: hasznalt_segitseget = listaadatok[i][4] except: hasznalt_segitseget = "" nev = Rubrika(kezdo_x, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_nev)) if hasznalt_segitseget == "False": nev.szin = pygame.Color(220, 180, 0) negyzet_rajzol(kepernyo, nev, 1) szoveg_negyzetbe(kepernyo, nev) pontszamok = [] for i in range(20): try: 
f_pont = listaadatok[i][1] except IndexError: f_pont = "" pontszam = Rubrika(kezdo_x + 250, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_pont)) negyzet_rajzol(kepernyo, pontszam, 1) szoveg_negyzetbe(kepernyo, pontszam) for i in range(20): try: f_szint = listaadatok[i][2] except IndexError: f_szint = "" nehezseg = Rubrika(kezdo_x + 500, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_szint)) negyzet_rajzol(kepernyo, nehezseg, 1) szoveg_negyzetbe(kepernyo, nehezseg) for i in range(20): try: f_ido = listaadatok[i][3] except IndexError: f_ido = "" ido = Rubrika(kezdo_x + 750, kezdo_y + i*32, 250, 32, pygame.Color(0, 0, 60), "{}".format(f_ido)) negyzet_rajzol(kepernyo, ido, 1) szoveg_negyzetbe(kepernyo, ido) pygame.display.update()
pdoszpod11/SchoolProjects
WhoWantsToBeAMillionaire/Game/NHF_UI_v1.py
NHF_UI_v1.py
py
12,337
python
hu
code
0
github-code
36
[ { "api_name": "pygame.Rect", "line_number": 15, "usage_type": "call" }, { "api_name": "pygame.image.load", "line_number": 18, "usage_type": "call" }, { "api_name": "pygame.image", "line_number": 18, "usage_type": "attribute" }, { "api_name": "pygame.Rect", "li...
22015297058
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jan 14 12:17:00 2021 @author: paradeisios """ import cv2 def get_video_secs(video): vidcap = cv2.VideoCapture(video) fps = vidcap.get(cv2.CAP_PROP_FPS) totalNoFrames = vidcap.get(cv2.CAP_PROP_FRAME_COUNT) vidcap.release() return int(float(totalNoFrames) / float(fps))
paradeisios/luminance
utils/get_video_secs.py
get_video_secs.py
py
358
python
en
code
0
github-code
36
[ { "api_name": "cv2.VideoCapture", "line_number": 11, "usage_type": "call" }, { "api_name": "cv2.CAP_PROP_FPS", "line_number": 12, "usage_type": "attribute" }, { "api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 13, "usage_type": "attribute" } ]
31025430607
import numpy as np import cv2 import faceTools import moodTools from PIL import Image emojis_data = { 'angry': cv2.imread("./data/emojis/Angry.png"), 'disgust': cv2.imread("./data/emojis/Poisoned.png"), 'fear': cv2.imread("./data/emojis/Fearful.png"), 'happy': cv2.imread("./data/emojis/Happy.png"), 'sad': cv2.imread("./data/emojis/Crying.png"), 'surprise': cv2.imread("./data/emojis/Omg.png"), 'neutral': cv2.imread("./data/emojis/Neutral.png") } cap = cv2.VideoCapture(0) if not cap.isOpened(): print("Cannot open camera") exit() face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml") Claudia = moodTools.callModel() while True: # Capture frame-by-frame ret, frame = cap.read() # if frame is read correctly ret is True if not ret: print("Can't receive frame. Exiting ...") break faces = faceTools.findFaces(frame, face_cascade) if faces is not None: for element in faces: mood = moodTools.predict(Claudia, element[0]) print(mood) (x,y,w,h) = element[1] emoji = emojis_data[mood] # Check if the tilting has been calculated if element[2] is not None: emoji = Image.fromarray(emoji) emoji = np.array(emoji.rotate(int(-element[2]))) # Fit the emoji to the exact size of the face emoji = faceTools.resize(emoji, target_size=(w, h), to_gray=False) frame[y:y+h, x:x+w, :] = emoji # Display the resulting frame font = cv2.FONT_HERSHEY_SIMPLEX # Use putText() method for # inserting text on video cv2.putText(frame, 'Press q to exit', (50, 50), font, 1, (0, 255, 255), 2, cv2.LINE_4) cv2.imshow('frame', frame) # If the key pressed is "q" (quit) if cv2.waitKey(10) & 0xFF == ord('q'): break # When everything done, release the capture cap.release() cv2.destroyAllWindows()
CVandermies/Facelook
main.py
main.py
py
2,121
python
en
code
0
github-code
36
[ { "api_name": "cv2.imread", "line_number": 8, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 9, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 10, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 11, ...
22354196775
import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "64d90a1a69bc" down_revision = "e5594ed3ab53" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table( "background_tasks", sa.Column("id", sa.Integer(), nullable=False), sa.Column("name", sa.String(length=255), nullable=False), sa.Column("project", sa.String(length=255), nullable=False), sa.Column("created", sa.TIMESTAMP(), nullable=True), sa.Column("updated", sa.TIMESTAMP(), nullable=True), sa.Column("state", sa.String(length=255), nullable=True), sa.Column("timeout", sa.Integer(), nullable=True), sa.PrimaryKeyConstraint("id"), sa.UniqueConstraint("name", "project", name="_background_tasks_uc"), ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table("background_tasks") # ### end Alembic commands ###
mlrun/mlrun
server/api/migrations_sqlite/versions/64d90a1a69bc_adding_background_tasks_table.py
64d90a1a69bc_adding_background_tasks_table.py
py
1,069
python
en
code
1,129
github-code
36
[ { "api_name": "alembic.op.create_table", "line_number": 13, "usage_type": "call" }, { "api_name": "alembic.op", "line_number": 13, "usage_type": "name" }, { "api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call" }, { "api_name": "sqlalchemy.Integ...
22024978373
# Assignment: Draw Stars # Karen Clark # 2018-06-04 # Assignment: Stars # Write the following functions. # Part I # Create a function called draw_stars() that takes a list of numbers and # prints out *. from __future__ import print_function from colorama import init, Fore from termcolor import colored def draw_stars(x): init() for i in range(len(x)): output = "" counter = x[i] while counter > 0: output += "*" counter -= 1 print(colored(output, 'red')) # Part II # Modify the function above. Allow a list containing integers and strings # to be passed to the draw_stars() function. When a string is passed, # instead of # displaying *, display the first letter of the string # according to the # example below. def draw_stars2(x): init() for i in range(len(x)): output_int = "" output_str = "" first_letter = "" if isinstance(x[i], int): count_int = x[i] while count_int > 0: output_int += "*" count_int -= 1 print(colored(output_int, 'red')) elif isinstance(x[i], str): first_letter = x[i][0].lower() count_str = len(x[i]) while count_str > 0: output_str += first_letter count_str -= 1 print(output_str)
clarkkarenl/codingdojo_python_track
draw-stars.py
draw-stars.py
py
1,388
python
en
code
0
github-code
36
[ { "api_name": "colorama.init", "line_number": 17, "usage_type": "call" }, { "api_name": "termcolor.colored", "line_number": 26, "usage_type": "call" }, { "api_name": "colorama.init", "line_number": 34, "usage_type": "call" }, { "api_name": "termcolor.colored", ...
15361552534
from conans import ConanFile, CMake import os class StringIdConan(ConanFile): name = "string_id" version = "2.0-2" description = "A small C++ library to handle hashed strings serving as identifiers." license="Modified BSD License (3-Clause BSD license)" settings = "os", "compiler", "build_type", "arch" url = "https://github.com/pjohalloran/conan-stringid" options = {"compiler_version": ["11", "14"]} default_options = "compiler_version=14", def source(self): self.run("git clone https://github.com/foonathan/string_id") os.chdir("string_id") self.run("git checkout v%s" % self.version) def build(self): os.makedirs("string_id/build") os.chdir("string_id/build") self.run("cmake ..") self.run("cmake --build .") def package(self): self.copy("*.hpp", dst="include", keep_path=False) self.copy("*.hpp.in", dst="include", keep_path=False) self.copy("*.lib", dst="lib", keep_path=False) self.copy("*.a", dst="lib", keep_path=False) def package_info(self): self.cpp_info.sharedlinkflags = ["-std=c++%s" % self.options.compiler_version] self.cpp_info.exelinkflags = ["-std=c++%s" % self.options.compiler_version] self.cpp_info.libs = ["foonathan_string_id", "stdc++"] self.cpp_info.cppflags = ["-std=c++%s" % self.options.compiler_version, "-stdlib=libc++"]
pjohalloran/conan-stringid
conanfile.py
conanfile.py
py
1,343
python
en
code
0
github-code
36
[ { "api_name": "conans.ConanFile", "line_number": 4, "usage_type": "name" }, { "api_name": "os.chdir", "line_number": 16, "usage_type": "call" }, { "api_name": "os.makedirs", "line_number": 20, "usage_type": "call" }, { "api_name": "os.chdir", "line_number": 21...
25124748823
import numpy as np class GradientDescentLinearRegression: def __init__(self, learning_rate=0.01, iterations=1000): self.learning_rate, self.iterations = learning_rate, iterations def fit(self, X, y): b = 0 m = 5 n = X.shape[0] for _ in range(self.iterations): b_gradient = -2 * np.sum(y - m*X + b) / n m_gradient = -2 * np.sum(X*(y - (m*X + b))) / n b = b + (self.learning_rate * b_gradient) m = m - (self.learning_rate * m_gradient) self.m, self.b = m, b def predict(self, X): return self.m*X + self.b np.random.seed(42) X = np.array(sorted(list(range(5))*20)) + np.random.normal(size=100, scale=0.5) y = np.array(sorted(list(range(5))*20)) + np.random.normal(size=100, scale=0.25) clf = GradientDescentLinearRegression() clf.fit(X, y) import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') plt.scatter(X, y, color='black') plt.plot(X, clf.predict(X)) plt.gca().set_title("Gradient Descent Linear Regressor") print("The intercept of the best fit line, b= ",clf.b) print("The slope of the best fit line, m= ",clf.m)
TanizzCoder/ANN
Gradient_Regression.py
Gradient_Regression.py
py
1,163
python
en
code
1
github-code
36
[ { "api_name": "numpy.sum", "line_number": 12, "usage_type": "call" }, { "api_name": "numpy.sum", "line_number": 13, "usage_type": "call" }, { "api_name": "numpy.random.seed", "line_number": 21, "usage_type": "call" }, { "api_name": "numpy.random", "line_number...
3449169916
# -*- coding: utf-8 -*- """ Módulo ``PreProcWindow`` ======================== Implementa uma janela com funcionalidades de pré-processamento dos dados. .. raw:: html <hr> """ import inspect import numpy as np import pyqtgraph as pg from PyQt5 import QtCore from framework import file_m2k, file_civa, file_omniscan, post_proc, pre_proc from guiqt.Windows import PreProcWindowDesign from guiqt.Windows.ErrorWindow import ErrorWindow from guiqt.Utils.ParameterRoot import ParameterRoot class PreProcWindow(PreProcWindowDesign.Ui_pre_proc_dialog): """ Classe responsável por abrir uma janela para aplicar algoritmos de pré-processamento nos dados carregados pela janela principal. Os algoritmos são automaticamente reconhecidos, desde que estejam no arquivo ``framework/pre_proc.py``. É necessário que eles possuam ao menos dois parâmetros: ``data_insp`` e ``shots``, sendo o primeiro uma instância da classe ``DataInsp`` e o segundo um `numpy.ndarray` com os índices dos disparos em que o algoritmo será aplicado. """ def __init__(self, dialog, main_window): """ Construtor da classe. Parameters ---------- dialog : :class:`PyQt5.QtWidgets.QDialog` Janela de diálogo. main_window :class:`guiqt.gui.MainWindow` Janela principal. 
""" self.setupUi(dialog) self.dialog = dialog dialog.setModal(True) self.main_window = main_window # encontra os algoritmos no modulo ``pre_proc`` algs = [x[0] for x in inspect.getmembers(pre_proc, inspect.isfunction)] for i in range(len(algs)): self.combo_box_alg.addItem(algs[i]) # cria a raiz da arvore de parametros self.parameters_root = ParameterRoot() # limita as spin boxes self.spin_box_sequence.setRange(0, self.main_window.dados.ascan_data.shape[1] - 1) self.spin_box_channel.setRange(0, self.main_window.dados.ascan_data.shape[2] - 1) # conecta os sinais self.combo_box_alg.currentIndexChanged.connect(self.alg_changed) self.button_apply.clicked.connect(self.visualize) self.button_save.clicked.connect(self.save) self.button_reset.clicked.connect(self.reset) self.button_resetall.clicked.connect(self.reset_all) self.spin_box_channel.valueChanged.connect(self.redraw) self.spin_box_sequence.valueChanged.connect(self.redraw) self.spin_box_shot.valueChanged.connect(self.redraw) # remove os menus de contexto self.plot_widget_ascan.setMenuEnabled(False) self.plot_widget_bscan.setMenuEnabled(False) self.alg_changed() try: self.draw_ascan(self.main_window.dados.ascan_data[:, 0, 0, self.main_window.spin_box_shot.value()]) self.draw_bscan(self.main_window.dados.ascan_data[:, 0, :, self.main_window.spin_box_shot.value()]) except Exception: # a exceçao retornada nao e especifica return self.shot_pos = 0 self.last_result = self.main_window.dados.ascan_data[:, :, :, :] shape = self.last_result.shape self.spin_box_sequence.setRange(0, shape[1] - 1) self.spin_box_channel.setRange(0, shape[2] - 1) self.spin_box_shot.setRange(0, shape[3] - 1) # remove botao '?' dialog.setWindowFlags(dialog.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint) dialog.exec_() def draw_ascan(self, data): """ Desenha o A-scan do *slicing* selecionado. Parameters ---------- data : :class:`numpy.ndarray` A-scan a ser desenhado. 
""" self.plot_widget_ascan.getPlotItem().clear() self.plot_widget_ascan.addItem(pg.PlotDataItem(data)) def draw_bscan(self, img): """ Desenha o B-scan com os dados presentes no ``DataInsp`` carregado. Parameters ---------- img : :class:`numpy.ndarray` B-scan a ser desenhado. """ img_bscan = pg.ImageView() # cria um imageview # coloca a imagem no imageview max = np.max(np.abs(img)) img_bscan.setImage(post_proc.normalize(img.T, image_max=max, image_min=-max), levels=(0, 1)) img_bscan.getImageItem().setLookupTable(self.main_window.lut) # mostra a imagem self.plot_widget_bscan.getPlotItem().clear() self.plot_widget_bscan.addItem(img_bscan.getImageItem()) # inverte a direção do eixo y img_bscan.getImageItem().getViewBox().invertY() # calcula os eixos if img is not None: # se passou a imagem, nao calcula os eixos pass else: limits = QtCore.QRectF(self.main_window.img_rect_esq[0], self.main_window.img_rect_esq[1], self.main_window.img_rect_esq[2] - self.main_window.img_rect_esq[0], self.main_window.img_rect_esq[3] - self.main_window.img_rect_esq[1]) img_bscan.getImageItem().setRect(limits) # centraliza a imagem self.plot_widget_bscan.getPlotItem().autoRange() def alg_changed(self): """ Encontra os parâmetros do algoritmo selecionado. Assume que parâmetros com valor padrão ``None`` são considerados do tipo ``float``. """ alg_index = self.combo_box_alg.currentIndex() func_str = self.combo_box_alg.itemText(alg_index) func = getattr(pre_proc, func_str) func_params = inspect.signature(func) params = [key for key in func_params.parameters.keys()] defaults = [func_params.parameters[key].default for key in params] self.parametertree.clear() self.parameters_root = ParameterRoot() # TODO: Usar ScalableGroup para adicionar os argumentos opcionais. for i in range(len(params)): if i == 0: continue # o primeiro sempre é data_insp? 
if defaults[i] is inspect._empty: continue type_val = type(defaults[i]).__name__ if type_val == 'NoneType': self.parameters_root.addChild({'name': params[i], 'type': 'float', 'value': 0, 'decimals': 12}) elif params[i] == 'shots': self.parameters_root.addChild({'name': params[i], 'type': 'ndarray', 'value': defaults[i], 'limits': (0, self.main_window.dados.ascan_data.shape[3] - 1)}) elif type_val == 'ndarray': self.parameters_root.addChild({'name': params[i], 'type': 'ndarray', 'value': defaults[i]}) else: self.parameters_root.addChild({'name': params[i], 'type': type_val, 'value': defaults[i], 'decimals': 12}) self.parametertree.addParameters(self.parameters_root) def apply_alg(self): """ Executa o algoritmo selecionado. """ alg_index = self.combo_box_alg.currentIndex() func_str = self.combo_box_alg.itemText(alg_index) func = getattr(pre_proc, func_str) try: self.shot_pos = self.parameters_root.get_parameters()['shots'].astype(int) except KeyError: self.shot_pos = int(self.parameters_root.get_parameters()['shot']) self.last_result = np.copy(self.main_window.dados.ascan_data[:, :, :, self.shot_pos], order='F') try: out = func(self.main_window.dados, **self.parameters_root.get_parameters()) self.spin_box_sequence.setRange(0, out.shape[1] - 1) self.spin_box_channel.setRange(0, out.shape[2] - 1) self.spin_box_shot.setRange(0, out.shape[3] - 1) self.main_window.spin_box_sequence.setMaximum(out.shape[1] - 1) self.main_window.spin_box_channel.setMaximum(out.shape[2] - 1) self.main_window.spin_box_shot.setMaximum(out.shape[3] - 1) self.main_window.ascan_max = np.max(np.abs(out)) return out except Exception as e: ErrorWindow("Error during preprocessing: " + e.args[0]) return None def visualize(self): """ Aplica o algoritmo selecionado. O resultado deverá ser salvo pelo algoritmo. 
""" out = self.apply_alg() if out is None: return seq = self.spin_box_sequence.value() chan = self.spin_box_channel.value() shot = self.spin_box_shot.value() self.draw_bscan(np.real(self.main_window.dados.ascan_data[:, seq, :, shot])) self.draw_ascan(np.real(self.main_window.dados.ascan_data[:, seq, chan, shot])) def save(self): """ Chamado quando o botão para salvar é clicado. Como o algoritmo deve salvar o resultado, a janela irá apenas fechar. """ # Apenas fecha a janela self.dialog.close() def reset(self): """ Remove o ultimo processamento feito. """ if self.last_result.shape.__len__() == 3: self.main_window.dados.ascan_data[:, :, :, self.shot_pos] = self.last_result[:, :, :] else: self.main_window.dados.ascan_data = self.last_result self.redraw() def reset_all(self): """ Recarrega os A-scan, abrindo o arquivo novamente. """ if self.main_window.file[-4:] == ".m2k": d = {'filename': self.main_window.file, 'type_insp': "immersion", 'water_path': 0.0, 'freq_transd': 5.0, 'bw_transd': 0.6, 'tp_transd': "gaussian"} func = file_m2k.read self.main_window.readonly_params = False elif self.main_window.file[-5:] == ".civa": d = {'filename': self.main_window.file, 'sel_shots': None} func = file_civa.read self.main_window.readonly_params = True elif self.main_window.file[-4:] == ".opd": d = {'filename': self.main_window.file, 'sel_shots': 0, 'freq': 5.0, 'bw': 0.6, 'pulse_type': "gaussian"} func = file_omniscan.read self.main_window.readonly_params = False else: if self.main_window.file: ErrorWindow("Could not find file") return self.main_window.run_in_thread(func, d, self.reset_all_finished) def reset_all_finished(self, data_insp): self.main_window.finished_open_dir(data_insp) self.last_result = self.main_window.dados.ascan_data self.redraw() def redraw(self): """ Desenha novamente o A-scan e B-scan quando um *spin box* é alterado. 
""" seq = self.spin_box_sequence.value() chan = self.spin_box_channel.value() shot = self.spin_box_shot.value() self.draw_bscan(np.real(self.main_window.dados.ascan_data[:, seq, :, shot])) self.draw_ascan(np.real(self.main_window.dados.ascan_data[:, seq, chan, shot]))
matheusfdario/role-finder
AUSPEX-smart_wedge/guiqt/Windows/PreProcWindow.py
PreProcWindow.py
py
10,993
python
pt
code
0
github-code
36
[ { "api_name": "guiqt.Windows.PreProcWindowDesign.Ui_pre_proc_dialog", "line_number": 29, "usage_type": "attribute" }, { "api_name": "guiqt.Windows.PreProcWindowDesign", "line_number": 29, "usage_type": "name" }, { "api_name": "inspect.getmembers", "line_number": 56, "usag...
35609284688
from dataclasses import dataclass from queue import Empty import queue import cv2, time, os import numpy as np import torch.multiprocessing as mp from ..util.profiler import Profiler from .twitch_realtime_handler import ( TwitchAudioGrabber, TwitchImageGrabber ) from .youtube_recoder.image_recoder import YoutubeImageRecoder TW_SHARK = 'https://twitch.tv/tizmtizm' TW_MARU = 'https://www.twitch.tv/maoruya' TW_PIANOCAT = 'https://www.twitch.tv/pianocatvr' TW_RUMYONG = 'https://www.twitch.tv/lumyon3' TW_MAOU = 'https://www.twitch.tv/mawang0216' TW_DALTA = 'https://www.twitch.tv/dalta_23' TW_VIICHAN = 'https://www.twitch.tv/viichan6' TW_ZURURU = 'https://www.twitch.tv/cotton__123' TW_SHYLILY = 'https://www.twitch.tv/shylily' TW_DANCINGSANA = 'https://www.twitch.tv/dancingshana' @dataclass class RecoderEntry: index: int audio_segment: np.ndarray frames: np.ndarray fps: float profiler: Profiler class TwitchRecoder: def __init__(self, target_url=TW_MARU, batch_sec=1, fps=24, on_queue=None, quality='1080p', buffer_size=1, audio_skip=0): assert isinstance(batch_sec, int) self.url = target_url self.batch_sec = batch_sec self.fps = fps self.queue = mp.Queue(maxsize=buffer_size) self.cmd_queue = mp.Queue() self.on_queue = on_queue self.output_shape = None self.frame_count = 0 self.quality = quality self.audio_skip = audio_skip if(audio_skip > 0): self.audio_queue = mp.Queue(maxsize=audio_skip) def __getstate__(self): state = self.__dict__.copy() if 'proc' in state: del state["proc"] return state def proc_main(self): print('TwitchRecoder: TwitchImageGrabber init') if 'youtube' in self.url: image_grabber = YoutubeImageRecoder( url=self.url, quality=self.quality, rate=self.fps, ) else: image_grabber = TwitchImageGrabber( twitch_url=self.url, quality=self.quality, # quality of the stream could be ["160p", "360p", "480p", "720p", "720p60", "1080p", "1080p60"] blocking=True, rate=self.fps # frame per rate (fps) ) # change to a stream that is actually online print('TwitchRecoder: 
TwitchAudioGrabber init') audio_grabber = TwitchAudioGrabber( twitch_url=self.url, blocking=True, # wait until a segment is available segment_length=int(self.batch_sec), # segment length in seconds rate=44100, # sampling rate of the audio channels=2, # number of channels dtype=np.float32 # quality of the audio could be [np.int16, np.int32, np.float32, np.float64] ) t = time.time() t_sum = [] index = 0 while True: try: cmd = self.cmd_queue.get_nowait() if cmd == 'exit': print('TwitchRecoder: Get exit') self.cmd_queue.close() break else: raise Exception() except Empty: pass #print('ff') frames = [] reader_eof = False for i in range(self.batch_sec * self.fps): frame = image_grabber.grab() if frame is None: print('frame recoded none EOF') reader_eof = True break #raise Exception('frame recodered None!') # print(f'grabbed {self.frame_count}, {frame[0,0,0]}') if self.output_shape is not None: frame = cv2.resize(frame, dsize=[self.output_shape[1], self.output_shape[0]], interpolation=cv2.INTER_AREA) frame = cv2.putText(frame, f"Received: {self.frame_count} frames", (10, 32), cv2.FONT_HERSHEY_PLAIN, 0.5, (255,0,0), 1) self.frame_count += 1 frames.append(frame) if reader_eof: entry = RecoderEntry( index=index, audio_segment=None, #(22000,2) frames=None, #(24, 1080, 1920,3) -> (24, 2160, 3840, 3) fps=self.fps, profiler=Profiler() ) entry.profiler.start('recoder.output') if self.on_queue is not None: self.on_queue(entry) else: try: self.queue.put_nowait(entry) except queue.Full: print(f'TwitchRecoder: output queue is full. 
Is consumer too slow?') break if len(frames) == 0: print(f'TwitchRecoder: frame does not recorded...') continue #print('f') audio_segment = audio_grabber.grab() if self.audio_skip > 0: while self.audio_queue.qsize() < self.audio_skip: self.audio_queue.put(audio_segment.copy()) audio_segment = self.audio_queue.get() frames = np.stack(frames, axis=0) t_sum.append(time.time()-t) if len(t_sum) > 100: t_sum.pop(0) t_avg = sum(t_sum)/len(t_sum) print(f'TwitchRecoder: batch[{index}] captured took average {t_avg:.2f} sec. Audio[{audio_segment.shape}] Video[{frames.shape}]') t = time.time() entry = RecoderEntry( index=index, audio_segment=audio_segment, #(22000,2) frames=frames, #(24, 1080, 1920,3) -> (24, 2160, 3840, 3) fps=self.fps, profiler=Profiler() ) entry.profiler.start('recoder.output') if self.on_queue is not None: self.on_queue(entry) else: try: self.queue.put_nowait(entry) except queue.Full: print(f'TwitchRecoder: output queue is full. Is consumer too slow?') index += 1 print('TwitchRecoder: try term img') image_grabber.terminate() print('TwitchRecoder: try term audio') audio_grabber.terminate() print('TwitchRecoder: exit subproc') os.kill(os.getpid(), 9) def start(self): self.proc = mp.Process(target=self.proc_main, daemon=True) self.proc.start() def get(self) -> RecoderEntry: return self.queue.get() def stop(self): self.cmd_queue.put("exit") self.queue.close() print('TwitchRecoder: joining all subprocs') self.join() print('TwitchRecoder: joined subprocs') def join(self): self.proc.join() if __name__ == '__main__': print('asdf') recoder = TwitchRecoder(target_url=TW_MAOU, quality='1080p60') recoder.start() time.sleep(3) if not os.path.exists('./saves/frames/'): os.mkdir('./saves/frames/') j = 0 for i in range(10): batch = recoder.queue.get(timeout=30) #type: RecoderEntry for k in range(batch.frames.shape[0]): cv2.imwrite(f"saves/frames/{j:04}.png", cv2.cvtColor(batch.frames[k], cv2.COLOR_RGB2BGR)) j += 1 print(f"{i} batch get. 
{batch.frames.shape}") recoder.stop()
gmlwns2000/sharkshark-4k
src/stream/recoder.py
recoder.py
py
7,577
python
en
code
14
github-code
36
[ { "api_name": "numpy.ndarray", "line_number": 29, "usage_type": "attribute" }, { "api_name": "numpy.ndarray", "line_number": 30, "usage_type": "attribute" }, { "api_name": "util.profiler.Profiler", "line_number": 32, "usage_type": "name" }, { "api_name": "dataclas...
19839716589
from __future__ import print_function import cv2 as cv import matplotlib.pyplot as plt if __name__ == "__main__": img = cv.imread('images/dog1.jpeg') gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY) dst = cv.equalizeHist(gray_img) gray_hst = cv.calcHist([gray_img], [0], None, [256], [0, 256]) gray_hst_dst = cv.calcHist([dst], [0], None, [256], [0, 256]) cv.imshow('Source image', img) cv.imshow('Source gray image', dst) cv.imshow('Equalized Image', gray_img) plt.figure() plt.title("GrayScale Histogram") plt.xlabel('Bins') plt.ylabel('# of pixels') plt.plot(gray_hst_dst) plt.xlim([0, 255]) plt.show() cv.waitKey()
AnhVietPham/Deep-Learning
Computer-Vision/opencv-course/histogram.py
histogram.py
py
683
python
en
code
0
github-code
36
[ { "api_name": "cv2.imread", "line_number": 6, "usage_type": "call" }, { "api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call" }, { "api_name": "cv2.COLOR_BGR2GRAY", "line_number": 7, "usage_type": "attribute" }, { "api_name": "cv2.equalizeHist", "...
32952196842
from django.shortcuts import render, redirect from kajaki_app.models import Route, Kayak, Order, OrderKayak from django.urls import reverse, reverse_lazy from datetime import date from django.views import View from kajaki_app.forms import AddKayakForm, AddRouteForm, ContactForm from django.views.generic import ListView, CreateView, UpdateView, DetailView, DeleteView from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin, PermissionRequiredMixin class AddRouteView(View): # permission_required = ['kajaki_app.add_route'] def get(self, request): form = AddRouteForm() return render(request, 'kajaki_app/add_route.html', {'form': form, 'submit_value_text': 'Dodaj'}) def post(self, request): form = AddRouteForm(request.POST) if form.is_valid(): form.save() return redirect(reverse('add_route')) return render(request, 'kajaki_app/add_route.html', {'form': form, 'submit_value_text': 'Dodaj'}) class RouteListView(ListView): model = Route template_name = 'kajaki_app/route_list.html' class AddKayakView(View): # permission_required = ['kajaki_app.add_kayak'] def get(self, request): form = AddKayakForm() return render(request, 'kajaki_app/add_kayak.html', {'form': form, 'submit_value_text': 'Dodaj'}) def post(self, request): form = AddKayakForm(request.POST) if form.is_valid(): form.save() return redirect(reverse('add_kayak')) return render(request, 'kajaki_app/add_kayak.html', {'form': form, 'submit_value_text': 'Dodaj'}) class KayakListView(ListView): model = Kayak template_name = 'kajaki_app/kayak_list.html' class KayakUpdateView(LoginRequiredMixin, UpdateView): # permission_required = ['filmy.change_film'] model = Kayak template_name = 'kajaki_app/add_kayak.html' fields = '__all__' def get_success_url(self): super().get_success_url() return reverse("add_kayak", args=(self.object.id,)) class KayakDeleteView(LoginRequiredMixin, DeleteView): model = Kayak template_name = 'kajaki_app/kayak_delete.html' success_url = reverse_lazy('kayak_list') class 
KayakDetailView(DetailView): model = Kayak template_name = 'kajaki_app/details_kayak.html' class CheckoutView(View): def get(self, request): return render(request, 'kajaki_app/checkout.html') def post(self, request): name = request.POST.get('name', '') email = request.POST.get('email', '') date = request.POST.get('date', '') phone = request.POST.get('phone', '') return render(request, 'kajaki_app/checkout.html') class OrderView(LoginRequiredMixin, View): def get(self, request): routes = Route.objects.all() kayaks = Kayak.objects.all() return render(request, 'kajaki_app/order.html', {'kayaks': kayaks, 'routes': routes}) def post(self, request): user = request.user route = request.POST.get('route') date = request.POST.get('date') kayak = request.POST.get('kayak') amount = request.POST.get('amount') if route and date and int(amount) >= 1 and kayak: route = Route.objects.get(name=route) order = Order.objects.create(route=route, buyer=user, date=date) kayak = Kayak.objects.get(name=kayak) order_kayak = OrderKayak.objects.create(kayak=kayak, order=order, amount=amount) return redirect(reverse('my_account')) return render(request, 'kajaki_app/order.html', {'message': 'Wypełnij poprawnie wszystkie pola'}) class ContactView(View): def get(self, request): form = ContactForm() return render(request, 'kajaki_app/contact.html', {'form': form, 'submit_value_text': 'Wyślij'}) def post(self, request): form = ContactForm(request.POST) if form.is_valid(): form.save() return redirect(reverse('index')) return render(request, 'kajaki_app/contact.html', {'form': form, 'submit_value_text': 'Wyślij'}) class AboutUsView(View): def get(self, request): return render(request, 'kajaki_app/about_us.html')
KamilNurzynski/Kajaki
kajaki_app/views.py
views.py
py
4,243
python
en
code
0
github-code
36
[ { "api_name": "django.views.View", "line_number": 11, "usage_type": "name" }, { "api_name": "kajaki_app.forms.AddRouteForm", "line_number": 15, "usage_type": "call" }, { "api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call" }, { "api_name"...
38807167453
#%% [markdown] # We need to create bar charts for our fancy plot to show the fraction of stuff from # each region. We'll do that here. #%% group_ids_to_plot = [0, 431, 88, 299, 9] #%% from ltcaesar import read_data_from_file import numpy as np import matplotlib.pyplot as plt #%% # Setup our favourite stylesheet plt.style.use("mnras_flatiron") #%% directory = "s50j7kAHF" data = read_data_from_file(f"{directory}/lt/lt_outputs.hdf5") #%% [markdown] # We now need to calculate three things for each halo: # + The fraction of baryonic mass from outside the halo # + The fraction of baryonic mass from other halos # + The fraction of baryonic mass from our own LR #%% def grab(x): return getattr(data, f"gas_{x}") + getattr(data, f"stellar_{x}") baryonic_mass = grab("mass_in_halo") from_outside = grab("mass_in_halo_from_outside_lagrangian") from_other = grab("mass_in_halo_from_other_lagrangian") from_own = grab("mass_in_halo_from_lagrangian") #%% ratio_own = from_own / baryonic_mass ratio_other = from_other / baryonic_mass ratio_outside = from_outside / baryonic_mass #%% import os os.mkdir("barcharts") #%% # Actually make the bar charts one by one for id in group_ids_to_plot: fig, a = plt.subplots(figsize=(1, 1)) a.bar( [0.5, 1.5, 2.5], [ratio_own[id], ratio_other[id], ratio_outside[id]], width=1.0, color=["C0", "C1", "C2"], ) a.set_xticks([0.5, 1.5, 2.5]) a.set_xticklabels(["Own", "Other", "Outside"]) a.set_xlim(0, 3) a.set_ylim(0, 0.8) fig.tight_layout() fig.savefig("barcharts/group_{}.pdf".format(id)) #%%
JBorrow/lagrangian-transfer-paper
figures/plotgen/create_bar_charts_fancy.py
create_bar_charts_fancy.py
py
1,612
python
en
code
1
github-code
36
[ { "api_name": "matplotlib.pyplot.style.use", "line_number": 15, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.style", "line_number": 15, "usage_type": "attribute" }, { "api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name" }, { "api_na...
19996296105
from django.shortcuts import render, redirect from django.http import Http404, HttpResponseRedirect from django.urls import reverse from .models import Article, Category, ArticleCategoryRelation from django.utils import timezone from .forms import UserRegistrationForm from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.views import View from django.views.generic import ListView import json from django.http import JsonResponse from django.template.loader import render_to_string items_on_page = 3 def index(request): article_list = Article.objects.all()[:items_on_page] categories = Category.objects.all() return render(request, 'articles/main.html', {'all_articles': article_list, 'categories': categories, 'is_admin': request.user.is_staff, }) class ArticleListView(ListView): def get(self, request, **kwargs): user = request.user if user.groups.filter(name='admin').count(): is_admin = True article_list = Article.objects.all() current_page = Paginator(article_list, items_on_page) page = request.GET.get('page') try: page_articles = current_page.page(page) except: page_articles = current_page.page(1) data = json.dumps(list(Article.objects.values_list('id', 'title'))) categories = Category.objects.all() return render(request, 'articles/list.html', {'all_articles': page_articles, 'is_admin': request.user.is_staff, 'qs_json': data, 'categories': categories, }) def create_article(request): if not request.user.is_staff: raise Http404('Доступ запрещен!') if request.method == 'POST': try: category_choices = [x for x in request.POST.getlist('category')] category_list = [Category.objects.get(id=category_id) for category_id in category_choices] except: raise Http404('Категория не найдена!') request.user.article_set.create(title=request.POST['title'], text=request.POST['text'], date=timezone.now()) current_article = Article.objects.all()[0] for category in category_list: category.includes_article.add(current_article) return redirect('/') category_list 
= Category.objects.all() return render(request, 'articles/create.html', {'category_list': category_list}) def update_article(request, article_id): if not request.user.is_staff: raise Http404('Доступ запрещен!') current_article = Article.objects.get(id=article_id) if not current_article: raise Http404('Статья не найдена!') if request.method == 'POST': # try: # category_choices = [x for x in request.POST.getlist('category')] # category_list = [Category.objects.get(id=category_id) for category_id in category_choices] # except: # raise Http404('Категория не найдена!') current_article.title=request.POST['title'] current_article.text=request.POST['text'] current_article.save() # ArticleCategoryRelation.objects.filter(article=current_article).delete() # # for category in category_list: # category.includes_article.add(current_article) return redirect('/') category_list = Category.objects.all() category_of_article = ArticleCategoryRelation.objects.filter(article=current_article) return render(request, 'articles/update.html', {'category_list': category_list, 'article': current_article, 'article_category': category_of_article}) def leave_comment(request, article_id): try: article = Article.objects.get(id=article_id) except: raise Http404('Статья не найдена!') article.comment_set.create(author=request.user, text=request.POST['text'], date=timezone.now()) return HttpResponseRedirect(reverse('newnotes:view_article', args=(article.id,))) def profile(request): if request.user.is_anonymous: raise Http404('Доступ запрещен!') categories = Category.objects.all() return render(request, 'account/profile.html', {'categories': categories, }) def register(request): if request.method == 'POST': form = UserRegistrationForm(request.POST) if form.is_valid(): new_user = form.save() return render(request, 'registration/register_done.html', {'new_user': new_user}) else: print(form.errors.as_data()) else: form = UserRegistrationForm() return render(request, 'registration/register.html', {'form': 
form}) def delete_article(request, article_id): if not request.user.is_staff: raise Http404('Доступ запрещен!') try: article = Article.objects.get(id=article_id) except: raise Http404('Статья не найдена!') if request.method == "POST": article.delete() return redirect('/') return render(request, 'articles/delete.html', {'article': article}) def create_category(request): if not request.user.is_staff: raise Http404('Доступ запрещен!') if request.method == 'POST': Category.objects.create(name=request.POST['name']) return redirect('/') category_list = Category.objects.all() return render(request, 'categories/create.html', {'category_list': category_list, }) def delete_category(request, category_id): if not request.user.is_staff: raise Http404('Доступ запрещен!') try: category = Category.objects.get(id=category_id) except: raise Http404('Категория не найдена!') if request.method == "POST": category.delete() return redirect('/') category_list = Category.objects.all() return render(request, 'categories/delete.html', {'category': category, 'category_list': category_list, }) def update_category(request, category_id): if not request.user.is_staff: raise Http404('Доступ запрещен!') try: category = Category.objects.get(id=category_id) except: raise Http404('Категория не найдена!') if request.method == 'POST': Category.objects.filter(id=category_id).update(name=request.POST['name']) return redirect('/') category_list = Category.objects.all() return render(request, 'categories/update.html', {'category': category, 'category_list': category_list, }) class ListCategoryArticles(ListView): def get(self, request, category_id, **kwargs): rel_category_article = ArticleCategoryRelation.objects.filter(category=category_id).order_by('-id') category = Category.objects.all().get(id=category_id) article_list = [Article.objects.get(id=x.article.id) for x in rel_category_article] current_page = Paginator(article_list, items_on_page) page = request.GET.get('page') try: context = 
current_page.page(page) except: context = current_page.page(1) data = json.dumps(list(Article.objects.values_list('id', 'title'))) categories = Category.objects.all() return render(request, 'categories/list.html', {'all_articles': context, 'is_admin': request.user.is_staff, 'qs_json': data, 'categories': categories, 'category': category, }) def get_paginated_page(request, objects, number=items_on_page): current_page = Paginator(objects, number) page = request.GET.get('page') if request.method == 'GET' else request.POST.get('page') try: return current_page.page(page) except PageNotAnInteger: return current_page.page(1) except EmptyPage: return current_page.page(current_page.num_pages) def is_ajax(request): return request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' class ViewArticle(View): def get(self, request, article_id): try: article = Article.objects.get(id=article_id) except: raise Http404('Статья не найдена!') list_comments = article.comment_set.order_by('-id') if not request.user.is_anonymous: article.readers.add(request.user) watched = article.readers.count() categories = Category.objects.all() return render(request, 'articles/view.html', {'article': article, 'list_comments': get_paginated_page(request, list_comments), 'watched': watched, 'categories': categories, }) def post(self, request, article_id): if is_ajax(request): try: article = Article.objects.get(id=article_id) except: raise Http404('Статья не найдена!') return JsonResponse({ "result": True, "comms": render_to_string( request=request, template_name='articles/comms.html', context={'list_comments': get_paginated_page(request, article.comment_set.order_by('-id'))} ) }) else: raise Http404()
osinkel/articles-django
newnotes/views.py
views.py
py
9,576
python
en
code
0
github-code
36
[ { "api_name": "models.Article.objects.all", "line_number": 18, "usage_type": "call" }, { "api_name": "models.Article.objects", "line_number": 18, "usage_type": "attribute" }, { "api_name": "models.Article", "line_number": 18, "usage_type": "name" }, { "api_name": ...
17582018412
import sys import typing as t import importlib from pathlib import Path import pkg_resources from starwhale.utils import console from starwhale.utils.venv import ( guess_current_py_env, get_user_python_sys_paths, check_python_interpreter_consistency, ) def import_object( workdir: t.Union[Path, str], handler_path: str, py_env: str = "" ) -> t.Any: workdir_path = str(Path(workdir).absolute()) external_paths = [workdir_path] py_env = py_env or guess_current_py_env() _ok, _cur_py, _ex_py = check_python_interpreter_consistency(py_env) if not _ok: console.print( f":speaking_head: [red]swcli python prefix:{_cur_py}, runtime env python prefix:{_ex_py}[/], swcli will inject sys.path" ) external_paths.extend(get_user_python_sys_paths(py_env)) prev_paths = sys.path[:] sys_changed = False for _path in external_paths[::-1]: if _path not in sys.path: sys.path.insert(0, _path) pkg_resources.working_set.add_entry(_path) sys_changed = True try: module_name, handler_name = handler_path.split(":", 1) console.print( f":speaking_head: [green]import module:{module_name}, handler:{handler_name}[/]" ) _module = importlib.import_module(module_name, package=workdir_path) _obj = getattr(_module, handler_name, None) if not _obj: raise ModuleNotFoundError(f"{handler_path}") except Exception: console.print_exception() if sys_changed: sys.path[:] = prev_paths raise return _obj def load_module(module: str, path: Path) -> t.Any: workdir_path = str(path.absolute()) external_paths = [workdir_path] for _path in external_paths[::-1]: if _path not in sys.path: sys.path.insert(0, _path) pkg_resources.working_set.add_entry(_path) return importlib.import_module(module, package=workdir_path)
star-whale/starwhale
client/starwhale/utils/load.py
load.py
py
1,988
python
en
code
171
github-code
36
[ { "api_name": "typing.Union", "line_number": 17, "usage_type": "attribute" }, { "api_name": "pathlib.Path", "line_number": 17, "usage_type": "name" }, { "api_name": "pathlib.Path", "line_number": 19, "usage_type": "call" }, { "api_name": "starwhale.utils.venv.gues...
7168853562
import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc import dash from app import app from app import server from apps.gas_monitoring import gas_app navBar = dbc.NavbarSimple( children=[ dbc.NavItem(dbc.NavLink("Home", href="/")), dbc.NavItem(dbc.NavLink("Gas-system", href="/gas-monitoring")), dbc.NavItem(dbc.NavLink("Oil-system", href="/oil-monitoring")), ], brand="G.O.M", brand_href="/", color="primary", dark=True, ) app.layout = html.Div( [ dcc.Location(id='url', refresh=False), navBar, html.Div( id='content', ) ] ) error_page = html.Div([ html.H1("404",style={"textAlign":"center"}), html.H3("Page Not Found!",style={"textAlign":"center"}) ]) index_page = html.Div( [ html.Div([ html.H2("Welcome to Gas Oil plant monitoring System."), html.P("""Lorem ipsum dolor sit amet ac maximusrdiet convallis. Duis rutrum neque consectetur mauris tempor laoreet. Vestibulum quis nulla eu orci efficitur varrisque vel nibh. Integer eu velit eget ex consectetur consectetur sit amet vitae lectus. Mauris egestas purus et mi pulvinar, a posuere justo convallis. Nunc nec laoreet lectus. Mauris purus est, bibendum hendrerit fermentum quis, porttitor at massa.""") ], style={ 'text-align': 'center', 'position': 'absolute', 'top': '50%', 'left': '50%', 'transform': 'translate(-50%, -50%)', 'color': 'white', }) ], style={"textAlign":"center", 'backgroundImage': 'url("assets/images/background.jpg")', 'backgroundRepeat': 'no-repeat', 'backgroundPosition': 'center', 'backgroundSize' : 'cover', 'height':'50vh', 'position':'relative', }, ) @app.callback(dash.dependencies.Output('content', 'children'), [dash.dependencies.Input('url', 'pathname')]) def display_page(pathname): if pathname == '/gas-monitoring': return gas_app.layout elif pathname == "/oil-monitoring": return gas_app.layout elif pathname == '/': return index_page else: return error_page if __name__ == '__main__': app.run_server(debug=True)
muntakim1/gas-oil-plant-monitoring
index.py
index.py
py
2,435
python
en
code
0
github-code
36
[ { "api_name": "dash_bootstrap_components.NavbarSimple", "line_number": 10, "usage_type": "call" }, { "api_name": "dash_bootstrap_components.NavItem", "line_number": 13, "usage_type": "call" }, { "api_name": "dash_bootstrap_components.NavLink", "line_number": 13, "usage_ty...
22226405639
import pandas as pd import argparse from gtfparse import read_gtf parser = argparse.ArgumentParser() parser.add_argument('--phenotype', type=str, required=True) # parser.add_argument('--ncRNA', type=str, required=True) if __name__ == '__main__': args = parser.parse_args() phenotype = args.phenotype gtf = read_gtf('ReferenceGenome/Annotations/gencode.v34.chromasomal.annotation.gtf') ncRNA_genes = gtf.loc[(gtf.gene_type.isin(['snoRNA', 'snRNA', 'lncRNA', 'unprocessed_pseudogene', 'transcribed_unprocessed_pseudogene', 'pseudogene', 'rRNA_pseudogene', 'transcribed_processed_pseudogene', 'transcribed_unitary_pseudogene', 'transcribed_unprocessed_pseudogene', 'translated_processed_pseudogene', 'translated_unprocessed_pseudogene', 'unprocessed_pseudogene' ])) & (gtf.feature == 'gene')].gene_id counts = pd.read_csv('featureCounts/{phenotype}/Counts.txt'.format(phenotype=phenotype), sep='\t', skiprows=1, index_col=0) ncRNA_counts = counts.loc[ncRNA_genes] ncRNA_counts.to_csv('featureCounts/{phenotype}_annotated_ncRNA/Counts.txt'.format(phenotype=phenotype), sep='\t', index=True, header=True)
bfairkun/ChromatinSplicingQTLs
code/scripts/NonCodingRNA/GetNonCodingRNAFromFeatureCounts.py
GetNonCodingRNAFromFeatureCounts.py
py
1,685
python
en
code
0
github-code
36
[ { "api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call" }, { "api_name": "gtfparse.read_gtf", "line_number": 13, "usage_type": "call" }, { "api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call" } ]
3323022532
# -*- coding: utf-8 -*- import psycopg2 # the module that connects to the database """ The task is to create a reporting tool that prints out reports (in plain text) based on the data in the database. 1.What are the most popular three articles of all time? Which articles have been accessed the most? Present this information as a sorted list with the most popular article at the top. 2.Who are the most popular article authors of all time? That is, when you sum up all of the articles each author has written, which authors get the most page views? Present this as a sorted list with the most popular author at the top. 3.On which days did more than 1% of requests lead to errors? The log table includes a column status that indicates the HTTP status code that the news site sent to the user's browser. (Refer back to this lesson if you want to review the idea of HTTP status codes.) """ DBNAME = "news" # Open and connect to database; Run the query; Return database cursor objects def query(user_query): DB = psycopg2.connect(database = DBNAME) cursor = DB.cursor() cursor.execute(user_query) result = cursor.fetchall() DB.close() return result # 1. popular article def pop_article(): top_article = query("select title, count(*) from articles " "join log on path like CONCAT('%',slug) group by title " "order by count(*) desc limit 3") print("The most popular three articles are:") for title, views in top_article: print(" \"{}\" -- {} views".format(title, views)) # 2. popular author def pop_author(): top_authors = query("select name, count(path) from authors " "join articles on authors.id = author join log " "on path like CONCAT('%', slug) group by name order by count(path) desc limit 4") print('The most popular authors are:') for name, views in top_authors: print(" {} -- {} views".format(name, views)) # 3. 
error def error_day(): errorday = query("select date, avg from (" "select date, (sum(error) / (select count(*) " "from log where (time::date) = date)) as avg " "from (select (time::date) as date, count(*) as error " "from log where status like '4%' group by date) " "as error_percentage group by date order by avg desc) as final " "where avg >= .01") print('Days with more than 1% of requests lead to errors') for res in errorday: print (str(res[0]) + " — " + str(round((res[1]*100), 2)) + '%') if __name__ == '__main__': pop_article() pop_author() error_day()
laurafang/-logs_ana
log_ana.py
log_ana.py
py
2,577
python
en
code
0
github-code
36
[ { "api_name": "psycopg2.connect", "line_number": 26, "usage_type": "call" } ]
9911287046
#!/usr/bin/python # -*- coding: utf-8 -*- ''' Custom filters for use in openshift_aws ''' from ansible import errors class FilterModule(object): ''' Custom ansible filters for use by openshift_aws role''' @staticmethod def scale_groups_serial(scale_group_info, upgrade=False): ''' This function will determine what the deployment serial should be and return it Search through the tags and find the deployment_serial tag. Once found, determine if an increment is needed during an upgrade. if upgrade is true then increment the serial and return it else return the serial ''' if scale_group_info == []: return 1 scale_group_info = scale_group_info[0] if not isinstance(scale_group_info, dict): raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict") serial = None for tag in scale_group_info['tags']: if tag['key'] == 'deployment_serial': serial = int(tag['value']) if upgrade: serial += 1 break else: raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found") return serial @staticmethod def scale_groups_match_capacity(scale_group_info): ''' This function will verify that the scale group instance count matches the scale group desired capacity ''' for scale_group in scale_group_info: if scale_group['desired_capacity'] != len(scale_group['instances']): return False return True @staticmethod def build_instance_tags(clusterid): ''' This function will return a dictionary of the instance tags. The main desire to have this inside of a filter_plugin is that we need to build the following key. {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} ''' tags = {'clusterid': clusterid, 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} return tags def filters(self): ''' returns a mapping of filters to methods ''' return {'build_instance_tags': self.build_instance_tags, 'scale_groups_match_capacity': self.scale_groups_match_capacity, 'scale_groups_serial': self.scale_groups_serial}
barkbay/openshift-ansible-gravitee
roles/lib_utils/filter_plugins/openshift_aws_filters.py
openshift_aws_filters.py
py
2,484
python
en
code
1
github-code
36
[ { "api_name": "ansible.errors.AnsibleFilterError", "line_number": 28, "usage_type": "call" }, { "api_name": "ansible.errors", "line_number": 28, "usage_type": "name" }, { "api_name": "ansible.errors.AnsibleFilterError", "line_number": 39, "usage_type": "call" }, { ...
74332199142
from Bio import SeqIO import sys def readin_fasta(input_file, batch_size): """Read fasta file with a fast, memory-efficient generator.""" title_list = [] seq_list = [] seq_num = len([1 for line in open(input_file) if line.startswith(">")]) for i, seq_record in enumerate(SeqIO.FastaIO.SimpleFastaParser(open(input_file)),1): title, seq = seq_record title_list.append(title) seq_list.append(seq) if i%batch_size == 0: yield title_list, seq_list title_list = [] seq_list = [] if i == seq_num: print('Converted {} of {} fragments'.format(i, seq_num)) yield title_list, seq_list
elond/11785_Project
data_processing/encoding_convert/readin_fasta.py
readin_fasta.py
py
697
python
en
code
0
github-code
36
[ { "api_name": "Bio.SeqIO.FastaIO.SimpleFastaParser", "line_number": 10, "usage_type": "call" }, { "api_name": "Bio.SeqIO.FastaIO", "line_number": 10, "usage_type": "attribute" }, { "api_name": "Bio.SeqIO", "line_number": 10, "usage_type": "name" } ]
30934317618
#!/usr/bin/python3 # USE THIS WHEN IN NOTEBOOK -> %python # CHANGE ACCORDINGLY: the field XXX import sys import time from azure.identity import ClientSecretCredential from azure.storage.filedatalake import DataLakeServiceClient,FileSystemClient ACCOUNT_NAME = "XXX" FILE_SYSTEM = "XXX" TARGET_DIR = "XXX" def set_permission(path,acl): # Directories and files need to be handled differently if path.is_directory: directory_client = filesystem.get_directory_client(directory=path.name) resp = directory_client.set_access_control(acl=acl) print(f'\tApplied Directory ACL to {path.name}') else: file_client = filesystem.get_file_client(path.name) # Need to remove "Default" ACL segments from ACL string because that can't be applied to files resp = file_client.set_access_control(acl=acl[:acl.find('default')-1]) print(f'\tApplied File ACL to {path.name}') return resp def main(target_dir,filesystem): # Get the target directory, subdirectories and permissions paths = filesystem.get_paths(path=target_dir) directory_client = filesystem.get_directory_client(directory=target_dir) acl = directory_client.get_access_control() target_acl_dir = acl['acl'] for path in paths: set_permission(path,target_acl_dir) if __name__ == '__main__': # Clients credential = "XXX" # the master account key. service = DataLakeServiceClient(account_url=f'https://{ACCOUNT_NAME}.dfs.core.windows.net/', credential=credential) filesystem = service.get_file_system_client(file_system=FILE_SYSTEM) print('*'*20) print(f'Storage Account Name: {ACCOUNT_NAME}') print(f'File System Name: {FILE_SYSTEM}') print('*'*20) print(f'Running: Setting ACLs for all child paths (subdirectories and files) in TARGET_DIR to match parent.') total_start = time.time() # Start Timing main(TARGET_DIR,filesystem) total_end = time.time() # End Timing print("Complete: Recursive ACL configuration took {} seconds.".format(str(round(total_end - total_start,2))))
eosantigen/devops-tools
apps/python/azure/azure_datalake_set_acl.py
azure_datalake_set_acl.py
py
2,087
python
en
code
0
github-code
36
[ { "api_name": "azure.storage.filedatalake.DataLakeServiceClient", "line_number": 45, "usage_type": "call" }, { "api_name": "time.time", "line_number": 53, "usage_type": "call" }, { "api_name": "time.time", "line_number": 55, "usage_type": "call" } ]
27688638873
"""Config file and logging related utility functions.""" import configparser import json import os import sys from pprint import pprint import yaml def read_cfg(location, verbose=True): """ Read config file at location using ConfigParser. Parameters ---------- location : str Where the config file is located verbose : bool, optional, defaults to True Should print the contents of the read config file. Returns ------- ConfigParser The python ConfigParser object after reading the cfg. """ if not os.path.exists(location): raise ValueError(f"Config file {location} does not exist") config = configparser.ConfigParser() config.read(location) if verbose: print_cfg(config, "Program started with configuration") return config def print_cfg(config, msg=""): """ Print the contents of a ConfigParser object. Parameters ---------- config : ConfigParser The ConfigParser to print the contents of. msg: str, optional, defaults to "" Message to print before printing the config file. Returns ------- None """ if msg != "": print(msg) config_dict = [{x: tuple(config.items(x))} for x in config.sections()] pprint(config_dict, width=120) def parse_args(parser, verbose=True): """ Parse command line arguments into a Namespace. Parameters ---------- verbose : bool, optional, defaults to True Should print the values of the command line args. Returns ------- Namespace Parsed arguments. Raises ------ ValueError If any arguments are passed which are not used in program. """ args, unparsed = parser.parse_known_args() if len(unparsed) != 0: raise ValueError( "Unrecognised command line arguments passed {}".format(unparsed) ) if verbose: if len(sys.argv) > 1: print("Command line arguments", args) return args def read_python(path, dirname_replacement=""): """ Execute a python script at path. The script is expected to have items visible at global scope, which are stored as metadata. 
Note ---- The string "__thisdirname__" is magic and will be replaced by the absolute path to the directory containing the script. The string "__dirname__" is also magic and will be replaced by the value of dirname_replacement. Parameters ---------- path : string The location of the python script. dirname_replacement : string, optional, optional, defaults to None What to replace __dirname__ with. By default, None will replace __dirname__ with dirname of path. Returns ------- dict The scripts global scope variables stored in a dictionary. """ def normalise_path(pth): s = os.path.abspath(pth) s = s.replace(os.sep, "/") return s path = os.path.realpath(os.path.expanduser(path)) if not os.path.exists(path): raise ValueError("{} does not exist to read".format(path)) with open(path, "r") as f: contents = f.read() if dirname_replacement != "": contents = contents.replace("__dirname__", normalise_path(dirname_replacement)) else: contents = contents.replace( "__dirname__", normalise_path(os.path.dirname(path)) ) contents = contents.replace( "__thisdirname__", normalise_path(os.path.dirname(path)) ) metadata = {} try: exec(contents, {}, metadata) except Exception as e: import traceback print("QUITTING: An error occurred reading {}".format(path)) traceback.print_exc() exit(-1) metadata = {k.lower(): v for (k, v) in metadata.items()} return metadata def read_yaml(path): with open(path, "r") as stream: parsed_yaml = yaml.safe_load(stream) return parsed_yaml def read_json(path): with open(path, "r") as stream: parsed_json = json.load(stream) return parsed_json def split_dict(in_dict, index): """ Grab the value at index from each list in the dictionary. Parameters ---------- in_dict : dict The dictionary to grab from index : int The index in the lists to pull from Returns ------- dict The original dictionary but with index values pulled out. 
""" new_dict = {} for key, value in in_dict.items(): if isinstance(value, list): new_dict[key] = value[index] return new_dict def convert_dict_to_string(in_dict, name): """ Convert the underlying parameters dictionary to string. Can be useful for printing or writing to a file. Does not overwrite default __str__ as the output is quite verbose. Parameters ---------- in_dict : dict Input dictionary Returns ------- str The string representation of the dict. """ def _val_to_str(val): """ Convert a value to a string. One caveat, if a string is passed, it returns the original string wrapped in quotes. Parameters ---------- val : object The value to convert Returns ------- str The value as a string. """ return f"'{val}'" if isinstance(val, str) else val out_str = "" out_str += name + " = {\n" for k, v in in_dict.items(): out_str += f"\t{_val_to_str(str(k))}:" if isinstance(v, dict): out_str += "\n\t\t{\n" for k2, v2 in v.items(): out_str += "\t\t {}: {},\n".format( _val_to_str(str(k2)), _val_to_str(v2) ) out_str += "\t\t},\n" else: out_str += f" {_val_to_str(v)},\n" out_str += "\t}" return out_str
seankmartin/PythonUtils
skm_pyutils/config.py
config.py
py
5,936
python
en
code
1
github-code
36
[ { "api_name": "os.path.exists", "line_number": 27, "usage_type": "call" }, { "api_name": "os.path", "line_number": 27, "usage_type": "attribute" }, { "api_name": "configparser.ConfigParser", "line_number": 30, "usage_type": "call" }, { "api_name": "pprint.pprint",...
20824479856
import dlib from imutils import face_utils dlib_path = "dlibb/shape_predictor_68_face_landmarks.dat" detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor(dlib_path) import argparse import pickle import cv2 import os import mpmath import numpy as np # face_classifier = cv2.CascadeClassifier('harcascades/haarcascade_frontalface_default.xml') src_path = ("O:\\Nama_College\\FYP\\MY_FYP_CODE\\MY_FYP_CODE\\MY_CODE\\TESTING_DATASET\\") predict = [] features_vector = [] pickle_in = open("O:\\Nama_College\\FYP\\MY_FYP_CODE\\MY_FYP_CODE\\MY_CODE\\dlib_normalized.pickle","rb") # pickle_in = open("O:\\Nama_College\\FYP\\MY_FYP_CODE\\MY_FYP_CODE\\MY_CODE\\dlib_normalized_full.pickle","rb") model = pickle.load(pickle_in) cap = cv2.VideoCapture(0) B= 0 while (True): ret, frame = cap.read() gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) B += 1 if B % 5 == 0: print(B) face = detector(gray,0) for (J, rect) in enumerate(face): shap = predictor(gray, rect) xlist = [] ylist = [] shap = face_utils.shape_to_np(shap) Centre = (shap[30]) centre_x = Centre[0] centre_y = Centre[1] shap = shap[18:68] for i in shap: xlist.append(i[0]) ylist.append(i[1]) forx = [] fory = [] for x in xlist: forx.append((x - centre_x) ** 2) for y in ylist: fory.append((y - centre_y) ** 2) listsum = [sum(x) for x in zip(forx, fory)] features = [] for i in listsum: k = mpmath.sqrt(float(i)) features.append(float(k)) maxx = (max(features)) final = [] for i in features: if (i == 0.0): continue F = i / maxx final.append(F) # print(final) numpy_array = np.array(final) prediction = model.predict([numpy_array])[0] # predict.append(prediction) (x, y, w, h) = face_utils.rect_to_bb(rect) cv2.rectangle(frame, (x, y), (x + w, y + h),(0, 255, 0), 2) # display the image and the prediction # cv2.putText(frame, "FACE ({})".format(J+ 1) + " " + prediction, (x , y ), cv2.FONT_HERSHEY_COMPLEX, 0.5, # (0, 255, 0), 2) cv2.putText(frame, prediction, (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 2) # 
cv2.putText(tasveer, prediction, (x-5 , y-5 ), cv2.FONT_HERSHEY_COMPLEX, 1.2, # (0, 0, 255),4) print(prediction) cv2.circle(frame, (centre_x, centre_y), 1, (0, 0, 0), 5) for (x, y) in shap: cv2.circle(frame, (x, y), 1, (0, 0, 255), 2) cv2.imshow("Image", frame) cv2.waitKey(1) if k == 'q': break cap.release() cv2.destroyAllWindows()
Hassan1175/MY_FYP_CODE
MY_CODE/videoframes.py
videoframes.py
py
3,060
python
en
code
0
github-code
36
[ { "api_name": "dlib.get_frontal_face_detector", "line_number": 4, "usage_type": "call" }, { "api_name": "dlib.shape_predictor", "line_number": 5, "usage_type": "call" }, { "api_name": "pickle.load", "line_number": 19, "usage_type": "call" }, { "api_name": "cv2.Vid...
74298299622
#!/usr/bin/env python #encoding=utf8 from json import dumps def get_node(tree, name): if tree.label == name: return True, [tree.label] if not tree.children: return False, None for child in tree.children: found, addr = get_node(child, name) if found: return True, [tree.label] + addr return False, None def goto_node(tree, desc): assert tree.label == desc[0] node = tree for name in desc[1:]: nodes = [n for n in node.children if n.label == name] if not nodes: return False, None node = nodes[0] return True, node def recreate_node(orig, desc): """Recreate item in orig under desc.""" tree = Tree(desc[-1], []) success, node = goto_node(orig, desc) if not node.children: return tree for child in node.children: success, _ = goto_node(orig, desc + [child.label]) if not success: child_node = Tree(child.label, []) else: child_node = recreate_node(orig, desc + [child.label]) tree.children.append(child_node) return tree class Tree(object): def __init__(self, label, children=[]): self.label = label self.children = children def __dict__(self): return {self.label: [c.__dict__() for c in sorted(self.children)]} def __str__(self, indent=None): return dumps(self.__dict__(), indent=indent) def __lt__(self, other): return self.label < other.label def __eq__(self, other): return self.__dict__() == other.__dict__() def from_pov(self, from_node): found, desc = get_node(self, from_node) if not found: raise ValueError("Node {} not found.".format(from_node)) last_label = desc[-1] node = recreate_node(self, desc) last_node = node reverse_desc = [last_label] for name in reversed(desc[:-1]): desc_ = get_node(self, name)[1] parent = recreate_node(self, desc_) last_node.children.append(parent) parent.children = [ child for child in parent.children if child.label != last_label ] last_label = desc_[-1] last_node = parent return node def path_to(self, from_node, to_node): tree = self.from_pov(from_node) found, desc = get_node(tree, to_node) if not found: raise ValueError("Dest node {} not 
found.".format(to_node)) return desc
xiaket/exercism
python/pov/pov.py
pov.py
py
2,553
python
en
code
0
github-code
36
[ { "api_name": "json.dumps", "line_number": 56, "usage_type": "call" } ]
34696045892
import os import yaml import openai """ 使用openai API的方式访问ChatGPT/azure GPT """ def set_env(cfg_file): with open(cfg_file) as f: config_data = yaml.safe_load(f) azure = config_data["azure"] if azure is not None: for k, v in azure.items(): os.environ[k] = v os.environ['MY_VARIABLE'] = 'my_value' def ai_chat(msgs=None): openai.api_type = "azure" openai.api_version = "2023-03-15-preview" openai.api_base = os.getenv("api-base") # Your Azure OpenAI resource's endpoint value. openai.api_key = os.getenv("api-key") response = openai.ChatCompletion.create( # 报错:openai.error.InvalidRequestError: The API deployment for this resource does not exist # 解决:只能使用账号已经部署的模型,通过OpenAI Studio查看部署了哪些模型 engine="gpt-35-turbo-test", # The deployment name you chose when you deployed the ChatGPT or GPT-4 model. # 目前只能通过每次请求上传已有上下文的方式来记忆上下文/多轮对话 messages=msgs ) print(response) print(response['choices'][0]['message']['content']) if __name__ == '__main__': set_env('D:\\qiyu-work\\openaikey.yaml') messages = [ # {"role": "system", "content": "Assistant is a large language model trained by OpenAI."}, #{"role": "system", "content": "Assistant is a large language model trained by OpenAI."}, {"role": "system", "content": "你现在是一名汽车4S店专业的销售顾问,客户咨询你价格,请把下面的话用可爱的语气表达出来,不要重复我说的话,回复不能超过30个字"}, {"role": "user", "content": "价格会受多因素的影响实时发生变化,具体我让销售跟您聊哈"} ] ai_chat(messages)
zzfengxia/python3-learn
dailytool/connect_openai_api.py
connect_openai_api.py
py
1,851
python
en
code
0
github-code
36
[ { "api_name": "yaml.safe_load", "line_number": 10, "usage_type": "call" }, { "api_name": "os.environ", "line_number": 14, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 15, "usage_type": "attribute" }, { "api_name": "openai.api_type", ...
34761383240
import sys, os import subprocess import datetime as dt from random import randint import argparse import web3 from web3 import Web3 from web3.middleware import geth_poa_middleware from eth_utils import decode_hex # Project modules import utils from TextColor.color import bcolors URL = "http://127.0.0.1:8545" ACCOUNT_DB_NAME = 'car.json' MGMT_CONTRACT_DB_NAME = utils.MGMT_CONTRACT_DB_NAME MGMT_CONTRACT_SRC_PATH = utils.MGMT_CONTRACT_SRC_PATH CONFIG = utils.open_data_base(ACCOUNT_DB_NAME) DATABASE = utils.open_data_base(MGMT_CONTRACT_DB_NAME) if DATABASE is None: sys.exit(f"{bcolors.FAIL}Setup hasn't been done{bcolors.ENDC}") def generate_private_key(_w3: Web3) -> str: """ Generate private key for car account using current time and random int :param Web3 _w3: Web3 instance :return: Private Key :rtype: str """ t = int(dt.datetime.utcnow().timestamp()) k = randint(0, 2 ** 16) privateKey = _w3.toHex(_w3.sha3(((t + k).to_bytes(32, 'big')))) if privateKey[:2] == '0x': privateKey = privateKey[2:] return (privateKey) def new_car_account(_w3: Web3) -> None: """ Create new addres for car account :param Web3 _w3: Web3 instance """ privateKey = generate_private_key(_w3) data = {"key": privateKey} utils.write_data_base(data, ACCOUNT_DB_NAME) print(f"{bcolors.HEADER}{_w3.eth.account.privateKeyToAccount(data['key']).address}{bcolors.ENDC}") def get_car_account_from_db(_w3: Web3) -> None: """ Get car account from database :param Web3 _w3: Web3 instance """ return (_w3.eth.account.privateKeyToAccount(utils.get_data_from_db(ACCOUNT_DB_NAME, 'key')).address) def register_car(_w3: Web3): """ Register new car :param Web3 _w3: Web3 instance """ data = utils.open_data_base(MGMT_CONTRACT_DB_NAME) if data is None: return f'{bcolors.FAIL}Cannot access management contract database{bcolors.ENDC}' data = CONFIG if data is None: return f'{bcolors.FAIL}Cannot access account database{bcolors.ENDC}' private_key = data['key'] mgmt_contract = utils.init_management_contract(_w3) car_address = 
_w3.eth.account.privateKeyToAccount(private_key).address registration_required_gas = 50000 gas_price = utils.get_actual_gas_price(_w3) if registration_required_gas * gas_price > _w3.eth.getBalance(car_address): return 'No enough funds to send transaction' nonce = _w3.eth.getTransactionCount(car_address) tx = {'gasPrice': gas_price, 'nonce': nonce} regTx = mgmt_contract.functions.registerCar().buildTransaction(tx) signTx = _w3.eth.account.signTransaction(regTx, private_key) txHash = _w3.eth.sendRawTransaction(signTx.rawTransaction) receipt = web3.eth.wait_for_transaction_receipt(_w3, txHash, 120, 0.1) if receipt.status == 1: return f'{bcolors.OKGREEN}Registered successfully{bcolors.ENDC}' else: return f'{bcolors.FAIL}Car registration failed{bcolors.ENDC}' def create_parser() -> argparse.ArgumentParser: """ Create cli argument parser :return: Parser :rtype: argparse.ArgumentParser """ parser = argparse.ArgumentParser( description='Car management tool', epilog=""" It is expected that Web3 provider specified by WEB3_PROVIDER_URI environment variable. E.g. 
WEB3_PROVIDER_URI=file:///path/to/node/rpc-json/file.ipc WEB3_PROVIDER_URI=http://192.168.1.2:8545 """ ) parser.add_argument( '--new', action='store_true', required=False, help='Generate a new account for the particular AGV' ) parser.add_argument( '--account', action='store_true', required=False, help='Get identificator (Ethereum address) of AGV from the private key stored in car.json' ) parser.add_argument( '--reg', action='store_true', required=False, help='Register the vehicle in the chain' ) parser.add_argument( '--verify', type=str, required=False, help='Verify battery' ) parser.add_argument( '--initiate_replacement', nargs=2, required=False, help='Initiate deal <car_battery> <sc_battery>' ) return parser def ask_for_replacement(car_battery_id: str, sc_battery_id: str, car_address: str) -> None: """ Ask service center for replacement approval :param str car_battery_id: Car's battery :param str sc_battery_id: Service center's battery :param str car_address: Car's blockchain address :return: Nothing :rtype: None """ if os.path.exists(f"scenter.py"): subprocess.run( [ "python", "scenter.py", "--approve_replacement", f"{car_battery_id}", f"{sc_battery_id}", f"{car_address}", ] ) else: sys.exit(f"{bcolors.FAIL}The asked service center does not exists{bcolors.ENDC}") def get_sc_address() -> str: """ Get address of the service center return: Service center's address rtype: str """ command = "python scenter.py --get_address".split(' ') result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) return result.stdout[:-1] def transfer_battery_to_sc(w3: Web3, car_battery_id: str, sc_address: str): """ Transfer battery to service center :param Web3 w3: Web3 instance :param str car_battery_id: Car's battery id :param str sc_battery_id: Service centers's battery id return: Nothing rtype: None """ data = utils.open_data_base(MGMT_CONTRACT_DB_NAME) if data is None: return 'Cannot access management contract database' data = 
utils.open_data_base(ACCOUNT_DB_NAME) if data is None: return 'Cannot access account database' private_key = data['key'] battery_mgmt_contract_addr = utils.get_battery_managment_contract_addr(w3) battery_mgmt_contract = utils.init_battery_management_contract(w3, battery_mgmt_contract_addr) car_address = w3.eth.account.privateKeyToAccount(private_key).address gas_price = utils.get_actual_gas_price(w3) nonce = w3.eth.getTransactionCount(car_address) tx = {'gasPrice': gas_price, 'nonce': nonce, 'gas': 2204 * 68 + 21000} reg_tx = battery_mgmt_contract.functions.transfer(sc_address, decode_hex(car_battery_id)).buildTransaction(tx) sign_tx = w3.eth.account.signTransaction(reg_tx, private_key) tx_hash = w3.eth.sendRawTransaction(sign_tx.rawTransaction) receipt = web3.eth.wait_for_transaction_receipt(w3, tx_hash, 120, 0.1) if receipt.status != 1: sys.exit(f"{bcolors.FAIL}The car does not own this battery!{bcolors.ENDC}") def get_new_battery(car_account: str, car_battery_id: str, sc_battery_id) -> float: """ Call battery replacement in service center :param str car_account: Car account :param str car_battery_id: Car's battery id :return: Work's cost :rtype: float """ command = f"python scenter.py --transfer_battery_to_car {car_account} {car_battery_id} {sc_battery_id}".split(' ') result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) return float(result.stdout[:-1]) def initiate_replacement(w3: Web3, car_battery_id: str, sc_battery_id: str) -> None: """ Initiate battery replacement :param Web3 w3: Web3 instance :param str car_battery_id: Car's battery :param str sc_battery_id: Service center's battery :return: Nothing :rtype: None """ sc_battery_id_path = f"firmware/{car_battery_id[:8]}.py" car_battery_id_path = f"firmware/{sc_battery_id[:8]}.py" print("Verifying battery...") data = utils.verify_battery(w3, sc_battery_id_path) if not data[0]: sys.exit(f"{bcolors.FAIL}The battery is fake{bcolors.ENDC}") 
sys.stdout.write("\033[F") #back to previous line sys.stdout.write("\033[K") #clear line print(f"Verifying battery...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713') print("Asking service center for replacement...") ask_for_replacement(car_battery_id, sc_battery_id, get_car_account_from_db(w3)) message = utils.open_data_base('replacement.json') if message is None: sys.exit(f"{bcolors.FAIL}Somethong went wrong...{bcolors.ENDC}") if not message['approved']: sys.exit(message['error']) sys.stdout.write("\033[F") #back to previous line sys.stdout.write("\033[K") #clear line print(f"Asking service center for replacement...{bcolors.OKGREEN}Approved{bcolors.ENDC}", u'\u2713') print("Getting address of the service center...") sc_address = get_sc_address() sys.stdout.write("\033[F") #back to previous line sys.stdout.write("\033[K") #clear line print(f"Getting address of the service center...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713') print("Transferring battery to the service center...") transfer_battery_to_sc(w3, car_battery_id, sc_address) sys.stdout.write("\033[F") #back to previous line sys.stdout.write("\033[K") #clear line print(f"Transferring battery to the service center...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713') print("Waiting for new battery installation...") result = get_new_battery(get_car_account_from_db(w3), car_battery_id, sc_battery_id) sys.stdout.write("\033[F") #back to previous line sys.stdout.write("\033[K") #clear line print(f"Battery was installed...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713') return result def main(): w3 = Web3(Web3.HTTPProvider(URL)) # configure provider to work with PoA chains w3.middleware_onion.inject(geth_poa_middleware, layer=0) parser = create_parser() args = parser.parse_args() if args.new: new_car_account(w3) elif args.account: print(get_car_account_from_db(w3)) elif args.reg: print(register_car(w3)) elif args.verify: data = utils.verify_battery(w3, args.verify) print(f"Verified: {data[0]}") 
print(f"Total charges: {data[1]}") print(f"Vendor id: {data[2]}") print(f"Vendor name: {data[3]}") elif args.initiate_replacement: cost = initiate_replacement(w3, args.initiate_replacement[0], args.initiate_replacement[1]) print(f"Cost of work: {cost} eth") if __name__ == "__main__": main()
acid9reen/bas
car.py
car.py
py
10,656
python
en
code
0
github-code
36
[ { "api_name": "utils.MGMT_CONTRACT_DB_NAME", "line_number": 17, "usage_type": "attribute" }, { "api_name": "utils.MGMT_CONTRACT_SRC_PATH", "line_number": 18, "usage_type": "attribute" }, { "api_name": "utils.open_data_base", "line_number": 19, "usage_type": "call" }, ...
74226159463
import argparse import utils parser = argparse.ArgumentParser(description="User need to submit job informations") parser.add_argument('--min', type=int, required=True, help='min num of nodes') parser.add_argument('--max', type=int, required=True, help="max num of nodes") parser.add_argument('--N', type=int, required=True, nargs='+', help='num of nodes for scaling') parser.add_argument('--O', type=float, required=True, nargs='+', help='objective ratio rate') parser.add_argument('--res_up', type=int, required=True, help="scale up overhead") parser.add_argument('--res_dw', type=int, required=True, help="scale down overhead") parser.add_argument('--path', required=True, help="execute script path") args = parser.parse_args() def main(): id = utils.submit_job(min=args.min, max=args.max, N=args.N, O=args.O, res_up=args.res_up, res_dw=args.res_dw, path=args.path) print("Job submitted! GUID:", str(id)) ''' # previous job submit id = m.submit_job(min=1, max=5, Ns=[1, 2, 3, 4, 5], Os=[1, 1.8, 2.6, 3.4, 4.2], res_up=3, res_dw=1, path="train.py") ''' if __name__ == "__main__": main()
BFTrainer/BFTrainer
BFSub.py
BFSub.py
py
1,166
python
en
code
3
github-code
36
[ { "api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call" }, { "api_name": "utils.submit_job", "line_number": 15, "usage_type": "call" } ]
39901615627
from django.http import HttpResponse, HttpResponseNotAllowed, \ HttpResponseRedirect from django.contrib.auth.decorators import login_required from django.template import loader from django.contrib import messages from django.core.urlresolvers import reverse from django.core.exceptions import ValidationError from new.utils import json_error, check_fields_in_data, MODEL_MAP, \ MODEL_FORM_MAP, get_template_for_model from browse.models import ReviewVote, Report, Review import json import datetime @login_required def edit(request, page=None, id=None): # Check that id exists for page. if page not in MODEL_MAP.keys(): return json_error({"error": "Unknown page requested."}) instances = MODEL_MAP[page].objects.filter(id=id) if len(instances) != 1: return json_error({"error": "Unknown {} id {} provided." .format(page, id)}) owner = None instance = instances[0] if hasattr(instance, "created_by"): owner = instance.created_by elif hasattr(instance, "owner"): owner = instance.owner if owner and owner != request.user: return json_error({"error": "You do not own this instance."}) # Functionality is so similar to new, just hand it off return new(request, page=page, id=id, type="edit") def new(request, type="new", page=None, id=None): if not request.user.is_authenticated(): if request.method == "POST": return json_error({"error": "Please login to add a {}." 
.format(page)}) else: redir = request.META.get("HTTP_REFERER") if not redir: redir = reverse("home") messages.error(request, "You must be logged in to add a {}.".format(page)) return HttpResponseRedirect(redir) model = None response = {"error": {"error": ""}} if request.method != "POST": return get_template_for_model(request, MODEL_FORM_MAP, page) data = json.loads(request.body.decode()) if page not in MODEL_MAP: return json_error({"error": "Requested page type \"{}\" does not have" " a known model.".format(page)}) if page not in MODEL_FORM_MAP.keys(): return json_error({"error": "Requested page type \"{}\" does not have" " a known form.".format(page)}) model = MODEL_MAP[page] form = MODEL_FORM_MAP[page] # If model has an owner or created by field, add us if form.needs_owner: data["owner"] = request.user elif form.needs_created_by: data["created_by"] = request.user # FIXME: Is this necessary? It seems like it should autoresolve this if page == "reviewcomment": data["target"] = Review.objects.get(id=int(data["target"])) res = check_fields_in_data(data, model, form) if res: return res # Look for any errors for k, v in response["error"].items(): if len(v) > 0: return HttpResponse(json.dumps(response)) try: emptyKeys = [] for key, value in data.items(): if value == '': emptyKeys.append(key) for key in emptyKeys: data.pop(key) print(data) if type == "new": # Try to create it new = model(**data) elif type == "edit": # We can assume it exists new = model.objects.get(id=id) for k, v in data.items(): setattr(new, k, data[k]) if hasattr(new, "updated_ts"): new.updated_ts = datetime.datetime.now() new.full_clean() except ValidationError as e: print("ERROR: " + str(e)) errorDict = {} for key, value in e.message_dict.items(): if isinstance(value, list): errorDict[key] = " ".join(value).strip("[]/'") return HttpResponse(json_error(errorDict)) for field in MODEL_FORM_MAP[page].Meta.fields: response["error"][field] = "" # clear errors new.save() response["id"] = new.id # return 
new id at top level. # Save and return all info return HttpResponse(json.dumps(response)) def addVote(request, wat=None): # I don't know where 'wat' is coming from, but it's not needed... if request.method == "POST": if not request.user.is_authenticated(): jsonResponse = {"success": False, "error": "User not logged in"} return HttpResponse(json.dumps(jsonResponse), content_type="application/json") review_id = request.POST.get("review-id") action = request.POST.get("action").lower() user = request.user review = Review.objects.get(id=review_id) try: vote = ReviewVote.objects.filter(target=review, owner=user) # If the vote exists, we need to change it based on input. # Currently votes are changed as such: # If the user presses the same direction as their current vote # then the vote is removed # If the user presses opposite their vote, the vote is changed # to the new direction if vote.exists(): vote = vote[0] if (vote.quality and action == "up") or \ (not vote.quality and action == "down"): vote.delete() else: vote.quality = (action == "up") vote.save() # vote doesn't exist yet, then it needs to be created. elif (action == "up" or action == "down"): vote = ReviewVote(target=review, owner=user, quality=(action == "up")) vote.save() except: jsonResponse = {"success": False, "error": "Could not complete vote"} return HttpResponse(json.dumps(jsonResponse), content_type="application/json") return HttpResponse(json.dumps({"success": True}), content_type="application/json") else: return HttpResponseNotAllowed(["POST"]) @login_required def report(request, model_name, id): """ This view serves both the proper form page and the POST requests for the report form page. It's essentially a clone of new but with a few fixes since the model is mucked up with metamadness. """ if model_name not in MODEL_MAP: if request.method != "POST": return HttpResponse("Unknown model name specified.") return json_error({"error": "Requested page type \"{}\" does not " "have a known model." 
.format(model_name) }) if model_name not in MODEL_FORM_MAP: if request.method != "POST": return HttpResponse("Unknown model name specified.") return json_error({"error": "Requested page type \"{}\" does not " "have a known form.".format(model_name) }) if request.method == "POST": res = {} data = json.loads(request.body.decode()) target_model = MODEL_MAP[model_name] form = MODEL_FORM_MAP["report"] inst = target_model.objects.get(id=id) if not inst: json_error({"error": "Unknown model instance id for provided model" " ({} for '{}').".format(id, model_name)}) err = check_fields_in_data(data, Report, form) if err: return err print(data) new = Report.create(target_model, id, request.user, data["summary"], data["text"]) new.save() res["id"] = new.id messages.success(request, "Added report!") return HttpResponse(json.dumps(res), content_type="application/json") else: inst = MODEL_MAP[model_name].objects.get(id=id) template = loader.get_template("new/report.html") context = {"instance": inst, "model": model_name, "id": id} return HttpResponse(template.render(context)) @login_required def resolve_report(request, report_id): """ This view serves both the proper form page and the POST requests for the resolve report form page. It's essentially a clone of report but with a few changes to make resolution better. 
""" # TODO: Check if staff inst = Report.objects.get(id=report_id) if not inst: return json_error({"error": "Unknown report with id {}".format(id)}) if inst.handled: return json_error({"error": "Report has already been resolved."}) if request.method == "POST": res = {} data = json.loads(request.body.decode()) if "text" not in data: return json_error({"text": "Missing text field."}) if "summary" not in data or data["summary"] == "": return json_error({"summary": "Missing action field."}) inst.resolve(by=request.user, comment=data["text"]) res["id"] = inst.id return HttpResponse(json.dumps(res), content_type="application/json") else: template = loader.get_template("new/resolve_report.html") context = {"instance": inst, "id": report_id} return HttpResponse(template.render(context))
brhoades/sweaters-but-with-peer-reviews
new/views.py
views.py
py
9,719
python
en
code
1
github-code
36
[ { "api_name": "new.utils.MODEL_MAP.keys", "line_number": 20, "usage_type": "call" }, { "api_name": "new.utils.MODEL_MAP", "line_number": 20, "usage_type": "name" }, { "api_name": "new.utils.json_error", "line_number": 21, "usage_type": "call" }, { "api_name": "new...
37298965152
import time import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) import json from pymongo import MongoClient i = 0 client = MongoClient('localhost',27017) db=client.comment collection=db.comment collection2=db.after def sentiment_classify(data): access_token='' http=urllib3.PoolManager() url='https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='+access_token params={'text':data} #进行json转换的时候,encode编码格式不指定也不会出错 encoded_data = json.dumps(params).encode('GBK') try: request=http.request('POST', url, body=encoded_data, headers={'Content-Type':'application/json'}) result = str(request.data,'GBK') result = json.loads(result) return result['items'][0]['sentiment'] except Exception as e: if result.get('error_code') == 18: print("error:qps limit",i, e, data, result) time.sleep(0.2) return sentiment_classify(data) def data_processing(): collection2.remove() for item in collection.find(): global i i+=1 comment = item.get('content') sentiment = sentiment_classify(comment) collection2.insert({'comment': comment,'sentiment':sentiment}) data_processing()
LogicJake/data_analysis
classfy/label.py
label.py
py
1,396
python
en
code
2
github-code
36
[ { "api_name": "urllib3.disable_warnings", "line_number": 3, "usage_type": "call" }, { "api_name": "urllib3.exceptions", "line_number": 3, "usage_type": "attribute" }, { "api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call" }, { "api_name": "url...
35146672008
import matplotlib.pyplot as plt import numpy as np fil = open("Breakout_step_RL") ret_RL = [] for line in fil: x = float(line) if(x==-10): x=0 ret_RL.append(x) fil.close() # fil = open("breakout_aveReturn") # ret_MT = [] # for line in fil: # ret_MT.append(float(line)) # fil.close() # fil = open("Step_RBF_FA") # ret_RBF = [] # for line in fil: # ret_RBF.append(float(line)) # fil.close() print(np.average(ret_RL)) # plt.plot(ret_MT,label="TileCoding") plt.plot(ret_RL,label="TileCoding with Velocity") # plt.plot(ret_RL,label="REINFORCE") plt.show()
sidistic/Atari-Breakout-Reinforcement-Learning
graph.py
graph.py
py
581
python
en
code
0
github-code
36
[ { "api_name": "numpy.average", "line_number": 24, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name" }, { "api_name": "matplotlib.pyp...
22355735455
import pytest import mlrun.common.schemas import mlrun.runtimes def test_enum_yaml_dump(): function = mlrun.new_function("function-name", kind="job") function.status.state = mlrun.common.schemas.FunctionState.ready print(function.to_yaml()) @pytest.mark.parametrize( "exclude_params,expected_result,is_empty", [ ( True, ( '{"spec": {"outputs": [], "secret_sources": [], "notifications": [{"kind": ' '"webhook", "name": "notification-test", "message": "completed", "severity": ' '"info", "when": ["completed", "error"], "condition": ""}]}, "metadata": ' '{"iteration": 0}, "status": {"state": "created"}}' ), False, ), ( False, ( '{"spec": {"outputs": [], "secret_sources": [], "notifications": [{"kind": ' '"webhook", "name": "notification-test", "message": "completed", "severity": ' '"info", "when": ["completed", "error"], "condition": "", "params": {"url": ' '"https://url", "method": "PUT", "override_body": "AAAAAAAAAAAAAAAAAAAA"}}]}, ' '"metadata": {"iteration": 0}, "status": {"state": "created"}}' ), False, ), ( True, ( '{"spec": {"outputs": [], "secret_sources": []}, "metadata": {"iteration": ' '0}, "status": {"state": "created"}}' ), True, ), ( False, ( '{"spec": {"outputs": [], "secret_sources": []}, "metadata": {"iteration": ' '0}, "status": {"state": "created"}}' ), True, ), ], ) def test_runobject_to_json_with_exclude_params( exclude_params, expected_result, is_empty ): run_object_to_test = mlrun.model.RunObject() notification = mlrun.model.Notification( kind="webhook", when=["completed", "error"], name="notification-test", message="completed", condition="", severity="info", params={"url": "https://url", "method": "PUT", "override_body": "A" * 20}, ) run_object_to_test.spec.notifications = [] if is_empty else [notification] # Call the to_json function with the exclude_notifications_params parameter json_result = run_object_to_test.to_json( exclude_notifications_params=exclude_params ) # Check if the JSON result matches the expected result assert json_result == 
expected_result # Ensure the 'params' attribute of the notification is set back to the object if not is_empty: for notification in run_object_to_test.spec.notifications: assert notification.params
mlrun/mlrun
tests/test_model.py
test_model.py
py
2,821
python
en
code
1,129
github-code
36
[ { "api_name": "mlrun.common.schemas.new_function", "line_number": 8, "usage_type": "call" }, { "api_name": "mlrun.common.schemas", "line_number": 8, "usage_type": "name" }, { "api_name": "mlrun.common.schemas.common", "line_number": 9, "usage_type": "attribute" }, { ...
74249579945
"""" Controls EC2 Services """ import boto3 import logging import os """ Ec2 controller: finds ec2 instances that have a devday tag, has the ability to stop, start and to modify their shutdown behaviour - to avoid termination """ class ec2Controller: STOPBEHAVIOUR = 'stop' def __init__(self, region, searchTag): self.region = region self.client = boto3.client('ec2', region_name= region) self.searchTag = searchTag.lower() self.logger = logging.getLogger(__name__) self.enabledServices = {} env = os.environ """ Main entry point to be called from ResourceFinder - finds all EC2 Services that have been tagged Returns a Map [instance id] : {state , platform , name} """ def findResourcesForEC2(self): ec2Map = self.findServices(running=False) # Get all EC2 running or not that are tagged return ec2Map """ Main entry point to signal a STOP of developer day event All tagged and running EC2 servers will be stopped """ def stopDayEvent(self): result = True totalResult=True ec2Map = self.findServices(running=True) # Find all those that are currently running if len(ec2Map) ==0: self.logger.info("There are currently no active EC2 instances that are tagged - they all seemed stopped or do not exist") return True self.correctShutDownBehaviour(ec2Map) try: for ec2instance in ec2Map: ec2Dict = ec2Map[ec2instance] state = ec2Dict["state"] platform = ec2Dict["platform"] name = ec2Dict["name"] if state=="running": response = self.client.stop_instances( InstanceIds = [ec2instance] ) cs = response['StoppingInstances'][0]['CurrentState']['Name'] self.logger.info(f"Shutting down instance {name} id {ec2instance}, plaform {platform} moving from running --> {cs}") result = ("stopping" == cs) if not result: totalResult = False except Exception as e: self.logger.error("Could not stop all EC2 instances ") self.logger.exception(e) totalResult = False return totalResult """ Main entry point to signal a START of developer day event Finds all tagged Ec2 servers that are currently stopped """ def 
startDayEvent(self): result = True totalResult = True ec2Map = self.findServices(running=False) # Find all those that are currently stopped if len(ec2Map) == 0: self.logger.info( "There are currently no stopped EC2 instances that are tagged - they are either running or dont exist") return True try: for ec2instance in ec2Map: ec2Dict = ec2Map[ec2instance] state = ec2Dict["state"] platform = ec2Dict["platform"] name = ec2Dict["name"] if state=="stopped": response = self.client.start_instances( InstanceIds = [ec2instance] ) cs = response['StartingInstances'][0]['CurrentState']['Name'] self.logger.info(f"Starting up instance {name} id {ec2instance}, plaform {platform} moving from stopped --> {cs}") result = ("pending" == cs) if not result: totalResult = False except Exception as e: self.logger.error("Could not start all EC2 instances ") self.logger.exception(e) totalResult = False return totalResult """ Checks the SERVICE ARN for the special searchTag - and see if the Tag is set to TRUE return True or False """ def _checkforTag(self,tagsDict): self.logger.debug(f"Tags are {tagsDict}") for tag in tagsDict: key = tag.get('Key') if key is not None: value=tag['Value'].lower() if key.lower() == self.searchTag and value=='true': return True return False """ Finds all Ec2 instances that exist with a dev day tag if the running parameter is set to True only instances that are currently running will be picked up, passing False will flag all those that are stopped Returns a MAP of [instance id] : {state , platform , name} """ def findServices(self, running=True): serviceMap = {} try: response = self.client.describe_instances() nextToken = "A" while nextToken is not None: nextToken = response.get("NextToken") reservationL = response.get("Reservations",[]) for reservation in reservationL: instanceL = reservation.get("Instances",[]) for ins in instanceL: self.logger.debug(f"Instance Details: {ins} ") instanceId = ins["InstanceId"] platform = ins.get("Platform","Linux") state = 
ins["State"]['Name'] tags = ins.get('Tags',[]) name = '(no name)' for tag in tags: k = tag['Key'] if k.lower() =='name': name = tag['Value'] break if self._checkforTag(tags): self.logger.info(f"EC2: {name} instance-id {instanceId} - platform {platform}, current state {state} is tagged for Developer day/night") if (running and state=="running") or (not running and state=="stopped"): serviceMap[instanceId] = {"state" : state, "platform" : platform, "name": name} else: self.logger.info(f"EC2: skipping instance_id {instanceId} {name} as it is already in the desired state") else: self.logger.info(f"EC2: skipping untagged instance_id {instanceId} {name}") if nextToken is not None: response = self.client.describe_instances(NextToken=nextToken) except Exception as e: self.logger.warning(f"Could not access the instances in the region {self.region}") return serviceMap """ Makes sure the instances are not terminated when they are shutdown - this method returns the behaviour """ def _getShutdownBehavior(self, instanceID): response = self.client.describe_instance_attribute( Attribute= 'instanceInitiatedShutdownBehavior' , InstanceId=instanceID) behaviour = response['InstanceInitiatedShutdownBehavior']['Value'] self.logger.info(f"instance {instanceID}, shutdown behaviour is currently set to {behaviour}") return behaviour def correctShutDownBehaviour(self, serviceMap): self.logger.info("EC2: Checking and correcting the shutdown behaviour to avoid instance termination when sleeping") for instance in serviceMap: behaviour = self._getShutdownBehavior(instance) if not behaviour == self.STOPBEHAVIOUR: self.logger.info(f"EC2: Correcting Shutdown behaviour.... on instance {instance}") response =self.client.modify_instance_attribute( InstanceId = instance, InstanceInitiatedShutdownBehavior={"Value" : self.STOPBEHAVIOUR}) else: self.logger.info(f"EC2: shutdown behaviour on instance {instance} already correctly set to STOP")
evoraglobal/SleepSaver
ec2Controller.py
ec2Controller.py
py
7,948
python
en
code
0
github-code
36
[ { "api_name": "boto3.client", "line_number": 18, "usage_type": "call" }, { "api_name": "logging.getLogger", "line_number": 20, "usage_type": "call" }, { "api_name": "os.environ", "line_number": 22, "usage_type": "attribute" } ]
6939797470
from threading import Thread from flask import Flask, render_template from tornado.ioloop import IOLoop from bokeh.embed import server_document from bokeh.layouts import column from bokeh.plotting import figure from bokeh.server.server import Server from bokeh.themes import Theme import numpy as np from bokeh.models import ColumnDataSource import paho.mqtt.client as mqtt app = Flask(__name__) def bkapp(doc): pwr_queue = [] ts_queue = [] def on_message(client, userdata, message): ts, pwr = map(float, message.payload.split(b',')) ts_queue.append(ts) pwr_queue.append(pwr) cds = ColumnDataSource(data={'x': [], 'y': []}) def callback(): nonlocal ts_queue, pwr_queue # append any new data to the graph cds.stream({'x': ts_queue, 'y': pwr_queue}) # then clear the queues pwr_queue.clear() ts_queue.clear() p = figure(sizing_mode='stretch_width', title='MQTT streaming example') random255 = lambda: np.random.randint(255) color = tuple(random255() for _ in range(3)) p.line('x', 'y', source=cds, color=color) doc.add_root(column(p)) # init client client = mqtt.Client("stream") client.connect("localhost") client.subscribe("plug/0") client.on_message=on_message # loop client.loop_start() # Runs a loop in a background thread doc.add_periodic_callback(callback, 100) @app.route('/', methods=['GET']) def bkapp_page(): script = server_document('http://127.0.0.1:5006/bkapp') return render_template("mqtt.html", script=script, template="Flask") def bk_worker(): # Can't pass num_procs > 1 in this configuration. server = Server({'/bkapp': bkapp}, io_loop=IOLoop(), allow_websocket_origin=["127.0.0.1:8000"]) server.start() server.io_loop.start() Thread(target=bk_worker).start() if __name__ == '__main__': app.run(port=8000)
marnatgon/Senior-Design
software/example/flask/mqtt.py
mqtt.py
py
1,891
python
en
code
0
github-code
36
[ { "api_name": "flask.Flask", "line_number": 18, "usage_type": "call" }, { "api_name": "bokeh.models.ColumnDataSource", "line_number": 32, "usage_type": "call" }, { "api_name": "bokeh.plotting.figure", "line_number": 44, "usage_type": "call" }, { "api_name": "numpy...
35866803773
import re from datetime import date from typing import Optional import docx # type: ignore from adaptive_hockey_federation.parser.user_card import BaseUserInfo NAME = '[И|и][М|м][Я|я]' SURNAME = '[Ф|ф][А|а][М|м][И|и][Л|л][И|и][Я|я]' PATRONYMIC = '[О|о][Т|т]?[Ч|ч][Е|е][С|с][Т|т][В|в][О|о]' DATE_OF_BIRTH = '[Д|д][А|а][Т|т][А|а] [Р|р][О|о].+' TEAM = '[К|к][О|о][М|м][А|а][Н|н][Д|д][А|а]' PLAYER_NUMBER = '[И|и][Г|г][Р|р][О|о][В|в][О|о][Й|й]' POSITION = '[П|п][О|о][З|з][И|и][Ц|ц][И|и][Я|я]' NUMERIC_STATUS = '[Ч|ч].+[С|с][Т|т].+' PLAYER_CLASS = '[К|к][Л|л][А|а][С|с][С|с]' def read_file_columns(file: docx) -> list[docx]: """Функция находит таблицы в файле и возвращает список объектов docx с данными каждого столбца. """ return [ column for table in file.tables for index, column in enumerate(table.columns) ] def read_file_text(file: docx) -> list[str]: """Функция находит текстовые данные в файле и возвращает список объектов docx с найденными данными. """ return [ run.text for paragraph in file.paragraphs for run in paragraph.runs ] def get_counter_for_columns_parser( columns: list[docx] ) -> int: count = 0 for column in columns: for index, cell in enumerate(column.cells): if re.search(r'п/п', cell.text): for cell in column.cells[index + 1:]: if cell.text and len(cell.text) < 4: count += 1 else: break else: if count > 0: break return count def columns_parser( columns: list[docx], regular_expression: str, ) -> list[Optional[str]]: """Функция находит столбец по названию и списком выводит содержимое каждой ячейки этого столбца. 
""" output = [ text if text else None for column in columns if re.search( regular_expression, list(cell.text for cell in column.cells)[0] ) for text in list(cell.text for cell in column.cells)[1:] ] if not output: count = get_counter_for_columns_parser(columns) for column in columns: for index, cell in enumerate(column.cells): if re.search(regular_expression, cell.text): for cell in column.cells[index + 1:index + 1 + count]: output.append(cell.text) return output def find_names(columns: list[docx], regular_expression: str) -> list[str]: """Функция парсит в искомом столбце имена. Опирается на шаблон ФИО (имя идет после фамилии на втором месте). """ names_list = columns_parser(columns, regular_expression) return [ name.split()[1].rstrip() for name in names_list if name ] def find_surnames(columns: list[docx], regular_expression: str) -> list[str]: """Функция парсит в искомом столбце фамилии. Опирается на шаблон ФИО (фамилия идет на первом месте). """ surnames_list = columns_parser(columns, regular_expression) return [ surname.split()[0].rstrip() for surname in surnames_list if surname ] def find_patronymics( columns: list[docx], regular_expression: str, ) -> list[str]: """Функция парсит в искомом столбце отчества. Опирается на шаблон ФИО (отчество идет на последнем месте). """ patronymics_list = columns_parser(columns, regular_expression) return [ patronymic.replace('/', ' ').split()[2].rstrip().rstrip(',') if patronymic and len(patronymic.split()) > 2 else 'Отчество отсутствует' for patronymic in patronymics_list ] def find_dates_of_birth( columns: list[docx], regular_expression: str, ) -> list[date]: """Функция парсит в искомом столбце дату рождения и опирается на шаблон дд.мм.гггг. 
""" dates_of_birth_list = columns_parser(columns, regular_expression) dates_of_birth_list_clear = [] for date_of_birth in dates_of_birth_list: if date_of_birth: try: for day, month, year in [ re.sub(r'\D', ' ', date_of_birth).split() ]: if len(year) == 2: if int(year) > 23: year = '19' + year else: year = '20' + year dates_of_birth_list_clear.append( date(int(year), int(month), int(day)) ) except ValueError or IndexError: # type: ignore dates_of_birth_list_clear.append(date(1900, 1, 1)) else: dates_of_birth_list_clear.append(date(1900, 1, 1)) return dates_of_birth_list_clear def find_team( text: list[str], columns: list[docx], regular_expression: str, ) -> str: """Функция парсит название команды. """ text_clear = ' '.join(text) text_clear = re.sub( r'\W+|_+|ХК|СХК|ДЮСХК|Хоккейный клуб|по незрячему хоккею' '|по специальному хоккею|Спец хоккей|по специальному|по следж-хоккею', ' ', text_clear ).split() # type: ignore try: return [ 'Молния Прикамья' if text_clear[index + 2] == 'Прикамья' else 'Ак Барс' if text_clear[index + 1] == 'Ак' else 'Снежные Барсы' if text_clear[index + 1] == 'Снежные' else 'Хоккей Для Детей' if text_clear[index + 1] == 'Хоккей' else 'Дети-Икс' if text_clear[index + 1] == 'Дети' else 'СКА-Стрела' if text_clear[index + 1] == 'СКА' else 'Сборная Новосибирской области' if text_clear[index + 2] == 'Новосибирской' else 'Атал' if text_clear[index + 3] == 'Атал' else 'Крылья Мечты' if text_clear[index + 2] == 'мечты' else 'Огни Магнитки' if text_clear[index + 1] == 'Огни' else 'Энергия Жизни Краснодар' if text_clear[index + 3] == 'Краснодар' else 'Энергия Жизни Сочи' if text_clear[index + 4] == 'Сочи' else 'Динамо-Москва' if text_clear[index + 1] == 'Динамо' else 'Крылья Советов' if text_clear[index + 2] == 'Советов' else 'Красная Ракета' if text_clear[index + 2] == 'Ракета' else 'Красная Молния' if text_clear[index + 2] == 'молния' else 'Сахалинские Львята' if text_clear[index + 1] == 'Сахалинские' else 'Мамонтята Югры' if text_clear[index + 1] == 
'Мамонтята' else 'Уральские Волки' if text_clear[index + 1] == 'Уральские' else 'Нет названия команды' if text_clear[index + 1] == 'Всего' else text_clear[index + 1].capitalize() for index, txt in enumerate(text_clear) if re.search(regular_expression, txt) ][0] except IndexError: for column in columns: for cell in column.cells: if re.search(regular_expression, cell.text): txt = re.sub(r'\W', ' ', cell.text) return txt.split()[1].capitalize() return 'Название команды не найдено' def find_players_number( columns: list[docx], regular_expression: str, ) -> list[int]: """Функция парсит в искомом столбце номер игрока. """ players_number_list = columns_parser(columns, regular_expression) players_number_list_clear = [] for player_number in players_number_list: if player_number: try: players_number_list_clear.append( int(re.sub(r'\D', '', player_number)[:2]) ) except ValueError: players_number_list_clear.append(0) else: players_number_list_clear.append(0) return players_number_list_clear def find_positions(columns: list[docx], regular_expression: str) -> list[str]: """Функция парсит в искомом столбце позицию игрока на поле. 
""" positions_list = columns_parser(columns, regular_expression) return [ 'нападающий' if re.search( r'^н|^Н|^H|^Нп|^нл|^нп|^цн|^лн|^Нап|^№|^А,|^К,', position.lstrip() ) else 'защитник' if re.search(r'^з|^З|^Зщ|^Защ', position.lstrip()) else 'вратарь' if re.search(r'^Вр|^В|^вр', position.lstrip()) else 'Позиция записана неверно' if not re.sub(r'\n|\(.+|\d', '', position) else re.sub( r'\n|\(.+|\d|Капитан', '', position ).lower().rstrip().replace(',', '').lstrip() for position in positions_list if position ] def find_numeric_statuses(file: docx) -> list[list[str]]: numeric_statuses_list = [] for table in file.tables: for row in table.rows: txt = row.cells[1].text.title() txt = re.sub(r'\W|Коляс.+|Здоровый', ' ', txt) if len(txt.split()) <= 4: try: numeric_status = row.cells[4].text numeric_status = re.sub(r'\D', '', numeric_status) if numeric_status: if len(txt.split()) == 2: txt += ' Отчество отсутствует' numeric_statuses_list.append( txt.split()[:3] + [numeric_status] ) except IndexError: pass return numeric_statuses_list def numeric_status_check( name: str, surname: str, patronymics: str, statuses: list[list[str]], ) -> Optional[int]: for status in statuses: if surname == status[0]: if name == status[1]: if patronymics.split()[0] == status[2]: return int(status[3]) return None def docx_parser( path: str, numeric_statuses: list[list[str]] ) -> list[BaseUserInfo]: """Функция собирает все данные об игроке и передает их в dataclass. 
""" file = docx.Document(path) columns_from_file = read_file_columns(file) text_from_file = read_file_text(file) names = find_names(columns_from_file, NAME) surnames = find_surnames(columns_from_file, SURNAME) patronymics = find_patronymics(columns_from_file, PATRONYMIC) dates_of_birth = find_dates_of_birth( columns_from_file, DATE_OF_BIRTH, ) team = find_team(text_from_file, columns_from_file, TEAM) players_number = find_players_number(columns_from_file, PLAYER_NUMBER) positions = find_positions(columns_from_file, POSITION) return [ BaseUserInfo( name=names[index], surname=surnames[index], date_of_birth=dates_of_birth[index], team=team, player_number=players_number[index], position=positions[index], numeric_status=numeric_status_check( names[index], surnames[index], patronymics[index], numeric_statuses, ), patronymic=patronymics[index], ) for index in range(len(names)) ]
Studio-Yandex-Practicum/adaptive_hockey_federation
adaptive_hockey_federation/parser/docx_parser.py
docx_parser.py
py
12,958
python
ru
code
2
github-code
36
[ { "api_name": "re.search", "line_number": 48, "usage_type": "call" }, { "api_name": "re.search", "line_number": 71, "usage_type": "call" }, { "api_name": "re.search", "line_number": 81, "usage_type": "call" }, { "api_name": "typing.Optional", "line_number": 63...
10665778183
import math
import os
from glumpy import glm
from PIL import Image, ImageTk
import numpy
import tkinter
import cv2


def load_image(file_name, size):
    """Load an image from disk, resize it, and return it as both a PIL image
    and a tkinter-compatible PhotoImage.

    :param file_name: path of the image file to open.
    :param size: (width, height) tuple passed to cv2.resize.
    :return: tuple (PIL.Image, ImageTk.PhotoImage) of the resized image.
    """
    image = Image.open(file_name)
    image = numpy.array(image)
    # NOTE(review): COLOR_BGRA2BGR assumes the decoded file has 4 channels
    # (e.g. a PNG with alpha); a 3-channel input would make cvtColor raise —
    # TODO confirm all loaded assets carry an alpha channel.
    image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
    # Nearest-neighbour keeps the blocky pixel-art look when scaling.
    image = cv2.resize(image, size, interpolation=cv2.INTER_NEAREST)
    image = Image.fromarray(image)
    photo_image = ImageTk.PhotoImage(image)
    return image, photo_image


class ImgToChunk():
    """Interactive tkinter tool: overlays a wireframe cube on a reference
    image, lets the user move/rotate it with the keyboard, and shows each cube
    face alongside the most visually similar block texture."""

    def __init__(self):
        super(ImgToChunk, self).__init__()
        self.window = tkinter.Tk(className='ImgToChunk')
        self.window.geometry('1600x900')
        # Keyboard controls: wasd/qe translate the cube, r/f and t/g rotate it
        # (see key_fn for the exact mapping).
        self.window.bind('w', lambda event: self.key_fn('w'))
        self.window.bind('a', lambda event: self.key_fn('a'))
        self.window.bind('s', lambda event: self.key_fn('s'))
        self.window.bind('d', lambda event: self.key_fn('d'))
        self.window.bind('q', lambda event: self.key_fn('q'))
        self.window.bind('e', lambda event: self.key_fn('e'))
        self.window.bind('r', lambda event: self.key_fn('r'))
        self.window.bind('f', lambda event: self.key_fn('f'))
        self.window.bind('t', lambda event: self.key_fn('t'))
        self.window.bind('g', lambda event: self.key_fn('g'))
        self.image, self.photo_image = load_image('data/1.png', (1280, 720))
        self.image_canvas = tkinter.Canvas(self.window, width=1280, height=720)
        self.image_canvas.create_image((0, 0), image=self.photo_image, anchor='nw')
        self.image_canvas.place(x=10, y=10)
        self.image_canvas.bind("<Button-1>", self.image_click_fn)
        # Cube pose: position in camera space plus two Euler angles (degrees).
        self.cube_x = 0
        self.cube_y = 0
        self.cube_z = -5
        self.yaw = 0
        self.roll = 0
        self.projection = glm.perspective(70, float(1280) / 720, 0.1, 100)
        self.cube_image = self.image
        self.cube_image_tk = self.photo_image
        # 6x2 grid of thumbnails: column 0 = warped cube-face crop,
        # column 1 = best-matching block texture.
        self.grid_canvases = []
        self.grid_images = []
        self.grid_photo_images = []
        for row in range(6):
            self.grid_canvases.append([])
            self.grid_images.append([])
            self.grid_photo_images.append([])
            for col in range(2):
                grid_image, grid_photo_image = load_image('block/dirt.png', (125, 125))
                self.grid_images[row].append(grid_image)
                self.grid_photo_images[row].append(grid_photo_image)
                canvas = tkinter.Canvas(self.window, width=125, height=125)
                canvas.place(x=1300 + col * 125, y=0 + row * 125)
                canvas.create_image((0, 0), image=grid_photo_image, anchor='nw')
                self.grid_canvases[row].append(canvas)
        # Status labels echoing the current cube pose.
        self.x_label = tkinter.Label(self.window, text='x: ')
        self.x_label.config(font=('Courier', 12), width=20)
        self.x_label.place(x=10, y=740)
        self.y_label = tkinter.Label(self.window, text='y: ')
        self.y_label.config(font=('Courier', 12), width=20)
        self.y_label.place(x=210, y=740)
        self.z_label = tkinter.Label(self.window, text='z: ')
        self.z_label.config(font=('Courier', 12), width=20)
        self.z_label.place(x=410, y=740)
        self.roll_label = tkinter.Label(self.window, text='roll: ')
        self.roll_label.config(font=('Courier', 12), width=20)
        self.roll_label.place(x=610, y=740)
        self.yaw_label = tkinter.Label(self.window, text='yaw: ')
        self.yaw_label.config(font=('Courier', 12), width=20)
        self.yaw_label.place(x=810, y=740)
        # NOTE(review): the block below is disabled, but image_click_fn,
        # entry_click_fn and update_button_fn still reference the attributes it
        # would create (image_x_entry, x_line, image_lines, grid_entries,
        # selected_entry_row, entry_grids). Clicking the image canvas will
        # therefore raise AttributeError — confirm whether the manual-entry UI
        # should be re-enabled or the handlers removed.
        # self.x_line = None
        # self.y_line = None
        #
        # self.image_x_label = tkinter.Label(self.window, text='image x:')
        # self.image_x_label.place(x=1300, y=10)
        # self.image_x_label.config(font=("Courier", 15))
        # self.image_x_entry = tkinter.Entry(self.window)
        # self.image_x_entry.place(x=1400, y=10)
        # self.image_x_entry.config(font=("Courier", 15), width=10)
        #
        # self.image_y_label = tkinter.Label(self.window, text='image y:')
        # self.image_y_label.place(x=1300, y=35)
        # self.image_y_label.config(font=("Courier", 15))
        # self.image_y_entry = tkinter.Entry(self.window)
        # self.image_y_entry.place(x=1400, y=35)
        # self.image_y_entry.config(font=("Courier", 15), width=10)
        #
        # self.image_lines = []
        #
        # self.update_button = tkinter.Button(self.window, text='Update', width=10, command=self.update_button_fn)
        # self.update_button.place(x=1400, y=65)
        #
        # self.selected_entry_row = 0
        # self.selected_entry_col = 0
        # self.grid_entries = []
        # self.entry_grids = {}
        # for row in range(10):
        #     self.grid_entries.append([])
        #     for col in range(2):
        #         entry = tkinter.Entry(self.window)
        #         entry.place(x=1300 + 50 * col, y=65 + 25 * row)
        #         entry.config(font=("Courier", 15), width=4)
        #         entry.bind('<1>', self.entry_click_fn)
        #         entry.bind('<Enter>', self.update_button_fn)
        #         self.grid_entries[row].append(entry)
        #         self.entry_grids[entry] = (row, col)
        # Candidate textures used by get_most_similar_image.
        self.block_images = []
        for root, dir, files in os.walk('block_subset'):
            for file in files:
                path = os.path.join(root, file)
                block_image, block_image_tk = load_image(path, (200, 200))
                self.block_images.append(block_image)
        # Blocks here until the window is closed.
        self.window.mainloop()

    def key_fn(self, key):
        """Handle a keypress: update the cube pose, re-project and redraw the
        cube wireframe, and refresh the per-face texture-match grid."""
        if key == 'a':  # Left
            self.cube_x -= 0.1
        elif key == 'd':  # Right
            self.cube_x += 0.1
        elif key == 'q':  # Up
            self.cube_y -= 0.1
        elif key == 'e':  # Down
            self.cube_y += 0.1
        elif key == 'w':  # Up
            self.cube_z -= 0.1
        elif key == 's':  # Down
            self.cube_z += 0.1
        elif key == 'r':  # Up
            self.roll += 1
        elif key == 'f':  # Down
            self.roll += -1
        elif key == 't':  # Up
            self.yaw += 1
        elif key == 'g':  # Down
            self.yaw += -1
        self.x_label['text'] = 'x: {:2f}'.format(self.cube_x)
        self.y_label['text'] = 'y: {:2f}'.format(self.cube_y)
        self.z_label['text'] = 'z: {:2f}'.format(self.cube_z)
        self.roll_label['text'] = 'roll: {:2f}'.format(self.roll)
        self.yaw_label['text'] = 'yaw: {:2f}'.format(self.yaw)
        # Build view/model matrices; glumpy's glm mutates the matrix in place.
        view = numpy.eye(4, dtype=numpy.float32)
        glm.rotate(view, self.yaw, 0, 1, 0)
        glm.rotate(view, self.roll, 1, 0, 0)
        model = numpy.eye(4, dtype=numpy.float32)
        glm.translate(model, self.cube_x, self.cube_y, self.cube_z)
        # Unit cube corners -> homogeneous coords -> clip space.
        vertices = numpy.array([[1, 1, 1], [0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 0]])
        vertices = numpy.column_stack((vertices, numpy.ones((vertices.shape[0], 1)))) @ model @ view @ self.projection
        # Perspective divide, then map NDC to 1280x720 pixel coordinates.
        vertices = vertices[:, :2] / numpy.reshape(vertices[:, 3], (vertices.shape[0], 1))
        vertices = (vertices + 1) * numpy.array([1280 / 2, 720 / 2])
        # Vertex indices of the six cube faces.
        indices = [[0, 1, 2, 3], [0, 1, 6, 5], [0, 5, 4, 3], [1, 6, 7, 2], [3, 4, 7, 2], [5, 6, 7, 4]]
        polygons = [numpy.array([vertices[indices[i]]]).astype(int) for i in range(len(indices))]
        # Draw the wireframe cube on a fresh copy of the base image.
        self.cube_image = numpy.array(self.image, numpy.uint8)
        cv2.polylines(self.cube_image, polygons, True, 255)
        self.cube_image = Image.fromarray(self.cube_image)
        self.cube_image_tk = ImageTk.PhotoImage(self.cube_image)
        self.image_canvas.delete('all')
        self.image_canvas.create_image((0, 0), image=self.cube_image_tk, anchor='nw')
        # For each face: warp the underlying image region into a 125x125 tile
        # and show it next to its best-matching block texture.
        dst_points = numpy.array([[125, 125], [0, 125], [0, 0], [125, 0]])
        for row, polygon in enumerate(polygons):
            homography, status = cv2.findHomography(polygon, dst_points)
            tile_image = numpy.array(self.image, numpy.uint8)
            tile_image = cv2.warpPerspective(tile_image, homography, (125, 125))
            self.grid_images[row][0] = Image.fromarray(tile_image)
            self.grid_images[row][1] = self.get_most_similar_image(self.grid_images[row][0])
            self.grid_photo_images[row][0] = ImageTk.PhotoImage(self.grid_images[row][0])
            self.grid_photo_images[row][1] = ImageTk.PhotoImage(self.grid_images[row][1])
            self.grid_canvases[row][0].delete('all')
            self.grid_canvases[row][0].create_image((0, 0), image=self.grid_photo_images[row][0], anchor='nw')
            self.grid_canvases[row][1].delete('all')
            self.grid_canvases[row][1].create_image((0, 0), image=self.grid_photo_images[row][1], anchor='nw')

    def image_click_fn(self, event):
        """Record a clicked image coordinate into the manual-entry widgets and
        draw a crosshair at the click position.

        NOTE(review): depends on image_x_entry/image_y_entry, grid_entries,
        selected_entry_row, x_line and y_line, whose creation is commented out
        in __init__ — this handler currently raises AttributeError when fired.
        """
        self.image_x_entry.delete(0, tkinter.END)
        self.image_x_entry.insert(0, '{}'.format(event.x))
        self.image_y_entry.delete(0, tkinter.END)
        self.image_y_entry.insert(0, '{}'.format(event.y))
        if self.selected_entry_row is not None:
            self.grid_entries[self.selected_entry_row][0].delete(0, tkinter.END)
            self.grid_entries[self.selected_entry_row][0].insert(0, '{}'.format(event.x))
            self.grid_entries[self.selected_entry_row][1].delete(0, tkinter.END)
            self.grid_entries[self.selected_entry_row][1].insert(0, '{}'.format(event.y))
            # Advance to the next of the four corner rows, wrapping after row 3.
            self.selected_entry_row += 1
            if self.selected_entry_row > 3:
                self.selected_entry_row = 0
        if self.x_line:
            self.image_canvas.delete(self.x_line)
        if self.y_line:
            self.image_canvas.delete(self.y_line)
        self.x_line = self.image_canvas.create_line(event.x, event.y - 10, event.x, event.y + 10, fill='white', width=2)
        self.y_line = self.image_canvas.create_line(event.x - 10, event.y, event.x + 10, event.y, fill='white', width=2)
        self.update_button_fn()

    def entry_click_fn(self, event: tkinter.Event):
        """Remember which manual-entry cell was clicked so image clicks fill it.

        NOTE(review): relies on entry_grids, which is only built in the
        commented-out section of __init__.
        """
        if event.widget in self.entry_grids:
            self.selected_entry_row = self.entry_grids[event.widget][0]
            self.selected_entry_col = self.entry_grids[event.widget][1]
            print(self.selected_entry_row, self.selected_entry_col)

    def update_button_fn(self, event=None):
        """Read the four manually entered quad corners, outline them on the
        canvas, and compute the homography to a 200x200 square.

        NOTE(review): relies on grid_entries/image_lines from the commented-out
        section of __init__; returns early if any entry is not a digit string.
        """
        src_points = []
        for row in range(4):
            src_points.append([])
            for col in range(2):
                s_val = self.grid_entries[row][col].get()
                if s_val.isdigit():
                    src_points[row].append(int(s_val))
                else:
                    # Incomplete input: leave the display unchanged.
                    return
        for image_line in self.image_lines:
            self.image_canvas.delete(image_line)
        self.image_lines = [self.image_canvas.create_line(src_points[0][0], src_points[0][1], src_points[1][0], src_points[1][1], fill='white', width=2),
                            self.image_canvas.create_line(src_points[1][0], src_points[1][1], src_points[2][0], src_points[2][1], fill='white', width=2),
                            self.image_canvas.create_line(src_points[2][0], src_points[2][1], src_points[3][0], src_points[3][1], fill='white', width=2),
                            self.image_canvas.create_line(src_points[3][0], src_points[3][1], src_points[0][0], src_points[0][1], fill='white', width=2)
                            ]
        src_points = numpy.array(src_points)
        dst_points = numpy.array([[0, 0], [0, 200], [200, 200], [200, 0]])
        # Homography is computed but its use below is disabled.
        homography, status = cv2.findHomography(src_points, dst_points)
        # self.trans_tile_canvas.delete('all')
        # self.trans_tile_image = cv2.warpPerspective(numpy.array(self.image), homography, (200, 200))
        # self.trans_tile_image = Image.fromarray(self.trans_tile_image)
        # self.trans_tile_photo_image = ImageTk.PhotoImage(self.trans_tile_image)
        # self.trans_tile_canvas.create_image((0, 0), image=self.trans_tile_photo_image, anchor='nw')
        #
        # self.pred_tile_canvas.delete('all')
        # self.pred_tile_image = self.get_most_similar_image(self.trans_tile_image)
        # self.pred_tile_photo_image = ImageTk.PhotoImage(self.pred_tile_image)
        # self.pred_tile_canvas.create_image((0, 0), image=self.pred_tile_photo_image, anchor='nw')

    def get_most_similar_image(self, trans_tile_image):
        """Return the block texture closest to *trans_tile_image*.

        Both images are downsampled to 16x16 and compared by the sum of
        absolute per-pixel differences (L1 distance); the block image with the
        smallest distance wins. Returns None if self.block_images is empty.
        """
        min_result = math.inf
        min_image = None
        for i, block_image in enumerate(self.block_images):
            trans_tile_image2 = cv2.resize(numpy.array(trans_tile_image), (16, 16))
            block_image2 = cv2.resize(numpy.array(block_image), (16, 16))
            # astype(int) avoids uint8 wrap-around in the subtraction.
            result = trans_tile_image2.astype(int) - block_image2
            result = numpy.sum(numpy.abs(result))
            #result = result * result
            #result = numpy.sum(result)
            # cv2.imshow('abc', numpy.array(trans_tile_image))
            # cv2.waitKey()
            # cv2.imshow('abc', numpy.array(block_image))
            # cv2.waitKey()
            if result < min_result:
                min_result = result
                min_image = block_image
        return min_image


# Launch the GUI immediately on import/run.
img_to_chunk = ImgToChunk()
chahyon-ku/ImgToChunk
ImgToChunk.py
ImgToChunk.py
py
14,064
python
en
code
0
github-code
36
[ { "api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call" }, { "api_name": "PIL.Image", "line_number": 12, "usage_type": "name" }, { "api_name": "numpy.array", "line_number": 13, "usage_type": "call" }, { "api_name": "cv2.cvtColor", "line_number"...
21626075739
from datetime import datetime
import os


def capture_pic(driver):
    """Save a screenshot of the driver's current page.

    The file is written to ``<parent-of-cwd>/picture/<timestamp>.png`` where
    the timestamp is ``YYYYmmddHHMMSS``.

    Args:
        driver: a Selenium WebDriver (any object exposing
            ``get_screenshot_as_file(path)``).
    """
    # BUG FIX: the format string was '%Y%m%m%H%M%S' — month twice, day never —
    # so screenshots from different days of the same month could collide.
    pt = datetime.now().strftime('%Y%m%d%H%M%S')
    base_path = os.path.dirname(os.getcwd())
    # NOTE: the 'picture' directory is assumed to already exist; Selenium's
    # get_screenshot_as_file returns False (does not raise) if the write fails.
    pic_name = os.path.join(base_path, 'picture', pt + '.png')
    driver.get_screenshot_as_file(pic_name)
litongtongx/test
common/picCapture.py
picCapture.py
py
268
python
en
code
0
github-code
36
[ { "api_name": "datetime.datetime.now", "line_number": 6, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 6, "usage_type": "name" }, { "api_name": "os.path.dirname", "line_number": 7, "usage_type": "call" }, { "api_name": "os.path", "l...
24752052108
import os

from sqlalchemy import (
    Column,
    MetaData,
    String,
    Integer,
    Float,
    Table,
    Text,
    ForeignKey,
    create_engine,
    select
)

from domain.repositories import RepositoryInterface

metadata = MetaData()

# "user" rows optionally reference one department via deptId.
users_table = Table(
    'user',
    metadata,
    Column('userId', Integer, primary_key=True),
    Column('description', Text, nullable=True),
    Column('userName', String(10), nullable=True),
    Column('deptId', Integer, ForeignKey("department.deptId"), nullable=True)
)

departments_table = Table(
    'department',
    metadata,
    Column('deptId', Integer, primary_key=True),
    Column('description', Text, nullable=True),
    Column('deptName', String(10), nullable=True)
)


###################### Common Function #####################
class MySqlAdapter(RepositoryInterface):
    """SQLAlchemy Core adapter implementing RepositoryInterface.

    Creates the user/department tables on first use and keeps a single
    connection open for the adapter's lifetime.
    """

    def __init__(self, database_uri=None):
        """Connect to *database_uri*, or to $DB_URI when it is not given,
        creating the tables if they do not yet exist.

        NOTE(review): convert_unicode is deprecated/removed in modern
        SQLAlchemy — confirm the pinned SQLAlchemy version before upgrading.
        """
        uri = database_uri or os.getenv('DB_URI')
        db_engine = create_engine(uri, convert_unicode=True, echo=True)
        self.__create_tables_if_not_exists(db_engine)
        self.__connection = db_engine.connect()

    def close_db_connection(self, db_connection):
        """Close *db_connection*, ignoring ordinary errors (best-effort cleanup)."""
        try:
            db_connection.close()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. The deliberate best-effort
            # behaviour is kept, but only for ordinary exceptions.
            pass

    def __create_tables_if_not_exists(self, db_engine):
        # Departments first: users carry a foreign key to department.deptId.
        departments_table.create(db_engine, checkfirst=True)
        users_table.create(db_engine, checkfirst=True)

    ###################### CRUD Function #####################
    def insertUser(self, user):
        """Insert *user* (an object exposing as_dict()) inside a transaction."""
        with self.__connection.begin():
            self.__connection.execute(
                users_table.insert(),
                user.as_dict()
            )

    def selectUserWithDeptInfo(self, userId):
        """Return {'userId', 'userName', 'deptName'} for *userId*, joining the
        department (LEFT OUTER, so deptName may be None), or None if no row."""
        with self.__connection.begin():
            joined = users_table.outerjoin(
                departments_table,
                users_table.c.deptId == departments_table.c.deptId
            )
            stmt = (
                select([users_table, departments_table])
                .distinct()
                .select_from(joined)
                .where(users_table.c.userId == userId)
            )
            row = self.__connection.execute(stmt).fetchone()
            return {
                'userId': row['userId'],
                'userName': row['userName'],
                'deptName': row['deptName']
            } if row else None

    def insertDepartment(self, department):
        """Insert *department* (an object exposing as_dict()) inside a transaction."""
        with self.__connection.begin():
            self.__connection.execute(
                departments_table.insert(),
                department.as_dict()
            )
############################################################
armyost/hexagonalSampleV2
src/app/infrastructure/adapters/mysql_adapter.py
mysql_adapter.py
py
2,513
python
en
code
0
github-code
36
[ { "api_name": "sqlalchemy.MetaData", "line_number": 16, "usage_type": "call" }, { "api_name": "sqlalchemy.Table", "line_number": 18, "usage_type": "call" }, { "api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call" }, { "api_name": "sqlalchemy.Int...
20869179523
"""Log in to a Naver SmartStore seller account and print the first
delivery-ready order's number, orderer name and receiver name."""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

smartstorelogin_url = 'https://nid.naver.com/nidlogin.login?url=https%3A%2F%2Fsell.smartstore.naver.com%2F%23%2FnaverLoginCallback%3Furl%3Dhttps%253A%252F%252Fsell.smartstore.naver.com%252F%2523'
smartstoredelevery_url = 'https://sell.smartstore.naver.com/#/naverpay/sale/delivery?summaryInfoType=DELIVERY_READY'

# SECURITY: real account credentials are hardcoded and committed here —
# these should be moved to environment variables / a secrets store and the
# password rotated, since this file is in a public repository.
smartstore_id = 'hdh5454'
smartstore_pw = 'whdmsehgud8*'

driver = webdriver.Chrome('chromedriver')

# SmartStore login.
# NOTE(review): find_element_by_name / find_element_by_id were removed in
# Selenium 4; migrating requires driver.find_element(By.NAME, ...) etc.
driver.get(smartstorelogin_url)
driver.find_element_by_name('id').send_keys(smartstore_id)
driver.find_element_by_name('pw').send_keys(smartstore_pw)
driver.find_element_by_id('log.login').click()

# Open the "ready for delivery" order list.
driver.get(smartstoredelevery_url)

# Wait until the order table's iframe is present and clickable.
wait = WebDriverWait(driver, 10)
element = wait.until(EC.element_to_be_clickable((By.TAG_NAME, 'iframe')))
# iframes = driver.find_elements_by_tag_name('iframe')
# print('현재 페이지에 iframe은 %d개가 있습니다.' % len(iframes))

driver.switch_to.frame('__naverpay')  # switch frames so xpaths inside the iframe are reachable

# First orderNo cell is a header/summary row, hence the [1:] slice.
orderNo = driver.find_elements_by_xpath('//*[@data-column-name="orderNo"]/div')[1:]
orderMemberName = driver.find_elements_by_xpath('//*[@data-column-name="orderMemberName"]/div')
receiverName = driver.find_elements_by_xpath('//*[@data-column-name="receiverName"]/div')

print(orderNo[0].text)
print(orderMemberName[0].text)
print(receiverName[0].text)

# print(driver.find_element_by_xpath('//*[@data-column-name="orderMemberName"]/div/text()').extract_first())
hdh4545/SSAutomation
findxpath.py
findxpath.py
py
1,724
python
en
code
0
github-code
36
[ { "api_name": "selenium.webdriver.Chrome", "line_number": 11, "usage_type": "call" }, { "api_name": "selenium.webdriver", "line_number": 11, "usage_type": "name" }, { "api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 21, "usage_type": "call" }, ...
24399113
"""Plotly chart builders for the torrefaction sizing/analysis tool."""
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import maths


def mass_flow_funnel(mass_flows, moisture_content):
    """Funnel-area chart of feedstock mass flow (kg/h) before drying, after
    drying and after torrefaction, annotated with moisture content (%).

    :param mass_flows: 3 values, one per processing stage.
    :param moisture_content: 3 moisture percentages, same order.
    :return: a plotly Figure.
    """
    fig = go.Figure(go.Funnelarea(
        # textinfo = [str(round(mass_flows[0],2))+" kg/h <br>Before Drying",str(round(mass_flows[1],2))+" kg/h <br>After Drying",str(mass_flows[2])+" kg/h <br>After Torrefaction"],
        text=["Before Drying at MC="+str(moisture_content[0])+"%", "After Drying at MC="+str(moisture_content[1])+"%", "After Torrefaction at MC="+str(moisture_content[2])+"%"],
        values=mass_flows,
        textinfo='value+text'
    ))
    fig.update_layout(
        title='Feedstock Mass Flow Requirements (kg/h)',
        title_x=0.5,
        showlegend=False
    )
    fig.update_yaxes(
        showticklabels=False
    )
    return fig


def torr_sizing(t1, t2, cp, mfr):
    """Sweep reactor diameter and wall temperature, computing the required
    reactor length via maths.get_L_torr, and plot the results.

    NOTE(review): this function looks broken as written and needs review:
    - wall_temp has 3 entries (arange(200,500,100)) but the plotting loop
      indexes wall_temp[i] for the 11 diameters, so it raises IndexError at
      i == 3.
    - each trace pairs x=reactor_diameter (11 values) with y=results[i,:]
      (3 values), a length mismatch.
    - the axis titles ("Reactor Length" on x, "Wall Temperature" on y) and the
      legend title ("Reactor Diameter") contradict the data actually plotted.
    Presumably the intent is one trace per diameter with x=wall temperature
    (or x=length) — confirm against maths.get_L_torr before fixing.
    """
    reactor_diameter = np.arange(0.5, 6.0, 0.5)
    wall_temp = np.arange(200.0, 500.0, 100.0)
    # results[i, j] = required length for diameter i at wall temperature j.
    results = np.zeros(shape=(len(reactor_diameter), len(wall_temp)))
    for i in range(0, len(reactor_diameter)):
        for j in range(0, len(wall_temp)):
            results[i, j] = maths.get_L_torr(maths.kelvin(wall_temp[j]), maths.kelvin(t1), maths.kelvin(t2), cp, reactor_diameter[i], mfr)
    fig = go.Figure()
    for i in range(0, len(reactor_diameter)):
        fig.add_trace(go.Scatter(x=reactor_diameter, y=results[i, :], name=(str(round(wall_temp[i], 2)))))
    fig.update_xaxes(title="Reactor Length (m)")
    fig.update_yaxes(title="Wall Temperature (K)")
    fig.update_layout(
        showlegend=True,
        legend=dict(title="Reactor Diameter (m)"),
        title="Minimum Reactor Wall Temperature Requirement at ",
        title_x=0.5
    )
    return fig


def torr_analysis(t1, t2, mfrate, d_reactor, rpm_screw, heat_loss, cp):
    """Plot the effect of heating rate (deltaT sweep, 10–150) on reactor
    length, residence time, and system heat requirement for sand and air HTM.

    Column layout of maths.get_thermal_analysis output assumed here:
    [0]=residence time, [1]=length, [6]=heat for sand, [7]=heat for air —
    presumably; verify against the maths module.
    """
    deltaT = np.arange(10.0, 160.0, 10.0)
    ta_results = np.zeros(shape=(len(deltaT), 8))
    for i in range(0, len(deltaT)):
        ta_results[i] = maths.get_thermal_analysis(t1, t2, mfrate, deltaT[i], d_reactor, rpm_screw, heat_loss, cp)
    fig = make_subplots(rows=4, cols=1, subplot_titles=('Reactor Length', 'Residence Time', 'System Heat Requirement (kJ/s) for Sand as HTM', 'System Heat Requirement (kJ/s) for Air as HTM'))
    fig.update_layout(height=1000, title="Effects of Varying Heating Rates on Reactor Parameters", title_x=0.5, showlegend=False)
    # Heating Rate vs. Length
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 1]), col=1, row=1)
    fig.update_yaxes(title='m', col=1, row=1)
    # Residence Time
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 0]), col=1, row=2)
    fig.update_yaxes(title='min.', col=1, row=2)
    # System Heat Requirement
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 6]), col=1, row=3)
    fig.update_yaxes(title='kJ/s', col=1, row=3)
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 7]), col=1, row=4)
    fig.update_yaxes(title='kJ/s', col=1, row=4)
    return fig
drpsantos/torr
210921/charts.py
charts.py
py
2,905
python
en
code
0
github-code
36
[ { "api_name": "plotly.graph_objects.Figure", "line_number": 8, "usage_type": "call" }, { "api_name": "plotly.graph_objects", "line_number": 8, "usage_type": "name" }, { "api_name": "plotly.graph_objects.Funnelarea", "line_number": 8, "usage_type": "call" }, { "api...
6842050847
import cv2
import numpy as np
import re
from tqdm import tqdm
import os
import random
from PIL import Image, ImageEnhance


def augment(image):
    """Return a randomly flipped and rotated copy of *image*.

    One of three flips (vertical / horizontal / both) is applied, then one of
    three 90-degree-multiple rotations, each chosen uniformly at random.
    """
    def transform():
        return random.choice([0, 1, 2])

    # every image has to flip
    transform_seed = transform()
    if transform_seed == 0:
        image = cv2.flip(image, 0)   # flip around the x-axis (vertical flip)
    elif transform_seed == 1:
        image = cv2.flip(image, 1)   # flip around the y-axis (horizontal flip)
    else:
        image = cv2.flip(image, -1)  # flip around both axes
    # every image also has to rotate
    transform_seed2 = transform()
    if transform_seed2 == 0:
        # BUG FIX: was cv2.cv2.ROTATE_90_CLOCKWISE — the cv2.cv2 alias was
        # removed from recent opencv-python releases and was inconsistent with
        # the other two branches anyway.
        image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
    elif transform_seed2 == 1:
        image = cv2.rotate(image, cv2.ROTATE_180)
    else:
        image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
    return image


# read img
# def fast_scandir(dirname):
#     subfolders= [f.path for f in os.scandir(dirname) if f.is_dir()]
#     for dirname in list(subfolders):
#         subfolders.extend(fast_scandir(dirname))
#     return subfolders
# read_dir = fast_scandir('/media/zheng/backup/shipclassification/dataset/split_aug/split/train/')
read_dir = ['/mnt/data2/Projects/BuildingDetection/ShipClassification/split/train2', '/mnt/data2/Projects/BuildingDetection/ShipClassification/split/test2']
expand_times = 4

for dire in read_dir:
    for filename in os.listdir(dire):
        path = dire + '/' + filename
        image = cv2.imread(path)
        stem = filename[:-4]  # drop the 3-char extension + dot
        if dire == '/mnt/data2/Projects/BuildingDetection/ShipClassification/split/train2':
            save_dir = '/mnt/data2/Projects/BuildingDetection/ShipClassification/split/train3'
        else:
            save_dir = '/mnt/data2/Projects/BuildingDetection/ShipClassification/split/test3'
        for i in range(expand_times):
            img_aug = augment(image)
            # BUG FIX: the original appended '_<i>.png' to `filename` and then
            # stripped it again with filename[:-6], which silently corrupts
            # names once i >= 10 (the suffix is then 7+ chars). Building the
            # output name fresh each iteration produces identical filenames
            # for the current configuration and is safe for any expand_times.
            out_name = stem + '_' + str(i) + '.png'
            cv2.imwrite(os.path.join(save_dir, out_name), img_aug)

print('augment finished')
czkat/real-time-ship-classification-by-resnet-transfer-learning-with-original-dataset
augment.py
augment.py
py
2,111
python
en
code
0
github-code
36
[ { "api_name": "random.choice", "line_number": 11, "usage_type": "call" }, { "api_name": "cv2.flip", "line_number": 16, "usage_type": "call" }, { "api_name": "cv2.flip", "line_number": 18, "usage_type": "call" }, { "api_name": "cv2.flip", "line_number": 20, ...
34377275517
""" Pipeline modules to prepare the data for the PSF subtraction. """ import time import warnings from typing import Optional, Tuple import numpy as np from astropy.coordinates import EarthLocation from astropy.time import Time from typeguard import typechecked from pynpoint.core.processing import ProcessingModule from pynpoint.util.module import progress, memory_frames from pynpoint.util.image import create_mask, scale_image, shift_image class PSFpreparationModule(ProcessingModule): """ Module to prepare the data for PSF subtraction with PCA. The preparation steps include masking and an optional normalization. """ __author__ = 'Markus Bonse, Tomas Stolker, Timothy Gebhard, Sven Kiefer' @typechecked def __init__(self, name_in: str, image_in_tag: str, image_out_tag: str, mask_out_tag: Optional[str] = None, norm: bool = False, resize: Optional[float] = None, cent_size: Optional[float] = None, edge_size: Optional[float] = None) -> None: """ Parameters ---------- name_in : str Unique name of the module instance. image_in_tag : str Tag of the database entry that is read as input. image_out_tag : str Tag of the database entry with images that is written as output. mask_out_tag : str, None, optional Tag of the database entry with the mask that is written as output. If set to None, no mask array is saved. norm : bool Normalize each image by its Frobenius norm. Only supported for 3D datasets (i.e. regular imaging). resize : float, None DEPRECATED. This parameter is currently ignored by the module and will be removed in a future version of PynPoint. cent_size : float, None, optional Radius of the central mask (in arcsec). No mask is used when set to None. edge_size : float, None, optional Outer radius (in arcsec) beyond which pixels are masked. No outer mask is used when set to None. If the value is larger than half the image size then it will be set to half the image size. 
Returns ------- NoneType None """ super().__init__(name_in) self.m_image_in_port = self.add_input_port(image_in_tag) if mask_out_tag is None: self.m_mask_out_port = None else: self.m_mask_out_port = self.add_output_port(mask_out_tag) self.m_image_out_port = self.add_output_port(image_out_tag) self.m_cent_size = cent_size self.m_edge_size = edge_size self.m_norm = norm # Raise a DeprecationWarning if the resize argument is used if resize is not None: warnings.warn('The \'resize\' parameter has been deprecated. Its value is currently ' 'being ignored, and the argument will be removed in a future version ' 'of PynPoint.', DeprecationWarning) @typechecked def run(self) -> None: """ Run method of the module. Masks and normalizes the images. Returns ------- NoneType None """ # Get the PIXSCALE and MEMORY attributes pixscale = self.m_image_in_port.get_attribute('PIXSCALE') memory = self._m_config_port.get_attribute('MEMORY') # Get the numnber of dimensions and shape ndim = self.m_image_in_port.get_ndim() im_shape = self.m_image_in_port.get_shape() if ndim == 3: # Number of images nimages = im_shape[-3] # Split into batches to comply with memory constraints frames = memory_frames(memory, nimages) elif ndim == 4: # Process all wavelengths per exposure at once frames = np.linspace(0, im_shape[-3], im_shape[-3]+1) if self.m_norm and ndim == 4: warnings.warn('The \'norm\' parameter does not support 4D datasets and will therefore ' 'be ignored.') # Convert m_cent_size and m_edge_size from arcseconds to pixels if self.m_cent_size is not None: self.m_cent_size /= pixscale if self.m_edge_size is not None: self.m_edge_size /= pixscale # Create 2D disk mask which will be applied to every frame mask = create_mask((int(im_shape[-2]), int(im_shape[-1])), (self.m_cent_size, self.m_edge_size)).astype(bool) # Keep track of the normalization vectors in case we are normalizing the images (if # we are not normalizing, this list will remain empty) norms = list() start_time = time.time() # Run 
the PSFpreparationModule for each subset of frames for i in range(frames[:-1].size): # Print progress to command line progress(i, len(frames[:-1]), 'Preparing images for PSF subtraction...', start_time) if ndim == 3: # Get the images and ensure they have the correct 3D shape with the following # three dimensions: (batch_size, height, width) images = self.m_image_in_port[frames[i]:frames[i+1], ] if images.ndim == 2: warnings.warn('The input data has 2 dimensions whereas 3 dimensions are ' 'required. An extra dimension has been added.') images = images[np.newaxis, ...] elif ndim == 4: # Process all wavelengths per exposure at once images = self.m_image_in_port[:, i, ] # Apply the mask, i.e., set all pixels to 0 where the mask is False images[:, ~mask] = 0. # If desired, normalize the images using the Frobenius norm if self.m_norm and ndim == 3: im_norm = np.linalg.norm(images, ord='fro', axis=(1, 2)) images /= im_norm[:, np.newaxis, np.newaxis] norms.append(im_norm) # Write processed images to output port if ndim == 3: self.m_image_out_port.append(images, data_dim=3) elif ndim == 4: self.m_image_out_port.append(images, data_dim=4) # Store information about mask if self.m_mask_out_port is not None: self.m_mask_out_port.set_all(mask) self.m_mask_out_port.copy_attributes(self.m_image_in_port) # Copy attributes from input port self.m_image_out_port.copy_attributes(self.m_image_in_port) # If the norms list is not empty (i.e., if we have computed the norm for every image), # we can also save the corresponding norm vector as an additional attribute if norms: self.m_image_out_port.add_attribute(name='norm', value=np.hstack(norms), static=False) # Save cent_size and edge_size as attributes to the output port if self.m_cent_size is not None: self.m_image_out_port.add_attribute(name='cent_size', value=self.m_cent_size * pixscale, static=True) if self.m_edge_size is not None: self.m_image_out_port.add_attribute(name='edge_size', value=self.m_edge_size * pixscale, static=True) 
class AngleInterpolationModule(ProcessingModule): """ Module for calculating the parallactic angle values by interpolating between the begin and end value of a data cube. """ __author__ = 'Markus Bonse, Tomas Stolker' @typechecked def __init__(self, name_in: str, data_tag: str) -> None: """ Parameters ---------- name_in : str Unique name of the module instance. data_tag : str Tag of the database entry for which the parallactic angles are written as attributes. Returns ------- NoneType None """ super().__init__(name_in) self.m_data_in_port = self.add_input_port(data_tag) self.m_data_out_port = self.add_output_port(data_tag) @typechecked def run(self) -> None: """ Run method of the module. Calculates the parallactic angles of each frame by linearly interpolating between the start and end values of the data cubes. The values are written as attributes to *data_tag*. A correction of 360 deg is applied when the start and end values of the angles change sign at +/-180 deg. Returns ------- NoneType None """ parang_start = self.m_data_in_port.get_attribute('PARANG_START') parang_end = self.m_data_in_port.get_attribute('PARANG_END') steps = self.m_data_in_port.get_attribute('NFRAMES') if 'NDIT' in self.m_data_in_port.get_all_non_static_attributes(): ndit = self.m_data_in_port.get_attribute('NDIT') if not np.all(ndit == steps): warnings.warn('There is a mismatch between the NDIT and NFRAMES values. The ' 'parallactic angles are calculated with a linear interpolation by ' 'using NFRAMES steps. A frame selection should be applied after ' 'the parallactic angles are calculated.') new_angles = [] start_time = time.time() for i, _ in enumerate(parang_start): progress(i, len(parang_start), 'Interpolating parallactic angles...', start_time) if parang_start[i] < -170. and parang_end[i] > 170.: parang_start[i] += 360. elif parang_end[i] < -170. and parang_start[i] > 170.: parang_end[i] += 360. 
if steps[i] == 1: new_angles = np.append(new_angles, [(parang_start[i] + parang_end[i])/2.]) elif steps[i] != 1: new_angles = np.append(new_angles, np.linspace(parang_start[i], parang_end[i], num=steps[i])) self.m_data_out_port.add_attribute('PARANG', new_angles, static=False) class SortParangModule(ProcessingModule): """ Module to sort the images and attributes with increasing ``INDEX``. """ __author__ = 'Tomas Stolker' @typechecked def __init__(self, name_in: str, image_in_tag: str, image_out_tag: str) -> None: """ Parameters ---------- name_in : str Unique name of the module instance. image_in_tag : str Database tag with the input data. image_out_tag : str Database tag where the output data will be stored. Should be different from ``image_in_tag``. Returns ------- NoneType None """ super().__init__(name_in) self.m_image_in_port = self.add_input_port(image_in_tag) self.m_image_out_port = self.add_output_port(image_out_tag) @typechecked def run(self) -> None: """ Run method of the module. Sorts the images and attributes with increasing ``INDEX``. Therefore, the images are sorted by there original (usually chronological) order. 
Returns ------- NoneType None """ memory = self._m_config_port.get_attribute('MEMORY') index = self.m_image_in_port.get_attribute('INDEX') ndim = self.m_image_in_port.get_ndim() nimages = self.m_image_in_port.get_shape()[-3] index_new = np.zeros(index.shape, dtype=int) if 'PARANG' in self.m_image_in_port.get_all_non_static_attributes(): parang = self.m_image_in_port.get_attribute('PARANG') parang_new = np.zeros(parang.shape) else: parang_new = None if 'STAR_POSITION' in self.m_image_in_port.get_all_non_static_attributes(): star = self.m_image_in_port.get_attribute('STAR_POSITION') star_new = np.zeros(star.shape) else: star_new = None index_sort = np.argsort(index) frames = memory_frames(memory, nimages) start_time = time.time() for i, _ in enumerate(frames[:-1]): progress(i, len(frames[:-1]), 'Sorting images in time...', start_time) index_new[frames[i]:frames[i+1]] = index[index_sort[frames[i]:frames[i+1]]] if parang_new is not None: parang_new[frames[i]:frames[i+1]] = parang[index_sort[frames[i]:frames[i+1]]] if star_new is not None: star_new[frames[i]:frames[i+1]] = star[index_sort[frames[i]:frames[i+1]]] # HDF5 indexing elements must be in increasing order for item in index_sort[frames[i]:frames[i+1]]: if ndim == 3: self.m_image_out_port.append(self.m_image_in_port[item, ], data_dim=3) elif ndim == 4: self.m_image_out_port.append(self.m_image_in_port[:, item, ], data_dim=4) self.m_image_out_port.copy_attributes(self.m_image_in_port) self.m_image_out_port.add_history('SortParangModule', 'sorted by INDEX') self.m_image_out_port.add_attribute('INDEX', index_new, static=False) if parang_new is not None: self.m_image_out_port.add_attribute('PARANG', parang_new, static=False) if star_new is not None: self.m_image_out_port.add_attribute('STAR_POSITION', star_new, static=False) self.m_image_out_port.close_port() class AngleCalculationModule(ProcessingModule): """ Module for calculating the parallactic angles. 
The start time of the observation is taken and multiples of the exposure time are added to derive the parallactic angle of each frame inside the cube. Instrument specific overheads are included. """ __author__ = 'Alexander Bohn, Tomas Stolker' @typechecked def __init__(self, name_in: str, data_tag: str, instrument: str = 'NACO') -> None: """ Parameters ---------- name_in : str Unique name of the module instance. data_tag : str Tag of the database entry for which the parallactic angles are written as attributes. instrument : str Instrument name ('NACO', 'SPHERE/IRDIS', or 'SPHERE/IFS'). Returns ------- NoneType None """ super().__init__(name_in) # Parameters self.m_instrument = instrument # Set parameters according to choice of instrument if self.m_instrument == 'NACO': # pupil offset in degrees self.m_pupil_offset = 0. # No offset here # no overheads in cube mode, since cube is read out after all individual exposures # see NACO manual page 62 (v102) self.m_O_START = 0. self.m_DIT_DELAY = 0. self.m_ROT = 0. # rotator offset in degrees self.m_rot_offset = 89.44 # According to NACO manual page 65 (v102) elif self.m_instrument == 'SPHERE/IRDIS': # pupil offset in degrees self.m_pupil_offset = -135.99 # According to SPHERE manual page 64 (v102) # overheads in cube mode (several NDITS) in hours self.m_O_START = 0.3 / 3600. # According to SPHERE manual page 90/91 (v102) self.m_DIT_DELAY = 0.1 / 3600. # According to SPHERE manual page 90/91 (v102) self.m_ROT = 0.838 / 3600. # According to SPHERE manual page 90/91 (v102) # rotator offset in degrees self.m_rot_offset = 0. # no offset here elif self.m_instrument == 'SPHERE/IFS': # pupil offset in degrees self.m_pupil_offset = -135.99 - 100.48 # According to SPHERE manual page 64 (v102) # overheads in cube mode (several NDITS) in hours self.m_O_START = 0.3 / 3600. # According to SPHERE manual page 90/91 (v102) self.m_DIT_DELAY = 0.2 / 3600. # According to SPHERE manual page 90/91 (v102) self.m_ROT = 1.65 / 3600. 
# According to SPHERE manual page 90/91 (v102) # rotator offset in degrees self.m_rot_offset = 0. # no offset here else: raise ValueError('The instrument argument should be set to either \'NACO\', ' '\'SPHERE/IRDIS\', or \'SPHERE/IFS\'.') self.m_data_in_port = self.add_input_port(data_tag) self.m_data_out_port = self.add_output_port(data_tag) @typechecked def _attribute_check(self, ndit: np.ndarray, steps: np.ndarray) -> None: if not np.all(ndit == steps): warnings.warn('There is a mismatch between the NDIT and NFRAMES values. A frame ' 'selection should be applied after the parallactic angles are ' 'calculated.') if self.m_instrument == 'SPHERE/IFS': warnings.warn('AngleCalculationModule has not been tested for SPHERE/IFS data.') if self.m_instrument in ('SPHERE/IRDIS', 'SPHERE/IFS'): if self._m_config_port.get_attribute('RA') != 'ESO INS4 DROT2 RA': warnings.warn('For SPHERE data it is recommended to use the header keyword ' '\'ESO INS4 DROT2 RA\' to specify the object\'s right ascension. ' 'The input will be parsed accordingly. Using the regular ' '\'RA\' keyword will lead to wrong parallactic angles.') if self._m_config_port.get_attribute('DEC') != 'ESO INS4 DROT2 DEC': warnings.warn('For SPHERE data it is recommended to use the header keyword ' '\'ESO INS4 DROT2 DEC\' to specify the object\'s declination. ' 'The input will be parsed accordingly. Using the regular ' '\'DEC\' keyword will lead to wrong parallactic angles.') @typechecked def run(self) -> None: """ Run method of the module. Calculates the parallactic angles from the position of the object on the sky and the telescope location on earth. The start of the observation is used to extrapolate for the observation time of each individual image of a data cube. The values are written as PARANG attributes to *data_tag*. 
Returns ------- NoneType None """ # Load cube sizes steps = self.m_data_in_port.get_attribute('NFRAMES') ndit = self.m_data_in_port.get_attribute('NDIT') self._attribute_check(ndit, steps) # Load exposure time [hours] exptime = self.m_data_in_port.get_attribute('DIT')/3600. # Load telescope location tel_lat = self.m_data_in_port.get_attribute('LATITUDE') tel_lon = self.m_data_in_port.get_attribute('LONGITUDE') # Load temporary target position tmp_ra = self.m_data_in_port.get_attribute('RA') tmp_dec = self.m_data_in_port.get_attribute('DEC') # Parse to degree depending on instrument if 'SPHERE' in self.m_instrument: # get sign of declination tmp_dec_sign = np.sign(tmp_dec) tmp_dec = np.abs(tmp_dec) # parse RA tmp_ra_s = tmp_ra % 100 tmp_ra_m = ((tmp_ra - tmp_ra_s) / 1e2) % 100 tmp_ra_h = ((tmp_ra - tmp_ra_s - tmp_ra_m * 1e2) / 1e4) # parse DEC tmp_dec_s = tmp_dec % 100 tmp_dec_m = ((tmp_dec - tmp_dec_s) / 1e2) % 100 tmp_dec_d = ((tmp_dec - tmp_dec_s - tmp_dec_m * 1e2) / 1e4) # get RA and DEC in degree ra = (tmp_ra_h + tmp_ra_m / 60. + tmp_ra_s / 3600.) * 15. dec = tmp_dec_sign * (tmp_dec_d + tmp_dec_m / 60. + tmp_dec_s / 3600.) 
else: ra = tmp_ra dec = tmp_dec # Load start times of exposures obs_dates = self.m_data_in_port.get_attribute('DATE') # Load pupil positions during observations if self.m_instrument == 'NACO': pupil_pos = self.m_data_in_port.get_attribute('PUPIL') elif self.m_instrument == 'SPHERE/IRDIS': pupil_pos = np.zeros(steps.shape) elif self.m_instrument == 'SPHERE/IFS': pupil_pos = np.zeros(steps.shape) new_angles = np.array([]) pupil_pos_arr = np.array([]) start_time = time.time() # Calculate parallactic angles for each cube for i, tmp_steps in enumerate(steps): progress(i, len(steps), 'Calculating parallactic angles...', start_time) t = Time(obs_dates[i].decode('utf-8'), location=EarthLocation(lat=tel_lat, lon=tel_lon)) sid_time = t.sidereal_time('apparent').value # Extrapolate sideral times from start time of the cube for each frame of it sid_time_arr = np.linspace(sid_time+self.m_O_START, (sid_time+self.m_O_START) + (exptime+self.m_DIT_DELAY + self.m_ROT)*(tmp_steps-1), tmp_steps) # Convert to degrees sid_time_arr_deg = sid_time_arr * 15. # Calculate hour angle in degrees hour_angle = sid_time_arr_deg - ra[i] # Conversion to radians: hour_angle_rad = np.deg2rad(hour_angle) dec_rad = np.deg2rad(dec[i]) lat_rad = np.deg2rad(tel_lat) p_angle = np.arctan2(np.sin(hour_angle_rad), (np.cos(dec_rad)*np.tan(lat_rad) - np.sin(dec_rad)*np.cos(hour_angle_rad))) new_angles = np.append(new_angles, np.rad2deg(p_angle)) pupil_pos_arr = np.append(pupil_pos_arr, np.ones(tmp_steps)*pupil_pos[i]) # Correct for rotator (SPHERE) or pupil offset (NACO) if self.m_instrument == 'NACO': # See NACO manual page 65 (v102) new_angles_corr = new_angles - (90. 
+ (self.m_rot_offset-pupil_pos_arr)) elif self.m_instrument == 'SPHERE/IRDIS': # See SPHERE manual page 64 (v102) new_angles_corr = new_angles - self.m_pupil_offset elif self.m_instrument == 'SPHERE/IFS': # See SPHERE manual page 64 (v102) new_angles_corr = new_angles - self.m_pupil_offset indices = np.where(new_angles_corr < -180.)[0] if indices.size > 0: new_angles_corr[indices] += 360. indices = np.where(new_angles_corr > 180.)[0] if indices.size > 0: new_angles_corr[indices] -= 360. self.m_data_out_port.add_attribute('PARANG', new_angles_corr, static=False) class SDIpreparationModule(ProcessingModule): """ Module for preparing continuum frames for dual-band simultaneous differential imaging. """ __author__ = 'Gabriele Cugno, Tomas Stolker' @typechecked def __init__(self, name_in: str, image_in_tag: str, image_out_tag: str, wavelength: Tuple[float, float], width: Tuple[float, float]) -> None: """ Parameters ---------- name_in : str Unique name of the module instance. image_in_tag : str Tag of the database entry that is read as input. image_out_tag : str Tag of the database entry that is written as output. Should be different from *image_in_tag*. wavelength : tuple(float, float) The central wavelengths of the line and continuum filter, (line, continuum), in arbitrary but identical units. width : tuple(float, float) The equivalent widths of the line and continuum filter, (line, continuum), in arbitrary but identical units. Returns ------- NoneType None """ super().__init__(name_in) self.m_image_in_port = self.add_input_port(image_in_tag) self.m_image_out_port = self.add_output_port(image_out_tag) self.m_line_wvl = wavelength[0] self.m_cnt_wvl = wavelength[1] self.m_line_width = width[0] self.m_cnt_width = width[1] @typechecked def run(self) -> None: """ Run method of the module. Normalizes the images for the different filter widths, upscales the images, and crops the images to the initial image shape in order to align the PSF patterns. 
Returns ------- NoneType None """ wvl_factor = self.m_line_wvl/self.m_cnt_wvl width_factor = self.m_line_width/self.m_cnt_width nimages = self.m_image_in_port.get_shape()[0] start_time = time.time() for i in range(nimages): progress(i, nimages, 'Preparing images for dual-band SDI...', start_time) image = self.m_image_in_port[i, ] im_scale = width_factor * scale_image(image, wvl_factor, wvl_factor) if i == 0: npix_del = im_scale.shape[-1] - image.shape[-1] if npix_del % 2 == 0: npix_del_a = int(npix_del/2) npix_del_b = int(npix_del/2) else: npix_del_a = int((npix_del-1)/2) npix_del_b = int((npix_del+1)/2) im_crop = im_scale[npix_del_a:-npix_del_b, npix_del_a:-npix_del_b] if npix_del % 2 == 1: im_crop = shift_image(im_crop, (-0.5, -0.5), interpolation='spline') self.m_image_out_port.append(im_crop, data_dim=3) history = f'(line, continuum) = ({self.m_line_wvl}, {self.m_cnt_wvl})' self.m_image_out_port.copy_attributes(self.m_image_in_port) self.m_image_out_port.add_history('SDIpreparationModule', history) self.m_image_in_port.close_port()
PynPoint/PynPoint
pynpoint/processing/psfpreparation.py
psfpreparation.py
py
27,088
python
en
code
17
github-code
36
[ { "api_name": "pynpoint.core.processing.ProcessingModule", "line_number": 21, "usage_type": "name" }, { "api_name": "typing.Optional", "line_number": 34, "usage_type": "name" }, { "api_name": "typing.Optional", "line_number": 36, "usage_type": "name" }, { "api_nam...
12136530301
""" This file is meant to optimize the import speed. Import modules from YOLOv7 projects and Ultralytics take significant amount of time """ import glob import math import logging import numpy as np import os import re import time import urllib from pathlib import Path from PIL import Image, ImageDraw, ImageFont from threading import Thread import cv2 import torch import torch.nn as nn import torchvision logging.basicConfig(filename="history.log", format="%(asctime)s - %(levelname)s - %(module)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S %p", level=logging.INFO) """ From utils.general """ def box_iou(box1, box2): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. Arguments: box1 (Tensor[N, 4]) box2 (Tensor[M, 4]) Returns: iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ def box_area(box): # box = 4xn return (box[2] - box[0]) * (box[3] - box[1]) area1 = box_area(box1.T) area2 = box_area(box2.T) # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) def clean_str(s): # Cleans a string by replacing special characters with underscore _ return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) def make_divisible(x, divisor): # Returns x evenly divisible by divisor return math.ceil(x / divisor) * divisor def check_img_size(img_size, s=32): # Verify img_size is a multiple of stride s new_size = make_divisible(img_size, int(s)) # ceil gs-multiple if new_size != img_size: print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) return new_size def clip_coords(boxes, img_shape): # Clip bounding xyxy 
bounding boxes to image shape (height, width) boxes[:, 0].clamp_(0, img_shape[1]) # x1 boxes[:, 1].clamp_(0, img_shape[0]) # y1 boxes[:, 2].clamp_(0, img_shape[1]) # x2 boxes[:, 3].clamp_(0, img_shape[0]) # y2 def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding else: gain = ratio_pad[0][0] pad = ratio_pad[1] coords[:, [0, 2]] -= pad[0] # x padding coords[:, [1, 3]] -= pad[1] # y padding coords[:, :4] /= gain clip_coords(coords, img0_shape) return coords def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center y[:, 2] = x[:, 2] - x[:, 0] # width y[:, 3] = x[:, 3] - x[:, 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y return y def apply_classifier(x, model, img, im0): # applies a second stage classifier to yolo outputs im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): d = d.clone() # Reshape and pad cutouts b = xyxy2xywh(d[:, :4]) # boxes b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad d[:, :4] = xywh2xyxy(b).long() # Rescale boxes from img_size to im0 size scale_coords(img.shape[2:], d[:, :4], 
im0[i].shape) # Classes pred_cls1 = d[:, 5].long() ims = [] for j, a in enumerate(d): # per item cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] im = cv2.resize(cutout, (224, 224)) # BGR # cv2.imwrite('test%i.jpg' % j, cutout) im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 im /= 255.0 # 0 - 255 to 0.0 - 1.0 ims.append(im) pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections return x def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()): """Runs Non-Maximum Suppression (NMS) on inference results Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ nc = prediction.shape[2] - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates # Settings min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height max_det = 300 # maximum number of detections per image max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence # Cat apriori labels if autolabelling if labels and len(labels[xi]): l = labels[xi] v = torch.zeros((len(l), nc + 5), device=x.device) v[:, :4] = l[:, 1:5] # box v[:, 4] = 1.0 # conf v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls x = torch.cat((x, v), 0) # If none remain process next image if not x.shape[0]: continue # Compute conf if nc == 1: x[:, 5:] = 
x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5, # so there is no need to multiplicate. else: x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf # Box (center x, center y, width, height) to (x1, y1, x2, y2) box = xywh2xyxy(x[:, :4]) # Detections matrix nx6 (xyxy, conf, cls) if multi_label: i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) else: # best class only conf, j = x[:, 5:].max(1, keepdim=True) x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] # Filter by class if classes is not None: x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] # Apply finite constraint # if not torch.isfinite(x).all(): # x = x[torch.isfinite(x).all(1)] # Check shape n = x.shape[0] # number of boxes if not n: # no boxes continue elif n > max_nms: # excess boxes x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS if i.shape[0] > max_det: # limit detections i = i[:max_det] if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix weights = iou * scores[None] # box weights x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes if redundant: i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] if (time.time() - t) > time_limit: print(f'WARNING: NMS time limit {time_limit}s exceeded') break # time limit exceeded return output """ From models.common.py """ def autopad(k, p=None): # kernel, padding # Pad to 'same' if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p class Conv(nn.Module): # Standard convolution def __init__(self, 
c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super(Conv, self).__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) def forward(self, x): return self.act(self.bn(self.conv(x))) def fuseforward(self, x): return self.act(self.conv(x)) """ From models.experimental.py """ class Ensemble(nn.ModuleList): # Ensemble of models def __init__(self): super(Ensemble, self).__init__() def forward(self, x, augment=False): y = [] for module in self: y.append(module(x, augment)[0]) # y = torch.stack(y).max(0)[0] # max ensemble # y = torch.stack(y).mean(0) # mean ensemble y = torch.cat(y, 1) # nms ensemble return y, None # inference, train output def attempt_load(weights, map_location=None): # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: # attempt_download(w) ckpt = torch.load(w, map_location=map_location) # load model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model # Compatibility updates for m in model.modules(): if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: m.inplace = True # pytorch 1.7.0 compatibility elif type(m) is nn.Upsample: m.recompute_scale_factor = None # torch 1.11.0 compatibility elif type(m) is Conv: m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if len(model) == 1: return model[-1] # return model else: print('Ensemble created with %s\n' % weights) for k in ['names', 'stride']: setattr(model, k, getattr(model[-1], k)) return model # return ensemble """ From utils.datasets.py """ img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv', 'webm'] 
# acceptable video suffixes def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): # Resize and pad image while meeting stride-multiple constraints shape = img.shape[:2] # current shape [height, width] if isinstance(new_shape, int): new_shape = (new_shape, new_shape) # Scale ratio (new / old) r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) if not scaleup: # only scale down, do not scale up (for better test mAP) r = min(r, 1.0) # Compute padding ratio = r, r # width, height ratios new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding if auto: # minimum rectangle dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding elif scaleFill: # stretch dw, dh = 0.0, 0.0 new_unpad = (new_shape[1], new_shape[0]) ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios dw /= 2 # divide padding into 2 sides dh /= 2 if shape[::-1] != new_unpad: # resize img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border return img, ratio, (dw, dh) class LoadImages: # for inference def __init__(self, path, img_size=640, stride=32): p = str(Path(path).absolute()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob elif os.path.isdir(p): files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir elif os.path.isfile(p): files = [p] # files else: raise Exception(f'ERROR: {p} does not exist') images = [x for x in files if x.split('.')[-1].lower() in img_formats] videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] ni, nv = len(images), len(videos) self.img_size = img_size self.stride = stride self.files = images + videos self.nf = ni + 
nv # number of files self.video_flag = [False] * ni + [True] * nv self.mode = 'image' if any(videos): self.new_video(videos[0]) # new video else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. ' \ f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' def __iter__(self): self.count = 0 return self def __next__(self): if self.count == self.nf: raise StopIteration path = self.files[self.count] if self.video_flag[self.count]: # Read video self.mode = 'video' ret_val, img0 = self.cap.read() if not ret_val: self.count += 1 self.cap.release() if self.count == self.nf: # last video raise StopIteration else: path = self.files[self.count] self.new_video(path) ret_val, img0 = self.cap.read() self.frame += 1 # print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: \n', end='') else: # Read image self.count += 1 img0 = cv2.imread(path) # BGR assert img0 is not None, 'Image Not Found ' + path #print(f'image {self.count}/{self.nf} {path}: ', end='') # Padded resize img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) return path, img, img0, self.cap def new_video(self, path): self.frame = 0 self.cap = cv2.VideoCapture(path) self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) def __len__(self): return self.nf # number of files class LoadStreams: # multiple IP or RTSP cameras def __init__(self, sources='streams.txt', img_size=640, stride=32): self.mode = 'stream' self.img_size = img_size self.stride = stride if os.path.isfile(sources): with open(sources, 'r') as f: sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] else: sources = [sources] n = len(sources) self.imgs = [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later for i, s in enumerate(sources): # Start the thread to read frames from the video stream print(f'{i + 1}/{n}: 
{s}... ', end='') url = eval(s) if s.isnumeric() else s # Remove support for Youtube video cap = cv2.VideoCapture(url) assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 _, self.imgs[i] = cap.read() # guarantee first frame thread = Thread(target=self.update, args=([i, cap]), daemon=True) print(f' success ({w}x{h} at {self.fps:.2f} FPS).') thread.start() print('') # newline # check for common shapes s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') def update(self, index, cap): # Read next stream frame in a daemon thread n = 0 while cap.isOpened(): n += 1 # _, self.imgs[index] = cap.read() cap.grab() if n == 4: # read every 4th frame success, im = cap.retrieve() self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 if self.fps != 0: time.sleep(1 / self.fps) # wait time else: time.sleep(0.2) # in rtsp situation self.fps may be zero. to avoid div by zero, take constant sleep. 
def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 img0 = self.imgs.copy() if cv2.waitKey(1) == ord('q'): # q to quit cv2.destroyAllWindows() raise StopIteration # Letterbox img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] # Stack img = np.stack(img, 0) # Convert img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 img = np.ascontiguousarray(img) return self.sources, img, img0, None def __len__(self): return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years """ From ultralytics.yolo.utils.check.py """ FILE = Path(__file__).resolve() ROOT = FILE.parents[2] # YOLO def check_suffix(file='yolov8n.pt', suffix=('.pt',), msg=''): # Check file(s) for acceptable suffix if file and suffix: if isinstance(suffix, str): suffix = [suffix] for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" def check_file(file, suffix=''): # Search/download file (if necessary) and return path check_suffix(file, suffix) # optional file = str(file) # convert to str() if Path(file).is_file() or not file: # exists return file elif file.startswith(('http:/', 'https:/')): # download url = file # warning: Pathlib turns :// -> :/ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth if Path(file).is_file(): logging.info(f'Found {url} locally at {file}') # file already exists else: logging.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file else: # search files = [] for d in 'models', 'yolo/data': # search directories files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file if not files: raise FileNotFoundError(f"'{file}' does not exist") elif len(files) > 1: raise 
FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}") return files[0] # return file """ From ultralytics.yolo.utils.check.py """ def is_ascii(s) -> bool: """ Check if a string is composed of only ASCII characters. Args: s (str): String to be checked. Returns: bool: True if the string is composed only of ASCII characters, False otherwise. """ # Convert list, tuple, None, etc. to string s = str(s) # Check if the string is composed of only ASCII characters return all(ord(c) < 128 for c in s) """ From ultralytics.yolo.utils.plotting.py """ def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): """ Takes a mask, and resizes it to the original image size Args: im1_shape (tuple): model input shape, [h, w] masks (torch.Tensor): [h, w, num] im0_shape (tuple): the original image shape ratio_pad (tuple): the ratio of the padding to the original image. Returns: masks (torch.Tensor): The masks that are being returned. """ # Rescale coordinates (xyxy) from im1_shape to im0_shape if ratio_pad is None: # calculate from im0_shape gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding else: pad = ratio_pad[1] top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) if len(masks.shape) < 2: raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') masks = masks[top:bottom, left:right] # masks = masks.permute(2, 0, 1).contiguous() # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] # masks = masks.permute(1, 2, 0).contiguous() masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) if len(masks.shape) == 2: masks = masks[:, :, None] return masks class Annotator: # YOLOv8 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, 
font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic self.pil = pil or non_ascii if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) self.font = ImageFont.load_default() # For simplicity and Performance else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 # _, _, w, h = self.font.getbbox(label) # text width, height (New) outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, box[1] + 1 if outside else box[1] + h + 1), fill=color, ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: tf = max(self.lw - 1, 1) # font thickness w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height outside = p1[1] - h >= 3 p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) def masks(self, masks, colors, im_gpu, 
alpha=0.5, retina_masks=False): """Plot masks at once. Args: masks (tensor): predicted masks on cuda, shape: [n, h, w] colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque """ if self.pil: # convert to numpy first self.im = np.asarray(self.im).copy() if len(masks) == 0: self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 colors = colors[:, None, None] # shape(n,1,1,3) masks = masks.unsqueeze(3) # shape(n,h,w,1) masks_color = masks * (colors * alpha) # shape(n,h,w,3) inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) im_gpu = im_gpu.flip(dims=[0]) # flip channel im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) im_gpu = im_gpu * inv_alph_masks[-1] + mcs im_mask = (im_gpu * 255) im_mask_np = im_mask.byte().cpu().numpy() self.im[:] = im_mask_np if retina_masks else scale_image(im_gpu.shape, im_mask_np, self.im.shape) if self.pil: # convert im back to PIL and update draw self.fromarray(self.im) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): # Add text to image (PIL-only) if anchor == 'bottom': # start y from font bottom w, h = self.font.getsize(text) # text width, height xy[1] += 1 - h self.draw.text(xy, text, fill=txt_color, font=self.font) def fromarray(self, im): # Update self.im from a numpy array self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) def result(self): # Return annotated image as array return np.asarray(self.im) class Colors: # Ultralytics color palette https://ultralytics.com/ def 
__init__(self): # hex = matplotlib.colors.TABLEAU_COLORS.values() hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') self.palette = [self.hex2rgb(f'#{c}') for c in hexs] self.n = len(self.palette) def __call__(self, i, bgr=False): c = self.palette[int(i) % self.n] return (c[2], c[1], c[0]) if bgr else c @staticmethod def hex2rgb(h): # rgb order (PIL) return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) colors = Colors() # create instance for 'from utils.plots import colors'
CMPUT-492-W2023-Capstone/cstc-backend-v7
app/src/module.py
module.py
py
28,870
python
en
code
0
github-code
36
[ { "api_name": "logging.basicConfig", "line_number": 24, "usage_type": "call" }, { "api_name": "logging.INFO", "line_number": 27, "usage_type": "attribute" }, { "api_name": "torch.min", "line_number": 56, "usage_type": "call" }, { "api_name": "torch.max", "line...
11162728846
# # Hardware: # A USB C / PIC32 Breakout Board connected to an SSD1306-based OLED display # (128x64 pixels) and an M5Stack joystick, both via I2C. # # Purpose: # Illustrates the I2C Master functionality to manipulate two I2C Slaves for # the purposes of the Kickstarter demo. # from usb_device import UsbDevice from upside_down_display import UpsideDownDisplay from ssd1306_i2c_slave_display import Ssd1306I2cSlaveDisplay from keyboard import KeyboardThread from splash_screen import SplashScreen from font import Font from box import Box from tile import Tile from ball import Ball from paddle import Paddle from playing_area import PlayingArea from game_loop import GameLoop from sampled_player import SampledPlayer from computer_player import ComputerPlayer from i2c_joystick_player import I2cJoystickPlayer if __name__ == "__main__": with UsbDevice() as usb: usb.i2c.baud_rate_400khz() display = UpsideDownDisplay(Ssd1306I2cSlaveDisplay(usb.i2c.slave(0x3c))) display.initialise() with open("font-5x8.raw", "rb") as font_fd: font_5x8 = Font(Tile.from_raw(font_fd.read(), 32 * 6, 4 * 9), 6, 9) playing_area = PlayingArea(display) paddle_tile = Tile(2, 12, [0b11000000] * 12) paddle_x_offset = 2 paddle_y_offset = 2 paddle_speed = 1.5 paddles = [ Paddle( [playing_area.bounding_box.x + paddle_x_offset, -1], 0, Box( playing_area.bounding_box.x + paddle_x_offset, playing_area.bounding_box.y + paddle_y_offset, paddle_tile.width, playing_area.bounding_box.height - 2 * paddle_y_offset), paddle_tile), Paddle( [playing_area.bounding_box.max_x - 1 - paddle_x_offset - paddle_tile.width // 2, -1], 0, Box( playing_area.bounding_box.max_x - 1 - paddle_x_offset - paddle_tile.width, playing_area.bounding_box.y + paddle_y_offset, paddle_tile.width, playing_area.bounding_box.height - 2 * paddle_y_offset), paddle_tile)] players = [ ComputerPlayer(paddles[0], max_speed = paddle_speed, difficulty = 0.1), SampledPlayer( I2cJoystickPlayer(paddles[1], usb.i2c.slave(0x52), 1, -128, paddle_speed / 128), 
interval = 0.01), ] game = GameLoop( playing_area, Ball( playing_area.centre, [1.8, 0], playing_area.bounding_box, Tile(6, 6, [ 0b00110000, 0b01111000, 0b11111100, 0b11111100, 0b01111000, 0b00110000])), players, score_font = font_5x8, message_font = font_5x8) with open("lophtware-128x64.raw", "rb") as logo_fd: logo = Tile.from_raw(logo_fd.read(), 128, 64) SplashScreen(logo).show(display) quit = False def on_keyboard_input(cmd): global quit quit = cmd == 'q' return quit keyboard = KeyboardThread(on_keyboard_input) playing_area.draw() while not quit: if not game.do_frame(display): break display.blit() with open("thanks-128x64.raw", "rb") as thanks_fd: thanks = Tile.from_raw(thanks_fd.read(), 128, 64) SplashScreen(thanks).show(display)
lophtware/UsbCPic32Breakout
src/examples/pong/python/pong.py
pong.py
py
2,980
python
en
code
2
github-code
36
[ { "api_name": "usb_device.UsbDevice", "line_number": 29, "usage_type": "call" }, { "api_name": "upside_down_display.UpsideDownDisplay", "line_number": 32, "usage_type": "call" }, { "api_name": "ssd1306_i2c_slave_display.Ssd1306I2cSlaveDisplay", "line_number": 32, "usage_t...
1842938709
from flask_wtf import FlaskForm from wtforms import StringField, SubmitField, SelectField from wtforms.validators import DataRequired from wtforms import ValidationError class MyEmailValidation(object): def __init__(self, message=None): if not message: message = "Email isn't valid." self.message = message def __call__(self, form, field): if "@" not in field.data: raise ValidationError(self.message) class CreateUsersForm(FlaskForm): email = StringField("Destination email", validators=[DataRequired(), MyEmailValidation()]) role_selector = SelectField( "Select role for new user", choices=[("admin", 3), ("moderator", 4), ("operator", 5)] ) submit = SubmitField("Create user")
a-yarohovich/control-panel
core/app/create_users/forms.py
forms.py
py
773
python
en
code
0
github-code
36
[ { "api_name": "wtforms.ValidationError", "line_number": 15, "usage_type": "call" }, { "api_name": "flask_wtf.FlaskForm", "line_number": 18, "usage_type": "name" }, { "api_name": "wtforms.StringField", "line_number": 19, "usage_type": "call" }, { "api_name": "wtfor...
19525909188
from sqlalchemy.orm import Session from fastapi import APIRouter, Depends, status from selenium import webdriver from selenium.webdriver.chrome.service import Service from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait from webdriver_manager.chrome import ChromeDriverManager from selenium.webdriver.chrome.options import Options from bs4 import BeautifulSoup import random import time def scroll(driver): try: last_page_height = driver.execute_script("return document.documentElement.scrollHeight") while True: pause_time = random.uniform(1, 2) driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);") time.sleep(pause_time) driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight-50)") time.sleep(pause_time) new_page_height = driver.execute_script("return document.documentElement.scrollHeight") if new_page_height == last_page_height: print("스크롤 완료") break else: last_page_height = new_page_height except Exception as e: print("에러 발생: ", e) def scrape_youtube_results(keyword): service = Service(ChromeDriverManager(driver_version="111.0.5563.64").install()) chrome_options = Options() chrome_options.add_argument("--headless") try: driver = webdriver.Chrome(service=service, options=chrome_options) except ValueError as e: raise ValueError(f"웹드라이버 에러 발생! 
: {e}") SEARCH_KEYWORD = keyword.replace(' ', '+') URL = "https://www.youtube.com/results?search_query=" + SEARCH_KEYWORD driver.get(URL) # scroll(driver) WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'ytd-video-renderer'))) html_source = driver.page_source soup_source = BeautifulSoup(html_source, 'html.parser') content_total = soup_source.find_all(class_='yt-simple-endpoint style-scope ytd-video-renderer') content_total_title = list(map(lambda data: data.get_text().replace("\n", ""), content_total)) content_total_link = list(map(lambda data: "https://youtube.com" + data["href"], content_total)) # content_record_src = soup_source.find_all(class_='nline-metadata-item style-scope ytd-video-meta-block') # # content_record_src = soup_source.find_all(class_='shortViewCountText') # content_view_cnt = [content_record_src[i].get_text().replace('조회수 ', '') for i in range(5, len(content_record_src), 10)] # content_upload_date = [content_record_src[i].get_text() for i in range(6, len(content_record_src), 10)] # content_view_cnt = [content_record_src[i].get_text() for i in content_record_src] # content_upload_date = [content_record_src[i].get_text().replace('\n', '') for i in range(6, len(content_record_src), 10)] driver.quit() return { 'title': content_total_title, 'link': content_total_link, # 'view': content_view_cnt, # 'upload_date': content_upload_date } youtube_video = APIRouter(prefix="/youtube", tags=["유튜브"]) @youtube_video.get( "", status_code=status.HTTP_200_OK, summary="유튜브 스크래핑", description="좋아요 수, 조회수, 영상 링크, 영상 제목 스크래핑", ) def get_video(keyword: str): results = scrape_youtube_results(keyword) return results
GeumBinLee/test
youtube/router.py
router.py
py
3,630
python
en
code
0
github-code
36
[ { "api_name": "random.uniform", "line_number": 21, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 23, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 25, "usage_type": "call" }, { "api_name": "selenium.webdriver.chrome.servic...
18586892058
import numpy import scipy.interpolate import scipy.optimize import scipy.stats # Columns of csv input file incols = ['name','Vinoc','dilmin','dilfac','ntot','ninf','comments'] # Columns added to csv output file outcols = ['mode','68lb','68ub','95lb','95ub','RM','SK'] # label/header for assay parameters label = { # input 'Vinoc': "Well volume (in mL)", 'dilmin': "Starting dilution", 'dilfac': "Dilution factor", 'ndils': "# dilutions", 'nreps': "# repeats/dilution", 'name': "Label", 'ninf': "# wells infected", 'ntot': "# wells total", 'comments': "Comment (optional)", # output 'mode': 'mode log10(SIN/mL)', '68lb': '68%CI-lo log10(SIN/mL)', '68ub': '68%CI-hi log10(SIN/mL)', '95lb': '95%CI-lo log10(SIN/mL)', '95ub': '95%CI-hi log10(SIN/mL)', 'RM': 'RM log10(TCID50/mL)', 'SK': 'SK log10(TCID50/mL)', } # help_text associated with assay parameters info = { 'Vinoc': "Typical value for 96-well plate is 0.1 mL.", 'dilmin': "Must be &le; 1 (e.g. 10-fold as 0.1, 4-fold as 0.25).", 'dilfac': "Must be &lt; 1 (e.g. 10-fold as 0.1, 4-fold as 0.25).", 'ndils': "Typically 7 or 8 or 11 or 12.", 'nreps': "Typically 4 or 6 or 8.", 'name': "An identifying label like StrainA-24h-exp1.", 'ninf': "A list separated by [,] [.] or [tab].", 'ntot': "A list separated by [,] [.] or [tab].", 'comments': "Can be anything you want (e.g. 
24h).", } # parameter values for the example assay example = { 'Vinoc': 0.1, 'dilmin': 0.01, 'dilfac': 0.1, 'ndils': 11, 'nreps': 8, 'name': "example", 'ninf': [8,8,8,8,8,7,7,5,2,0,0], 'ntot': [8,8,8,8,8,8,8,8,8,8,8], 'comments': '', } def RMSK(dilut,Npos,Ntot): # if only one well if len(Npos) < 2: return (numpy.nan,numpy.nan) # if no infected well elif numpy.sum(Npos) == 0.0: return (numpy.nan,numpy.nan) # if all wells infected elif numpy.sum(Ntot-Npos) == 0.0: return (numpy.nan,numpy.nan) df = abs( numpy.diff(dilut)[0] ) frac = 1.0*numpy.cumsum(Npos[::-1])[::-1] frac = frac/(frac+numpy.cumsum(Ntot-Npos)) # Reed-Muench idx = numpy.argmax(frac < 0.5)-1 propdist = (frac[idx]-0.5)/(frac[idx]-frac[idx+1]) RM = df*propdist - dilut[idx] # Spearman-Kaerber frac = 1.0*Npos/Ntot # comment out this line to use RM-like smoothing idx = numpy.argmin( frac < 1.0 ) if idx == 0: # if frac<1 in lowest dilution column frac = numpy.hstack((1.0,frac)) dilut = numpy.hstack((dilut[0]+df,dilut)) SK = df*numpy.trapz(frac[idx:]) - dilut[idx] return (RM,SK) class Assay(object): def __init__(self, Vinoc, dilmin, dilfac, ninf, ntot): # Save user input self.pack = {'Vinoc':Vinoc, 'dilmin':dilmin, 'dilfac':dilfac} # computer n (# of unspoiled wells) self.pack['ntot'] = numpy.array(ntot) # Compute k (# of wells infected) self.pack['ninf'] = numpy.array(ninf) # Compute n-k (# of wells uninfected) self.nmks = self.pack['ntot']-self.pack['ninf'] # Raise flag if no well was infected (lower limit of detection) if sum(self.pack['ninf']) == 0.0: self.isempty = True else: self.isempty = False # Raise flag if all wells were infected (upper limit of detection) if sum(self.nmks) == 0.0: self.isfull = True else: self.isfull = False # Compute arg of lnqbase = exp[ - Vinoc * dilmin * dilfac^pow ] self.VDs = Vinoc * dilmin * dilfac**numpy.arange(len(self.nmks)) # Compute the remainder of the assay payload self.payload() def lCmode(self): """ Computes the mode of the posterior PDF for lCvir, and the TCID50 via 
the Reed-Munch and Spearmann-Kaerber estimation methods. """ if 'mode' in self.pack.keys(): return self.pack['mode'] # If no infected well: give lC upper bound if self.isempty or self.isfull: self.pack['mode'] = numpy.nan return self.pack['mode'] # Estimate most likely lCvir value (mode of dist) bracket = -numpy.log10((self.VDs[0]*10.0, self.VDs[numpy.where(self.nmks)][0], self.VDs[-1]/10.0)) res = scipy.optimize.minimize_scalar(lambda x: -self.lCcalc(x), bracket=bracket) assert res.success, 'Could not find lC mode' self.pack['mode'] = res.x return self.pack['mode'] def lCcalc(self, lCvec): """ Compute posterior likelihood distribution, i.e. value of exp(lnProb), for all elements in vector lCvec, and returns it as a vector of the same size as lCvec, suitable for plotting. """ P = numpy.ones_like(lCvec) for VD,n,k in zip(-self.VDs,self.pack['ntot'],self.pack['ninf']): pinfvec = -numpy.expm1(10.0**lCvec*VD) P *= scipy.stats.binom.pmf(k,n,pinfvec) return P def lCdist(self, lCvec=None): """ Creates (if not provided) and stores the lCvir vector, stores the posterior PDF vector computed by lCcalc for the values in lCvir, and computes and stores the CDF vector corresponding to the PDF for the values in lCvir. 
""" if lCvec is None: if self.isempty or self.isfull: a = -numpy.log10(self.VDs[0])-10.0 b = -numpy.log10(self.VDs[-1])+10.0 lb = scipy.optimize.brentq(lambda x: self.lCcalc(x)-0.0001,a,b) ub = scipy.optimize.brentq(lambda x: self.lCcalc(x)-0.9999,a,b) lCvec = numpy.linspace(lb,ub,500) else: lCvec = numpy.arange(0.0,1.0,0.01) lCvec = numpy.hstack((lCvec-2,numpy.arange(-1.0,1.0,0.002),lCvec+1)) lCvec += self.lCmode() self.pack['lCvec'] = lCvec # Compute posterior likelihood distribution (pdf) for lVec self.pack['pdf'] = self.lCcalc(lCvec) # Compute CDF from posterior likelihood dist self.pack['cdf'] = numpy.cumsum(self.pack['pdf'][1:]*numpy.diff(self.pack['lCvec'])) # Re-normalize so that CDF is 1 at Cvir= max in lCvec self.pack['cdf'] = numpy.hstack((0.0,self.pack['cdf']))/self.pack['cdf'].max() def lCbounds(self): """ Computes and stores the 68% and 95% bounds of lCvir likelihood as a 4-element list: [68-lower,68-upper,95-lower, 95-upper]. """ if 'cdf' not in self.pack.keys(): self.lCdist() if self.isempty or self.isfull: return [numpy.nan]*4 ppf = scipy.interpolate.interp1d( self.pack['cdf'], self.pack['lCvec'], bounds_error=False, fill_value=0.0 ) subbounds = [] for frac in (0.68,0.95): res = scipy.optimize.minimize_scalar(lambda x: ppf(x+frac)-ppf(x),bounds=(0.0,1.0-frac),method='bounded') assert res.success, 'Could not find credible region.' subbounds += list( ppf([res.x,res.x+frac]) ) return subbounds def payload(self): # Compute Reed-Muench and Spearman-Kaerber for key,val in zip(('RM','SK'),RMSK(numpy.log10(self.VDs),self.pack['ninf'],self.pack['ntot'])): self.pack[key] = val self.pack['bounds'] = self.lCbounds() self.pack['dilutions'] = numpy.log10(self.VDs/self.pack['Vinoc']) self.pack['mode'] = self.lCmode() self.pack['mean'] = numpy.sum(self.pack['lCvec']*self.pack['pdf']) self.pack['mean'] /= self.pack['pdf'].sum() return self.pack
cbeauc/midSIN
src/__init__.py
__init__.py
py
6,705
python
en
code
4
github-code
36
[ { "api_name": "numpy.nan", "line_number": 65, "usage_type": "attribute" }, { "api_name": "numpy.sum", "line_number": 67, "usage_type": "call" }, { "api_name": "numpy.nan", "line_number": 68, "usage_type": "attribute" }, { "api_name": "numpy.sum", "line_number"...
34837660944
from IPython.display import clear_output def basic_info(): print("Welcome to Tic Tac Toe Board Game.") choice = "Wrong" choice1 = 'Wrong' while choice != 'Y': user1 = input("Please Your Name as User 1: ") choice = input(f"Your Name is {user1}, Correct? Y or N: ").upper() while choice1 != 'Y': user2 = input("Please Your Name as User 2: ") choice1 = input(f"Your Name is {user2}, Correct? Y or N: ").upper() return [user1,user2] def board_pattern(): board_1 = ["1","2","3"] #board_2 = ["-","-","-","-","-"] board_3 = ["4","5","6"] #board_4 = ["-","-","-","-","-"] board_5 = ["7","8","9"] borad = [board_1,board_3,board_5] return borad def playing(): board = board_pattern() count = 0 clear_output() while count < 9: varis = input("Select position and mark. (format: posi,mark) ") posi,mark = varis.split(',') posi = int(posi) if posi <= 3: # use to replace the board 1 row board[0][posi-1] = mark elif posi <= 6:# use to replace the board 2 row posi = posi - 3 board[1][posi-1] = mark elif posi <= 9:# use to replace the board 3 row posi = posi - 6 board[2][posi-1] = mark for new_board in board: print(new_board) if board[0][0] == board[0][1] == board[0][2] == mark: # these line that following can be use a better method to do it return True, mark elif board[1][0] == board[1][1] == board[1][2] == mark: return True, mark elif board[2][0] == board[2][1] == board[2][2] == mark: return True, mark elif board[0][0] == board[1][0] == board[2][0] == mark: return True, mark elif board[0][1] == board[1][1] == board[2][1] == mark: return True, mark elif board[0][2] == board[1][2] == board[2][2] == mark: return True, mark elif board[0][0] == board[1][1] == board[2][2] == mark: return True, mark elif board[0][2] == board[1][1] == board[2][0] == mark: return True, mark count += 1 return False, mark def interact(): user1,user2 = basic_info() #grab the basic info of users, return uesr1 and user2 hashmap = {user1:'X',user2:'O'} for board in board_pattern():# print origenal board print(board) 
continues_game = input('Do you want to continue the game? Y or N ').upper() if continues_game == 'Y': print(f"First user will always go first, and {user1} uses X and {user2} uses O") status,mark = playing() if status == True: print(f'{list(hashmap.keys())[list(hashmap.values()).index(mark)]} is the winner') # This line to use value to get key else: print('Game is tied. No winner.') elif continues_game == 'N': print('Closing Game.') interact()
aajmlao/Notes-for-learning-Python
project1.py
project1.py
py
2,942
python
en
code
0
github-code
36
[ { "api_name": "IPython.display.clear_output", "line_number": 26, "usage_type": "call" } ]
5784044260
import atexit import logging.config import logging.handlers import os import tempfile import zmq import slivka _context = zmq.Context() atexit.register(_context.destroy, 0) class ZMQQueueHandler(logging.handlers.QueueHandler): def __init__(self, address, ctx: zmq.Context = None): ctx = ctx or _context socket = ctx.socket(zmq.PUSH) socket.connect(address) super().__init__(socket) def emit(self, record): message = self.format(record) msg = record.__dict__.copy() msg.update( message=message, msg=message, args=None, exc_info=None, exc_text=None ) self.queue.send_json(msg) class ZMQQueueListener(logging.handlers.QueueListener): def __init__(self, address, handlers=(), ctx: zmq.Context = None): self._address = address ctx = ctx or _context self._ctx = ctx socket = ctx.socket(zmq.PULL) socket.bind(address) super().__init__(socket, *handlers, respect_handler_level=False) self.handlers = list(self.handlers) def __enter__(self): self.start() def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.cleanup() def add_handler(self, handler): self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler) def dequeue(self, block): msg = self.queue.recv_json() if msg == self._sentinel: self.queue.close(0) return msg else: return logging.makeLogRecord(msg) def enqueue_sentinel(self): socket = self._ctx.socket(zmq.PUSH) socket.connect(self._address) socket.send_json(self._sentinel) def stop(self): super().stop() self.queue.close(0) def cleanup(self): if self._address.startswith('ipc://'): os.unlink(self._address[6:]) def get_logging_sock(): from hashlib import md5 from base64 import b64encode home = slivka.conf.settings.directory.home suffix = b64encode(md5(home.encode()).digest()[:6], b'-_').decode() tmp = tempfile.gettempdir() path = 'ipc://%s/slivka-logging-%s.sock' % (tmp, suffix) return path def _get_default_logging_config(): return { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'full': { 'format': "%(asctime)s 
%(levelname)-10s %(name)s %(message)s", 'datefmt': "%d/%m/%y %H:%M:%S" }, 'minimal': { 'format': '%(levelname)s %(message)s' } }, 'handlers': { 'slivka.logging_queue': { 'class': 'slivka.conf.logging.ZMQQueueHandler', 'formatter': 'full', 'level': 'DEBUG', 'address': get_logging_sock() }, 'console': { 'class': 'logging.StreamHandler', 'formatter': 'minimal', 'level': 'DEBUG' } }, 'loggers': { 'slivka': { 'level': 'DEBUG', 'propagate': False, 'handlers': ['slivka.logging_queue', 'console'] } } } def configure_logging(config=None): config = config or _get_default_logging_config() logging.config.dictConfig(config)
bartongroup/slivka
slivka/conf/logging.py
logging.py
py
3,460
python
en
code
7
github-code
36
[ { "api_name": "zmq.Context", "line_number": 11, "usage_type": "call" }, { "api_name": "atexit.register", "line_number": 12, "usage_type": "call" }, { "api_name": "logging.config.handlers", "line_number": 15, "usage_type": "attribute" }, { "api_name": "logging.conf...
28198353586
""" FLUX: OPTIMUM RANGE =================== """ from math import isclose from pathlib import Path from typing import Literal import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import numpy as np import pandas as pd from matplotlib.legend_handler import HandlerTuple from pandas import DataFrame from diive.core.plotting.plotfuncs import save_fig class FindOptimumRange: def __init__(self, df: DataFrame, xcol: str, ycol: str, n_vals_per_bin: int = 300, bins_agg: Literal['median'] = 'median', rwinsize: float = 0.1, ragg: Literal['mean'] = 'mean', define_optimum: Literal['min', 'max'] = 'max'): """ Find x range for optimum y First, x data are aggregated in y bins. By default, the median value of x is calculated for each y bin (*bins_agg*). The number of bins that is used is defined by total length of data divided by *n_vals_per_bin*, i.e., each bin should contain e.g. 300 values. Then, the rolling mean (*ragg*) with window size *rwinsize* is calculated across all binned values. Here, *rwinsize* is given as the fraction of the total number of detected bins. The optimum is detected as the maximum (or other, *define_optimum*) of the values found in the rolling aggregation. Example: VPD (x) range where NEE (y) carbon uptake is highest (=smallest number) Args: df: Data xcol: Column name of x in df ycol: Column name of y in df n_vals_per_bin: Number of values per x bin bins_agg: How data in bins are aggregated rwinsize: Window size for rolling aggregation, expressed as fraction of the total number of bins. The total number of bins is calculated from the total length of the data and *n_vals_per_bin*. The resulting window size is then an integer value that is used in further calculations. If the integer window size results in an even number, +1 is added since the window size must be an odd number. ragg: Rolling aggregation that is used in the rolling window. 
define_optimum: Optimum can be based on 'min' or 'max' """ self.df = df[[xcol, ycol]].copy() self.xcol = xcol self.ycol = ycol self.n_vals_per_bin = n_vals_per_bin self.bins_agg = bins_agg self.rwinsize = rwinsize self.ragg = ragg self.define_optimum = define_optimum self._results_optrange = {} @property def results_optrange(self) -> dict: """Return optimum range results""" if not self._results_optrange: raise Exception('Results for optimum range are empty') return self._results_optrange def find_optimum(self): # self._prepare_data() todo? bins_df, bin_aggs_df, n_xbins = self._divide_xdata_into_bins() winsize = int(n_xbins * self.rwinsize) winsize = winsize + 1 if (winsize % 2 == 0) else winsize # Must be odd number rbin_aggs_df = self._rolling_agg(bin_aggs_df=bin_aggs_df, use_bin_agg=self.bins_agg, rolling_agg=self.ragg, winsize=winsize) roptimum_bin, roptimum_val = self._find_rolling_optimum(rolling_df=rbin_aggs_df, use_rolling_agg=self.ragg) # rwinsize = int(num_xbins / 5) # Window size for rolling aggs optimum_xstart, optimum_xend, optimum_ymean, \ optimum_start_bin, optimum_end_bin = self._get_optimum_range(grouped_df=bin_aggs_df, roptimum_bin=roptimum_bin, winsize=winsize) self._validate(roptimum_val=roptimum_val, optimum_ymean=optimum_ymean) vals_in_optimum_range_df = \ self._values_in_optimum_range(optimum_xstart=optimum_xstart, optimum_xend=optimum_xend) self._results_optrange = dict( optimum_xstart=optimum_xstart, optimum_xend=optimum_xend, optimum_ymean=optimum_ymean, optimum_start_bin=optimum_start_bin, optimum_end_bin=optimum_end_bin, bin_aggs_df=bin_aggs_df, rbin_aggs_df=rbin_aggs_df, rwinsize=winsize, roptimum_bin=roptimum_bin, roptimum_val=roptimum_val, n_xbins=n_xbins, xcol=self.xcol, ycol=self.ycol, vals_in_optimum_range_df=vals_in_optimum_range_df ) def _values_in_optimum_range(self, optimum_xstart: float, optimum_xend: float) -> pd.DataFrame: df = self.df[[self.xcol, self.ycol]].copy() # Full data range fullrange_df = 
df.groupby(df.index.year).agg({self.xcol: ['count', 'mean']}) xcounts_df = pd.DataFrame() # xcounts_df['vals_total'] = df.groupby(df.index.year).agg({'count'}) xcounts_df['vals_total'] = \ df.groupby(df.index.year).agg(vals_total=(self.xcol, 'count')) # Data in optimum _filter = (df[self.xcol] > optimum_xstart) & (df[self.xcol] <= optimum_xend) xcounts_df['vals_inoptimum'] = \ df.loc[_filter].groupby(df.loc[_filter].index.year).agg(vals_inoptimum=(self.xcol, 'count')) # Above optimum _filter = (df[self.xcol] > optimum_xend) xcounts_df['vals_aboveoptimum'] = \ df.loc[_filter].groupby(df.loc[_filter].index.year).agg(vals_aboveoptimum=(self.xcol, 'count')) # Below optimum _filter = (df[self.xcol] <= optimum_xstart) xcounts_df['vals_belowoptimum'] = \ df.loc[_filter].groupby(df.loc[_filter].index.year).agg(vals_belowoptimum=(self.xcol, 'count')) # Percentages xcounts_df['vals_inoptimum_perc'] = xcounts_df['vals_inoptimum'].div(xcounts_df['vals_total']).multiply(100) xcounts_df['vals_aboveoptimum_perc'] = xcounts_df['vals_aboveoptimum'].div(xcounts_df['vals_total']).multiply( 100) xcounts_df['vals_belowoptimum_perc'] = xcounts_df['vals_belowoptimum'].div(xcounts_df['vals_total']).multiply( 100) # NaNs correspond to zero, # e.g. if no values above optimum are found xcounts_df = xcounts_df.fillna(0) return xcounts_df def _prepare_data(self): # Keep x values > 0 self.df = self.df.loc[self.df[self.xcol] > 0, :] def _divide_xdata_into_bins(self) -> tuple[DataFrame, DataFrame, int]: """ Divide x data into bins Column w/ bin membership is added to data Args: n_xbins: number of bins """ bins_df = self.df.copy() # Detect number of x bins n_xbins = int(len(bins_df) / self.n_vals_per_bin) # Divide data into bins and add as column xbins = pd.qcut(bins_df[self.xcol], n_xbins, duplicates='drop') # How awesome! 
bins_df = bins_df.assign(xbins=xbins) # Aggregate by bin membership bin_aggs_df = bins_df.groupby('xbins').agg({self.bins_agg, 'count'}) return bins_df, bin_aggs_df, n_xbins def _rolling_agg(self, bin_aggs_df, use_bin_agg, winsize, rolling_agg): rolling_df = bin_aggs_df[self.ycol][use_bin_agg].rolling(winsize, center=True) return rolling_df.agg({rolling_agg, 'std'}).dropna() def _find_rolling_optimum(self, rolling_df: DataFrame, use_rolling_agg: str = 'mean'): """Find optimum bin in rolling data The rolling data is scanned for the bin with the highest or lowest value. """ # Find bin with rolling mean min or max (e.g. max carbon uptake = minimum NEE value) roptimum_bin = None # Index given as bin interval roptimum_val = None # Value at bin interval if self.define_optimum == 'min': roptimum_bin = rolling_df[use_rolling_agg].idxmin() roptimum_val = rolling_df[use_rolling_agg][roptimum_bin] elif self.define_optimum == 'max': roptimum_bin = rolling_df[use_rolling_agg].idxmax() roptimum_val = rolling_df[use_rolling_agg].iloc[roptimum_bin] print(f"Optimum {self.define_optimum} found in class: {roptimum_bin} / value: {roptimum_val}") return roptimum_bin, roptimum_val def _get_optimum_range(self, grouped_df: DataFrame, roptimum_bin: pd.IntervalIndex, winsize: int): """Get data range (start and end) that was used to calculate rolling optimum""" # Find integer location of bin where rolling optimum value (y min or y max) was found int_loc = grouped_df.index.get_loc(roptimum_bin) print(f"Index integer location of found optimum: {int_loc} / {grouped_df.index[int_loc]}") # Get data range start and end roptimum_start_ix = int_loc - (int(winsize / 2)) roptimum_end_ix = int_loc + (int(winsize / 2) + 1) # was +1 b/c end of range not included in slicing # Optimum end index cannot be larger than available indices roptimum_end_ix = len(grouped_df) - 1 if roptimum_end_ix > len(grouped_df) - 1 else roptimum_end_ix # Optimum start index cannot be smaller than the first available index 0 
roptimum_start_ix = 0 if roptimum_start_ix < 0 else roptimum_start_ix # Get data range indices optimum_start_bin = grouped_df.iloc[roptimum_start_ix].name optimum_end_bin = grouped_df.iloc[roptimum_end_ix].name optimum_range_xstart = optimum_start_bin.left optimum_range_xend = optimum_end_bin.right optimum_range_ymean = grouped_df[self.ycol]['median'].iloc[roptimum_start_ix:roptimum_end_ix].mean() return optimum_range_xstart, optimum_range_xend, optimum_range_ymean, \ optimum_start_bin, optimum_end_bin def _validate(self, roptimum_val, optimum_ymean): check = isclose(roptimum_val, optimum_ymean, abs_tol=10 ** -3) if check: print("Validation OK.") else: print("(!)Validation FAILED.") assert isclose(roptimum_val, optimum_ymean) def showfig(self, saveplot: bool = False, title: str = None, path: Path or str = None): fig = plt.figure(figsize=(16, 9)) gs = gridspec.GridSpec(4, 1) # rows, cols gs.update(wspace=.2, hspace=.5, left=.05, right=.95, top=.95, bottom=.05) ax1 = fig.add_subplot(gs[0:2, 0]) ax2 = fig.add_subplot(gs[2, 0]) ax3 = fig.add_subplot(gs[3, 0]) ax = self.plot_vals_in_optimum_range(ax=ax1) ax = self.plot_bin_aggregates(ax=ax2) ax = self.plot_rolling_bin_aggregates(ax=ax3) fig.show() if saveplot: save_fig(fig=fig, title=title, path=path) def plot_vals_in_optimum_range(self, ax): """Plot optimum range: values in, above and below optimum per year""" # kudos: https://matplotlib.org/stable/gallery/lines_bars_and_markers/horizontal_barchart_distribution.html#sphx-glr-gallery-lines-bars-and-markers-horizontal-barchart-distribution-py # Get data df = self.results_optrange['vals_in_optimum_range_df'].copy() plotcols = ['vals_inoptimum_perc', 'vals_aboveoptimum_perc', 'vals_belowoptimum_perc'] df = df[plotcols] df = df.round(1) # xcol = results_optrange['xcol'] # ycol = results_optrange['ycol'] # Names of categories, shown in legend above plot category_names = ['values in optimum range (%)', 'above optimum range (%)', 'below optimum range (%)'] # category_names = 
['vals_inoptimum_perc', 'vals_aboveoptimum_perc', 'vals_belowoptimum_perc'] # Format data for bar plot results = {} for ix, row in df.iterrows(): results[ix] = df.loc[ix].to_list() year_labels = list(results.keys()) data = np.array(list(results.values())) data_cum = data.cumsum(axis=1) category_colors = plt.colormaps['RdYlBu_r'](np.linspace(0.20, 0.80, data.shape[1])) # fig, ax = plt.subplots(figsize=(9.2, 5)) ax.invert_yaxis() ax.xaxis.set_visible(False) ax.set_xlim(0, np.sum(data, axis=1).max()) for i, (colname, color) in enumerate(zip(category_names, category_colors)): widths = data[:, i] starts = data_cum[:, i] - widths rects = ax.barh(year_labels, widths, left=starts, height=0.9, label=colname, color=color) r, g, b, _ = color text_color = 'white' if r * g * b < 0.5 else 'darkgrey' ax.bar_label(rects, label_type='center', color=text_color) ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1), loc='lower left', fontsize='small') # default_format(ax=ax, txt_xlabel="year", txt_ylabel=f'counts', # txt_ylabel_units='[#]') # default_grid(ax=ax) return ax def plot_bin_aggregates(self, ax): """Plot y median in bins of x""" # Get data bin_aggs_df = self.results_optrange['bin_aggs_df'].copy() xcol = self.results_optrange['xcol'] ycol = self.results_optrange['ycol'] n_xbins = self.results_optrange['n_xbins'] optimum_start_bin = self.results_optrange['optimum_start_bin'] optimum_end_bin = self.results_optrange['optimum_end_bin'] optimum_xstart = self.results_optrange['optimum_xstart'] optimum_xend = self.results_optrange['optimum_xend'] # Find min/max of y, used for scaling yaxis ymax = bin_aggs_df[ycol]['median'].max() ymin = bin_aggs_df[ycol]['median'].min() ax.set_ylim(ymin, ymax) # Show rolling mean bin_aggs_df[ycol]['median'].plot(ax=ax, zorder=99, title=f"{ycol} medians in {n_xbins} bins of {xcol}") # Show optimum range optimum_start_bin_ix = bin_aggs_df.index.get_loc(optimum_start_bin) optimum_end_bin_ix = bin_aggs_df.index.get_loc(optimum_end_bin) 
ax.axvline(optimum_start_bin_ix) ax.axvline(optimum_end_bin_ix) area_opr = ax.fill_between([optimum_start_bin_ix, optimum_end_bin_ix], ymin, ymax, color='#FFC107', alpha=0.5, zorder=1, label=f"optimum range {self.define_optimum} between {optimum_xstart} and {optimum_xend}") l = ax.legend( [area_opr], [area_opr.get_label()], scatterpoints=1, numpoints=1, handler_map={tuple: HandlerTuple(ndivide=None)}, ncol=2) def plot_rolling_bin_aggregates(self, ax): """Plot rolling mean of y medians in bins of x""" # Get data rbin_aggs_df = self.results_optrange['rbin_aggs_df'].copy() xcol = self.results_optrange['xcol'] ycol = self.results_optrange['ycol'] n_xbins = self.results_optrange['n_xbins'] optimum_start_bin = self.results_optrange['optimum_start_bin'] optimum_end_bin = self.results_optrange['optimum_end_bin'] optimum_xstart = self.results_optrange['optimum_xstart'] optimum_xend = self.results_optrange['optimum_xend'] # Find min/max across dataframe, used for scaling yaxis rbin_aggs_df['mean+std'] = rbin_aggs_df['mean'].add(rbin_aggs_df['std']) rbin_aggs_df['mean-std'] = rbin_aggs_df['mean'].sub(rbin_aggs_df['std']) dfmax = rbin_aggs_df[['mean+std', 'mean-std']].max().max() dfmin = rbin_aggs_df.min().min() ax.set_ylim(dfmin, dfmax) # Show rolling mean rbin_aggs_df.plot(ax=ax, y='mean', yerr='std', zorder=99, title=f"Rolling mean of {ycol} medians in {n_xbins} bins of {xcol}") # Show optimum range optimum_start_bin_ix = rbin_aggs_df.index.get_loc(optimum_start_bin) optimum_end_bin_ix = rbin_aggs_df.index.get_loc(optimum_end_bin) ax.axvline(optimum_start_bin_ix) ax.axvline(optimum_end_bin_ix) area_opr = ax.fill_between([optimum_start_bin_ix, optimum_end_bin_ix], dfmin, dfmax, color='#FFC107', alpha=0.5, zorder=1, label=f"optimum range {self.define_optimum} between {optimum_xstart} and {optimum_xend}") l = ax.legend( [area_opr], [area_opr.get_label()], scatterpoints=1, numpoints=1, handler_map={tuple: HandlerTuple(ndivide=None)}, ncol=2) def example(): 
pd.options.display.width = None pd.options.display.max_columns = None pd.set_option('display.max_rows', 3000) pd.set_option('display.max_columns', 3000) # Test data from diive.core.io.files import load_pickle df_orig = load_pickle( filepath=r"L:\Dropbox\luhk_work\20 - CODING\26 - NOTEBOOKS\GL-NOTEBOOKS\_data\ch-dav\CH-DAV_FP2022.1_1997-2022.08_ID20220826234456_30MIN.diive.csv.pickle") # # Check columns # import fnmatch # [print(col) for col in alldata_df.columns if any(fnmatch.fnmatch(col, ids) for ids in ['NEE_CUT_50*'])] # Select daytime data between May and September df = df_orig.copy() df = df.loc[(df.index.month >= 5) & (df.index.month <= 9)] df = df.loc[df['PotRad_CUT_REF'] > 20] # Optimum range optrange = FindOptimumRange(df=df, xcol='RH', ycol='NEE_CUT_REF_f', define_optimum="min", rwinsize=0.3) optrange.find_optimum() optrange.plot_results() if __name__ == '__main__': example()
holukas/diive
diive/pkgs/analyses/optimumrange.py
optimumrange.py
py
18,102
python
en
code
0
github-code
36
[ { "api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "name" }, { "api_name": "typing.Literal", "line_number": 27, "usage_type": "name" }, { "api_name": "typing.Literal", "line_number": 29, "usage_type": "name" }, { "api_name": "typing.Literal", "...
8567341457
import numpy as np import pandas as pd from scipy.stats import norm from sklearn.preprocessing import MinMaxScaler from dash import ALL from dash.dependencies import Input, Output, State from dash.exceptions import PreventUpdate from contents.app import * @app.callback( Output('var-plot-sliders-container', 'children'), Input('var-plot-sliders', 'value'), State('the-data', 'data'), ) def render_var_plot_sliders(sliders, data): if sliders and data: df = pd.DataFrame.from_dict(data) children = [] for slider in sliders: if slider == 'index': min = df.index.min() max = df.index.max() else: min = df[slider].min() max = df[slider].max() children.append(html.Div([ html.B(slider, style={'margin-top': '5px', 'white-space': 'nowrap'}), html.Div(style={'margin-top': '5px'}), html.Div([ dcc.RangeSlider( id={'type': 'var-plot-slider', 'index': slider}, min=min, max=max, value=[min, max], marks=None, tooltip={'always_visible': False, 'placement': 'bottom'}, ) ], style={'width': '100%', 'margin-top': '10px'}) ], style={'display': 'flex'})) return children return [] @app.callback( Output('var-plot-container', 'children'), Input('var-plot-type', 'value'), Input('var-plot-scale-data', 'on'), Input('var-plot-sliders', 'value'), Input({'type': 'var-plot-slider', 'index': ALL}, 'value'), State('the-data', 'data') ) def render_var_plot(plot_type, scale_data, feature_filters, filter_ranges, data): if plot_type and data: df = pd.DataFrame.from_dict(data) if feature_filters and filter_ranges: for feature, range in zip(feature_filters, filter_ranges): if feature == 'index': df = df[(df.index >= range[0]) & (df.index <= range[1])] else: df = df[(df[feature] >= range[0]) & (df[feature] <= range[1])] if scale_data: df = normalize_df(df) if plot_type == 'box': return [ dcc.Graph( figure={ 'data': [go.Box(y=df[col], name=col, boxpoints='outliers') for col in df.columns], 'layout': go.Layout( title='Feature Box Plot', paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', 
font=dict(color='#FFFFFF'), xaxis=dict(showgrid=False), yaxis=dict(showgrid=False), ), } ), ] elif plot_type == 'violin': return [ dcc.Graph( figure={ 'data': [go.Violin(y=df[col], name=col, points='outliers', meanline_visible=True) for col in df.columns], 'layout': go.Layout( title='Feature Violin Plot', paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', font=dict(color='#FFFFFF'), xaxis=dict(showgrid=False), yaxis=dict(showgrid=False), ), } ), ] return [] @app.callback( Output('dist-plot-sliders-container', 'children'), Input('dist-plot-sliders', 'value'), State('the-data', 'data'), ) def render_dist_plot_sliders(sliders, data): if sliders and data: df = pd.DataFrame.from_dict(data) children = [] for slider in sliders: if slider == 'index': min = df.index.min() max = df.index.max() else: min = df[slider].min() max = df[slider].max() children.append(html.Div([ html.B(slider, style={'margin-top': '5px', 'white-space': 'nowrap'}), html.Div(style={'margin-top': '5px'}), html.Div([ dcc.RangeSlider( id={'type': 'dist-plot-slider', 'index': slider}, min=min, max=max, value=[min, max], marks=None, tooltip={'always_visible': False, 'placement': 'bottom'}, ) ], style={'width': '100%', 'margin-top': '10px'}) ], style={'display': 'flex'})) return children return [] @app.callback( Output('dist-plot-container', 'children'), Input('dist-plot-scale-data', 'on'), Input('dist-plot-feature', 'value'), Input('dist-plot-distributions', 'value'), Input('dist-plot-sliders', 'value'), Input({'type': 'dist-plot-slider', 'index': ALL}, 'value'), State('the-data', 'data'), ) def render_dist_plot(scale_data, feature, distributions, feature_filters, filter_ranges, data): if feature and distributions and data: df = pd.DataFrame.from_dict(data) if feature_filters and filter_ranges: for feature, range in zip(feature_filters, filter_ranges): if feature == 'index': df = df[(df.index >= range[0]) & (df.index <= range[1])] else: df = df[(df[feature] >= range[0]) & (df[feature] <= 
range[1])] if scale_data: df = normalize_df(df) graph = dcc.Graph( figure=go.Figure( layout=go.Layout( title='Empirical vs Theoretical Distributions', paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', font=dict(color='#FFFFFF'), xaxis=dict( title=feature, titlefont=dict(color='#FFFFFF'), showgrid=False, ), yaxis=dict( title='Density', titlefont=dict(color='#FFFFFF'), showgrid=False, ), ) ) ) graph.figure.add_trace( go.Histogram( x=df[feature], name=feature, histnorm='probability density', marker=dict(color='#37699b'), ) ) for dist in distributions: if dist == 'normal': mu, std = norm.fit(df[feature]) x = np.linspace(min(df[feature]), max(df[feature]), 100) p = norm.pdf(x, mu, std) graph.figure.add_trace( go.Scatter( x=x, y=p, mode='lines', name='Normal', ) ) elif dist == 'lognormal': pass return graph return [] # Helper Methods def normalize_df(df): """ Normalize a dataframe with MinMaxScaler (Keep the column names) """ cols = df.columns df = pd.DataFrame(MinMaxScaler().fit_transform(df)) df.columns = cols return df
ThomasHuggett/Quant-Toolkit-main
contents/_analysis_tools/distributions.py
distributions.py
py
7,532
python
en
code
0
github-code
36
[ { "api_name": "pandas.DataFrame.from_dict", "line_number": 20, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "attribute" }, { "api_name": "dash.dependencies.Output", "line_number": 14, "usage_type": "call" }, { "api_nam...
16146023785
import datetime as dt f = open("def.dat", "r") CLAIM_NUM = int(f.readline()) HST_RATE = float(f.readline) CURR_DATE = dt.datetime.now() f.close() while True: emp_name = input("Employee name: ") emp_num = input("Employee number: ") location = input("Location: ") start_date = "2023-11-06" end_date = "2023-11-09" num_days = 3 car_status = "O" num_km = 0 if car_status == "O": num_km = 1400 if num_days <= 3: per_diem = num_days * 85.00 else: per_diem = num_days * 100.00 if car_status == "O": mileage = num_km * 0.10 else: mileage = num_days * 56.00 claim_amt = per_diem + mileage taxes = claim_amt * HST_RATE claim_total = claim_amt + taxes print(f"Claim num: {CLAIM_NUM}") print(f"Employee num: {emp_num}") print(f"Employee name: {emp_name}") print(f"Location: {location}") print(f"Claim total: ${claim_total}") # write data to a file called claims.dat f = open("claims.dat", "a") # a for append f.write(f"{CLAIM_NUM}, ") f.write(f"{str(CURR_DATE)}, ") f.write(f"{num_days}\n") CLAIM_NUM += 1 print("Claim data written.") cont = input("Continue?: ").upper() if cont == "N": break f = open("def.dat", "w") f.write(f"{CLAIM_NUM}\n") f.write(f"{HST_RATE}\n") f.close() print("Thank you for using the claim processing program.")
sweetboymusik/Python
Lesson 29/question.py
question.py
py
1,411
python
en
code
0
github-code
36
[ { "api_name": "datetime.datetime.now", "line_number": 7, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 7, "usage_type": "attribute" } ]
12338761878
################011011100110010101101111#### ### neo Command Line ####################### ############################################ def getcmdlist(): cmds = { "f" :"Find And Replace : Find and replace in family parameters.", "froxl" :"Family Replacer : Open Excel file.", "frp" :"Family Replacer : in Project : Replace family and types of instances edited in Excel.", "frv" :"Family Replacer : in ActiveView : Replace family and types of instances edited in Excel." } return cmds def runcmd(cmd, msg, recallCL=False): if cmd == 'f': from lib.find import neo_findreplace_main as find find.Main() elif cmd == 'froxl': from lib.xl import neo_xl_type_replacer as tyrep tyrep.GetWb() elif cmd == 'frp': from lib.xl import neo_xl_type_replacer as tyrep tyrep.ImportXl("Project") elif cmd == 'frv': from lib.xl import neo_xl_type_replacer as tyrep tyrep.ImportXl("ActiveView") else: from neocl import unknowncmd unknowncmd(cmd, recallCL, getcmdlist())
0neo/pyRevit.neoCL
neoCL.extension/neocl_f.py
neocl_f.py
py
1,152
python
en
code
7
github-code
36
[ { "api_name": "lib.find.neo_findreplace_main.Main", "line_number": 18, "usage_type": "call" }, { "api_name": "lib.find.neo_findreplace_main", "line_number": 18, "usage_type": "name" }, { "api_name": "lib.xl.neo_xl_type_replacer.GetWb", "line_number": 21, "usage_type": "ca...
20890209940
import os import glob import pickle import logging import argparse from multiprocessing import Pool import numpy as np import pandas as pd from core.utils import timer, do_job # PATH DATA_PATH = os.getenv("DATA_PATH") PREPROCESSED_DATA_PATH = os.getenv("PREPROCESSED_DATA_PATH") TXT_DATA_NAME = os.getenv("TXT_DATA_NAME") print(TXT_DATA_NAME) DW2V_PATH = os.getenv("DW2V_PATH") PARAM_PATH = os.getenv("PARAM_PATH") # Logger LOGGER = logging.getLogger('JobLogging') LOGGER.setLevel(10) fh = logging.FileHandler('job.log') LOGGER.addHandler(fh) formatter = logging.Formatter('%(asctime)s:%(lineno)d:%(levelname)s:%(message)s') fh.setFormatter(formatter) LOGGER.info("job start") parser = argparse.ArgumentParser(description='train Dynamic Word Embeddings') parser.add_argument('--without_preprocess', type=int, default=0, metavar='N', help='if preprocessor is not neccessary, set 1') parser.add_argument('--n_job', type=str, default="10", metavar='N', help='number of cpu for multiprocessing') parser.add_argument('--word_freq_min', type=str, default="5", metavar='N', help='minmiun freqency for target word') args = parser.parse_args() os.environ["N_JOB"] = args.n_job os.environ["WORD_FREQ_MIN"] = args.word_freq_min N_JOB = int(os.getenv("N_JOB")) if __name__ =="__main__": if args.without_preprocess == 0: # 前処理 with do_job("preprocess tweet", LOGGER): from core.preprocess_tweet import preprocess_one_day_tweet TWEETS_PATHS = glob.glob(DATA_PATH+"alldata_20*") if not os.path.exists(PREPROCESSED_DATA_PATH+TXT_DATA_NAME): os.mkdir(PREPROCESSED_DATA_PATH+TXT_DATA_NAME) with Pool(processes=N_JOB) as p: p.map(preprocess_one_day_tweet, TWEETS_PATHS) # 単語の共起を確認 with do_job("make co occ dict", LOGGER): from core.make_DW2V import make_unique_word2idx, make_whole_day_co_occ_dict TWEETS_PATHS = glob.glob(PREPROCESSED_DATA_PATH+TXT_DATA_NAME+"/*") # 全単語のチェック make_unique_word2idx(TWEETS_PATHS) if not os.path.exists(PREPROCESSED_DATA_PATH+"co_occ_dict_word_count/"): 
os.mkdir(PREPROCESSED_DATA_PATH+"co_occ_dict_word_count/") TWEETS_PATHS = glob.glob(PREPROCESSED_DATA_PATH+TXT_DATA_NAME+"/*") make_whole_day_co_occ_dict(TWEETS_PATHS) # PPMIの計算 with do_job("make PPMI", LOGGER): from core.make_DW2V import make_whole_day_ppmi_list TWEETS_PATHS = sorted(glob.glob(PREPROCESSED_DATA_PATH+TXT_DATA_NAME+"/*")) DICTS_PATHS = sorted(glob.glob(PREPROCESSED_DATA_PATH+"co_occ_dict_word_count/*")) PATH_TUPLES = [(tweet_p, dict_p) for tweet_p, dict_p in zip(TWEETS_PATHS, DICTS_PATHS)] make_whole_day_ppmi_list(PATH_TUPLES) # DW2Vの計算 with do_job("make DW2V", LOGGER): from core.make_DW2V import make_DW2V make_DW2V(PARAM_PATH+"params_0803.json")
GENZITSU/DynamicWordEmbedding
main.py
main.py
py
3,016
python
en
code
1
github-code
36
[ { "api_name": "os.getenv", "line_number": 14, "usage_type": "call" }, { "api_name": "os.getenv", "line_number": 15, "usage_type": "call" }, { "api_name": "os.getenv", "line_number": 16, "usage_type": "call" }, { "api_name": "os.getenv", "line_number": 18, ...
31246785423
import pandas as pd import scipy.stats as stats import operator import numpy as np from time import sleep as sl import argparse from sklearn.metrics import pairwise_distances,pairwise_distances_chunked from sklearn.cluster import AgglomerativeClustering,DBSCAN import time from datetime import timedelta import sys from datetime import date def parseargs(): parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-e','--mgt_epi_data',help = 'csv file with MGT and epidemiological information') parser.add_argument('-m','--mgt_level',help='the level of mgt for clustering, e.g. MGT9 ') parser.add_argument('-n','--number_iso_threshold',help = 'the number of isolates threshold for each type in the tested MGT level') parser.add_argument('-o','--outpath',help='the path of outfiles, e.g. /srv/scratch') parser.add_argument('-f','--mgt_flags',help='the csv file of the flag MGT-STs for meta data') parser.add_argument('-p','--prefix',help='the prefix for the outfile names') parser.add_argument('-c','--country',help='country for filtering isolates for pairwise distance analysis, e.g. -c Australia') parser.add_argument('-t','--transtype',help='international or national cluster, or a list of ODC10 STs in a .txt file without heading, e.g. -t international') parser.add_argument('-a','--whole_allele_profile',help='Allele profile of MGT9, e.g. 
/srv/scratch/mgt9_alleprofile.txt') parser.add_argument("-d", "--distances", help="file containing distances corresponding to the alleleprofiles file (from previous run of this script if applicable)") parser.add_argument("-l", "--dist_limits", help="comma separated list of cluster cutoffs or range or both i.e 1,2,5 or 1-10 or 1,2,5-10, note ODC0/MGT9 were automatically given",default="1,2,5,10") parser.add_argument("-j", "--no_jobs", help="num jobs to split distance calc into", default=1) args = parser.parse_args() return args def main(): t1 = time.time() args = parseargs() mgt_epi = pd.read_csv(args.mgt_epi_data) threshod_dic = {args.mgt_level: int(args.number_iso_threshold)} if args.mgt_flags == None: mgt_threshod, type_dic, isolate_dic, type_isolist_dic = transmi_tracking(threshod_dic, mgt_epi) if args.mgt_flags != None: flags = pd.read_csv(args.mgt_flags) mgt_threshod, type_dic, isolate_dic, type_isolist_dic = transmi_tracking_flags(threshod_dic, mgt_epi, flags) typedf = pd.DataFrame.from_dict(type_dic, orient='index') typedf.index.name = mgt_threshod isolate_df = pd.DataFrame.from_dict(isolate_dic, orient='index') isolate_df.index.name = 'Accession' typedflist = typedf.columns.tolist() + typedf.values.tolist() isolate_df.to_csv(args.outpath + '/' + args.prefix + args.mgt_level + '_isolate_transmission_link.csv') typedf.to_csv(args.outpath + '/' + args.prefix + args.mgt_level + '_mgttype_transmission_link.csv') ###### allele profile getting for pairwise calculation, if args.country != None: odc_pairwise_list = [] if args.transtype == 'international': typedf2 = typedf[typedf['no_country'] >= 2] ### >=2 is international type_country_dic = typedf2['country_detail'].to_dict() for type, subdic in type_country_dic.items(): for c in subdic.keys(): if args.country == c : no_iso_country = subdic[c] if no_iso_country>= 1 and args.mgt_level == "ODC10": ####### to set the threshold of >= 1 for each cluster in Australia. 
if type not in odc_pairwise_list and type != "None": odc_pairwise_list.append(type) if args.mgt_level != "ODC10": if type not in odc_pairwise_list and type != "None": odc_pairwise_list.append(type) print({"Total No. of types for pairwise distance calculation" : len(odc_pairwise_list)}) print(odc_pairwise_list) if args.transtype == 'national': typedf2 = typedf[typedf['no_country'] == 1] ### >=2 is international; == 1 is national type_country_dic = typedf2['country_detail'].to_dict() for type, subdic in type_country_dic.items(): for c in subdic.keys(): if args.country == c : no_iso_country = subdic[c] if no_iso_country>= 2 and args.mgt_level == "ODC10": ####### >=2 isolates for national transmission if type not in odc_pairwise_list and type != "None": odc_pairwise_list.append(type) if args.mgt_level != "ODC10": if type not in odc_pairwise_list and type != "None": odc_pairwise_list.append(type) print({"Total No. of types for pairwise distance calculation" : len(odc_pairwise_list)}) print(odc_pairwise_list) if args.transtype == None: typedf2 = typedf[typedf['no_country'] >= 1] ### including both international and national type_country_dic = typedf2['country_detail'].to_dict() for type, subdic in type_country_dic.items(): for c in subdic.keys(): if args.country == c : no_iso_country = subdic[c] if no_iso_country>= 2 and args.mgt_level == "ODC10": ####### >=2 isolates for national transmission if type not in odc_pairwise_list and type != "None": odc_pairwise_list.append(type) if args.mgt_level != "ODC10": if type not in odc_pairwise_list and type != "None": odc_pairwise_list.append(type) print({"Total No. 
of types for pairwise distance calculation" : len(odc_pairwise_list)}) print(odc_pairwise_list) if args.transtype != None and ".txt" in args.transtype: odc_pairwise_list=open(args.transtype,'r').read().splitlines() # odc_pairwise_list = ['4969'] for type in odc_pairwise_list : if type in type_isolist_dic: print(args.mgt_level + '_' + type) # time_pw(args, mgt_epi, args.mgt_level, type, args.outpath) ### to save the type correlated acc list isolatelistfile = open(args.outpath + '/' + args.mgt_level + '_' + type + '_' + args.country + '_correlated_isolatelist.txt','w') isolatelistfile.write(args.mgt_level + '_' + type + '\n') for acc in type_isolist_dic[type]: isolatelistfile.write(acc + '\n') ### to calculate the pairwise distance of alleles if args.whole_allele_profile != "": allele_prof = open(args.whole_allele_profile, "r").read().splitlines() allele_proflist = get_part_alleleprofil(allele_prof, type_isolist_dic[type]) allele_prof_outfile = open(args.outpath + '/' + args.mgt_level + '_' + type + '_alleleprof.txt', 'w') allele_prof_outlist=[] for a in allele_proflist: allele_prof_outlist.append(a) allele_prof_outfile.write(a + '\n') profs, id_to_strain = process_profiles(allele_prof_outlist) pairw_outfrefix=args.outpath + '/' + args.mgt_level + '_' + type + '_' pairwise_process(args, profs, id_to_strain, pairw_outfrefix) t2 = timecal(t1) ## iso_process_profiles() and iso_pairwise_process() are for pairwise distance of isolates in Australia # iso_profs, iso_id_to_strain = iso_process_profiles(allele_prof_outlist) # print(iso_id_to_strain) # iso_pairwise_process(args, iso_profs, iso_id_to_strain, pairw_outfrefix) def timecal(uptime): timespan = time.time() - uptime print(timedelta(seconds=timespan)) return time.time() def time_metric(a, b): match = 0 missmatch = 0 a = [int(x) for x in a] b = [int(x) for x in b] d0 = date(a[0], a[1], a[2]) d1 = date(b[0], b[1], b[2]) dayinterv = abs((d1 - d0).days) return dayinterv ###note columns have to include 
'Accession','Collection Year','Collection Month','Collection Day'. def time_pw(args, metadf,odc,type,outfrefixout): # metadf = pd.read_csv(metapath) metadf[odc] = metadf[odc].astype(str) metadf = metadf[(metadf[odc]==type) | (metadf[odc]== str(type))] timedf = pd.DataFrame(metadf, columns=['Accession','Collection Year','Collection Month','Collection Day']) # timedf = pd.read_csv('E:/2018/2019-06-14-Australia_SEN/test/time.csv') # # timedf['d'] = pd.to_datetime(timedf['Date'],format = '%Y/%m/%d') timedf['Collection Day']=timedf['Collection Day'].replace(np.nan,15) timedf = timedf[timedf['Collection Month'].notnull()] print({'time_input_dfsize': timedf.shape[0]}) datedf=pd.DataFrame(timedf,columns=['Collection Year','Collection Month','Collection Day']) acclist = timedf['Accession'].values.tolist() start_time = time.time() if datedf.shape[0]>1: dayinterv = pairwise_distances(datedf, metric=time_metric, n_jobs=int(args.no_jobs)) # pairw_outfrefix = 'E:/2018/2019-06-14-Australia_SEN/test/time_pairwise_'+ type + '_' if len(dayinterv) >=2 : print("pairwise distance time", (" --- %s seconds ---" % (time.time() - start_time))) np.savetxt(outfrefixout + odc +'_' +type + '_'+"time_pwdistances.txt", dayinterv.astype(int), fmt='%i', header=",".join(acclist), delimiter=",") def unneg(a): if "-" in a: return a.split("_")[0][1:] else: return a def mgt_dist_metric(a, b): match = 0 missmatch = 0 for i in range(len(a)): aAllele = a[i] bAllele = b[i] # print(aAllele,bAllele) # sl(0.1) if aAllele == 0 or bAllele == 0 or aAllele == bAllele: match += 1 else: missmatch += 1 # print(aAllele,bAllele) return missmatch def process_profiles(inprofiles, s=False): profs = {} id_to_strain = {} for line in inprofiles[1:]: col = line.split("\t") if s: if col[0] in s: # print(col[0]) if col[1] not in profs: noneg = [unneg(x) for x in col[3:]] profs[col[1]] = noneg id_to_strain[col[1]] = [str(col[0])] else: id_to_strain[col[1]].append(str(col[0])) else: # print(col[0]) if col[1] not in profs: noneg 
= [unneg(x) for x in col[3:]] profs[col[1]] = noneg id_to_strain[col[1]] = [str(col[0])] else: id_to_strain[col[1]].append(str(col[0])) return profs, id_to_strain def pairwise_process(args,profs,id_to_strain, pairw_outfrefix): idlist = list(profs.keys()) inprofs = [profs[x] for x in idlist] dfo = pd.DataFrame(inprofs) # distances only calculated if args.distances not set lend = "" if args.distances: # read in distances previosly calculated d = np.loadtxt(args.distances) lend = len(d) ### number of MGT9 STs in this cluster else: start_time = time.time() d = pairwise_distances(inprofs, metric=mgt_dist_metric, n_jobs=int(args.no_jobs)) lend = len(d) # if len(d) >=2 : print("pairwise distance time", (" --- %s seconds ---" % (time.time() - start_time))) np.savetxt(pairw_outfrefix + "mgt9_distances.txt", d.astype(int), fmt='%i', header=",".join(idlist), delimiter=",") # distance cutoffs to calculate if lend >=2: pairw_outfile = open(pairw_outfrefix + 'iso_odc_recal.txt','w') diststring = args.dist_limits dists = diststring.split(",") distances = [] for i in dists: if "-" in i: n = i.split("-") nlist = list(range(int(n[0]) + 1, int(n[1]) + 2)) # distance cutoffs seems to be non inclusive i.e. 
cutoff of 3 means max distance is 2 # therefore need to add 1 to all values else: nlist = [int(i) + 1] distances += nlist clusterlists = {} preference = [] for id in idlist: preference.append(len(id_to_strain[id])) start_time = time.time() for dist in distances: clusters = AgglomerativeClustering(n_clusters=None, distance_threshold=dist, affinity="precomputed", linkage="single").fit_predict(d) clusterls = list(clusters) clusterlists[dist] = clusterls print("clustering time", (" --- %s seconds ---" % (time.time() - start_time))) realdists = ["ODC" + str(x - 1) for x in distances] pairw_outfile.write("Strain\tMGT9\t{}\n".format("\t".join(realdists))) for i in range(len(idlist)): id = idlist[i] for strain in id_to_strain[id]: pairw_outfile.write(strain + '\t' + str(id)) for d in distances: clust = clusterlists[d][i] pairw_outfile.write("\t" + str(clust + 1)) pairw_outfile.write("\n") pairw_outfile.close() if lend < 2: ### belong to the same MGT9 ST pairw_outfile = open(pairw_outfrefix + 'iso_odc_recal.txt','w') pairw_outfile.write('Strain' + '\t' + 'MGT9' + '\n') for st, isolist in id_to_strain.items(): for iso in isolist: pairw_outfile.write(str(iso) + '\t' + str(st) + '\n') return ##### pairwise distance calculation ### iso_process_profiles() and iso_pairwise_process() are for pairwise distance of isolates in Australia def iso_process_profiles(inprofiles, s=False): profs = {} id_to_strain = {} for line in inprofiles[1:]: col = line.split("\t") if s: if col[0] in s: # print(col[0]) if col[0] not in profs: noneg = [unneg(x) for x in col[3:]] profs[col[0]] = noneg id_to_strain[col[0]] = [str(col[1])] else: id_to_strain[col[0]].append(str(col[1])) else: # print(col[0]) if col[0] not in profs: noneg = [unneg(x) for x in col[3:]] profs[col[0]] = noneg id_to_strain[col[0]] = [str(col[1])] else: id_to_strain[col[0]].append(str(col[1])) return profs, id_to_strain def iso_pairwise_process(args,profs,id_to_strain, pairw_outfrefix): idlist = list(profs.keys()) # print(idlist) 
inprofs = [profs[x] for x in idlist] # distances only calculated if args.distances not set lend = "" if args.distances: # read in distances previosly calculated d = np.loadtxt(args.distances) lend = len(d) else: start_time = time.time() d = pairwise_distances(inprofs, metric=mgt_dist_metric, n_jobs=int(args.no_jobs)) lend = len(d) if len(d) >=2 : print("pairwise distance time", (" --- %s seconds ---" % (time.time() - start_time))) np.savetxt(pairw_outfrefix + "iso_distances.txt", d.astype(int), fmt='%i', header=",".join(idlist), delimiter=",") def epi_filt(mgt_epi,fi_dic): col_name = mgt_epi.columns.values.tolist() mgt_epi = mgt_epi.values.tolist() for key in fi_dic: epi_filter_out = [] for line in mgt_epi: line_index = col_name.index(key) line_value = str(line[line_index]) if line_value in fi_dic[key]: epi_filter_out.append(line) mgt_epi = epi_filter_out mgt_epi = pd.DataFrame(mgt_epi) mgt_epi.columns = col_name print(mgt_epi.shape) return mgt_epi def flag_reprot(flag_input, test_file,key_list): flag_input = flag_input.set_index('MGT_type') dic_file = flag_input.to_dict() mgt_levels_list = test_file.columns.tolist() mgt_levels_list = [a for a in mgt_levels_list if "MGT" in a] for level in mgt_levels_list: test_file[level] = level + test_file[level].astype(str) test_list = test_file.values.tolist() keyflag_dic = {} for k1 in dic_file: if k1 in key_list: # outfile = open(outpath + '/' + k1 + '.txt','w') dic_file2 = {k:v for k, v in dic_file[k1].items() if "nan" not in str(v)} mgtst_list = [] for line in test_list: for value in line: if value in dic_file2.keys(): mgtst = value mgtst_list.append(mgtst) strain_name = line [0] predict_types = dic_file2[value] # output = "{}\t{}\t{}".format(strain_name,predict_types,mgtst) # print(output) # outfile.write(output + '\n') mgtst_ser = pd.Series(mgtst_list) keyflag_dic[k1] = mgtst_ser.value_counts().to_dict() return keyflag_dic def transmi_tracking_flags(threshod_dic, mgt_epi,flags): for mgt in threshod_dic: gp = 
mgt_epi.groupby([ mgt])['Strain'].count().fillna(0) pass_filter_type_dic = gp [gp>= threshod_dic[mgt]].to_dict() if 0 in pass_filter_type_dic.keys(): pass_filter_type_dic.pop(0) mgt_threshod = mgt + '_>=_' + str(threshod_dic[mgt]) type_dic = {} type_isolist_dic = {} isolate_dic = {} interspread = 0 limited_year = 0 large_sclale = 0 for type in pass_filter_type_dic.keys(): type_isolist_dic[type] = [] subdf = mgt_epi[mgt_epi[mgt]== type] key_list = ['Population_Structure', 'MDR', 'AR2_1', 'Top_MGT4_STs'] keyflag_dic = flag_reprot(flags, subdf, key_list) country_dic = subdf.groupby(['Country'])['Strain'].count().to_dict() year_dic = subdf.groupby(['Collection Year'])['Strain'].count().to_dict() source_dic = subdf.groupby(['Source Type'])['Strain'].count().to_dict() type_dic[type] = keyflag_dic # type_dic[type]={"no_isolates":{}} type_dic[type]['no_isolates'] = pass_filter_type_dic[type] type_dic[type]['country_detail'] = country_dic if 'None' in country_dic.keys(): type_dic[type]['no_country'] = len(country_dic) - 1 else: type_dic[type]['no_country'] = len(country_dic) type_dic[type]['year_detail'] = year_dic if 'None' in year_dic.keys(): type_dic[type]['no_year'] = len(year_dic) - 1 else: type_dic[type]['no_year'] = len(year_dic) if len(year_dic) <= 2 and len(year_dic)> 0: limited_year =limited_year+1 if len(country_dic) >= 2 : interspread = interspread + 1 if len(year_dic) > 1 and len(year_dic) > 0 and len(country_dic) >= 2 and pass_filter_type_dic[type] > 50: large_sclale = large_sclale + 1 type_dic[type]['source'] = source_dic if 'None' in source_dic.keys(): type_dic[type]['no_source'] = len(source_dic) - 1 else: type_dic[type]['no_source'] = len(source_dic) ########### to product isolate_dic acclist = subdf['Accession'].tolist() for acc in acclist: type_isolist_dic[type].append(acc) isolate_dic[acc] = {} isolate_dic[acc] = type_dic[type] isolate_dic[acc][mgt_threshod] = type print("No. of passed types: " + str(len(pass_filter_type_dic))) print('No. 
of potential international spreading clusters: ' + str(interspread)) print('No. of potential international spreading clusters within years: ' + str(limited_year)) print('No. of potential international spreading large clusters within years >50: ' + str(large_sclale)) return mgt_threshod,type_dic, isolate_dic, type_isolist_dic def transmi_tracking(threshod_dic, mgt_epi): for mgt in threshod_dic: gp = mgt_epi.groupby([mgt])['Strain'].count().fillna(0) pass_filter_type_dic = gp [gp>= threshod_dic[mgt]].to_dict() if 0 in pass_filter_type_dic.keys(): pass_filter_type_dic.pop(0) mgt_threshod = mgt + '_>=_' + str(threshod_dic[mgt]) type_dic = {} isolate_dic = {} type_isolist_dic ={} interspread = 0 limited_year = 0 large_sclale = 0 for type in pass_filter_type_dic.keys(): type_isolist_dic[type]=[] subdf = mgt_epi[mgt_epi[mgt]== type] key_list = ['Population_Structure', 'MDR', 'AR2_1', 'Top_MGT4_STs'] # keyflag_dic = flag_reprot(flags, subdf, key_list) country_dic = subdf.groupby(['Country'])['Strain'].count().to_dict() year_dic = subdf.groupby(['Collection Year'])['Strain'].count().to_dict() source_dic = subdf.groupby(['Source Type'])['Strain'].count().to_dict() type_dic[type] = {} type_dic[type]['no_isolates'] = pass_filter_type_dic[type] type_dic[type]['country_detail'] = country_dic if 'None' in country_dic.keys(): type_dic[type]['no_country'] = len(country_dic) - 1 else: type_dic[type]['no_country'] = len(country_dic) type_dic[type]['year_detail'] = year_dic if 'None' in year_dic.keys(): type_dic[type]['no_year'] = len(year_dic) - 1 else: type_dic[type]['no_year'] = len(year_dic) if len(year_dic) <= 2 and len(year_dic)> 0: limited_year =limited_year+1 if len(country_dic) >= 2 : interspread = interspread + 1 if len(year_dic) > 1 and len(year_dic) > 0 and len(country_dic) >= 2 and pass_filter_type_dic[type] > 50: large_sclale = large_sclale + 1 type_dic[type]['source'] = source_dic if 'None' in source_dic.keys(): type_dic[type]['no_source'] = len(source_dic) - 1 else: 
type_dic[type]['no_source'] = len(source_dic) ########### to product isolate_dic acclist = subdf['Accession'].tolist() for acc in acclist: type_isolist_dic[type].append(acc) isolate_dic[acc] = {} isolate_dic[acc] = type_dic[type] isolate_dic[acc][mgt_threshod] = type print("No. of passed types: " + str(len(pass_filter_type_dic))) print('No. of potential international spreading clusters: ' + str(interspread)) print('No. of potential international spreading clusters within years: ' + str(limited_year)) print('No. of potential international spreading large clusters within years >50: '+ str(large_sclale)) return mgt_threshod,type_dic, isolate_dic, type_isolist_dic def get_part_alleleprofil(whol_alleprof, isolist): outlist = [] outlist.append(whol_alleprof[0]) for line in whol_alleprof: col = line.split('\t') for acc in isolist: if acc == col[0]: # or acc + '_cladeC.fasta' == col[0]: outlist.append(line) return outlist if __name__ == "__main__": main()
Adalijuanluo/MGTSEnT
MGTSEnT_MGT9_singlelinkagecluster.py
MGTSEnT_MGT9_singlelinkagecluster.py
py
24,576
python
en
code
0
github-code
36
[ { "api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call" }, { "api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 14, "usage_type": "attribute" }, { "api_name": "time.time", "line_number": 33, "usage_type": "call" }, { "api...
38666222212
from __future__ import absolute_import import logging import string from zipfile import ZipFile, ZIP_STORED, ZIP_DEFLATED import re # py2 vs py3 transition from ..six import text_type as unicode from ..six import string_types as basestring from ..six import ensure_binary from io import BytesIO ## XML isn't as forgiving as HTML, so rather than generate as strings, ## use DOM to generate the XML files. from xml.dom.minidom import getDOMImplementation import bs4 from .base_writer import BaseStoryWriter from ..htmlcleanup import stripHTML,removeEntities from ..story import commaGroups logger = logging.getLogger(__name__) class EpubWriter(BaseStoryWriter): @staticmethod def getFormatName(): return 'epub' @staticmethod def getFormatExt(): return '.epub' def __init__(self, config, story): BaseStoryWriter.__init__(self, config, story) self.EPUB_CSS = string.Template('''${output_css}''') self.EPUB_TITLE_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>${title} by ${author}</title> <link href="stylesheet.css" type="text/css" rel="stylesheet"/> </head> <body class="fff_titlepage"> <h3><a href="${storyUrl}">${title}</a> by ${authorHTML}</h3> <div> ''') self.EPUB_TITLE_ENTRY = string.Template(''' <b>${label}:</b> ${value}<br /> ''') self.EPUB_NO_TITLE_ENTRY = string.Template(''' ${value}<br /> ''') self.EPUB_TITLE_PAGE_END = string.Template(''' </div> </body> </html> ''') self.EPUB_TABLE_TITLE_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>${title} by ${author}</title> <link href="stylesheet.css" type="text/css" rel="stylesheet"/> </head> <body class="fff_titlepage"> <h3><a href="${storyUrl}">${title}</a> by ${authorHTML}</h3> <table class="full"> ''') self.EPUB_TABLE_TITLE_ENTRY = string.Template(''' <tr><td><b>${label}:</b></td><td>${value}</td></tr> ''') self.EPUB_TABLE_TITLE_WIDE_ENTRY = string.Template(''' <tr><td 
colspan="2"><b>${label}:</b> ${value}</td></tr> ''') self.EPUB_TABLE_NO_TITLE_ENTRY = string.Template(''' <tr><td colspan="2">${label}${value}</td></tr> ''') self.EPUB_TABLE_TITLE_PAGE_END = string.Template(''' </table> </body> </html> ''') self.EPUB_TOC_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>${title} by ${author}</title> <link href="stylesheet.css" type="text/css" rel="stylesheet"/> </head> <body class="fff_tocpage"> <div> <h3>Table of Contents</h3> ''') self.EPUB_TOC_ENTRY = string.Template(''' <a href="file${index04}.xhtml">${chapter}</a><br /> ''') self.EPUB_TOC_PAGE_END = string.Template(''' </div> </body> </html> ''') self.EPUB_CHAPTER_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>${chapter}</title> <link href="stylesheet.css" type="text/css" rel="stylesheet"/> <meta name="chapterurl" content="${url}" /> <meta name="chapterorigtitle" content="${origchapter}" /> <meta name="chaptertoctitle" content="${tocchapter}" /> <meta name="chaptertitle" content="${chapter}" /> </head> <body class="fff_chapter"> <h3 class="fff_chapter_title">${chapter}</h3> ''') self.EPUB_CHAPTER_END = string.Template(''' </body> </html> ''') self.EPUB_LOG_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Update Log</title> <link href="stylesheet.css" type="text/css" rel="stylesheet"/> </head> <body class="fff_logpage"> <h3>Update Log</h3> ''') self.EPUB_LOG_UPDATE_START = string.Template(''' <p class='log_entry'> ''') self.EPUB_LOG_ENTRY = string.Template(''' <b>${label}:</b> <span id="${id}">${value}</span> ''') self.EPUB_LOG_UPDATE_END = string.Template(''' </p> <hr/> ''') self.EPUB_LOG_PAGE_END = string.Template(''' </body> </html> ''') self.EPUB_LOG_PAGE_END = string.Template(''' </body> </html> ''') self.EPUB_COVER = string.Template(''' 
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"><head><title>Cover</title><style type="text/css" title="override_css"> @page {padding: 0pt; margin:0pt} body { text-align: center; padding:0pt; margin: 0pt; } div { margin: 0pt; padding: 0pt; } </style></head><body class="fff_coverpage"><div> <img src="${coverimg}" alt="cover"/> </div></body></html> ''') def writeLogPage(self, out): """ Write the log page, but only include entries that there's metadata for. START, ENTRY and END are expected to already be string.Template(). START and END are expected to use the same names as Story.metadata, but ENTRY should use id, label and value. """ if self.hasConfig("logpage_start"): START = string.Template(self.getConfig("logpage_start")) else: START = self.EPUB_LOG_PAGE_START if self.hasConfig("logpage_end"): END = string.Template(self.getConfig("logpage_end")) else: END = self.EPUB_LOG_PAGE_END # if there's a self.story.logfile, there's an existing log # to add to. if self.story.logfile: logger.debug("existing logfile found, appending") # logger.debug("existing data:%s"%self._getLastLogData(self.story.logfile)) replace_string = "</body>" # "</h3>" self._write(out,self.story.logfile.replace(replace_string,self._makeLogEntry(self._getLastLogData(self.story.logfile))+replace_string)) else: # otherwise, write a new one. self._write(out,START.substitute(self.story.getAllMetadata())) self._write(out,self._makeLogEntry()) self._write(out,END.substitute(self.story.getAllMetadata())) # self parsing instead of Soup because it should be simple and not # worth the overhead. def _getLastLogData(self,logfile): """ Make a dict() of the most recent(last) log entry for each piece of metadata. Switch rindex to index to search from top instead of bottom. 
""" values = {} for entry in self.getConfigList("logpage_entries") + self.getConfigList("extra_logpage_entries"): try: # <span id="dateUpdated">1975-04-15</span> span = '<span id="%s">'%entry idx = logfile.rindex(span)+len(span) values[entry] = logfile[idx:logfile.index('</span>\n',idx)] except Exception as e: #print("e:%s"%e) pass return values def _makeLogEntry(self, oldvalues={}): if self.hasConfig("logpage_update_start"): START = string.Template(self.getConfig("logpage_update_start")) else: START = self.EPUB_LOG_UPDATE_START if self.hasConfig("logpage_entry"): ENTRY = string.Template(self.getConfig("logpage_entry")) else: ENTRY = self.EPUB_LOG_ENTRY if self.hasConfig("logpage_update_end"): END = string.Template(self.getConfig("logpage_update_end")) else: END = self.EPUB_LOG_UPDATE_END retval = START.substitute(self.story.getAllMetadata()) ## words_added is only used in logpage because it's the only ## place we know the previous version's word count. if 'words_added' in (self.getConfigList("logpage_entries") + self.getConfigList("extra_logpage_entries")): new_words = self.story.getMetadata('numWords') old_words = oldvalues.get('numWords',None) if new_words and old_words: self.story.setMetadata('words_added',commaGroups(unicode(int(new_words.replace(',',''))-int(old_words.replace(',',''))))) for entry in self.getConfigList("logpage_entries") + self.getConfigList("extra_logpage_entries"): if self.isValidMetaEntry(entry): val = self.story.getMetadata(entry) if val and ( entry not in oldvalues or val != oldvalues[entry] ): label=self.get_label(entry) # if self.hasConfig(entry+"_label"): # label=self.getConfig(entry+"_label") # elif entry in self.titleLabels: # logger.debug("Using fallback label for %s_label"%entry) # label=self.titleLabels[entry] # else: # label="%s"%entry.title() # logger.debug("No known label for %s, fallback to '%s'"%(entry,label)) retval = retval + ENTRY.substitute({'id':entry, 'label':label, 'value':val}) else: # could be useful for introducing 
extra text, but # mostly it makes it easy to tell when you get the # keyword wrong. retval = retval + entry retval = retval + END.substitute(self.story.getAllMetadata()) if self.getConfig('replace_hr'): # replacing a self-closing tag with a container tag in the # soup is more difficult than it first appears. So cheat. retval = re.sub("<hr[^>]*>","<div class='center'>* * *</div>",retval) return retval def writeStoryImpl(self, out): if self.story.oldcover and \ ( (self.getConfig('use_old_cover') and self.story.getMetadata('cover_image') != 'force' ) or not self.story.cover ): # logger.debug("use_old_cover:%s"%self.getConfig('use_old_cover')) self.use_oldcover = True self.story.setMetadata('cover_image','old') else: self.use_oldcover = False ## Python 2.5 ZipFile is rather more primative than later ## versions. It can operate on a file, or on a BytesIO, but ## not on an open stream. OTOH, I suspect we would have had ## problems with closing and opening again to change the ## compression type anyway. zipio = BytesIO() ## mimetype must be first file and uncompressed. Python 2.5 ## ZipFile can't change compression type file-by-file, so we ## have to close and re-open outputepub = ZipFile(zipio, 'w', compression=ZIP_STORED) outputepub.debug=3 outputepub.writestr('mimetype','application/epub+zip') outputepub.close() ## Re-open file for content. outputepub = ZipFile(zipio, 'a', compression=ZIP_DEFLATED) outputepub.debug=3 ## Create META-INF/container.xml file. 
The only thing it does is ## point to content.opf containerdom = getDOMImplementation().createDocument(None, "container", None) containertop = containerdom.documentElement containertop.setAttribute("version","1.0") containertop.setAttribute("xmlns","urn:oasis:names:tc:opendocument:xmlns:container") rootfiles = containerdom.createElement("rootfiles") containertop.appendChild(rootfiles) rootfiles.appendChild(newTag(containerdom,"rootfile",{"full-path":"content.opf", "media-type":"application/oebps-package+xml"})) outputepub.writestr("META-INF/container.xml",containerdom.toxml(encoding='utf-8')) containerdom.unlink() del containerdom ## Epub has two metadata files with real data. We're putting ## them in content.opf (pointed to by META-INF/container.xml) ## and toc.ncx (pointed to by content.opf) ## content.opf contains metadata, a 'manifest' list of all ## other included files, and another 'spine' list of the items in the ## file uniqueid= 'fanficfare-uid:%s-u%s-s%s' % ( self.getMetadata('site'), self.story.getList('authorId')[0], self.getMetadata('storyId')) contentdom = getDOMImplementation().createDocument(None, "package", None) package = contentdom.documentElement ## might want 3.1 or something in future. 
epub3 = self.getConfig("epub_version",default="2.0").startswith("3") if epub3: package.setAttribute("version","3.0") else: package.setAttribute("version","2.0") logger.info("Saving EPUB Version "+package.getAttribute("version")) package.setAttribute("xmlns","http://www.idpf.org/2007/opf") package.setAttribute("unique-identifier","fanficfare-uid") metadata=newTag(contentdom,"metadata", attrs={"xmlns:dc":"http://purl.org/dc/elements/1.1/", "xmlns:opf":"http://www.idpf.org/2007/opf"}) package.appendChild(metadata) metadata.appendChild(newTag(contentdom,"dc:identifier", text=uniqueid, attrs={"id":"fanficfare-uid"})) if self.getMetadata('title'): metadata.appendChild(newTag(contentdom,"dc:title",text=self.getMetadata('title'), attrs={"id":"id"})) def creator_attrs(idnum): if epub3: return {"id":"id-%d"%idnum} else: return {"opf:role":"aut"} idnum = 1 if self.getMetadata('author'): if self.story.isList('author'): for auth in self.story.getList('author'): metadata.appendChild(newTag(contentdom,"dc:creator", attrs=creator_attrs(idnum), text=auth)) idnum += 1 else: metadata.appendChild(newTag(contentdom,"dc:creator", attrs=creator_attrs(idnum), text=self.getMetadata('author'))) idnum += 1 metadata.appendChild(newTag(contentdom,"dc:contributor",text="FanFicFare [https://github.com/JimmXinu/FanFicFare]", attrs={"id":"id-%d"%idnum})) idnum += 1 # metadata.appendChild(newTag(contentdom,"dc:rights",text="")) if self.story.getMetadata('langcode'): langcode=self.story.getMetadata('langcode') else: langcode='en' metadata.appendChild(newTag(contentdom,"dc:language",text=langcode)) # published, created, updated, calibre # Leave calling self.story.getMetadataRaw directly in case date format changes. if epub3: ## epub3 requires an updated modified date on every change of ## any kind, not just *content* change. 
from datetime import datetime metadata.appendChild(newTag(contentdom,"meta", attrs={"property":"dcterms:modified"}, text=datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"))) else: if self.story.getMetadataRaw('datePublished'): metadata.appendChild(newTag(contentdom,"dc:date", attrs={"opf:event":"publication"}, text=self.story.getMetadataRaw('datePublished').strftime("%Y-%m-%d"))) if self.story.getMetadataRaw('dateCreated'): metadata.appendChild(newTag(contentdom,"dc:date", attrs={"opf:event":"creation"}, text=self.story.getMetadataRaw('dateCreated').strftime("%Y-%m-%d"))) if self.story.getMetadataRaw('dateUpdated'): metadata.appendChild(newTag(contentdom,"dc:date", attrs={"opf:event":"modification"}, text=self.story.getMetadataRaw('dateUpdated').strftime("%Y-%m-%d"))) metadata.appendChild(newTag(contentdom,"meta", attrs={"name":"calibre:timestamp", "content":self.story.getMetadataRaw('dateUpdated').strftime("%Y-%m-%dT%H:%M:%S")})) series = self.story.getMetadata('series') if series and self.getConfig('calibre_series_meta'): series_index = "0.0" if '[' in series: # logger.debug(series) ## assumed "series [series_index]" series_index = series[series.rindex(' [')+2:-1] series = series[:series.rindex(' [')] ## calibre always outputs a series_index and it's ## always a float with 1 or 2 decimals. FFF usually ## has either an integer or no index. 
(injected ## calibre series is the only float at this time) series_index = "%.2f" % float(series_index) metadata.appendChild(newTag(contentdom,"meta", attrs={"name":"calibre:series", "content":series})) metadata.appendChild(newTag(contentdom,"meta", attrs={"name":"calibre:series_index", "content":series_index})) if self.getMetadata('description'): metadata.appendChild(newTag(contentdom,"dc:description",text= self.getMetadata('description'))) for subject in self.story.getSubjectTags(): metadata.appendChild(newTag(contentdom,"dc:subject",text=subject)) if self.getMetadata('site'): metadata.appendChild(newTag(contentdom,"dc:publisher", text=self.getMetadata('site'))) if self.getMetadata('storyUrl'): if epub3: metadata.appendChild(newTag(contentdom,"dc:identifier", text="URL:"+self.getMetadata('storyUrl'))) else: metadata.appendChild(newTag(contentdom,"dc:identifier", attrs={"opf:scheme":"URL"}, text=self.getMetadata('storyUrl'))) metadata.appendChild(newTag(contentdom,"dc:source", text=self.getMetadata('storyUrl'))) if epub3: # <meta refines="#id" property="title-type">main</meta> metadata.appendChild(newTag(contentdom,"meta", attrs={"property":"title-type", "refines":"#id", }, text="main")) # epub3 removes attrs that identify dc:creator and # dc:contributor types and instead put them here. # 'aut' for 1-(idnum-1) for j in range(1,idnum-1): #<meta property="role" refines="#id-1" scheme="marc:relators">aut</meta> metadata.appendChild(newTag(contentdom,"meta", attrs={"property":"role", "refines":"#id-%d"%j, "scheme":"marc:relators", }, text="aut")) metadata.appendChild(newTag(contentdom,"meta", attrs={"property":"role", "refines":"#id-%d"%(idnum-1), "scheme":"marc:relators", }, text="bkp")) ## end of metadata, create manifest. 
items = [] # list of (id, href, type, title) tuples(all strings) itemrefs = [] # list of strings -- idrefs from .opfs' spines items.append(("ncx","toc.ncx","application/x-dtbncx+xml",None)) ## we'll generate the toc.ncx file, ## but it needs to be in the items manifest. guide = None coverIO = None coverimgid = "image0000" if self.use_oldcover: logger.debug("using old cover") (oldcoverhtmlhref, oldcoverhtmltype, oldcoverhtmldata, oldcoverimghref, oldcoverimgtype, oldcoverimgdata) = self.story.oldcover outputepub.writestr(oldcoverhtmlhref,oldcoverhtmldata) outputepub.writestr(oldcoverimghref,oldcoverimgdata) coverimgid = "image0" items.append((coverimgid, oldcoverimghref, oldcoverimgtype, None)) items.append(("cover",oldcoverhtmlhref,oldcoverhtmltype,None)) itemrefs.append("cover") metadata.appendChild(newTag(contentdom,"meta",{"content":"image0", "name":"cover"})) guide = newTag(contentdom,"guide") guide.appendChild(newTag(contentdom,"reference",attrs={"type":"cover", "title":"Cover", "href":oldcoverhtmlhref})) if self.getConfig('include_images'): imgcount=0 for imgmap in self.story.getImgUrls(): imgfile = "OEBPS/"+imgmap['newsrc'] # don't overwrite old cover. if not self.use_oldcover or imgfile != oldcoverimghref: outputepub.writestr(imgfile,imgmap['data']) items.append(("image%04d"%imgcount, imgfile, imgmap['mime'], None)) imgcount+=1 if 'cover' in imgfile: # make sure coverimgid is set to the cover, not # just the first image. coverimgid = items[-1][0] items.append(("style","OEBPS/stylesheet.css","text/css",None)) if self.story.cover and not self.use_oldcover: # Note that the id of the cover xhmtl *must* be 'cover' # for it to work on Nook. 
items.append(("cover","OEBPS/cover.xhtml","application/xhtml+xml",None)) itemrefs.append("cover") # # <meta name="cover" content="cover.jpg"/> metadata.appendChild(newTag(contentdom,"meta",{"content":coverimgid, "name":"cover"})) # cover stuff for later: # at end of <package>: # <guide> # <reference type="cover" title="Cover" href="Text/cover.xhtml"/> # </guide> guide = newTag(contentdom,"guide") guide.appendChild(newTag(contentdom,"reference",attrs={"type":"cover", "title":"Cover", "href":"OEBPS/cover.xhtml"})) if self.hasConfig("cover_content"): COVER = string.Template(self.getConfig("cover_content")) else: COVER = self.EPUB_COVER coverIO = BytesIO() self._write(coverIO,COVER.substitute(dict(list(self.story.getAllMetadata().items())+list({'coverimg':self.story.cover}.items())))) if self.getConfig("include_titlepage"): items.append(("title_page","OEBPS/title_page.xhtml","application/xhtml+xml","Title Page")) itemrefs.append("title_page") if self.story.getChapterCount() > 1 and self.getConfig("include_tocpage") and not self.metaonly : items.append(("toc_page","OEBPS/toc_page.xhtml","application/xhtml+xml","Table of Contents")) itemrefs.append("toc_page") ## save where to insert logpage. logpage_indices = (len(items),len(itemrefs)) dologpage = ( self.getConfig("include_logpage") == "smart" and \ (self.story.logfile or self.story.getMetadataRaw("status") == "In-Progress") ) \ or self.getConfig("include_logpage") == "true" ## collect chapter urls and file names for internalize_text_links option. chapurlmap = {} for index, chap in enumerate(self.story.getChapters(fortoc=True)): if chap['html']: i=index+1 items.append(("file%s"%chap['index04'], "OEBPS/file%s.xhtml"%chap['index04'], "application/xhtml+xml", chap['title'])) itemrefs.append("file%s"%chap['index04']) chapurlmap[chap['url']]="file%s.xhtml"%chap['index04'] # url -> relative epub file name. if dologpage: if self.getConfig("logpage_at_end") == "true": ## insert logpage after chapters. 
logpage_indices = (len(items),len(itemrefs)) items.insert(logpage_indices[0],("log_page","OEBPS/log_page.xhtml","application/xhtml+xml","Update Log")) itemrefs.insert(logpage_indices[1],"log_page") manifest = contentdom.createElement("manifest") package.appendChild(manifest) for item in items: (id,href,type,title)=item manifest.appendChild(newTag(contentdom,"item", attrs={'id':id, 'href':href, 'media-type':type})) if epub3: # epub3 nav # <item href="nav.xhtml" id="nav" media-type="application/xhtml+xml" properties="nav"/> manifest.appendChild(newTag(contentdom,"item", attrs={'href':'nav.xhtml', 'id':'nav', 'media-type':'application/xhtml+xml', 'properties':'nav' })) spine = newTag(contentdom,"spine",attrs={"toc":"ncx"}) package.appendChild(spine) for itemref in itemrefs: spine.appendChild(newTag(contentdom,"itemref", attrs={"idref":itemref, "linear":"yes"})) # guide only exists if there's a cover. if guide: package.appendChild(guide) # write content.opf to zip. contentxml = contentdom.toxml(encoding='utf-8') # tweak for brain damaged Nook STR. Nook insists on name before content. 
contentxml = contentxml.replace(ensure_binary('<meta content="%s" name="cover"/>'%coverimgid), ensure_binary('<meta name="cover" content="%s"/>'%coverimgid)) outputepub.writestr("content.opf",contentxml) contentdom.unlink() del contentdom ## create toc.ncx file tocncxdom = getDOMImplementation().createDocument(None, "ncx", None) ncx = tocncxdom.documentElement ncx.setAttribute("version","2005-1") ncx.setAttribute("xmlns","http://www.daisy.org/z3986/2005/ncx/") head = tocncxdom.createElement("head") ncx.appendChild(head) head.appendChild(newTag(tocncxdom,"meta", attrs={"name":"dtb:uid", "content":uniqueid})) head.appendChild(newTag(tocncxdom,"meta", attrs={"name":"dtb:depth", "content":"1"})) head.appendChild(newTag(tocncxdom,"meta", attrs={"name":"dtb:totalPageCount", "content":"0"})) head.appendChild(newTag(tocncxdom,"meta", attrs={"name":"dtb:maxPageNumber", "content":"0"})) docTitle = tocncxdom.createElement("docTitle") docTitle.appendChild(newTag(tocncxdom,"text",text=self.getMetadata('title'))) ncx.appendChild(docTitle) tocnavMap = tocncxdom.createElement("navMap") ncx.appendChild(tocnavMap) # <navPoint id="<id>" playOrder="<risingnumberfrom0>"> # <navLabel> # <text><chapter title></text> # </navLabel> # <content src="<chapterfile>"/> # </navPoint> index=0 for item in items: (id,href,type,title)=item # only items to be skipped, cover.xhtml, images, toc.ncx, stylesheet.css, should have no title. if title : navPoint = newTag(tocncxdom,"navPoint", attrs={'id':id, 'playOrder':unicode(index)}) tocnavMap.appendChild(navPoint) navLabel = newTag(tocncxdom,"navLabel") navPoint.appendChild(navLabel) ## the xml library will re-escape as needed. 
navLabel.appendChild(newTag(tocncxdom,"text",text=stripHTML(title))) navPoint.appendChild(newTag(tocncxdom,"content",attrs={"src":href})) index=index+1 # write toc.ncx to zip file outputepub.writestr("toc.ncx",tocncxdom.toxml(encoding='utf-8')) tocncxdom.unlink() del tocncxdom if epub3: ############################################################################################################## ## create nav.xhtml file tocnavdom = getDOMImplementation().createDocument(None, "html", None) navxhtml = tocnavdom.documentElement navxhtml.setAttribute("xmlns","http://www.w3.org/1999/xhtml") navxhtml.setAttribute("xmlns:epub","http://www.idpf.org/2007/ops") navxhtml.setAttribute("lang",langcode) navxhtml.setAttribute("xml:lang",langcode) head = tocnavdom.createElement("head") navxhtml.appendChild(head) head.appendChild(newTag(tocnavdom,"title",text="Navigation")) # <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/> head.appendChild(newTag(tocnavdom,"meta", attrs={"http-equiv":"Content-Type", "content":"text/html; charset=utf-8"})) body = tocnavdom.createElement("body") navxhtml.appendChild(body) nav = newTag(tocnavdom,"nav", attrs={"epub:type":"toc"}) body.appendChild(nav) ol = newTag(tocnavdom,"ol") nav.appendChild(ol) for item in items: (id,href,type,title)=item # only items to be skipped, cover.xhtml, images, toc.nav, # stylesheet.css, should have no title. 
if title: li = newTag(tocnavdom,"li") ol.appendChild(li) atag = newTag(tocnavdom,"a", attrs={"href":href}, text=stripHTML(title)) li.appendChild(atag) if self.story.cover and not self.use_oldcover: # <nav epub:type="landmarks" hidden=""> # <ol> # <li><a href="OEBPS/cover.xhtml" epub:type="cover">Cover</a></li> # </ol> # </nav> nav = newTag(tocnavdom,"nav", attrs={"epub:type":"landmarks", "hidden":""}) body.appendChild(nav) ol = newTag(tocnavdom,"ol") nav.appendChild(ol) li = newTag(tocnavdom,"li") ol.appendChild(li) atag = newTag(tocnavdom,"a", attrs={"href":"OEBPS/cover.xhtml", "epub:type":"cover"}, text="Cover") li.appendChild(atag) # write nav.xhtml to zip file outputepub.writestr("nav.xhtml",tocnavdom.toxml(encoding='utf-8')) tocnavdom.unlink() del tocnavdom ############################################################################################################## # write stylesheet.css file. outputepub.writestr("OEBPS/stylesheet.css",self.EPUB_CSS.substitute(self.story.getAllMetadata())) # write title page. if self.getConfig("titlepage_use_table"): TITLE_PAGE_START = self.EPUB_TABLE_TITLE_PAGE_START TITLE_ENTRY = self.EPUB_TABLE_TITLE_ENTRY WIDE_TITLE_ENTRY = self.EPUB_TABLE_TITLE_WIDE_ENTRY NO_TITLE_ENTRY = self.EPUB_TABLE_NO_TITLE_ENTRY TITLE_PAGE_END = self.EPUB_TABLE_TITLE_PAGE_END else: TITLE_PAGE_START = self.EPUB_TITLE_PAGE_START TITLE_ENTRY = self.EPUB_TITLE_ENTRY WIDE_TITLE_ENTRY = self.EPUB_TITLE_ENTRY # same, only wide in tables. NO_TITLE_ENTRY = self.EPUB_NO_TITLE_ENTRY TITLE_PAGE_END = self.EPUB_TITLE_PAGE_END if coverIO: outputepub.writestr("OEBPS/cover.xhtml",coverIO.getvalue()) coverIO.close() titlepageIO = BytesIO() self.writeTitlePage(out=titlepageIO, START=TITLE_PAGE_START, ENTRY=TITLE_ENTRY, WIDE_ENTRY=WIDE_TITLE_ENTRY, END=TITLE_PAGE_END, NO_TITLE_ENTRY=NO_TITLE_ENTRY) if titlepageIO.getvalue(): # will be false if no title page. outputepub.writestr("OEBPS/title_page.xhtml",titlepageIO.getvalue()) titlepageIO.close() # write toc page. 
tocpageIO = BytesIO() self.writeTOCPage(tocpageIO, self.EPUB_TOC_PAGE_START, self.EPUB_TOC_ENTRY, self.EPUB_TOC_PAGE_END) if tocpageIO.getvalue(): # will be false if no toc page. outputepub.writestr("OEBPS/toc_page.xhtml",tocpageIO.getvalue()) tocpageIO.close() if dologpage: # write log page. logpageIO = BytesIO() self.writeLogPage(logpageIO) outputepub.writestr("OEBPS/log_page.xhtml",logpageIO.getvalue()) logpageIO.close() if self.hasConfig('chapter_start'): CHAPTER_START = string.Template(self.getConfig("chapter_start")) else: CHAPTER_START = self.EPUB_CHAPTER_START if self.hasConfig('chapter_end'): CHAPTER_END = string.Template(self.getConfig("chapter_end")) else: CHAPTER_END = self.EPUB_CHAPTER_END for index, chap in enumerate(self.story.getChapters()): # (url,title,html) # logger.debug("chapter:%s %s %s"%(len(chap['html']), chap['title'],chap['url'])) if chap['html']: chap_data = chap['html'] if self.getConfig('internalize_text_links'): soup = bs4.BeautifulSoup(chap['html'],'html5lib') changed=False for alink in soup.find_all('a'): ## Chapters can be inserted in the middle ## which can break existing internal links. ## So let's save the original href and update. # logger.debug("found %s"%alink) if alink.has_attr('data-orighref') and alink['data-orighref'] in chapurlmap: alink['href']=chapurlmap[alink['data-orighref']] # logger.debug("set1 %s"%alink) changed=True elif alink.has_attr('href') and alink['href'] in chapurlmap: if not alink['href'].startswith('file'): # only save orig href if not already internal. alink['data-orighref']=alink['href'] alink['href']=chapurlmap[alink['href']] # logger.debug("set2 %s"%alink) changed=True if changed: chap_data = unicode(soup) # Don't want html, head or body tags in # chapter html--bs4 insists on adding them. 
chap_data = re.sub(r"</?(html|head|body)[^>]*>\r?\n?","",chap_data) # logger.debug('Writing chapter text for: %s' % chap.title) chap['url']=removeEntities(chap['url']) chap['chapter']=removeEntities(chap['chapter']) chap['title']=removeEntities(chap['title']) chap['origchapter']=removeEntities(chap['origtitle']) chap['tocchapter']=removeEntities(chap['toctitle']) # escape double quotes in all vals. for k,v in chap.items(): if isinstance(v,basestring): chap[k]=v.replace('"','&quot;') fullhtml = CHAPTER_START.substitute(chap) + \ chap_data.strip() + \ CHAPTER_END.substitute(chap) # strip to avoid ever growning numbers of newlines. # ffnet(& maybe others) gives the whole chapter text # as one line. This causes problems for nook(at # least) when the chapter size starts getting big # (200k+) fullhtml = re.sub(r'(</p>|<br ?/>)\n*',r'\1\n',fullhtml) # logger.debug("write OEBPS/file%s.xhtml"%chap['index04']) outputepub.writestr("OEBPS/file%s.xhtml"%chap['index04'],fullhtml.encode('utf-8')) del fullhtml if self.story.calibrebookmark: outputepub.writestr("META-INF/calibre_bookmarks.txt",self.story.calibrebookmark) # declares all the files created by Windows. otherwise, when # it runs in appengine, windows unzips the files as 000 perms. for zf in outputepub.filelist: zf.create_system = 0 outputepub.close() out.write(zipio.getvalue()) zipio.close() ## Utility method for creating new tags. def newTag(dom,name,attrs=None,text=None): tag = dom.createElement(name) if( attrs is not None ): for attr in attrs.keys(): tag.setAttribute(attr,attrs[attr]) if( text is not None ): tag.appendChild(dom.createTextNode(text)) return tag
JimmXinu/FanFicFare
fanficfare/writers/writer_epub.py
writer_epub.py
py
39,444
python
en
code
664
github-code
36
[ { "api_name": "logging.getLogger", "line_number": 23, "usage_type": "call" }, { "api_name": "base_writer.BaseStoryWriter", "line_number": 25, "usage_type": "name" }, { "api_name": "base_writer.BaseStoryWriter.__init__", "line_number": 36, "usage_type": "call" }, { ...
1194624429
""" Definitions of the GA4GH protocol types. """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals import datetime import json import inspect from sys import modules import google.protobuf.json_format as json_format import google.protobuf.message as message import google.protobuf.struct_pb2 as struct_pb2 import ga4gh.pb as pb from ga4gh._protocol_version import version # noqa from ga4gh.common_pb2 import * # noqa from ga4gh.assay_metadata_pb2 import * # noqa from ga4gh.metadata_pb2 import * # noqa from ga4gh.metadata_service_pb2 import * # noqa from ga4gh.read_service_pb2 import * # noqa from ga4gh.reads_pb2 import * # noqa from ga4gh.reference_service_pb2 import * # noqa from ga4gh.references_pb2 import * # noqa from ga4gh.variant_service_pb2 import * # noqa from ga4gh.variants_pb2 import * # noqa from ga4gh.allele_annotations_pb2 import * # noqa from ga4gh.allele_annotation_service_pb2 import * # noqa from ga4gh.sequence_annotations_pb2 import * # noqa from ga4gh.sequence_annotation_service_pb2 import * # noqa from ga4gh.bio_metadata_pb2 import * # noqa from ga4gh.bio_metadata_service_pb2 import * # noqa from ga4gh.genotype_phenotype_pb2 import * # noqa from ga4gh.genotype_phenotype_service_pb2 import * # noqa from ga4gh.rna_quantification_pb2 import * # noqa from ga4gh.rna_quantification_service_pb2 import * # noqa # A map of response objects to the name of the attribute used to # store the values returned. 
# A map of response objects to the name of the attribute used to
# store the values returned.
_valueListNameMap = {
    SearchVariantSetsResponse: "variant_sets",  # noqa
    SearchVariantsResponse: "variants",  # noqa
    SearchDatasetsResponse: "datasets",  # noqa
    SearchReferenceSetsResponse: "reference_sets",  # noqa
    SearchReferencesResponse: "references",  # noqa
    SearchReadGroupSetsResponse: "read_group_sets",  # noqa
    SearchReadsResponse: "alignments",  # noqa
    SearchCallSetsResponse: "call_sets",  # noqa
    SearchVariantAnnotationSetsResponse: "variant_annotation_sets",  # noqa
    SearchVariantAnnotationsResponse: "variant_annotations",  # noqa
    SearchFeatureSetsResponse: "feature_sets",  # noqa
    SearchFeaturesResponse: "features",  # noqa
    SearchBioSamplesResponse: "biosamples",  # noqa
    SearchIndividualsResponse: "individuals",  # noqa
    SearchPhenotypeAssociationSetsResponse: "phenotype_association_sets",  # noqa
    SearchPhenotypesResponse: "phenotypes",  # noqa
    SearchGenotypePhenotypeResponse: "associations",  # noqa
    SearchRnaQuantificationSetsResponse: "rna_quantification_sets",  # noqa
    SearchRnaQuantificationsResponse: "rna_quantifications",  # noqa
    SearchExpressionLevelsResponse: "expression_levels",  # noqa
}


def getValueListName(protocolResponseClass):
    """
    Returns the name of the attribute in the specified protocol class
    that is used to hold the values in a search response.

    Raises KeyError if the class is not a known search-response class.
    """
    return _valueListNameMap[protocolResponseClass]


def convertDatetime(t):
    """
    Converts the specified datetime object into its appropriate
    protocol value. This is the number of milliseconds from the epoch.
    """
    epoch = datetime.datetime.utcfromtimestamp(0)
    delta = t - epoch
    millis = delta.total_seconds() * 1000
    return int(millis)


def getValueFromValue(value):
    """
    Extract the currently set field from a Value structure.

    Raises TypeError if value is not a protobuf Value, and
    AttributeError if no field of the Value oneof is set.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # subclasses of Value, which is the conventional type check.
    if not isinstance(value, struct_pb2.Value):
        raise TypeError("Expected a Value, but got {}".format(type(value)))
    if value.WhichOneof("kind") is None:
        raise AttributeError("Nothing set for {}".format(value))
    return getattr(value, value.WhichOneof("kind"))


def toJson(protoObject, indent=None):
    """
    Serialises a protobuf object as json.
    """
    # Using the internal method because this way we can reformat the JSON.
    # NOTE(review): _MessageToJsonObject is a private protobuf API and may
    # change between protobuf releases — pin the protobuf version.
    js = json_format._MessageToJsonObject(protoObject, True)
    return json.dumps(js, indent=indent)


def toJsonDict(protoObject):
    """
    Converts a protobuf object to the raw attributes
    i.e. a key/value dictionary.
    """
    return json.loads(toJson(protoObject))


def fromJson(json, protoClass):
    """
    Deserialise json into an instance of protobuf class.
    """
    return json_format.Parse(json, protoClass())


def validate(json, protoClass):
    """
    Check that json represents data that could be used to make a
    given protobuf class.
    """
    try:
        fromJson(json, protoClass)
        # The json conversion automatically validates
        return True
    except Exception:
        return False


class SearchResponseBuilder(object):
    """
    A class to allow sequential building of SearchResponse objects.
    """
    def __init__(self, responseClass, pageSize, maxBufferSize):
        """
        Allocates a new SearchResponseBuilder for the specified
        responseClass, user-requested pageSize and the system mandated
        maxBufferSize (in bytes). The maxBufferSize is an
        approximate limit on the overall length of the serialised
        response.
        """
        self._responseClass = responseClass
        self._pageSize = pageSize
        self._maxBufferSize = maxBufferSize
        self._numElements = 0
        self._nextPageToken = None
        self._protoObject = responseClass()
        self._valueListName = getValueListName(responseClass)
        # Running total of the serialised size of the elements added so far,
        # seeded with the size of the empty response envelope.
        self._bufferSize = self._protoObject.ByteSize()

    def getPageSize(self):
        """
        Returns the page size for this SearchResponseBuilder. This is the
        user-requested maximum size for the number of elements in the
        value list.
        """
        return self._pageSize

    def getMaxBufferSize(self):
        """
        Returns the maximum internal buffer size for responses, which
        corresponds to total length (in bytes) of the serialised protobuf
        objects. This will always be less than the size of JSON output.
        """
        return self._maxBufferSize

    def getNextPageToken(self):
        """
        Returns the value of the nextPageToken for this
        SearchResponseBuilder.
        """
        return self._nextPageToken

    def setNextPageToken(self, nextPageToken):
        """
        Sets the nextPageToken to the specified value.
        """
        self._nextPageToken = nextPageToken

    def addValue(self, protocolElement):
        """
        Appends the specified protocolElement to the value list for this
        response.
        """
        self._numElements += 1
        self._bufferSize += protocolElement.ByteSize()
        attr = getattr(self._protoObject, self._valueListName)
        obj = attr.add()
        obj.CopyFrom(protocolElement)

    def isFull(self):
        """
        Returns True if the response buffer is full, and False otherwise.
        The buffer is full if either (1) the number of items in the value
        list is >= pageSize or (2) the total length of the serialised
        elements in the page is >= maxBufferSize.

        If page_size or max_response_length were not set in the request
        then they're not checked.
        """
        return (
            (self._pageSize > 0 and self._numElements >= self._pageSize) or
            (self._bufferSize >= self._maxBufferSize)
        )

    def getSerializedResponse(self):
        """
        Returns a string version of the SearchResponse that has
        been built by this SearchResponseBuilder.
        """
        self._protoObject.next_page_token = pb.string(self._nextPageToken)
        s = toJson(self._protoObject)
        return s


def getProtocolClasses(superclass=message.Message):
    """
    Returns all the protocol classes that are subclasses of the
    specified superclass. Only 'leaf' classes are returned,
    corresponding directly to the classes defined in the protocol.
    """
    # We keep a manual list of the superclasses that we define here
    # so we can filter them out when we're getting the protocol
    # classes.
    superclasses = set([message.Message])
    thisModule = modules[__name__]
    subclasses = []
    for name, class_ in inspect.getmembers(thisModule):
        if ((inspect.isclass(class_) and
                issubclass(class_, superclass) and
                class_ not in superclasses)):
            subclasses.append(class_)
    return subclasses


# (url path, request class, response class) triples for every POST
# search endpoint exposed by the protocol.
# Fixed: '/variantannotations/search' previously mapped to
# SearchVariantAnnotationSetsResponse (the *sets* response), and the
# '/datasets/search' and '/callsets/search' entries were listed twice.
postMethods = \
    [('/callsets/search',
      SearchCallSetsRequest,  # noqa
      SearchCallSetsResponse),  # noqa
     ('/datasets/search',
      SearchDatasetsRequest,  # noqa
      SearchDatasetsResponse),  # noqa
     ('/readgroupsets/search',
      SearchReadGroupSetsRequest,  # noqa
      SearchReadGroupSetsResponse),  # noqa
     ('/reads/search',
      SearchReadsRequest,  # noqa
      SearchReadsResponse),  # noqa
     ('/references/search',
      SearchReferencesRequest,  # noqa
      SearchReferencesResponse),  # noqa
     ('/referencesets/search',
      SearchReferenceSetsRequest,  # noqa
      SearchReferenceSetsResponse),  # noqa
     ('/variants/search',
      SearchVariantsRequest,  # noqa
      SearchVariantsResponse),  # noqa
     ('/featuresets/search',
      SearchFeatureSetsRequest,  # noqa
      SearchFeatureSetsResponse),  # noqa
     ('/features/search',
      SearchFeaturesRequest,  # noqa
      SearchFeaturesResponse),  # noqa
     ('/variantsets/search',
      SearchVariantSetsRequest,  # noqa
      SearchVariantSetsResponse),  # noqa
     ('/variantannotations/search',
      SearchVariantAnnotationsRequest,  # noqa
      SearchVariantAnnotationsResponse),  # noqa
     ('/variantannotationsets/search',
      SearchVariantAnnotationSetsRequest,  # noqa
      SearchVariantAnnotationSetsResponse),  # noqa
     ('/rnaquantificationsets/search',
      SearchRnaQuantificationSetsRequest,  # noqa
      SearchRnaQuantificationSetsResponse),  # noqa
     ('/rnaquantifications/search',
      SearchRnaQuantificationsRequest,  # noqa
      SearchRnaQuantificationsResponse),  # noqa
     ('/expressionlevels/search',
      SearchExpressionLevelsRequest,  # noqa
      SearchExpressionLevelsResponse)]  # noqa
ga4ghpoc/server
ga4gh/protocol.py
protocol.py
py
10,280
python
en
code
null
github-code
36
[ { "api_name": "datetime.datetime.utcfromtimestamp", "line_number": 81, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute" }, { "api_name": "google.protobuf.struct_pb2.Value", "line_number": 91, "usage_type": "attribute"...
6519474113
from core_functions import Chain, Reel
from tabulate import tabulate
from colorama import init as colorama_init, Fore


class ChainOutput():
    """Renders a Chain as a single colourised line of text.

    Each value is separated by an edge symbol; values belonging to the
    chain's loop are highlighted in yellow.
    """

    # constant: colorama colours for output
    COLORS = {
        "element": Fore.LIGHTWHITE_EX,
        "element_loop": Fore.LIGHTYELLOW_EX,
        "edge": Fore.LIGHTBLACK_EX,
    }

    # constant: symbols for output
    SYMBOLS = {
        "edge": " ━ "
    }

    def __init__(self, chain: Chain.Chain):
        """
        constructor
        @param chain: Chain to output, unpackable as (values, loop_start_index)

        Builds `self.out`: the chain values joined by edge symbols, with
        values at or after the loop start highlighted in yellow, followed
        by a colour reset.
        """
        # initialise colorama
        colorama_init()

        # calculate chain and loop start index
        array, loop_start = chain

        # build the output via join rather than repeated += (avoids
        # quadratic string concatenation; the unused idx_is_loop list
        # from the original was dead code and has been removed)
        parts = []
        last = len(array) - 1
        for idx, item in enumerate(array):
            # loop values get the highlight colour
            color = (self.COLORS["element_loop"] if idx >= loop_start
                     else self.COLORS["element"])
            parts.append(color + str(item))
            # add edge symbol if not last item
            if idx != last:
                parts.append(self.COLORS["edge"] + self.SYMBOLS["edge"])

        # reset highlight; it's the right thing to do :)
        parts.append(Fore.RESET)
        self.out = "".join(parts)

    def __str__(self):
        """
        string representation of ChainOutput
        @return: out
        """
        return self.out


class ReelOutput():
    """Renders a Reel as a table of (index, chain) rows via tabulate."""

    # constant: colorama colours for output
    COLORS = {
        "separator": Fore.LIGHTBLACK_EX
    }

    # constant: symbols for output
    SYMBOLS = {
        "separator": "┆"
    }

    # constant: table styles for tabulate
    TABLE_STYLE = "plain"

    # constant: custom middle separator for tabulate
    MIDDLE_SEPARATOR = True

    def __init__(self, reel: Reel.Reel):
        """
        constructor
        @param reel: Reel to output; each item exposes .starting_value
                     and .chain
        """
        # initialise colorama
        colorama_init()

        # the separator cell is loop-invariant, so build it once
        separator = (self.COLORS["separator"] + self.SYMBOLS["separator"] +
                     Fore.RESET)

        # prepare table, header row first
        self.array = [["Index", separator, "Chain"]]

        # one row per reel item: starting value | separator | rendered chain
        for item in reel:
            self.array.append([item.starting_value,
                               separator,
                               ChainOutput(item.chain)])

        # remove middle column if MIDDLE_SEPARATOR is False
        if not self.MIDDLE_SEPARATOR:
            for col in self.array:
                col.pop(1)

        # use tabulate to format table
        self.out = tabulate(self.array,
                            tablefmt=self.TABLE_STYLE,
                            headers="firstrow")

    def __str__(self):
        """
        string representation of ReelOutput
        @return: out
        """
        return self.out
jahinzee/FourHasFourLetters
outputs.py
outputs.py
py
2,885
python
en
code
0
github-code
36
[ { "api_name": "colorama.Fore.LIGHTWHITE_EX", "line_number": 9, "usage_type": "attribute" }, { "api_name": "colorama.Fore", "line_number": 9, "usage_type": "name" }, { "api_name": "colorama.Fore.LIGHTYELLOW_EX", "line_number": 10, "usage_type": "attribute" }, { "ap...
39140033433
import argparse
import os
from time import sleep


# === subroutines ===

def collect_files_for_removal(root: str) -> tuple[list[str], list[str]]:
    """Collect the *.class files under `root` and the sub-folders holding them.

    Walks `root` bottom-up so that the returned folder list is safe to
    delete in order (children before parents). `root` itself is excluded
    from the folder list.

    @param root: directory to scan
    @return: (folders, class_files) — both empty if `root` does not exist
    """
    if not os.path.exists(root):
        return ([], [])

    class_files: list[str] = []
    folders: list[str] = []
    # topdown=False yields leaf directories first, so rmdir in list order works
    for dir_path, _dirs, file_names in os.walk(root, topdown=False):
        class_files.extend(
            os.path.join(dir_path, name)
            for name in file_names
            if name.endswith('.class')
        )
        folders.append(dir_path)

    # root is always appended last by the bottom-up walk; keep it
    folders.remove(root)
    return (folders, class_files)


# === CLI and main ===

def main() -> None:
    """Parse the CLI arguments and delete compiled *.class output."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output-dir',
        metavar='DIR',
        default='out',
        help='a directory with *.CLASS files')
    args = parser.parse_args()

    output_dir = os.path.abspath(args.output_dir)

    print('[i] Cleaning output directory:', output_dir)

    dirs, files = collect_files_for_removal(output_dir)

    # files first
    for path in files:
        os.remove(path)

    # then folders (children precede parents, so rmdir succeeds)
    for path in dirs:
        os.rmdir(path)

    print('[i] Done!')
    # brief pause so the final message stays visible when launched from a GUI
    sleep(1)


# guard the entry point so importing this module has no side effects
if __name__ == '__main__':
    main()
vpa-research/jsl-spec-generated
clear.py
clear.py
py
1,122
python
en
code
0
github-code
36
[ { "api_name": "os.path.exists", "line_number": 8, "usage_type": "call" }, { "api_name": "os.path", "line_number": 8, "usage_type": "attribute" }, { "api_name": "os.walk", "line_number": 14, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 1...
26072099672
from PySide2 import QtWidgets
from PySide2.QtCore import Signal


# widget to get input for vector 3 types
class Vector3Widget(QtWidgets.QWidget):
    """Row of three spin boxes for editing an integer 3-component vector."""

    # Signals
    # emitted with the current (x, y, z) tuple after any field is edited
    on_value_changed = Signal(tuple)

    _main_layout = None

    def __init__(self, value=(0, 0, 0)):
        """
        @param value: initial (x, y, z) values for the three fields
        """
        QtWidgets.QWidget.__init__(self)
        self._value = value

        # main layout
        self._main_layout = QtWidgets.QGridLayout()
        self._main_layout.setSpacing(0)
        self.setLayout(self._main_layout)

        # input fields
        self._unit_1_field = None
        self._unit_2_field = None
        self._unit_3_field = None

        self._create_ui()

    def _create_ui(self):
        """Build the three spin boxes and place them on grid row 1."""
        self._unit_1_field = self._make_field(self._value[0], 2)
        self._unit_2_field = self._make_field(self._value[1], 3)
        self._unit_3_field = self._make_field(self._value[2], 4)

    def _make_field(self, initial, column):
        """Create one spin box, wire its signal and add it to the layout."""
        field = QtWidgets.QSpinBox()
        field.setValue(initial)
        field.editingFinished.connect(self._on_field_value_changed)
        self._main_layout.addWidget(field, 1, column)
        return field

    def _on_field_value_changed(self, value=0):
        # re-emit the whole vector whenever any single field changes
        self.on_value_changed.emit(self.get_value())

    def get_value(self):
        """Return the current (x, y, z) tuple from the three fields."""
        return (self._unit_1_field.value(),
                self._unit_2_field.value(),
                self._unit_3_field.value())
JonathanVeit/building_generator
scripts/gui/Vector3Widget.py
Vector3Widget.py
py
1,746
python
en
code
0
github-code
36
[ { "api_name": "PySide2.QtWidgets.QWidget", "line_number": 6, "usage_type": "attribute" }, { "api_name": "PySide2.QtWidgets", "line_number": 6, "usage_type": "name" }, { "api_name": "PySide2.QtCore.Signal", "line_number": 9, "usage_type": "call" }, { "api_name": "P...
36683998868
import os
import json
from typing import List, Optional
from datetime import datetime

import pandas as pd


#############Load config.json and get input and output paths
with open('config.json', 'r') as f:
    config = json.load(f)

input_folder_path = config['input_folder_path']
output_folder_path = config['output_folder_path']
ingestion_record_file_name = config['ingestion_record_file_name']
ingestion_file_name = config['ingestion_file_name']


#############Function for data ingestion
def read_tabular_file(dir_name: str, file_name: str) -> pd.DataFrame:
    """Read a comma-separated file from ``dir_name`` into a DataFrame."""
    return pd.read_csv(os.path.join(dir_name, file_name), sep=',', encoding='utf-8')


def clean_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """Return ``df`` with exact duplicate rows removed."""
    # filter out duplicates
    return df.drop_duplicates()


def save_dataframe(df: pd.DataFrame, output_dir_name: str, file_name: str) -> None:
    """Write ``df`` as a CSV file (no index column) into ``output_dir_name``."""
    df.to_csv(os.path.join(output_dir_name, file_name), sep=',', encoding='utf-8', index=False)


def write_ingested_file_record(file_name: str, file_dir: str, ingested_file_loc: str,
                               ingested_file_name: str, ingested_file_length: int) -> None:
    """Append one tab-separated audit line: timestamp, source dir, file name, row count.

    BUG FIX: the format string previously contained the literal ``(unknown)``
    where ``{filename}`` belongs, so the ``filename=`` keyword argument was
    silently ignored and the ingested file name was never recorded.
    """
    with open(os.path.join(file_dir, file_name), 'a') as record_file:
        record_file.write("{datetime}\t{location}\t{filename}\t{length}\n".format(
            datetime=datetime.now(),
            location=ingested_file_loc,
            filename=ingested_file_name,
            length=ingested_file_length,
        ))


def merge_multiple_dataframe(input_folder_dir: str,
                             output_folder_dir: str,
                             output_file_name: str,
                             record_file_name: str,
                             data_cols: Optional[List[str]] = None) -> None:
    """Compile every dataset in ``input_folder_dir`` into one deduplicated CSV.

    For each file found, an audit record is appended to ``record_file_name``
    inside ``output_folder_dir``; the merged, deduplicated result is written
    to ``output_file_name`` in ``output_folder_dir``.

    Args:
        input_folder_dir: Directory containing the source CSV files.
        output_folder_dir: Directory for the merged output and the audit record.
        output_file_name: File name for the merged CSV.
        record_file_name: File name for the ingestion audit record.
        data_cols: Expected column names; defaults to the standard schema.
    """
    # Avoid a mutable default argument; substitute the standard schema lazily.
    if data_cols is None:
        data_cols = ["corporation", "lastmonth_activity", "lastyear_activity",
                     "number_of_employees", "exited"]

    # check for datasets, compile them together, and write to an output file
    file_name_ls = os.listdir(input_folder_dir)

    # Seed with an empty frame so the schema is preserved even with no inputs.
    # Collect frames in a list and concat once: DataFrame.append was removed
    # in pandas 2.0 and the old per-iteration append was quadratic anyway.
    frames = [pd.DataFrame(columns=data_cols)]
    for file_name in file_name_ls:
        df = read_tabular_file(input_folder_dir, file_name)
        frames.append(df)
        write_ingested_file_record(record_file_name, output_folder_dir,
                                   input_folder_dir, file_name, len(df))

    merged = clean_dataframe(pd.concat(frames, ignore_index=True))

    # BUG FIX: previously saved to the global output_folder_path, silently
    # ignoring the output_folder_dir parameter.
    save_dataframe(merged, output_folder_dir, output_file_name)


if __name__ == '__main__':
    merge_multiple_dataframe(input_folder_path, output_folder_path,
                             ingestion_file_name, ingestion_record_file_name)
wonyoungseo/ex-risk-assessment-ml-model-deployment-monitoring-system
ingestion.py
ingestion.py
py
2,461
python
en
code
0
github-code
36
[ { "api_name": "json.load", "line_number": 12, "usage_type": "call" }, { "api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 23, "usage_type": "call" }, { "api_name": "os.path", "line_number": 2...
73521408425
import numpy as np
import pandas as pd
import os
import cv2
import re
import torch
import torchvision
from torchvision import transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.sampler import SequentialSampler
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from tqdm import tqdm


class TurbineDataset(Dataset):
    """Custom PyTorch dataset for turbine image data (object detection).

    Args:
        dataframe (pandas.DataFrame): One row per annotation. Must contain an
            'image' column (image file path) and, for training, the bounding
            box columns 'minx', 'miny', 'maxx', 'maxy'.
        transforms (callable, optional): Transform applied to each image.
        train (bool, optional): True for training (returns image, target,
            image_id); False for inference (returns image, image_id).
    """

    def __init__(self, dataframe, transforms=None, train=True):
        super().__init__()

        self.image_ids = dataframe['image'].unique()
        self.df = dataframe
        self.transforms = transforms
        self.train = train

    def __len__(self) -> int:
        # One sample per unique image, not per annotation row.
        return self.image_ids.shape[0]

    def __getitem__(self, index: int):
        """Return (image, target, image_id) when training, else (image, image_id)."""
        image_id = self.image_ids[index]

        # Load as RGB float32 scaled to [0, 1] (cv2 reads BGR uint8).
        image = cv2.imread(image_id, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0

        if self.transforms is not None:
            image = self.transforms(image)

        if self.train is False:
            return image, image_id

        # All annotation rows belonging to this image.
        records = self.df[self.df['image'] == image_id]

        boxes = torch.as_tensor(records[['minx', 'miny', 'maxx', 'maxy']].values,
                                dtype=torch.float32)
        # Box area: (maxy - miny) * (maxx - minx).
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        area = torch.as_tensor(area, dtype=torch.float32)

        # Single foreground class (turbine); no crowd annotations.
        labels = torch.ones((records.shape[0],), dtype=torch.int64)
        iscrowd = torch.zeros((records.shape[0],), dtype=torch.int64)

        target = {
            'boxes': boxes,
            'labels': labels,
            'image_id': torch.tensor([index]),
            'area': area,
            'iscrowd': iscrowd,
        }

        return image, target, image_id


class Averager:
    """Running-mean accumulator, e.g. for the average loss over an epoch.

    Example:
        avg = Averager()
        avg.send(2.0); avg.send(3.0)
        avg.value  # 2.5
        avg.reset()
    """

    def __init__(self):
        self.current_total = 0.0
        self.iterations = 0.0

    def send(self, value):
        """Add ``value`` to the running total and bump the iteration count."""
        self.current_total += value
        self.iterations += 1

    @property
    def value(self):
        """Mean of all sent values, or 0 if nothing has been sent (avoids /0)."""
        if self.iterations == 0:
            return 0
        return 1.0 * self.current_total / self.iterations

    def reset(self):
        """Discard all accumulated values."""
        self.current_total = 0.0
        self.iterations = 0.0


def collate_fn(batch):
    """Collate detection samples into per-field tuples.

    E.g. [(img1, t1), (img2, t2)] -> ((img1, img2), (t1, t2)). Detection
    targets vary in size per image, so they cannot be stacked into tensors;
    torchvision detection models accept this tuple-of-tuples form.
    """
    return tuple(zip(*batch))


def prepare_batches_for_training(folds: dict, selected_data: pd.DataFrame,
                                 number_of_fold: int):
    """Build the train/test DataLoaders for one cross-validation fold.

    BUG FIXES vs. the original:
    - The ``selected_data`` annotation referenced ``gdp.GeoDataFrame`` but
      ``gdp`` was never imported, raising NameError when the module loaded;
      only pandas-style indexing is used here, so annotate pd.DataFrame.
    - The constructed DataLoaders were never returned (and an unused
      ``torch.randperm`` permutation was computed and discarded).

    Args:
        folds: Mapping fold index -> {'train': [...], 'test': [...]} of image
            identifiers (assumed to match the 'image' column — TODO confirm
            against the fold-building node).
        selected_data: Annotation table with an 'image' column.
        number_of_fold: Which fold to prepare.

    Returns:
        tuple: (train_data_loader, test_data_loader).
    """
    trans = transforms.Compose([transforms.ToTensor()])

    train_df = selected_data[selected_data['image'].isin(folds[number_of_fold]['train'])]
    test_df = selected_data[selected_data['image'].isin(folds[number_of_fold]['test'])]

    train_dataset = TurbineDataset(train_df, trans, True)
    test_dataset = TurbineDataset(test_df, trans, True)

    train_data_loader = DataLoader(
        train_dataset,
        batch_size=16,
        shuffle=False,
        num_workers=4,
        collate_fn=collate_fn
    )

    test_data_loader = DataLoader(
        test_dataset,
        batch_size=8,
        shuffle=False,
        num_workers=4,
        collate_fn=collate_fn
    )

    return train_data_loader, test_data_loader
fparaggio/wind-turbine-detector
src/wind_turbine_detector/pipelines/train/nodes.py
nodes.py
py
7,569
python
en
code
0
github-code
36
[ { "api_name": "torch.utils.data.Dataset", "line_number": 20, "usage_type": "name" }, { "api_name": "torchvision.transforms", "line_number": 72, "usage_type": "name" }, { "api_name": "cv2.imread", "line_number": 80, "usage_type": "call" }, { "api_name": "cv2.IMREAD...